Diffstat (limited to 'meta/classes')
-rw-r--r--  meta/classes/allarch.bbclass | 30
-rw-r--r--  meta/classes/archiver.bbclass | 267
-rw-r--r--  meta/classes/autotools.bbclass | 162
-rw-r--r--  meta/classes/base.bbclass | 377
-rw-r--r--  meta/classes/bin_package.bbclass | 5
-rw-r--r--  meta/classes/binconfig-disabled.bbclass | 1
-rw-r--r--  meta/classes/binconfig.bbclass | 21
-rw-r--r--  meta/classes/blacklist.bbclass | 29
-rw-r--r--  meta/classes/bluetooth.bbclass | 14
-rw-r--r--  meta/classes/bugzilla.bbclass | 187
-rw-r--r--  meta/classes/buildhistory.bbclass | 327
-rw-r--r--  meta/classes/buildstats-summary.bbclass | 2
-rw-r--r--  meta/classes/buildstats.bbclass | 56
-rw-r--r--  meta/classes/ccache.bbclass | 70
-rw-r--r--  meta/classes/ccmake.bbclass | 97
-rw-r--r--  meta/classes/chrpath.bbclass | 37
-rw-r--r--  meta/classes/clutter.bbclass | 14
-rw-r--r--  meta/classes/cmake.bbclass | 110
-rw-r--r--  meta/classes/cml1.bbclass | 11
-rw-r--r--  meta/classes/compress_doc.bbclass | 51
-rw-r--r--  meta/classes/copyleft_compliance.bbclass | 10
-rw-r--r--  meta/classes/copyleft_filter.bbclass | 40
-rw-r--r--  meta/classes/core-image.bbclass | 5
-rw-r--r--  meta/classes/cpan-base.bbclass | 30
-rw-r--r--  meta/classes/cpan.bbclass | 24
-rw-r--r--  meta/classes/cpan_build.bbclass | 9
-rw-r--r--  meta/classes/cross-canadian.bbclass | 49
-rw-r--r--  meta/classes/cross.bbclass | 27
-rw-r--r--  meta/classes/crosssdk.bbclass | 16
-rw-r--r--  meta/classes/cve-check.bbclass | 271
-rw-r--r--  meta/classes/debian.bbclass | 63
-rw-r--r--  meta/classes/deploy.bbclass | 2
-rw-r--r--  meta/classes/devicetree.bbclass | 148
-rw-r--r--  meta/classes/devshell.bbclass | 18
-rw-r--r--  meta/classes/devtool-source.bbclass | 233
-rw-r--r--  meta/classes/devupstream.bbclass | 48
-rw-r--r--  meta/classes/distro_features_check.bbclass | 40
-rw-r--r--  meta/classes/distrodata.bbclass | 479
-rw-r--r--  meta/classes/distrooverrides.bbclass | 32
-rw-r--r--  meta/classes/distutils-base.bbclass | 2
-rw-r--r--  meta/classes/distutils-common-base.bbclass | 12
-rw-r--r--  meta/classes/distutils-tools.bbclass | 73
-rw-r--r--  meta/classes/distutils.bbclass | 61
-rw-r--r--  meta/classes/distutils3-base.bbclass | 2
-rw-r--r--  meta/classes/distutils3.bbclass | 53
-rw-r--r--  meta/classes/dos2unix.bbclass | 14
-rw-r--r--  meta/classes/externalsrc.bbclass | 131
-rw-r--r--  meta/classes/extrausers.bbclass | 23
-rw-r--r--  meta/classes/features_check.bbclass | 85
-rw-r--r--  meta/classes/fontcache.bbclass | 19
-rw-r--r--  meta/classes/fs-uuid.bbclass | 4
-rw-r--r--  meta/classes/gconf.bbclass | 17
-rw-r--r--  meta/classes/gettext.bbclass | 19
-rw-r--r--  meta/classes/gio-module-cache.bbclass | 13
-rw-r--r--  meta/classes/glide.bbclass | 9
-rw-r--r--  meta/classes/gnome.bbclass | 1
-rw-r--r--  meta/classes/gnomebase.bbclass | 8
-rw-r--r--  meta/classes/go-ptest.bbclass | 54
-rw-r--r--  meta/classes/go.bbclass | 154
-rw-r--r--  meta/classes/goarch.bbclass | 118
-rw-r--r--  meta/classes/gobject-introspection.bbclass | 20
-rw-r--r--  meta/classes/godep.bbclass | 8
-rw-r--r--  meta/classes/grub-efi-cfg.bbclass | 116
-rw-r--r--  meta/classes/grub-efi.bbclass | 156
-rw-r--r--  meta/classes/gsettings.bbclass | 47
-rw-r--r--  meta/classes/gtk-doc.bbclass | 43
-rw-r--r--  meta/classes/gtk-icon-cache.bbclass | 22
-rw-r--r--  meta/classes/gtk-immodules-cache.bbclass | 84
-rw-r--r--  meta/classes/gummiboot.bbclass | 121
-rw-r--r--  meta/classes/gzipnative.bbclass | 5
-rw-r--r--  meta/classes/icecc.bbclass | 296
-rw-r--r--  meta/classes/image-buildinfo.bbclass | 31
-rw-r--r--  meta/classes/image-combined-dbg.bbclass | 9
-rw-r--r--  meta/classes/image-container.bbclass | 21
-rw-r--r--  meta/classes/image-live.bbclass | 64
-rw-r--r--  meta/classes/image-mklibs.bbclass | 2
-rw-r--r--  meta/classes/image-postinst-intercepts.bbclass | 23
-rw-r--r--  meta/classes/image-prelink.bbclass | 16
-rw-r--r--  meta/classes/image-vm.bbclass | 179
-rw-r--r--  meta/classes/image.bbclass | 353
-rw-r--r--  meta/classes/image_types.bbclass | 263
-rw-r--r--  meta/classes/image_types_uboot.bbclass | 26
-rw-r--r--  meta/classes/image_types_wic.bbclass | 142
-rw-r--r--  meta/classes/insane.bbclass | 862
-rw-r--r--  meta/classes/kernel-arch.bbclass | 20
-rw-r--r--  meta/classes/kernel-artifact-names.bbclass | 18
-rw-r--r--  meta/classes/kernel-devicetree.bbclass | 95
-rw-r--r--  meta/classes/kernel-fitimage.bbclass | 242
-rw-r--r--  meta/classes/kernel-grub.bbclass | 2
-rw-r--r--  meta/classes/kernel-module-split.bbclass | 79
-rw-r--r--  meta/classes/kernel-uboot.bbclass | 10
-rw-r--r--  meta/classes/kernel-uimage.bbclass | 36
-rw-r--r--  meta/classes/kernel-yocto.bbclass | 144
-rw-r--r--  meta/classes/kernel.bbclass | 421
-rw-r--r--  meta/classes/kernelsrc.bbclass | 6
-rw-r--r--  meta/classes/libc-common.bbclass | 14
-rw-r--r--  meta/classes/libc-package.bbclass | 194
-rw-r--r--  meta/classes/license.bbclass | 506
-rw-r--r--  meta/classes/license_image.bbclass | 256
-rw-r--r--  meta/classes/linux-kernel-base.bbclass | 4
-rw-r--r--  meta/classes/linuxloader.bbclass | 87
-rw-r--r--  meta/classes/live-vm-common.bbclass | 50
-rw-r--r--  meta/classes/logging.bbclass | 2
-rw-r--r--  meta/classes/manpages.bbclass | 37
-rw-r--r--  meta/classes/mcextend.bbclass | 16
-rw-r--r--  meta/classes/meson.bbclass | 161
-rw-r--r--  meta/classes/metadata_scm.bbclass | 46
-rw-r--r--  meta/classes/migrate_localcount.bbclass | 12
-rw-r--r--  meta/classes/mime.bbclass | 17
-rw-r--r--  meta/classes/mirrors.bbclass | 68
-rw-r--r--  meta/classes/module-base.bbclass | 14
-rw-r--r--  meta/classes/module.bbclass | 47
-rw-r--r--  meta/classes/multilib.bbclass | 144
-rw-r--r--  meta/classes/multilib_global.bbclass | 76
-rw-r--r--  meta/classes/multilib_header.bbclass | 12
-rw-r--r--  meta/classes/multilib_script.bbclass | 34
-rw-r--r--  meta/classes/native.bbclass | 52
-rw-r--r--  meta/classes/nativesdk.bbclass | 34
-rw-r--r--  meta/classes/npm.bbclass | 51
-rw-r--r--  meta/classes/oelint.bbclass | 4
-rw-r--r--  meta/classes/own-mirrors.bbclass | 26
-rw-r--r--  meta/classes/package.bbclass | 1026
-rw-r--r--  meta/classes/package_deb.bbclass | 210
-rw-r--r--  meta/classes/package_ipk.bbclass | 177
-rw-r--r--  meta/classes/package_pkgdata.bbclass | 167
-rw-r--r--  meta/classes/package_rpm.bbclass | 303
-rw-r--r--  meta/classes/package_tar.bbclass | 22
-rw-r--r--  meta/classes/packagedata.bbclass | 10
-rw-r--r--  meta/classes/packagefeed-stability.bbclass | 24
-rw-r--r--  meta/classes/packagegroup.bbclass | 29
-rw-r--r--  meta/classes/patch.bbclass | 174
-rw-r--r--  meta/classes/perl-version.bbclass | 66
-rw-r--r--  meta/classes/pixbufcache.bbclass | 38
-rw-r--r--  meta/classes/podfix.bbclass | 35
-rw-r--r--  meta/classes/populate_sdk_base.bbclass | 147
-rw-r--r--  meta/classes/populate_sdk_ext.bbclass | 376
-rw-r--r--  meta/classes/prexport.bbclass | 4
-rw-r--r--  meta/classes/ptest-perl.bbclass | 30
-rw-r--r--  meta/classes/ptest.bbclass | 56
-rw-r--r--  meta/classes/pypi.bbclass | 26
-rw-r--r--  meta/classes/python3-dir.bbclass | 2
-rw-r--r--  meta/classes/python3native.bbclass | 21
-rw-r--r--  meta/classes/pythonnative.bbclass | 17
-rw-r--r--  meta/classes/qemu.bbclass | 10
-rw-r--r--  meta/classes/qemuboot.bbclass | 93
-rw-r--r--  meta/classes/recipe_sanity.bbclass | 37
-rw-r--r--  meta/classes/relative_symlinks.bbclass | 5
-rw-r--r--  meta/classes/relocatable.bbclass | 13
-rw-r--r--  meta/classes/report-error.bbclass | 52
-rw-r--r--  meta/classes/reproducible_build.bbclass | 179
-rw-r--r--  meta/classes/reproducible_build_simple.bbclass | 11
-rw-r--r--  meta/classes/rm_work.bbclass | 170
-rw-r--r--  meta/classes/rm_work_and_downloads.bbclass | 33
-rw-r--r--  meta/classes/rootfs-postcommands.bbclass | 182
-rw-r--r--  meta/classes/rootfs_deb.bbclass | 11
-rw-r--r--  meta/classes/rootfs_ipk.bbclass | 10
-rw-r--r--  meta/classes/rootfs_rpm.bbclass | 30
-rw-r--r--  meta/classes/rootfsdebugfiles.bbclass | 7
-rw-r--r--  meta/classes/sanity.bbclass | 397
-rw-r--r--  meta/classes/scons.bbclass | 26
-rw-r--r--  meta/classes/setuptools.bbclass | 7
-rw-r--r--  meta/classes/setuptools3.bbclass | 4
-rw-r--r--  meta/classes/sign_ipk.bbclass | 12
-rw-r--r--  meta/classes/sign_package_feed.bbclass | 9
-rw-r--r--  meta/classes/sign_rpm.bbclass | 44
-rw-r--r--  meta/classes/siteconfig.bbclass | 18
-rw-r--r--  meta/classes/siteinfo.bbclass | 73
-rw-r--r--  meta/classes/spdx.bbclass | 53
-rw-r--r--  meta/classes/sstate.bbclass | 691
-rw-r--r--  meta/classes/staging.bbclass | 637
-rw-r--r--  meta/classes/syslinux.bbclass | 51
-rw-r--r--  meta/classes/systemd-boot-cfg.bbclass | 71
-rw-r--r--  meta/classes/systemd-boot.bbclass | 103
-rw-r--r--  meta/classes/systemd.bbclass | 115
-rw-r--r--  meta/classes/terminal.bbclass | 26
-rw-r--r--  meta/classes/testexport.bbclass | 238
-rw-r--r--  meta/classes/testimage-auto.bbclass | 23
-rw-r--r--  meta/classes/testimage.bbclass | 405
-rw-r--r--  meta/classes/testsdk.bbclass | 158
-rw-r--r--  meta/classes/texinfo.bbclass | 11
-rw-r--r--  meta/classes/tinderclient.bbclass | 368
-rw-r--r--  meta/classes/toaster.bbclass | 76
-rw-r--r--  meta/classes/toolchain-scripts.bbclass | 66
-rw-r--r--  meta/classes/typecheck.bbclass | 2
-rw-r--r--  meta/classes/uboot-config.bbclass | 29
-rw-r--r--  meta/classes/uboot-extlinux-config.bbclass | 157
-rw-r--r--  meta/classes/uboot-sign.bbclass | 126
-rw-r--r--  meta/classes/uninative.bbclass | 92
-rw-r--r--  meta/classes/update-alternatives.bbclass | 186
-rw-r--r--  meta/classes/update-rc.d.bbclass | 74
-rw-r--r--  meta/classes/upstream-version-is-even.bbclass | 2
-rw-r--r--  meta/classes/useradd-staticids.bbclass | 236
-rw-r--r--  meta/classes/useradd.bbclass | 123
-rw-r--r--  meta/classes/useradd_base.bbclass | 22
-rw-r--r--  meta/classes/utility-tasks.bbclass | 27
-rw-r--r--  meta/classes/utils.bbclass | 144
-rw-r--r--  meta/classes/waf.bbclass | 75
-rw-r--r--  meta/classes/xmlcatalog.bbclass | 26
198 files changed, 11109 insertions(+), 7927 deletions(-)
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
index 208cde6e5e..5bd5c44a27 100644
--- a/meta/classes/allarch.bbclass
+++ b/meta/classes/allarch.bbclass
@@ -2,21 +2,29 @@
# This class is used for architecture independent recipes/data files (usually scripts)
#
-# Expand STAGING_DIR_HOST since for cross-canadian/native/nativesdk, this will
-# point elsewhere after these changes.
-STAGING_DIR_HOST := "${STAGING_DIR_HOST}"
+python allarch_package_arch_handler () {
+ if bb.data.inherits_class("native", d) or bb.data.inherits_class("nativesdk", d) \
+ or bb.data.inherits_class("crosssdk", d):
+ return
-PACKAGE_ARCH = "all"
+ variants = d.getVar("MULTILIB_VARIANTS")
+ if not variants:
+ d.setVar("PACKAGE_ARCH", "all" )
+}
+
+addhandler allarch_package_arch_handler
+allarch_package_arch_handler[eventmask] = "bb.event.RecipePreFinalise"
python () {
# Allow this class to be included but overridden - only set
# the values if we're still "all" package arch.
- if d.getVar("PACKAGE_ARCH", True) == "all":
+ if d.getVar("PACKAGE_ARCH") == "all":
# No need for virtual/libc or a cross compiler
d.setVar("INHIBIT_DEFAULT_DEPS","1")
# Set these to a common set of values; we shouldn't be using them other than for WORKDIR directory
# naming anyway
+ d.setVar("baselib", "lib")
d.setVar("TARGET_ARCH", "allarch")
d.setVar("TARGET_OS", "linux")
d.setVar("TARGET_CC_ARCH", "none")
@@ -24,13 +32,16 @@ python () {
d.setVar("TARGET_AS_ARCH", "none")
d.setVar("TARGET_FPU", "")
d.setVar("TARGET_PREFIX", "")
- d.setVar("PACKAGE_EXTRA_ARCHS", "")
+ # Expand PACKAGE_EXTRA_ARCHS since the staging code needs this
+ # (this removes any dependencies from the hash perspective)
+ d.setVar("PACKAGE_EXTRA_ARCHS", d.getVar("PACKAGE_EXTRA_ARCHS"))
d.setVar("SDK_ARCH", "none")
d.setVar("SDK_CC_ARCH", "none")
d.setVar("TARGET_CPPFLAGS", "none")
d.setVar("TARGET_CFLAGS", "none")
d.setVar("TARGET_CXXFLAGS", "none")
d.setVar("TARGET_LDFLAGS", "none")
+ d.setVar("POPULATESYSROOTDEPS", "")
# Avoid this being unnecessarily different due to nuances of
# the target machine that aren't important for "all" arch
@@ -41,7 +52,12 @@ python () {
d.setVar("EXCLUDE_FROM_SHLIBS", "1")
d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
d.setVar("INHIBIT_PACKAGE_STRIP", "1")
+
+ # These multilib values shouldn't change allarch packages so exclude them
+ d.appendVarFlag("emit_pkgdata", "vardepsexclude", " MULTILIB_VARIANTS")
+ d.appendVarFlag("write_specfile", "vardepsexclude", " MULTILIBS")
+ d.appendVarFlag("do_package", "vardepsexclude", " package_do_shlibs")
elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
- bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE", True))
+ bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
}
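
The hunk above replaces a parse-time PACKAGE_ARCH assignment with an event handler, the standard BitBake idiom for code that must run once per recipe just before finalisation. A minimal sketch of that idiom, assuming a hypothetical handler name and condition variable, could look like:

python example_arch_handler () {
    # Fires once per recipe for each event class named in [eventmask];
    # 'd' is the recipe datastore, 'e' the event object.
    if d.getVar("EXAMPLE_CONDITION"):    # EXAMPLE_CONDITION is illustrative only
        d.setVar("PACKAGE_ARCH", "all")
}
addhandler example_arch_handler
example_arch_handler[eventmask] = "bb.event.RecipePreFinalise"

Running at RecipePreFinalise rather than at parse time lets class and multilib processing act first, which appears to be why the real handler above bails out for native, nativesdk and crosssdk recipes.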
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index 1d8e863bdc..7c46cff91f 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -23,9 +23,6 @@
# COPYLEFT_RECIPE_TYPES = 'target'
#
-# Don't filter the license by default
-COPYLEFT_LICENSE_INCLUDE ?= ''
-COPYLEFT_LICENSE_EXCLUDE ?= ''
# Create archive for all the recipe types
COPYLEFT_RECIPE_TYPES ?= 'target native nativesdk cross crosssdk cross-canadian'
inherit copyleft_filter
@@ -40,22 +37,24 @@ ARCHIVER_MODE[recipe] ?= "0"
DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
ARCHIVER_TOPDIR ?= "${WORKDIR}/deploy-sources"
ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_RPMTOPDIR ?= "${WORKDIR}/deploy-sources-rpm"
+ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${TARGET_SYS}/${PF}/"
ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
+
do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"
do_deploy_archives[dirs] = "${WORKDIR}"
-do_deploy_all_archives[dirs] = "${WORKDIR}"
# This is a convenience for the shell script to use
python () {
- pn = d.getVar('PN', True)
- assume_provided = (d.getVar("ASSUME_PROVIDED", True) or "").split()
+ pn = d.getVar('PN')
+ assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
if pn in assume_provided:
- for p in d.getVar("PROVIDES", True).split():
+ for p in d.getVar("PROVIDES").split():
if p != pn:
pn = p
break
@@ -67,18 +66,32 @@ python () {
else:
bb.debug(1, 'archiver: %s is included: %s' % (pn, reason))
+
+ # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
+ # so avoid archiving source here.
+ if pn.startswith('glibc-locale'):
+ return
+
# We just archive gcc-source for all the gcc related recipes
- if d.getVar('BPN', True) in ['gcc', 'libgcc'] \
+ if d.getVar('BPN') in ['gcc', 'libgcc'] \
and not pn.startswith('gcc-source'):
bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
return
- ar_src = d.getVarFlag('ARCHIVER_MODE', 'src', True)
- ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata', True)
- ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe', True)
+ def hasTask(task):
+ return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
+
+ ar_src = d.getVarFlag('ARCHIVER_MODE', 'src')
+ ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata')
+ ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe')
if ar_src == "original":
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_original' % pn)
+ # 'patched' and 'configured' invoke do_unpack_and_patch because
+ # do_ar_patched resp. do_ar_configured depend on it, but for 'original'
+ # we have to add it explicitly.
+ if d.getVarFlag('ARCHIVER_MODE', 'diff') == '1':
+ d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_unpack_and_patch' % pn)
elif ar_src == "patched":
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn)
elif ar_src == "configured":
@@ -88,9 +101,9 @@ python () {
# There is a corner case with "gcc-source-${PV}" recipes, they don't have
# the "do_configure" task, so we need to use "do_preconfigure"
- if pn.startswith("gcc-source-"):
+ if hasTask("do_preconfigure"):
d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_preconfigure' % pn)
- else:
+ elif hasTask("do_configure"):
d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn)
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_configured' % pn)
@@ -103,11 +116,17 @@ python () {
if ar_recipe == "1":
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_recipe' % pn)
- # Output the srpm package
- ar_srpm = d.getVarFlag('ARCHIVER_MODE', 'srpm', True)
- if ar_srpm == "1":
- if d.getVar('PACKAGES', True) != '' and d.getVar('IMAGE_PKGTYPE', True) == 'rpm':
+ # Output the SRPM package
+ if d.getVarFlag('ARCHIVER_MODE', 'srpm') == "1" and d.getVar('PACKAGES'):
+ if "package_rpm" not in d.getVar('PACKAGE_CLASSES'):
+ bb.fatal("ARCHIVER_MODE[srpm] needs package_rpm in PACKAGE_CLASSES")
+
+ # Some recipes do not have any packaging tasks
+ if hasTask("do_package_write_rpm"):
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_package_write_rpm' % pn)
+ d.appendVarFlag('do_package_write_rpm', 'dirs', ' ${ARCHIVER_RPMTOPDIR}')
+ d.appendVarFlag('do_package_write_rpm', 'sstate-inputdirs', ' ${ARCHIVER_RPMTOPDIR}')
+ d.appendVarFlag('do_package_write_rpm', 'sstate-outputdirs', ' ${DEPLOY_DIR_SRC}')
if ar_dumpdata == "1":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_dumpdata' % pn)
if ar_recipe == "1":
@@ -125,14 +144,14 @@ python () {
# (e.g. git repositories) is "unpacked" and then put into a tarball.
python do_ar_original() {
- import shutil, tarfile, tempfile
+ import shutil, tempfile
- if d.getVarFlag('ARCHIVER_MODE', 'src', True) != "original":
+ if d.getVarFlag('ARCHIVER_MODE', 'src') != "original":
return
- ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR')
bb.note('Archiving the original source...')
- urls = d.getVar("SRC_URI", True).split()
+ urls = d.getVar("SRC_URI").split()
# destsuffix (git fetcher) and subdir (everything else) are allowed to be
# absolute paths (for example, destsuffix=${S}/foobar).
# That messes with unpacking inside our tmpdir below, because the fetchers
@@ -157,7 +176,7 @@ python do_ar_original() {
if os.path.isfile(local):
shutil.copy(local, ar_outdir)
elif os.path.isdir(local):
- tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR', True))
+ tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR'))
fetch.unpack(tmpdir, (url,))
# To handle recipes with more than one source, we add the "name"
# URL parameter as suffix. We treat it as an error when
@@ -166,12 +185,18 @@ python do_ar_original() {
# to be set when using the git fetcher, otherwise SRCREV cannot
# be set separately for each URL.
params = bb.fetch2.decodeurl(url)[5]
+ type = bb.fetch2.decodeurl(url)[0]
+ location = bb.fetch2.decodeurl(url)[2]
name = params.get('name', '')
- if name in tarball_suffix:
- if not name:
- bb.fatal("Cannot determine archive names for original source because 'name' URL parameter is unset in more than one URL. Add it to at least one of these: %s %s" % (tarball_suffix[name], url))
- else:
- bb.fatal("Cannot determine archive names for original source because 'name=' URL parameter '%s' is used twice. Make it unique in: %s %s" % (tarball_suffix[name], url))
+ if type.lower() == 'file':
+ name_tmp = location.rstrip("*").rstrip("/")
+ name = os.path.basename(name_tmp)
+ else:
+ if name in tarball_suffix:
+ if not name:
+ bb.fatal("Cannot determine archive names for original source because 'name' URL parameter is unset in more than one URL. Add it to at least one of these: %s %s" % (tarball_suffix[name], url))
+ else:
+ bb.fatal("Cannot determine archive names for original source because 'name=' URL parameter '%s' is used twice. Make it unique in: %s %s" % (tarball_suffix[name], url))
tarball_suffix[name] = url
create_tarball(d, tmpdir + '/.', name, ar_outdir)
@@ -191,28 +216,35 @@ python do_ar_original() {
python do_ar_patched() {
- if d.getVarFlag('ARCHIVER_MODE', 'src', True) != 'patched':
+ if d.getVarFlag('ARCHIVER_MODE', 'src') != 'patched':
return
# Get the ARCHIVER_OUTDIR before we reset the WORKDIR
- ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
- ar_workdir = d.getVar('ARCHIVER_WORKDIR', True)
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR')
+ if not is_work_shared(d):
+ ar_workdir = d.getVar('ARCHIVER_WORKDIR')
+ d.setVar('WORKDIR', ar_workdir)
bb.note('Archiving the patched source...')
- d.setVar('WORKDIR', ar_workdir)
- create_tarball(d, d.getVar('S', True), 'patched', ar_outdir)
+ create_tarball(d, d.getVar('S'), 'patched', ar_outdir)
}
python do_ar_configured() {
import shutil
- ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
- if d.getVarFlag('ARCHIVER_MODE', 'src', True) == 'configured':
+ # Forcibly expand the sysroot paths as we're about to change WORKDIR
+ d.setVar('STAGING_DIR_HOST', d.getVar('STAGING_DIR_HOST'))
+ d.setVar('STAGING_DIR_TARGET', d.getVar('STAGING_DIR_TARGET'))
+ d.setVar('RECIPE_SYSROOT', d.getVar('RECIPE_SYSROOT'))
+ d.setVar('RECIPE_SYSROOT_NATIVE', d.getVar('RECIPE_SYSROOT_NATIVE'))
+
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR')
+ if d.getVarFlag('ARCHIVER_MODE', 'src') == 'configured':
bb.note('Archiving the configured source...')
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
# "gcc-source-${PV}" recipes don't have "do_configure"
# task, so we need to run "do_preconfigure" instead
if pn.startswith("gcc-source-"):
- d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
+ d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR'))
bb.build.exec_func('do_preconfigure', d)
# The libtool-native's do_configure will remove the
@@ -220,30 +252,43 @@ python do_ar_configured() {
# do_configure; we archive the already configured ${S}
# instead.
elif pn != 'libtool-native':
+ def runTask(task):
+ prefuncs = d.getVarFlag(task, 'prefuncs') or ''
+ for func in prefuncs.split():
+ if func != "sysroot_cleansstate":
+ bb.build.exec_func(func, d)
+ bb.build.exec_func(task, d)
+ postfuncs = d.getVarFlag(task, 'postfuncs') or ''
+ for func in postfuncs.split():
+ if func != 'do_qa_configure':
+ bb.build.exec_func(func, d)
+
# Change the WORKDIR to make do_configure run in another dir.
- d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
- if bb.data.inherits_class('kernel-yocto', d):
- bb.build.exec_func('do_kernel_configme', d)
- if bb.data.inherits_class('cmake', d):
- bb.build.exec_func('do_generate_toolchain_file', d)
- prefuncs = d.getVarFlag('do_configure', 'prefuncs', True)
- for func in (prefuncs or '').split():
- if func != "sysroot_cleansstate":
- bb.build.exec_func(func, d)
- bb.build.exec_func('do_configure', d)
- postfuncs = d.getVarFlag('do_configure', 'postfuncs', True)
- for func in (postfuncs or '').split():
- if func != "do_qa_configure":
- bb.build.exec_func(func, d)
- srcdir = d.getVar('S', True)
- builddir = d.getVar('B', True)
+ d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR'))
+
+ preceeds = bb.build.preceedtask('do_configure', False, d)
+ for task in preceeds:
+ if task != 'do_patch' and task != 'do_prepare_recipe_sysroot':
+ runTask(task)
+ runTask('do_configure')
+
+ srcdir = d.getVar('S')
+ builddir = d.getVar('B')
if srcdir != builddir:
if os.path.exists(builddir):
oe.path.copytree(builddir, os.path.join(srcdir, \
- 'build.%s.ar_configured' % d.getVar('PF', True)))
+ 'build.%s.ar_configured' % d.getVar('PF')))
create_tarball(d, srcdir, 'configured', ar_outdir)
}
+def exclude_useless_paths(tarinfo):
+ if tarinfo.isdir():
+ if tarinfo.name.endswith('/temp') or tarinfo.name.endswith('/patches') or tarinfo.name.endswith('/.pc'):
+ return None
+ elif tarinfo.name == 'temp' or tarinfo.name == 'patches' or tarinfo.name == '.pc':
+ return None
+ return tarinfo
+
def create_tarball(d, srcdir, suffix, ar_outdir):
"""
create the tarball from srcdir
@@ -251,23 +296,24 @@ def create_tarball(d, srcdir, suffix, ar_outdir):
import tarfile
# Make sure we are only creating a single tarball for gcc sources
- if (d.getVar('SRC_URI', True) == ""):
+ if (d.getVar('SRC_URI') == ""):
return
+ # For the kernel archive, srcdir may just be a link to the
+ # work-shared location. Use os.path.realpath to make sure
+ # that we archive the actual directory and not just the link.
+ srcdir = os.path.realpath(srcdir)
+
bb.utils.mkdirhier(ar_outdir)
if suffix:
- filename = '%s-%s.tar.gz' % (d.getVar('PF', True), suffix)
+ filename = '%s-%s.tar.gz' % (d.getVar('PF'), suffix)
else:
- filename = '%s.tar.gz' % d.getVar('PF', True)
+ filename = '%s.tar.gz' % d.getVar('PF')
tarname = os.path.join(ar_outdir, filename)
- srcdir = srcdir.rstrip('/')
- dirname = os.path.dirname(srcdir)
- basename = os.path.basename(srcdir)
- os.chdir(dirname)
bb.note('Creating %s' % tarname)
tar = tarfile.open(tarname, 'w:gz')
- tar.add(basename)
+ tar.add(srcdir, arcname=os.path.basename(srcdir), filter=exclude_useless_paths)
tar.close()
# creating .diff.gz between source.orig and source
@@ -283,56 +329,82 @@ def create_diff_gz(d, src_orig, src, ar_outdir):
# exclude.
src_patched = src + '.patched'
oe.path.copyhardlinktree(src, src_patched)
- for i in d.getVarFlag('ARCHIVER_MODE', 'diff-exclude', True).split():
+ for i in d.getVarFlag('ARCHIVER_MODE', 'diff-exclude').split():
bb.utils.remove(os.path.join(src_orig, i), recurse=True)
bb.utils.remove(os.path.join(src_patched, i), recurse=True)
dirname = os.path.dirname(src)
basename = os.path.basename(src)
- os.chdir(dirname)
- out_file = os.path.join(ar_outdir, '%s-diff.gz' % d.getVar('PF', True))
- diff_cmd = 'diff -Naur %s.orig %s.patched | gzip -c > %s' % (basename, basename, out_file)
- subprocess.call(diff_cmd, shell=True)
- bb.utils.remove(src_patched, recurse=True)
+ bb.utils.mkdirhier(ar_outdir)
+ cwd = os.getcwd()
+ try:
+ os.chdir(dirname)
+ out_file = os.path.join(ar_outdir, '%s-diff.gz' % d.getVar('PF'))
+ diff_cmd = 'diff -Naur %s.orig %s.patched | gzip -c > %s' % (basename, basename, out_file)
+ subprocess.check_call(diff_cmd, shell=True)
+ bb.utils.remove(src_patched, recurse=True)
+ finally:
+ os.chdir(cwd)
+
+def is_work_shared(d):
+ pn = d.getVar('PN')
+ return bb.data.inherits_class('kernel', d) or pn.startswith('gcc-source')
# Run do_unpack and do_patch
python do_unpack_and_patch() {
- if d.getVarFlag('ARCHIVER_MODE', 'src', True) not in \
+ if d.getVarFlag('ARCHIVER_MODE', 'src') not in \
[ 'patched', 'configured'] and \
- d.getVarFlag('ARCHIVER_MODE', 'diff', True) != '1':
+ d.getVarFlag('ARCHIVER_MODE', 'diff') != '1':
return
- ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
- ar_workdir = d.getVar('ARCHIVER_WORKDIR', True)
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR')
+ ar_workdir = d.getVar('ARCHIVER_WORKDIR')
+ ar_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
+ pn = d.getVar('PN')
# The kernel class functions require it to be on work-shared, so we don't change WORKDIR
- if not bb.data.inherits_class('kernel-yocto', d):
+ if not is_work_shared(d):
# Change the WORKDIR to make do_unpack do_patch run in another dir.
d.setVar('WORKDIR', ar_workdir)
+ # Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
+ d.setVar('STAGING_DIR_NATIVE', ar_sysroot_native)
# Changing 'WORKDIR' also changes 'B', so create the 'B' directory in
# case following tasks require it (e.g. some recipes' do_patch requires
# 'B' to exist).
- bb.utils.mkdirhier(d.getVar('B', True))
+ bb.utils.mkdirhier(d.getVar('B'))
bb.build.exec_func('do_unpack', d)
# Save the original source for creating the patches
- if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
- src = d.getVar('S', True).rstrip('/')
+ if d.getVarFlag('ARCHIVER_MODE', 'diff') == '1':
+ src = d.getVar('S').rstrip('/')
src_orig = '%s.orig' % src
oe.path.copytree(src, src_orig)
# Make sure gcc and kernel sources are patched only once
- if not ((d.getVar('SRC_URI', True) == "" or bb.data.inherits_class('kernel-yocto', d))):
+ if not (d.getVar('SRC_URI') == "" or is_work_shared(d)):
bb.build.exec_func('do_patch', d)
# Create the patches
- if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
+ if d.getVarFlag('ARCHIVER_MODE', 'diff') == '1':
bb.note('Creating diff gz...')
create_diff_gz(d, src_orig, src, ar_outdir)
bb.utils.remove(src_orig, recurse=True)
}
+# BBINCLUDED is special (excluded from basehash signature
+# calculation). Using it in a task signature can cause "basehash
+# changed" errors.
+#
+# Depending on BBINCLUDED also causes do_ar_recipe to run again
+# for unrelated changes, like adding or removing buildhistory.bbclass.
+#
+# For these reasons we ignore the dependency completely. The versioning
+# of the output file ensures that we create it each time the recipe
+# gets rebuilt, at least as long as a PR server is used. We also rely
+# on that mechanism to catch changes in the file content, because the
+# file content is not part of the task signature either.
+do_ar_recipe[vardepsexclude] += "BBINCLUDED"
python do_ar_recipe () {
"""
archive the recipe, including .bb and .inc.
@@ -342,24 +414,24 @@ python do_ar_recipe () {
require_re = re.compile( r"require\s+(.+)" )
include_re = re.compile( r"include\s+(.+)" )
- bbfile = d.getVar('FILE', True)
- outdir = os.path.join(d.getVar('WORKDIR', True), \
- '%s-recipe' % d.getVar('PF', True))
+ bbfile = d.getVar('FILE')
+ outdir = os.path.join(d.getVar('WORKDIR'), \
+ '%s-recipe' % d.getVar('PF'))
bb.utils.mkdirhier(outdir)
shutil.copy(bbfile, outdir)
- pn = d.getVar('PN', True)
- bbappend_files = d.getVar('BBINCLUDED', True).split()
+ pn = d.getVar('PN')
+ bbappend_files = d.getVar('BBINCLUDED').split()
# If recipe name is aa, we need to match files like aa.bbappend and aa_1.1.bbappend
# Files like aa1.bbappend or aa1_1.1.bbappend must be excluded.
- bbappend_re = re.compile( r".*/%s_[^/]*\.bbappend$" %pn)
- bbappend_re1 = re.compile( r".*/%s\.bbappend$" %pn)
+ bbappend_re = re.compile( r".*/%s_[^/]*\.bbappend$" % re.escape(pn))
+ bbappend_re1 = re.compile( r".*/%s\.bbappend$" % re.escape(pn))
for file in bbappend_files:
if bbappend_re.match(file) or bbappend_re1.match(file):
shutil.copy(file, outdir)
dirname = os.path.dirname(bbfile)
- bbpath = '%s:%s' % (dirname, d.getVar('BBPATH', True))
+ bbpath = '%s:%s' % (dirname, d.getVar('BBPATH'))
f = open(bbfile, 'r')
for line in f.readlines():
incfile = None
@@ -368,12 +440,13 @@ python do_ar_recipe () {
elif include_re.match(line):
incfile = include_re.match(line).group(1)
if incfile:
- incfile = bb.data.expand(incfile, d)
+ incfile = d.expand(incfile)
+ if incfile:
incfile = bb.utils.which(bbpath, incfile)
- if incfile:
- shutil.copy(incfile, outdir)
+ if incfile:
+ shutil.copy(incfile, outdir)
- create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR', True))
+ create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR'))
bb.utils.remove(outdir, recurse=True)
}
@@ -382,8 +455,8 @@ python do_dumpdata () {
dump environment data to ${PF}-showdata.dump
"""
- dumpfile = os.path.join(d.getVar('ARCHIVER_OUTDIR', True), \
- '%s-showdata.dump' % d.getVar('PF', True))
+ dumpfile = os.path.join(d.getVar('ARCHIVER_OUTDIR'), \
+ '%s-showdata.dump' % d.getVar('PF'))
bb.note('Dumping metadata into %s' % dumpfile)
with open(dumpfile, "w") as f:
# emit variables and shell functions
@@ -414,15 +487,11 @@ addtask do_dumpdata
addtask do_ar_recipe
addtask do_deploy_archives before do_build
-addtask do_deploy_all_archives after do_deploy_archives
-do_deploy_all_archives[recrdeptask] = "do_deploy_archives"
-do_deploy_all_archives[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_deploy_all_archives() {
- :
-}
-
python () {
- # Add tasks in the correct order, specifically for linux-yocto to avoid race condition
+ # Add tasks in the correct order, specifically for linux-yocto to avoid race condition.
+ # sstatesig.py:sstate_rundepfilter has special support that excludes this dependency
+ # so that do_kernel_configme does not need to run again when do_unpack_and_patch
+ # gets added or removed (by adding or removing archiver.bbclass).
if bb.data.inherits_class('kernel-yocto', d):
bb.build.addtask('do_kernel_configme', 'do_configure', 'do_unpack_and_patch', d)
}
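
The create_tarball() change above relies on the filter= callback of Python's tarfile.TarFile.add(): when the callback returns None for a member, that member is excluded, and for a directory add() does not descend into it either. A self-contained sketch of the same mechanism, with placeholder paths:

import tarfile

def skip_scratch_dirs(tarinfo):
    # Returning None drops this member; since it is a directory,
    # add() will also skip everything beneath it.
    if tarinfo.isdir() and tarinfo.name.endswith(('/temp', '/patches', '/.pc')):
        return None
    return tarinfo

# '/tmp/src' and the archive name are placeholders for illustration.
with tarfile.open('/tmp/src-patched.tar.gz', 'w:gz') as tar:
    tar.add('/tmp/src', arcname='src', filter=skip_scratch_dirs)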
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
index ecbba9f603..3d22ad0255 100644
--- a/meta/classes/autotools.bbclass
+++ b/meta/classes/autotools.bbclass
@@ -1,8 +1,8 @@
def autotools_dep_prepend(d):
- if d.getVar('INHIBIT_AUTOTOOLS_DEPS', True):
+ if d.getVar('INHIBIT_AUTOTOOLS_DEPS'):
return ''
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
deps = ''
if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
@@ -14,7 +14,7 @@ def autotools_dep_prepend(d):
if not bb.data.inherits_class('native', d) \
and not bb.data.inherits_class('nativesdk', d) \
and not bb.data.inherits_class('cross', d) \
- and not d.getVar('INHIBIT_DEFAULT_DEPS', True):
+ and not d.getVar('INHIBIT_DEFAULT_DEPS'):
deps += 'libtool-cross '
return deps + 'gnu-config-native '
@@ -25,9 +25,11 @@ inherit siteinfo
# Space separated list of shell scripts with variables defined to supply test
# results for autoconf tests we cannot run at build time.
-export CONFIG_SITE = "${@siteinfo_get_files(d)}"
+# The value of this variable is filled in in a prefunc because it depends on
+# the contents of the sysroot.
+export CONFIG_SITE
-acpaths = "default"
+acpaths ?= "default"
EXTRA_AUTORECONF = "--exclude=autopoint"
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
@@ -131,133 +133,21 @@ EXTRACONFFUNCS ??= ""
EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
-do_configure[prefuncs] += "autotools_preconfigure autotools_copy_aclocals ${EXTRACONFFUNCS}"
+do_configure[prefuncs] += "autotools_preconfigure autotools_aclocals ${EXTRACONFFUNCS}"
+do_compile[prefuncs] += "autotools_aclocals"
+do_install[prefuncs] += "autotools_aclocals"
do_configure[postfuncs] += "autotools_postconfigure"
-ACLOCALDIR = "${WORKDIR}/aclocal-copy"
-
-python autotools_copy_aclocals () {
- import copy
-
- s = d.getVar("AUTOTOOLS_SCRIPT_PATH", True)
- if not os.path.exists(s + "/configure.in") and not os.path.exists(s + "/configure.ac"):
- if not d.getVar("AUTOTOOLS_COPYACLOCAL", False):
- return
-
- taskdepdata = d.getVar("BB_TASKDEPDATA", False)
- #bb.warn(str(taskdepdata))
- pn = d.getVar("PN", True)
- aclocaldir = d.getVar("ACLOCALDIR", True)
- oe.path.remove(aclocaldir)
- bb.utils.mkdirhier(aclocaldir)
- start = None
- configuredeps = []
- # Detect bitbake -b usage
- # Everything but quilt-native would have dependencies
- nodeps = (pn != "quilt-native")
-
- for dep in taskdepdata:
- data = taskdepdata[dep]
- if data[1] == "do_configure" and data[0] == pn:
- start = dep
- if not nodeps and start:
- break
- if nodeps and data[0] != pn:
- nodeps = False
- if start is None:
- bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
-
- # We need to figure out which m4 files we need to expose to this do_configure task.
- # This needs to match what would get restored from sstate, which is controlled
- # ultimately by calls from bitbake to setscene_depvalid().
- # That function expects a setscene dependency tree. We build a dependency tree
- # condensed to do_populate_sysroot -> do_populate_sysroot dependencies, similar to
- # that used by setscene tasks. We can then call into setscene_depvalid() and decide
- # which dependencies we can "see" and should expose the m4 files for.
- setscenedeps = copy.deepcopy(taskdepdata)
-
- start = set([start])
-
- # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
- for dep in taskdepdata:
- data = setscenedeps[dep]
- if data[1] != "do_populate_sysroot":
- for dep2 in setscenedeps:
- data2 = setscenedeps[dep2]
- if dep in data2[3]:
- data2[3].update(setscenedeps[dep][3])
- data2[3].remove(dep)
- if dep in start:
- start.update(setscenedeps[dep][3])
- start.remove(dep)
- del setscenedeps[dep]
-
- # Remove circular references
- for dep in setscenedeps:
- if dep in setscenedeps[dep][3]:
- setscenedeps[dep][3].remove(dep)
-
- # Direct dependencies should be present and can be depended upon
- for dep in start:
- configuredeps.append(setscenedeps[dep][0])
-
- # Call into setscene_depvalid for each sub-dependency and only copy m4 files
- # for ones that would be restored from sstate.
- done = list(start)
- next = list(start)
- while next:
- new = []
- for dep in next:
- data = setscenedeps[dep]
- for datadep in data[3]:
- if datadep in done:
- continue
- taskdeps = {}
- taskdeps[dep] = setscenedeps[dep][:2]
- taskdeps[datadep] = setscenedeps[datadep][:2]
- retval = setscene_depvalid(datadep, taskdeps, [], d)
- if retval:
- bb.note("Skipping setscene dependency %s for m4 macro copying" % datadep)
- continue
- done.append(datadep)
- new.append(datadep)
- configuredeps.append(setscenedeps[datadep][0])
- next = new
-
- cp = []
- if nodeps:
- bb.warn("autotools: Unable to find task dependencies, -b being used? Pulling in all m4 files")
- for l in [d.expand("${STAGING_DATADIR_NATIVE}/aclocal/"), d.expand("${STAGING_DATADIR}/aclocal/")]:
- cp.extend(os.path.join(l, f) for f in os.listdir(l))
-
- for c in configuredeps:
- if c.endswith("-native"):
- manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}-%s.populate_sysroot" % c)
- elif c.startswith("nativesdk-"):
- manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${SDK_ARCH}_${SDK_OS}-%s.populate_sysroot" % c)
- elif "-cross-" in c or "-crosssdk" in c:
- continue
- else:
- manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${MACHINE}-%s.populate_sysroot" % c)
- try:
- f = open(manifest, "r")
- for l in f:
- if "/aclocal/" in l and l.strip().endswith(".m4"):
- cp.append(l.strip())
- elif "config_site.d/" in l:
- cp.append(l.strip())
- except:
- bb.warn("%s not found" % manifest)
-
- for c in cp:
- t = os.path.join(aclocaldir, os.path.basename(c))
- if not os.path.exists(t):
- os.symlink(c, t)
-
- # Refresh variable with cache files
- d.setVar("CONFIG_SITE", siteinfo_get_files(d, aclocalcache=True))
+ACLOCALDIR = "${STAGING_DATADIR}/aclocal"
+ACLOCALEXTRAPATH = ""
+ACLOCALEXTRAPATH_class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
+ACLOCALEXTRAPATH_class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
+
+python autotools_aclocals () {
+ d.setVar("CONFIG_SITE", siteinfo_get_files(d, sysrootcache=True))
}
-autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
+
+CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
autotools_do_configure() {
# WARNING: gross hack follows:
@@ -277,6 +167,7 @@ autotools_do_configure() {
if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then
olddir=`pwd`
cd ${AUTOTOOLS_SCRIPT_PATH}
+ mkdir -p ${ACLOCALDIR}
ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
if [ x"${acpaths}" = xdefault ]; then
acpaths=
@@ -287,6 +178,7 @@ autotools_do_configure() {
else
acpaths="${acpaths}"
fi
+ acpaths="$acpaths ${ACLOCALEXTRAPATH}"
AUTOV=`automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'`
automake --version
echo "AUTOV is $AUTOV"
@@ -304,14 +196,14 @@ autotools_do_configure() {
else
CONFIGURE_AC=configure.ac
fi
- if grep "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
- if grep "sed.*POTFILES" $CONFIGURE_AC >/dev/null; then
+ if grep -q "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC; then
+ if grep -q "sed.*POTFILES" $CONFIGURE_AC; then
: do nothing -- we still have an old unmodified configure.ac
else
bbnote Executing glib-gettextize --force --copy
echo "no" | glib-gettextize --force --copy
fi
- elif grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
+ elif [ "${BPN}" != "gettext" ] && grep -q "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC; then
# We'd call gettextize here if it wasn't so broken...
cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
if [ -d ${S}/po/ ]; then
@@ -323,7 +215,7 @@ autotools_do_configure() {
PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
fi
mkdir -p m4
- if grep "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
+ if grep -q "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC; then
if ! echo "${DEPENDS}" | grep -q intltool-native; then
bbwarn "Missing DEPENDS on intltool-native"
fi
@@ -336,7 +228,7 @@ autotools_do_configure() {
find ${S} -ignore_readdir_race -name $i -delete
done
- bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
+ bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
cd $olddir
fi
@@ -348,7 +240,7 @@ autotools_do_configure() {
}
autotools_do_compile() {
- oe_runmake
+ oe_runmake
}
autotools_do_install() {
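
The CONFIG_SITE rework above depends on task prefuncs: functions listed in a task's [prefuncs] flag run immediately before the task itself, so a value that depends on the populated sysroot is computed at execution time instead of parse time. A minimal sketch of the pattern, assuming a hypothetical function, variable and sysroot layout:

python compute_site_files () {
    import glob
    # SITE_FILES and the site_config glob are illustrative only.
    sites = glob.glob(d.expand("${STAGING_DATADIR}/site_config/*"))
    d.setVar("SITE_FILES", " ".join(sorted(sites)))
}
do_configure[prefuncs] += "compute_site_files"
do_compile[prefuncs] += "compute_site_files"

The diff attaches autotools_aclocals to do_compile and do_install as well, presumably so the exported CONFIG_SITE value is also set when those tasks run without do_configure.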
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index 21957d84a1..145daea3ec 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -10,13 +10,17 @@ inherit utility-tasks
inherit metadata_scm
inherit logging
-OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath"
+OE_EXTRA_IMPORTS ?= ""
+
+OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license ${OE_EXTRA_IMPORTS}"
OE_IMPORTS[type] = "list"
+PACKAGECONFIG_CONFARGS ??= ""
+
def oe_import(d):
import sys
- bbpath = d.getVar("BBPATH", True).split(":")
+ bbpath = d.getVar("BBPATH").split(":")
sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]
def inject(name, value):
@@ -28,16 +32,18 @@ def oe_import(d):
import oe.data
for toimport in oe.data.typed_value("OE_IMPORTS", d):
- imported = __import__(toimport)
- inject(toimport.split(".", 1)[0], imported)
-
+ try:
+ imported = __import__(toimport)
+ inject(toimport.split(".", 1)[0], imported)
+ except AttributeError as e:
+ bb.error("Error importing OE modules: %s" % str(e))
return ""
# We need the oe module name space early (before INHERITs get added)
OE_IMPORTED := "${@oe_import(d)}"
def lsb_distro_identifier(d):
- adjust = d.getVar('LSB_DISTRO_ADJUST', True)
+ adjust = d.getVar('LSB_DISTRO_ADJUST')
adjust_func = None
if adjust:
try:
@@ -61,33 +67,26 @@ oe_runmake() {
def base_dep_prepend(d):
- #
- # Ideally this will check a flag so we will operate properly in
- # the case where host == build == target, for now we don't work in
- # that case though.
- #
+ if d.getVar('INHIBIT_DEFAULT_DEPS', False):
+ return ""
+ return "${BASE_DEFAULT_DEPS}"
- deps = ""
- # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
- # we need that built is the responsibility of the patch function / class, not
- # the application.
- if not d.getVar('INHIBIT_DEFAULT_DEPS', False):
- if (d.getVar('HOST_SYS', True) != d.getVar('BUILD_SYS', True)):
- deps += " virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc "
- return deps
+BASE_DEFAULT_DEPS = "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc"
-BASEDEPENDS = "${@base_dep_prepend(d)}"
+BASEDEPENDS = ""
+BASEDEPENDS_class-target = "${@base_dep_prepend(d)}"
+BASEDEPENDS_class-nativesdk = "${@base_dep_prepend(d)}"
DEPENDS_prepend="${BASEDEPENDS} "
FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
# THISDIR only works properly with immediate expansion as it has to run
# in the context of the location it's used (:=)
-THISDIR = "${@os.path.dirname(d.getVar('FILE', True))}"
+THISDIR = "${@os.path.dirname(d.getVar('FILE'))}"
def extra_path_elements(d):
path = ""
- elements = (d.getVar('EXTRANATIVEPATH', True) or "").split()
+ elements = (d.getVar('EXTRANATIVEPATH') or "").split()
for e in elements:
path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
return path
@@ -96,26 +95,53 @@ PATH_prepend = "${@extra_path_elements(d)}"
def get_lic_checksum_file_list(d):
filelist = []
- lic_files = d.getVar("LIC_FILES_CHKSUM", True) or ''
- tmpdir = d.getVar("TMPDIR", True)
+ lic_files = d.getVar("LIC_FILES_CHKSUM") or ''
+ tmpdir = d.getVar("TMPDIR")
+ s = d.getVar("S")
+ b = d.getVar("B")
+ workdir = d.getVar("WORKDIR")
urls = lic_files.split()
for url in urls:
# We only care about items that are absolute paths since
# any others should be covered by SRC_URI.
try:
- path = bb.fetch.decodeurl(url)[2]
- if not path:
+ (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
+ if method != "file" or not path:
raise bb.fetch.MalformedUrl(url)
if path[0] == '/':
- if path.startswith(tmpdir):
+ if path.startswith((tmpdir, s, b, workdir)):
continue
filelist.append(path + ":" + str(os.path.exists(path)))
except bb.fetch.MalformedUrl:
- raise bb.build.FuncFailed(d.getVar('PN', True) + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
+ bb.fatal(d.getVar('PN') + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
return " ".join(filelist)
+def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
+ tools = d.getVar(toolsvar).split()
+ origbbenv = d.getVar("BB_ORIGENV", False)
+ path = origbbenv.getVar("PATH")
+ bb.utils.mkdirhier(dest)
+ notfound = []
+ for tool in tools:
+ desttool = os.path.join(dest, tool)
+ if not os.path.exists(desttool):
+ srctool = bb.utils.which(path, tool, executable=True)
+ # gcc/g++ may link to ccache on some hosts, e.g.,
+ # /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
+ # would return /usr/local/bin/ccache/gcc, but what we need is
+ # /usr/bin/gcc, this code can check and fix that.
+ if "ccache" in srctool:
+ srctool = bb.utils.which(path, tool, executable=True, direction=1)
+ if srctool:
+ os.symlink(srctool, desttool)
+ else:
+ notfound.append(tool)
+
+ if notfound and fatal:
+ bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n %s" % " ".join(notfound))
+
addtask fetch
do_fetch[dirs] = "${DL_DIR}"
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
@@ -123,7 +149,7 @@ do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
do_fetch[vardeps] += "SRCREV"
python base_do_fetch() {
- src_uri = (d.getVar('SRC_URI', True) or "").split()
+ src_uri = (d.getVar('SRC_URI') or "").split()
if len(src_uri) == 0:
return
@@ -131,39 +157,29 @@ python base_do_fetch() {
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.download()
except bb.fetch2.BBFetchException as e:
- raise bb.build.FuncFailed(e)
+ bb.fatal(str(e))
}
addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"
-python () {
- if d.getVar('S', True) != d.getVar('WORKDIR', True):
- d.setVarFlag('do_unpack', 'cleandirs', '${S}')
- else:
- d.setVarFlag('do_unpack', 'cleandirs', os.path.join('${S}', 'patches'))
-}
+do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != os.path.normpath(d.getVar('WORKDIR')) else os.path.join('${S}', 'patches')}"
+
python base_do_unpack() {
- src_uri = (d.getVar('SRC_URI', True) or "").split()
+ src_uri = (d.getVar('SRC_URI') or "").split()
if len(src_uri) == 0:
return
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
- fetcher.unpack(d.getVar('WORKDIR', True))
+ fetcher.unpack(d.getVar('WORKDIR'))
except bb.fetch2.BBFetchException as e:
- raise bb.build.FuncFailed(e)
+ bb.fatal(str(e))
}
-def pkgarch_mapping(d):
- # Compatibility mappings of TUNE_PKGARCH (opt in)
- if d.getVar("PKGARCHCOMPAT_ARMV7A", True):
- if d.getVar("TUNE_PKGARCH", True) == "armv7a-vfp-neon":
- d.setVar("TUNE_PKGARCH", "armv7a")
-
def get_layers_branch_rev(d):
- layers = (d.getVar("BBLAYERS", True) or "").split()
- layers_branch_rev = ["%-17s = \"%s:%s\"" % (os.path.basename(i), \
+ layers = (d.getVar("BBLAYERS") or "").split()
+ layers_branch_rev = ["%-20s = \"%s:%s\"" % (os.path.basename(i), \
base_get_metadata_git_branch(i, None).strip(), \
base_get_metadata_git_revision(i, None)) \
for i in layers]
@@ -189,15 +205,15 @@ BUILDCFG_FUNCS[type] = "list"
def buildcfg_vars(d):
statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
for var in statusvars:
- value = d.getVar(var, True)
+ value = d.getVar(var)
if value is not None:
- yield '%-17s = "%s"' % (var, value)
+ yield '%-20s = "%s"' % (var, value)
def buildcfg_neededvars(d):
needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
pesteruser = []
for v in needed_vars:
- val = d.getVar(v, True)
+ val = d.getVar(v)
if not val or val == 'INVALID':
pesteruser.append(v)
@@ -205,21 +221,36 @@ def buildcfg_neededvars(d):
bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
addhandler base_eventhandler
-base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.runqueue.sceneQueueComplete bb.event.RecipeParsed"
+base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.event.RecipeParsed"
python base_eventhandler() {
import bb.runqueue
if isinstance(e, bb.event.ConfigParsed):
- if not e.data.getVar("NATIVELSBSTRING", False):
- e.data.setVar("NATIVELSBSTRING", lsb_distro_identifier(e.data))
- e.data.setVar('BB_VERSION', bb.__version__)
- pkgarch_mapping(e.data)
- oe.utils.features_backfill("DISTRO_FEATURES", e.data)
- oe.utils.features_backfill("MACHINE_FEATURES", e.data)
+ if not d.getVar("NATIVELSBSTRING", False):
+ d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
+ d.setVar('BB_VERSION', bb.__version__)
+
+ # There might be no bb.event.ConfigParsed event if bitbake server is
+ # running, so check bb.event.BuildStarted too to make sure ${HOSTTOOLS_DIR}
+ # exists.
+ if isinstance(e, bb.event.ConfigParsed) or \
+ (isinstance(e, bb.event.BuildStarted) and not os.path.exists(d.getVar('HOSTTOOLS_DIR'))):
+ # Works with the line in layer.conf which changes PATH to point here
+ setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
+ setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)
+
+ if isinstance(e, bb.event.MultiConfigParsed):
+ # We need to expand SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS in each of the multiconfig data stores
+ # own contexts so the variables get expanded correctly for that arch, then inject back into
+ # the main data store.
+ deps = []
+ for config in e.mcdata:
+ deps.append(e.mcdata[config].getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS"))
+ deps = " ".join(deps)
+ e.mcdata[''].setVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", deps)
if isinstance(e, bb.event.BuildStarted):
- localdata = bb.data.createCopy(e.data)
- bb.data.update_data(localdata)
+ localdata = bb.data.createCopy(d)
statuslines = []
for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
g = globals()
@@ -230,7 +261,7 @@ python base_eventhandler() {
if flines:
statuslines.extend(flines)
- statusheader = e.data.getVar('BUILDCFG_HEADER', True)
+ statusheader = d.getVar('BUILDCFG_HEADER')
if statusheader:
bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
@@ -238,24 +269,11 @@ python base_eventhandler() {
# target ones and we'd see duplicate key names overwriting each other
# for various PREFERRED_PROVIDERS
if isinstance(e, bb.event.RecipePreFinalise):
- if e.data.getVar("TARGET_PREFIX", True) == e.data.getVar("SDK_PREFIX", True):
- e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
- e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc-initial")
- e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
- e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
- e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")
-
- if isinstance(e, bb.runqueue.sceneQueueComplete):
- completions = e.data.expand("${STAGING_DIR}/sstatecompletions")
- if os.path.exists(completions):
- cmds = set()
- with open(completions, "r") as f:
- cmds = set(f)
- e.data.setVar("completion_function", "\n".join(cmds))
- e.data.setVarFlag("completion_function", "func", "1")
- bb.debug(1, "Executing SceneQueue Completion commands: %s" % "\n".join(cmds))
- bb.build.exec_func("completion_function", e.data)
- os.remove(completions)
+ if d.getVar("TARGET_PREFIX") == d.getVar("SDK_PREFIX"):
+ d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
+ d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
+ d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
+ d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")
if isinstance(e, bb.event.RecipeParsed):
#
@@ -264,16 +282,16 @@ python base_eventhandler() {
# sysroot since they're now "unreachable". This makes switching virtual/kernel work in
# particular.
#
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
if not source_mirror_fetch:
- provs = (d.getVar("PROVIDES", True) or "").split()
- multiwhitelist = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
+ provs = (d.getVar("PROVIDES") or "").split()
+ multiwhitelist = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
for p in provs:
if p.startswith("virtual/") and p not in multiwhitelist:
- profprov = d.getVar("PREFERRED_PROVIDER_" + p, True)
+ profprov = d.getVar("PREFERRED_PROVIDER_" + p)
if profprov and pn != profprov:
- raise bb.parse.SkipPackage("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
+ raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
}
CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
@@ -281,7 +299,6 @@ CLEANBROKEN = "0"
addtask configure after do_patch
do_configure[dirs] = "${B}"
-do_configure[deptask] = "do_populate_sysroot"
base_do_configure() {
if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
@@ -289,7 +306,9 @@ base_do_configure() {
if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
oe_runmake clean
fi
- find ${B} -ignore_readdir_race -name \*.la -delete
+ # -ignore_readdir_race does not work correctly with -delete;
+ # use xargs to avoid spurious build failures
+ find ${B} -ignore_readdir_race -name \*.la -type f -print0 | xargs -0 rm -f
fi
fi
if [ -n "${CONFIGURESTAMPFILE}" ]; then
@@ -333,9 +352,9 @@ def set_packagetriplet(d):
tos = []
tvs = []
- archs.append(d.getVar("PACKAGE_ARCHS", True).split())
- tos.append(d.getVar("TARGET_OS", True))
- tvs.append(d.getVar("TARGET_VENDOR", True))
+ archs.append(d.getVar("PACKAGE_ARCHS").split())
+ tos.append(d.getVar("TARGET_OS"))
+ tvs.append(d.getVar("TARGET_VENDOR"))
def settriplet(d, varname, archs, tos, tvs):
triplets = []
@@ -347,34 +366,37 @@ def set_packagetriplet(d):
settriplet(d, "PKGTRIPLETS", archs, tos, tvs)
- variants = d.getVar("MULTILIB_VARIANTS", True) or ""
+ variants = d.getVar("MULTILIB_VARIANTS") or ""
for item in variants.split():
localdata = bb.data.createCopy(d)
overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
localdata.setVar("OVERRIDES", overrides)
- bb.data.update_data(localdata)
- archs.append(localdata.getVar("PACKAGE_ARCHS", True).split())
- tos.append(localdata.getVar("TARGET_OS", True))
- tvs.append(localdata.getVar("TARGET_VENDOR", True))
+ archs.append(localdata.getVar("PACKAGE_ARCHS").split())
+ tos.append(localdata.getVar("TARGET_OS"))
+ tvs.append(localdata.getVar("TARGET_VENDOR"))
settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)
python () {
import string, re
+ # Handle backfilling
+ oe.utils.features_backfill("DISTRO_FEATURES", d)
+ oe.utils.features_backfill("MACHINE_FEATURES", d)
+
# Handle PACKAGECONFIG
#
# These take the form:
#
# PACKAGECONFIG ??= "<default options>"
- # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends"
+ # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends"
pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
if pkgconfigflags:
- pkgconfig = (d.getVar('PACKAGECONFIG', True) or "").split()
- pn = d.getVar("PN", True)
+ pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
+ pn = d.getVar("PN")
- mlprefix = d.getVar("MLPREFIX", True)
+ mlprefix = d.getVar("MLPREFIX")
def expandFilter(appends, extension, prefix):
appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
@@ -410,44 +432,52 @@ python () {
extradeps = []
extrardeps = []
+ extrarrecs = []
extraconf = []
for flag, flagval in sorted(pkgconfigflags.items()):
items = flagval.split(",")
num = len(items)
- if num > 4:
- bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend can be specified!"
- % (d.getVar('PN', True), flag))
+ if num > 5:
+ bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend can be specified!"
+ % (d.getVar('PN'), flag))
if flag in pkgconfig:
if num >= 3 and items[2]:
extradeps.append(items[2])
if num >= 4 and items[3]:
extrardeps.append(items[3])
+ if num >= 5 and items[4]:
+ extrarrecs.append(items[4])
if num >= 1 and items[0]:
extraconf.append(items[0])
elif num >= 2 and items[1]:
extraconf.append(items[1])
appendVar('DEPENDS', extradeps)
appendVar('RDEPENDS_${PN}', extrardeps)
+ appendVar('RRECOMMENDS_${PN}', extrarrecs)
appendVar('PACKAGECONFIG_CONFARGS', extraconf)
- pn = d.getVar('PN', True)
- license = d.getVar('LICENSE', True)
- if license == "INVALID":
+ pn = d.getVar('PN')
+ license = d.getVar('LICENSE')
+ if license == "INVALID" and pn != "defaultpkgname":
bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
if bb.data.inherits_class('license', d):
check_license_format(d)
- unmatched_license_flag = check_license_flags(d)
- if unmatched_license_flag:
- bb.debug(1, "Skipping %s because it has a restricted license not"
- " whitelisted in LICENSE_FLAGS_WHITELIST" % pn)
- raise bb.parse.SkipPackage("because it has a restricted license not"
- " whitelisted in LICENSE_FLAGS_WHITELIST")
+ unmatched_license_flags = check_license_flags(d)
+ if unmatched_license_flags:
+ if len(unmatched_license_flags) == 1:
+                    message = "because it has a restricted license '{0}', which is not whitelisted in LICENSE_FLAGS_WHITELIST".format(unmatched_license_flags[0])
+                else:
+                    message = "because it has restricted licenses {0}, which are not whitelisted in LICENSE_FLAGS_WHITELIST".format(
+ ", ".join("'{0}'".format(f) for f in unmatched_license_flags))
+ bb.debug(1, "Skipping %s %s" % (pn, message))
+ raise bb.parse.SkipRecipe(message)
# If we're building a target package we need to use fakeroot (pseudo)
# in order to capture permissions, owners, groups and special files
if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+ d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_unpack', 'umask', '022')
d.setVarFlag('do_configure', 'umask', '022')
d.setVarFlag('do_compile', 'umask', '022')
@@ -462,26 +492,26 @@ python () {
d.setVarFlag('do_devshell', 'fakeroot', '1')
d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
- need_machine = d.getVar('COMPATIBLE_MACHINE', True)
+ need_machine = d.getVar('COMPATIBLE_MACHINE')
if need_machine:
import re
- compat_machines = (d.getVar('MACHINEOVERRIDES', True) or "").split(":")
+ compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
for m in compat_machines:
if re.match(need_machine, m):
break
else:
- raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE', True))
+ raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))
source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
if not source_mirror_fetch:
- need_host = d.getVar('COMPATIBLE_HOST', True)
+ need_host = d.getVar('COMPATIBLE_HOST')
if need_host:
import re
- this_host = d.getVar('HOST_SYS', True)
+ this_host = d.getVar('HOST_SYS')
if not re.match(need_host, this_host):
- raise bb.parse.SkipPackage("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
+ raise bb.parse.SkipRecipe("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
- bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()
+ bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
check_license = False if pn.startswith("nativesdk-") else True
for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
@@ -496,54 +526,54 @@ python () {
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
whitelist = []
- incompatwl = []
for lic in bad_licenses:
spdx_license = return_spdx(d, lic)
- for w in ["LGPLv2_WHITELIST_", "WHITELIST_"]:
- whitelist.extend((d.getVar(w + lic, True) or "").split())
- if spdx_license:
- whitelist.extend((d.getVar(w + spdx_license, True) or "").split())
- '''
- We need to track what we are whitelisting and why. If pn is
- incompatible we need to be able to note that the image that
- is created may infact contain incompatible licenses despite
- INCOMPATIBLE_LICENSE being set.
- '''
- incompatwl.extend((d.getVar(w + lic, True) or "").split())
- if spdx_license:
- incompatwl.extend((d.getVar(w + spdx_license, True) or "").split())
-
- if not pn in whitelist:
- pkgs = d.getVar('PACKAGES', True).split()
- skipped_pkgs = []
+ whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
+ if spdx_license:
+ whitelist.extend((d.getVar("WHITELIST_" + spdx_license) or "").split())
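+        # An illustrative conf entry (hypothetical recipe name):
+        #     WHITELIST_GPL-3.0 = "my-recipe"
+        # would let my-recipe build even with GPL-3.0 in INCOMPATIBLE_LICENSE.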
+
+ if pn in whitelist:
+ '''
+ We need to track what we are whitelisting and why. If pn is
+ incompatible we need to be able to note that the image that
+            is created may in fact contain incompatible licenses despite
+ INCOMPATIBLE_LICENSE being set.
+ '''
+ bb.note("Including %s as buildable despite it having an incompatible license because it has been whitelisted" % pn)
+ else:
+ pkgs = d.getVar('PACKAGES').split()
+ skipped_pkgs = {}
unskipped_pkgs = []
for pkg in pkgs:
- if incompatible_license(d, bad_licenses, pkg):
- skipped_pkgs.append(pkg)
+ incompatible_lic = incompatible_license(d, bad_licenses, pkg)
+ if incompatible_lic:
+ skipped_pkgs[pkg] = incompatible_lic
else:
unskipped_pkgs.append(pkg)
- all_skipped = skipped_pkgs and not unskipped_pkgs
if unskipped_pkgs:
for pkg in skipped_pkgs:
- bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + license)
- mlprefix = d.getVar('MLPREFIX', True)
- d.setVar('LICENSE_EXCLUSION-' + mlprefix + pkg, 1)
+ bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
+ mlprefix = d.getVar('MLPREFIX')
+ d.setVar('LICENSE_EXCLUSION-' + mlprefix + pkg, ' '.join(skipped_pkgs[pkg]))
for pkg in unskipped_pkgs:
- bb.debug(1, "INCLUDING the package " + pkg)
- elif all_skipped or incompatible_license(d, bad_licenses):
- bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, license))
- raise bb.parse.SkipPackage("incompatible with license %s" % license)
- elif pn in whitelist:
- if pn in incompatwl:
- bb.note("INCLUDING " + pn + " as buildable despite INCOMPATIBLE_LICENSE because it has been whitelisted")
+ bb.debug(1, "Including the package %s" % pkg)
+ else:
+ incompatible_lic = incompatible_license(d, bad_licenses)
+ for pkg in skipped_pkgs:
+ incompatible_lic += skipped_pkgs[pkg]
+ incompatible_lic = sorted(list(set(incompatible_lic)))
+
+ if incompatible_lic:
+ bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
+ raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))
# Try to verify per-package (LICENSE_<pkg>) values. LICENSE should be a
# superset of all per-package licenses. We do not do advanced (pattern)
# matching of license expressions - just check that all license strings
# in LICENSE_<pkg> are found in LICENSE.
license_set = oe.license.list_licenses(license)
- for pkg in d.getVar('PACKAGES', True).split():
- pkg_license = d.getVar('LICENSE_' + pkg, True)
+ for pkg in d.getVar('PACKAGES').split():
+ pkg_license = d.getVar('LICENSE_' + pkg)
if pkg_license:
unlisted = oe.license.list_licenses(pkg_license) - license_set
if unlisted:
@@ -551,7 +581,7 @@ python () {
"listed in LICENSE" % (pkg, ' '.join(unlisted)))
needsrcrev = False
- srcuri = d.getVar('SRC_URI', True)
+ srcuri = d.getVar('SRC_URI')
for uri in srcuri.split():
(scheme, _ , path) = bb.fetch.decodeurl(uri)[:3]
@@ -572,6 +602,7 @@ python () {
# Mercurial packages should DEPEND on mercurial-native
elif scheme == "hg":
needsrcrev = True
+ d.appendVar("EXTRANATIVEPATH", ' python3-native ')
d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
# Perforce packages support SRCREV = "${AUTOREV}"
@@ -594,25 +625,41 @@ python () {
d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')
# *.xz should DEPEND on xz-native for unpacking
- elif path.endswith('.xz'):
+ elif path.endswith('.xz') or path.endswith('.txz'):
d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
# .zip should DEPEND on unzip-native for unpacking
- elif path.endswith('.zip'):
+ elif path.endswith('.zip') or path.endswith('.jar'):
d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
- # file is needed by rpm2cpio.sh
- elif path.endswith('.src.rpm'):
- d.appendVarFlag('do_unpack', 'depends', ' file-native:do_populate_sysroot')
+ # Some rpm files may be compressed internally using xz (for example, rpms from Fedora)
+ elif path.endswith('.rpm'):
+ d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
+
+ # *.deb should DEPEND on xz-native for unpacking
+ elif path.endswith('.deb'):
+ d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
if needsrcrev:
d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")
+ # Gather all named SRCREVs to add to the sstate hash calculation
+ # This anonymous python snippet is called multiple times so we
+ # need to be careful to not double up the appends here and cause
+ # the base hash to mismatch the task hash
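+        # For example (illustrative), a SRC_URI entry such as
+        #     git://host/repo.git;name=rt
+        # results in SRCREV_rt being added to the do_fetch vardeps below.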
+ for uri in srcuri.split():
+ parm = bb.fetch.decodeurl(uri)[5]
+ uri_names = parm.get("name", "").split(",")
+ for uri_name in filter(None, uri_names):
+ srcrev_name = "SRCREV_{}".format(uri_name)
+ if srcrev_name not in (d.getVarFlag("do_fetch", "vardeps") or "").split():
+ d.appendVarFlag("do_fetch", "vardeps", " {}".format(srcrev_name))
+
set_packagetriplet(d)
# 'multimachine' handling
- mach_arch = d.getVar('MACHINE_ARCH', True)
- pkg_arch = d.getVar('PACKAGE_ARCH', True)
+ mach_arch = d.getVar('MACHINE_ARCH')
+ pkg_arch = d.getVar('PACKAGE_ARCH')
if (pkg_arch == mach_arch):
# Already machine specific - nothing further to do
@@ -622,11 +669,11 @@ python () {
# We always try to scan SRC_URI for urls with machine overrides
# unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
#
- override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', True)
+ override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH')
if override != '0':
paths = []
- fpaths = (d.getVar('FILESPATH', True) or '').split(':')
- machine = d.getVar('MACHINE', True)
+ fpaths = (d.getVar('FILESPATH') or '').split(':')
+ machine = d.getVar('MACHINE')
for p in fpaths:
if os.path.basename(p) == machine and os.path.isdir(p):
paths.append(p)
@@ -643,16 +690,16 @@ python () {
d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
return
- packages = d.getVar('PACKAGES', True).split()
+ packages = d.getVar('PACKAGES').split()
for pkg in packages:
- pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg, True)
+ pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg)
# We could look for != PACKAGE_ARCH here but how to choose
# if multiple differences are present?
# Look through PACKAGE_ARCHS for the priority order?
if pkgarch and pkgarch == mach_arch:
d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
- bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN", True))
+ bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN"))
}
addtask cleansstate after do_clean
@@ -663,7 +710,7 @@ addtask cleanall after do_cleansstate
do_cleansstate[nostamp] = "1"
python do_cleanall() {
- src_uri = (d.getVar('SRC_URI', True) or "").split()
+ src_uri = (d.getVar('SRC_URI') or "").split()
if len(src_uri) == 0:
return
@@ -671,7 +718,7 @@ python do_cleanall() {
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.clean()
except bb.fetch2.BBFetchException as e:
- raise bb.build.FuncFailed(e)
+ bb.fatal(str(e))
}
do_cleanall[nostamp] = "1"
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
index a52b75be5c..cbc9b1fa13 100644
--- a/meta/classes/bin_package.bbclass
+++ b/meta/classes/bin_package.bbclass
@@ -26,7 +26,10 @@ do_compile[noexec] = "1"
bin_package_do_install () {
# Do it carefully
[ -d "${S}" ] || exit 1
- cd ${S} || exit 1
+ if [ -z "$(ls -A ${S})" ]; then
+		bbfatal "bin_package has nothing to install. Be sure the SRC_URI unpacks into S."
+ fi
+ cd ${S}
tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
| tar --no-same-owner -xpf - -C ${D}
}
diff --git a/meta/classes/binconfig-disabled.bbclass b/meta/classes/binconfig-disabled.bbclass
index 602a669aa1..096b670e12 100644
--- a/meta/classes/binconfig-disabled.bbclass
+++ b/meta/classes/binconfig-disabled.bbclass
@@ -15,6 +15,7 @@ do_install_append () {
echo "echo 'ERROR: $x should not be used, use an alternative such as pkg-config' >&2" >> ${D}$x
echo "echo '--should-not-have-used-$x'" >> ${D}$x
echo "exit 1" >> ${D}$x
+ chmod +x ${D}$x
done
}
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
index cbc4173601..9112ed4608 100644
--- a/meta/classes/binconfig.bbclass
+++ b/meta/classes/binconfig.bbclass
@@ -13,16 +13,16 @@ def get_binconfig_mangle(d):
s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
+ s += " -e 's:-L${WORKDIR}:-LOELIBDIR:'"
+ s += " -e 's:-I${WORKDIR}:-IOEINCDIR:'"
s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'"
s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
- s += " -e 's:-I${WORKDIR}:-I${STAGING_INCDIR}:'"
- s += " -e 's:-L${WORKDIR}:-L${STAGING_LIBDIR}:'"
- if bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d):
- s += bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d)
+ if d.getVar("OE_BINCONFIG_EXTRA_MANGLE", False):
+ s += d.getVar("OE_BINCONFIG_EXTRA_MANGLE")
return s
@@ -31,7 +31,7 @@ BINCONFIG_GLOB ?= "*-config"
PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
binconfig_package_preprocess () {
- for config in `find ${PKGD} -name '${BINCONFIG_GLOB}'`; do
+ for config in `find ${PKGD} -type f -name '${BINCONFIG_GLOB}'`; do
sed -i \
-e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
-e 's:${STAGING_LIBDIR}:${libdir}:g;' \
@@ -40,21 +40,12 @@ binconfig_package_preprocess () {
-e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
$config
done
- for lafile in `find ${PKGD} -name "*.la"` ; do
- sed -i \
- -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
- -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
- -e 's:${STAGING_INCDIR}:${includedir}:g;' \
- -e 's:${STAGING_DATADIR}:${datadir}:' \
- -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
- $lafile
- done
}
SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
binconfig_sysroot_preprocess () {
- for config in `find ${S} -name '${BINCONFIG_GLOB}'` `find ${B} -name '${BINCONFIG_GLOB}'`; do
+ for config in `find ${S} -type f -name '${BINCONFIG_GLOB}'` `find ${B} -type f -name '${BINCONFIG_GLOB}'`; do
configname=`basename $config`
install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
sed ${@get_binconfig_mangle(d)} $config > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
diff --git a/meta/classes/blacklist.bbclass b/meta/classes/blacklist.bbclass
index a0141a82c0..dc794228ff 100644
--- a/meta/classes/blacklist.bbclass
+++ b/meta/classes/blacklist.bbclass
@@ -12,34 +12,9 @@
# PNBLACKLIST[pn] = "message"
#
-# Cope with PNBLACKLIST flags for multilib case
-addhandler blacklist_multilib_eventhandler
-blacklist_multilib_eventhandler[eventmask] = "bb.event.ConfigParsed"
-python blacklist_multilib_eventhandler() {
- multilibs = e.data.getVar('MULTILIBS', True)
- if not multilibs:
- return
-
- # this block has been copied from base.bbclass so keep it in sync
- prefixes = []
- for ext in multilibs.split():
- eext = ext.split(':')
- if len(eext) > 1 and eext[0] == 'multilib':
- prefixes.append(eext[1])
-
- blacklists = e.data.getVarFlags('PNBLACKLIST') or {}
- for pkg, reason in blacklists.items():
- if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in pkg:
- continue
- for p in prefixes:
- newpkg = p + "-" + pkg
- if not e.data.getVarFlag('PNBLACKLIST', newpkg, True):
- e.data.setVarFlag('PNBLACKLIST', newpkg, reason)
-}
-
python () {
- blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN', True), True)
+ blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN'))
if blacklist:
- raise bb.parse.SkipPackage("Recipe is blacklisted: %s" % (blacklist))
+ raise bb.parse.SkipRecipe("Recipe is blacklisted: %s" % (blacklist))
}
diff --git a/meta/classes/bluetooth.bbclass b/meta/classes/bluetooth.bbclass
deleted file mode 100644
index f88b4ae5b8..0000000000
--- a/meta/classes/bluetooth.bbclass
+++ /dev/null
@@ -1,14 +0,0 @@
-# Avoid code duplication in bluetooth-dependent recipes.
-
-# Define a variable that expands to the recipe (package) providing core
-# bluetooth support on the platform:
-# "" if bluetooth is not in DISTRO_FEATURES
-# else "bluez5" if bluez5 is in DISTRO_FEATURES
-# else "bluez4"
-
-# Use this with:
-# inherit bluetooth
-# PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', '${BLUEZ}', '', d)}
-# PACKAGECONFIG[bluez4] = "--enable-bluez4,--disable-bluez4,bluez4"
-
-BLUEZ ?= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', bb.utils.contains('DISTRO_FEATURES', 'bluez5', 'bluez5', 'bluez4', d), '', d)}"
diff --git a/meta/classes/bugzilla.bbclass b/meta/classes/bugzilla.bbclass
deleted file mode 100644
index 3fc8956428..0000000000
--- a/meta/classes/bugzilla.bbclass
+++ /dev/null
@@ -1,187 +0,0 @@
-#
-# Small event handler to automatically open URLs and file
-# bug reports at a bugzilla of your choiche
-# it uses XML-RPC interface, so you must have it enabled
-#
-# Before using you must define BUGZILLA_USER, BUGZILLA_PASS credentials,
-# BUGZILLA_XMLRPC - uri of xmlrpc.cgi,
-# BUGZILLA_PRODUCT, BUGZILLA_COMPONENT - a place in BTS for build bugs
-# BUGZILLA_VERSION - version against which to report new bugs
-#
-
-def bugzilla_find_bug_report(debug_file, server, args, bugname):
- args['summary'] = bugname
- bugs = server.Bug.search(args)
- if len(bugs['bugs']) == 0:
- print >> debug_file, "Bugs not found"
- return (False,None)
- else: # silently pick the first result
- print >> debug_file, "Result of bug search is "
- print >> debug_file, bugs
- status = bugs['bugs'][0]['status']
- id = bugs['bugs'][0]['id']
- return (not status in ["CLOSED", "RESOLVED", "VERIFIED"],id)
-
-def bugzilla_file_bug(debug_file, server, args, name, text, version):
- args['summary'] = name
- args['comment'] = text
- args['version'] = version
- args['op_sys'] = 'Linux'
- args['platform'] = 'Other'
- args['severity'] = 'normal'
- args['priority'] = 'Normal'
- try:
- return server.Bug.create(args)['id']
- except Exception, e:
- print >> debug_file, repr(e)
- return None
-
-def bugzilla_reopen_bug(debug_file, server, args, bug_number):
- args['ids'] = [bug_number]
- args['status'] = "CONFIRMED"
- try:
- server.Bug.update(args)
- return True
- except Exception, e:
- print >> debug_file, repr(e)
- return False
-
-def bugzilla_create_attachment(debug_file, server, args, bug_number, text, file_name, log, logdescription):
- args['ids'] = [bug_number]
- args['file_name'] = file_name
- args['summary'] = logdescription
- args['content_type'] = "text/plain"
- args['data'] = log
- args['comment'] = text
- try:
- server.Bug.add_attachment(args)
- return True
- except Exception, e:
- print >> debug_file, repr(e)
- return False
-
-def bugzilla_add_comment(debug_file, server, args, bug_number, text):
- args['id'] = bug_number
- args['comment'] = text
- try:
- server.Bug.add_comment(args)
- return True
- except Exception, e:
- print >> debug_file, repr(e)
- return False
-
-addhandler bugzilla_eventhandler
-bugzilla_eventhandler[eventmask] = "bb.event.MsgNote bb.build.TaskFailed"
-python bugzilla_eventhandler() {
- import glob
- import xmlrpclib, httplib
-
- class ProxiedTransport(xmlrpclib.Transport):
- def __init__(self, proxy, use_datetime = 0):
- xmlrpclib.Transport.__init__(self, use_datetime)
- self.proxy = proxy
- self.user = None
- self.password = None
-
- def set_user(self, user):
- self.user = user
-
- def set_password(self, password):
- self.password = password
-
- def make_connection(self, host):
- self.realhost = host
- return httplib.HTTP(self.proxy)
-
- def send_request(self, connection, handler, request_body):
- connection.putrequest("POST", 'http://%s%s' % (self.realhost, handler))
- if self.user != None:
- if self.password != None:
- auth = "%s:%s" % (self.user, self.password)
- else:
- auth = self.user
- connection.putheader("Proxy-authorization", "Basic " + base64.encodestring(auth))
-
- event = e
- data = e.data
- name = bb.event.getName(event)
- if name == "MsgNote":
- # avoid recursion
- return
-
- if name == "TaskFailed":
- xmlrpc = data.getVar("BUGZILLA_XMLRPC", True)
- user = data.getVar("BUGZILLA_USER", True)
- passw = data.getVar("BUGZILLA_PASS", True)
- product = data.getVar("BUGZILLA_PRODUCT", True)
- compon = data.getVar("BUGZILLA_COMPONENT", True)
- version = data.getVar("BUGZILLA_VERSION", True)
-
- proxy = data.getVar('http_proxy', True )
- if (proxy):
- import urllib2
- s, u, p, hostport = urllib2._parse_proxy(proxy)
- transport = ProxiedTransport(hostport)
- else:
- transport = None
-
- server = xmlrpclib.ServerProxy(xmlrpc, transport=transport, verbose=0)
- args = {
- 'Bugzilla_login': user,
- 'Bugzilla_password': passw,
- 'product': product,
- 'component': compon}
-
- # evil hack to figure out what is going on
- debug_file = open(os.path.join(data.getVar("TMPDIR", True),"..","bugzilla-log"),"a")
-
- file = None
- bugname = "%(package)s-%(pv)s-autobuild" % { "package" : data.getVar("PN", True),
- "pv" : data.getVar("PV", True),
- }
- log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
- text = "The %s step in %s failed at %s for machine %s" % (e.task, data.getVar("PN", True), data.getVar('DATETIME', True), data.getVar( 'MACHINE', True ) )
- if len(log_file) != 0:
- print >> debug_file, "Adding log file %s" % log_file[0]
- file = open(log_file[0], 'r')
- log = file.read()
- file.close();
- else:
- print >> debug_file, "No log file found for the glob"
- log = None
-
- (bug_open, bug_number) = bugzilla_find_bug_report(debug_file, server, args.copy(), bugname)
- print >> debug_file, "Bug is open: %s and bug number: %s" % (bug_open, bug_number)
-
- # The bug is present and still open, attach an error log
- if not bug_number:
- bug_number = bugzilla_file_bug(debug_file, server, args.copy(), bugname, text, version)
- if not bug_number:
- print >> debug_file, "Couldn't acquire a new bug_numer, filing a bugreport failed"
- else:
- print >> debug_file, "The new bug_number: '%s'" % bug_number
- elif not bug_open:
- if not bugzilla_reopen_bug(debug_file, server, args.copy(), bug_number):
- print >> debug_file, "Failed to reopen the bug #%s" % bug_number
- else:
- print >> debug_file, "Reopened the bug #%s" % bug_number
-
- if bug_number and log:
- print >> debug_file, "The bug is known as '%s'" % bug_number
- desc = "Build log for machine %s" % (data.getVar('MACHINE', True))
- if not bugzilla_create_attachment(debug_file, server, args.copy(), bug_number, text, log_file[0], log, desc):
- print >> debug_file, "Failed to attach the build log for bug #%s" % bug_number
- else:
- print >> debug_file, "Created an attachment for '%s' '%s' '%s'" % (product, compon, bug_number)
- else:
- print >> debug_file, "Not trying to create an attachment for bug #%s" % bug_number
- if not bugzilla_add_comment(debug_file, server, args.copy(), bug_number, text, ):
- print >> debug_file, "Failed to create a comment the build log for bug #%s" % bug_number
- else:
- print >> debug_file, "Created an attachment for '%s' '%s' '%s'" % (product, compon, bug_number)
-
- # store bug number for oestats-client
- if bug_number:
- data.setVar('OESTATS_BUG_NUMBER', bug_number)
-}
-
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
index 6e5de0ef69..affdf272d7 100644
--- a/meta/classes/buildhistory.bbclass
+++ b/meta/classes/buildhistory.bbclass
@@ -37,7 +37,7 @@ BUILDHISTORY_OLD_DIR_PACKAGE = "${BUILDHISTORY_OLD_DIR}/packages/${MULTIMACH_TAR
BUILDHISTORY_DIR_SDK = "${BUILDHISTORY_DIR}/sdk/${SDK_NAME}${SDK_EXT}/${IMAGE_BASENAME}"
BUILDHISTORY_IMAGE_FILES ?= "/etc/passwd /etc/group"
BUILDHISTORY_SDK_FILES ?= "conf/local.conf conf/bblayers.conf conf/auto.conf conf/locked-sigs.inc conf/devtool.conf"
-BUILDHISTORY_COMMIT ?= "0"
+BUILDHISTORY_COMMIT ?= "1"
BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
BUILDHISTORY_PUSH_REPO ?= ""
@@ -47,6 +47,11 @@ sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
# then the value added to SSTATEPOSTINSTFUNCS:
SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
+# Similarly for our function that gets the output signatures
+SSTATEPOSTUNPACKFUNCS_append = " buildhistory_emit_outputsigs"
+sstate_installpkgdir[vardepsexclude] += "buildhistory_emit_outputsigs"
+SSTATEPOSTUNPACKFUNCS[vardepvalueexclude] .= "| buildhistory_emit_outputsigs"
+
# All items excepts those listed here will be removed from a recipe's
# build history directory by buildhistory_emit_pkghistory(). This is
# necessary because some of these items (package directories, files that
@@ -55,24 +60,47 @@ SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
# When extending build history, derive your class from buildhistory.bbclass
# and extend this list here with the additional files created by the derived
# class.
-BUILDHISTORY_PRESERVE = "latest latest_srcrev"
+BUILDHISTORY_PRESERVE = "latest latest_srcrev sysroot"
+
+PATCH_GIT_USER_EMAIL ?= "buildhistory@oe"
+PATCH_GIT_USER_NAME ?= "OpenEmbedded"
+
+#
+# Write out the contents of the sysroot
+#
+buildhistory_emit_sysroot() {
+ mkdir --parents ${BUILDHISTORY_DIR_PACKAGE}
+ case ${CLASSOVERRIDE} in
+ class-native|class-cross|class-crosssdk)
+ BASE=${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}
+ ;;
+ *)
+ BASE=${SYSROOT_DESTDIR}
+ ;;
+ esac
+ buildhistory_list_files_no_owners $BASE ${BUILDHISTORY_DIR_PACKAGE}/sysroot
+}
#
# Write out metadata about this package for comparison when writing future packages
#
python buildhistory_emit_pkghistory() {
- if not d.getVar('BB_CURRENTTASK', True) in ['packagedata', 'packagedata_setscene']:
+ if d.getVar('BB_CURRENTTASK') in ['populate_sysroot', 'populate_sysroot_setscene']:
+ bb.build.exec_func("buildhistory_emit_sysroot", d)
+
+ if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
return 0
- if not "package" in (d.getVar('BUILDHISTORY_FEATURES', True) or "").split():
+ if not "package" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
return 0
import re
import json
+ import shlex
import errno
- pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
- oldpkghistdir = d.getVar('BUILDHISTORY_OLD_DIR_PACKAGE', True)
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE')
+ oldpkghistdir = d.getVar('BUILDHISTORY_OLD_DIR_PACKAGE')
class RecipeInfo:
def __init__(self, name):
@@ -83,6 +111,7 @@ python buildhistory_emit_pkghistory() {
self.depends = ""
self.packages = ""
self.srcrev = ""
+ self.layer = ""
class PackageInfo:
@@ -179,12 +208,13 @@ python buildhistory_emit_pkghistory() {
items.sort()
return ' '.join(items)
- pn = d.getVar('PN', True)
- pe = d.getVar('PE', True) or "0"
- pv = d.getVar('PV', True)
- pr = d.getVar('PR', True)
+ pn = d.getVar('PN')
+ pe = d.getVar('PE') or "0"
+ pv = d.getVar('PV')
+ pr = d.getVar('PR')
+ layer = bb.utils.get_file_layer(d.getVar('FILE'), d)
- pkgdata_dir = d.getVar('PKGDATA_DIR', True)
+ pkgdata_dir = d.getVar('PKGDATA_DIR')
packages = ""
try:
with open(os.path.join(pkgdata_dir, pn)) as f:
@@ -200,7 +230,7 @@ python buildhistory_emit_pkghistory() {
raise
packagelist = packages.split()
- preserve = d.getVar('BUILDHISTORY_PRESERVE', True).split()
+ preserve = d.getVar('BUILDHISTORY_PRESERVE').split()
if not os.path.exists(pkghistdir):
bb.utils.mkdirhier(pkghistdir)
else:
@@ -220,11 +250,12 @@ python buildhistory_emit_pkghistory() {
rcpinfo.pe = pe
rcpinfo.pv = pv
rcpinfo.pr = pr
- rcpinfo.depends = sortlist(oe.utils.squashspaces(d.getVar('DEPENDS', True) or ""))
+ rcpinfo.depends = sortlist(oe.utils.squashspaces(d.getVar('DEPENDS') or ""))
rcpinfo.packages = packages
+ rcpinfo.layer = layer
write_recipehistory(rcpinfo, d)
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
for pkg in packagelist:
pkgdata = {}
with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
@@ -233,7 +264,7 @@ python buildhistory_emit_pkghistory() {
key = item[0]
if key.endswith('_' + pkg):
key = key[:-len(pkg)-1]
- pkgdata[key] = item[1]
+ pkgdata[key] = item[1].encode('latin-1').decode('unicode_escape')
pkge = pkgdata.get('PKGE', '0')
pkgv = pkgdata['PKGV']
@@ -249,7 +280,7 @@ python buildhistory_emit_pkghistory() {
last_pkgr = lastversion.pkgr
r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
if r < 0:
- msg = "Package version for package %s went backwards which would break package feeds from (%s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
+ msg = "Package version for package %s went backwards which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
package_qa_handle_error("version-going-backwards", msg, d)
pkginfo = PackageInfo(pkg)
@@ -276,7 +307,7 @@ python buildhistory_emit_pkghistory() {
dictval = json.loads(val)
filelist = list(dictval.keys())
filelist.sort()
- pkginfo.filelist = " ".join(filelist)
+ pkginfo.filelist = " ".join([shlex.quote(x) for x in filelist])
pkginfo.size = int(pkgdata['PKGSIZE'])
@@ -286,11 +317,46 @@ python buildhistory_emit_pkghistory() {
bb.build.exec_func("buildhistory_list_pkg_files", d)
}
+python buildhistory_emit_outputsigs() {
+ if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
+ return
+
+ import hashlib
+
+ taskoutdir = os.path.join(d.getVar('BUILDHISTORY_DIR'), 'task', 'output')
+ bb.utils.mkdirhier(taskoutdir)
+ currenttask = d.getVar('BB_CURRENTTASK')
+ pn = d.getVar('PN')
+ taskfile = os.path.join(taskoutdir, '%s.%s' % (pn, currenttask))
+
+ cwd = os.getcwd()
+ filesigs = {}
+ for root, _, files in os.walk(cwd):
+ for fname in files:
+ if fname == 'fixmepath':
+ continue
+ fullpath = os.path.join(root, fname)
+ try:
+ if os.path.islink(fullpath):
+ sha256 = hashlib.sha256(os.readlink(fullpath).encode('utf-8')).hexdigest()
+ elif os.path.isfile(fullpath):
+ sha256 = bb.utils.sha256_file(fullpath)
+ else:
+ continue
+ except OSError:
+ bb.warn('buildhistory: unable to read %s to get output signature' % fullpath)
+ continue
+ filesigs[os.path.relpath(fullpath, cwd)] = sha256
+ with open(taskfile, 'w') as f:
+ for fpath, fsig in sorted(filesigs.items(), key=lambda item: item[0]):
+ f.write('%s %s\n' % (fpath, fsig))
+}
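+# Each task/output/<PN>.<task> file written above holds one "path sha256" pair
+# per output file, e.g. (illustrative):
+#   usr/bin/foo 3a7bd3e2360a3d29eea436fcfb7e44c735d117c42d1c1835420b6b9942dd4f1b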
+
def write_recipehistory(rcpinfo, d):
bb.debug(2, "Writing recipe history")
- pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE')
infofile = os.path.join(pkghistdir, "latest")
with open(infofile, "w") as f:
@@ -300,12 +366,14 @@ def write_recipehistory(rcpinfo, d):
f.write(u"PR = %s\n" % rcpinfo.pr)
f.write(u"DEPENDS = %s\n" % rcpinfo.depends)
f.write(u"PACKAGES = %s\n" % rcpinfo.packages)
+ f.write(u"LAYER = %s\n" % rcpinfo.layer)
+ write_latest_srcrev(d, pkghistdir)
def write_pkghistory(pkginfo, d):
bb.debug(2, "Writing package history for package %s" % pkginfo.name)
- pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE')
pkgpath = os.path.join(pkghistdir, pkginfo.name)
if not os.path.exists(pkgpath):
@@ -366,7 +434,7 @@ def buildhistory_list_installed(d, rootfs_type="image"):
pkgs = sdk_list_installed_packages(d, rootfs_type == "sdk_target")
for output_type, output_file in process_list:
- output_file_full = os.path.join(d.getVar('WORKDIR', True), output_file)
+ output_file_full = os.path.join(d.getVar('WORKDIR'), output_file)
with open(output_file_full, 'w') as output:
output.write(format_pkg_list(pkgs, output_type))
@@ -399,19 +467,26 @@ buildhistory_get_installed() {
# Produce dependency graph
# First, quote each name to handle characters that cause issues for dot
- sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp && \
+ sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp &&
rm ${WORKDIR}/bh_installed_pkgs_deps.txt
- # Change delimiter from pipe to -> and set style for recommend lines
- sed -i -e 's:|: -> :' -e 's:"\[REC\]":[style=dotted]:' -e 's:$:;:' $1/depends.tmp
+ # Remove lines with rpmlib(...) and config(...) dependencies, change the
+ # delimiter from pipe to "->", set the style for recommend lines and
+ # turn versioned dependencies into edge labels.
+ sed -i -e '/rpmlib(/d' \
+ -e '/config(/d' \
+ -e 's:|: -> :' \
+ -e 's:"\[REC\]":[style=dotted]:' \
+ -e 's:"\([<>=]\+\)" "\([^"]*\)":[label="\1 \2"]:' \
+ $1/depends.tmp
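+    # e.g. (illustrative) '"a" -> "b" ">=" "1.0"' becomes '"a" -> "b" [label=">= 1.0"]'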
# Add header, sorted and de-duped contents and footer and then delete the temp file
printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
- cat $1/depends.tmp | sort | uniq >> $1/depends.dot
+ cat $1/depends.tmp | sort -u >> $1/depends.dot
echo "}" >> $1/depends.dot
rm $1/depends.tmp
# Produce installed package sizes list
oe-pkgdata-util -p ${PKGDATA_DIR} read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
- cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB " $1}' | sort -n -r > $1/installed-package-sizes.txt
+ cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB\t" $1}' | sort -n -r > $1/installed-package-sizes.txt
rm $1/installed-package-sizes.tmp
# We're now done with the cache, delete it
@@ -463,12 +538,28 @@ buildhistory_get_sdk_installed_target() {
buildhistory_list_files() {
# List the files in the specified directory, but exclude date/time etc.
- # This awk script is somewhat messy, but handles where the size is not printed for device files under pseudo
+    # This is somewhat messy, but handles the case where the size is not printed for device files under pseudo
+ ( cd $1
+ find_cmd='find . ! -path . -printf "%M %-10u %-10g %10s %p -> %l\n"'
if [ "$3" = "fakeroot" ] ; then
- ( cd $1 && ${FAKEROOTENV} ${FAKEROOTCMD} find . ! -path . -printf "%M %-10u %-10g %10s %p -> %l\n" | sort -k5 | sed 's/ * -> $//' > $2 )
+        eval ${FAKEROOTENV} ${FAKEROOTCMD} "$find_cmd"
else
- ( cd $1 && find . ! -path . -printf "%M %-10u %-10g %10s %p -> %l\n" | sort -k5 | sed 's/ * -> $//' > $2 )
- fi
+ eval $find_cmd
+ fi | sort -k5 | sed 's/ * -> $//' > $2 )
+}
+
+buildhistory_list_files_no_owners() {
+ # List the files in the specified directory, but exclude date/time etc.
+    # Also don't output the ownership data; instead output just "- -" so
+    # that the same parsing code as for _list_files works.
+    # This is somewhat messy, but handles the case where the size is not printed for device files under pseudo
+ ( cd $1
+ find_cmd='find . ! -path . -printf "%M - - %10s %p -> %l\n"'
+ if [ "$3" = "fakeroot" ] ; then
+ eval ${FAKEROOTENV} ${FAKEROOTCMD} "$find_cmd"
+ else
+ eval "$find_cmd"
+ fi | sort -k5 | sed 's/ * -> $//' > $2 )
}
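+# Both helpers emit one line per file, e.g. (illustrative):
+#   -rw-r--r-- root       root             1234 ./etc/hostname
+# with "- -" in place of the owner and group in the no_owners variant.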
buildhistory_list_pkg_files() {
@@ -546,34 +637,33 @@ END
python buildhistory_get_extra_sdkinfo() {
import operator
- import math
- if d.getVar('BB_CURRENTTASK', True) == 'populate_sdk_ext':
- tasksizes = {}
- filesizes = {}
- for root, _, files in os.walk(d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')):
- for fn in files:
- if fn.endswith('.tgz'):
- fsize = int(math.ceil(float(os.path.getsize(os.path.join(root, fn))) / 1024))
- task = fn.rsplit(':', 1)[1].split('_', 1)[1].split('.')[0]
- origtotal = tasksizes.get(task, 0)
- tasksizes[task] = origtotal + fsize
- filesizes[fn] = fsize
+ from oe.sdk import get_extra_sdkinfo
+
+ sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
+ extra_info = get_extra_sdkinfo(sstate_dir)
+
+ if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext' and \
+ "sdk" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
with open(d.expand('${BUILDHISTORY_DIR_SDK}/sstate-package-sizes.txt'), 'w') as f:
- filesizes_sorted = sorted(filesizes.items(), key=operator.itemgetter(1, 0), reverse=True)
+ filesizes_sorted = sorted(extra_info['filesizes'].items(), key=operator.itemgetter(1, 0), reverse=True)
for fn, size in filesizes_sorted:
f.write('%10d KiB %s\n' % (size, fn))
with open(d.expand('${BUILDHISTORY_DIR_SDK}/sstate-task-sizes.txt'), 'w') as f:
- tasksizes_sorted = sorted(tasksizes.items(), key=operator.itemgetter(1, 0), reverse=True)
+ tasksizes_sorted = sorted(extra_info['tasksizes'].items(), key=operator.itemgetter(1, 0), reverse=True)
for task, size in tasksizes_sorted:
f.write('%10d KiB %s\n' % (size, task))
}
# By using ROOTFS_POSTUNINSTALL_COMMAND we get in after uninstallation of
# unneeded packages but before the removal of packaging files
-ROOTFS_POSTUNINSTALL_COMMAND += " buildhistory_list_installed_image ;\
- buildhistory_get_image_installed ; "
+ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_list_installed_image ;"
+ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_get_image_installed ;"
+ROOTFS_POSTUNINSTALL_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_image ;| buildhistory_get_image_installed ;"
+ROOTFS_POSTUNINSTALL_COMMAND[vardepsexclude] += "buildhistory_list_installed_image buildhistory_get_image_installed"
-IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; "
+IMAGE_POSTPROCESS_COMMAND += "buildhistory_get_imageinfo ;"
+IMAGE_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_imageinfo ;"
+IMAGE_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_imageinfo"
# We want these to be the last run so that we get called after complementary package installation
POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target;"
@@ -587,11 +677,21 @@ POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_insta
SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+python buildhistory_write_sigs() {
+ if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
+ return
+
+ # Create sigs file
+ if hasattr(bb.parse.siggen, 'dump_siglist'):
+ taskoutdir = os.path.join(d.getVar('BUILDHISTORY_DIR'), 'task')
+ bb.utils.mkdirhier(taskoutdir)
+ bb.parse.siggen.dump_siglist(os.path.join(taskoutdir, 'tasksigs.txt'))
+}
+
def buildhistory_get_build_id(d):
- if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
localdata = bb.data.createCopy(d)
- bb.data.update_data(localdata)
statuslines = []
for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
g = globals()
@@ -602,15 +702,32 @@ def buildhistory_get_build_id(d):
if flines:
statuslines.extend(flines)
- statusheader = d.getVar('BUILDCFG_HEADER', True)
+ statusheader = d.getVar('BUILDCFG_HEADER')
return('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
+def buildhistory_get_modified(path):
+ # copied from get_layer_git_status() in image-buildinfo.bbclass
+ import subprocess
+ try:
+ subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
+ git diff --quiet --no-ext-diff
+ git diff --quiet --no-ext-diff --cached""" % path,
+ shell=True,
+ stderr=subprocess.STDOUT)
+ return ""
+ except subprocess.CalledProcessError as ex:
+ # Silently treat errors as "modified", without checking for the
+ # (expected) return code 1 in a modified git repo. For example, we get
+ # output and a 129 return code when a layer isn't a git repo at all.
+ return " -- modified"
+
def buildhistory_get_metadata_revs(d):
# We want an easily machine-readable format here, so get_layers_branch_rev isn't quite what we want
- layers = (d.getVar("BBLAYERS", True) or "").split()
- medadata_revs = ["%-17s = %s:%s" % (os.path.basename(i), \
+ layers = (d.getVar("BBLAYERS") or "").split()
+ medadata_revs = ["%-17s = %s:%s%s" % (os.path.basename(i), \
base_get_metadata_git_branch(i, None).strip(), \
- base_get_metadata_git_revision(i, None)) \
+ base_get_metadata_git_revision(i, None), \
+ buildhistory_get_modified(i)) \
for i in layers]
return '\n'.join(medadata_revs)
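+# metadata-revs lines then look like (illustrative):
+#   meta              = master:0123456789abcdef0123456789abcdef01234567 -- modified
+# where the " -- modified" suffix only appears for layers with uncommitted changes.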
@@ -619,7 +736,7 @@ def outputvars(vars, listvars, d):
listvars = listvars.split()
ret = ""
for var in vars:
- value = d.getVar(var, True) or ""
+ value = d.getVar(var) or ""
if var in listvars:
# Squash out spaces
value = oe.utils.squashspaces(value)
@@ -627,17 +744,17 @@ def outputvars(vars, listvars, d):
return ret.rstrip('\n')
def buildhistory_get_imagevars(d):
- if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
imagevars = "DISTRO DISTRO_VERSION USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE ROOTFS_POSTPROCESS_COMMAND IMAGE_POSTPROCESS_COMMAND"
listvars = "USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS PACKAGE_EXCLUDE"
return outputvars(imagevars, listvars, d)
def buildhistory_get_sdkvars(d):
- if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
- if d.getVar('BB_CURRENTTASK', True) == 'populate_sdk_ext':
+ if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext':
# Extensible SDK uses some additional variables
sdkvars += " SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST"
@@ -645,20 +762,23 @@ def buildhistory_get_sdkvars(d):
def buildhistory_get_cmdline(d):
- if sys.argv[0].endswith('bin/bitbake'):
- bincmd = 'bitbake'
- else:
- bincmd = sys.argv[0]
- return '%s %s' % (bincmd, ' '.join(sys.argv[1:]))
+ argv = d.getVar('BB_CMDLINE', False)
+ if argv:
+ if argv[0].endswith('bin/bitbake'):
+ bincmd = 'bitbake'
+ else:
+ bincmd = argv[0]
+ return '%s %s' % (bincmd, ' '.join(argv[1:]))
+ return ''
buildhistory_single_commit() {
if [ "$3" = "" ] ; then
commitopts="${BUILDHISTORY_DIR}/ --allow-empty"
- item="No changes"
+ shortlogprefix="No changes: "
else
- commitopts="$3 metadata-revs"
- item="$3"
+ commitopts=""
+ shortlogprefix=""
fi
if [ "${BUILDHISTORY_BUILD_FAILURES}" = "0" ] ; then
result="succeeded"
@@ -675,7 +795,7 @@ buildhistory_single_commit() {
esac
commitmsgfile=`mktemp`
cat > $commitmsgfile << END
-$item: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $2
+${shortlogprefix}Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $2
cmd: $1
@@ -708,15 +828,9 @@ END
git tag -f build-minus-2 build-minus-1 > /dev/null 2>&1 || true
git tag -f build-minus-1 > /dev/null 2>&1 || true
fi
- # If the user hasn't set up their name/email, set some defaults
- # just for this repo (otherwise the commit will fail with older
- # versions of git)
- if ! git config user.email > /dev/null ; then
- git config --local user.email "buildhistory@${DISTRO}"
- fi
- if ! git config user.name > /dev/null ; then
- git config --local user.name "buildhistory"
- fi
+
+ check_git_config
+
# Check if there are new/changed files to commit (other than metadata-revs)
repostatus=`git status --porcelain | grep -v " metadata-revs$"`
HOSTNAME=`hostname 2>/dev/null || echo unknown`
@@ -725,9 +839,7 @@ END
git add -A .
# porcelain output looks like "?? packages/foo/bar"
# Ensure we commit metadata-revs with the first commit
- for entry in `echo "$repostatus" | awk '{print $2}' | awk -F/ '{print $1}' | sort | uniq` ; do
- buildhistory_single_commit "$CMDLINE" "$HOSTNAME" "$entry"
- done
+ buildhistory_single_commit "$CMDLINE" "$HOSTNAME" dummy
git gc --auto --quiet
else
buildhistory_single_commit "$CMDLINE" "$HOSTNAME"
@@ -738,16 +850,16 @@ END
}
python buildhistory_eventhandler() {
- if e.data.getVar('BUILDHISTORY_FEATURES', True).strip():
- reset = e.data.getVar("BUILDHISTORY_RESET", True)
- olddir = e.data.getVar("BUILDHISTORY_OLD_DIR", True)
+ if e.data.getVar('BUILDHISTORY_FEATURES').strip():
+ reset = e.data.getVar("BUILDHISTORY_RESET")
+ olddir = e.data.getVar("BUILDHISTORY_OLD_DIR")
if isinstance(e, bb.event.BuildStarted):
if reset:
import shutil
# Clean up after potentially interrupted build.
if os.path.isdir(olddir):
shutil.rmtree(olddir)
- rootdir = e.data.getVar("BUILDHISTORY_DIR", True)
+ rootdir = e.data.getVar("BUILDHISTORY_DIR")
entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ]
bb.utils.mkdirhier(olddir)
for entry in entries:
@@ -757,13 +869,20 @@ python buildhistory_eventhandler() {
if reset:
import shutil
shutil.rmtree(olddir)
- if e.data.getVar("BUILDHISTORY_COMMIT", True) == "1":
+ if e.data.getVar("BUILDHISTORY_COMMIT") == "1":
bb.note("Writing buildhistory")
+ bb.build.exec_func("buildhistory_write_sigs", d)
+ import time
+            start = time.time()
localdata = bb.data.createCopy(e.data)
localdata.setVar('BUILDHISTORY_BUILD_FAILURES', str(e._failures))
interrupted = getattr(e, '_interrupted', 0)
localdata.setVar('BUILDHISTORY_BUILD_INTERRUPTED', str(interrupted))
bb.build.exec_func("buildhistory_commit", localdata)
+            stop = time.time()
+ bb.note("Writing buildhistory took: %s seconds" % round(stop-start))
+ else:
+ bb.note("No commit since BUILDHISTORY_COMMIT != '1'")
}
addhandler buildhistory_eventhandler
@@ -777,7 +896,7 @@ def _get_srcrev_values(d):
"""
scms = []
- fetcher = bb.fetch.Fetch(d.getVar('SRC_URI', True).split(), d)
+ fetcher = bb.fetch.Fetch(d.getVar('SRC_URI').split(), d)
urldata = fetcher.ud
for u in urldata:
if urldata[u].method.supports_srcrev():
@@ -809,7 +928,10 @@ def _get_srcrev_values(d):
do_fetch[postfuncs] += "write_srcrev"
do_fetch[vardepsexclude] += "write_srcrev"
python write_srcrev() {
- pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+ write_latest_srcrev(d, d.getVar('BUILDHISTORY_DIR_PACKAGE'))
+}
+
+def write_latest_srcrev(d, pkghistdir):
srcrevfile = os.path.join(pkghistdir, 'latest_srcrev')
srcrevs, tag_srcrevs = _get_srcrev_values(d)
@@ -830,21 +952,50 @@ python write_srcrev() {
if orig_srcrev != 'INVALID':
f.write('# SRCREV = "%s"\n' % orig_srcrev)
if len(srcrevs) > 1:
- for name, srcrev in srcrevs.items():
+ for name, srcrev in sorted(srcrevs.items()):
orig_srcrev = d.getVar('SRCREV_%s' % name, False)
if orig_srcrev:
f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
else:
- f.write('SRCREV = "%s"\n' % srcrevs.values())
+ f.write('SRCREV = "%s"\n' % next(iter(srcrevs.values())))
if len(tag_srcrevs) > 0:
- for name, srcrev in tag_srcrevs.items():
+ for name, srcrev in sorted(tag_srcrevs.items()):
f.write('# tag_%s = "%s"\n' % (name, srcrev))
if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
- pkg = d.getVar('PN', True)
+ pkg = d.getVar('PN')
bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
else:
if os.path.exists(srcrevfile):
os.remove(srcrevfile)
+
+do_testimage[postfuncs] += "write_ptest_result"
+do_testimage[vardepsexclude] += "write_ptest_result"
+
+python write_ptest_result() {
+ write_latest_ptest_result(d, d.getVar('BUILDHISTORY_DIR'))
}
+
+def write_latest_ptest_result(d, histdir):
+ import glob
+ import subprocess
+ test_log_dir = d.getVar('TEST_LOG_DIR')
+ input_ptest = os.path.join(test_log_dir, 'ptest_log')
+ output_ptest = os.path.join(histdir, 'ptest')
+ if os.path.exists(input_ptest):
+ try:
+            # Lock it to avoid race issues
+ lock = bb.utils.lockfile(output_ptest + "/ptest.lock")
+ bb.utils.mkdirhier(output_ptest)
+ oe.path.copytree(input_ptest, output_ptest)
+            # Sort the test results
+ for result in glob.glob('%s/pass.fail.*' % output_ptest):
+ bb.debug(1, 'Processing %s' % result)
+ cmd = ['sort', result, '-o', result]
+ bb.debug(1, 'Running %s' % cmd)
+ ret = subprocess.call(cmd)
+ if ret != 0:
+ bb.error('Failed to run %s!' % cmd)
+ finally:
+ bb.utils.unlockfile(lock)
diff --git a/meta/classes/buildstats-summary.bbclass b/meta/classes/buildstats-summary.bbclass
index b86abcc3f1..f9b241b6c5 100644
--- a/meta/classes/buildstats-summary.bbclass
+++ b/meta/classes/buildstats-summary.bbclass
@@ -7,7 +7,7 @@ python buildstats_summary () {
if not os.path.exists(bsdir):
return
- sstatetasks = (e.data.getVar('SSTATETASKS', True) or '').split()
+ sstatetasks = (e.data.getVar('SSTATETASKS') or '').split()
built = collections.defaultdict(lambda: [set(), set()])
for pf in os.listdir(bsdir):
taskdir = os.path.join(bsdir, pf)
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
index 34ecb03861..2590c60c63 100644
--- a/meta/classes/buildstats.bbclass
+++ b/meta/classes/buildstats.bbclass
@@ -31,6 +31,11 @@ def get_process_cputime(pid):
i = f.readline().strip()
if not i:
break
+ if not ":" in i:
+                # an extra line may be appended (empty or containing "0"),
+                # most probably due to a race condition in the kernel while
+                # updating the IO stats
+ break
i = i.split(": ")
iostats[i[0]] = i[1]
resources = resource.getrusage(resource.RUSAGE_SELF)
@@ -75,13 +80,13 @@ def get_buildtimedata(var, d):
return timediff, cpuperc
def write_task_data(status, logfile, e, d):
- bn = d.getVar('BUILDNAME', True)
- bsdir = os.path.join(d.getVar('BUILDSTATS_BASE', True), bn)
+ bn = d.getVar('BUILDNAME')
+ bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
with open(os.path.join(logfile), "a") as f:
elapsedtime = get_timedata("__timedata_task", d, e.time)
if elapsedtime:
- f.write(d.expand("${PF}: %s: Elapsed time: %0.2f seconds \n" %
- (e.task, elapsedtime)))
+ f.write(d.expand("${PF}: %s\n" % e.task))
+ f.write(d.expand("Elapsed time: %0.2f seconds\n" % elapsedtime))
cpu, iostats, resources, childres = get_process_cputime(os.getpid())
if cpu:
f.write("utime: %s\n" % cpu['utime'])
@@ -95,7 +100,7 @@ def write_task_data(status, logfile, e, d):
f.write("rusage %s: %s\n" % (i, getattr(resources, i)))
for i in rusages:
f.write("Child rusage %s: %s\n" % (i, getattr(childres, i)))
- if status is "passed":
+ if status == "passed":
f.write("Status: PASSED \n")
else:
f.write("Status: FAILED \n")
@@ -106,9 +111,9 @@ python run_buildstats () {
import bb.event
import time, subprocess, platform
- bn = d.getVar('BUILDNAME', True)
- bsdir = os.path.join(d.getVar('BUILDSTATS_BASE', True), bn)
- taskdir = os.path.join(bsdir, d.getVar('PF', True))
+ bn = d.getVar('BUILDNAME')
+ bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
+ taskdir = os.path.join(bsdir, d.getVar('PF'))
if isinstance(e, bb.event.BuildStarted):
########################################################################
@@ -162,9 +167,14 @@ python run_buildstats () {
if e.task == "do_rootfs":
bs = os.path.join(bsdir, "build_stats")
with open(bs, "a") as f:
- rootfs = d.getVar('IMAGE_ROOTFS', True)
- rootfs_size = subprocess.Popen(["du", "-sh", rootfs], stdout=subprocess.PIPE).stdout.read()
- f.write("Uncompressed Rootfs size: %s" % rootfs_size)
+ rootfs = d.getVar('IMAGE_ROOTFS')
+ if os.path.isdir(rootfs):
+ try:
+ rootfs_size = subprocess.check_output(["du", "-sh", rootfs],
+ stderr=subprocess.STDOUT).decode('utf-8')
+ f.write("Uncompressed Rootfs size: %s" % rootfs_size)
+ except subprocess.CalledProcessError as err:
+ bb.warn("Failed to get rootfs size: %s" % err.output.decode('utf-8'))
elif isinstance(e, bb.build.TaskFailed):
# Can have a failure before TaskStarted so need to mkdir here too
@@ -183,3 +193,27 @@ python run_buildstats () {
addhandler run_buildstats
run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
+python runqueue_stats () {
+ import buildstats
+ from bb import event, runqueue
+ # We should not record any samples before the first task has started,
+ # because that's the first activity shown in the process chart.
+    # Besides, at that point we can be sure that the build variables
+    # we need to find the output directory are available.
+ # The persistent SystemStats is stored in the datastore and
+ # closed when the build is done.
+ system_stats = d.getVar('_buildstats_system_stats', False)
+ if not system_stats and isinstance(e, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted)):
+ system_stats = buildstats.SystemStats(d)
+ d.setVar('_buildstats_system_stats', system_stats)
+ if system_stats:
+ # Ensure that we sample at important events.
+ done = isinstance(e, bb.event.BuildCompleted)
+ system_stats.sample(e, force=done)
+ if done:
+ system_stats.close()
+ d.delVar('_buildstats_system_stats')
+}
+
+addhandler runqueue_stats
+runqueue_stats[eventmask] = "bb.runqueue.sceneQueueTaskStarted bb.runqueue.runQueueTaskStarted bb.event.HeartbeatEvent bb.event.BuildCompleted bb.event.MonitorDiskEvent"
diff --git a/meta/classes/ccache.bbclass b/meta/classes/ccache.bbclass
index 2e9837cf07..b5457359ca 100644
--- a/meta/classes/ccache.bbclass
+++ b/meta/classes/ccache.bbclass
@@ -1,6 +1,66 @@
-CCACHE = "${@bb.utils.which(d.getVar('PATH', True), 'ccache') and 'ccache '}"
-export CCACHE_DIR ?= "${TMPDIR}/ccache/${MULTIMACH_HOST_SYS}/${PN}"
-CCACHE_DISABLE[unexport] = "1"
+#
+# Usage:
+# - Enable ccache
+# Add the following line to a conffile such as conf/local.conf:
+# INHERIT += "ccache"
+#
+# - Disable ccache for a recipe
+# Add the following line to the recipe if it can't be built with ccache:
+#   CCACHE_DISABLE = "1"
+#
+# - Share ccache files between different builds
+# Set CCACHE_TOP_DIR to a shared dir
+#   CCACHE_TOP_DIR = "/path/to/shared_ccache/"
+#
+# - To debug ccache
+# export CCACHE_DEBUG = "1"
+# export CCACHE_LOGFILE = "${CCACHE_DIR}/logfile.log"
+#   Also set PARALLEL_MAKE = "-j 1" to keep the log in order
+#
-do_configure[dirs] =+ "${CCACHE_DIR}"
-do_kernel_configme[dirs] =+ "${CCACHE_DIR}"
+# Set CCACHE_TOP_DIR to a shared location so that cache files can be
+# shared between different builds.
+CCACHE_TOP_DIR ?= "${TMPDIR}/ccache"
+
+# ccache removes CCACHE_BASEDIR from file paths, so that hashes will be the
+# same in different builds.
+export CCACHE_BASEDIR ?= "${TMPDIR}"
+
+# Used for sharing cache files after the compiler is rebuilt
+export CCACHE_COMPILERCHECK ?= "%compiler% -dumpspecs"
+
+export CCACHE_CONFIGPATH ?= "${COREBASE}/meta/conf/ccache.conf"
+
+export CCACHE_DIR ?= "${CCACHE_TOP_DIR}/${MULTIMACH_TARGET_SYS}/${PN}"
+
+# We need to stop ccache considering the current directory or the
+# debug-prefix-map target directory to be significant when calculating
+# its hash. Without this the cache would be invalidated every time
+# ${PV} or ${PR} change.
+export CCACHE_NOHASHDIR ?= "1"
+
+python() {
+ """
+ Enable ccache for the recipe
+ """
+ pn = d.getVar('PN')
+    # quilt-native doesn't need ccache since it has no C files
+ if not (pn in ('ccache-native', 'quilt-native') or
+ bb.utils.to_boolean(d.getVar('CCACHE_DISABLE'))):
+ d.appendVar('DEPENDS', ' ccache-native')
+ d.setVar('CCACHE', 'ccache ')
+}
+
+addtask cleanccache after do_clean
+python do_cleanccache() {
+ import shutil
+
+ ccache_dir = d.getVar('CCACHE_DIR')
+ if os.path.exists(ccache_dir):
+ bb.note("Removing %s" % ccache_dir)
+ shutil.rmtree(ccache_dir)
+ else:
+ bb.note("%s doesn't exist" % ccache_dir)
+}
+addtask cleanall after do_cleanccache
+do_cleanccache[nostamp] = "1"
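+
+# The per-recipe cache can then be cleared with (illustrative):
+#   bitbake -c cleanccache <recipe>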
diff --git a/meta/classes/ccmake.bbclass b/meta/classes/ccmake.bbclass
new file mode 100644
index 0000000000..df5134a108
--- /dev/null
+++ b/meta/classes/ccmake.bbclass
@@ -0,0 +1,97 @@
+inherit terminal
+
+python do_ccmake() {
+ import shutil
+
+ # copy current config for diffing
+ config = os.path.join(d.getVar("B"), "CMakeCache.txt")
+ if os.path.exists(config):
+ shutil.copy(config, config + ".orig")
+
+ oe_terminal(d.expand("ccmake ${OECMAKE_GENERATOR_ARGS} ${OECMAKE_SOURCEPATH} -Wno-dev"),
+ d.getVar("PN") + " - ccmake", d)
+
+ if os.path.exists(config) and os.path.exists(config + ".orig"):
+ if bb.utils.md5_file(config) != bb.utils.md5_file(config + ".orig"):
+            # the cmake class uses cmake --build, which by default regenerates
+            # the configuration; simply mark the compile step as tainted to
+            # ensure it is re-run
+ bb.note("Configuration changed, recompile will be forced")
+ bb.build.write_taint('do_compile', d)
+
+}
+do_ccmake[depends] += "cmake-native:do_populate_sysroot"
+do_ccmake[nostamp] = "1"
+do_ccmake[dirs] = "${B}"
+addtask ccmake after do_configure
+
+def cmake_parse_config_cache(path):
+ with open(path, "r") as f:
+ for i in f:
+ i = i.rstrip("\n")
+ if len(i) == 0 or i.startswith("//") or i.startswith("#"):
+ continue # empty or comment
+ key, value = i.split("=", 1)
+ key, keytype = key.split(":")
+ if keytype in ["INTERNAL", "STATIC"]:
+ continue # skip internal and static config options
+ yield key, keytype, value
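+# For example (illustrative), a cache line such as
+#     CMAKE_BUILD_TYPE:STRING=Release
+# is yielded as ("CMAKE_BUILD_TYPE", "STRING", "Release").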
+
+def cmake_diff_config_vars(a, b):
+ removed, added = [], []
+
+ for ak, akt, av in a:
+ found = False
+ for bk, bkt, bv in b:
+ if bk == ak:
+ found = True
+ if bkt != akt or bv != av: # changed
+ removed.append((ak, akt, av))
+ added.append((bk, bkt, bv))
+ break
+ # remove any missing from b
+ if not found:
+ removed.append((ak, akt, av))
+
+ # add any missing from a
+ for bk, bkt, bv in b:
+ if not any(bk == ak for ak, akt, av in a):
+ added.append((bk, bkt, bv))
+
+ return removed, added
+
+python do_ccmake_diffconfig() {
+ import shutil
+ config = os.path.join(d.getVar("B"), "CMakeCache.txt")
+ if os.path.exists(config) and os.path.exists(config + ".orig"):
+ if bb.utils.md5_file(config) != bb.utils.md5_file(config + ".orig"):
+ # scan the changed options
+ old = list(cmake_parse_config_cache(config + ".orig"))
+ new = list(cmake_parse_config_cache(config))
+ _, added = cmake_diff_config_vars(old, new)
+
+ if len(added) != 0:
+ with open(d.expand("${WORKDIR}/configuration.inc"), "w") as f:
+ f.write("EXTRA_OECMAKE += \" \\\n")
+ for k, kt, v in added:
+ escaped = v if " " not in v else "\"{0}\"".format(v)
+ f.write(" -D{0}:{1}={2} \\\n".format(k, kt, escaped))
+ f.write(" \"\n")
+ bb.plain("Configuration recipe fragment written to: {0}".format(d.expand("${WORKDIR}/configuration.inc")))
+
+ with open(d.expand("${WORKDIR}/site-file.cmake"), "w") as f:
+ for k, kt, v in added:
+ f.write("SET({0} \"{1}\" CACHE {2} \"\")\n".format(k, v, kt))
+ bb.plain("Configuration cmake fragment written to: {0}".format(d.expand("${WORKDIR}/site-file.cmake")))
+
+ # restore the original config
+ shutil.copy(config + ".orig", config)
+ else:
+ bb.plain("No configuration differences, skipping configuration fragment generation.")
+ else:
+ bb.fatal("No config files found. Did you run ccmake?")
+}
+do_ccmake_diffconfig[nostamp] = "1"
+do_ccmake_diffconfig[dirs] = "${B}"
+addtask ccmake_diffconfig
+
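
The cache parser above relies on CMake's KEY:TYPE=VALUE cache format. A stand-alone sketch of the same filtering, run against a hypothetical cache:

    import io

    def parse_cache(lines):
        # Same filtering as cmake_parse_config_cache above: skip blank lines,
        # comments, and INTERNAL/STATIC entries.
        for i in lines:
            i = i.rstrip("\n")
            if len(i) == 0 or i.startswith("//") or i.startswith("#"):
                continue
            key, value = i.split("=", 1)
            key, keytype = key.split(":")
            if keytype in ["INTERNAL", "STATIC"]:
                continue
            yield key, keytype, value

    sample = io.StringIO("// comment\nCMAKE_BUILD_TYPE:STRING=Release\nFOO:INTERNAL=x\n")
    print(list(parse_cache(sample)))
    # [('CMAKE_BUILD_TYPE', 'STRING', 'Release')]
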
diff --git a/meta/classes/chrpath.bbclass b/meta/classes/chrpath.bbclass
index 72e8a122e3..2870c10d51 100644
--- a/meta/classes/chrpath.bbclass
+++ b/meta/classes/chrpath.bbclass
@@ -1,7 +1,7 @@
CHRPATH_BIN ?= "chrpath"
PREPROCESS_RELOCATE_DIRS ?= ""
-def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
+def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False):
import subprocess as sub
p = sub.Popen([cmd, '-l', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
@@ -17,32 +17,39 @@ def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
# Throw away everything other than the rpath list
curr_rpath = out.partition("RPATH=")[2]
#bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
- rpaths = curr_rpath.split(":")
+ rpaths = curr_rpath.strip().split(":")
new_rpaths = []
modified = False
for rpath in rpaths:
# If rpath is already dynamic copy it to new_rpath and continue
if rpath.find("$ORIGIN") != -1:
- new_rpaths.append(rpath.strip())
+ new_rpaths.append(rpath)
continue
rpath = os.path.normpath(rpath)
if baseprefix not in rpath and tmpdir not in rpath:
- new_rpaths.append(rpath.strip())
+ # Skip standard search paths
+            if rpath in ['/lib', '/usr/lib', '/lib64', '/usr/lib64']:
+                bb.warn("Skipping RPATH %s as it is a standard search path for %s" % (rpath, fpath))
+ modified = True
+ continue
+ new_rpaths.append(rpath)
continue
- new_rpaths.append("$ORIGIN/" + os.path.relpath(rpath.strip(), os.path.dirname(fpath.replace(rootdir, "/"))))
+ new_rpaths.append("$ORIGIN/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/"))))
modified = True
# if we have modified some rpaths call chrpath to update the binary
if modified:
+ if break_hardlinks:
+ bb.utils.break_hardlinks(fpath)
+
args = ":".join(new_rpaths)
#bb.note("Setting rpath for %s to %s" %(fpath, args))
p = sub.Popen([cmd, '-r', args, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
out, err = p.communicate()
if p.returncode != 0:
- bb.error("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN', True), p.returncode, out, err))
- raise bb.build.FuncFailed
+ bb.fatal("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN'), p.returncode, out, err))
-def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
+def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False):
import subprocess as sub
p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
@@ -57,18 +64,21 @@ def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
if baseprefix not in rpath:
continue
+ if break_hardlinks:
+ bb.utils.break_hardlinks(fpath)
+
newpath = "@loader_path/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/")))
p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, newpath, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
out, err = p.communicate()
-def process_dir (rootdir, directory, d):
+def process_dir(rootdir, directory, d, break_hardlinks = False):
import stat
rootdir = os.path.normpath(rootdir)
cmd = d.expand('${CHRPATH_BIN}')
tmpdir = os.path.normpath(d.getVar('TMPDIR', False))
baseprefix = os.path.normpath(d.expand('${base_prefix}'))
- hostos = d.getVar("HOST_OS", True)
+ hostos = d.getVar("HOST_OS")
#bb.debug("Checking %s for binaries to process" % directory)
if not os.path.exists(directory):
@@ -91,7 +101,7 @@ def process_dir (rootdir, directory, d):
continue
if os.path.isdir(fpath):
- process_dir(rootdir, fpath, d)
+ process_dir(rootdir, fpath, d, break_hardlinks = break_hardlinks)
else:
#bb.note("Testing %s for relocatability" % fpath)
@@ -104,8 +114,9 @@ def process_dir (rootdir, directory, d):
else:
# Temporarily make the file writeable so we can chrpath it
os.chmod(fpath, perms|stat.S_IRWXU)
- process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d)
-
+
+ process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = break_hardlinks)
+
if perms:
os.chmod(fpath, perms)
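
The rewrite above rebases absolute RPATH entries onto $ORIGIN so binaries stay relocatable. A simplified stand-alone sketch with hypothetical paths (re-rooting via os.path.relpath rather than the class's string replace):

    import os

    def origin_relative(rpath, fpath, rootdir):
        # Directory of the file once rootdir is stripped and re-rooted at "/",
        # then the rpath expressed relative to that directory.
        location = os.path.dirname("/" + os.path.relpath(fpath, rootdir))
        return "$ORIGIN/" + os.path.relpath(rpath, location)

    # origin_relative("/usr/lib", "/sysroot/usr/bin/tool", "/sysroot")
    #   -> "$ORIGIN/../lib"
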
diff --git a/meta/classes/clutter.bbclass b/meta/classes/clutter.bbclass
index 167407dfdc..24b53a13e4 100644
--- a/meta/classes/clutter.bbclass
+++ b/meta/classes/clutter.bbclass
@@ -1,22 +1,18 @@
-
def get_minor_dir(v):
import re
- m = re.match("^([0-9]+)\.([0-9]+)", v)
+ m = re.match(r"^([0-9]+)\.([0-9]+)", v)
return "%s.%s" % (m.group(1), m.group(2))
def get_real_name(n):
import re
- m = re.match("^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
+ m = re.match(r"^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
return "%s" % (m.group(1))
VERMINOR = "${@get_minor_dir("${PV}")}"
REALNAME = "${@get_real_name("${BPN}")}"
-CLUTTER_SRC_FTP = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
-
-CLUTTER_SRC_GIT = "git://git.gnome.org/${REALNAME}"
-
-SRC_URI = "${CLUTTER_SRC_FTP}"
+SRC_URI = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
S = "${WORKDIR}/${REALNAME}-${PV}"
-inherit autotools pkgconfig gtk-doc gettext
+CLUTTERBASEBUILDCLASS ??= "autotools"
+inherit ${CLUTTERBASEBUILDCLASS} pkgconfig gtk-doc gettext
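
Illustrative behaviour of the two helpers above, with hypothetical inputs:

    import re

    def get_minor_dir(v):
        m = re.match(r"^([0-9]+)\.([0-9]+)", v)
        return "%s.%s" % (m.group(1), m.group(2))

    def get_real_name(n):
        m = re.match(r"^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
        return m.group(1)

    assert get_minor_dir("1.26.4") == "1.26"
    assert get_real_name("clutter-gtk-1.0") == "clutter-gtk"
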
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
index 7091f8ba81..8ccb1eefc7 100644
--- a/meta/classes/cmake.bbclass
+++ b/meta/classes/cmake.bbclass
@@ -1,15 +1,44 @@
# Path to the CMake file to process.
-OECMAKE_SOURCEPATH ?= "${S}"
+OECMAKE_SOURCEPATH ??= "${S}"
DEPENDS_prepend = "cmake-native "
B = "${WORKDIR}/build"
-# We need to unset CCACHE otherwise cmake gets too confused
-CCACHE = ""
-
-# C/C++ Compiler (without cpu arch/tune arguments)
-OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
-OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
+# What CMake generator to use.
+# The supported options are "Unix Makefiles" or "Ninja".
+OECMAKE_GENERATOR ?= "Ninja"
+
+python() {
+ generator = d.getVar("OECMAKE_GENERATOR")
+ if "Unix Makefiles" in generator:
+ args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=" + d.getVar("MAKE")
+ d.setVar("OECMAKE_GENERATOR_ARGS", args)
+ d.setVarFlag("do_compile", "progress", "percent")
+ elif "Ninja" in generator:
+ args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=ninja"
+ d.appendVar("DEPENDS", " ninja-native")
+ d.setVar("OECMAKE_GENERATOR_ARGS", args)
+ d.setVarFlag("do_compile", "progress", r"outof:^\[(\d+)/(\d+)\]\s+")
+ else:
+ bb.fatal("Unknown CMake Generator %s" % generator)
+
+ # C/C++ Compiler (without cpu arch/tune arguments)
+ if not d.getVar('OECMAKE_C_COMPILER'):
+ cc_list = d.getVar('CC').split()
+ if cc_list[0] == 'ccache':
+ d.setVar('OECMAKE_C_COMPILER_LAUNCHER', cc_list[0])
+ d.setVar('OECMAKE_C_COMPILER', cc_list[1])
+ else:
+ d.setVar('OECMAKE_C_COMPILER', cc_list[0])
+
+ if not d.getVar('OECMAKE_CXX_COMPILER'):
+ cxx_list = d.getVar('CXX').split()
+ if cxx_list[0] == 'ccache':
+ d.setVar('OECMAKE_CXX_COMPILER_LAUNCHER', cxx_list[0])
+ d.setVar('OECMAKE_CXX_COMPILER', cxx_list[1])
+ else:
+ d.setVar('OECMAKE_CXX_COMPILER', cxx_list[0])
+}
OECMAKE_AR ?= "${AR}"
# Compiler flags
@@ -19,6 +48,11 @@ OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG"
OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG"
OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
+CXXFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
+CFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
+
+OECMAKE_C_COMPILER_LAUNCHER ?= ""
+OECMAKE_CXX_COMPILER_LAUNCHER ?= ""
OECMAKE_RPATH ?= ""
OECMAKE_PERLNATIVE_DIR ??= ""
@@ -29,6 +63,12 @@ OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM_class-native = "BOTH"
EXTRA_OECMAKE_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OECMAKE_BUILD_prepend_task-compile = "${PARALLEL_MAKE} "
+EXTRA_OECMAKE_BUILD_prepend_task-install = "${PARALLEL_MAKEINST} "
+
+OECMAKE_TARGET_COMPILE ?= "all"
+OECMAKE_TARGET_INSTALL ?= "install"
+
# CMake expects target architectures in the format of uname(2),
# which do not always match TARGET_ARCH, so all the necessary
# conversions should happen here.
@@ -40,13 +80,19 @@ def map_target_arch_to_uname_arch(target_arch):
return target_arch
cmake_do_generate_toolchain_file() {
+ if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
+ cmake_crosscompiling="set( CMAKE_CROSSCOMPILING FALSE )"
+ fi
cat > ${WORKDIR}/toolchain.cmake <<EOF
# CMake system name must be something like "Linux".
# This is important for cross-compiling.
+$cmake_crosscompiling
set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).*/\1/'` )
-set( CMAKE_SYSTEM_PROCESSOR ${@map_target_arch_to_uname_arch(d.getVar('TARGET_ARCH', True))} )
+set( CMAKE_SYSTEM_PROCESSOR ${@map_target_arch_to_uname_arch(d.getVar('TARGET_ARCH'))} )
set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
+set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} )
+set( CMAKE_CXX_COMPILER_LAUNCHER ${OECMAKE_CXX_COMPILER_LAUNCHER} )
set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
set( CMAKE_AR ${OECMAKE_AR} CACHE FILEPATH "Archiver" )
set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
@@ -60,11 +106,12 @@ set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
# only search in the paths provided so cmake doesn't pick
# up libraries and tools from the native build machine
-set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN})
+set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN} ${HOSTTOOLS_DIR})
set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ${OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM} )
set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
+set( CMAKE_PROGRAM_PATH "/" )
# Use qt.conf settings
set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
@@ -73,17 +120,26 @@ set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
# directory as rpath by default
set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
-# Use native cmake modules
+# Use RPATHs relative to build directory for reproducibility
+set( CMAKE_BUILD_RPATH_USE_ORIGIN ON )
+
+# Use our cmake modules
list(APPEND CMAKE_MODULE_PATH "${STAGING_DATADIR}/cmake/Modules/")
# add for non /usr/lib libdir, e.g. /usr/lib64
set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
+# add include dir to implicit includes in case it differs from /usr/include
+list(APPEND CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
+list(APPEND CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
+
EOF
}
addtask generate_toolchain_file after do_patch before do_configure
+CONFIGURE_FILES = "CMakeLists.txt"
+
cmake_do_configure() {
if [ "${OECMAKE_BUILDPATH}" ]; then
bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
@@ -99,41 +155,43 @@ cmake_do_configure() {
# Just like autotools, cmake can use a site file to cache results that need generated binaries to run
if [ -e ${WORKDIR}/site-file.cmake ] ; then
- OECMAKE_SITEFILE=" -C ${WORKDIR}/site-file.cmake"
+ oecmake_sitefile="-C ${WORKDIR}/site-file.cmake"
else
- OECMAKE_SITEFILE=""
+ oecmake_sitefile=
fi
cmake \
- ${OECMAKE_SITEFILE} \
+ ${OECMAKE_GENERATOR_ARGS} \
+ $oecmake_sitefile \
${OECMAKE_SOURCEPATH} \
-DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
- -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir', True), d.getVar('prefix', True))} \
- -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir', True), d.getVar('prefix', True))} \
- -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir', True), d.getVar('prefix', True))} \
+ -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \
-DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
- -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir', True), d. getVar('prefix', True))} \
+    -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d.getVar('prefix') + '/')} \
-DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
- -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir', True), d.getVar('prefix', True))} \
- -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir', True), d.getVar('prefix', True))} \
- -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir', True), d.getVar('prefix', True))} \
+ -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix') + '/')} \
-DCMAKE_INSTALL_SO_NO_EXE=0 \
-DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
- -DCMAKE_VERBOSE_MAKEFILE=1 \
-DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
${EXTRA_OECMAKE} \
-Wno-dev
}
-do_compile[progress] = "percent"
+cmake_runcmake_build() {
+ bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }VERBOSE=1 cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
+ eval ${DESTDIR:+DESTDIR=${DESTDIR} }VERBOSE=1 cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
+}
+
cmake_do_compile() {
- cd ${B}
- base_do_compile VERBOSE=1
+ cmake_runcmake_build --target ${OECMAKE_TARGET_COMPILE}
}
cmake_do_install() {
- cd ${B}
- oe_runmake 'DESTDIR=${D}' install
+ DESTDIR='${D}' cmake_runcmake_build --target ${OECMAKE_TARGET_INSTALL}
}
EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
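
A stand-alone sketch of the compiler/launcher split performed by the anonymous python() above, for a hypothetical CC value:

    def split_compiler(cc):
        # When CC starts with "ccache", the first word becomes the CMake
        # compiler launcher and the second word the real compiler; otherwise
        # the first word is the compiler itself.
        words = cc.split()
        if words[0] == 'ccache':
            return words[0], words[1]
        return None, words[0]

    # split_compiler("ccache arm-linux-gnueabi-gcc -march=armv7-a")
    #   -> ('ccache', 'arm-linux-gnueabi-gcc')
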
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
index 5834806269..c7f6723cb3 100644
--- a/meta/classes/cml1.bbclass
+++ b/meta/classes/cml1.bbclass
@@ -1,7 +1,7 @@
cml1_do_configure() {
set -e
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
- oe_runmake oldconfig
+ yes '' | oe_runmake oldconfig
}
EXPORT_FUNCTIONS do_configure
@@ -26,8 +26,8 @@ python do_menuconfig() {
except OSError:
mtime = 0
- oe_terminal("${SHELL} -c \"make %s; if [ \$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND', True),
- d.getVar('PN', True ) + ' Configuration', d)
+ oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
+ d.getVar('PN') + ' Configuration', d)
# FIXME this check can be removed when the minimum bitbake version has been bumped
if hasattr(bb.build, 'write_taint'):
@@ -49,7 +49,7 @@ python do_diffconfig() {
import shutil
import subprocess
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
fragment = workdir + '/fragment.cfg'
configorig = '.config.orig'
config = '.config'
@@ -64,7 +64,8 @@ python do_diffconfig() {
if isdiff:
statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment
subprocess.call(statement, shell=True)
-
+    # No need to check diff's exit code here: it returns non-zero when the
+    # files differ, which is exactly what we expect.
shutil.copy(configorig, config)
bb.plain("Config fragment has been dumped into:\n %s" % fragment)
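
The GNU diff invocation above keeps only lines that are new or changed in .config. A rough Python restatement of what lands in the fragment (it ignores diff's ordering handling):

    def config_fragment(orig_lines, new_lines):
        # Lines present in the new .config but not in .config.orig --
        # approximately what --new-line-format="%L" emits.
        old = set(orig_lines)
        return [line for line in new_lines if line not in old]

    # config_fragment(["CONFIG_A=y\n"], ["CONFIG_A=y\n", "CONFIG_B=y\n"])
    #   -> ["CONFIG_B=y\n"]
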
diff --git a/meta/classes/compress_doc.bbclass b/meta/classes/compress_doc.bbclass
index 8073c173e5..d6d11fad26 100644
--- a/meta/classes/compress_doc.bbclass
+++ b/meta/classes/compress_doc.bbclass
@@ -31,25 +31,25 @@ DOC_DECOMPRESS_CMD[xz] ?= "unxz -v"
PACKAGE_PREPROCESS_FUNCS += "package_do_compress_doc compress_doc_updatealternatives"
python package_do_compress_doc() {
- compress_mode = d.getVar('DOC_COMPRESS', True)
- compress_list = (d.getVar('DOC_COMPRESS_LIST', True) or '').split()
+ compress_mode = d.getVar('DOC_COMPRESS')
+ compress_list = (d.getVar('DOC_COMPRESS_LIST') or '').split()
if compress_mode not in compress_list:
bb.fatal('Compression policy %s not supported (not listed in %s)\n' % (compress_mode, compress_list))
- dvar = d.getVar('PKGD', True)
+ dvar = d.getVar('PKGD')
compress_cmds = {}
decompress_cmds = {}
for mode in compress_list:
- compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode, True)
- decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode, True)
+ compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode)
+ decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode)
- mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir", True))
+ mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir"))
if os.path.exists(mandir):
        # Decompress doc files whose format is not compress_mode
decompress_doc(mandir, compress_mode, decompress_cmds)
compress_doc(mandir, compress_mode, compress_cmds)
- infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir", True))
+ infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir"))
if os.path.exists(infodir):
        # Decompress doc files whose format is not compress_mode
decompress_doc(infodir, compress_mode, decompress_cmds)
@@ -79,6 +79,7 @@ def _collect_hardlink(hardlink_dict, file):
return hardlink_dict
def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False):
+ import subprocess
for target in hardlink_dict:
if decompress:
compress_format = _get_compress_format(target, shell_cmds.keys())
@@ -87,7 +88,7 @@ def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False
else:
cmd = "%s -f %s" % (shell_cmds[compress_mode], target)
bb.note('compress hardlink %s' % target)
- (retval, output) = oe.utils.getstatusoutput(cmd)
+ (retval, output) = subprocess.getstatusoutput(cmd)
if retval:
bb.warn("de/compress file failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
return
@@ -159,6 +160,7 @@ def _is_compress_doc(file, compress_format_list):
return False, ''
def compress_doc(topdir, compress_mode, compress_cmds):
+ import subprocess
hardlink_dict = {}
for root, dirs, files in os.walk(topdir):
for f in files:
@@ -176,7 +178,7 @@ def compress_doc(topdir, compress_mode, compress_cmds):
# Normal file
elif os.path.isfile(file):
cmd = "%s %s" % (compress_cmds[compress_mode], file)
- (retval, output) = oe.utils.getstatusoutput(cmd)
+ (retval, output) = subprocess.getstatusoutput(cmd)
if retval:
bb.warn("compress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
continue
@@ -186,6 +188,7 @@ def compress_doc(topdir, compress_mode, compress_cmds):
# Decompress doc files whose format is not compress_mode
def decompress_doc(topdir, compress_mode, decompress_cmds):
+ import subprocess
hardlink_dict = {}
decompress = True
for root, dirs, files in os.walk(topdir):
@@ -206,7 +209,7 @@ def decompress_doc(topdir, compress_mode, decompress_cmds):
# Normal file
elif os.path.isfile(file):
cmd = "%s %s" % (decompress_cmds[compress_format], file)
- (retval, output) = oe.utils.getstatusoutput(cmd)
+ (retval, output) = subprocess.getstatusoutput(cmd)
if retval:
bb.warn("decompress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
continue
@@ -218,18 +221,18 @@ python compress_doc_updatealternatives () {
if not bb.data.inherits_class('update-alternatives', d):
return
- mandir = d.getVar("mandir", True)
- infodir = d.getVar("infodir", True)
- compress_mode = d.getVar('DOC_COMPRESS', True)
- for pkg in (d.getVar('PACKAGES', True) or "").split():
- old_names = (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split()
+ mandir = d.getVar("mandir")
+ infodir = d.getVar("infodir")
+ compress_mode = d.getVar('DOC_COMPRESS')
+ for pkg in (d.getVar('PACKAGES') or "").split():
+ old_names = (d.getVar('ALTERNATIVE_%s' % pkg) or "").split()
new_names = []
for old_name in old_names:
- old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name, True)
- old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name, True) or \
- d.getVarFlag('ALTERNATIVE_TARGET', old_name, True) or \
- d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or \
- d.getVar('ALTERNATIVE_TARGET', True) or \
+ old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name)
+ old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name) or \
+ d.getVarFlag('ALTERNATIVE_TARGET', old_name) or \
+ d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
+ d.getVar('ALTERNATIVE_TARGET') or \
old_link
# Sometimes old_target is specified as relative to the link name.
old_target = os.path.join(os.path.dirname(old_link), old_target)
@@ -241,15 +244,15 @@ python compress_doc_updatealternatives () {
new_target = old_target + '.' + compress_mode
d.delVarFlag('ALTERNATIVE_LINK_NAME', old_name)
d.setVarFlag('ALTERNATIVE_LINK_NAME', new_name, new_link)
- if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name, True):
+ if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name):
d.delVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name)
d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, new_name, new_target)
- elif d.getVarFlag('ALTERNATIVE_TARGET', old_name, True):
+ elif d.getVarFlag('ALTERNATIVE_TARGET', old_name):
d.delVarFlag('ALTERNATIVE_TARGET', old_name)
d.setVarFlag('ALTERNATIVE_TARGET', new_name, new_target)
- elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True):
+ elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg):
d.setVar('ALTERNATIVE_TARGET_%s' % pkg, new_target)
- elif d.getVar('ALTERNATIVE_TARGET', old_name, True):
+ elif d.getVar('ALTERNATIVE_TARGET'):
d.setVar('ALTERNATIVE_TARGET', new_target)
new_names.append(new_name)
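
The oe.utils.getstatusoutput calls above are swapped for the stdlib equivalent. A minimal sketch of the new pattern, with a hypothetical command and path:

    import subprocess

    # subprocess.getstatusoutput runs the command through the shell and
    # returns (exit_status, combined_output), just like the old helper.
    retval, output = subprocess.getstatusoutput("gzip -f /tmp/example.1")
    if retval:
        print("compress failed %s: %s" % (retval, output))
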
diff --git a/meta/classes/copyleft_compliance.bbclass b/meta/classes/copyleft_compliance.bbclass
index 907c1836b3..eabf12ce7a 100644
--- a/meta/classes/copyleft_compliance.bbclass
+++ b/meta/classes/copyleft_compliance.bbclass
@@ -13,7 +13,7 @@ python do_prepare_copyleft_sources () {
import os.path
import shutil
- p = d.getVar('P', True)
+ p = d.getVar('P')
included, reason = copyleft_should_include(d)
if not included:
bb.debug(1, 'copyleft: %s is excluded: %s' % (p, reason))
@@ -21,13 +21,13 @@ python do_prepare_copyleft_sources () {
else:
bb.debug(1, 'copyleft: %s is included: %s' % (p, reason))
- sources_dir = d.getVar('COPYLEFT_SOURCES_DIR', True)
- dl_dir = d.getVar('DL_DIR', True)
- src_uri = d.getVar('SRC_URI', True).split()
+ sources_dir = d.getVar('COPYLEFT_SOURCES_DIR')
+ dl_dir = d.getVar('DL_DIR')
+ src_uri = d.getVar('SRC_URI').split()
fetch = bb.fetch2.Fetch(src_uri, d)
ud = fetch.ud
- pf = d.getVar('PF', True)
+ pf = d.getVar('PF')
dest = os.path.join(sources_dir, pf)
shutil.rmtree(dest, ignore_errors=True)
bb.utils.mkdirhier(dest)
diff --git a/meta/classes/copyleft_filter.bbclass b/meta/classes/copyleft_filter.bbclass
index 46be7f7d2f..c36bce431a 100644
--- a/meta/classes/copyleft_filter.bbclass
+++ b/meta/classes/copyleft_filter.bbclass
@@ -6,7 +6,7 @@
#
# vi:sts=4:sw=4:et
-COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL*'
+COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL* AGPL*'
COPYLEFT_LICENSE_INCLUDE[type] = 'list'
COPYLEFT_LICENSE_INCLUDE[doc] = 'Space separated list of globs which include licenses'
@@ -47,32 +47,32 @@ def copyleft_should_include(d):
import oe.license
from fnmatch import fnmatchcase as fnmatch
- included, motive = False, 'recipe did not match anything'
-
- recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE', True)
+ recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE')
if recipe_type not in oe.data.typed_value('COPYLEFT_RECIPE_TYPES', d):
- include, motive = False, 'recipe type "%s" is excluded' % recipe_type
+ included, motive = False, 'recipe type "%s" is excluded' % recipe_type
+ else:
+ included, motive = False, 'recipe did not match anything'
- include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d)
- exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d)
+ include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d)
+ exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d)
- try:
- is_included, reason = oe.license.is_included(d.getVar('LICENSE', True), include, exclude)
- except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
- else:
- if is_included:
- if reason:
- included, motive = True, 'recipe has included licenses: %s' % ', '.join(reason)
- else:
- included, motive = False, 'recipe does not include a copyleft license'
+ try:
+ is_included, reason = oe.license.is_included(d.getVar('LICENSE'), include, exclude)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('PF'), exc))
else:
- included, motive = False, 'recipe has excluded licenses: %s' % ', '.join(reason)
+ if is_included:
+ if reason:
+ included, motive = True, 'recipe has included licenses: %s' % ', '.join(reason)
+ else:
+ included, motive = False, 'recipe does not include a copyleft license'
+ else:
+ included, motive = False, 'recipe has excluded licenses: %s' % ', '.join(reason)
- if any(fnmatch(d.getVar('PN', True), name) \
+ if any(fnmatch(d.getVar('PN'), name) \
for name in oe.data.typed_value('COPYLEFT_PN_INCLUDE', d)):
included, motive = True, 'recipe included by name'
- if any(fnmatch(d.getVar('PN', True), name) \
+ if any(fnmatch(d.getVar('PN'), name) \
for name in oe.data.typed_value('COPYLEFT_PN_EXCLUDE', d)):
included, motive = False, 'recipe excluded by name'
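
The include/exclude lists in this class are shell-style globs; full LICENSE expressions additionally go through oe.license.is_included. A stand-alone sketch of just the glob primitive, with hypothetical values:

    from fnmatch import fnmatchcase as fnmatch

    def matches_any(name, patterns):
        # A license or recipe name is selected if any glob matches it.
        return any(fnmatch(name, p) for p in patterns)

    # matches_any("AGPL-3.0", ["GPL*", "LGPL*", "AGPL*"]) -> True
    # matches_any("MIT", ["GPL*", "LGPL*", "AGPL*"])      -> False
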
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
index 8431440db4..94f112c397 100644
--- a/meta/classes/core-image.bbclass
+++ b/meta/classes/core-image.bbclass
@@ -24,11 +24,16 @@
# - hwcodecs - Install hardware acceleration codecs
# - package-management - installs package management tools and preserves the package manager database
# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
+# - empty-root-password - sets an empty root password
+# - allow-empty-password - allows logging in with an empty password
+# - post-install-logging - logs postinstall script runs to /var/log/postinstall.log
# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
# - doc-pkgs - documentation packages for all installed packages in the rootfs
+# - bash-completion-pkgs - bash-completion packages for recipes using bash-completion bbclass
# - ptest-pkgs - ptest packages for all ptest-enabled recipes
# - read-only-rootfs - tweaks an image to support read-only rootfs
+# - splash - bootup splash screen
#
FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass
index 55ac052695..867edf8707 100644
--- a/meta/classes/cpan-base.bbclass
+++ b/meta/classes/cpan-base.bbclass
@@ -2,39 +2,17 @@
# cpan-base provides various perl-related information needed for building
# cpan modules
#
-FILES_${PN} += "${libdir}/perl ${datadir}/perl"
+FILES_${PN} += "${libdir}/perl5 ${datadir}/perl5"
DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
-PERL_OWN_DIR = "${@["", "/perl-native"][(bb.data.inherits_class('native', d))]}"
-
-# Determine the staged version of perl from the perl configuration file
-# Assign vardepvalue, because otherwise signature is changed before and after
-# perl is built (from None to real version in config.sh).
-get_perl_version[vardepvalue] = "${PERL_OWN_DIR}"
-def get_perl_version(d):
- import re
- cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh')
- try:
- f = open(cfg, 'r')
- except IOError:
- return None
- l = f.readlines();
- f.close();
- r = re.compile("^version='(\d*\.\d*\.\d*)'")
- for s in l:
- m = r.match(s)
- if m:
- return m.group(1)
- return None
+inherit perl-version
def is_target(d):
if not bb.data.inherits_class('native', d):
return "yes"
return "no"
-PERLLIBDIRS = "${libdir}/perl"
-PERLLIBDIRS_class-native = "${libdir}/perl-native"
-PERLVERSION := "${@get_perl_version(d)}"
-PERLVERSION[vardepvalue] = ""
+PERLLIBDIRS = "${libdir}/perl5"
+PERLLIBDIRS_class-native = "${libdir}/perl5"
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
index 8e079e0d55..e9908ae4b8 100644
--- a/meta/classes/cpan.bbclass
+++ b/meta/classes/cpan.bbclass
@@ -10,14 +10,14 @@ EXTRA_PERLFLAGS ?= ""
export PERLCONFIGTARGET = "${@is_target(d)}"
# Env var which tells perl where the perl include files are
-export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}/CORE"
-export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
-export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
-export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
+export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}/CORE"
+export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}"
+export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
+export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
+export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
cpan_do_configure () {
- export PERL5LIB="${PERL_ARCHLIB}"
- yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor ${EXTRA_CPANFLAGS}
+ yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor NO_PERLLOCAL=1 NO_PACKLIST=1 PERL=$(which perl) ${EXTRA_CPANFLAGS}
# Makefile.PLs can exit with success without generating a
# Makefile, e.g. in cases of missing configure time
@@ -28,7 +28,7 @@ cpan_do_configure () {
[ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL"
if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
- . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh
+ . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh
# Use find since there can be a Makefile generated for each Makefile.PL
for f in `find -name Makefile.PL`; do
f2=`echo $f | sed -e 's/.PL//'`
@@ -41,6 +41,16 @@ cpan_do_configure () {
fi
}
+do_configure_append_class-target() {
+ find . -name Makefile | xargs sed -E -i \
+ -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
+}
+
+do_configure_append_class-nativesdk() {
+ find . -name Makefile | xargs sed -E -i \
+ -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
+}
+
cpan_do_compile () {
oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}"
}
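
The do_configure appends above scrub LD_RUN_PATH assignments out of generated Makefiles so build-tree paths do not leak into binary RPATHs. The same substitution in Python form, on a hypothetical Makefile line:

    import re

    line = 'LD_RUN_PATH = "/build/tmp/work/core2-64/libfoo/1.0/lib"'
    cleaned = re.sub(r'LD_RUN_PATH ?= ?"?[^"]*"?', '', line)
    # cleaned == ''
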
diff --git a/meta/classes/cpan_build.bbclass b/meta/classes/cpan_build.bbclass
index fac074d610..f3fb4666ef 100644
--- a/meta/classes/cpan_build.bbclass
+++ b/meta/classes/cpan_build.bbclass
@@ -7,14 +7,15 @@ EXTRA_CPAN_BUILD_FLAGS ?= ""
# Env var which tells perl if it should use host (no) or target (yes) settings
export PERLCONFIGTARGET = "${@is_target(d)}"
-export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
-export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
+export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
+export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
+export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
export LD = "${CCLD}"
cpan_build_do_configure () {
if [ "${@is_target(d)}" = "yes" ]; then
# build for target
- . ${STAGING_LIBDIR}/perl/config.sh
+ . ${STAGING_LIBDIR}/perl5/config.sh
fi
perl Build.PL --installdirs vendor --destdir ${D} \
@@ -30,7 +31,7 @@ cpan_build_do_configure () {
}
cpan_build_do_compile () {
- perl Build verbose=1
+ perl Build --perl "${bindir}/perl" verbose=1
}
cpan_build_do_install () {
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
index 099c0daf42..f5c9f61595 100644
--- a/meta/classes/cross-canadian.bbclass
+++ b/meta/classes/cross-canadian.bbclass
@@ -8,6 +8,8 @@
# SDK packages are built either explicitly by the user,
# or indirectly via dependency. No need to be in 'world'.
EXCLUDE_FROM_WORLD = "1"
+NATIVESDKLIBC ?= "libc-glibc"
+LIBCOVERRIDE = ":${NATIVESDKLIBC}"
CLASSOVERRIDE = "class-cross-canadian"
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
@@ -15,32 +17,30 @@ STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${S
# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
#
PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
-BASECANADIANEXTRAOS ?= "linux-uclibc linux-musl"
+BASECANADIANEXTRAOS ?= "linux-musl"
CANADIANEXTRAOS = "${BASECANADIANEXTRAOS}"
CANADIANEXTRAVENDOR = ""
MODIFYTOS ??= "1"
python () {
- archs = d.getVar('PACKAGE_ARCHS', True).split()
+ archs = d.getVar('PACKAGE_ARCHS').split()
sdkarchs = []
for arch in archs:
sdkarchs.append(arch + '-${SDKPKGSUFFIX}')
d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
# Allow the following code segment to be disabled, e.g. meta-environment
- if d.getVar("MODIFYTOS", True) != "1":
+ if d.getVar("MODIFYTOS") != "1":
return
- if d.getVar("TCLIBC", True) == "baremetal":
+ if d.getVar("TCLIBC") in [ 'baremetal', 'newlib' ]:
return
- tos = d.getVar("TARGET_OS", True)
+ tos = d.getVar("TARGET_OS")
whitelist = []
extralibcs = [""]
- if "uclibc" in d.getVar("BASECANADIANEXTRAOS", True):
- extralibcs.append("uclibc")
- if "musl" in d.getVar("BASECANADIANEXTRAOS", True):
+ if "musl" in d.getVar("BASECANADIANEXTRAOS"):
extralibcs.append("musl")
- for variant in ["", "spe", "x32", "eabi", "n32"]:
+ for variant in ["", "spe", "x32", "eabi", "n32", "_ilp32"]:
for libc in extralibcs:
entry = "linux"
if variant and libc:
@@ -51,45 +51,45 @@ python () {
entry = entry + "-" + libc
whitelist.append(entry)
if tos not in whitelist:
- bb.fatal("Building cross-candian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS", True))
+        bb.fatal("Building cross-canadian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS"))
for n in ["PROVIDES", "DEPENDS"]:
- d.setVar(n, d.getVar(n, True))
- d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN", True))
+ d.setVar(n, d.getVar(n))
+ d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN"))
for prefix in ["AR", "AS", "DLLTOOL", "CC", "CXX", "GCC", "LD", "LIPO", "NM", "OBJDUMP", "RANLIB", "STRIP", "WINDRES"]:
n = prefix + "_FOR_TARGET"
- d.setVar(n, d.getVar(n, True))
+ d.setVar(n, d.getVar(n))
# This is a bit ugly. We need to zero LIBC/ABI extension which will change TARGET_OS
# however we need the old value in some variables. We expand those here first.
- tarch = d.getVar("TARGET_ARCH", True)
+ tarch = d.getVar("TARGET_ARCH")
if tarch == "x86_64":
d.setVar("LIBCEXTENSION", "")
d.setVar("ABIEXTENSION", "")
d.appendVar("CANADIANEXTRAOS", " linux-gnux32")
- for extraos in d.getVar("BASECANADIANEXTRAOS", True).split():
+ for extraos in d.getVar("BASECANADIANEXTRAOS").split():
d.appendVar("CANADIANEXTRAOS", " " + extraos + "x32")
elif tarch == "powerpc":
# PowerPC can build "linux" and "linux-gnuspe"
d.setVar("LIBCEXTENSION", "")
d.setVar("ABIEXTENSION", "")
d.appendVar("CANADIANEXTRAOS", " linux-gnuspe")
- for extraos in d.getVar("BASECANADIANEXTRAOS", True).split():
+ for extraos in d.getVar("BASECANADIANEXTRAOS").split():
d.appendVar("CANADIANEXTRAOS", " " + extraos + "spe")
elif tarch == "mips64":
d.appendVar("CANADIANEXTRAOS", " linux-gnun32")
- for extraos in d.getVar("BASECANADIANEXTRAOS", True).split():
+ for extraos in d.getVar("BASECANADIANEXTRAOS").split():
d.appendVar("CANADIANEXTRAOS", " " + extraos + "n32")
if tarch == "arm" or tarch == "armeb":
- d.appendVar("CANADIANEXTRAOS", " linux-musleabi linux-uclibceabi")
+ d.appendVar("CANADIANEXTRAOS", " linux-gnueabi linux-musleabi")
d.setVar("TARGET_OS", "linux-gnueabi")
else:
d.setVar("TARGET_OS", "linux")
# Also need to handle multilib target vendors
- vendors = d.getVar("CANADIANEXTRAVENDOR", True)
+ vendors = d.getVar("CANADIANEXTRAVENDOR")
if not vendors:
vendors = all_multilib_tune_values(d, 'TARGET_VENDOR')
- origvendor = d.getVar("TARGET_VENDOR_MULTILIB_ORIGINAL", True)
+ origvendor = d.getVar("TARGET_VENDOR_MULTILIB_ORIGINAL")
if origvendor:
d.setVar("TARGET_VENDOR", origvendor)
if origvendor not in vendors.split():
@@ -100,9 +100,9 @@ MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
INHIBIT_DEFAULT_DEPS = "1"
-STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
+STAGING_DIR_HOST = "${RECIPE_SYSROOT}"
-TOOLCHAIN_OPTIONS = " --sysroot=${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
+TOOLCHAIN_OPTIONS = " --sysroot=${RECIPE_SYSROOT}"
PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
@@ -116,7 +116,7 @@ HOST_LD_ARCH = "${SDK_LD_ARCH}"
HOST_AS_ARCH = "${SDK_AS_ARCH}"
#assign DPKG_ARCH
-DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH', True), '')}"
+DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH'), '')}"
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
CFLAGS = "${BUILDSDK_CFLAGS}"
@@ -125,8 +125,6 @@ LDFLAGS = "${BUILDSDK_LDFLAGS} \
-Wl,-rpath-link,${STAGING_LIBDIR}/.. \
-Wl,-rpath,${libdir}/.. "
-DEPENDS_GETTEXT = "gettext-native nativesdk-gettext"
-
#
# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
# binaries
@@ -169,6 +167,7 @@ USE_NLS = "${SDKUSE_NLS}"
# and not any particular tune that is enabled.
TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
+PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
# If MLPREFIX is set by multilib code, shlibs
# points to the wrong place so force it
SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
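
The first loop in the anonymous python() above suffixes every package architecture with SDKPKGSUFFIX. A stand-alone sketch, assuming the default suffix "nativesdk":

    def suffix_archs(archs, suffix="nativesdk"):
        # PACKAGE_ARCHS "all any x86_64" becomes
        # "all-nativesdk any-nativesdk x86_64-nativesdk".
        return " ".join(a + "-" + suffix for a in archs.split())

    # suffix_archs("all any x86_64")
    #   -> "all-nativesdk any-nativesdk x86_64-nativesdk"
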
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
index 8d1e7795aa..6dcddd6f2e 100644
--- a/meta/classes/cross.bbclass
+++ b/meta/classes/cross.bbclass
@@ -17,29 +17,40 @@ HOST_CC_ARCH = "${BUILD_CC_ARCH}"
HOST_LD_ARCH = "${BUILD_LD_ARCH}"
HOST_AS_ARCH = "${BUILD_AS_ARCH}"
-STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
+# Don't strip the sysroot when DEBUG_BUILD is enabled
+INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
+
+export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
+
+STAGING_DIR_HOST = "${RECIPE_SYSROOT_NATIVE}"
PACKAGE_ARCH = "${BUILD_ARCH}"
-MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
+MULTIMACH_TARGET_SYS = "${BUILD_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
export PKG_CONFIG_DIR = "${exec_prefix}/lib/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = ""
+TARGET_CPPFLAGS = ""
+TARGET_CFLAGS = ""
+TARGET_CXXFLAGS = ""
+TARGET_LDFLAGS = ""
+
CPPFLAGS = "${BUILD_CPPFLAGS}"
CFLAGS = "${BUILD_CFLAGS}"
CXXFLAGS = "${BUILD_CFLAGS}"
LDFLAGS = "${BUILD_LDFLAGS}"
-LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE}"
TOOLCHAIN_OPTIONS = ""
-DEPENDS_GETTEXT = "gettext-native"
+# This class encodes staging paths into its scripts data, so it can only be
+# reused if we manipulate the paths.
+SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
# Path mangling needed by the cross packaging
# Note that we use := here to ensure that libdir and includedir are
# target paths.
-target_base_prefix := "${base_prefix}"
+target_base_prefix := "${root_prefix}"
target_prefix := "${prefix}"
target_exec_prefix := "${exec_prefix}"
target_base_libdir = "${target_base_prefix}/${baselib}"
@@ -81,3 +92,9 @@ export STRIP = "${BUILD_STRIP}"
export NM = "${BUILD_NM}"
inherit nopackages
+
+python do_addto_recipe_sysroot () {
+ bb.build.exec_func("extend_recipe_sysroot", d)
+}
+addtask addto_recipe_sysroot after do_populate_sysroot
+do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
diff --git a/meta/classes/crosssdk.bbclass b/meta/classes/crosssdk.bbclass
index 7315c38f13..c0c0bfee16 100644
--- a/meta/classes/crosssdk.bbclass
+++ b/meta/classes/crosssdk.bbclass
@@ -1,16 +1,21 @@
inherit cross
CLASSOVERRIDE = "class-crosssdk"
+NATIVESDKLIBC ?= "libc-glibc"
+LIBCOVERRIDE = ":${NATIVESDKLIBC}"
MACHINEOVERRIDES = ""
PACKAGE_ARCH = "${SDK_ARCH}"
python () {
- # set TUNE_PKGARCH to SDK_ARCH
- d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH', True))
+ # set TUNE_PKGARCH to SDK_ARCH
+ d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH'))
}
-STAGING_DIR_TARGET = "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}"
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
+# This class encodes staging paths into its scripts data, so it can only be
+# reused if we manipulate the paths.
+SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
+
TARGET_ARCH = "${SDK_ARCH}"
TARGET_VENDOR = "${SDK_VENDOR}"
TARGET_OS = "${SDK_OS}"
@@ -18,8 +23,13 @@ TARGET_PREFIX = "${SDK_PREFIX}"
TARGET_CC_ARCH = "${SDK_CC_ARCH}"
TARGET_LD_ARCH = "${SDK_LD_ARCH}"
TARGET_AS_ARCH = "${SDK_AS_ARCH}"
+TARGET_CPPFLAGS = ""
+TARGET_CFLAGS = ""
+TARGET_CXXFLAGS = ""
+TARGET_LDFLAGS = ""
TARGET_FPU = ""
+
target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index 8251ca7c97..01b3637469 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -20,11 +20,15 @@
# the only method to check against CVEs. Running this tool
# doesn't guarantee your packages are free of CVEs.
+# The product name that the CVE database uses. Defaults to BPN, but may need to
+# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
+CVE_PRODUCT ??= "${BPN}"
+CVE_VERSION ??= "${PV}"
+
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
-CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvd.db"
+CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db"
-CVE_CHECK_LOCAL_DIR ?= "${WORKDIR}/cve"
-CVE_CHECK_LOCAL_FILE ?= "${CVE_CHECK_LOCAL_DIR}/cve.log"
+CVE_CHECK_LOG ?= "${T}/cve.log"
CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
@@ -33,40 +37,40 @@ CVE_CHECK_COPY_FILES ??= "1"
CVE_CHECK_CREATE_MANIFEST ??= "1"
# Whitelist for packages (PN)
-CVE_CHECK_PN_WHITELIST = "\
- glibc-locale \
-"
+CVE_CHECK_PN_WHITELIST ?= ""
-# Whitelist for CVE and version of package
-CVE_CHECK_CVE_WHITELIST = "{\
- 'CVE-2014-2524': ('6.3',), \
-}"
+# Whitelist for CVEs. If a CVE is listed here, it is considered patched.
+# The value is a string containing space-separated CVE IDs:
+#
+# CVE_CHECK_WHITELIST = 'CVE-2014-2524 CVE-2018-1234'
+#
+CVE_CHECK_WHITELIST ?= ""
python do_cve_check () {
"""
Check recipe for patched and unpatched CVEs
"""
- if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE", True)):
+ if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
patched_cves = get_patches_cves(d)
patched, unpatched = check_cves(d, patched_cves)
if patched or unpatched:
cve_data = get_cve_info(d, patched + unpatched)
cve_write_data(d, patched, unpatched, cve_data)
else:
- bb.note("Failed to update CVE database, skipping CVE check")
+ bb.note("No CVE database found, skipping CVE check")
+
}
-addtask cve_check after do_unpack before do_build
-do_cve_check[depends] = "cve-check-tool-native:do_populate_cve_db"
+addtask cve_check before do_build
+do_cve_check[depends] = "cve-update-db-native:do_populate_cve_db"
do_cve_check[nostamp] = "1"
python cve_check_cleanup () {
"""
Delete the file used to gather all the CVE information.
"""
-
- bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE", True))
+ bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE"))
}
addhandler cve_check_cleanup
@@ -79,12 +83,17 @@ python cve_check_write_rootfs_manifest () {
import shutil
- if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE", True)):
+ if d.getVar("CVE_CHECK_COPY_FILES") == "1":
+ deploy_file = os.path.join(d.getVar("CVE_CHECK_DIR"), d.getVar("PN"))
+ if os.path.exists(deploy_file):
+ bb.utils.remove(deploy_file)
+
+ if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE")):
bb.note("Writing rootfs CVE manifest")
- deploy_dir = d.getVar("DEPLOY_DIR_IMAGE", True)
- link_name = d.getVar("IMAGE_LINK_NAME", True)
- manifest_name = d.getVar("CVE_CHECK_MANIFEST", True)
- cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE", True)
+ deploy_dir = d.getVar("DEPLOY_DIR_IMAGE")
+ link_name = d.getVar("IMAGE_LINK_NAME")
+ manifest_name = d.getVar("CVE_CHECK_MANIFEST")
+ cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
shutil.copyfile(cve_tmp_file, manifest_name)
@@ -92,14 +101,13 @@ python cve_check_write_rootfs_manifest () {
manifest_link = os.path.join(deploy_dir, "%s.cve" % link_name)
# If we already have another manifest, update symlinks
if os.path.exists(os.path.realpath(manifest_link)):
- if d.getVar('RM_OLD_IMAGE', True) == "1":
- os.remove(os.path.realpath(manifest_link))
os.remove(manifest_link)
os.symlink(os.path.basename(manifest_name), manifest_link)
bb.plain("Image CVE report stored in: %s" % manifest_name)
}
-ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST', True) == '1' else ''}"
+ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
def get_patches_cves(d):
"""
@@ -108,12 +116,26 @@ def get_patches_cves(d):
import re
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
+
+    # Matches the last CVE-1234-211432 in the file name, even when written
+    # in lowercase. Multiple CVE IDs in a single file name are not
+    # supported.
+    cve_file_name_match = re.compile(r".*([Cc][Vv][Ee]-\d{4}-\d+)")
+
patched_cves = set()
    bb.debug(2, "Looking for patches that solve CVEs for %s" % pn)
for url in src_patches(d):
patch_file = bb.fetch.decodeurl(url)[2]
+
+ # Check patch file name for CVE ID
+ fname_match = cve_file_name_match.search(patch_file)
+ if fname_match:
+ cve = fname_match.group(1).upper()
+ patched_cves.add(cve)
+ bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
+
with open(patch_file, "r", encoding="utf-8") as f:
try:
patch_text = f.read()
@@ -124,103 +146,138 @@ def get_patches_cves(d):
with open(patch_file, "r", encoding="iso8859-1") as f:
patch_text = f.read()
- # Search for the "CVE: " line
- match = cve_match.search(patch_text)
- if match:
+ # Search for one or more "CVE: " lines
+ text_match = False
+ for match in cve_match.finditer(patch_text):
# Get only the CVEs without the "CVE: " tag
cves = patch_text[match.start()+5:match.end()]
for cve in cves.split():
bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
patched_cves.add(cve)
- else:
+ text_match = True
+
+ if not fname_match and not text_match:
bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
return patched_cves
def check_cves(d, patched_cves):
"""
- Run cve-check-tool looking for patched and unpatched CVEs.
+    Connect to the NVD database and find unpatched CVEs.
"""
+ from distutils.version import LooseVersion
- import ast, csv, tempfile, subprocess, io
-
- cves_patched = []
cves_unpatched = []
- bpn = d.getVar("BPN", True)
- pv = d.getVar("PV", True).split("git+")[0]
- cves = " ".join(patched_cves)
- cve_db_dir = d.getVar("CVE_CHECK_DB_DIR", True)
- cve_whitelist = ast.literal_eval(d.getVar("CVE_CHECK_CVE_WHITELIST", True))
- cve_cmd = "cve-check-tool"
- cmd = [cve_cmd, "--no-html", "--csv", "--not-affected", "-t", "faux", "-d", cve_db_dir]
+ # CVE_PRODUCT can contain more than one product (eg. curl/libcurl)
+ products = d.getVar("CVE_PRODUCT").split()
+ # If this has been unset then we're not scanning for CVEs here (for example, image recipes)
+ if not products:
+ return ([], [])
+ pv = d.getVar("CVE_VERSION").split("+git")[0]
    # If the recipe has been whitelisted we return empty lists
- if d.getVar("PN", True) in d.getVar("CVE_CHECK_PN_WHITELIST", True).split():
+ if d.getVar("PN") in d.getVar("CVE_CHECK_PN_WHITELIST").split():
bb.note("Recipe has been whitelisted, skipping check")
return ([], [])
- # It is needed to export the proxies to download the database using HTTP
- bb.utils.export_proxies(d)
-
- try:
- # Write the faux CSV file to be used with cve-check-tool
- fd, faux = tempfile.mkstemp(prefix="cve-faux-")
- with os.fdopen(fd, "w") as f:
- f.write("%s,%s,%s," % (bpn, pv, cves))
- cmd.append(faux)
-
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
- bb.debug(2, "Output of command %s:\n%s" % ("\n".join(cmd), output))
- except subprocess.CalledProcessError as e:
- bb.warn("Couldn't check for CVEs: %s (output %s)" % (e, e.output))
- finally:
- os.remove(faux)
-
- for row in csv.reader(io.StringIO(output)):
- # Third row has the unpatched CVEs
- if row[2]:
- for cve in row[2].split():
- # Skip if the CVE has been whitlisted for the current version
- if pv in cve_whitelist.get(cve,[]):
- bb.note("%s-%s has been whitelisted for %s" % (bpn, pv, cve))
+ old_cve_whitelist = d.getVar("CVE_CHECK_CVE_WHITELIST")
+ if old_cve_whitelist:
+ bb.warn("CVE_CHECK_CVE_WHITELIST is deprecated, please use CVE_CHECK_WHITELIST.")
+ cve_whitelist = d.getVar("CVE_CHECK_WHITELIST").split()
+
+ import sqlite3
+ db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
+ conn = sqlite3.connect(db_file, uri=True)
+
+ # For each of the known product names (e.g. curl has CPEs using curl and libcurl)...
+ for product in products:
+ if ":" in product:
+ vendor, product = product.split(":", 1)
+ else:
+ vendor = "%"
+
+ # Find all relevant CVE IDs.
+ for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)):
+ cve = cverow[0]
+
+ if cve in cve_whitelist:
+ bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve))
+ # TODO: this should be in the report as 'whitelisted'
+ patched_cves.add(cve)
+ continue
+ elif cve in patched_cves:
+ bb.note("%s has been patched" % (cve))
+ continue
+
+ vulnerable = False
+ for row in conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)):
+ (_, _, _, version_start, operator_start, version_end, operator_end) = row
+ #bb.debug(2, "Evaluating row " + str(row))
+
+ if (operator_start == '=' and pv == version_start):
+ vulnerable = True
else:
+ if operator_start:
+ try:
+ vulnerable_start = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start))
+ vulnerable_start |= (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))
+ except:
+ bb.warn("%s: Failed to compare %s %s %s for %s" %
+ (product, pv, operator_start, version_start, cve))
+ vulnerable_start = False
+ else:
+ vulnerable_start = False
+
+ if operator_end:
+ try:
+ vulnerable_end = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end))
+ vulnerable_end |= (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))
+ except:
+ bb.warn("%s: Failed to compare %s %s %s for %s" %
+ (product, pv, operator_end, version_end, cve))
+ vulnerable_end = False
+ else:
+ vulnerable_end = False
+
+ if operator_start and operator_end:
+ vulnerable = vulnerable_start and vulnerable_end
+ else:
+ vulnerable = vulnerable_start or vulnerable_end
+
+ if vulnerable:
+ bb.note("%s-%s is vulnerable to %s" % (product, pv, cve))
cves_unpatched.append(cve)
- bb.debug(2, "%s-%s is not patched for %s" % (bpn, pv, cve))
- # Fourth row has patched CVEs
- if row[3]:
- for cve in row[3].split():
- cves_patched.append(cve)
- bb.debug(2, "%s-%s is patched for %s" % (bpn, pv, cve))
+ break
- return (cves_patched, cves_unpatched)
+ if not vulnerable:
+ bb.note("%s-%s is not vulnerable to %s" % (product, pv, cve))
+ # TODO: not patched but not vulnerable
+ patched_cves.add(cve)
+
+ conn.close()
+
+ return (list(patched_cves), cves_unpatched)
def get_cve_info(d, cves):
"""
- Get CVE information from the database used by cve-check-tool.
-
- Unfortunately the only way to get CVE info is set the output to
- html (hard to parse) or query directly the database.
+ Get CVE information from the database.
"""
- try:
- import sqlite3
- except ImportError:
- from pysqlite2 import dbapi2 as sqlite3
+ import sqlite3
cve_data = {}
- db_file = d.getVar("CVE_CHECK_DB_FILE", True)
- placeholder = ",".join("?" * len(cves))
- query = "SELECT * FROM NVD WHERE id IN (%s)" % placeholder
- conn = sqlite3.connect(db_file)
- cur = conn.cursor()
- for row in cur.execute(query, tuple(cves)):
- cve_data[row[0]] = {}
- cve_data[row[0]]["summary"] = row[1]
- cve_data[row[0]]["score"] = row[2]
- cve_data[row[0]]["modified"] = row[3]
- cve_data[row[0]]["vector"] = row[4]
- conn.close()
+ conn = sqlite3.connect(d.getVar("CVE_CHECK_DB_FILE"))
+ for cve in cves:
+ for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
+ cve_data[row[0]] = {}
+ cve_data[row[0]]["summary"] = row[1]
+ cve_data[row[0]]["scorev2"] = row[2]
+ cve_data[row[0]]["scorev3"] = row[3]
+ cve_data[row[0]]["modified"] = row[4]
+ cve_data[row[0]]["vector"] = row[5]
+
+ conn.close()
return cve_data
def cve_write_data(d, patched, unpatched, cve_data):
@@ -229,39 +286,41 @@ def cve_write_data(d, patched, unpatched, cve_data):
CVE manifest if enabled.
"""
- cve_file = d.getVar("CVE_CHECK_LOCAL_FILE", True)
+ cve_file = d.getVar("CVE_CHECK_LOG")
nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId="
write_string = ""
- first_alert = True
- bb.utils.mkdirhier(d.getVar("CVE_CHECK_LOCAL_DIR", True))
+ unpatched_cves = []
+ bb.utils.mkdirhier(os.path.dirname(cve_file))
for cve in sorted(cve_data):
- write_string += "PACKAGE NAME: %s\n" % d.getVar("PN", True)
- write_string += "PACKAGE VERSION: %s\n" % d.getVar("PV", True)
+ write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
+ write_string += "PACKAGE VERSION: %s\n" % d.getVar("PV")
write_string += "CVE: %s\n" % cve
if cve in patched:
write_string += "CVE STATUS: Patched\n"
else:
+ unpatched_cves.append(cve)
write_string += "CVE STATUS: Unpatched\n"
- if first_alert:
- bb.warn("Found unpatched CVE, for more information check %s" % cve_file)
- first_alert = False
write_string += "CVE SUMMARY: %s\n" % cve_data[cve]["summary"]
- write_string += "CVSS v2 BASE SCORE: %s\n" % cve_data[cve]["score"]
+ write_string += "CVSS v2 BASE SCORE: %s\n" % cve_data[cve]["scorev2"]
+ write_string += "CVSS v3 BASE SCORE: %s\n" % cve_data[cve]["scorev3"]
write_string += "VECTOR: %s\n" % cve_data[cve]["vector"]
write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve)
+ if unpatched_cves:
+        bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves), cve_file))
+
with open(cve_file, "w") as f:
bb.note("Writing file %s with CVE information" % cve_file)
f.write(write_string)
- if d.getVar("CVE_CHECK_COPY_FILES", True) == "1":
- cve_dir = d.getVar("CVE_CHECK_DIR", True)
+ if d.getVar("CVE_CHECK_COPY_FILES") == "1":
+ cve_dir = d.getVar("CVE_CHECK_DIR")
bb.utils.mkdirhier(cve_dir)
- deploy_file = os.path.join(cve_dir, d.getVar("PN", True))
+ deploy_file = os.path.join(cve_dir, d.getVar("PN"))
with open(deploy_file, "w") as f:
f.write(write_string)
- if d.getVar("CVE_CHECK_CREATE_MANIFEST", True) == "1":
- with open(d.getVar("CVE_CHECK_TMP_FILE", True), "a") as f:
+ if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
+ with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
f.write("%s" % write_string)
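
The version-range evaluation above boils down to a pair of bound checks. A stand-alone sketch with the same operator semantics (the exact '=' match case is omitted):

    from distutils.version import LooseVersion

    def in_range(pv, start, op_start, end, op_end):
        # A version is vulnerable if it satisfies every bound that is present;
        # when only one bound exists, that bound alone decides.
        v = LooseVersion(pv)
        ok_start = bool(op_start) and (v > LooseVersion(start) if op_start == ">"
                                       else v >= LooseVersion(start))
        ok_end = bool(op_end) and (v < LooseVersion(end) if op_end == "<"
                                   else v <= LooseVersion(end))
        if op_start and op_end:
            return ok_start and ok_end
        return ok_start or ok_end

    # in_range("7.61.0", "7.20.0", ">=", "7.62.0", "<") -> True
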
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
index be7cacca98..6f8a599ccb 100644
--- a/meta/classes/debian.bbclass
+++ b/meta/classes/debian.bbclass
@@ -20,22 +20,20 @@ do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
python () {
- if not d.getVar("PACKAGES", True):
+ if not d.getVar("PACKAGES"):
d.setVar("DEBIANRDEP", "")
}
python debian_package_name_hook () {
- import glob, copy, stat, errno, re
+ import glob, copy, stat, errno, re, pathlib, subprocess
- pkgdest = d.getVar('PKGDEST', True)
- packages = d.getVar('PACKAGES', True)
- bin_re = re.compile(".*/s?" + os.path.basename(d.getVar("bindir", True)) + "$")
- lib_re = re.compile(".*/" + os.path.basename(d.getVar("libdir", True)) + "$")
- so_re = re.compile("lib.*\.so")
+ pkgdest = d.getVar("PKGDEST")
+ packages = d.getVar('PACKAGES')
+ so_re = re.compile(r"lib.*\.so")
def socrunch(s):
s = s.lower().replace('_', '-')
- m = re.match("^(.*)(.)\.so\.(.*)$", s)
+ m = re.match(r"^(.*)(.)\.so\.(.*)$", s)
if m is None:
return None
if m.group(2) in '0123456789':
@@ -53,38 +51,45 @@ python debian_package_name_hook () {
return (s[stat.ST_MODE] & stat.S_IEXEC)
def add_rprovides(pkg, d):
- newpkg = d.getVar('PKG_' + pkg, True)
+ newpkg = d.getVar('PKG_' + pkg)
if newpkg and newpkg != pkg:
- provs = (d.getVar('RPROVIDES_' + pkg, True) or "").split()
+ provs = (d.getVar('RPROVIDES_' + pkg) or "").split()
if pkg not in provs:
- d.appendVar('RPROVIDES_' + pkg, " " + pkg + " (=" + d.getVar("PKGV", True) + ")")
+ d.appendVar('RPROVIDES_' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
def auto_libname(packages, orig_pkg):
+ p = lambda var: pathlib.PurePath(d.getVar(var))
+ libdirs = (p("base_libdir"), p("libdir"))
+ bindirs = (p("base_bindir"), p("base_sbindir"), p("bindir"), p("sbindir"))
+
sonames = []
has_bins = 0
has_libs = 0
- for file in pkgfiles[orig_pkg]:
- root = os.path.dirname(file)
- if bin_re.match(root):
+ for f in pkgfiles[orig_pkg]:
+ # This is .../packages-split/orig_pkg/
+ pkgpath = pathlib.PurePath(pkgdest, orig_pkg)
+ # Strip pkgpath off the full path to a file in the package, re-root
+ # so it is absolute, and then get the parent directory of the file.
+ path = pathlib.PurePath("/") / (pathlib.PurePath(f).relative_to(pkgpath).parent)
+ if path in bindirs:
has_bins = 1
- if lib_re.match(root):
+ if path in libdirs:
has_libs = 1
- if so_re.match(os.path.basename(file)):
- cmd = (d.getVar('TARGET_PREFIX', True) or "") + "objdump -p " + file + " 2>/dev/null"
- fd = os.popen(cmd)
- lines = fd.readlines()
- fd.close()
- for l in lines:
- m = re.match("\s+SONAME\s+([^\s]*)", l)
- if m and not m.group(1) in sonames:
- sonames.append(m.group(1))
-
+ if so_re.match(os.path.basename(f)):
+ try:
+ cmd = [d.expand("${TARGET_PREFIX}objdump"), "-p", f]
+ output = subprocess.check_output(cmd).decode("utf-8")
+ for m in re.finditer(r"\s+SONAME\s+([^\s]+)", output):
+ if m.group(1) not in sonames:
+ sonames.append(m.group(1))
+ except subprocess.CalledProcessError:
+ pass
bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
soname = None
if len(sonames) == 1:
soname = sonames[0]
elif len(sonames) > 1:
- lead = d.getVar('LEAD_SONAME', True)
+ lead = d.getVar('LEAD_SONAME')
if lead:
r = re.compile(lead)
filtered = []
@@ -115,11 +120,12 @@ python debian_package_name_hook () {
newpkg = pkgname
else:
newpkg = pkg.replace(orig_pkg, devname, 1)
- mlpre=d.getVar('MLPREFIX', True)
+ mlpre=d.getVar('MLPREFIX')
if mlpre:
if not newpkg.find(mlpre) == 0:
newpkg = mlpre + newpkg
if newpkg != pkg:
+ bb.note("debian: renaming %s to %s" % (pkg, newpkg))
d.setVar('PKG_' + pkg, newpkg)
add_rprovides(pkg, d)
else:
@@ -131,11 +137,10 @@ python debian_package_name_hook () {
# and later
# DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
# so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
- for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS', True) or "").split(), reverse=True):
+ for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS') or "").split(), reverse=True):
auto_libname(packages, pkg)
}
EXPORT_FUNCTIONS package_name_hook
DEBIAN_NAMES = "1"
-
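A quick illustration of the pathlib re-rooting used in auto_libname() above: the file's path is made relative to the package root and then re-rooted at "/", so it can be compared directly against the standard bindir/libdir locations. All paths here are hypothetical:

    import pathlib

    pkgdest = "/tmp/work/pkgdata/packages-split"  # hypothetical PKGDEST
    f = pkgdest + "/ncurses-libtic/usr/lib/libtic.so.5.9"

    pkgpath = pathlib.PurePath(pkgdest, "ncurses-libtic")
    path = pathlib.PurePath("/") / pathlib.PurePath(f).relative_to(pkgpath).parent
    print(path)  # /usr/lib -> matches libdir, so has_libs = 1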
diff --git a/meta/classes/deploy.bbclass b/meta/classes/deploy.bbclass
index 8ad07da015..6d52908783 100644
--- a/meta/classes/deploy.bbclass
+++ b/meta/classes/deploy.bbclass
@@ -8,4 +8,4 @@ python do_deploy_setscene () {
}
addtask do_deploy_setscene
do_deploy[dirs] = "${DEPLOYDIR} ${B}"
-do_deploy[stamp-extra-info] = "${MACHINE}"
+do_deploy[stamp-extra-info] = "${MACHINE_ARCH}"
diff --git a/meta/classes/devicetree.bbclass b/meta/classes/devicetree.bbclass
new file mode 100644
index 0000000000..d8779c7943
--- /dev/null
+++ b/meta/classes/devicetree.bbclass
@@ -0,0 +1,148 @@
+# This bbclass implements device tree compilation for user-provided device
+# tree sources. The device tree sources are compiled in the same way as
+# in-kernel device trees, which includes being able to include sources from
+# the kernel such as soc dtsi files or header files such as gpio.h. In
+# addition to device trees, this bbclass also handles compilation of device
+# tree overlays.
+#
+# The output of this class behaves similarly to kernel-devicetree.bbclass
+# in that the output files are installed into /boot/devicetree. However,
+# this class deliberately separates the deployed device trees into the
+# 'devicetree' subdirectory, which prevents clashes with the kernel-devicetree
+# output. Additionally, the device trees are populated into the sysroot so
+# that other recipes can access them there.
+
+SECTION ?= "bsp"
+
+# The default inclusion of kernel device tree includes and headers means that
+# device trees built with them are at least GPLv2 (and in some cases dual
+# licensed). Default to GPLv2 if the recipe does not specify a license.
+LICENSE ?= "GPLv2"
+LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0;md5=801f80980d171dd6425610833a22dbe6"
+
+INHIBIT_DEFAULT_DEPS = "1"
+DEPENDS += "dtc-native"
+
+inherit deploy kernel-arch
+
+COMPATIBLE_MACHINE ?= "^$"
+
+PROVIDES = "virtual/dtb"
+
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+SYSROOT_DIRS += "/boot/devicetree"
+FILES_${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
+
+S = "${WORKDIR}"
+B = "${WORKDIR}/build"
+
+# Default kernel includes; these represent what is normally used for
+# in-kernel sources.
+KERNEL_INCLUDE ??= " \
+ ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts \
+ ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts/* \
+ ${STAGING_KERNEL_DIR}/scripts/dtc/include-prefixes \
+ "
+
+DT_INCLUDE[doc] = "Search paths to be made available to both the device tree compiler and preprocessor for inclusion."
+DT_INCLUDE ?= "${DT_FILES_PATH} ${KERNEL_INCLUDE}"
+DT_FILES_PATH[doc] = "Defaults to source directory, can be used to select dts files that are not in source (e.g. generated)."
+DT_FILES_PATH ?= "${S}"
+
+DT_PADDING_SIZE[doc] = "Size of padding on the device tree blob, typically used as extra space for additional properties during boot."
+DT_PADDING_SIZE ??= "0x3000"
+DT_RESERVED_MAP[doc] = "Number of reserved map entries."
+DT_RESERVED_MAP ??= "8"
+DT_BOOT_CPU[doc] = "The boot CPU; defaults to 0."
+DT_BOOT_CPU ??= "0"
+
+DTC_FLAGS ?= "-R ${DT_RESERVED_MAP} -b ${DT_BOOT_CPU}"
+DTC_PPFLAGS ?= "-nostdinc -undef -D__DTS__ -x assembler-with-cpp"
+DTC_BFLAGS ?= "-p ${DT_PADDING_SIZE}"
+DTC_OFLAGS ?= "-p 0 -@ -H epapr"
+
+python () {
+ if d.getVar("KERNEL_INCLUDE"):
+ # auto add dependency on kernel tree, but only if kernel include paths
+ # are specified.
+ d.appendVarFlag("do_compile", "depends", " virtual/kernel:do_configure")
+}
+
+def expand_includes(varname, d):
+ import glob
+ includes = set()
+ # expand all includes with glob
+ for i in (d.getVar(varname) or "").split():
+ for g in glob.glob(i):
+ if os.path.isdir(g): # only add directories to include path
+ includes.add(g)
+ return includes
+
+def devicetree_source_is_overlay(path):
+ # determine if a dts file is an overlay by checking if it uses "/plugin/;"
+ with open(path, "r") as f:
+ for i in f:
+ if i.startswith("/plugin/;"):
+ return True
+ return False
+
+def devicetree_compile(dtspath, includes, d):
+ import subprocess
+ dts = os.path.basename(dtspath)
+ dtname = os.path.splitext(dts)[0]
+ bb.note("Processing {0} [{1}]".format(dtname, dts))
+
+ # preprocess
+ ppargs = d.getVar("BUILD_CPP").split()
+ ppargs += (d.getVar("DTC_PPFLAGS") or "").split()
+ for i in includes:
+ ppargs.append("-I{0}".format(i))
+ ppargs += ["-o", "{0}.pp".format(dts), dtspath]
+ bb.note("Running {0}".format(" ".join(ppargs)))
+ subprocess.run(ppargs, check = True)
+
+ # determine if the file is an overlay or not (using the preprocessed file)
+ isoverlay = devicetree_source_is_overlay("{0}.pp".format(dts))
+
+ # compile
+ dtcargs = ["dtc"] + (d.getVar("DTC_FLAGS") or "").split()
+ if isoverlay:
+ dtcargs += (d.getVar("DTC_OFLAGS") or "").split()
+ else:
+ dtcargs += (d.getVar("DTC_BFLAGS") or "").split()
+ for i in includes:
+ dtcargs += ["-i", i]
+ dtcargs += ["-o", "{0}.{1}".format(dtname, "dtbo" if isoverlay else "dtb")]
+ dtcargs += ["-I", "dts", "-O", "dtb", "{0}.pp".format(dts)]
+ bb.note("Running {0}".format(" ".join(dtcargs)))
+ subprocess.run(dtcargs, check = True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+python devicetree_do_compile() {
+ includes = expand_includes("DT_INCLUDE", d)
+ listpath = d.getVar("DT_FILES_PATH")
+ for dts in os.listdir(listpath):
+ dtspath = os.path.join(listpath, dts)
+ try:
+ if not(os.path.isfile(dtspath)) or not(dts.endswith(".dts") or devicetree_source_is_overlay(dtspath)):
+ continue # skip non-.dts files and non-overlay files
+ except:
+ continue # skip if can't determine if overlay
+ devicetree_compile(dtspath, includes, d)
+}
+
+devicetree_do_install() {
+ for DTB_FILE in `ls *.dtb *.dtbo`; do
+ install -Dm 0644 ${B}/${DTB_FILE} ${D}/boot/devicetree/${DTB_FILE}
+ done
+}
+
+devicetree_do_deploy() {
+ for DTB_FILE in `ls *.dtb *.dtbo`; do
+ install -Dm 0644 ${B}/${DTB_FILE} ${DEPLOYDIR}/devicetree/${DTB_FILE}
+ done
+}
+addtask deploy before do_build after do_install
+
+EXPORT_FUNCTIONS do_compile do_install do_deploy
+
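As a side note, the output extension is decided purely by the overlay check above; a sketch of that classification, with hypothetical file names:

    import os

    def output_name(dtspath):
        # Mirrors devicetree_source_is_overlay(): a line starting with
        # "/plugin/;" marks the source as an overlay, which is compiled
        # to .dtbo instead of .dtb.
        with open(dtspath, "r") as f:
            isoverlay = any(line.startswith("/plugin/;") for line in f)
        base = os.path.splitext(os.path.basename(dtspath))[0]
        return base + (".dtbo" if isoverlay else ".dtb")

    # output_name("myboard.dts")      -> "myboard.dtb"
    # output_name("pmod-overlay.dts") -> "pmod-overlay.dtbo" if it contains /plugin/;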
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
index be71aff35f..fdf7dc100f 100644
--- a/meta/classes/devshell.bbclass
+++ b/meta/classes/devshell.bbclass
@@ -3,19 +3,19 @@ inherit terminal
DEVSHELL = "${SHELL}"
python do_devshell () {
- if d.getVarFlag("do_devshell", "manualfakeroot", True):
+ if d.getVarFlag("do_devshell", "manualfakeroot"):
d.prependVar("DEVSHELL", "pseudo ")
- fakeenv = d.getVar("FAKEROOTENV", True).split()
+ fakeenv = d.getVar("FAKEROOTENV").split()
for f in fakeenv:
k = f.split("=")
- d.setVar(k[0], k[1])
+ d.setVar(k[0], k[1])
d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
d.delVarFlag("do_devshell", "fakeroot")
- oe_terminal(d.getVar('DEVSHELL', True), 'OpenEmbedded Developer Shell', d)
+ oe_terminal(d.getVar('DEVSHELL'), 'OpenEmbedded Developer Shell', d)
}
-addtask devshell after do_patch
+addtask devshell after do_patch do_prepare_recipe_sysroot
# The directory that the terminal starts in
DEVSHELL_STARTDIR ?= "${S}"
@@ -27,7 +27,7 @@ do_devshell[nostamp] = "1"
# be done as the normal user. We therefore carefully construct the environment
# manually
python () {
- if d.getVarFlag("do_devshell", "fakeroot", True):
+ if d.getVarFlag("do_devshell", "fakeroot"):
# We need to signal our code that we want fakeroot however we
# can't manipulate the environment and variables here yet (see YOCTO #4795)
d.setVarFlag("do_devshell", "manualfakeroot", "1")
@@ -49,7 +49,7 @@ def devpyshell(d):
old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
# &~ termios.ISIG
termios.tcsetattr(fd, termios.TCSADRAIN, old)
-
+
# No echo or buffering over the pty
noechoicanon(s)
@@ -82,7 +82,7 @@ def devpyshell(d):
more = False
i = code.InteractiveInterpreter(locals=_context)
- print("OE PyShell (PN = %s)\n" % d.getVar("PN", True))
+ print("OE PyShell (PN = %s)\n" % d.getVar("PN"))
def prompt(more):
if more:
@@ -145,7 +145,7 @@ python do_devpyshell() {
try:
devpyshell(d)
except SystemExit:
- # Stop the SIGTERM above causing an error exit code
+ # Stop the SIGTERM above causing an error exit code
return
finally:
return
diff --git a/meta/classes/devtool-source.bbclass b/meta/classes/devtool-source.bbclass
new file mode 100644
index 0000000000..280d6009f3
--- /dev/null
+++ b/meta/classes/devtool-source.bbclass
@@ -0,0 +1,233 @@
+# Development tool - source extraction helper class
+#
+# NOTE: this class is intended for use by devtool and should not be
+# inherited manually.
+#
+# Copyright (C) 2014-2017 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+DEVTOOL_TEMPDIR ?= ""
+DEVTOOL_PATCH_SRCDIR = "${DEVTOOL_TEMPDIR}/patchworkdir"
+
+
+python() {
+ tempdir = d.getVar('DEVTOOL_TEMPDIR')
+
+ if not tempdir:
+ bb.fatal('devtool-source class is for internal use by devtool only')
+
+ # Make a subdir so we guard against WORKDIR==S
+ workdir = os.path.join(tempdir, 'workdir')
+ d.setVar('WORKDIR', workdir)
+ if not d.getVar('S').startswith(workdir):
+ # Usually a shared workdir recipe (kernel, gcc)
+ # Try to set a reasonable default
+ if bb.data.inherits_class('kernel', d):
+ d.setVar('S', '${WORKDIR}/source')
+ else:
+ d.setVar('S', '${WORKDIR}/%s' % os.path.basename(d.getVar('S')))
+ if bb.data.inherits_class('kernel', d):
+ # We don't want to move the source to STAGING_KERNEL_DIR here
+ d.setVar('STAGING_KERNEL_DIR', '${S}')
+
+ d.setVar('STAMPS_DIR', os.path.join(tempdir, 'stamps'))
+ d.setVar('T', os.path.join(tempdir, 'temp'))
+
+ # Hook in pre/postfuncs
+ is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
+ if is_kernel_yocto:
+ unpacktask = 'do_kernel_checkout'
+ d.appendVarFlag('do_configure', 'postfuncs', ' devtool_post_configure')
+ else:
+ unpacktask = 'do_unpack'
+ d.appendVarFlag(unpacktask, 'postfuncs', ' devtool_post_unpack')
+ d.prependVarFlag('do_patch', 'prefuncs', ' devtool_pre_patch')
+ d.appendVarFlag('do_patch', 'postfuncs', ' devtool_post_patch')
+
+ # NOTE: in order for the patch stuff to be fully functional,
+ # PATCHTOOL and PATCH_COMMIT_FUNCTIONS need to be set; we can't
+ # do that here because we can't guarantee the order of the anonymous
+ # functions, so it gets done in the bbappend we create.
+}
+
+
+python devtool_post_unpack() {
+ import oe.recipeutils
+ import shutil
+ sys.path.insert(0, os.path.join(d.getVar('COREBASE'), 'scripts', 'lib'))
+ import scriptutils
+ from devtool import setup_git_repo
+
+ tempdir = d.getVar('DEVTOOL_TEMPDIR')
+ workdir = d.getVar('WORKDIR')
+ srcsubdir = d.getVar('S')
+
+ def _move_file(src, dst):
+ """Move a file. Creates all the directory components of destination path."""
+ dst_d = os.path.dirname(dst)
+ if dst_d:
+ bb.utils.mkdirhier(dst_d)
+ shutil.move(src, dst)
+
+ def _ls_tree(directory):
+ """Recursive listing of files in a directory"""
+ ret = []
+ for root, dirs, files in os.walk(directory):
+ ret.extend([os.path.relpath(os.path.join(root, fname), directory) for
+ fname in files])
+ return ret
+
+ is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
+ # Move local source files into separate subdir
+ recipe_patches = [os.path.basename(patch) for patch in
+ oe.recipeutils.get_recipe_patches(d)]
+ local_files = oe.recipeutils.get_recipe_local_files(d)
+
+ if is_kernel_yocto:
+ for key in [f for f in local_files if f.endswith('scc')]:
+ with open(local_files[key], 'r') as sccfile:
+ for l in sccfile:
+ line = l.split()
+ if line and line[0] in ('kconf', 'patch'):
+ cfg = os.path.join(os.path.dirname(local_files[key]), line[-1])
+ if cfg not in local_files.values():
+ local_files[line[-1]] = cfg
+ shutil.copy2(cfg, workdir)
+
+ # Ignore local files with subdir={BP}
+ srcabspath = os.path.abspath(srcsubdir)
+ local_files = [fname for fname in local_files if
+ os.path.exists(os.path.join(workdir, fname)) and
+ (srcabspath == workdir or not
+ os.path.join(workdir, fname).startswith(srcabspath +
+ os.sep))]
+ if local_files:
+ for fname in local_files:
+ _move_file(os.path.join(workdir, fname),
+ os.path.join(tempdir, 'oe-local-files', fname))
+ with open(os.path.join(tempdir, 'oe-local-files', '.gitignore'),
+ 'w') as f:
+ f.write('# Ignore local files, by default. Remove this file '
+ 'if you want to commit the directory to Git\n*\n')
+
+ if srcsubdir == workdir:
+ # Find non-patch non-local sources that were "unpacked" to srctree
+ # directory
+ src_files = [fname for fname in _ls_tree(workdir) if
+ os.path.basename(fname) not in recipe_patches]
+ srcsubdir = d.getVar('DEVTOOL_PATCH_SRCDIR')
+ # Move source files to S
+ for path in src_files:
+ _move_file(os.path.join(workdir, path),
+ os.path.join(srcsubdir, path))
+ elif os.path.dirname(srcsubdir) != workdir:
+ # Handle if S is set to a subdirectory of the source
+ srcsubdir = os.path.join(workdir, os.path.relpath(srcsubdir, workdir).split(os.sep)[0])
+
+ scriptutils.git_convert_standalone_clone(srcsubdir)
+
+ # Make sure that srcsubdir exists
+ bb.utils.mkdirhier(srcsubdir)
+ if not os.listdir(srcsubdir):
+ bb.warn("No source unpacked to S - either the %s recipe "
+ "doesn't use any source or the correct source "
+ "directory could not be determined" % d.getVar('PN'))
+
+ devbranch = d.getVar('DEVTOOL_DEVBRANCH')
+ setup_git_repo(srcsubdir, d.getVar('PV'), devbranch, d=d)
+
+ (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srcsubdir)
+ initial_rev = stdout.rstrip()
+ with open(os.path.join(tempdir, 'initial_rev'), 'w') as f:
+ f.write(initial_rev)
+
+ with open(os.path.join(tempdir, 'srcsubdir'), 'w') as f:
+ f.write(srcsubdir)
+}
+
+python devtool_pre_patch() {
+ if d.getVar('S') == d.getVar('WORKDIR'):
+ d.setVar('S', '${DEVTOOL_PATCH_SRCDIR}')
+}
+
+python devtool_post_patch() {
+ import shutil
+ tempdir = d.getVar('DEVTOOL_TEMPDIR')
+ with open(os.path.join(tempdir, 'srcsubdir'), 'r') as f:
+ srcsubdir = f.read()
+ with open(os.path.join(tempdir, 'initial_rev'), 'r') as f:
+ initial_rev = f.read()
+
+ def rm_patches():
+ patches_dir = os.path.join(srcsubdir, 'patches')
+ if os.path.exists(patches_dir):
+ shutil.rmtree(patches_dir)
+ # Restore any "patches" directory that was actually part of the source tree
+ try:
+ bb.process.run('git checkout -- patches', cwd=srcsubdir)
+ except bb.process.ExecutionError:
+ pass
+
+ extra_overrides = d.getVar('DEVTOOL_EXTRA_OVERRIDES')
+ if extra_overrides:
+ extra_overrides = set(extra_overrides.split(':'))
+ devbranch = d.getVar('DEVTOOL_DEVBRANCH')
+ default_overrides = d.getVar('OVERRIDES').split(':')
+ no_overrides = []
+ # First, we may have some overrides that are referred to in the recipe set in
+ # our configuration, so we need to make a branch that excludes those
+ for override in default_overrides:
+ if override not in extra_overrides:
+ no_overrides.append(override)
+ if default_overrides != no_overrides:
+ # Some overrides are active in the current configuration, so
+ # we need to create a branch where none of the overrides are active
+ bb.process.run('git checkout %s -b devtool-no-overrides' % initial_rev, cwd=srcsubdir)
+ # Run do_patch function with the override applied
+ localdata = bb.data.createCopy(d)
+ localdata.setVar('OVERRIDES', ':'.join(no_overrides))
+ bb.build.exec_func('do_patch', localdata)
+ rm_patches()
+ # Now we need to reconcile the dev branch with the no-overrides one
+ # (otherwise we'd likely be left with identical commits that have different hashes)
+ bb.process.run('git checkout %s' % devbranch, cwd=srcsubdir)
+ bb.process.run('git rebase devtool-no-overrides', cwd=srcsubdir)
+ else:
+ bb.process.run('git checkout %s -b devtool-no-overrides' % devbranch, cwd=srcsubdir)
+
+ for override in extra_overrides:
+ localdata = bb.data.createCopy(d)
+ if override in default_overrides:
+ bb.process.run('git branch devtool-override-%s %s' % (override, devbranch), cwd=srcsubdir)
+ else:
+ # Reset back to the initial commit on a new branch
+ bb.process.run('git checkout %s -b devtool-override-%s' % (initial_rev, override), cwd=srcsubdir)
+ # Run do_patch function with the override applied
+ localdata.appendVar('OVERRIDES', ':%s' % override)
+ bb.build.exec_func('do_patch', localdata)
+ rm_patches()
+ # Now we need to reconcile the new branch with the no-overrides one
+ # (otherwise we'd likely be left with identical commits that have different hashes)
+ bb.process.run('git rebase devtool-no-overrides', cwd=srcsubdir)
+ bb.process.run('git checkout %s' % devbranch, cwd=srcsubdir)
+ bb.process.run('git tag -f devtool-patched', cwd=srcsubdir)
+}
+
+python devtool_post_configure() {
+ import shutil
+ tempdir = d.getVar('DEVTOOL_TEMPDIR')
+ shutil.copy2(os.path.join(d.getVar('B'), '.config'), tempdir)
+}
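To keep the override handling above straight, this is the branch layout devtool_post_patch() ends up with ('foo' stands for a hypothetical entry in DEVTOOL_EXTRA_OVERRIDES):

    devtool-no-overrides    do_patch run with all extra overrides excluded
    devtool-override-foo    do_patch run with OVERRIDES extended by ":foo"
    <DEVTOOL_DEVBRANCH>     the working branch, left checked out and tagged
                            devtool-patched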
diff --git a/meta/classes/devupstream.bbclass b/meta/classes/devupstream.bbclass
new file mode 100644
index 0000000000..7780c5482c
--- /dev/null
+++ b/meta/classes/devupstream.bbclass
@@ -0,0 +1,48 @@
+# Class for use in BBCLASSEXTEND to make it easier to have a single recipe that
+# can build both stable tarballs and snapshots from upstream source
+# repositories.
+#
+# Usage:
+# BBCLASSEXTEND = "devupstream:target"
+# SRC_URI_class-devupstream = "git://git.example.com/example"
+# SRCREV_class-devupstream = "abcdef"
+#
+# If the first entry in SRC_URI is a git: URL then S is rewritten to
+# WORKDIR/git.
+#
+# There are a few caveats that remain to be solved:
+# - You can't build native or nativesdk recipes using for example
+# devupstream:native, you can only build target recipes.
+# - If the fetcher requires native tools (such as subversion-native) then
+# bitbake won't be able to add them automatically.
+
+CLASSOVERRIDE .= ":class-devupstream"
+
+python devupstream_virtclass_handler () {
+ # Do nothing if this is inherited, as it's for BBCLASSEXTEND
+ if "devupstream" not in (d.getVar('BBCLASSEXTEND') or ""):
+ bb.error("Don't inherit devupstream, use BBCLASSEXTEND")
+ return
+
+ variant = d.getVar("BBEXTENDVARIANT")
+    if variant not in ("target",):
+ bb.error("Pass the variant when using devupstream, for example devupstream:target")
+ return
+
+    # Development releases are never preferred by default
+ d.setVar("DEFAULT_PREFERENCE", "-1")
+
+ uri = bb.fetch2.URI(d.getVar("SRC_URI").split()[0])
+
+ if uri.scheme == "git":
+ d.setVar("S", "${WORKDIR}/git")
+
+ # Modify the PV if the recipe hasn't already overridden it
+ pv = d.getVar("PV")
+ proto_marker = "+" + uri.scheme
+ if proto_marker not in pv:
+ d.setVar("PV", pv + proto_marker + "${SRCPV}")
+}
+
+addhandler devupstream_virtclass_handler
+devupstream_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
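A sketch of the PV rewrite the handler performs, assuming a hypothetical recipe with PV = "1.2" and a git: URL first in SRC_URI:

    def extend_pv(pv, scheme):
        # Mirrors the handler above: append "+<scheme>${SRCPV}" exactly once.
        marker = "+" + scheme
        return pv if marker in pv else pv + marker + "${SRCPV}"

    # extend_pv("1.2", "git")             -> "1.2+git${SRCPV}"
    # extend_pv("1.2+git${SRCPV}", "git") -> unchanged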
diff --git a/meta/classes/distro_features_check.bbclass b/meta/classes/distro_features_check.bbclass
index 7e91dbcf4a..8124a8ca27 100644
--- a/meta/classes/distro_features_check.bbclass
+++ b/meta/classes/distro_features_check.bbclass
@@ -1,37 +1,7 @@
-# Allow checking of required and conflicting DISTRO_FEATURES
-#
-# ANY_OF_DISTRO_FEATURES: ensure at least one item on this list is included
-# in DISTRO_FEATURES.
-# REQUIRED_DISTRO_FEATURES: ensure every item on this list is included
-# in DISTRO_FEATURES.
-# CONFLICT_DISTRO_FEATURES: ensure no item in this list is included in
-# DISTRO_FEATURES.
-#
-# Copyright 2013 (C) O.S. Systems Software LTDA.
+# Temporarily provide a fallback for the old name of the class
-python () {
- # Assume at least one var is set.
- distro_features = (d.getVar('DISTRO_FEATURES', True) or "").split()
-
- any_of_distro_features = d.getVar('ANY_OF_DISTRO_FEATURES', True)
- if any_of_distro_features:
- any_of_distro_features = any_of_distro_features.split()
- if set.isdisjoint(set(any_of_distro_features),set(distro_features)):
- raise bb.parse.SkipPackage("one of '%s' needs to be in DISTRO_FEATURES" % any_of_distro_features)
-
- required_distro_features = d.getVar('REQUIRED_DISTRO_FEATURES', True)
- if required_distro_features:
- required_distro_features = required_distro_features.split()
- for f in required_distro_features:
- if f in distro_features:
- continue
- else:
- raise bb.parse.SkipPackage("missing required distro feature '%s' (not in DISTRO_FEATURES)" % f)
-
- conflict_distro_features = d.getVar('CONFLICT_DISTRO_FEATURES', True)
- if conflict_distro_features:
- conflict_distro_features = conflict_distro_features.split()
- for f in conflict_distro_features:
- if f in distro_features:
- raise bb.parse.SkipPackage("conflicting distro feature '%s' (in DISTRO_FEATURES)" % f)
+python __anonymous() {
+ bb.warn("distro_features_check.bbclass is deprecated, please use features_check.bbclass instead")
}
+
+inherit features_check
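features_check.bbclass itself is not part of this hunk; assuming it implements the same three checks the removed code did, the logic is equivalent to this sketch:

    def check_features(distro_features, any_of="", required="", conflicts=""):
        # ANY_OF: at least one listed feature must be enabled.
        # REQUIRED: every listed feature must be enabled.
        # CONFLICT: no listed feature may be enabled.
        features = set(distro_features.split())
        if any_of and features.isdisjoint(any_of.split()):
            raise RuntimeError("one of '%s' needs to be in DISTRO_FEATURES" % any_of)
        for f in required.split():
            if f not in features:
                raise RuntimeError("missing required distro feature '%s'" % f)
        for f in conflicts.split():
            if f in features:
                raise RuntimeError("conflicting distro feature '%s'" % f)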
diff --git a/meta/classes/distrodata.bbclass b/meta/classes/distrodata.bbclass
deleted file mode 100644
index fbb7402e0c..0000000000
--- a/meta/classes/distrodata.bbclass
+++ /dev/null
@@ -1,479 +0,0 @@
-include conf/distro/include/upstream_tracking.inc
-include conf/distro/include/distro_alias.inc
-include conf/distro/include/maintainers.inc
-
-addhandler distro_eventhandler
-distro_eventhandler[eventmask] = "bb.event.BuildStarted"
-python distro_eventhandler() {
- import oe.distro_check as dc
- import csv
- logfile = dc.create_log_file(e.data, "distrodata.csv")
-
- lf = bb.utils.lockfile("%s.lock" % logfile)
- with open(logfile, "a") as f:
- writer = csv.writer(f)
- writer.writerow(['Package', 'Description', 'Owner', 'License',
- 'VerMatch', 'Version', 'Upstream', 'Reason', 'Recipe Status',
- 'Distro 1', 'Distro 2', 'Distro 3'])
- f.close()
- bb.utils.unlockfile(lf)
-
- return
-}
-
-addtask distrodata_np
-do_distrodata_np[nostamp] = "1"
-python do_distrodata_np() {
- localdata = bb.data.createCopy(d)
- pn = d.getVar("PN", True)
- bb.note("Package Name: %s" % pn)
-
- import oe.distro_check as dist_check
- tmpdir = d.getVar('TMPDIR', True)
- distro_check_dir = os.path.join(tmpdir, "distro_check")
- datetime = localdata.getVar('DATETIME', True)
- dist_check.update_distro_data(distro_check_dir, datetime, localdata)
-
- if pn.find("-native") != -1:
- pnstripped = pn.split("-native")
- bb.note("Native Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- if pn.find("-cross") != -1:
- pnstripped = pn.split("-cross")
- bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- if pn.find("-crosssdk") != -1:
- pnstripped = pn.split("-crosssdk")
- bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- if pn.startswith("nativesdk-"):
- pnstripped = pn.replace("nativesdk-", "")
- bb.note("NativeSDK Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
-
- if pn.find("-initial") != -1:
- pnstripped = pn.split("-initial")
- bb.note("initial Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- """generate package information from .bb file"""
- pname = localdata.getVar('PN', True)
- pcurver = localdata.getVar('PV', True)
- pdesc = localdata.getVar('DESCRIPTION', True)
- if pdesc is not None:
- pdesc = pdesc.replace(',','')
- pdesc = pdesc.replace('\n','')
-
- pgrp = localdata.getVar('SECTION', True)
- plicense = localdata.getVar('LICENSE', True).replace(',','_')
-
- rstatus = localdata.getVar('RECIPE_COLOR', True)
- if rstatus is not None:
- rstatus = rstatus.replace(',','')
-
- pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True)
- if pcurver == pupver:
- vermatch="1"
- else:
- vermatch="0"
- noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
- if noupdate_reason is None:
- noupdate="0"
- else:
- noupdate="1"
- noupdate_reason = noupdate_reason.replace(',','')
-
- maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
- rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True)
- result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
-
- bb.note("DISTRO: %s,%s,%s,%s,%s,%s,%s,%s,%s\n" % \
- (pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus))
- line = pn
- for i in result:
- line = line + "," + i
- bb.note("%s\n" % line)
-}
-do_distrodata_np[vardepsexclude] = "DATETIME"
-
-addtask distrodata
-do_distrodata[nostamp] = "1"
-python do_distrodata() {
- import csv
- logpath = d.getVar('LOG_DIR', True)
- bb.utils.mkdirhier(logpath)
- logfile = os.path.join(logpath, "distrodata.csv")
-
- import oe.distro_check as dist_check
- localdata = bb.data.createCopy(d)
- tmpdir = d.getVar('TMPDIR', True)
- distro_check_dir = os.path.join(tmpdir, "distro_check")
- datetime = localdata.getVar('DATETIME', True)
- dist_check.update_distro_data(distro_check_dir, datetime, localdata)
-
- pn = d.getVar("PN", True)
- bb.note("Package Name: %s" % pn)
-
- if pn.find("-native") != -1:
- pnstripped = pn.split("-native")
- bb.note("Native Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- if pn.startswith("nativesdk-"):
- pnstripped = pn.replace("nativesdk-", "")
- bb.note("NativeSDK Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- if pn.find("-cross") != -1:
- pnstripped = pn.split("-cross")
- bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- if pn.find("-crosssdk") != -1:
- pnstripped = pn.split("-crosssdk")
- bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- if pn.find("-initial") != -1:
- pnstripped = pn.split("-initial")
- bb.note("initial Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- """generate package information from .bb file"""
- pname = localdata.getVar('PN', True)
- pcurver = localdata.getVar('PV', True)
- pdesc = localdata.getVar('DESCRIPTION', True)
- if pdesc is not None:
- pdesc = pdesc.replace(',','')
- pdesc = pdesc.replace('\n','')
-
- pgrp = localdata.getVar('SECTION', True)
- plicense = localdata.getVar('LICENSE', True).replace(',','_')
-
- rstatus = localdata.getVar('RECIPE_COLOR', True)
- if rstatus is not None:
- rstatus = rstatus.replace(',','')
-
- pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True)
- if pcurver == pupver:
- vermatch="1"
- else:
- vermatch="0"
-
- noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
- if noupdate_reason is None:
- noupdate="0"
- else:
- noupdate="1"
- noupdate_reason = noupdate_reason.replace(',','')
-
- maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
- rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True)
- # do the comparison
- result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
-
- lf = bb.utils.lockfile("%s.lock" % logfile)
- with open(logfile, "a") as f:
- row = [pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus]
- row.extend(result)
-
- writer = csv.writer(f)
- writer.writerow(row)
- f.close()
- bb.utils.unlockfile(lf)
-}
-do_distrodata[vardepsexclude] = "DATETIME"
-
-addtask distrodataall after do_distrodata
-do_distrodataall[recrdeptask] = "do_distrodataall do_distrodata"
-do_distrodataall[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_distrodataall[nostamp] = "1"
-do_distrodataall() {
- :
-}
-
-addhandler checkpkg_eventhandler
-checkpkg_eventhandler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted"
-python checkpkg_eventhandler() {
- import csv
-
- def parse_csv_file(filename):
- package_dict = {}
-
- with open(filename, "r") as f:
- reader = csv.reader(f, delimiter='\t')
- for row in reader:
- pn = row[0]
-
- if reader.line_num == 1:
- header = row
- continue
-
- if not pn in package_dict.keys():
- package_dict[pn] = row
- f.close()
-
- with open(filename, "w") as f:
- writer = csv.writer(f, delimiter='\t')
- writer.writerow(header)
- for pn in package_dict.keys():
- writer.writerow(package_dict[pn])
- f.close()
-
- del package_dict
-
- if bb.event.getName(e) == "BuildStarted":
- import oe.distro_check as dc
- logfile = dc.create_log_file(e.data, "checkpkg.csv")
-
- lf = bb.utils.lockfile("%s.lock" % logfile)
- with open(logfile, "a") as f:
- writer = csv.writer(f, delimiter='\t')
- headers = ['Package', 'Version', 'Upver', 'License', 'Section',
- 'Home', 'Release', 'Depends', 'BugTracker', 'PE', 'Description',
- 'Status', 'Tracking', 'URI', 'MAINTAINER', 'NoUpReason']
- writer.writerow(headers)
- f.close()
- bb.utils.unlockfile(lf)
- elif bb.event.getName(e) == "BuildCompleted":
- import os
- filename = "tmp/log/checkpkg.csv"
- if os.path.isfile(filename):
- lf = bb.utils.lockfile("%s.lock"%filename)
- parse_csv_file(filename)
- bb.utils.unlockfile(lf)
- return
-}
-
-addtask checkpkg
-do_checkpkg[nostamp] = "1"
-python do_checkpkg() {
- localdata = bb.data.createCopy(d)
- import csv
- import re
- import tempfile
- import subprocess
- import oe.recipeutils
- from bb.utils import vercmp_string
- from bb.fetch2 import FetchError, NoMethodError, decodeurl
-
- """first check whether a uri is provided"""
- src_uri = (d.getVar('SRC_URI', True) or '').split()
- if src_uri:
- uri_type, _, _, _, _, _ = decodeurl(src_uri[0])
- else:
- uri_type = "none"
-
- """initialize log files."""
- logpath = d.getVar('LOG_DIR', True)
- bb.utils.mkdirhier(logpath)
- logfile = os.path.join(logpath, "checkpkg.csv")
-
- """generate package information from .bb file"""
- pname = d.getVar('PN', True)
-
- if pname.find("-native") != -1:
- if d.getVar('BBCLASSEXTEND', True):
- return
- pnstripped = pname.split("-native")
- bb.note("Native Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- if pname.startswith("nativesdk-"):
- if d.getVar('BBCLASSEXTEND', True):
- return
- pnstripped = pname.replace("nativesdk-", "")
- bb.note("NativeSDK Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- if pname.find("-cross") != -1:
- pnstripped = pname.split("-cross")
- bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- if pname.find("-initial") != -1:
- pnstripped = pname.split("-initial")
- bb.note("initial Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- pdesc = localdata.getVar('DESCRIPTION', True)
- pgrp = localdata.getVar('SECTION', True)
- pversion = localdata.getVar('PV', True)
- plicense = localdata.getVar('LICENSE', True)
- psection = localdata.getVar('SECTION', True)
- phome = localdata.getVar('HOMEPAGE', True)
- prelease = localdata.getVar('PR', True)
- pdepends = localdata.getVar('DEPENDS', True)
- pbugtracker = localdata.getVar('BUGTRACKER', True)
- ppe = localdata.getVar('PE', True)
- psrcuri = localdata.getVar('SRC_URI', True)
- maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
-
- """ Get upstream version version """
- pupver = ""
- pstatus = ""
-
- try:
- uv = oe.recipeutils.get_recipe_upstream_version(localdata)
-
- pupver = uv['version']
- except Exception as e:
- if e is FetchError:
- pstatus = "ErrAccess"
- elif e is NoMethodError:
- pstatus = "ErrUnsupportedProto"
- else:
- pstatus = "ErrUnknown"
-
- """Set upstream version status"""
- if not pupver:
- pupver = "N/A"
- else:
- pv, _, _ = oe.recipeutils.get_recipe_pv_without_srcpv(pversion, uri_type)
- upv, _, _ = oe.recipeutils.get_recipe_pv_without_srcpv(pupver, uri_type)
-
- cmp = vercmp_string(pv, upv)
- if cmp == -1:
- pstatus = "UPDATE"
- elif cmp == 0:
- pstatus = "MATCH"
-
- if psrcuri:
- psrcuri = psrcuri.split()[0]
- else:
- psrcuri = "none"
- pdepends = "".join(pdepends.split("\t"))
- pdesc = "".join(pdesc.split("\t"))
- no_upgr_reason = d.getVar('RECIPE_NO_UPDATE_REASON', True)
- lf = bb.utils.lockfile("%s.lock" % logfile)
- with open(logfile, "a") as f:
- writer = csv.writer(f, delimiter='\t')
- writer.writerow([pname, pversion, pupver, plicense, psection, phome,
- prelease, pdepends, pbugtracker, ppe, pdesc, pstatus, pupver,
- psrcuri, maintainer, no_upgr_reason])
- f.close()
- bb.utils.unlockfile(lf)
-}
-
-addtask checkpkgall after do_checkpkg
-do_checkpkgall[recrdeptask] = "do_checkpkgall do_checkpkg"
-do_checkpkgall[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_checkpkgall[nostamp] = "1"
-do_checkpkgall() {
- :
-}
-
-addhandler distro_check_eventhandler
-distro_check_eventhandler[eventmask] = "bb.event.BuildStarted"
-python distro_check_eventhandler() {
- """initialize log files."""
- import oe.distro_check as dc
- result_file = dc.create_log_file(e.data, "distrocheck.csv")
- return
-}
-
-addtask distro_check
-do_distro_check[nostamp] = "1"
-python do_distro_check() {
- """checks if the package is present in other public Linux distros"""
- import oe.distro_check as dc
- import shutil
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk',d):
- return
-
- localdata = bb.data.createCopy(d)
- bb.data.update_data(localdata)
- tmpdir = d.getVar('TMPDIR', True)
- distro_check_dir = os.path.join(tmpdir, "distro_check")
- logpath = d.getVar('LOG_DIR', True)
- bb.utils.mkdirhier(logpath)
- result_file = os.path.join(logpath, "distrocheck.csv")
- datetime = localdata.getVar('DATETIME', True)
- dc.update_distro_data(distro_check_dir, datetime, localdata)
-
- # do the comparison
- result = dc.compare_in_distro_packages_list(distro_check_dir, d)
-
- # save the results
- dc.save_distro_check_result(result, datetime, result_file, d)
-}
-
-addtask distro_checkall after do_distro_check
-do_distro_checkall[recrdeptask] = "do_distro_checkall do_distro_check"
-do_distro_checkall[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_distro_checkall[nostamp] = "1"
-do_distro_checkall() {
- :
-}
-#
-#Check Missing License Text.
-#Use this task to generate the missing license text data for pkg-report system,
-#then we can search those recipes which license text isn't exsit in common-licenses directory
-#
-addhandler checklicense_eventhandler
-checklicense_eventhandler[eventmask] = "bb.event.BuildStarted"
-python checklicense_eventhandler() {
- """initialize log files."""
- import csv
- import oe.distro_check as dc
- logfile = dc.create_log_file(e.data, "missinglicense.csv")
- lf = bb.utils.lockfile("%s.lock" % logfile)
- with open(logfile, "a") as f:
- writer = csv.writer(f, delimiter='\t')
- writer.writerow(['Package', 'License', 'MissingLicense'])
- f.close()
- bb.utils.unlockfile(lf)
- return
-}
-
-addtask checklicense
-do_checklicense[nostamp] = "1"
-python do_checklicense() {
- import csv
- import shutil
- logpath = d.getVar('LOG_DIR', True)
- bb.utils.mkdirhier(logpath)
- pn = d.getVar('PN', True)
- logfile = os.path.join(logpath, "missinglicense.csv")
- generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
- license_types = d.getVar('LICENSE', True)
- for license_type in ((license_types.replace('+', '').replace('|', '&')
- .replace('(', '').replace(')', '').replace(';', '')
- .replace(',', '').replace(" ", "").split("&"))):
- if not os.path.isfile(os.path.join(generic_directory, license_type)):
- lf = bb.utils.lockfile("%s.lock" % logfile)
- with open(logfile, "a") as f:
- writer = csv.writer(f, delimiter='\t')
- writer.writerow([pn, license_types, license_type])
- f.close()
- bb.utils.unlockfile(lf)
- return
-}
-
-addtask checklicenseall after do_checklicense
-do_checklicenseall[recrdeptask] = "do_checklicenseall do_checklicense"
-do_checklicenseall[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_checklicenseall[nostamp] = "1"
-do_checklicenseall() {
- :
-}
-
-
diff --git a/meta/classes/distrooverrides.bbclass b/meta/classes/distrooverrides.bbclass
new file mode 100644
index 0000000000..9f4db0d771
--- /dev/null
+++ b/meta/classes/distrooverrides.bbclass
@@ -0,0 +1,32 @@
+# Turns certain DISTRO_FEATURES into overrides with the same
+# name plus a df- prefix. Ensures that these special
+# distro features remain set also for native and nativesdk
+# recipes, so that these overrides can also be used there.
+#
+# This makes it simpler to write .bbappends that only change the
+# task signatures of the recipe if the change is really enabled,
+# for example with:
+# do_install_append_df-my-feature () { ... }
+# where "my-feature" is a DISTRO_FEATURE.
+#
+# The class is meant to be used in a layer.conf or distro
+# .inc file with:
+# INHERIT += "distrooverrides"
+# DISTRO_FEATURES_OVERRIDES += "my-feature"
+#
+# Beware that this part of OVERRIDES changes during parsing, so usage
+# of these overrides should be limited to .bb and .bbappend files,
+# because then DISTRO_FEATURES is final.
+
+DISTRO_FEATURES_OVERRIDES ?= ""
+DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \
+Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES."
+
+DISTRO_FEATURES_FILTER_NATIVE_append = " ${DISTRO_FEATURES_OVERRIDES}"
+DISTRO_FEATURES_FILTER_NATIVESDK_append = " ${DISTRO_FEATURES_OVERRIDES}"
+
+# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task
+# signature because of this line, then the task dependency on
+# OVERRIDES itself should be fixed. Excluding these two variables
+# with DISTROOVERRIDES[vardepsexclude] would just work around the problem.
+DISTROOVERRIDES .= "${@ ''.join([':df-' + x for x in sorted(set(d.getVar('DISTRO_FEATURES_OVERRIDES').split()) & set((d.getVar('DISTRO_FEATURES') or '').split()))]) }"
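The inline expression above is dense; unpacked into plain Python it is equivalent to this sketch (feature names hypothetical):

    def df_overrides(overrides_features, distro_features):
        # Intersect the two lists, sort for determinism, prefix with ":df-".
        enabled = set(overrides_features.split()) & set(distro_features.split())
        return "".join(":df-" + x for x in sorted(enabled))

    # df_overrides("my-feature other", "systemd my-feature") -> ":df-my-feature"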
diff --git a/meta/classes/distutils-base.bbclass b/meta/classes/distutils-base.bbclass
index aa18e8b292..9f398d7051 100644
--- a/meta/classes/distutils-base.bbclass
+++ b/meta/classes/distutils-base.bbclass
@@ -1,4 +1,4 @@
-DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}"
+DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES') == '')]}"
RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
inherit distutils-common-base pythonnative
diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/distutils-common-base.bbclass
index 824a1b68b1..94b5fd426d 100644
--- a/meta/classes/distutils-common-base.bbclass
+++ b/meta/classes/distutils-common-base.bbclass
@@ -1,6 +1,18 @@
export STAGING_INCDIR
export STAGING_LIBDIR
+# LDSHARED is the ld *command* used to create a shared library
+export LDSHARED = "${CCLD} -shared"
+# LDCXXSHARED is the ld *command* used to create a shared library of C++
+# objects
+export LDCXXSHARED = "${CXX} -shared"
+# CCSHARED are the C *flags* used to create objects to go into a shared
+# library (module)
+export CCSHARED = "-fPIC -DPIC"
+# LINKFORSHARED are the flags passed to the $(CC) command that links
+# the python executable
+export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic"
+
FILES_${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
FILES_${PN}-staticdev += "\
diff --git a/meta/classes/distutils-tools.bbclass b/meta/classes/distutils-tools.bbclass
deleted file mode 100644
index 3ef9cc5a78..0000000000
--- a/meta/classes/distutils-tools.bbclass
+++ /dev/null
@@ -1,73 +0,0 @@
-DISTUTILS_BUILD_ARGS ?= ""
-DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
-DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
- --install-data=${STAGING_DATADIR}"
-DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
- --install-data=${D}/${datadir}"
-
-distutils_do_compile() {
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py build_ext execution failed."
-}
-
-distutils_stage_headers() {
- install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
-}
-
-distutils_stage_all() {
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
-}
-
-distutils_do_install() {
- echo "Beginning ${PN} Install ..."
- install -d ${D}${PYTHON_SITEPACKAGES_DIR}
- echo "Step 2 of ${PN} Install ..."
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- PYTHONPATH=${D}/${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py install execution failed."
-
- echo "Step 3 of ${PN} Install ..."
- # support filenames with *spaces*
- find ${D} -name "*.py" -print0 | while read -d $'\0' i ; do \
- sed -i -e s:${D}::g $i
- done
-
- echo "Step 4 of ${PN} Install ..."
- if test -e ${D}${bindir} ; then
- for i in ${D}${bindir}/* ; do \
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
-
- echo "Step 4 of ${PN} Install ..."
- if test -e ${D}${sbindir}; then
- for i in ${D}${sbindir}/* ; do \
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
-
- echo "Step 5 of ${PN} Install ..."
- rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
-
- #
- # FIXME: Bandaid against wrong datadir computation
- #
- if test -e ${D}${datadir}/share; then
- mv -f ${D}${datadir}/share/* ${D}${datadir}/
- fi
-}
-
-#EXPORT_FUNCTIONS do_compile do_install
-
-export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/distutils.bbclass b/meta/classes/distutils.bbclass
index 857572d759..3759b58263 100644
--- a/meta/classes/distutils.bbclass
+++ b/meta/classes/distutils.bbclass
@@ -4,20 +4,33 @@ DISTUTILS_BUILD_ARGS ?= ""
DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
--install-data=${STAGING_DATADIR}"
-DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
- --install-data=${D}/${datadir}"
+DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
+ --prefix=${prefix} \
+ --install-lib=${PYTHON_SITEPACKAGES_DIR} \
+ --install-data=${datadir}"
+
+DISTUTILS_PYTHON = "python"
+DISTUTILS_PYTHON_class-native = "nativepython"
+
+distutils_do_configure() {
+ if [ "${CLEANBROKEN}" != "1" ] ; then
+ NO_FETCH_BUILD=1 \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py clean ${DISTUTILS_BUILD_ARGS}
+ fi
+}
distutils_do_compile() {
+ NO_FETCH_BUILD=1 \
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py build execution failed."
+ bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
}
distutils_stage_headers() {
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
+ bbfatal_log "'${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS}' execution for stage_headers failed."
}
distutils_stage_all() {
@@ -26,7 +39,7 @@ distutils_stage_all() {
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
+ bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS}' execution for stage_all failed."
}
distutils_do_install() {
@@ -34,49 +47,41 @@ distutils_do_install() {
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py install execution failed."
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS} || \
+ bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
# support filenames with *spaces*
# only modify file if it contains path and recompile it
- find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \; -exec ${STAGING_BINDIR_NATIVE}/python-native/python -mcompileall {} \;
+ find ${D} -name "*.py" -exec grep -q ${D} {} \; \
+ -exec sed -i -e s:${D}::g {} \; \
+ -exec ${STAGING_BINDIR_NATIVE}/python-native/python -mcompileall {} \;
- if test -e ${D}${bindir} ; then
- for i in ${D}${bindir}/* ; do \
- if [ ${PN} != "${BPN}-native" ]; then
- sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i
- fi
+ for i in ${D}${bindir}/* ${D}${sbindir}/*; do
+ if [ -f "$i" ]; then
+ sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${DISTUTILS_PYTHON}:g $i
sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
-
- if test -e ${D}${sbindir}; then
- for i in ${D}${sbindir}/* ; do \
- if [ ${PN} != "${BPN}-native" ]; then
- sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i
- fi
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
+ fi
+ done
rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/site.py*
-
+
#
# FIXME: Bandaid against wrong datadir computation
#
- if test -e ${D}${datadir}/share; then
+ if [ -e ${D}${datadir}/share ]; then
mv -f ${D}${datadir}/share/* ${D}${datadir}/
rmdir ${D}${datadir}/share
fi
# Fix backport modules
- if test -e ${STAGING_LIBDIR}/${PYTHON_DIR}/site-packages/backports/__init__.py && test -e ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py; then
+ if [ -e ${STAGING_LIBDIR}/${PYTHON_DIR}/site-packages/backports/__init__.py ] &&
+ [ -e ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py ]; then
rm ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py;
rm ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.pyc;
fi
}
-EXPORT_FUNCTIONS do_compile do_install
+EXPORT_FUNCTIONS do_configure do_compile do_install
export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/distutils3-base.bbclass b/meta/classes/distutils3-base.bbclass
index 82ab6a3d1c..7dbf07ac4b 100644
--- a/meta/classes/distutils3-base.bbclass
+++ b/meta/classes/distutils3-base.bbclass
@@ -1,4 +1,4 @@
-DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}"
+DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES') == '')]}"
RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
inherit distutils-common-base python3native
diff --git a/meta/classes/distutils3.bbclass b/meta/classes/distutils3.bbclass
index a6720c5b6b..05a24bfe26 100644
--- a/meta/classes/distutils3.bbclass
+++ b/meta/classes/distutils3.bbclass
@@ -5,22 +5,35 @@ DISTUTILS_BUILD_EXT_ARGS ?= ""
DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
--install-data=${STAGING_DATADIR}"
-DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
- --install-data=${D}/${datadir}"
+DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
+ --prefix=${prefix} \
+ --install-lib=${PYTHON_SITEPACKAGES_DIR} \
+ --install-data=${datadir}"
+
+DISTUTILS_PYTHON = "python3"
+DISTUTILS_PYTHON_class-native = "nativepython3"
+
+distutils3_do_configure() {
+ if [ "${CLEANBROKEN}" != "1" ] ; then
+ NO_FETCH_BUILD=1 \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py clean ${DISTUTILS_BUILD_ARGS}
+ fi
+}
distutils3_do_compile() {
+ NO_FETCH_BUILD=1 \
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
build ${DISTUTILS_BUILD_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py build_ext execution failed."
+ bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
}
distutils3_do_compile[vardepsexclude] = "MACHINE"
distutils3_stage_headers() {
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
+ bbfatal_log "'${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS}' execution for stage_headers failed."
}
distutils3_stage_headers[vardepsexclude] = "MACHINE"
@@ -30,7 +43,7 @@ distutils3_stage_all() {
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
+ bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS}' execution for stage_all failed."
}
distutils3_stage_all[vardepsexclude] = "MACHINE"
@@ -39,38 +52,32 @@ distutils3_do_install() {
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py install execution failed."
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS} || \
+ bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
# support filenames with *spaces*
- find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \;
-
- if test -e ${D}${bindir} ; then
- for i in ${D}${bindir}/* ; do \
- sed -i -e s:${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}:${bindir}/env\ ${PYTHON_PN}:g $i
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
+ find ${D} -name "*.py" -exec grep -q ${D} {} \; \
+ -exec sed -i -e s:${D}::g {} \;
- if test -e ${D}${sbindir}; then
- for i in ${D}${sbindir}/* ; do \
- sed -i -e s:${STAGING_BINDIR_NATIVE}/python-${PYTHON_PN}/${PYTHON_PN}:${bindir}/env\ ${PYTHON_PN}:g $i
+ for i in ${D}${bindir}/* ${D}${sbindir}/*; do
+ if [ -f "$i" ]; then
+ sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${DISTUTILS_PYTHON}:g $i
sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
+ fi
+ done
rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
-
+
#
# FIXME: Bandaid against wrong datadir computation
#
- if test -e ${D}${datadir}/share; then
+ if [ -e ${D}${datadir}/share ]; then
mv -f ${D}${datadir}/share/* ${D}${datadir}/
rmdir ${D}${datadir}/share
fi
}
distutils3_do_install[vardepsexclude] = "MACHINE"
-EXPORT_FUNCTIONS do_compile do_install
+EXPORT_FUNCTIONS do_configure do_compile do_install
export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/dos2unix.bbclass b/meta/classes/dos2unix.bbclass
new file mode 100644
index 0000000000..3fc17e2196
--- /dev/null
+++ b/meta/classes/dos2unix.bbclass
@@ -0,0 +1,14 @@
+# Class to convert all CRLF line terminators to LF. Some projects
+# are developed/maintained on Windows and therefore use CRLF line
+# terminators, whereas Linux uses LF; the mismatch can cause
+# annoying patching errors during git push/checkout
+# processes.
+
+do_convert_crlf_to_lf[depends] += "dos2unix-native:do_populate_sysroot"
+
+# Convert CRLF line terminators to LF
+do_convert_crlf_to_lf () {
+ find ${S} -type f -exec dos2unix {} \;
+}
+
+addtask convert_crlf_to_lf after do_unpack before do_patch
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index 5c65d2b742..ea59d02ed9 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -4,7 +4,7 @@
# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
# Released under the MIT license (see COPYING.MIT for the terms)
#
-# externalsrc.bbclass enables use of an existing source tree, usually external to
+# externalsrc.bbclass enables use of an existing source tree, usually external to
# the build system to build a piece of software rather than the usual fetch/unpack/patch
# process.
#
@@ -28,34 +28,42 @@ SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
EXTERNALSRC_SYMLINKS ?= "oe-workdir:${WORKDIR} oe-logs:${T}"
python () {
- externalsrc = d.getVar('EXTERNALSRC', True)
+ externalsrc = d.getVar('EXTERNALSRC')
+ externalsrcbuild = d.getVar('EXTERNALSRC_BUILD')
+
+ if externalsrc and not externalsrc.startswith("/"):
+ bb.error("EXTERNALSRC must be an absolute path")
+ if externalsrcbuild and not externalsrcbuild.startswith("/"):
+ bb.error("EXTERNALSRC_BUILD must be an absolute path")
# If this is the base recipe and EXTERNALSRC is set for it or any of its
# derivatives, then enable BB_DONT_CACHE to force the recipe to always be
# re-parsed so that the file-checksums function for do_compile is run every
# time.
- bpn = d.getVar('BPN', True)
- if bpn == d.getVar('PN', True):
- classextend = (d.getVar('BBCLASSEXTEND', True) or '').split()
+ bpn = d.getVar('BPN')
+ classextend = (d.getVar('BBCLASSEXTEND') or '').split()
+ if bpn == d.getVar('PN') or not classextend:
if (externalsrc or
('native' in classextend and
- d.getVar('EXTERNALSRC_pn-%s-native' % bpn, True)) or
+ d.getVar('EXTERNALSRC_pn-%s-native' % bpn)) or
('nativesdk' in classextend and
- d.getVar('EXTERNALSRC_pn-nativesdk-%s' % bpn, True)) or
+ d.getVar('EXTERNALSRC_pn-nativesdk-%s' % bpn)) or
('cross' in classextend and
- d.getVar('EXTERNALSRC_pn-%s-cross' % bpn, True))):
+ d.getVar('EXTERNALSRC_pn-%s-cross' % bpn))):
d.setVar('BB_DONT_CACHE', '1')
if externalsrc:
+ import oe.recipeutils
+ import oe.path
+
d.setVar('S', externalsrc)
- externalsrcbuild = d.getVar('EXTERNALSRC_BUILD', True)
if externalsrcbuild:
d.setVar('B', externalsrcbuild)
else:
d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
local_srcuri = []
- fetch = bb.fetch2.Fetch((d.getVar('SRC_URI', True) or '').split(), d)
+ fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
for url in fetch.urls:
url_data = fetch.ud[url]
parm = url_data.parm
@@ -69,7 +77,10 @@ python () {
# Dummy value because the default function can't be called with blank SRC_URI
d.setVar('SRCPV', '999')
- tasks = filter(lambda k: d.getVarFlag(k, "task", True), d.keys())
+ if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
+ d.setVar('CONFIGUREOPT_DEPTRACK', '')
+
+ tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
for task in tasks:
if task.endswith("_setscene"):
@@ -80,10 +91,10 @@ python () {
d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
# We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
- cleandirs = (d.getVarFlag(task, 'cleandirs', False) or '').split()
+ cleandirs = oe.recipeutils.split_var_value(d.getVarFlag(task, 'cleandirs', False) or '')
setvalue = False
for cleandir in cleandirs[:]:
- if d.expand(cleandir) == externalsrc:
+ if oe.path.is_path_parent(externalsrc, d.expand(cleandir)):
cleandirs.remove(cleandir)
setvalue = True
if setvalue:
@@ -94,7 +105,7 @@ python () {
# Note that we cannot use d.appendVarFlag() here because deps is expected to be a list object, not a string
d.setVarFlag('do_configure', 'deps', (d.getVarFlag('do_configure', 'deps', False) or []) + ['do_unpack'])
- for task in d.getVar("SRCTREECOVEREDTASKS", True).split():
+ for task in d.getVar("SRCTREECOVEREDTASKS").split():
if local_srcuri and task in fetch_tasks:
continue
bb.build.deltask(task, d)
@@ -103,26 +114,34 @@ python () {
d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
+ d.setVarFlag('do_configure', 'file-checksums', '${@srctree_configure_hash_files(d)}')
# We don't want the workdir to go away
- d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN', True))
+ d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN'))
+
+ bb.build.addtask('do_buildclean',
+ 'do_clean' if d.getVar('S') == d.getVar('B') else None,
+ None, d)
# If B=S the same builddir is used even for different architectures.
# Thus, use a shared CONFIGURESTAMPFILE and STAMP directory so that
# change of do_configure task hash is correctly detected and stamps are
# invalidated if e.g. MACHINE changes.
- if d.getVar('S', True) == d.getVar('B', True):
+ if d.getVar('S') == d.getVar('B'):
configstamp = '${TMPDIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}/configure.sstate'
d.setVar('CONFIGURESTAMPFILE', configstamp)
d.setVar('STAMP', '${STAMPS_DIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}')
+ d.setVar('STAMPCLEAN', '${STAMPS_DIR}/work-shared/${PN}/*-*')
}
python externalsrc_configure_prefunc() {
+ s_dir = d.getVar('S')
# Create desired symlinks
- symlinks = (d.getVar('EXTERNALSRC_SYMLINKS', True) or '').split()
+ symlinks = (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()
+ newlinks = []
for symlink in symlinks:
symsplit = symlink.split(':', 1)
- lnkfile = os.path.join(d.getVar('S', True), symsplit[0])
+ lnkfile = os.path.join(s_dir, symsplit[0])
target = d.expand(symsplit[1])
if len(symsplit) > 1:
if os.path.islink(lnkfile):
@@ -134,35 +153,91 @@ python externalsrc_configure_prefunc() {
# File/dir exists with same name as link, just leave it alone
continue
os.symlink(target, lnkfile)
+ newlinks.append(symsplit[0])
+ # Hide the symlinks from git
+ try:
+ git_exclude_file = os.path.join(s_dir, '.git/info/exclude')
+ if os.path.exists(git_exclude_file):
+ with open(git_exclude_file, 'r+') as efile:
+                # strip newlines so the membership test below matches entries
+                elines = [line.rstrip('\n') for line in efile.readlines()]
+ for link in newlinks:
+ if link in elines or '/'+link in elines:
+ continue
+ efile.write('/' + link + '\n')
+ except IOError as ioe:
+ bb.note('Failed to hide EXTERNALSRC_SYMLINKS from git')
}
python externalsrc_compile_prefunc() {
# Make it obvious that this is happening, since forgetting about it could lead to much confusion
- bb.plain('NOTE: %s: compiling from external source tree %s' % (d.getVar('PN', True), d.getVar('EXTERNALSRC', True)))
+ bb.plain('NOTE: %s: compiling from external source tree %s' % (d.getVar('PN'), d.getVar('EXTERNALSRC')))
}
-def srctree_hash_files(d):
+do_buildclean[dirs] = "${S} ${B}"
+do_buildclean[nostamp] = "1"
+do_buildclean[doc] = "Call 'make clean' or equivalent in ${B}"
+externalsrc_do_buildclean() {
+ if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
+ rm -f ${@' '.join([x.split(':')[0] for x in (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()])}
+ if [ "${CLEANBROKEN}" != "1" ]; then
+ oe_runmake clean || die "make failed"
+ fi
+ else
+ bbnote "nothing to do - no makefile found"
+ fi
+}
+
+def srctree_hash_files(d, srcdir=None):
import shutil
import subprocess
import tempfile
- s_dir = d.getVar('EXTERNALSRC', True)
- git_dir = os.path.join(s_dir, '.git')
- oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1')
+ s_dir = srcdir or d.getVar('EXTERNALSRC')
+ git_dir = None
+
+ try:
+ git_dir = os.path.join(s_dir,
+ subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ except subprocess.CalledProcessError:
+ pass
ret = " "
- if os.path.exists(git_dir):
- with tempfile.NamedTemporaryFile(dir=git_dir, prefix='oe-devtool-index') as tmp_index:
+ if git_dir is not None:
+ oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1-%s' % d.getVar('PN'))
+ with tempfile.NamedTemporaryFile(prefix='oe-devtool-index') as tmp_index:
# Clone index
- shutil.copy2(os.path.join(git_dir, 'index'), tmp_index.name)
+ shutil.copyfile(os.path.join(git_dir, 'index'), tmp_index.name)
# Update our custom index
env = os.environ.copy()
env['GIT_INDEX_FILE'] = tmp_index.name
- subprocess.check_output(['git', 'add', '.'], cwd=s_dir, env=env)
+ subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
ret = oe_hash_file + ':True'
else:
- ret = d.getVar('EXTERNALSRC', True) + '/*:True'
+ ret = s_dir + '/*:True'
return ret
+
+def srctree_configure_hash_files(d):
+ """
+ Get the list of files that should trigger do_configure to re-execute,
+ based on the value of CONFIGURE_FILES
+ """
+ in_files = (d.getVar('CONFIGURE_FILES') or '').split()
+ out_items = []
+ search_files = []
+ for entry in in_files:
+ if entry.startswith('/'):
+ out_items.append('%s:%s' % (entry, os.path.exists(entry)))
+ else:
+ search_files.append(entry)
+ if search_files:
+ s_dir = d.getVar('EXTERNALSRC')
+ for root, _, files in os.walk(s_dir):
+ for f in files:
+ if f in search_files:
+ out_items.append('%s:True' % os.path.join(root, f))
+ return ' '.join(out_items)
+
+EXPORT_FUNCTIONS do_buildclean
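A minimal sketch of how this class is typically enabled from local.conf; the recipe name and paths are illustrative only, and both paths must be absolute per the new checks above.

    # local.conf sketch (hypothetical recipe name and paths)
    INHERIT += "externalsrc"
    EXTERNALSRC_pn-myapp = "/home/user/src/myapp"
    # optional: build in-tree (B = S), which also chains do_buildclean to do_clean
    EXTERNALSRC_BUILD_pn-myapp = "/home/user/src/myapp"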
diff --git a/meta/classes/extrausers.bbclass b/meta/classes/extrausers.bbclass
index 43900f359d..32569e97db 100644
--- a/meta/classes/extrausers.bbclass
+++ b/meta/classes/extrausers.bbclass
@@ -1,21 +1,20 @@
-# This bbclass is mainly used for image level user/group configuration.
+# This bbclass is used for image level user/group configuration.
# Inherit this class if you want to make EXTRA_USERS_PARAMS effective.
# Below is an example showing how to use this functionality.
-# INHERIT += "extrausers"
+# IMAGE_CLASSES += "extrausers"
# EXTRA_USERS_PARAMS = "\
-# useradd -p '' tester; \
-# groupadd developers; \
-# userdel nobody; \
-# groupdel -g video; \
-# groupmod -g 1020 developers; \
-# usermod -s /bin/sh tester; \
+# useradd -p '' tester; \
+# groupadd developers; \
+# userdel nobody; \
+# groupdel -g video; \
+# groupmod -g 1020 developers; \
+# usermod -s /bin/sh tester; \
# "
-
inherit useradd_base
-IMAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS', True))]}"
+PACKAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
# Image level user / group settings
ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group;"
@@ -63,3 +62,7 @@ set_user_group () {
remaining=`echo $remaining | cut -d ';' -f2-`
done
}
+
+USERADDEXTENSION ?= ""
+
+inherit ${USERADDEXTENSION}
diff --git a/meta/classes/features_check.bbclass b/meta/classes/features_check.bbclass
new file mode 100644
index 0000000000..391fbe1c94
--- /dev/null
+++ b/meta/classes/features_check.bbclass
@@ -0,0 +1,85 @@
+# Allow checking of required and conflicting DISTRO_FEATURES,
+# MACHINE_FEATURES and COMBINED_FEATURES
+#
+# ANY_OF_DISTRO_FEATURES: ensure at least one item on this list is included
+# in DISTRO_FEATURES.
+# REQUIRED_DISTRO_FEATURES: ensure every item on this list is included
+# in DISTRO_FEATURES.
+# CONFLICT_DISTRO_FEATURES: ensure no item in this list is included in
+# DISTRO_FEATURES.
+# ANY_OF_MACHINE_FEATURES: ensure at least one item on this list is included
+# in MACHINE_FEATURES.
+# REQUIRED_MACHINE_FEATURES: ensure every item on this list is included
+# in MACHINE_FEATURES.
+# CONFLICT_MACHINE_FEATURES: ensure no item in this list is included in
+# MACHINE_FEATURES.
+# ANY_OF_COMBINED_FEATURES: ensure at least one item on this list is included
+# in COMBINED_FEATURES.
+# REQUIRED_COMBINED_FEATURES: ensure every item on this list is included
+# in COMBINED_FEATURES.
+# CONFLICT_COMBINED_FEATURES: ensure no item in this list is included in
+# COMBINED_FEATURES.
+#
+# Copyright 2019 (C) Texas Instruments Inc.
+# Copyright 2013 (C) O.S. Systems Software LTDA.
+
+python () {
+ # Assume at least one var is set.
+ distro_features = set((d.getVar('DISTRO_FEATURES') or '').split())
+
+ any_of_distro_features = set((d.getVar('ANY_OF_DISTRO_FEATURES') or '').split())
+ if any_of_distro_features:
+ if set.isdisjoint(any_of_distro_features, distro_features):
+ raise bb.parse.SkipRecipe("one of '%s' needs to be in DISTRO_FEATURES" % ' '.join(any_of_distro_features))
+
+ required_distro_features = set((d.getVar('REQUIRED_DISTRO_FEATURES') or '').split())
+ if required_distro_features:
+ missing = set.difference(required_distro_features, distro_features)
+ if missing:
+ raise bb.parse.SkipRecipe("missing required distro feature%s '%s' (not in DISTRO_FEATURES)" % ('s' if len(missing) > 1 else '', ' '.join(missing)))
+
+ conflict_distro_features = set((d.getVar('CONFLICT_DISTRO_FEATURES') or '').split())
+ if conflict_distro_features:
+ conflicts = set.intersection(conflict_distro_features, distro_features)
+ if conflicts:
+ raise bb.parse.SkipRecipe("conflicting distro feature%s '%s' (in DISTRO_FEATURES)" % ('s' if len(conflicts) > 1 else '', ' '.join(conflicts)))
+
+ # Assume at least one var is set.
+ machine_features = set((d.getVar('MACHINE_FEATURES') or '').split())
+
+ any_of_machine_features = set((d.getVar('ANY_OF_MACHINE_FEATURES') or '').split())
+ if any_of_machine_features:
+ if set.isdisjoint(any_of_machine_features, machine_features):
+ raise bb.parse.SkipRecipe("one of '%s' needs to be in MACHINE_FEATURES" % ' '.join(any_of_machine_features))
+
+ required_machine_features = set((d.getVar('REQUIRED_MACHINE_FEATURES') or '').split())
+ if required_machine_features:
+ missing = set.difference(required_machine_features, machine_features)
+ if missing:
+ raise bb.parse.SkipRecipe("missing required machine feature%s '%s' (not in MACHINE_FEATURES)" % ('s' if len(missing) > 1 else '', ' '.join(missing)))
+
+ conflict_machine_features = set((d.getVar('CONFLICT_MACHINE_FEATURES') or '').split())
+ if conflict_machine_features:
+ conflicts = set.intersection(conflict_machine_features, machine_features)
+ if conflicts:
+ raise bb.parse.SkipRecipe("conflicting machine feature%s '%s' (in MACHINE_FEATURES)" % ('s' if len(conflicts) > 1 else '', ' '.join(conflicts)))
+
+ # Assume at least one var is set.
+ combined_features = set((d.getVar('COMBINED_FEATURES') or '').split())
+
+ any_of_combined_features = set((d.getVar('ANY_OF_COMBINED_FEATURES') or '').split())
+ if any_of_combined_features:
+ if set.isdisjoint(any_of_combined_features, combined_features):
+ raise bb.parse.SkipRecipe("one of '%s' needs to be in COMBINED_FEATURES" % ' '.join(any_of_combined_features))
+
+ required_combined_features = set((d.getVar('REQUIRED_COMBINED_FEATURES') or '').split())
+ if required_combined_features:
+ missing = set.difference(required_combined_features, combined_features)
+ if missing:
+            raise bb.parse.SkipRecipe("missing required combined feature%s '%s' (not in COMBINED_FEATURES)" % ('s' if len(missing) > 1 else '', ' '.join(missing)))
+
+ conflict_combined_features = set((d.getVar('CONFLICT_COMBINED_FEATURES') or '').split())
+ if conflict_combined_features:
+ conflicts = set.intersection(conflict_combined_features, combined_features)
+ if conflicts:
+            raise bb.parse.SkipRecipe("conflicting combined feature%s '%s' (in COMBINED_FEATURES)" % ('s' if len(conflicts) > 1 else '', ' '.join(conflicts)))
+}
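A usage sketch with illustrative feature names; each of the variables documented above is optional and only checked when set.

    # hypothetical recipe fragment
    inherit features_check
    REQUIRED_DISTRO_FEATURES = "x11 opengl"
    CONFLICT_DISTRO_FEATURES = "wayland"
    ANY_OF_MACHINE_FEATURES = "alsa"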
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
index 8ebdfc4f5c..13f9df1592 100644
--- a/meta/classes/fontcache.bbclass
+++ b/meta/classes/fontcache.bbclass
@@ -3,7 +3,7 @@
# packages.
#
-DEPENDS += "qemu-native"
+PACKAGE_WRITE_DEPS += "qemu-native"
inherit qemu
FONT_PACKAGES ??= "${PN}"
@@ -17,9 +17,10 @@ FONTCONFIG_CACHE_PARAMS ?= "-v"
FONTCONFIG_CACHE_ENV ?= "FC_DEBUG=1"
fontcache_common() {
if [ -n "$D" ] ; then
- $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} \
+ $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} \
'bindir="${bindir}"' \
'libdir="${libdir}"' \
+ 'libexecdir="${libexecdir}"' \
'base_libdir="${base_libdir}"' \
'fontconfigcachedir="${FONTCONFIG_CACHE_DIR}"' \
'fontconfigcacheparams="${FONTCONFIG_CACHE_PARAMS}"' \
@@ -30,26 +31,26 @@ fi
}
python () {
- font_pkgs = d.getVar('FONT_PACKAGES', True).split()
- deps = d.getVar("FONT_EXTRA_RDEPENDS", True)
+ font_pkgs = d.getVar('FONT_PACKAGES').split()
+ deps = d.getVar("FONT_EXTRA_RDEPENDS")
for pkg in font_pkgs:
if deps: d.appendVar('RDEPENDS_' + pkg, ' '+deps)
}
python add_fontcache_postinsts() {
- for pkg in d.getVar('FONT_PACKAGES', True).split():
+ for pkg in d.getVar('FONT_PACKAGES').split():
bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('fontcache_common', True)
+ postinst += d.getVar('fontcache_common')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('fontcache_common', True)
+ postrm += d.getVar('fontcache_common')
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
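A usage sketch, assuming a recipe that splits its fonts into a hypothetical ${PN}-fonts package:

    # hypothetical recipe fragment; assumes PACKAGES already defines ${PN}-fonts
    inherit fontcache
    FONT_PACKAGES = "${PN}-fonts"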
diff --git a/meta/classes/fs-uuid.bbclass b/meta/classes/fs-uuid.bbclass
index bd2613cf10..9b53dfba7a 100644
--- a/meta/classes/fs-uuid.bbclass
+++ b/meta/classes/fs-uuid.bbclass
@@ -3,7 +3,7 @@
# on ext file systems and depends on tune2fs.
def get_rootfs_uuid(d):
import subprocess
- rootfs = d.getVar('ROOTFS', True)
+ rootfs = d.getVar('ROOTFS')
output = subprocess.check_output(['tune2fs', '-l', rootfs])
for line in output.split('\n'):
if line.startswith('Filesystem UUID:'):
@@ -13,7 +13,7 @@ def get_rootfs_uuid(d):
bb.fatal('Could not determine filesystem UUID of %s' % rootfs)
# Replace the special <<uuid-of-rootfs>> inside a string (like the
-# root= APPEND string in a syslinux.cfg or gummiboot entry) with the
+# root= APPEND string in a syslinux.cfg or systemd-boot entry) with the
# actual UUID of the rootfs. Does nothing if the special string
# is not used.
def replace_rootfs_uuid(d, string):
diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass
index d7afa7282f..3e3c509d5f 100644
--- a/meta/classes/gconf.bbclass
+++ b/meta/classes/gconf.bbclass
@@ -1,4 +1,5 @@
-DEPENDS += "gconf gconf-native"
+DEPENDS += "gconf"
+PACKAGE_WRITE_DEPS += "gconf-native"
# These are for when gconftool is used natively and the prefix isn't necessarily
# the sysroot. TODO: replicate the postinst logic for -native packages going
@@ -42,13 +43,13 @@ done
python populate_packages_append () {
import re
- packages = d.getVar('PACKAGES', True).split()
- pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES').split()
+ pkgdest = d.getVar('PKGDEST')
for pkg in packages:
schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
schemas = []
- schema_re = re.compile(".*\.schemas$")
+ schema_re = re.compile(r".*\.schemas$")
if os.path.exists(schema_dir):
for f in os.listdir(schema_dir):
if schema_re.match(f):
@@ -56,15 +57,15 @@ python populate_packages_append () {
if schemas != []:
bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
d.setVar('SCHEMA_FILES', " ".join(schemas))
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('gconf_postinst', True)
+ postinst += d.getVar('gconf_postinst')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg, True)
+ prerm = d.getVar('pkg_prerm_%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
- prerm += d.getVar('gconf_prerm', True)
+ prerm += d.getVar('gconf_prerm')
d.setVar('pkg_prerm_%s' % pkg, prerm)
d.appendVar("RDEPENDS_%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
}
diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass
index 03b89b2455..be2ef3b311 100644
--- a/meta/classes/gettext.bbclass
+++ b/meta/classes/gettext.bbclass
@@ -1,19 +1,22 @@
def gettext_dependencies(d):
- if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
+ if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
return ""
- if d.getVar('USE_NLS', True) == 'no':
+ if d.getVar('USE_NLS') == 'no':
return "gettext-minimal-native"
- return d.getVar('DEPENDS_GETTEXT', False)
+ return "gettext-native"
def gettext_oeconf(d):
- if d.getVar('USE_NLS', True) == 'no':
+ if d.getVar('USE_NLS') == 'no':
return '--disable-nls'
# Remove the NLS bits if USE_NLS is no or INHIBIT_DEFAULT_DEPS is set
- if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
+ if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
return '--disable-nls'
return "--enable-nls"
-DEPENDS_GETTEXT ??= "virtual/gettext gettext-native"
-
-BASEDEPENDS =+ "${@gettext_dependencies(d)}"
+BASEDEPENDS_append = " ${@gettext_dependencies(d)}"
EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
+
+# Without this, msgfmt from gettext-native will not find ITS files
+# provided by target recipes (for example, polkit.its).
+GETTEXTDATADIRS_append_class-target = ":${STAGING_DATADIR}/gettext"
+export GETTEXTDATADIRS
diff --git a/meta/classes/gio-module-cache.bbclass b/meta/classes/gio-module-cache.bbclass
index 91461b11e7..e429bd3197 100644
--- a/meta/classes/gio-module-cache.bbclass
+++ b/meta/classes/gio-module-cache.bbclass
@@ -1,4 +1,4 @@
-DEPENDS += "qemu-native"
+PACKAGE_WRITE_DEPS += "qemu-native"
inherit qemu
GIO_MODULE_PACKAGES ??= "${PN}"
@@ -9,6 +9,7 @@ if [ "x$D" != "x" ]; then
mlprefix=${MLPREFIX} \
binprefix=${MLPREFIX} \
libdir=${libdir} \
+ libexecdir=${libexecdir} \
base_libdir=${base_libdir} \
bindir=${bindir}
else
@@ -17,21 +18,21 @@ fi
}
python populate_packages_append () {
- packages = d.getVar('GIO_MODULE_PACKAGES', True).split()
+ packages = d.getVar('GIO_MODULE_PACKAGES').split()
for pkg in packages:
bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('gio_module_cache_common', True)
+ postinst += d.getVar('gio_module_cache_common')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('gio_module_cache_common', True)
+ postrm += d.getVar('gio_module_cache_common')
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
diff --git a/meta/classes/glide.bbclass b/meta/classes/glide.bbclass
new file mode 100644
index 0000000000..db421745bd
--- /dev/null
+++ b/meta/classes/glide.bbclass
@@ -0,0 +1,9 @@
+# Handle Glide Vendor Package Management use
+#
+# Copyright 2018 (C) O.S. Systems Software LTDA.
+
+DEPENDS_append = " glide-native"
+
+do_compile_prepend() {
+ ( cd ${B}/src/${GO_IMPORT} && glide install )
+}
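A sketch of a recipe combining this class with the go class; the import path is illustrative.

    # hypothetical recipe fragment
    GO_IMPORT = "github.com/example/app"
    inherit go glide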
diff --git a/meta/classes/gnome.bbclass b/meta/classes/gnome.bbclass
deleted file mode 100644
index c6202bbb75..0000000000
--- a/meta/classes/gnome.bbclass
+++ /dev/null
@@ -1 +0,0 @@
-inherit gnomebase gtk-icon-cache gconf mime
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
index e5c67760ce..efcb6caae1 100644
--- a/meta/classes/gnomebase.bbclass
+++ b/meta/classes/gnomebase.bbclass
@@ -6,8 +6,6 @@ SECTION ?= "x11/gnome"
GNOMEBN ?= "${BPN}"
SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
-DEPENDS += "gnome-common-native"
-
FILES_${PN} += "${datadir}/application-registry \
${datadir}/mime-info \
${datadir}/mime/packages \
@@ -16,15 +14,17 @@ FILES_${PN} += "${datadir}/application-registry \
${datadir}/polkit* \
${datadir}/GConf \
${datadir}/glib-2.0/schemas \
+ ${datadir}/appdata \
+ ${datadir}/icons \
"
FILES_${PN}-doc += "${datadir}/devhelp"
-inherit autotools pkgconfig
+GNOMEBASEBUILDCLASS ??= "autotools"
+inherit ${GNOMEBASEBUILDCLASS} pkgconfig
do_install_append() {
rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
rm -rf ${D}${localstatedir}/scrollkeeper/*
rm -f ${D}${datadir}/applications/*.cache
}
-
diff --git a/meta/classes/go-ptest.bbclass b/meta/classes/go-ptest.bbclass
new file mode 100644
index 0000000000..e230a80587
--- /dev/null
+++ b/meta/classes/go-ptest.bbclass
@@ -0,0 +1,54 @@
+inherit go ptest
+
+do_compile_ptest_base() {
+ export TMPDIR="${GOTMPDIR}"
+ rm -f ${B}/.go_compiled_tests.list
+ go_list_package_tests | while read pkg; do
+ cd ${B}/src/$pkg
+ ${GO} test ${GOPTESTBUILDFLAGS} $pkg
+ find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \
+ sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list
+ done
+ do_compile_ptest
+}
+
+do_compile_ptest_base[dirs] =+ "${GOTMPDIR}"
+
+go_make_ptest_wrapper() {
+ cat >${D}${PTEST_PATH}/run-ptest <<EOF
+#!/bin/sh
+RC=0
+run_test() (
+ cd "\$1"
+ ((((./\$2 ${GOPTESTFLAGS}; echo \$? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)\$,\\1: \$1/\$2," >&4) 3>&1) | (read rc; exit \$rc)) 4>&1
+ exit \$?)
+EOF
+
+}
+
+do_install_ptest_base() {
+ test -f "${B}/.go_compiled_tests.list" || exit 0
+ install -d ${D}${PTEST_PATH}
+ go_stage_testdata
+ go_make_ptest_wrapper
+ havetests=""
+ while read test; do
+ testdir=`dirname $test`
+ testprog=`basename $test`
+ install -d ${D}${PTEST_PATH}/$testdir
+ install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
+ echo "run_test $testdir $testprog || RC=1" >> ${D}${PTEST_PATH}/run-ptest
+ havetests="yes"
+ done < ${B}/.go_compiled_tests.list
+ if [ -n "$havetests" ]; then
+ echo "exit \$RC" >> ${D}${PTEST_PATH}/run-ptest
+ chmod +x ${D}${PTEST_PATH}/run-ptest
+ else
+ rm -rf ${D}${PTEST_PATH}
+ fi
+ do_install_ptest
+ chown -R root:root ${D}${PTEST_PATH}
+}
+
+INSANE_SKIP_${PN}-ptest += "ldflags"
+
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass
new file mode 100644
index 0000000000..e40e55689d
--- /dev/null
+++ b/meta/classes/go.bbclass
@@ -0,0 +1,154 @@
+inherit goarch
+
+GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}"
+
+GOROOT_class-native = "${STAGING_LIBDIR_NATIVE}/go"
+GOROOT_class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
+GOROOT = "${STAGING_LIBDIR}/go"
+export GOROOT
+export GOROOT_FINAL = "${libdir}/go"
+export GOCACHE = "${B}/.cache"
+
+export GOARCH = "${TARGET_GOARCH}"
+export GOOS = "${TARGET_GOOS}"
+export GOHOSTARCH="${BUILD_GOARCH}"
+export GOHOSTOS="${BUILD_GOOS}"
+
+GOARM[export] = "0"
+GOARM_arm_class-target = "${TARGET_GOARM}"
+GOARM_arm_class-target[export] = "1"
+
+GO386[export] = "0"
+GO386_x86_class-target = "${TARGET_GO386}"
+GO386_x86_class-target[export] = "1"
+
+GOMIPS[export] = "0"
+GOMIPS_mips_class-target = "${TARGET_GOMIPS}"
+GOMIPS_mips_class-target[export] = "1"
+
+DEPENDS_GOLANG_class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
+DEPENDS_GOLANG_class-native = "go-native"
+DEPENDS_GOLANG_class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
+
+DEPENDS_append = " ${DEPENDS_GOLANG}"
+
+GO_LINKSHARED ?= "${@'-linkshared' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH_LINK = "${@'-Wl,-rpath-link=${STAGING_DIR_TARGET}${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH = "${@'-r ${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH_class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH_LINK_class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS}"
+GO_LINKMODE ?= ""
+GO_LINKMODE_class-nativesdk = "--linkmode=external"
+GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} -extldflags '${GO_EXTLDFLAGS}'"'
+export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS}"
+export GOPATH_OMIT_IN_ACTIONID ?= "1"
+export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c"
+export GOPTESTFLAGS ?= ""
+GOBUILDFLAGS_prepend_task-compile = "${GO_PARALLEL_BUILD} "
+
+export GO = "${HOST_PREFIX}go"
+GOTOOLDIR = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go/pkg/tool/${BUILD_GOTUPLE}"
+GOTOOLDIR_class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
+export GOTOOLDIR
+
+export CGO_ENABLED ?= "1"
+export CGO_CFLAGS ?= "${CFLAGS}"
+export CGO_CPPFLAGS ?= "${CPPFLAGS}"
+export CGO_CXXFLAGS ?= "${CXXFLAGS}"
+export CGO_LDFLAGS ?= "${LDFLAGS}"
+
+GO_INSTALL ?= "${GO_IMPORT}/..."
+GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
+
+B = "${WORKDIR}/build"
+export GOPATH = "${B}"
+export GOTMPDIR ?= "${WORKDIR}/go-tmp"
+GOTMPDIR[vardepvalue] = ""
+
+python go_do_unpack() {
+ src_uri = (d.getVar('SRC_URI') or "").split()
+ if len(src_uri) == 0:
+ return
+
+ fetcher = bb.fetch2.Fetch(src_uri, d)
+ for url in fetcher.urls:
+ if fetcher.ud[url].type == 'git':
+ if fetcher.ud[url].parm.get('destsuffix') is None:
+ s_dirname = os.path.basename(d.getVar('S'))
+ fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src', d.getVar('GO_IMPORT')) + '/'
+ fetcher.unpack(d.getVar('WORKDIR'))
+}
+
+go_list_packages() {
+ ${GO} list -f '{{.ImportPath}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
+ egrep -v '${GO_INSTALL_FILTEROUT}'
+}
+
+go_list_package_tests() {
+ ${GO} list -f '{{.ImportPath}} {{.TestGoFiles}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
+ grep -v '\[\]$' | \
+ egrep -v '${GO_INSTALL_FILTEROUT}' | \
+ awk '{ print $1 }'
+}
+
+go_do_configure() {
+ ln -snf ${S}/src ${B}/
+}
+do_configure[dirs] =+ "${GOTMPDIR}"
+
+go_do_compile() {
+ export TMPDIR="${GOTMPDIR}"
+ if [ -n "${GO_INSTALL}" ]; then
+ if [ -n "${GO_LINKSHARED}" ]; then
+ ${GO} install ${GOBUILDFLAGS} `go_list_packages`
+ rm -rf ${B}/bin
+ fi
+ ${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} `go_list_packages`
+ fi
+}
+do_compile[dirs] =+ "${GOTMPDIR}"
+do_compile[cleandirs] = "${B}/bin ${B}/pkg"
+
+go_do_install() {
+ install -d ${D}${libdir}/go/src/${GO_IMPORT}
+ tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \
+ tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
+ tar -C ${B} -cf - --exclude-vcs pkg | tar -C ${D}${libdir}/go --no-same-owner -xf -
+
+ if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
+ install -d ${D}${bindir}
+ install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
+ fi
+}
+
+go_stage_testdata() {
+ oldwd="$PWD"
+ cd ${S}/src
+ find ${GO_IMPORT} -depth -type d -name testdata | while read d; do
+ if echo "$d" | grep -q '/vendor/'; then
+ continue
+ fi
+ parent=`dirname $d`
+ install -d ${D}${PTEST_PATH}/$parent
+ cp --preserve=mode,timestamps -R $d ${D}${PTEST_PATH}/$parent/
+ done
+ cd "$oldwd"
+}
+
+EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
+
+FILES_${PN}-dev = "${libdir}/go/src"
+FILES_${PN}-staticdev = "${libdir}/go/pkg"
+
+INSANE_SKIP_${PN} += "ldflags"
+
+# Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips
+# doesn't support -buildmode=pie, so skip the QA checking for mips and its
+# variants.
+python() {
+ if 'mips' in d.getVar('TARGET_ARCH'):
+ d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel")
+ else:
+ d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
+}
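A minimal sketch of a recipe built with this class; the import path and fetch details are illustrative.

    # hypothetical recipe fragment
    GO_IMPORT = "github.com/example/hello"
    SRC_URI = "git://${GO_IMPORT}"
    SRCREV = "${AUTOREV}"
    inherit go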
diff --git a/meta/classes/goarch.bbclass b/meta/classes/goarch.bbclass
new file mode 100644
index 0000000000..1147b6d233
--- /dev/null
+++ b/meta/classes/goarch.bbclass
@@ -0,0 +1,118 @@
+BUILD_GOOS = "${@go_map_os(d.getVar('BUILD_OS'), d)}"
+BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH'), d)}"
+BUILD_GOTUPLE = "${BUILD_GOOS}_${BUILD_GOARCH}"
+HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS'), d)}"
+HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}"
+HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d)}"
+HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+HOST_GOMIPS = "${@go_map_mips(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+HOST_GOARM_class-native = "7"
+HOST_GO386_class-native = "sse2"
+HOST_GOMIPS_class-native = "hardfloat"
+HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
+TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}"
+TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}"
+TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d)}"
+TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+TARGET_GOMIPS = "${@go_map_mips(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+TARGET_GOARM_class-native = "7"
+TARGET_GO386_class-native = "sse2"
+TARGET_GOMIPS_class-native = "hardfloat"
+TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
+GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}"
+
+# Use the MACHINEOVERRIDES to map ARM CPU architecture passed to GO via GOARM.
+# This is combined with *_ARCH to set HOST_GOARM and TARGET_GOARM.
+BASE_GOARM = ''
+BASE_GOARM_armv7ve = '7'
+BASE_GOARM_armv7a = '7'
+BASE_GOARM_armv6 = '6'
+BASE_GOARM_armv5 = '5'
+
+# Go supports dynamic linking on a limited set of architectures.
+# See the supportsDynlink function in go/src/cmd/compile/internal/gc/main.go
+GO_DYNLINK = ""
+GO_DYNLINK_arm = "1"
+GO_DYNLINK_aarch64 = "1"
+GO_DYNLINK_x86 = "1"
+GO_DYNLINK_x86-64 = "1"
+GO_DYNLINK_powerpc64 = "1"
+GO_DYNLINK_class-native = ""
+GO_DYNLINK_class-nativesdk = ""
+
+# define here because everybody inherits this class
+#
+COMPATIBLE_HOST_linux-gnux32 = "null"
+COMPATIBLE_HOST_linux-muslx32 = "null"
+COMPATIBLE_HOST_powerpc = "null"
+COMPATIBLE_HOST_powerpc64 = "null"
+COMPATIBLE_HOST_mipsarchn32 = "null"
+
+ARM_INSTRUCTION_SET_armv4 = "arm"
+ARM_INSTRUCTION_SET_armv5 = "arm"
+ARM_INSTRUCTION_SET_armv6 = "arm"
+
+TUNE_CCARGS_remove = "-march=mips32r2"
+SECURITY_CFLAGS_mipsarch = "${SECURITY_NOPIE_CFLAGS}"
+SECURITY_NOPIE_CFLAGS ??= ""
+
+# go can't be built with ccache:
+# gcc: fatal error: no input files
+CCACHE_DISABLE ?= "1"
+
+def go_map_arch(a, d):
+ import re
+ if re.match('i.86', a):
+ return '386'
+ elif a == 'x86_64':
+ return 'amd64'
+ elif re.match('arm.*', a):
+ return 'arm'
+ elif re.match('aarch64.*', a):
+ return 'arm64'
+ elif re.match('mips64el.*', a):
+ return 'mips64le'
+ elif re.match('mips64.*', a):
+ return 'mips64'
+ elif a == 'mips':
+ return 'mips'
+ elif a == 'mipsel':
+ return 'mipsle'
+    # check the 64el variant first: re.match() anchors at the start of the
+    # string, so the plain '64' pattern would otherwise also match 'ppc64el'
+    elif re.match('p(pc|owerpc)(64el)', a):
+        return 'ppc64le'
+    elif re.match('p(pc|owerpc)(64)', a):
+        return 'ppc64'
+ elif a == 'riscv64':
+ return 'riscv64'
+ else:
+ raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a)
+
+def go_map_arm(a, d):
+ if a.startswith("arm"):
+ return d.getVar('BASE_GOARM')
+ return ''
+
+def go_map_386(a, f, d):
+ import re
+ if re.match('i.86', a):
+ if ('core2' in f) or ('corei7' in f):
+ return 'sse2'
+ else:
+ return '387'
+ return ''
+
+def go_map_mips(a, f, d):
+ import re
+ if a == 'mips' or a == 'mipsel':
+ if 'fpu-hard' in f:
+ return 'hardfloat'
+ else:
+ return 'softfloat'
+ return ''
+
+def go_map_os(o, d):
+ if o.startswith('linux'):
+ return 'linux'
+ return o
+
+
diff --git a/meta/classes/gobject-introspection.bbclass b/meta/classes/gobject-introspection.bbclass
index 37389cbc8b..504f75e28d 100644
--- a/meta/classes/gobject-introspection.bbclass
+++ b/meta/classes/gobject-introspection.bbclass
@@ -6,18 +6,28 @@
# This also sets up autoconf-based recipes to build introspection data (or not),
# depending on distro and machine features (see gobject-introspection-data class).
inherit python3native gobject-introspection-data
+
+# meson: default option name to enable/disable introspection. This matches
+# most projects' configuration. If in doubt, check meson_options.txt in the
+# project's source tree.
+GIR_MESON_OPTION ?= 'introspection'
+GIR_MESON_ENABLE_FLAG ?= 'true'
+GIR_MESON_DISABLE_FLAG ?= 'false'
+
+# Auto enable/disable based on GI_DATA_ENABLED
EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
+EXTRA_OEMESON_prepend_class-target = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} "
# When building native recipes, disable introspection, as it is not necessary,
# pulls in additional dependencies, and makes build times longer
EXTRA_OECONF_prepend_class-native = "--disable-introspection "
EXTRA_OECONF_prepend_class-nativesdk = "--disable-introspection "
-
-UNKNOWN_CONFIGURE_WHITELIST_append = " --enable-introspection --disable-introspection"
+EXTRA_OEMESON_prepend_class-native = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
+EXTRA_OEMESON_prepend_class-nativesdk = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
# Generating introspection data depends on a combination of native and target
# introspection tools, and qemu to run the target tools.
-DEPENDS_append_class-target = " gobject-introspection gobject-introspection-native qemu-native"
+DEPENDS_append_class-target = " gobject-introspection gobject-introspection-native qemu-native prelink-native"
# Even though introspection is disabled on -native, gobject-introspection package is still
# needed for m4 macros.
@@ -25,7 +35,7 @@ DEPENDS_append_class-native = " gobject-introspection-native"
DEPENDS_append_class-nativesdk = " gobject-introspection-native"
# This is used by introspection tools to find .gir includes
-export XDG_DATA_DIRS = "${STAGING_DATADIR}"
+export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
do_configure_prepend_class-target () {
# introspection.m4 pre-packaged with upstream tarballs does not yet
@@ -40,4 +50,4 @@ FILES_${PN}_append = " ${libdir}/girepository-*/*.typelib"
# .gir files go to dev package, as they're needed for developing (but not for
# running) things that depends on introspection.
-FILES_${PN}-dev_append = " ${datadir}/gir-*/*.gir"
+FILES_${PN}-dev_append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
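Where a project's meson_options.txt names the option differently, the new knob can be overridden per recipe; the option name below is hypothetical.

    # hypothetical recipe fragment
    inherit gobject-introspection
    GIR_MESON_OPTION = "gir"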
diff --git a/meta/classes/godep.bbclass b/meta/classes/godep.bbclass
new file mode 100644
index 0000000000..c82401c313
--- /dev/null
+++ b/meta/classes/godep.bbclass
@@ -0,0 +1,8 @@
+DEPENDS_append = " go-dep-native"
+
+do_compile_prepend() {
+ rm -f ${WORKDIR}/build/src/${GO_IMPORT}/Gopkg.toml
+ rm -f ${WORKDIR}/build/src/${GO_IMPORT}/Gopkg.lock
+ ( cd ${WORKDIR}/build/src/${GO_IMPORT} && dep init && dep ensure )
+}
+
diff --git a/meta/classes/grub-efi-cfg.bbclass b/meta/classes/grub-efi-cfg.bbclass
new file mode 100644
index 0000000000..8b5ff20c72
--- /dev/null
+++ b/meta/classes/grub-efi-cfg.bbclass
@@ -0,0 +1,116 @@
+# grub-efi-cfg.bbclass
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# Released under the MIT license (see packages/COPYING)
+
+# Provide grub-efi specific functions for building bootable images.
+
+# External variables
+# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
+# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
+# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
+# ${LABELS} - a list of targets for the automatic config
+# ${APPEND} - an override list of append strings for each label
+# ${GRUB_OPTS} - additional options to add to the config, ';' delimited (optional)
+# ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
+# ${GRUB_ROOT} - grub's root device.
+
+GRUB_SERIAL ?= "console=ttyS0,115200"
+GRUB_CFG_VM = "${S}/grub_vm.cfg"
+GRUB_CFG_LIVE = "${S}/grub_live.cfg"
+GRUB_TIMEOUT ?= "10"
+#FIXME: build this from the machine config
+GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
+
+GRUB_ROOT ?= "${ROOT}"
+APPEND ?= ""
+
+# Uses MACHINE specific KERNEL_IMAGETYPE
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+# Need UUID utility code.
+inherit fs-uuid
+
+python build_efi_cfg() {
+ import sys
+
+ workdir = d.getVar('WORKDIR')
+ if not workdir:
+ bb.error("WORKDIR not defined, unable to package")
+ return
+
+ gfxserial = d.getVar('GRUB_GFXSERIAL') or ""
+
+ labels = d.getVar('LABELS')
+ if not labels:
+ bb.debug(1, "LABELS not defined, nothing to do")
+ return
+
+ if labels == []:
+ bb.debug(1, "No labels, nothing to do")
+ return
+
+ cfile = d.getVar('GRUB_CFG')
+ if not cfile:
+ bb.fatal('Unable to read GRUB_CFG')
+
+ try:
+ cfgfile = open(cfile, 'w')
+ except OSError:
+ bb.fatal('Unable to open %s' % cfile)
+
+ cfgfile.write('# Automatically created by OE\n')
+
+ opts = d.getVar('GRUB_OPTS')
+ if opts:
+ for opt in opts.split(';'):
+ cfgfile.write('%s\n' % opt)
+
+ cfgfile.write('default=%s\n' % (labels.split()[0]))
+
+ timeout = d.getVar('GRUB_TIMEOUT')
+ if timeout:
+ cfgfile.write('timeout=%s\n' % timeout)
+ else:
+ cfgfile.write('timeout=50\n')
+
+ root = d.getVar('GRUB_ROOT')
+ if not root:
+ bb.fatal('GRUB_ROOT not defined')
+
+ if gfxserial == "1":
+ btypes = [ [ " graphics console", "" ],
+ [ " serial console", d.getVar('GRUB_SERIAL') or "" ] ]
+ else:
+ btypes = [ [ "", "" ] ]
+
+ for label in labels.split():
+ localdata = d.createCopy()
+
+ for btype in btypes:
+ cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
+ lb = label
+ if label == "install":
+ lb = "install-efi"
+ kernel = localdata.getVar('KERNEL_IMAGETYPE')
+ cfgfile.write('linux /%s LABEL=%s' % (kernel, lb))
+
+ cfgfile.write(' %s' % replace_rootfs_uuid(d, root))
+
+ append = localdata.getVar('APPEND')
+ initrd = localdata.getVar('INITRD')
+
+ if append:
+ append = replace_rootfs_uuid(d, append)
+ cfgfile.write(' %s' % (append))
+
+ cfgfile.write(' %s' % btype[1])
+ cfgfile.write('\n')
+
+ if initrd:
+ cfgfile.write('initrd /initrd')
+ cfgfile.write('\n}\n')
+
+ cfgfile.close()
+}
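A sketch of the external variables an image or conf file might set for this class; all values are illustrative.

    # hypothetical image configuration
    LABELS = "boot install"
    GRUB_ROOT = "root=/dev/sda2"
    APPEND = "console=ttyS0,115200"
    GRUB_TIMEOUT = "5"
    GRUB_GFXSERIAL = "1"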
diff --git a/meta/classes/grub-efi.bbclass b/meta/classes/grub-efi.bbclass
index 178d0c8350..8fc6999e52 100644
--- a/meta/classes/grub-efi.bbclass
+++ b/meta/classes/grub-efi.bbclass
@@ -1,158 +1,8 @@
-# grub-efi.bbclass
-# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
-#
-# Released under the MIT license (see packages/COPYING)
-
-# Provide grub-efi specific functions for building bootable images.
-
-# External variables
-# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
-# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
-# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
-# ${LABELS} - a list of targets for the automatic config
-# ${APPEND} - an override list of append strings for each label
-# ${GRUB_OPTS} - additional options to add to the config, ';' delimited # (optional)
-# ${GRUB_TIMEOUT} - timeout before executing the deault label (optional)
-# ${GRUB_ROOT} - grub's root device.
-
-do_bootimg[depends] += "${MLPREFIX}grub-efi:do_deploy"
-do_bootdirectdisk[depends] += "${MLPREFIX}grub-efi:do_deploy"
-
-GRUB_SERIAL ?= "console=ttyS0,115200"
-GRUB_CFG_VM = "${S}/grub_vm.cfg"
-GRUB_CFG_LIVE = "${S}/grub_live.cfg"
-GRUB_TIMEOUT ?= "10"
-#FIXME: build this from the machine config
-GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
-
-EFIDIR = "/EFI/BOOT"
-GRUB_ROOT ?= "${ROOT}"
-APPEND ?= ""
-
-# Need UUID utility code.
-inherit fs-uuid
+inherit grub-efi-cfg
+require conf/image-uefi.conf
efi_populate() {
- # DEST must be the root of the image so that EFIDIR is not
- # nested under a top level directory.
- DEST=$1
-
- install -d ${DEST}${EFIDIR}
-
- GRUB_IMAGE="bootia32.efi"
- if [ "${TARGET_ARCH}" = "x86_64" ]; then
- GRUB_IMAGE="bootx64.efi"
- fi
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$GRUB_IMAGE" >${DEST}/startup.nsh
+ efi_populate_common "$1" grub-efi
install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
}
-
-efi_iso_populate() {
- iso_dir=$1
- efi_populate $iso_dir
- # Build a EFI directory to create efi.img
- mkdir -p ${EFIIMGDIR}/${EFIDIR}
- cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
- cp $iso_dir/vmlinuz ${EFIIMGDIR}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$GRUB_IMAGE" > ${EFIIMGDIR}/startup.nsh
- if [ -f "$iso_dir/initrd" ] ; then
- cp $iso_dir/initrd ${EFIIMGDIR}
- fi
-}
-
-efi_hddimg_populate() {
- efi_populate $1
-}
-
-python build_efi_cfg() {
- import sys
-
- workdir = d.getVar('WORKDIR', True)
- if not workdir:
- bb.error("WORKDIR not defined, unable to package")
- return
-
- gfxserial = d.getVar('GRUB_GFXSERIAL', True) or ""
-
- labels = d.getVar('LABELS', True)
- if not labels:
- bb.debug(1, "LABELS not defined, nothing to do")
- return
-
- if labels == []:
- bb.debug(1, "No labels, nothing to do")
- return
-
- cfile = d.getVar('GRUB_CFG', True)
- if not cfile:
- raise bb.build.FuncFailed('Unable to read GRUB_CFG')
-
- try:
- cfgfile = open(cfile, 'w')
- except OSError:
- raise bb.build.FuncFailed('Unable to open %s' % (cfile))
-
- cfgfile.write('# Automatically created by OE\n')
-
- opts = d.getVar('GRUB_OPTS', True)
- if opts:
- for opt in opts.split(';'):
- cfgfile.write('%s\n' % opt)
-
- cfgfile.write('default=%s\n' % (labels.split()[0]))
-
- timeout = d.getVar('GRUB_TIMEOUT', True)
- if timeout:
- cfgfile.write('timeout=%s\n' % timeout)
- else:
- cfgfile.write('timeout=50\n')
-
- root = d.getVar('GRUB_ROOT', True)
- if not root:
- raise bb.build.FuncFailed('GRUB_ROOT not defined')
-
- if gfxserial == "1":
- btypes = [ [ " graphics console", "" ],
- [ " serial console", d.getVar('GRUB_SERIAL', True) or "" ] ]
- else:
- btypes = [ [ "", "" ] ]
-
- for label in labels.split():
- localdata = d.createCopy()
-
- overrides = localdata.getVar('OVERRIDES', True)
- if not overrides:
- raise bb.build.FuncFailed('OVERRIDES not defined')
-
- for btype in btypes:
- localdata.setVar('OVERRIDES', label + ':' + overrides)
- bb.data.update_data(localdata)
-
- cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
- lb = label
- if label == "install":
- lb = "install-efi"
- cfgfile.write('linux /vmlinuz LABEL=%s' % (lb))
-
- cfgfile.write(' %s' % replace_rootfs_uuid(d, root))
-
- append = localdata.getVar('APPEND', True)
- initrd = localdata.getVar('INITRD', True)
-
- if append:
- append = replace_rootfs_uuid(d, append)
- cfgfile.write('%s' % (append))
- cfgfile.write(' %s' % btype[1])
- cfgfile.write('\n')
-
- if initrd:
- cfgfile.write('initrd /initrd')
- cfgfile.write('\n}\n')
-
- cfgfile.close()
-}
diff --git a/meta/classes/gsettings.bbclass b/meta/classes/gsettings.bbclass
index dec5abc026..33afc96a9c 100644
--- a/meta/classes/gsettings.bbclass
+++ b/meta/classes/gsettings.bbclass
@@ -7,31 +7,36 @@
# TODO use a trigger so that this runs once per package operation run
-DEPENDS += "glib-2.0-native"
-
-RDEPENDS_${PN} += "glib-2.0-utils"
-
-FILES_${PN} += "${datadir}/glib-2.0/schemas"
+GSETTINGS_PACKAGE ?= "${PN}"
+
+python __anonymous() {
+ pkg = d.getVar("GSETTINGS_PACKAGE")
+ if pkg:
+ d.appendVar("PACKAGE_WRITE_DEPS", " glib-2.0-native")
+ d.appendVar("RDEPENDS_" + pkg, " ${MLPREFIX}glib-2.0-utils")
+ d.appendVar("FILES_" + pkg, " ${datadir}/glib-2.0/schemas")
+}
gsettings_postinstrm () {
glib-compile-schemas $D${datadir}/glib-2.0/schemas
}
python populate_packages_append () {
- pkg = d.getVar('PN', True)
- bb.note("adding gsettings postinst scripts to %s" % pkg)
-
- postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += d.getVar('gsettings_postinstrm', True)
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- bb.note("adding gsettings postrm scripts to %s" % pkg)
-
- postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
- if not postrm:
- postrm = '#!/bin/sh\n'
- postrm += d.getVar('gsettings_postinstrm', True)
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ pkg = d.getVar('GSETTINGS_PACKAGE')
+ if pkg:
+ bb.note("adding gsettings postinst scripts to %s" % pkg)
+
+ postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('gsettings_postinstrm')
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ bb.note("adding gsettings postrm scripts to %s" % pkg)
+
+ postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += d.getVar('gsettings_postinstrm')
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
}
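With the target package now configurable, a recipe shipping its schemas in a non-default package can redirect the class; the package name is hypothetical.

    # hypothetical recipe fragment
    inherit gsettings
    GSETTINGS_PACKAGE = "${PN}-data"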
diff --git a/meta/classes/gtk-doc.bbclass b/meta/classes/gtk-doc.bbclass
index 297eac63b7..7dd662bf86 100644
--- a/meta/classes/gtk-doc.bbclass
+++ b/meta/classes/gtk-doc.bbclass
@@ -10,47 +10,62 @@
GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
+# meson: default option name to enable/disable gtk-doc. This matches
+# most projects' configuration. If in doubt, check meson_options.txt in the
+# project's source tree.
+GTKDOC_MESON_OPTION ?= 'docs'
+GTKDOC_MESON_ENABLE_FLAG ?= 'true'
+GTKDOC_MESON_DISABLE_FLAG ?= 'false'
+
+# Auto enable/disable based on GTKDOC_ENABLED
EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
'--disable-gtk-doc', d)} "
+EXTRA_OEMESON_prepend_class-target = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
# When building native recipes, disable gtkdoc, as it is not necessary,
# pulls in additional dependencies, and makes build times longer
EXTRA_OECONF_prepend_class-native = "--disable-gtk-doc "
EXTRA_OECONF_prepend_class-nativesdk = "--disable-gtk-doc "
-
-DEPENDS_append_class-target = " gtk-doc-native qemu-native"
+EXTRA_OEMESON_prepend_class-native = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
+EXTRA_OEMESON_prepend_class-nativesdk = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
# Even though gtkdoc is disabled on -native, gtk-doc package is still
# needed for m4 macros.
-DEPENDS_append_class-native = " gtk-doc-native"
-DEPENDS_append_class-nativesdk = " gtk-doc-native"
+DEPENDS_append = " gtk-doc-native"
# The documentation directory, where the infrastructure will be copied.
# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
GTKDOC_DOCDIR ?= "${S}"
-do_configure_prepend () {
- ( cd ${S}; gtkdocize --docdir ${GTKDOC_DOCDIR} || true )
-}
+export STAGING_DIR_HOST
-inherit qemu
+inherit python3native pkgconfig qemu
+DEPENDS_append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
-export STAGING_DIR_HOST
+do_configure_prepend () {
+ # Need to use ||true as this is only needed if configure.ac both exists
+ # and uses GTK_DOC_CHECK.
+ gtkdocize --srcdir ${S} --docdir ${GTKDOC_DOCDIR} || true
+}
do_compile_prepend_class-target () {
-
+ if [ ${GTKDOC_ENABLED} = True ]; then
# Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
# can run target helper binaries through that.
- qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['\$GIR_EXTRA_LIBS_PATH','$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
+ qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['\\$GIR_EXTRA_LIBS_PATH','$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
cat > ${B}/gtkdoc-qemuwrapper << EOF
#!/bin/sh
# Use a modules directory which doesn't exist so we don't load random things
# which may then get deleted (or their dependencies) and potentially segfault
export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
+GIR_EXTRA_LIBS_PATH=\`find ${B} -name *.so -printf "%h\n"|sort|uniq| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
-if test -d ".libs"; then
+# meson sets this wrongly (only to the libs in the build dir); qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
+unset LD_LIBRARY_PATH
+
+if [ -d ".libs" ]; then
$qemu_binary ".libs/\$@"
else
$qemu_binary "\$@"
@@ -63,7 +78,5 @@ if [ \$? -ne 0 ]; then
fi
EOF
chmod +x ${B}/gtkdoc-qemuwrapper
+ fi
}
-
-
-inherit pkgconfig
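A sketch of turning documentation builds on, and of adapting the meson option name for a project that diverges; values are illustrative. Note that GTKDOC_ENABLED additionally requires qemu-usermode in MACHINE_FEATURES.

    # distro/local.conf sketch
    DISTRO_FEATURES_append = " api-documentation"
    # hypothetical per-recipe override for a differently named meson option
    GTKDOC_MESON_OPTION = "gtk_doc"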
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
index 0f1052b08c..91cb4ad409 100644
--- a/meta/classes/gtk-icon-cache.bbclass
+++ b/meta/classes/gtk-icon-cache.bbclass
@@ -1,10 +1,12 @@
FILES_${PN} += "${datadir}/icons/hicolor"
-DEPENDS += "${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk-icon-utils-native"
+DEPENDS +=" ${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk+3-native"
+
+PACKAGE_WRITE_DEPS += "gtk+3-native gdk-pixbuf-native"
gtk_icon_cache_postinst() {
if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} \
+ $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
mlprefix=${MLPREFIX} \
libdir_native=${libdir_native}
else
@@ -22,7 +24,7 @@ fi
gtk_icon_cache_postrm() {
if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} \
+ $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
mlprefix=${MLPREFIX} \
libdir=${libdir}
else
@@ -35,11 +37,11 @@ fi
}
python populate_packages_append () {
- packages = d.getVar('PACKAGES', True).split()
- pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES').split()
+ pkgdest = d.getVar('PKGDEST')
for pkg in packages:
- icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir', True))
+ icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir'))
if not os.path.exists(icon_dir):
continue
@@ -49,16 +51,16 @@ python populate_packages_append () {
bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('gtk_icon_cache_postinst', True)
+ postinst += d.getVar('gtk_icon_cache_postinst')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('gtk_icon_cache_postrm', True)
+ postrm += d.getVar('gtk_icon_cache_postrm')
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
diff --git a/meta/classes/gtk-immodules-cache.bbclass b/meta/classes/gtk-immodules-cache.bbclass
index 3a4634c4ed..9bb0af8b26 100644
--- a/meta/classes/gtk-immodules-cache.bbclass
+++ b/meta/classes/gtk-immodules-cache.bbclass
@@ -2,7 +2,7 @@
#
# Usage: Set GTKIMMODULES_PACKAGES to the packages that needs to update the inputmethod modules
-DEPENDS =+ "qemu-native"
+PACKAGE_WRITE_DEPS += "qemu-native"
inherit qemu
@@ -10,72 +10,58 @@ GTKIMMODULES_PACKAGES ?= "${PN}"
gtk_immodule_cache_postinst() {
if [ "x$D" != "x" ]; then
- if [ -x $D${bindir}/gtk-query-immodules-2.0 ]; then
- IMFILES=$(ls $D${libdir}/gtk-2.0/*/immodules/*.so)
- ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-2.0')} \
- $IMFILES > $D${libdir}/gtk-2.0/2.10.0/immodules.cache 2>/dev/null &&
- sed -i -e "s:$D::" $D${libdir}/gtk-2.0/2.10.0/immodules.cache
- fi
- if [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
- IMFILES=$(ls $D${libdir}/gtk-3.0/*/immodules/*.so)
- ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-3.0')} \
- $IMFILES > $D${libdir}/gtk-3.0/3.0.0/immodules.cache 2>/dev/null &&
- sed -i -e "s:$D::" $D${libdir}/gtk-3.0/3.0.0/immodules.cache
- fi
-
- [ $? -ne 0 ] && exit 1
- exit 0
-fi
-if [ ! -z `which gtk-query-immodules-2.0` ]; then
- gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
-fi
-if [ ! -z `which gtk-query-immodules-3.0` ]; then
- gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
+ $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
+ mlprefix=${MLPREFIX} \
+ binprefix=${MLPREFIX} \
+ libdir=${libdir} \
+ libexecdir=${libexecdir} \
+ base_libdir=${base_libdir} \
+ bindir=${bindir}
+else
+ if [ ! -z `which gtk-query-immodules-2.0` ]; then
+ gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
+ fi
+ if [ ! -z `which gtk-query-immodules-3.0` ]; then
+ gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
+ fi
fi
}
gtk_immodule_cache_postrm() {
if [ "x$D" != "x" ]; then
- if [ -x $D${bindir}/gtk-query-immodules-2.0 ]; then
- IMFILES=$(ls $D${libdir}/gtk-2.0/*/immodules/*.so)
- ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-2.0')} \
- $IMFILES > $D${libdir}/gtk-2.0/2.10.0/immodules.cache 2>/dev/null &&
- sed -i -e "s:$D::" $D${libdir}/gtk-2.0/2.10.0/immodules.cache
- fi
- if [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
- IMFILES=$(ls $D${libdir}/gtk-3.0/*/immodules/*.so)
- ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-3.0')} \
- $IMFILES > $D${libdir}/gtk-3.0/3.0.0/immodules.cache 2>/dev/null &&
- sed -i -e "s:$D::" $D${libdir}/gtk-3.0/3.0.0/immodules.cache
- fi
-
- [ $? -ne 0 ] && exit 1
- exit 0
-fi
-if [ ! -z `which gtk-query-immodules-2.0` ]; then
- gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
-fi
-if [ ! -z `which gtk-query-immodules-3.0` ]; then
- gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
+ $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
+ mlprefix=${MLPREFIX} \
+ binprefix=${MLPREFIX} \
+ libdir=${libdir} \
+ libexecdir=${libexecdir} \
+ base_libdir=${base_libdir} \
+ bindir=${bindir}
+else
+ if [ ! -z `which gtk-query-immodules-2.0` ]; then
+ gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
+ fi
+ if [ ! -z `which gtk-query-immodules-3.0` ]; then
+ gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
+ fi
fi
}
python populate_packages_append () {
- gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES', True).split()
+ gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES').split()
for pkg in gtkimmodules_pkgs:
bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('gtk_immodule_cache_postinst', True)
+ postinst += d.getVar('gtk_immodule_cache_postinst')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('gtk_immodule_cache_postrm', True)
+ postrm += d.getVar('gtk_immodule_cache_postrm')
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
@@ -84,6 +70,6 @@ python __anonymous() {
gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES', False)
if not gtkimmodules_check:
bb_filename = d.getVar('FILE', False)
- raise bb.build.FuncFailed("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
+ bb.fatal("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
}
diff --git a/meta/classes/gummiboot.bbclass b/meta/classes/gummiboot.bbclass
deleted file mode 100644
index e6eba17da1..0000000000
--- a/meta/classes/gummiboot.bbclass
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright (C) 2014 Intel Corporation
-#
-# Released under the MIT license (see COPYING.MIT)
-
-# gummiboot.bbclass - equivalent of grub-efi.bbclass
-# Set EFI_PROVIDER = "gummiboot" to use gummiboot on your live images instead of grub-efi
-# (images built by image-live.bbclass or image-vm.bbclass)
-
-do_bootimg[depends] += "${MLPREFIX}gummiboot:do_deploy"
-do_bootdirectdisk[depends] += "${MLPREFIX}gummiboot:do_deploy"
-
-EFIDIR = "/EFI/BOOT"
-
-GUMMIBOOT_CFG ?= "${S}/loader.conf"
-GUMMIBOOT_ENTRIES ?= ""
-GUMMIBOOT_TIMEOUT ?= "10"
-
-# Need UUID utility code.
-inherit fs-uuid
-
-efi_populate() {
- DEST=$1
-
- EFI_IMAGE="gummibootia32.efi"
- DEST_EFI_IMAGE="bootia32.efi"
- if [ "${TARGET_ARCH}" = "x86_64" ]; then
- EFI_IMAGE="gummibootx64.efi"
- DEST_EFI_IMAGE="bootx64.efi"
- fi
-
- install -d ${DEST}${EFIDIR}
- # gummiboot requires these paths for configuration files
- # they are not customizable so no point in new vars
- install -d ${DEST}/loader
- install -d ${DEST}/loader/entries
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$DEST_EFI_IMAGE" >${DEST}/startup.nsh
- install -m 0644 ${GUMMIBOOT_CFG} ${DEST}/loader/loader.conf
- for i in ${GUMMIBOOT_ENTRIES}; do
- install -m 0644 ${i} ${DEST}/loader/entries
- done
-}
-
-efi_iso_populate() {
- iso_dir=$1
- efi_populate $iso_dir
- mkdir -p ${EFIIMGDIR}/${EFIDIR}
- cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
- cp $iso_dir/vmlinuz ${EFIIMGDIR}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- echo "fs0:${EFIPATH}\\${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
- if [ -f "$iso_dir/initrd" ] ; then
- cp $iso_dir/initrd ${EFIIMGDIR}
- fi
-}
-
-efi_hddimg_populate() {
- efi_populate $1
-}
-
-python build_efi_cfg() {
- s = d.getVar("S", True)
- labels = d.getVar('LABELS', True)
- if not labels:
- bb.debug(1, "LABELS not defined, nothing to do")
- return
-
- if labels == []:
- bb.debug(1, "No labels, nothing to do")
- return
-
- cfile = d.getVar('GUMMIBOOT_CFG', True)
- try:
- cfgfile = open(cfile, 'w')
- except OSError:
- raise bb.build.FuncFailed('Unable to open %s' % (cfile))
-
- cfgfile.write('# Automatically created by OE\n')
- cfgfile.write('default %s\n' % (labels.split()[0]))
- timeout = d.getVar('GUMMIBOOT_TIMEOUT', True)
- if timeout:
- cfgfile.write('timeout %s\n' % timeout)
- else:
- cfgfile.write('timeout 10\n')
- cfgfile.close()
-
- for label in labels.split():
- localdata = d.createCopy()
-
- overrides = localdata.getVar('OVERRIDES', True)
- if not overrides:
- raise bb.build.FuncFailed('OVERRIDES not defined')
-
- entryfile = "%s/%s.conf" % (s, label)
- d.appendVar("GUMMIBOOT_ENTRIES", " " + entryfile)
- try:
- entrycfg = open(entryfile, "w")
- except OSError:
- raise bb.build.FuncFailed('Unable to open %s' % (entryfile))
- localdata.setVar('OVERRIDES', label + ':' + overrides)
- bb.data.update_data(localdata)
-
- entrycfg.write('title %s\n' % label)
- entrycfg.write('linux /vmlinuz\n')
-
- append = localdata.getVar('APPEND', True)
- initrd = localdata.getVar('INITRD', True)
-
- if initrd:
- entrycfg.write('initrd /initrd\n')
- lb = label
- if label == "install":
- lb = "install-efi"
- entrycfg.write('options LABEL=%s ' % lb)
- if append:
- append = replace_rootfs_uuid(d, append)
- entrycfg.write('%s' % append)
- entrycfg.write('\n')
- entrycfg.close()
-}
diff --git a/meta/classes/gzipnative.bbclass b/meta/classes/gzipnative.bbclass
deleted file mode 100644
index 326cbbb6f6..0000000000
--- a/meta/classes/gzipnative.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-EXTRANATIVEPATH += "pigz-native gzip-native"
-DEPENDS += "gzip-native"
-
-# tar may get run by do_unpack or do_populate_lic which could call gzip
-do_unpack[depends] += "gzip-native:do_populate_sysroot"
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
index e1c06c49cb..bc3d6f4cc8 100644
--- a/meta/classes/icecc.bbclass
+++ b/meta/classes/icecc.bbclass
@@ -28,70 +28,106 @@
#Error checking is kept to a minimum, so double-check any parameters you pass to the class
###########################################################################################
-BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL ICECC_PATH ICECC_ENV_EXEC"
+BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL \
+ ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL ICECC_PATH ICECC_ENV_EXEC \
+ ICECC_CARET_WORKAROUND ICECC_CFLAGS ICECC_ENV_VERSION \
+ ICECC_DEBUG ICECC_LOGFILE ICECC_REPEAT_RATE ICECC_PREFERRED_HOST \
+ ICECC_CLANG_REMOTE_CPP ICECC_IGNORE_UNVERIFIED ICECC_TEST_SOCKET \
+ ICECC_ENV_DEBUG ICECC_SYSTEM_PACKAGE_BL ICECC_SYSTEM_CLASS_BL \
+ ICECC_REMOTE_CPP \
+ "
ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
+HOSTTOOLS_NONFATAL += "icecc patchelf"
+
+# This version can be incremented when changes are made to the environment that
+# invalidate the version on the compile nodes. Changing it will cause a new
+# environment to be created.
+#
+# A useful thing to do for testing Icecream changes locally is to add a
+# subversion in local.conf:
+# ICECC_ENV_VERSION_append = "-my-ver-1"
+ICECC_ENV_VERSION = "2"
+
+# Default to disabling the caret workaround. If set to "1" in local.conf, icecc
+# will locally recompile any files that have warnings, which can adversely
+# affect performance.
+#
+# See: https://github.com/icecc/icecream/issues/190
+export ICECC_CARET_WORKAROUND ??= "0"
+
+export ICECC_REMOTE_CPP ??= "0"
+
+ICECC_CFLAGS = ""
+CFLAGS += "${ICECC_CFLAGS}"
+CXXFLAGS += "${ICECC_CFLAGS}"
+
+# Debug flags when generating environments
+ICECC_ENV_DEBUG ??= ""
+
+# The "system" recipe blacklist contains a list of packages that cannot
+# distribute compile tasks for one reason or another. When adding a new
+# entry, please document why (how it failed) so that we can re-evaluate it
+# later, e.g. when there is a new version.
+#
+# libgcc-initial - fails with CPP sanity check error if host sysroot contains
+# cross gcc built for another target tune/variant
+# pixman - prng_state: TLS reference mismatches non-TLS reference, possibly due to
+# pragma omp threadprivate(prng_state)
+# systemtap - _HelperSDT.c undefs macros and uses the identifiers in macros emitting
+# inline assembly
+# target-sdk-provides-dummy - ${HOST_PREFIX} is empty which triggers the "NULL
+# prefix" error.
+ICECC_SYSTEM_PACKAGE_BL += "\
+ libgcc-initial \
+ pixman \
+ systemtap \
+ target-sdk-provides-dummy \
+ "
+
+# "system" classes that should be blacklisted. When adding a new entry, please
+# document why (how it failed) so that we can re-evaluate it later.
+#
+# image - Images aren't compiled, but the testing framework for images captures
+# PARALLEL_MAKE as part of the test environment. Many tests won't use
+# icecream, but leaving the high level of parallelism in place can cause them
+# to consume an unnecessary amount of resources.
+ICECC_SYSTEM_CLASS_BL += "\
+ image \
+ "
+
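
For reference, a minimal local.conf sketch of the user-facing counterparts to
these "system" lists (the recipe and class names below are illustrative
placeholders, not recommendations):

    ICECC_USER_CLASS_BL = "native"
    ICECC_USER_PACKAGE_BL = "linux-yocto"
    ICECC_USER_PACKAGE_WL = "busybox"
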
def icecc_dep_prepend(d):
# INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
# we need that built is the responsibility of the patch function / class, not
# the application.
- if not d.getVar('INHIBIT_DEFAULT_DEPS', False):
+ if not d.getVar('INHIBIT_DEFAULT_DEPS'):
return "icecc-create-env-native"
return ""
DEPENDS_prepend += "${@icecc_dep_prepend(d)} "
+get_cross_kernel_cc[vardepsexclude] += "KERNEL_CC"
def get_cross_kernel_cc(bb,d):
- kernel_cc = d.getVar('KERNEL_CC', False)
+ if not icecc_is_kernel(bb, d):
+ return None
# evaluate the expression by the shell if necessary
+ kernel_cc = d.getVar('KERNEL_CC')
if '`' in kernel_cc or '$(' in kernel_cc:
- kernel_cc = os.popen("echo %s" % kernel_cc).read()[:-1]
+ import subprocess
+ kernel_cc = subprocess.check_output("echo %s" % kernel_cc, shell=True).decode("utf-8")[:-1]
- kernel_cc = d.expand(kernel_cc)
kernel_cc = kernel_cc.replace('ccache', '').strip()
kernel_cc = kernel_cc.split(' ')[0]
kernel_cc = kernel_cc.strip()
return kernel_cc
def get_icecc(d):
- return d.getVar('ICECC_PATH', False) or bb.utils.which(os.getenv("PATH"), "icecc")
-
-def create_path(compilers, bb, d):
- """
- Create Symlinks for the icecc in the staging directory
- """
- staging = os.path.join(d.expand('${STAGING_BINDIR}'), "ice")
- if icecc_is_kernel(bb, d):
- staging += "-kernel"
-
- #check if the icecc path is set by the user
- icecc = get_icecc(d)
-
- # Create the dir if necessary
- try:
- os.stat(staging)
- except:
- try:
- os.makedirs(staging)
- except:
- pass
-
- for compiler in compilers:
- gcc_path = os.path.join(staging, compiler)
- try:
- os.stat(gcc_path)
- except:
- try:
- os.symlink(icecc, gcc_path)
- except:
- pass
-
- return staging
+ return d.getVar('ICECC_PATH') or bb.utils.which(os.getenv("PATH"), "icecc")
def use_icecc(bb,d):
- if d.getVar('ICECC_DISABLED', False) == "1":
+ if d.getVar('ICECC_DISABLED') == "1":
# don't even try it, when explicitly disabled
return "no"
@@ -99,10 +135,24 @@ def use_icecc(bb,d):
if icecc_is_allarch(bb, d):
return "no"
- pn = d.getVar('PN', True)
+ if icecc_is_cross_canadian(bb, d):
+ return "no"
- system_class_blacklist = []
- user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL', False) or "none").split()
+ if d.getVar('INHIBIT_DEFAULT_DEPS', False):
+ # We don't have a compiler, so no icecc
+ return "no"
+
+ pn = d.getVar('PN')
+ bpn = d.getVar('BPN')
+
+ # Blacklist/whitelist checks are made against BPN, because there is a good
+ # chance that if icecc should be skipped for a recipe, it should be skipped
+ # for all the variants of that recipe. PN is still checked in case a user
+ # specified a more specific recipe.
+ check_pn = set([pn, bpn])
+
+ system_class_blacklist = (d.getVar('ICECC_SYSTEM_CLASS_BL') or "").split()
+ user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
package_class_blacklist = system_class_blacklist + user_class_blacklist
for black in package_class_blacklist:
@@ -110,35 +160,27 @@ def use_icecc(bb,d):
bb.debug(1, "%s: class %s found in blacklist, disable icecc" % (pn, black))
return "no"
- # "system" recipe blacklist contains a list of packages that can not distribute compile tasks
- # for one reason or the other
- # this is the old list (which doesn't seem to be valid anymore, because I was able to build
- # all these with icecc enabled)
- # system_package_blacklist = [ "uclibc", "glibc", "gcc", "bind", "u-boot", "dhcp-forwarder", "enchant", "connman", "orbit2" ]
- # when adding new entry, please document why (how it failed) so that we can re-evaluate it later
- # e.g. when there is new version
- # building libgcc-initial with icecc fails with CPP sanity check error if host sysroot contains cross gcc built for another target tune/variant
- system_package_blacklist = ["libgcc-initial"]
- user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL', False) or "").split()
- user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL', False) or "").split()
+ system_package_blacklist = (d.getVar('ICECC_SYSTEM_PACKAGE_BL') or "").split()
+ user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split()
+ user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL') or "").split()
package_blacklist = system_package_blacklist + user_package_blacklist
- if pn in package_blacklist:
+ if check_pn & set(package_blacklist):
bb.debug(1, "%s: found in blacklist, disable icecc" % pn)
return "no"
- if pn in user_package_whitelist:
+ if check_pn & set(user_package_whitelist):
bb.debug(1, "%s: found in whitelist, enable icecc" % pn)
return "yes"
- if d.getVar('PARALLEL_MAKE', False) == "":
+ if d.getVar('PARALLEL_MAKE') == "":
bb.debug(1, "%s: has empty PARALLEL_MAKE, disable icecc" % pn)
return "no"
return "yes"
def icecc_is_allarch(bb, d):
- return d.getVar("PACKAGE_ARCH", True) == "all" or bb.data.inherits_class('allarch', d)
+ return d.getVar("PACKAGE_ARCH") == "all"
def icecc_is_kernel(bb, d):
return \
@@ -149,16 +191,27 @@ def icecc_is_native(bb, d):
bb.data.inherits_class("cross", d) or \
bb.data.inherits_class("native", d);
+def icecc_is_cross_canadian(bb, d):
+ return bb.data.inherits_class("cross-canadian", d)
+
+def icecc_dir(bb, d):
+ return d.expand('${TMPDIR}/work-shared/ice')
+
# Don't pollute allarch signatures with TARGET_FPU
icecc_version[vardepsexclude] += "TARGET_FPU"
def icecc_version(bb, d):
if use_icecc(bb, d) == "no":
return ""
- parallel = d.getVar('ICECC_PARALLEL_MAKE', False) or ""
- if not d.getVar('PARALLEL_MAKE', False) == "" and parallel:
+ parallel = d.getVar('ICECC_PARALLEL_MAKE') or ""
+ if not d.getVar('PARALLEL_MAKE') == "" and parallel:
d.setVar("PARALLEL_MAKE", parallel)
+ # Disable showing the caret in the GCC compiler output if the workaround is
+ # disabled
+ if d.getVar('ICECC_CARET_WORKAROUND') == '0':
+ d.setVar('ICECC_CFLAGS', '-fno-diagnostics-show-caret')
+
if icecc_is_native(bb, d):
archive_name = "local-host-env"
elif d.expand('${HOST_PREFIX}') == "":
@@ -167,14 +220,18 @@ def icecc_version(bb, d):
prefix = d.expand('${HOST_PREFIX}' )
distro = d.expand('${DISTRO}')
target_sys = d.expand('${TARGET_SYS}')
- float = d.getVar('TARGET_FPU', False) or "hard"
+ float = d.getVar('TARGET_FPU') or "hard"
archive_name = prefix + distro + "-" + target_sys + "-" + float
if icecc_is_kernel(bb, d):
archive_name += "-kernel"
import socket
- ice_dir = d.expand('${STAGING_DIR_NATIVE}${prefix_native}')
- tar_file = os.path.join(ice_dir, 'ice', archive_name + "-@VERSION@-" + socket.gethostname() + '.tar.gz')
+ ice_dir = icecc_dir(bb, d)
+ tar_file = os.path.join(ice_dir, "{archive}-{version}-@VERSION@-{hostname}.tar.gz".format(
+ archive=archive_name,
+ version=d.getVar('ICECC_ENV_VERSION'),
+ hostname=socket.gethostname()
+ ))
return tar_file
@@ -183,46 +240,70 @@ def icecc_path(bb,d):
# don't create unnecessary directories when icecc is disabled
return
+ staging = os.path.join(d.expand('${STAGING_BINDIR}'), "ice")
if icecc_is_kernel(bb, d):
- return create_path( [get_cross_kernel_cc(bb,d), ], bb, d)
+ staging += "-kernel"
- else:
- prefix = d.expand('${HOST_PREFIX}')
- return create_path( [prefix+"gcc", prefix+"g++"], bb, d)
+ return staging
def icecc_get_external_tool(bb, d, tool):
external_toolchain_bindir = d.expand('${EXTERNAL_TOOLCHAIN}${bindir_cross}')
target_prefix = d.expand('${TARGET_PREFIX}')
return os.path.join(external_toolchain_bindir, '%s%s' % (target_prefix, tool))
+def icecc_get_tool_link(tool, d):
+ import subprocess
+ try:
+ return subprocess.check_output("readlink -f %s" % tool, shell=True).decode("utf-8")[:-1]
+ except subprocess.CalledProcessError as e:
+ bb.note("icecc: one of the tools probably disappeared during recipe parsing, cmd readlink -f %s returned %d:\n%s" % (tool, e.returncode, e.output.decode("utf-8")))
+ return tool
+
+def icecc_get_path_tool(tool, d):
+ # This is a little ugly, but we want to make sure we add an actual
+ # compiler to the toolchain, not ccache. Some distros (e.g. Fedora)
+ # have ccache enabled by default via symlinks in PATH, meaning ccache
+ # would be found first when looking for the compiler.
+ paths = os.getenv("PATH").split(':')
+ while True:
+ p, hist = bb.utils.which(':'.join(paths), tool, history=True)
+ if not p or os.path.basename(icecc_get_tool_link(p, d)) != 'ccache':
+ return p
+ paths = paths[len(hist):]
+
+ return ""
+
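
A note on the loop above: with history=True, bb.utils.which also returns one
candidate path per directory searched, so slicing paths by len(hist) resumes
the search in the directory after a ccache hit. Illustrative values, assuming
a Fedora-style PATH:

    # p, hist = bb.utils.which("/usr/lib64/ccache:/usr/bin", "gcc", history=True)
    # p    -> "/usr/lib64/ccache/gcc"    (a symlink to ccache)
    # hist -> ["/usr/lib64/ccache/gcc"]  (one entry per directory tried)
    # paths[len(hist):] -> ["/usr/bin"]  (retry without the ccache directory)
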
# Don't pollute native signatures with target TUNE_PKGARCH through STAGING_BINDIR_TOOLCHAIN
icecc_get_tool[vardepsexclude] += "STAGING_BINDIR_TOOLCHAIN"
def icecc_get_tool(bb, d, tool):
if icecc_is_native(bb, d):
- return bb.utils.which(os.getenv("PATH"), tool)
+ return icecc_get_path_tool(tool, d)
elif icecc_is_kernel(bb, d):
- return bb.utils.which(os.getenv("PATH"), get_cross_kernel_cc(bb, d))
+ return icecc_get_path_tool(get_cross_kernel_cc(bb, d), d)
else:
ice_dir = d.expand('${STAGING_BINDIR_TOOLCHAIN}')
target_sys = d.expand('${TARGET_SYS}')
- tool_bin = os.path.join(ice_dir, "%s-%s" % (target_sys, tool))
- if os.path.isfile(tool_bin):
- return tool_bin
- else:
- external_tool_bin = icecc_get_external_tool(bb, d, tool)
- if os.path.isfile(external_tool_bin):
- return external_tool_bin
- else:
- return ""
+ for p in ice_dir.split(':'):
+ tool_bin = os.path.join(p, "%s-%s" % (target_sys, tool))
+ if os.path.isfile(tool_bin):
+ return tool_bin
+ external_tool_bin = icecc_get_external_tool(bb, d, tool)
+ if os.path.isfile(external_tool_bin):
+ return external_tool_bin
+ return ""
def icecc_get_and_check_tool(bb, d, tool):
# Check that g++ or gcc is not a symbolic link to the icecc binary in
# PATH, or the icecc-create-env script will silently create an invalid
# compiler environment package.
t = icecc_get_tool(bb, d, tool)
- if t and os.popen("readlink -f %s" % t).read()[:-1] == get_icecc(d):
- bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, get_icecc(d)))
- return ""
+ if t:
+ link_path = icecc_get_tool_link(t, d)
+ if link_path == get_icecc(d):
+ bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, link_path))
+ return ""
+ else:
+ return t
else:
return t
@@ -245,6 +326,7 @@ def set_icecc_env():
# dummy python version of set_icecc_env
return
+set_icecc_env[vardepsexclude] += "KERNEL_CC"
set_icecc_env() {
if [ "${@use_icecc(bb, d)}" = "no" ]
then
@@ -264,6 +346,16 @@ set_icecc_env() {
return
fi
+ ICECC_BIN="${@get_icecc(d)}"
+ if [ -z "${ICECC_BIN}" ]; then
+ bbwarn "Cannot use icecc: icecc binary not found"
+ return
+ fi
+ if [ -z "$(which patchelf patchelf-uninative)" ]; then
+ bbwarn "Cannot use icecc: patchelf not found"
+ return
+ fi
+
ICECC_CC="${@icecc_get_and_check_tool(bb, d, "gcc")}"
ICECC_CXX="${@icecc_get_and_check_tool(bb, d, "g++")}"
# cannot use icecc_get_and_check_tool here because it assumes "as" has no target_sys prefix
@@ -282,6 +374,26 @@ set_icecc_env() {
return
fi
+ # Create symlinks to icecc and wrapper-scripts in the recipe-sysroot directory
+ mkdir -p $ICE_PATH/symlinks
+ if [ -n "${KERNEL_CC}" ]; then
+ compilers="${@get_cross_kernel_cc(bb,d)}"
+ else
+ compilers="${HOST_PREFIX}gcc ${HOST_PREFIX}g++"
+ fi
+ for compiler in $compilers; do
+ ln -sf $ICECC_BIN $ICE_PATH/symlinks/$compiler
+ rm -f $ICE_PATH/$compiler
+ cat <<-__EOF__ > $ICE_PATH/$compiler
+ #!/bin/sh -e
+ export ICECC_VERSION=$ICECC_VERSION
+ export ICECC_CC=$ICECC_CC
+ export ICECC_CXX=$ICECC_CXX
+ $ICE_PATH/symlinks/$compiler "\$@"
+ __EOF__
+ chmod 775 $ICE_PATH/$compiler
+ done
+
ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
# for target recipes should return something like:
# /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
@@ -298,10 +410,10 @@ set_icecc_env() {
# the ICECC_VERSION generation step must be locked by a mutex
# in order to prevent race conditions
if flock -n "${ICECC_VERSION}.lock" \
- ${ICECC_ENV_EXEC} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
+ ${ICECC_ENV_EXEC} ${ICECC_ENV_DEBUG} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
then
touch "${ICECC_VERSION}.done"
- elif [ ! wait_for_file "${ICECC_VERSION}.done" 30 ]
+ elif ! wait_for_file "${ICECC_VERSION}.done" 30
then
# locking failed so wait for ${ICECC_VERSION}.done to appear
bbwarn "Timeout waiting for ${ICECC_VERSION}.done"
@@ -309,11 +421,15 @@ set_icecc_env() {
fi
fi
- export ICECC_VERSION ICECC_CC ICECC_CXX
- export PATH="$ICE_PATH:$PATH"
+ # Don't let ccache find the icecream compiler links that have been created, otherwise
+ # it can end up invoking icecream recursively.
export CCACHE_PATH="$PATH"
+ export CCACHE_DISABLE="1"
+
+ export PATH="$ICE_PATH:$PATH"
- bbnote "Using icecc"
+ bbnote "Using icecc path: $ICE_PATH"
+ bbnote "Using icecc tarball: $ICECC_VERSION"
}
do_configure_prepend() {
@@ -331,3 +447,13 @@ do_compile_kernelmodules_prepend() {
do_install_prepend() {
set_icecc_env
}
+
+# IceCream is not (currently) supported in the extensible SDK
+ICECC_SDK_HOST_TASK = "nativesdk-icecc-toolchain"
+ICECC_SDK_HOST_TASK_task-populate-sdk-ext = ""
+
+# Don't include IceCream in uninative tarball
+ICECC_SDK_HOST_TASK_pn-uninative-tarball = ""
+
+# Add the toolchain scripts to the SDK
+TOOLCHAIN_HOST_TASK_append = " ${ICECC_SDK_HOST_TASK}"
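
To put the class to use, a minimal local.conf sketch (the -j value is an
example; ICECC_PARALLEL_MAKE is applied by icecc_version() above, and
"my-recipe" is a placeholder):

    INHERIT += "icecc"
    ICECC_PARALLEL_MAKE = "-j 24"
    # optionally opt individual recipes out:
    ICECC_USER_PACKAGE_BL += "my-recipe"
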
diff --git a/meta/classes/image-buildinfo.bbclass b/meta/classes/image-buildinfo.bbclass
index 83d0db37d8..94c585d4cd 100644
--- a/meta/classes/image-buildinfo.bbclass
+++ b/meta/classes/image-buildinfo.bbclass
@@ -12,14 +12,16 @@
# Desired variables to display
IMAGE_BUILDINFO_VARS ?= "DISTRO DISTRO_VERSION"
+# Desired location of the output file in the image.
+IMAGE_BUILDINFO_FILE ??= "${sysconfdir}/build"
+
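+# A hedged local.conf sketch of these two knobs (values are examples):
+#
+#   IMAGE_BUILDINFO_VARS = "DISTRO DISTRO_VERSION MACHINE"
+#   IMAGE_BUILDINFO_FILE = "${sysconfdir}/buildinfo"
+#
+# Setting IMAGE_BUILDINFO_FILE to "" skips writing the file entirely, per
+# the early return added to buildinfo() below.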
# From buildhistory.bbclass
-def image_buildinfo_outputvars(vars, listvars, d):
+def image_buildinfo_outputvars(vars, d):
vars = vars.split()
- listvars = listvars.split()
ret = ""
for var in vars:
- value = d.getVar(var, True) or ""
- if (d.getVarFlag(var, 'type', True) == "list"):
+ value = d.getVar(var) or ""
+ if (d.getVarFlag(var, 'type') == "list"):
value = oe.utils.squashspaces(value)
ret += "%s = %s\n" % (var, value)
return ret.rstrip('\n')
@@ -28,7 +30,9 @@ def image_buildinfo_outputvars(vars, listvars, d):
def get_layer_git_status(path):
import subprocess
try:
- subprocess.check_output("cd %s; PSEUDO_UNLOAD=1 git diff --quiet --no-ext-diff" % path,
+ subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
+ git diff --quiet --no-ext-diff
+ git diff --quiet --no-ext-diff --cached""" % path,
shell=True,
stderr=subprocess.STDOUT)
return ""
@@ -40,7 +44,7 @@ def get_layer_git_status(path):
# Returns layer revisions along with their respective status
def get_layer_revs(d):
- layers = (d.getVar("BBLAYERS", True) or "").split()
+ layers = (d.getVar("BBLAYERS") or "").split()
medadata_revs = ["%-17s = %s:%s %s" % (os.path.basename(i), \
base_get_metadata_git_branch(i, None).strip(), \
base_get_metadata_git_revision(i, None), \
@@ -50,16 +54,17 @@ def get_layer_revs(d):
def buildinfo_target(d):
# Get context
- if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
# Single and list variables to be read
- vars = (d.getVar("IMAGE_BUILDINFO_VARS", True) or "")
- listvars = (d.getVar("IMAGE_BUILDINFO_LVARS", True) or "")
- return image_buildinfo_outputvars(vars, listvars, d)
+ vars = (d.getVar("IMAGE_BUILDINFO_VARS") or "")
+ return image_buildinfo_outputvars(vars, d)
# Write build information to target filesystem
python buildinfo () {
- with open(d.expand('${IMAGE_ROOTFS}${sysconfdir}/build'), 'w') as build:
+ if not d.getVar('IMAGE_BUILDINFO_FILE'):
+ return
+ with open(d.expand('${IMAGE_ROOTFS}${IMAGE_BUILDINFO_FILE}'), 'w') as build:
build.writelines((
'''-----------------------
Build Configuration: |
@@ -71,7 +76,9 @@ Build Configuration: |
Layer Revisions: |
-----------------------
''',
- get_layer_revs(d)
+ get_layer_revs(d),
+ '''
+'''
))
}
diff --git a/meta/classes/image-combined-dbg.bbclass b/meta/classes/image-combined-dbg.bbclass
new file mode 100644
index 0000000000..f4772f7ea1
--- /dev/null
+++ b/meta/classes/image-combined-dbg.bbclass
@@ -0,0 +1,9 @@
+IMAGE_PREPROCESS_COMMAND_append = " combine_dbg_image; "
+
+combine_dbg_image () {
+ if [ "${IMAGE_GEN_DEBUGFS}" = "1" -a -e ${IMAGE_ROOTFS}-dbg ]; then
+ # copy target files into -dbg rootfs, so it can be used for
+ # debug purposes directly
+ tar -C ${IMAGE_ROOTFS} -cf - . | tar -C ${IMAGE_ROOTFS}-dbg -xf -
+ fi
+}
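
A minimal sketch of how this class is typically wired up in local.conf (the
debugfs fstype is an example):

    IMAGE_CLASSES += "image-combined-dbg"
    IMAGE_GEN_DEBUGFS = "1"
    IMAGE_FSTYPES_DEBUGFS = "tar.gz"
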
diff --git a/meta/classes/image-container.bbclass b/meta/classes/image-container.bbclass
new file mode 100644
index 0000000000..f002858bd2
--- /dev/null
+++ b/meta/classes/image-container.bbclass
@@ -0,0 +1,21 @@
+ROOTFS_BOOTSTRAP_INSTALL = ""
+IMAGE_TYPES_MASKED += "container"
+IMAGE_TYPEDEP_container = "tar.bz2"
+
+python __anonymous() {
+ if "container" in d.getVar("IMAGE_FSTYPES") and \
+ d.getVar("IMAGE_CONTAINER_NO_DUMMY") != "1" and \
+ "linux-dummy" not in d.getVar("PREFERRED_PROVIDER_virtual/kernel"):
+ msg = '"container" is in IMAGE_FSTYPES, but ' \
+ 'PREFERRED_PROVIDER_virtual/kernel is not "linux-dummy". ' \
+ 'Unless a particular kernel is needed, using linux-dummy will ' \
+ 'prevent a kernel from being built, which can reduce ' \
+ 'build times. If you don\'t want to use "linux-dummy", set ' \
+ '"IMAGE_CONTAINER_NO_DUMMY" to "1".'
+
+ # Raising SkipRecipe was Paul's clever idea. It causes the error to
+ # be shown only for the recipes actually requested to build, rather
+ # than bb.fatal, which would appear for all recipes inheriting the
+ # class.
+ raise bb.parse.SkipRecipe(msg)
+}
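
A minimal local.conf sketch matching the check above:

    IMAGE_FSTYPES += "container"
    PREFERRED_PROVIDER_virtual/kernel = "linux-dummy"
    # or, to keep building a real kernel anyway:
    # IMAGE_CONTAINER_NO_DUMMY = "1"
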
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
index 4a634dca96..54058b350d 100644
--- a/meta/classes/image-live.bbclass
+++ b/meta/classes/image-live.bbclass
@@ -19,9 +19,6 @@
# External variables (also used by syslinux.bbclass)
# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
-# ${COMPRESSISO} - Transparent compress ISO, reduce size ~40% if set to 1
-# ${NOISO} - skip building the ISO image if set to 1
-# ${NOHDD} - skip building the HDD image if set to 1
# ${HDDIMG_ID} - FAT image volume-id
# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
@@ -33,26 +30,26 @@ do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
virtual/kernel:do_deploy \
${MLPREFIX}syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
- ${@oe.utils.ifelse(d.getVar('COMPRESSISO', False),'zisofs-tools-native:do_populate_sysroot','')} \
- ${PN}:do_image_ext4 \
+ ${PN}:do_image_${@d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')} \
"
LABELS_LIVE ?= "boot install"
ROOT_LIVE ?= "root=/dev/ram0"
-INITRD_IMAGE_LIVE ?= "core-image-minimal-initramfs"
-INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.cpio.gz"
+INITRD_IMAGE_LIVE ?= "${MLPREFIX}core-image-minimal-initramfs"
+INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.${INITRAMFS_FSTYPES}"
-ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.ext4"
+LIVE_ROOTFS_TYPE ?= "ext4"
+ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_live = "ext4"
-IMAGE_TYPEDEP_iso = "ext4"
-IMAGE_TYPEDEP_hddimg = "ext4"
+IMAGE_TYPEDEP_live = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP_iso = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP_hddimg = "${LIVE_ROOTFS_TYPE}"
IMAGE_TYPES_MASKED += "live hddimg iso"
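
With the rootfs type now parameterized, a hedged local.conf sketch (squashfs
is an assumed example of a non-default type):

    IMAGE_FSTYPES += "live"
    LIVE_ROOTFS_TYPE = "squashfs"
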
python() {
- image_b = d.getVar('IMAGE_BASENAME', True)
- initrd_i = d.getVar('INITRD_IMAGE_LIVE', True)
+ image_b = d.getVar('IMAGE_BASENAME')
+ initrd_i = d.getVar('INITRD_IMAGE_LIVE')
if image_b == initrd_i:
bb.error('INITRD_IMAGE_LIVE %s cannot use image live, hddimg or iso.' % initrd_i)
bb.fatal('Check IMAGE_FSTYPES and INITRAMFS_FSTYPES settings.')
@@ -64,7 +61,6 @@ HDDDIR = "${S}/hddimg"
ISODIR = "${S}/iso"
EFIIMGDIR = "${S}/efi_img"
COMPACT_ISODIR = "${S}/iso.z"
-COMPRESSISO ?= "0"
ISOLINUXDIR ?= "/isolinux"
ISO_BOOTIMG = "isolinux/isolinux.bin"
@@ -82,8 +78,8 @@ populate_live() {
}
build_iso() {
- # Only create an ISO if we have an INITRD and NOISO was not set
- if [ -z "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
+ # Only create an ISO if we have an INITRD and the live or iso image type was selected
+ if [ -z "${INITRD}" ] || [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso', '1', '0', d)}" != "1" ]; then
bbnote "ISO image will not be created."
return
fi
@@ -91,7 +87,7 @@ build_iso() {
for fs in ${INITRD}
do
if [ ! -s "$fs" ]; then
- bbnote "ISO image will not be created. $fs is invalid."
+ bbwarn "ISO image will not be created. $fs is invalid."
return
fi
done
@@ -114,18 +110,8 @@ build_iso() {
install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
fi
- if [ "${COMPRESSISO}" = "1" ] ; then
- # create compact directory, compress iso
- mkdir -p ${COMPACT_ISODIR}
- mkzftree -z 9 -p 4 -F ${ISODIR}/rootfs.img ${COMPACT_ISODIR}/rootfs.img
-
- # move compact iso to iso, then remove compact directory
- mv ${COMPACT_ISODIR}/rootfs.img ${ISODIR}/rootfs.img
- rm -Rf ${COMPACT_ISODIR}
- mkisofs_compress_opts="-R -z -D -l"
- else
- mkisofs_compress_opts="-r"
- fi
+ # We used to have support for zisofs; this is a relic of that
+ mkisofs_compress_opts="-r"
# Check the size of ${ISODIR}/rootfs.img, use mkisofs -iso-level 3
# when it exceeds 3.8GB, the specification is 4G - 1 bytes, we need
@@ -216,10 +202,10 @@ build_fat_img() {
fi
if [ -z "${HDDIMG_ID}" ]; then
- mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
+ mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
${BLOCKS}
else
- mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
+ mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
${BLOCKS} -i ${HDDIMG_ID}
fi
@@ -229,7 +215,7 @@ build_fat_img() {
build_hddimg() {
# Create an HDD image
- if [ "${NOHDD}" != "1" ] ; then
+ if [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live hddimg', '1', '0', d)}" = "1" ] ; then
populate_live ${HDDDIR}
if [ "${PCBIOS}" = "1" ]; then
@@ -244,11 +230,11 @@ build_hddimg() {
if [ -f ${HDDDIR}/rootfs.img ]; then
rootfs_img_size=`stat -c '%s' ${HDDDIR}/rootfs.img`
max_size=`expr 4 \* 1024 \* 1024 \* 1024`
- if [ $rootfs_img_size -gt $max_size ]; then
- bberror "${HDDDIR}/rootfs.img execeeds 4GB,"
- bberror "this doesn't work on FAT filesystem, you can try either of:"
- bberror "1) Reduce the size of rootfs.img"
- bbfatal "2) Use iso, vmdk or vdi to instead of hddimg\n"
+ if [ $rootfs_img_size -ge $max_size ]; then
+ bberror "${HDDDIR}/rootfs.img size is greater than or equal to 4GB,"
+ bberror "and this doesn't work on a FAT filesystem. You can either:"
+ bberror "1) Reduce the size of rootfs.img, or"
+ bbfatal "2) Use wic, vmdk or vdi instead of hddimg\n"
fi
fi
@@ -264,9 +250,9 @@ build_hddimg() {
python do_bootimg() {
set_live_vm_vars(d, 'LIVE')
- if d.getVar("PCBIOS", True) == "1":
+ if d.getVar("PCBIOS") == "1":
bb.build.exec_func('build_syslinux_cfg', d)
- if d.getVar("EFI", True) == "1":
+ if d.getVar("EFI") == "1":
bb.build.exec_func('build_efi_cfg', d)
bb.build.exec_func('build_hddimg', d)
bb.build.exec_func('build_iso', d)
diff --git a/meta/classes/image-mklibs.bbclass b/meta/classes/image-mklibs.bbclass
index 5f6df1b17f..68e11d4365 100644
--- a/meta/classes/image-mklibs.bbclass
+++ b/meta/classes/image-mklibs.bbclass
@@ -19,7 +19,7 @@ mklibs_optimize_image_doit() {
echo $i
done > ${WORKDIR}/mklibs/executables.list
- dynamic_loader=$(linuxloader)
+ dynamic_loader=${@get_linuxloader(d)}
mklibs -v \
--ldlib ${dynamic_loader} \
diff --git a/meta/classes/image-postinst-intercepts.bbclass b/meta/classes/image-postinst-intercepts.bbclass
new file mode 100644
index 0000000000..ed30bbd98d
--- /dev/null
+++ b/meta/classes/image-postinst-intercepts.bbclass
@@ -0,0 +1,23 @@
+# Gather existing and candidate postinst intercepts from BBPATH
+POSTINST_INTERCEPTS_DIR ?= "${COREBASE}/scripts/postinst-intercepts"
+POSTINST_INTERCEPTS_PATHS ?= "${@':'.join('%s/postinst-intercepts' % p for p in '${BBPATH}'.split(':'))}:${POSTINST_INTERCEPTS_DIR}"
+
+python find_intercepts() {
+ intercepts = {}
+ search_paths = []
+ paths = d.getVar('POSTINST_INTERCEPTS_PATHS').split(':')
+ overrides = (':' + d.getVar('FILESOVERRIDES')).split(':') + ['']
+ search_paths = [os.path.join(p, op) for p in paths for op in overrides]
+ searched = oe.path.which_wild('*', ':'.join(search_paths), candidates=True)
+ files, chksums = [], []
+ for pathname, candidates in searched:
+ if os.path.isfile(pathname):
+ files.append(pathname)
+ chksums.append('%s:True' % pathname)
+ chksums.extend('%s:False' % c for c in candidates[:-1])
+
+ d.setVar('POSTINST_INTERCEPT_CHECKSUMS', ' '.join(chksums))
+ d.setVar('POSTINST_INTERCEPTS', ' '.join(files))
+}
+find_intercepts[eventmask] += "bb.event.RecipePreFinalise"
+addhandler find_intercepts
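
A sketch of the resulting search order, assuming BBPATH is
"<builddir>:<layerdir>" and earlier entries take precedence:

    # <builddir>/postinst-intercepts/<FILESOVERRIDES dirs, then plain>
    # <layerdir>/postinst-intercepts/<FILESOVERRIDES dirs, then plain>
    # ${COREBASE}/scripts/postinst-intercepts  (POSTINST_INTERCEPTS_DIR)
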
diff --git a/meta/classes/image-prelink.bbclass b/meta/classes/image-prelink.bbclass
index 4157df021a..04dd57c940 100644
--- a/meta/classes/image-prelink.bbclass
+++ b/meta/classes/image-prelink.bbclass
@@ -1,6 +1,6 @@
do_rootfs[depends] += "prelink-native:do_populate_sysroot"
-IMAGE_PREPROCESS_COMMAND += "prelink_setup; prelink_image; "
+IMAGE_PREPROCESS_COMMAND_append_libc-glibc = " prelink_setup; prelink_image; "
python prelink_setup () {
oe.utils.write_ld_so_conf(d)
@@ -33,10 +33,20 @@ prelink_image () {
fi
cat ${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf >> $ldsoconf
- dynamic_loader=$(linuxloader)
+ dynamic_loader=${@get_linuxloader(d)}
# prelink!
- ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
+ if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
+ bbnote " prelink: BUILD_REPRODUCIBLE_BINARIES..."
+ if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
+ export PRELINK_TIMESTAMP=`git log -1 --pretty=%ct `
+ else
+ export PRELINK_TIMESTAMP=$REPRODUCIBLE_TIMESTAMP_ROOTFS
+ fi
+ ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -am -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
+ else
+ ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
+ fi
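
A hedged local.conf sketch for the reproducible branch above (the timestamp
is an example epoch; when REPRODUCIBLE_TIMESTAMP_ROOTFS is empty, the git
commit time fallback is used instead):

    BUILD_REPRODUCIBLE_BINARIES = "1"
    REPRODUCIBLE_TIMESTAMP_ROOTFS = "1520598896"
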
# Remove the prelink.conf if we had to add it.
if [ "$dummy_prelink_conf" = "true" ]; then
diff --git a/meta/classes/image-vm.bbclass b/meta/classes/image-vm.bbclass
deleted file mode 100644
index 32c6550037..0000000000
--- a/meta/classes/image-vm.bbclass
+++ /dev/null
@@ -1,179 +0,0 @@
-# image-vm.bbclass
-# (loosly based off image-live.bbclass Copyright (C) 2004, Advanced Micro Devices, Inc.)
-#
-# Create an image which can be placed directly onto a harddisk using dd and then
-# booted.
-#
-# This uses syslinux. extlinux would have been nice but required the ext2/3
-# partition to be mounted. grub requires to run itself as part of the install
-# process.
-#
-# The end result is a 512 boot sector populated with an MBR and partition table
-# followed by an msdos fat16 partition containing syslinux and a linux kernel
-# completed by the ext2/3 rootfs.
-#
-# We have to push the msdos parition table size > 16MB so fat 16 is used as parted
-# won't touch fat12 partitions.
-
-inherit live-vm-common
-
-do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
- virtual/kernel:do_deploy \
- syslinux:do_populate_sysroot \
- syslinux-native:do_populate_sysroot \
- parted-native:do_populate_sysroot \
- mtools-native:do_populate_sysroot \
- ${PN}:do_image_${VM_ROOTFS_TYPE} \
- "
-
-IMAGE_TYPEDEP_vmdk = "${VM_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_vdi = "${VM_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_qcow2 = "${VM_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_hdddirect = "${VM_ROOTFS_TYPE}"
-IMAGE_TYPES_MASKED += "vmdk vdi qcow2 hdddirect"
-
-VM_ROOTFS_TYPE ?= "ext4"
-ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${VM_ROOTFS_TYPE}"
-
-# Used by bootloader
-LABELS_VM ?= "boot"
-ROOT_VM ?= "root=/dev/sda2"
-# Using an initramfs is optional. Enable it by setting INITRD_IMAGE_VM.
-INITRD_IMAGE_VM ?= ""
-INITRD_VM ?= "${@'${IMGDEPLOYDIR}/${INITRD_IMAGE_VM}-${MACHINE}.cpio.gz' if '${INITRD_IMAGE_VM}' else ''}"
-do_bootdirectdisk[depends] += "${@'${INITRD_IMAGE_VM}:do_image_complete' if '${INITRD_IMAGE_VM}' else ''}"
-
-BOOTDD_VOLUME_ID ?= "boot"
-BOOTDD_EXTRA_SPACE ?= "16384"
-
-DISK_SIGNATURE ?= "${DISK_SIGNATURE_GENERATED}"
-DISK_SIGNATURE[vardepsexclude] = "DISK_SIGNATURE_GENERATED"
-
-build_boot_dd() {
- HDDDIR="${S}/hdd/boot"
- HDDIMG="${S}/hdd.image"
- IMAGE=${IMGDEPLOYDIR}/${IMAGE_NAME}.hdddirect
-
- populate_kernel $HDDDIR
-
- if [ "${PCBIOS}" = "1" ]; then
- syslinux_hddimg_populate $HDDDIR
- fi
- if [ "${EFI}" = "1" ]; then
- efi_hddimg_populate $HDDDIR
- fi
-
- BLOCKS=`du -bks $HDDDIR | cut -f 1`
- BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
-
- # Remove it since mkdosfs would fail when it exists
- rm -f $HDDIMG
- mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C $HDDIMG $BLOCKS
- mcopy -i $HDDIMG -s $HDDDIR/* ::/
-
- if [ "${PCBIOS}" = "1" ]; then
- syslinux_hdddirect_install $HDDIMG
- fi
- chmod 644 $HDDIMG
-
- ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1`
- TOTALSIZE=`expr $BLOCKS + $ROOTFSBLOCKS`
- END1=`expr $BLOCKS \* 1024`
- END2=`expr $END1 + 512`
- END3=`expr \( $ROOTFSBLOCKS \* 1024 \) + $END1`
-
- echo $ROOTFSBLOCKS $TOTALSIZE $END1 $END2 $END3
- rm -rf $IMAGE
- dd if=/dev/zero of=$IMAGE bs=1024 seek=$TOTALSIZE count=1
-
- parted $IMAGE mklabel msdos
- parted $IMAGE mkpart primary fat16 0 ${END1}B
- parted $IMAGE unit B mkpart primary ext2 ${END2}B ${END3}B
- parted $IMAGE set 1 boot on
-
- parted $IMAGE print
-
- awk "BEGIN { printf \"$(echo ${DISK_SIGNATURE} | fold -w 2 | tac | paste -sd '' | sed 's/\(..\)/\\x&/g')\" }" | \
- dd of=$IMAGE bs=1 seek=440 conv=notrunc
-
- OFFSET=`expr $END2 / 512`
- if [ "${PCBIOS}" = "1" ]; then
- dd if=${STAGING_DATADIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
- fi
-
- dd if=$HDDIMG of=$IMAGE conv=notrunc seek=1 bs=512
- dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
-
- cd ${IMGDEPLOYDIR}
-
- if [ "${RM_OLD_IMAGE}" = "1" ] && [ -L ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.hdddirect ]; then
- rm -f $(readlink -f ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.hdddirect)
- fi
-
- ln -sf ${IMAGE_NAME}.hdddirect ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.hdddirect
-}
-
-python do_bootdirectdisk() {
- validate_disk_signature(d)
- set_live_vm_vars(d, 'VM')
- if d.getVar("PCBIOS", True) == "1":
- bb.build.exec_func('build_syslinux_cfg', d)
- if d.getVar("EFI", True) == "1":
- bb.build.exec_func('build_efi_cfg', d)
- bb.build.exec_func('build_boot_dd', d)
-}
-
-def generate_disk_signature():
- import uuid
-
- signature = str(uuid.uuid4())[:8]
-
- if signature != '00000000':
- return signature
- else:
- return 'ffffffff'
-
-def validate_disk_signature(d):
- import re
-
- disk_signature = d.getVar("DISK_SIGNATURE", True)
-
- if not re.match(r'^[0-9a-fA-F]{8}$', disk_signature):
- bb.fatal("DISK_SIGNATURE '%s' must be an 8 digit hex string" % disk_signature)
-
-DISK_SIGNATURE_GENERATED := "${@generate_disk_signature()}"
-
-run_qemu_img (){
- type="$1"
- qemu-img convert -O $type ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.hdddirect ${IMGDEPLOYDIR}/${IMAGE_NAME}.$type
-
- if [ "${RM_OLD_IMAGE}" = "1" ] && [ -L ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.$type ]; then
- rm -f $(readlink -f ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.$type)
- fi
-
- ln -sf ${IMAGE_NAME}.$type ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.$type
-}
-create_vmdk_image () {
- run_qemu_img vmdk
-}
-
-create_vdi_image () {
- run_qemu_img vdi
-}
-
-create_qcow2_image () {
- run_qemu_img qcow2
-}
-
-python do_vmimg() {
- if 'vmdk' in d.getVar('IMAGE_FSTYPES', True):
- bb.build.exec_func('create_vmdk_image', d)
- if 'vdi' in d.getVar('IMAGE_FSTYPES', True):
- bb.build.exec_func('create_vdi_image', d)
- if 'qcow2' in d.getVar('IMAGE_FSTYPES', True):
- bb.build.exec_func('create_qcow2_image', d)
-}
-
-addtask bootdirectdisk before do_vmimg
-addtask vmimg after do_bootdirectdisk before do_image_complete
-do_vmimg[depends] += "qemu-native:do_populate_sysroot"
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index 4d5a401171..c2824395c9 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -1,41 +1,49 @@
-inherit rootfs_${IMAGE_PKGTYPE}
-# Only Linux SDKs support populate_sdk_ext, fall back to populate_sdk
+IMAGE_CLASSES ??= ""
+
+# rootfs bootstrap install
+# warning - image-container resets this
+ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts"
+
+# Handle inherits of any of the image classes we need
+IMGCLASSES = "rootfs_${IMAGE_PKGTYPE} image_types ${IMAGE_CLASSES}"
+# Only Linux SDKs support populate_sdk_ext, fall back to populate_sdk_base
# in the non-Linux SDK_OS case, such as mingw32
-SDKEXTCLASS ?= "${@['populate_sdk', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS", True)]}"
-inherit ${SDKEXTCLASS}
+IMGCLASSES += "${@['populate_sdk_base', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
+IMGCLASSES += "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso hddimg', 'image-live', '', d)}"
+IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-container', '', d)}"
+IMGCLASSES += "image_types_wic"
+IMGCLASSES += "rootfs-postcommands"
+IMGCLASSES += "image-postinst-intercepts"
+inherit ${IMGCLASSES}
TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "
-inherit gzipnative
-
-LICENSE = "MIT"
+LICENSE ?= "MIT"
PACKAGES = ""
-DEPENDS += "${MLPREFIX}qemuwrapper-cross ${MLPREFIX}depmodwrapper-cross"
-RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL}"
+DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native"
+RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}"
RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
+PATH_prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
INHIBIT_DEFAULT_DEPS = "1"
-TESTIMAGECLASS = "${@base_conditional('TEST_IMAGE', '1', 'testimage-auto', '', d)}"
-inherit ${TESTIMAGECLASS}
-
# IMAGE_FEATURES may contain any available package group
IMAGE_FEATURES ?= ""
IMAGE_FEATURES[type] = "list"
-IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs empty-root-password allow-empty-password post-install-logging"
+IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging"
# Generate companion debugfs?
IMAGE_GEN_DEBUGFS ?= "0"
-# rootfs bootstrap install
-ROOTFS_BOOTSTRAP_INSTALL = "${@bb.utils.contains("IMAGE_FEATURES", "package-management", "", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)}"
+# These packages will be installed additionally into the debug rootfs
+IMAGE_INSTALL_DEBUGFS ?= ""
# These packages will be removed from a read-only rootfs after all other
# packages have been installed
-ROOTFS_RO_UNNEEDED = "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}"
+ROOTFS_RO_UNNEEDED ??= "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}"
# packages to install from features
FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
@@ -51,7 +59,7 @@ FEATURE_PACKAGES_splash = "${SPLASH}"
IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
def check_image_features(d):
- valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems', True) or "").split()
+ valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems') or "").split()
valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys()
for var in d:
if var.startswith("PACKAGE_GROUP_"):
@@ -87,7 +95,6 @@ PID = "${@os.getpid()}"
PACKAGE_ARCH = "${MACHINE_ARCH}"
LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
-LDCONFIGDEPEND_libc-uclibc = ""
LDCONFIGDEPEND_libc-musl = ""
# This is needed to have depmod data in PKGDATA_DIR,
@@ -117,10 +124,10 @@ python () {
def rootfs_variables(d):
from oe.rootfs import variable_depends
variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
- 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','RM_OLD_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS',
+ 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY',
'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
- 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR']
+ 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
variables.extend(rootfs_command_variables(d))
variables.extend(variable_depends(d))
return " ".join(variables)
@@ -129,76 +136,64 @@ do_rootfs[vardeps] += "${@rootfs_variables(d)}"
do_build[depends] += "virtual/kernel:do_deploy"
-def build_live(d):
- if bb.utils.contains("IMAGE_FSTYPES", "live", "live", "0", d) == "0": # live is not set but hob might set iso or hddimg
- d.setVar('NOISO', bb.utils.contains('IMAGE_FSTYPES', "iso", "0", "1", d))
- d.setVar('NOHDD', bb.utils.contains('IMAGE_FSTYPES', "hddimg", "0", "1", d))
- if d.getVar('NOISO', True) == "0" or d.getVar('NOHDD', True) == "0":
- return "image-live"
- return ""
- return "image-live"
-IMAGE_TYPE_live = "${@build_live(d)}"
-inherit ${IMAGE_TYPE_live}
+python () {
+ def extraimage_getdepends(task):
+ deps = ""
+ for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
+ deps += " %s:%s" % (dep, task)
+ return deps
-IMAGE_TYPE_vm = '${@bb.utils.contains_any("IMAGE_FSTYPES", ["vmdk", "vdi", "qcow2", "hdddirect"], "image-vm", "", d)}'
-inherit ${IMAGE_TYPE_vm}
+ d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot'))
-python () {
deps = " " + imagetypes_getdepends(d)
d.appendVarFlag('do_rootfs', 'depends', deps)
- deps = ""
- for dep in (d.getVar('EXTRA_IMAGEDEPENDS', True) or "").split():
- deps += " %s:do_populate_sysroot" % dep
- d.appendVarFlag('do_build', 'depends', deps)
-
#process IMAGE_FEATURES, we must do this before runtime_mapping_rename
#Check for replaces image features
features = set(oe.data.typed_value('IMAGE_FEATURES', d))
remain_features = features.copy()
for feature in features:
- replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature, True) or "").split())
+ replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature) or "").split())
remain_features -= replaces
#Check for conflict image features
for feature in remain_features:
- conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature, True) or "").split())
+ conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature) or "").split())
temp = conflicts & remain_features
if temp:
- bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN', True), feature, ' '.join(list(temp))))
+ bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN'), feature, ' '.join(list(temp))))
d.setVar('IMAGE_FEATURES', ' '.join(sorted(list(remain_features))))
check_image_features(d)
- initramfs_image = d.getVar('INITRAMFS_IMAGE', True) or ""
- if initramfs_image != "":
- d.appendVarFlag('do_build', 'depends', " %s:do_bundle_initramfs" % d.getVar('PN', True))
- d.appendVarFlag('do_bundle_initramfs', 'depends', " %s:do_image_complete" % initramfs_image)
}
-IMAGE_CLASSES += "image_types"
-inherit ${IMAGE_CLASSES}
-
IMAGE_POSTPROCESS_COMMAND ?= ""
# some default locales
IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
-LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS', True).split()))}"
+LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
# Prefer image, but use the fallback files for lookups if the image ones
# aren't yet available.
PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
-inherit rootfs-postcommands
-
PACKAGE_EXCLUDE ??= ""
PACKAGE_EXCLUDE[type] = "list"
fakeroot python do_rootfs () {
from oe.rootfs import create_rootfs
from oe.manifest import create_manifest
+ import logging
+
+ logger = d.getVar('BB_TASK_LOGGER', False)
+ if logger:
+ logcatcher = bb.utils.LogCatcher()
+ logger.addHandler(logcatcher)
+ else:
+ logcatcher = None
# NOTE: if you add, remove or significantly refactor the stages of this
# process then you should recalculate the weightings here. This is quite
@@ -212,20 +207,20 @@ fakeroot python do_rootfs () {
progress_reporter.next_stage()
# Handle package exclusions
- excl_pkgs = d.getVar("PACKAGE_EXCLUDE", True).split()
- inst_pkgs = d.getVar("PACKAGE_INSTALL", True).split()
- inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY", True).split()
+ excl_pkgs = d.getVar("PACKAGE_EXCLUDE").split()
+ inst_pkgs = d.getVar("PACKAGE_INSTALL").split()
+ inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY").split()
d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
for pkg in excl_pkgs:
if pkg in inst_pkgs:
- bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_pkgs))
+ bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
inst_pkgs.remove(pkg)
if pkg in inst_attempt_pkgs:
- bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_pkgs))
+ bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
inst_attempt_pkgs.remove(pkg)
d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
@@ -235,7 +230,7 @@ fakeroot python do_rootfs () {
# We have to delay the runtime_mapping_rename until just before rootfs runs
# otherwise, the multilib renaming could step in and squash any fixups that
# may have occurred.
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
@@ -246,30 +241,33 @@ fakeroot python do_rootfs () {
progress_reporter.next_stage()
# generate rootfs
- create_rootfs(d, progress_reporter=progress_reporter)
+ d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
+ create_rootfs(d, progress_reporter=progress_reporter, logcatcher=logcatcher)
progress_reporter.finish()
}
do_rootfs[dirs] = "${TOPDIR}"
do_rootfs[cleandirs] += "${S} ${IMGDEPLOYDIR}"
do_rootfs[umask] = "022"
-addtask rootfs before do_build
+do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
+addtask rootfs after do_prepare_recipe_sysroot
fakeroot python do_image () {
from oe.utils import execute_pre_post_process
- pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND", True)
+ d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
+ pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND")
execute_pre_post_process(d, pre_process_cmds)
}
do_image[dirs] = "${TOPDIR}"
do_image[umask] = "022"
-addtask do_image after do_rootfs before do_build
+addtask do_image after do_rootfs
fakeroot python do_image_complete () {
from oe.utils import execute_pre_post_process
- post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND", True)
+ post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND")
execute_pre_post_process(d, post_process_cmds)
}
@@ -279,20 +277,27 @@ SSTATETASKS += "do_image_complete"
SSTATE_SKIP_CREATION_task-image-complete = '1'
do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
-do_image_complete[stamp-extra-info] = "${MACHINE}"
+do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
addtask do_image_complete after do_image before do_build
+python do_image_complete_setscene () {
+ sstate_setscene(d)
+}
+addtask do_image_complete_setscene
# Add image-level QA/sanity checks to IMAGE_QA_COMMANDS
#
# IMAGE_QA_COMMANDS += " \
# image_check_everything_ok \
# "
-# This task runs all functions in IMAGE_QA_COMMANDS after the image
+# This task runs all functions in IMAGE_QA_COMMANDS after the rootfs
# construction has completed in order to validate the resulting image.
+#
+# The functions should use ${IMAGE_ROOTFS} to find the unpacked rootfs
+# directory, which, if QA passes, will be the basis for the images.
fakeroot python do_image_qa () {
from oe.utils import ImageQAFailed
- qa_cmds = (d.getVar('IMAGE_QA_COMMANDS', True) or '').split()
+ qa_cmds = (d.getVar('IMAGE_QA_COMMANDS') or '').split()
qamsg = ""
for cmd in qa_cmds:
@@ -300,47 +305,31 @@ fakeroot python do_image_qa () {
bb.build.exec_func(cmd, d)
except oe.utils.ImageQAFailed as e:
qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
- except bb.build.FuncFailed as e:
- qamsg = qamsg + '\tImage QA function %s failed' % e.name
- if e.logfile:
- qamsg = qamsg + ' (log file is located at %s)' % e.logfile
- qamsg = qamsg + '\n'
+ except Exception as e:
+ qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
if qamsg:
- imgname = d.getVar('IMAGE_NAME', True)
+ imgname = d.getVar('IMAGE_NAME')
bb.fatal("QA errors found whilst validating image: %s\n%s" % (imgname, qamsg))
}
-addtask do_image_qa after do_image_complete before do_build
-
-#
-# Write environment variables used by wic
-# to tmp/sysroots/<machine>/imgdata/<image>.env
-#
-python do_rootfs_wicenv () {
- wicvars = d.getVar('WICVARS', True)
- if not wicvars:
- return
-
- stdir = d.getVar('STAGING_DIR_TARGET', True)
- outdir = os.path.join(stdir, 'imgdata')
- bb.utils.mkdirhier(outdir)
- basename = d.getVar('IMAGE_BASENAME', True)
- with open(os.path.join(outdir, basename) + '.env', 'w') as envf:
- for var in wicvars.split():
- value = d.getVar(var, True)
- if value:
- envf.write('%s="%s"\n' % (var, value.strip()))
+addtask do_image_qa after do_rootfs before do_image
+
+SSTATETASKS += "do_image_qa"
+SSTATE_SKIP_CREATION_task-image-qa = '1'
+do_image_qa[sstate-inputdirs] = ""
+do_image_qa[sstate-outputdirs] = ""
+python do_image_qa_setscene () {
+ sstate_setscene(d)
}
-addtask do_rootfs_wicenv after do_image before do_image_wic
-do_rootfs_wicenv[vardeps] += "${WICVARS}"
-do_rootfs_wicenv[prefuncs] = 'set_image_size'
+addtask do_image_qa_setscene
def setup_debugfs_variables(d):
d.appendVar('IMAGE_ROOTFS', '-dbg')
- d.appendVar('IMAGE_LINK_NAME', '-dbg')
+ if d.getVar('IMAGE_LINK_NAME'):
+ d.appendVar('IMAGE_LINK_NAME', '-dbg')
d.appendVar('IMAGE_NAME','-dbg')
d.setVar('IMAGE_BUILDING_DEBUGFS', 'true')
- debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS', True)
+ debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS')
if debugfs_image_fstypes:
d.setVar('IMAGE_FSTYPES', debugfs_image_fstypes)
@@ -358,7 +347,7 @@ python () {
#
# Without de-duplication, gen_conversion_cmds() below
# would create the same compression command multiple times.
- ctypes = set(d.getVar('CONVERSIONTYPES', True).split())
+ ctypes = set(d.getVar('CONVERSIONTYPES').split())
old_overrides = d.getVar('OVERRIDES', False)
def _image_base_type(type):
@@ -375,11 +364,11 @@ python () {
return basetype
basetypes = {}
- alltypes = d.getVar('IMAGE_FSTYPES', True).split()
+ alltypes = d.getVar('IMAGE_FSTYPES').split()
typedeps = {}
- if d.getVar('IMAGE_GEN_DEBUGFS', True) == "1":
- debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS', True).split()
+ if d.getVar('IMAGE_GEN_DEBUGFS') == "1":
+ debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS').split()
for t in debugfs_fstypes:
alltypes.append("debugfs_" + t)
@@ -394,7 +383,7 @@ python () {
if t.startswith("debugfs_"):
t = t[8:]
debug = "debugfs_"
- deps = (d.getVar('IMAGE_TYPEDEP_' + t, True) or "").split()
+ deps = (d.getVar('IMAGE_TYPEDEP_' + t) or "").split()
vardeps.add('IMAGE_TYPEDEP_' + t)
if baset not in typedeps:
typedeps[baset] = set()
@@ -414,7 +403,7 @@ python () {
d.appendVarFlag('do_image', 'vardeps', ' '.join(vardeps))
- maskedtypes = (d.getVar('IMAGE_TYPES_MASKED', True) or "").split()
+ maskedtypes = (d.getVar('IMAGE_TYPES_MASKED') or "").split()
maskedtypes = [dbg + t for t in maskedtypes for dbg in ("", "debugfs_")]
for t in basetypes:
@@ -433,16 +422,21 @@ python () {
debug = "setup_debugfs "
realt = t[8:]
localdata.setVar('OVERRIDES', '%s:%s' % (realt, old_overrides))
- bb.data.update_data(localdata)
localdata.setVar('type', realt)
# Delete DATETIME so we don't expand any references to it now
# This means the task's hash can be stable rather than having hardcoded
# date/time values. It will get expanded at execution time.
# Similarly TMPDIR since otherwise we see QA stamp comparison problems
+ # Expand PV now, else expanding it later can trigger get_srcrev, which can fail due to these variables being unset
+ localdata.setVar('PV', d.getVar('PV'))
localdata.delVar('DATETIME')
+ localdata.delVar('DATE')
localdata.delVar('TMPDIR')
+ vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude', True) or '').split()
+ for dep in vardepsexclude:
+ localdata.delVar(dep)
- image_cmd = localdata.getVar("IMAGE_CMD", True)
+ image_cmd = localdata.getVar("IMAGE_CMD")
vardeps.add('IMAGE_CMD_' + realt)
if image_cmd:
cmds.append("\t" + image_cmd)
@@ -456,15 +450,15 @@ python () {
rm_tmp_images = set()
def gen_conversion_cmds(bt):
- for ctype in ctypes:
- if bt[bt.find('.') + 1:] == ctype:
+ for ctype in sorted(ctypes):
+ if bt.endswith("." + ctype):
type = bt[0:-len(ctype) - 1]
if type.startswith("debugfs_"):
type = type[8:]
# Create input image first.
gen_conversion_cmds(type)
localdata.setVar('type', type)
- cmd = "\t" + (localdata.getVar("CONVERSION_CMD_" + ctype, True) or localdata.getVar("COMPRESS_CMD_" + ctype, True))
+ cmd = "\t" + (localdata.getVar("CONVERSION_CMD_" + ctype) or localdata.getVar("COMPRESS_CMD_" + ctype))
if cmd not in cmds:
cmds.append(cmd)
vardeps.add('CONVERSION_CMD_' + ctype)
@@ -487,26 +481,27 @@ python () {
# Clean up after applying all conversion commands. Some of them might
# use the same input, therefore we cannot delete sooner without applying
# some complex dependency analysis.
- for image in rm_tmp_images:
+ for image in sorted(rm_tmp_images):
cmds.append("\trm " + image)
after = 'do_image'
for dep in typedeps[t]:
after += ' do_image_%s' % dep.replace("-", "_").replace(".", "_")
- t = t.replace("-", "_").replace(".", "_")
+ task = "do_image_%s" % t.replace("-", "_").replace(".", "_")
+
+ d.setVar(task, '\n'.join(cmds))
+ d.setVarFlag(task, 'func', '1')
+ d.setVarFlag(task, 'fakeroot', '1')
- d.setVar('do_image_%s' % t, '\n'.join(cmds))
- d.setVarFlag('do_image_%s' % t, 'func', '1')
- d.setVarFlag('do_image_%s' % t, 'fakeroot', '1')
- d.setVarFlag('do_image_%s' % t, 'prefuncs', debug + 'set_image_size')
- d.setVarFlag('do_image_%s' % t, 'postfuncs', 'create_symlinks')
- d.setVarFlag('do_image_%s' % t, 'subimages', ' '.join(subimages))
- d.appendVarFlag('do_image_%s' % t, 'vardeps', ' '.join(vardeps))
- d.appendVarFlag('do_image_%s' % t, 'vardepsexclude', 'DATETIME')
+ d.appendVarFlag(task, 'prefuncs', ' ' + debug + ' set_image_size')
+ d.prependVarFlag(task, 'postfuncs', 'create_symlinks ')
+ d.appendVarFlag(task, 'subimages', ' ' + ' '.join(subimages))
+ d.appendVarFlag(task, 'vardeps', ' ' + ' '.join(vardeps))
+ d.appendVarFlag(task, 'vardepsexclude', ' DATETIME DATE ' + ' '.join(vardepsexclude))
- bb.debug(2, "Adding type %s before %s, after %s" % (t, 'do_image_complete', after))
- bb.build.addtask('do_image_%s' % t, 'do_image_complete', after, d)
+ bb.debug(2, "Adding task %s before %s, after %s" % (task, 'do_image_complete', after))
+ bb.build.addtask(task, 'do_image_complete', after, d)
}
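
To make the generated tasks concrete: for a hypothetical IMAGE_FSTYPES = "ext4 ext4.gz", both entries share the base type ext4, so the loop above emits a single do_image_ext4 task whose body runs IMAGE_CMD_ext4 followed by CONVERSION_CMD_gz (a sketch; the actual text comes from the definitions in image_types.bbclass):

    do_image_ext4 () {
        oe_mkext234fs ext4 ${EXTRA_IMAGECMD}
        gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ext4 > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ext4.gz
    }
    # plus: addtask do_image_ext4 before do_image_complete after do_image
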
#
@@ -515,33 +510,41 @@ python () {
def get_rootfs_size(d):
import subprocess
- rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT', True))
- overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR', True))
- rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE', True))
- rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE', True))
- rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE', True)
- image_fstypes = d.getVar('IMAGE_FSTYPES', True) or ''
- initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES', True) or ''
- initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE', True)
+ rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT'))
+ overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR'))
+ rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE'))
+ rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE'))
+ rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE')
+ image_fstypes = d.getVar('IMAGE_FSTYPES') or ''
+ initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES') or ''
+ initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE')
output = subprocess.check_output(['du', '-ks',
- d.getVar('IMAGE_ROOTFS', True)])
+ d.getVar('IMAGE_ROOTFS')])
size_kb = int(output.split()[0])
+
base_size = size_kb * overhead_factor
- base_size = max(base_size, rootfs_req_size) + rootfs_extra_space
+ bb.debug(1, '%f = %d * %f' % (base_size, size_kb, overhead_factor))
+ base_size2 = max(base_size, rootfs_req_size) + rootfs_extra_space
+ bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (base_size2, base_size, rootfs_req_size, max(base_size, rootfs_req_size), rootfs_extra_space))
+ base_size = base_size2
if base_size != int(base_size):
base_size = int(base_size + 1)
else:
base_size = int(base_size)
+ bb.debug(1, '%f = int(%f)' % (base_size, base_size2))
+ base_size_saved = base_size
base_size += rootfs_alignment - 1
base_size -= base_size % rootfs_alignment
+ bb.debug(1, '%d = aligned(%d)' % (base_size, base_size_saved))
# Do not check image size of the debugfs image. This is not supposed
# to be deployed, etc. so it doesn't make sense to limit the size
# of the debug.
- if (d.getVar('IMAGE_BUILDING_DEBUGFS', True) or "") == "true":
+ if (d.getVar('IMAGE_BUILDING_DEBUGFS') or "") == "true":
+ bb.debug(1, 'returning debugfs size %d' % (base_size))
return base_size
# Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
@@ -559,6 +562,8 @@ def get_rootfs_size(d):
(base_size, initramfs_maxsize_int))
bb.error("You can set INITRAMFS_MAXSIZE a larger value. Usually, it should")
bb.fatal("be less than 1/2 of ram size, or you may fail to boot it.\n")
+
+ bb.debug(1, 'returning %d' % (base_size))
return base_size
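
A worked example of the arithmetic above, assuming du reports 100000K, IMAGE_OVERHEAD_FACTOR = "1.3", IMAGE_ROOTFS_SIZE = "65536", IMAGE_ROOTFS_EXTRA_SPACE = "0" and IMAGE_ROOTFS_ALIGNMENT = "1024"; the lines mirror the bb.debug trail:

    130000.000000 = 100000 * 1.300000
    130000.000000 = max(130000.000000, 65536)[130000.000000] + 0
    130000 = int(130000.000000)
    130048 = aligned(130000)    # rounded up to the next 1024K boundary
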
python set_image_size () {
@@ -572,13 +577,13 @@ python set_image_size () {
#
python create_symlinks() {
- deploy_dir = d.getVar('IMGDEPLOYDIR', True)
- img_name = d.getVar('IMAGE_NAME', True)
- link_name = d.getVar('IMAGE_LINK_NAME', True)
- manifest_name = d.getVar('IMAGE_MANIFEST', True)
- taskname = d.getVar("BB_CURRENTTASK", True)
+ deploy_dir = d.getVar('IMGDEPLOYDIR')
+ img_name = d.getVar('IMAGE_NAME')
+ link_name = d.getVar('IMAGE_LINK_NAME')
+ manifest_name = d.getVar('IMAGE_MANIFEST')
+ taskname = d.getVar("BB_CURRENTTASK")
subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()
- imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix', True) or d.expand("${IMAGE_NAME_SUFFIX}.")
+ imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix') or d.expand("${IMAGE_NAME_SUFFIX}.")
if not link_name:
return
@@ -588,9 +593,6 @@ python create_symlinks() {
if os.path.exists(os.path.join(deploy_dir, src)):
bb.note("Creating symlink: %s -> %s" % (dst, src))
if os.path.islink(dst):
- if d.getVar('RM_OLD_IMAGE', True) == "1" and \
- os.path.exists(os.path.realpath(dst)):
- os.remove(os.path.realpath(dst))
os.remove(dst)
os.symlink(src, dst)
else:
@@ -607,19 +609,66 @@ do_patch[noexec] = "1"
do_configure[noexec] = "1"
do_compile[noexec] = "1"
do_install[noexec] = "1"
-do_populate_sysroot[noexec] = "1"
+deltask do_populate_sysroot
do_package[noexec] = "1"
-do_package_qa[noexec] = "1"
+deltask do_package_qa
do_packagedata[noexec] = "1"
-do_package_write_ipk[noexec] = "1"
-do_package_write_deb[noexec] = "1"
-do_package_write_rpm[noexec] = "1"
-
-# Allow the kernel to be repacked with the initramfs and boot image file as a single file
-do_bundle_initramfs[depends] += "virtual/kernel:do_bundle_initramfs"
-do_bundle_initramfs[nostamp] = "1"
-do_bundle_initramfs[noexec] = "1"
-do_bundle_initramfs () {
- :
+deltask do_package_write_ipk
+deltask do_package_write_deb
+deltask do_package_write_rpm
+
+# Prepare the root links to point to the /usr counterparts.
+create_merged_usr_symlinks() {
+ root="$1"
+ install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
+ lnr $root${base_bindir} $root/bin
+ lnr $root${base_sbindir} $root/sbin
+ lnr $root${base_libdir} $root/${baselib}
+
+ if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
+ install -d $root${nonarch_base_libdir}
+ lnr $root${nonarch_base_libdir} $root/lib
+ fi
+
+ # create base links for multilibs
+ multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}"
+ for d in $multi_libdirs; do
+ install -d $root${exec_prefix}/$d
+ lnr $root${exec_prefix}/$d $root/$d
+ done
+}
+
+create_merged_usr_symlinks_rootfs() {
+ create_merged_usr_symlinks ${IMAGE_ROOTFS}
}
-addtask bundle_initramfs after do_image_complete
+
+create_merged_usr_symlinks_sdk() {
+ create_merged_usr_symlinks ${SDK_OUTPUT}${SDKTARGETSYSROOT}
+}
+
+ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs; ', '',d)}"
+POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk; ', '',d)}"
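
Assuming a usrmerge distro where base_bindir, base_sbindir and base_libdir all resolve under ${exec_prefix} (/usr/bin, /usr/sbin, /usr/lib), the helper above leaves relative symlinks like these at the root of the filesystem:

    bin  -> usr/bin
    sbin -> usr/sbin
    lib  -> usr/lib
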
+
+reproducible_final_image_task () {
+ if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
+ if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
+ REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
+ if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" = "" ]; then
+ REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
+ fi
+ fi
+ # Set mtime of all files to a reproducible value
+ bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
+ find ${IMAGE_ROOTFS} -exec touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS {} \;
+ fi
+}
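
The timestamp fallback chain above (explicit value, then the git commit time of COREBASE, then the mtime of bitbake.conf) can be pinned from configuration; a minimal sketch for local.conf, with an arbitrary epoch value, assuming the variable is made visible to the task environment:

    BUILD_REPRODUCIBLE_BINARIES = "1"
    REPRODUCIBLE_TIMESTAMP_ROOTFS = "1520598896"
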
+
+systemd_preset_all () {
+ if [ -e ${IMAGE_ROOTFS}${root_prefix}/lib/systemd/systemd ]; then
+ systemctl --root="${IMAGE_ROOTFS}" --preset-mode=enable-only preset-all
+ fi
+}
+
+IMAGE_PREPROCESS_COMMAND_append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "
+
+CVE_PRODUCT = ""
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
index 5ef6f60b89..f82f1d8862 100644
--- a/meta/classes/image_types.bbclass
+++ b/meta/classes/image_types.bbclass
@@ -17,30 +17,50 @@ def imagetypes_getdepends(d):
d += ":do_populate_sysroot"
deps.add(d)
- fstypes = set((d.getVar('IMAGE_FSTYPES', True) or "").split())
- fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS', True) or "").split())
+ # Take a type in the form of foo.bar.car and split it into the items
+ # needed for the image deps "foo", and the conversion deps ["bar", "car"]
+ def split_types(typestring):
+ types = typestring.split(".")
+ return types[0], types[1:]
+
+ fstypes = set((d.getVar('IMAGE_FSTYPES') or "").split())
+ fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS') or "").split())
+ deprecated = set()
deps = set()
for typestring in fstypes:
- types = typestring.split(".")
- basetype, resttypes = types[0], types[1:]
+ basetype, resttypes = split_types(typestring)
+
+ var = "IMAGE_DEPENDS_%s" % basetype
+ if d.getVar(var) is not None:
+ deprecated.add(var)
+
+ for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype) or "").split():
+ base, rest = split_types(typedepends)
+ resttypes += rest
+
+ var = "IMAGE_DEPENDS_%s" % base
+ if d.getVar(var) is not None:
+ deprecated.add(var)
- adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True) , deps)
- for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype, True) or "").split():
- adddep(d.getVar('IMAGE_DEPENDS_%s' % typedepends, True) , deps)
for ctype in resttypes:
- adddep(d.getVar("CONVERSION_DEPENDS_%s" % ctype, True), deps)
- adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps)
+ adddep(d.getVar("CONVERSION_DEPENDS_%s" % ctype), deps)
+ adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype), deps)
+
+ if deprecated:
+ bb.fatal('Deprecated variable(s) found: "%s". '
+ 'Use do_image_<type>[depends] += "<recipe>:<task>" instead' % ', '.join(deprecated))
# Sort the set so that ordering is consistent
return " ".join(sorted(deps))
-XZ_COMPRESSION_LEVEL ?= "-3"
+XZ_COMPRESSION_LEVEL ?= "-9"
XZ_INTEGRITY_CHECK ?= "crc32"
-XZ_THREADS ?= "-T 0"
ZIP_COMPRESSION_LEVEL ?= "-9"
+ZSTD_COMPRESSION_LEVEL ?= "-3"
+
JFFS2_SUM_EXTRA_ARGS ?= ""
IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
@@ -64,8 +84,14 @@ oe_mkext234fs () {
eval COUNT=\"$MIN_COUNT\"
fi
# Create a sparse image block
+ bbdebug 1 Executing "dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024"
dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
+ bbdebug 1 "Actual Rootfs size: `du -s ${IMAGE_ROOTFS}`"
+ bbdebug 1 "Actual Partion size: `stat -c '%s' ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype`"
+ bbdebug 1 Executing "mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}"
mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
+ # Error codes 0-3 indicate successful operation of fsck (no errors, or errors corrected)
+ fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype || [ $? -le 3 ]
}
IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
@@ -74,34 +100,37 @@ IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
MIN_BTRFS_SIZE ?= "16384"
IMAGE_CMD_btrfs () {
- if [ ${ROOTFS_SIZE} -gt ${MIN_BTRFS_SIZE} ]; then
- dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs count=${ROOTFS_SIZE} bs=1024
- mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
- else
- bbfatal "Rootfs is too small for BTRFS (Rootfs Actual Size: ${ROOTFS_SIZE}, BTRFS Minimum Size: ${MIN_BTRFS_SIZE})"
+ size=${ROOTFS_SIZE}
+ if [ ${size} -lt ${MIN_BTRFS_SIZE} ] ; then
+ size=${MIN_BTRFS_SIZE}
+ bbwarn "Rootfs size is too small for BTRFS. Filesystem will be extended to ${size}K"
fi
+ dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs seek=${size} count=0 bs=1024
+ mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
}
IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
+IMAGE_CMD_squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
# By default, tar from the host is used, which can be quite old. If
# you need special parameters (like --xattrs) which are only supported
# by GNU tar upstream >= 1.27, then override that default:
# IMAGE_CMD_TAR = "tar --xattrs --xattrs-include=*"
-# IMAGE_DEPENDS_tar_append = " tar-replacement-native"
+# do_image_tar[depends] += "tar-replacement-native:do_populate_sysroot"
# EXTRANATIVEPATH += "tar-native"
#
# The GNU documentation does not specify whether --xattrs-include is necessary.
# In practice, it turned out to be not needed when creating archives and
# required when extracting, but it seems prudent to use it in both cases.
IMAGE_CMD_TAR ?= "tar"
-IMAGE_CMD_tar = "${IMAGE_CMD_TAR} -cvf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} ."
+# Ignore return code 1 ("file changed as we read it") since other tasks (e.g. do_image_wic) may be hardlinking the rootfs
+IMAGE_CMD_tar = "${IMAGE_CMD_TAR} --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
IMAGE_CMD_cpio () {
- (cd ${IMAGE_ROOTFS} && find . | cpio -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
+ (cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
# We only need the /init symlink if we're building the real
# image. The -dbg image doesn't need it! By being clever
# about this we also avoid 'touch' below failing, as it
@@ -119,21 +148,17 @@ IMAGE_CMD_cpio () {
fi
}
-ELF_KERNEL ?= "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE}"
-ELF_APPEND ?= "ramdisk_size=32768 root=/dev/ram0 rw console="
-
-IMAGE_CMD_elf () {
- test -f ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf && rm -f ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf
- mkelfImage --kernel=${ELF_KERNEL} --initrd=${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.cpio.gz --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD}
-}
-
-IMAGE_TYPEDEP_elf = "cpio.gz"
-
UBI_VOLNAME ?= "${MACHINE}-rootfs"
multiubi_mkfs() {
local mkubifs_args="$1"
local ubinize_args="$2"
+
+ # Fail early with a clear error message if the required UBI/UBIFS arguments are unset.
+ if [ -z "$mkubifs_args" ] || [ -z "$ubinize_args" ]; then
+ bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
+ fi
+
if [ -z "$3" ]; then
local vname=""
else
@@ -147,7 +172,9 @@ multiubi_mkfs() {
echo vol_type=dynamic >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_name=${UBI_VOLNAME} >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_flags=autoresize >> ubinize${vname}-${IMAGE_NAME}.cfg
- mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
+ if [ -n "$vname" ]; then
+ mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
+ fi
ubinize -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg
# Cleanup cfg file
@@ -181,94 +208,31 @@ IMAGE_CMD_multiubi () {
IMAGE_CMD_ubi () {
multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}"
}
+IMAGE_TYPEDEP_ubi = "ubifs"
IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
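
MKUBIFS_ARGS and UBINIZE_ARGS are flash-geometry specific and belong in the machine configuration; illustrative values for a NAND with 2KiB pages and 128KiB erase blocks, in the spirit of the linux-mtd FAQ referenced above:

    MKUBIFS_ARGS = "-m 2048 -e 126976 -c 2047"
    UBINIZE_ARGS = "-m 2048 -p 128KiB -s 512"
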
-WKS_FILE ?= "${IMAGE_BASENAME}.${MACHINE}.wks"
-WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
-WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}"
-WKS_FULL_PATH = "${@wks_search('${WKS_FILES}'.split(), '${WKS_SEARCH_PATH}') or ''}"
-
-def wks_search(files, search_path):
- for f in files:
- if os.path.isabs(f):
- if os.path.exists(f):
- return f
- else:
- searched = bb.utils.which(search_path, f)
- if searched:
- return searched
-
-WIC_CREATE_EXTRA_ARGS ?= ""
-
-IMAGE_CMD_wic () {
- out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
- wks="${WKS_FULL_PATH}"
- if [ -z "$wks" ]; then
- bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
+MIN_F2FS_SIZE ?= "524288"
+IMAGE_CMD_f2fs () {
+ # We need additional smarts here for devices smaller than 1.5G.
+ # The extra space should scale appropriately between 40M -> 1.5G, as the
+ # "overprovision ratio" goes down as the device gets bigger (70% -> 4.5%).
+ # Below about 500M the standard IMAGE_OVERHEAD_FACTOR does not work, so
+ # add additional space here when under 500M.
+ size=${ROOTFS_SIZE}
+ if [ ${size} -lt ${MIN_F2FS_SIZE} ] ; then
+ size=${MIN_F2FS_SIZE}
+ bbwarn "Rootfs size is too small for F2FS. Filesystem will be extended to ${size}K"
fi
-
- BUILDDIR="${TOPDIR}" wic create "$wks" --vars "${STAGING_DIR_TARGET}/imgdata/" -e "${IMAGE_BASENAME}" -o "$out/" ${WIC_CREATE_EXTRA_ARGS}
- mv "$out/build/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
- rm -rf "$out/"
-}
-IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES"
-
-# Rebuild when the wks file or vars in WICVARS change
-USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
-WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
-do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}"
-
-python do_write_wks_template () {
- """Write out expanded template contents to WKS_FULL_PATH."""
- import re
-
- template_body = d.getVar('_WKS_TEMPLATE', True)
-
- # Remove any remnant variable references left behind by the expansion
- # due to undefined variables
- expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}")
- while True:
- new_body = re.sub(expand_var_regexp, '', template_body)
- if new_body == template_body:
- break
- else:
- template_body = new_body
-
- wks_file = d.getVar('WKS_FULL_PATH', True)
- with open(wks_file, 'w') as f:
- f.write(template_body)
-}
-
-python () {
- if d.getVar('USING_WIC', True):
- wks_file_u = d.getVar('WKS_FULL_PATH', False)
- wks_file = d.expand(wks_file_u)
- base, ext = os.path.splitext(wks_file)
- if ext == '.in' and os.path.exists(wks_file):
- wks_out_file = os.path.join(d.getVar('WORKDIR', True), os.path.basename(base))
- d.setVar('WKS_FULL_PATH', wks_out_file)
- d.setVar('WKS_TEMPLATE_PATH', wks_file_u)
- d.setVar('WKS_FILE_CHECKSUM', '${WKS_TEMPLATE_PATH}:True')
-
- try:
- with open(wks_file, 'r') as f:
- body = f.read()
- except (IOError, OSError) as exc:
- pass
- else:
- # Previously, I used expandWithRefs to get the dependency list
- # and add it to WICVARS, but there's no point re-parsing the
- # file in process_wks_template as well, so just put it in
- # a variable and let the metadata deal with the deps.
- d.setVar('_WKS_TEMPLATE', body)
- bb.build.addtask('do_write_wks_template', 'do_image_wic', None, d)
+ dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs seek=${size} count=0 bs=1024
+ mkfs.f2fs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
+ sload.f2fs -f ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
}
EXTRA_IMAGECMD = ""
-inherit siteinfo
-JFFS2_ENDIANNESS ?= "${@base_conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
+inherit siteinfo kernel-arch
+JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
JFFS2_ERASEBLOCK ?= "0x40000"
EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
@@ -277,23 +241,23 @@ EXTRA_IMAGECMD_ext2 ?= "-i 4096"
EXTRA_IMAGECMD_ext3 ?= "-i 4096"
EXTRA_IMAGECMD_ext4 ?= "-i 4096"
EXTRA_IMAGECMD_btrfs ?= "-n 4096"
-EXTRA_IMAGECMD_elf ?= ""
-
-IMAGE_DEPENDS = ""
-IMAGE_DEPENDS_jffs2 = "mtd-utils-native"
-IMAGE_DEPENDS_cramfs = "util-linux-native"
-IMAGE_DEPENDS_ext2 = "e2fsprogs-native"
-IMAGE_DEPENDS_ext3 = "e2fsprogs-native"
-IMAGE_DEPENDS_ext4 = "e2fsprogs-native"
-IMAGE_DEPENDS_btrfs = "btrfs-tools-native"
-IMAGE_DEPENDS_squashfs = "squashfs-tools-native"
-IMAGE_DEPENDS_squashfs-xz = "squashfs-tools-native"
-IMAGE_DEPENDS_squashfs-lzo = "squashfs-tools-native"
-IMAGE_DEPENDS_elf = "virtual/kernel mkelfimage-native"
-IMAGE_DEPENDS_ubi = "mtd-utils-native"
-IMAGE_DEPENDS_ubifs = "mtd-utils-native"
-IMAGE_DEPENDS_multiubi = "mtd-utils-native"
-IMAGE_DEPENDS_wic = "parted-native"
+EXTRA_IMAGECMD_f2fs ?= ""
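
All of these defaults use weak assignment, so a machine or distro configuration can override them without a bbappend; e.g. (illustrative values):

    EXTRA_IMAGECMD_ext4 = "-i 8192"
    EXTRA_IMAGECMD_jffs2 = "--pad --little-endian --eraseblock=0x20000"
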
+
+do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
+do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
+do_image_cramfs[depends] += "util-linux-native:do_populate_sysroot"
+do_image_ext2[depends] += "e2fsprogs-native:do_populate_sysroot"
+do_image_ext3[depends] += "e2fsprogs-native:do_populate_sysroot"
+do_image_ext4[depends] += "e2fsprogs-native:do_populate_sysroot"
+do_image_btrfs[depends] += "btrfs-tools-native:do_populate_sysroot"
+do_image_squashfs[depends] += "squashfs-tools-native:do_populate_sysroot"
+do_image_squashfs_xz[depends] += "squashfs-tools-native:do_populate_sysroot"
+do_image_squashfs_lzo[depends] += "squashfs-tools-native:do_populate_sysroot"
+do_image_squashfs_lz4[depends] += "squashfs-tools-native:do_populate_sysroot"
+do_image_ubi[depends] += "mtd-utils-native:do_populate_sysroot"
+do_image_ubifs[depends] += "mtd-utils-native:do_populate_sysroot"
+do_image_multiubi[depends] += "mtd-utils-native:do_populate_sysroot"
+do_image_f2fs[depends] += "f2fs-tools-native:do_populate_sysroot"
# This variable is available to request which values are suitable for IMAGE_FSTYPES
IMAGE_TYPES = " \
@@ -305,16 +269,13 @@ IMAGE_TYPES = " \
btrfs \
iso \
hddimg \
- squashfs squashfs-xz squashfs-lzo \
+ squashfs squashfs-xz squashfs-lzo squashfs-lz4 \
ubi ubifs multiubi \
- tar tar.gz tar.bz2 tar.xz tar.lz4 \
+ tar tar.gz tar.bz2 tar.xz tar.lz4 tar.zst \
cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 \
- vmdk \
- vdi \
- qcow2 \
- hdddirect \
- elf \
wic wic.gz wic.bz2 wic.lzma \
+ container \
+ f2fs \
"
# Compression is a special case of conversion. The old variable
@@ -323,13 +284,15 @@ IMAGE_TYPES = " \
# CONVERSION_CMD/DEPENDS.
COMPRESSIONTYPES ?= ""
-CONVERSIONTYPES = "gz bz2 lzma xz lz4 zip sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap ${COMPRESSIONTYPES}"
+CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vdi qcow2 base64 ${COMPRESSIONTYPES}"
CONVERSION_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
+CONVERSION_CMD_gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
CONVERSION_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
-CONVERSION_CMD_lz4 = "lz4c -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
+CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
+CONVERSION_CMD_lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
+CONVERSION_CMD_lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
CONVERSION_CMD_zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD_zst = "zstd -f -k -T0 -c ${ZSTD_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zst"
CONVERSION_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
CONVERSION_CMD_md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
CONVERSION_CMD_sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
@@ -338,27 +301,35 @@ CONVERSION_CMD_sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}
CONVERSION_CMD_sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
CONVERSION_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
CONVERSION_CMD_bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
+CONVERSION_CMD_u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
+CONVERSION_CMD_vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
+CONVERSION_CMD_vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
+CONVERSION_CMD_qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
+CONVERSION_CMD_base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
CONVERSION_DEPENDS_lzma = "xz-native"
-CONVERSION_DEPENDS_gz = ""
+CONVERSION_DEPENDS_gz = "pigz-native"
CONVERSION_DEPENDS_bz2 = "pbzip2-native"
CONVERSION_DEPENDS_xz = "xz-native"
CONVERSION_DEPENDS_lz4 = "lz4-native"
+CONVERSION_DEPENDS_lzo = "lzop-native"
CONVERSION_DEPENDS_zip = "zip-native"
+CONVERSION_DEPENDS_zst = "zstd-native"
CONVERSION_DEPENDS_sum = "mtd-utils-native"
CONVERSION_DEPENDS_bmap = "bmap-tools-native"
+CONVERSION_DEPENDS_u-boot = "u-boot-tools-native"
+CONVERSION_DEPENDS_vmdk = "qemu-system-native"
+CONVERSION_DEPENDS_vdi = "qemu-system-native"
+CONVERSION_DEPENDS_qcow2 = "qemu-system-native"
+CONVERSION_DEPENDS_base64 = "coreutils-native"
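
Conversions chain by suffix, so one IMAGE_FSTYPES entry can stack several of the commands above; assuming the default IMAGE_NAME_SUFFIX of ".rootfs", a sketch:

    IMAGE_FSTYPES += "ext4.gz.sha256sum"

    # Runs, in order:
    #   IMAGE_CMD_ext4           -> <name>.rootfs.ext4
    #   CONVERSION_CMD_gz        -> <name>.rootfs.ext4.gz
    #   CONVERSION_CMD_sha256sum -> <name>.rootfs.ext4.gz.sha256sum
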
RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
RUNNABLE_MACHINE_PATTERNS ?= "qemu"
DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
-# Use IMAGE_EXTENSION_xxx to map image type 'xxx' with real image file extension name(s) for Hob
-IMAGE_EXTENSION_live = "hddimg iso"
-
# The IMAGE_TYPES_MASKED variable will be used to mask out from the IMAGE_FSTYPES,
-# images that will not be built at do_rootfs time: vmdk, vdi, qcow2, hdddirect, hddimg, iso, etc.
+# images that will not be built at do_rootfs time: vmdk, vdi, qcow2, hddimg, iso, etc.
IMAGE_TYPES_MASKED ?= ""
-# The WICVARS variable is used to define list of bitbake variables used in wic code
-# variables from this list is written to <image>.env file
-WICVARS ?= "BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE HDDDIR IMAGE_BASENAME IMAGE_BOOT_FILES IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD ISODIR MACHINE_ARCH ROOTFS_SIZE STAGING_DATADIR STAGING_DIR_NATIVE STAGING_LIBDIR TARGET_SYS"
+# bmap requires python3 to be in the PATH
+EXTRANATIVEPATH += "${@'python3-native' if '.bmap' in d.getVar('IMAGE_FSTYPES') else ''}"
diff --git a/meta/classes/image_types_uboot.bbclass b/meta/classes/image_types_uboot.bbclass
deleted file mode 100644
index 6c8c1ff60e..0000000000
--- a/meta/classes/image_types_uboot.bbclass
+++ /dev/null
@@ -1,26 +0,0 @@
-inherit image_types kernel-arch
-
-oe_mkimage () {
- mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C $2 -n ${IMAGE_NAME} \
- -d ${IMGDEPLOYDIR}/$1 ${IMGDEPLOYDIR}/$1.u-boot
- if [ x$3 = x"clean" ]; then
- rm $1
- fi
-}
-
-CONVERSIONTYPES += "gz.u-boot bz2.u-boot lzma.u-boot u-boot"
-
-CONVERSION_DEPENDS_u-boot = "u-boot-mkimage-native"
-CONVERSION_CMD_u-boot = "oe_mkimage ${IMAGE_NAME}.rootfs.${type} none"
-
-CONVERSION_DEPENDS_gz.u-boot = "u-boot-mkimage-native"
-CONVERSION_CMD_gz.u-boot = "${CONVERSION_CMD_gz}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.gz gzip clean"
-
-CONVERSION_DEPENDS_bz2.u-boot = "u-boot-mkimage-native"
-CONVERSION_CMD_bz2.u-boot = "${CONVERSION_CMD_bz2}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.bz2 bzip2 clean"
-
-CONVERSION_DEPENDS_lzma.u-boot = "u-boot-mkimage-native"
-CONVERSION_CMD_lzma.u-boot = "${CONVERSION_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma clean"
-
-IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot cpio.gz.u-boot"
-
diff --git a/meta/classes/image_types_wic.bbclass b/meta/classes/image_types_wic.bbclass
new file mode 100644
index 0000000000..f350dc2723
--- /dev/null
+++ b/meta/classes/image_types_wic.bbclass
@@ -0,0 +1,142 @@
+# The WICVARS variable defines the list of bitbake variables used in the wic code;
+# variables from this list are written to the <image>.env file.
+WICVARS ?= "\
+ BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_BOOT_FILES \
+ IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \
+ ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS \
+ KERNEL_IMAGETYPE MACHINE INITRAMFS_IMAGE INITRAMFS_IMAGE_BUNDLE INITRAMFS_LINK_NAME"
+
+inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
+
+WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
+WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
+WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/wic' % p for p in '${BBPATH}'.split(':'))}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}"
+WKS_FULL_PATH = "${@wks_search(d.getVar('WKS_FILES').split(), d.getVar('WKS_SEARCH_PATH')) or ''}"
+
+def wks_search(files, search_path):
+ for f in files:
+ if os.path.isabs(f):
+ if os.path.exists(f):
+ return f
+ else:
+ searched = bb.utils.which(search_path, f)
+ if searched:
+ return searched
+
+WIC_CREATE_EXTRA_ARGS ?= ""
+
+IMAGE_CMD_wic () {
+ out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
+ wks="${WKS_FULL_PATH}"
+ if [ -z "$wks" ]; then
+ bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
+ fi
+
+ BUILDDIR="${TOPDIR}" wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$out/" ${WIC_CREATE_EXTRA_ARGS}
+ mv "$out/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
+ rm -rf "$out/"
+}
+IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
+
+# Rebuild when the wks file or vars in WICVARS change
+USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
+WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
+do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}"
+do_image_wic[depends] += "${@' '.join('%s-native:do_populate_sysroot' % r for r in ('parted', 'gptfdisk', 'dosfstools', 'mtools'))}"
+
+# We ensure all artifacts are deployed (e.g. virtual/bootloader)
+do_image_wic[recrdeptask] += "do_deploy"
+
+WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
+WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native"
+WKS_FILE_DEPENDS_BOOTLOADERS = ""
+WKS_FILE_DEPENDS_BOOTLOADERS_x86 = "syslinux grub-efi systemd-boot"
+WKS_FILE_DEPENDS_BOOTLOADERS_x86-64 = "syslinux grub-efi systemd-boot"
+WKS_FILE_DEPENDS_BOOTLOADERS_x86-x32 = "syslinux grub-efi"
+
+WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}"
+
+DEPENDS += "${@ '${WKS_FILE_DEPENDS}' if d.getVar('USING_WIC') else '' }"
+
+python do_write_wks_template () {
+ """Write out expanded template contents to WKS_FULL_PATH."""
+ import re
+
+ template_body = d.getVar('_WKS_TEMPLATE')
+
+ # Remove any remnant variable references left behind by the expansion
+ # due to undefined variables
+ expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}")
+ while True:
+ new_body = re.sub(expand_var_regexp, '', template_body)
+ if new_body == template_body:
+ break
+ else:
+ template_body = new_body
+
+ wks_file = d.getVar('WKS_FULL_PATH')
+ with open(wks_file, 'w') as f:
+ f.write(template_body)
+ # Copy the finalized wks file to the deploy directory for later use
+ depdir = d.getVar('IMGDEPLOYDIR')
+ basename = d.getVar('IMAGE_BASENAME')
+ bb.utils.copyfile(wks_file, "%s/%s" % (depdir, basename + '-' + os.path.basename(wks_file)))
+}
+
+python () {
+ if d.getVar('USING_WIC'):
+ wks_file_u = d.getVar('WKS_FULL_PATH', False)
+ wks_file = d.expand(wks_file_u)
+ base, ext = os.path.splitext(wks_file)
+ if ext == '.in' and os.path.exists(wks_file):
+ wks_out_file = os.path.join(d.getVar('WORKDIR'), os.path.basename(base))
+ d.setVar('WKS_FULL_PATH', wks_out_file)
+ d.setVar('WKS_TEMPLATE_PATH', wks_file_u)
+ d.setVar('WKS_FILE_CHECKSUM', '${WKS_TEMPLATE_PATH}:True')
+
+ # We need to re-parse each time the file changes, and bitbake
+ # needs to be told about that explicitly.
+ bb.parse.mark_dependency(d, wks_file)
+
+ try:
+ with open(wks_file, 'r') as f:
+ body = f.read()
+ except (IOError, OSError) as exc:
+ pass
+ else:
+ # Previously, I used expandWithRefs to get the dependency list
+ # and add it to WICVARS, but there's no point re-parsing the
+ # file in process_wks_template as well, so just put it in
+ # a variable and let the metadata deal with the deps.
+ d.setVar('_WKS_TEMPLATE', body)
+ bb.build.addtask('do_write_wks_template', 'do_image_wic', 'do_image', d)
+ bb.build.addtask('do_image_wic', 'do_image_complete', None, d)
+}
+
+#
+# Write environment variables used by wic
+# to tmp/sysroots/<machine>/imgdata/<image>.env
+#
+python do_rootfs_wicenv () {
+ wicvars = d.getVar('WICVARS')
+ if not wicvars:
+ return
+
+ stdir = d.getVar('STAGING_DIR')
+ outdir = os.path.join(stdir, d.getVar('MACHINE'), 'imgdata')
+ bb.utils.mkdirhier(outdir)
+ basename = d.getVar('IMAGE_BASENAME')
+ with open(os.path.join(outdir, basename) + '.env', 'w') as envf:
+ for var in wicvars.split():
+ value = d.getVar(var)
+ if value:
+ envf.write('%s="%s"\n' % (var, value.strip()))
+ # Copy .env file to deploy directory for later use with standalone wic
+ depdir = d.getVar('IMGDEPLOYDIR')
+ bb.utils.copyfile(os.path.join(outdir, basename) + '.env', os.path.join(depdir, basename) + '.env')
+}
+addtask do_rootfs_wicenv after do_image before do_image_wic
+do_rootfs_wicenv[vardeps] += "${WICVARS}"
+do_rootfs_wicenv[prefuncs] = 'set_image_size'
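
The resulting <image>.env file is a flat list of KEY="value" pairs for those WICVARS that were set; an abbreviated, hypothetical example:

    IMAGE_BASENAME="core-image-minimal"
    MACHINE="qemux86-64"
    ROOTFS_SIZE="130048"
    TARGET_SYS="x86_64-poky-linux"
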
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index 2f3f768d58..0564f9c2a4 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -16,13 +16,8 @@
# into exec_prefix
# -Check that scripts in base_[bindir|sbindir|libdir] do not reference
# files under exec_prefix
+# -Check if the package name is upper case
-
-# unsafe-references-in-binaries requires prelink-rtld from
-# prelink-native, but we don't want this DEPENDS for -native builds
-QADEPENDS = "prelink-native"
-QADEPENDS_class-native = ""
-QADEPENDS_class-nativesdk = ""
QA_SANE = "True"
# Elect whether a given type of error is a warning or error, they may
@@ -30,16 +25,20 @@ QA_SANE = "True"
WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \
textrel already-stripped incompatible-license files-invalid \
installed-vs-shipped compile-host-path install-host-path \
- pn-overrides infodir build-deps file-rdeps \
+ pn-overrides infodir build-deps src-uri-bad \
unknown-configure-option symlink-to-sysroot multilib \
- invalid-packageconfig host-user-contaminated \
+ invalid-packageconfig host-user-contaminated uppercase-pn patch-fuzz \
"
ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
split-strip packages-list pkgv-undefined var-undefined \
version-going-backwards expanded-d invalid-chars \
- license-checksum dev-elf \
+ license-checksum dev-elf file-rdeps configure-unsafe \
+ configure-gettext perllocalpod \
"
+# Add usrmerge QA check based on distro feature
+ERROR_QA_append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
+
FAKEROOT_QA = "host-user-contaminated"
FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
enabled tests are listed here, the do_package_qa task will run under fakeroot."
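
Both lists are policy and can be tuned per distro, while INSANE_SKIP silences a named test for one package in a single recipe; a sketch:

    # distro configuration: treat file-rdeps as a warning rather than an error
    ERROR_QA_remove = "file-rdeps"
    WARN_QA_append = " file-rdeps"

    # recipe: skip the ldflags test for the main package only
    INSANE_SKIP_${PN} += "ldflags"
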
@@ -48,160 +47,30 @@ ALL_QA = "${WARN_QA} ${ERROR_QA}"
UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
-#
-# dictionary for elf headers
-#
-# feel free to add and correct.
-#
-# TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
-def package_qa_get_machine_dict(d):
- machdata = {
- "darwin9" : {
- "arm" : (40, 0, 0, True, 32),
- },
- "eabi" : {
- "arm" : (40, 0, 0, True, 32),
- },
- "elf" : {
- "i586" : (3, 0, 0, True, 32),
- "x86_64": (62, 0, 0, True, 64),
- "epiphany": (4643, 0, 0, True, 32),
- },
- "linux" : {
- "aarch64" : (183, 0, 0, True, 64),
- "aarch64_be" :(183, 0, 0, False, 64),
- "arm" : (40, 97, 0, True, 32),
- "armeb": (40, 97, 0, False, 32),
- "powerpc": (20, 0, 0, False, 32),
- "powerpc64": (21, 0, 0, False, 64),
- "i386": ( 3, 0, 0, True, 32),
- "i486": ( 3, 0, 0, True, 32),
- "i586": ( 3, 0, 0, True, 32),
- "i686": ( 3, 0, 0, True, 32),
- "x86_64": (62, 0, 0, True, 64),
- "ia64": (50, 0, 0, True, 64),
- "alpha": (36902, 0, 0, True, 64),
- "hppa": (15, 3, 0, False, 32),
- "m68k": ( 4, 0, 0, False, 32),
- "mips": ( 8, 0, 0, False, 32),
- "mipsel": ( 8, 0, 0, True, 32),
- "mips64": ( 8, 0, 0, False, 64),
- "mips64el": ( 8, 0, 0, True, 64),
- "nios2": (113, 0, 0, True, 32),
- "s390": (22, 0, 0, False, 32),
- "sh4": (42, 0, 0, True, 32),
- "sparc": ( 2, 0, 0, False, 32),
- "microblaze": (189, 0, 0, False, 32),
- "microblazeeb":(189, 0, 0, False, 32),
- "microblazeel":(189, 0, 0, True, 32),
- },
- "linux-uclibc" : {
- "arm" : ( 40, 97, 0, True, 32),
- "armeb": ( 40, 97, 0, False, 32),
- "powerpc": ( 20, 0, 0, False, 32),
- "i386": ( 3, 0, 0, True, 32),
- "i486": ( 3, 0, 0, True, 32),
- "i586": ( 3, 0, 0, True, 32),
- "i686": ( 3, 0, 0, True, 32),
- "x86_64": ( 62, 0, 0, True, 64),
- "mips": ( 8, 0, 0, False, 32),
- "mipsel": ( 8, 0, 0, True, 32),
- "mips64": ( 8, 0, 0, False, 64),
- "mips64el": ( 8, 0, 0, True, 64),
- "avr32": (6317, 0, 0, False, 32),
- "sh4": (42, 0, 0, True, 32),
-
- },
- "linux-musl" : {
- "aarch64" : (183, 0, 0, True, 64),
- "aarch64_be" :(183, 0, 0, False, 64),
- "arm" : ( 40, 97, 0, True, 32),
- "armeb": ( 40, 97, 0, False, 32),
- "powerpc": ( 20, 0, 0, False, 32),
- "i386": ( 3, 0, 0, True, 32),
- "i486": ( 3, 0, 0, True, 32),
- "i586": ( 3, 0, 0, True, 32),
- "i686": ( 3, 0, 0, True, 32),
- "x86_64": ( 62, 0, 0, True, 64),
- "mips": ( 8, 0, 0, False, 32),
- "mipsel": ( 8, 0, 0, True, 32),
- "mips64": ( 8, 0, 0, False, 64),
- "mips64el": ( 8, 0, 0, True, 64),
- "microblaze": (189, 0, 0, False, 32),
- "microblazeeb":(189, 0, 0, False, 32),
- "microblazeel":(189, 0, 0, True, 32),
- },
- "uclinux-uclibc" : {
- "bfin": ( 106, 0, 0, True, 32),
- },
- "linux-gnueabi" : {
- "arm" : (40, 0, 0, True, 32),
- "armeb" : (40, 0, 0, False, 32),
- },
- "linux-musleabi" : {
- "arm" : (40, 0, 0, True, 32),
- "armeb" : (40, 0, 0, False, 32),
- },
- "linux-uclibceabi" : {
- "arm" : (40, 0, 0, True, 32),
- "armeb" : (40, 0, 0, False, 32),
- },
- "linux-gnuspe" : {
- "powerpc": (20, 0, 0, False, 32),
- },
- "linux-muslspe" : {
- "powerpc": (20, 0, 0, False, 32),
- },
- "linux-uclibcspe" : {
- "powerpc": (20, 0, 0, False, 32),
- },
- "linux-gnu" : {
- "powerpc": (20, 0, 0, False, 32),
- "sh4": (42, 0, 0, True, 32),
- },
- "linux-gnux32" : {
- "x86_64": (62, 0, 0, True, 32),
- },
- "linux-gnun32" : {
- "mips64": ( 8, 0, 0, False, 32),
- "mips64el": ( 8, 0, 0, True, 32),
- },
- }
-
- # Add in any extra user supplied data which may come from a BSP layer, removing the
- # need to always change this class directly
- extra_machdata = (d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS", True) or "").split()
- for m in extra_machdata:
- call = m + "(machdata, d)"
- locs = { "machdata" : machdata, "d" : d}
- machdata = bb.utils.better_eval(call, locs)
-
- return machdata
-
-
def package_qa_clean_path(path, d, pkg=None):
"""
Remove redundant paths from the path for display. If pkg isn't set then
TMPDIR is stripped, otherwise PKGDEST/pkg is stripped.
"""
if pkg:
- path = path.replace(os.path.join(d.getVar("PKGDEST", True), pkg), "/")
- return path.replace(d.getVar("TMPDIR", True), "/").replace("//", "/")
+ path = path.replace(os.path.join(d.getVar("PKGDEST"), pkg), "/")
+ return path.replace(d.getVar("TMPDIR"), "/").replace("//", "/")
def package_qa_write_error(type, error, d):
- logfile = d.getVar('QA_LOGFILE', True)
+ logfile = d.getVar('QA_LOGFILE')
if logfile:
- p = d.getVar('P', True)
+ p = d.getVar('P')
with open(logfile, "a+") as f:
f.write("%s: %s [%s]\n" % (p, error, type))
def package_qa_handle_error(error_class, error_msg, d):
- package_qa_write_error(error_class, error_msg, d)
- if error_class in (d.getVar("ERROR_QA", True) or "").split():
+ if error_class in (d.getVar("ERROR_QA") or "").split():
+ package_qa_write_error(error_class, error_msg, d)
bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
d.setVar("QA_SANE", False)
return False
- elif error_class in (d.getVar("WARN_QA", True) or "").split():
+ elif error_class in (d.getVar("WARN_QA") or "").split():
+ package_qa_write_error(error_class, error_msg, d)
bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
else:
bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
@@ -217,7 +86,7 @@ QAPATHTEST[libexec] = "package_qa_check_libexec"
def package_qa_check_libexec(path,name, d, elf, messages):
# Skip the case where the default is explicitly /usr/libexec
- libexec = d.getVar('libexecdir', True)
+ libexec = d.getVar('libexecdir')
if libexec == "/usr/libexec":
return True
@@ -238,12 +107,12 @@ def package_qa_check_rpath(file,name, d, elf, messages):
if os.path.islink(file):
return
- bad_dirs = [d.getVar('BASE_WORKDIR', True), d.getVar('STAGING_DIR_TARGET', True)]
+ bad_dirs = [d.getVar('BASE_WORKDIR'), d.getVar('STAGING_DIR_TARGET')]
phdrs = elf.run_objdump("-p", d)
import re
- rpath_re = re.compile("\s+RPATH\s+(.*)")
+ rpath_re = re.compile(r"\s+RPATH\s+(.*)")
for line in phdrs.split("\n"):
m = rpath_re.match(line)
if m:
@@ -266,13 +135,13 @@ def package_qa_check_useless_rpaths(file, name, d, elf, messages):
if os.path.islink(file):
return
- libdir = d.getVar("libdir", True)
- base_libdir = d.getVar("base_libdir", True)
+ libdir = d.getVar("libdir")
+ base_libdir = d.getVar("base_libdir")
phdrs = elf.run_objdump("-p", d)
import re
- rpath_re = re.compile("\s+RPATH\s+(.*)")
+ rpath_re = re.compile(r"\s+RPATH\s+(.*)")
for line in phdrs.split("\n"):
m = rpath_re.match(line)
if m:
@@ -324,29 +193,29 @@ def package_qa_check_libdir(d):
"""
import re
- pkgdest = d.getVar('PKGDEST', True)
- base_libdir = d.getVar("base_libdir",True) + os.sep
- libdir = d.getVar("libdir", True) + os.sep
- libexecdir = d.getVar("libexecdir", True) + os.sep
- exec_prefix = d.getVar("exec_prefix", True) + os.sep
+ pkgdest = d.getVar('PKGDEST')
+ base_libdir = d.getVar("base_libdir") + os.sep
+ libdir = d.getVar("libdir") + os.sep
+ libexecdir = d.getVar("libexecdir") + os.sep
+ exec_prefix = d.getVar("exec_prefix") + os.sep
messages = []
# The re's are purposely fuzzy, as there are some .so.x.y.z files
# that don't follow the standard naming convention. It checks later
# that they are actual ELF files
- lib_re = re.compile("^/lib.+\.so(\..+)?$")
- exec_re = re.compile("^%s.*/lib.+\.so(\..+)?$" % exec_prefix)
+ lib_re = re.compile(r"^/lib.+\.so(\..+)?$")
+ exec_re = re.compile(r"^%s.*/lib.+\.so(\..+)?$" % exec_prefix)
for root, dirs, files in os.walk(pkgdest):
if root == pkgdest:
# Skip subdirectories for any packages with libdir in INSANE_SKIP
skippackages = []
for package in dirs:
- if 'libdir' in (d.getVar('INSANE_SKIP_' + package, True) or "").split():
+ if 'libdir' in (d.getVar('INSANE_SKIP_' + package) or "").split():
bb.note("Package %s skipping libdir QA test" % (package))
skippackages.append(package)
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory' and package.endswith("-dbg"):
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory' and package.endswith("-dbg"):
bb.note("Package %s skipping libdir QA test for PACKAGE_DEBUG_SPLIT_STYLE equals debug-file-directory" % (package))
skippackages.append(package)
for package in skippackages:
@@ -387,138 +256,26 @@ def package_qa_check_dbg(path, name, d, elf, messages):
if not "-dbg" in name and not "-ptest" in name:
if '.debug' in path.split(os.path.sep):
- messages("debug-files", "non debug package contains .debug directory: %s path %s" % \
+ package_qa_add_message(messages, "debug-files", "non debug package contains .debug directory: %s path %s" % \
(name, package_qa_clean_path(path,d)))
-QAPATHTEST[perms] = "package_qa_check_perm"
-def package_qa_check_perm(path,name,d, elf, messages):
- """
- Check the permission of files
- """
- return
-
-QAPATHTEST[unsafe-references-in-binaries] = "package_qa_check_unsafe_references_in_binaries"
-def package_qa_check_unsafe_references_in_binaries(path, name, d, elf, messages):
- """
- Ensure binaries in base_[bindir|sbindir|libdir] do not link to files under exec_prefix
- """
- if unsafe_references_skippable(path, name, d):
- return
-
- if elf:
- import subprocess as sub
- pn = d.getVar('PN', True)
-
- exec_prefix = d.getVar('exec_prefix', True)
- sysroot_path = d.getVar('STAGING_DIR_TARGET', True)
- sysroot_path_usr = sysroot_path + exec_prefix
-
- try:
- ldd_output = bb.process.Popen(["prelink-rtld", "--root", sysroot_path, path], stdout=sub.PIPE).stdout.read().decode("utf-8")
- except bb.process.CmdError:
- error_msg = pn + ": prelink-rtld aborted when processing %s" % path
- package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
- return False
-
- if sysroot_path_usr in ldd_output:
- ldd_output = ldd_output.replace(sysroot_path, "")
-
- pkgdest = d.getVar('PKGDEST', True)
- packages = d.getVar('PACKAGES', True)
-
- for package in packages.split():
- short_path = path.replace('%s/%s' % (pkgdest, package), "", 1)
- if (short_path != path):
- break
-
- base_err = pn + ": %s, installed in the base_prefix, requires a shared library under exec_prefix (%s)" % (short_path, exec_prefix)
- for line in ldd_output.split('\n'):
- if exec_prefix in line:
- error_msg = "%s: %s" % (base_err, line.strip())
- package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
-
- return False
-
-QAPATHTEST[unsafe-references-in-scripts] = "package_qa_check_unsafe_references_in_scripts"
-def package_qa_check_unsafe_references_in_scripts(path, name, d, elf, messages):
- """
- Warn if scripts in base_[bindir|sbindir|libdir] reference files under exec_prefix
- """
- if unsafe_references_skippable(path, name, d):
- return
-
- if not elf:
- import stat
- import subprocess
- pn = d.getVar('PN', True)
-
- # Ensure we're checking an executable script
- statinfo = os.stat(path)
- if bool(statinfo.st_mode & stat.S_IXUSR):
- # grep shell scripts for possible references to /exec_prefix/
- exec_prefix = d.getVar('exec_prefix', True)
- statement = "grep -e '%s/[^ :]\{1,\}/[^ :]\{1,\}' %s > /dev/null" % (exec_prefix, path)
- if subprocess.call(statement, shell=True) == 0:
- error_msg = pn + ": Found a reference to %s/ in %s" % (exec_prefix, path)
- package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
- error_msg = "Shell scripts in base_bindir and base_sbindir should not reference anything in exec_prefix"
- package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
-
-def unsafe_references_skippable(path, name, d):
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d):
- return True
-
- if "-dbg" in name or "-dev" in name:
- return True
-
- # Other package names to skip:
- if name.startswith("kernel-module-"):
- return True
-
- # Skip symlinks
- if os.path.islink(path):
- return True
-
- # Skip unusual rootfs layouts which make these tests irrelevant
- exec_prefix = d.getVar('exec_prefix', True)
- if exec_prefix == "":
- return True
-
- pkgdest = d.getVar('PKGDEST', True)
- pkgdest = pkgdest + "/" + name
- pkgdest = os.path.abspath(pkgdest)
- base_bindir = pkgdest + d.getVar('base_bindir', True)
- base_sbindir = pkgdest + d.getVar('base_sbindir', True)
- base_libdir = pkgdest + d.getVar('base_libdir', True)
- bindir = pkgdest + d.getVar('bindir', True)
- sbindir = pkgdest + d.getVar('sbindir', True)
- libdir = pkgdest + d.getVar('libdir', True)
-
- if base_bindir == bindir and base_sbindir == sbindir and base_libdir == libdir:
- return True
-
- # Skip files not in base_[bindir|sbindir|libdir]
- path = os.path.abspath(path)
- if not (base_bindir in path or base_sbindir in path or base_libdir in path):
- return True
-
- return False
-
QAPATHTEST[arch] = "package_qa_check_arch"
def package_qa_check_arch(path,name,d, elf, messages):
"""
Check if archs are compatible
"""
+ import re, oe.elf
+
if not elf:
return
- target_os = d.getVar('TARGET_OS', True)
- target_arch = d.getVar('TARGET_ARCH', True)
- provides = d.getVar('PROVIDES', True)
- bpn = d.getVar('BPN', True)
+ target_os = d.getVar('TARGET_OS')
+ target_arch = d.getVar('TARGET_ARCH')
+ provides = d.getVar('PROVIDES')
+ bpn = d.getVar('BPN')
if target_arch == "allarch":
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
package_qa_add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
return
@@ -534,18 +291,20 @@ def package_qa_check_arch(path,name,d, elf, messages):
#if this will throw an exception, then fix the dict above
(machine, osabi, abiversion, littleendian, bits) \
- = package_qa_get_machine_dict(d)[target_os][target_arch]
+ = oe.elf.machine_dict(d)[target_os][target_arch]
# Check the architecture and endiannes of the binary
- if not ((machine == elf.machine()) or \
- ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))):
- package_qa_add_message(messages, "arch", "Architecture did not match (%d to %d) on %s" % \
- (machine, elf.machine(), package_qa_clean_path(path,d)))
- elif not ((bits == elf.abiSize()) or \
- ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))):
+ is_32 = (("virtual/kernel" in provides) or bb.data.inherits_class("module", d)) and \
+ (target_os == "linux-gnux32" or target_os == "linux-muslx32" or \
+ target_os == "linux-gnu_ilp32" or re.match(r'mips64.*32', d.getVar('DEFAULTTUNE')))
+ is_bpf = (oe.qa.elf_machine_to_string(elf.machine()) == "BPF")
+ if not ((machine == elf.machine()) or is_32 or is_bpf):
+ package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) on %s" % \
+ (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path,d)))
+ elif not ((bits == elf.abiSize()) or is_32 or is_bpf):
package_qa_add_message(messages, "arch", "Bit size did not match (%d to %d) %s on %s" % \
(bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
- elif not littleendian == elf.isLittleEndian():
+ elif not ((littleendian == elf.isLittleEndian()) or is_bpf):
package_qa_add_message(messages, "arch", "Endiannes did not match (%d to %d) on %s" % \
(littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
@@ -555,7 +314,7 @@ def package_qa_check_desktop(path, name, d, elf, messages):
Run all desktop files through desktop-file-validate.
"""
if path.endswith(".desktop"):
- desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE',True),'desktop-file-validate')
+ desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE'),'desktop-file-validate')
output = os.popen("%s %s" % (desktop_file_validate, path))
# This only produces output on errors
for l in output:
@@ -577,13 +336,15 @@ def package_qa_textrel(path, name, d, elf, messages):
sane = True
import re
- textrel_re = re.compile("\s+TEXTREL\s+")
+ textrel_re = re.compile(r"\s+TEXTREL\s+")
for line in phdrs.split("\n"):
if textrel_re.match(line):
sane = False
+ break
if not sane:
- package_qa_add_message(messages, "textrel", "ELF binary '%s' has relocations in .text" % path)
+ path = package_qa_clean_path(path, d, name)
+ package_qa_add_message(messages, "textrel", "%s: ELF binary %s has relocations in .text" % (name, path))
QAPATHTEST[ldflags] = "package_qa_hash_style"
def package_qa_hash_style(path, name, d, elf, messages):
@@ -597,9 +358,9 @@ def package_qa_hash_style(path, name, d, elf, messages):
if os.path.islink(path):
return
- gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS', True)
+ gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS')
if not gnu_hash:
- gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS', True)
+ gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS')
if not gnu_hash:
return
@@ -618,7 +379,7 @@ def package_qa_hash_style(path, name, d, elf, messages):
sane = True
if has_syms and not sane:
- package_qa_add_message(messages, "ldflags", "No GNU_HASH in the elf binary: '%s'" % path)
+ package_qa_add_message(messages, "ldflags", "No GNU_HASH in the ELF binary %s, didn't pass LDFLAGS?" % path)
QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
@@ -638,11 +399,12 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
if path.find(name + "/CONTROL/") != -1 or path.find(name + "/DEBIAN/") != -1:
return
- tmpdir = d.getVar('TMPDIR', True)
- with open(path) as f:
+ tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
+ with open(path, 'rb') as f:
file_content = f.read()
if tmpdir in file_content:
- package_qa_add_message(messages, "buildpaths", "File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d))
+ trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
+ package_qa_add_message(messages, "buildpaths", "File %s in package %s contains reference to TMPDIR" % (trimmed, name))
QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
@@ -657,8 +419,8 @@ def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
driverdir = d.expand("${libdir}/xorg/modules/drivers/")
if driverdir in path and path.endswith(".so"):
- mlprefix = d.getVar('MLPREFIX', True) or ''
- for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name, True) or ""):
+ mlprefix = d.getVar('MLPREFIX') or ''
+ for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name) or ""):
if rdep.startswith("%sxorg-abi-" % mlprefix):
return
package_qa_add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
@@ -681,9 +443,9 @@ def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
if os.path.islink(path):
target = os.readlink(path)
if os.path.isabs(target):
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
if target.startswith(tmpdir):
- trimmed = path.replace(os.path.join (d.getVar("PKGDEST", True), name), "")
+ trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
package_qa_add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
# Check license variables
@@ -692,32 +454,34 @@ python populate_lic_qa_checksum() {
"""
Check for changes in the license files.
"""
- import tempfile
sane = True
- lic_files = d.getVar('LIC_FILES_CHKSUM', True) or ''
- lic = d.getVar('LICENSE', True)
- pn = d.getVar('PN', True)
+ lic_files = d.getVar('LIC_FILES_CHKSUM') or ''
+ lic = d.getVar('LICENSE')
+ pn = d.getVar('PN')
if lic == "CLOSED":
return
- if not lic_files and d.getVar('SRC_URI', True):
- sane = package_qa_handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
-
- srcdir = d.getVar('S', True)
+ if not lic_files and d.getVar('SRC_URI'):
+ sane &= package_qa_handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
+ srcdir = d.getVar('S')
+ corebase_licensefile = d.getVar('COREBASE') + "/LICENSE"
for url in lic_files.split():
try:
(type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
except bb.fetch.MalformedUrl:
- sane = package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
+ sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
continue
srclicfile = os.path.join(srcdir, path)
if not os.path.isfile(srclicfile):
- package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
+ sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
continue
+ if (srclicfile == corebase_licensefile):
+ bb.warn("${COREBASE}/LICENSE is not a valid license file, please use '${COMMON_LICENSE_DIR}/MIT' for a MIT License file in LIC_FILES_CHKSUM. This will become an error in the future")
+
recipemd5 = parm.get('md5', '')
beginline, endline = 0, 0
if 'beginline' in parm:
@@ -727,32 +491,45 @@ python populate_lic_qa_checksum() {
if (not beginline) and (not endline):
md5chksum = bb.utils.md5_file(srclicfile)
+ with open(srclicfile, 'r', errors='replace') as f:
+ license = f.read().splitlines()
else:
- fi = open(srclicfile, 'rb')
- fo = tempfile.NamedTemporaryFile(mode='wb', prefix='poky.', suffix='.tmp', delete=False)
- tmplicfile = fo.name;
- lineno = 0
- linesout = 0
- for line in fi:
- lineno += 1
- if (lineno >= beginline):
- if ((lineno <= endline) or not endline):
- fo.write(line)
- linesout += 1
- else:
- break
- fo.flush()
- fo.close()
- fi.close()
- md5chksum = bb.utils.md5_file(tmplicfile)
- os.unlink(tmplicfile)
-
+ with open(srclicfile, 'rb') as f:
+ import hashlib
+ lineno = 0
+ license = []
+ m = hashlib.md5()
+ for line in f:
+ lineno += 1
+ if (lineno >= beginline):
+ if ((lineno <= endline) or not endline):
+ m.update(line)
+ license.append(line.decode('utf-8', errors='replace').rstrip())
+ else:
+ break
+ md5chksum = m.hexdigest()
if recipemd5 == md5chksum:
bb.note (pn + ": md5 checksum matched for ", url)
else:
if recipemd5:
msg = pn + ": The LIC_FILES_CHKSUM does not match for " + url
msg = msg + "\n" + pn + ": The new md5 checksum is " + md5chksum
+ max_lines = int(d.getVar('QA_MAX_LICENSE_LINES') or 20)
+ if not license or license[-1] != '':
+ # Ensure that our license text ends with a line break
+ # (will be added with join() below).
+ license.append('')
+ remove = len(license) - max_lines
+ if remove > 0:
+ start = max_lines // 2
+ end = start + remove - 1
+ del license[start:end]
+ license.insert(start, '...')
+ msg = msg + "\n" + pn + ": Here is the selected license text:" + \
+ "\n" + \
+ "{:v^70}".format(" beginline=%d " % beginline if beginline else "") + \
+ "\n" + "\n".join(license) + \
+ "{:^^70}".format(" endline=%d " % endline if endline else "")
if beginline:
if endline:
srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline)
@@ -767,13 +544,13 @@ python populate_lic_qa_checksum() {
else:
msg = pn + ": LIC_FILES_CHKSUM is not specified for " + url
msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
- sane = package_qa_handle_error("license-checksum", msg, d)
+ sane &= package_qa_handle_error("license-checksum", msg, d)
if not sane:
bb.fatal("Fatal QA errors found, failing task.")
}
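The rewritten branch above drops the old temporary-file round trip in favour of hashing the requested line range incrementally; the same idea as a self-contained sketch:

    import hashlib

    def md5_of_line_range(path, beginline=0, endline=0):
        # MD5 over a 1-based, inclusive line range; 0 means 'unbounded',
        # matching the beginline/endline semantics of LIC_FILES_CHKSUM.
        m = hashlib.md5()
        with open(path, 'rb') as f:
            for lineno, line in enumerate(f, start=1):
                if lineno < beginline:
                    continue
                if endline and lineno > endline:
                    break
                m.update(line)
        return m.hexdigest()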
-def package_qa_check_staged(path,d):
+def qa_check_staged(path,d):
"""
Check staged la and pc files for common problems like references to the work
directory.
@@ -783,42 +560,90 @@ def package_qa_check_staged(path,d):
"""
sane = True
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
workdir = os.path.join(tmpdir, "work")
+ recipesysroot = d.getVar("RECIPE_SYSROOT")
if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
pkgconfigcheck = workdir
else:
pkgconfigcheck = tmpdir
+ skip = (d.getVar('INSANE_SKIP') or "").split()
+ skip_la = False
+ if 'la' in skip:
+ bb.note("Recipe %s skipping qa checking: la" % d.getVar('PN'))
+ skip_la = True
+
+ skip_pkgconfig = False
+ if 'pkgconfig' in skip:
+ bb.note("Recipe %s skipping qa checking: pkgconfig" % d.getVar('PN'))
+ skip_pkgconfig = True
+
# find all .la and .pc files
# read the content
# and check for stuff that looks wrong
for root, dirs, files in os.walk(path):
for file in files:
path = os.path.join(root,file)
- if file.endswith(".la"):
+ if file.endswith(".la") and not skip_la:
with open(path) as f:
file_content = f.read()
+ file_content = file_content.replace(recipesysroot, "")
if workdir in file_content:
error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
- sane = package_qa_handle_error("la", error_msg, d)
- elif file.endswith(".pc"):
+ sane &= package_qa_handle_error("la", error_msg, d)
+ elif file.endswith(".pc") and not skip_pkgconfig:
with open(path) as f:
file_content = f.read()
+ file_content = file_content.replace(recipesysroot, "")
if pkgconfigcheck in file_content:
error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
- sane = package_qa_handle_error("pkgconfig", error_msg, d)
+ sane &= package_qa_handle_error("pkgconfig", error_msg, d)
return sane
+# Run all package-wide warnfuncs and errorfuncs
+def package_qa_package(warnfuncs, errorfuncs, package, d):
+ warnings = {}
+ errors = {}
+
+ for func in warnfuncs:
+ func(package, d, warnings)
+ for func in errorfuncs:
+ func(package, d, errors)
+
+ for w in warnings:
+ package_qa_handle_error(w, warnings[w], d)
+ for e in errors:
+ package_qa_handle_error(e, errors[e], d)
+
+ return len(errors) == 0
+
+# Run all recipe-wide warnfuncs and errorfuncs
+def package_qa_recipe(warnfuncs, errorfuncs, pn, d):
+ warnings = {}
+ errors = {}
+
+ for func in warnfuncs:
+ func(pn, d, warnings)
+ for func in errorfuncs:
+ func(pn, d, errors)
+
+ for w in warnings:
+ package_qa_handle_error(w, warnings[w], d)
+ for e in errors:
+ package_qa_handle_error(e, errors[e], d)
+
+ return len(errors) == 0
+
# Walk over all files in a directory and call func
-def package_qa_walk(warnfuncs, errorfuncs, skip, package, d):
+def package_qa_walk(warnfuncs, errorfuncs, package, d):
import oe.qa
# If this throws an exception, fix the dict above
- target_os = d.getVar('TARGET_OS', True)
- target_arch = d.getVar('TARGET_ARCH', True)
+ target_os = d.getVar('TARGET_OS')
+ target_arch = d.getVar('TARGET_ARCH')
warnings = {}
errors = {}
@@ -847,11 +672,10 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg:
localdata = bb.data.createCopy(d)
- localdata.setVar('OVERRIDES', pkg)
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', localdata.getVar('OVERRIDES') + ':' + pkg)
# Now check the RDEPENDS
- rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS', True) or "")
+ rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS') or "")
# Now do the sanity check!!!
if "build-deps" not in skip:
@@ -867,7 +691,7 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
continue
if not rdep_data or not 'PN' in rdep_data:
- pkgdata_dir = d.getVar("PKGDATA_DIR", True)
+ pkgdata_dir = d.getVar("PKGDATA_DIR")
try:
possibles = os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdepend))
except OSError:
@@ -887,38 +711,20 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
if "file-rdeps" not in skip:
ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
if bb.data.inherits_class('nativesdk', d):
- ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl'])
+ ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl', 'perl'])
# For Saving the FILERDEPENDS
filerdepends = {}
rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
for key in rdep_data:
if key.startswith("FILERDEPENDS_"):
- for subkey in rdep_data[key].split():
- if subkey not in ignored_file_rdeps:
+ for subkey in bb.utils.explode_deps(rdep_data[key]):
+ if subkey not in ignored_file_rdeps and \
+ not subkey.startswith('perl('):
# We already know it starts with FILERDEPENDS_
filerdepends[subkey] = key[13:]
if filerdepends:
- next = rdepends
done = rdepends[:]
- # Find all the rdepends on the dependency chain
- while next:
- new = []
- for rdep in next:
- rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
- sub_rdeps = rdep_data.get("RDEPENDS_" + rdep)
- if not sub_rdeps:
- continue
- for sub_rdep in sub_rdeps.split():
- if sub_rdep in done:
- continue
- if not sub_rdep.startswith('(') and \
- oe.packagedata.has_subpkgdata(sub_rdep, d):
- # It's a new rdep
- done.append(sub_rdep)
- new.append(sub_rdep)
- next = new
-
# Add the rprovides of itself
if pkg not in done:
done.insert(0, pkg)
@@ -926,16 +732,20 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
# python itself is not a package, but python-core provides it, so
# skip checking /usr/bin/python if python is in the rdeps, in
# case there is an RDEPENDS_pkg = "python" in the recipe.
- for py in [ d.getVar('MLPREFIX', True) + "python", "python" ]:
+ for py in [ d.getVar('MLPREFIX') + "python", "python" ]:
if py in done:
filerdepends.pop("/usr/bin/python",None)
done.remove(py)
for rdep in done:
+ # The file dependencies may contain package names, e.g.,
+ # perl
+ filerdepends.pop(rdep,None)
+
# For Saving the FILERPROVIDES, RPROVIDES and FILES_INFO
rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
for key in rdep_data:
if key.startswith("FILERPROVIDES_") or key.startswith("RPROVIDES_"):
- for subkey in rdep_data[key].split():
+ for subkey in bb.utils.explode_deps(rdep_data[key]):
filerdepends.pop(subkey,None)
# Add the files list to the rprovides
if key == "FILES_INFO":
@@ -949,17 +759,17 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
for key in filerdepends:
error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS_%s?" % \
(filerdepends[key].replace("_%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
- package_qa_handle_error("file-rdeps", error_msg, d)
+ package_qa_handle_error("file-rdeps", error_msg, d)
+package_qa_check_rdepends[vardepsexclude] = "OVERRIDES"
-def package_qa_check_deps(pkg, pkgdest, skip, d):
+def package_qa_check_deps(pkg, pkgdest, d):
localdata = bb.data.createCopy(d)
localdata.setVar('OVERRIDES', pkg)
- bb.data.update_data(localdata)
def check_valid_deps(var):
try:
- rvar = bb.utils.explode_dep_versions2(localdata.getVar(var, True) or "")
+ rvar = bb.utils.explode_dep_versions2(localdata.getVar(var) or "")
except ValueError as e:
bb.fatal("%s_%s: %s" % (var, pkg, e))
for dep in rvar:
@@ -975,37 +785,59 @@ def package_qa_check_deps(pkg, pkgdest, skip, d):
check_valid_deps('RREPLACES')
check_valid_deps('RCONFLICTS')
-QAPATHTEST[expanded-d] = "package_qa_check_expanded_d"
-def package_qa_check_expanded_d(path,name,d,elf,messages):
+QAPKGTEST[usrmerge] = "package_qa_check_usrmerge"
+def package_qa_check_usrmerge(pkg, d, messages):
+ pkgdest = d.getVar('PKGDEST')
+ pkg_dir = pkgdest + os.sep + pkg + os.sep
+ merged_dirs = ['bin', 'sbin', 'lib'] + d.getVar('MULTILIB_VARIANTS').split()
+ for f in merged_dirs:
+ if os.path.exists(pkg_dir + f) and not os.path.islink(pkg_dir + f):
+ msg = "%s package is not obeying usrmerge distro feature. /%s should be relocated to /usr." % (pkg, f)
+ package_qa_add_message(messages, "usrmerge", msg)
+ return False
+ return True
+
+QAPKGTEST[perllocalpod] = "package_qa_check_perllocalpod"
+def package_qa_check_perllocalpod(pkg, d, messages):
+ """
+ Check that the recipe didn't ship a perllocal.pod file, which shouldn't be
+ installed in a distribution package. cpan.bbclass sets NO_PERLLOCAL=1 to
+ handle this for most recipes.
+ """
+ import glob
+ pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
+ podpath = oe.path.join(pkgd, d.getVar("libdir"), "perl*", "*", "*", "perllocal.pod")
+
+ matches = glob.glob(podpath)
+ if matches:
+ matches = [package_qa_clean_path(path, d, pkg) for path in matches]
+ msg = "%s contains perllocal.pod (%s), should not be installed" % (pkg, " ".join(matches))
+ package_qa_add_message(messages, "perllocalpod", msg)
+
+QAPKGTEST[expanded-d] = "package_qa_check_expanded_d"
+def package_qa_check_expanded_d(package, d, messages):
"""
Check for the expanded D (${D}) value in pkg_* and FILES
variables, warn the user to use it correctly.
"""
-
sane = True
- expanded_d = d.getVar('D',True)
-
- # Get packages for current recipe and iterate
- packages = d.getVar('PACKAGES', True).split(" ")
- for pak in packages:
- # Go through all variables and check if expanded D is found, warn the user accordingly
- for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
- bbvar = d.getVar(var + "_" + pak, False)
- if bbvar:
- # Bitbake expands ${D} within bbvar during the previous step, so we check for its expanded value
- if expanded_d in bbvar:
- if var == 'FILES':
- package_qa_add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % pak)
- sane = False
- else:
- package_qa_add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, pak))
- sane = False
+ expanded_d = d.getVar('D')
+
+ for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
+ bbvar = d.getVar(var + "_" + package) or ""
+ if expanded_d in bbvar:
+ if var == 'FILES':
+ package_qa_add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package)
+ sane = False
+ else:
+ package_qa_add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package))
+ sane = False
return sane
def package_qa_check_encoding(keys, encode, d):
def check_encoding(key, enc):
sane = True
- value = d.getVar(key, True)
+ value = d.getVar(key)
if value:
try:
s = value.encode(enc)
@@ -1030,8 +862,8 @@ def package_qa_check_host_user(path, name, d, elf, messages):
if not os.path.lexists(path):
return
- dest = d.getVar('PKGDEST', True)
- pn = d.getVar('PN', True)
+ dest = d.getVar('PKGDEST')
+ pn = d.getVar('PN')
home = os.path.join(dest, 'home')
if path == home or path.startswith(home + os.sep):
return
@@ -1043,18 +875,29 @@ def package_qa_check_host_user(path, name, d, elf, messages):
if exc.errno != errno.ENOENT:
raise
else:
- rootfs_path = path[len(dest):]
- check_uid = int(d.getVar('HOST_USER_UID', True))
+ check_uid = int(d.getVar('HOST_USER_UID'))
if stat.st_uid == check_uid:
- package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_uid))
+ package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_uid))
return False
- check_gid = int(d.getVar('HOST_USER_GID', True))
+ check_gid = int(d.getVar('HOST_USER_GID'))
if stat.st_gid == check_gid:
- package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_gid))
+ package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_gid))
return False
return True
+QARECIPETEST[src-uri-bad] = "package_qa_check_src_uri"
+def package_qa_check_src_uri(pn, d, messages):
+ import re
+
+ if "${PN}" in d.getVar("SRC_URI", False):
+ package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses PN not BPN" % pn, d)
+
+ for url in d.getVar("SRC_URI").split():
+ if re.search(r"github\.com/.+/.+/archive/.+", url):
+ package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub archives" % pn, d)
+
+
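The second test exists because GitHub's autogenerated /archive/ tarballs are not guaranteed to stay byte-stable, so recipe checksums against them can break without any upstream change. What the regex does and does not flag (URLs hypothetical):

    import re

    flagged = "https://github.com/example/project/archive/v1.0.tar.gz"
    stable = "https://github.com/example/project/releases/download/v1.0/project-1.0.tar.gz"

    pattern = r"github\.com/.+/.+/archive/.+"
    assert re.search(pattern, flagged)       # autogenerated archive: warned
    assert not re.search(pattern, stable)    # uploaded release asset: accepted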
# The PACKAGE FUNC to scan each package
python do_package_qa () {
import subprocess
@@ -1067,8 +910,8 @@ python do_package_qa () {
# Check non UTF-8 characters on recipe's metadata
package_qa_check_encoding(['DESCRIPTION', 'SUMMARY', 'LICENSE', 'SECTION'], 'utf-8', d)
- logdir = d.getVar('T', True)
- pkg = d.getVar('PN', True)
+ logdir = d.getVar('T')
+ pn = d.getVar('PN')
# Check the compile log for host contamination
compilelog = os.path.join(logdir,"log.do_compile")
@@ -1077,7 +920,7 @@ python do_package_qa () {
statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
if subprocess.call(statement, shell=True) == 0:
msg = "%s: The compile log indicates that host include and/or library paths were used.\n \
- Please check the log '%s' for more information." % (pkg, compilelog)
+ Please check the log '%s' for more information." % (pn, compilelog)
package_qa_handle_error("compile-host-path", msg, d)
# Check the install log for host contamination
@@ -1087,12 +930,12 @@ python do_package_qa () {
statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
if subprocess.call(statement, shell=True) == 0:
msg = "%s: The install log indicates that host include and/or library paths were used.\n \
- Please check the log '%s' for more information." % (pkg, installlog)
+ Please check the log '%s' for more information." % (pn, installlog)
package_qa_handle_error("install-host-path", msg, d)
# Scan the packages...
- pkgdest = d.getVar('PKGDEST', True)
- packages = set((d.getVar('PACKAGES', True) or '').split())
+ pkgdest = d.getVar('PKGDEST')
+ packages = set((d.getVar('PACKAGES') or '').split())
cpath = oe.cachedpath.CachedPath()
global pkgfiles
@@ -1107,38 +950,38 @@ python do_package_qa () {
if not packages:
return
- testmatrix = d.getVarFlags("QAPATHTEST")
import re
# The package name matches the [a-z0-9.+-]+ regular expression
- pkgname_pattern = re.compile("^[a-z0-9.+-]+$")
+ pkgname_pattern = re.compile(r"^[a-z0-9.+-]+$")
taskdepdata = d.getVar("BB_TASKDEPDATA", False)
taskdeps = set()
for dep in taskdepdata:
taskdeps.add(taskdepdata[dep][0])
- g = globals()
- for package in packages:
- skip = (d.getVar('INSANE_SKIP_' + package, True) or "").split()
- if skip:
- bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
+ def parse_test_matrix(matrix_name):
+ testmatrix = d.getVarFlags(matrix_name) or {}
+ g = globals()
warnchecks = []
- for w in (d.getVar("WARN_QA", True) or "").split():
+ for w in (d.getVar("WARN_QA") or "").split():
if w in skip:
continue
if w in testmatrix and testmatrix[w] in g:
warnchecks.append(g[testmatrix[w]])
- if w == 'unsafe-references-in-binaries':
- oe.utils.write_ld_so_conf(d)
errorchecks = []
- for e in (d.getVar("ERROR_QA", True) or "").split():
+ for e in (d.getVar("ERROR_QA") or "").split():
if e in skip:
continue
if e in testmatrix and testmatrix[e] in g:
errorchecks.append(g[testmatrix[e]])
- if e == 'unsafe-references-in-binaries':
- oe.utils.write_ld_so_conf(d)
+ return warnchecks, errorchecks
+
+ for package in packages:
+ skip = set((d.getVar('INSANE_SKIP') or "").split() +
+ (d.getVar('INSANE_SKIP_' + package) or "").split())
+ if skip:
+ bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
bb.note("Checking Package: %s" % package)
# Check package name
@@ -1146,25 +989,41 @@ python do_package_qa () {
package_qa_handle_error("pkgname",
"%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
- path = "%s/%s" % (pkgdest, package)
- package_qa_walk(warnchecks, errorchecks, skip, package, d)
+ warn_checks, error_checks = parse_test_matrix("QAPATHTEST")
+ package_qa_walk(warn_checks, error_checks, package, d)
+
+ warn_checks, error_checks = parse_test_matrix("QAPKGTEST")
+ package_qa_package(warn_checks, error_checks, package, d)
package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d)
- package_qa_check_deps(package, pkgdest, skip, d)
+ package_qa_check_deps(package, pkgdest, d)
- if 'libdir' in d.getVar("ALL_QA", True).split():
+ warn_checks, error_checks = parse_test_matrix("QARECIPETEST")
+ package_qa_recipe(warn_checks, error_checks, pn, d)
+
+ if 'libdir' in d.getVar("ALL_QA").split():
package_qa_check_libdir(d)
- qa_sane = d.getVar("QA_SANE", True)
+ qa_sane = d.getVar("QA_SANE")
if not qa_sane:
bb.fatal("QA run found fatal errors. Please consider fixing them.")
bb.note("DONE with PACKAGE QA")
}
+# binutils is used for most checks, so it needs to be set as a dependency.
+# POPULATESYSROOTDEPS is defined in staging class.
+do_package_qa[depends] += "${POPULATESYSROOTDEPS}"
do_package_qa[vardepsexclude] = "BB_TASKDEPDATA"
do_package_qa[rdeptask] = "do_packagedata"
addtask do_package_qa after do_packagedata do_package before do_build
+# Add the package-specific INSANE_SKIPs to the sstate dependencies
+python() {
+ pkgs = (d.getVar('PACKAGES') or '').split()
+ for pkg in pkgs:
+ d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP_{}".format(pkg))
+}
+
SSTATETASKS += "do_package_qa"
do_package_qa[sstate-inputdirs] = ""
do_package_qa[sstate-outputdirs] = ""
@@ -1175,11 +1034,58 @@ addtask do_package_qa_setscene
python do_qa_staging() {
bb.note("QA checking staging")
-
- if not package_qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d):
+ if not qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d):
bb.fatal("QA staging was broken by the package built above")
}
+python do_qa_patch() {
+ import subprocess
+
+ ###########################################################################
+ # Check patch.log for fuzz warnings
+ #
+ # Further information on why we check for patch fuzz warnings:
+ # http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html
+ # https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450
+ ###########################################################################
+
+ logdir = d.getVar('T')
+ patchlog = os.path.join(logdir,"log.do_patch")
+
+ if os.path.exists(patchlog):
+ fuzzheader = '--- Patch fuzz start ---'
+ fuzzfooter = '--- Patch fuzz end ---'
+ statement = "grep -e '%s' %s > /dev/null" % (fuzzheader, patchlog)
+ if subprocess.call(statement, shell=True) == 0:
+ msg = "Fuzz detected:\n\n"
+ fuzzmsg = ""
+ inFuzzInfo = False
+ f = open(patchlog, "r")
+ for line in f:
+ if fuzzheader in line:
+ inFuzzInfo = True
+ fuzzmsg = ""
+ elif fuzzfooter in line:
+ fuzzmsg = fuzzmsg.replace('\n\n', '\n')
+ msg += fuzzmsg
+ msg += "\n"
+ inFuzzInfo = False
+ elif inFuzzInfo and not 'Now at patch' in line:
+ fuzzmsg += line
+ f.close()
+ msg += "The context lines in the patches can be updated with devtool:\n"
+ msg += "\n"
+ msg += " devtool modify %s\n" % d.getVar('PN')
+ msg += " devtool finish --force-patch-refresh %s <layer_path>\n\n" % d.getVar('PN')
+ msg += "Don't forget to review changes done by devtool!\n"
+ if 'patch-fuzz' in d.getVar('ERROR_QA'):
+ bb.error(msg)
+ elif 'patch-fuzz' in d.getVar('WARN_QA'):
+ bb.warn(msg)
+ msg = "Patch log indicates that patches do not apply cleanly."
+ package_qa_handle_error("patch-fuzz", msg, d)
+}
+
python do_qa_configure() {
import subprocess
@@ -1188,17 +1094,24 @@ python do_qa_configure() {
###########################################################################
configs = []
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
- if bb.data.inherits_class('autotools', d):
+ skip = (d.getVar('INSANE_SKIP') or "").split()
+ skip_configure_unsafe = False
+ if 'configure-unsafe' in skip:
+ bb.note("Recipe %s skipping qa checking: configure-unsafe" % d.getVar('PN'))
+ skip_configure_unsafe = True
+
+ if bb.data.inherits_class('autotools', d) and not skip_configure_unsafe:
bb.note("Checking autotools environment for common misconfiguration")
for root, dirs, files in os.walk(workdir):
statement = "grep -q -F -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s" % \
os.path.join(root,"config.log")
if "config.log" in files:
if subprocess.call(statement, shell=True) == 0:
- bb.fatal("""This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
-Rerun configure task after fixing this.""")
+ error_msg = """This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
+Rerun configure task after fixing this."""
+ package_qa_handle_error("configure-unsafe", error_msg, d)
if "configure.ac" in files:
configs.append(os.path.join(root,"configure.ac"))
@@ -1209,70 +1122,81 @@ Rerun configure task after fixing this.""")
# Check gettext configuration and dependencies are correct
###########################################################################
- cnf = d.getVar('EXTRA_OECONF', True) or ""
- if "gettext" not in d.getVar('P', True) and "gcc-runtime" not in d.getVar('P', True) and "--disable-nls" not in cnf:
- ml = d.getVar("MLPREFIX", True) or ""
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk', d):
- gt = "gettext-native"
- elif bb.data.inherits_class('cross-canadian', d):
+ skip_configure_gettext = False
+ if 'configure-gettext' in skip:
+ bb.note("Recipe %s skipping qa checking: configure-gettext" % d.getVar('PN'))
+ skip_configure_gettext = True
+
+ cnf = d.getVar('EXTRA_OECONF') or ""
+ if not ("gettext" in d.getVar('P') or "gcc-runtime" in d.getVar('P') or \
+ "--disable-nls" in cnf or skip_configure_gettext):
+ ml = d.getVar("MLPREFIX") or ""
+ if bb.data.inherits_class('cross-canadian', d):
gt = "nativesdk-gettext"
else:
- gt = "virtual/" + ml + "gettext"
- deps = bb.utils.explode_deps(d.getVar('DEPENDS', True) or "")
+ gt = "gettext-native"
+ deps = bb.utils.explode_deps(d.getVar('DEPENDS') or "")
if gt not in deps:
for config in configs:
gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
if subprocess.call(gnu, shell=True) == 0:
- bb.fatal("""%s required but not in DEPENDS for file %s.
-Missing inherit gettext?""" % (gt, config))
+ error_msg = "AM_GNU_GETTEXT used but no inherit gettext"
+ package_qa_handle_error("configure-gettext", error_msg, d)
###########################################################################
# Check unrecognised configure options (with a whitelist)
###########################################################################
- if bb.data.inherits_class("autotools", d):
+ if bb.data.inherits_class("autotools", d) or bb.data.inherits_class("meson", d):
bb.note("Checking configure output for unrecognised options")
try:
- flag = "WARNING: unrecognized options:"
- log = os.path.join(d.getVar('B', True), 'config.log')
- output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ')
+ if bb.data.inherits_class("autotools", d):
+ flag = "WARNING: unrecognized options:"
+ log = os.path.join(d.getVar('B'), 'config.log')
+ if bb.data.inherits_class("meson", d):
+ flag = "WARNING: Unknown options:"
+ log = os.path.join(d.getVar('T'), 'log.do_configure')
+ output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ').replace('"', '')
options = set()
for line in output.splitlines():
options |= set(line.partition(flag)[2].split())
- whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST", True).split())
+ whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST").split())
options -= whitelist
if options:
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
package_qa_handle_error("unknown-configure-option", error_msg, d)
except subprocess.CalledProcessError:
pass
# Check invalid PACKAGECONFIG
- pkgconfig = (d.getVar("PACKAGECONFIG", True) or "").split()
+ pkgconfig = (d.getVar("PACKAGECONFIG") or "").split()
if pkgconfig:
pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
for pconfig in pkgconfig:
if pconfig not in pkgconfigflags:
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
error_msg = "%s: invalid PACKAGECONFIG: %s" % (pn, pconfig)
package_qa_handle_error("invalid-packageconfig", error_msg, d)
- qa_sane = d.getVar("QA_SANE", True)
+ qa_sane = d.getVar("QA_SANE")
if not qa_sane:
bb.fatal("Fatal QA errors found, failing task.")
}
python do_qa_unpack() {
- src_uri = d.getVar('SRC_URI', True)
- s_dir = d.getVar('S', True)
+ src_uri = d.getVar('SRC_URI')
+ s_dir = d.getVar('S')
if src_uri and not os.path.exists(s_dir):
- bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN', True), d.getVar('S', False), s_dir))
+ bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN'), d.getVar('S', False), s_dir))
}
# The Staging Func, to check all staging
#addtask qa_staging after do_populate_sysroot before do_build
do_populate_sysroot[postfuncs] += "do_qa_staging "
+# Check for patch fuzz
+do_patch[postfuncs] += "do_qa_patch "
+
# Check broken config.log files, for packages requiring Gettext which
# don't have it in DEPENDS.
#addtask qa_configure after do_configure before do_compile
@@ -1282,7 +1206,9 @@ do_configure[postfuncs] += "do_qa_configure "
do_unpack[postfuncs] += "do_qa_unpack"
python () {
- tests = d.getVar('ALL_QA', True).split()
+ import re
+
+ tests = d.getVar('ALL_QA').split()
if "desktop" in tests:
d.appendVar("PACKAGE_DEPENDS", " desktop-file-utils-native")
@@ -1291,7 +1217,7 @@ python () {
###########################################################################
# Checking ${FILESEXTRAPATHS}
- extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
+ extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
if '__default' not in extrapaths.split(":"):
msg = "FILESEXTRAPATHS-variable, must always use _prepend (or _append)\n"
msg += "type of assignment, and don't forget the colon.\n"
@@ -1303,29 +1229,37 @@ python () {
msg += "%s\n" % extrapaths
bb.warn(msg)
- overrides = d.getVar('OVERRIDES', True).split(':')
- pn = d.getVar('PN', True)
+ overrides = d.getVar('OVERRIDES').split(':')
+ pn = d.getVar('PN')
if pn in overrides:
- msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE", True), pn)
+ msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE"), pn)
package_qa_handle_error("pn-overrides", msg, d)
+ prog = re.compile(r'[A-Z]')
+ if prog.search(pn):
+ package_qa_handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behavior.' % pn, d)
+
+ # Some people mistakenly use DEPENDS_${PN} instead of DEPENDS and wonder
+ # why it doesn't work.
+ if (d.getVar(d.expand('DEPENDS_${PN}'))):
+ package_qa_handle_error("pkgvarcheck", "recipe uses DEPENDS_${PN}, should use DEPENDS", d)
issues = []
- if (d.getVar('PACKAGES', True) or "").split():
- for dep in (d.getVar('QADEPENDS', True) or "").split():
+ if (d.getVar('PACKAGES') or "").split():
+ for dep in (d.getVar('QADEPENDS') or "").split():
d.appendVarFlag('do_package_qa', 'depends', " %s:do_populate_sysroot" % dep)
for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY':
if d.getVar(var, False):
issues.append(var)
- fakeroot_tests = d.getVar('FAKEROOT_QA', True).split()
+ fakeroot_tests = d.getVar('FAKEROOT_QA').split()
if set(tests) & set(fakeroot_tests):
d.setVarFlag('do_package_qa', 'fakeroot', '1')
d.appendVarFlag('do_package_qa', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
else:
d.setVarFlag('do_package_qa', 'rdeptask', '')
for i in issues:
- package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE", True), i), d)
- qa_sane = d.getVar("QA_SANE", True)
+ package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
+ qa_sane = d.getVar("QA_SANE")
if not qa_sane:
bb.fatal("Fatal QA errors found, failing task.")
}
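On the consumer side, the skip machinery reworked above is driven from recipes; an illustrative fragment (the test selection is chosen for the example):

    # Silence the la/pkgconfig staging tests for this whole recipe, and the
    # textrel path test for the main package only; with the change above the
    # unsuffixed INSANE_SKIP is now honoured everywhere as well.
    INSANE_SKIP += "la pkgconfig"
    INSANE_SKIP_${PN} += "textrel"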
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
index 3ed5986a52..07ec242e63 100644
--- a/meta/classes/kernel-arch.bbclass
+++ b/meta/classes/kernel-arch.bbclass
@@ -14,27 +14,34 @@ valid_archs = "alpha cris ia64 \
parisc s390 v850 \
avr32 blackfin \
microblaze \
- nios2"
+ nios2 arc riscv xtensa"
def map_kernel_arch(a, d):
import re
- valid_archs = d.getVar('valid_archs', True).split()
+ valid_archs = d.getVar('valid_archs').split()
if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
+ elif re.match('arceb$', a): return 'arc'
elif re.match('armeb$', a): return 'arm'
elif re.match('aarch64$', a): return 'arm64'
elif re.match('aarch64_be$', a): return 'arm64'
- elif re.match('mips(el|64|64el)$', a): return 'mips'
+ elif re.match('aarch64_ilp32$', a): return 'arm64'
+ elif re.match('aarch64_be_ilp32$', a): return 'arm64'
+ elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a): return 'mips'
+ elif re.match('mcf', a): return 'm68k'
+ elif re.match('riscv(32|64|)(eb|)$', a): return 'riscv'
elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
elif re.match('sh(3|4)$', a): return 'sh'
elif re.match('bfin', a): return 'blackfin'
elif re.match('microblazee[bl]', a): return 'microblaze'
elif a in valid_archs: return a
else:
+ if not d.getVar("TARGET_OS").startswith("linux"):
+ return a
bb.error("cannot map '%s' to a linux kernel architecture" % a)
-export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH', True), d)}"
+export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH'), d)}"
def map_uboot_arch(a, d):
import re
@@ -43,7 +50,7 @@ def map_uboot_arch(a, d):
elif re.match('i.86$', a): return 'x86'
return a
-export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH', True), d)}"
+export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH'), d)}"
# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture
# specific options necessary for building the kernel and modules.
@@ -54,7 +61,8 @@ HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
TARGET_AR_KERNEL_ARCH ?= ""
HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
-KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd"
+KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH}"
KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
+TOOLCHAIN = "gcc"
diff --git a/meta/classes/kernel-artifact-names.bbclass b/meta/classes/kernel-artifact-names.bbclass
new file mode 100644
index 0000000000..bbeecba7bd
--- /dev/null
+++ b/meta/classes/kernel-artifact-names.bbclass
@@ -0,0 +1,18 @@
+KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
+KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}"
+
+KERNEL_IMAGE_NAME ?= "${KERNEL_ARTIFACT_NAME}"
+KERNEL_IMAGE_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+
+KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}"
+KERNEL_DTB_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+
+KERNEL_FIT_NAME ?= "${KERNEL_ARTIFACT_NAME}"
+KERNEL_FIT_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+
+MODULE_TARBALL_NAME ?= "${KERNEL_ARTIFACT_NAME}"
+MODULE_TARBALL_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+MODULE_TARBALL_DEPLOY ?= "1"
+
+INITRAMFS_NAME ?= "initramfs-${KERNEL_ARTIFACT_NAME}"
+INITRAMFS_LINK_NAME ?= "initramfs-${KERNEL_ARTIFACT_LINK_NAME}"
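Since every *_NAME/*_LINK_NAME pair defaults through the two variables at the top, all deployed kernel, dtb, fitImage and initramfs artifacts can be renamed in one place; an illustrative override for a machine or distro .conf:

    # Drop IMAGE_VERSION_SUFFIX (a datetime by default) from deployed
    # kernel artifact names (illustrative).
    KERNEL_ARTIFACT_NAME = "${PKGE}-${PKGV}-${PKGR}-${MACHINE}"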
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass
new file mode 100644
index 0000000000..522c46575d
--- /dev/null
+++ b/meta/classes/kernel-devicetree.bbclass
@@ -0,0 +1,95 @@
+# Support for device tree generation
+PACKAGES_append = " \
+ ${KERNEL_PACKAGE_NAME}-devicetree \
+ ${@[d.getVar('KERNEL_PACKAGE_NAME') + '-image-zimage-bundle', ''][d.getVar('KERNEL_DEVICETREE_BUNDLE') != '1']} \
+"
+FILES_${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
+FILES_${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
+
+# Generate kernel+devicetree bundle
+KERNEL_DEVICETREE_BUNDLE ?= "0"
+
+normalize_dtb () {
+ dtb="$1"
+ if echo $dtb | grep -q '/dts/'; then
+ bbwarn "$dtb contains the full path to the the dts file, but only the dtb name should be used."
+ dtb=`basename $dtb | sed 's,\.dts$,.dtb,g'`
+ fi
+ echo "$dtb"
+}
+
+get_real_dtb_path_in_kernel () {
+ dtb="$1"
+ dtb_path="${B}/arch/${ARCH}/boot/dts/$dtb"
+ if [ ! -e "$dtb_path" ]; then
+ dtb_path="${B}/arch/${ARCH}/boot/$dtb"
+ fi
+ echo "$dtb_path"
+}
+
+do_configure_append() {
+ if [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
+ if echo ${KERNEL_IMAGETYPE_FOR_MAKE} | grep -q 'zImage'; then
+ case "${ARCH}" in
+ "arm")
+ config="${B}/.config"
+ if ! grep -q 'CONFIG_ARM_APPENDED_DTB=y' $config; then
+ bbwarn 'CONFIG_ARM_APPENDED_DTB is NOT enabled in the kernel. Enabling it to allow the kernel to boot with the Device Tree appended!'
+ sed -i "/CONFIG_ARM_APPENDED_DTB[ =]/d" $config
+ echo "CONFIG_ARM_APPENDED_DTB=y" >> $config
+ echo "# CONFIG_ARM_ATAG_DTB_COMPAT is not set" >> $config
+ fi
+ ;;
+ *)
+ bberror "KERNEL_DEVICETREE_BUNDLE is not supported for ${ARCH}. Currently it is only supported for 'ARM'."
+ esac
+ else
+ bberror 'KERNEL_DEVICETREE_BUNDLE requires KERNEL_IMAGETYPE to contain zImage.'
+ fi
+ fi
+}
+
+do_compile_append() {
+ for dtbf in ${KERNEL_DEVICETREE}; do
+ dtb=`normalize_dtb "$dtbf"`
+ oe_runmake $dtb
+ done
+}
+
+do_install_append() {
+ for dtbf in ${KERNEL_DEVICETREE}; do
+ dtb=`normalize_dtb "$dtbf"`
+ dtb_ext=${dtb##*.}
+ dtb_base_name=`basename $dtb .$dtb_ext`
+ dtb_path=`get_real_dtb_path_in_kernel "$dtb"`
+ install -m 0644 $dtb_path ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext
+ done
+}
+
+do_deploy_append() {
+ for dtbf in ${KERNEL_DEVICETREE}; do
+ dtb=`normalize_dtb "$dtbf"`
+ dtb_ext=${dtb##*.}
+ dtb_base_name=`basename $dtb .$dtb_ext`
+ install -d $deployDir
+ install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
+ ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
+ ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
+ for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
+ if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
+ cat ${D}/${KERNEL_IMAGEDEST}/$type \
+ $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
+ > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
+ ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
+ $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
+ if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
+ cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
+ $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
+ > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
+ ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
+ $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
+ fi
+ fi
+ done
+ done
+}
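The class is driven from the machine configuration; an illustrative fragment (board and dtb names hypothetical):

    KERNEL_IMAGETYPE = "zImage"
    KERNEL_DEVICETREE = "myvendor-myboard.dtb"
    # Optionally append the dtb to zImage for bootloaders that cannot load a
    # separate device tree (ARM-only, enforced by do_configure_append above):
    KERNEL_DEVICETREE_BUNDLE = "1"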
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
index 8580247f82..ec18a3d699 100644
--- a/meta/classes/kernel-fitimage.bbclass
+++ b/meta/classes/kernel-fitimage.bbclass
@@ -1,39 +1,61 @@
-inherit kernel-uboot uboot-sign
+inherit kernel-uboot kernel-artifact-names uboot-sign
python __anonymous () {
- kerneltypes = d.getVar('KERNEL_IMAGETYPES', True) or ""
+ kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
if 'fitImage' in kerneltypes.split():
- depends = d.getVar("DEPENDS", True)
- depends = "%s u-boot-mkimage-native dtc-native" % depends
+ depends = d.getVar("DEPENDS")
+ depends = "%s u-boot-tools-native dtc-native" % depends
d.setVar("DEPENDS", depends)
- if d.getVar("UBOOT_ARCH", True) == "x86":
+ uarch = d.getVar("UBOOT_ARCH")
+ if uarch == "arm64":
+ replacementtype = "Image"
+ elif uarch == "riscv":
+ replacementtype = "Image"
+ elif uarch == "mips":
+ replacementtype = "vmlinuz.bin"
+ elif uarch == "x86":
replacementtype = "bzImage"
+ elif uarch == "microblaze":
+ replacementtype = "linux.bin"
else:
replacementtype = "zImage"
- # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
- # to kernel.bbclass . We have to override it, since we pack zImage
- # (at least for now) into the fitImage .
- typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE", True) or ""
+ # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
+ # to kernel.bbclass. We have to override it, since we pack zImage
+ # (at least for now) into the fitImage.
+ typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
if 'fitImage' in typeformake.split():
d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('fitImage', replacementtype))
- image = d.getVar('INITRAMFS_IMAGE', True)
+ image = d.getVar('INITRAMFS_IMAGE')
if image:
d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ #check if there are any dtb providers
+ providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb")
+ if providerdtb:
+ d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/dtb:do_populate_sysroot')
+ d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' virtual/dtb:do_populate_sysroot')
+ d.setVar('EXTERNAL_KERNEL_DEVICETREE', "${RECIPE_SYSROOT}/boot/devicetree")
+
# Verified boot will sign the fitImage and append the public key to
- # U-boot dtb. We ensure the U-Boot dtb is deployed before assembling
+ # U-Boot dtb. We ensure the U-Boot dtb is deployed before assembling
# the fitImage:
- if d.getVar('UBOOT_SIGN_ENABLE', True):
- uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot', True) or 'u-boot'
- d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_deploy' % uboot_pn)
+ if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
+ uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
+ d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
}
# Options for the device tree compiler passed to mkimage '-D' feature:
UBOOT_MKIMAGE_DTCOPTS ??= ""
+# fitImage Hash Algo
+FIT_HASH_ALG ?= "sha256"
+
+# fitImage Signature Algo
+FIT_SIGN_ALG ?= "rsa2048"
+
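With these knobs a signed fitImage is configured entirely through variables; an illustrative setup (key directory and hint name hypothetical):

    UBOOT_SIGN_ENABLE = "1"
    UBOOT_SIGN_KEYDIR = "${TOPDIR}/fit-keys"
    UBOOT_SIGN_KEYNAME = "dev"
    FIT_HASH_ALG = "sha256"
    FIT_SIGN_ALG = "rsa2048"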
#
# Emit the fitImage ITS header
#
@@ -93,12 +115,12 @@ EOF
# $4 ... Compression type
fitimage_emit_section_kernel() {
- kernel_csum="sha1"
+ kernel_csum="${FIT_HASH_ALG}"
- ENTRYPOINT=${UBOOT_ENTRYPOINT}
- if test -n "${UBOOT_ENTRYSYMBOL}"; then
- ENTRYPOINT=`${HOST_PREFIX}nm ${S}/vmlinux | \
- awk '$4=="${UBOOT_ENTRYSYMBOL}" {print $2}'`
+ ENTRYPOINT="${UBOOT_ENTRYPOINT}"
+ if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
+ ENTRYPOINT=`${HOST_PREFIX}nm vmlinux | \
+ awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
fi
cat << EOF >> ${1}
@@ -126,8 +148,17 @@ EOF
# $3 ... Path to DTB image
fitimage_emit_section_dtb() {
- dtb_csum="sha1"
+ dtb_csum="${FIT_HASH_ALG}"
+ dtb_loadline=""
+ dtb_ext=${DTB##*.}
+ if [ "${dtb_ext}" = "dtbo" ]; then
+ if [ -n "${UBOOT_DTBO_LOADADDRESS}" ]; then
+ dtb_loadline="load = <${UBOOT_DTBO_LOADADDRESS}>;"
+ fi
+ elif [ -n "${UBOOT_DTB_LOADADDRESS}" ]; then
+ dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
+ fi
cat << EOF >> ${1}
fdt@${2} {
description = "Flattened Device Tree blob";
@@ -135,6 +166,7 @@ fitimage_emit_section_dtb() {
type = "flat_dt";
arch = "${UBOOT_ARCH}";
compression = "none";
+ ${dtb_loadline}
hash@1 {
algo = "${dtb_csum}";
};
@@ -150,7 +182,7 @@ EOF
# $3 ... Path to setup image
fitimage_emit_section_setup() {
- setup_csum="sha1"
+ setup_csum="${FIT_HASH_ALG}"
cat << EOF >> ${1}
setup@${2} {
@@ -177,18 +209,27 @@ EOF
# $3 ... Path to ramdisk image
fitimage_emit_section_ramdisk() {
- ramdisk_csum="sha1"
+ ramdisk_csum="${FIT_HASH_ALG}"
+ ramdisk_loadline=""
+ ramdisk_entryline=""
+
+ if [ -n "${UBOOT_RD_LOADADDRESS}" ]; then
+ ramdisk_loadline="load = <${UBOOT_RD_LOADADDRESS}>;"
+ fi
+ if [ -n "${UBOOT_RD_ENTRYPOINT}" ]; then
+ ramdisk_entryline="entry = <${UBOOT_RD_ENTRYPOINT}>;"
+ fi
cat << EOF >> ${1}
ramdisk@${2} {
- description = "ramdisk image";
+ description = "${INITRAMFS_IMAGE}";
data = /incbin/("${3}");
type = "ramdisk";
arch = "${UBOOT_ARCH}";
os = "linux";
compression = "none";
- load = <${UBOOT_RD_LOADADDRESS}>;
- entry = <${UBOOT_RD_ENTRYPOINT}>;
+ ${ramdisk_loadline}
+ ${ramdisk_entryline}
hash@1 {
algo = "${ramdisk_csum}";
};
@@ -201,41 +242,58 @@ EOF
#
# $1 ... .its filename
# $2 ... Linux kernel ID
-# $3 ... DTB image ID
+# $3 ... DTB image name
# $4 ... ramdisk ID
# $5 ... config ID
+# $6 ... default flag
fitimage_emit_section_config() {
- conf_csum="sha1"
+ conf_csum="${FIT_HASH_ALG}"
+ conf_sign_algo="${FIT_SIGN_ALG}"
if [ -n "${UBOOT_SIGN_ENABLE}" ] ; then
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
fi
# Test if we have any DTBs at all
- conf_desc="Linux kernel"
- kernel_line="kernel = \"kernel@${2}\";"
+ sep=""
+ conf_desc=""
+ kernel_line=""
fdt_line=""
ramdisk_line=""
+ setup_line=""
+ default_line=""
+
+ if [ -n "${2}" ]; then
+ conf_desc="Linux kernel"
+ sep=", "
+ kernel_line="kernel = \"kernel@${2}\";"
+ fi
if [ -n "${3}" ]; then
- conf_desc="${conf_desc}, FDT blob"
+ conf_desc="${conf_desc}${sep}FDT blob"
+ sep=", "
fdt_line="fdt = \"fdt@${3}\";"
fi
if [ -n "${4}" ]; then
- conf_desc="${conf_desc}, ramdisk"
+ conf_desc="${conf_desc}${sep}ramdisk"
+ sep=", "
ramdisk_line="ramdisk = \"ramdisk@${4}\";"
fi
if [ -n "${5}" ]; then
- conf_desc="${conf_desc}, setup"
+ conf_desc="${conf_desc}${sep}setup"
setup_line="setup = \"setup@${5}\";"
fi
+ if [ "${6}" = "1" ]; then
+ default_line="default = \"conf@${3}\";"
+ fi
+
cat << EOF >> ${1}
- default = "conf@1";
- conf@1 {
- description = "${conf_desc}";
+ ${default_line}
+ conf@${3} {
+ description = "${6} ${conf_desc}";
${kernel_line}
${fdt_line}
${ramdisk_line}
@@ -247,25 +305,33 @@ EOF
if [ ! -z "${conf_sign_keyname}" ] ; then
- sign_line="sign-images = \"kernel\""
+ sign_line="sign-images = "
+ sep=""
+
+ if [ -n "${2}" ]; then
+ sign_line="${sign_line}${sep}\"kernel\""
+ sep=", "
+ fi
if [ -n "${3}" ]; then
- sign_line="${sign_line}, \"fdt\""
+ sign_line="${sign_line}${sep}\"fdt\""
+ sep=", "
fi
if [ -n "${4}" ]; then
- sign_line="${sign_line}, \"ramdisk\""
+ sign_line="${sign_line}${sep}\"ramdisk\""
+ sep=", "
fi
if [ -n "${5}" ]; then
- sign_line="${sign_line}, \"setup\""
+ sign_line="${sign_line}${sep}\"setup\""
fi
sign_line="${sign_line};"
cat << EOF >> ${1}
signature@1 {
- algo = "${conf_csum},rsa2048";
+ algo = "${conf_csum},${conf_sign_algo}";
key-name-hint = "${conf_sign_keyname}";
${sign_line}
};
@@ -286,6 +352,7 @@ EOF
fitimage_assemble() {
kernelcount=1
dtbcount=""
+ DTBS=""
ramdiskcount=${3}
setupcount=""
rm -f ${1} arch/${ARCH}/boot/${2}
@@ -303,7 +370,8 @@ fitimage_assemble() {
#
# Step 2: Prepare a DTB image section
#
- if test -n "${KERNEL_DEVICETREE}"; then
+
+ if [ -z "${EXTERNAL_KERNEL_DEVICETREE}" ] && [ -n "${KERNEL_DEVICETREE}" ]; then
dtbcount=1
for DTB in ${KERNEL_DEVICETREE}; do
if echo ${DTB} | grep -q '/dts/'; then
@@ -315,15 +383,26 @@ fitimage_assemble() {
DTB_PATH="arch/${ARCH}/boot/${DTB}"
fi
- fitimage_emit_section_dtb ${1} ${dtbcount} ${DTB_PATH}
- dtbcount=`expr ${dtbcount} + 1`
+ DTB=$(echo "${DTB}" | tr '/' '_')
+ DTBS="${DTBS} ${DTB}"
+ fitimage_emit_section_dtb ${1} ${DTB} ${DTB_PATH}
+ done
+ fi
+
+ if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
+ dtbcount=1
+ for DTBFILE in ${EXTERNAL_KERNEL_DEVICETREE}/*.dtb; do
+ DTB=`basename ${DTBFILE}`
+ DTB=$(echo "${DTB}" | tr '/' '_')
+ DTBS="${DTBS} ${DTB}"
+ fitimage_emit_section_dtb ${1} ${DTB} ${DTBFILE}
done
fi
#
# Step 3: Prepare a setup section. (For x86)
#
- if test -e arch/${ARCH}/boot/setup.bin ; then
+ if [ -e arch/${ARCH}/boot/setup.bin ]; then
setupcount=1
fitimage_emit_section_setup ${1} "${setupcount}" arch/${ARCH}/boot/setup.bin
fi
@@ -332,15 +411,22 @@ fitimage_assemble() {
# Step 4: Prepare a ramdisk section.
#
if [ "x${ramdiskcount}" = "x1" ] ; then
- copy_initramfs
- fitimage_emit_section_ramdisk ${1} "${ramdiskcount}" usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
+ # Find and use the first initramfs image archive type we find
+ for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do
+ initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}"
+ echo "Using $initramfs_path"
+ if [ -e "${initramfs_path}" ]; then
+ fitimage_emit_section_ramdisk ${1} "${ramdiskcount}" "${initramfs_path}"
+ break
+ fi
+ done
fi
fitimage_emit_section_maint ${1} sectend
# Force the first Kernel and DTB in the default config
kernelcount=1
- if test -n "${dtbcount}"; then
+ if [ -n "${dtbcount}" ]; then
dtbcount=1
fi
@@ -349,7 +435,18 @@ fitimage_assemble() {
#
fitimage_emit_section_maint ${1} confstart
- fitimage_emit_section_config ${1} "${kernelcount}" "${dtbcount}" "${ramdiskcount}" "${setupcount}"
+ if [ -n "${DTBS}" ]; then
+ i=1
+ for DTB in ${DTBS}; do
+ dtb_ext=${DTB##*.}
+ if [ "${dtb_ext}" = "dtbo" ]; then
+ fitimage_emit_section_config ${1} "" "${DTB}" "" "" "`expr ${i} = ${dtbcount}`"
+ else
+ fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
+ fi
+ i=`expr ${i} + 1`
+ done
+ fi
fitimage_emit_section_maint ${1} sectend
@@ -367,10 +464,17 @@ fitimage_assemble() {
# Step 7: Sign the image and add public key to U-Boot dtb
#
if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
+ add_key_to_u_boot=""
+ if [ -n "${UBOOT_DTB_BINARY}" ]; then
+ # The u-boot.dtb is a symlink to UBOOT_DTB_IMAGE, so we need to copy
+ # both of them, and don't dereference the symlink.
+ cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
+ add_key_to_u_boot="-K ${B}/${UBOOT_DTB_BINARY}"
+ fi
uboot-mkimage \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
-F -k "${UBOOT_SIGN_KEYDIR}" \
- -K "${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_BINARY}" \
+ $add_key_to_u_boot \
-r arch/${ARCH}/boot/${2}
fi
}
@@ -392,39 +496,35 @@ do_assemble_fitimage_initramfs() {
fi
}
-addtask assemble_fitimage_initramfs before do_deploy after do_install
+addtask assemble_fitimage_initramfs before do_deploy after do_bundle_initramfs
kernel_do_deploy[vardepsexclude] = "DATETIME"
kernel_do_deploy_append() {
# Update deploy directory
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
- cd ${B}
echo "Copying fit-image.its source file..."
- its_base_name="fitImage-its-${PV}-${PR}-${MACHINE}-${DATETIME}"
- its_symlink_name=fitImage-its-${MACHINE}
- install -m 0644 fit-image.its ${DEPLOYDIR}/${its_base_name}.its
- linux_bin_base_name="fitImage-linux.bin-${PV}-${PR}-${MACHINE}-${DATETIME}"
- linux_bin_symlink_name=fitImage-linux.bin-${MACHINE}
- install -m 0644 linux.bin ${DEPLOYDIR}/${linux_bin_base_name}.bin
+ install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
+ ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
+
+ echo "Copying linux.bin file..."
+ install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
+ ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
if [ -n "${INITRAMFS_IMAGE}" ]; then
echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
- its_initramfs_base_name="${KERNEL_IMAGETYPE}-its-${INITRAMFS_IMAGE}-${PV}-${PR}-${MACHINE}-${DATETIME}"
- its_initramfs_symlink_name=${KERNEL_IMAGETYPE}-its-${INITRAMFS_IMAGE}-${MACHINE}
- install -m 0644 fit-image-${INITRAMFS_IMAGE}.its ${DEPLOYDIR}/${its_initramfs_base_name}.its
- fit_initramfs_base_name="${KERNEL_IMAGETYPE}-${INITRAMFS_IMAGE}-${PV}-${PR}-${MACHINE}-${DATETIME}"
- fit_initramfs_symlink_name=${KERNEL_IMAGETYPE}-${INITRAMFS_IMAGE}-${MACHINE}
- install -m 0644 arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} ${DEPLOYDIR}/${fit_initramfs_base_name}.bin
- fi
+ install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
+ ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
- cd ${DEPLOYDIR}
- ln -sf ${its_base_name}.its ${its_symlink_name}.its
- ln -sf ${linux_bin_base_name}.bin ${linux_bin_symlink_name}.bin
-
- if [ -n "${INITRAMFS_IMAGE}" ]; then
- ln -sf ${its_initramfs_base_name}.its ${its_initramfs_symlink_name}.its
- ln -sf ${fit_initramfs_base_name}.bin ${fit_initramfs_symlink_name}.bin
+ echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
+ install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
+ ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ fi
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
+ # UBOOT_DTB_IMAGE is a real file, but we can't use
+ # ${UBOOT_DTB_IMAGE} since it contains ${PV}, which refers
+ # to u-boot's version while we are in the kernel env here.
+ install -m 0644 ${B}/u-boot-${MACHINE}*.dtb "$deployDir/"
fi
fi
}
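To exercise the class, a machine selects it and adds fitImage to the image types; an illustrative fragment (the initramfs image name is one common choice, not a requirement):

    KERNEL_CLASSES = "kernel-fitimage"
    KERNEL_IMAGETYPES += "fitImage"
    # Optional: bundle an initramfs as the fitImage ramdisk section.
    INITRAMFS_IMAGE = "core-image-minimal-initramfs"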
diff --git a/meta/classes/kernel-grub.bbclass b/meta/classes/kernel-grub.bbclass
index f7dcc0715a..5d92f3b636 100644
--- a/meta/classes/kernel-grub.bbclass
+++ b/meta/classes/kernel-grub.bbclass
@@ -92,7 +92,7 @@ python __anonymous () {
fi
'''
- imagetypes = d.getVar('KERNEL_IMAGETYPES', True)
+ imagetypes = d.getVar('KERNEL_IMAGETYPES')
imagetypes = re.sub(r'\.gz$', '', imagetypes)
for type in imagetypes.split():
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
index 08d226276e..221022b7bc 100644
--- a/meta/classes/kernel-module-split.bbclass
+++ b/meta/classes/kernel-module-split.bbclass
@@ -22,15 +22,19 @@ if [ x"$D" = "x" ]; then
fi
}
+PACKAGE_WRITE_DEPS += "kmod-native depmodwrapper-cross"
+
do_install_append() {
install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
}
PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
-KERNEL_MODULES_META_PACKAGE ?= "kernel-modules"
+KERNEL_MODULES_META_PACKAGE ?= "${@ d.getVar("KERNEL_PACKAGE_NAME") or "kernel" }-modules"
KERNEL_MODULE_PACKAGE_PREFIX ?= ""
+KERNEL_MODULE_PACKAGE_SUFFIX ?= "-${KERNEL_VERSION}"
+KERNEL_MODULE_PROVIDE_VIRTUAL ?= "1"
python split_kernel_module_packages () {
import re
@@ -39,16 +43,33 @@ python split_kernel_module_packages () {
def extract_modinfo(file):
import tempfile, subprocess
- tempfile.tempdir = d.getVar("WORKDIR", True)
+ tempfile.tempdir = d.getVar("WORKDIR")
+ compressed = re.match( r'.*\.([xg])z$', file)
tf = tempfile.mkstemp()
tmpfile = tf[1]
- cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX", True) or "", file, tmpfile)
- subprocess.call(cmd, shell=True)
- f = open(tmpfile)
+ if compressed:
+ tmpkofile = tmpfile + ".ko"
+ if compressed.group(1) == 'g':
+ cmd = "gunzip -dc %s > %s" % (file, tmpkofile)
+ subprocess.check_call(cmd, shell=True)
+ elif compressed.group(1) == 'x':
+ cmd = "xz -dc %s > %s" % (file, tmpkofile)
+ subprocess.check_call(cmd, shell=True)
+ else:
+ msg = "Cannot decompress '%s'" % file
+ raise msg
+ cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", tmpkofile, tmpfile)
+ else:
+ cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", file, tmpfile)
+ subprocess.check_call(cmd, shell=True)
+ # errors='replace': Some old kernel versions contain invalid utf-8 characters in mod descriptions (like 0xf6, 'ö')
+ f = open(tmpfile, errors='replace')
l = f.read().split("\000")
f.close()
os.close(tf[0])
os.unlink(tmpfile)
+ if compressed:
+ os.unlink(tmpkofile)
vals = {}
for i in l:
m = modinfoexp.match(i)
@@ -60,12 +81,12 @@ python split_kernel_module_packages () {
def frob_metadata(file, pkg, pattern, format, basename):
vals = extract_modinfo(file)
- dvar = d.getVar('PKGD', True)
+ dvar = d.getVar('PKGD')
# If autoloading is requested, output /etc/modules-load.d/<name>.conf and append
# appropriate modprobe commands to the postinst
- autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD", True) or "").split()
- autoload = d.getVar('module_autoload_%s' % basename, True)
+ autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD") or "").split()
+ autoload = d.getVar('module_autoload_%s' % basename)
if autoload and autoload == basename:
bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename)
if autoload and basename not in autoloadlist:
@@ -79,15 +100,15 @@ python split_kernel_module_packages () {
else:
f.write('%s\n' % basename)
f.close()
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
bb.fatal("pkg_postinst_%s not defined" % pkg)
- postinst += d.getVar('autoload_postinst_fragment', True) % (autoload or basename)
+ postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename)
d.setVar('pkg_postinst_%s' % pkg, postinst)
# Write out any modconf fragment
- modconflist = (d.getVar("KERNEL_MODULE_PROBECONF", True) or "").split()
- modconf = d.getVar('module_conf_%s' % basename, True)
+ modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()
+ modconf = d.getVar('module_conf_%s' % basename)
if modconf and basename in modconflist:
name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
f = open(name, 'w')
@@ -96,15 +117,15 @@ python split_kernel_module_packages () {
elif modconf:
bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
- files = d.getVar('FILES_%s' % pkg, True)
+ files = d.getVar('FILES_%s' % pkg)
files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
d.setVar('FILES_%s' % pkg, files)
if "description" in vals:
- old_desc = d.getVar('DESCRIPTION_' + pkg, True) or ""
+ old_desc = d.getVar('DESCRIPTION_' + pkg) or ""
d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
modinfo_deps = []
if "depends" in vals and vals["depends"] != "":
for dep in vals["depends"].split(","):
@@ -119,26 +140,36 @@ python split_kernel_module_packages () {
# Avoid automatic -dev recommendations for modules ending with -dev.
d.setVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', 1)
- module_regex = '^(.*)\.k?o$'
+ # Provide a virtual package without the version suffix
+ providevirt = d.getVar('KERNEL_MODULE_PROVIDE_VIRTUAL')
+ if providevirt == "1":
+ postfix = format.split('%s')[1]
+ d.setVar('RPROVIDES_' + pkg, pkg.replace(postfix, ''))
+
+ kernel_package_name = d.getVar("KERNEL_PACKAGE_NAME") or "kernel"
+ kernel_version = d.getVar("KERNEL_VERSION")
+
+ module_regex = r'^(.*)\.k?o(?:\.[xg]z)?$'
- module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX', True)
- module_pattern = module_pattern_prefix + 'kernel-module-%s'
+ module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
+ module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
+ module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix
- postinst = d.getVar('pkg_postinst_modules', True)
- postrm = d.getVar('pkg_postrm_modules', True)
+ postinst = d.getVar('pkg_postinst_modules')
+ postrm = d.getVar('pkg_postrm_modules')
- modules = do_split_packages(d, root='/lib/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='kernel-%s' % (d.getVar("KERNEL_VERSION", True)))
+ modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version))
if modules:
- metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE', True)
+ metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules))
# If modules-load.d and modprobe.d are empty at this point, remove them to
# avoid warnings. removedirs only raises an OSError if an empty
# directory cannot be removed.
- dvar = d.getVar('PKGD', True)
+ dvar = d.getVar('PKGD')
for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]:
if len(os.listdir(dir)) == 0:
os.rmdir(dir)
}
-do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF", True) or "").split()))}'
+do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()))}'
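With the versioned naming above, a module such as nfsd from a 4.18.0 kernel is packaged as kernel-module-nfsd-4.18.0, while KERNEL_MODULE_PROVIDE_VIRTUAL (default "1") keeps the unversioned kernel-module-nfsd in RPROVIDES so existing RDEPENDS still resolve. A sketch of the related knobs; the module name and option value are illustrative:

    # Restore the old unversioned package names, if desired
    KERNEL_MODULE_PACKAGE_SUFFIX = ""
    # Autoload the module at boot and install a modprobe.d fragment
    KERNEL_MODULE_AUTOLOAD += "nfsd"
    KERNEL_MODULE_PROBECONF += "nfsd"
    module_conf_nfsd = "options nfsd nfs4_disable_idmapping=0"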
diff --git a/meta/classes/kernel-uboot.bbclass b/meta/classes/kernel-uboot.bbclass
index 345e7f5f3b..87f02654fa 100644
--- a/meta/classes/kernel-uboot.bbclass
+++ b/meta/classes/kernel-uboot.bbclass
@@ -1,15 +1,21 @@
uboot_prep_kimage() {
- if test -e arch/${ARCH}/boot/compressed/vmlinux ; then
+ if [ -e arch/${ARCH}/boot/compressed/vmlinux ]; then
vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
linux_suffix=""
linux_comp="none"
+ elif [ -e arch/${ARCH}/boot/vmlinuz.bin ]; then
+ rm -f linux.bin
+ cp -l arch/${ARCH}/boot/vmlinuz.bin linux.bin
+ vmlinux_path=""
+ linux_suffix=""
+ linux_comp="none"
else
vmlinux_path="vmlinux"
linux_suffix=".gz"
linux_comp="gzip"
fi
- ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
+ [ -n "${vmlinux_path}" ] && ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
if [ "${linux_comp}" != "none" ] ; then
gzip -9 linux.bin
diff --git a/meta/classes/kernel-uimage.bbclass b/meta/classes/kernel-uimage.bbclass
index 340503a2d6..cedb4fa070 100644
--- a/meta/classes/kernel-uimage.bbclass
+++ b/meta/classes/kernel-uimage.bbclass
@@ -1,9 +1,9 @@
inherit kernel-uboot
python __anonymous () {
- if "uImage" in (d.getVar('KERNEL_IMAGETYPES', True) or "").split():
- depends = d.getVar("DEPENDS", True)
- depends = "%s u-boot-mkimage-native" % depends
+ if "uImage" in d.getVar('KERNEL_IMAGETYPES'):
+ depends = d.getVar("DEPENDS")
+ depends = "%s u-boot-tools-native" % depends
d.setVar("DEPENDS", depends)
# Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
@@ -11,27 +11,25 @@ python __anonymous () {
# to build uImage using the kernel build system if and only if
# KEEPUIMAGE == yes. Otherwise, we pack compressed vmlinux into
# the uImage .
- if d.getVar("KEEPUIMAGE", True) != 'yes':
- typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE", True) or ""
+ if d.getVar("KEEPUIMAGE") != 'yes':
+ typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
if "uImage" in typeformake.split():
d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('uImage', 'vmlinux'))
+
+ # Enable building of uImage with mkimage
+ bb.build.addtask('do_uboot_mkimage', 'do_install', 'do_kernel_link_images', d)
}
+do_uboot_mkimage[dirs] += "${B}"
do_uboot_mkimage() {
- if echo "${KERNEL_IMAGETYPES}" | grep -wq "uImage"; then
- if test "x${KEEPUIMAGE}" != "xyes" ; then
- uboot_prep_kimage
-
- ENTRYPOINT=${UBOOT_ENTRYPOINT}
- if test -n "${UBOOT_ENTRYSYMBOL}"; then
- ENTRYPOINT=`${HOST_PREFIX}nm ${S}/vmlinux | \
- awk '$3=="${UBOOT_ENTRYSYMBOL}" {print $1}'`
- fi
+ uboot_prep_kimage
- uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin arch/${ARCH}/boot/uImage
- rm -f linux.bin
- fi
+ ENTRYPOINT=${UBOOT_ENTRYPOINT}
+ if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
+ ENTRYPOINT=`${HOST_PREFIX}nm ${B}/vmlinux | \
+ awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
fi
-}
-addtask uboot_mkimage before do_install after do_compile
+ uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage
+ rm -f linux.bin
+}
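Since do_uboot_mkimage is now registered from the anonymous Python handler rather than gated inside the task body, the entry point must be resolvable one way or the other. A machine configuration sketch with illustrative addresses:

    KERNEL_IMAGETYPES += "uImage"
    UBOOT_LOADADDRESS = "0x80008000"
    # Either a fixed entry point ...
    UBOOT_ENTRYPOINT = "0x80008000"
    # ... or resolve it from a symbol assumed to exist in vmlinux:
    #UBOOT_ENTRYSYMBOL = "kernel_entry"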
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index 53659f2f37..ed9bcfa57c 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -1,14 +1,25 @@
# remove tasks that modify the source tree in case externalsrc is inherited
SRCTREECOVEREDTASKS += "do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
+PATCH_GIT_USER_EMAIL ?= "kernel-yocto@oe"
+PATCH_GIT_USER_NAME ?= "OpenEmbedded"
# returns local (absolute) path names for all valid patches in the
# src_uri
-def find_patches(d):
+def find_patches(d,subdir):
patches = src_patches(d)
patch_list=[]
for p in patches:
- _, _, local, _, _, _ = bb.fetch.decodeurl(p)
- patch_list.append(local)
+ _, _, local, _, _, parm = bb.fetch.decodeurl(p)
+ # if patchdir has been passed, we won't be able to apply it here, so
+ # skip the patch for now; special processing happens later
+ patchdir = ''
+ if "patchdir" in parm:
+ patchdir = parm["patchdir"]
+ if subdir:
+ if subdir == patchdir:
+ patch_list.append(local)
+ else:
+ patch_list.append(local)
return patch_list
@@ -105,26 +116,51 @@ do_kernel_metadata() {
cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
if [ $? -ne 0 ]; then
bbwarn "defconfig detected in WORKDIR. ${KBUILD_DEFCONFIG} skipped"
+ else
+ cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
fi
else
cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
- sccs="${WORKDIR}/defconfig"
fi
+ sccs="${WORKDIR}/defconfig"
else
- bbfatal "A KBUILD_DECONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree"
+ bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree"
fi
fi
- sccs="$sccs ${@" ".join(find_sccs(d))}"
- patches="${@" ".join(find_patches(d))}"
+ # If anyone was trying to patch the kernel metadata, we need to do
+ # that here, since the scc commands migrate the .cfg fragments to the
+ # kernel source tree, where they'll be used later.
+ check_git_config
+ patches="${@" ".join(find_patches(d,'kernel-meta'))}"
+ for p in $patches; do
+ (
+ cd ${WORKDIR}/kernel-meta
+ git am -s $p
+ )
+ done
+
+ sccs_from_src_uri="${@" ".join(find_sccs(d))}"
+ patches="${@" ".join(find_patches(d,''))}"
feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
+ # a quick check to make sure we don't have duplicate defconfigs.
+ # If there's a defconfig in the SRC_URI, did we also have one from
+ # the KBUILD_DEFCONFIG processing above?
+ if [ -n "$sccs" ]; then
+ # we did have a defconfig from above. remove any that might be in the src_uri
+ sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '{ if ($0!="defconfig") { print $0 } }' RS=' ')
+ fi
+ sccs="$sccs $sccs_from_src_uri"
+
# check for feature directories/repos/branches that were part of the
# SRC_URI. If they were supplied, we convert them into include directives
# for the update part of the process
for f in ${feat_dirs}; do
if [ -d "${WORKDIR}/$f/meta" ]; then
includes="$includes -I${WORKDIR}/$f/kernel-meta"
+ elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
+ includes="$includes -I${WORKDIR}/../oe-local-files/$f"
elif [ -d "${WORKDIR}/$f" ]; then
includes="$includes -I${WORKDIR}/$f"
fi
@@ -141,24 +177,39 @@ do_kernel_metadata() {
# expand kernel features into their full path equivalents
bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE})
+ if [ -z "$bsp_definition" ]; then
+ echo "$sccs" | grep -q defconfig
+ if [ $? -ne 0 ]; then
+ bbfatal_log "Could not locate BSP definition for ${KMACHINE}/${LINUX_KERNEL_TYPE} and no defconfig was provided"
+ fi
+ fi
meta_dir=$(kgit --meta)
# run1: pull all the configuration fragments, no matter where they come from
elements="`echo -n ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}`"
if [ -n "${elements}" ]; then
- scc --force -o ${S}/${meta_dir}:cfg,meta ${includes} ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}
+ echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
+ scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}
+ if [ $? -ne 0 ]; then
+ bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ fi
fi
# run2: only generate patches for elements that have been passed on the SRC_URI
elements="`echo -n ${sccs} ${patches} ${KERNEL_FEATURES}`"
if [ -n "${elements}" ]; then
scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} ${KERNEL_FEATURES}
+ if [ $? -ne 0 ]; then
+ bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ fi
fi
}
do_patch() {
+ set +e
cd ${S}
+ check_git_config
meta_dir=$(kgit --meta)
(cd ${meta_dir}; ln -sf patch.queue series)
if [ -f "${meta_dir}/series" ]; then
@@ -168,6 +219,19 @@ do_patch() {
bbfatal_log "Patch failures can be resolved in the linux source directory ${S})"
fi
fi
+
+ if [ -f "${meta_dir}/merge.queue" ]; then
+ # we need to merge all these branches
+ for b in $(cat ${meta_dir}/merge.queue); do
+ git show-ref --verify --quiet refs/heads/${b}
+ if [ $? -eq 0 ]; then
+ bbnote "Merging branch ${b}"
+ git merge -q --no-ff -m "Merge branch ${b}" ${b}
+ else
+ bbfatal "branch ${b} does not exist, cannot merge"
+ fi
+ done
+ fi
}
do_kernel_checkout() {
@@ -206,6 +270,7 @@ do_kernel_checkout() {
fi
rm -f .gitignore
git init
+ check_git_config
git add .
git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
git clean -d -f
@@ -231,7 +296,11 @@ do_kernel_checkout[dirs] = "${S}"
addtask kernel_checkout before do_kernel_metadata after do_unpack
addtask kernel_metadata after do_validate_branches do_unpack before do_patch
do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
+do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
+do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot"
+do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}gcc:do_populate_sysroot"
+do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot"
do_kernel_configme[dirs] += "${S} ${B}"
do_kernel_configme() {
set +e
@@ -256,11 +325,12 @@ do_kernel_configme() {
meta_dir=$(kgit --meta)
configs="$(scc --configs -o ${meta_dir})"
- if [ -z "${configs}" ]; then
+ if [ $? -ne 0 ]; then
+ bberror "${configs}"
bbfatal_log "Could not find configuration queue (${meta_dir}/config.queue)"
fi
- CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
+ CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
if [ $? -ne 0 ]; then
bbfatal_log "Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
fi
@@ -272,25 +342,36 @@ do_kernel_configme() {
addtask kernel_configme before do_configure after do_patch
python do_kernel_configcheck() {
- import re, string, sys
+ import re, string, sys, subprocess
# if KMETA isn't set globally by a recipe using this routine, we need to
# set the default to 'meta'. Otherwise, kconf_check is not passed a valid
# meta-series for processing
- kmeta = d.getVar( "KMETA", True ) or "meta"
+ kmeta = d.getVar("KMETA") or "meta"
if not os.path.exists(kmeta):
kmeta = "." + kmeta
- pathprefix = "export PATH=%s:%s; " % (d.getVar('PATH', True), "${S}/scripts/util/")
+ s = d.getVar('S')
+
+ env = os.environ.copy()
+ env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
- cmd = d.expand("scc --configs -o ${S}/.kernel-meta")
- ret, configs = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
+ try:
+ configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "Cannot gather config fragments for audit: %s" % e.output.decode("utf-8") )
- cmd = d.expand("cd ${S}; kconf_check --report -o ${S}/%s/cfg/ ${B}/.config ${S} %s" % (kmeta,configs))
- ret, result = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
+ try:
+ subprocess.check_call(['kconf_check', '--report', '-o',
+ '%s/%s/cfg' % (s, kmeta), d.getVar('B') + '/.config', s, configs], cwd=s, env=env)
+ except subprocess.CalledProcessError:
+ # The configuration gathering can return different exit codes, but
+ # we interpret them based on the KCONF_AUDIT_LEVEL variable, so we catch
+ # everything here, and let the run continue.
+ pass
- config_check_visibility = int(d.getVar( "KCONF_AUDIT_LEVEL", True ) or 0)
- bsp_check_visibility = int(d.getVar( "KCONF_BSP_AUDIT_LEVEL", True ) or 0)
+ config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0)
+ bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0)
# if config check visibility is non-zero, report dropped configuration values
mismatch_file = d.expand("${S}/%s/cfg/mismatch.txt" % kmeta)
@@ -299,6 +380,27 @@ python do_kernel_configcheck() {
with open (mismatch_file, "r") as myfile:
results = myfile.read()
bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
+
+ if bsp_check_visibility:
+ invalid_file = d.expand("${S}/%s/cfg/invalid.cfg" % kmeta)
+ if os.path.exists(invalid_file) and os.stat(invalid_file).st_size > 0:
+ with open (invalid_file, "r") as myfile:
+ results = myfile.read()
+ bb.warn( "[kernel config]: This BSP sets config options that are not offered anywhere within this kernel:\n\n%s" % results)
+ errors_file = d.expand("${S}/%s/cfg/fragment_errors.txt" % kmeta)
+ if os.path.exists(errors_file) and os.stat(errors_file).st_size > 0:
+ with open (errors_file, "r") as myfile:
+ results = myfile.read()
+ bb.warn( "[kernel config]: This BSP contains fragments with errors:\n\n%s" % results)
+
+ # If the audit level is greater than two, we report if a fragment has overridden
+ # a value from a base fragment. This is really only used for new kernel introduction.
+ if bsp_check_visibility > 2:
+ redefinition_file = d.expand("${S}/%s/cfg/redefinition.txt" % kmeta)
+ if os.path.exists(redefinition_file) and os.stat(redefinition_file).st_size > 0:
+ with open (redefinition_file, "r") as myfile:
+ results = myfile.read()
+ bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results)
}
# Ensure that the branches (BSP and meta) are on the locations specified by
@@ -341,6 +443,10 @@ do_validate_branches() {
current_branch=`git rev-parse --abbrev-ref HEAD`
git branch "$current_branch-orig"
git reset --hard ${force_srcrev}
+ # We've checked out HEAD; make sure we clean up the kgit-s2q fence-post
+ # check so the patches are applied as expected, otherwise no patching
+ # would be done in some corner cases.
+ kgit-s2q --clean
fi
fi
}
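Two of the additions above are directly user-visible: patches can now target the kernel-meta repository via the patchdir URL parameter, and the BSP audit can report fragment redefinitions. A recipe-side sketch; the patch file name is illustrative:

    # Applied with 'git am' in ${WORKDIR}/kernel-meta before scc runs
    SRC_URI += "file://0001-bsp-fragment-fix.patch;patchdir=kernel-meta"
    # An audit level above 2 also reports options redefined across fragments
    KCONF_BSP_AUDIT_LEVEL = "3"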
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index 25a153cd20..ebcb79a528 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -1,7 +1,16 @@
inherit linux-kernel-base kernel-module-split
-PROVIDES += "virtual/kernel"
-DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native depmodwrapper-cross bc-native lzop-native"
+KERNEL_PACKAGE_NAME ??= "kernel"
+KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"
+
+PROVIDES += "${@ "virtual/kernel" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else "" }"
+DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native lzop-native bison-native"
+PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
+
+do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot"
+do_clean[depends] += "make-mod-scripts:do_clean"
+
+CVE_PRODUCT ?= "linux_kernel"
S = "${STAGING_KERNEL_DIR}"
B = "${WORKDIR}/build"
@@ -13,6 +22,7 @@ INHIBIT_DEFAULT_DEPS = "1"
KERNEL_IMAGETYPE ?= "zImage"
INITRAMFS_IMAGE ?= ""
+INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}-${MACHINE}', ''][d.getVar('INITRAMFS_IMAGE') == '']}"
INITRAMFS_TASK ?= ""
INITRAMFS_IMAGE_BUNDLE ?= ""
@@ -22,49 +32,73 @@ INITRAMFS_IMAGE_BUNDLE ?= ""
# number and cause kernel to be rebuilt. To avoid this, make
# KERNEL_VERSION_NAME and KERNEL_VERSION_PKG_NAME depend on
# LINUX_VERSION which is a constant.
-KERNEL_VERSION_NAME = "${@d.getVar('KERNEL_VERSION', True) or ""}"
+KERNEL_VERSION_NAME = "${@d.getVar('KERNEL_VERSION') or ""}"
KERNEL_VERSION_NAME[vardepvalue] = "${LINUX_VERSION}"
-KERNEL_VERSION_PKG_NAME = "${@legitimize_package_name(d.getVar('KERNEL_VERSION', True))}"
+KERNEL_VERSION_PKG_NAME = "${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
KERNEL_VERSION_PKG_NAME[vardepvalue] = "${LINUX_VERSION}"
python __anonymous () {
- import re
+ pn = d.getVar("PN")
+ kpn = d.getVar("KERNEL_PACKAGE_NAME")
+
+ # XXX Remove this after bug 11905 is resolved
+ # FILES_${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
+ if kpn == pn:
+ bb.warn("Some packages (E.g. *-dev) might be missing due to "
+ "bug 11905 (variable KERNEL_PACKAGE_NAME == PN)")
+
+ # The default kernel recipe builds in a shared location defined by
+ # bitbake/distro confs: STAGING_KERNEL_DIR and STAGING_KERNEL_BUILDDIR.
+ # Set these variables to directories under ${WORKDIR} in alternate
+ # kernel recipes (I.e. where KERNEL_PACKAGE_NAME != kernel) so that they
+ # may build in parallel with the default kernel without clobbering.
+ if kpn != "kernel":
+ workdir = d.getVar("WORKDIR")
+ sourceDir = os.path.join(workdir, 'kernel-source')
+ artifactsDir = os.path.join(workdir, 'kernel-build-artifacts')
+ d.setVar("STAGING_KERNEL_DIR", sourceDir)
+ d.setVar("STAGING_KERNEL_BUILDDIR", artifactsDir)
# Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
- type = d.getVar('KERNEL_IMAGETYPE', True) or ""
- alttype = d.getVar('KERNEL_ALT_IMAGETYPE', True) or ""
- types = d.getVar('KERNEL_IMAGETYPES', True) or ""
+ type = d.getVar('KERNEL_IMAGETYPE') or ""
+ alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
+ types = d.getVar('KERNEL_IMAGETYPES') or ""
if type not in types.split():
types = (type + ' ' + types).strip()
if alttype not in types.split():
types = (alttype + ' ' + types).strip()
d.setVar('KERNEL_IMAGETYPES', types)
- typeformake = re.sub(r'\.gz', '', types)
- d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake)
-
- for type in typeformake.split():
+ # KERNEL_IMAGETYPES may contain a mixture of image types supported directly
+ # by the kernel build system and types which are created by post-processing
+ # the output of the kernel build system (e.g. compressing vmlinux ->
+ # vmlinux.gz in kernel_do_compile()).
+ # KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
+ # directly by the kernel build system.
+ if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
+ typeformake = set()
+ for type in types.split():
+ if type == 'vmlinux.gz':
+ type = 'vmlinux'
+ typeformake.add(type)
+
+ d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', ' '.join(sorted(typeformake)))
+
+ kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
+ imagedest = d.getVar('KERNEL_IMAGEDEST')
+
+ for type in types.split():
typelower = type.lower()
-
- d.appendVar('PACKAGES', ' ' + 'kernel-image-' + typelower)
-
- d.setVar('FILES_kernel-image-' + typelower, '/boot/' + type + '*')
-
- d.appendVar('RDEPENDS_kernel-image', ' ' + 'kernel-image-' + typelower)
-
- d.setVar('PKG_kernel-image-' + typelower, 'kernel-image-' + typelower + '-${KERNEL_VERSION_PKG_NAME}')
-
- d.setVar('ALLOW_EMPTY_kernel-image-' + typelower, '1')
-
- imagedest = d.getVar('KERNEL_IMAGEDEST', True)
- priority = d.getVar('KERNEL_PRIORITY', True)
- postinst = '#!/bin/sh\n' + 'update-alternatives --install /' + imagedest + '/' + type + ' ' + type + ' ' + '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME} ' + priority + ' || true' + '\n'
- d.setVar('pkg_postinst_kernel-image-' + typelower, postinst)
-
- postrm = '#!/bin/sh\n' + 'update-alternatives --remove' + ' ' + type + ' ' + type + '-${KERNEL_VERSION_NAME} || true' + '\n'
- d.setVar('pkg_postrm_kernel-image-' + typelower, postrm)
-
- image = d.getVar('INITRAMFS_IMAGE', True)
+ d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
+ d.setVar('FILES_' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
+ d.appendVar('RDEPENDS_%s-image' % kname, ' %s-image-%s' % (kname, typelower))
+ d.setVar('PKG_%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
+ d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1')
+
+ image = d.getVar('INITRAMFS_IMAGE')
+ # If INITRAMFS_IMAGE is set but INITRAMFS_IMAGE_BUNDLE is set to 0,
+ # do_bundle_initramfs does nothing, but the INITRAMFS_IMAGE is still
+ # built standalone for use by wic and other tools.
if image:
d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
@@ -72,7 +106,7 @@ python __anonymous () {
# The preferred method is to set INITRAMFS_IMAGE, because
# this INITRAMFS_TASK has circular dependency problems
# if the initramfs requires kernel modules
- image_task = d.getVar('INITRAMFS_TASK', True)
+ image_task = d.getVar('INITRAMFS_TASK')
if image_task:
d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
}
@@ -100,16 +134,16 @@ inherit ${KERNEL_CLASSES}
# the symlink.
do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
-base_do_unpack_append () {
- s = d.getVar("S", True)
+python do_symlink_kernsrc () {
+ s = d.getVar("S")
if s[-1] == '/':
# drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
s=s[:-1]
- kernsrc = d.getVar("STAGING_KERNEL_DIR", True)
+ kernsrc = d.getVar("STAGING_KERNEL_DIR")
if s != kernsrc:
bb.utils.mkdirhier(kernsrc)
bb.utils.remove(kernsrc, recurse=True)
- if d.getVar("EXTERNALSRC", True):
+ if d.getVar("EXTERNALSRC"):
# With EXTERNALSRC S will not be wiped so we can symlink to it
os.symlink(s, kernsrc)
else:
@@ -117,30 +151,30 @@ base_do_unpack_append () {
shutil.move(s, kernsrc)
os.symlink(kernsrc, s)
}
+addtask symlink_kernsrc before do_configure after do_unpack
inherit kernel-arch deploy
-PACKAGES_DYNAMIC += "^kernel-module-.*"
-PACKAGES_DYNAMIC += "^kernel-image-.*"
-PACKAGES_DYNAMIC += "^kernel-firmware-.*"
+PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-module-.*"
+PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-image-.*"
+PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-firmware-.*"
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
-
-KERNEL_PRIORITY ?= "${@int(d.getVar('PV', True).split('-')[0].split('+')[0].split('.')[0]) * 10000 + \
- int(d.getVar('PV', True).split('-')[0].split('+')[0].split('.')[1]) * 100 + \
- int(d.getVar('PV', True).split('-')[0].split('+')[0].split('.')[-1])}"
+export KBUILD_BUILD_VERSION = "1"
+export KBUILD_BUILD_USER ?= "oe-user"
+export KBUILD_BUILD_HOST ?= "oe-host"
KERNEL_RELEASE ?= "${KERNEL_VERSION}"
# The directory where built kernel lies in the kernel tree
KERNEL_OUTPUT_DIR ?= "arch/${ARCH}/boot"
-KERNEL_IMAGEDEST = "boot"
+KERNEL_IMAGEDEST ?= "boot"
#
# configuration
#
-export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE", True) or "ttyS0"}"
+export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE") or "ttyS0"}"
KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}"
@@ -156,7 +190,7 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
# Some Linux kernel configurations need additional parameters on the command line
KERNEL_EXTRA_ARGS ?= ""
-EXTRA_OEMAKE = " HOSTCC="${BUILD_CC}" HOSTCPP="${BUILD_CPP}""
+EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
KERNEL_ALT_IMAGETYPE ??= ""
copy_initramfs() {
@@ -164,83 +198,77 @@ copy_initramfs() {
# In case the directory is not created yet from the first pass compile:
mkdir -p ${B}/usr
# Find and use the first initramfs image archive type we find
- rm -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
- for img in cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do
- if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img" ]; then
- cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img ${B}/usr/.
+ rm -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
+ for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do
+ if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
+ cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
case $img in
*gz)
echo "gzip decompressing image"
- gunzip -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ gunzip -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
*lz4)
echo "lz4 decompressing image"
- lz4 -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
*lzo)
echo "lzo decompressing image"
- lzop -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ lzop -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
*lzma)
echo "lzma decompressing image"
- lzma -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ lzma -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
*xz)
echo "xz decompressing image"
- xz -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ xz -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
esac
+ break
fi
done
- echo "Finished copy of initramfs into ./usr"
+ # Verify that the above loop found an initramfs, fail otherwise
+ [ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio{.gz|.lz4|.lzo|.lzma|.xz} for bundling; INITRAMFS_IMAGE_NAME might be wrong."
}
-INITRAMFS_BASE_NAME ?= "initramfs-${PV}-${PR}-${MACHINE}-${DATETIME}"
-INITRAMFS_BASE_NAME[vardepsexclude] = "DATETIME"
do_bundle_initramfs () {
if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
echo "Creating a kernel image with a bundled initramfs..."
copy_initramfs
# Backing up the kernel image relies on its type (regular file or symbolic link)
tmp_path=""
- for type in ${KERNEL_IMAGETYPES} ; do
- if [ -h ${KERNEL_OUTPUT_DIR}/$type ] ; then
- linkpath=`readlink -n ${KERNEL_OUTPUT_DIR}/$type`
- realpath=`readlink -fn ${KERNEL_OUTPUT_DIR}/$type`
+ for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
+ if [ -h ${KERNEL_OUTPUT_DIR}/$imageType ] ; then
+ linkpath=`readlink -n ${KERNEL_OUTPUT_DIR}/$imageType`
+ realpath=`readlink -fn ${KERNEL_OUTPUT_DIR}/$imageType`
mv -f $realpath $realpath.bak
- tmp_path=$tmp_path" "$type"#"$linkpath"#"$realpath
- elif [ -f ${KERNEL_OUTPUT_DIR}/$type ]; then
- mv -f ${KERNEL_OUTPUT_DIR}/$type ${KERNEL_OUTPUT_DIR}/$type.bak
- tmp_path=$tmp_path" "$type"##"
+ tmp_path=$tmp_path" "$imageType"#"$linkpath"#"$realpath
+ elif [ -f ${KERNEL_OUTPUT_DIR}/$imageType ]; then
+ mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.bak
+ tmp_path=$tmp_path" "$imageType"##"
fi
done
- use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
+ use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
kernel_do_compile
# Restoring kernel image
for tp in $tmp_path ; do
- type=`echo $tp|cut -d "#" -f 1`
+ imageType=`echo $tp|cut -d "#" -f 1`
linkpath=`echo $tp|cut -d "#" -f 2`
realpath=`echo $tp|cut -d "#" -f 3`
if [ -n "$realpath" ]; then
mv -f $realpath $realpath.initramfs
mv -f $realpath.bak $realpath
- ln -sf $linkpath.initramfs ${B}/${KERNEL_OUTPUT_DIR}/$type.initramfs
+ ln -sf $linkpath.initramfs ${B}/${KERNEL_OUTPUT_DIR}/$imageType.initramfs
else
- mv -f ${KERNEL_OUTPUT_DIR}/$type ${KERNEL_OUTPUT_DIR}/$type.initramfs
- mv -f ${KERNEL_OUTPUT_DIR}/$type.bak ${KERNEL_OUTPUT_DIR}/$type
+ mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.initramfs
+ mv -f ${KERNEL_OUTPUT_DIR}/$imageType.bak ${KERNEL_OUTPUT_DIR}/$imageType
fi
done
- # Update install area
- for type in ${KERNEL_IMAGETYPES} ; do
- echo "There is kernel image bundled with initramfs: ${B}/${KERNEL_OUTPUT_DIR}/$type.initramfs"
- install -m 0644 ${B}/${KERNEL_OUTPUT_DIR}/$type.initramfs ${D}/boot/$type-initramfs-${MACHINE}.bin
- echo "${B}/${KERNEL_OUTPUT_DIR}/$type.initramfs"
- done
fi
}
do_bundle_initramfs[dirs] = "${B}"
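copy_initramfs() and do_bundle_initramfs() only act when both variables below are set. A local.conf sketch; the image name is an example, and any image that deploys a .cpio[.gz|.lz4|.lzo|.lzma|.xz] archive should work:

    INITRAMFS_IMAGE = "core-image-minimal-initramfs"
    INITRAMFS_IMAGE_BUNDLE = "1"
    # The bundled archive is then looked up as
    # ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio[.<compression>]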
@@ -251,8 +279,36 @@ python do_devshell_prepend () {
addtask bundle_initramfs after do_install before do_deploy
+get_cc_option () {
+ # Check if KERNEL_CC supports the option "file-prefix-map".
+ # This option allows us to build images with __FILE__ values that do not
+ # contain the host build path.
+ if ${KERNEL_CC} -Q --help=joined | grep -q "\-ffile-prefix-map=<old=new>"; then
+ echo "-ffile-prefix-map=${S}=/kernel-source/"
+ fi
+}
+
kernel_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
+ if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
+ # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
+ # be set.
+ if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
+ olddir=`pwd`
+ cd ${S}
+ SOURCE_DATE_EPOCH=`git log -1 --pretty=%ct`
+ # git repo not guaranteed, so fall back to REPRODUCIBLE_TIMESTAMP_ROOTFS
+ if [ $? -ne 0 ]; then
+ SOURCE_DATE_EPOCH=${REPRODUCIBLE_TIMESTAMP_ROOTFS}
+ fi
+ cd $olddir
+ fi
+
+ ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ export KCONFIG_NOTIMESTAMP=1
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ fi
# The $use_alternate_initrd is only set from
# do_bundle_initramfs() This variable is specifically for the
# case where we are making a second pass at the kernel
@@ -264,23 +320,24 @@ kernel_do_compile() {
# The old style way of copying a prebuilt image and building it
# is turned on via INITRAMFS_TASK != ""
copy_initramfs
- use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
+ use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
fi
+ cc_extra=$(get_cc_option)
for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
- oe_runmake ${typeformake} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
- for type in ${KERNEL_IMAGETYPES} ; do
- if test "${typeformake}.gz" = "${type}"; then
- gzip -9c < "${typeformake}" > "${KERNEL_OUTPUT_DIR}/${type}"
- break;
- fi
- done
+ oe_runmake ${typeformake} CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
done
+ # vmlinux.gz is not built by the kernel build system
+ if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
+ mkdir -p "${KERNEL_OUTPUT_DIR}"
+ gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
+ fi
}
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
- oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
+ cc_extra=$(get_cc_option)
+ oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
# Module.symvers gets updated during the
# building of the kernel modules. We need to
@@ -301,11 +358,11 @@ kernel_do_install() {
#
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
- oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" modules_install
- rm "${D}/lib/modules/${KERNEL_VERSION}/build"
- rm "${D}/lib/modules/${KERNEL_VERSION}/source"
+ oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
+ rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+ rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
# If the kernel/ directory is empty remove it to prevent QA issues
- rmdir --ignore-fail-on-non-empty "${D}/lib/modules/${KERNEL_VERSION}/kernel"
+ rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel"
else
bbnote "no modules to install"
fi
@@ -315,8 +372,11 @@ kernel_do_install() {
#
install -d ${D}/${KERNEL_IMAGEDEST}
install -d ${D}/boot
- for type in ${KERNEL_IMAGETYPES} ; do
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${type} ${D}/${KERNEL_IMAGEDEST}/${type}-${KERNEL_VERSION}
+ for imageType in ${KERNEL_IMAGETYPES} ; do
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION}
+ if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then
+ ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType}
+ fi
done
install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
@@ -329,6 +389,10 @@ do_install[prefuncs] += "package_get_auto_pr"
# Must be run no earlier than do_kernel_checkout, or else the Makefile won't be at ${S}/Makefile
do_kernel_version_sanity_check() {
+ if [ "x${KERNEL_VERSION_SANITY_SKIP}" = "x1" ]; then
+ exit 0
+ fi
+
# The Makefile determines the kernel version shown at runtime
# Don't use KERNEL_VERSION because the headers it grabs the version from aren't generated until do_compile
VERSION=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
@@ -352,7 +416,7 @@ do_kernel_version_sanity_check() {
reg="${reg}${EXTRAVERSION}"
if [ -z `echo ${PV} | grep -E "${reg}"` ]; then
- bbfatal "Package Version (${PV}) does not match of kernel being built (${vers}). Please update the PV variable to match the kernel source."
+ bbfatal "Package Version (${PV}) does not match of kernel being built (${vers}). Please update the PV variable to match the kernel source or set KERNEL_VERSION_SANITY_SKIP=\"1\" in your recipe."
fi
exit 0
}
@@ -366,13 +430,14 @@ do_shared_workdir_setscene () {
emit_depmod_pkgdata() {
# Stash data for depmod
- install -d ${PKGDESTWORK}/kernel-depmod/
- echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/kernel-depmod/kernel-abiversion
- cp ${B}/System.map ${PKGDESTWORK}/kernel-depmod/System.map-${KERNEL_VERSION}
+ install -d ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/
+ echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/${KERNEL_PACKAGE_NAME}-abiversion
+ cp ${B}/System.map ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/System.map-${KERNEL_VERSION}
}
PACKAGEFUNCS += "emit_depmod_pkgdata"
+do_shared_workdir[cleandirs] += " ${STAGING_KERNEL_BUILDDIR}"
do_shared_workdir () {
cd ${B}
@@ -383,7 +448,7 @@ do_shared_workdir () {
# Store the kernel version in sysroots for module-base.bbclass
#
- echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion
+ echo "${KERNEL_VERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-abiversion
# Copy files required for module builds
cp System.map $kerneldir/System.map-${KERNEL_VERSION}
@@ -391,7 +456,7 @@ do_shared_workdir () {
cp .config $kerneldir/
mkdir -p $kerneldir/include/config
cp include/config/kernel.release $kerneldir/include/config/kernel.release
- if [ -e certs/signing_key.pem ]; then
+ if [ -e certs/signing_key.x509 ]; then
# The signing_key.* files are stored in the certs/ dir in
# newer Linux kernels
mkdir -p $kerneldir/certs
@@ -412,8 +477,10 @@ do_shared_workdir () {
# arch/powerpc/lib/crtsavres.o which is present in
# KBUILD_LDFLAGS_MODULE, making it required to build external modules.
if [ ${ARCH} = "powerpc" ]; then
- mkdir -p $kerneldir/arch/powerpc/lib/
- cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
+ if [ -e arch/powerpc/lib/crtsavres.o ]; then
+ mkdir -p $kerneldir/arch/powerpc/lib/
+ cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
+ fi
fi
if [ -d include/generated ]; then
@@ -425,6 +492,15 @@ do_shared_workdir () {
mkdir -p $kerneldir/arch/${ARCH}/include/generated/
cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
fi
+
+ if (grep -q -i -e '^CONFIG_UNWINDER_ORC=y$' $kerneldir/.config); then
+ # With CONFIG_UNWINDER_ORC (the default in 4.14), objtool is required for
+ # out-of-tree modules to be able to generate object files.
+ if [ -x tools/objtool/objtool ]; then
+ mkdir -p ${kerneldir}/tools/objtool
+ cp tools/objtool/objtool ${kerneldir}/tools/objtool/
+ fi
+ fi
}
# We don't need to stage anything; in particular not the modules/firmware, since those would clash with linux-firmware
@@ -432,17 +508,17 @@ sysroot_stage_all () {
:
}
-KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} O=${B} oldnoconfig || yes '' | oe_runmake -C ${S} O=${B} oldconfig"
+KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} CC="${KERNEL_CC}" O=${B} olddefconfig || oe_runmake -C ${S} O=${B} CC="${KERNEL_CC}" oldnoconfig"
python check_oldest_kernel() {
- oldest_kernel = d.getVar('OLDEST_KERNEL', True)
- kernel_version = d.getVar('KERNEL_VERSION', True)
- tclibc = d.getVar('TCLIBC', True)
+ oldest_kernel = d.getVar('OLDEST_KERNEL')
+ kernel_version = d.getVar('KERNEL_VERSION')
+ tclibc = d.getVar('TCLIBC')
if tclibc == 'glibc':
kernel_version = kernel_version.split('-', 1)[0]
if oldest_kernel and kernel_version:
if bb.utils.vercmp_string(kernel_version, oldest_kernel) < 0:
- bb.warn('%s: OLDEST_KERNEL is "%s" but the version of the kernel you are building is "%s" - therefore %s as built may not be compatible with this kernel. Either set OLDEST_KERNEL to an older version, or build a newer kernel.' % (d.getVar('PN', True), oldest_kernel, kernel_version, tclibc))
+ bb.warn('%s: OLDEST_KERNEL is "%s" but the version of the kernel you are building is "%s" - therefore %s as built may not be compatible with this kernel. Either set OLDEST_KERNEL to an older version, or build a newer kernel.' % (d.getVar('PN'), oldest_kernel, kernel_version, tclibc))
}
check_oldest_kernel[vardepsexclude] += "OLDEST_KERNEL KERNEL_VERSION"
@@ -477,32 +553,34 @@ addtask savedefconfig after do_configure
inherit cml1
+KCONFIG_CONFIG_COMMAND_append = " HOSTLDFLAGS='${BUILD_LDFLAGS}'"
+
EXPORT_FUNCTIONS do_compile do_install do_configure
# kernel-base becomes kernel-${KERNEL_VERSION}
# kernel-image becomes kernel-image-${KERNEL_VERSION}
-PACKAGES = "kernel kernel-base kernel-vmlinux kernel-image kernel-dev kernel-modules"
+PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules"
FILES_${PN} = ""
-FILES_kernel-base = "/lib/modules/${KERNEL_VERSION}/modules.order /lib/modules/${KERNEL_VERSION}/modules.builtin"
-FILES_kernel-image = ""
-FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} /lib/modules/${KERNEL_VERSION}/build"
-FILES_kernel-vmlinux = "/boot/vmlinux*"
-FILES_kernel-modules = ""
-RDEPENDS_kernel = "kernel-base"
+FILES_${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo"
+FILES_${KERNEL_PACKAGE_NAME}-image = ""
+FILES_${KERNEL_PACKAGE_NAME}-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+FILES_${KERNEL_PACKAGE_NAME}-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
+FILES_${KERNEL_PACKAGE_NAME}-modules = ""
+RDEPENDS_${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base"
# Allow machines to override this dependency if kernel image files are
# not wanted in images as standard
-RDEPENDS_kernel-base ?= "kernel-image"
-PKG_kernel-image = "kernel-image-${@legitimize_package_name('${KERNEL_VERSION}')}"
-RDEPENDS_kernel-image += "${@base_conditional('KERNEL_IMAGETYPE', 'vmlinux', 'kernel-vmlinux', '', d)}"
-PKG_kernel-base = "kernel-${@legitimize_package_name('${KERNEL_VERSION}')}"
-RPROVIDES_kernel-base += "kernel-${KERNEL_VERSION}"
-ALLOW_EMPTY_kernel = "1"
-ALLOW_EMPTY_kernel-base = "1"
-ALLOW_EMPTY_kernel-image = "1"
-ALLOW_EMPTY_kernel-modules = "1"
-DESCRIPTION_kernel-modules = "Kernel modules meta package"
-
-pkg_postinst_kernel-base () {
+RDEPENDS_${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image"
+PKG_${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name('${KERNEL_VERSION}')}"
+RDEPENDS_${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux', '', d)}"
+PKG_${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name('${KERNEL_VERSION}')}"
+RPROVIDES_${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
+ALLOW_EMPTY_${KERNEL_PACKAGE_NAME} = "1"
+ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-base = "1"
+ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-image = "1"
+ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-modules = "1"
+DESCRIPTION_${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
+
+pkg_postinst_${KERNEL_PACKAGE_NAME}-base () {
if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
mkdir -p $D/lib/modules/${KERNEL_VERSION}
fi
@@ -516,7 +594,7 @@ pkg_postinst_kernel-base () {
PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
python split_kernel_packages () {
- do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
+ do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex=r'^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
}
# Many scripts want to look in arch/$arch/boot for the bootable
@@ -532,7 +610,11 @@ do_kernel_link_images() {
if [ -f ../../../vmlinuz ]; then
ln -sf ../../../vmlinuz
fi
+ if [ -f ../../../vmlinuz.bin ]; then
+ ln -sf ../../../vmlinuz.bin
+ fi
}
+addtask kernel_link_images after do_compile before do_strip
do_strip() {
if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then
@@ -561,75 +643,78 @@ do_strip() {
}
do_strip[dirs] = "${B}"
-addtask do_strip before do_sizecheck after do_kernel_link_images
+addtask strip before do_sizecheck after do_kernel_link_images
# Support checking the kernel size since some kernels need to reside in partitions
-# with a fixed length or there is a limit in transferring the kernel to memory
+# with a fixed length or there is a limit in transferring the kernel to memory.
+# If more than one image type is enabled, warn on any that don't fit but only fail
+# if none fit.
do_sizecheck() {
if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
invalid=`echo ${KERNEL_IMAGE_MAXSIZE} | sed 's/[0-9]//g'`
if [ -n "$invalid" ]; then
- die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integerx (The unit is Kbytes)"
+ die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integer (The unit is Kbytes)"
fi
- for type in ${KERNEL_IMAGETYPES} ; do
- size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$type | awk '{print $1}'`
+ at_least_one_fits=
+ for imageType in ${KERNEL_IMAGETYPES} ; do
+ size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'`
if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
- warn "This kernel $type (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device. Please reduce the size of the kernel by making more of it modular."
+ bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
+ else
+ at_least_one_fits=y
fi
done
+ if [ -z "$at_least_one_fits" ]; then
+ die "All kernel images are too big for your device. Please reduce the size of the kernel by making more of it modular."
+ fi
fi
}
do_sizecheck[dirs] = "${B}"
addtask sizecheck before do_install after do_strip
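For the relaxed size check above, a machine configuration sketch; the limit is in kilobytes and the value is illustrative:

    # Warn per oversized image type; fail only if none of them fit
    KERNEL_IMAGE_MAXSIZE = "8192"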
-KERNEL_IMAGE_BASE_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}"
-# Don't include the DATETIME variable in the sstate package signatures
-KERNEL_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
-KERNEL_IMAGE_SYMLINK_NAME ?= "${MACHINE}"
-MODULE_IMAGE_BASE_NAME ?= "modules-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}"
-MODULE_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
-MODULE_TARBALL_BASE_NAME ?= "${MODULE_IMAGE_BASE_NAME}.tgz"
-# Don't include the DATETIME variable in the sstate package signatures
-MODULE_TARBALL_SYMLINK_NAME ?= "modules-${MACHINE}.tgz"
-MODULE_TARBALL_DEPLOY ?= "1"
+inherit kernel-artifact-names
kernel_do_deploy() {
- for type in ${KERNEL_IMAGETYPES} ; do
- base_name=${type}-${KERNEL_IMAGE_BASE_NAME}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${type} ${DEPLOYDIR}/${base_name}.bin
- done
- if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
- mkdir -p ${D}/lib
- tar -cvzf ${DEPLOYDIR}/${MODULE_TARBALL_BASE_NAME} -C ${D} lib
- ln -sf ${MODULE_TARBALL_BASE_NAME} ${DEPLOYDIR}/${MODULE_TARBALL_SYMLINK_NAME}
+ deployDir="${DEPLOYDIR}"
+ if [ -n "${KERNEL_DEPLOYSUBDIR}" ]; then
+ deployDir="${DEPLOYDIR}/${KERNEL_DEPLOYSUBDIR}"
+ mkdir "$deployDir"
fi
- for type in ${KERNEL_IMAGETYPES} ; do
- base_name=${type}-${KERNEL_IMAGE_BASE_NAME}
- symlink_name=${type}-${KERNEL_IMAGE_SYMLINK_NAME}
- ln -sf ${base_name}.bin ${DEPLOYDIR}/${symlink_name}.bin
- ln -sf ${base_name}.bin ${DEPLOYDIR}/${type}
+ for imageType in ${KERNEL_IMAGETYPES} ; do
+ base_name=${imageType}-${KERNEL_IMAGE_NAME}
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} $deployDir/${base_name}.bin
+ symlink_name=${imageType}-${KERNEL_IMAGE_LINK_NAME}
+ ln -sf ${base_name}.bin $deployDir/${symlink_name}.bin
+ ln -sf ${base_name}.bin $deployDir/${imageType}
done
- cp ${COREBASE}/meta/files/deploydir_readme.txt ${DEPLOYDIR}/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt
+ if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
+ mkdir -p ${D}${root_prefix}/lib
+ tar -cvzf $deployDir/modules-${MODULE_TARBALL_NAME}.tgz -C ${D}${root_prefix} lib
+ ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
+ fi
- cd ${B}
- # Update deploy directory
- for type in ${KERNEL_IMAGETYPES} ; do
- if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
- echo "Copying deploy ${type} kernel-initramfs image and setting up links..."
- initramfs_base_name=${type}-${INITRAMFS_BASE_NAME}
- initramfs_symlink_name=${type}-initramfs-${MACHINE}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${type}.initramfs ${DEPLOYDIR}/${initramfs_base_name}.bin
- ln -sf ${initramfs_base_name}.bin ${DEPLOYDIR}/${initramfs_symlink_name}.bin
- fi
- done
+ if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
+ for imageType in ${KERNEL_IMAGETYPES} ; do
+ if [ "$imageType" = "fitImage" ] ; then
+ continue
+ fi
+ initramfs_base_name=${imageType}-${INITRAMFS_NAME}
+ initramfs_symlink_name=${imageType}-${INITRAMFS_LINK_NAME}
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType}.initramfs $deployDir/${initramfs_base_name}.bin
+ ln -sf ${initramfs_base_name}.bin $deployDir/${initramfs_symlink_name}.bin
+ done
+ fi
}
do_deploy[cleandirs] = "${DEPLOYDIR}"
do_deploy[dirs] = "${DEPLOYDIR} ${B}"
do_deploy[prefuncs] += "package_get_auto_pr"
-addtask deploy after do_populate_sysroot
+addtask deploy after do_populate_sysroot do_packagedata
EXPORT_FUNCTIONS do_deploy
+
+# Add Device Tree support
+inherit kernel-devicetree
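The KERNEL_PACKAGE_NAME plumbing above lets a second kernel recipe coexist with the default one: it builds in its own WORKDIR staging directories, deploys under ${DEPLOYDIR}/${KERNEL_DEPLOYSUBDIR}, and does not provide virtual/kernel. A hypothetical recipe sketch; the recipe name and base recipe path are assumptions:

    # linux-test_4.18.bb (hypothetical)
    require recipes-kernel/linux/linux-yocto_4.18.bb
    # Packages become kernel-test, kernel-test-image, kernel-test-modules, ...
    KERNEL_PACKAGE_NAME = "kernel-test"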
diff --git a/meta/classes/kernelsrc.bbclass b/meta/classes/kernelsrc.bbclass
index 9efd46a92d..675d40ec9a 100644
--- a/meta/classes/kernelsrc.bbclass
+++ b/meta/classes/kernelsrc.bbclass
@@ -1,7 +1,7 @@
S = "${STAGING_KERNEL_DIR}"
-do_fetch[noexec] = "1"
-do_unpack[depends] += "virtual/kernel:do_patch"
-do_unpack[noexec] = "1"
+deltask do_fetch
+deltask do_unpack
+do_patch[depends] += "virtual/kernel:do_patch"
do_patch[noexec] = "1"
do_package[depends] += "virtual/kernel:do_populate_sysroot"
KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
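kernelsrc now removes do_fetch/do_unpack outright instead of marking them noexec, and chains its do_patch to the kernel's. A minimal consumer sketch, assuming a recipe that builds against the shared kernel source tree:

    inherit kernelsrc
    # ${S} is ${STAGING_KERNEL_DIR}, populated by virtual/kernel:do_patch;
    # KERNEL_VERSION is read from ${STAGING_KERNEL_BUILDDIR}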
diff --git a/meta/classes/libc-common.bbclass b/meta/classes/libc-common.bbclass
index 11b0065a6d..0e351b6746 100644
--- a/meta/classes/libc-common.bbclass
+++ b/meta/classes/libc-common.bbclass
@@ -1,9 +1,5 @@
do_install() {
oe_runmake install_root=${D} install
- for r in ${rpcsvc}; do
- h=`echo $r|sed -e's,\.x$,.h,'`
- install -m 0644 ${S}/sunrpc/rpcsvc/$h ${D}/${includedir}/rpcsvc/
- done
install -Dm 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/ld.so.conf
install -d ${D}${localedir}
make -f ${WORKDIR}/generate-supported.mk IN="${S}/localedata/SUPPORTED" OUT="${WORKDIR}/SUPPORTED"
@@ -17,15 +13,15 @@ do_install() {
}
def get_libc_fpu_setting(bb, d):
- if d.getVar('TARGET_FPU', True) in [ 'soft', 'ppc-efd' ]:
+ if d.getVar('TARGET_FPU') in [ 'soft', 'ppc-efd' ]:
return "--without-fp"
return ""
python populate_packages_prepend () {
- if d.getVar('DEBIAN_NAMES', True):
- pkgs = d.getVar('PACKAGES', True).split()
- bpn = d.getVar('BPN', True)
- prefix = d.getVar('MLPREFIX', True) or ""
+ if d.getVar('DEBIAN_NAMES'):
+ pkgs = d.getVar('PACKAGES').split()
+ bpn = d.getVar('BPN')
+ prefix = d.getVar('MLPREFIX') or ""
# Set the base package...
d.setVar('PKG_' + prefix + bpn, prefix + 'libc6')
libcprefix = prefix + bpn + '-'
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index 8c99d6113d..de816bcec1 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -9,25 +9,27 @@
GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"
+GLIBC_SPLIT_LC_PACKAGES ?= "0"
+
python __anonymous () {
- enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION", True)
+ enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION")
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
if pn.endswith("-initial"):
enabled = False
if enabled and int(enabled):
import re
- target_arch = d.getVar("TARGET_ARCH", True)
- binary_arches = d.getVar("BINARY_LOCALE_ARCHES", True) or ""
- use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or ""
+ target_arch = d.getVar("TARGET_ARCH")
+ binary_arches = d.getVar("BINARY_LOCALE_ARCHES") or ""
+ use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or ""
for regexp in binary_arches.split(" "):
r = re.compile(regexp)
if r.match(target_arch):
- depends = d.getVar("DEPENDS", True)
+ depends = d.getVar("DEPENDS")
if use_cross_localedef == "1" :
depends = "%s cross-localedef-native" % depends
else:
@@ -35,25 +37,14 @@ python __anonymous () {
d.setVar("DEPENDS", depends)
d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
break
-
- # try to fix disable charsets/locales/locale-code compile fail
- if bb.utils.contains('DISTRO_FEATURES', 'libc-charsets', True, False, d) and \
- bb.utils.contains('DISTRO_FEATURES', 'libc-locales', True, False, d) and \
- bb.utils.contains('DISTRO_FEATURES', 'libc-locale-code', True, False, d):
- d.setVar('PACKAGE_NO_GCONV', '0')
- else:
- d.setVar('PACKAGE_NO_GCONV', '1')
}
-OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
+# Try to fix the compile failure when charsets/locales/locale-code are disabled
+PACKAGE_NO_GCONV ?= "0"
-locale_base_postinst() {
-#!/bin/sh
-
-if [ "x$D" != "x" ]; then
- exit 1
-fi
+OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
+locale_base_postinst_ontarget() {
localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
}
@@ -73,9 +64,14 @@ do_prep_locale_tree() {
for i in $treedir/${datadir}/i18n/charmaps/*gz; do
gunzip $i
done
- tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir}
- if [ -f ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.* ]; then
- tar -cf - -C ${STAGING_DIR_NATIVE}/${prefix_native}/${base_libdir} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
+ # The extract pattern "./l*.so*" is carefully selected so that it will
+ # match ld*.so and lib*.so*, but not any files in the gconv directory
+ # (if it exists). This makes sure we only unpack the files we need.
+ # This is important in case usrmerge is set in DISTRO_FEATURES, which
+ # means ${base_libdir} == ${libdir}.
+ tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir} --wildcards './l*.so*'
+ if [ -f ${STAGING_LIBDIR_NATIVE}/libgcc_s.* ]; then
+ tar -cf - -C ${STAGING_LIBDIR_NATIVE} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
fi
install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
}
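GNU tar's --wildcards option matches shell-style globs against archive member names, which is what makes the './l*.so*' pattern above safe. As a rough illustration using Python's fnmatch (an approximation of the same glob rules):

    import fnmatch

    members = ['./ld-linux-x86-64.so.2', './libc.so.6',
               './gconv/UTF-8.so', './locale-archive']
    # Members under ./gconv/ never match: they do not begin with "./l".
    print(fnmatch.filter(members, './l*.so*'))
    # -> ['./ld-linux-x86-64.so.2', './libc.so.6']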
@@ -86,41 +82,44 @@ do_collect_bins_from_locale_tree() {
parent=$(dirname ${localedir})
mkdir -p ${PKGD}/$parent
tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent
+
+ # Finalize tree by changing all duplicate files into hard links
+ cross-localedef-hardlink -c -v ${WORKDIR}/locale-tree
}
inherit qemu
python package_do_split_gconvs () {
import re
- if (d.getVar('PACKAGE_NO_GCONV', True) == '1'):
+ if (d.getVar('PACKAGE_NO_GCONV') == '1'):
bb.note("package requested not splitting gconvs")
return
- if not d.getVar('PACKAGES', True):
+ if not d.getVar('PACKAGES'):
return
- mlprefix = d.getVar("MLPREFIX", True) or ""
+ mlprefix = d.getVar("MLPREFIX") or ""
- bpn = d.getVar('BPN', True)
- libdir = d.getVar('libdir', True)
+ bpn = d.getVar('BPN')
+ libdir = d.getVar('libdir')
if not libdir:
bb.error("libdir not defined")
return
- datadir = d.getVar('datadir', True)
+ datadir = d.getVar('datadir')
if not datadir:
bb.error("datadir not defined")
return
- gconv_libdir = base_path_join(libdir, "gconv")
- charmap_dir = base_path_join(datadir, "i18n", "charmaps")
- locales_dir = base_path_join(datadir, "i18n", "locales")
- binary_locales_dir = d.getVar('localedir', True)
+ gconv_libdir = oe.path.join(libdir, "gconv")
+ charmap_dir = oe.path.join(datadir, "i18n", "charmaps")
+ locales_dir = oe.path.join(datadir, "i18n", "locales")
+ binary_locales_dir = d.getVar('localedir')
def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
deps = []
f = open(fn, "rb")
- c_re = re.compile('^copy "(.*)"')
- i_re = re.compile('^include "(\w+)".*')
+ c_re = re.compile(r'^copy "(.*)"')
+ i_re = re.compile(r'^include "(\w+)".*')
for l in f.readlines():
l = l.decode("latin-1")
m = c_re.match(l) or i_re.match(l)
@@ -134,15 +133,15 @@ python package_do_split_gconvs () {
if bpn != 'glibc':
d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
- do_split_packages(d, gconv_libdir, file_regex='^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
+ do_split_packages(d, gconv_libdir, file_regex=r'^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
description='gconv module for character set %s', hook=calc_gconv_deps, \
extra_depends=bpn+'-gconv')
def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
deps = []
f = open(fn, "rb")
- c_re = re.compile('^copy "(.*)"')
- i_re = re.compile('^include "(\w+)".*')
+ c_re = re.compile(r'^copy "(.*)"')
+ i_re = re.compile(r'^include "(\w+)".*')
for l in f.readlines():
l = l.decode("latin-1")
m = c_re.match(l) or i_re.match(l)
@@ -156,14 +155,14 @@ python package_do_split_gconvs () {
if bpn != 'glibc':
d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
- do_split_packages(d, charmap_dir, file_regex='^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
+ do_split_packages(d, charmap_dir, file_regex=r'^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
deps = []
f = open(fn, "rb")
- c_re = re.compile('^copy "(.*)"')
- i_re = re.compile('^include "(\w+)".*')
+ c_re = re.compile(r'^copy "(.*)"')
+ i_re = re.compile(r'^include "(\w+)".*')
for l in f.readlines():
l = l.decode("latin-1")
m = c_re.match(l) or i_re.match(l)
@@ -177,17 +176,17 @@ python package_do_split_gconvs () {
if bpn != 'glibc':
d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
- do_split_packages(d, locales_dir, file_regex='(.*)', output_pattern=bpn+'-localedata-%s', \
+ do_split_packages(d, locales_dir, file_regex=r'(.*)', output_pattern=bpn+'-localedata-%s', \
description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
d.setVar('PACKAGES', d.getVar('PACKAGES', False) + ' ' + d.getVar('MLPREFIX', False) + bpn + '-gconv')
- use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", True)
+ use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE")
- dot_re = re.compile("(.*)\.(.*)")
+ dot_re = re.compile(r"(.*)\.(.*)")
# Read in supported locales and associated encodings
supported = {}
- with open(base_path_join(d.getVar('WORKDIR', True), "SUPPORTED")) as f:
+ with open(oe.path.join(d.getVar('WORKDIR'), "SUPPORTED")) as f:
for line in f.readlines():
try:
locale, charset = line.rstrip().split()
@@ -196,7 +195,7 @@ python package_do_split_gconvs () {
supported[locale] = charset
# The GLIBC_GENERATE_LOCALES var specifies which locales are to be generated; empty or "all" means all locales
- to_generate = d.getVar('GLIBC_GENERATE_LOCALES', True)
+ to_generate = d.getVar('GLIBC_GENERATE_LOCALES')
if not to_generate or to_generate == 'all':
to_generate = sorted(supported.keys())
else:
@@ -213,34 +212,35 @@ python package_do_split_gconvs () {
def output_locale_source(name, pkgname, locale, encoding):
d.setVar('RDEPENDS_%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
(mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
- d.setVar('pkg_postinst_%s' % pkgname, d.getVar('locale_base_postinst', True) \
+ d.setVar('pkg_postinst_ontarget_%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \
% (locale, encoding, locale))
- d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm', True) % \
+ d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm') % \
(locale, encoding, locale))
def output_locale_binary_rdepends(name, pkgname, locale, encoding):
- m = re.match("(.*)\.(.*)", name)
- if m:
- libc_name = "%s.%s" % (m.group(1), m.group(2).lower())
- else:
- libc_name = name
- d.setVar('RDEPENDS_%s' % pkgname, legitimize_package_name('%s-binary-localedata-%s' \
- % (mlprefix+bpn, libc_name)))
+ dep = legitimize_package_name('%s-binary-localedata-%s' % (bpn, name))
+ lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
+ if lcsplit and int(lcsplit):
+ d.appendVar('PACKAGES', ' ' + dep)
+ d.setVar('ALLOW_EMPTY_%s' % dep, '1')
+ d.setVar('RDEPENDS_%s' % pkgname, mlprefix + dep)
commands = {}
def output_locale_binary(name, pkgname, locale, encoding):
- treedir = base_path_join(d.getVar("WORKDIR", True), "locale-tree")
- ldlibdir = base_path_join(treedir, d.getVar("base_libdir", True))
- path = d.getVar("PATH", True)
- i18npath = base_path_join(treedir, datadir, "i18n")
- gconvpath = base_path_join(treedir, "iconvdata")
- outputpath = base_path_join(treedir, binary_locales_dir)
-
- use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or "0"
+ treedir = oe.path.join(d.getVar("WORKDIR"), "locale-tree")
+ ldlibdir = oe.path.join(treedir, d.getVar("base_libdir"))
+ path = d.getVar("PATH")
+ i18npath = oe.path.join(treedir, datadir, "i18n")
+ gconvpath = oe.path.join(treedir, "iconvdata")
+ outputpath = oe.path.join(treedir, binary_locales_dir)
+
+ use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or "0"
if use_cross_localedef == "1":
- target_arch = d.getVar('TARGET_ARCH', True)
+ target_arch = d.getVar('TARGET_ARCH')
locale_arch_options = { \
+ "arc": " --uint32-align=4 --little-endian ", \
+ "arceb": " --uint32-align=4 --big-endian ", \
"arm": " --uint32-align=4 --little-endian ", \
"armeb": " --uint32-align=4 --big-endian ", \
"aarch64": " --uint32-align=4 --little-endian ", \
@@ -249,9 +249,15 @@ python package_do_split_gconvs () {
"powerpc": " --uint32-align=4 --big-endian ", \
"powerpc64": " --uint32-align=4 --big-endian ", \
"mips": " --uint32-align=4 --big-endian ", \
+ "mipsisa32r6": " --uint32-align=4 --big-endian ", \
"mips64": " --uint32-align=4 --big-endian ", \
+ "mipsisa64r6": " --uint32-align=4 --big-endian ", \
"mipsel": " --uint32-align=4 --little-endian ", \
+ "mipsisa32r6el": " --uint32-align=4 --little-endian ", \
"mips64el":" --uint32-align=4 --little-endian ", \
+ "mipsisa64r6el":" --uint32-align=4 --little-endian ", \
+ "riscv64": " --uint32-align=4 --little-endian ", \
+ "riscv32": " --uint32-align=4 --little-endian ", \
"i586": " --uint32-align=4 --little-endian ", \
"i686": " --uint32-align=4 --little-endian ", \
"x86_64": " --uint32-align=4 --little-endian " }
@@ -260,9 +266,9 @@ python package_do_split_gconvs () {
localedef_opts = locale_arch_options[target_arch]
else:
bb.error("locale_arch_options not found for target_arch=" + target_arch)
- raise bb.build.FuncFailed("unknown arch:" + target_arch + " for locale_arch_options")
+ bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
- localedef_opts += " --force --no-archive --prefix=%s \
+ localedef_opts += " --force --no-hard-links --no-archive --prefix=%s \
--inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
% (treedir, treedir, datadir, locale, encoding, outputpath, name)
@@ -270,14 +276,14 @@ python package_do_split_gconvs () {
(path, i18npath, gconvpath, localedef_opts)
else: # earlier slower qemu way
qemu = qemu_target_binary(d)
- localedef_opts = "--force --no-archive --prefix=%s \
+ localedef_opts = "--force --no-hard-links --no-archive --prefix=%s \
--inputfile=%s/i18n/locales/%s --charmap=%s %s" \
% (treedir, datadir, locale, encoding, name)
- qemu_options = d.getVar('QEMU_OPTIONS', True)
+ qemu_options = d.getVar('QEMU_OPTIONS')
cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
- -E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \
+ -E LD_LIBRARY_PATH=%s %s %s${base_bindir}/localedef %s" % \
(path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
commands["%s/%s" % (outputpath, name)] = cmd
@@ -287,9 +293,9 @@ python package_do_split_gconvs () {
def output_locale(name, locale, encoding):
pkgname = d.getVar('MLPREFIX', False) + 'locale-base-' + legitimize_package_name(name)
d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
- d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES', True)))
+ d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES')))
rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
- m = re.match("(.*)_(.*)", name)
+ m = re.match(r"(.*)_(.*)", name)
if m:
rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
d.setVar('RPROVIDES_%s' % pkgname, rprovides)
@@ -306,8 +312,8 @@ python package_do_split_gconvs () {
bb.note("preparing tree for binary locale generation")
bb.build.exec_func("do_prep_locale_tree", d)
- utf8_only = int(d.getVar('LOCALE_UTF8_ONLY', True) or 0)
- utf8_is_default = int(d.getVar('LOCALE_UTF8_IS_DEFAULT', True) or 0)
+ utf8_only = int(d.getVar('LOCALE_UTF8_ONLY') or 0)
+ utf8_is_default = int(d.getVar('LOCALE_UTF8_IS_DEFAULT') or 0)
encodings = {}
for locale in to_generate:
@@ -333,26 +339,38 @@ python package_do_split_gconvs () {
else:
output_locale('%s.%s' % (base, charset), base, charset)
+ def metapkg_hook(file, pkg, pattern, format, basename):
+ name = basename.split('/', 1)[0]
+ metapkg = legitimize_package_name('%s-binary-localedata-%s' % (mlprefix+bpn, name))
+ d.appendVar('RDEPENDS_%s' % metapkg, ' ' + pkg)
+
if use_bin == "compile":
- makefile = base_path_join(d.getVar("WORKDIR", True), "locale-tree", "Makefile")
- m = open(makefile, "w")
- m.write("all: %s\n\n" % " ".join(commands.keys()))
- for cmd in commands:
- m.write(cmd + ":\n")
- m.write("\t" + commands[cmd] + "\n\n")
- m.close()
+ makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
+ with open(makefile, "w") as m:
+ m.write("all: %s\n\n" % " ".join(commands.keys()))
+ total = len(commands)
+ for i, (maketarget, makerecipe) in enumerate(commands.items()):
+ m.write(maketarget + ":\n")
+ m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
+ m.write("\t" + makerecipe + "\n\n")
d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
+ d.setVarFlag("oe_runmake", "progress", "outof:Progress\s(\d+)/(\d+)")
bb.note("Executing binary locale generation makefile")
bb.build.exec_func("oe_runmake", d)
bb.note("collecting binary locales from locale tree")
bb.build.exec_func("do_collect_bins_from_locale_tree", d)
- do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
- output_pattern=bpn+'-binary-localedata-%s', \
- description='binary locale definition for %s', extra_depends='', allow_dirs=True)
- elif use_bin == "precompiled":
- do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
- output_pattern=bpn+'-binary-localedata-%s', \
- description='binary locale definition for %s', extra_depends='', allow_dirs=True)
+
+ if use_bin in ('compile', 'precompiled'):
+ lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
+ if lcsplit and int(lcsplit):
+ do_split_packages(d, binary_locales_dir, file_regex=r'^(.*/LC_\w+)', \
+ output_pattern=bpn+'-binary-localedata-%s', \
+ description='binary locale definition for %s', recursive=True,
+ hook=metapkg_hook, extra_depends='', allow_dirs=True, match_path=True)
+ else:
+ do_split_packages(d, binary_locales_dir, file_regex=r'(.*)', \
+ output_pattern=bpn+'-binary-localedata-%s', \
+ description='binary locale definition for %s', extra_depends='', allow_dirs=True)
else:
bb.note("generation of binary locales disabled. this may break i18n!")
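When GLIBC_SPLIT_LC_PACKAGES is enabled, the r'^(.*/LC_\w+)' regex above produces one package per locale category, and metapkg_hook regroups them under a per-locale metapackage keyed on the leading path component. A small sketch of the matching (package-name legitimization is approximated in the comments):

    import re

    path = 'en_GB/LC_TIME'
    m = re.match(r'^(.*/LC_\w+)', path)
    print(m.group(1))             # 'en_GB/LC_TIME': one package per LC_* file

    # metapkg_hook keys on the locale before the first '/':
    print(path.split('/', 1)[0])  # 'en_GB'
    # so LC_TIME, LC_NUMERIC, ... all become RDEPENDS of a metapackage
    # named roughly glibc-binary-localedata-en-gb.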
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index ad12db4838..f90176d6c0 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -16,278 +16,6 @@ addtask populate_lic after do_patch before do_build
do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
-python write_package_manifest() {
- # Get list of installed packages
- license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
- bb.utils.mkdirhier(license_image_dir)
- from oe.rootfs import image_list_installed_packages
- from oe.utils import format_pkg_list
-
- pkgs = image_list_installed_packages(d)
- output = format_pkg_list(pkgs)
- open(os.path.join(license_image_dir, 'package.manifest'),
- 'w+').write(output)
-}
-
-python write_deploy_manifest() {
- license_deployed_manifest(d)
-}
-
-python license_create_manifest() {
- import oe.packagedata
- from oe.rootfs import image_list_installed_packages
-
- build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS', True)
- if build_images_from_feeds == "1":
- return 0
-
- pkg_dic = {}
- for pkg in sorted(image_list_installed_packages(d)):
- pkg_info = os.path.join(d.getVar('PKGDATA_DIR', True),
- 'runtime-reverse', pkg)
- pkg_name = os.path.basename(os.readlink(pkg_info))
-
- pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
- if not "LICENSE" in pkg_dic[pkg_name].keys():
- pkg_lic_name = "LICENSE_" + pkg_name
- pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
-
- rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
- d.getVar('IMAGE_NAME', True), 'license.manifest')
- write_license_files(d, rootfs_license_manifest, pkg_dic)
-}
-
-def write_license_files(d, license_manifest, pkg_dic):
- import re
-
- bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE", True) or "").split()
- bad_licenses = map(lambda l: canonical_license(d, l), bad_licenses)
- bad_licenses = expand_wildcard_licenses(d, bad_licenses)
-
- with open(license_manifest, "w") as license_file:
- for pkg in sorted(pkg_dic):
- if bad_licenses:
- try:
- (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
- oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
- bad_licenses, canonical_license, d)
- except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('P', True), exc))
- else:
- pkg_dic[pkg]["LICENSES"] = re.sub('[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
- pkg_dic[pkg]["LICENSES"] = re.sub(' *', ' ', pkg_dic[pkg]["LICENSES"])
- pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
-
- if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
- # Rootfs manifest
- license_file.write("PACKAGE NAME: %s\n" % pkg)
- license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"])
- license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
- license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"])
-
- # If the package doesn't contain any file, that is, its size is 0, the license
- # isn't relevant as far as the final image is concerned. So doing license check
- # doesn't make much sense, skip it.
- if pkg_dic[pkg]["PKGSIZE_%s" % pkg] == "0":
- continue
- else:
- # Image manifest
- license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
- license_file.write("VERSION: %s\n" % pkg_dic[pkg]["PV"])
- license_file.write("LICENSE: %s\n" % pkg_dic[pkg]["LICENSE"])
- license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])
-
- for lic in pkg_dic[pkg]["LICENSES"]:
- lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
- pkg_dic[pkg]["PN"], "generic_%s" %
- re.sub('\+', '', lic))
- # add explicity avoid of CLOSED license because isn't generic
- if lic == "CLOSED":
- continue
-
- if not os.path.exists(lic_file):
- bb.warn("The license listed %s was not in the "\
- "licenses collected for recipe %s"
- % (lic, pkg_dic[pkg]["PN"]))
-
- # Two options here:
- # - Just copy the manifest
- # - Copy the manifest and the license directories
- # With both options set we see a .5 M increase in core-image-minimal
- copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST', True)
- copy_lic_dirs = d.getVar('COPY_LIC_DIRS', True)
- if copy_lic_manifest == "1":
- rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS', 'True'),
- 'usr', 'share', 'common-licenses')
- bb.utils.mkdirhier(rootfs_license_dir)
- rootfs_license_manifest = os.path.join(rootfs_license_dir,
- os.path.split(license_manifest)[1])
- if not os.path.exists(rootfs_license_manifest):
- os.link(license_manifest, rootfs_license_manifest)
-
- if copy_lic_dirs == "1":
- for pkg in sorted(pkg_dic):
- pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
- bb.utils.mkdirhier(pkg_rootfs_license_dir)
- pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
- pkg_dic[pkg]["PN"])
- licenses = os.listdir(pkg_license_dir)
- for lic in licenses:
- rootfs_license = os.path.join(rootfs_license_dir, lic)
- pkg_license = os.path.join(pkg_license_dir, lic)
- pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
-
- if re.match("^generic_.*$", lic):
- generic_lic = re.search("^generic_(.*)$", lic).group(1)
- if oe.license.license_ok(canonical_license(d,
- generic_lic), bad_licenses) == False:
- continue
-
- if not os.path.exists(rootfs_license):
- os.link(pkg_license, rootfs_license)
-
- if not os.path.exists(pkg_rootfs_license):
- os.symlink(os.path.join('..', lic), pkg_rootfs_license)
- else:
- if (oe.license.license_ok(canonical_license(d,
- lic), bad_licenses) == False or
- os.path.exists(pkg_rootfs_license)):
- continue
-
- os.link(pkg_license, pkg_rootfs_license)
-
-
-def license_deployed_manifest(d):
- """
- Write the license manifest for the deployed recipes.
- The deployed recipes usually includes the bootloader
- and extra files to boot the target.
- """
-
- dep_dic = {}
- man_dic = {}
- lic_dir = d.getVar("LICENSE_DIRECTORY", True)
-
- dep_dic = get_deployed_dependencies(d)
- for dep in dep_dic.keys():
- man_dic[dep] = {}
- # It is necessary to mark this will be used for image manifest
- man_dic[dep]["IMAGE_MANIFEST"] = True
- man_dic[dep]["PN"] = dep
- man_dic[dep]["FILES"] = \
- " ".join(get_deployed_files(dep_dic[dep]))
- with open(os.path.join(lic_dir, dep, "recipeinfo"), "r") as f:
- for line in f.readlines():
- key,val = line.split(": ", 1)
- man_dic[dep][key] = val[:-1]
-
- lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
- d.getVar('IMAGE_NAME', True))
- bb.utils.mkdirhier(lic_manifest_dir)
- image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
- write_license_files(d, image_license_manifest, man_dic)
-
-def get_deployed_dependencies(d):
- """
- Get all the deployed dependencies of an image
- """
-
- deploy = {}
- # Get all the dependencies for the current task (rootfs).
- # Also get EXTRA_IMAGEDEPENDS because the bootloader is
- # usually in this var and not listed in rootfs.
- # At last, get the dependencies from boot classes because
- # it might contain the bootloader.
- taskdata = d.getVar("BB_TASKDEPDATA", False)
- depends = list(set([dep[0] for dep
- in list(taskdata.values())
- if not dep[0].endswith("-native")]))
- extra_depends = d.getVar("EXTRA_IMAGEDEPENDS", True)
- boot_depends = get_boot_dependencies(d)
- depends.extend(extra_depends.split())
- depends.extend(boot_depends)
- depends = list(set(depends))
-
- # To verify what was deployed it checks the rootfs dependencies against
- # the SSTATE_MANIFESTS for "deploy" task.
- # The manifest file name contains the arch. Because we are not running
- # in the recipe context it is necessary to check every arch used.
- sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS", True)
- sstate_archs = d.getVar("SSTATE_ARCHS", True)
- extra_archs = d.getVar("PACKAGE_EXTRA_ARCHS", True)
- archs = list(set(("%s %s" % (sstate_archs, extra_archs)).split()))
- for dep in depends:
- # Some recipes have an arch on their own, so we try that first.
- special_arch = d.getVar("PACKAGE_ARCH_pn-%s" % dep, True)
- if special_arch:
- sstate_manifest_file = os.path.join(sstate_manifest_dir,
- "manifest-%s-%s.deploy" % (special_arch, dep))
- if os.path.exists(sstate_manifest_file):
- deploy[dep] = sstate_manifest_file
- continue
-
- for arch in archs:
- sstate_manifest_file = os.path.join(sstate_manifest_dir,
- "manifest-%s-%s.deploy" % (arch, dep))
- if os.path.exists(sstate_manifest_file):
- deploy[dep] = sstate_manifest_file
- break
-
- return deploy
-get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
-
-def get_boot_dependencies(d):
- """
- Return the dependencies from boot tasks
- """
-
- depends = []
- boot_depends_string = ""
- taskdepdata = d.getVar("BB_TASKDEPDATA", False)
- # Only bootimg and bootdirectdisk include the depends flag
- boot_tasks = ["do_bootimg", "do_bootdirectdisk",]
-
- for task in boot_tasks:
- boot_depends_string = "%s %s" % (boot_depends_string,
- d.getVarFlag(task, "depends", True) or "")
- boot_depends = [dep.split(":")[0] for dep
- in boot_depends_string.split()
- if not dep.split(":")[0].endswith("-native")]
- for dep in boot_depends:
- info_file = os.path.join(d.getVar("LICENSE_DIRECTORY", True),
- dep, "recipeinfo")
- # If the recipe and dependency name is the same
- if os.path.exists(info_file):
- depends.append(dep)
- # We need to search for the provider of the dependency
- else:
- for taskdep in taskdepdata.values():
- # The fifth field contains what the task provides
- if dep in taskdep[4]:
- info_file = os.path.join(
- d.getVar("LICENSE_DIRECTORY", True),
- taskdep[0], "recipeinfo")
- if os.path.exists(info_file):
- depends.append(taskdep[0])
- break
- return depends
-get_boot_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
-
-def get_deployed_files(man_file):
- """
- Get the files deployed from the sstate manifest
- """
-
- dep_files = []
- excluded_files = ["README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt"]
- with open(man_file, "r") as manifest:
- all_files = manifest.read()
- for f in all_files.splitlines():
- if ((not (os.path.islink(f) or os.path.isdir(f))) and
- not os.path.basename(f) in excluded_files):
- dep_files.append(os.path.basename(f))
- return dep_files
-
python do_populate_lic() {
"""
Populate LICENSE_DIRECTORY with licenses.
@@ -295,7 +23,7 @@ python do_populate_lic() {
lic_files_paths = find_license_files(d)
# The base directory we wrangle licenses to
- destdir = os.path.join(d.getVar('LICSSTATEDIR', True), d.getVar('PN', True))
+ destdir = os.path.join(d.getVar('LICSSTATEDIR'), d.getVar('PN'))
copy_license_files(lic_files_paths, destdir)
info = get_recipe_info(d)
with open(os.path.join(destdir, "recipeinfo"), "w") as f:
@@ -306,11 +34,11 @@ python do_populate_lic() {
# it would be better to copy them in do_install_append, but find_license_files is python
python perform_packagecopy_prepend () {
enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
- if d.getVar('CLASSOVERRIDE', True) == 'class-target' and enabled:
+ if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
lic_files_paths = find_license_files(d)
# LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
- destdir = d.getVar('D', True) + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY', True), d.getVar('PN', True))
+ destdir = d.getVar('D') + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY'), d.getVar('PN'))
copy_license_files(lic_files_paths, destdir)
add_package_and_files(d)
}
@@ -318,23 +46,26 @@ perform_packagecopy[vardeps] += "LICENSE_CREATE_PACKAGE"
def get_recipe_info(d):
info = {}
- info["PV"] = d.getVar("PV", True)
- info["PR"] = d.getVar("PR", True)
- info["LICENSE"] = d.getVar("LICENSE", True)
+ info["PV"] = d.getVar("PV")
+ info["PR"] = d.getVar("PR")
+ info["LICENSE"] = d.getVar("LICENSE")
return info
def add_package_and_files(d):
- packages = d.getVar('PACKAGES', True)
- files = d.getVar('LICENSE_FILES_DIRECTORY', True)
- pn = d.getVar('PN', True)
+ packages = d.getVar('PACKAGES')
+ files = d.getVar('LICENSE_FILES_DIRECTORY')
+ pn = d.getVar('PN')
pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX', False))
- if pn_lic in packages:
+ if pn_lic in packages.split():
bb.warn("%s package already existed in %s." % (pn_lic, pn))
else:
# first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
d.setVar('FILES_' + pn_lic, files)
- rrecommends_pn = d.getVar('RRECOMMENDS_' + pn, True)
+ for pn in packages.split():
+ if pn == pn_lic:
+ continue
+ rrecommends_pn = d.getVar('RRECOMMENDS_' + pn)
if rrecommends_pn:
d.setVar('RRECOMMENDS_' + pn, "%s %s" % (pn_lic, rrecommends_pn))
else:
@@ -345,13 +76,15 @@ def copy_license_files(lic_files_paths, destdir):
import errno
bb.utils.mkdirhier(destdir)
- for (basename, path) in lic_files_paths:
+ for (basename, path, beginline, endline) in lic_files_paths:
try:
src = path
dst = os.path.join(destdir, basename)
if os.path.exists(dst):
os.remove(dst)
- canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev)
+ if os.path.islink(src):
+ src = os.path.realpath(src)
+ canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev) and beginline is None and endline is None
if canlink:
try:
os.link(src, dst)
@@ -362,20 +95,19 @@ def copy_license_files(lic_files_paths, destdir):
canlink = False
else:
raise
- try:
- if canlink:
- os.chown(dst,0,0)
- except OSError as err:
- if err.errno in (errno.EPERM, errno.EINVAL):
- # Suppress "Operation not permitted" error, as
- # sometimes this function is not executed under pseudo.
- # Also ignore "Invalid argument" errors that happen in
- # some (unprivileged) container environments (no root).
- pass
- else:
- raise
+ # Only chown if we did the hardlink and we're running under pseudo
+ if canlink and os.environ.get('PSEUDO_DISABLED') == '0':
+ os.chown(dst,0,0)
if not canlink:
- shutil.copyfile(src, dst)
+ begin_idx = int(beginline)-1 if beginline is not None else None
+ end_idx = int(endline) if endline is not None else None
+ if begin_idx is None and end_idx is None:
+ shutil.copyfile(src, dst)
+ else:
+ with open(src, 'rb') as src_f:
+ with open(dst, 'wb') as dst_f:
+ dst_f.write(b''.join(src_f.readlines()[begin_idx:end_idx]))
+
except Exception as e:
bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
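The beginline/endline parameters carry 1-based, inclusive line numbers from LIC_FILES_CHKSUM, so the copy above converts them into a Python slice. The arithmetic, in isolation:

    # e.g. LIC_FILES_CHKSUM = "file://COPYING;beginline=3;endline=10;md5=..."
    beginline, endline = '3', '10'
    begin_idx = int(beginline) - 1   # 2: slice start is 0-based
    end_idx = int(endline)           # 10: slice end is exclusive
    with open('COPYING', 'rb') as src_f:
        extract = b''.join(src_f.readlines()[begin_idx:end_idx])
    # extract now holds lines 3..10 of the file, inclusive.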
@@ -388,20 +120,22 @@ def find_license_files(d):
from collections import defaultdict, OrderedDict
# All the license files for the package
- lic_files = d.getVar('LIC_FILES_CHKSUM', True)
- pn = d.getVar('PN', True)
+ lic_files = d.getVar('LIC_FILES_CHKSUM') or ""
+ pn = d.getVar('PN')
# The license files are located in S, at the paths given in LIC_FILES_CHKSUM.
- srcdir = d.getVar('S', True)
+ srcdir = d.getVar('S')
# Directory we store the generic licenses as set in the distro configuration
- generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
+ generic_directory = d.getVar('COMMON_LICENSE_DIR')
# List of basename, path tuples
lic_files_paths = []
+ # hash to keep track of generic license mappings
+ non_generic_lics = {}
# Entries from LIC_FILES_CHKSUM
lic_chksums = {}
license_source_dirs = []
license_source_dirs.append(generic_directory)
try:
- additional_lic_dirs = d.getVar('LICENSE_PATH', True).split()
+ additional_lic_dirs = d.getVar('LICENSE_PATH').split()
for lic_dir in additional_lic_dirs:
license_source_dirs.append(lic_dir)
except:
@@ -429,10 +163,10 @@ def find_license_files(d):
# unless NO_GENERIC_LICENSE is set.
for lic_dir in license_source_dirs:
if not os.path.isfile(os.path.join(lic_dir, license_type)):
- if d.getVarFlag('SPDXLICENSEMAP', license_type, True) != None:
+ if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
# Great, there is an SPDXLICENSEMAP. We can copy!
bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
- spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type, True)
+ spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
license_source = lic_dir
break
elif os.path.isfile(os.path.join(lic_dir, license_type)):
@@ -440,23 +174,25 @@ def find_license_files(d):
license_source = lic_dir
break
- non_generic_lic = d.getVarFlag('NO_GENERIC_LICENSE', license_type, True)
+ non_generic_lic = d.getVarFlag('NO_GENERIC_LICENSE', license_type)
if spdx_generic and license_source:
# we really should copy to generic_ + spdx_generic, however, that ends up messing the manifest
# audit up. This should be fixed in emit_pkgdata (or we actually go and fix all the recipes)
- lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic)))
+ lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic),
+ None, None))
# The user may attempt to use NO_GENERIC_LICENSE for a generic license which doesn't make sense
# and should not be allowed, warn the user in this case.
- if d.getVarFlag('NO_GENERIC_LICENSE', license_type, True):
+ if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
bb.warn("%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type))
elif non_generic_lic and non_generic_lic in lic_chksums:
# if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
# of the package rather than the license_source_dirs.
lic_files_paths.append(("generic_" + license_type,
- os.path.join(srcdir, non_generic_lic)))
+ os.path.join(srcdir, non_generic_lic), None, None))
+ non_generic_lics[non_generic_lic] = license_type
else:
# Explicitly avoid the CLOSED license because it isn't generic
if license_type != 'CLOSED':
@@ -465,43 +201,44 @@ def find_license_files(d):
pass
if not generic_directory:
- raise bb.build.FuncFailed("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
-
- if not lic_files:
- # No recipe should have an invalid license file. This is checked else
- # where, but let's be pedantic
- bb.note(pn + ": Recipe file does not have license file information.")
- return lic_files_paths
+ bb.fatal("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
for url in lic_files.split():
try:
- (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
+ (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
+ if method != "file" or not path:
+ raise bb.fetch.MalformedUrl()
except bb.fetch.MalformedUrl:
- raise bb.build.FuncFailed("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF', True), url))
+ bb.fatal("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF'), url))
# We want the license filename and path
- chksum = parm['md5'] if 'md5' in parm else parm['sha256']
- lic_chksums[path] = chksum
+ chksum = parm.get('md5', None)
+ beginline = parm.get('beginline')
+ endline = parm.get('endline')
+ lic_chksums[path] = (chksum, beginline, endline)
v = FindVisitor()
try:
- v.visit_string(d.getVar('LICENSE', True))
+ v.visit_string(d.getVar('LICENSE'))
except oe.license.InvalidLicense as exc:
- bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
+ bb.fatal('%s: %s' % (d.getVar('PF'), exc))
except SyntaxError:
- bb.warn("%s: Failed to parse it's LICENSE field." % (d.getVar('PF', True)))
-
+ bb.warn("%s: Failed to parse it's LICENSE field." % (d.getVar('PF')))
# Add files from LIC_FILES_CHKSUM to list of license files
lic_chksum_paths = defaultdict(OrderedDict)
- for path, chksum in lic_chksums.items():
- lic_chksum_paths[os.path.basename(path)][chksum] = os.path.join(srcdir, path)
+ for path, data in sorted(lic_chksums.items()):
+ lic_chksum_paths[os.path.basename(path)][data] = (os.path.join(srcdir, path), data[1], data[2])
for basename, files in lic_chksum_paths.items():
if len(files) == 1:
- lic_files_paths.append((basename, list(files.values())[0]))
+ # Don't copy again a LICENSE already handled as non-generic
+ if basename in non_generic_lics:
+ continue
+ data = list(files.values())[0]
+ lic_files_paths.append(tuple([basename] + list(data)))
else:
# If there are multiple different license files with identical
# basenames we rename them to <file>.0, <file>.1, ...
- for i, path in enumerate(files.values()):
- lic_files_paths.append(("%s.%d" % (basename, i), path))
+ for i, data in enumerate(files.values()):
+ lic_files_paths.append(tuple(["%s.%d" % (basename, i)] + list(data)))
return lic_files_paths
@@ -509,35 +246,54 @@ def return_spdx(d, license):
"""
This function returns the spdx mapping of a license if it exists.
"""
- return d.getVarFlag('SPDXLICENSEMAP', license, True)
+ return d.getVarFlag('SPDXLICENSEMAP', license)
def canonical_license(d, license):
"""
Return the canonical (SPDX) form of the license if available (so GPLv3
becomes GPL-3.0), for the license named 'X+', return canonical form of
- 'X' if availabel and the tailing '+' (so GPLv3+ becomes GPL-3.0+),
+ 'X' if available and the trailing '+' (so GPLv3+ becomes GPL-3.0+),
or the passed license if there is no canonical form.
"""
- lic = d.getVarFlag('SPDXLICENSEMAP', license, True) or ""
+ lic = d.getVarFlag('SPDXLICENSEMAP', license) or ""
if not lic and license.endswith('+'):
- lic = d.getVarFlag('SPDXLICENSEMAP', license.rstrip('+'), True)
+ lic = d.getVarFlag('SPDXLICENSEMAP', license.rstrip('+'))
if lic:
lic += '+'
return lic or license
+def available_licenses(d):
+ """
+ Return the available licenses by searching the directories specified by
+ COMMON_LICENSE_DIR and LICENSE_PATH.
+ """
+ lic_dirs = ((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' +
+ (d.getVar('LICENSE_PATH') or '')).split()
+
+ licenses = []
+ for lic_dir in lic_dirs:
+ licenses += os.listdir(lic_dir)
+
+ licenses = sorted(licenses)
+ return licenses
+
+# Only determine the list of all available licenses once. This assumes that any
+# additions to LICENSE_PATH have been done before this file is parsed.
+AVAILABLE_LICENSES := "${@' '.join(available_licenses(d))}"
+
def expand_wildcard_licenses(d, wildcard_licenses):
"""
- Return actual spdx format license names if wildcard used. We expand
- wildcards from SPDXLICENSEMAP flags and SRC_DISTRIBUTE_LICENSES values.
+ Return actual spdx format license names if wildcards are used. We expand
+ wildcards from SPDXLICENSEMAP flags and AVAILABLE_LICENSES.
"""
import fnmatch
- licenses = []
+ licenses = wildcard_licenses[:]
spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
for wld_lic in wildcard_licenses:
spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
- licenses += [d.getVarFlag('SPDXLICENSEMAP', flag, True) for flag in spdxflags]
+ licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
- spdx_lics = (d.getVar('SRC_DISTRIBUTE_LICENSES', False) or '').split()
+ spdx_lics = d.getVar('AVAILABLE_LICENSES').split()
for wld_lic in wildcard_licenses:
licenses += fnmatch.filter(spdx_lics, wld_lic)
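The expansion is plain fnmatch over AVAILABLE_LICENSES plus the SPDXLICENSEMAP keys. With an illustrative (not exhaustive) license list:

    import fnmatch

    available = ['GPL-2.0', 'GPL-3.0', 'GPL-3.0-with-GCC-exception', 'LGPL-3.0']
    print(fnmatch.filter(available, 'GPL-3.0*'))
    # -> ['GPL-3.0', 'GPL-3.0-with-GCC-exception']
    # Note that the original entries (wildcards included) are kept too,
    # via licenses = wildcard_licenses[:] above.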
@@ -546,10 +302,30 @@ def expand_wildcard_licenses(d, wildcard_licenses):
def incompatible_license_contains(license, truevalue, falsevalue, d):
license = canonical_license(d, license)
- bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()
+ bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
return truevalue if license in bad_licenses else falsevalue
+def incompatible_pkg_license(d, dont_want_licenses, license):
+ # Handles an "or" or two license sets provided by
+ # flattened_licenses(), pick one that works if possible.
+ def choose_lic_set(a, b):
+ return a if all(oe.license.license_ok(canonical_license(d, lic),
+ dont_want_licenses) for lic in a) else b
+
+ try:
+ licenses = oe.license.flattened_licenses(license, choose_lic_set)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('P'), exc))
+
+ incompatible_lic = []
+ for l in licenses:
+ license = canonical_license(d, l)
+ if not oe.license.license_ok(license, dont_want_licenses):
+ incompatible_lic.append(license)
+
+ return sorted(incompatible_lic)
+
def incompatible_license(d, dont_want_licenses, package=None):
"""
This function checks if a recipe has only incompatible licenses. It also
@@ -557,30 +333,19 @@ def incompatible_license(d, dont_want_licenses, package=None):
as canonical (SPDX) names.
"""
import oe.license
- license = d.getVar("LICENSE_%s" % package, True) if package else None
+ license = d.getVar("LICENSE_%s" % package) if package else None
if not license:
- license = d.getVar('LICENSE', True)
+ license = d.getVar('LICENSE')
- # Handles an "or" or two license sets provided by
- # flattened_licenses(), pick one that works if possible.
- def choose_lic_set(a, b):
- return a if all(oe.license.license_ok(canonical_license(d, lic),
- dont_want_licenses) for lic in a) else b
-
- try:
- licenses = oe.license.flattened_licenses(license, choose_lic_set)
- except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('P', True), exc))
- return any(not oe.license.license_ok(canonical_license(d, l), \
- dont_want_licenses) for l in licenses)
+ return incompatible_pkg_license(d, dont_want_licenses, license)
def check_license_flags(d):
"""
This function checks if a recipe has any LICENSE_FLAGS that
aren't whitelisted.
- If it does, it returns the first LICENSE_FLAGS item missing from the
- whitelist, or all of the LICENSE_FLAGS if there is no whitelist.
+ If it does, it returns all of the LICENSE_FLAGS missing from the whitelist, or
+ all of the LICENSE_FLAGS if there is no whitelist.
If everything is properly whitelisted, it returns None.
"""
@@ -617,22 +382,23 @@ def check_license_flags(d):
return False
def all_license_flags_match(license_flags, whitelist):
- """ Return first unmatched flag, None if all flags match """
- pn = d.getVar('PN', True)
+ """ Return all unmatched flags, None if all flags match """
+ pn = d.getVar('PN')
split_whitelist = whitelist.split()
+ flags = []
for flag in license_flags.split():
if not license_flag_matches(flag, split_whitelist, pn):
- return flag
- return None
+ flags.append(flag)
+ return flags if flags else None
- license_flags = d.getVar('LICENSE_FLAGS', True)
+ license_flags = d.getVar('LICENSE_FLAGS')
if license_flags:
- whitelist = d.getVar('LICENSE_FLAGS_WHITELIST', True)
+ whitelist = d.getVar('LICENSE_FLAGS_WHITELIST')
if not whitelist:
- return license_flags
- unmatched_flag = all_license_flags_match(license_flags, whitelist)
- if unmatched_flag:
- return unmatched_flag
+ return license_flags.split()
+ unmatched_flags = all_license_flags_match(license_flags, whitelist)
+ if unmatched_flags:
+ return unmatched_flags
return None
def check_license_format(d):
@@ -641,8 +407,8 @@ def check_license_format(d):
Validate operators in LICENSES.
No spaces are allowed between LICENSES.
"""
- pn = d.getVar('PN', True)
- licenses = d.getVar('LICENSE', True)
+ pn = d.getVar('PN')
+ licenses = d.getVar('LICENSE')
from oe.license import license_operator, license_operator_chars, license_pattern
elements = list(filter(lambda x: x.strip(), license_operator.split(licenses)))
@@ -662,11 +428,7 @@ SSTATETASKS += "do_populate_lic"
do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
-ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
-do_rootfs[recrdeptask] += "do_populate_lic"
-
-IMAGE_POSTPROCESS_COMMAND_prepend = "write_deploy_manifest; "
-do_image[recrdeptask] += "do_populate_lic"
+IMAGE_CLASSES_append = " license_image"
python do_populate_lic_setscene () {
sstate_setscene(d)
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
new file mode 100644
index 0000000000..a8c72da3cb
--- /dev/null
+++ b/meta/classes/license_image.bbclass
@@ -0,0 +1,256 @@
+python write_package_manifest() {
+ # Get list of installed packages
+ license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
+ bb.utils.mkdirhier(license_image_dir)
+ from oe.rootfs import image_list_installed_packages
+ from oe.utils import format_pkg_list
+
+ pkgs = image_list_installed_packages(d)
+ output = format_pkg_list(pkgs)
+ open(os.path.join(license_image_dir, 'package.manifest'),
+ 'w+').write(output)
+}
+
+python license_create_manifest() {
+ import oe.packagedata
+ from oe.rootfs import image_list_installed_packages
+
+ build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS')
+ if build_images_from_feeds == "1":
+ return 0
+
+ pkg_dic = {}
+ for pkg in sorted(image_list_installed_packages(d)):
+ pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
+ 'runtime-reverse', pkg)
+ pkg_name = os.path.basename(os.readlink(pkg_info))
+
+ pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
+ if not "LICENSE" in pkg_dic[pkg_name].keys():
+ pkg_lic_name = "LICENSE_" + pkg_name
+ pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
+
+ rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ d.getVar('IMAGE_NAME'), 'license.manifest')
+ write_license_files(d, rootfs_license_manifest, pkg_dic, rootfs=True)
+}
+
+def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
+ import re
+ import stat
+
+ bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
+ bad_licenses = [canonical_license(d, l) for l in bad_licenses]
+ bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+
+ whitelist = []
+ for lic in bad_licenses:
+ whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
+
+ with open(license_manifest, "w") as license_file:
+ for pkg in sorted(pkg_dic):
+ if bad_licenses and pkg not in whitelist:
+ try:
+ licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
+ if licenses:
+ bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(licenses)))
+ (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
+ oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
+ bad_licenses, canonical_license, d)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('P'), exc))
+ else:
+ pkg_dic[pkg]["LICENSES"] = re.sub(r'[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
+ pkg_dic[pkg]["LICENSES"] = re.sub(r' *', ' ', pkg_dic[pkg]["LICENSES"])
+ pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
+ if pkg in whitelist:
+ bb.warn("Including %s with an incompatible license %s into the image, because it has been whitelisted." %(pkg, pkg_dic[pkg]["LICENSE"]))
+
+ if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
+ # Rootfs manifest
+ license_file.write("PACKAGE NAME: %s\n" % pkg)
+ license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"])
+ license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
+ license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"])
+
+ # If the package doesn't contain any file, that is, its size is 0, the license
+ # isn't relevant as far as the final image is concerned. So doing license check
+ # doesn't make much sense, skip it.
+ if pkg_dic[pkg]["PKGSIZE_%s" % pkg] == "0":
+ continue
+ else:
+ # Image manifest
+ license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
+ license_file.write("VERSION: %s\n" % pkg_dic[pkg]["PV"])
+ license_file.write("LICENSE: %s\n" % pkg_dic[pkg]["LICENSE"])
+ license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])
+
+ for lic in pkg_dic[pkg]["LICENSES"]:
+ lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ pkg_dic[pkg]["PN"], "generic_%s" %
+ re.sub(r'\+', '', lic))
+ # explicitly skip the CLOSED license because it isn't generic
+ if lic == "CLOSED":
+ continue
+
+ if not os.path.exists(lic_file):
+ bb.warn("The license listed %s was not in the "\
+ "licenses collected for recipe %s"
+ % (lic, pkg_dic[pkg]["PN"]))
+
+ # Two options here:
+ # - Just copy the manifest
+ # - Copy the manifest and the license directories
+ # With both options set we see a .5 M increase in core-image-minimal
+ copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
+ copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
+ if rootfs and copy_lic_manifest == "1":
+ rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS'),
+ 'usr', 'share', 'common-licenses')
+ bb.utils.mkdirhier(rootfs_license_dir)
+ rootfs_license_manifest = os.path.join(rootfs_license_dir,
+ os.path.split(license_manifest)[1])
+ if not os.path.exists(rootfs_license_manifest):
+ oe.path.copyhardlink(license_manifest, rootfs_license_manifest)
+
+ if copy_lic_dirs == "1":
+ for pkg in sorted(pkg_dic):
+ pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
+ bb.utils.mkdirhier(pkg_rootfs_license_dir)
+ pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ pkg_dic[pkg]["PN"])
+
+ pkg_manifest_licenses = [canonical_license(d, lic) \
+ for lic in pkg_dic[pkg]["LICENSES"]]
+
+ licenses = os.listdir(pkg_license_dir)
+ for lic in licenses:
+ rootfs_license = os.path.join(rootfs_license_dir, lic)
+ pkg_license = os.path.join(pkg_license_dir, lic)
+ pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
+
+ if re.match(r"^generic_.*$", lic):
+ generic_lic = canonical_license(d,
+ re.search(r"^generic_(.*)$", lic).group(1))
+
+ # Do not copy the generic license into the package if it isn't
+ # declared in the LICENSES of the package.
+ if not re.sub(r'\+$', '', generic_lic) in \
+ [re.sub(r'\+', '', lic) for lic in \
+ pkg_manifest_licenses]:
+ continue
+
+ if oe.license.license_ok(generic_lic,
+ bad_licenses) == False:
+ continue
+
+ if not os.path.exists(rootfs_license):
+ oe.path.copyhardlink(pkg_license, rootfs_license)
+
+ if not os.path.exists(pkg_rootfs_license):
+ os.symlink(os.path.join('..', lic), pkg_rootfs_license)
+ else:
+ if (oe.license.license_ok(canonical_license(d,
+ lic), bad_licenses) == False or
+ os.path.exists(pkg_rootfs_license)):
+ continue
+
+ oe.path.copyhardlink(pkg_license, pkg_rootfs_license)
+ # Fixup file ownership and permissions
+ for walkroot, dirs, files in os.walk(rootfs_license_dir):
+ for f in files:
+ p = os.path.join(walkroot, f)
+ os.lchown(p, 0, 0)
+ if not os.path.islink(p):
+ os.chmod(p, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
+ for dir in dirs:
+ p = os.path.join(walkroot, dir)
+ os.lchown(p, 0, 0)
+ os.chmod(p, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+
+
+
+def license_deployed_manifest(d):
+ """
+ Write the license manifest for the deployed recipes.
+ The deployed recipes usually includes the bootloader
+ and extra files to boot the target.
+ """
+
+ dep_dic = {}
+ man_dic = {}
+ lic_dir = d.getVar("LICENSE_DIRECTORY")
+
+ dep_dic = get_deployed_dependencies(d)
+ for dep in dep_dic.keys():
+ man_dic[dep] = {}
+ # It is necessary to mark that this will be used for the image manifest
+ man_dic[dep]["IMAGE_MANIFEST"] = True
+ man_dic[dep]["PN"] = dep
+ man_dic[dep]["FILES"] = \
+ " ".join(get_deployed_files(dep_dic[dep]))
+ with open(os.path.join(lic_dir, dep, "recipeinfo"), "r") as f:
+ for line in f.readlines():
+ key,val = line.split(": ", 1)
+ man_dic[dep][key] = val[:-1]
+
+ lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ d.getVar('IMAGE_NAME'))
+ bb.utils.mkdirhier(lic_manifest_dir)
+ image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
+ write_license_files(d, image_license_manifest, man_dic, rootfs=False)
+
+def get_deployed_dependencies(d):
+ """
+ Get all the deployed dependencies of an image
+ """
+
+ deploy = {}
+ # Get all the dependencies for the current task (rootfs).
+ taskdata = d.getVar("BB_TASKDEPDATA", False)
+ depends = list(set([dep[0] for dep
+ in list(taskdata.values())
+ if not dep[0].endswith("-native")]))
+
+ # To verify what was deployed it checks the rootfs dependencies against
+ # the SSTATE_MANIFESTS for "deploy" task.
+ # The manifest file name contains the arch. Because we are not running
+ # in the recipe context it is necessary to check every arch used.
+ sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS")
+ archs = list(set(d.getVar("SSTATE_ARCHS").split()))
+ for dep in depends:
+ for arch in archs:
+ sstate_manifest_file = os.path.join(sstate_manifest_dir,
+ "manifest-%s-%s.deploy" % (arch, dep))
+ if os.path.exists(sstate_manifest_file):
+ deploy[dep] = sstate_manifest_file
+ break
+
+ return deploy
+get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
+
+def get_deployed_files(man_file):
+ """
+ Get the files deployed from the sstate manifest
+ """
+
+ dep_files = []
+ excluded_files = []
+ with open(man_file, "r") as manifest:
+ all_files = manifest.read()
+ for f in all_files.splitlines():
+ if ((not (os.path.islink(f) or os.path.isdir(f))) and
+ not os.path.basename(f) in excluded_files):
+ dep_files.append(os.path.basename(f))
+ return dep_files
+
+ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
+do_rootfs[recrdeptask] += "do_populate_lic"
+
+python do_populate_lic_deploy() {
+ license_deployed_manifest(d)
+}
+
+addtask populate_lic_deploy before do_build after do_image_complete
+do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy"
+
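For context, the manifest and copy behaviour in this new class is driven by configuration; a minimal, illustrative local.conf sketch:

    COPY_LIC_MANIFEST = "1"    # install license.manifest into the rootfs
    COPY_LIC_DIRS = "1"        # also copy the per-package license texts
    INCOMPATIBLE_LICENSE = "GPL-3.0 LGPL-3.0"
    # Per-license whitelists are consulted as WHITELIST_<license>:
    WHITELIST_GPL-3.0 = "bash"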
diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass
index 89ce71605c..ba59222c24 100644
--- a/meta/classes/linux-kernel-base.bbclass
+++ b/meta/classes/linux-kernel-base.bbclass
@@ -34,8 +34,8 @@ def get_kernelversion_file(p):
return None
def linux_module_packages(s, d):
- suffix = ""
- return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
+ suffix = ""
+ return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
# that's all
diff --git a/meta/classes/linuxloader.bbclass b/meta/classes/linuxloader.bbclass
index 5c4dc5c51b..c0fbf26836 100644
--- a/meta/classes/linuxloader.bbclass
+++ b/meta/classes/linuxloader.bbclass
@@ -1,24 +1,65 @@
+def get_musl_loader(d):
+ import re
+ dynamic_loader = None
-linuxloader () {
- case ${TARGET_ARCH} in
- powerpc | mips | mipsel | microblaze )
- dynamic_loader="${base_libdir}/ld.so.1"
- ;;
- powerpc64)
- dynamic_loader="${base_libdir}/ld64.so.1"
- ;;
- x86_64)
- dynamic_loader="${base_libdir}/ld-linux-x86-64.so.2"
- ;;
- i*86 )
- dynamic_loader="${base_libdir}/ld-linux.so.2"
- ;;
- arm )
- dynamic_loader="${base_libdir}/ld-linux.so.3"
- ;;
- * )
- dynamic_loader="/unknown_dynamic_linker"
- ;;
- esac
- echo $dynamic_loader
-}
+ targetarch = d.getVar("TARGET_ARCH")
+ if targetarch.startswith("microblaze"):
+ dynamic_loader = "${base_libdir}/ld-musl-microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el' ,d)}.so.1"
+ elif targetarch.startswith("mips"):
+ dynamic_loader = "${base_libdir}/ld-musl-mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+ elif targetarch == "powerpc":
+ dynamic_loader = "${base_libdir}/ld-musl-powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+ elif targetarch == "powerpc64":
+ dynamic_loader = "${base_libdir}/ld-musl-powerpc64.so.1"
+ elif targetarch == "x86_64":
+ dynamic_loader = "${base_libdir}/ld-musl-x86_64.so.1"
+ elif re.search("i.86", targetarch):
+ dynamic_loader = "${base_libdir}/ld-musl-i386.so.1"
+ elif targetarch.startswith("arm"):
+ dynamic_loader = "${base_libdir}/ld-musl-arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}.so.1"
+ elif targetarch.startswith("aarch64"):
+ dynamic_loader = "${base_libdir}/ld-musl-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
+ elif targetarch.startswith("riscv64"):
+ dynamic_loader = "${base_libdir}/ld-musl-riscv64${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+ return dynamic_loader
+
+def get_glibc_loader(d):
+ import re
+
+ dynamic_loader = None
+ targetarch = d.getVar("TARGET_ARCH")
+ if targetarch in ["powerpc", "microblaze"]:
+ dynamic_loader = "${base_libdir}/ld.so.1"
+ elif targetarch in ["mipsisa32r6el", "mipsisa32r6", "mipsisa64r6el", "mipsisa64r6"]:
+ dynamic_loader = "${base_libdir}/ld-linux-mipsn8.so.1"
+ elif targetarch.startswith("mips"):
+ dynamic_loader = "${base_libdir}/ld.so.1"
+ elif targetarch == "powerpc64":
+ dynamic_loader = "${base_libdir}/ld64.so.1"
+ elif targetarch == "x86_64":
+ dynamic_loader = "${base_libdir}/ld-linux-x86-64.so.2"
+ elif re.search("i.86", targetarch):
+ dynamic_loader = "${base_libdir}/ld-linux.so.2"
+ elif targetarch == "arm":
+ dynamic_loader = "${base_libdir}/ld-linux.so.3"
+ elif targetarch.startswith("aarch64"):
+ dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
+ elif targetarch.startswith("riscv64"):
+ dynamic_loader = "${base_libdir}/ld-linux-riscv64-lp64${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+ return dynamic_loader
+
+def get_linuxloader(d):
+ overrides = d.getVar("OVERRIDES").split(":")
+
+ if "libc-baremetal" in overrides:
+ return None
+
+ if "libc-musl" in overrides:
+ dynamic_loader = get_musl_loader(d)
+ else:
+ dynamic_loader = get_glibc_loader(d)
+ return dynamic_loader
+
+get_linuxloader[vardepvalue] = "${@get_linuxloader(d)}"
+get_musl_loader[vardepvalue] = "${@get_musl_loader(d)}"
+get_glibc_loader[vardepvalue] = "${@get_glibc_loader(d)}"
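Since the loader lookup is now pure Python, callers expand it inline instead of shelling out. A hedged usage sketch in a recipe inheriting linuxloader (LDSO is an invented make variable, purely for illustration):

    inherit linuxloader

    # Expands to e.g. ${base_libdir}/ld-linux-x86-64.so.2 on glibc/x86_64,
    # ${base_libdir}/ld-musl-x86_64.so.1 on musl, and None for baremetal.
    EXTRA_OEMAKE += "LDSO=${@get_linuxloader(d)}"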
diff --git a/meta/classes/live-vm-common.bbclass b/meta/classes/live-vm-common.bbclass
index c751385e7d..74e7074a53 100644
--- a/meta/classes/live-vm-common.bbclass
+++ b/meta/classes/live-vm-common.bbclass
@@ -4,17 +4,19 @@ def set_live_vm_vars(d, suffix):
vars = ['GRUB_CFG', 'SYSLINUX_CFG', 'ROOT', 'LABELS', 'INITRD']
for var in vars:
var_with_suffix = var + '_' + suffix
- if d.getVar(var, True):
+ if d.getVar(var):
bb.warn('Found potential conflicted var %s, please use %s rather than %s' % \
(var, var_with_suffix, var))
- elif d.getVar(var_with_suffix, True):
- d.setVar(var, d.getVar(var_with_suffix, True))
+ elif d.getVar(var_with_suffix):
+ d.setVar(var, d.getVar(var_with_suffix))
EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
EFI_PROVIDER ?= "grub-efi"
EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
+MKDOSFS_EXTRAOPTS ??= "-S 512"
+
# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
# contain "efi". This way legacy is supported by default if neither is
# specified, maintaining the original behavior.
@@ -25,20 +27,54 @@ def pcbios(d):
return pcbios
PCBIOS = "${@pcbios(d)}"
-PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS', True) == '1']}"
+PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS') == '1']}"
+
+# efi_populate_common DEST BOOTLOADER
+efi_populate_common() {
+ # DEST must be the root of the image so that EFIDIR is not
+ # nested under a top level directory.
+ DEST=$1
+
+ install -d ${DEST}${EFIDIR}
+
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/$2-${EFI_BOOT_IMAGE} ${DEST}${EFIDIR}/${EFI_BOOT_IMAGE}
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${DEST}/startup.nsh
+}
+
+efi_iso_populate() {
+ iso_dir=$1
+ efi_populate $iso_dir
+ # Build an EFI directory to create efi.img
+ mkdir -p ${EFIIMGDIR}/${EFIDIR}
+ cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
+ cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
+
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${EFIIMGDIR}/startup.nsh
+
+ if [ -f "$iso_dir/initrd" ] ; then
+ cp $iso_dir/initrd ${EFIIMGDIR}
+ fi
+}
+
+efi_hddimg_populate() {
+ efi_populate $1
+}
inherit ${EFI_CLASS}
inherit ${PCBIOS_CLASS}
-KERNEL_IMAGETYPE ??= "bzImage"
-
populate_kernel() {
dest=$1
install -d $dest
# Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
+ bbnote "Trying to install ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} as $dest/${KERNEL_IMAGETYPE}"
if [ -e ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} ]; then
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} $dest/vmlinuz
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} $dest/${KERNEL_IMAGETYPE}
+ else
+ bbwarn "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} doesn't exist"
fi
# initrd is made of concatenation of multiple filesystem images
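
The new MKDOSFS_EXTRAOPTS default uses weak assignment (??=), so machine or distro configuration can still override it. A local.conf sketch, assuming the machine declares EFI support (values illustrative):

    MACHINE_FEATURES += "efi"
    EFI_PROVIDER = "systemd-boot"
    # e.g. force 4096-byte logical sectors for the DOS boot image
    MKDOSFS_EXTRAOPTS = "-S 4096"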
diff --git a/meta/classes/logging.bbclass b/meta/classes/logging.bbclass
index 06c7c31c3e..a0c94e98c7 100644
--- a/meta/classes/logging.bbclass
+++ b/meta/classes/logging.bbclass
@@ -86,7 +86,7 @@ bbdebug() {
# Strip off the debug level and ensure it is an integer
DBGLVL=$1; shift
- NONDIGITS=$(echo "$DBGLVL" | tr -d [:digit:])
+ NONDIGITS=$(echo "$DBGLVL" | tr -d "[:digit:]")
if [ "$NONDIGITS" ]; then
bbfatal "$USAGE"
fi
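
Quoting "[:digit:]" matters because an unquoted character class can be glob-expanded by the shell if a matching file (e.g. one named "d") exists in the current directory. A usage sketch of these helpers inside a recipe task (message text illustrative):

    do_install_append() {
        bbdebug 2 "Installing extra configuration"
        bbnote "Extra configuration installed"
    }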
diff --git a/meta/classes/manpages.bbclass b/meta/classes/manpages.bbclass
new file mode 100644
index 0000000000..50c254763e
--- /dev/null
+++ b/meta/classes/manpages.bbclass
@@ -0,0 +1,37 @@
+# Inherit this class to enable or disable building and installation of manpages
+# depending on whether 'api-documentation' is in DISTRO_FEATURES. Such building
+# tends to pull in the entire XML stack and other tools, so it's not enabled
+# by default.
+PACKAGECONFIG_append_class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
+
+inherit qemu
+
+# usually manual files are packaged in ${PN}-doc, except for man-pages
+MAN_PKG ?= "${PN}-doc"
+
+# only add man-db to RDEPENDS when manual files are built and installed
+RDEPENDS_${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
+
+pkg_postinst_append_${MAN_PKG} () {
+ # only update manual page index caches when manual files are built and installed
+ if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
+ if test -n "$D"; then
+ if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true','false', d)}; then
+ sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf | ${@qemu_run_binary(d, '$D', '${bindir}/mandb')} -C - -u -q $D${mandir}
+ mkdir -p $D${localstatedir}/cache/man
+ mv $D${mandir}/index.db $D${localstatedir}/cache/man
+ else
+ $INTERCEPT_DIR/postinst_intercept delay_to_first_boot ${PKG} mlprefix=${MLPREFIX}
+ fi
+ else
+ mandb -q
+ fi
+ fi
+}
+
+pkg_postrm_append_${MAN_PKG} () {
+ # only update manual page index caches when manual files are built and installed
+ if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
+ mandb -q
+ fi
+}
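
A sketch of a consuming recipe, assuming its configure script takes autoconf-style switches (the flags and the libxslt-native dependency are illustrative):

    inherit manpages
    PACKAGECONFIG[manpages] = "--enable-man,--disable-man,libxslt-native"

With 'api-documentation' in DISTRO_FEATURES the 'manpages' PACKAGECONFIG is enabled for target builds, and man-db is pulled into RDEPENDS of ${MAN_PKG}.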
diff --git a/meta/classes/mcextend.bbclass b/meta/classes/mcextend.bbclass
new file mode 100644
index 0000000000..0f8f962298
--- /dev/null
+++ b/meta/classes/mcextend.bbclass
@@ -0,0 +1,16 @@
+python mcextend_virtclass_handler () {
+ cls = e.data.getVar("BBEXTENDCURR")
+ variant = e.data.getVar("BBEXTENDVARIANT")
+ if cls != "mcextend" or not variant:
+ return
+
+ override = ":virtclass-mcextend-" + variant
+
+ e.data.setVar("PN", e.data.getVar("PN", False) + "-" + variant)
+ e.data.setVar("MCNAME", variant)
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
+}
+
+addhandler mcextend_virtclass_handler
+mcextend_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
+
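
Usage sketch: a recipe opts in via BBCLASSEXTEND, and each listed variant produces an additional parse of the recipe (the variant name "tiny" is illustrative):

    BBCLASSEXTEND = "mcextend:tiny"
    # builds ${PN}-tiny with MCNAME set to "tiny" and the
    # virtclass-mcextend-tiny override applied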
diff --git a/meta/classes/meson.bbclass b/meta/classes/meson.bbclass
new file mode 100644
index 0000000000..e1a13bbbf7
--- /dev/null
+++ b/meta/classes/meson.bbclass
@@ -0,0 +1,161 @@
+inherit siteinfo python3native
+
+DEPENDS_append = " meson-native ninja-native"
+
+# As Meson enforces out-of-tree builds we can just use cleandirs
+B = "${WORKDIR}/build"
+do_configure[cleandirs] = "${B}"
+
+# Where the meson.build build configuration is
+MESON_SOURCEPATH = "${S}"
+
+def noprefix(var, d):
+ return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1)
+
+MESON_BUILDTYPE ?= "plain"
+MESONOPTS = " --prefix ${prefix} \
+ --buildtype ${MESON_BUILDTYPE} \
+ --bindir ${@noprefix('bindir', d)} \
+ --sbindir ${@noprefix('sbindir', d)} \
+ --datadir ${@noprefix('datadir', d)} \
+ --libdir ${@noprefix('libdir', d)} \
+ --libexecdir ${@noprefix('libexecdir', d)} \
+ --includedir ${@noprefix('includedir', d)} \
+ --mandir ${@noprefix('mandir', d)} \
+ --infodir ${@noprefix('infodir', d)} \
+ --sysconfdir ${sysconfdir} \
+ --localstatedir ${localstatedir} \
+ --sharedstatedir ${sharedstatedir} \
+ --wrap-mode nodownload"
+
+EXTRA_OEMESON_append = " ${PACKAGECONFIG_CONFARGS}"
+
+MESON_CROSS_FILE = ""
+MESON_CROSS_FILE_class-target = "--cross-file ${WORKDIR}/meson.cross"
+MESON_CROSS_FILE_class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
+
+def meson_array(var, d):
+ items = d.getVar(var).split()
+ return repr(items[0] if len(items) == 1 else items)
+
+# Map our ARCH values to what Meson expects:
+# http://mesonbuild.com/Reference-tables.html#cpu-families
+def meson_cpu_family(var, d):
+ import re
+ arch = d.getVar(var)
+ if arch == 'powerpc':
+ return 'ppc'
+ elif arch == 'powerpc64':
+ return 'ppc64'
+ elif arch == 'armeb':
+ return 'arm'
+ elif arch == 'aarch64_be':
+ return 'aarch64'
+ elif arch == 'mipsel':
+ return 'mips'
+ elif arch == 'mips64el':
+ return 'mips64'
+ elif re.match(r"i[3-6]86", arch):
+ return "x86"
+ elif arch == "microblazeel" or arch == "microblazeeb":
+ return "microblaze"
+ else:
+ return arch
+
+def meson_endian(prefix, d):
+ arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS")
+ sitedata = siteinfo_data_for_machine(arch, os, d)
+ if "endian-little" in sitedata:
+ return "little"
+ elif "endian-big" in sitedata:
+ return "big"
+ else:
+ bb.fatal("Cannot determine endianism for %s-%s" % (arch, os))
+
+addtask write_config before do_configure
+do_write_config[vardeps] += "CC CXX LD AR NM STRIP READELF CFLAGS CXXFLAGS LDFLAGS"
+do_write_config() {
+ # This needs to be Python to split the args into single-element lists
+ cat >${WORKDIR}/meson.cross <<EOF
+[binaries]
+c = ${@meson_array('CC', d)}
+cpp = ${@meson_array('CXX', d)}
+ar = ${@meson_array('AR', d)}
+nm = ${@meson_array('NM', d)}
+ld = ${@meson_array('LD', d)}
+strip = ${@meson_array('STRIP', d)}
+readelf = ${@meson_array('READELF', d)}
+pkgconfig = 'pkg-config'
+llvm-config = 'llvm-config${LLVMVERSION}'
+
+[properties]
+needs_exe_wrapper = true
+c_args = ${@meson_array('CFLAGS', d)}
+c_link_args = ${@meson_array('LDFLAGS', d)}
+cpp_args = ${@meson_array('CXXFLAGS', d)}
+cpp_link_args = ${@meson_array('LDFLAGS', d)}
+gtkdoc_exe_wrapper = '${B}/gtkdoc-qemuwrapper'
+
+[host_machine]
+system = '${HOST_OS}'
+cpu_family = '${@meson_cpu_family('HOST_ARCH', d)}'
+cpu = '${HOST_ARCH}'
+endian = '${@meson_endian('HOST', d)}'
+
+[target_machine]
+system = '${TARGET_OS}'
+cpu_family = '${@meson_cpu_family('TARGET_ARCH', d)}'
+cpu = '${TARGET_ARCH}'
+endian = '${@meson_endian('TARGET', d)}'
+EOF
+}
+
+CONFIGURE_FILES = "meson.build"
+
+meson_do_configure() {
+ # Work around "Meson fails if /tmp is mounted with noexec #2972"
+ mkdir -p "${B}/meson-private/tmp"
+ export TMPDIR="${B}/meson-private/tmp"
+ bbnote Executing meson ${EXTRA_OEMESON}...
+ if ! meson ${MESONOPTS} "${MESON_SOURCEPATH}" "${B}" ${MESON_CROSS_FILE} ${EXTRA_OEMESON}; then
+ bbfatal_log meson failed
+ fi
+}
+
+override_native_tools() {
+ # Set these so that meson uses the native tools for its build sanity tests,
+ # which require executables to be runnable. The cross file will still
+ # override these for the target build.
+ export CC="${BUILD_CC}"
+ export CXX="${BUILD_CXX}"
+ export LD="${BUILD_LD}"
+ export AR="${BUILD_AR}"
+ export STRIP="${BUILD_STRIP}"
+ # These contain *target* flags but will be used as *native* flags. The
+ # correct native flags will be passed via -Dc_args and so on; unset them so
+ # they don't interfere with tools invoked by Meson (such as g-ir-scanner).
+ unset CPPFLAGS CFLAGS CXXFLAGS LDFLAGS
+}
+
+meson_do_configure_prepend_class-target() {
+ override_native_tools
+}
+
+meson_do_configure_prepend_class-nativesdk() {
+ override_native_tools
+}
+
+meson_do_configure_prepend_class-native() {
+ export PKG_CONFIG="pkg-config-native"
+}
+
+do_compile[progress] = "outof:^\[(\d+)/(\d+)\]\s+"
+meson_do_compile() {
+ ninja -v ${PARALLEL_MAKE}
+}
+
+meson_do_install() {
+ DESTDIR='${D}' ninja -v ${PARALLEL_MAKEINST} install
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
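
A minimal consumer sketch; the Meson option name is illustrative, and EXTRA_OEMESON is where recipe-specific -D options belong:

    inherit meson

    MESON_BUILDTYPE = "debug"
    EXTRA_OEMESON += "-Dtests=false"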
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
index 2e6fac209a..58bb4c555a 100644
--- a/meta/classes/metadata_scm.bbclass
+++ b/meta/classes/metadata_scm.bbclass
@@ -3,54 +3,14 @@ METADATA_REVISION ?= "${@base_detect_revision(d)}"
def base_detect_revision(d):
path = base_get_scmbasepath(d)
-
- scms = [base_get_metadata_git_revision]
-
- for scm in scms:
- rev = scm(path, d)
- if rev != "<unknown>":
- return rev
-
- return "<unknown>"
+ return base_get_metadata_git_revision(path, d)
def base_detect_branch(d):
path = base_get_scmbasepath(d)
-
- scms = [base_get_metadata_git_branch]
-
- for scm in scms:
- rev = scm(path, d)
- if rev != "<unknown>":
- return rev.strip()
-
- return "<unknown>"
+ return base_get_metadata_git_branch(path, d)
def base_get_scmbasepath(d):
- return os.path.join(d.getVar('COREBASE', True), 'meta')
-
-def base_get_metadata_monotone_branch(path, d):
- monotone_branch = "<unknown>"
- try:
- with open("%s/_MTN/options" % path) as f:
- monotone_branch = f.read().strip()
- if monotone_branch.startswith( "database" ):
- monotone_branch_words = monotone_branch.split()
- monotone_branch = monotone_branch_words[ monotone_branch_words.index( "branch" )+1][1:-1]
- except:
- pass
- return monotone_branch
-
-def base_get_metadata_monotone_revision(path, d):
- monotone_revision = "<unknown>"
- try:
- with open("%s/_MTN/revision" % path) as f:
- monotone_revision = f.read().strip()
- if monotone_revision.startswith( "format_version" ):
- monotone_revision_words = monotone_revision.split()
- monotone_revision = monotone_revision_words[ monotone_revision_words.index( "old_revision" )+1][1:-1]
- except IOError:
- pass
- return monotone_revision
+ return os.path.join(d.getVar('COREBASE'), 'meta')
def base_get_metadata_svn_revision(path, d):
# This only works with older subversion. For newer versions
diff --git a/meta/classes/migrate_localcount.bbclass b/meta/classes/migrate_localcount.bbclass
index aa0df8bb76..810a541316 100644
--- a/meta/classes/migrate_localcount.bbclass
+++ b/meta/classes/migrate_localcount.bbclass
@@ -6,12 +6,12 @@ python migrate_localcount_handler () {
if not e.data:
return
- pv = e.data.getVar('PV', True)
+ pv = e.data.getVar('PV')
if not 'AUTOINC' in pv:
return
localcounts = bb.persist_data.persist('BB_URI_LOCALCOUNT', e.data)
- pn = e.data.getVar('PN', True)
+ pn = e.data.getVar('PN')
revs = localcounts.get_by_pattern('%%-%s_rev' % pn)
counts = localcounts.get_by_pattern('%%-%s_count' % pn)
if not revs or not counts:
@@ -21,10 +21,10 @@ python migrate_localcount_handler () {
bb.warn("The number of revs and localcounts don't match in %s" % pn)
return
- version = e.data.getVar('PRAUTOINX', True)
+ version = e.data.getVar('PRAUTOINX')
srcrev = bb.fetch2.get_srcrev(e.data)
base_ver = 'AUTOINC-%s' % version[:version.find(srcrev)]
- pkgarch = e.data.getVar('PACKAGE_ARCH', True)
+ pkgarch = e.data.getVar('PACKAGE_ARCH')
value = max(int(count) for count in counts)
if len(revs) == 1:
@@ -33,8 +33,8 @@ python migrate_localcount_handler () {
else:
value += 1
- bb.utils.mkdirhier(e.data.getVar('PRSERV_DUMPDIR', True))
- df = e.data.getVar('LOCALCOUNT_DUMPFILE', True)
+ bb.utils.mkdirhier(e.data.getVar('PRSERV_DUMPDIR'))
+ df = e.data.getVar('LOCALCOUNT_DUMPFILE')
flock = bb.utils.lockfile("%s.lock" % df)
with open(df, 'a') as fd:
fd.write('PRAUTO$%s$%s$%s = "%s"\n' %
diff --git a/meta/classes/mime.bbclass b/meta/classes/mime.bbclass
index 721c73fcff..6c7b868f79 100644
--- a/meta/classes/mime.bbclass
+++ b/meta/classes/mime.bbclass
@@ -1,4 +1,5 @@
-DEPENDS += "shared-mime-info-native shared-mime-info"
+DEPENDS += "shared-mime-info"
+PACKAGE_WRITE_DEPS += "shared-mime-info-native"
mime_postinst() {
if [ "$1" = configure ]; then
@@ -28,8 +29,8 @@ fi
python populate_packages_append () {
import re
- packages = d.getVar('PACKAGES', True).split()
- pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES').split()
+ pkgdest = d.getVar('PKGDEST')
for pkg in packages:
mime_dir = '%s/%s/usr/share/mime/packages' % (pkgdest, pkg)
@@ -41,16 +42,16 @@ python populate_packages_append () {
mimes.append(f)
if mimes:
bb.note("adding mime postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('mime_postinst', True)
+ postinst += d.getVar('mime_postinst')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('mime_postrm', True)
+ postrm += d.getVar('mime_postrm')
d.setVar('pkg_postrm_%s' % pkg, postrm)
bb.note("adding shared-mime-info-data dependency to %s" % pkg)
- d.appendVar('RDEPENDS_' + pkg, " shared-mime-info-data")
+ d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
}
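
A consuming recipe only needs to install its MIME description under ${datadir}/mime/packages; the class then appends the postinst/postrm scriptlets and the MLPREFIX-aware RDEPENDS shown above (file name illustrative):

    inherit mime

    do_install_append() {
        install -Dm 0644 ${WORKDIR}/myapp.xml \
            ${D}${datadir}/mime/packages/myapp.xml
    }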
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
index 11847085ba..87bba41472 100644
--- a/meta/classes/mirrors.bbclass
+++ b/meta/classes/mirrors.bbclass
@@ -1,25 +1,26 @@
MIRRORS += "\
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \n \
${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \n \
${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \n \
${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.de.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.au.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.cl.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.hr.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.fi.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.hk.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.hu.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.ie.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.it.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.jp.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.no.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.pl.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.ro.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.si.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.es.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.se.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} ftp://ftp.tr.debian.org/debian/pool \n \
-${GNU_MIRROR} ftp://mirrors.kernel.org/gnu \n \
+${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \n \
+${GNU_MIRROR} https://mirrors.kernel.org/gnu \n \
${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \n \
${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \n \
@@ -27,24 +28,18 @@ ${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \n \
ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
-ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR} \n \
+ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \n \
http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tau.ac.il/pub/unix/admin/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cert.dfn.de/pub/tools/admin/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.fu-berlin.de/pub/unix/tools/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.kaizo.org/pub/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tu-darmstadt.de/pub/sysadmin/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tux.org/pub/sites/vic.cc.purdue.edu/tools/unix/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://gd.tuwien.ac.at/utils/admin-tools/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://sunsite.ualberta.ca/pub/Mirror/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://the.wiretapped.net/pub/security/host-security/lsof/ \n \
+http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \n \
${APACHE_MIRROR} http://www.us.apache.org/dist \n \
${APACHE_MIRROR} http://archive.apache.org/dist \n \
http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \
${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \n \
${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \n \
+ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \n \
+ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \n \
+ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \n \
cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
@@ -54,7 +49,7 @@ p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-npm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \n \
cvs://.*/.* http://sources.openembedded.org/ \n \
svn://.*/.* http://sources.openembedded.org/ \n \
git://.*/.* http://sources.openembedded.org/ \n \
@@ -64,7 +59,18 @@ p4://.*/.* http://sources.openembedded.org/ \n \
osc://.*/.* http://sources.openembedded.org/ \n \
https?$://.*/.* http://sources.openembedded.org/ \n \
ftp://.*/.* http://sources.openembedded.org/ \n \
-npm://.*/.* http://sources.openembedded.org/ \n \
+npm://.*/?.* http://sources.openembedded.org/ \n \
${CPAN_MIRROR} http://cpan.metacpan.org/ \n \
${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \
"
+
+# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
+# where git native protocol fetches may fail due to local firewall rules, etc.
+
+MIRRORS += "\
+git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \n \
+git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \n \
+git://git.savannah.gnu.org/.* git://git.savannah.gnu.org/git/PATH;protocol=https \n \
+git://git.yoctoproject.org/.* git://git.yoctoproject.org/git/PATH;protocol=https \n \
+git://.*/.* git://HOST/PATH;protocol=https \n \
+"
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
index 6fe77c01b7..27bd69ff33 100644
--- a/meta/classes/module-base.bbclass
+++ b/meta/classes/module-base.bbclass
@@ -1,7 +1,8 @@
inherit kernel-arch
-# This is instead of DEPENDS = "virtual/kernel"
-do_configure[depends] += "virtual/kernel:do_compile_kernelmodules"
+# We do the dependency this way because the output is not preserved
+# in sstate, so we must force do_compile to run (once).
+do_configure[depends] += "make-mod-scripts:do_compile"
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
@@ -12,16 +13,9 @@ export CROSS_COMPILE = "${TARGET_PREFIX}"
# we didn't pick the name.
export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}"
-export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
+export KERNEL_VERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
KERNEL_OBJECT_SUFFIX = ".ko"
# kernel modules are generally machine specific
PACKAGE_ARCH = "${MACHINE_ARCH}"
-# Function to ensure the kernel scripts are created. Expected to
-# be called before do_compile. See module.bbclass for an example.
-do_make_scripts() {
- unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
- make CC="${KERNEL_CC}" LD="${KERNEL_LD}" AR="${KERNEL_AR}" \
- -C ${STAGING_KERNEL_DIR} O=${STAGING_KERNEL_BUILDDIR} scripts
-}
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
index 68e3d341a3..c0dfa35061 100644
--- a/meta/classes/module.bbclass
+++ b/meta/classes/module.bbclass
@@ -1,15 +1,12 @@
-inherit module-base kernel-module-split
-
-addtask make_scripts after do_patch before do_compile
-do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
-do_make_scripts[depends] += "virtual/kernel:do_shared_workdir"
+inherit module-base kernel-module-split pkgconfig
EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}"
MODULES_INSTALL_TARGET ?= "modules_install"
+MODULES_MODULE_SYMVERS_LOCATION ?= ""
python __anonymous () {
- depends = d.getVar('DEPENDS', True)
+ depends = d.getVar('DEPENDS')
extra_symbols = []
for dep in depends.split():
if dep.startswith("kernel-module-"):
@@ -17,6 +14,26 @@ python __anonymous () {
d.setVar('KBUILD_EXTRA_SYMBOLS', " ".join(extra_symbols))
}
+python do_devshell_prepend () {
+ os.environ['CFLAGS'] = ''
+ os.environ['CPPFLAGS'] = ''
+ os.environ['CXXFLAGS'] = ''
+ os.environ['LDFLAGS'] = ''
+
+ os.environ['KERNEL_PATH'] = d.getVar('STAGING_KERNEL_DIR')
+ os.environ['KERNEL_SRC'] = d.getVar('STAGING_KERNEL_DIR')
+ os.environ['KERNEL_VERSION'] = d.getVar('KERNEL_VERSION')
+ os.environ['CC'] = d.getVar('KERNEL_CC')
+ os.environ['LD'] = d.getVar('KERNEL_LD')
+ os.environ['AR'] = d.getVar('KERNEL_AR')
+ os.environ['O'] = d.getVar('STAGING_KERNEL_BUILDDIR')
+ kbuild_extra_symbols = d.getVar('KBUILD_EXTRA_SYMBOLS')
+ if kbuild_extra_symbols:
+ os.environ['KBUILD_EXTRA_SYMBOLS'] = kbuild_extra_symbols
+ else:
+ os.environ['KBUILD_EXTRA_SYMBOLS'] = ''
+}
+
module_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
@@ -30,15 +47,23 @@ module_do_compile() {
module_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
- oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" \
+ oe_runmake DEPMOD=echo MODLIB="${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}" \
+ INSTALL_FW_PATH="${D}${nonarch_base_libdir}/firmware" \
CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
O=${STAGING_KERNEL_BUILDDIR} \
${MODULES_INSTALL_TARGET}
- install -d -m0755 ${D}${includedir}/${BPN}
- cp -a --no-preserve=ownership ${B}/Module.symvers ${D}${includedir}/${BPN}
- # it doesn't actually seem to matter which path is specified here
- sed -e 's:${B}/::g' -i ${D}${includedir}/${BPN}/Module.symvers
+ if [ ! -e "${B}/${MODULES_MODULE_SYMVERS_LOCATION}/Module.symvers" ] ; then
+ bbwarn "Module.symvers not found in ${B}/${MODULES_MODULE_SYMVERS_LOCATION}"
+ bbwarn "Please consider setting MODULES_MODULE_SYMVERS_LOCATION to a"
+ bbwarn "directory below B to get correct inter-module dependencies"
+ else
+ install -Dm0644 "${B}/${MODULES_MODULE_SYMVERS_LOCATION}"/Module.symvers ${D}${includedir}/${BPN}/Module.symvers
+ # Module.symvers contains absolute paths to the build directory.
+ # While it doesn't actually seem to matter which path is specified,
+ # clear them out to avoid confusion
+ sed -e 's:${B}/::g' -i ${D}${includedir}/${BPN}/Module.symvers
+ fi
}
EXPORT_FUNCTIONS do_compile do_install
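
A sketch of an out-of-tree kernel module recipe using the new Module.symvers knob; the subdirectory name is illustrative and only needed when the module's makefile builds below ${B}:

    inherit module

    MODULES_MODULE_SYMVERS_LOCATION = "src"
    EXTRA_OEMAKE += "V=1"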
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index d5a31287a8..ee677da1e2 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -1,20 +1,26 @@
python multilib_virtclass_handler () {
- cls = e.data.getVar("BBEXTENDCURR", True)
- variant = e.data.getVar("BBEXTENDVARIANT", True)
+ cls = e.data.getVar("BBEXTENDCURR")
+ variant = e.data.getVar("BBEXTENDVARIANT")
if cls != "multilib" or not variant:
return
- e.data.setVar('STAGING_KERNEL_DIR', e.data.getVar('STAGING_KERNEL_DIR', True))
+ localdata = bb.data.createCopy(e.data)
+ localdata.delVar('TMPDIR')
+ e.data.setVar('STAGING_KERNEL_DIR', localdata.getVar('STAGING_KERNEL_DIR'))
# There should only be one kernel in multilib configs
# We also skip multilib setup for module packages.
- provides = (e.data.getVar("PROVIDES", True) or "").split()
- if "virtual/kernel" in provides or bb.data.inherits_class('module-base', e.data):
- raise bb.parse.SkipPackage("We shouldn't have multilib variants for the kernel")
-
- save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME", True) or ""
+ provides = (e.data.getVar("PROVIDES") or "").split()
+ non_ml_recipes = d.getVar('NON_MULTILIB_RECIPES').split()
+ bpn = e.data.getVar("BPN")
+ if "virtual/kernel" in provides or \
+ bb.data.inherits_class('module-base', e.data) or \
+ bpn in non_ml_recipes:
+ raise bb.parse.SkipRecipe("We shouldn't have multilib variants for %s" % bpn)
+
+ save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME") or ""
for name in save_var_name.split():
- val=e.data.getVar(name, True)
+ val=e.data.getVar(name)
if val:
e.data.setVar(name + "_MULTILIB_ORIGINAL", val)
@@ -26,59 +32,66 @@ python multilib_virtclass_handler () {
if bb.data.inherits_class('image', e.data):
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
- e.data.setVar('SDKTARGETSYSROOT', e.data.getVar('SDKTARGETSYSROOT', True))
+ e.data.setVar('SDKTARGETSYSROOT', e.data.getVar('SDKTARGETSYSROOT'))
+ override = ":virtclass-multilib-" + variant
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False)
if target_vendor:
e.data.setVar("TARGET_VENDOR", target_vendor)
return
if bb.data.inherits_class('cross-canadian', e.data):
+ # Multilib cross-canadian should use the same nativesdk sysroot without MLPREFIX
+ e.data.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot")
+ e.data.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot")
+ e.data.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot")
e.data.setVar("MLPREFIX", variant + "-")
override = ":virtclass-multilib-" + variant
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
- bb.data.update_data(e.data)
return
if bb.data.inherits_class('native', e.data):
- raise bb.parse.SkipPackage("We can't extend native recipes")
+ raise bb.parse.SkipRecipe("We can't extend native recipes")
if bb.data.inherits_class('nativesdk', e.data) or bb.data.inherits_class('crosssdk', e.data):
- raise bb.parse.SkipPackage("We can't extend nativesdk recipes")
-
- if bb.data.inherits_class('allarch', e.data) and not bb.data.inherits_class('packagegroup', e.data):
- raise bb.parse.SkipPackage("Don't extend allarch recipes which are not packagegroups")
+ raise bb.parse.SkipRecipe("We can't extend nativesdk recipes")
+ if bb.data.inherits_class('allarch', e.data) and not d.getVar('MULTILIB_VARIANTS') \
+ and not bb.data.inherits_class('packagegroup', e.data):
+ raise bb.parse.SkipRecipe("Don't extend allarch recipes which are not packagegroups")
# Expand this since this won't work correctly once we set a multilib into place
- e.data.setVar("ALL_MULTILIB_PACKAGE_ARCHS", e.data.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
+ e.data.setVar("ALL_MULTILIB_PACKAGE_ARCHS", e.data.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
override = ":virtclass-multilib-" + variant
+ blacklist = e.data.getVarFlag('PNBLACKLIST', e.data.getVar('PN'))
+ if blacklist:
+ pn_new = variant + "-" + e.data.getVar('PN')
+ if not e.data.getVarFlag('PNBLACKLIST', pn_new):
+ e.data.setVarFlag('PNBLACKLIST', pn_new, blacklist)
+
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
- # Expand the WHITELISTs with multilib prefix
- for whitelist in ["WHITELIST_GPL-3.0", "LGPLv2_WHITELIST_GPL-3.0"]:
- pkgs = e.data.getVar(whitelist, True)
- for pkg in pkgs.split():
- pkgs += " " + variant + "-" + pkg
- e.data.setVar(whitelist, pkgs)
+ # Expand WHITELIST_GPL-3.0 with multilib prefix
+ pkgs = e.data.getVar("WHITELIST_GPL-3.0")
+ for pkg in pkgs.split():
+ pkgs += " " + variant + "-" + pkg
+ e.data.setVar("WHITELIST_GPL-3.0", pkgs)
# DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
if newtune:
e.data.setVar("DEFAULTTUNE", newtune)
- e.data.setVar('DEFAULTTUNE_ML_%s' % variant, newtune)
}
addhandler multilib_virtclass_handler
multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
-STAGINGCC_prepend = "${BBEXTENDVARIANT}-"
-
python __anonymous () {
- variant = d.getVar("BBEXTENDVARIANT", True)
+ variant = d.getVar("BBEXTENDVARIANT")
import oe.classextend
@@ -88,13 +101,13 @@ python __anonymous () {
clsextend.map_depends_variable("PACKAGE_INSTALL")
clsextend.map_depends_variable("LINGUAS_INSTALL")
clsextend.map_depends_variable("RDEPENDS")
- pinstall = d.getVar("LINGUAS_INSTALL", True) + " " + d.getVar("PACKAGE_INSTALL", True)
+ pinstall = d.getVar("LINGUAS_INSTALL") + " " + d.getVar("PACKAGE_INSTALL")
d.setVar("PACKAGE_INSTALL", pinstall)
d.setVar("LINGUAS_INSTALL", "")
# FIXME, we need to map this to something, not delete it!
d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
-
- if bb.data.inherits_class('image', d):
+ bb.build.deltask('do_populate_sdk', d)
+ bb.build.deltask('do_populate_sdk_ext', d)
return
clsextend.map_depends_variable("DEPENDS")
@@ -104,40 +117,93 @@ python __anonymous () {
return
clsextend.rename_packages()
- clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
+ clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split())
clsextend.map_packagevars()
clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
- clsextend.map_variable("PACKAGE_INSTALL")
clsextend.map_variable("INITSCRIPT_PACKAGES")
clsextend.map_variable("USERADD_PACKAGES")
clsextend.map_variable("SYSTEMD_PACKAGES")
+ clsextend.map_variable("UPDATERCPN")
+
+ reset_alternative_priority(d)
}
+def reset_alternative_priority(d):
+ if not bb.data.inherits_class('update-alternatives', d):
+ return
+
+ # There might be multiple multilibs at the same time, e.g., lib32 and
+ # lib64; each of them should have a different priority.
+ multilib_variants = d.getVar('MULTILIB_VARIANTS')
+ bbextendvariant = d.getVar('BBEXTENDVARIANT')
+ reset_gap = multilib_variants.split().index(bbextendvariant) + 1
+
+ # ALTERNATIVE_PRIORITY = priority
+ alt_priority_recipe = d.getVar('ALTERNATIVE_PRIORITY')
+ # Reset ALTERNATIVE_PRIORITY when found
+ if alt_priority_recipe:
+ reset_priority = int(alt_priority_recipe) - reset_gap
+ bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY to %s' % (d.getVar('PN'), reset_priority))
+ d.setVar('ALTERNATIVE_PRIORITY', reset_priority)
+
+ handled_pkgs = []
+ for pkg in (d.getVar('PACKAGES') or "").split():
+ # ALTERNATIVE_PRIORITY_pkg = priority
+ alt_priority_pkg = d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg)
+ # Reset ALTERNATIVE_PRIORITY_pkg when found
+ if alt_priority_pkg:
+ reset_priority = int(alt_priority_pkg) - reset_gap
+ if not pkg in handled_pkgs:
+ handled_pkgs.append(pkg)
+ bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s to %s' % (pkg, pkg, reset_priority))
+ d.setVar('ALTERNATIVE_PRIORITY_%s' % pkg, reset_priority)
+
+ for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ # ALTERNATIVE_PRIORITY_pkg[tool] = priority
+ alt_priority_pkg_name = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name)
+ # ALTERNATIVE_PRIORITY[tool] = priority
+ alt_priority_name = d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name)
+
+ if alt_priority_pkg_name:
+ reset_priority = int(alt_priority_pkg_name) - reset_gap
+ bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s[%s] to %s' % (pkg, pkg, alt_name, reset_priority))
+ d.setVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name, reset_priority)
+ elif alt_priority_name:
+ reset_priority = int(alt_priority_name) - reset_gap
+ bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY[%s] to %s' % (pkg, alt_name, reset_priority))
+ d.setVarFlag('ALTERNATIVE_PRIORITY', alt_name, reset_priority)
+
PACKAGEFUNCS_append = " do_package_qa_multilib"
python do_package_qa_multilib() {
def check_mlprefix(pkg, var, mlprefix):
- values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg), True) or d.getVar(var, True) or "")
+ values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg)) or d.getVar(var) or "")
candidates = []
for i in values:
if i.startswith('virtual/'):
i = i[len('virtual/'):]
- if (not i.startswith('kernel-module')) and (not i.startswith(mlprefix)) and \
- (not 'cross-canadian' in i) and (not i.startswith("nativesdk-")) and \
- (not i.startswith("rtld")) and (not i.startswith('kernel-vmlinux')):
+
+ if (not (i.startswith(mlprefix) or i.startswith("kernel-") \
+ or ('cross-canadian' in i) or i.startswith("nativesdk-") \
+ or i.startswith("rtld") or i.startswith("/"))):
candidates.append(i)
+
if len(candidates) > 0:
msg = "%s package %s - suspicious values '%s' in %s" \
- % (d.getVar('PN', True), pkg, ' '.join(candidates), var)
+ % (d.getVar('PN'), pkg, ' '.join(candidates), var)
package_qa_handle_error("multilib", msg, d)
- ml = d.getVar('MLPREFIX', True)
+ ml = d.getVar('MLPREFIX')
if not ml:
return
- packages = d.getVar('PACKAGES', True)
+ # exception for ${MLPREFIX}target-sdk-provides-dummy
+ if 'target-sdk-provides-dummy' in d.getVar('PN'):
+ return
+
+ packages = d.getVar('PACKAGES')
for pkg in packages.split():
check_mlprefix(pkg, 'RDEPENDS', ml)
check_mlprefix(pkg, 'RPROVIDES', ml)
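
The configuration this handler reacts to lives in the distro or local configuration; a typical sketch for an x86-64 machine (tune value illustrative):

    require conf/multilib.conf
    MULTILIBS = "multilib:lib32"
    DEFAULTTUNE_virtclass-multilib-lib32 = "x86"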
diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass
index 11ae2681f0..98f65c8aae 100644
--- a/meta/classes/multilib_global.bbclass
+++ b/meta/classes/multilib_global.bbclass
@@ -1,7 +1,7 @@
def preferred_ml_updates(d):
# If any PREFERRED_PROVIDER or PREFERRED_VERSION are set,
# we need to mirror these variables in the multilib case;
- multilibs = d.getVar('MULTILIBS', True) or ""
+ multilibs = d.getVar('MULTILIBS') or ""
if not multilibs:
return
@@ -13,11 +13,14 @@ def preferred_ml_updates(d):
versions = []
providers = []
+ rproviders = []
for v in d.keys():
if v.startswith("PREFERRED_VERSION_"):
versions.append(v)
if v.startswith("PREFERRED_PROVIDER_"):
providers.append(v)
+ if v.startswith("PREFERRED_RPROVIDER_"):
+ rproviders.append(v)
for v in versions:
val = d.getVar(v, False)
@@ -29,7 +32,6 @@ def preferred_ml_updates(d):
localdata = bb.data.createCopy(d)
override = ":virtclass-multilib-" + p
localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
- bb.data.update_data(localdata)
if "-canadian-" in pkg:
newname = localdata.expand(v)
else:
@@ -57,7 +59,6 @@ def preferred_ml_updates(d):
localdata = bb.data.createCopy(d)
override = ":virtclass-multilib-" + p
localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
- bb.data.update_data(localdata)
newname = localdata.expand(prov)
if newname != prov:
newval = localdata.expand(val)
@@ -80,7 +81,6 @@ def preferred_ml_updates(d):
localdata = bb.data.createCopy(d)
override = ":virtclass-multilib-" + p
localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
- bb.data.update_data(localdata)
newname = localdata.expand(prov)
if newname != prov and not d.getVar(newname, False):
d.setVar(newname, localdata.expand(newval))
@@ -94,7 +94,33 @@ def preferred_ml_updates(d):
if prov != provexp and d.getVar(prov, False):
d.renameVar(prov, provexp)
+ for prov in rproviders:
+ val = d.getVar(prov, False)
+ pkg = prov.replace("PREFERRED_RPROVIDER_", "")
+ for p in prefixes:
+ newval = p + "-" + val
+
+ # implement variable keys
+ localdata = bb.data.createCopy(d)
+ override = ":virtclass-multilib-" + p
+ localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+ newname = localdata.expand(prov)
+ if newname != prov and not d.getVar(newname, False):
+ d.setVar(newname, localdata.expand(newval))
+
+ # implement alternative multilib name
+ newname = localdata.expand("PREFERRED_RPROVIDER_" + p + "-" + pkg)
+ if not d.getVar(newname, False) and newval != None:
+ d.setVar(newname, localdata.expand(newval))
+ # Avoid future variable key expansion
+ provexp = d.expand(prov)
+ if prov != provexp and d.getVar(prov, False):
+ d.renameVar(prov, provexp)
+
def translate_provide(prefix, prov):
+ # Really need to know if kernel modules class is inherited somehow
+ if prov == "lttng-modules":
+ return prov
if not prov.startswith("virtual/"):
return prefix + "-" + prov
if prov == "virtual/kernel":
@@ -102,7 +128,7 @@ def preferred_ml_updates(d):
prov = prov.replace("virtual/", "")
return "virtual/" + prefix + "-" + prov
- mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
+ mp = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
extramp = []
for p in mp:
if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
@@ -111,14 +137,14 @@ def preferred_ml_updates(d):
extramp.append(translate_provide(pref, p))
d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
- abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
+ abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
extras = []
for p in prefixes:
for a in abisafe:
extras.append(p + "-" + a)
d.appendVar("SIGGEN_EXCLUDERECIPES_ABISAFE", " " + " ".join(extras))
- siggen_exclude = (d.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+ siggen_exclude = (d.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
extras = []
for p in prefixes:
for a in siggen_exclude:
@@ -128,7 +154,7 @@ def preferred_ml_updates(d):
python multilib_virtclass_handler_vendor () {
if isinstance(e, bb.event.ConfigParsed):
- for v in e.data.getVar("MULTILIB_VARIANTS", True).split():
+ for v in e.data.getVar("MULTILIB_VARIANTS").split():
if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None:
e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
preferred_ml_updates(e.data)
@@ -137,39 +163,44 @@ addhandler multilib_virtclass_handler_vendor
multilib_virtclass_handler_vendor[eventmask] = "bb.event.ConfigParsed"
python multilib_virtclass_handler_global () {
- if not e.data:
+ variant = e.data.getVar("BBEXTENDVARIANT")
+ if variant:
return
- variant = e.data.getVar("BBEXTENDVARIANT", True)
+ non_ml_recipes = d.getVar('NON_MULTILIB_RECIPES').split()
- if isinstance(e, bb.event.RecipeParsed) and not variant:
- if bb.data.inherits_class('kernel', e.data) or \
+ if bb.data.inherits_class('kernel', e.data) or \
bb.data.inherits_class('module-base', e.data) or \
- (bb.data.inherits_class('allarch', e.data) and\
- not bb.data.inherits_class('packagegroup', e.data)):
- variants = (e.data.getVar("MULTILIB_VARIANTS", True) or "").split()
+ d.getVar('BPN') in non_ml_recipes:
+
+ # We need to avoid expanding KERNEL_VERSION which we can do by deleting it
+ # from a copy of the datastore
+ localdata = bb.data.createCopy(d)
+ localdata.delVar("KERNEL_VERSION")
+
+ variants = (e.data.getVar("MULTILIB_VARIANTS") or "").split()
import oe.classextend
clsextends = []
for variant in variants:
- clsextends.append(oe.classextend.ClassExtender(variant, e.data))
+ clsextends.append(oe.classextend.ClassExtender(variant, localdata))
# Process PROVIDES
- origprovs = provs = e.data.getVar("PROVIDES", True) or ""
+ origprovs = provs = localdata.getVar("PROVIDES") or ""
for clsextend in clsextends:
provs = provs + " " + clsextend.map_variable("PROVIDES", setvar=False)
e.data.setVar("PROVIDES", provs)
# Process RPROVIDES
- origrprovs = rprovs = e.data.getVar("RPROVIDES", True) or ""
+ origrprovs = rprovs = localdata.getVar("RPROVIDES") or ""
for clsextend in clsextends:
rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES", setvar=False)
if rprovs.strip():
e.data.setVar("RPROVIDES", rprovs)
- # Process RPROVIDES_${PN}...
- for pkg in (e.data.getVar("PACKAGES", True) or "").split():
- origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg, True) or ""
+ # Process RPROVIDES_${PN}...
+ for pkg in (e.data.getVar("PACKAGES") or "").split():
+ origrprovs = rprovs = localdata.getVar("RPROVIDES_%s" % pkg) or ""
for clsextend in clsextends:
rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
rprovs = rprovs + " " + clsextend.extname + "-" + pkg
@@ -177,5 +208,4 @@ python multilib_virtclass_handler_global () {
}
addhandler multilib_virtclass_handler_global
-multilib_virtclass_handler_global[eventmask] = "bb.event.RecipePreFinalise bb.event.RecipeParsed"
-
+multilib_virtclass_handler_global[eventmask] = "bb.event.RecipeTaskPreProcess"
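
With the PREFERRED_RPROVIDER handling added above, a single assignment is mirrored for each configured multilib prefix; a sketch, assuming a lib32 multilib and an illustrative provider name:

    PREFERRED_RPROVIDER_jpeg = "libjpeg-turbo"
    # preferred_ml_updates() then also generates:
    # PREFERRED_RPROVIDER_lib32-jpeg = "lib32-libjpeg-turbo"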
diff --git a/meta/classes/multilib_header.bbclass b/meta/classes/multilib_header.bbclass
index 5ee0a2d562..e03f5b13b2 100644
--- a/meta/classes/multilib_header.bbclass
+++ b/meta/classes/multilib_header.bbclass
@@ -13,13 +13,9 @@ oe_multilib_header() {
;;
*)
esac
- # We use
- # For ARM: We don't support multilib builds.
# For MIPS: "n32" is a special case, which needs to be
# distinct from both 64-bit and 32-bit.
case ${TARGET_ARCH} in
- arm*) return
- ;;
mips*) case "${MIPSPKGSFX_ABI}" in
"-n32")
ident=n32
@@ -31,9 +27,6 @@ oe_multilib_header() {
;;
*) ident=${SITEINFO_BITS}
esac
- if echo ${TARGET_ARCH} | grep -q arm; then
- return
- fi
for each_header in "$@" ; do
if [ ! -f "${D}/${includedir}/$each_header" ]; then
bberror "oe_multilib_header: Unable to find header $each_header."
@@ -52,3 +45,8 @@ oe_multilib_header() {
oe_multilib_header_class-native () {
return
}
+
+# Nor do we need multilib headers for nativesdk builds.
+oe_multilib_header_class-nativesdk () {
+ return
+}
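
Recipes call the helper from do_install with header paths relative to ${includedir}; a sketch with an illustrative header name:

    do_install_append() {
        oe_multilib_header openssl/opensslconf.h
    }

The header is renamed per-ABI and replaced by a small wrapper that includes the correct variant based on word size.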
diff --git a/meta/classes/multilib_script.bbclass b/meta/classes/multilib_script.bbclass
new file mode 100644
index 0000000000..b11efc1ec5
--- /dev/null
+++ b/meta/classes/multilib_script.bbclass
@@ -0,0 +1,34 @@
+#
+# Recipe needs to set MULTILIB_SCRIPTS in the form <pkgname>:<scriptname>, e.g.
+# MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/file1 ${PN}:${base_bindir}/file2"
+# to indicate which script files to process from which packages.
+#
+
+inherit update-alternatives
+
+MULTILIB_SUFFIX = "${@d.getVar('base_libdir',1).split('/')[-1]}"
+
+PACKAGE_PREPROCESS_FUNCS += "multilibscript_rename"
+
+multilibscript_rename() {
+ :
+}
+
+python () {
+ # Do nothing if multilib isn't being used
+ if not d.getVar("MULTILIB_VARIANTS"):
+ return
+ # Do nothing for native/cross
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
+ return
+
+ for entry in (d.getVar("MULTILIB_SCRIPTS", False) or "").split():
+ pkg, script = entry.split(":")
+ epkg = d.expand(pkg)
+ scriptname = os.path.basename(script)
+ d.appendVar("ALTERNATIVE_" + epkg, " " + scriptname + " ")
+ d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
+ d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
+ d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
+ d.appendVar("FILES_" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
+}
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
index f67ef00142..3a4f572552 100644
--- a/meta/classes/native.bbclass
+++ b/meta/classes/native.bbclass
@@ -44,7 +44,6 @@ CPPFLAGS = "${BUILD_CPPFLAGS}"
CFLAGS = "${BUILD_CFLAGS}"
CXXFLAGS = "${BUILD_CXXFLAGS}"
LDFLAGS = "${BUILD_LDFLAGS}"
-LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE} "
STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
@@ -52,8 +51,6 @@ STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
# native pkg doesn't need the TOOLCHAIN_OPTIONS.
TOOLCHAIN_OPTIONS = ""
-DEPENDS_GETTEXT = "gettext-native"
-
# Don't build ptest natively
PTEST_ENABLED = "0"
@@ -80,6 +77,7 @@ exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
bindir = "${STAGING_BINDIR_NATIVE}"
sbindir = "${STAGING_SBINDIR_NATIVE}"
+base_libdir = "${STAGING_LIBDIR_NATIVE}"
libdir = "${STAGING_LIBDIR_NATIVE}"
includedir = "${STAGING_INCDIR_NATIVE}"
sysconfdir = "${STAGING_ETCDIR_NATIVE}"
@@ -87,16 +85,17 @@ datadir = "${STAGING_DATADIR_NATIVE}"
baselib = "lib"
-# Libtool's default paths are correct for the native machine
-lt_cv_sys_lib_dlsearch_path_spec[unexport] = "1"
+export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
NATIVE_PACKAGE_PATH_SUFFIX ?= ""
bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
+sbindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
+base_libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
-do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_NATIVE}/"
+do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
# Since we actually install these into situ there is no staging prefix
STAGING_DIR_HOST = ""
@@ -109,26 +108,40 @@ PKG_CONFIG_SYSROOT_DIR = ""
PKG_CONFIG_SYSTEM_LIBRARY_PATH[unexport] = "1"
PKG_CONFIG_SYSTEM_INCLUDE_PATH[unexport] = "1"
-# we dont want libc-uclibc or libc-glibc to kick in for native recipes
+# we don't want libc-*libc to kick in for native recipes
LIBCOVERRIDE = ""
CLASSOVERRIDE = "class-native"
MACHINEOVERRIDES = ""
+MACHINE_FEATURES = ""
PATH_prepend = "${COREBASE}/scripts/native-intercept:"
+# This class encodes staging paths into its scripts data so it can only be
+# reused if we manipulate the paths.
+SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
+
+# No strip sysroot when DEBUG_BUILD is enabled
+INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
+
python native_virtclass_handler () {
- classextend = e.data.getVar('BBCLASSEXTEND', True) or ""
- if "native" not in classextend:
+ pn = e.data.getVar("PN")
+ if not pn.endswith("-native"):
return
- pn = e.data.getVar("PN", True)
- if not pn.endswith("-native"):
+ # Set features here to prevent appends and distro features backfill
+ # from modifying native distro features
+ features = set(d.getVar("DISTRO_FEATURES_NATIVE").split())
+ filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVE"), d).split())
+ d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
+
+ classextend = e.data.getVar('BBCLASSEXTEND') or ""
+ if "native" not in classextend:
return
def map_dependencies(varname, d, suffix = ""):
if suffix:
varname = varname + "_" + suffix
- deps = d.getVar(varname, True)
+ deps = d.getVar(varname)
if not deps:
return
deps = bb.utils.explode_deps(deps)
@@ -144,17 +157,15 @@ python native_virtclass_handler () {
newdeps.append(dep)
d.setVar(varname, " ".join(newdeps))
- e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-native")
-
map_dependencies("DEPENDS", e.data)
- for pkg in [e.data.getVar("PN", True), "", "${PN}"]:
+ for pkg in [e.data.getVar("PN"), "", "${PN}"]:
map_dependencies("RDEPENDS", e.data, pkg)
map_dependencies("RRECOMMENDS", e.data, pkg)
map_dependencies("RSUGGESTS", e.data, pkg)
map_dependencies("RPROVIDES", e.data, pkg)
map_dependencies("RREPLACES", e.data, pkg)
- provides = e.data.getVar("PROVIDES", True)
+ provides = e.data.getVar("PROVIDES")
nprovides = []
for prov in provides.split():
if prov.find(pn) != -1:
@@ -171,9 +182,18 @@ python native_virtclass_handler () {
addhandler native_virtclass_handler
native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
+python do_addto_recipe_sysroot () {
+ bb.build.exec_func("extend_recipe_sysroot", d)
+}
+addtask addto_recipe_sysroot after do_populate_sysroot
+do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
+
inherit nopackages
do_packagedata[stamp-extra-info] = ""
do_populate_sysroot[stamp-extra-info] = ""
USE_NLS = "no"
+
+RECIPERDEPTASK = "do_populate_sysroot"
+do_populate_sysroot[rdeptask] = "${RECIPERDEPTASK}"
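
Most recipes opt in to a native variant via class extension; the new do_addto_recipe_sysroot task can then be scheduled directly to stage a tool and its dependencies into its own recipe sysroot (recipe name illustrative; devtool-style workflows consume it this way):

    BBCLASSEXTEND = "native"
    # populate the tool's sysroot on demand, e.g.:
    #   bitbake quilt-native -c addto_recipe_sysroot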
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index f74da6267f..03135acedc 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -10,6 +10,13 @@ LIBCOVERRIDE = ":${NATIVESDKLIBC}"
CLASSOVERRIDE = "class-nativesdk"
MACHINEOVERRIDES = ""
+MULTILIBS = ""
+
+# we need consistent staging dir whether or not multilib is enabled
+STAGING_DIR_HOST = "${WORKDIR}/recipe-sysroot"
+STAGING_DIR_TARGET = "${WORKDIR}/recipe-sysroot"
+RECIPE_SYSROOT = "${WORKDIR}/recipe-sysroot"
+
#
# Update PACKAGE_ARCH and PACKAGE_ARCHS
#
@@ -23,9 +30,7 @@ PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
DEPENDS_append = " chrpath-replacement-native"
EXTRANATIVEPATH += "chrpath-native"
-STAGING_DIR_HOST = "${STAGING_DIR}/${MULTIMACH_HOST_SYS}"
-STAGING_DIR_TARGET = "${STAGING_DIR}/${MULTIMACH_TARGET_SYS}"
-PKGDATA_DIR = "${STAGING_DIR_HOST}/pkgdata"
+PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
HOST_ARCH = "${SDK_ARCH}"
HOST_VENDOR = "${SDK_VENDOR}"
@@ -43,6 +48,10 @@ TARGET_PREFIX = "${SDK_PREFIX}"
TARGET_CC_ARCH = "${SDK_CC_ARCH}"
TARGET_LD_ARCH = "${SDK_LD_ARCH}"
TARGET_AS_ARCH = "${SDK_AS_ARCH}"
+TARGET_CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
+TARGET_CFLAGS = "${BUILDSDK_CFLAGS}"
+TARGET_CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
+TARGET_LDFLAGS = "${BUILDSDK_LDFLAGS}"
TARGET_FPU = ""
EXTRA_OECONF_GCC_FLOAT = ""
@@ -62,17 +71,22 @@ export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
python nativesdk_virtclass_handler () {
- pn = e.data.getVar("PN", True)
+ pn = e.data.getVar("PN")
if not (pn.endswith("-nativesdk") or pn.startswith("nativesdk-")):
return
+ # Set features here to prevent appends and distro features backfill
+ # from modifying nativesdk distro features
+ features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
+ filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
+ d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
+
e.data.setVar("MLPREFIX", "nativesdk-")
- e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN", True).replace("-nativesdk", "").replace("nativesdk-", ""))
- e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-nativesdk")
+ e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN").replace("-nativesdk", "").replace("nativesdk-", ""))
}
python () {
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
if not pn.startswith("nativesdk-"):
return
@@ -80,12 +94,14 @@ python () {
clsextend = oe.classextend.NativesdkClassExtender("nativesdk", d)
clsextend.rename_packages()
- clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
+ clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split())
clsextend.map_depends_variable("DEPENDS")
clsextend.map_packagevars()
clsextend.map_variable("PROVIDES")
clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
+ d.setVar("LIBCEXTENSION", "")
+ d.setVar("ABIEXTENSION", "")
}
addhandler nativesdk_virtclass_handler
@@ -95,3 +111,5 @@ do_populate_sysroot[stamp-extra-info] = ""
do_packagedata[stamp-extra-info] = ""
USE_NLS = "${SDKUSE_NLS}"
+
+OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}"
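
As with native, recipes normally opt in via class extension rather than inheriting this class directly:

    BBCLASSEXTEND = "nativesdk"
    # or provide a dedicated recipe whose name carries the prefix,
    # e.g. nativesdk-<recipename>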
diff --git a/meta/classes/npm.bbclass b/meta/classes/npm.bbclass
index fce4c1146f..4b1f0a39f0 100644
--- a/meta/classes/npm.bbclass
+++ b/meta/classes/npm.bbclass
@@ -2,7 +2,15 @@ DEPENDS_prepend = "nodejs-native "
RDEPENDS_${PN}_prepend = "nodejs "
S = "${WORKDIR}/npmpkg"
-NPM_INSTALLDIR = "${D}${libdir}/node_modules/${PN}"
+def node_pkgname(d):
+ bpn = d.getVar('BPN')
+ if bpn.startswith("node-"):
+ return bpn[5:]
+ return bpn
+
+NPMPN ?= "${@node_pkgname(d)}"
+
+NPM_INSTALLDIR = "${libdir}/node_modules/${NPMPN}"
# function maps arch names to npm arch names
def npm_oe_arch_map(target_arch, d):
@@ -13,7 +21,8 @@ def npm_oe_arch_map(target_arch, d):
elif re.match('arm64$', target_arch): return 'arm'
return target_arch
-NPM_ARCH ?= "${@npm_oe_arch_map(d.getVar('TARGET_ARCH', True), d)}"
+NPM_ARCH ?= "${@npm_oe_arch_map(d.getVar('TARGET_ARCH'), d)}"
+NPM_INSTALL_DEV ?= "0"
npm_do_compile() {
# Copy in any additionally fetched modules
@@ -23,27 +32,45 @@ npm_do_compile() {
# changing the home directory to the working directory, the .npmrc will
# be created in this directory
export HOME=${WORKDIR}
- npm config set dev false
+ if [ "${NPM_INSTALL_DEV}" = "1" ]; then
+ npm config set dev true
+ else
+ npm config set dev false
+ fi
npm set cache ${WORKDIR}/npm_cache
# clear cache before every build
- npm cache clear
+ npm cache clear --force
# Install pkg into ${S} without going to the registry
- npm --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry install
+ if [ "${NPM_INSTALL_DEV}" = "1" ]; then
+ npm --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --no-registry install
+ else
+ npm --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry install
+ fi
}
npm_do_install() {
- mkdir -p ${NPM_INSTALLDIR}/
- cp -a ${S}/* ${NPM_INSTALLDIR}/ --no-preserve=ownership
+ # changing the home directory to the working directory means the .npmrc will
+ # be created in this directory
+ export HOME=${WORKDIR}
+ mkdir -p ${D}${libdir}/node_modules
+ local NPM_PACKFILE=$(npm pack .)
+ npm install --prefix ${D}${prefix} -g --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry ${NPM_PACKFILE}
+ ln -fs node_modules ${D}${libdir}/node
+ find ${D}${NPM_INSTALLDIR} -type f \( -name "*.a" -o -name "*.d" -o -name "*.o" \) -delete
+ if [ -d ${D}${prefix}/etc ] ; then
+ # This will be empty
+ rmdir ${D}${prefix}/etc
+ fi
}
python populate_packages_prepend () {
- instdir = d.expand('${D}${libdir}/node_modules/${PN}')
+ instdir = d.expand('${D}${NPM_INSTALLDIR}')
extrapackages = oe.package.npm_split_package_dirs(instdir)
pkgnames = extrapackages.keys()
d.prependVar('PACKAGES', '%s ' % ' '.join(pkgnames))
for pkgname in pkgnames:
pkgrelpath, pdata = extrapackages[pkgname]
- pkgpath = '${libdir}/node_modules/${PN}/' + pkgrelpath
+ pkgpath = '${NPM_INSTALLDIR}/' + pkgrelpath
# package names can't have underscores but npm packages sometimes use them
oe_pkg_name = pkgname.replace('_', '-')
expanded_pkgname = d.expand(oe_pkg_name)
@@ -55,11 +82,13 @@ python populate_packages_prepend () {
description = pdata.get('description', None)
if description:
d.setVar('SUMMARY_%s' % expanded_pkgname, description.replace(u"\u2018", "'").replace(u"\u2019", "'"))
- d.appendVar('RDEPENDS_%s' % d.getVar('PN', True), ' %s' % ' '.join(pkgnames).replace('_', '-'))
+ d.appendVar('RDEPENDS_%s' % d.getVar('PN'), ' %s' % ' '.join(pkgnames).replace('_', '-'))
}
FILES_${PN} += " \
- ${libdir}/node_modules/${PN} \
+ ${bindir} \
+ ${libdir}/node \
+ ${NPM_INSTALLDIR} \
"
EXPORT_FUNCTIONS do_compile do_install
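
A sketch of how the new naming and knobs interact, assuming a recipe whose BPN is node-leftpad (name illustrative):

    NPM_INSTALL_DEV = "1"
    # NPMPN strips the "node-" prefix, so files install into
    # ${libdir}/node_modules/leftpad and FILES_${PN} picks them up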
diff --git a/meta/classes/oelint.bbclass b/meta/classes/oelint.bbclass
index c4febc2cfa..2589d34059 100644
--- a/meta/classes/oelint.bbclass
+++ b/meta/classes/oelint.bbclass
@@ -1,7 +1,7 @@
addtask lint before do_build
do_lint[nostamp] = "1"
python do_lint() {
- pkgname = d.getVar("PN", True)
+ pkgname = d.getVar("PN")
##############################
# Test that DESCRIPTION exists
@@ -35,7 +35,7 @@ python do_lint() {
# Check that all patches have Signed-off-by and Upstream-Status
#
srcuri = d.getVar("SRC_URI", False).split()
- fpaths = (d.getVar('FILESPATH', True) or '').split(':')
+ fpaths = (d.getVar('FILESPATH') or '').split(':')
def findPatch(patchname):
for dir in fpaths:
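The recurring d.getVar(X, True) -> d.getVar(X) edits in this commit track a BitBake datastore change in which getVar()'s expand parameter now defaults to True, making the explicit argument redundant. A stand-in sketch assuming that default (FakeDataStore is illustrative, not BitBake's implementation):

    class FakeDataStore:
        """Toy datastore mimicking the modern d.getVar() default."""
        def __init__(self):
            self._vars = {}
        def setVar(self, key, value):
            self._vars[key] = value
        def getVar(self, key, expand=True):
            value = self._vars.get(key)
            if value is not None and expand:
                for k, v in self._vars.items():
                    value = value.replace("${%s}" % k, v)  # crude ${...} expansion
            return value

    d = FakeDataStore()
    d.setVar("PN", "oelint")
    d.setVar("B", "/build/${PN}")
    assert d.getVar("B") == d.getVar("B", True) == "/build/oelint"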
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
index 12b42675bc..a777835138 100644
--- a/meta/classes/own-mirrors.bbclass
+++ b/meta/classes/own-mirrors.bbclass
@@ -1,13 +1,13 @@
-PREMIRRORS() {
-cvs://.*/.* ${SOURCE_MIRROR_URL}
-svn://.*/.* ${SOURCE_MIRROR_URL}
-git://.*/.* ${SOURCE_MIRROR_URL}
-gitsm://.*/.* ${SOURCE_MIRROR_URL}
-hg://.*/.* ${SOURCE_MIRROR_URL}
-bzr://.*/.* ${SOURCE_MIRROR_URL}
-p4://.*/.* ${SOURCE_MIRROR_URL}
-osc://.*/.* ${SOURCE_MIRROR_URL}
-https?$://.*/.* ${SOURCE_MIRROR_URL}
-ftp://.*/.* ${SOURCE_MIRROR_URL}
-npm://.*/.* ${SOURCE_MIRROR_URL}
-}
+PREMIRRORS_prepend = " \
+cvs://.*/.* ${SOURCE_MIRROR_URL} \n \
+svn://.*/.* ${SOURCE_MIRROR_URL} \n \
+git://.*/.* ${SOURCE_MIRROR_URL} \n \
+gitsm://.*/.* ${SOURCE_MIRROR_URL} \n \
+hg://.*/.* ${SOURCE_MIRROR_URL} \n \
+bzr://.*/.* ${SOURCE_MIRROR_URL} \n \
+p4://.*/.* ${SOURCE_MIRROR_URL} \n \
+osc://.*/.* ${SOURCE_MIRROR_URL} \n \
+https?$://.*/.* ${SOURCE_MIRROR_URL} \n \
+ftp://.*/.* ${SOURCE_MIRROR_URL} \n \
+npm://.*/?.* ${SOURCE_MIRROR_URL} \n \
+"
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index a9ca14164b..ef3de35961 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -26,7 +26,7 @@
# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
#
# h) package_do_shlibs - Look at the shared libraries generated and automatically add any
-# depenedencies found. Also stores the package name so anyone else using this library
+# dependencies found. Also stores the package name so anyone else using this library
# knows which package to depend on.
#
# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
@@ -40,6 +40,7 @@
inherit packagedata
inherit chrpath
+inherit package_pkgdata
# Need the package_qa_handle_error() in insane.bbclass
inherit insane
@@ -52,7 +53,16 @@ LOCALE_SECTION ?= ''
ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
# rpm is used for the per-file dependency identification
-PACKAGE_DEPENDS += "rpm-native"
+# dwarfsrcfiles is used to determine the list of debug source files
+PACKAGE_DEPENDS += "rpm-native dwarfsrcfiles-native"
+
+
+# If your postinstall can execute at rootfs creation time rather than on
+# target but depends on a native/cross tool in order to execute, you need to
+# list that tool in PACKAGE_WRITE_DEPS. Target package dependencies belong
+# in the package dependencies as normal; this is just for native/cross support
+# tools at rootfs build time.
+PACKAGE_WRITE_DEPS ??= ""
def legitimize_package_name(s):
"""
@@ -66,7 +76,7 @@ def legitimize_package_name(s):
return ('\\u%s' % cp).encode('latin-1').decode('unicode_escape')
# Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
- s = re.sub('<U([0-9A-Fa-f]{1,4})>', fixutf, s)
+ s = re.sub(r'<U([0-9A-Fa-f]{1,4})>', fixutf, s)
# Remaining package name validity fixes
return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
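Concretely, the <Uxxxx> escape decoding runs first and the character substitutions after; a standalone copy makes the behaviour easy to check:

    import re

    def legitimize_package_name(s):
        # Same transform as above, lifted out for a quick standalone check.
        def fixutf(m):
            cp = m.group(1)
            if cp:
                return ('\\u%s' % cp).encode('latin-1').decode('unicode_escape')
        s = re.sub(r'<U([0-9A-Fa-f]{1,4})>', fixutf, s)
        return s.lower().replace('_', '-').replace('@', '+') \
                .replace(',', '+').replace('/', '-')

    assert legitimize_package_name('Locale_EN@euro') == 'locale-en+euro'
    assert legitimize_package_name('<U0041>BC') == 'abc'  # <U0041> decodes to 'A'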
@@ -120,7 +130,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
"""
- dvar = d.getVar('PKGD', True)
+ dvar = d.getVar('PKGD')
root = d.expand(root)
output_pattern = d.expand(output_pattern)
extra_depends = d.expand(extra_depends)
@@ -130,7 +140,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
if not os.path.exists(dvar + root):
return []
- ml = d.getVar("MLPREFIX", True)
+ ml = d.getVar("MLPREFIX")
if ml:
if not output_pattern.startswith(ml):
output_pattern = ml + output_pattern
@@ -145,7 +155,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
extra_depends = " ".join(newdeps)
- packages = d.getVar('PACKAGES', True).split()
+ packages = d.getVar('PACKAGES').split()
split_packages = set()
if postinst:
@@ -163,7 +173,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
objs.append(relpath)
if extra_depends == None:
- extra_depends = d.getVar("PN", True)
+ extra_depends = d.getVar("PN")
if not summary:
summary = description
@@ -189,7 +199,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
packages = [pkg] + packages
else:
packages.append(pkg)
- oldfiles = d.getVar('FILES_' + pkg, True)
+ oldfiles = d.getVar('FILES_' + pkg)
newfile = os.path.join(root, o)
# These names will be passed through glob() so if the filename actually
# contains * or ? (rare, but possible) we need to handle that specially
@@ -214,9 +224,9 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
d.setVar('FILES_' + pkg, oldfiles + " " + newfile)
if extra_depends != '':
d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
- if not d.getVar('DESCRIPTION_' + pkg, True):
+ if not d.getVar('DESCRIPTION_' + pkg):
d.setVar('DESCRIPTION_' + pkg, description % on)
- if not d.getVar('SUMMARY_' + pkg, True):
+ if not d.getVar('SUMMARY_' + pkg):
d.setVar('SUMMARY_' + pkg, summary % on)
if postinst:
d.setVar('pkg_postinst_' + pkg, postinst)
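do_split_packages() is the generic splitter used by classes such as fontcache: it globs root for file_regex matches, creates one package per match via output_pattern, and fills in FILES/RDEPENDS/SUMMARY accordingly. A hedged usage sketch with a hypothetical plugin layout and package names:

    # Hypothetical recipe fragment: one myapp-plugin-<name> package per
    # lib<name>.so found under ${libdir}/myapp/plugins.
    python populate_packages_prepend () {
        plugindir = d.expand('${libdir}/myapp/plugins')
        do_split_packages(d, plugindir,
                          r'^lib(.*)\.so$',       # file_regex; group 1 names the package
                          'myapp-plugin-%s',      # output_pattern
                          'MyApp plugin for %s',  # description, %s substituted per match
                          extra_depends='')       # suppress the default RDEPENDS on PN
    }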
@@ -231,9 +241,9 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
PACKAGE_DEPENDS += "file-native"
python () {
- if d.getVar('PACKAGES', True) != '':
+ if d.getVar('PACKAGES') != '':
deps = ""
- for dep in (d.getVar('PACKAGE_DEPENDS', True) or "").split():
+ for dep in (d.getVar('PACKAGE_DEPENDS') or "").split():
deps += " %s:do_populate_sysroot" % dep
d.appendVarFlag('do_package', 'depends', deps)
@@ -286,14 +296,14 @@ def files_from_filevars(filevars):
# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
def get_conffiles(pkg, d):
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
root = os.path.join(pkgdest, pkg)
cwd = os.getcwd()
os.chdir(root)
- conffiles = d.getVar('CONFFILES_%s' % pkg, True);
+ conffiles = d.getVar('CONFFILES_%s' % pkg);
if conffiles == None:
- conffiles = d.getVar('CONFFILES', True)
+ conffiles = d.getVar('CONFFILES')
if conffiles == None:
conffiles = ""
conffiles = conffiles.split()
@@ -318,7 +328,7 @@ def get_conffiles(pkg, d):
return conf_list
def checkbuildpath(file, d):
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
with open(file) as f:
file_content = f.read()
if tmpdir in file_content:
@@ -326,22 +336,63 @@ def checkbuildpath(file, d):
return False
-def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
+def parse_debugsources_from_dwarfsrcfiles_output(dwarfsrcfiles_output):
+ debugfiles = {}
+
+ for line in dwarfsrcfiles_output.splitlines():
+ if line.startswith("\t"):
+ debugfiles[os.path.normpath(line.split()[0])] = ""
+
+ return debugfiles.keys()
+
+def source_info(file, d, fatal=True):
+ import subprocess
+
+ cmd = ["dwarfsrcfiles", file]
+ try:
+ output = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.STDOUT)
+ retval = 0
+ except subprocess.CalledProcessError as exc:
+ output = exc.output
+ retval = exc.returncode
+
+ # 255 means a specific file wasn't fully parsed to get the debug file list, which is not a fatal failure
+ if retval != 0 and retval != 255:
+ msg = "dwarfsrcfiles failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")
+ if fatal:
+ bb.fatal(msg)
+ bb.note(msg)
+
+ debugsources = parse_debugsources_from_dwarfsrcfiles_output(output)
+
+ return list(debugsources)
+
+def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d):
# Function to split a single file into two components, one is the stripped
# target system binary, the other contains any debugging information. The
# two files are linked to reference each other.
#
- # sourcefile is also generated containing a list of debugsources
+ # return a mapping of files:debugsources
import stat
+ import subprocess
+
+ src = file[len(dvar):]
+ dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ debugfile = dvar + dest
+ sources = []
- dvar = d.getVar('PKGD', True)
- objcopy = d.getVar("OBJCOPY", True)
- debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
+ # Split the file...
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+ #bb.note("Split %s -> %s" % (file, debugfile))
+ # Only store off the hard link reference if we successfully split!
+
+ dvar = d.getVar('PKGD')
+ objcopy = d.getVar("OBJCOPY")
# We ignore kernel modules, we don't generate debug info files.
if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
- return 1
+ return (file, sources)
newmode = None
if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
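source_info() treats only exit code 255 as a soft failure and then keeps just the tab-indented lines of the tool's output, which name the source files referenced by the binary's DWARF data. A sketch with assumed dwarfsrcfiles-style output (the paths are hypothetical):

    import os

    # Hypothetical output: a compile unit line, then tab-indented sources.
    sample_output = (
        "/build/tmp/work/app/1.0/build\n"
        "\t/usr/src/app/main.c\n"
        "\t/usr/src/app/../lib/util.c\n"
    )

    def parse_debugsources(output):
        # Tab-indented lines only, paths normalised, duplicates dropped --
        # the same filtering parse_debugsources_from_dwarfsrcfiles_output() applies.
        debugfiles = {}
        for line in output.splitlines():
            if line.startswith("\t"):
                debugfiles[os.path.normpath(line.split()[0])] = ""
        return list(debugfiles.keys())

    print(parse_debugsources(sample_output))
    # ['/usr/src/app/main.c', '/usr/src/lib/util.c']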
@@ -351,42 +402,42 @@ def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
# We need to extract the debug src information here...
if debugsrcdir:
- cmd = "'%s' -i -l '%s' '%s'" % (debugedit, sourcefile, file)
- (retval, output) = oe.utils.getstatusoutput(cmd)
- if retval:
- bb.fatal("debugedit failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+ sources = source_info(file, d)
bb.utils.mkdirhier(os.path.dirname(debugfile))
- cmd = "'%s' --only-keep-debug '%s' '%s'" % (objcopy, file, debugfile)
- (retval, output) = oe.utils.getstatusoutput(cmd)
- if retval:
- bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+ subprocess.check_output([objcopy, '--only-keep-debug', file, debugfile], stderr=subprocess.STDOUT)
# Set the debuglink to have the view of the file path on the target
- cmd = "'%s' --add-gnu-debuglink='%s' '%s'" % (objcopy, debugfile, file)
- (retval, output) = oe.utils.getstatusoutput(cmd)
- if retval:
- bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+ subprocess.check_output([objcopy, '--add-gnu-debuglink', debugfile, file], stderr=subprocess.STDOUT)
if newmode:
os.chmod(file, origmode)
- return 0
+ return (file, sources)
-def copydebugsources(debugsrcdir, d):
- # The debug src information written out to sourcefile is further procecessed
+def copydebugsources(debugsrcdir, sources, d):
+ # The debug src information written out to sourcefile is further processed
# and copied to the destination here.
import stat
-
- sourcefile = d.expand("${WORKDIR}/debugsources.list")
- if debugsrcdir and os.path.isfile(sourcefile):
- dvar = d.getVar('PKGD', True)
- strip = d.getVar("STRIP", True)
- objcopy = d.getVar("OBJCOPY", True)
- debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
- workdir = d.getVar("WORKDIR", True)
+ import subprocess
+
+ if debugsrcdir and sources:
+ sourcefile = d.expand("${WORKDIR}/debugsources.list")
+ bb.utils.remove(sourcefile)
+
+ # filenames are null-separated - this is an artefact of the previous use
+ # of rpm's debugedit, which was writing them out that way, and the code elsewhere
+ # is still assuming that.
+ debuglistoutput = '\0'.join(sources) + '\0'
+ with open(sourcefile, 'a') as sf:
+ sf.write(debuglistoutput)
+
+ dvar = d.getVar('PKGD')
+ strip = d.getVar("STRIP")
+ objcopy = d.getVar("OBJCOPY")
+ workdir = d.getVar("WORKDIR")
workparentdir = os.path.dirname(os.path.dirname(workdir))
workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
@@ -406,7 +457,8 @@ def copydebugsources(debugsrcdir, d):
bb.utils.mkdirhier(basepath)
cpath.updatecache(basepath)
- processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '(<internal>|<built-in>)$' | "
+ # Ignore files from the recipe sysroots (target and native)
+ processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '((<internal>|<built-in>)$|/.*recipe-sysroot.*/)' | "
# We need to ignore files that are not actually ours
# we do this by only paying attention to items from this package
processdebugsrc += "fgrep -zw '%s' | "
@@ -415,23 +467,21 @@ def copydebugsources(debugsrcdir, d):
processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
cmd = processdebugsrc % (sourcefile, workbasedir, localsrc_prefix, workparentdir, dvar, debugsrcdir)
- (retval, output) = oe.utils.getstatusoutput(cmd)
- # Can "fail" if internal headers/transient sources are attempted
- #if retval:
- # bb.fatal("debug source copy failed with exit code %s (cmd was %s)" % (retval, cmd))
+ try:
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError:
+ # Can "fail" if internal headers/transient sources are attempted
+ pass
# cpio seems to have a bug with -l and -L together: symbolic links are just copied, not dereferenced.
# Work around this by manually finding and copying any symbolic links that made it through.
- cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s' 2>/dev/null)" % (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
- (retval, output) = oe.utils.getstatusoutput(cmd)
- if retval:
- bb.fatal("debugsrc symlink fixup failed with exit code %s (cmd was %s)" % (retval, cmd))
+ cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s')" % \
+ (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
# The copy by cpio may have resulted in some empty directories! Remove these
cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
- (retval, output) = oe.utils.getstatusoutput(cmd)
- if retval:
- bb.fatal("empty directory removal failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
# Also remove debugsrcdir if it's empty
for p in nosuchdir[::-1]:
@@ -450,7 +500,8 @@ def get_package_mapping (pkg, basepkg, d):
if key in data:
# Have to avoid undoing the write_extra_pkgs(global_variants...)
- if bb.data.inherits_class('allarch', d) and data[key] == basepkg:
+ if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
+ and data[key] == basepkg:
return pkg
return data[key]
@@ -462,26 +513,23 @@ def get_package_additional_metadata (pkg_type, d):
if d.getVar(key, False) is None:
continue
d.setVarFlag(key, "type", "list")
- if d.getVarFlag(key, "separator", True) is None:
+ if d.getVarFlag(key, "separator") is None:
d.setVarFlag(key, "separator", "\\n")
metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
return "\n".join(metadata_fields).strip()
def runtime_mapping_rename (varname, pkg, d):
- #bb.note("%s before: %s" % (varname, d.getVar(varname, True)))
-
- if bb.data.inherits_class('packagegroup', d):
- return
+ #bb.note("%s before: %s" % (varname, d.getVar(varname)))
new_depends = {}
- deps = bb.utils.explode_dep_versions2(d.getVar(varname, True) or "")
+ deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
for depend in deps:
new_depend = get_package_mapping(depend, pkg, d)
new_depends[new_depend] = deps[depend]
d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
- #bb.note("%s after: %s" % (varname, d.getVar(varname, True)))
+ #bb.note("%s after: %s" % (varname, d.getVar(varname)))
#
# Package functions suitable for inclusion in PACKAGEFUNCS
@@ -492,34 +540,34 @@ python package_get_auto_pr() {
import re
# Support per recipe PRSERV_HOST
- pn = d.getVar('PN', True)
- host = d.getVar("PRSERV_HOST_" + pn, True)
+ pn = d.getVar('PN')
+ host = d.getVar("PRSERV_HOST_" + pn)
if not (host is None):
d.setVar("PRSERV_HOST", host)
- pkgv = d.getVar("PKGV", True)
+ pkgv = d.getVar("PKGV")
# PR Server not active, handle AUTOINC
- if not d.getVar('PRSERV_HOST', True):
+ if not d.getVar('PRSERV_HOST'):
if 'AUTOINC' in pkgv:
d.setVar("PKGV", pkgv.replace("AUTOINC", "0"))
return
auto_pr = None
- pv = d.getVar("PV", True)
- version = d.getVar("PRAUTOINX", True)
- pkgarch = d.getVar("PACKAGE_ARCH", True)
- checksum = d.getVar("BB_TASKHASH", True)
+ pv = d.getVar("PV")
+ version = d.getVar("PRAUTOINX")
+ pkgarch = d.getVar("PACKAGE_ARCH")
+ checksum = d.getVar("BB_TASKHASH")
- if d.getVar('PRSERV_LOCKDOWN', True):
- auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch, True) or d.getVar('PRAUTO_' + version, True) or None
+ if d.getVar('PRSERV_LOCKDOWN'):
+ auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
if auto_pr is None:
bb.fatal("Can NOT get PRAUTO from lockdown exported file")
d.setVar('PRAUTO',str(auto_pr))
return
try:
- conn = d.getVar("__PRSERV_CONN", True)
+ conn = d.getVar("__PRSERV_CONN")
if conn is None:
conn = oe.prservice.prserv_make_conn(d)
if conn is not None:
@@ -540,19 +588,19 @@ python package_get_auto_pr() {
LOCALEBASEPN ??= "${PN}"
python package_do_split_locales() {
- if (d.getVar('PACKAGE_NO_LOCALE', True) == '1'):
+ if (d.getVar('PACKAGE_NO_LOCALE') == '1'):
bb.debug(1, "package requested not splitting locales")
return
- packages = (d.getVar('PACKAGES', True) or "").split()
+ packages = (d.getVar('PACKAGES') or "").split()
- datadir = d.getVar('datadir', True)
+ datadir = d.getVar('datadir')
if not datadir:
bb.note("datadir not defined")
return
- dvar = d.getVar('PKGD', True)
- pn = d.getVar('LOCALEBASEPN', True)
+ dvar = d.getVar('PKGD')
+ pn = d.getVar('LOCALEBASEPN')
if pn + '-locale' in packages:
packages.remove(pn + '-locale')
@@ -565,10 +613,10 @@ python package_do_split_locales() {
locales = os.listdir(localedir)
- summary = d.getVar('SUMMARY', True) or pn
- description = d.getVar('DESCRIPTION', True) or ""
- locale_section = d.getVar('LOCALE_SECTION', True)
- mlprefix = d.getVar('MLPREFIX', True) or ""
+ summary = d.getVar('SUMMARY') or pn
+ description = d.getVar('DESCRIPTION') or ""
+ locale_section = d.getVar('LOCALE_SECTION')
+ mlprefix = d.getVar('MLPREFIX') or ""
for l in sorted(locales):
ln = legitimize_package_name(l)
pkg = pn + '-locale-' + ln
@@ -589,22 +637,22 @@ python package_do_split_locales() {
# glibc-localedata-translit* won't install as a dependency
# for some other package which breaks meta-toolchain
# Probably breaks since virtual-locale- isn't provided anywhere
- #rdep = (d.getVar('RDEPENDS_%s' % pn, True) or "").split()
+ #rdep = (d.getVar('RDEPENDS_%s' % pn) or "").split()
#rdep.append('%s-locale*' % pn)
#d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep))
}
python perform_packagecopy () {
- dest = d.getVar('D', True)
- dvar = d.getVar('PKGD', True)
+ import subprocess
+
+ dest = d.getVar('D')
+ dvar = d.getVar('PKGD')
# Start by package population by taking a copy of the installed
# files to operate on
# Preserve sparse files and hard links
- cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (dest, dvar)
- (retval, output) = oe.utils.getstatusoutput(cmd)
- if retval:
- bb.fatal("file copy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+ cmd = 'tar -cf - -C %s -p -S . | tar -xf - -C %s' % (dest, dvar)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
# replace RPATHs for the nativesdk binaries, to make them relocatable
if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
@@ -627,7 +675,7 @@ python fixup_perms () {
# __str__ can be used to print out an entry in the input format
#
# if fs_perms_entry.path is None:
- # an error occured
+ # an error occurred
# if fs_perms_entry.link, you can retrieve:
# fs_perms_entry.path = path
# fs_perms_entry.link = target of link
@@ -730,17 +778,19 @@ python fixup_perms () {
# paths are resolved via BBPATH
def get_fs_perms_list(d):
str = ""
- bbpath = d.getVar('BBPATH', True)
- fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES', True)
- if not fs_perms_tables:
- fs_perms_tables = 'files/fs-perms.txt'
+ bbpath = d.getVar('BBPATH')
+ fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES') or ""
for conf_file in fs_perms_tables.split():
- str += " %s" % bb.utils.which(bbpath, conf_file)
+ confpath = bb.utils.which(bbpath, conf_file)
+ if confpath:
+ str += " %s" % bb.utils.which(bbpath, conf_file)
+ else:
+ bb.warn("cannot find %s specified in FILESYSTEM_PERMS_TABLES" % conf_file)
return str
- dvar = d.getVar('PKGD', True)
+ dvar = d.getVar('PKGD')
fs_perms_table = {}
fs_link_table = {}
@@ -769,15 +819,16 @@ python fixup_perms () {
'oldincludedir' ]
for path in target_path_vars:
- dir = d.getVar(path, True) or ""
+ dir = d.getVar(path) or ""
if dir == "":
continue
- fs_perms_table[dir] = fs_perms_entry(bb.data.expand("%s 0755 root root false - - -" % (dir), d))
+ fs_perms_table[dir] = fs_perms_entry(d.expand("%s 0755 root root false - - -" % (dir)))
# Now we actually load from the configuration files
for conf in get_fs_perms_list(d).split():
- if os.path.exists(conf):
- f = open(conf)
+ if not os.path.exists(conf):
+ continue
+ with open(conf) as f:
for line in f:
if line.startswith('#'):
continue
@@ -798,7 +849,6 @@ python fixup_perms () {
fs_perms_table[entry.path] = entry
if entry.path in fs_link_table:
fs_link_table.pop(entry.path)
- f.close()
# Debug -- list out in-memory table
#for dir in fs_perms_table:
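Every parsed entry, whether read from a table file or synthesised for the target paths above, carries the same eight fields: path, mode, uid, gid, a walk flag, then the mode/uid/gid forced onto directory contents, with '-' meaning leave untouched. Hypothetical table lines (the link form is the alternate three-field entry feeding fs_link_table):

    # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
    ${docdir}            0755 root root true 0644 root root
    # link entries use the alternate form: <path> link <target>
    ${localstatedir}/run link /run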
@@ -853,26 +903,33 @@ python fixup_perms () {
python split_and_strip_files () {
import stat, errno
+ import subprocess
- dvar = d.getVar('PKGD', True)
- pn = d.getVar('PN', True)
+ dvar = d.getVar('PKGD')
+ pn = d.getVar('PN')
+ targetos = d.getVar('TARGET_OS')
oldcwd = os.getcwd()
os.chdir(dvar)
# We default to '.debug' style
- if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory':
+ if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
# Single debug-file-directory style debug info
debugappend = ".debug"
debugdir = ""
debuglibdir = "/usr/lib/debug"
debugsrcdir = "/usr/src/debug"
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-without-src':
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
# Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
debugappend = ""
debugdir = "/.debug"
debuglibdir = ""
debugsrcdir = ""
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ debugappend = ""
+ debugdir = "/.debug"
+ debuglibdir = ""
+ debugsrcdir = "/usr/src/debug"
else:
# Original OE-core, a.k.a. ".debug", style debug info
debugappend = ""
@@ -880,54 +937,30 @@ python split_and_strip_files () {
debuglibdir = ""
debugsrcdir = "/usr/src/debug"
- sourcefile = d.expand("${WORKDIR}/debugsources.list")
- bb.utils.remove(sourcefile)
-
- # Return type (bits):
- # 0 - not elf
- # 1 - ELF
- # 2 - stripped
- # 4 - executable
- # 8 - shared library
- # 16 - kernel module
- def isELF(path):
- type = 0
- ret, result = oe.utils.getstatusoutput("file \"%s\"" % path.replace("\"", "\\\""))
-
- if ret:
- msg = "split_and_strip_files: 'file %s' failed" % path
- package_qa_handle_error("split-strip", msg, d)
- return type
-
- # Not stripped
- if "ELF" in result:
- type |= 1
- if "not stripped" not in result:
- type |= 2
- if "executable" in result:
- type |= 4
- if "shared" in result:
- type |= 8
- return type
-
-
#
# First let's figure out all of the files we may have to process ... do this only once!
#
elffiles = {}
symlinks = {}
kernmods = []
+ staticlibs = []
inodes = {}
- libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir", True))
- baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir", True))
- if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1' or \
- d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1'):
+ libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
+ baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
+ skipfiles = (d.getVar("INHIBIT_PACKAGE_STRIP_FILES") or "").split()
+ if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1' or \
+ d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
+ checkelf = {}
+ checkelflinks = {}
for root, dirs, files in cpath.walk(dvar):
for f in files:
file = os.path.join(root, f)
if file.endswith(".ko") and file.find("/lib/modules/") != -1:
kernmods.append(file)
continue
+ if oe.package.is_static_lib(file):
+ staticlibs.append(file)
+ continue
# Skip debug files
if debugappend and file.endswith(debugappend):
@@ -935,6 +968,9 @@ python split_and_strip_files () {
if debugdir and debugdir in os.path.dirname(file[len(dvar):]):
continue
+ if file in skipfiles:
+ continue
+
try:
ltarget = cpath.realpath(file, dvar, False)
s = cpath.lstat(ltarget)
@@ -946,76 +982,97 @@ python split_and_strip_files () {
continue
if not s:
continue
- # Check its an excutable
+ # Check it's an executable
if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
or ((file.startswith(libdir) or file.startswith(baselibdir)) and (".so" in f or ".node" in f)):
- # If it's a symlink, and points to an ELF file, we capture the readlink target
+
if cpath.islink(file):
- target = os.readlink(file)
- if isELF(ltarget):
- #bb.note("Sym: %s (%d)" % (ltarget, isELF(ltarget)))
- symlinks[file] = target
+ checkelflinks[file] = ltarget
continue
+ # Use a reference of device ID and inode number to identify files
+ file_reference = "%d_%d" % (s.st_dev, s.st_ino)
+ checkelf[file] = (file, file_reference)
+
+ results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelflinks.values(), d)
+ results_map = {}
+ for (ltarget, elf_file) in results:
+ results_map[ltarget] = elf_file
+ for file in checkelflinks:
+ ltarget = checkelflinks[file]
+ # If it's a symlink, and points to an ELF file, we capture the readlink target
+ if results_map[ltarget]:
+ target = os.readlink(file)
+ #bb.note("Sym: %s (%d)" % (ltarget, results_map[ltarget]))
+ symlinks[file] = target
+
+ results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelf.keys(), d)
+
+ # Sort results by file path. This ensures that the files are always
+ # processed in the same order, which is important to make sure builds
+ # are reproducible when dealing with hardlinks
+ results.sort(key=lambda x: x[0])
+
+ for (file, elf_file) in results:
+ # It's a file (or hardlink), not a link
+ # ...but is it ELF, and is it already stripped?
+ if elf_file & 1:
+ if elf_file & 2:
+ if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split():
+ bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
+ else:
+ msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
+ package_qa_handle_error("already-stripped", msg, d)
+ continue
- # It's a file (or hardlink), not a link
- # ...but is it ELF, and is it already stripped?
- elf_file = isELF(file)
- if elf_file & 1:
- if elf_file & 2:
- if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
- bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
- else:
- msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
- package_qa_handle_error("already-stripped", msg, d)
- continue
-
- # At this point we have an unstripped elf file. We need to:
- # a) Make sure any file we strip is not hardlinked to anything else outside this tree
- # b) Only strip any hardlinked file once (no races)
- # c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
-
- # Use a reference of device ID and inode number to indentify files
- file_reference = "%d_%d" % (s.st_dev, s.st_ino)
- if file_reference in inodes:
- os.unlink(file)
- os.link(inodes[file_reference][0], file)
- inodes[file_reference].append(file)
- else:
- inodes[file_reference] = [file]
- # break hardlink
- bb.utils.copyfile(file, file)
- elffiles[file] = elf_file
- # Modified the file so clear the cache
- cpath.updatecache(file)
+ # At this point we have an unstripped elf file. We need to:
+ # a) Make sure any file we strip is not hardlinked to anything else outside this tree
+ # b) Only strip any hardlinked file once (no races)
+ # c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
+
+ # Use a reference of device ID and inode number to identify files
+ file_reference = checkelf[file][1]
+ if file_reference in inodes:
+ os.unlink(file)
+ os.link(inodes[file_reference][0], file)
+ inodes[file_reference].append(file)
+ else:
+ inodes[file_reference] = [file]
+ # break hardlink
+ bb.utils.break_hardlinks(file)
+ elffiles[file] = elf_file
+ # Modified the file so clear the cache
+ cpath.updatecache(file)
#
# First let's process debug splitting
#
- if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1'):
- for file in elffiles:
- src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
- fpath = dvar + dest
+ if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
+ results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d))
- # Split the file...
- bb.utils.mkdirhier(os.path.dirname(fpath))
- #bb.note("Split %s -> %s" % (file, fpath))
- # Only store off the hard link reference if we successfully split!
- splitdebuginfo(file, fpath, debugsrcdir, sourcefile, d)
+ if debugsrcdir and not targetos.startswith("mingw"):
+ for file in staticlibs:
+ results.extend(source_info(file, d, fatal=False))
+
+ sources = set()
+ for r in results:
+ sources.update(r[1])
# Hardlink our debug symbols to the other hardlink copies
for ref in inodes:
if len(inodes[ref]) == 1:
continue
+
+ target = inodes[ref][0][len(dvar):]
for file in inodes[ref][1:]:
src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(target) + debugappend
fpath = dvar + dest
- target = inodes[ref][0][len(dvar):]
ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend
bb.utils.mkdirhier(os.path.dirname(fpath))
- #bb.note("Link %s -> %s" % (fpath, ftarget))
- os.link(ftarget, fpath)
+ # Only one hardlink of separated debug info file in each directory
+ if not os.access(fpath, os.R_OK):
+ #bb.note("Link %s -> %s" % (fpath, ftarget))
+ os.link(ftarget, fpath)
# Create symlinks for all cases we were able to split symbols
for file in symlinks:
@@ -1046,7 +1103,7 @@ python split_and_strip_files () {
# Process the debugsrcdir if requested...
# This copies and places the referenced sources for later debugging...
- copydebugsources(debugsrcdir, d)
+ copydebugsources(debugsrcdir, sources, d)
#
# End of debug splitting
#
@@ -1054,8 +1111,8 @@ python split_and_strip_files () {
#
# Now let's go back over things and strip them
#
- if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
- strip = d.getVar("STRIP", True)
+ if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1'):
+ strip = d.getVar("STRIP")
sfiles = []
for file in elffiles:
elf_file = int(elffiles[file])
@@ -1064,7 +1121,7 @@ python split_and_strip_files () {
for f in kernmods:
sfiles.append((f, 16, strip))
- oe.utils.multiprocess_exec(sfiles, oe.package.runstrip)
+ oe.utils.multiprocess_launch(oe.package.runstrip, sfiles, d)
#
# End of strip
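Throughout this function elf_file is still the old isELF() bitmask, now computed by oe.package.is_elf(): 1 = ELF, 2 = already stripped, 4 = executable, 8 = shared library, 16 = kernel module (the literal value queued for .ko files above). A small decoding sketch:

    # Decode the is_elf()/isELF() bit flags used by split_and_strip_files().
    FLAGS = ((1, "ELF"), (2, "stripped"), (4, "executable"),
             (8, "shared library"), (16, "kernel module"))

    def describe(elf_file):
        names = [name for bit, name in FLAGS if elf_file & bit]
        return ", ".join(names) or "not ELF"

    print(describe(1 | 4))  # "ELF, executable" -- an unstripped ELF executable
    print(describe(16))     # kernel modules are queued for stripping with 16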
@@ -1075,31 +1132,48 @@ python split_and_strip_files () {
python populate_packages () {
import glob, re
- workdir = d.getVar('WORKDIR', True)
- outdir = d.getVar('DEPLOY_DIR', True)
- dvar = d.getVar('PKGD', True)
- packages = d.getVar('PACKAGES', True)
- pn = d.getVar('PN', True)
+ workdir = d.getVar('WORKDIR')
+ outdir = d.getVar('DEPLOY_DIR')
+ dvar = d.getVar('PKGD')
+ packages = d.getVar('PACKAGES').split()
+ pn = d.getVar('PN')
bb.utils.mkdirhier(outdir)
os.chdir(dvar)
- autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG", True) or False)
+ autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG") or False)
+
+ split_source_package = (d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg')
+
+ # If debug-with-srcpkg mode is enabled then add the source package if it
+ # doesn't exist and route the debug source files into it.
+ if split_source_package:
+ src_package_name = ('%s-src' % d.getVar('PN'))
+ if not src_package_name in packages:
+ packages.append(src_package_name)
+ d.setVar('FILES_%s' % src_package_name, '/usr/src/debug')
# Sanity check PACKAGES for duplicates
- # Sanity should be moved to sanity.bbclass once we have the infrastucture
- package_list = []
+ # Sanity should be moved to sanity.bbclass once we have the infrastructure
+ package_dict = {}
- for pkg in packages.split():
- if pkg in package_list:
+ for i, pkg in enumerate(packages):
+ if pkg in package_dict:
msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
package_qa_handle_error("packages-list", msg, d)
+ # Ensure the source package gets the chance to pick up the source files
+ # before the debug package by ordering it first in PACKAGES. Whether it
+ # actually picks up any source files is controlled by
+ # PACKAGE_DEBUG_SPLIT_STYLE.
+ elif pkg.endswith("-src"):
+ package_dict[pkg] = (10, i)
elif autodebug and pkg.endswith("-dbg"):
- package_list.insert(0, pkg)
+ package_dict[pkg] = (30, i)
else:
- package_list.append(pkg)
- d.setVar('PACKAGES', ' '.join(package_list))
- pkgdest = d.getVar('PKGDEST', True)
+ package_dict[pkg] = (50, i)
+ packages = sorted(package_dict.keys(), key=package_dict.get)
+ d.setVar('PACKAGES', ' '.join(packages))
+ pkgdest = d.getVar('PKGDEST')
seen = []
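Sorting on (weight, original index) tuples keeps -src packages first, then -dbg, then everything else, while preserving the recipe's own ordering inside each group; earlier packages get first claim on shared files. A compact check of the same scheme, with hypothetical package names and autodebug assumed on:

    packages = ["foo", "foo-dbg", "foo-dev", "foo-src"]
    package_dict = {}
    for i, pkg in enumerate(packages):
        if pkg.endswith("-src"):
            package_dict[pkg] = (10, i)
        elif pkg.endswith("-dbg"):
            package_dict[pkg] = (30, i)
        else:
            package_dict[pkg] = (50, i)
    print(sorted(package_dict.keys(), key=package_dict.get))
    # ['foo-src', 'foo-dbg', 'foo', 'foo-dev']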
@@ -1116,11 +1190,11 @@ python populate_packages () {
if "/.debug/" in path or path.endswith("/.debug"):
debug.append(path)
- for pkg in package_list:
+ for pkg in packages:
root = os.path.join(pkgdest, pkg)
bb.utils.mkdirhier(root)
- filesvar = d.getVar('FILES_%s' % pkg, True) or ""
+ filesvar = d.getVar('FILES_%s' % pkg) or ""
if "//" in filesvar:
msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
package_qa_handle_error("files-invalid", msg, d)
@@ -1143,7 +1217,8 @@ python populate_packages () {
src = os.path.join(src, p)
dest = os.path.join(dest, p)
fstat = cpath.stat(src)
- os.mkdir(dest, fstat.st_mode)
+ os.mkdir(dest)
+ os.chmod(dest, fstat.st_mode)
os.chown(dest, fstat.st_uid, fstat.st_gid)
if p not in seen:
seen.append(p)
@@ -1171,7 +1246,7 @@ python populate_packages () {
continue
ret = bb.utils.copyfile(file, fpath)
if ret is False or ret == 0:
- raise bb.build.FuncFailed("File population failed")
+ bb.fatal("File population failed")
# Check if symlink paths exist
for file in symlink_paths:
@@ -1187,9 +1262,10 @@ python populate_packages () {
# Handle LICENSE_EXCLUSION
package_list = []
- for pkg in packages.split():
- if d.getVar('LICENSE_EXCLUSION-' + pkg, True):
- msg = "%s has an incompatible license. Excluding from packaging." % pkg
+ for pkg in packages:
+ licenses = d.getVar('LICENSE_EXCLUSION-' + pkg)
+ if licenses:
+ msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
package_qa_handle_error("incompatible-license", msg, d)
else:
package_list.append(pkg)
@@ -1207,7 +1283,7 @@ python populate_packages () {
if unshipped != []:
msg = pn + ": Files/directories were installed but not shipped in any package:"
- if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
+ if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn) or "").split():
bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
else:
for f in unshipped:
@@ -1220,7 +1296,7 @@ populate_packages[dirs] = "${D}"
python package_fixsymlinks () {
import errno
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
packages = d.getVar("PACKAGES", False).split()
dangling_links = {}
@@ -1255,7 +1331,7 @@ python package_fixsymlinks () {
bb.note("%s contains dangling symlink to %s" % (pkg, l))
for pkg in newrdepends:
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
for p in newrdepends[pkg]:
if p not in rdepends:
rdepends[p] = []
@@ -1276,21 +1352,57 @@ EXPORT_FUNCTIONS package_name_hook
PKGDESTWORK = "${WORKDIR}/pkgdata"
+PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS RPROVIDES RRECOMMENDS RSUGGESTS RREPLACES RCONFLICTS SECTION PKG ALLOW_EMPTY FILES CONFFILES FILES_INFO pkg_postinst pkg_postrm pkg_preinst pkg_prerm"
+
python emit_pkgdata() {
from glob import glob
import json
+ def process_postinst_on_target(pkg, mlprefix):
+ pkgval = d.getVar('PKG_%s' % pkg)
+ if pkgval is None:
+ pkgval = pkg
+
+ defer_fragment = """
+if [ -n "$D" ]; then
+ $INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
+ exit 0
+fi
+""" % (pkgval, mlprefix)
+
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst_ontarget = d.getVar('pkg_postinst_ontarget_%s' % pkg)
+
+ if postinst_ontarget:
+ bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += defer_fragment
+ postinst += postinst_ontarget
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ def add_set_e_to_scriptlets(pkg):
+ for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
+ scriptlet = d.getVar('%s_%s' % (scriptlet_name, pkg))
+ if scriptlet:
+ scriptlet_split = scriptlet.split('\n')
+ if scriptlet_split[0].startswith("#!"):
+ scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
+ else:
+ scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
+ d.setVar('%s_%s' % (scriptlet_name, pkg), scriptlet)
+
def write_if_exists(f, pkg, var):
def encode(str):
import codecs
c = codecs.getencoder("unicode_escape")
return c(str)[0].decode("latin1")
- val = d.getVar('%s_%s' % (var, pkg), True)
+ val = d.getVar('%s_%s' % (var, pkg))
if val:
f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
return val
- val = d.getVar('%s' % (var), True)
+ val = d.getVar('%s' % (var))
if val:
f.write('%s: %s\n' % (var, encode(val)))
return val
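add_set_e_to_scriptlets() makes every maintainer scriptlet fail fast by injecting "set -e" right after the shebang, or at the very top when there is none. The transformation in isolation (the sample command is illustrative):

    def add_set_e(scriptlet):
        # Mirror of add_set_e_to_scriptlets(): "set -e" after the shebang,
        # or prepended when the scriptlet has no interpreter line.
        lines = scriptlet.split('\n')
        if lines[0].startswith("#!"):
            return lines[0] + "\nset -e\n" + "\n".join(lines[1:])
        return "set -e\n" + "\n".join(lines)

    print(add_set_e("#!/bin/sh\nupdate-something --now"))
    # #!/bin/sh
    # set -e
    # update-something --now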
@@ -1309,32 +1421,29 @@ python emit_pkgdata() {
with open(subdata_file, 'w') as fd:
fd.write("PKG_%s: %s" % (ml_pkg, pkg))
- packages = d.getVar('PACKAGES', True)
- pkgdest = d.getVar('PKGDEST', True)
- pkgdatadir = d.getVar('PKGDESTWORK', True)
-
- # Take shared lock since we're only reading, not writing
- lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
+ packages = d.getVar('PACKAGES')
+ pkgdest = d.getVar('PKGDEST')
+ pkgdatadir = d.getVar('PKGDESTWORK')
- data_file = pkgdatadir + d.expand("/${PN}" )
- f = open(data_file, 'w')
- f.write("PACKAGES: %s\n" % packages)
- f.close()
+ data_file = pkgdatadir + d.expand("/${PN}")
+ with open(data_file, 'w') as fd:
+ fd.write("PACKAGES: %s\n" % packages)
- pn = d.getVar('PN', True)
- global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS', True) or "").split()
- variants = (d.getVar('MULTILIB_VARIANTS', True) or "").split()
+ pn = d.getVar('PN')
+ global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
+ variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
write_extra_pkgs(variants, pn, packages, pkgdatadir)
- if (bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d)):
+ if bb.data.inherits_class('allarch', d) and not variants \
+ and not bb.data.inherits_class('packagegroup', d):
write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
for pkg in packages.split():
- pkgval = d.getVar('PKG_%s' % pkg, True)
+ pkgval = d.getVar('PKG_%s' % pkg)
if pkgval is None:
pkgval = pkg
d.setVar('PKG_%s' % pkg, pkg)
@@ -1342,61 +1451,45 @@ python emit_pkgdata() {
pkgdestpkg = os.path.join(pkgdest, pkg)
files = {}
total_size = 0
+ seen = set()
for f in pkgfiles[pkg]:
relpth = os.path.relpath(f, pkgdestpkg)
fstat = os.lstat(f)
- total_size += fstat.st_size
files[os.sep + relpth] = fstat.st_size
- d.setVar('FILES_INFO', json.dumps(files))
+ if fstat.st_ino not in seen:
+ seen.add(fstat.st_ino)
+ total_size += fstat.st_size
+ d.setVar('FILES_INFO', json.dumps(files, sort_keys=True))
+
+ process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
+ add_set_e_to_scriptlets(pkg)
subdata_file = pkgdatadir + "/runtime/%s" % pkg
- sf = open(subdata_file, 'w')
- write_if_exists(sf, pkg, 'PN')
- write_if_exists(sf, pkg, 'PE')
- write_if_exists(sf, pkg, 'PV')
- write_if_exists(sf, pkg, 'PR')
- write_if_exists(sf, pkg, 'PKGE')
- write_if_exists(sf, pkg, 'PKGV')
- write_if_exists(sf, pkg, 'PKGR')
- write_if_exists(sf, pkg, 'LICENSE')
- write_if_exists(sf, pkg, 'DESCRIPTION')
- write_if_exists(sf, pkg, 'SUMMARY')
- write_if_exists(sf, pkg, 'RDEPENDS')
- rprov = write_if_exists(sf, pkg, 'RPROVIDES')
- write_if_exists(sf, pkg, 'RRECOMMENDS')
- write_if_exists(sf, pkg, 'RSUGGESTS')
- write_if_exists(sf, pkg, 'RREPLACES')
- write_if_exists(sf, pkg, 'RCONFLICTS')
- write_if_exists(sf, pkg, 'SECTION')
- write_if_exists(sf, pkg, 'PKG')
- write_if_exists(sf, pkg, 'ALLOW_EMPTY')
- write_if_exists(sf, pkg, 'FILES')
- write_if_exists(sf, pkg, 'pkg_postinst')
- write_if_exists(sf, pkg, 'pkg_postrm')
- write_if_exists(sf, pkg, 'pkg_preinst')
- write_if_exists(sf, pkg, 'pkg_prerm')
- write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
- write_if_exists(sf, pkg, 'FILES_INFO')
- for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg, True) or "").split():
- write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
-
- write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
- for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg, True) or "").split():
- write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
-
- sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
- sf.close()
+ with open(subdata_file, 'w') as sf:
+ for var in (d.getVar('PKGDATA_VARS') or "").split():
+ val = write_if_exists(sf, pkg, var)
+
+ write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
+ for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg) or "").split():
+ write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
+
+ write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
+ for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg) or "").split():
+ write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
+
+ sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
# Symlinks needed for rprovides lookup
+ rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES')
if rprov:
for p in rprov.strip().split():
subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
bb.utils.mkdirhier(os.path.dirname(subdata_sym))
oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
- allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg, True)
+ allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg)
if not allow_empty:
- allow_empty = d.getVar('ALLOW_EMPTY', True)
+ allow_empty = d.getVar('ALLOW_EMPTY')
root = "%s/%s" % (pkgdest, pkg)
os.chdir(root)
g = glob('*')
@@ -1411,10 +1504,10 @@ python emit_pkgdata() {
if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
write_extra_runtime_pkgs(variants, packages, pkgdatadir)
- if bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d):
+ if bb.data.inherits_class('allarch', d) and not variants \
+ and not bb.data.inherits_class('packagegroup', d):
write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
- bb.utils.unlockfile(lf)
}
emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
@@ -1424,7 +1517,7 @@ if [ x"$D" = "x" ]; then
fi
}
-RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/bin/rpmdeps-oecore --macros ${STAGING_LIBDIR_NATIVE}/rpm/macros --define '_rpmfc_magic_path ${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc' --rpmpopt ${STAGING_LIBDIR_NATIVE}/rpm/rpmpopt"
+RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps"
# Collect perfile run-time dependency metadata
# Output:
@@ -1435,26 +1528,26 @@ RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/bin/rpmdeps-oecore --macros ${STAGING_LI
# FILERDEPENDS_filepath_pkg - per file dep
python package_do_filedeps() {
- if d.getVar('SKIP_FILEDEPS', True) == '1':
+ if d.getVar('SKIP_FILEDEPS') == '1':
return
- pkgdest = d.getVar('PKGDEST', True)
- packages = d.getVar('PACKAGES', True)
- rpmdeps = d.getVar('RPMDEPS', True)
+ pkgdest = d.getVar('PKGDEST')
+ packages = d.getVar('PACKAGES')
+ rpmdeps = d.getVar('RPMDEPS')
def chunks(files, n):
return [files[i:i+n] for i in range(0, len(files), n)]
pkglist = []
for pkg in packages.split():
- if d.getVar('SKIP_FILEDEPS_' + pkg, True) == '1':
+ if d.getVar('SKIP_FILEDEPS_' + pkg) == '1':
continue
- if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-'):
+ if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-') or pkg.endswith('-src'):
continue
for files in chunks(pkgfiles[pkg], 100):
pkglist.append((pkg, files, rpmdeps, pkgdest))
- processed = oe.utils.multiprocess_exec( pkglist, oe.package.filedeprunner)
+ processed = oe.utils.multiprocess_launch(oe.package.filedeprunner, pkglist, d)
provides_files = {}
requires_files = {}
@@ -1467,15 +1560,15 @@ python package_do_filedeps() {
if pkg not in requires_files:
requires_files[pkg] = []
- for file in provides:
+ for file in sorted(provides):
provides_files[pkg].append(file)
key = "FILERPROVIDES_" + file + "_" + pkg
- d.setVar(key, " ".join(provides[file]))
+ d.appendVar(key, " " + " ".join(provides[file]))
- for file in requires:
+ for file in sorted(requires):
requires_files[pkg].append(file)
key = "FILERDEPENDS_" + file + "_" + pkg
- d.setVar(key, " ".join(requires[file]))
+ d.appendVar(key, " " + " ".join(requires[file]))
for pkg in requires_files:
d.setVar("FILERDEPENDSFLIST_" + pkg, " ".join(requires_files[pkg]))
@@ -1483,71 +1576,85 @@ python package_do_filedeps() {
d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
}
-SHLIBSDIRS = "${PKGDATA_DIR}/${MLPREFIX}shlibs2"
+SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"
python package_do_shlibs() {
+ import itertools
import re, pipes
- import subprocess as sub
+ import subprocess
exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', False)
if exclude_shlibs:
bb.note("not generating shlibs")
return
- lib_re = re.compile("^.*\.so")
- libdir_re = re.compile(".*/%s$" % d.getVar('baselib', True))
+ lib_re = re.compile(r"^.*\.so")
+ libdir_re = re.compile(r".*/%s$" % d.getVar('baselib'))
+
+ packages = d.getVar('PACKAGES')
+
+ shlib_pkgs = []
+ exclusion_list = d.getVar("EXCLUDE_PACKAGES_FROM_SHLIBS")
+ if exclusion_list:
+ for pkg in packages.split():
+ if pkg not in exclusion_list.split():
+ shlib_pkgs.append(pkg)
+ else:
+ bb.note("not generating shlibs for %s" % pkg)
+ else:
+ shlib_pkgs = packages.split()
- packages = d.getVar('PACKAGES', True)
- targetos = d.getVar('TARGET_OS', True)
+ targetos = d.getVar('TARGET_OS')
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
- ver = d.getVar('PKGV', True)
+ ver = d.getVar('PKGV')
if not ver:
msg = "PKGV not defined"
package_qa_handle_error("pkgv-undefined", msg, d)
return
- pkgdest = d.getVar('PKGDEST', True)
-
- shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
+ pkgdest = d.getVar('PKGDEST')
- # Take shared lock since we're only reading, not writing
- lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR')
- def linux_so(file, needed, sonames, renames, pkgver):
+ def linux_so(file, pkg, pkgver, d):
needs_ldconfig = False
+ needed = set()
+ sonames = set()
+ renames = []
ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
- cmd = d.getVar('OBJDUMP', True) + " -p " + pipes.quote(file) + " 2>/dev/null"
+ cmd = d.getVar('OBJDUMP') + " -p " + pipes.quote(file) + " 2>/dev/null"
fd = os.popen(cmd)
lines = fd.readlines()
fd.close()
- rpath = []
+ rpath = tuple()
for l in lines:
- m = re.match("\s+RPATH\s+([^\s]*)", l)
+ m = re.match(r"\s+RPATH\s+([^\s]*)", l)
if m:
rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
- rpath = list(map(os.path.normpath, rpaths))
+ rpath = tuple(map(os.path.normpath, rpaths))
for l in lines:
- m = re.match("\s+NEEDED\s+([^\s]*)", l)
+ m = re.match(r"\s+NEEDED\s+([^\s]*)", l)
if m:
dep = m.group(1)
- if dep not in needed[pkg]:
- needed[pkg].append((dep, file, rpath))
- m = re.match("\s+SONAME\s+([^\s]*)", l)
+ if dep not in needed:
+ needed.add((dep, file, rpath))
+ m = re.match(r"\s+SONAME\s+([^\s]*)", l)
if m:
this_soname = m.group(1)
prov = (this_soname, ldir, pkgver)
if not prov in sonames:
# if library is private (only used by package) then do not build shlib for it
- if not private_libs or this_soname not in private_libs:
- sonames.append(prov)
+ import fnmatch
+ if not private_libs or len([i for i in private_libs if fnmatch.fnmatch(this_soname, i)]) == 0:
+ sonames.add(prov)
if libdir_re.match(os.path.dirname(file)):
needs_ldconfig = True
if snap_symlinks and (os.path.basename(file) != this_soname):
renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
- return needs_ldconfig
+ return (needs_ldconfig, needed, sonames, renames)
def darwin_so(file, needed, sonames, renames, pkgver):
if not os.path.exists(file):
@@ -1567,7 +1674,7 @@ python package_do_shlibs() {
combos.append("-".join(options[0:i]))
return combos
- if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg'):
+ if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg') and not pkg.endswith('-src'):
# Drop suffix
name = os.path.basename(file).rsplit(".",1)[0]
# Find all combinations
@@ -1575,10 +1682,10 @@ python package_do_shlibs() {
for combo in combos:
if not combo in sonames:
prov = (combo, ldir, pkgver)
- sonames.append(prov)
+ sonames.add(prov)
if file.endswith('.dylib') or file.endswith('.so'):
rpath = []
- p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file],stdout=sub.PIPE,stderr=sub.PIPE)
+ p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
# If returned successfully, process stdout for results
if p.returncode == 0:
@@ -1587,7 +1694,7 @@ python package_do_shlibs() {
if l.startswith('path '):
rpath.append(l.split()[1])
- p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file],stdout=sub.PIPE,stderr=sub.PIPE)
+ p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
# If returned successfully, process stdout for results
if p.returncode == 0:
@@ -1599,78 +1706,102 @@ python package_do_shlibs() {
continue
name = os.path.basename(l.split()[0]).rsplit(".", 1)[0]
if name and name not in needed[pkg]:
- needed[pkg].append((name, file, []))
+ needed[pkg].add((name, file, tuple()))
+
+ def mingw_dll(file, needed, sonames, renames, pkgver):
+ if not os.path.exists(file):
+ return
+
+ if file.endswith(".dll"):
+ # assume all dlls are shared objects provided by the package
+ sonames.add((os.path.basename(file), os.path.dirname(file).replace(pkgdest + "/" + pkg, ''), pkgver))
- if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS', True) == "1":
+ if (file.endswith(".dll") or file.endswith(".exe")):
+ # use objdump to search for "DLL Name: .*\.dll"
+ p = subprocess.Popen([d.expand("${HOST_PREFIX}objdump"), "-p", file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ # process the output, grabbing all .dll names
+ if p.returncode == 0:
+ for m in re.finditer(r"DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE):
+ dllname = m.group(1)
+ if dllname:
+ needed[pkg].add((dllname, file, tuple()))
+
+ if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS') == "1":
snap_symlinks = True
else:
snap_symlinks = False
- if (d.getVar('USE_LDCONFIG', True) or "1") == "1":
- use_ldconfig = True
- else:
- use_ldconfig = False
-
needed = {}
+
shlib_provider = oe.package.read_shlib_providers(d)
- for pkg in packages.split():
- private_libs = d.getVar('PRIVATE_LIBS_' + pkg, True) or d.getVar('PRIVATE_LIBS', True) or ""
+ for pkg in shlib_pkgs:
+ private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
private_libs = private_libs.split()
needs_ldconfig = False
bb.debug(2, "calculating shlib provides for %s" % pkg)
- pkgver = d.getVar('PKGV_' + pkg, True)
+ pkgver = d.getVar('PKGV_' + pkg)
if not pkgver:
- pkgver = d.getVar('PV_' + pkg, True)
+ pkgver = d.getVar('PV_' + pkg)
if not pkgver:
pkgver = ver
- needed[pkg] = []
- sonames = list()
- renames = list()
+ needed[pkg] = set()
+ sonames = set()
+ renames = []
+ linuxlist = []
for file in pkgfiles[pkg]:
soname = None
if cpath.islink(file):
continue
if targetos == "darwin" or targetos == "darwin8":
darwin_so(file, needed, sonames, renames, pkgver)
+ elif targetos.startswith("mingw"):
+ mingw_dll(file, needed, sonames, renames, pkgver)
elif os.access(file, os.X_OK) or lib_re.match(file):
- ldconfig = linux_so(file, needed, sonames, renames, pkgver)
- needs_ldconfig = needs_ldconfig or ldconfig
+ linuxlist.append(file)
+
+ if linuxlist:
+ results = oe.utils.multiprocess_launch(linux_so, linuxlist, d, extraargs=(pkg, pkgver, d))
+ for r in results:
+ ldconfig = r[0]
+ needed[pkg] |= r[1]
+ sonames |= r[2]
+ renames.extend(r[3])
+ needs_ldconfig = needs_ldconfig or ldconfig
+
for (old, new) in renames:
bb.note("Renaming %s to %s" % (old, new))
os.rename(old, new)
pkgfiles[pkg].remove(old)
-
+
shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
if len(sonames):
- fd = open(shlibs_file, 'w')
- for s in sonames:
- if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
- (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
- if old_pkg != pkg:
- bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
- bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
- fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
- if s[0] not in shlib_provider:
- shlib_provider[s[0]] = {}
- shlib_provider[s[0]][s[1]] = (pkg, pkgver)
- fd.close()
- if needs_ldconfig and use_ldconfig:
+ with open(shlibs_file, 'w') as fd:
+ for s in sonames:
+ if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
+ (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
+ if old_pkg != pkg:
+ bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
+ bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
+ fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
+ if s[0] not in shlib_provider:
+ shlib_provider[s[0]] = {}
+ shlib_provider[s[0]][s[1]] = (pkg, pkgver)
+ if needs_ldconfig:
bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('ldconfig_postinst_fragment', True)
+ postinst += d.getVar('ldconfig_postinst_fragment')
d.setVar('pkg_postinst_%s' % pkg, postinst)
bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
- bb.utils.unlockfile(lf)
-
- assumed_libs = d.getVar('ASSUME_SHLIBS', True)
+ assumed_libs = d.getVar('ASSUME_SHLIBS')
if assumed_libs:
- libdir = d.getVar("libdir", True)
+ libdir = d.getVar("libdir")
for e in assumed_libs.split():
l, dep_pkg = e.split(":")
lib_ver = None
@@ -1682,11 +1813,14 @@ python package_do_shlibs() {
shlib_provider[l] = {}
shlib_provider[l][libdir] = (dep_pkg, lib_ver)
- libsearchpath = [d.getVar('libdir', True), d.getVar('base_libdir', True)]
+ libsearchpath = [d.getVar('libdir'), d.getVar('base_libdir')]
- for pkg in packages.split():
+ for pkg in shlib_pkgs:
bb.debug(2, "calculating shlib requirements for %s" % pkg)
+ private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = private_libs.split()
+
deps = list()
for n in needed[pkg]:
# if n is in private libraries, don't try to search provider for it
@@ -1694,20 +1828,21 @@ python package_do_shlibs() {
# /opt/abc/lib/libfoo.so.1 and contains /usr/bin/abc depending on system library libfoo.so.1
# but skipping it is still a better alternative than providing our own
# version and then adding a runtime dependency on the same system library
- if private_libs and n[0] in private_libs:
+ import fnmatch
+ if private_libs and len([i for i in private_libs if fnmatch.fnmatch(n[0], i)]) > 0:
bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
continue
if n[0] in shlib_provider.keys():
- shlib_provider_path = []
- for k in shlib_provider[n[0]].keys():
- shlib_provider_path.append(k)
- match = None
- for p in n[2] + shlib_provider_path + libsearchpath:
- if p in shlib_provider[n[0]]:
- match = p
- break
- if match:
- (dep_pkg, ver_needed) = shlib_provider[n[0]][match]
+ shlib_provider_map = shlib_provider[n[0]]
+ matches = set()
+ for p in itertools.chain(list(n[2]), sorted(shlib_provider_map.keys()), libsearchpath):
+ if p in shlib_provider_map:
+ matches.add(p)
+ if len(matches) > 1:
+ matchpkgs = ', '.join([shlib_provider_map[match][0] for match in matches])
+ bb.error("%s: Multiple shlib providers for %s: %s (used by files: %s)" % (pkg, n[0], matchpkgs, n[1]))
+ elif len(matches) == 1:
+ (dep_pkg, ver_needed) = shlib_provider_map[matches.pop()]
bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
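PRIVATE_LIBS entries are now treated as fnmatch patterns rather than exact sonames. A small worked example of the test above, with hypothetical values:

    import fnmatch
    private_libs = ["libfoo*", "libinternal.so.1"]
    n0 = "libfoo.so.2"   # soname under consideration
    skip = any(fnmatch.fnmatch(n0, p) for p in private_libs)  # True: matches "libfoo*"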
@@ -1726,26 +1861,25 @@ python package_do_shlibs() {
deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
if os.path.exists(deps_file):
os.remove(deps_file)
- if len(deps):
- fd = open(deps_file, 'w')
- for dep in deps:
- fd.write(dep + '\n')
- fd.close()
+ if deps:
+ with open(deps_file, 'w') as fd:
+ for dep in sorted(deps):
+ fd.write(dep + '\n')
}
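Each <pkg>.list written to SHLIBSWORKDIR above carries one soname per line in soname:path:version form. A hypothetical file and a matching reader (names are illustrative):

    # libfoo1.list might contain:
    #   libfoo.so.1:/usr/lib:1.2.3
    with open("libfoo1.list") as f:
        for line in f:
            soname, path, version = line.rstrip("\n").split(":")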
python package_do_pkgconfig () {
import re
- packages = d.getVar('PACKAGES', True)
- workdir = d.getVar('WORKDIR', True)
- pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES')
+ workdir = d.getVar('WORKDIR')
+ pkgdest = d.getVar('PKGDEST')
- shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
- shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
+ shlibs_dirs = d.getVar('SHLIBSDIRS').split()
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR')
- pc_re = re.compile('(.*)\.pc$')
- var_re = re.compile('(.*)=(.*)')
- field_re = re.compile('(.*): (.*)')
+ pc_re = re.compile(r'(.*)\.pc$')
+ var_re = re.compile(r'(.*)=(.*)')
+ field_re = re.compile(r'(.*): (.*)')
pkgconfig_provided = {}
pkgconfig_needed = {}
@@ -1760,9 +1894,8 @@ python package_do_pkgconfig () {
pkgconfig_provided[pkg].append(name)
if not os.access(file, os.R_OK):
continue
- f = open(file, 'r')
- lines = f.readlines()
- f.close()
+ with open(file, 'r') as f:
+ lines = f.readlines()
for l in lines:
m = var_re.match(l)
if m:
@@ -1773,32 +1906,27 @@ python package_do_pkgconfig () {
m = field_re.match(l)
if m:
hdr = m.group(1)
- exp = bb.data.expand(m.group(2), pd)
+ exp = pd.expand(m.group(2))
if hdr == 'Requires':
pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
- # Take shared lock since we're only reading, not writing
- lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
-
for pkg in packages.split():
pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
if pkgconfig_provided[pkg] != []:
- f = open(pkgs_file, 'w')
- for p in pkgconfig_provided[pkg]:
- f.write('%s\n' % p)
- f.close()
+ with open(pkgs_file, 'w') as f:
+ for p in pkgconfig_provided[pkg]:
+ f.write('%s\n' % p)
# Go from least to most specific since the last one found wins
for dir in reversed(shlibs_dirs):
if not os.path.exists(dir):
continue
- for file in os.listdir(dir):
- m = re.match('^(.*)\.pclist$', file)
+ for file in sorted(os.listdir(dir)):
+ m = re.match(r'^(.*)\.pclist$', file)
if m:
pkg = m.group(1)
- fd = open(os.path.join(dir, file))
- lines = fd.readlines()
- fd.close()
+ with open(os.path.join(dir, file)) as fd:
+ lines = fd.readlines()
pkgconfig_provided[pkg] = []
for l in lines:
pkgconfig_provided[pkg].append(l.rstrip())
@@ -1816,25 +1944,21 @@ python package_do_pkgconfig () {
bb.note("couldn't find pkgconfig module '%s' in any package" % n)
deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
if len(deps):
- fd = open(deps_file, 'w')
- for dep in deps:
- fd.write(dep + '\n')
- fd.close()
-
- bb.utils.unlockfile(lf)
+ with open(deps_file, 'w') as fd:
+ for dep in deps:
+ fd.write(dep + '\n')
}
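The var_re/field_re pair above parses .pc files: NAME=value lines populate a per-file variable table, and 'Field: value' lines feed pkgconfig_needed when the field is Requires. A standalone paraphrase over a hypothetical .pc snippet:

    import re
    var_re = re.compile(r'(.*)=(.*)')
    field_re = re.compile(r'(.*): (.*)')
    pcvars, needed = {}, []
    for l in ["prefix=/usr", "Requires: gobject-2.0, gio-2.0"]:
        m = var_re.match(l)
        if m:
            pcvars[m.group(1)] = m.group(2)
            continue
        m = field_re.match(l)
        if m and m.group(1) == 'Requires':
            needed += m.group(2).replace(',', ' ').split()
    # pcvars == {'prefix': '/usr'}; needed == ['gobject-2.0', 'gio-2.0']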
def read_libdep_files(d):
pkglibdeps = {}
- packages = d.getVar('PACKAGES', True).split()
+ packages = d.getVar('PACKAGES').split()
for pkg in packages:
pkglibdeps[pkg] = {}
for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
depsfile = d.expand("${PKGDEST}/" + pkg + extension)
if os.access(depsfile, os.R_OK):
- fd = open(depsfile)
- lines = fd.readlines()
- fd.close()
+ with open(depsfile) as fd:
+ lines = fd.readlines()
for l in lines:
l.rstrip()
deps = bb.utils.explode_dep_versions2(l)
@@ -1846,10 +1970,10 @@ def read_libdep_files(d):
python read_shlibdeps () {
pkglibdeps = read_libdep_files(d)
- packages = d.getVar('PACKAGES', True).split()
+ packages = d.getVar('PACKAGES').split()
for pkg in packages:
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
- for dep in pkglibdeps[pkg]:
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
+ for dep in sorted(pkglibdeps[pkg]):
# Add the dep if it's not already there, or if no comparison is set
if dep not in rdepends:
rdepends[dep] = []
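read_shlibdeps merges the per-extension dep files into RDEPENDS via bb.utils.explode_dep_versions2, which parses a dependency string into an ordered, dict-like mapping of name -> list of version constraints. A small illustration, assuming bb is in scope as in the code above:

    rdeps = bb.utils.explode_dep_versions2("libfoo (>= 1.2) bar")
    # -> {'libfoo': ['>= 1.2'], 'bar': []}
    rdeps.setdefault("baz", [])            # add a dep with no constraint
    s = bb.utils.join_deps(rdeps, commasep=False)
    # -> "libfoo (>= 1.2) bar baz"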
@@ -1873,16 +1997,16 @@ python package_depchains() {
package.
"""
- packages = d.getVar('PACKAGES', True)
- postfixes = (d.getVar('DEPCHAIN_POST', True) or '').split()
- prefixes = (d.getVar('DEPCHAIN_PRE', True) or '').split()
+ packages = d.getVar('PACKAGES')
+ postfixes = (d.getVar('DEPCHAIN_POST') or '').split()
+ prefixes = (d.getVar('DEPCHAIN_PRE') or '').split()
def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
#bb.note('depends for %s is %s' % (base, depends))
- rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "")
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
- for depend in depends:
+ for depend in sorted(depends):
if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
#bb.note("Skipping %s" % depend)
continue
@@ -1901,9 +2025,9 @@ python package_depchains() {
def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
#bb.note('rdepends for %s is %s' % (base, rdepends))
- rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "")
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
- for depend in rdepends:
+ for depend in sorted(rdepends):
if depend.find('virtual-locale-') != -1:
#bb.note("Skipping %s" % depend)
continue
@@ -1924,12 +2048,12 @@ python package_depchains() {
list.append(dep)
depends = []
- for dep in bb.utils.explode_deps(d.getVar('DEPENDS', True) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('DEPENDS') or ""):
add_dep(depends, dep)
rdepends = []
for pkg in packages.split():
- for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg, True) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg) or ""):
add_dep(rdepends, dep)
#bb.note('rdepends is %s' % rdepends)
@@ -1959,11 +2083,11 @@ python package_depchains() {
for pkg in pkglibdeps:
for k in pkglibdeps[pkg]:
add_dep(pkglibdeplist, k)
- dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS', True) == '1') or (bb.data.inherits_class('packagegroup', d)))
+ dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS') == '1') or (bb.data.inherits_class('packagegroup', d)))
for suffix in pkgs:
for pkg in pkgs[suffix]:
- if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', True):
+ if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'):
continue
(base, func) = pkgs[suffix][pkg]
if suffix == "-dev":
@@ -1976,19 +2100,19 @@ python package_depchains() {
pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
else:
rdeps = []
- for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base, True) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base) or ""):
add_dep(rdeps, dep)
pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
}
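In effect, package_depchains mirrors a package's dependency graph onto its -dev/-dbg companions: with hypothetical recipes, if foo RDEPENDS on bar, foo-dev picks up an RRECOMMENDS on bar-dev, and (when DEPCHAIN_DBGDEFAULTDEPS allows) foo-dbg on bar-dbg. The suffix mapping is essentially:

    def getname_suffixed(pkg, suffix):
        # illustrative: the real code also consults PKG_<name> renames
        return pkg + suffix

    getname_suffixed("bar", "-dev")   # -> "bar-dev"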
# Since bitbake can't determine which variables are accessed during package
# iteration, we need to list them here:
-PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE"
+PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm pkg_postinst_ontarget INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE SKIP_FILEDEPS PRIVATE_LIBS"
def gen_packagevar(d):
ret = []
- pkgs = (d.getVar("PACKAGES", True) or "").split()
- vars = (d.getVar("PACKAGEVARS", True) or "").split()
+ pkgs = (d.getVar("PACKAGES") or "").split()
+ vars = (d.getVar("PACKAGEVARS") or "").split()
for p in pkgs:
for v in vars:
ret.append(v + "_" + p)
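gen_packagevar simply crosses PACKAGES with PACKAGEVARS so bitbake can track per-package overrides in task hashes. For hypothetical PACKAGES = "foo foo-dev" the generated names include:

    pkgs = "foo foo-dev".split()
    vars = "FILES RDEPENDS".split()
    ret = [v + "_" + p for p in pkgs for v in vars]
    # -> ['FILES_foo', 'RDEPENDS_foo', 'FILES_foo-dev', 'RDEPENDS_foo-dev']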
@@ -2001,6 +2125,7 @@ def gen_packagevar(d):
PACKAGE_PREPROCESS_FUNCS ?= ""
# Functions for setting up PKGD
PACKAGEBUILDPKGD ?= " \
+ package_prepare_pkgdata \
perform_packagecopy \
${PACKAGE_PREPROCESS_FUNCS} \
split_and_strip_files \
@@ -2026,7 +2151,7 @@ python do_package () {
# cache. This is useful if an item this class depends on changes in a
# way that the output of this class changes. rpmdeps is a good example
# as any change to rpmdeps requires this to be rerun.
- # PACKAGE_BBCLASS_VERSION = "1"
+ # PACKAGE_BBCLASS_VERSION = "2"
# Init cachedpath
global cpath
@@ -2036,16 +2161,16 @@ python do_package () {
# Sanity test the setup
###########################################################################
- packages = (d.getVar('PACKAGES', True) or "").split()
+ packages = (d.getVar('PACKAGES') or "").split()
if len(packages) < 1:
bb.debug(1, "No packages to build, skipping do_package")
return
- workdir = d.getVar('WORKDIR', True)
- outdir = d.getVar('DEPLOY_DIR', True)
- dest = d.getVar('D', True)
- dvar = d.getVar('PKGD', True)
- pn = d.getVar('PN', True)
+ workdir = d.getVar('WORKDIR')
+ outdir = d.getVar('DEPLOY_DIR')
+ dest = d.getVar('D')
+ dvar = d.getVar('PKGD')
+ pn = d.getVar('PN')
if not workdir or not outdir or not dest or not dvar or not pn:
msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
@@ -2063,7 +2188,7 @@ python do_package () {
# code pre-expands some frequently used variables
def expandVar(x, d):
- d.setVar(x, d.getVar(x, True))
+ d.setVar(x, d.getVar(x))
for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
expandVar(x, d)
@@ -2072,7 +2197,7 @@ python do_package () {
# Setup PKGD (from D)
###########################################################################
- for f in (d.getVar('PACKAGEBUILDPKGD', True) or '').split():
+ for f in (d.getVar('PACKAGEBUILDPKGD') or '').split():
bb.build.exec_func(f, d)
###########################################################################
@@ -2081,7 +2206,7 @@ python do_package () {
cpath = oe.cachedpath.CachedPath()
- for f in (d.getVar('PACKAGESPLITFUNCS', True) or '').split():
+ for f in (d.getVar('PACKAGESPLITFUNCS') or '').split():
bb.build.exec_func(f, d)
###########################################################################
@@ -2091,18 +2216,18 @@ python do_package () {
# Build global list of files in each split package
global pkgfiles
pkgfiles = {}
- packages = d.getVar('PACKAGES', True).split()
- pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES').split()
+ pkgdest = d.getVar('PKGDEST')
for pkg in packages:
pkgfiles[pkg] = []
for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
for file in files:
pkgfiles[pkg].append(walkroot + os.sep + file)
- for f in (d.getVar('PACKAGEFUNCS', True) or '').split():
+ for f in (d.getVar('PACKAGEFUNCS') or '').split():
bb.build.exec_func(f, d)
- qa_sane = d.getVar("QA_SANE", True)
+ qa_sane = d.getVar("QA_SANE")
if not qa_sane:
bb.fatal("Fatal QA errors found, failing task.")
}
@@ -2111,11 +2236,9 @@ do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
addtask package after do_install
-PACKAGELOCK = "${STAGING_DIR}/package-output.lock"
SSTATETASKS += "do_package"
do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
-do_package[sstate-lockfile-shared] = "${PACKAGELOCK}"
do_package_setscene[dirs] = "${STAGING_DIR}"
python do_package_setscene () {
@@ -2123,17 +2246,20 @@ python do_package_setscene () {
}
addtask do_package_setscene
-do_packagedata () {
- :
+# Copy from PKGDESTWORK to tempdirectory as tempdirectory can be cleaned at both
+# do_package_setscene and do_packagedata_setscene leading to races
+python do_packagedata () {
+ src = d.expand("${PKGDESTWORK}")
+ dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
+ oe.path.copyhardlinktree(src, dest)
}
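oe.path.copyhardlinktree duplicates the tree without copying file contents, so the staged pkgdata survives a clean of either setscene task cheaply. A rough sketch of the hardlink idea (the real helper also handles ownership and falls back to plain copies across filesystems, which this ignores):

    import os

    def hardlink_tree(src, dst):
        for root, dirs, files in os.walk(src):
            destdir = os.path.join(dst, os.path.relpath(root, src))
            os.makedirs(destdir, exist_ok=True)
            for f in files:
                # hardlink: same inode, no data copied
                os.link(os.path.join(root, f), os.path.join(destdir, f))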
addtask packagedata before do_build after do_package
SSTATETASKS += "do_packagedata"
-do_packagedata[sstate-inputdirs] = "${PKGDESTWORK}"
+do_packagedata[sstate-inputdirs] = "${WORKDIR}/pkgdata-pdata-input"
do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
-do_packagedata[sstate-lockfile-shared] = "${PACKAGELOCK}"
-do_packagedata[stamp-extra-info] = "${MACHINE}"
+do_packagedata[stamp-extra-info] = "${MACHINE_ARCH}"
python do_packagedata_setscene () {
sstate_setscene(d)
@@ -2149,7 +2275,7 @@ def mapping_rename_hook(d):
Rewrite variables to account for package renaming in things
like debian.bbclass or manual PKG variable name changes
"""
- pkg = d.getVar("PKG", True)
+ pkg = d.getVar("PKG")
runtime_mapping_rename("RDEPENDS", pkg, d)
runtime_mapping_rename("RRECOMMENDS", pkg, d)
runtime_mapping_rename("RSUGGESTS", pkg, d)
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
index 4e5dc12850..790b26aef2 100644
--- a/meta/classes/package_deb.bbclass
+++ b/meta/classes/package_deb.bbclass
@@ -6,17 +6,21 @@ inherit package
IMAGE_PKGTYPE ?= "deb"
-DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH', True), d.getVar('TUNE_FEATURES', True))}"
+DPKG_BUILDCMD ??= "dpkg-deb"
+
+DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'))}"
DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"
PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
APTCONF_TARGET = "${WORKDIR}"
-APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
+APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
def debian_arch_map(arch, tune):
tune_features = tune.split()
+ if arch == "allarch":
+ return "all"
if arch in ["i586", "i686"]:
return "i386"
if arch == "x86_64":
@@ -37,50 +41,31 @@ def debian_arch_map(arch, tune):
if arch == "arm":
return arch + ["el", "hf"]["callconvention-hard" in tune_features]
return arch
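A few worked mappings from the branches visible above (the x86_64 and mips cases fall outside this hunk, so only the visible ones are shown):

    assert debian_arch_map("allarch", "") == "all"
    assert debian_arch_map("i686", "") == "i386"
    assert debian_arch_map("arm", "") == "armel"
    assert debian_arch_map("arm", "callconvention-hard") == "armhf"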
-#
-# install a bunch of packages using apt
-# the following shell variables needs to be set before calling this func:
-# INSTALL_ROOTFS_DEB - install root dir
-# INSTALL_BASEARCH_DEB - install base architecutre
-# INSTALL_ARCHS_DEB - list of available archs
-# INSTALL_PACKAGES_NORMAL_DEB - packages to be installed
-# INSTALL_PACKAGES_ATTEMPTONLY_DEB - packages attempted to be installed only
-# INSTALL_PACKAGES_LINGUAS_DEB - additional packages for uclibc
-# INSTALL_TASK_DEB - task name
python do_package_deb () {
- import re, copy
- import textwrap
- import subprocess
- import collections
-
- oldcwd = os.getcwd()
-
- workdir = d.getVar('WORKDIR', True)
- if not workdir:
- bb.error("WORKDIR not defined, unable to package")
- return
-
- outdir = d.getVar('PKGWRITEDIRDEB', True)
- if not outdir:
- bb.error("PKGWRITEDIRDEB not defined, unable to package")
- return
-
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
if not packages:
bb.debug(1, "PACKAGES not defined, nothing to package")
return
- tmpdir = d.getVar('TMPDIR', True)
-
+ tmpdir = d.getVar('TMPDIR')
if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
- if packages == []:
- bb.debug(1, "No packages; nothing to do")
- return
+ oe.utils.multiprocess_launch(deb_write_pkg, packages.split(), d, extraargs=(d,))
+}
+do_package_deb[vardeps] += "deb_write_pkg"
+do_package_deb[vardepsexclude] = "BB_NUMBER_THREADS"
- pkgdest = d.getVar('PKGDEST', True)
+def deb_write_pkg(pkg, d):
+ import re, copy
+ import textwrap
+ import subprocess
+ import collections
+ import codecs
+
+ outdir = d.getVar('PKGWRITEDIRDEB')
+ pkgdest = d.getVar('PKGDEST')
def cleanupcontrol(root):
for p in ['CONTROL', 'DEBIAN']:
@@ -88,25 +73,24 @@ python do_package_deb () {
if os.path.exists(p):
bb.utils.prunedir(p)
- for pkg in packages.split():
- localdata = bb.data.createCopy(d)
- root = "%s/%s" % (pkgdest, pkg)
+ localdata = bb.data.createCopy(d)
+ root = "%s/%s" % (pkgdest, pkg)
- lf = bb.utils.lockfile(root + ".lock")
+ lf = bb.utils.lockfile(root + ".lock")
+ try:
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg, True)
+ pkgname = localdata.getVar('PKG_%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
- bb.data.update_data(localdata)
basedir = os.path.join(os.path.dirname(root))
- pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH', True))
+ pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH'))
bb.utils.mkdirhier(pkgoutdir)
os.chdir(root)
@@ -114,22 +98,17 @@ python do_package_deb () {
from glob import glob
g = glob('*')
if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
- bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
- bb.utils.unlockfile(lf)
- continue
+ bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
+ return
controldir = os.path.join(root, 'DEBIAN')
bb.utils.mkdirhier(controldir)
os.chmod(controldir, 0o755)
- try:
- import codecs
- ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')
- except OSError:
- bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("unable to open control file for writing.")
+
+ ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')
fields = []
- pe = d.getVar('PKGE', True)
+ pe = d.getVar('PKGE')
if pe and int(pe) > 0:
fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
else:
@@ -141,7 +120,7 @@ python do_package_deb () {
fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
fields.append(["OE: %s\n", ['PN']])
fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
- if d.getVar('HOMEPAGE', True):
+ if d.getVar('HOMEPAGE'):
fields.append(["Homepage: %s\n", ['HOMEPAGE']])
# Package, Version, Maintainer, Description - mandatory
@@ -151,10 +130,10 @@ python do_package_deb () {
def pullData(l, d):
l2 = []
for i in l:
- data = d.getVar(i, True)
+ data = d.getVar(i)
if data is None:
- raise KeyError(f)
- if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH', True) == 'all':
+ raise KeyError(i)
+ if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH') == 'all':
data = 'all'
elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
# The params in deb package control don't allow character
@@ -165,62 +144,50 @@ python do_package_deb () {
return l2
ctrlfile.write("Package: %s\n" % pkgname)
- if d.getVar('PACKAGE_ARCH', True) == "all":
+ if d.getVar('PACKAGE_ARCH') == "all":
ctrlfile.write("Multi-Arch: foreign\n")
# check for required fields
- try:
- for (c, fs) in fields:
- for f in fs:
- if localdata.getVar(f, False) is None:
- raise KeyError(f)
- # Special behavior for description...
- if 'DESCRIPTION' in fs:
- summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
- ctrlfile.write('Description: %s\n' % summary)
- description = localdata.getVar('DESCRIPTION', True) or "."
- description = textwrap.dedent(description).strip()
- if '\\n' in description:
- # Manually indent
- for t in description.split('\\n'):
- # We don't limit the width when manually indent, but we do
- # need the textwrap.fill() to set the initial_indent and
- # subsequent_indent, so set a large width
- ctrlfile.write('%s\n' % textwrap.fill(t, width=100000, initial_indent=' ', subsequent_indent=' '))
- else:
- # Auto indent
- ctrlfile.write('%s\n' % textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))
-
- else:
- ctrlfile.write(c % tuple(pullData(fs, localdata)))
- except KeyError:
- import sys
- (type, value, traceback) = sys.exc_info()
- bb.utils.unlockfile(lf)
- ctrlfile.close()
- raise bb.build.FuncFailed("Missing field for deb generation: %s" % value)
+ for (c, fs) in fields:
+ # Special behavior for description...
+ if 'DESCRIPTION' in fs:
+ summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
+ ctrlfile.write('Description: %s\n' % summary)
+ description = localdata.getVar('DESCRIPTION') or "."
+ description = textwrap.dedent(description).strip()
+ if '\\n' in description:
+ # Manually indent
+ for t in description.split('\\n'):
+ ctrlfile.write(' %s\n' % (t.strip() or '.'))
+ else:
+ # Auto indent
+ ctrlfile.write('%s\n' % textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))
+
+ else:
+ ctrlfile.write(c % tuple(pullData(fs, localdata)))
# more fields
custom_fields_chunk = get_package_additional_metadata("deb", localdata)
- if custom_fields_chunk is not None:
+ if custom_fields_chunk:
ctrlfile.write(custom_fields_chunk)
ctrlfile.write("\n")
mapping_rename_hook(localdata)
def debian_cmp_remap(var):
- # dpkg does not allow for '(' or ')' in a dependency name
- # replace these instances with '__' and '__'
+ # dpkg does not allow for '(', ')', ':' or '/' in a dependency name
+ # Replace any instances of them with '__'
#
# In debian '>' and '<' do not mean what it appears they mean
# '<' = less or equal
# '>' = greater or equal
# adjust these to the '<<' and '>>' equivalents
#
- for dep in var:
- if '(' in dep:
- newdep = dep.replace('(', '__')
- newdep = newdep.replace(')', '__')
+ for dep in list(var.keys()):
+ if '(' in dep or '/' in dep or ':' in dep:
+ newdep = re.sub(r'[(:)/]', '__', dep)
+ if newdep.startswith("__"):
+ newdep = "A" + newdep
if newdep != dep:
var[newdep] = var[dep]
del var[dep]
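A worked example of the renaming above with hypothetical dependency names: RPM-style or path-style names have '(', ')', ':' and '/' squashed to '__', and a result that would start with '__' is prefixed with 'A' so it stays a valid Debian package name:

    # "pkgconfig(glib-2.0)" -> "pkgconfig__glib-2.0__"
    # "/bin/sh"             -> "__bin__sh" -> "A__bin__sh"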
@@ -231,7 +198,7 @@ python do_package_deb () {
elif (v or "").startswith("> "):
var[dep][i] = var[dep][i].replace("> ", ">> ")
- rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
+ rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
debian_cmp_remap(rdepends)
for dep in list(rdepends.keys()):
if dep == pkg:
@@ -239,20 +206,24 @@ python do_package_deb () {
continue
if '*' in dep:
del rdepends[dep]
- rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
+ rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
debian_cmp_remap(rrecommends)
for dep in list(rrecommends.keys()):
if '*' in dep:
del rrecommends[dep]
- rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
+ rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
debian_cmp_remap(rsuggests)
# Deliberately drop version information here, not wanted/supported by deb
- rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or ""), [])
+ rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
+ # Remove file paths if any from rprovides, debian does not support custom providers
+ for key in list(rprovides.keys()):
+ if key.startswith('/'):
+ del rprovides[key]
rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
debian_cmp_remap(rprovides)
- rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
+ rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
debian_cmp_remap(rreplaces)
- rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
+ rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
debian_cmp_remap(rconflicts)
if rdepends:
ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
@@ -269,15 +240,11 @@ python do_package_deb () {
ctrlfile.close()
for script in ["preinst", "postinst", "prerm", "postrm"]:
- scriptvar = localdata.getVar('pkg_%s' % script, True)
+ scriptvar = localdata.getVar('pkg_%s' % script)
if not scriptvar:
continue
scriptvar = scriptvar.strip()
- try:
- scriptfile = open(os.path.join(controldir, script), 'w')
- except OSError:
- bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
+ scriptfile = open(os.path.join(controldir, script), 'w')
if scriptvar.startswith("#!"):
pos = scriptvar.find("\n") + 1
@@ -297,38 +264,34 @@ python do_package_deb () {
conffiles_str = ' '.join(get_conffiles(pkg, d))
if conffiles_str:
- try:
- conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
- except OSError:
- bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("unable to open conffiles for writing.")
+ conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
for f in conffiles_str.split():
if os.path.exists(oe.path.join(root, f)):
conffiles.write('%s\n' % f)
conffiles.close()
os.chdir(basedir)
- ret = subprocess.call("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH", True), root, pkgoutdir), shell=True)
- if ret != 0:
- bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("dpkg-deb execution failed")
+ subprocess.check_output("PATH=\"%s\" %s -b %s %s" % (localdata.getVar("PATH"), localdata.getVar("DPKG_BUILDCMD"),
+ root, pkgoutdir),
+ stderr=subprocess.STDOUT,
+ shell=True)
+ finally:
cleanupcontrol(root)
bb.utils.unlockfile(lf)
- os.chdir(oldcwd)
-}
-# Indirect references to these vars
-do_package_write_deb[vardeps] += "PKGV PKGR PKGV DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE"
+
# Otherwise allarch packages may change depending on override configuration
-do_package_deb[vardepsexclude] = "OVERRIDES"
+deb_write_pkg[vardepsexclude] = "OVERRIDES"
+# Indirect references to these vars
+do_package_write_deb[vardeps] += "PKGV PKGR DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE"
SSTATETASKS += "do_package_write_deb"
do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
python do_package_write_deb_setscene () {
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
@@ -338,7 +301,7 @@ python do_package_write_deb_setscene () {
addtask do_package_write_deb_setscene
python () {
- if d.getVar('PACKAGES', True) != '':
+ if d.getVar('PACKAGES') != '':
deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
d.appendVarFlag('do_package_write_deb', 'depends', deps)
d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
@@ -351,6 +314,7 @@ python do_package_write_deb () {
do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[umask] = "022"
+do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_deb after do_packagedata do_package
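The new DPKG_BUILDCMD knob makes the packaging command overridable, e.g. to switch compressors; a hypothetical local.conf override (dpkg-deb's -Z option selects the compression type):

    DPKG_BUILDCMD = "dpkg-deb -Zxz"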
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
index 930e154bd7..4f23977032 100644
--- a/meta/classes/package_ipk.bbclass
+++ b/meta/classes/package_ipk.bbclass
@@ -8,31 +8,24 @@ IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
# Program to be used to build opkg packages
-OPKGBUILDCMD ??= "opkg-build"
+OPKGBUILDCMD ??= 'opkg-build -Z xz -a "${XZ_DEFAULTS}"'
OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
-OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
-OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE', True) or "").split())][(d.getVar("PACKAGE_EXCLUDE", True) or "") != ""]}"
+OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
+OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "").strip() != ""]}"
-OPKGLIBDIR = "${localstatedir}/lib"
+OPKGLIBDIR ??= "${localstatedir}/lib"
python do_package_ipk () {
- import re, copy
- import textwrap
- import subprocess
- import collections
-
- oldcwd = os.getcwd()
-
- workdir = d.getVar('WORKDIR', True)
- outdir = d.getVar('PKGWRITEDIRIPK', True)
- tmpdir = d.getVar('TMPDIR', True)
- pkgdest = d.getVar('PKGDEST', True)
+ workdir = d.getVar('WORKDIR')
+ outdir = d.getVar('PKGWRITEDIRIPK')
+ tmpdir = d.getVar('TMPDIR')
+ pkgdest = d.getVar('PKGDEST')
if not workdir or not outdir or not tmpdir:
bb.error("Variables incorrectly set, unable to package")
return
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
if not packages or packages == '':
bb.debug(1, "No packages; nothing to do")
return
@@ -42,30 +35,43 @@ python do_package_ipk () {
if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
+ oe.utils.multiprocess_launch(ipk_write_pkg, packages.split(), d, extraargs=(d,))
+}
+do_package_ipk[vardeps] += "ipk_write_pkg"
+do_package_ipk[vardepsexclude] = "BB_NUMBER_THREADS"
+
+def ipk_write_pkg(pkg, d):
+ import re, copy
+ import subprocess
+ import textwrap
+ import collections
+
def cleanupcontrol(root):
for p in ['CONTROL', 'DEBIAN']:
p = os.path.join(root, p)
if os.path.exists(p):
bb.utils.prunedir(p)
- for pkg in packages.split():
- localdata = bb.data.createCopy(d)
- root = "%s/%s" % (pkgdest, pkg)
+ outdir = d.getVar('PKGWRITEDIRIPK')
+ pkgdest = d.getVar('PKGDEST')
+ recipesource = os.path.basename(d.getVar('FILE'))
- lf = bb.utils.lockfile(root + ".lock")
+ localdata = bb.data.createCopy(d)
+ root = "%s/%s" % (pkgdest, pkg)
+ lf = bb.utils.lockfile(root + ".lock")
+ try:
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg, True)
+ pkgname = localdata.getVar('PKG_%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
- bb.data.update_data(localdata)
basedir = os.path.join(os.path.dirname(root))
- arch = localdata.getVar('PACKAGE_ARCH', True)
+ arch = localdata.getVar('PACKAGE_ARCH')
if localdata.getVar('IPK_HIERARCHICAL_FEED', False) == "1":
# Spread packages across subdirectories so each isn't too crowded
@@ -98,20 +104,15 @@ python do_package_ipk () {
from glob import glob
g = glob('*')
if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
- bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
- bb.utils.unlockfile(lf)
- continue
+ bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
+ return
controldir = os.path.join(root, 'CONTROL')
bb.utils.mkdirhier(controldir)
- try:
- ctrlfile = open(os.path.join(controldir, 'control'), 'w')
- except OSError:
- bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("unable to open control file for writing.")
+ ctrlfile = open(os.path.join(controldir, 'control'), 'w')
fields = []
- pe = d.getVar('PKGE', True)
+ pe = d.getVar('PKGE')
if pe and int(pe) > 0:
fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
else:
@@ -123,47 +124,36 @@ python do_package_ipk () {
fields.append(["License: %s\n", ['LICENSE']])
fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
fields.append(["OE: %s\n", ['PN']])
- if d.getVar('HOMEPAGE', True):
+ if d.getVar('HOMEPAGE'):
fields.append(["Homepage: %s\n", ['HOMEPAGE']])
def pullData(l, d):
l2 = []
for i in l:
- l2.append(d.getVar(i, True))
+ l2.append(d.getVar(i))
return l2
ctrlfile.write("Package: %s\n" % pkgname)
# check for required fields
- try:
- for (c, fs) in fields:
- for f in fs:
- if localdata.getVar(f, False) is None:
- raise KeyError(f)
- # Special behavior for description...
- if 'DESCRIPTION' in fs:
- summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
- ctrlfile.write('Description: %s\n' % summary)
- description = localdata.getVar('DESCRIPTION', True) or "."
- description = textwrap.dedent(description).strip()
- if '\\n' in description:
- # Manually indent
- for t in description.split('\\n'):
- # We don't limit the width when manually indent, but we do
- # need the textwrap.fill() to set the initial_indent and
- # subsequent_indent, so set a large width
- ctrlfile.write('%s\n' % textwrap.fill(t.strip(), width=100000, initial_indent=' ', subsequent_indent=' '))
- else:
- # Auto indent
- ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
+ for (c, fs) in fields:
+ for f in fs:
+ if localdata.getVar(f, False) is None:
+ raise KeyError(f)
+ # Special behavior for description...
+ if 'DESCRIPTION' in fs:
+ summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
+ ctrlfile.write('Description: %s\n' % summary)
+ description = localdata.getVar('DESCRIPTION') or "."
+ description = textwrap.dedent(description).strip()
+ if '\\n' in description:
+ # Manually indent: multiline description includes a leading space
+ for t in description.split('\\n'):
+ ctrlfile.write(' %s\n' % (t.strip() or ' .'))
else:
- ctrlfile.write(c % tuple(pullData(fs, localdata)))
- except KeyError:
- import sys
- (type, value, traceback) = sys.exc_info()
- ctrlfile.close()
- bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("Missing field for ipk generation: %s" % value)
- # more fields
+ # Auto indent
+ ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
+ else:
+ ctrlfile.write(c % tuple(pullData(fs, localdata)))
custom_fields_chunk = get_package_additional_metadata("ipk", localdata)
if custom_fields_chunk is not None:
@@ -185,19 +175,19 @@ python do_package_ipk () {
elif (v or "").startswith("> "):
var[dep][i] = var[dep][i].replace("> ", ">> ")
- rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
+ rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
debian_cmp_remap(rdepends)
- rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
+ rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
debian_cmp_remap(rrecommends)
- rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
+ rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
debian_cmp_remap(rsuggests)
# Deliberately drop version information here, not wanted/supported by ipk
- rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or ""), [])
+ rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
debian_cmp_remap(rprovides)
- rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
+ rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
debian_cmp_remap(rreplaces)
- rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
+ rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
debian_cmp_remap(rconflicts)
if rdepends:
@@ -212,63 +202,51 @@ python do_package_ipk () {
ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
if rconflicts:
ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
- src_uri = localdata.getVar("SRC_URI", True).strip() or "None"
- if src_uri:
- src_uri = re.sub("\s+", " ", src_uri)
- ctrlfile.write("Source: %s\n" % " ".join(src_uri.split()))
+ ctrlfile.write("Source: %s\n" % recipesource)
ctrlfile.close()
for script in ["preinst", "postinst", "prerm", "postrm"]:
- scriptvar = localdata.getVar('pkg_%s' % script, True)
+ scriptvar = localdata.getVar('pkg_%s' % script)
if not scriptvar:
continue
- try:
- scriptfile = open(os.path.join(controldir, script), 'w')
- except OSError:
- bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
+ scriptfile = open(os.path.join(controldir, script), 'w')
scriptfile.write(scriptvar)
scriptfile.close()
os.chmod(os.path.join(controldir, script), 0o755)
conffiles_str = ' '.join(get_conffiles(pkg, d))
if conffiles_str:
- try:
- conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
- except OSError:
- bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("unable to open conffiles for writing.")
+ conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
for f in conffiles_str.split():
if os.path.exists(oe.path.join(root, f)):
conffiles.write('%s\n' % f)
conffiles.close()
os.chdir(basedir)
- ret = subprocess.call("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH", True),
- d.getVar("OPKGBUILDCMD", True), pkg, pkgoutdir), shell=True)
- if ret != 0:
- bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("opkg-build execution failed")
-
- if d.getVar('IPK_SIGN_PACKAGES', True) == '1':
- ipkver = "%s-%s" % (d.getVar('PKGV', True), d.getVar('PKGR', True))
- ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, d.getVar('PACKAGE_ARCH', True))
+ subprocess.check_output("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH"),
+ d.getVar("OPKGBUILDCMD"), pkg, pkgoutdir),
+ stderr=subprocess.STDOUT,
+ shell=True)
+
+ if d.getVar('IPK_SIGN_PACKAGES') == '1':
+ ipkver = "%s-%s" % (d.getVar('PKGV'), d.getVar('PKGR'))
+ ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, d.getVar('PACKAGE_ARCH'))
sign_ipk(d, ipk_to_sign)
+ finally:
cleanupcontrol(root)
bb.utils.unlockfile(lf)
- os.chdir(oldcwd)
-}
# Otherwise allarch packages may change depending on override configuration
-do_package_ipk[vardepsexclude] = "OVERRIDES"
+ipk_write_pkg[vardepsexclude] = "OVERRIDES"
+
SSTATETASKS += "do_package_write_ipk"
do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
python do_package_write_ipk_setscene () {
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
@@ -278,8 +256,8 @@ python do_package_write_ipk_setscene () {
addtask do_package_write_ipk_setscene
python () {
- if d.getVar('PACKAGES', True) != '':
- deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
+ if d.getVar('PACKAGES') != '':
+ deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot xz-native:do_populate_sysroot'
d.appendVarFlag('do_package_write_ipk', 'depends', deps)
d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
}
@@ -291,6 +269,7 @@ python do_package_write_ipk () {
do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[umask] = "022"
+do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_ipk after do_packagedata do_package
PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
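PACKAGE_WRITE_DEPS, referenced by the new [depends] flags above, lets a recipe pull native tools into the package-write sysroot, typically for postinst scripts. A hypothetical recipe fragment:

    PACKAGE_WRITE_DEPS += "qemu-native"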
diff --git a/meta/classes/package_pkgdata.bbclass b/meta/classes/package_pkgdata.bbclass
new file mode 100644
index 0000000000..18b7ed62e0
--- /dev/null
+++ b/meta/classes/package_pkgdata.bbclass
@@ -0,0 +1,167 @@
+WORKDIR_PKGDATA = "${WORKDIR}/pkgdata-sysroot"
+
+def package_populate_pkgdata_dir(pkgdatadir, d):
+ import glob
+
+ postinsts = []
+ seendirs = set()
+ stagingdir = d.getVar("PKGDATA_DIR")
+ pkgarchs = ['${MACHINE_ARCH}']
+ pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
+ pkgarchs.append('allarch')
+
+ bb.utils.mkdirhier(pkgdatadir)
+ for pkgarch in pkgarchs:
+ for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.packagedata" % pkgarch)):
+ with open(manifest, "r") as f:
+ for l in f:
+ l = l.strip()
+ dest = l.replace(stagingdir, "")
+ if l.endswith("/"):
+ staging_copydir(l, pkgdatadir, dest, seendirs)
+ continue
+ try:
+ staging_copyfile(l, pkgdatadir, dest, postinsts, seendirs)
+ except FileExistsError:
+ continue
+
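The manifest search above walks package architectures from most to least specific. With hypothetical values MACHINE_ARCH = "qemux86" and PACKAGE_EXTRA_ARCHS = "x86 i586 i686", the list built is:

    pkgarchs = ['qemux86'] + list(reversed("x86 i586 i686".split())) + ['allarch']
    # -> ['qemux86', 'i686', 'i586', 'x86', 'allarch']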
+python package_prepare_pkgdata() {
+ import copy
+ import glob
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ mytaskname = d.getVar("BB_RUNTASK")
+ if mytaskname.endswith("_setscene"):
+ mytaskname = mytaskname.replace("_setscene", "")
+ workdir = d.getVar("WORKDIR")
+ pn = d.getVar("PN")
+ stagingdir = d.getVar("PKGDATA_DIR")
+ pkgdatadir = d.getVar("WORKDIR_PKGDATA")
+
+ # Detect bitbake -b usage
+ nodeps = d.getVar("BB_LIMITEDDEPS") or False
+ if nodeps:
+ package_populate_pkgdata_dir(pkgdatadir, d)
+ return
+
+ start = None
+ configuredeps = []
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == mytaskname and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+
+ # We need to figure out which sysroot files we need to expose to this task.
+ # This needs to match what would get restored from sstate, which is controlled
+ # ultimately by calls from bitbake to setscene_depvalid().
+ # That function expects a setscene dependency tree. We build a dependency tree
+ # condensed to inter-sstate task dependencies, similar to that used by setscene
+ # tasks. We can then call into setscene_depvalid() and decide
+ # which dependencies we can "see" and should expose in the recipe specific sysroot.
+ setscenedeps = copy.deepcopy(taskdepdata)
+
+ start = set([start])
+
+ sstatetasks = d.getVar("SSTATETASKS").split()
+ # Add recipe specific tasks referenced by setscene_depvalid()
+ sstatetasks.append("do_stash_locale")
+
+ # If start is an sstate task (like do_package) we need to add in its direct dependencies
+ # else the code below won't recurse into them.
+ for dep in set(start):
+ for dep2 in setscenedeps[dep][3]:
+ start.add(dep2)
+ start.remove(dep)
+
+ # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
+ for dep in taskdepdata:
+ data = setscenedeps[dep]
+ if data[1] not in sstatetasks:
+ for dep2 in setscenedeps:
+ data2 = setscenedeps[dep2]
+ if dep in data2[3]:
+ data2[3].update(setscenedeps[dep][3])
+ data2[3].remove(dep)
+ if dep in start:
+ start.update(setscenedeps[dep][3])
+ start.remove(dep)
+ del setscenedeps[dep]
+
+ # Remove circular references
+ for dep in setscenedeps:
+ if dep in setscenedeps[dep][3]:
+ setscenedeps[dep][3].remove(dep)
+
+ # Direct dependencies should be present and can be depended upon
+ for dep in set(start):
+ if setscenedeps[dep][1] == "do_packagedata":
+ if dep not in configuredeps:
+ configuredeps.append(dep)
+
+ msgbuf = []
+ # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
+ # for ones that would be restored from sstate.
+ done = list(start)
+ next = list(start)
+ while next:
+ new = []
+ for dep in next:
+ data = setscenedeps[dep]
+ for datadep in data[3]:
+ if datadep in done:
+ continue
+ taskdeps = {}
+ taskdeps[dep] = setscenedeps[dep][:2]
+ taskdeps[datadep] = setscenedeps[datadep][:2]
+ retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
+ done.append(datadep)
+ new.append(datadep)
+ if retval:
+ msgbuf.append("Skipping setscene dependency %s" % datadep)
+ continue
+ if datadep not in configuredeps and setscenedeps[datadep][1] == "do_packagedata":
+ configuredeps.append(datadep)
+ msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
+ else:
+ msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
+ next = new
+
+ # Sadly, this logging is too verbose for day-to-day use
+ #bb.debug(2, "\n".join(msgbuf))
+
+ seendirs = set()
+ postinsts = []
+ multilibs = {}
+ manifests = {}
+
+ msg_adding = []
+
+ for dep in configuredeps:
+ c = setscenedeps[dep][0]
+ msg_adding.append(c)
+
+ manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "packagedata", d, multilibs)
+ destsysroot = pkgdatadir
+
+ if manifest:
+ targetdir = destsysroot
+ with open(manifest, "r") as f:
+ manifests[dep] = manifest
+ for l in f:
+ l = l.strip()
+ dest = targetdir + l.replace(stagingdir, "")
+ if l.endswith("/"):
+ staging_copydir(l, targetdir, dest, seendirs)
+ continue
+ staging_copyfile(l, targetdir, dest, postinsts, seendirs)
+
+ bb.note("Installed into pkgdata-sysroot: %s" % str(msg_adding))
+
+}
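The loop above splices non-sstate tasks out of the dependency graph, reconnecting their parents directly to their dependencies before the walk with setscene_depvalid. A toy version of that collapse, assuming deps maps task -> set of dependencies:

    def collapse(deps, keep):
        # remove every task not in `keep`, rewiring edges through it
        for t in [t for t in list(deps) if t not in keep]:
            for t2 in deps:
                if t in deps[t2]:
                    deps[t2] |= deps[t]
                    deps[t2].discard(t)
            del deps[t]
        # drop any self-references introduced by the rewiring
        for t in deps:
            deps[t].discard(t)
        return deps

    g = {"a": {"b"}, "b": {"c"}, "c": set()}
    collapse(g, keep={"a", "c"})   # -> {"a": {"c"}, "c": set()}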
+package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
+package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
+
+
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index f9398a9522..9145717f98 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -7,26 +7,45 @@ RPMBUILD="rpmbuild"
PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
-# Maintaining the perfile dependencies has singificant overhead when writing the
+# Maintaining the perfile dependencies has significant overhead when writing the
# packages. When set, this value merges them for efficiency.
MERGEPERFILEDEPS = "1"
+# Filter dependencies based on a provided function.
+def filter_deps(var, f):
+ import collections
+
+ depends_dict = bb.utils.explode_dep_versions2(var)
+ newdeps_dict = collections.OrderedDict()
+ for dep in depends_dict:
+ if f(dep):
+ newdeps_dict[dep] = depends_dict[dep]
+ return bb.utils.join_deps(newdeps_dict, commasep=False)
+
+# Filter out absolute paths (typically /bin/sh and /usr/bin/env) and any perl
+# dependencies for nativesdk packages.
+def filter_nativesdk_deps(srcname, var):
+ if var and srcname.startswith("nativesdk-"):
+ var = filter_deps(var, lambda dep: not dep.startswith('/') and dep != 'perl' and not dep.startswith('perl('))
+ return var
+
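A worked example of the filter above with hypothetical inputs; absolute paths and perl dependencies are dropped only for nativesdk- recipes:

    filter_nativesdk_deps("nativesdk-foo", "/bin/sh perl perl(Carp) libbar (>= 1.0)")
    # -> "libbar (>= 1.0)"
    filter_nativesdk_deps("foo", "/bin/sh libbar")   # unchanged: "/bin/sh libbar"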
# Construct per file dependencies file
def write_rpm_perfiledata(srcname, d):
- workdir = d.getVar('WORKDIR', True)
- packages = d.getVar('PACKAGES', True)
- pkgd = d.getVar('PKGD', True)
+ workdir = d.getVar('WORKDIR')
+ packages = d.getVar('PACKAGES')
+ pkgd = d.getVar('PKGD')
def dump_filerdeps(varname, outfile, d):
- outfile.write("#!/usr/bin/env python\n\n")
+ outfile.write("#!/usr/bin/env python3\n\n")
outfile.write("# Dependency table\n")
outfile.write('deps = {\n')
for pkg in packages.split():
dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
- dependsflist = (d.getVar(dependsflist_key, True) or "")
+ dependsflist = (d.getVar(dependsflist_key) or "")
for dfile in dependsflist.split():
key = "FILE" + varname + "_" + dfile + "_" + pkg
- depends_dict = bb.utils.explode_dep_versions(d.getVar(key, True) or "")
+ deps = filter_nativesdk_deps(srcname, d.getVar(key) or "")
+ depends_dict = bb.utils.explode_dep_versions(deps)
file = dfile.replace("@underscore@", "_")
file = file.replace("@closebrace@", "]")
file = file.replace("@openbrace@", "[")
@@ -55,10 +74,7 @@ def write_rpm_perfiledata(srcname, d):
# OE-core dependencies a.k.a. RPM requires
outdepends = workdir + "/" + srcname + ".requires"
- try:
- dependsfile = open(outdepends, 'w')
- except OSError:
- raise bb.build.FuncFailed("unable to open spec file for writing.")
+ dependsfile = open(outdepends, 'w')
dump_filerdeps('RDEPENDS', dependsfile, d)
@@ -68,10 +84,7 @@ def write_rpm_perfiledata(srcname, d):
# OE-core / RPM Provides
outprovides = workdir + "/" + srcname + ".provides"
- try:
- providesfile = open(outprovides, 'w')
- except OSError:
- raise bb.build.FuncFailed("unable to open spec file for writing.")
+ providesfile = open(outprovides, 'w')
dump_filerdeps('RPROVIDES', providesfile, d)
@@ -86,20 +99,24 @@ python write_specfile () {
# append information for logs and patches to %prep
def add_prep(d,spec_files_bottom):
- if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
- spec_files_bottom.append('%%prep -n %s' % d.getVar('PN', True) )
+ if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
+ spec_files_bottom.append('%%prep -n %s' % d.getVar('PN') )
spec_files_bottom.append('%s' % "echo \"include logs and patches, Please check them in SOURCES\"")
spec_files_bottom.append('')
# append the name of tarball to key word 'SOURCE' in xxx.spec.
def tail_source(d):
- if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
- ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
+ if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR')
if not os.path.exists(ar_outdir):
return
source_list = os.listdir(ar_outdir)
source_number = 0
for source in source_list:
+ # do_deploy_archives may have already run (from sstate) meaning a .src.rpm may already
+ # exist in ARCHIVER_OUTDIR so skip if present.
+ if source.endswith(".src.rpm"):
+ continue
# The rpmbuild doesn't need the root permission, but it needs
# to know the file's user and group name, the only user and
# group in fakeroot is "root" when working in fakeroot.
@@ -107,27 +124,6 @@ python write_specfile () {
os.chown(f, 0, 0)
spec_preamble_top.append('Source%s: %s' % (source_number, source))
source_number += 1
- # We need a simple way to remove the MLPREFIX from the package name,
- # and dependency information...
- def strip_multilib(name, d):
- multilibs = d.getVar('MULTILIBS', True) or ""
- for ext in multilibs.split():
- eext = ext.split(':')
- if len(eext) > 1 and eext[0] == 'multilib' and name and name.find(eext[1] + '-') >= 0:
- name = "".join(name.split(eext[1] + '-'))
- return name
-
- def strip_multilib_deps(deps, d):
- depends = bb.utils.explode_dep_versions2(deps or "")
- newdeps = {}
- for dep in depends:
- newdeps[strip_multilib(dep, d)] = depends[dep]
- return bb.utils.join_deps(newdeps)
-
-# ml = d.getVar("MLPREFIX", True)
-# if ml and name and len(ml) != 0 and name.find(ml) == 0:
-# return ml.join(name.split(ml, 1)[1:])
-# return name
# In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
# This format is similar to OE, however there are restrictions on the
@@ -144,7 +140,7 @@ python write_specfile () {
# after renaming we cannot look up the dependencies in the packagedata
# store.
def translate_vers(varname, d):
- depends = d.getVar(varname, True)
+ depends = d.getVar(varname)
if depends:
depends_dict = bb.utils.explode_dep_versions2(depends)
newdeps_dict = {}
@@ -197,6 +193,8 @@ python write_specfile () {
if path.endswith("DEBIAN") or path.endswith("CONTROL"):
continue
path = path.replace("%", "%%%%%%%%")
+ path = path.replace("[", "?")
+ path = path.replace("]", "?")
# Treat all symlinks to directories as normal files.
# os.walk() lists them as directories.
@@ -216,6 +214,8 @@ python write_specfile () {
if dir == "CONTROL" or dir == "DEBIAN":
continue
dir = dir.replace("%", "%%%%%%%%")
+ dir = dir.replace("[", "?")
+ dir = dir.replace("]", "?")
# All packages own the directories their files are in...
target.append('%dir "' + path + '/' + dir + '"')
else:
@@ -230,6 +230,8 @@ python write_specfile () {
if file == "CONTROL" or file == "DEBIAN":
continue
file = file.replace("%", "%%%%%%%%")
+ file = file.replace("[", "?")
+ file = file.replace("]", "?")
if conffiles.count(path + '/' + file):
target.append('%config "' + path + '/' + file + '"')
else:
@@ -248,10 +250,10 @@ python write_specfile () {
def get_perfile(varname, pkg, d):
deps = []
dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
- dependsflist = (d.getVar(dependsflist_key, True) or "")
+ dependsflist = (d.getVar(dependsflist_key) or "")
for dfile in dependsflist.split():
key = "FILE" + varname + "_" + dfile + "_" + pkg
- depends = d.getVar(key, True)
+ depends = d.getVar(key)
if depends:
deps.append(depends)
return " ".join(deps)
@@ -269,33 +271,33 @@ python write_specfile () {
else:
spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
if not packages or packages == '':
bb.debug(1, "No packages; nothing to do")
return
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
if not pkgdest:
bb.fatal("No PKGDEST")
- outspecfile = d.getVar('OUTSPECFILE', True)
+ outspecfile = d.getVar('OUTSPECFILE')
if not outspecfile:
bb.fatal("No OUTSPECFILE")
# Construct the SPEC file...
- srcname = strip_multilib(d.getVar('PN', True), d)
- srcsummary = (d.getVar('SUMMARY', True) or d.getVar('DESCRIPTION', True) or ".")
- srcversion = d.getVar('PKGV', True).replace('-', '+')
- srcrelease = d.getVar('PKGR', True)
- srcepoch = (d.getVar('PKGE', True) or "")
- srclicense = d.getVar('LICENSE', True)
- srcsection = d.getVar('SECTION', True)
- srcmaintainer = d.getVar('MAINTAINER', True)
- srchomepage = d.getVar('HOMEPAGE', True)
- srcdescription = d.getVar('DESCRIPTION', True) or "."
+ srcname = d.getVar('PN')
+ srcsummary = (d.getVar('SUMMARY') or d.getVar('DESCRIPTION') or ".")
+ srcversion = d.getVar('PKGV').replace('-', '+')
+ srcrelease = d.getVar('PKGR')
+ srcepoch = (d.getVar('PKGE') or "")
+ srclicense = d.getVar('LICENSE')
+ srcsection = d.getVar('SECTION')
+ srcmaintainer = d.getVar('MAINTAINER')
+ srchomepage = d.getVar('HOMEPAGE')
+ srcdescription = d.getVar('DESCRIPTION') or "."
srccustomtagschunk = get_package_additional_metadata("rpm", d)
- srcdepends = strip_multilib_deps(d.getVar('DEPENDS', True), d)
+ srcdepends = d.getVar('DEPENDS')
srcrdepends = []
srcrrecommends = []
srcrsuggests = []
@@ -318,8 +320,8 @@ python write_specfile () {
spec_files_top = []
spec_files_bottom = []
- perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
- extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA", True) or "0") == "1"
+ perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
+ extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA") or "0") == "1"
for pkg in packages.split():
localdata = bb.data.createCopy(d)
@@ -328,29 +330,27 @@ python write_specfile () {
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg, True)
+ pkgname = localdata.getVar('PKG_%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
- bb.data.update_data(localdata)
-
conffiles = get_conffiles(pkg, d)
- dirfiles = localdata.getVar('DIRFILES', True)
+ dirfiles = localdata.getVar('DIRFILES')
if dirfiles is not None:
dirfiles = dirfiles.split()
- splitname = strip_multilib(pkgname, d)
+ splitname = pkgname
- splitsummary = (localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or ".")
- splitversion = (localdata.getVar('PKGV', True) or "").replace('-', '+')
- splitrelease = (localdata.getVar('PKGR', True) or "")
- splitepoch = (localdata.getVar('PKGE', True) or "")
- splitlicense = (localdata.getVar('LICENSE', True) or "")
- splitsection = (localdata.getVar('SECTION', True) or "")
- splitdescription = (localdata.getVar('DESCRIPTION', True) or ".")
+ splitsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
+ splitversion = (localdata.getVar('PKGV') or "").replace('-', '+')
+ splitrelease = (localdata.getVar('PKGR') or "")
+ splitepoch = (localdata.getVar('PKGE') or "")
+ splitlicense = (localdata.getVar('LICENSE') or "")
+ splitsection = (localdata.getVar('SECTION') or "")
+ splitdescription = (localdata.getVar('DESCRIPTION') or ".")
splitcustomtagschunk = get_package_additional_metadata("rpm", localdata)
translate_vers('RDEPENDS', localdata)
@@ -363,18 +363,18 @@ python write_specfile () {
# Map the dependencies into their final form
mapping_rename_hook(localdata)
- splitrdepends = strip_multilib_deps(localdata.getVar('RDEPENDS', True), d)
- splitrrecommends = strip_multilib_deps(localdata.getVar('RRECOMMENDS', True), d)
- splitrsuggests = strip_multilib_deps(localdata.getVar('RSUGGESTS', True), d)
- splitrprovides = strip_multilib_deps(localdata.getVar('RPROVIDES', True), d)
- splitrreplaces = strip_multilib_deps(localdata.getVar('RREPLACES', True), d)
- splitrconflicts = strip_multilib_deps(localdata.getVar('RCONFLICTS', True), d)
+ splitrdepends = localdata.getVar('RDEPENDS')
+ splitrrecommends = localdata.getVar('RRECOMMENDS')
+ splitrsuggests = localdata.getVar('RSUGGESTS')
+ splitrprovides = localdata.getVar('RPROVIDES')
+ splitrreplaces = localdata.getVar('RREPLACES')
+ splitrconflicts = localdata.getVar('RCONFLICTS')
splitrobsoletes = []
- splitrpreinst = localdata.getVar('pkg_preinst', True)
- splitrpostinst = localdata.getVar('pkg_postinst', True)
- splitrprerm = localdata.getVar('pkg_prerm', True)
- splitrpostrm = localdata.getVar('pkg_postrm', True)
+ splitrpreinst = localdata.getVar('pkg_preinst')
+ splitrpostinst = localdata.getVar('pkg_postinst')
+ splitrprerm = localdata.getVar('pkg_prerm')
+ splitrpostrm = localdata.getVar('pkg_postrm')
if not perfiledeps:
@@ -382,8 +382,16 @@ python write_specfile () {
splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
+ splitrdepends = filter_nativesdk_deps(srcname, splitrdepends)
+
# Gather special src/first package data
if srcname == splitname:
+ archiving = d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and \
+ bb.data.inherits_class('archiver', d)
+ if archiving and srclicense != splitlicense:
+ bb.warn("The SRPM produced may not have the correct overall source license in the License tag. This is due to the LICENSE for the primary package and SRPM conflicting.")
+
+ srclicense = splitlicense
srcrdepends = splitrdepends
srcrrecommends = splitrrecommends
srcrsuggests = splitrsuggests
@@ -401,7 +409,6 @@ python write_specfile () {
if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
bb.note("Not creating empty RPM package for %s" % splitname)
else:
- bb.note("Creating RPM package for %s" % splitname)
spec_files_top.append('%files')
if extra_pkgdata:
package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
@@ -410,7 +417,7 @@ python write_specfile () {
bb.note("Creating RPM package for %s" % splitname)
spec_files_top.extend(file_list)
else:
- bb.note("Creating EMPTY RPM Package for %s" % splitname)
+ bb.note("Creating empty RPM package for %s" % splitname)
spec_files_top.append('')
continue
@@ -423,8 +430,7 @@ python write_specfile () {
spec_preamble_bottom.append('Release: %s' % splitrelease)
if srcepoch != splitepoch:
spec_preamble_bottom.append('Epoch: %s' % splitepoch)
- if srclicense != splitlicense:
- spec_preamble_bottom.append('License: %s' % splitlicense)
+ spec_preamble_bottom.append('License: %s' % splitlicense)
spec_preamble_bottom.append('Group: %s' % splitsection)
if srccustomtagschunk != splitcustomtagschunk:
@@ -452,25 +458,10 @@ python write_specfile () {
if splitrpostrm:
print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
- # Suggests in RPM are like recommends in OE-core!
- print_deps(splitrrecommends, "Suggests", spec_preamble_bottom, d)
- # While there is no analog for suggests... (So call them recommends for now)
- print_deps(splitrsuggests, "Recommends", spec_preamble_bottom, d)
+ print_deps(splitrrecommends, "Recommends", spec_preamble_bottom, d)
+ print_deps(splitrsuggests, "Suggests", spec_preamble_bottom, d)
print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
-
- # conflicts can not be in a provide! We will need to filter it.
- if splitrconflicts:
- depends_dict = bb.utils.explode_dep_versions2(splitrconflicts)
- newdeps_dict = {}
- for dep in depends_dict:
- if dep not in splitrprovides:
- newdeps_dict[dep] = depends_dict[dep]
- if newdeps_dict:
- splitrconflicts = bb.utils.join_deps(newdeps_dict)
- else:
- splitrconflicts = ""
-
print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
spec_preamble_bottom.append('')
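(Editor note: the hunk above removes the inline Conflicts-vs-Provides filter, and the same deletion recurs below for the source package; where that filtering now happens is not visible in this diff. As a rough standalone sketch of what the deleted code did — the helper below is hypothetical, and the real code tested membership against the raw RPROVIDES string via bb.utils.explode_dep_versions2() rather than a dict — the logic was:)

    # Hypothetical restatement of the removed filter: a conflict "can not
    # be in a provide", so any conflict that also appears among the
    # provides is dropped before writing the spec file.
    def filter_conflicts(conflicts, provides):
        return {dep: ver for dep, ver in conflicts.items() if dep not in provides}

    conflicts = {"libfoo": ">= 1.0", "bar": ""}
    provides = {"bar": ""}
    print(filter_conflicts(conflicts, provides))  # {'libfoo': '>= 1.0'}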
@@ -518,7 +509,7 @@ python write_specfile () {
bb.note("Creating RPM package for %s" % splitname)
spec_files_bottom.extend(file_list)
else:
- bb.note("Creating EMPTY RPM Package for %s" % splitname)
+ bb.note("Creating empty RPM package for %s" % splitname)
spec_files_bottom.append('')
del localdata
@@ -562,25 +553,10 @@ python write_specfile () {
if srcrpostrm:
print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
- # Suggests in RPM are like recommends in OE-core!
- print_deps(srcrrecommends, "Suggests", spec_preamble_top, d)
- # While there is no analog for suggests... (So call them recommends for now)
- print_deps(srcrsuggests, "Recommends", spec_preamble_top, d)
- print_deps(srcrprovides, "Provides", spec_preamble_top, d)
+ print_deps(srcrrecommends, "Recommends", spec_preamble_top, d)
+ print_deps(srcrsuggests, "Suggests", spec_preamble_top, d)
+ print_deps(srcrprovides + (" /bin/sh" if srcname.startswith("nativesdk-") else ""), "Provides", spec_preamble_top, d)
print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
-
- # conflicts can not be in a provide! We will need to filter it.
- if srcrconflicts:
- depends_dict = bb.utils.explode_dep_versions2(srcrconflicts)
- newdeps_dict = {}
- for dep in depends_dict:
- if dep not in srcrprovides:
- newdeps_dict[dep] = depends_dict[dep]
- if newdeps_dict:
- srcrconflicts = bb.utils.join_deps(newdeps_dict)
- else:
- srcrconflicts = ""
-
print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
spec_preamble_top.append('')
@@ -614,14 +590,11 @@ python write_specfile () {
spec_scriptlets_top.append('')
# Write the SPEC file
- try:
- specfile = open(outspecfile, 'w')
- except OSError:
- raise bb.build.FuncFailed("unable to open spec file for writing.")
+ specfile = open(outspecfile, 'w')
# RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
# of the generated spec file
- external_preamble = d.getVar("RPMSPEC_PREAMBLE", True)
+ external_preamble = d.getVar("RPMSPEC_PREAMBLE")
if external_preamble:
specfile.write(external_preamble + "\n")
@@ -649,23 +622,15 @@ python write_specfile () {
write_specfile[vardepsexclude] = "OVERRIDES"
python do_package_rpm () {
- # We need a simple way to remove the MLPREFIX from the package name,
- # and dependency information...
- def strip_multilib(name, d):
- ml = d.getVar("MLPREFIX", True)
- if ml and name and len(ml) != 0 and name.find(ml) >= 0:
- return "".join(name.split(ml))
- return name
-
- workdir = d.getVar('WORKDIR', True)
- tmpdir = d.getVar('TMPDIR', True)
- pkgd = d.getVar('PKGD', True)
- pkgdest = d.getVar('PKGDEST', True)
+ workdir = d.getVar('WORKDIR')
+ tmpdir = d.getVar('TMPDIR')
+ pkgd = d.getVar('PKGD')
+ pkgdest = d.getVar('PKGDEST')
if not workdir or not pkgd or not tmpdir:
bb.error("Variables incorrectly set, unable to package")
return
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
if not packages or packages == '':
bb.debug(1, "No packages; nothing to do")
return
@@ -674,42 +639,49 @@ python do_package_rpm () {
    # If the spec file already exists and has not been stored in
    # pseudo's files.db, it may cause the rpmbuild src.rpm step to fail,
# so remove it before doing rpmbuild src.rpm.
- srcname = strip_multilib(d.getVar('PN', True), d)
+ srcname = d.getVar('PN')
outspecfile = workdir + "/" + srcname + ".spec"
if os.path.isfile(outspecfile):
os.remove(outspecfile)
d.setVar('OUTSPECFILE', outspecfile)
bb.build.exec_func('write_specfile', d)
- perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
+ perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
if perfiledeps:
outdepends, outprovides = write_rpm_perfiledata(srcname, d)
# Setup the rpmbuild arguments...
- rpmbuild = d.getVar('RPMBUILD', True)
- targetsys = d.getVar('TARGET_SYS', True)
- targetvendor = d.getVar('HOST_VENDOR', True)
- package_arch = (d.getVar('PACKAGE_ARCH', True) or "").replace("-", "_")
- sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX', True) or "nativesdk").replace("-", "_")
- if package_arch not in "all any noarch".split() and not package_arch.endswith(sdkpkgsuffix):
- ml_prefix = (d.getVar('MLPREFIX', True) or "").replace("-", "_")
- d.setVar('PACKAGE_ARCH_EXTEND', ml_prefix + package_arch)
- else:
- d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
+ rpmbuild = d.getVar('RPMBUILD')
+ targetsys = d.getVar('TARGET_SYS')
+ targetvendor = d.getVar('HOST_VENDOR')
+
+    # Too many places in the dnf stack assume that arch-independent packages are "noarch".
+ # Let's not fight against this.
+ package_arch = (d.getVar('PACKAGE_ARCH') or "").replace("-", "_")
+ if package_arch == "all":
+ package_arch = "noarch"
+
+ sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX') or "nativesdk").replace("-", "_")
+ d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
d.setVar('RPM_PKGWRITEDIR', pkgwritedir)
- bb.debug(1, 'PKGWRITEDIR: %s' % d.getVar('RPM_PKGWRITEDIR', True))
- pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-${HOST_OS}')
- magicfile = d.expand('${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc')
+ bb.debug(1, 'PKGWRITEDIR: %s' % d.getVar('RPM_PKGWRITEDIR'))
+ pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-linux')
bb.utils.mkdirhier(pkgwritedir)
os.chmod(pkgwritedir, 0o755)
cmd = rpmbuild
- cmd = cmd + " --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
+ cmd = cmd + " --noclean --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
- cmd = cmd + " --define '_builddir " + d.getVar('S', True) + "'"
+ cmd = cmd + " --define '_builddir " + d.getVar('B') + "'"
cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
cmd = cmd + " --define '_use_internal_dependency_generator 0'"
+ cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
+ cmd = cmd + " --define '_build_id_links none'"
+ cmd = cmd + " --define '_binary_payload w6T.xzdio'"
+ cmd = cmd + " --define '_source_payload w6T.xzdio'"
+ cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
+ cmd = cmd + " --define '_buildhost reproducible'"
if perfiledeps:
cmd = cmd + " --define '__find_requires " + outdepends + "'"
cmd = cmd + " --define '__find_provides " + outprovides + "'"
@@ -718,11 +690,10 @@ python do_package_rpm () {
cmd = cmd + " --define '__find_provides %{nil}'"
cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
cmd = cmd + " --define 'debug_package %{nil}'"
- cmd = cmd + " --define '_rpmfc_magic_path " + magicfile + "'"
cmd = cmd + " --define '_tmppath " + workdir + "'"
- if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
- cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR', True) + "'"
- cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_OUTDIR', True) + "'"
+ if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
+ cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR') + "'"
+ cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_RPMOUTDIR') + "'"
cmdsrpm = cmdsrpm + " -bs " + outspecfile
# Build the .src.rpm
d.setVar('SBUILDSPEC', cmdsrpm + "\n")
@@ -730,17 +701,20 @@ python do_package_rpm () {
bb.build.exec_func('SBUILDSPEC', d)
cmd = cmd + " -bb " + outspecfile
+ # rpm 4 creates various empty directories in _topdir, let's clean them up
+ cleanupcmd = "rm -rf %s/BUILDROOT %s/SOURCES %s/SPECS %s/SRPMS" % (workdir, workdir, workdir, workdir)
+
# Build the rpm package!
- d.setVar('BUILDSPEC', cmd + "\n")
+ d.setVar('BUILDSPEC', cmd + "\n" + cleanupcmd + "\n")
d.setVarFlag('BUILDSPEC', 'func', '1')
bb.build.exec_func('BUILDSPEC', d)
- if d.getVar('RPM_SIGN_PACKAGES', True) == '1':
+ if d.getVar('RPM_SIGN_PACKAGES') == '1':
bb.build.exec_func("sign_rpm", d)
}
python () {
- if d.getVar('PACKAGES', True) != '':
+ if d.getVar('PACKAGES') != '':
deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
d.appendVarFlag('do_package_write_rpm', 'depends', deps)
d.setVarFlag('do_package_write_rpm', 'fakeroot', '1')
@@ -766,9 +740,10 @@ python do_package_write_rpm () {
do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[umask] = "022"
+do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_rpm after do_packagedata do_package
PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
-PACKAGEINDEXDEPS += "createrepo-native:do_populate_sysroot"
+PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
do_build[recrdeptask] += "do_package_write_rpm"
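(Editor note: most of the churn in this file, and in the rest of the series, is mechanical — the trailing True argument to getVar()/getVarFlag() is dropped because bitbake's datastore now expands by default. A toy datastore, written here purely to illustrate the semantics and not taken from bitbake itself, behaves like this:)

    import re

    class MiniData:
        # Toy stand-in for bitbake's data_smart datastore: expand defaults
        # to True, so getVar('X') now equals the old getVar('X', True).
        def __init__(self):
            self._vars = {}

        def setVar(self, key, value):
            self._vars[key] = value

        def getVar(self, key, expand=True):
            value = self._vars.get(key)
            if expand and isinstance(value, str):
                # Expand ${NAME} references via nested getVar() calls.
                value = re.sub(r"\$\{(\w+)\}",
                               lambda m: self.getVar(m.group(1)) or "", value)
            return value

    d = MiniData()
    d.setVar("PN", "zlib")
    d.setVar("WORKDIR", "/tmp/work/${PN}")
    print(d.getVar("WORKDIR"))         # /tmp/work/zlib
    print(d.getVar("WORKDIR", False))  # /tmp/work/${PN}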
diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass
index 854e645286..ce3ab4c8e2 100644
--- a/meta/classes/package_tar.bbclass
+++ b/meta/classes/package_tar.bbclass
@@ -4,27 +4,30 @@ IMAGE_PKGTYPE ?= "tar"
python do_package_tar () {
import subprocess
- workdir = d.getVar('WORKDIR', True)
+
+ oldcwd = os.getcwd()
+
+ workdir = d.getVar('WORKDIR')
if not workdir:
bb.error("WORKDIR not defined, unable to package")
return
- outdir = d.getVar('DEPLOY_DIR_TAR', True)
+ outdir = d.getVar('DEPLOY_DIR_TAR')
if not outdir:
bb.error("DEPLOY_DIR_TAR not defined, unable to package")
return
- dvar = d.getVar('D', True)
+ dvar = d.getVar('D')
if not dvar:
bb.error("D not defined, unable to package")
return
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
if not packages:
bb.debug(1, "PACKAGES not defined, nothing to package")
return
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
bb.utils.mkdirhier(outdir)
bb.utils.mkdirhier(dvar)
@@ -35,7 +38,6 @@ python do_package_tar () {
overrides = localdata.getVar('OVERRIDES', False)
localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg))
- bb.data.update_data(localdata)
bb.utils.mkdirhier(root)
basedir = os.path.dirname(root)
@@ -43,17 +45,19 @@ python do_package_tar () {
os.chdir(root)
dlist = os.listdir(root)
if not dlist:
- bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
+ bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
continue
args = "tar -cz --exclude=CONTROL --exclude=DEBIAN -f".split()
ret = subprocess.call(args + [tarfn] + dlist)
if ret != 0:
bb.error("Creation of tar %s failed." % tarfn)
+
+ os.chdir(oldcwd)
}
python () {
- if d.getVar('PACKAGES', True) != '':
- deps = (d.getVarFlag('do_package_write_tar', 'depends', True) or "").split()
+ if d.getVar('PACKAGES') != '':
+ deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
deps.append('tar-native:do_populate_sysroot')
deps.append('virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
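(Editor note: the do_package_tar fix above saves oldcwd before the per-package os.chdir(root) calls and restores it at the end, so the task no longer leaves the process sitting in a package staging directory. A more defensive variant of the same idea — a hypothetical helper, not what the class uses — restores the directory even when the body raises:)

    import contextlib
    import os

    @contextlib.contextmanager
    def pushd(path):
        # Enter 'path'; always return to the previous directory on exit,
        # even if the enclosed block raises an exception.
        oldcwd = os.getcwd()
        os.chdir(path)
        try:
            yield
        finally:
            os.chdir(oldcwd)

    with pushd("/tmp"):
        print(os.getcwd())  # /tmp
    print(os.getcwd())      # back where we started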
diff --git a/meta/classes/packagedata.bbclass b/meta/classes/packagedata.bbclass
index 3397f1e36b..a903e5cfd2 100644
--- a/meta/classes/packagedata.bbclass
+++ b/meta/classes/packagedata.bbclass
@@ -2,10 +2,10 @@ python read_subpackage_metadata () {
import oe.packagedata
vars = {
- "PN" : d.getVar('PN', True),
- "PE" : d.getVar('PE', True),
- "PV" : d.getVar('PV', True),
- "PR" : d.getVar('PR', True),
+ "PN" : d.getVar('PN'),
+ "PE" : d.getVar('PE'),
+ "PV" : d.getVar('PV'),
+ "PR" : d.getVar('PR'),
}
data = oe.packagedata.read_pkgdata(vars["PN"], d)
@@ -13,7 +13,7 @@ python read_subpackage_metadata () {
for key in data.keys():
d.setVar(key, data[key])
- for pkg in d.getVar('PACKAGES', True).split():
+ for pkg in d.getVar('PACKAGES').split():
sdata = oe.packagedata.read_subpkgdata(pkg, d)
for key in sdata.keys():
if key in vars:
diff --git a/meta/classes/packagefeed-stability.bbclass b/meta/classes/packagefeed-stability.bbclass
index aa01def74d..5648602564 100644
--- a/meta/classes/packagefeed-stability.bbclass
+++ b/meta/classes/packagefeed-stability.bbclass
@@ -31,7 +31,7 @@ python() {
# This assumes that the package_write task is called package_write_<pkgtype>
# and that the directory in which packages should be written is
# pointed to by the variable DEPLOY_DIR_<PKGTYPE>
- for pkgclass in (d.getVar('PACKAGE_CLASSES', True) or '').split():
+ for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split():
if pkgclass.startswith('package_'):
pkgtype = pkgclass.split('_', 1)[1]
pkgwritefunc = 'do_package_write_%s' % pkgtype
@@ -51,7 +51,7 @@ python() {
d.appendVarFlag('do_build', 'recrdeptask', ' ' + pkgcomparefunc)
- if d.getVarFlag(pkgwritefunc, 'noexec', True) or not d.getVarFlag(pkgwritefunc, 'task', True):
+ if d.getVarFlag(pkgwritefunc, 'noexec') or not d.getVarFlag(pkgwritefunc, 'task'):
# Packaging is disabled for this recipe, we shouldn't do anything
continue
@@ -71,7 +71,7 @@ python() {
# This isn't the real task function - it's a template that we use in the
# anonymous python code above
fakeroot python do_package_compare () {
- currenttask = d.getVar('BB_CURRENTTASK', True)
+ currenttask = d.getVar('BB_CURRENTTASK')
pkgtype = currenttask.rsplit('_', 1)[1]
package_compare_impl(pkgtype, d)
}
@@ -83,12 +83,12 @@ def package_compare_impl(pkgtype, d):
import subprocess
import oe.sstatesig
- pn = d.getVar('PN', True)
- deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper(), True)
+ pn = d.getVar('PN')
+ deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper())
prepath = deploydir + '-prediff/'
    # Find out what the PKGR values are
- pkgdatadir = d.getVar('PKGDATA_DIR', True)
+ pkgdatadir = d.getVar('PKGDATA_DIR')
packages = []
try:
with open(os.path.join(pkgdatadir, pn), 'r') as f:
@@ -138,7 +138,7 @@ def package_compare_impl(pkgtype, d):
files = []
docopy = False
manifest, _ = oe.sstatesig.sstate_get_manifest_filename(pkgwritetask, d)
- mlprefix = d.getVar('MLPREFIX', True)
+ mlprefix = d.getVar('MLPREFIX')
    # Copy all of the recipe's packages if any one of them differs, to make
    # sure they all have the same PR.
with open(manifest, 'r') as f:
@@ -189,7 +189,7 @@ def package_compare_impl(pkgtype, d):
# Remove all the old files and copy again if docopy
if docopy:
- bb.plain('Copying packages for recipe %s' % pn)
+ bb.note('Copying packages for recipe %s' % pn)
pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
try:
with open(pcmanifest, 'r') as f:
@@ -215,7 +215,7 @@ def package_compare_impl(pkgtype, d):
# multilib), they're identical in theory, but sstate.bbclass
    # copies it again, so keep aligned with that.
if os.path.exists(destpath) and pkgtype == 'rpm' \
- and d.getVar('PACKAGE_ARCH', True) == 'all':
+ and d.getVar('PACKAGE_ARCH') == 'all':
os.unlink(destpath)
if (os.stat(srcpath).st_dev == os.stat(destdir).st_dev):
# Use a hard link to save space
@@ -224,15 +224,15 @@ def package_compare_impl(pkgtype, d):
shutil.copyfile(srcpath, destpath)
f.write('%s\n' % destpath)
else:
- bb.plain('Not copying packages for recipe %s' % pn)
+ bb.note('Not copying packages for recipe %s' % pn)
do_cleansstate[postfuncs] += "pfs_cleanpkgs"
python pfs_cleanpkgs () {
import errno
- for pkgclass in (d.getVar('PACKAGE_CLASSES', True) or '').split():
+ for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split():
if pkgclass.startswith('package_'):
pkgtype = pkgclass.split('_', 1)[1]
- deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper(), True)
+ deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper())
prepath = deploydir + '-prediff'
pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
try:
diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass
index 3928c8a4ac..1541c8fbff 100644
--- a/meta/classes/packagegroup.bbclass
+++ b/meta/classes/packagegroup.bbclass
@@ -8,7 +8,7 @@ PACKAGES = "${PN}"
# By default, packagegroup packages do not depend on a certain architecture.
# Only if dependencies are modified by MACHINE_FEATURES, packages
-# need to be set to MACHINE_ARCH after inheriting packagegroup.bbclass
+# need to be set to MACHINE_ARCH before inheriting packagegroup.bbclass
PACKAGE_ARCH ?= "all"
# Fully expanded - so it applies the overrides as well
@@ -16,15 +16,15 @@ PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}"
LICENSE ?= "MIT"
-inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED', True) == 'all', 'allarch', '')}
+inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED') == 'all', 'allarch', '')}
# This automatically adds -dbg and -dev flavours of all PACKAGES
# to the list. Their dependencies (RRECOMMENDS) are handled as usual
# by package_depchains in a following step.
# Also mark all packages as ALLOW_EMPTY
python () {
- packages = d.getVar('PACKAGES', True).split()
- if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY', True) != '1':
+ packages = d.getVar('PACKAGES').split()
+ if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY') != '1':
types = ['', '-dbg', '-dev']
if bb.utils.contains('DISTRO_FEATURES', 'ptest', True, False, d):
types.append('-ptest')
@@ -40,17 +40,22 @@ python () {
DEPCHAIN_DBGDEFAULTDEPS = "1"
# We only need the packaging tasks - disable the rest
-do_fetch[noexec] = "1"
-do_unpack[noexec] = "1"
-do_patch[noexec] = "1"
-do_configure[noexec] = "1"
-do_compile[noexec] = "1"
-do_install[noexec] = "1"
-do_populate_sysroot[noexec] = "1"
+deltask do_fetch
+deltask do_unpack
+deltask do_patch
+deltask do_configure
+deltask do_compile
+deltask do_install
+deltask do_populate_sysroot
+
+INHIBIT_DEFAULT_DEPS = "1"
python () {
- initman = d.getVar("VIRTUAL-RUNTIME_init_manager", True)
+ if bb.data.inherits_class('nativesdk', d):
+ return
+ initman = d.getVar("VIRTUAL-RUNTIME_init_manager")
if initman and initman in ['sysvinit', 'systemd'] and not bb.utils.contains('DISTRO_FEATURES', initman, True, False, d):
bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman)
}
+CVE_PRODUCT = ""
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
index 1f6927be04..cd241f1c84 100644
--- a/meta/classes/patch.bbclass
+++ b/meta/classes/patch.bbclass
@@ -10,110 +10,79 @@ PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
inherit terminal
-def src_patches(d, all = False ):
- workdir = d.getVar('WORKDIR', True)
- fetch = bb.fetch2.Fetch([], d)
- patches = []
- sources = []
- for url in fetch.urls:
- local = patch_path(url, fetch, workdir)
- if not local:
- if all:
- local = fetch.localpath(url)
- sources.append(local)
- continue
-
- urldata = fetch.ud[url]
- parm = urldata.parm
- patchname = parm.get('pname') or os.path.basename(local)
-
- apply, reason = should_apply(parm, d)
- if not apply:
- if reason:
- bb.note("Patch %s %s" % (patchname, reason))
- continue
-
- patchparm = {'patchname': patchname}
- if "striplevel" in parm:
- striplevel = parm["striplevel"]
- elif "pnum" in parm:
- #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
- striplevel = parm["pnum"]
- else:
- striplevel = '1'
- patchparm['striplevel'] = striplevel
-
- patchdir = parm.get('patchdir')
- if patchdir:
- patchparm['patchdir'] = patchdir
-
- localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm))
- patches.append(localurl)
-
- if all:
- return sources
-
- return patches
-
-def patch_path(url, fetch, workdir):
- """Return the local path of a patch, or None if this isn't a patch"""
+python () {
+ if d.getVar('PATCHTOOL') == 'git' and d.getVar('PATCH_COMMIT_FUNCTIONS') == '1':
+ extratasks = bb.build.tasksbetween('do_unpack', 'do_patch', d)
+ try:
+ extratasks.remove('do_unpack')
+ except ValueError:
+        # Some recipes do not have a do_unpack task; ignore it
+ pass
+
+ d.appendVarFlag('do_patch', 'prefuncs', ' patch_task_patch_prefunc')
+ for task in extratasks:
+ d.appendVarFlag(task, 'postfuncs', ' patch_task_postfunc')
+}
- local = fetch.localpath(url)
- base, ext = os.path.splitext(os.path.basename(local))
- if ext in ('.gz', '.bz2', '.Z'):
- local = os.path.join(workdir, base)
- ext = os.path.splitext(base)[1]
+python patch_task_patch_prefunc() {
+ # Prefunc for do_patch
+ srcsubdir = d.getVar('S')
+
+ workdir = os.path.abspath(d.getVar('WORKDIR'))
+ testsrcdir = os.path.abspath(srcsubdir)
+ if (testsrcdir + os.sep).startswith(workdir + os.sep):
+ # Double-check that either workdir or S or some directory in-between is a git repository
+ found = False
+ while testsrcdir != workdir:
+ if os.path.exists(os.path.join(testsrcdir, '.git')):
+ found = True
+ break
+ if testsrcdir == workdir:
+ break
+ testsrcdir = os.path.dirname(testsrcdir)
+ if not found:
+ bb.fatal('PATCHTOOL = "git" set for source tree that is not a git repository. Refusing to continue as that may result in commits being made in your metadata repository.')
+
+ patchdir = os.path.join(srcsubdir, 'patches')
+ if os.path.exists(patchdir):
+ if os.listdir(patchdir):
+ d.setVar('PATCH_HAS_PATCHES_DIR', '1')
+ else:
+ os.rmdir(patchdir)
+}
- urldata = fetch.ud[url]
- if "apply" in urldata.parm:
- apply = oe.types.boolean(urldata.parm["apply"])
- if not apply:
- return
- elif ext not in (".diff", ".patch"):
- return
+python patch_task_postfunc() {
+    # Postfunc for do_patch and the other tasks between do_unpack and do_patch
+ import oe.patch
+ import shutil
+ func = d.getVar('BB_RUNTASK')
+ srcsubdir = d.getVar('S')
+
+ if os.path.exists(srcsubdir):
+ if func == 'do_patch':
+ haspatches = (d.getVar('PATCH_HAS_PATCHES_DIR') == '1')
+ patchdir = os.path.join(srcsubdir, 'patches')
+ if os.path.exists(patchdir):
+ shutil.rmtree(patchdir)
+ if haspatches:
+ stdout, _ = bb.process.run('git status --porcelain patches', cwd=srcsubdir)
+ if stdout:
+ bb.process.run('git checkout patches', cwd=srcsubdir)
+ stdout, _ = bb.process.run('git status --porcelain .', cwd=srcsubdir)
+ if stdout:
+ useroptions = []
+ oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=d)
+ bb.process.run('git add .; git %s commit -a -m "Committing changes from %s\n\n%s"' % (' '.join(useroptions), func, oe.patch.GitApplyTree.ignore_commit_prefix + ' - from %s' % func), cwd=srcsubdir)
+}
- return local
+def src_patches(d, all=False, expand=True):
+ import oe.patch
+ return oe.patch.src_patches(d, all, expand)
def should_apply(parm, d):
"""Determine if we should apply the given patch"""
-
- if "mindate" in parm or "maxdate" in parm:
- pn = d.getVar('PN', True)
- srcdate = d.getVar('SRCDATE_%s' % pn, True)
- if not srcdate:
- srcdate = d.getVar('SRCDATE', True)
-
- if srcdate == "now":
- srcdate = d.getVar('DATE', True)
-
- if "maxdate" in parm and parm["maxdate"] < srcdate:
- return False, 'is outdated'
-
- if "mindate" in parm and parm["mindate"] > srcdate:
- return False, 'is predated'
-
-
- if "minrev" in parm:
- srcrev = d.getVar('SRCREV', True)
- if srcrev and srcrev < parm["minrev"]:
- return False, 'applies to later revisions'
-
- if "maxrev" in parm:
- srcrev = d.getVar('SRCREV', True)
- if srcrev and srcrev > parm["maxrev"]:
- return False, 'applies to earlier revisions'
-
- if "rev" in parm:
- srcrev = d.getVar('SRCREV', True)
- if srcrev and parm["rev"] not in srcrev:
- return False, "doesn't apply to revision"
-
- if "notrev" in parm:
- srcrev = d.getVar('SRCREV', True)
- if srcrev and parm["notrev"] in srcrev:
- return False, "doesn't apply to revision"
-
- return True, None
+ import oe.patch
+ return oe.patch.should_apply(parm, d)
should_apply[vardepsexclude] = "DATE SRCDATE"
@@ -126,20 +95,20 @@ python patch_do_patch() {
"git": oe.patch.GitApplyTree,
}
- cls = patchsetmap[d.getVar('PATCHTOOL', True) or 'quilt']
+ cls = patchsetmap[d.getVar('PATCHTOOL') or 'quilt']
resolvermap = {
"noop": oe.patch.NOOPResolver,
"user": oe.patch.UserResolver,
}
- rcls = resolvermap[d.getVar('PATCHRESOLVE', True) or 'user']
+ rcls = resolvermap[d.getVar('PATCHRESOLVE') or 'user']
classes = {}
- s = d.getVar('S', True)
+ s = d.getVar('S')
- os.putenv('PATH', d.getVar('PATH', True))
+ os.putenv('PATH', d.getVar('PATH'))
# We must use one TMPDIR per process so that the "patch" processes
# don't generate the same temp file name.
@@ -184,6 +153,7 @@ python patch_do_patch() {
patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
addtask patch after do_unpack
+do_patch[umask] = "022"
do_patch[dirs] = "${WORKDIR}"
do_patch[depends] = "${PATCHDEPENDENCY}"
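(Editor note: patch_task_patch_prefunc above refuses to proceed when PATCHTOOL = "git" unless some directory between S and WORKDIR is a git checkout, to avoid committing into the metadata repository. Its upward walk can be restated standalone — a simplified sketch, assuming S lies under WORKDIR as the prefunc already verifies:)

    import os

    def find_enclosing_git(srcdir, topdir):
        # Walk from srcdir up toward topdir looking for a .git entry;
        # return the repository root, or None if topdir is reached first.
        srcdir = os.path.abspath(srcdir)
        topdir = os.path.abspath(topdir)
        while srcdir != topdir:
            if os.path.exists(os.path.join(srcdir, ".git")):
                return srcdir
            parent = os.path.dirname(srcdir)
            if parent == srcdir:
                return None  # hit the filesystem root before topdir
            srcdir = parent
        return None

    # Prints None unless these hypothetical paths exist with a .git dir.
    print(find_enclosing_git("/tmp/work/recipe/src", "/tmp/work"))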
diff --git a/meta/classes/perl-version.bbclass b/meta/classes/perl-version.bbclass
new file mode 100644
index 0000000000..84b67b8180
--- /dev/null
+++ b/meta/classes/perl-version.bbclass
@@ -0,0 +1,66 @@
+PERL_OWN_DIR = ""
+
+# Determine the staged version of perl from the perl configuration file
+# Assign vardepvalue, because otherwise the signature changes before and after
+# perl is built (from None to the real version in config.sh).
+get_perl_version[vardepvalue] = "${PERL_OWN_DIR}"
+def get_perl_version(d):
+ import re
+ cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh')
+ try:
+ f = open(cfg, 'r')
+ except IOError:
+ return None
+    l = f.readlines()
+    f.close()
+ r = re.compile(r"^version='(\d*\.\d*\.\d*)'")
+ for s in l:
+ m = r.match(s)
+ if m:
+ return m.group(1)
+ return None
+
+PERLVERSION := "${@get_perl_version(d)}"
+PERLVERSION[vardepvalue] = ""
+
+
+# Determine the staged arch of perl from the perl configuration file
+# Assign vardepvalue, because otherwise the signature changes before and after
+# perl is built (from None to the real value in config.sh).
+def get_perl_arch(d):
+ import re
+ cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh')
+ try:
+ f = open(cfg, 'r')
+ except IOError:
+ return None
+    l = f.readlines()
+    f.close()
+ r = re.compile("^archname='([^']*)'")
+ for s in l:
+ m = r.match(s)
+ if m:
+ return m.group(1)
+ return None
+
+PERLARCH := "${@get_perl_arch(d)}"
+PERLARCH[vardepvalue] = ""
+
+# Determine the staged arch of perl-native from the perl configuration file
+# Assign vardepvalue, because otherwise the signature changes before and after
+# perl is built (from None to the real value in config.sh).
+def get_perl_hostarch(d):
+ import re
+ cfg = d.expand('${STAGING_LIBDIR_NATIVE}/perl5/config.sh')
+ try:
+ f = open(cfg, 'r')
+ except IOError:
+ return None
+    l = f.readlines()
+    f.close()
+ r = re.compile("^archname='([^']*)'")
+ for s in l:
+ m = r.match(s)
+ if m:
+ return m.group(1)
+ return None
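(Editor note: get_perl_version(), get_perl_arch() and get_perl_hostarch() above are three near-copies differing only in the config.sh path and the key they match. The extraction itself factors out cleanly; a generic restatement — a hypothetical helper, not part of the class — looks like:)

    import re

    def parse_config_sh(text, key):
        # Pull a single-quoted value such as version='5.24.1' out of the
        # contents of perl's config.sh.
        m = re.search(r"^%s='([^']*)'" % re.escape(key), text, re.MULTILINE)
        return m.group(1) if m else None

    sample = "version='5.24.1'\narchname='x86_64-linux'\n"
    print(parse_config_sh(sample, "version"))   # 5.24.1
    print(parse_config_sh(sample, "archname"))  # x86_64-linux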
diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass
index dbe11e12da..b07f51ed56 100644
--- a/meta/classes/pixbufcache.bbclass
+++ b/meta/classes/pixbufcache.bbclass
@@ -3,14 +3,16 @@
# packages.
#
-DEPENDS += "qemu-native"
+DEPENDS_append_class-target = " qemu-native"
inherit qemu
PIXBUF_PACKAGES ??= "${PN}"
+PACKAGE_WRITE_DEPS += "qemu-native gdk-pixbuf-native"
+
pixbufcache_common() {
if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
+ $INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} libdir=${libdir} \
bindir=${bindir} base_libdir=${base_libdir}
else
@@ -28,40 +30,34 @@ fi
}
python populate_packages_append() {
- pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES', True).split()
+ pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES').split()
for pkg in pixbuf_pkgs:
bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('pixbufcache_common', True)
+ postinst += d.getVar('pixbufcache_common')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('pixbufcache_common', True)
+ postrm += d.getVar('pixbufcache_common')
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
gdkpixbuf_complete() {
- GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
+GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
}
-#
-# Add an sstate postinst hook to update the cache for native packages.
-# An error exit during populate_sysroot_setscene allows bitbake to
-# try to recover by re-building the package.
-#
-SSTATEPOSTINSTFUNCS_append_class-native = " pixbufcache_sstate_postinst"
+DEPENDS_append_class-native = " gdk-pixbuf-native"
+SYSROOT_PREPROCESS_FUNCS_append_class-native = " pixbufcache_sstate_postinst"
-# See base.bbclass for the other half of this
pixbufcache_sstate_postinst() {
- if [ "${BB_CURRENTTASK}" = "populate_sysroot" ]; then
- ${gdkpixbuf_complete}
- elif [ "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]; then
- echo "${gdkpixbuf_complete}" >> ${STAGING_DIR}/sstatecompletions
- fi
+ mkdir -p ${SYSROOT_DESTDIR}${bindir}
+ dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}
+ echo '#!/bin/sh' > $dest
+ echo "${gdkpixbuf_complete}" >> $dest
+ chmod 0755 $dest
}
-
diff --git a/meta/classes/podfix.bbclass b/meta/classes/podfix.bbclass
new file mode 100644
index 0000000000..cc8210a27e
--- /dev/null
+++ b/meta/classes/podfix.bbclass
@@ -0,0 +1,35 @@
+python pod_strip_version() {
+ import re
+
+ def opener(filename, mode):
+ if filename.endswith(".gz"):
+ import gzip
+ return gzip.open(filename, mode)
+ elif filename.endswith(".bz2"):
+ import bz2
+ return bz2.open(filename, mode)
+ else:
+ return open(filename, mode)
+
+ bad_re = re.compile(rb"Automatically generated by Pod::Man( [0-9]+.+)")
+
+ for root, dirs, files in os.walk(d.expand("${D}${mandir}")):
+ for filename in files:
+ filename = os.path.join(root, filename)
+ if not os.path.isfile(filename):
+ continue
+
+ with opener(filename, "rb") as manfile:
+ manpage = manfile.read()
+ m = bad_re.search(manpage)
+ if not m:
+ continue
+
+ bb.note("podfix: stripping version from %s" % filename)
+ os.unlink(filename)
+ with opener(filename, "wb") as manfile:
+ manfile.write(manpage[:m.start(1)])
+ manfile.write(manpage[m.end(1):])
+}
+
+do_install[postfuncs] += "pod_strip_version"
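(Editor note: to make the new class concrete — pod_strip_version rewrites each man page so that everything after the literal "Pod::Man" in the generator comment is removed, presumably to keep pages reproducible across Pod::Man upgrades. The same regex applied to an invented sample header line:)

    import re

    bad_re = re.compile(rb"Automatically generated by Pod::Man( [0-9]+.+)")

    # Sample Pod::Man header comment (made up for illustration).
    line = b'.\\" Automatically generated by Pod::Man 4.09 (Pod::Simple 3.35)'
    m = bad_re.search(line)
    # Splice out only the captured version text, as the postfunc does.
    print(line[:m.start(1)] + line[m.end(1):])
    # b'.\\" Automatically generated by Pod::Man'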
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
index a23775e6c5..d03465b6fc 100644
--- a/meta/classes/populate_sdk_base.bbclass
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -1,4 +1,4 @@
-inherit meta
+inherit meta image-postinst-intercepts
# Wildcards specifying complementary packages to install for every package that has been explicitly
# installed into the rootfs
@@ -6,20 +6,28 @@ COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev'
COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev'
COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
+COMPLEMENTARY_GLOB[src-pkgs] = '*-src'
COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
+COMPLEMENTARY_GLOB[bash-completion-pkgs] = '*-bash-completion'
def complementary_globs(featurevar, d):
all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
globs = []
- features = set((d.getVar(featurevar, True) or '').split())
+ features = set((d.getVar(featurevar) or '').split())
for name, glob in all_globs.items():
if name in features:
globs.append(glob)
return ' '.join(globs)
-SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs"
+SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs src-pkgs ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'doc-pkgs', '', d)}"
SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
+PACKAGE_ARCHS_append_task-populate-sdk = " sdk-provides-dummy-target"
+SDK_PACKAGE_ARCHS += "sdk-provides-dummy-${SDKPKGSUFFIX}"
+
+# List of locales to install, or "all" for all of them, or unset for none.
+SDKIMAGE_LINGUAS ?= "all"
+
inherit rootfs_${IMAGE_PKGTYPE}
SDK_DIR = "${WORKDIR}/sdk"
@@ -34,15 +42,29 @@ SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
TOOLCHAIN_HOST_TASK ?= "nativesdk-packagegroup-sdk-host packagegroup-cross-canadian-${MACHINE}"
TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
-TOOLCHAIN_TARGET_TASK ?= " \
- ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} \
- ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target-dbg')} \
- "
+TOOLCHAIN_TARGET_TASK ?= "${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} target-sdk-provides-dummy"
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
+# Default archived SDK's suffix
+SDK_ARCHIVE_TYPE ?= "tar.xz"
+
+# Support different SDK archive types via SDK_ARCHIVE_TYPE; zip and tar.xz are currently supported
+python () {
+ if d.getVar('SDK_ARCHIVE_TYPE') == 'zip':
+ d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
+        # SDK_ARCHIVE_CMD is used to generate the archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from the input dir ${SDK_OUTPUT}/${SDKPATH} into the output dir ${SDKDEPLOYDIR}
+        # It is recommended to cd into the input dir first, to avoid embedding the build path in the archive
+ d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
+ else:
+ d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
+ d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz -T 0 -9 > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
+}
+
SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
-SDK_DEPENDS = "virtual/fakeroot-native pixz-native"
+SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
+PATH_prepend = "${STAGING_DIR_HOST}${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
+SDK_DEPENDS += "nativesdk-glibc-locale"
# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
# could be set to the MACHINE_ARCH
@@ -57,48 +79,74 @@ SDK_PRE_INSTALL_COMMAND ?= ""
SDK_POST_INSTALL_COMMAND ?= ""
SDK_RELOCATE_AFTER_INSTALL ?= "1"
-SDKEXTPATH ?= "~/${@d.getVar('DISTRO', True)}_sdk"
-SDK_TITLE ?= "${@d.getVar('DISTRO_NAME', True) or d.getVar('DISTRO', True)} SDK"
+SDKEXTPATH ??= "~/${@d.getVar('DISTRO')}_sdk"
+SDK_TITLE ??= "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} SDK"
SDK_TARGET_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"
SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
+SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
+SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
+
python write_target_sdk_manifest () {
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
- sdkmanifestdir = os.path.dirname(d.getVar("SDK_TARGET_MANIFEST", True))
+ sdkmanifestdir = os.path.dirname(d.getVar("SDK_TARGET_MANIFEST"))
pkgs = sdk_list_installed_packages(d, True)
if not os.path.exists(sdkmanifestdir):
bb.utils.mkdirhier(sdkmanifestdir)
- with open(d.getVar('SDK_TARGET_MANIFEST', True), 'w') as output:
+ with open(d.getVar('SDK_TARGET_MANIFEST'), 'w') as output:
output.write(format_pkg_list(pkgs, 'ver'))
}
+python write_sdk_test_data() {
+ from oe.data import export2json
+ testdata = "%s/%s.testdata.json" % (d.getVar('SDKDEPLOYDIR'), d.getVar('TOOLCHAIN_OUTPUTNAME'))
+ bb.utils.mkdirhier(os.path.dirname(testdata))
+ export2json(d, testdata)
+}
+
python write_host_sdk_manifest () {
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
- sdkmanifestdir = os.path.dirname(d.getVar("SDK_HOST_MANIFEST", True))
+ sdkmanifestdir = os.path.dirname(d.getVar("SDK_HOST_MANIFEST"))
pkgs = sdk_list_installed_packages(d, False)
if not os.path.exists(sdkmanifestdir):
bb.utils.mkdirhier(sdkmanifestdir)
- with open(d.getVar('SDK_HOST_MANIFEST', True), 'w') as output:
+ with open(d.getVar('SDK_HOST_MANIFEST'), 'w') as output:
output.write(format_pkg_list(pkgs, 'ver'))
}
-POPULATE_SDK_POST_TARGET_COMMAND_append = " write_target_sdk_manifest ; "
-POPULATE_SDK_POST_HOST_COMMAND_append = " write_host_sdk_manifest; "
+POPULATE_SDK_POST_TARGET_COMMAND_append = " write_sdk_test_data ; "
+POPULATE_SDK_POST_TARGET_COMMAND_append_task-populate-sdk = " write_target_sdk_manifest ; "
+POPULATE_SDK_POST_HOST_COMMAND_append_task-populate-sdk = " write_host_sdk_manifest; "
SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
-SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; tar_sdk; ${SDK_PACKAGING_COMMAND} "
-
-# Some archs override this, we need the nativesdk version
-# turns out this is hard to get from the datastore due to TRANSLATED_TARGET_ARCH
-# manipulation.
-SDK_OLDEST_KERNEL = "3.2.0"
+SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; archive_sdk; ${SDK_PACKAGING_COMMAND} "
def populate_sdk_common(d):
from oe.sdk import populate_sdk
from oe.manifest import create_manifest, Manifest
- pn = d.getVar('PN', True)
+ # Handle package exclusions
+ excl_pkgs = (d.getVar("PACKAGE_EXCLUDE") or "").split()
+ inst_pkgs = (d.getVar("PACKAGE_INSTALL") or "").split()
+ inst_attempt_pkgs = (d.getVar("PACKAGE_INSTALL_ATTEMPTONLY") or "").split()
+
+ d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
+ d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
+
+ for pkg in excl_pkgs:
+ if pkg in inst_pkgs:
+ bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
+ inst_pkgs.remove(pkg)
+
+ if pkg in inst_attempt_pkgs:
+            bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_attempt_pkgs))
+ inst_attempt_pkgs.remove(pkg)
+
+ d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
+ d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
+
+ pn = d.getVar('PN')
runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d)
@@ -106,13 +154,13 @@ def populate_sdk_common(d):
ld.setVar("PKGDATA_DIR", "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}/pkgdata")
runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld)
runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld)
- d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK", True))
- d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", True))
+ d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK"))
+ d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY"))
# create target/host SDK manifests
- create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True),
+ create_manifest(d, manifest_dir=d.getVar('SDK_DIR'),
manifest_type=Manifest.MANIFEST_TYPE_SDK_HOST)
- create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True),
+ create_manifest(d, manifest_dir=d.getVar('SDK_DIR'),
manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET)
populate_sdk(d)
@@ -125,7 +173,7 @@ SSTATE_SKIP_CREATION_task-populate-sdk = '1'
do_populate_sdk[cleandirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
-do_populate_sdk[stamp-extra-info] = "${MACHINE}${SDKMACHINE}"
+do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}"
fakeroot create_sdk_files() {
cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
@@ -139,7 +187,7 @@ fakeroot create_sdk_files() {
python check_sdk_sysroots() {
# Fails build if there are broken or dangling symlinks in SDK sysroots
- if d.getVar('CHECK_SDK_SYSROOTS', True) != '1':
+ if d.getVar('CHECK_SDK_SYSROOTS') != '1':
# disabled, bail out
return
@@ -147,8 +195,8 @@ python check_sdk_sysroots() {
return os.path.abspath(path)
# Get scan root
- SCAN_ROOT = norm_path("%s/%s/sysroots/" % (d.getVar('SDK_OUTPUT', True),
- d.getVar('SDKPATH', True)))
+ SCAN_ROOT = norm_path("%s/%s/sysroots/" % (d.getVar('SDK_OUTPUT'),
+ d.getVar('SDKPATH')))
bb.note('Checking SDK sysroots at ' + SCAN_ROOT)
@@ -190,21 +238,23 @@ python check_sdk_sysroots() {
SDKTAROPTS = "--owner=root --group=root"
-fakeroot tar_sdk() {
+fakeroot archive_sdk() {
# Package it up
mkdir -p ${SDKDEPLOYDIR}
- cd ${SDK_OUTPUT}/${SDKPATH}
- tar ${SDKTAROPTS} -cf - . | pixz > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
+ ${SDK_ARCHIVE_CMD}
}
+TOOLCHAIN_SHAR_EXT_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-extract.sh"
+TOOLCHAIN_SHAR_REL_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-relocate.sh"
+
fakeroot create_shar() {
# copy in the template shar extractor script
- cp ${COREBASE}/meta/files/toolchain-shar-extract.sh ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
+ cp ${TOOLCHAIN_SHAR_EXT_TMPL} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
rm -f ${T}/pre_install_command ${T}/post_install_command
if [ ${SDK_RELOCATE_AFTER_INSTALL} -eq 1 ] ; then
- cp ${COREBASE}/meta/files/toolchain-shar-relocate.sh ${T}/post_install_command
+ cp ${TOOLCHAIN_SHAR_REL_TMPL} ${T}/post_install_command
fi
cat << "EOF" >> ${T}/pre_install_command
${SDK_PRE_INSTALL_COMMAND}
@@ -223,20 +273,22 @@ EOF
-e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
-e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
-e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
- -e 's#@SDK_TITLE@#${SDK_TITLE}#g' \
+ -e 's#@SDK_TITLE@#${@d.getVar("SDK_TITLE").replace('&', '\\&')}#g' \
-e 's#@SDK_VERSION@#${SDK_VERSION}#g' \
-e '/@SDK_PRE_INSTALL_COMMAND@/d' \
-e '/@SDK_POST_INSTALL_COMMAND@/d' \
+ -e 's#@SDK_GCC_VER@#${@oe.utils.host_gcc_version(d, taskcontextonly=True)}#g' \
+ -e 's#@SDK_ARCHIVE_TYPE@#${SDK_ARCHIVE_TYPE}#g' \
${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# add execution permission
chmod +x ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# append the SDK tarball
- cat ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz >> ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
+ cat ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} >> ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# delete the old tarball, we don't need it anymore
- rm ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
+ rm ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}
}
populate_sdk_log_check() {
@@ -246,8 +298,7 @@ populate_sdk_log_check() {
echo "log_check: Using $lf_path as logfile"
- if test -e "$lf_path"
- then
+ if [ -e "$lf_path" ]; then
${IMAGE_PKGTYPE}_log_check $target $lf_path
else
echo "Cannot find logfile [$lf_path]"
@@ -257,23 +308,23 @@ populate_sdk_log_check() {
}
def sdk_command_variables(d):
- return ['OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS',
- 'RPM_POSTPROCESS_COMMANDS']
+ return ['OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_PRE_TARGET_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS']
def sdk_variables(d):
variables = ['BUILD_IMAGES_FROM_FEEDS','SDK_OS','SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT',
'SDKTARGETSYSROOT','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
- 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI']
+ 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'IMAGE_INSTALL_DEBUGFS']
variables.extend(sdk_command_variables(d))
return " ".join(variables)
do_populate_sdk[vardeps] += "${@sdk_variables(d)}"
-do_populate_sdk[file-checksums] += "${COREBASE}/meta/files/toolchain-shar-relocate.sh:True \
- ${COREBASE}/meta/files/toolchain-shar-extract.sh:True"
+do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \
+ ${TOOLCHAIN_SHAR_EXT_TMPL}:True"
do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}"
-do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS', True).split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
-do_populate_sdk[rdepends] = "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_RDEPENDS', True).split()])}"
+do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS').split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
+do_populate_sdk[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('SDK_RDEPENDS').split()])}"
do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb"
+do_populate_sdk[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
addtask populate_sdk
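(Editor note: complementary_globs() near the top of this file is a small pure function once the datastore lookups are hoisted out — it maps the enabled SDKIMAGE_FEATURES names onto their package-name globs. A standalone restatement, with the COMPLEMENTARY_GLOB varflags passed in as a plain dict, behaves like this:)

    def complementary_globs(features, all_globs):
        # Return the globs whose feature name is enabled, space-joined.
        return " ".join(glob for name, glob in all_globs.items()
                        if name in features)

    globs = {"dev-pkgs": "*-dev", "dbg-pkgs": "*-dbg", "src-pkgs": "*-src"}
    print(complementary_globs({"dev-pkgs", "src-pkgs"}, globs))
    # prints: *-dev *-src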
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
index 5402988ba9..05cfc1cc15 100644
--- a/meta/classes/populate_sdk_ext.bbclass
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -11,8 +11,6 @@ TOOLCHAIN_HOST_TASK_task-populate-sdk-ext = " \
TOOLCHAIN_TARGET_TASK_task-populate-sdk-ext = ""
-SDK_RDEPENDS_append_task-populate-sdk-ext = " ${SDK_TARGETS}"
-
SDK_RELOCATE_AFTER_INSTALL_task-populate-sdk-ext = "0"
SDK_EXT = ""
@@ -21,7 +19,9 @@ SDK_EXT_task-populate-sdk-ext = "-ext"
# Options are full or minimal
SDK_EXT_TYPE ?= "full"
SDK_INCLUDE_PKGDATA ?= "0"
-SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE', True) == 'full' else '0'}"
+SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE') == 'full' else '0'}"
+SDK_INCLUDE_NATIVESDK ?= "0"
+SDK_INCLUDE_BUILDTOOLS ?= '1'
SDK_RECRDEP_TASKS ?= ""
@@ -35,6 +35,7 @@ SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
DL_DIR \
SSTATE_DIR \
TMPDIR \
+ BB_SERVER_TIMEOUT \
"
SDK_INHERIT_BLACKLIST ?= "buildhistory icecc"
SDK_UPDATE_URL ?= ""
@@ -43,19 +44,21 @@ SDK_TARGETS ?= "${PN}"
def get_sdk_install_targets(d, images_only=False):
sdk_install_targets = ''
- if images_only or d.getVar('SDK_EXT_TYPE', True) != 'minimal':
- sdk_install_targets = d.getVar('SDK_TARGETS', True)
+ if images_only or d.getVar('SDK_EXT_TYPE') != 'minimal':
+ sdk_install_targets = d.getVar('SDK_TARGETS')
depd = d.getVar('BB_TASKDEPDATA', False)
+ tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d)
+ tasklist.remove('do_build')
for v in depd.values():
- if v[1] == 'do_image_complete':
+ if v[1] in tasklist:
if v[0] not in sdk_install_targets:
sdk_install_targets += ' {}'.format(v[0])
if not images_only:
- if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1':
+ if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
sdk_install_targets += ' meta-world-pkgdata:do_allpackagedata'
- if d.getVar('SDK_INCLUDE_TOOLCHAIN', True) == '1':
+ if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1':
sdk_install_targets += ' meta-extsdk-toolchain:do_populate_sysroot'
return sdk_install_targets
@@ -69,7 +72,6 @@ OE_INIT_ENV_SCRIPT ?= "oe-init-build-env"
# COREBASE be preserved as well as untracked files.
COREBASE_FILES ?= " \
oe-init-build-env \
- oe-init-build-env-memres \
scripts \
LICENSE \
.templateconf \
@@ -77,18 +79,52 @@ COREBASE_FILES ?= " \
SDK_DIR_task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
B_task-populate-sdk-ext = "${SDK_DIR}"
-TOOLCHAINEXT_OUTPUTNAME = "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
+TOOLCHAINEXT_OUTPUTNAME ?= "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
-SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME', True) or d.getVar('DISTRO', True)} Extensible SDK"
+python write_target_sdk_ext_manifest () {
+ from oe.sdk import get_extra_sdkinfo
+ sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
+ extra_info = get_extra_sdkinfo(sstate_dir)
+
+ target = d.getVar('TARGET_SYS')
+ target_multimach = d.getVar('MULTIMACH_TARGET_SYS')
+ real_target_multimach = d.getVar('REAL_MULTIMACH_TARGET_SYS')
+
+ pkgs = {}
+ os.makedirs(os.path.dirname(d.getVar('SDK_EXT_TARGET_MANIFEST')), exist_ok=True)
+ with open(d.getVar('SDK_EXT_TARGET_MANIFEST'), 'w') as f:
+ for fn in extra_info['filesizes']:
+ info = fn.split(':')
+ if info[2] in (target, target_multimach, real_target_multimach) \
+ or info[5] == 'allarch':
+ if not info[1] in pkgs:
+            if info[1] not in pkgs:
+ pkgs[info[1]] = {}
+}
+python write_host_sdk_ext_manifest () {
+ from oe.sdk import get_extra_sdkinfo
+ sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
+ extra_info = get_extra_sdkinfo(sstate_dir)
+ host = d.getVar('BUILD_SYS')
+ with open(d.getVar('SDK_EXT_HOST_MANIFEST'), 'w') as f:
+ for fn in extra_info['filesizes']:
+ info = fn.split(':')
+ if info[2] == host:
+ f.write("%s %s %s\n" % (info[1], info[2], info[3]))
+}
+
+SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
+
+SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
def clean_esdk_builddir(d, sdkbasepath):
"""Clean up traces of the fake build for create_filtered_tasklist()"""
import shutil
- cleanpaths = 'cache conf/sanity_info conf/templateconf.cfg tmp'.split()
+ cleanpaths = 'cache conf/sanity_info tmp'.split()
for pth in cleanpaths:
fullpth = os.path.join(sdkbasepath, pth)
if os.path.isdir(fullpth):
@@ -110,10 +146,11 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
try:
with open(sdkbasepath + '/conf/local.conf', 'a') as f:
# Force the use of sstate from the build system
- f.write('\nSSTATE_DIR_forcevariable = "%s"\n' % d.getVar('SSTATE_DIR', True))
- f.write('SSTATE_MIRRORS_forcevariable = ""\n')
+ f.write('\nSSTATE_DIR_forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
+ f.write('SSTATE_MIRRORS_forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
# Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it
f.write('TMPDIR_forcevariable = "${TOPDIR}/tmp"\n')
+ f.write('TCLIBCAPPEND_forcevariable = ""\n')
# Drop uninative if the build isn't using it (or else NATIVELSBSTRING will
# be different and we won't be able to find our native sstate)
if not bb.data.inherits_class('uninative', d):
@@ -121,25 +158,23 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
# Unfortunately the default SDKPATH (or even a custom value) may contain characters that bitbake
# will not allow in its COREBASE path, so we need to rename the directory temporarily
- temp_sdkbasepath = d.getVar('SDK_OUTPUT', True) + '/tmp-renamed-sdk'
+ temp_sdkbasepath = d.getVar('SDK_OUTPUT') + '/tmp-renamed-sdk'
# Delete any existing temp dir
try:
shutil.rmtree(temp_sdkbasepath)
except FileNotFoundError:
pass
os.rename(sdkbasepath, temp_sdkbasepath)
+ cmdprefix = '. %s .; ' % conf_initpath
+ logfile = d.getVar('WORKDIR') + '/tasklist_bb_log.txt'
try:
- cmdprefix = '. %s .; ' % conf_initpath
- logfile = d.getVar('WORKDIR', True) + '/tasklist_bb_log.txt'
- try:
- oe.copy_buildsystem.check_sstate_task_list(d, get_sdk_install_targets(d), tasklistfile, cmdprefix=cmdprefix, cwd=temp_sdkbasepath, logfile=logfile)
- except bb.process.ExecutionError as e:
- msg = 'Failed to generate filtered task list for extensible SDK:\n%s' % e.stdout.rstrip()
- if 'attempted to execute unexpectedly and should have been setscened' in e.stdout:
- msg += '\n----------\n\nNOTE: "attempted to execute unexpectedly and should have been setscened" errors indicate this may be caused by missing sstate artifacts that were likely produced in earlier builds, but have been subsequently deleted for some reason.\n'
- bb.fatal(msg)
- finally:
- os.rename(temp_sdkbasepath, sdkbasepath)
+ oe.copy_buildsystem.check_sstate_task_list(d, get_sdk_install_targets(d), tasklistfile, cmdprefix=cmdprefix, cwd=temp_sdkbasepath, logfile=logfile)
+ except bb.process.ExecutionError as e:
+ msg = 'Failed to generate filtered task list for extensible SDK:\n%s' % e.stdout.rstrip()
+ if 'attempted to execute unexpectedly and should have been setscened' in e.stdout:
+ msg += '\n----------\n\nNOTE: "attempted to execute unexpectedly and should have been setscened" errors indicate this may be caused by missing sstate artifacts that were likely produced in earlier builds, but have been subsequently deleted for some reason.\n'
+ bb.fatal(msg)
+ os.rename(temp_sdkbasepath, sdkbasepath)
# Clean out residue of running bitbake, which check_sstate_task_list()
# will effectively do
clean_esdk_builddir(d, sdkbasepath)
@@ -152,7 +187,7 @@ python copy_buildsystem () {
import glob
import oe.copy_buildsystem
- oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT', True)
+ oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT')
conf_bbpath = ''
conf_initpath = ''
@@ -160,23 +195,17 @@ python copy_buildsystem () {
# Copy in all metadata layers + bitbake (as repositories)
buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d)
- baseoutpath = d.getVar('SDK_OUTPUT', True) + '/' + d.getVar('SDKPATH', True)
+ baseoutpath = d.getVar('SDK_OUTPUT') + '/' + d.getVar('SDKPATH')
# Determine if we're building a derivative extensible SDK (from devtool build-sdk)
- derivative = (d.getVar('SDK_DERIVATIVE', True) or '') == '1'
+ derivative = (d.getVar('SDK_DERIVATIVE') or '') == '1'
if derivative:
workspace_name = 'orig-workspace'
else:
workspace_name = None
- layers_copied = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers', workspace_name)
- sdkbblayers = []
- corebase = os.path.basename(d.getVar('COREBASE', True))
- for layer in layers_copied:
- if corebase == os.path.basename(layer):
- conf_bbpath = os.path.join('layers', layer, 'bitbake')
- else:
- sdkbblayers.append(layer)
+ corebase, sdkbblayers = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers', workspace_name)
+ conf_bbpath = os.path.join('layers', corebase, 'bitbake')
for path in os.listdir(baseoutpath + '/layers'):
relpath = os.path.join('layers', path, oe_init_env_script)
@@ -202,8 +231,8 @@ python copy_buildsystem () {
config.set('General', 'init_path', conf_initpath)
config.set('General', 'core_meta_subdir', core_meta_subdir)
config.add_section('SDK')
- config.set('SDK', 'sdk_targets', d.getVar('SDK_TARGETS', True))
- updateurl = d.getVar('SDK_UPDATE_URL', True)
+ config.set('SDK', 'sdk_targets', d.getVar('SDK_TARGETS'))
+ updateurl = d.getVar('SDK_UPDATE_URL')
if updateurl:
config.set('SDK', 'updateserver', updateurl)
bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf'))
@@ -215,7 +244,7 @@ python copy_buildsystem () {
pass
# Create a layer for new recipes / appends
- bbpath = d.getVar('BBPATH', True)
+ bbpath = d.getVar('BBPATH')
bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')])
# Create bblayers.conf
@@ -242,22 +271,27 @@ python copy_buildsystem () {
# Copy uninative tarball
# For now this is where uninative.bbclass expects the tarball
- uninative_file = d.expand('${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2')
- uninative_checksum = bb.utils.sha256_file(uninative_file)
- uninative_outdir = '%s/downloads/uninative/%s' % (baseoutpath, uninative_checksum)
- bb.utils.mkdirhier(uninative_outdir)
- shutil.copy(uninative_file, uninative_outdir)
-
- env_whitelist = (d.getVar('BB_ENV_EXTRAWHITE', True) or '').split()
+ if bb.data.inherits_class('uninative', d):
+ uninative_file = d.expand('${UNINATIVE_DLDIR}/' + d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH")) + '/${UNINATIVE_TARBALL}')
+ uninative_checksum = bb.utils.sha256_file(uninative_file)
+ uninative_outdir = '%s/downloads/uninative/%s' % (baseoutpath, uninative_checksum)
+ bb.utils.mkdirhier(uninative_outdir)
+ shutil.copy(uninative_file, uninative_outdir)
+
+ env_whitelist = (d.getVar('BB_ENV_EXTRAWHITE') or '').split()
env_whitelist_values = {}
# Create local.conf
- builddir = d.getVar('TOPDIR', True)
+ builddir = d.getVar('TOPDIR')
+ if derivative and os.path.exists(builddir + '/conf/site.conf'):
+ shutil.copyfile(builddir + '/conf/site.conf', baseoutpath + '/conf/site.conf')
+ if derivative and os.path.exists(builddir + '/conf/auto.conf'):
+ shutil.copyfile(builddir + '/conf/auto.conf', baseoutpath + '/conf/auto.conf')
if derivative:
shutil.copyfile(builddir + '/conf/local.conf', baseoutpath + '/conf/local.conf')
else:
- local_conf_whitelist = (d.getVar('SDK_LOCAL_CONF_WHITELIST', True) or '').split()
- local_conf_blacklist = (d.getVar('SDK_LOCAL_CONF_BLACKLIST', True) or '').split()
+ local_conf_whitelist = (d.getVar('SDK_LOCAL_CONF_WHITELIST') or '').split()
+ local_conf_blacklist = (d.getVar('SDK_LOCAL_CONF_BLACKLIST') or '').split()
def handle_var(varname, origvalue, op, newlines):
if varname in local_conf_blacklist or (origvalue.strip().startswith('/') and not varname in local_conf_whitelist):
newlines.append('# Removed original setting of %s\n' % varname)
@@ -267,8 +301,15 @@ python copy_buildsystem () {
env_whitelist_values[varname] = origvalue
return origvalue, op, 0, True
varlist = ['[^#=+ ]*']
+ oldlines = []
+ if os.path.exists(builddir + '/conf/site.conf'):
+ with open(builddir + '/conf/site.conf', 'r') as f:
+ oldlines += f.readlines()
+ if os.path.exists(builddir + '/conf/auto.conf'):
+ with open(builddir + '/conf/auto.conf', 'r') as f:
+ oldlines += f.readlines()
with open(builddir + '/conf/local.conf', 'r') as f:
- oldlines = f.readlines()
+ oldlines += f.readlines()
(updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
with open(baseoutpath + '/conf/local.conf', 'w') as f:
@@ -282,8 +323,13 @@ python copy_buildsystem () {
# Write a newline just in case there's none at the end of the original
f.write('\n')
- f.write('INHERIT += "%s"\n' % 'uninative')
- f.write('UNINATIVE_CHECKSUM[%s] = "%s"\n\n' % (d.getVar('BUILD_ARCH', True), uninative_checksum))
+ f.write('TMPDIR = "${TOPDIR}/tmp"\n')
+ f.write('TCLIBCAPPEND = ""\n')
+ f.write('DL_DIR = "${TOPDIR}/downloads"\n')
+
+ if bb.data.inherits_class('uninative', d):
+ f.write('INHERIT += "%s"\n' % 'uninative')
+ f.write('UNINATIVE_CHECKSUM[%s] = "%s"\n\n' % (d.getVar('BUILD_ARCH'), uninative_checksum))
f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
# Some classes are not suitable for SDK, remove them from INHERIT
@@ -302,11 +348,20 @@ python copy_buildsystem () {
# the sig computed from the metadata.
f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
+ # We want to be able to set this without a full reparse
+ f.write('BB_HASHCONFIG_WHITELIST_append = " SIGGEN_UNLOCKED_RECIPES"\n\n')
+
# Set up whitelist for run on install
- f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work"\n\n')
+ f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
# Hide the config information from bitbake output (since it's fixed within the SDK)
- f.write('BUILDCFG_HEADER = ""\n')
+ f.write('BUILDCFG_HEADER = ""\n\n')
+
+        f.write('# Provide a flag to indicate we are in the EXT_SDK context\n')
+ f.write('WITHIN_EXT_SDK = "1"\n\n')
+
+ # Map gcc-dependent uninative sstate cache for installer usage
+ f.write('SSTATE_MIRRORS += " file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n\n')
# Allow additional config through sdk-extra.conf
fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d)
@@ -317,7 +372,7 @@ python copy_buildsystem () {
# If you define a sdk_extraconf() function then it can contain additional config
# (Though this is awkward; sdk-extra.conf should probably be used instead)
- extraconf = (d.getVar('sdk_extraconf', True) or '').strip()
+ extraconf = (d.getVar('sdk_extraconf') or '').strip()
if extraconf:
# Strip off any leading / trailing spaces
for line in extraconf.splitlines():
@@ -326,27 +381,20 @@ python copy_buildsystem () {
f.write('require conf/locked-sigs.inc\n')
f.write('require conf/unlocked-sigs.inc\n')
- if os.path.exists(builddir + '/conf/auto.conf'):
- if derivative:
- shutil.copyfile(builddir + '/conf/auto.conf', baseoutpath + '/conf/auto.conf')
- else:
- with open(builddir + '/conf/auto.conf', 'r') as f:
- oldlines = f.readlines()
- (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
- with open(baseoutpath + '/conf/auto.conf', 'w') as f:
- f.write('# WARNING: this configuration has been automatically generated and in\n')
- f.write('# most cases should not be edited. If you need more flexibility than\n')
- f.write('# this configuration provides, it is strongly suggested that you set\n')
- f.write('# up a proper instance of the full build system and use that instead.\n\n')
- for line in newlines:
- if line.strip() and not line.startswith('#'):
- f.write(line)
+ if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
+ bb.parse.siggen.save_unitaskhashes()
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
+ shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
+
+ # Write a templateconf.cfg
+ with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f:
+ f.write('meta/conf\n')
# Ensure any variables set from the external environment (by way of
# BB_ENV_EXTRAWHITE) are set in the SDK's configuration
extralines = []
for name, value in env_whitelist_values.items():
- actualvalue = d.getVar(name, True) or ''
+ actualvalue = d.getVar(name) or ''
if value != actualvalue:
extralines.append('%s = "%s"\n' % (name, actualvalue))
if extralines:
@@ -359,48 +407,72 @@ python copy_buildsystem () {
# Filter the locked signatures file to just the sstate tasks we are interested in
excluded_targets = get_sdk_install_targets(d, images_only=True)
- sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
+ sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
+    # nativesdk-only sigfile to merge into locked-sigs.inc
+ sdk_include_nativesdk = (d.getVar("SDK_INCLUDE_NATIVESDK") == '1')
+ nativesigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
+ nativesigfile_pruned = d.getVar('WORKDIR') + '/locked-sigs_nativesdk_pruned.inc'
+
+ if sdk_include_nativesdk:
+ oe.copy_buildsystem.prune_lockedsigs([],
+ excluded_targets.split(),
+ nativesigfile,
+ True,
+ nativesigfile_pruned)
+
+ oe.copy_buildsystem.merge_lockedsigs([],
+ sigfile,
+ nativesigfile_pruned,
+ sigfile)
+
oe.copy_buildsystem.prune_lockedsigs([],
excluded_targets.split(),
sigfile,
+ False,
lockedsigs_pruned)
sstate_out = baseoutpath + '/sstate-cache'
bb.utils.remove(sstate_out, True)
- # uninative.bbclass sets NATIVELSBSTRING to 'universal'
- fixedlsbstring = 'universal'
- sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN', True) == '1')
- sdk_ext_type = d.getVar('SDK_EXT_TYPE', True)
- if sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative:
+ # uninative.bbclass sets NATIVELSBSTRING to 'universal%s' % oe.utils.host_gcc_version(d)
+ fixedlsbstring = "universal%s" % oe.utils.host_gcc_version(d)
+
+ sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1')
+ sdk_ext_type = d.getVar('SDK_EXT_TYPE')
+ if (sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative) and not sdk_include_nativesdk:
# Create the filtered task list used to generate the sstate cache shipped with the SDK
- tasklistfn = d.getVar('WORKDIR', True) + '/tasklist.txt'
+ tasklistfn = d.getVar('WORKDIR') + '/tasklist.txt'
create_filtered_tasklist(d, baseoutpath, tasklistfn, conf_initpath)
else:
tasklistfn = None
+ if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
+ bb.parse.siggen.save_unitaskhashes()
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
+ shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
+
# Add packagedata if enabled
- if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1':
- lockedsigs_base = d.getVar('WORKDIR', True) + '/locked-sigs-base.inc'
- lockedsigs_copy = d.getVar('WORKDIR', True) + '/locked-sigs-copy.inc'
+ if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
+ lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base.inc'
+ lockedsigs_copy = d.getVar('WORKDIR') + '/locked-sigs-copy.inc'
shutil.move(lockedsigs_pruned, lockedsigs_base)
oe.copy_buildsystem.merge_lockedsigs(['do_packagedata'],
lockedsigs_base,
- d.getVar('STAGING_DIR_HOST', True) + '/world-pkgdata/locked-sigs-pkgdata.inc',
+ d.getVar('STAGING_DIR_HOST') + '/world-pkgdata/locked-sigs-pkgdata.inc',
lockedsigs_pruned,
lockedsigs_copy)
if sdk_include_toolchain:
- lockedsigs_base = d.getVar('WORKDIR', True) + '/locked-sigs-base2.inc'
- lockedsigs_toolchain = d.getVar('STAGING_DIR_HOST', True) + '/locked-sigs/locked-sigs-extsdk-toolchain.inc'
+ lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base2.inc'
+ lockedsigs_toolchain = d.expand("${STAGING_DIR}/${TUNE_PKGARCH}/meta-extsdk-toolchain/locked-sigs/locked-sigs-extsdk-toolchain.inc")
shutil.move(lockedsigs_pruned, lockedsigs_base)
oe.copy_buildsystem.merge_lockedsigs([],
lockedsigs_base,
lockedsigs_toolchain,
lockedsigs_pruned)
oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_toolchain,
- d.getVar('SSTATE_DIR', True),
+ d.getVar('SSTATE_DIR'),
sstate_out, d,
fixedlsbstring,
filterfile=tasklistfn)
@@ -410,22 +482,22 @@ python copy_buildsystem () {
# Assume the user is not going to set up an additional sstate
# mirror, thus we need to copy the additional artifacts (from
# workspace recipes) into the derivative SDK
- lockedsigs_orig = d.getVar('TOPDIR', True) + '/conf/locked-sigs.inc'
+ lockedsigs_orig = d.getVar('TOPDIR') + '/conf/locked-sigs.inc'
if os.path.exists(lockedsigs_orig):
- lockedsigs_extra = d.getVar('WORKDIR', True) + '/locked-sigs-extra.inc'
+ lockedsigs_extra = d.getVar('WORKDIR') + '/locked-sigs-extra.inc'
oe.copy_buildsystem.merge_lockedsigs(None,
lockedsigs_orig,
lockedsigs_pruned,
None,
lockedsigs_extra)
oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_extra,
- d.getVar('SSTATE_DIR', True),
+ d.getVar('SSTATE_DIR'),
sstate_out, d,
fixedlsbstring,
filterfile=tasklistfn)
else:
oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned,
- d.getVar('SSTATE_DIR', True),
+ d.getVar('SSTATE_DIR'),
sstate_out, d,
fixedlsbstring,
filterfile=tasklistfn)
@@ -456,24 +528,28 @@ python copy_buildsystem () {
def get_current_buildtools(d):
"""Get the file name of the current buildtools installer"""
import glob
- btfiles = glob.glob(os.path.join(d.getVar('SDK_DEPLOY', True), '*-buildtools-nativesdk-standalone-*.sh'))
+ btfiles = glob.glob(os.path.join(d.getVar('SDK_DEPLOY'), '*-buildtools-nativesdk-standalone-*.sh'))
btfiles.sort(key=os.path.getctime)
return os.path.basename(btfiles[-1])
def get_sdk_required_utilities(buildtools_fn, d):
"""Find required utilities that aren't provided by the buildtools"""
- sanity_required_utilities = (d.getVar('SANITY_REQUIRED_UTILITIES', True) or '').split()
+ sanity_required_utilities = (d.getVar('SANITY_REQUIRED_UTILITIES') or '').split()
sanity_required_utilities.append(d.expand('${BUILD_PREFIX}gcc'))
sanity_required_utilities.append(d.expand('${BUILD_PREFIX}g++'))
- buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY', True), buildtools_fn)
- filelist, _ = bb.process.run('%s -l' % buildtools_installer)
+ if buildtools_fn:
+ buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY'), buildtools_fn)
+ filelist, _ = bb.process.run('%s -l' % buildtools_installer)
+ else:
+ buildtools_installer = None
+ filelist = ""
localdata = bb.data.createCopy(d)
localdata.setVar('SDKPATH', '.')
- sdkpathnative = localdata.getVar('SDKPATHNATIVE', True)
- sdkbindirs = [localdata.getVar('bindir_nativesdk', True),
- localdata.getVar('sbindir_nativesdk', True),
- localdata.getVar('base_bindir_nativesdk', True),
- localdata.getVar('base_sbindir_nativesdk', True)]
+ sdkpathnative = localdata.getVar('SDKPATHNATIVE')
+ sdkbindirs = [localdata.getVar('bindir_nativesdk'),
+ localdata.getVar('sbindir_nativesdk'),
+ localdata.getVar('base_bindir_nativesdk'),
+ localdata.getVar('base_sbindir_nativesdk')]
for line in filelist.splitlines():
splitline = line.split()
if len(splitline) > 5:
@@ -492,12 +568,26 @@ def get_sdk_required_utilities(buildtools_fn, d):
install_tools() {
install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}
- lnr ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath}/devtool ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/devtool
- lnr ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath}/recipetool ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/recipetool
+ scripts="devtool recipetool oe-find-native-sysroot runqemu* wic"
+ for script in $scripts; do
+ for scriptfn in `find ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath} -maxdepth 1 -executable -name "$script"`; do
+ targetscriptfn="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/$(basename $scriptfn)"
+ test -e ${targetscriptfn} || lnr ${scriptfn} ${targetscriptfn}
+ done
+ done
+ # We can't use the same method as above because files in the sysroot won't exist at this point
+ # (they get populated from sstate on installation)
+ unfsd_path="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/unfsd"
+ if [ "${SDK_INCLUDE_TOOLCHAIN}" = "1" -a ! -e $unfsd_path ] ; then
+ binrelpath=${@os.path.relpath(d.getVar('STAGING_BINDIR_NATIVE'), d.getVar('TMPDIR'))}
+ lnr ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
+ fi
touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
# find latest buildtools-tarball and install it
- install ${SDK_DEPLOY}/${SDK_BUILDTOOLS_INSTALLER} ${SDK_OUTPUT}/${SDKPATH}
+ if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
+ install ${SDK_DEPLOY}/${SDK_BUILDTOOLS_INSTALLER} ${SDK_OUTPUT}/${SDKPATH}
+ fi
install -m 0644 ${COREBASE}/meta/files/ext-sdk-prepare.py ${SDK_OUTPUT}/${SDKPATH}
}
@@ -536,11 +626,8 @@ sdk_ext_preinst() {
exit 1
fi
SDK_EXTENSIBLE="1"
- if [ "$publish" = "1" ] ; then
- EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=ext-sdk-prepare.py"
- if [ "${SDK_EXT_TYPE}" = "minimal" ] ; then
- EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
- fi
+ if [ "$publish" = "1" ] && [ "${SDK_EXT_TYPE}" = "minimal" ] ; then
+ EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
fi
}
SDK_PRE_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_preinst}"
@@ -550,19 +637,23 @@ sdk_ext_postinst() {
printf "\nExtracting buildtools...\n"
cd $target_sdk_dir
env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}"
- printf "buildtools\ny" | ./${SDK_BUILDTOOLS_INSTALLER} > buildtools.log || { printf 'ERROR: buildtools installation failed:\n' ; cat buildtools.log ; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+ if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
+ printf "buildtools\ny" | ./${SDK_BUILDTOOLS_INSTALLER} > buildtools.log || { printf 'ERROR: buildtools installation failed:\n' ; cat buildtools.log ; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
- # Delete the buildtools tar file since it won't be used again
- rm -f ./${SDK_BUILDTOOLS_INSTALLER}
- # We don't need the log either since it succeeded
- rm -f buildtools.log
+ # Delete the buildtools tar file since it won't be used again
+ rm -f ./${SDK_BUILDTOOLS_INSTALLER}
+ # We don't need the log either since it succeeded
+ rm -f buildtools.log
- # Make sure when the user sets up the environment, they also get
- # the buildtools-tarball tools in their path.
- echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
+ # Make sure when the user sets up the environment, they also get
+ # the buildtools-tarball tools in their path.
+ echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
+ fi
    # Allow bitbake environment setup to be run as part of this SDK.
echo "export OE_SKIP_SDK_CHECK=1" >> $env_setup_script
+ # Work around runqemu not knowing how to get this information within the eSDK
+ echo "export DEPLOY_DIR_IMAGE=$target_sdk_dir/tmp/${@os.path.relpath(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('TMPDIR'))}" >> $env_setup_script
# A bit of another hack, but we need this in the path only for devtool
# so put it at the end of $PATH.
@@ -573,13 +664,15 @@ sdk_ext_postinst() {
# Warn if trying to use external bitbake and the ext SDK together
echo "(which bitbake > /dev/null 2>&1 && echo 'WARNING: attempting to use the extensible SDK in an environment set up to run bitbake - this may lead to unexpected results. Please source this script in a new shell session instead.') || true" >> $env_setup_script
- if [ "$prepare_buildsystem" != "no" ]; then
+ if [ "$prepare_buildsystem" != "no" -a -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
printf "Preparing build system...\n"
# dash which is /bin/sh on Ubuntu will not preserve the
        # current working directory when first run, nor will it set $1 when
# sourcing a script. That is why this has to look so ugly.
LOGFILE="$target_sdk_dir/preparing_build_system.log"
sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+ fi
+ if [ -e $target_sdk_dir/ext-sdk-prepare.py ]; then
rm $target_sdk_dir/ext-sdk-prepare.py
fi
echo done
@@ -593,25 +686,38 @@ SDK_INSTALL_TARGETS = ""
fakeroot python do_populate_sdk_ext() {
# FIXME hopefully we can remove this restriction at some point, but uninative
# currently forces this upon us
- if d.getVar('SDK_ARCH', True) != d.getVar('BUILD_ARCH', True):
- bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH', True), d.getVar('BUILD_ARCH', True)))
+ if d.getVar('SDK_ARCH') != d.getVar('BUILD_ARCH'):
+ bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH'), d.getVar('BUILD_ARCH')))
d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
- buildtools_fn = get_current_buildtools(d)
+ if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1':
+ buildtools_fn = get_current_buildtools(d)
+ else:
+ buildtools_fn = None
d.setVar('SDK_REQUIRED_UTILITIES', get_sdk_required_utilities(buildtools_fn, d))
d.setVar('SDK_BUILDTOOLS_INSTALLER', buildtools_fn)
d.setVar('SDKDEPLOYDIR', '${SDKEXTDEPLOYDIR}')
-
+ # ESDKs have a libc from the buildtools so ensure we don't ship linguas twice
+ d.delVar('SDKIMAGE_LINGUAS')
+ if d.getVar("SDK_INCLUDE_NATIVESDK") == '1':
+ generate_nativesdk_lockedsigs(d)
populate_sdk_common(d)
}
+def generate_nativesdk_lockedsigs(d):
+ import oe.copy_buildsystem
+ sigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
+ oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
+
def get_ext_sdk_depends(d):
# Note: the deps varflag is a list not a string, so we need to specify expand=False
deps = d.getVarFlag('do_image_complete', 'deps', False)
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
deplist = ['%s:%s' % (pn, dep) for dep in deps]
- for task in ['do_image_complete', 'do_rootfs', 'do_build']:
- deplist.extend((d.getVarFlag(task, 'depends', True) or '').split())
+ tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d)
+ tasklist.append('do_rootfs')
+ for task in tasklist:
+ deplist.extend((d.getVarFlag(task, 'depends') or '').split())
return ' '.join(deplist)
python do_sdk_depends() {
@@ -619,13 +725,13 @@ python do_sdk_depends() {
# dependencies we don't need to (e.g. buildtools-tarball) and bringing those
# into the SDK's sstate-cache
import oe.copy_buildsystem
- sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
+ sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
}
addtask sdk_depends
do_sdk_depends[dirs] = "${WORKDIR}"
-do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)}"
+do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)} meta-extsdk-toolchain:do_populate_sysroot"
do_sdk_depends[recrdeptask] = "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
do_sdk_depends[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy ${SDK_RECRDEP_TASKS}"
do_sdk_depends[rdepends] = "${@get_sdk_ext_rdepends(d)}"
@@ -633,17 +739,21 @@ do_sdk_depends[rdepends] = "${@get_sdk_ext_rdepends(d)}"
def get_sdk_ext_rdepends(d):
localdata = d.createCopy()
localdata.appendVar('OVERRIDES', ':task-populate-sdk-ext')
- bb.data.update_data(localdata)
- return localdata.getVarFlag('do_populate_sdk', 'rdepends', True)
+ return localdata.getVarFlag('do_populate_sdk', 'rdepends')
do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
do_populate_sdk_ext[depends] = "${@d.getVarFlag('do_populate_sdk', 'depends', False)} \
- buildtools-tarball:do_populate_sdk uninative-tarball:do_populate_sdk \
- ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1' else ''} \
- ${@'meta-extsdk-toolchain:do_locked_sigs' if d.getVar('SDK_INCLUDE_TOOLCHAIN', True) == '1' else ''}"
+ ${@'buildtools-tarball:do_populate_sdk' if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1' else ''} \
+ ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA') == '1' else ''} \
+ ${@'meta-extsdk-toolchain:do_locked_sigs' if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1' else ''}"
-do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':do_build' for x in d.getVar('SDK_TARGETS', True).split()])}"
+# We must avoid depending on do_build here if rm_work.bbclass is active,
+# because otherwise do_rm_work may run before do_populate_sdk_ext itself.
+# We can't mark do_populate_sdk_ext and do_sdk_depends as having to
+# run before do_rm_work, because then they would also run as part
+# of normal builds.
+do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':' + (d.getVar('RM_WORK_BUILD_WITHOUT') or 'do_build') for x in d.getVar('SDK_TARGETS').split()])}"
# Make sure code changes can result in rebuild
do_populate_sdk_ext[vardeps] += "copy_buildsystem \
@@ -658,9 +768,9 @@ SDKEXTDEPLOYDIR = "${WORKDIR}/deploy-${PN}-populate-sdk-ext"
SSTATETASKS += "do_populate_sdk_ext"
SSTATE_SKIP_CREATION_task-populate-sdk-ext = '1'
-do_populate_sdk_ext[cleandirs] = "${SDKDEPLOYDIR}"
+do_populate_sdk_ext[cleandirs] = "${SDKEXTDEPLOYDIR}"
do_populate_sdk_ext[sstate-inputdirs] = "${SDKEXTDEPLOYDIR}"
do_populate_sdk_ext[sstate-outputdirs] = "${SDK_DEPLOY}"
-do_populate_sdk_ext[stamp-extra-info] = "${MACHINE}"
+do_populate_sdk_ext[stamp-extra-info] = "${MACHINE_ARCH}"
addtask populate_sdk_ext after do_sdk_depends
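For reference, the rdepends expression above expands as in this minimal sketch (the recipe and task names in the example are assumptions):

    def sdk_target_rdepends(sdk_targets, rm_work_build_without=None):
        # Each SDK target normally depends on do_build; with rm_work active,
        # RM_WORK_BUILD_WITHOUT can substitute a lighter-weight task.
        task = rm_work_build_without or 'do_build'
        return ' '.join('%s:%s' % (target, task) for target in sdk_targets.split())

    # sdk_target_rdepends('core-image-minimal', 'do_image_complete')
    #   -> 'core-image-minimal:do_image_complete'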
diff --git a/meta/classes/prexport.bbclass b/meta/classes/prexport.bbclass
index 809ec1034e..6dcf99e29f 100644
--- a/meta/classes/prexport.bbclass
+++ b/meta/classes/prexport.bbclass
@@ -15,7 +15,7 @@ python prexport_handler () {
if isinstance(e, bb.event.RecipeParsed):
import oe.prservice
#get all PR values for the current PRAUTOINX
- ver = e.data.getVar('PRSERV_DUMPOPT_VERSION', True)
+ ver = e.data.getVar('PRSERV_DUMPOPT_VERSION')
ver = ver.replace('%','-')
retval = oe.prservice.prserv_dump_db(e.data)
if not retval:
@@ -40,7 +40,7 @@ python prexport_handler () {
import oe.prservice
oe.prservice.prserv_check_avail(e.data)
#remove dumpfile
- bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE', True))
+ bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE'))
elif isinstance(e, bb.event.ParseCompleted):
import oe.prservice
#dump meta info of tables
diff --git a/meta/classes/ptest-perl.bbclass b/meta/classes/ptest-perl.bbclass
new file mode 100644
index 0000000000..a4bc40b51a
--- /dev/null
+++ b/meta/classes/ptest-perl.bbclass
@@ -0,0 +1,30 @@
+inherit ptest
+
+FILESEXTRAPATHS_prepend := "${COREBASE}/meta/files:"
+
+SRC_URI += "file://ptest-perl/run-ptest"
+
+do_install_ptest_perl() {
+ install -d ${D}${PTEST_PATH}
+ if [ ! -f ${D}${PTEST_PATH}/run-ptest ]; then
+ install -m 0755 ${WORKDIR}/ptest-perl/run-ptest ${D}${PTEST_PATH}
+ fi
+ cp -r ${B}/t ${D}${PTEST_PATH}
+ chown -R root:root ${D}${PTEST_PATH}
+}
+
+FILES_${PN}-ptest_prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest "
+
+RDEPENDS_${PN}-ptest_prepend = "perl "
+
+addtask install_ptest_perl after do_install_ptest_base before do_package
+
+python () {
+ if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+ d.setVarFlag('do_install_ptest_perl', 'fakeroot', '1')
+
+ # Remove all '*ptest_perl' tasks when ptest is not enabled
+ if not(d.getVar('PTEST_ENABLED') == "1"):
+ for i in ['do_install_ptest_perl']:
+ bb.build.deltask(i, d)
+}
diff --git a/meta/classes/ptest.bbclass b/meta/classes/ptest.bbclass
index fa3561e621..fa4c36ec76 100644
--- a/meta/classes/ptest.bbclass
+++ b/meta/classes/ptest.bbclass
@@ -2,7 +2,10 @@ SUMMARY_${PN}-ptest ?= "${SUMMARY} - Package test files"
DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \
This package contains a test directory ${PTEST_PATH} for package test purposes."
-PTEST_PATH ?= "${libdir}/${PN}/ptest"
+PTEST_PATH ?= "${libdir}/${BPN}/ptest"
+PTEST_BUILD_HOST_FILES ?= "Makefile"
+PTEST_BUILD_HOST_PATTERN ?= ""
+
FILES_${PN}-ptest = "${PTEST_PATH}"
SECTION_${PN}-ptest = "devel"
ALLOW_EMPTY_${PN}-ptest = "1"
@@ -10,6 +13,7 @@ PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
PTEST_ENABLED_class-native = ""
PTEST_ENABLED_class-nativesdk = ""
PTEST_ENABLED_class-cross-canadian = ""
+RDEPENDS_${PN}-ptest += "${PN}"
RDEPENDS_${PN}-ptest_class-native = ""
RDEPENDS_${PN}-ptest_class-nativesdk = ""
RRECOMMENDS_${PN}-ptest += "ptest-runner"
@@ -45,6 +49,53 @@ do_install_ptest_base() {
fi
do_install_ptest
chown -R root:root ${D}${PTEST_PATH}
+
+ # Strip build host paths from any installed Makefile
+ for filename in ${PTEST_BUILD_HOST_FILES}; do
+ for installed_ptest_file in $(find ${D}${PTEST_PATH} -type f -name $filename); do
+ bbnote "Stripping host paths from: $installed_ptest_file"
+ sed -e 's#${HOSTTOOLS_DIR}/*##g' \
+ -e 's#${WORKDIR}/*=#.=#g' \
+ -e 's#${WORKDIR}/*##g' \
+ -i $installed_ptest_file
+ if [ -n "${PTEST_BUILD_HOST_PATTERN}" ]; then
+ sed -E '/${PTEST_BUILD_HOST_PATTERN}/d' \
+ -i $installed_ptest_file
+ fi
+ done
+ done
+}
+
+PTEST_BINDIR_PKGD_PATH = "${PKGD}${PTEST_PATH}/bin"
+
+# This function needs to run after apply_update_alternative_renames because the
+# aforementioned function will update the ALTERNATIVE_LINK_NAME flag. Append is
+# used here to make this function run as late as possible.
+PACKAGE_PREPROCESS_FUNCS_append = "${@bb.utils.contains('PTEST_BINDIR', '1', \
+ bb.utils.contains('PTEST_ENABLED', '1', ' ptest_update_alternatives', '', d), '', d)}"
+
+python ptest_update_alternatives() {
+ """
+ This function will generate the symlinks in the PTEST_BINDIR_PKGD_PATH
+ to match the renamed binaries by update-alternatives.
+ """
+
+ if not bb.data.inherits_class('update-alternatives', d) \
+ or not update_alternatives_enabled(d):
+ return
+
+ bb.note("Generating symlinks for ptest")
+ bin_paths = { d.getVar("bindir"), d.getVar("base_bindir"),
+ d.getVar("sbindir"), d.getVar("base_sbindir") }
+ ptest_bindir = d.getVar("PTEST_BINDIR_PKGD_PATH")
+ os.mkdir(ptest_bindir)
+ for pkg in (d.getVar('PACKAGES') or "").split():
+ alternatives = update_alternatives_alt_targets(d, pkg)
+ for alt_name, alt_link, alt_target, _ in alternatives:
+ # Some alternatives are for man pages,
+ # check if the alternative is in PATH
+ if os.path.dirname(alt_link) in bin_paths:
+ os.symlink(alt_target, os.path.join(ptest_bindir, alt_name))
}
do_configure_ptest_base[dirs] = "${B}"
@@ -59,9 +110,10 @@ addtask install_ptest_base after do_install before do_package do_populate_sy
python () {
if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
d.setVarFlag('do_install_ptest_base', 'fakeroot', '1')
+ d.setVarFlag('do_install_ptest_base', 'umask', '022')
# Remove all '*ptest_base' tasks when ptest is not enabled
- if not(d.getVar('PTEST_ENABLED', True) == "1"):
+ if not(d.getVar('PTEST_ENABLED') == "1"):
for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']:
bb.build.deltask(i, d)
}
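The sed-based host-path scrubbing in do_install_ptest_base amounts to three ordered substitutions; a rough Python equivalent (the directory values are assumptions for illustration):

    import re

    hosttools_dir = '/build/tmp/hosttools'   # assumed ${HOSTTOOLS_DIR}
    workdir = '/build/tmp/work/foo/1.0-r0'   # assumed ${WORKDIR}

    def strip_host_paths(line):
        # Order matters: rewrite 'WORKDIR=...' assignments to '.=' before
        # stripping the bare WORKDIR prefix, matching the sed expressions.
        line = re.sub(re.escape(hosttools_dir) + '/*', '', line)
        line = re.sub(re.escape(workdir) + '/*=', '.=', line)
        return re.sub(re.escape(workdir) + '/*', '', line)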
diff --git a/meta/classes/pypi.bbclass b/meta/classes/pypi.bbclass
new file mode 100644
index 0000000000..e5d7ab3ce1
--- /dev/null
+++ b/meta/classes/pypi.bbclass
@@ -0,0 +1,26 @@
+def pypi_package(d):
+ bpn = d.getVar('BPN')
+ if bpn.startswith('python-'):
+ return bpn[7:]
+ elif bpn.startswith('python3-'):
+ return bpn[8:]
+ return bpn
+
+PYPI_PACKAGE ?= "${@pypi_package(d)}"
+PYPI_PACKAGE_EXT ?= "tar.gz"
+
+def pypi_src_uri(d):
+ package = d.getVar('PYPI_PACKAGE')
+ package_ext = d.getVar('PYPI_PACKAGE_EXT')
+ pv = d.getVar('PV')
+ return 'https://files.pythonhosted.org/packages/source/%s/%s/%s-%s.%s' % (package[0], package, package, pv, package_ext)
+
+PYPI_SRC_URI ?= "${@pypi_src_uri(d)}"
+
+HOMEPAGE ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
+SECTION = "devel/python"
+SRC_URI += "${PYPI_SRC_URI}"
+S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
+
+UPSTREAM_CHECK_URI ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
+UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)"
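To see what the defaults above produce, here is a standalone sketch of the same naming scheme (the recipe name and version are purely illustrative):

    def pypi_url(bpn, pv, ext='tar.gz'):
        # Strip the python-/python3- recipe prefix to get the PyPI name,
        # then build the files.pythonhosted.org source URL.
        package = bpn.split('-', 1)[1] if bpn.startswith(('python-', 'python3-')) else bpn
        return ('https://files.pythonhosted.org/packages/source/%s/%s/%s-%s.%s'
                % (package[0], package, package, pv, ext))

    # pypi_url('python3-requests', '2.18.4') ->
    # 'https://files.pythonhosted.org/packages/source/r/requests/requests-2.18.4.tar.gz'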
diff --git a/meta/classes/python3-dir.bbclass b/meta/classes/python3-dir.bbclass
index 06bb046d9c..7dd130bad9 100644
--- a/meta/classes/python3-dir.bbclass
+++ b/meta/classes/python3-dir.bbclass
@@ -1,4 +1,4 @@
-PYTHON_BASEVERSION = "3.5"
+PYTHON_BASEVERSION = "3.7"
PYTHON_ABI = "m"
PYTHON_DIR = "python${PYTHON_BASEVERSION}"
PYTHON_PN = "python3"
diff --git a/meta/classes/python3native.bbclass b/meta/classes/python3native.bbclass
index ef468b3fde..182c11aa2e 100644
--- a/meta/classes/python3native.bbclass
+++ b/meta/classes/python3native.bbclass
@@ -1,13 +1,28 @@
inherit python3-dir
-PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
-EXTRANATIVEPATH += "${PYTHON_PN}-native"
-DEPENDS_append = " ${PYTHON_PN}-native "
+PYTHON="${STAGING_BINDIR_NATIVE}/python3-native/python3"
+# PYTHON_EXECUTABLE is used by cmake
+PYTHON_EXECUTABLE="${PYTHON}"
+EXTRANATIVEPATH += "python3-native"
+DEPENDS_append = " python3-native "
# python-config and other scripts are using distutils modules
# which we patch to access these variables
export STAGING_INCDIR
export STAGING_LIBDIR
+# Packages can use
+# find_package(PythonInterp REQUIRED)
+# find_package(PythonLibs REQUIRED)
+# which ends up using libs/includes from build host
+# Therefore pre-empt that effort
+export PYTHON_LIBRARY="${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so"
+export PYTHON_INCLUDE_DIR="${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}"
+
+export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+
+# suppress host user's site-packages dirs.
+export PYTHONNOUSERSITE = "1"
+
# autoconf macros will use their internal default preference otherwise
export PYTHON
diff --git a/meta/classes/pythonnative.bbclass b/meta/classes/pythonnative.bbclass
index 4e0381b568..0e9019d1e2 100644
--- a/meta/classes/pythonnative.bbclass
+++ b/meta/classes/pythonnative.bbclass
@@ -1,16 +1,27 @@
inherit python-dir
-PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
+PYTHON="${STAGING_BINDIR_NATIVE}/python-native/python"
# PYTHON_EXECUTABLE is used by cmake
PYTHON_EXECUTABLE="${PYTHON}"
-EXTRANATIVEPATH += "${PYTHON_PN}-native"
-DEPENDS_append = " ${PYTHON_PN}-native "
+EXTRANATIVEPATH += "python-native"
+DEPENDS_append = " python-native "
# python-config and other scripts are using distutils modules
# which we patch to access these variables
export STAGING_INCDIR
export STAGING_LIBDIR
+# Packages can use
+# find_package(PythonInterp REQUIRED)
+# find_package(PythonLibs REQUIRED)
+# which ends up using libs/includes from build host
+# Therefore pre-empt that effort
+export PYTHON_LIBRARY="${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so"
+export PYTHON_INCLUDE_DIR="${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}"
+
+# suppress host user's site-packages dirs.
+export PYTHONNOUSERSITE = "1"
+
# autoconf macros will use their internal default preference otherwise
export PYTHON
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
index f2d4d1c9e1..f5c5780125 100644
--- a/meta/classes/qemu.bbclass
+++ b/meta/classes/qemu.bbclass
@@ -4,12 +4,12 @@
#
def qemu_target_binary(data):
- package_arch = data.getVar("PACKAGE_ARCH", True)
- qemu_target_binary = (data.getVar("QEMU_TARGET_BINARY_%s" % package_arch, True) or "")
+ package_arch = data.getVar("PACKAGE_ARCH")
+ qemu_target_binary = (data.getVar("QEMU_TARGET_BINARY_%s" % package_arch) or "")
if qemu_target_binary:
return qemu_target_binary
- target_arch = data.getVar("TARGET_ARCH", True)
+ target_arch = data.getVar("TARGET_ARCH")
if target_arch in ("i486", "i586", "i686"):
target_arch = "i386"
elif target_arch == "powerpc":
@@ -26,7 +26,7 @@ def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
if qemu_binary == "qemu-allarch":
qemu_binary = "qemuwrapper"
- qemu_options = data.getVar("QEMU_OPTIONS", True)
+ qemu_options = data.getVar("QEMU_OPTIONS")
return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
+ " -E LD_LIBRARY_PATH=" + ":".join(library_paths) + " "
@@ -52,7 +52,7 @@ def qemu_run_binary(data, rootfs_path, binary):
# this dance). For others (e.g. arm) a -cpu option is not necessary, since the
# qemu-arm default CPU supports all required architecture levels.
-QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True) or ""}"
+QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH')) or ""}"
QEMU_OPTIONS[vardeps] += "QEMU_EXTRAOPTIONS_${PACKAGE_ARCH}"
QEMU_EXTRAOPTIONS_ppce500v2 = " -cpu e500v2"
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass
index 97a235772d..15a9e63f2b 100644
--- a/meta/classes/qemuboot.bbclass
+++ b/meta/classes/qemuboot.bbclass
@@ -3,30 +3,52 @@
# boot by runqemu:
#
# QB_SYSTEM_NAME: qemu name, e.g., "qemu-system-i386"
+#
# QB_OPT_APPEND: options to append to qemu, e.g., "-show-cursor"
+#
# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
+#
# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
+#
# QB_MEM: memory, e.g., "-m 512"
+#
# QB_MACHINE: qemu machine, e.g., "-machine virt"
+#
# QB_CPU: qemu cpu, e.g., "-cpu qemu32"
+#
# QB_CPU_KVM: similar to QB_CPU, but used when KVM is enabled, e.g., '-cpu kvm64';
# set it when KVM is supported.
+#
# QB_KERNEL_CMDLINE_APPEND: options to append to kernel's -append
# option, e.g., "console=ttyS0 console=tty"
+#
# QB_DTB: qemu dtb name
+#
# QB_AUDIO_DRV: qemu audio driver, e.g., "alsa", set it when audio is supported
+#
# QB_AUDIO_OPT: qemu audio option, e.g., "-soundhw ac97,es1370", used
# when QB_AUDIO_DRV is set.
+#
# QB_KERNEL_ROOT: kernel's root, e.g., /dev/vda
+#
+# QB_NETWORK_DEVICE: network device, e.g., "-device virtio-net-pci,netdev=net0,mac=@MAC@";
+# it works together with QB_TAP_OPT and QB_SLIRP_OPT.
+# Note that runqemu will replace @MAC@ with a predefined MAC; you can set
+# a custom one, but that may cause conflicts when multiple qemus are
+# running on the same host.
+#
# QB_TAP_OPT: network option for 'tap' mode, e.g.,
-# "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no -device virtio-net-device,netdev=net0"
+# "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no"
# Note, runqemu will replace "@TAP@" with the one which is used, such as tap0, tap1 ...
-# QB_SLIRP_OPT: network option for SLIRP mode, e.g.,
-# "-netdev user,id=net0 -device virtio-net-device,netdev=net0"
+#
+# QB_SLIRP_OPT: network option for SLIRP mode, e.g., "-netdev user,id=net0"
+#
# QB_ROOTFS_OPT: used as rootfs, e.g.,
# "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0"
# Note, runqemu will replace "@ROOTFS@" with the one which is used, such as core-image-minimal-qemuarm64.ext4.
+#
# QB_SERIAL_OPT: serial port, e.g., "-serial mon:stdio"
+#
# QB_TCPSERIAL_OPT: tcp serial port option, e.g.,
# " -device virtio-serial-device -chardev socket,id=virtcon,port=@PORT@,host=127.0.0.1 -device virtconsole,chardev=virtcon"
# Note, runqemu will replace "@PORT@" with the port number which is used.
@@ -40,46 +62,63 @@ QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
QB_DEFAULT_FSTYPE ?= "ext4"
QB_OPT_APPEND ?= "-show-cursor"
+QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
-# Create qemuboot.conf
-ROOTFS_POSTPROCESS_COMMAND += "write_qemuboot_conf; "
+# This should be kept aligned with ROOT_VM
+QB_DRIVE_TYPE ?= "/dev/sd"
-python write_qemuboot_conf() {
- import configparser
+# Create qemuboot.conf
+addtask do_write_qemuboot_conf after do_rootfs before do_image
+IMGDEPLOYDIR ?= "${WORKDIR}/deploy-${PN}-image-complete"
- build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE', \
- 'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME', \
- 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE', \
+def qemuboot_vars(d):
+ build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE',
+ 'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME',
+ 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE',
'STAGING_DIR_HOST']
+ return build_vars + [k for k in d.keys() if k.startswith('QB_')]
- # Vars from bsp
- qb_vars = []
- for k in d.keys():
- if k.startswith('QB_'):
- qb_vars.append(k)
+do_write_qemuboot_conf[vardeps] += "${@' '.join(qemuboot_vars(d))}"
+do_write_qemuboot_conf[vardepsexclude] += "TOPDIR"
+python do_write_qemuboot_conf() {
+ import configparser
- qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('DEPLOY_DIR_IMAGE', True), d.getVar('IMAGE_NAME', True))
- qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('DEPLOY_DIR_IMAGE', True), d.getVar('IMAGE_LINK_NAME', True))
+ qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_NAME'))
+ qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
+ finalpath = d.getVar("DEPLOY_DIR_IMAGE")
+ topdir = d.getVar('TOPDIR')
cf = configparser.ConfigParser()
cf.add_section('config_bsp')
- for k in build_vars + qb_vars:
- cf.set('config_bsp', k, '%s' % d.getVar(k, True))
+ for k in sorted(qemuboot_vars(d)):
+ # qemu-helper-native sysroot is not removed by rm_work and
+ # contains all tools required by runqemu
+ if k == 'STAGING_BINDIR_NATIVE':
+ val = os.path.join(d.getVar('BASE_WORKDIR'), d.getVar('BUILD_SYS'),
+ 'qemu-helper-native/1.0-r1/recipe-sysroot-native/usr/bin/')
+ else:
+ val = d.getVar(k)
+ # we only want to write out relative paths so that we can relocate images
+ # and still run them
+ if val.startswith(topdir):
+ val = os.path.relpath(val, finalpath)
+ cf.set('config_bsp', k, '%s' % val)
# QB_DEFAULT_KERNEL's value of KERNEL_IMAGETYPE is the name of a symlink
# to the kernel file, which hinders relocatability of the qb conf.
# Read the link and replace it with the full filename of the target.
- kernel_link = os.path.join(d.getVar('DEPLOY_DIR_IMAGE', True), d.getVar('QB_DEFAULT_KERNEL', True))
- kernel = os.readlink(kernel_link)
+ kernel_link = os.path.join(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('QB_DEFAULT_KERNEL'))
+ kernel = os.path.realpath(kernel_link)
+ # we only want to write out relative paths so that we can relocate images
+ # and still run them
+ kernel = os.path.relpath(kernel, finalpath)
cf.set('config_bsp', 'QB_DEFAULT_KERNEL', kernel)
bb.utils.mkdirhier(os.path.dirname(qemuboot))
with open(qemuboot, 'w') as f:
cf.write(f)
- if d.getVar('RM_OLD_IMAGE', True) == "1" and os.path.exists(qemuboot_link):
- os.remove(os.path.realpath(qemuboot_link))
-
- if os.path.lexists(qemuboot_link):
- os.remove(qemuboot_link)
- os.symlink(os.path.basename(qemuboot), qemuboot_link)
+ if qemuboot_link != qemuboot:
+ if os.path.lexists(qemuboot_link):
+ os.remove(qemuboot_link)
+ os.symlink(os.path.basename(qemuboot), qemuboot_link)
}
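Since the file written above is plain configparser INI, a consumer such as runqemu can read it back in a few lines; a minimal sketch (the file name is hypothetical):

    import configparser

    cf = configparser.ConfigParser()
    cf.read('core-image-minimal-qemux86.qemuboot.conf')
    # configparser lower-cases option names, so QB_DEFAULT_KERNEL comes back as:
    kernel = cf.get('config_bsp', 'qb_default_kernel')
    machine = cf.get('config_bsp', 'machine')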
diff --git a/meta/classes/recipe_sanity.bbclass b/meta/classes/recipe_sanity.bbclass
index add34df9d6..7fa4a849ea 100644
--- a/meta/classes/recipe_sanity.bbclass
+++ b/meta/classes/recipe_sanity.bbclass
@@ -1,5 +1,5 @@
def __note(msg, d):
- bb.note("%s: recipe_sanity: %s" % (d.getVar("P", True), msg))
+ bb.note("%s: recipe_sanity: %s" % (d.getVar("P"), msg))
__recipe_sanity_badruntimevars = "RDEPENDS RPROVIDES RRECOMMENDS RCONFLICTS"
def bad_runtime_vars(cfgdata, d):
@@ -7,7 +7,7 @@ def bad_runtime_vars(cfgdata, d):
bb.data.inherits_class("cross", d):
return
- for var in d.getVar("__recipe_sanity_badruntimevars", True).split():
+ for var in d.getVar("__recipe_sanity_badruntimevars").split():
val = d.getVar(var, False)
if val and val != cfgdata.get(var):
__note("%s should be %s_${PN}" % (var, var), d)
@@ -15,11 +15,11 @@ def bad_runtime_vars(cfgdata, d):
__recipe_sanity_reqvars = "DESCRIPTION"
__recipe_sanity_reqdiffvars = ""
def req_vars(cfgdata, d):
- for var in d.getVar("__recipe_sanity_reqvars", True).split():
+ for var in d.getVar("__recipe_sanity_reqvars").split():
if not d.getVar(var, False):
__note("%s should be set" % var, d)
- for var in d.getVar("__recipe_sanity_reqdiffvars", True).split():
+ for var in d.getVar("__recipe_sanity_reqdiffvars").split():
val = d.getVar(var, False)
cfgval = cfgdata.get(var)
@@ -38,11 +38,11 @@ def var_renames_overwrite(cfgdata, d):
def incorrect_nonempty_PACKAGES(cfgdata, d):
if bb.data.inherits_class("native", d) or \
bb.data.inherits_class("cross", d):
- if d.getVar("PACKAGES", True):
+ if d.getVar("PACKAGES"):
return True
def can_use_autotools_base(cfgdata, d):
- cfg = d.getVar("do_configure", True)
+ cfg = d.getVar("do_configure")
if not bb.data.inherits_class("autotools", d):
return False
@@ -61,7 +61,7 @@ def can_delete_FILESPATH(cfgdata, d):
expected = cfgdata.get("FILESPATH")
expectedpaths = d.expand(expected)
unexpanded = d.getVar("FILESPATH", False)
- filespath = d.getVar("FILESPATH", True).split(":")
+ filespath = d.getVar("FILESPATH").split(":")
filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
for fp in filespath:
if not fp in expectedpaths:
@@ -70,22 +70,6 @@ def can_delete_FILESPATH(cfgdata, d):
return False
return expected != unexpanded
-def can_delete_FILESDIR(cfgdata, d):
- expected = cfgdata.get("FILESDIR")
- #expected = "${@bb.utils.which(d.getVar('FILESPATH', True), '.')}"
- unexpanded = d.getVar("FILESDIR", False)
- if unexpanded is None:
- return False
-
- expanded = os.path.normpath(d.getVar("FILESDIR", True))
- filespath = d.getVar("FILESPATH", True).split(":")
- filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
-
- return unexpanded != expected and \
- os.path.exists(expanded) and \
- (expanded in filespath or
- expanded == d.expand(expected))
-
def can_delete_others(p, cfgdata, d):
for k in ["S", "PV", "PN", "DESCRIPTION", "DEPENDS",
"SECTION", "PACKAGES", "EXTRA_OECONF", "EXTRA_OEMAKE"]:
@@ -96,7 +80,7 @@ def can_delete_others(p, cfgdata, d):
continue
try:
- expanded = d.getVar(k, True)
+ expanded = d.getVar(k)
cfgexpanded = d.expand(cfgunexpanded)
except bb.fetch.ParameterError:
continue
@@ -108,11 +92,10 @@ def can_delete_others(p, cfgdata, d):
(p, cfgunexpanded, unexpanded, expanded))
python do_recipe_sanity () {
- p = d.getVar("P", True)
- p = "%s %s %s" % (d.getVar("PN", True), d.getVar("PV", True), d.getVar("PR", True))
+ p = d.getVar("P")
+ p = "%s %s %s" % (d.getVar("PN"), d.getVar("PV"), d.getVar("PR"))
sanitychecks = [
- (can_delete_FILESDIR, "candidate for removal of FILESDIR"),
(can_delete_FILESPATH, "candidate for removal of FILESPATH"),
#(can_use_autotools_base, "candidate for use of autotools_base"),
(incorrect_nonempty_PACKAGES, "native or cross recipe with non-empty PACKAGES"),
diff --git a/meta/classes/relative_symlinks.bbclass b/meta/classes/relative_symlinks.bbclass
new file mode 100644
index 0000000000..3157737347
--- /dev/null
+++ b/meta/classes/relative_symlinks.bbclass
@@ -0,0 +1,5 @@
+do_install[postfuncs] += "install_relative_symlinks"
+
+python install_relative_symlinks () {
+ oe.path.replace_absolute_symlinks(d.getVar('D'), d)
+}
diff --git a/meta/classes/relocatable.bbclass b/meta/classes/relocatable.bbclass
index 4ca9981f44..582812c1cf 100644
--- a/meta/classes/relocatable.bbclass
+++ b/meta/classes/relocatable.bbclass
@@ -1,7 +1,18 @@
inherit chrpath
-SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess"
+SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess relocatable_native_pcfiles"
python relocatable_binaries_preprocess() {
rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
}
+
+relocatable_native_pcfiles () {
+ if [ -d ${SYSROOT_DESTDIR}${libdir}/pkgconfig ]; then
+ rel=${@os.path.relpath(d.getVar('base_prefix'), d.getVar('libdir') + "/pkgconfig")}
+ sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" ${SYSROOT_DESTDIR}${libdir}/pkgconfig/*.pc
+ fi
+ if [ -d ${SYSROOT_DESTDIR}${datadir}/pkgconfig ]; then
+ rel=${@os.path.relpath(d.getVar('base_prefix'), d.getVar('datadir') + "/pkgconfig")}
+ sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" ${SYSROOT_DESTDIR}${datadir}/pkgconfig/*.pc
+ fi
+}
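The ${pcfiledir} rewrite above hinges on one relative-path computation; for example (the prefix and directory values are assumptions):

    import os

    base_prefix = '/usr'                     # assumed ${base_prefix}
    pkgconfig_dir = '/usr/lib/pkgconfig'     # assumed ${libdir}/pkgconfig
    rel = os.path.relpath(base_prefix, pkgconfig_dir)   # -> '../..'
    # so 'prefix=/usr' in a .pc file becomes 'prefix=${pcfiledir}/../..'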
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
index 5bb231efc1..1a12db1206 100644
--- a/meta/classes/report-error.bbclass
+++ b/meta/classes/report-error.bbclass
@@ -10,7 +10,7 @@ ERR_REPORT_DIR ?= "${LOG_DIR}/error-report"
def errorreport_getdata(e):
import codecs
- logpath = e.data.getVar('ERR_REPORT_DIR', True)
+ logpath = e.data.getVar('ERR_REPORT_DIR')
datafile = os.path.join(logpath, "error-report.txt")
with codecs.open(datafile, 'r', 'utf-8') as f:
data = f.read()
@@ -19,31 +19,53 @@ def errorreport_getdata(e):
def errorreport_savedata(e, newdata, file):
import json
import codecs
- logpath = e.data.getVar('ERR_REPORT_DIR', True)
+ logpath = e.data.getVar('ERR_REPORT_DIR')
datafile = os.path.join(logpath, file)
with codecs.open(datafile, 'w', 'utf-8') as f:
json.dump(newdata, f, indent=4, sort_keys=True)
return datafile
+def get_conf_data(e, filename):
+ builddir = e.data.getVar('TOPDIR')
+ filepath = os.path.join(builddir, "conf", filename)
+ jsonstring = ""
+ if os.path.exists(filepath):
+ with open(filepath, 'r') as f:
+ for line in f.readlines():
+ if line.startswith("#") or len(line.strip()) == 0:
+ continue
+ else:
+ jsonstring=jsonstring + line
+ return jsonstring
+
python errorreport_handler () {
import json
import codecs
- logpath = e.data.getVar('ERR_REPORT_DIR', True)
+ def nativelsb():
+ nativelsbstr = e.data.getVar("NATIVELSBSTRING")
+ # provide a bit more host info in case of uninative build
+ if e.data.getVar('UNINATIVE_URL') != 'unset':
+ return '/'.join([nativelsbstr, lsb_distro_identifier(e.data)])
+ return nativelsbstr
+
+ logpath = e.data.getVar('ERR_REPORT_DIR')
datafile = os.path.join(logpath, "error-report.txt")
if isinstance(e, bb.event.BuildStarted):
bb.utils.mkdirhier(logpath)
data = {}
- machine = e.data.getVar("MACHINE", True)
+ machine = e.data.getVar("MACHINE")
data['machine'] = machine
- data['build_sys'] = e.data.getVar("BUILD_SYS", True)
- data['nativelsb'] = e.data.getVar("NATIVELSBSTRING", True)
- data['distro'] = e.data.getVar("DISTRO", True)
- data['target_sys'] = e.data.getVar("TARGET_SYS", True)
+ data['build_sys'] = e.data.getVar("BUILD_SYS")
+ data['nativelsb'] = nativelsb()
+ data['distro'] = e.data.getVar("DISTRO")
+ data['target_sys'] = e.data.getVar("TARGET_SYS")
data['failures'] = []
data['component'] = " ".join(e.getPkgs())
data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data))
+ data['local_conf'] = get_conf_data(e, 'local.conf')
+ data['auto_conf'] = get_conf_data(e, 'auto.conf')
lock = bb.utils.lockfile(datafile + '.lock')
errorreport_savedata(e, data, "error-report.txt")
bb.utils.unlockfile(lock)
@@ -51,24 +73,20 @@ python errorreport_handler () {
elif isinstance(e, bb.build.TaskFailed):
task = e.task
taskdata={}
- log = e.data.getVar('BB_LOGFILE', True)
+ log = e.data.getVar('BB_LOGFILE')
taskdata['package'] = e.data.expand("${PF}")
taskdata['task'] = task
if log:
try:
- logFile = codecs.open(log, 'r', 'utf-8')
- logdata = logFile.read()
-
+ with codecs.open(log, encoding='utf-8') as logFile:
+ logdata = logFile.read()
# Replace host-specific paths so the logs are cleaner
for d in ("TOPDIR", "TMPDIR"):
- s = e.data.getVar(d, True)
+ s = e.data.getVar(d)
if s:
logdata = logdata.replace(s, d)
-
- logFile.close()
except:
logdata = "Unable to read log file"
-
else:
logdata = "No Log"
@@ -92,7 +110,7 @@ python errorreport_handler () {
bb.utils.unlockfile(lock)
failures = jsondata['failures']
if(len(failures) > 0):
- filename = "error_report_" + e.data.getVar("BUILDNAME", True)+".txt"
+ filename = "error_report_" + e.data.getVar("BUILDNAME")+".txt"
datafile = errorreport_savedata(e, jsondata, filename)
bb.note("The errors for this build are stored in %s\nYou can send the errors to a reports server by running:\n send-error-report %s [-s server]" % (datafile, datafile))
bb.note("The contents of these logs will be posted in public if you use the above command with the default server. Please ensure you remove any identifying or proprietary information when prompted before sending.")
diff --git a/meta/classes/reproducible_build.bbclass b/meta/classes/reproducible_build.bbclass
new file mode 100644
index 0000000000..39b6e40cac
--- /dev/null
+++ b/meta/classes/reproducible_build.bbclass
@@ -0,0 +1,179 @@
+# reproducible_build.bbclass
+#
+# Sets SOURCE_DATE_EPOCH in each component's build environment.
+# Upstream components (generally) respect this environment variable,
+# using it in place of the "current" date and time.
+# See https://reproducible-builds.org/specs/source-date-epoch/
+#
+# After sources are unpacked but before they are patched, we set a reproducible value for SOURCE_DATE_EPOCH.
+# This value should be reproducible for anyone who builds the same revision from the same sources.
+#
+# There are 5 ways we determine SOURCE_DATE_EPOCH:
+#
+# 1. Use the value from the __source_date_epoch.txt file if it exists.
+# This file was most likely created in the previous build by one of the following methods 2,3,4.
+# Alternatively, it can be provided by a recipe via SRC_URI.
+#
+# If the file does not exist:
+#
+# 2. If there is a git checkout, use the last git commit timestamp.
+# Git does not preserve file timestamps on checkout.
+#
+# 3. Use the mtime of "known" files such as NEWS, ChangeLog, ...
+# This works for well-kept repositories distributed via tarball.
+#
+# 4. Use the modification time of the youngest file in the source tree, if there is one.
+# This will be the newest file from the distribution tarball, if any.
+#
+# 5. Fall back to a fixed timestamp.
+#
+# Once the value of SOURCE_DATE_EPOCH is determined, it is stored in the recipe's SDE_FILE.
+# If none of these mechanisms are suitable, replace the do_deploy_source_date_epoch task
+# with recipe-specific functionality to write the appropriate SOURCE_DATE_EPOCH into the SDE_FILE.
+#
+# If this file is found by other tasks, the value is exported in the SOURCE_DATE_EPOCH variable.
+# SOURCE_DATE_EPOCH is set for all tasks that might use it (do_configure, do_compile, do_package, ...)
+
+BUILD_REPRODUCIBLE_BINARIES ??= '1'
+inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'reproducible_build_simple', '')}
+
+SDE_DIR ="${WORKDIR}/source-date-epoch"
+SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt"
+SDE_DEPLOYDIR = "${WORKDIR}/deploy-source-date-epoch"
+
+SSTATETASKS += "do_deploy_source_date_epoch"
+
+do_deploy_source_date_epoch () {
+ echo "Deploying SDE to ${SDE_DIR}."
+ mkdir -p ${SDE_DEPLOYDIR}
+ if [ -e ${SDE_FILE} ]; then
+ cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
+ fi
+}
+
+python do_deploy_source_date_epoch_setscene () {
+ sstate_setscene(d)
+ bb.utils.mkdirhier(d.getVar('SDE_DIR'))
+ sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
+ if os.path.exists(sde_file):
+ os.rename(sde_file, d.getVar('SDE_FILE'))
+}
+
+do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
+do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
+addtask do_deploy_source_date_epoch_setscene
+addtask do_deploy_source_date_epoch before do_configure after do_patch
+
+def get_source_date_epoch_from_known_files(d, sourcedir):
+ source_date_epoch = None
+ newest_file = None
+ known_files = set(["NEWS", "ChangeLog", "Changelog", "CHANGES"])
+ for file in known_files:
+ filepath = os.path.join(sourcedir, file)
+ if os.path.isfile(filepath):
+ mtime = int(os.lstat(filepath).st_mtime)
+            # There may be more than one "known_file" present; if so, use the youngest one
+ if not source_date_epoch or mtime > source_date_epoch:
+ source_date_epoch = mtime
+ newest_file = filepath
+ if newest_file:
+ bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % newest_file)
+ return source_date_epoch
+
+def find_git_folder(d, sourcedir):
+ # First guess: WORKDIR/git
+ # This is the default git fetcher unpack path
+ workdir = d.getVar('WORKDIR')
+ gitpath = os.path.join(workdir, "git/.git")
+ if os.path.isdir(gitpath):
+ return gitpath
+
+ # Second guess: ${S}
+ gitpath = os.path.join(sourcedir, ".git")
+ if os.path.isdir(gitpath):
+ return gitpath
+
+ # Perhaps there was a subpath or destsuffix specified.
+ # Go looking in the WORKDIR
+ exclude = set(["build", "image", "license-destdir", "patches", "pseudo",
+ "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"])
+ for root, dirs, files in os.walk(workdir, topdown=True):
+ dirs[:] = [d for d in dirs if d not in exclude]
+ if '.git' in dirs:
+ return root
+
+ bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
+ return None
+
+def get_source_date_epoch_from_git(d, sourcedir):
+ source_date_epoch = None
+ if "git://" in d.getVar('SRC_URI'):
+ gitpath = find_git_folder(d, sourcedir)
+ if gitpath:
+ import subprocess
+ source_date_epoch = int(subprocess.check_output(['git','log','-1','--pretty=%ct'], cwd=gitpath))
+ bb.debug(1, "git repository: %s" % gitpath)
+ return source_date_epoch
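+
+# For reference, the check_output() call above is equivalent to running
+# "git log -1 --pretty=%ct" inside the checkout; it prints the committer
+# timestamp of the newest commit as seconds since the epoch.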
+
+def get_source_date_epoch_from_youngest_file(d, sourcedir):
+ if sourcedir == d.getVar('WORKDIR'):
+ # These sources are almost certainly not from a tarball
+ return None
+
+ # Do it the hard way: check all files and find the youngest one...
+ source_date_epoch = None
+ newest_file = None
+ for root, dirs, files in os.walk(sourcedir, topdown=True):
+ files = [f for f in files if not f[0] == '.']
+
+ for fname in files:
+ filename = os.path.join(root, fname)
+ try:
+ mtime = int(os.lstat(filename).st_mtime)
+ except (ValueError, OSError):
+ mtime = 0
+ if not source_date_epoch or mtime > source_date_epoch:
+ source_date_epoch = mtime
+ newest_file = filename
+
+ if newest_file:
+ bb.debug(1, "Newest file found: %s" % newest_file)
+ return source_date_epoch
+
+def fixed_source_date_epoch():
+ bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
+ return 0
+
+python do_create_source_date_epoch_stamp() {
+ epochfile = d.getVar('SDE_FILE')
+ if os.path.isfile(epochfile):
+ bb.debug(1, "Reusing SOURCE_DATE_EPOCH from: %s" % epochfile)
+ return
+
+ sourcedir = d.getVar('S')
+ source_date_epoch = (
+ get_source_date_epoch_from_git(d, sourcedir) or
+ get_source_date_epoch_from_known_files(d, sourcedir) or
+ get_source_date_epoch_from_youngest_file(d, sourcedir) or
+ fixed_source_date_epoch() # Last resort
+ )
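+ # Each helper above returns None (or 0) when its mechanism does not
+ # apply, so the first mechanism in this order that succeeds wins.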
+
+ bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
+ bb.utils.mkdirhier(d.getVar('SDE_DIR'))
+ with open(epochfile, 'w') as f:
+ f.write(str(source_date_epoch))
+}
+
+BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH"
+
+python () {
+ if d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1':
+ d.appendVarFlag("do_unpack", "postfuncs", " do_create_source_date_epoch_stamp")
+ epochfile = d.getVar('SDE_FILE')
+ source_date_epoch = "0"
+ if os.path.isfile(epochfile):
+ with open(epochfile, 'r') as f:
+ source_date_epoch = f.read()
+ bb.debug(1, "SOURCE_DATE_EPOCH: %s" % source_date_epoch)
+ d.setVar('SOURCE_DATE_EPOCH', source_date_epoch)
+}
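+
+# Typical usage (a sketch; any configuration file that supports INHERIT
+# works) is to enable the class globally, e.g. in local.conf:
+#
+#   INHERIT += "reproducible_build"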
diff --git a/meta/classes/reproducible_build_simple.bbclass b/meta/classes/reproducible_build_simple.bbclass
new file mode 100644
index 0000000000..d0842f0f90
--- /dev/null
+++ b/meta/classes/reproducible_build_simple.bbclass
@@ -0,0 +1,11 @@
+# Setup default environment for reproducible builds.
+
+BUILD_REPRODUCIBLE_BINARIES = "1"
+
+export PYTHONHASHSEED = "0"
+export PERL_HASH_SEED = "0"
+export SOURCE_DATE_EPOCH ??= "1520598896"
+
+REPRODUCIBLE_TIMESTAMP_ROOTFS ??= "1520598896"
+
+inherit podfix
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index b71a9d1cf8..01c2ab1c78 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -10,6 +10,14 @@
#
# RM_WORK_EXCLUDE += "icu-native icu busybox"
#
+# Recipes can also configure which entries in their ${WORKDIR}
+# are preserved besides temp, which already gets excluded by default
+# because it contains logs:
+# do_install_append () {
+# echo "bar" >${WORKDIR}/foo
+# }
+# RM_WORK_EXCLUDE_ITEMS += "foo"
+RM_WORK_EXCLUDE_ITEMS = "temp"
# Use the completion scheduler by default when rm_work is active
# to try and reduce disk usage
@@ -18,9 +26,6 @@ BB_SCHEDULER ?= "completion"
# Run the rm_work task in the idle scheduling class
BB_TASK_IONICE_LEVEL_task-rm_work = "3.0"
-RMWORK_ORIG_TASK := "${BB_DEFAULT_TASK}"
-BB_DEFAULT_TASK = "rm_work_all"
-
do_rm_work () {
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
for p in ${RM_WORK_EXCLUDE}; do
@@ -30,80 +35,83 @@ do_rm_work () {
fi
done
- cd ${WORKDIR}
- for dir in *
- do
- # Retain only logs and other files in temp, safely ignore
- # failures of removing pseudo folers on NFS2/3 server.
- if [ $dir = 'pseudo' ]; then
- rm -rf $dir 2> /dev/null || true
- elif [ $dir != 'temp' ]; then
- rm -rf $dir
- fi
- done
-
# Need to add pseudo back or subsequent work in this workdir
# might fail since setscene may not rerun to recreate it
mkdir -p ${WORKDIR}/pseudo/
+ excludes='${RM_WORK_EXCLUDE_ITEMS}'
+
# Change normal stamps into setscene stamps as they better reflect the
# fact WORKDIR is now empty
# Also leave noexec stamps since setscene stamps don't cover them
cd `dirname ${STAMP}`
for i in `basename ${STAMP}`*
do
- for j in ${SSTATETASKS} do_shared_workdir
- do
- case $i in
- *do_setscene*)
- break
- ;;
- *sigdata*)
- i=dummy
- break
- ;;
- *do_package_write*)
- i=dummy
- break
- ;;
- *do_rootfs*|*do_image*|*do_bootimg*|*do_bootdirectdisk*|*do_vmimg*)
- i=dummy
- break
- ;;
- *do_build*)
- i=dummy
- break
- ;;
+ case $i in
+ *sigdata*|*sigbasedata*)
+ # Save/skip anything that looks like a signature data file.
+ ;;
+ *do_image_complete_setscene*|*do_image_qa_setscene*)
+ # Ensure we don't 'stack' setscene extensions to these stamps with the sections below
+ ;;
+ *do_image_complete*)
+ # Promote do_image_complete stamps to setscene versions (ahead of *do_image* below)
+ mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
+ ;;
+ *do_image_qa*)
+ # Promote do_image_qa stamps to setscene versions (ahead of *do_image* below)
+ mv $i `echo $i | sed -e "s#do_image_qa#do_image_qa_setscene#"`
+ ;;
+ *do_package_write*|*do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*|*do_build*)
+ ;;
+ *do_addto_recipe_sysroot*)
+ # Preserve recipe-sysroot-native if do_addto_recipe_sysroot has been used
+ excludes="$excludes recipe-sysroot-native"
+ ;;
+ *do_package|*do_package.*|*do_package_setscene.*)
# We remove do_package entirely, including any
# sstate version since otherwise we'd need to leave 'plaindirs' around
# such as 'packages' and 'packages-split' and these can be large. No end
# of chain tasks depend directly on do_package anymore.
- *do_package|*do_package.*|*do_package_setscene.*)
- rm -f $i;
- i=dummy
- break
- ;;
- *_setscene*)
- i=dummy
- break
- ;;
- *$j|*$j.*)
- mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
- i=dummy
- break
+ rm -f $i;
+ ;;
+ *_setscene*)
+ # Skip stamps which are already setscene versions
;;
- esac
- done
- rm -f $i
+ *)
+ # For everything else: if suitable, promote the stamp to a setscene
+ # version, otherwise remove it
+ for j in ${SSTATETASKS} do_shared_workdir
+ do
+ case $i in
+ *$j|*$j.*)
+ mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
+ break
+ ;;
+ esac
+ done
+ rm -f $i
+ esac
done
-}
-addtask rm_work after do_${RMWORK_ORIG_TASK}
+ cd ${WORKDIR}
+ for dir in *
+ do
+ # Retain only logs and other files in temp; safely ignore
+ # failures when removing pseudo folders on NFS2/3 servers.
+ if [ $dir = 'pseudo' ]; then
+ rm -rf $dir 2> /dev/null || true
+ elif ! echo "$excludes" | grep -q -w "$dir"; then
+ rm -rf $dir
+ fi
+ done
+}
do_rm_work_all () {
:
}
do_rm_work_all[recrdeptask] = "do_rm_work"
-addtask rm_work_all after do_rm_work
+do_rm_work_all[noexec] = "1"
+addtask rm_work_all before do_build
do_populate_sdk[postfuncs] += "rm_work_populatesdk"
rm_work_populatesdk () {
@@ -117,13 +125,55 @@ rm_work_rootfs () {
}
rm_work_rootfs[cleandirs] = "${WORKDIR}/rootfs"
-python () {
+# This task can be used instead of do_build to trigger building
+# without also invoking do_rm_work. It only exists when rm_work.bbclass
+# is active, otherwise do_build needs to be used.
+#
+# The intended usage is
+# ${@ d.getVar('RM_WORK_BUILD_WITHOUT') or 'do_build'}
+# in places that previously used just 'do_build'.
+RM_WORK_BUILD_WITHOUT = "do_build_without_rm_work"
+do_build_without_rm_work () {
+ :
+}
+do_build_without_rm_work[noexec] = "1"
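+
+# For example, a hypothetical task elsewhere that needs this recipe fully
+# built, but must not pull in do_rm_work, could declare (sketch):
+#   do_mytask[depends] += "somerecipe:${@ d.getVar('RM_WORK_BUILD_WITHOUT') or 'do_build'}"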
+
+# We have to add these tasks already now, because all tasks are
+# meant to be defined before the RecipeTaskPreProcess event triggers.
+# The inject_rm_work event handler then merely changes task dependencies.
+addtask do_rm_work
+addtask do_build_without_rm_work
+addhandler inject_rm_work
+inject_rm_work[eventmask] = "bb.event.RecipeTaskPreProcess"
+python inject_rm_work() {
if bb.data.inherits_class('kernel', d):
- d.appendVar("RM_WORK_EXCLUDE", ' ' + d.getVar("PN", True))
+ d.appendVar("RM_WORK_EXCLUDE", ' ' + d.getVar("PN"))
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
- excludes = (d.getVar("RM_WORK_EXCLUDE", True) or "").split()
- pn = d.getVar("PN", True)
+ excludes = (d.getVar("RM_WORK_EXCLUDE") or "").split()
+ pn = d.getVar("PN")
+
+ # Determine what do_build depends upon, without including do_build
+ # itself or our own special do_rm_work_all.
+ deps = sorted((set(bb.build.preceedtask('do_build', True, d))).difference(('do_build', 'do_rm_work_all')) or "")
+
+ # deps can be empty if do_build doesn't exist, e.g. *-initial recipes
+ if not deps:
+ deps = ["do_populate_sysroot", "do_populate_lic"]
+
if pn in excludes:
d.delVarFlag('rm_work_rootfs', 'cleandirs')
d.delVarFlag('rm_work_populatesdk', 'cleandirs')
+ else:
+ # Inject do_rm_work into the tasks of the current recipe such that do_build
+ # depends on it and that it runs after all other tasks that block do_build,
+ # i.e. after all work on the current recipe is done. The reason for taking
+ # this approach instead of making do_rm_work depend on do_build is that
+ # do_build inherits additional runtime dependencies on
+ # other recipes and thus will typically run much later than completion of
+ # work in the recipe itself.
+ # In practice, addtask() here merely updates the dependencies.
+ bb.build.addtask('do_rm_work', 'do_build', ' '.join(deps), d)
+
+ # Always update do_build_without_rm_work dependencies.
+ bb.build.addtask('do_build_without_rm_work', '', ' '.join(deps), d)
}
diff --git a/meta/classes/rm_work_and_downloads.bbclass b/meta/classes/rm_work_and_downloads.bbclass
new file mode 100644
index 0000000000..7c00bea597
--- /dev/null
+++ b/meta/classes/rm_work_and_downloads.bbclass
@@ -0,0 +1,33 @@
+# Author: Patrick Ohly <patrick.ohly@intel.com>
+# Copyright: Copyright (C) 2015 Intel Corporation
+#
+# This file is licensed under the MIT license, see COPYING.MIT in
+# this source distribution for the terms.
+
+# This class is used like rm_work:
+# INHERIT += "rm_work_and_downloads"
+#
+# In addition to removing local build directories of a recipe, it also
+# removes the downloaded source. This is achieved by making the DL_DIR
+# recipe-specific. While reducing disk usage, it increases network usage (for
+# example, compiling the same source for target and host implies downloading
+# the source twice).
+#
+# Because the "do_fetch" task does not get re-run after removing the downloaded
+# sources, this class is also not suitable for incremental builds.
+#
+# Where it works well is in well-connected build environments with limited
+# disk space (like TravisCI).
+
+inherit rm_work
+
+# This would ensure that the existing do_rm_work() removes the downloads,
+# but does not work because some recipes have a circular dependency between
+# WORKDIR and DL_DIR (via ${SRCPV}?).
+# DL_DIR = "${WORKDIR}/downloads"
+
+# Instead, go up one level and remove the directory ourselves.
+DL_DIR = "${BASE_WORKDIR}/${MULTIMACH_TARGET_SYS}/${PN}/downloads"
+do_rm_work_append () {
+ rm -rf ${DL_DIR}
+}
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
index 07e265c5ac..2f171836fa 100644
--- a/meta/classes/rootfs-postcommands.bbclass
+++ b/meta/classes/rootfs-postcommands.bbclass
@@ -2,9 +2,12 @@
# Zap the root password if debug-tweaks feature is not enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
-# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks is enabled
+# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
+# Allow dropbear/openssh to accept root logins if debug-tweaks or allow-root-login is enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-root-login' ], "ssh_allow_root_login; ", "",d)}'
+
# Enable postinst logging if debug-tweaks is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
@@ -14,8 +17,19 @@ ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
+# We also need to do the same for the kernel boot parameters,
+# otherwise kernel or initramfs end up mounting the rootfs read/write
+# (the default) if supported by the underlying storage.
+#
+# We do this with _append because the default value might get set later with ?=
+# and we don't want to clobber such a default by setting a value here.
+APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
+
+# Generate a test data file with datastore variables expanded, in JSON format
+ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; "
+
# Write manifest
-IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}.rootfs.manifest"
+IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
# Set default postinst log file
POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
@@ -25,10 +39,23 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd"
ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
-# Disable DNS lookups, the SSH_DISABLE_DNS_LOOKUP can be overridden to allow
-# distros to choose not to take this change
-SSH_DISABLE_DNS_LOOKUP ?= " ssh_disable_dns_lookup ; "
-ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}"
+# Sort the user and group entries in /etc by ID in order to make the content
+# deterministic. Package installs are not deterministic, causing the ordering
+# of entries to change between builds. In case that this isn't desired,
+# the command can be overridden.
+#
+# Note that useradd-staticids.bbclass has to be used to ensure that
+# the numeric IDs of dynamically created entries remain stable.
+#
+# We want this to run as late as possible, in particular after
+# systemd_sysusers_create and set_user_group. Using _append is not
+# enough for that, set_user_group is added that way and would end
+# up running after us.
+SORT_PASSWD_POSTPROCESS_COMMAND ??= " sort_passwd; "
+python () {
+ d.appendVar('ROOTFS_POSTPROCESS_COMMAND', '${SORT_PASSWD_POSTPROCESS_COMMAND}')
+ d.appendVar('ROOTFS_POSTPROCESS_COMMAND', 'rootfs_reproducible;')
+}
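+
+# For example (sketch), a distro that prefers the unsorted installation
+# order can opt out by overriding the default:
+#   SORT_PASSWD_POSTPROCESS_COMMAND = ""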
systemd_create_users () {
for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
@@ -64,31 +91,35 @@ systemd_create_users () {
#
read_only_rootfs_hook () {
# Tweak the mount option and fs_passno for rootfs in fstab
- sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
+ if [ -f ${IMAGE_ROOTFS}/etc/fstab ]; then
+ sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
+ fi
+
+ # Tweak the "mount -o remount,rw /" command in busybox-inittab inittab
+ if [ -f ${IMAGE_ROOTFS}/etc/inittab ]; then
+ sed -i 's|/bin/mount -o remount,rw /|/bin/mount -o remount,ro /|' ${IMAGE_ROOTFS}/etc/inittab
+ fi
# If we're using openssh and the /etc/ssh directory has no pre-generated keys,
# we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
# and the keys under /var/run/ssh.
if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
- echo "SYSCONFDIR=/etc/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
else
- echo "SYSCONFDIR=/var/run/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
fi
fi
# Also tweak the key location for dropbear in the same way.
if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
- if [ -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
- echo "DROPBEAR_RSAKEY_DIR=/etc/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
- else
+ if [ ! -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
fi
fi
-
if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
# Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
@@ -100,6 +131,12 @@ read_only_rootfs_hook () {
${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
fi
fi
+
+ if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
+ # Create an empty machine-id. Per systemd upstream guidance there are three
+ # options: a) run systemd-machine-id-setup at install time, b) have / read-only
+ # with an empty machine-id file (for stateless systems), or c) boot with / writable.
+ touch ${IMAGE_ROOTFS}${sysconfdir}/machine-id
+ fi
}
#
@@ -112,15 +149,14 @@ zap_empty_root_password () {
if [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
fi
-}
+}
#
-# allow dropbear/openssh to accept root logins and logins from accounts with an empty password string
+# allow dropbear/openssh to accept logins from accounts with an empty password string
#
ssh_allow_empty_password () {
for config in sshd_config sshd_config_readonly; do
if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
- sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
fi
done
@@ -136,16 +172,35 @@ ssh_allow_empty_password () {
fi
if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then
- sed -i 's/nullok_secure/nullok/' ${IMAGE_ROOTFS}${sysconfdir}/pam.d/*
+ for f in `find ${IMAGE_ROOTFS}${sysconfdir}/pam.d/* -type f -exec test -e {} \; -print`
+ do
+ sed -i 's/nullok_secure/nullok/' $f
+ done
fi
}
-ssh_disable_dns_lookup () {
- if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then
- sed -i -e 's:#UseDNS yes:UseDNS no:' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
+#
+# allow dropbear/openssh to accept root logins
+#
+ssh_allow_root_login () {
+ for config in sshd_config sshd_config_readonly; do
+ if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
+ sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
+ fi
+ done
+
+ if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
+ if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
+ sed -i '/^DROPBEAR_EXTRA_ARGS=/ s/-w//' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
+ fi
fi
}
+python sort_passwd () {
+ import rootfspostcommands
+ rootfspostcommands.sort_passwd(d.expand('${IMAGE_ROOTFS}${sysconfdir}'))
+}
+
#
# Enable postinst logging if debug-tweaks is enabled
#
@@ -195,31 +250,13 @@ make_zimage_symlink_relative () {
fi
}
-insert_feed_uris () {
-
- echo "Building feeds for [${DISTRO}].."
-
- for line in ${FEED_URIS}
- do
- # strip leading and trailing spaces/tabs, then split into name and uri
- line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
- feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
- feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
-
- echo "Added $feed_name feed with URL $feed_uri"
-
- # insert new feed-sources
- echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/opkg/${feed_name}-feed.conf
- done
-}
-
python write_image_manifest () {
from oe.rootfs import image_list_installed_packages
from oe.utils import format_pkg_list
- deploy_dir = d.getVar('IMGDEPLOYDIR', True)
- link_name = d.getVar('IMAGE_LINK_NAME', True)
- manifest_name = d.getVar('IMAGE_MANIFEST', True)
+ deploy_dir = d.getVar('IMGDEPLOYDIR')
+ link_name = d.getVar('IMAGE_LINK_NAME')
+ manifest_name = d.getVar('IMAGE_MANIFEST')
if not manifest_name:
return
@@ -227,22 +264,25 @@ python write_image_manifest () {
pkgs = image_list_installed_packages(d)
with open(manifest_name, 'w+') as image_manifest:
image_manifest.write(format_pkg_list(pkgs, "ver"))
- image_manifest.write("\n")
- if os.path.exists(manifest_name):
+ if os.path.exists(manifest_name) and link_name:
manifest_link = deploy_dir + "/" + link_name + ".manifest"
if os.path.lexists(manifest_link):
- if d.getVar('RM_OLD_IMAGE', True) == "1" and \
- os.path.exists(os.path.realpath(manifest_link)):
- os.remove(os.path.realpath(manifest_link))
os.remove(manifest_link)
os.symlink(os.path.basename(manifest_name), manifest_link)
}
-# Can be use to create /etc/timestamp during image construction to give a reasonably
+# Can be used to create /etc/timestamp during image construction to give a reasonably
# sane default time setting
rootfs_update_timestamp () {
- date -u +%4Y%2m%2d%2H%2M%2S >${IMAGE_ROOTFS}/etc/timestamp
+ if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then
+ # Convert UTC into %4Y%2m%2d%2H%2M%2S
+ sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S`
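+ # e.g. REPRODUCIBLE_TIMESTAMP_ROOTFS = "1520598896" gives 20180309123456 (UTC)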
+ else
+ sformatted=`date -u +%4Y%2m%2d%2H%2M%2S`
+ fi
+ echo $sformatted > ${IMAGE_ROOTFS}/etc/timestamp
+ bbnote "rootfs_update_timestamp: set /etc/timestamp to $sformatted"
}
# Prevent X from being started
@@ -281,3 +321,49 @@ rootfs_check_host_user_contaminated () {
rootfs_sysroot_relativelinks () {
sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT}
}
+
+# Generate the test data JSON file
+python write_image_test_data() {
+ from oe.data import export2json
+
+ deploy_dir = d.getVar('IMGDEPLOYDIR')
+ link_name = d.getVar('IMAGE_LINK_NAME')
+ testdata_name = os.path.join(deploy_dir, "%s.testdata.json" % d.getVar('IMAGE_NAME'))
+
+ searchString = "%s/"%(d.getVar("TOPDIR")).replace("//","/")
+ export2json(d, testdata_name, searchString=searchString, replaceString="")
+
+ if os.path.exists(testdata_name) and link_name:
+ testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name)
+ if os.path.lexists(testdata_link):
+ os.remove(testdata_link)
+ os.symlink(os.path.basename(testdata_name), testdata_link)
+}
+write_image_test_data[vardepsexclude] += "TOPDIR"
+
+# Check for unsatisfied recommendations (RRECOMMENDS)
+python rootfs_log_check_recommends() {
+ log_path = d.expand("${T}/log.do_rootfs")
+ with open(log_path, 'r') as log:
+ for line in log:
+ if 'log_check' in line:
+ continue
+
+ if 'unsatisfied recommendation for' in line:
+ bb.warn('[log_check] %s: %s' % (d.getVar('PN'), line))
+}
+
+# Perform any additional adjustments needed to make the rootfs binary reproducible
+rootfs_reproducible () {
+ if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then
+ # Convert UTC into %4Y%2m%2d%2H%2M%2S
+ sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S`
+ echo $sformatted > ${IMAGE_ROOTFS}/etc/version
+ bbnote "rootfs_reproducible: set /etc/version to $sformatted"
+
+ if [ -d ${IMAGE_ROOTFS}${sysconfdir}/gconf ]; then
+ find ${IMAGE_ROOTFS}${sysconfdir}/gconf -name '%gconf.xml' -print0 | xargs -0r \
+ sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
+ fi
+ fi
+}
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
index f79fca608e..2b93796a76 100644
--- a/meta/classes/rootfs_deb.bbclass
+++ b/meta/classes/rootfs_deb.bbclass
@@ -3,18 +3,18 @@
#
ROOTFS_PKGMANAGE = "dpkg apt"
-ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
-do_rootfs[recrdeptask] += "do_package_write_deb"
+do_rootfs[recrdeptask] += "do_package_write_deb do_package_qa"
do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
+do_populate_sdk_ext[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
python rootfs_deb_bad_recommendations() {
- if d.getVar("BAD_RECOMMENDATIONS", True):
+ if d.getVar("BAD_RECOMMENDATIONS"):
bb.warn("Debian package install does not support BAD_RECOMMENDATIONS")
}
do_rootfs[prefuncs] += "rootfs_deb_bad_recommendations"
@@ -25,7 +25,7 @@ opkglibdir = "${localstatedir}/lib/opkg"
python () {
# Map TARGET_ARCH to Debian's ideas about architectures
- darch = d.getVar('SDK_ARCH', True)
+ darch = d.getVar('SDK_ARCH')
if darch in ["x86", "i486", "i586", "i686", "pentium"]:
d.setVar('DEB_SDK_ARCH', 'i386')
elif darch == "x86_64":
@@ -33,6 +33,3 @@ python () {
elif darch == "arm":
d.setVar('DEB_SDK_ARCH', 'armel')
}
-
-# This will of course only work after rootfs_deb_do_rootfs or populate_sdk_deb has been called
-DPKG_QUERY_COMMAND = "${STAGING_BINDIR_NATIVE}/dpkg-query --admindir=$INSTALL_ROOTFS_DEB/var/lib/dpkg"
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
index d5c38fef74..e73d2bfdae 100644
--- a/meta/classes/rootfs_ipk.bbclass
+++ b/meta/classes/rootfs_ipk.bbclass
@@ -7,28 +7,28 @@
EXTRAOPKGCONFIG ?= ""
ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}"
-ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
-do_rootfs[recrdeptask] += "do_package_write_ipk"
+do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa"
do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
+do_populate_sdk_ext[lockfiles] += "${WORKDIR}/ipk.lock"
OPKG_PREPROCESS_COMMANDS = ""
OPKG_POSTPROCESS_COMMANDS = ""
-OPKGLIBDIR = "${localstatedir}/lib"
+OPKGLIBDIR ??= "${localstatedir}/lib"
MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg"
python () {
- if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
- flags = d.getVarFlag('do_rootfs', 'recrdeptask', True)
+ if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
+ flags = d.getVarFlag('do_rootfs', 'recrdeptask')
flags = flags.replace("do_package_write_ipk", "")
flags = flags.replace("do_deploy", "")
flags = flags.replace("do_populate_sysroot", "")
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
index 37730a7104..51f89ea990 100644
--- a/meta/classes/rootfs_rpm.bbclass
+++ b/meta/classes/rootfs_rpm.bbclass
@@ -2,31 +2,33 @@
# Creates a root filesystem out of rpm packages
#
-ROOTFS_PKGMANAGE = "rpm smartpm"
-ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
+ROOTFS_PKGMANAGE = "rpm dnf"
-# Add 100Meg of extra space for Smart
-IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "smartpm", " + 102400", "" ,d)}"
+# dnf is using our custom distutils, and so will fail without these
+export STAGING_INCDIR
+export STAGING_LIBDIR
-# Smart is python based, so be sure python-native is available to us.
-EXTRANATIVEPATH += "python-native"
+# Add 100Meg of extra space for dnf
+IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "" ,d)}"
+
+# Dnf is python based, so be sure python3-native is available to us.
+EXTRANATIVEPATH += "python3-native"
# opkg is needed for update-alternatives
RPMROOTFSDEPENDS = "rpm-native:do_populate_sysroot \
- rpmresolve-native:do_populate_sysroot \
- python-smartpm-native:do_populate_sysroot \
- createrepo-native:do_populate_sysroot \
+ dnf-native:do_populate_sysroot \
+ createrepo-c-native:do_populate_sysroot \
opkg-native:do_populate_sysroot"
do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
-do_rootfs[recrdeptask] += "do_package_write_rpm"
+do_rootfs[recrdeptask] += "do_package_write_rpm do_package_qa"
do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
python () {
- if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
- flags = d.getVarFlag('do_rootfs', 'recrdeptask', True)
+ if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
+ flags = d.getVarFlag('do_rootfs', 'recrdeptask')
flags = flags.replace("do_package_write_rpm", "")
flags = flags.replace("do_deploy", "")
flags = flags.replace("do_populate_sysroot", "")
@@ -35,7 +37,3 @@ python () {
d.setVar('RPM_POSTPROCESS_COMMANDS', '')
}
-# Smart is python based, so be sure python-native is available to us.
-EXTRANATIVEPATH += "python-native"
-
-rpmlibdir = "/var/lib/rpm"
diff --git a/meta/classes/rootfsdebugfiles.bbclass b/meta/classes/rootfsdebugfiles.bbclass
index a558871e99..e2ba4e3647 100644
--- a/meta/classes/rootfsdebugfiles.bbclass
+++ b/meta/classes/rootfsdebugfiles.bbclass
@@ -15,6 +15,10 @@
# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ;"
# 2. Boot the image once, copy the dropbear_rsa_host_key from
# the device into your build conf directory.
+# 3. An optional third parameter can be used to set the file mode
+# of the copied target, for instance:
+# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key 0600;"
+# for files that need a specific mode (it shouldn't be too open, for example).
#
# Do not use for production images! It bypasses several
# core build mechanisms (updating the image when one
@@ -27,10 +31,11 @@ ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed
ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files ;"
rootfs_debug_files () {
#!/bin/sh -e
- echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target; do
+ echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
if [ -e "$source" ]; then
mkdir -p $(dirname $target)
cp -a $source $target
+ [ -n "$mode" ] && chmod $mode $target
fi
done
}
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index 7682ffbb8c..63ab6cf3df 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -3,10 +3,10 @@
#
SANITY_REQUIRED_UTILITIES ?= "patch diffstat makeinfo git bzip2 tar \
- gzip gawk chrpath wget cpio perl file"
+ gzip gawk chrpath wget cpio perl file which"
def bblayers_conf_file(d):
- return os.path.join(d.getVar('TOPDIR', True), 'conf/bblayers.conf')
+ return os.path.join(d.getVar('TOPDIR'), 'conf/bblayers.conf')
def sanity_conf_read(fn):
with open(fn, 'r') as f:
@@ -39,8 +39,8 @@ SANITY_DIFF_TOOL ?= "meld"
SANITY_LOCALCONF_SAMPLE ?= "${COREBASE}/meta*/conf/local.conf.sample"
python oecore_update_localconf() {
# Check we are using a valid local.conf
- current_conf = d.getVar('CONF_VERSION', True)
- conf_version = d.getVar('LOCALCONF_VERSION', True)
+ current_conf = d.getVar('CONF_VERSION')
+ conf_version = d.getVar('LOCALCONF_VERSION')
failmsg = """Your version of local.conf was generated from an older/newer version of
local.conf.sample and there have been updates made to this file. Please compare the two
@@ -59,8 +59,8 @@ is a good way to visualise the changes."""
SANITY_SITECONF_SAMPLE ?= "${COREBASE}/meta*/conf/site.conf.sample"
python oecore_update_siteconf() {
# If we have a site.conf, check it's valid
- current_sconf = d.getVar('SCONF_VERSION', True)
- sconf_version = d.getVar('SITE_CONF_VERSION', True)
+ current_sconf = d.getVar('SCONF_VERSION')
+ sconf_version = d.getVar('SITE_CONF_VERSION')
failmsg = """Your version of site.conf was generated from an older version of
site.conf.sample and there have been updates made to this file. Please compare the two
@@ -80,8 +80,8 @@ SANITY_BBLAYERCONF_SAMPLE ?= "${COREBASE}/meta*/conf/bblayers.conf.sample"
python oecore_update_bblayers() {
# bblayers.conf is out of date, so see if we can resolve that
- current_lconf = int(d.getVar('LCONF_VERSION', True))
- lconf_version = int(d.getVar('LAYER_CONF_VERSION', True))
+ current_lconf = int(d.getVar('LCONF_VERSION'))
+ lconf_version = int(d.getVar('LAYER_CONF_VERSION'))
failmsg = """Your version of bblayers.conf has the wrong LCONF_VERSION (has ${LCONF_VERSION}, expecting ${LAYER_CONF_VERSION}).
Please compare your file against bblayers.conf.sample and merge any changes before continuing.
@@ -141,7 +141,7 @@ is a good way to visualise the changes."""
# Handle rename of meta-yocto -> meta-poky
# This marks the start of separate version numbers but code is needed in OE-Core
# for the migration, one last time.
- layers = d.getVar('BBLAYERS', True).split()
+ layers = d.getVar('BBLAYERS').split()
layers = [ os.path.basename(path) for path in layers ]
if 'meta-yocto' in layers:
found = False
@@ -172,7 +172,7 @@ is a good way to visualise the changes."""
}
def raise_sanity_error(msg, d, network_error=False):
- if d.getVar("SANITY_USE_EVENTS", True) == "1":
+ if d.getVar("SANITY_USE_EVENTS") == "1":
try:
bb.event.fire(bb.event.SanityCheckFailed(msg, network_error), d)
except TypeError:
@@ -198,8 +198,8 @@ def check_toolchain_tune_args(data, tune, multilib, errs):
return found_errors
def check_toolchain_args_present(data, tune, multilib, tune_errors, which):
- args_set = (data.getVar("TUNE_%s" % which, True) or "").split()
- args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune), True) or "").split()
+ args_set = (data.getVar("TUNE_%s" % which) or "").split()
+ args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune)) or "").split()
args_missing = []
# If no args are listed/required, we are done.
@@ -226,9 +226,8 @@ def check_toolchain_tune(data, tune, multilib):
# Apply the overrides so we can look at the details.
overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
localdata.setVar("OVERRIDES", overrides)
- bb.data.update_data(localdata)
bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
- features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune, True) or "").split()
+ features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune) or "").split()
if not features:
return "Tuning '%s' has no defined features, and cannot be used." % tune
valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
@@ -248,9 +247,9 @@ def check_toolchain_tune(data, tune, multilib):
bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
else:
tune_errors.append("Feature '%s' is not defined." % feature)
- whitelist = localdata.getVar("TUNEABI_WHITELIST", True)
+ whitelist = localdata.getVar("TUNEABI_WHITELIST")
if whitelist:
- tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune, True)
+ tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune)
if not tuneabi:
tuneabi = tune
if True not in [x in whitelist.split() for x in tuneabi.split()]:
@@ -264,13 +263,13 @@ def check_toolchain_tune(data, tune, multilib):
def check_toolchain(data):
tune_error_set = []
- deftune = data.getVar("DEFAULTTUNE", True)
+ deftune = data.getVar("DEFAULTTUNE")
tune_errors = check_toolchain_tune(data, deftune, 'default')
if tune_errors:
tune_error_set.append(tune_errors)
- multilibs = (data.getVar("MULTILIB_VARIANTS", True) or "").split()
- global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS", True) or "").split()
+ multilibs = (data.getVar("MULTILIB_VARIANTS") or "").split()
+ global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS") or "").split()
if multilibs:
seen_libs = []
@@ -282,7 +281,7 @@ def check_toolchain(data):
seen_libs.append(lib)
if not lib in global_multilibs:
tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
- tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib, True)
+ tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib)
if tune in seen_tunes:
tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
else:
@@ -337,11 +336,11 @@ def check_path_length(filepath, pathname, limit):
return ""
def get_filesystem_id(path):
- status, result = oe.utils.getstatusoutput("stat -f -c '%s' %s" % ("%t", path))
- if status == 0:
- return result
- else:
- bb.warn("Can't get the filesystem id of: %s" % path)
+ import subprocess
+ try:
+ return subprocess.check_output(["stat", "-f", "-c", "%t", path]).decode('utf-8').strip()
+ except subprocess.CalledProcessError:
+ bb.warn("Can't get filesystem id of: %s" % path)
return None
# Check that the path isn't located on nfs.
@@ -351,6 +350,14 @@ def check_not_nfs(path, name):
return "The %s: %s can't be located on nfs.\n" % (name, path)
return ""
+# Check that the path is on a case-sensitive file system
+def check_case_sensitive(path, name):
+ import tempfile
+ with tempfile.NamedTemporaryFile(prefix='TmP', dir=path) as tmp_file:
+ if os.path.exists(tmp_file.name.lower()):
+ return "The %s (%s) can't be on a case-insensitive file system.\n" % (name, path)
+ return ""
+
# Check that path isn't a broken symlink
def check_symlink(lnk, data):
if os.path.islink(lnk) and not os.path.exists(lnk):
@@ -360,27 +367,34 @@ def check_connectivity(d):
# URIs to check can be set in the CONNECTIVITY_CHECK_URIS variable
# using the same syntax as for SRC_URI. If the variable is not set
# the check is skipped
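# For example (illustrative URI), a distro could set:
#   CONNECTIVITY_CHECK_URIS ?= "https://example.com/"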
- test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS', True) or "").split()
+ test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS') or "").split()
retval = ""
+ bbn = d.getVar('BB_NO_NETWORK')
+ if bbn not in (None, '0', '1'):
+ return 'BB_NO_NETWORK should be "0" or "1", but it is "%s"' % bbn
+
# Only check connectivity if network enabled and the
# CONNECTIVITY_CHECK_URIS are set
- network_enabled = not d.getVar('BB_NO_NETWORK', True)
+ network_enabled = not (bbn == '1')
check_enabled = len(test_uris)
- # Take a copy of the data store and unset MIRRORS and PREMIRRORS
- data = bb.data.createCopy(d)
- data.delVar('PREMIRRORS')
- data.delVar('MIRRORS')
if check_enabled and network_enabled:
+ # Take a copy of the data store and unset MIRRORS and PREMIRRORS
+ data = bb.data.createCopy(d)
+ data.delVar('PREMIRRORS')
+ data.delVar('MIRRORS')
try:
fetcher = bb.fetch2.Fetch(test_uris, data)
fetcher.checkstatus()
except Exception as err:
# Allow the message to be configured so that users can be
# pointed to a support mechanism.
- msg = data.getVar('CONNECTIVITY_CHECK_MSG', True) or ""
+ msg = data.getVar('CONNECTIVITY_CHECK_MSG') or ""
if len(msg) == 0:
- msg = "%s. Please ensure your network is configured correctly.\n" % err
+ msg = "%s.\n" % err
+ msg += " Please ensure your host's network is configured correctly,\n"
+ msg += " or set BB_NO_NETWORK = \"1\" to disable network access if\n"
+ msg += " all required sources are on local disk.\n"
retval = msg
return retval
@@ -388,7 +402,7 @@ def check_connectivity(d):
def check_supported_distro(sanity_data):
from fnmatch import fnmatch
- tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS', True)
+ tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS')
if not tested_distros:
return
@@ -411,17 +425,17 @@ def check_sanity_validmachine(sanity_data):
messages = ""
# Check TUNE_ARCH is set
- if sanity_data.getVar('TUNE_ARCH', True) == 'INVALID':
+ if sanity_data.getVar('TUNE_ARCH') == 'INVALID':
messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n'
# Check TARGET_OS is set
- if sanity_data.getVar('TARGET_OS', True) == 'INVALID':
+ if sanity_data.getVar('TARGET_OS') == 'INVALID':
messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
# Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
- pkgarchs = sanity_data.getVar('PACKAGE_ARCHS', True)
- tunepkg = sanity_data.getVar('TUNE_PKGARCH', True)
- defaulttune = sanity_data.getVar('DEFAULTTUNE', True)
+ pkgarchs = sanity_data.getVar('PACKAGE_ARCHS')
+ tunepkg = sanity_data.getVar('TUNE_PKGARCH')
+ defaulttune = sanity_data.getVar('DEFAULTTUNE')
tunefound = False
seen = {}
dups = []
@@ -442,52 +456,32 @@ def check_sanity_validmachine(sanity_data):
return messages
-# Checks if necessary to add option march to host gcc
-def check_gcc_march(sanity_data):
- result = True
- message = ""
-
- # Check if -march not in BUILD_CFLAGS
- if sanity_data.getVar("BUILD_CFLAGS",True).find("-march") < 0:
- result = False
-
- # Construct a test file
- f = open("gcc_test.c", "w")
- f.write("int main (){ volatile int atomic = 2; __sync_bool_compare_and_swap (&atomic, 2, 3); return 0; }\n")
- f.close()
-
- # Check if GCC could work without march
- if not result:
- status,res = oe.utils.getstatusoutput(sanity_data.expand("${BUILD_CC} gcc_test.c -o gcc_test"))
- if status == 0:
- result = True;
-
- if not result:
- status,res = oe.utils.getstatusoutput(sanity_data.expand("${BUILD_CC} -march=native gcc_test.c -o gcc_test"))
- if status == 0:
- message = "BUILD_CFLAGS_append = \" -march=native\""
- result = True;
-
- if not result:
- build_arch = sanity_data.getVar('BUILD_ARCH', True)
- status,res = oe.utils.getstatusoutput(sanity_data.expand("${BUILD_CC} -march=%s gcc_test.c -o gcc_test" % build_arch))
- if status == 0:
- message = "BUILD_CFLAGS_append = \" -march=%s\"" % build_arch
- result = True;
-
- os.remove("gcc_test.c")
- if os.path.exists("gcc_test"):
- os.remove("gcc_test")
+# Versions of patch before 2.7 can't handle all the features in git-style diffs. Some
+# patches may incorrectly apply, and others won't apply at all.
+def check_patch_version(sanity_data):
+ from distutils.version import LooseVersion
+ import re, subprocess
- return (result, message)
+ try:
+ result = subprocess.check_output(["patch", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
+ version = re.search(r"[0-9.]+", result.splitlines()[0]).group()
+ if LooseVersion(version) < LooseVersion("2.7"):
+ return "Your version of patch is older than 2.7 and has bugs which will break builds. Please install a newer version of patch.\n"
+ else:
+ return None
+ except subprocess.CalledProcessError as e:
+ return "Unable to execute patch --version, exit code %d:\n%s\n" % (e.returncode, e.output)
# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612.
# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate.
def check_make_version(sanity_data):
from distutils.version import LooseVersion
- status, result = oe.utils.getstatusoutput("make --version")
- if status != 0:
- return "Unable to execute make --version, exit code %s\n" % status
+ import subprocess
+
+ try:
+ result = subprocess.check_output(['make', '--version'], stderr=subprocess.STDOUT).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ return "Unable to execute make --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[2]
if LooseVersion(version) == LooseVersion("3.82"):
# Construct a test file
@@ -502,31 +496,46 @@ def check_make_version(sanity_data):
f.close()
# Check if make 3.82 has been patched
- status,result = oe.utils.getstatusoutput("make -f makefile_test")
-
- os.remove("makefile_test")
- if os.path.exists("makefile_test_a.c"):
- os.remove("makefile_test_a.c")
- if os.path.exists("makefile_test_b.c"):
- os.remove("makefile_test_b.c")
- if os.path.exists("makefile_test.a"):
- os.remove("makefile_test.a")
-
- if status != 0:
+ try:
+ subprocess.check_call(['make', '-f', 'makefile_test'])
+ except subprocess.CalledProcessError as e:
return "Your version of make 3.82 is broken. Please revert to 3.81 or install a patched version.\n"
+ finally:
+ os.remove("makefile_test")
+ if os.path.exists("makefile_test_a.c"):
+ os.remove("makefile_test_a.c")
+ if os.path.exists("makefile_test_b.c"):
+ os.remove("makefile_test_b.c")
+ if os.path.exists("makefile_test.a"):
+ os.remove("makefile_test.a")
return None
+# Check if we're running on WSL (Windows Subsystem for Linux). It's known not to
+# work, but we should tell the user that upfront.
+def check_wsl(d):
+ with open("/proc/version", "r") as f:
+ verdata = f.readlines()
+ for l in verdata:
+ if "Microsoft" in l:
+ return "OpenEmbedded doesn't work under WSL at this time, sorry"
+ return None
+
# Tar version 1.24 and onwards handle overwriting symlinks correctly
# but earlier versions do not; this needs to work properly for sstate
+# Version 1.28 is needed so opkg-build works correctly when reproducible builds are enabled
def check_tar_version(sanity_data):
from distutils.version import LooseVersion
- status, result = oe.utils.getstatusoutput("tar --version")
- if status != 0:
- return "Unable to execute tar --version, exit code %s\n" % status
+ import subprocess
+ try:
+ result = subprocess.check_output(["tar", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ return "Unable to execute tar --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[3]
if LooseVersion(version) < LooseVersion("1.24"):
- return "Your version of tar is older than 1.24 and has bugs which will break builds. Please install a newer version of tar.\n"
+ return "Your version of tar is older than 1.24 and has bugs which will break builds. Please install a newer version of tar (1.28+).\n"
+ if LooseVersion(version) < LooseVersion("1.28"):
+ return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the projects buildtools-tarball from our last release).\n"
return None
# We use git parameters and functionality only found in 1.7.8 or later
@@ -534,9 +543,11 @@ def check_tar_version(sanity_data):
# The git fetcher also had workarounds for git < 1.7.9.2 which we've dropped
def check_git_version(sanity_data):
from distutils.version import LooseVersion
- status, result = oe.utils.getstatusoutput("git --version 2> /dev/null")
- if status != 0:
- return "Unable to execute git --version, exit code %s\n" % status
+ import subprocess
+ try:
+ result = subprocess.check_output(["git", "--version"], stderr=subprocess.DEVNULL).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ return "Unable to execute git --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[2]
if LooseVersion(version) < LooseVersion("1.8.3.1"):
return "Your version of git is older than 1.8.3.1 and has bugs which will break builds. Please install a newer version of git.\n"
@@ -544,28 +555,30 @@ def check_git_version(sanity_data):
# Check the required perl modules which may not be installed by default
def check_perl_modules(sanity_data):
+ import subprocess
ret = ""
modules = ( "Text::ParseWords", "Thread::Queue", "Data::Dumper" )
errresult = ''
for m in modules:
- status, result = oe.utils.getstatusoutput("perl -e 'use %s'" % m)
- if status != 0:
- errresult += result
+ try:
+ subprocess.check_output(["perl", "-e", "use %s" % m])
+ except subprocess.CalledProcessError as e:
+ errresult += bytes.decode(e.output)
ret += "%s " % m
if ret:
return "Required perl module(s) not found: %s\n\n%s\n" % (ret, errresult)
return None
def sanity_check_conffiles(d):
- funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS', True).split()
+ funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS').split()
for func in funcs:
conffile, current_version, required_version, func = func.split(":")
- if check_conf_exists(conffile, d) and d.getVar(current_version, True) is not None and \
- d.getVar(current_version, True) != d.getVar(required_version, True):
+ if check_conf_exists(conffile, d) and d.getVar(current_version) is not None and \
+ d.getVar(current_version) != d.getVar(required_version):
try:
- bb.build.exec_func(func, d, pythonexception=True)
+ bb.build.exec_func(func, d)
except NotImplementedError as e:
- bb.fatal(e)
+ bb.fatal(str(e))
d.setVar("BB_INVALIDCONF", True)
def sanity_handle_abichanges(status, d):
@@ -574,55 +587,16 @@ def sanity_handle_abichanges(status, d):
#
import subprocess
- current_abi = d.getVar('OELAYOUT_ABI', True)
- abifile = d.getVar('SANITY_ABIFILE', True)
+ current_abi = d.getVar('OELAYOUT_ABI')
+ abifile = d.getVar('SANITY_ABIFILE')
if os.path.exists(abifile):
with open(abifile, "r") as f:
abi = f.read().strip()
if not abi.isdigit():
with open(abifile, "w") as f:
f.write(current_abi)
- elif abi == "2" and current_abi == "3":
- bb.note("Converting staging from layout version 2 to layout version 3")
- subprocess.call(d.expand("mv ${TMPDIR}/staging ${TMPDIR}/sysroots"), shell=True)
- subprocess.call(d.expand("ln -s sysroots ${TMPDIR}/staging"), shell=True)
- subprocess.call(d.expand("cd ${TMPDIR}/stamps; for i in */*do_populate_staging; do new=`echo $i | sed -e 's/do_populate_staging/do_populate_sysroot/'`; mv $i $new; done"), shell=True)
- with open(abifile, "w") as f:
- f.write(current_abi)
- elif abi == "3" and current_abi == "4":
- bb.note("Converting staging layout from version 3 to layout version 4")
- if os.path.exists(d.expand("${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}")):
- subprocess.call(d.expand("mv ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS} ${STAGING_BINDIR_CROSS}"), shell=True)
- subprocess.call(d.expand("ln -s ${STAGING_BINDIR_CROSS} ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}"), shell=True)
- with open(abifile, "w") as f:
- f.write(current_abi)
- elif abi == "4":
- status.addresult("Staging layout has changed. The cross directory has been deprecated and cross packages are now built under the native sysroot.\nThis requires a rebuild.\n")
- elif abi == "5" and current_abi == "6":
- bb.note("Converting staging layout from version 5 to layout version 6")
- subprocess.call(d.expand("mv ${TMPDIR}/pstagelogs ${SSTATE_MANIFESTS}"), shell=True)
- with open(abifile, "w") as f:
- f.write(current_abi)
- elif abi == "7" and current_abi == "8":
- status.addresult("Your configuration is using stamp files including the sstate hash but your build directory was built with stamp files that do not include this.\nTo continue, either rebuild or switch back to the OEBasic signature handler with BB_SIGNATURE_HANDLER = 'OEBasic'.\n")
- elif (abi != current_abi and current_abi == "9"):
- status.addresult("The layout of the TMPDIR STAMPS directory has changed. Please clean out TMPDIR and rebuild (sstate will be still be valid and reused)\n")
- elif (abi != current_abi and current_abi == "10" and (abi == "8" or abi == "9")):
- bb.note("Converting staging layout from version 8/9 to layout version 10")
- cmd = d.expand("grep -r -l sysroot-providers/virtual_kernel ${SSTATE_MANIFESTS}")
- ret, result = oe.utils.getstatusoutput(cmd)
- result = result.split()
- for f in result:
- bb.note("Uninstalling manifest file %s" % f)
- sstate_clean_manifest(f, d)
- with open(abifile, "w") as f:
- f.write(current_abi)
- elif abi == "10" and current_abi == "11":
- bb.note("Converting staging layout from version 10 to layout version 11")
- # Files in xf86-video-modesetting moved to xserver-xorg and bitbake can't currently handle that:
- subprocess.call(d.expand("rm ${TMPDIR}/sysroots/*/usr/lib/xorg/modules/drivers/modesetting_drv.so ${TMPDIR}/sysroots/*/pkgdata/runtime/xf86-video-modesetting* ${TMPDIR}/sysroots/*/pkgdata/runtime-reverse/xf86-video-modesetting* ${TMPDIR}/sysroots/*/pkgdata/shlibs2/xf86-video-modesetting*"), shell=True)
- with open(abifile, "w") as f:
- f.write(current_abi)
+ elif int(abi) <= 11 and current_abi == "12":
+ status.addresult("The layout of TMPDIR changed for Recipe Specific Sysroots.\nConversion doesn't make sense and this change will rebuild everything so please delete TMPDIR (%s).\n" % d.getVar("TMPDIR"))
elif (abi != current_abi):
# Code to convert from one ABI to another could go here if possible.
status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
@@ -645,24 +619,27 @@ def check_sanity_sstate_dir_change(sstate_dir, data):
except IndexError:
pass
return testmsg
-
+
def check_sanity_version_change(status, d):
# Sanity checks to be done when SANITY_VERSION or NATIVELSBSTRING changes
# In other words, these tests run once in a given build directory and then
# never again until the sanity version or host distribution id/version changes.
- # Check the python install is complete. glib-2.0-natives requries
- # xml.parsers.expat
+ # Check the python install is complete. Examples that are often removed in
+ # minimal installations: glib-2.0-native requires xml.parsers.expat and icu
+ # requires distutils.sysconfig.
try:
import xml.parsers.expat
- except ImportError:
- status.addresult('Your python is not a full install. Please install the module xml.parsers.expat (python-xml on openSUSE and SUSE Linux).\n')
- import stat
+ import distutils.sysconfig
+ except ImportError as e:
+ status.addresult('Your Python 3 is not a full install. Please install the module %s (see the Getting Started guide for further information).\n' % e.name)
status.addresult(check_make_version(d))
+ status.addresult(check_patch_version(d))
status.addresult(check_tar_version(d))
status.addresult(check_git_version(d))
status.addresult(check_perl_modules(d))
+ status.addresult(check_wsl(d))
missing = ""
@@ -670,12 +647,12 @@ def check_sanity_version_change(status, d):
missing = missing + "GNU make,"
if not check_app_exists('${BUILD_CC}', d):
- missing = missing + "C Compiler (%s)," % d.getVar("BUILD_CC", True)
+ missing = missing + "C Compiler (%s)," % d.getVar("BUILD_CC")
if not check_app_exists('${BUILD_CXX}', d):
- missing = missing + "C++ Compiler (%s)," % d.getVar("BUILD_CXX", True)
+ missing = missing + "C++ Compiler (%s)," % d.getVar("BUILD_CXX")
- required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES', True)
+ required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES')
for util in required_utilities.split():
if not check_app_exists(util, d):
@@ -685,30 +662,14 @@ def check_sanity_version_change(status, d):
missing = missing.rstrip(',')
status.addresult("Please install the following missing utilities: %s\n" % missing)
- assume_provided = d.getVar('ASSUME_PROVIDED', True).split()
+ assume_provided = d.getVar('ASSUME_PROVIDED').split()
# Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
if "diffstat-native" not in assume_provided:
status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
- if "qemu-native" in assume_provided:
- if not check_app_exists("qemu-arm", d):
- status.addresult("qemu-native was in ASSUME_PROVIDED but the QEMU binaries (qemu-arm) can't be found in PATH")
-
- if "libsdl-native" in assume_provided:
- if not check_app_exists("sdl-config", d):
- status.addresult("libsdl-native is set to be ASSUME_PROVIDED but sdl-config can't be found in PATH. Please either install it, or configure qemu not to require sdl.")
-
- (result, message) = check_gcc_march(d)
- if result and message:
- status.addresult("Your gcc version is older than 4.5, please add the following param to local.conf\n \
- %s\n" % message)
- if not result:
- status.addresult("Your gcc version is older than 4.5 or is not working properly. Please verify you can build")
- status.addresult(" and link something that uses atomic operations, such as: \n")
- status.addresult(" __sync_bool_compare_and_swap (&atomic, 2, 3);\n")
-
# Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
- tmpdir = d.getVar('TMPDIR', True)
+ import stat
+ tmpdir = d.getVar('TMPDIR')
status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
tmpdirmode = os.stat(tmpdir).st_mode
if (tmpdirmode & stat.S_ISGID):
@@ -732,7 +693,7 @@ def check_sanity_version_change(status, d):
if netcheck:
status.network_error = True
- nolibs = d.getVar('NO32LIBS', True)
+ nolibs = d.getVar('NO32LIBS')
if not nolibs:
lib32path = '/lib'
if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ):
@@ -741,7 +702,7 @@ def check_sanity_version_change(status, d):
if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'):
status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n")
- bbpaths = d.getVar('BBPATH', True).split(":")
+ bbpaths = d.getVar('BBPATH').split(":")
if ("." in bbpaths or "./" in bbpaths or "" in bbpaths):
status.addresult("BBPATH references the current directory, either through " \
"an empty entry, a './' or a '.'.\n\t This is unsafe and means your "\
@@ -751,7 +712,7 @@ def check_sanity_version_change(status, d):
"references.\n" \
"Parsed BBPATH is" + str(bbpaths));
- oes_bb_conf = d.getVar( 'OES_BITBAKE_CONF', True)
+ oes_bb_conf = d.getVar( 'OES_BITBAKE_CONF')
if not oes_bb_conf:
status.addresult('You are not using the OpenEmbedded version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n')
@@ -761,6 +722,10 @@ def check_sanity_version_change(status, d):
# Check that TMPDIR isn't located on nfs
status.addresult(check_not_nfs(tmpdir, "TMPDIR"))
+ # Check for case-insensitive file systems (such as Linux in Docker on
+ # macOS with the default HFS+ file system)
+ status.addresult(check_case_sensitive(tmpdir, "TMPDIR"))
+
def sanity_check_locale(d):
"""
Currently bitbake switches locale to en_US.UTF-8 so check that this locale actually exists.
@@ -769,7 +734,7 @@ def sanity_check_locale(d):
try:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
except locale.Error:
- raise_sanity_error("You system needs to support the en_US.UTF-8 locale.", d)
+ raise_sanity_error("Your system needs to support the en_US.UTF-8 locale.", d)
def check_sanity_everybuild(status, d):
import os, stat
@@ -779,33 +744,38 @@ def check_sanity_everybuild(status, d):
if 0 == os.getuid():
raise_sanity_error("Do not use Bitbake as root.", d)
- # Check the Python version, we now have a minimum of Python 2.7.3
+ # Check the Python version; the minimum is now Python 3.4
import sys
- if sys.hexversion < 0x020703F0:
- status.addresult('The system requires at least Python 2.7.3 to run. Please update your Python interpreter.\n')
+ if sys.hexversion < 0x03040000:
+ status.addresult('The system requires at least Python 3.4 to run. Please update your Python interpreter.\n')
# Check the bitbake version meets minimum requirements
from distutils.version import LooseVersion
- minversion = d.getVar('BB_MIN_VERSION', True)
+ minversion = d.getVar('BB_MIN_VERSION')
if (LooseVersion(bb.__version__) < LooseVersion(minversion)):
status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
sanity_check_locale(d)
- paths = d.getVar('PATH', True).split(":")
+ paths = d.getVar('PATH').split(":")
if "." in paths or "./" in paths or "" in paths:
status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
# Check that the DISTRO is valid, if set
# need to take into account DISTRO renaming DISTRO
- distro = d.getVar('DISTRO', True)
+ distro = d.getVar('DISTRO')
if distro and distro != "nodistro":
if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ):
- status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO", True))
+ status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO"))
+
+ # Check that these variables don't use tilde-expansion, since BitBake won't expand it
+ for v in ("TMPDIR", "DL_DIR", "SSTATE_DIR"):
+ if d.getVar(v).startswith("~"):
+ status.addresult("%s uses ~ but Bitbake will not expand this, use an absolute path or variables." % v)
# Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
# set, since so much relies on it being set.
- dldir = d.getVar('DL_DIR', True)
+ dldir = d.getVar('DL_DIR')
if not dldir:
status.addresult("DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable. \n")
if os.path.exists(dldir) and not os.access(dldir, os.W_OK):
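The version gate in this hunk compares sys.hexversion against 0x03040000 rather than parsing a version string. CPython packs the version as one byte each for major, minor and micro, then a release-level nibble (0xA/0xB/0xC/0xF for alpha/beta/rc/final) and a serial nibble, which is why the old 2.7.3 minimum read 0x020703F0. A minimal sketch of the encoding, assuming only CPython's documented layout:

import sys

def hexversion(major, minor, micro, releaselevel=0xF, serial=0):
    # 0xMMmmppRS: major, minor, micro, release level, serial
    return (major << 24) | (minor << 16) | (micro << 8) | (releaselevel << 4) | serial

assert hexversion(2, 7, 3) == 0x020703F0                   # old minimum: 2.7.3 final
assert hexversion(3, 4, 0, releaselevel=0) == 0x03040000   # new minimum; any 3.4 build passes
print(sys.hexversion >= 0x03040000)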
@@ -814,9 +784,9 @@ def check_sanity_everybuild(status, d):
# Check that the MACHINE is valid, if it is set
machinevalid = True
- if d.getVar('MACHINE', True):
+ if d.getVar('MACHINE'):
if not check_conf_exists("conf/machine/${MACHINE}.conf", d):
- status.addresult('Please set a valid MACHINE in your local.conf or environment\n')
+ status.addresult('MACHINE=%s is invalid. Please set a valid MACHINE in your local.conf, environment or other configuration file.\n' % (d.getVar('MACHINE')))
machinevalid = False
else:
status.addresult(check_sanity_validmachine(d))
@@ -827,12 +797,17 @@ def check_sanity_everybuild(status, d):
status.addresult(check_toolchain(d))
# Check that the SDKMACHINE is valid, if it is set
- if d.getVar('SDKMACHINE', True):
+ if d.getVar('SDKMACHINE'):
if not check_conf_exists("conf/machine-sdk/${SDKMACHINE}.conf", d):
status.addresult('Specified SDKMACHINE value is not valid\n')
elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
+ # If SDK_VENDOR looks like "-my-sdk" then the triples are badly formed so fail early
+ sdkvendor = d.getVar("SDK_VENDOR")
+ if not (sdkvendor.startswith("-") and sdkvendor.count("-") == 1):
+ status.addresult("SDK_VENDOR should be of the form '-foosdk' with a single dash\n")
+
check_supported_distro(d)
omask = os.umask(0o022)
@@ -840,7 +815,7 @@ def check_sanity_everybuild(status, d):
status.addresult("Please use a umask which allows a+rx and u+rwx\n")
os.umask(omask)
- if d.getVar('TARGET_ARCH', True) == "arm":
+ if d.getVar('TARGET_ARCH') == "arm":
# This path is no longer user-readable in modern (very recent) Linux
try:
if os.path.exists("/proc/sys/vm/mmap_min_addr"):
@@ -853,7 +828,7 @@ def check_sanity_everybuild(status, d):
except:
pass
- oeroot = d.getVar('COREBASE', True)
+ oeroot = d.getVar('COREBASE')
if oeroot.find('+') != -1:
status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.")
if oeroot.find('@') != -1:
@@ -866,20 +841,18 @@ def check_sanity_everybuild(status, d):
mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
protocols = ['http', 'ftp', 'file', 'https', \
'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
- 'bzr', 'cvs', 'npm', 'sftp', 'ssh']
+ 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3' ]
for mirror_var in mirror_vars:
- mirrors = (d.getVar(mirror_var, True) or '').replace('\\n', '\n').split('\n')
- for mirror_entry in mirrors:
- mirror_entry = mirror_entry.strip()
- if not mirror_entry:
- # ignore blank lines
- continue
+ mirrors = (d.getVar(mirror_var) or '').replace('\\n', ' ').split()
- try:
- pattern, mirror = mirror_entry.split()
- except ValueError:
- bb.warn('Invalid %s: %s, should be 2 members.' % (mirror_var, mirror_entry.strip()))
- continue
+ # Split into pairs
+ if len(mirrors) % 2 != 0:
+ bb.warn('Invalid mirror variable value for %s: %s, should contain paired members.' % (mirror_var, str(mirrors)))
+ continue
+ mirrors = list(zip(*[iter(mirrors)]*2))
+
+ for mirror_entry in mirrors:
+ pattern, mirror = mirror_entry
decoded = bb.fetch2.decodeurl(pattern)
try:
@@ -907,13 +880,13 @@ def check_sanity_everybuild(status, d):
check_symlink(mirror_base, d)
# Check that TMPDIR hasn't changed location since the last time we were run
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
checkfile = os.path.join(tmpdir, "saved_tmpdir")
if os.path.exists(checkfile):
with open(checkfile, "r") as f:
saved_tmpdir = f.read().strip()
if (saved_tmpdir != tmpdir):
- status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or rebuild\n" % saved_tmpdir)
+ status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or delete it and rebuild\n" % saved_tmpdir)
else:
bb.utils.mkdirhier(tmpdir)
# Remove setuid, setgid and sticky bits from TMPDIR
@@ -929,7 +902,9 @@ def check_sanity_everybuild(status, d):
# If /bin/sh is a symlink, check that it points to dash or bash
if os.path.islink('/bin/sh'):
real_sh = os.path.realpath('/bin/sh')
- if not real_sh.endswith('/dash') and not real_sh.endswith('/bash'):
+ # Due to update-alternatives, the shell name may take various
+ # forms, such as /bin/dash, /bin/bash, /bin/bash.bash ...
+ if '/dash' not in real_sh and '/bash' not in real_sh:
status.addresult("Error, /bin/sh links to %s, must be dash or bash\n" % real_sh)
def check_sanity(sanity_data):
@@ -944,8 +919,8 @@ def check_sanity(sanity_data):
status = SanityStatus()
- tmpdir = sanity_data.getVar('TMPDIR', True)
- sstate_dir = sanity_data.getVar('SSTATE_DIR', True)
+ tmpdir = sanity_data.getVar('TMPDIR')
+ sstate_dir = sanity_data.getVar('SSTATE_DIR')
check_symlink(sstate_dir, sanity_data)
@@ -969,7 +944,7 @@ def check_sanity(sanity_data):
check_sanity_everybuild(status, sanity_data)
- sanity_version = int(sanity_data.getVar('SANITY_VERSION', True) or 1)
+ sanity_version = int(sanity_data.getVar('SANITY_VERSION') or 1)
network_error = False
# NATIVELSBSTRING var may have been overridden with "universal", so
# get actual host distribution id and version
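The reworked mirror check above stops splitting MIRRORS, PREMIRRORS and SSTATE_MIRRORS on newlines and instead flattens them into whitespace-separated tokens, pairing them with the shared-iterator zip idiom. A standalone sketch of that pairing; the mirror URLs are hypothetical:

# Flatten and pair a mirror-style value, as check_sanity_everybuild() now does.
raw = "git://.*/.* http://mirror.example.com/sources/ \\n " \
      "ftp://.*/.* http://mirror.example.com/sources/ \\n"
tokens = raw.replace("\\n", " ").split()
if len(tokens) % 2 != 0:
    raise ValueError("should contain paired members: %s" % tokens)
# zip over one shared iterator consumes two tokens per tuple,
# yielding (pattern, mirror) pairs.
for pattern, mirror in zip(*[iter(tokens)] * 2):
    print(pattern, "->", mirror)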
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
index b9ae19d582..6b171ca8df 100644
--- a/meta/classes/scons.bbclass
+++ b/meta/classes/scons.bbclass
@@ -1,17 +1,31 @@
-DEPENDS += "python-scons-native"
+inherit python3native
+
+DEPENDS += "python3-scons-native"
EXTRA_OESCONS ?= ""
-do_configure[noexec] = "1"
+do_configure() {
+ unset _PYTHON_SYSCONFIGDATA_NAME
+ if [ -n "${CONFIGURESTAMPFILE}" ]; then
+ if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
+ ${STAGING_BINDIR_NATIVE}/scons --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
+ fi
+
+ mkdir -p `dirname ${CONFIGURESTAMPFILE}`
+ echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
+ fi
+}
scons_do_compile() {
- ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
- die "scons build execution failed."
+ unset _PYTHON_SYSCONFIGDATA_NAME
+ ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
+ die "scons build execution failed."
}
scons_do_install() {
- ${STAGING_BINDIR_NATIVE}/scons install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
- die "scons install execution failed."
+ unset _PYTHON_SYSCONFIGDATA_NAME
+ ${STAGING_BINDIR_NATIVE}/scons install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
+ die "scons install execution failed."
}
EXPORT_FUNCTIONS do_compile do_install
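The new do_configure above replaces the old noexec stamp with a hash-guarded clean: it stores BB_TASKHASH in CONFIGURESTAMPFILE and runs `scons --clean` whenever the recorded hash differs and CLEANBROKEN is unset. A minimal Python rendering of that guard; maybe_clean() and run_clean() are illustrative names, not part of the class:

import os

def maybe_clean(stampfile, taskhash, cleanbroken, run_clean):
    # Clean when the stored hash no longer matches the current task hash,
    # then record the new hash for the next run.
    if not stampfile:
        return
    if os.path.exists(stampfile) and not cleanbroken:
        with open(stampfile) as f:
            if f.read() != taskhash:
                run_clean()
    os.makedirs(os.path.dirname(stampfile), exist_ok=True)
    with open(stampfile, "w") as f:
        f.write(taskhash)

maybe_clean("build/configure.stamp", "3f2a9b", False,
            lambda: print("scons --clean ..."))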
diff --git a/meta/classes/setuptools.bbclass b/meta/classes/setuptools.bbclass
index 56343b1c73..a923ea3c4a 100644
--- a/meta/classes/setuptools.bbclass
+++ b/meta/classes/setuptools.bbclass
@@ -1,8 +1,3 @@
inherit distutils
-DEPENDS += "python-distribute-native"
-
-DISTUTILS_INSTALL_ARGS = "--root=${D} \
- --prefix=${prefix} \
- --install-lib=${PYTHON_SITEPACKAGES_DIR} \
- --install-data=${datadir}"
+DEPENDS += "python-setuptools-native"
diff --git a/meta/classes/setuptools3.bbclass b/meta/classes/setuptools3.bbclass
index de6dd9440c..8ca66ee708 100644
--- a/meta/classes/setuptools3.bbclass
+++ b/meta/classes/setuptools3.bbclass
@@ -2,7 +2,3 @@ inherit distutils3
DEPENDS += "python3-setuptools-native"
-DISTUTILS_INSTALL_ARGS = "--root=${D} \
- --prefix=${prefix} \
- --install-lib=${PYTHON_SITEPACKAGES_DIR} \
- --install-data=${datadir}"
diff --git a/meta/classes/sign_ipk.bbclass b/meta/classes/sign_ipk.bbclass
index a481f6d9a8..e5057b7799 100644
--- a/meta/classes/sign_ipk.bbclass
+++ b/meta/classes/sign_ipk.bbclass
@@ -29,10 +29,10 @@ IPK_GPG_SIGNATURE_TYPE ?= 'ASC'
python () {
# Check configuration
for var in ('IPK_GPG_NAME', 'IPK_GPG_PASSPHRASE_FILE'):
- if not d.getVar(var, True):
+ if not d.getVar(var):
raise_sanity_error("You need to define %s in the config" % var, d)
- sigtype = d.getVar("IPK_GPG_SIGNATURE_TYPE", True)
+ sigtype = d.getVar("IPK_GPG_SIGNATURE_TYPE")
if sigtype.upper() != "ASC" and sigtype.upper() != "BIN":
raise_sanity_error("Bad value for IPK_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN" % sigtype)
}
@@ -42,11 +42,11 @@ def sign_ipk(d, ipk_to_sign):
bb.debug(1, 'Signing ipk: %s' % ipk_to_sign)
- signer = get_signer(d, d.getVar('IPK_GPG_BACKEND', True))
- sig_type = d.getVar('IPK_GPG_SIGNATURE_TYPE', True)
+ signer = get_signer(d, d.getVar('IPK_GPG_BACKEND'))
+ sig_type = d.getVar('IPK_GPG_SIGNATURE_TYPE')
is_ascii_sig = (sig_type.upper() != "BIN")
signer.detach_sign(ipk_to_sign,
- d.getVar('IPK_GPG_NAME', True),
- d.getVar('IPK_GPG_PASSPHRASE_FILE', True),
+ d.getVar('IPK_GPG_NAME'),
+ d.getVar('IPK_GPG_PASSPHRASE_FILE'),
armor=is_ascii_sig)
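The signer returned by get_signer() wraps the actual gpg invocation; the armor flag corresponds to the "ASC" signature type, producing an armored detached signature rather than a binary one. A rough standalone equivalent using the gpg command line, assuming a gpg that supports loopback pinentry; the key name and paths are illustrative:

import subprocess

def detach_sign(path, keyname, passphrase_file, armor=True):
    # One non-interactive detached signature per package file.
    cmd = ["gpg", "--batch", "--yes", "--pinentry-mode", "loopback",
           "--passphrase-file", passphrase_file,
           "--local-user", keyname, "--detach-sign"]
    if armor:
        cmd.append("--armor")   # ASCII-armored .asc instead of binary .sig
    subprocess.check_call(cmd + [path])

# detach_sign("pkg/example_1.0_armv7.ipk", "Example Feed Key",
#             "/etc/keys/feed-passphrase")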
diff --git a/meta/classes/sign_package_feed.bbclass b/meta/classes/sign_package_feed.bbclass
index 31a6e9b042..7ff3a35a2f 100644
--- a/meta/classes/sign_package_feed.bbclass
+++ b/meta/classes/sign_package_feed.bbclass
@@ -28,16 +28,19 @@ PACKAGE_FEED_SIGN = '1'
PACKAGE_FEED_GPG_BACKEND ?= 'local'
PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC'
+# Make the feed signing key present in the rootfs
+FEATURE_PACKAGES_package-management_append = " signing-keys-packagefeed"
+
python () {
# Check sanity of configuration
for var in ('PACKAGE_FEED_GPG_NAME', 'PACKAGE_FEED_GPG_PASSPHRASE_FILE'):
- if not d.getVar(var, True):
+ if not d.getVar(var):
raise_sanity_error("You need to define %s in the config" % var, d)
- sigtype = d.getVar("PACKAGE_FEED_GPG_SIGNATURE_TYPE", True)
+ sigtype = d.getVar("PACKAGE_FEED_GPG_SIGNATURE_TYPE")
if sigtype.upper() != "ASC" and sigtype.upper() != "BIN":
raise_sanity_error("Bad value for PACKAGE_FEED_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN" % sigtype)
}
do_package_index[depends] += "signing-keys:do_deploy"
-do_rootfs[depends] += "signing-keys:do_populate_sysroot"
+do_rootfs[depends] += "signing-keys:do_populate_sysroot gnupg-native:do_populate_sysroot"
diff --git a/meta/classes/sign_rpm.bbclass b/meta/classes/sign_rpm.bbclass
index a8ea75faaa..64ae7ce30e 100644
--- a/meta/classes/sign_rpm.bbclass
+++ b/meta/classes/sign_rpm.bbclass
@@ -9,45 +9,63 @@
# Optional variable for specifying the backend to use for signing.
# Currently the only available option is 'local', i.e. local signing
# on the build host.
+# RPM_FILE_CHECKSUM_DIGEST
+# Optional variable for specifying the algorithm used to generate the
+# file checksum digest.
+# RPM_FSK_PATH
+# Optional variable for the file signing key.
+# RPM_FSK_PASSWORD
+# Optional variable for the file signing key password.
# GPG_BIN
# Optional variable for specifying the gpg binary/wrapper to use for
# signing.
+# RPM_GPG_SIGN_CHUNK
+# Optional variable indicating the number of packages signed per gpg
+# invocation.
# GPG_PATH
# Optional variable for specifying the gnupg "home" directory.
-#
+
inherit sanity
RPM_SIGN_PACKAGES='1'
+RPM_SIGN_FILES ?= '0'
RPM_GPG_BACKEND ?= 'local'
+# SHA-256 is used by default
+RPM_FILE_CHECKSUM_DIGEST ?= '8'
+RPM_GPG_SIGN_CHUNK ?= "${BB_NUMBER_THREADS}"
python () {
- if d.getVar('RPM_GPG_PASSPHRASE_FILE', True):
+ if d.getVar('RPM_GPG_PASSPHRASE_FILE'):
raise_sanity_error('RPM_GPG_PASSPHRASE_FILE is replaced by RPM_GPG_PASSPHRASE', d)
# Check configuration
for var in ('RPM_GPG_NAME', 'RPM_GPG_PASSPHRASE'):
- if not d.getVar(var, True):
+ if not d.getVar(var):
raise_sanity_error("You need to define %s in the config" % var, d)
- # Set the expected location of the public key
- d.setVar('RPM_GPG_PUBKEY', os.path.join(d.getVar('STAGING_DIR_TARGET', False),
- d.getVar('sysconfdir', False),
- 'pki',
- 'rpm-gpg',
- 'RPM-GPG-KEY-${DISTRO_VERSION}'))
+ if d.getVar('RPM_SIGN_FILES') == '1':
+ for var in ('RPM_FSK_PATH', 'RPM_FSK_PASSWORD'):
+ if not d.getVar(var):
+ raise_sanity_error("You need to define %s in the config" % var, d)
}
python sign_rpm () {
import glob
from oe.gpg_sign import get_signer
- signer = get_signer(d, d.getVar('RPM_GPG_BACKEND', True))
- rpms = glob.glob(d.getVar('RPM_PKGWRITEDIR', True) + '/*')
+ signer = get_signer(d, d.getVar('RPM_GPG_BACKEND'))
+ rpms = glob.glob(d.getVar('RPM_PKGWRITEDIR') + '/*')
signer.sign_rpms(rpms,
- d.getVar('RPM_GPG_NAME', True),
- d.getVar('RPM_GPG_PASSPHRASE', True))
+ d.getVar('RPM_GPG_NAME'),
+ d.getVar('RPM_GPG_PASSPHRASE'),
+ d.getVar('RPM_FILE_CHECKSUM_DIGEST'),
+ int(d.getVar('RPM_GPG_SIGN_CHUNK')),
+ d.getVar('RPM_FSK_PATH'),
+ d.getVar('RPM_FSK_PASSWORD'))
}
do_package_index[depends] += "signing-keys:do_deploy"
do_rootfs[depends] += "signing-keys:do_populate_sysroot"
+
+PACKAGE_WRITE_DEPS += "gnupg-native"
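sign_rpms() now receives RPM_GPG_SIGN_CHUNK so the backend can bound how many packages each gpg invocation signs, rather than spawning one process per RPM or one for all of them. A minimal sketch of that batching; sign_chunk() stands in for the real backend call:

def chunks(items, n):
    # Yield successive n-sized batches.
    for i in range(0, len(items), n):
        yield items[i:i + n]

def sign_rpms(rpms, chunk_size, sign_chunk):
    for batch in chunks(sorted(rpms), chunk_size):
        sign_chunk(batch)   # one gpg invocation per batch

sign_rpms(["a.rpm", "b.rpm", "c.rpm"], 2, print)
# ['a.rpm', 'b.rpm'] then ['c.rpm']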
diff --git a/meta/classes/siteconfig.bbclass b/meta/classes/siteconfig.bbclass
index 45dce489de..0cfa5a6834 100644
--- a/meta/classes/siteconfig.bbclass
+++ b/meta/classes/siteconfig.bbclass
@@ -1,13 +1,13 @@
python siteconfig_do_siteconfig () {
- shared_state = sstate_state_fromvars(d)
- if shared_state['task'] != 'populate_sysroot':
- return
- if not os.path.isdir(os.path.join(d.getVar('FILE_DIRNAME', True), 'site_config')):
- bb.debug(1, "No site_config directory, skipping do_siteconfig")
- return
- bb.build.exec_func('do_siteconfig_gencache', d)
- sstate_clean(shared_state, d)
- sstate_install(shared_state, d)
+ shared_state = sstate_state_fromvars(d)
+ if shared_state['task'] != 'populate_sysroot':
+ return
+ if not os.path.isdir(os.path.join(d.getVar('FILE_DIRNAME'), 'site_config')):
+ bb.debug(1, "No site_config directory, skipping do_siteconfig")
+ return
+ sstate_install(shared_state, d)
+ bb.build.exec_func('do_siteconfig_gencache', d)
+ sstate_clean(shared_state, d)
}
EXTRASITECONFIG ?= ""
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
index 6a2e4bf560..411e70478e 100644
--- a/meta/classes/siteinfo.bbclass
+++ b/meta/classes/siteinfo.bbclass
@@ -15,11 +15,13 @@
# It is an error for the target not to exist.
# If 'what' doesn't exist then an empty value is returned
#
-def siteinfo_data(d):
+def siteinfo_data_for_machine(arch, os, d):
archinfo = {
"allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
"aarch64": "endian-little bit-64 arm-common arm-64",
"aarch64_be": "endian-big bit-64 arm-common arm-64",
+ "arc": "endian-little bit-32 arc-common",
+ "arceb": "endian-big bit-32 arc-common",
"arm": "endian-little bit-32 arm-common arm-32",
"armeb": "endian-big bit-32 arm-common arm-32",
"avr32": "endian-big bit-32 avr32-common",
@@ -30,19 +32,27 @@ def siteinfo_data(d):
"i586": "endian-little bit-32 ix86-common",
"i686": "endian-little bit-32 ix86-common",
"ia64": "endian-little bit-64",
+ "lm32": "endian-big bit-32",
+ "m68k": "endian-big bit-32",
"microblaze": "endian-big bit-32 microblaze-common",
"microblazeeb": "endian-big bit-32 microblaze-common",
"microblazeel": "endian-little bit-32 microblaze-common",
"mips": "endian-big bit-32 mips-common",
"mips64": "endian-big bit-64 mips-common",
"mips64el": "endian-little bit-64 mips-common",
+ "mipsisa64r6": "endian-big bit-64 mips-common",
+ "mipsisa64r6el": "endian-little bit-64 mips-common",
"mipsel": "endian-little bit-32 mips-common",
+ "mipsisa32r6": "endian-big bit-32 mips-common",
+ "mipsisa32r6el": "endian-little bit-32 mips-common",
"powerpc": "endian-big bit-32 powerpc-common",
"nios2": "endian-little bit-32 nios2-common",
"powerpc64": "endian-big bit-64 powerpc-common",
"ppc": "endian-big bit-32 powerpc-common",
"ppc64": "endian-big bit-64 powerpc-common",
"ppc64le" : "endian-little bit-64 powerpc-common",
+ "riscv32": "endian-little bit-32 riscv-common",
+ "riscv64": "endian-little bit-64 riscv-common",
"sh3": "endian-little bit-32 sh-common",
"sh4": "endian-little bit-32 sh-common",
"sparc": "endian-big bit-32",
@@ -54,14 +64,13 @@ def siteinfo_data(d):
"darwin9": "common-darwin",
"linux": "common-linux common-glibc",
"linux-gnu": "common-linux common-glibc",
+ "linux-gnu_ilp32": "common-linux common-glibc",
"linux-gnux32": "common-linux common-glibc",
"linux-gnun32": "common-linux common-glibc",
"linux-gnueabi": "common-linux common-glibc",
"linux-gnuspe": "common-linux common-glibc",
- "linux-uclibc": "common-linux common-uclibc",
- "linux-uclibceabi": "common-linux common-uclibc",
- "linux-uclibcspe": "common-linux common-uclibc",
"linux-musl": "common-linux common-musl",
+ "linux-muslx32": "common-linux common-musl",
"linux-musleabi": "common-linux common-musl",
"linux-muslspe": "common-linux common-musl",
"uclinux-uclibc": "common-uclibc",
@@ -71,36 +80,44 @@ def siteinfo_data(d):
targetinfo = {
"aarch64-linux-gnu": "aarch64-linux",
"aarch64_be-linux-gnu": "aarch64_be-linux",
+ "aarch64-linux-gnu_ilp32": "bit-32 aarch64_be-linux arm-32",
+ "aarch64_be-linux-gnu_ilp32": "bit-32 aarch64_be-linux arm-32",
"aarch64-linux-musl": "aarch64-linux",
"aarch64_be-linux-musl": "aarch64_be-linux",
"arm-linux-gnueabi": "arm-linux",
"arm-linux-musleabi": "arm-linux",
- "arm-linux-uclibceabi": "arm-linux-uclibc",
"armeb-linux-gnueabi": "armeb-linux",
- "armeb-linux-uclibceabi": "armeb-linux-uclibc",
"armeb-linux-musleabi": "armeb-linux",
+ "microblazeeb-linux" : "microblaze-linux",
+ "microblazeeb-linux-musl" : "microblaze-linux",
+ "microblazeel-linux" : "microblaze-linux",
+ "microblazeel-linux-musl" : "microblaze-linux",
"mips-linux-musl": "mips-linux",
"mipsel-linux-musl": "mipsel-linux",
"mips64-linux-musl": "mips64-linux",
"mips64el-linux-musl": "mips64el-linux",
"mips64-linux-gnun32": "mips-linux bit-32",
"mips64el-linux-gnun32": "mipsel-linux bit-32",
+ "mipsisa64r6-linux-gnun32": "mipsisa32r6-linux bit-32",
+ "mipsisa64r6el-linux-gnun32": "mipsisa32r6el-linux bit-32",
"powerpc-linux": "powerpc32-linux",
"powerpc-linux-musl": "powerpc-linux powerpc32-linux",
- "powerpc-linux-uclibc": "powerpc-linux powerpc32-linux",
"powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux",
"powerpc-linux-muslspe": "powerpc-linux powerpc32-linux",
- "powerpc-linux-uclibcspe": "powerpc-linux powerpc32-linux powerpc-linux-uclibc",
"powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
"powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux",
"powerpc64-linux": "powerpc-linux",
"powerpc64-linux-musl": "powerpc-linux",
+ "riscv32-linux": "riscv32-linux",
+ "riscv32-linux-musl": "riscv32-linux",
+ "riscv64-linux": "riscv64-linux",
+ "riscv64-linux-musl": "riscv64-linux",
"x86_64-cygwin": "bit-64",
"x86_64-darwin": "bit-64",
"x86_64-darwin9": "bit-64",
"x86_64-linux": "bit-64",
"x86_64-linux-musl": "x86_64-linux bit-64",
- "x86_64-linux-uclibc": "bit-64",
+ "x86_64-linux-muslx32": "bit-32 ix86-common x32-linux",
"x86_64-elf": "bit-64",
"x86_64-linux-gnu": "bit-64 x86_64-linux",
"x86_64-linux-gnux32": "bit-32 ix86-common x32-linux",
@@ -109,21 +126,19 @@ def siteinfo_data(d):
# Add in any extra user supplied data which may come from a BSP layer, removing the
# need to always change this class directly
- extra_siteinfo = (d.getVar("SITEINFO_EXTRA_DATAFUNCS", True) or "").split()
+ extra_siteinfo = (d.getVar("SITEINFO_EXTRA_DATAFUNCS") or "").split()
for m in extra_siteinfo:
call = m + "(archinfo, osinfo, targetinfo, d)"
locs = { "archinfo" : archinfo, "osinfo" : osinfo, "targetinfo" : targetinfo, "d" : d}
archinfo, osinfo, targetinfo = bb.utils.better_eval(call, locs)
- hostarch = d.getVar("HOST_ARCH", True)
- hostos = d.getVar("HOST_OS", True)
- target = "%s-%s" % (hostarch, hostos)
+ target = "%s-%s" % (arch, os)
sitedata = []
- if hostarch in archinfo:
- sitedata.extend(archinfo[hostarch].split())
- if hostos in osinfo:
- sitedata.extend(osinfo[hostos].split())
+ if arch in archinfo:
+ sitedata.extend(archinfo[arch].split())
+ if os in osinfo:
+ sitedata.extend(osinfo[os].split())
if target in targetinfo:
sitedata.extend(targetinfo[target].split())
sitedata.append(target)
@@ -132,6 +147,9 @@ def siteinfo_data(d):
bb.debug(1, "SITE files %s" % sitedata);
return sitedata
+def siteinfo_data(d):
+ return siteinfo_data_for_machine(d.getVar("HOST_ARCH"), d.getVar("HOST_OS"), d)
+
python () {
sitedata = set(siteinfo_data(d))
if "endian-little" in sitedata:
@@ -140,7 +158,7 @@ python () {
d.setVar("SITEINFO_ENDIANNESS", "be")
else:
bb.error("Unable to determine endianness for architecture '%s'" %
- d.getVar("HOST_ARCH", True))
+ d.getVar("HOST_ARCH"))
bb.fatal("Please add your architecture to siteinfo.bbclass")
if "bit-32" in sitedata:
@@ -149,31 +167,24 @@ python () {
d.setVar("SITEINFO_BITS", "64")
else:
bb.error("Unable to determine bit size for architecture '%s'" %
- d.getVar("HOST_ARCH", True))
+ d.getVar("HOST_ARCH"))
bb.fatal("Please add your architecture to siteinfo.bbclass")
}
-def siteinfo_get_files(d, aclocalcache = False):
+def siteinfo_get_files(d, sysrootcache = False):
sitedata = siteinfo_data(d)
sitefiles = ""
- for path in d.getVar("BBPATH", True).split(":"):
+ for path in d.getVar("BBPATH").split(":"):
for element in sitedata:
filename = os.path.join(path, "site", element)
if os.path.exists(filename):
sitefiles += filename + " "
- if not aclocalcache:
+ if not sysrootcache:
return sitefiles
- # Now check for siteconfig cache files in the directory setup by autotools.bbclass to
- # avoid races.
- #
- # ACLOCALDIR may or may not exist so cache should only be set to True from autotools.bbclass
- # after files have been copied into this location. To do otherwise risks parsing/signature
- # issues and the directory being created/removed whilst this code executes. This can happen
- # when a multilib recipe is parsed along with its base variant which may be running at the time
- # causing rare but nasty failures
- path_siteconfig = d.getVar('ACLOCALDIR', True)
+ # Now check for siteconfig cache files in sysroots
+ path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE')
if path_siteconfig and os.path.isdir(path_siteconfig):
for i in os.listdir(path_siteconfig):
if not i.endswith("_config"):
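siteinfo_data() becomes a thin wrapper and the real lookup is parameterized on (arch, os), so callers can query machines other than the current host. The lookup itself is three dictionary probes plus the joined triple; a condensed sketch with a trimmed-down table:

archinfo = {"arm": "endian-little bit-32 arm-common arm-32"}
osinfo = {"linux-gnueabi": "common-linux common-glibc"}
targetinfo = {"arm-linux-gnueabi": "arm-linux"}

def siteinfo_for(arch, os_):
    target = "%s-%s" % (arch, os_)
    sitedata = []
    sitedata.extend(archinfo.get(arch, "").split())
    sitedata.extend(osinfo.get(os_, "").split())
    sitedata.extend(targetinfo.get(target, "").split())
    sitedata.append(target)
    return sitedata

print(siteinfo_for("arm", "linux-gnueabi"))
# ['endian-little', 'bit-32', 'arm-common', 'arm-32',
#  'common-linux', 'common-glibc', 'arm-linux', 'arm-linux-gnueabi']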
diff --git a/meta/classes/spdx.bbclass b/meta/classes/spdx.bbclass
index 0c9276584c..fb78e274a8 100644
--- a/meta/classes/spdx.bbclass
+++ b/meta/classes/spdx.bbclass
@@ -26,20 +26,20 @@ python do_spdx () {
import json, shutil
info = {}
- info['workdir'] = d.getVar('WORKDIR', True)
- info['sourcedir'] = d.getVar('SPDX_S', True)
- info['pn'] = d.getVar('PN', True)
- info['pv'] = d.getVar('PV', True)
- info['spdx_version'] = d.getVar('SPDX_VERSION', True)
- info['data_license'] = d.getVar('DATA_LICENSE', True)
-
- sstatedir = d.getVar('SPDXSSTATEDIR', True)
+ info['workdir'] = d.getVar('WORKDIR')
+ info['sourcedir'] = d.getVar('SPDX_S')
+ info['pn'] = d.getVar('PN')
+ info['pv'] = d.getVar('PV')
+ info['spdx_version'] = d.getVar('SPDX_VERSION')
+ info['data_license'] = d.getVar('DATA_LICENSE')
+
+ sstatedir = d.getVar('SPDXSSTATEDIR')
sstatefile = os.path.join(sstatedir, info['pn'] + info['pv'] + ".spdx")
- manifest_dir = d.getVar('SPDX_MANIFEST_DIR', True)
+ manifest_dir = d.getVar('SPDX_MANIFEST_DIR')
info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" )
- info['spdx_temp_dir'] = d.getVar('SPDX_TEMP_DIR', True)
+ info['spdx_temp_dir'] = d.getVar('SPDX_TEMP_DIR')
info['tar_file'] = os.path.join(info['workdir'], info['pn'] + ".tar.gz" )
# Make sure important dirs exist
@@ -74,9 +74,9 @@ python do_spdx () {
foss_license_info = cached_spdx['Licenses']
else:
## setup fossology command
- foss_server = d.getVar('FOSS_SERVER', True)
- foss_flags = d.getVar('FOSS_WGET_FLAGS', True)
- foss_full_spdx = d.getVar('FOSS_FULL_SPDX', True) == "true" or False
+ foss_server = d.getVar('FOSS_SERVER')
+ foss_flags = d.getVar('FOSS_WGET_FLAGS')
+ foss_full_spdx = d.getVar('FOSS_FULL_SPDX') == "true" or False
foss_command = "wget %s --post-file=%s %s"\
% (foss_flags, info['tar_file'], foss_server)
@@ -202,32 +202,26 @@ def list_files(dir):
return
def hash_file(file_name):
- try:
- with open(file_name, 'rb') as f:
- data_string = f.read()
- sha1 = hash_string(data_string)
- return sha1
- except:
- return None
+ from bb.utils import sha1_file
+ return sha1_file(file_name)
def hash_string(data):
import hashlib
sha1 = hashlib.sha1()
- sha1.update(data)
+ sha1.update(data.encode('utf-8'))
return sha1.hexdigest()
def run_fossology(foss_command, full_spdx):
import string, re
import subprocess
-
- p = subprocess.Popen(foss_command.split(),
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- foss_output, foss_error = p.communicate()
- if p.returncode != 0:
+
+ try:
+ foss_output = subprocess.check_output(foss_command.split(),
+ stderr=subprocess.STDOUT).decode('utf-8')
+ except subprocess.CalledProcessError as e:
return None
- foss_output = unicode(foss_output, "utf-8")
- foss_output = string.replace(foss_output, '\r', '')
+ foss_output = foss_output.replace('\r', '')
# Package info
package_info = {}
@@ -290,7 +284,8 @@ def create_spdx_doc(file_info, scanned_files):
def get_ver_code(dirname):
chksums = []
for f_dir, f in list_files(dirname):
- hash = hash_file(os.path.join(dirname, f_dir, f))
+ path = os.path.join(dirname, f_dir, f)
+ hash = hash_file(path)
if not hash is None:
chksums.append(hash)
else:
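hash_file() now defers to bb.utils.sha1_file instead of reading the whole file into memory and hashing the result, and the bare except that silently swallowed I/O errors is gone. A sketch of the streamed approach such a helper takes, assuming fixed-size block reads (the real helper's block size may differ):

import hashlib

def sha1_file(file_name, blocksize=64 * 1024):
    # Stream the file through the digest in blocks rather than
    # loading it all at once.
    sha1 = hashlib.sha1()
    with open(file_name, "rb") as f:
        for block in iter(lambda: f.read(blocksize), b""):
            sha1.update(block)
    return sha1.hexdigest()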
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index ac9d77d528..b5267f00be 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -11,7 +11,7 @@ def generate_sstatefn(spec, hash, d):
SSTATE_PKGARCH = "${PACKAGE_ARCH}"
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
-SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC', True), d.getVar('BB_TASKHASH', True), d)}"
+SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
@@ -23,18 +23,28 @@ PV[vardepvalue] = "${PV}"
# We don't want the sstate to depend on things like the distro string
# of the system, we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""
+SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
-SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR_RPM}/all/"
+SSTATE_DUPWHITELIST = "${DEPLOY_DIR}/licenses/"
# Avoid docbook/sgml catalog warnings for now
SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
+# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
+# target-sdk-provides-dummy overlaps because allarch is disabled when multilib is used
+SSTATE_DUPWHITELIST += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
# Archive the sources for many architectures in one deploy folder
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
-# Ignore overlapping README
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR}/sdk/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt"
+# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/ovmf"
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/grub-efi"
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/systemd-boot"
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/microcode"
-SSTATE_SCAN_FILES ?= "*.la *-config *_config"
-SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'
+SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
+SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
+SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
@@ -46,17 +56,18 @@ SSTATE_ARCHS = " \
${SDK_ARCH}_${PACKAGE_ARCH} \
allarch \
${PACKAGE_ARCH} \
- ${MACHINE}"
+ ${PACKAGE_EXTRA_ARCHS} \
+ ${MACHINE_ARCH}"
SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
SSTATECREATEFUNCS = "sstate_hardcode_path"
+SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
SSTATEPOSTINSTFUNCS = ""
-EXTRA_STAGING_FIXMES ?= ""
-SSTATECLEANFUNCS = ""
+EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
# Check whether sstate exists for tasks that support sstate and are in the
# locked signatures file.
@@ -73,6 +84,18 @@ SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnUPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
+SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
+SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
+ the output hash for a task, which in turn is used to determine equivalency. \
+ "
+
+SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
+SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
+ hash equivalency server, such as PN, PV, taskname, etc. This information \
+ is very useful for developers looking at task data, but may leak sensitive \
+ data if the equivalence server is public. \
+ "
+
python () {
if bb.data.inherits_class('native', d):
d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
@@ -84,7 +107,7 @@ python () {
d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross-canadian', d):
d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
- elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH", True) == "all":
+ elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
d.setVar('SSTATE_PKGARCH', "allarch")
else:
d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
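SSTATE_HASHEQUIV_METHOD names a function that hashes a task's output, letting the hash equivalency server treat two different task hashes as interchangeable when they produce identical output. A toy illustration of the idea only, not the actual oe.sstatesig.OEOuthashBasic implementation:

import hashlib, os

def basic_outhash(topdir):
    # Hash relative paths and contents in a deterministic order so that
    # identical output trees hash identically whatever produced them.
    h = hashlib.sha256()
    for root, dirs, files in os.walk(topdir):
        dirs.sort()                       # stable traversal order
        for name in sorted(files):
            path = os.path.join(root, name)
            h.update(os.path.relpath(path, topdir).encode())
            with open(path, "rb") as f:
                h.update(f.read())
    return h.hexdigest()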
@@ -92,15 +115,9 @@ python () {
if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
- d.setVar('SSTATE_EXTRAPATHWILDCARD', "*/")
+ d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")
- # These classes encode staging paths into their scripts data so can only be
- # reused if we manipulate the paths
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d):
- scan_cmd = "grep -Irl ${STAGING_DIR} ${SSTATE_BUILDDIR}"
- d.setVar('SSTATE_SCAN_CMD', scan_cmd)
-
- unique_tasks = sorted(set((d.getVar('SSTATETASKS', True) or "").split()))
+ unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
d.setVar('SSTATETASKS', " ".join(unique_tasks))
for task in unique_tasks:
d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
@@ -118,19 +135,20 @@ def sstate_init(task, d):
def sstate_state_fromvars(d, task = None):
if task is None:
- task = d.getVar('BB_CURRENTTASK', True)
+ task = d.getVar('BB_CURRENTTASK')
if not task:
bb.fatal("sstate code running without task context?!")
task = task.replace("_setscene", "")
if task.startswith("do_"):
task = task[3:]
- inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs', True) or "").split()
- outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs', True) or "").split()
- plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs', True) or "").split()
- lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile', True) or "").split()
- lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared', True) or "").split()
- interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs', True) or "").split()
+ inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
+ outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
+ plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
+ lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
+ lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
+ interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
+ fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
if not task or len(inputs) != len(outputs):
bb.fatal("sstate variables not setup correctly?!")
@@ -146,6 +164,7 @@ def sstate_state_fromvars(d, task = None):
ss['lockfiles-shared'] = lockfilesshared
ss['plaindirs'] = plaindirs
ss['interceptfuncs'] = interceptfuncs
+ ss['fixmedir'] = fixmedir
return ss
def sstate_add(ss, source, dest, d):
@@ -195,15 +214,18 @@ def sstate_install(ss, d):
srcdir = os.path.join(walkroot, dir)
dstdir = srcdir.replace(state[1], state[2])
#bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
+ if os.path.islink(srcdir):
+ sharedfiles.append(dstdir)
+ continue
if not dstdir.endswith("/"):
dstdir = dstdir + "/"
shareddirs.append(dstdir)
# Check the file list for conflicts against files which already exist
- whitelist = (d.getVar("SSTATE_DUPWHITELIST", True) or "").split()
+ whitelist = (d.getVar("SSTATE_DUPWHITELIST") or "").split()
match = []
for f in sharedfiles:
- if os.path.exists(f):
+ if os.path.exists(f) and not os.path.islink(f):
f = os.path.normpath(f)
realmatch = True
for w in whitelist:
@@ -213,25 +235,27 @@ def sstate_install(ss, d):
break
if realmatch:
match.append(f)
- sstate_search_cmd = "grep -rl '%s' %s --exclude=master.list | sed -e 's:^.*/::' -e 's:\.populate-sysroot::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
+ sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
- if search_output != "":
- match.append("Matched in %s" % search_output.rstrip())
+ if search_output:
+ match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
+ else:
+ match.append(" (not matched to any task)")
if match:
bb.error("The recipe %s is trying to install files into a shared " \
"area when those files already exist. Those files and their manifest " \
- "location are:\n %s\nPlease verify which recipe should provide the " \
- "above files.\nThe build has stopped as continuing in this scenario WILL " \
- "break things, if not now, possibly in the future (we've seen builds fail " \
+ "location are:\n %s\nPlease verify which recipe should provide the " \
+ "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
+ "break things - if not now, possibly in the future (we've seen builds fail " \
"several months later). If the system knew how to recover from this " \
- "automatically it would however there are several different scenarios " \
+ "automatically it would, however there are several different scenarios " \
"which can result in this and we don't know which one this is. It may be " \
"you have switched providers of something like virtual/kernel (e.g. from " \
"linux-yocto to linux-yocto-dev), in that case you need to execute the " \
"clean task for both recipes and it will resolve this error. It may be " \
"you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
- "those recipes should again resolve this error however switching " \
- "DISTRO_FEATURES on an existing build directory is not supported, you " \
+ "those recipes should again resolve this error, however switching " \
+ "DISTRO_FEATURES on an existing build directory is not supported - you " \
"should really clean out tmp and rebuild (reusing sstate should be safe). " \
"It could be the overlapping files detected are harmless in which case " \
"adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
@@ -239,9 +263,13 @@ def sstate_install(ss, d):
"things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
"be to resolve the conflict. If in doubt, please ask on the mailing list, " \
"sharing the error and filelist above." % \
- (d.getVar('PN', True), "\n ".join(match)))
+ (d.getVar('PN'), "\n ".join(match)))
bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")
+ if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
+ sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
+ sharedfiles.append(ss['fixmedir'] + "/fixmepath")
+
# Write out the manifest
f = open(manifest, "w")
for file in sharedfiles:
@@ -260,7 +288,7 @@ def sstate_install(ss, d):
i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
l = bb.utils.lockfile(i + ".lock")
- filedata = d.getVar("STAMP", True) + " " + d2.getVar("SSTATE_MANFILEPREFIX", True) + " " + d.getVar("WORKDIR", True) + "\n"
+ filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
manifests = []
if os.path.exists(i):
with open(i, "r") as f:
@@ -275,7 +303,7 @@ def sstate_install(ss, d):
if os.path.exists(state[1]):
oe.path.copyhardlinktree(state[1], state[2])
- for postinst in (d.getVar('SSTATEPOSTINSTFUNCS', True) or '').split():
+ for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
# All hooks should run in the SSTATE_INSTDIR
bb.build.exec_func(postinst, d, (sstateinst,))
@@ -286,50 +314,73 @@ sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST STATE_MANMACH SSTATE_MANF
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
def sstate_installpkg(ss, d):
- import oe.path
- import subprocess
from oe.gpg_sign import get_signer
- def prepdir(dir):
- # remove dir if it exists, ensure any parent directories do exist
- if os.path.exists(dir):
- oe.path.remove(dir)
- bb.utils.mkdirhier(dir)
- oe.path.remove(dir)
-
sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
- sstatefetch = d.getVar('SSTATE_PKGNAME', True) + '_' + ss['task'] + ".tgz"
- sstatepkg = d.getVar('SSTATE_PKG', True) + '_' + ss['task'] + ".tgz"
+ sstatefetch = d.getVar('SSTATE_PKGNAME') + '_' + ss['task'] + ".tgz"
+ d.appendVar('SSTATE_PKG', '_'+ ss['task'] + ".tgz")
+ sstatepkg = d.getVar('SSTATE_PKG')
if not os.path.exists(sstatepkg):
- pstaging_fetch(sstatefetch, sstatepkg, d)
+ pstaging_fetch(sstatefetch, d)
if not os.path.isfile(sstatepkg):
- bb.note("Staging package %s does not exist" % sstatepkg)
+ bb.note("Sstate package %s does not exist" % sstatepkg)
return False
sstate_clean(ss, d)
d.setVar('SSTATE_INSTDIR', sstateinst)
- d.setVar('SSTATE_PKG', sstatepkg)
- if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG", True), False):
+ if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
signer = get_signer(d, 'local')
if not signer.verify(sstatepkg + '.sig'):
- bb.warn("Cannot verify signature on sstate package %s" % sstatepkg)
+ bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
+ return False
+
+ # Empty the sstateinst directory to ensure it's clean
+ if os.path.exists(sstateinst):
+ oe.path.remove(sstateinst)
+ bb.utils.mkdirhier(sstateinst)
+
+ sstateinst = d.getVar("SSTATE_INSTDIR")
+ d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
+
+ for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
+ # All hooks should run in the SSTATE_INSTDIR
+ bb.build.exec_func(f, d, (sstateinst,))
+
+ return sstate_installpkgdir(ss, d)
+
+def sstate_installpkgdir(ss, d):
+ import oe.path
+ import subprocess
+
+ sstateinst = d.getVar("SSTATE_INSTDIR")
+ d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
- for f in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split() + ['sstate_unpack_package'] + (d.getVar('SSTATEPOSTUNPACKFUNCS', True) or '').split():
+ for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
# All hooks should run in the SSTATE_INSTDIR
bb.build.exec_func(f, d, (sstateinst,))
+ def prepdir(dir):
+ # remove dir if it exists, ensure any parent directories do exist
+ if os.path.exists(dir):
+ oe.path.remove(dir)
+ bb.utils.mkdirhier(dir)
+ oe.path.remove(dir)
+
for state in ss['dirs']:
prepdir(state[1])
os.rename(sstateinst + state[0], state[1])
sstate_install(ss, d)
for plain in ss['plaindirs']:
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
+ sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
src = sstateinst + "/" + plain.replace(workdir, '')
+ if sharedworkdir in plain:
+ src = sstateinst + "/" + plain.replace(sharedworkdir, '')
dest = plain
bb.utils.mkdirhier(src)
prepdir(dest)
@@ -344,32 +395,44 @@ python sstate_hardcode_path_unpack () {
# sstate_hardcode_path(d)
import subprocess
- sstateinst = d.getVar('SSTATE_INSTDIR', True)
- fixmefn = sstateinst + "fixmepath"
+ sstateinst = d.getVar('SSTATE_INSTDIR')
+ sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
+ fixmefn = sstateinst + "fixmepath"
if os.path.isfile(fixmefn):
- staging = d.getVar('STAGING_DIR', True)
- staging_target = d.getVar('STAGING_DIR_TARGET', True)
- staging_host = d.getVar('STAGING_DIR_HOST', True)
-
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
- sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIR:%s:g'" % (staging)
- elif bb.data.inherits_class('cross', d):
- sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIR:%s:g'" % (staging_target, staging)
- else:
+ staging_target = d.getVar('RECIPE_SYSROOT')
+ staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
+
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
+ elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
+ sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
+ else:
+ sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)
- extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
+ extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
for fixmevar in extra_staging_fixmes.split():
- fixme_path = d.getVar(fixmevar, True)
+ fixme_path = d.getVar(fixmevar)
sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
# Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
+ # Defer do_populate_sysroot relocation command
+ if sstatefixmedir:
+ bb.utils.mkdirhier(sstatefixmedir)
+ with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
+ sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
+ sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
+ sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
+ sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
+ f.write(sstate_hardcode_cmd)
+ bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
+ return
+
bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
- subprocess.call(sstate_hardcode_cmd, shell=True)
+ subprocess.check_call(sstate_hardcode_cmd, shell=True)
- # Need to remove this or we'd copy it into the target directory and may
+ # Need to remove this or we'd copy it into the target directory and may
# conflict with another writer
os.remove(fixmefn)
}
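The deferral above means do_populate_sysroot sstate can be unpacked once and relocated later: the fixmepath list of affected files and a fixmepath.cmd with FIXMEFINALSSTATE* placeholders are carried along, and the final paths are substituted when the files reach their destination. A small Python rendering of the substitution step that the sed pipeline performs; the token/path values are illustrative:

def fix_paths(filelist, replacements):
    # Rewrite placeholder tokens with final absolute paths in every
    # file named in the fixmepath list.
    for path in filelist:
        with open(path, errors="surrogateescape") as f:
            data = f.read()
        for token, real in replacements.items():
            data = data.replace(token, real)
        with open(path, "w", errors="surrogateescape") as f:
            f.write(data)

# fix_paths(open("fixmepath").read().split(),
#           {"FIXMESTAGINGDIRHOST": "/work/recipe-sysroot-native",
#            "FIXMESTAGINGDIRTARGET": "/work/recipe-sysroot"})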
@@ -377,17 +440,18 @@ python sstate_hardcode_path_unpack () {
def sstate_clean_cachefile(ss, d):
import oe.path
- sstatepkgfile = d.getVar('SSTATE_PATHSPEC', True) + "*_" + ss['task'] + ".tgz*"
- bb.note("Removing %s" % sstatepkgfile)
- oe.path.remove(sstatepkgfile)
+ sstatepkgfile = d.getVar('SSTATE_PATHSPEC') + "*_" + ss['task'] + ".tgz*"
+ if d.getVarFlag('do_%s' % ss['task'], 'task'):
+ bb.note("Removing %s" % sstatepkgfile)
+ oe.path.remove(sstatepkgfile)
def sstate_clean_cachefiles(d):
- for task in (d.getVar('SSTATETASKS', True) or "").split():
+ for task in (d.getVar('SSTATETASKS') or "").split():
ld = d.createCopy()
ss = sstate_state_fromvars(ld, task)
sstate_clean_cachefile(ss, ld)
-def sstate_clean_manifest(manifest, d):
+def sstate_clean_manifest(manifest, d, prefix=None):
import oe.path
mfile = open(manifest)
@@ -396,6 +460,8 @@ def sstate_clean_manifest(manifest, d):
for entry in entries:
entry = entry.strip()
+ if prefix and not entry.startswith("/"):
+ entry = prefix + "/" + entry
bb.debug(2, "Removing manifest: %s" % entry)
# We can race against another package populating directories as we're removing them
# so we ignore errors here.
@@ -406,7 +472,7 @@ def sstate_clean_manifest(manifest, d):
elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
os.rmdir(entry[:-1])
else:
- oe.path.remove(entry)
+ os.remove(entry)
except OSError:
pass
@@ -414,7 +480,7 @@ def sstate_clean_manifest(manifest, d):
if os.path.exists(manifest + ".postrm"):
import subprocess
os.chmod(postrm, 0o755)
- subprocess.call(postrm, shell=True)
+ subprocess.check_call(postrm, shell=True)
oe.path.remove(postrm)
oe.path.remove(manifest)
@@ -424,8 +490,8 @@ def sstate_clean(ss, d):
import glob
d2 = d.createCopy()
- stamp_clean = d.getVar("STAMPCLEAN", True)
- extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True)
+ stamp_clean = d.getVar("STAMPCLEAN")
+ extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
if extrainf:
d2.setVar("SSTATE_MANMACH", extrainf)
wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
@@ -457,7 +523,7 @@ def sstate_clean(ss, d):
rm_nohash = ".do_%s" % ss['task']
for stfile in glob.glob(wildcard_stfile):
# Keep the sigdata
- if ".sigdata." in stfile:
+ if ".sigdata." in stfile or ".sigbasedata." in stfile:
continue
# Preserve taint files in the stamps directory
if stfile.endswith('.taint'):
@@ -466,22 +532,18 @@ def sstate_clean(ss, d):
stfile.endswith(rm_nohash):
oe.path.remove(stfile)
- # Removes the users/groups created by the package
- for cleanfunc in (d.getVar('SSTATECLEANFUNCS', True) or '').split():
- bb.build.exec_func(cleanfunc, d)
-
sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
CLEANFUNCS += "sstate_cleanall"
python sstate_cleanall() {
- bb.note("Removing shared state for package %s" % d.getVar('PN', True))
+ bb.note("Removing shared state for package %s" % d.getVar('PN'))
- manifest_dir = d.getVar('SSTATE_MANIFESTS', True)
+ manifest_dir = d.getVar('SSTATE_MANIFESTS')
if not os.path.exists(manifest_dir):
return
- tasks = d.getVar('SSTATETASKS', True).split()
+ tasks = d.getVar('SSTATETASKS').split()
for name in tasks:
ld = d.createCopy()
shared_state = sstate_state_fromvars(ld, name)
@@ -497,29 +559,29 @@ python sstate_hardcode_path () {
# Note: the logic in this function needs to match the reverse logic
# in sstate_installpkg(ss, d)
- staging = d.getVar('STAGING_DIR', True)
- staging_target = d.getVar('STAGING_DIR_TARGET', True)
- staging_host = d.getVar('STAGING_DIR_HOST', True)
- sstate_builddir = d.getVar('SSTATE_BUILDDIR', True)
+ staging_target = d.getVar('RECIPE_SYSROOT')
+ staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
+ sstate_builddir = d.getVar('SSTATE_BUILDDIR')
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
- sstate_grep_cmd = "grep -l -e '%s'" % (staging)
- sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIR:g'" % (staging)
- elif bb.data.inherits_class('cross', d):
- sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging)
- sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIR:g'" % (staging_target, staging)
- else:
+ sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
- sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % (staging_host)
+ elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
+ sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
+ sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
+ else:
+ sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
+ sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
- extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
+ extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
for fixmevar in extra_staging_fixmes.split():
- fixme_path = d.getVar(fixmevar, True)
+ fixme_path = d.getVar(fixmevar)
sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
+ sstate_grep_cmd += " -e '%s'" % (fixme_path)
fixmefn = sstate_builddir + "fixmepath"
- sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD', True)
+ sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
sstate_filelist_cmd = "tee %s" % (fixmefn)
# fixmepath file needs relative paths, drop sstate_builddir prefix
@@ -534,96 +596,87 @@ python sstate_hardcode_path () {
sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
- subprocess.call(sstate_hardcode_cmd, shell=True)
+ subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)
# If the fixmefn is empty, remove it..
if os.stat(fixmefn).st_size == 0:
os.remove(fixmefn)
else:
bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
- subprocess.call(sstate_filelist_relative_cmd, shell=True)
+ subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
}
def sstate_package(ss, d):
import oe.path
- def make_relative_symlink(path, outputpath, d):
- # Replace out absolute TMPDIR paths in symlinks with relative ones
- if not os.path.islink(path):
- return
- link = os.readlink(path)
- if not os.path.isabs(link):
- return
- if not link.startswith(tmpdir):
- return
-
- depth = outputpath.rpartition(tmpdir)[2].count('/')
- base = link.partition(tmpdir)[2].strip()
- while depth > 1:
- base = "/.." + base
- depth -= 1
- base = "." + base
-
- bb.debug(2, "Replacing absolute path %s with relative path %s for %s" % (link, base, outputpath))
- os.remove(path)
- os.symlink(base, path)
-
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
- sstatepkg = d.getVar('SSTATE_PKG', True) + '_'+ ss['task'] + ".tgz"
+ d.appendVar('SSTATE_PKG', '_'+ ss['task'] + ".tgz")
bb.utils.remove(sstatebuild, recurse=True)
bb.utils.mkdirhier(sstatebuild)
- bb.utils.mkdirhier(os.path.dirname(sstatepkg))
for state in ss['dirs']:
if not os.path.exists(state[1]):
continue
- if d.getVar('SSTATE_SKIP_CREATION', True) == '1':
- continue
srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
+ # Find absolute symlinks and error out on them. We could attempt to relocate, but it's not
+ # clear what the symlink is relative to in this context. We could add that markup
+ # to sstate tasks, but there aren't many of these, so it is better to avoid them entirely.
for walkroot, dirs, files in os.walk(state[1]):
- for file in files:
+ for file in files + dirs:
srcpath = os.path.join(walkroot, file)
- dstpath = srcpath.replace(state[1], state[2])
- make_relative_symlink(srcpath, dstpath, d)
- for dir in dirs:
- srcpath = os.path.join(walkroot, dir)
- dstpath = srcpath.replace(state[1], state[2])
- make_relative_symlink(srcpath, dstpath, d)
+ if not os.path.islink(srcpath):
+ continue
+ link = os.readlink(srcpath)
+ if not os.path.isabs(link):
+ continue
+ if not link.startswith(tmpdir):
+ continue
+ bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
- oe.path.copyhardlinktree(state[1], sstatebuild + state[0])
+ os.rename(state[1], sstatebuild + state[0])
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
+ sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
for plain in ss['plaindirs']:
pdir = plain.replace(workdir, sstatebuild)
+ if sharedworkdir in plain:
+ pdir = plain.replace(sharedworkdir, sstatebuild)
bb.utils.mkdirhier(plain)
bb.utils.mkdirhier(pdir)
- oe.path.copyhardlinktree(plain, pdir)
+ os.rename(plain, pdir)
d.setVar('SSTATE_BUILDDIR', sstatebuild)
- d.setVar('SSTATE_PKG', sstatepkg)
+ d.setVar('SSTATE_INSTDIR', sstatebuild)
- for f in (d.getVar('SSTATECREATEFUNCS', True) or '').split() + \
- ['sstate_create_package', 'sstate_sign_package'] + \
- (d.getVar('SSTATEPOSTCREATEFUNCS', True) or '').split():
+ if d.getVar('SSTATE_SKIP_CREATION') == '1':
+ return
+
+ sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
+ if d.getVar('SSTATE_SIG_KEY'):
+ sstate_create_package.append('sstate_sign_package')
+
+ for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
+ sstate_create_package + \
+ (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
# All hooks should run in SSTATE_BUILDDIR.
bb.build.exec_func(f, d, (sstatebuild,))
- bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
+ # SSTATE_PKG may have been changed by sstate_report_unihash
+ bb.siggen.dump_this_task(d.getVar('SSTATE_PKG') + ".siginfo", d)
return
-def pstaging_fetch(sstatefetch, sstatepkg, d):
+def pstaging_fetch(sstatefetch, d):
import bb.fetch2
# Only try and fetch if the user has configured a mirror
- mirrors = d.getVar('SSTATE_MIRRORS', True)
+ mirrors = d.getVar('SSTATE_MIRRORS')
if not mirrors:
return
# Copy the data object and override DL_DIR and SRC_URI
localdata = bb.data.createCopy(d)
- bb.data.update_data(localdata)
dldir = localdata.expand("${SSTATE_DIR}")
bb.utils.mkdirhier(dldir)
@@ -635,14 +688,15 @@ def pstaging_fetch(sstatefetch, sstatepkg, d):
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
- if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
+ if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
+ bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
localdata.delVar('BB_NO_NETWORK')
# Try a fetch from the sstate mirror, if it fails just return and
# we will build the package
uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
- if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG", True), False):
+ if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]
for srcuri in uris:
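The fetch above is attempted only when SSTATE_MIRRORS is configured. A typical local.conf entry looks something like the following (server URL hypothetical); PATH is substituted by the fetcher with the path of the requested object:

    SSTATE_MIRRORS ?= "file://.* http://sstate.example.com/sstate-cache/PATH;downloadfilename=PATH"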
@@ -658,7 +712,7 @@ def sstate_setscene(d):
shared_state = sstate_state_fromvars(d)
accelerate = sstate_installpkg(shared_state, d)
if not accelerate:
- raise bb.build.FuncFailed("No suitable staging package found")
+ bb.fatal("No suitable staging package found")
python sstate_task_prefunc () {
shared_state = sstate_state_fromvars(d)
@@ -669,14 +723,21 @@ sstate_task_prefunc[dirs] = "${WORKDIR}"
python sstate_task_postfunc () {
shared_state = sstate_state_fromvars(d)
- sstate_install(shared_state, d)
for intercept in shared_state['interceptfuncs']:
- bb.build.exec_func(intercept, d, (d.getVar("WORKDIR", True),))
+ bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))
+
omask = os.umask(0o002)
if omask != 0o002:
bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
sstate_package(shared_state, d)
os.umask(omask)
+
+ sstateinst = d.getVar("SSTATE_INSTDIR")
+ d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])
+
+ sstate_installpkgdir(shared_state, d)
+
+ bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
}
sstate_task_postfunc[dirs] = "${WORKDIR}"
@@ -686,36 +747,59 @@ sstate_task_postfunc[dirs] = "${WORKDIR}"
# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
#
sstate_create_package () {
+ # Exit early if it already exists
+ if [ -e ${SSTATE_PKG} ]; then
+ return
+ fi
+
+ mkdir -p `dirname ${SSTATE_PKG}`
TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
+
+ # Use pigz if available
+ OPT="-czS"
+ if [ -x "$(command -v pigz)" ]; then
+ OPT="-I pigz -cS"
+ fi
+
# Need to handle empty directories
if [ "$(ls -A)" ]; then
set +e
- tar -czf $TFILE *
+ tar $OPT -f $TFILE *
ret=$?
if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
exit 1
fi
set -e
else
- tar -cz --file=$TFILE --files-from=/dev/null
+ tar $OPT --file=$TFILE --files-from=/dev/null
fi
chmod 0664 $TFILE
- mv -f $TFILE ${SSTATE_PKG}
-
- cd ${WORKDIR}
- rm -rf ${SSTATE_BUILDDIR}
+ # Skip if it was already created by some other process
+ if [ ! -e ${SSTATE_PKG} ]; then
+ mv -f $TFILE ${SSTATE_PKG}
+ else
+ rm $TFILE
+ fi
}
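A side note on the create-then-publish pattern above: the existence check before the mv is itself racy, but that is tolerable here because two writers racing on the same SSTATE_PKG produce identical archives. A minimal sketch of the pattern (names hypothetical):

    TFILE=$(mktemp "${TARGET}.XXXXXXXX")
    produce_output > "$TFILE"
    if [ ! -e "$TARGET" ]; then
        mv -f "$TFILE" "$TARGET"   # rename is atomic within one filesystem
    else
        rm "$TFILE"                # another writer already published it
    fi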
python sstate_sign_package () {
from oe.gpg_sign import get_signer
- if d.getVar('SSTATE_SIG_KEY', True):
- signer = get_signer(d, 'local')
- sstate_pkg = d.getVar('SSTATE_PKG', True)
- if os.path.exists(sstate_pkg + '.sig'):
- os.unlink(sstate_pkg + '.sig')
- signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
- d.getVar('SSTATE_SIG_PASSPHRASE', True), armor=False)
+
+ signer = get_signer(d, 'local')
+ sstate_pkg = d.getVar('SSTATE_PKG')
+ if os.path.exists(sstate_pkg + '.sig'):
+ os.unlink(sstate_pkg + '.sig')
+ signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
+ d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
+}
+
+python sstate_report_unihash() {
+ report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
+
+ if report_unihash:
+ ss = sstate_state_fromvars(d)
+ report_unihash(os.getcwd(), ss['task'], d)
}
#
@@ -724,6 +808,8 @@ python sstate_sign_package () {
#
sstate_unpack_package () {
tar -xvzf ${SSTATE_PKG}
+ # update .siginfo atime on local/NFS mirror
+ [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo
# Use "! -w ||" to return true for read only files
[ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
[ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
@@ -732,25 +818,26 @@ sstate_unpack_package () {
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
-def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
-
- ret = []
- missed = []
- missing = []
+def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
+ found = set()
+ missed = set()
extension = ".tgz"
if siginfo:
extension = extension + ".siginfo"
+ def gethash(task):
+ return sq_data['unihash'][task]
+
def getpathcomponents(task, d):
# Magic data from BB_HASHFILENAME
- splithashfn = sq_hashfn[task].split(" ")
+ splithashfn = sq_data['hashfn'][task].split(" ")
spec = splithashfn[1]
if splithashfn[0] == "True":
- extrapath = d.getVar("NATIVELSBSTRING", True) + "/"
+ extrapath = d.getVar("NATIVELSBSTRING") + "/"
else:
extrapath = ""
-
- tname = sq_task[task][3:]
+
+ tname = bb.runqueue.taskname_from_tid(task)[3:]
if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
spec = splithashfn[2]
@@ -758,38 +845,25 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
return spec, extrapath, tname
- def sstate_pkg_to_pn(pkg, d):
- """
- Translate an sstate filename to a PN value by way of SSTATE_PKGSPEC. This is slightly hacky but
- we don't have access to everything in this context.
- """
- pkgspec = d.getVar('SSTATE_PKGSPEC', False)
- try:
- idx = pkgspec.split(':').index('${PN}')
- except ValueError:
- bb.fatal('Unable to find ${PN} in SSTATE_PKGSPEC')
- return pkg.split(':')[idx]
-
- for task in range(len(sq_fn)):
+ for tid in sq_data['hash']:
- spec, extrapath, tname = getpathcomponents(task, d)
+ spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
+ sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, gethash(tid), d) + "_" + tname + extension)
if os.path.exists(sstatefile):
bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
- ret.append(task)
+ found.add(tid)
continue
else:
- missed.append(task)
+ missed.add(tid)
bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
- mirrors = d.getVar("SSTATE_MIRRORS", True)
+ mirrors = d.getVar("SSTATE_MIRRORS")
if mirrors:
# Copy the data object and override DL_DIR and SRC_URI
localdata = bb.data.createCopy(d)
- bb.data.update_data(localdata)
dldir = localdata.expand("${SSTATE_DIR}")
localdata.delVar('MIRRORS')
@@ -801,11 +875,10 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
- if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
+ if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
+ bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
localdata.delVar('BB_NO_NETWORK')
- whitelist = bb.runqueue.get_setscene_enforce_whitelist(d)
-
from bb.fetch2 import FetchConnectionCache
def checkstatus_init(thread_worker):
thread_worker.connection_cache = FetchConnectionCache()
@@ -814,7 +887,7 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
thread_worker.connection_cache.close_connections()
def checkstatus(thread_worker, arg):
- (task, sstatefile) = arg
+ (tid, sstatefile) = arg
localdata2 = bb.data.createCopy(localdata)
srcuri = "file://" + sstatefile
@@ -826,82 +899,103 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
connection_cache=thread_worker.connection_cache)
fetcher.checkstatus()
bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
- ret.append(task)
- if task in missed:
- missed.remove(task)
+ found.add(tid)
+ if tid in missed:
+ missed.remove(tid)
except:
- missed.append(task)
+ missed.add(tid)
bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
- if whitelist:
- pn = sstate_pkg_to_pn(sstatefile, d)
- taskname = sq_task[task]
- if not bb.runqueue.check_setscene_enforce_whitelist(pn, taskname, whitelist):
- missing.append(task)
- bb.error('Sstate artifact unavailable for %s.%s' % (pn, taskname))
pass
- bb.event.fire(bb.event.ProcessProgress("Checking sstate mirror object availability", len(tasklist) - thread_worker.tasks.qsize()), d)
+ bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
tasklist = []
- for task in range(len(sq_fn)):
- if task in ret:
+ for tid in sq_data['hash']:
+ if tid in found:
continue
- spec, extrapath, tname = getpathcomponents(task, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
- tasklist.append((task, sstatefile))
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), d) + "_" + tname + extension)
+ tasklist.append((tid, sstatefile))
if tasklist:
- bb.event.fire(bb.event.ProcessStarted("Checking sstate mirror object availability", len(tasklist)), d)
+ msg = "Checking sstate mirror object availability"
+ bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
+
import multiprocessing
nproc = min(multiprocessing.cpu_count(), len(tasklist))
+ bb.event.enable_threadlock()
pool = oe.utils.ThreadedPool(nproc, len(tasklist),
worker_init=checkstatus_init, worker_end=checkstatus_end)
for t in tasklist:
pool.add_task(checkstatus, t)
pool.start()
pool.wait_completion()
- bb.event.fire(bb.event.ProcessFinished("Checking sstate mirror object availability"), d)
- if whitelist and missing:
- bb.fatal('Required artifacts were unavailable - exiting')
+ bb.event.disable_threadlock()
+
+ bb.event.fire(bb.event.ProcessFinished(msg), d)
+
+ # Likely checking an individual task hash again for multiconfig sharing of sstate tasks, so skip reporting
+ if len(sq_data['hash']) == 1:
+ return found
- inheritlist = d.getVar("INHERIT", True)
+ inheritlist = d.getVar("INHERIT")
if "toaster" in inheritlist:
evdata = {'missed': [], 'found': []};
- for task in missed:
- spec, extrapath, tname = getpathcomponents(task, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
- evdata['missed'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
- for task in ret:
- spec, extrapath, tname = getpathcomponents(task, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
- evdata['found'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
+ for tid in missed:
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), d) + "_" + tname + ".tgz")
+ evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
+ for tid in found:
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), d) + "_" + tname + ".tgz")
+ evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
+ if summary:
+ # Print some summary statistics about the current task completion and how much sstate
+ # reuse there was. Avoid divide by zero errors.
+ total = len(sq_data['hash'])
+ complete = 0
+ if currentcount:
+ complete = (len(found) + currentcount) / (total + currentcount) * 100
+ match = 0
+ if total:
+ match = len(found) / total * 100
+ bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(found), len(missed), currentcount, match, complete))
+
if hasattr(bb.parse.siggen, "checkhashes"):
- bb.parse.siggen.checkhashes(missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d)
+ bb.parse.siggen.checkhashes(sq_data, missed, found, d)
- return ret
+ return found
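The summary arithmetic above guards both divisors to avoid divide-by-zero. A standalone sketch of the same computation (function name hypothetical):

    def sstate_summary(total, found, missed, currentcount):
        # Mirrors the guarded percentage math in sstate_checkhashes()
        complete = 0
        if currentcount:
            complete = (found + currentcount) / (total + currentcount) * 100
        match = 0
        if total:
            match = found / total * 100
        return "Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (
            total, found, missed, currentcount, match, complete)

    print(sstate_summary(10, 7, 3, 5))  # 70% match, 80% complete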
BB_SETSCENE_DEPVALID = "setscene_depvalid"
-def setscene_depvalid(task, taskdependees, notneeded, d):
+def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
# taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
# task is included in taskdependees too
+ # Return - False - We need this dependency
+ # - True - We can skip this dependency
+ import re
- bb.debug(2, "Considering setscene task: %s" % (str(taskdependees[task])))
+ def logit(msg, log):
+ if log is not None:
+ log.append(msg)
+ else:
+ bb.debug(2, msg)
- def isNativeCross(x):
- return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x
+ logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
- def isPostInstDep(x):
- if x in ["qemu-native", "gdk-pixbuf-native", "qemuwrapper-cross", "depmodwrapper-cross", "systemd-systemctl-native", "gtk-icon-utils-native", "ca-certificates-native"]:
- return True
- return False
+ def isNativeCross(x):
+ return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
# We only need to trigger populate_lic through direct dependencies
if taskdependees[task][1] == "do_populate_lic":
return True
+ # stash_locale and gcc_stash_builddir are never needed as a dependency for built objects
+ if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir":
+ return True
+
# We only need to trigger packagedata through direct dependencies
# but need to preserve packagedata on packagedata links
if taskdependees[task][1] == "do_packagedata":
@@ -911,7 +1005,7 @@ def setscene_depvalid(task, taskdependees, notneeded, d):
return True
for dep in taskdependees:
- bb.debug(2, " considering dependency: %s" % (str(taskdependees[dep])))
+ logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
if task == dep:
continue
if dep in notneeded:
@@ -919,10 +1013,11 @@ def setscene_depvalid(task, taskdependees, notneeded, d):
# do_package_write_* and do_package doesn't need do_package
if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
continue
- # do_package_write_* and do_package doesn't need do_populate_sysroot, unless is a postinstall dependency
- if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
- if isPostInstDep(taskdependees[task][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
- return False
+ # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
+ if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
+ return False
+ # do_package/packagedata/package_qa don't need do_populate_sysroot
+ if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa']:
continue
# Native/Cross packages don't exist and are noexec anyway
if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
@@ -935,11 +1030,24 @@ def setscene_depvalid(task, taskdependees, notneeded, d):
# Consider sysroot depending on sysroot tasks
if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
- # base-passwd/shadow-sysroot don't need their dependencies
- if taskdependees[dep][0].endswith(("base-passwd", "shadow-sysroot")):
- continue
- # Nothing need depend on libc-initial/gcc-cross-initial
- if "-initial" in taskdependees[task][0]:
+ # Allow excluding certain recursive dependencies. If a recipe needs one of them,
+ # it should add a specific dependency itself rather than relying on one of its
+ # dependees to pull them in.
+ # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
+ not_needed = False
+ excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
+ if excludedeps is None:
+ # Cache the regular expressions for speed
+ excludedeps = []
+ for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
+ excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
+ d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
+ for excl in excludedeps:
+ if excl[0].match(taskdependees[dep][0]):
+ if excl[1].match(taskdependees[task][0]):
+ not_needed = True
+ break
+ if not_needed:
continue
# For meta-extsdk-toolchain we want all sysroot dependencies
if taskdependees[dep][0] == 'meta-extsdk-toolchain':
@@ -951,7 +1059,8 @@ def setscene_depvalid(task, taskdependees, notneeded, d):
if isNativeCross(taskdependees[dep][0]):
return False
# Native/cross tools depended upon by target sysroot are not needed
- if isNativeCross(taskdependees[task][0]):
+ # Add an exception for shadow-native as required by useradd.bbclass
+ if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
continue
# Target populate_sysroot need their dependencies
return False
@@ -964,7 +1073,7 @@ def setscene_depvalid(task, taskdependees, notneeded, d):
# Safe fallthrough default
- bb.debug(2, " Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])))
+ logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
return False
return True
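Each SSTATE_EXCLUDEDEPS_SYSROOT entry is a pair of regular expressions separated by "->": the left side matches the recipe doing the depending, the right side the dependency to exclude. A quick illustration of the matching (patterns illustrative):

    import re

    excl = ".*->autoconf-archive-native"
    dep_re, pn_re = (re.compile(p) for p in excl.split('->', 1))
    # The dependency is skipped only when both sides match:
    print(bool(dep_re.match("gettext-native") and pn_re.match("autoconf-archive-native")))  # True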
@@ -973,19 +1082,19 @@ sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
d = e.data
# When we write an sstate package we rewrite the SSTATE_PKG
- spkg = d.getVar('SSTATE_PKG', True)
+ spkg = d.getVar('SSTATE_PKG')
if not spkg.endswith(".tgz"):
- taskname = d.getVar("BB_RUNTASK", True)[3:]
- spec = d.getVar('SSTATE_PKGSPEC', True)
- swspec = d.getVar('SSTATE_SWSPEC', True)
+ taskname = d.getVar("BB_RUNTASK")[3:]
+ spec = d.getVar('SSTATE_PKGSPEC')
+ swspec = d.getVar('SSTATE_SWSPEC')
if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
d.setVar("SSTATE_EXTRAPATH", "")
- sstatepkg = d.getVar('SSTATE_PKG', True)
+ sstatepkg = d.getVar('SSTATE_PKG')
bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz" ".siginfo", d)
}
-SSTATE_PRUNE_OBSOLETEWORKDIR = "1"
+SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
# Event handler which removes manifests and stamps file for
# recipes which are no longer reachable in a build where they
@@ -999,8 +1108,23 @@ python sstate_eventhandler2() {
d = e.data
stamps = e.stamps.values()
removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
+ preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
+ preservestamps = []
+ if os.path.exists(preservestampfile):
+ with open(preservestampfile, 'r') as f:
+ preservestamps = f.readlines()
seen = []
- for a in d.getVar("SSTATE_ARCHS", True).split():
+
+ # The machine index contains all the stamps this machine has ever seen in this build directory.
+ # We should only remove things which this machine once accessed but no longer does.
+ machineindex = set()
+ bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
+ mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
+ if os.path.exists(mi):
+ with open(mi, "r") as f:
+ machineindex = set(line.strip() for line in f.readlines())
+
+ for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
toremove = []
i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
if not os.path.exists(i):
@@ -1008,27 +1132,44 @@ python sstate_eventhandler2() {
with open(i, "r") as f:
lines = f.readlines()
for l in lines:
- (stamp, manifest, workdir) = l.split()
- if stamp not in stamps:
- toremove.append(l)
- if stamp not in seen:
- bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
- seen.append(stamp)
+ try:
+ (stamp, manifest, workdir) = l.split()
+ if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
+ toremove.append(l)
+ if stamp not in seen:
+ bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
+ seen.append(stamp)
+ except ValueError:
+ bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
if toremove:
- bb.note("There are %d recipes to be removed from sysroot %s, removing..." % (len(toremove), a))
+ msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
+ bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
+
+ removed = 0
+ for r in toremove:
+ (stamp, manifest, workdir) = r.split()
+ for m in glob.glob(manifest + ".*"):
+ if m.endswith(".postrm"):
+ continue
+ sstate_clean_manifest(m, d)
+ bb.utils.remove(stamp + "*")
+ if removeworkdir:
+ bb.utils.remove(workdir, recurse = True)
+ lines.remove(r)
+ removed = removed + 1
+ bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
+
+ bb.event.fire(bb.event.ProcessFinished(msg), d)
- for r in toremove:
- (stamp, manifest, workdir) = r.split()
- for m in glob.glob(manifest + ".*"):
- if m.endswith(".postrm"):
- continue
- sstate_clean_manifest(m, d)
- bb.utils.remove(stamp + "*")
- if removeworkdir:
- bb.utils.remove(workdir, recurse = True)
- lines.remove(r)
with open(i, "w") as f:
for l in lines:
f.write(l)
+ machineindex |= set(stamps)
+ with open(mi, "w") as f:
+ for l in machineindex:
+ f.write(l + "\n")
+
+ if preservestamps:
+ os.remove(preservestampfile)
}
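A minimal sketch of the machine-index bookkeeping introduced above (paths and names hypothetical). The index records every stamp this machine has seen, so only entries it once accessed become eligible for pruning:

    import os

    def load_machine_index(path):
        # One stamp path per line; a missing file means an empty index
        if not os.path.exists(path):
            return set()
        with open(path) as f:
            return set(line.strip() for line in f)

    def save_machine_index(path, index, new_stamps):
        index |= set(new_stamps)
        with open(path, "w") as f:
            for stamp in sorted(index):  # sorted for readability; the class itself writes unsorted
                f.write(stamp + "\n")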
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
index a0b09a00bd..7e108950f5 100644
--- a/meta/classes/staging.bbclass
+++ b/meta/classes/staging.bbclass
@@ -27,10 +27,12 @@ SYSROOT_DIRS_BLACKLIST = " \
${mandir} \
${docdir} \
${infodir} \
- ${datadir}/locale \
${datadir}/applications \
${datadir}/fonts \
+ ${datadir}/gtk-doc/html \
+ ${datadir}/locale \
${datadir}/pixmaps \
+ ${libdir}/${PN}/ptest \
"
sysroot_stage_dir() {
@@ -67,101 +69,19 @@ sysroot_stage_all() {
}
python sysroot_strip () {
- import stat, errno
-
- dvar = d.getVar('SYSROOT_DESTDIR', True)
- pn = d.getVar('PN', True)
-
- os.chdir(dvar)
-
- # Return type (bits):
- # 0 - not elf
- # 1 - ELF
- # 2 - stripped
- # 4 - executable
- # 8 - shared library
- # 16 - kernel module
- def isELF(path):
- type = 0
- ret, result = oe.utils.getstatusoutput("file \"%s\"" % path.replace("\"", "\\\""))
-
- if ret:
- bb.error("split_and_strip_files: 'file %s' failed" % path)
- return type
-
- # Not stripped
- if "ELF" in result:
- type |= 1
- if "not stripped" not in result:
- type |= 2
- if "executable" in result:
- type |= 4
- if "shared" in result:
- type |= 8
- return type
-
-
- elffiles = {}
- inodes = {}
- libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir", True))
- baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir", True))
- if (d.getVar('INHIBIT_SYSROOT_STRIP', True) != '1'):
- #
- # First lets figure out all of the files we may have to process
- #
- for root, dirs, files in os.walk(dvar):
- for f in files:
- file = os.path.join(root, f)
-
- try:
- ltarget = oe.path.realpath(file, dvar, False)
- s = os.lstat(ltarget)
- except OSError as e:
- (err, strerror) = e.args
- if err != errno.ENOENT:
- raise
- # Skip broken symlinks
- continue
- if not s:
- continue
- # Check its an excutable
- if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
- or ((file.startswith(libdir) or file.startswith(baselibdir)) and ".so" in f):
- # If it's a symlink, and points to an ELF file, we capture the readlink target
- if os.path.islink(file):
- continue
-
- # It's a file (or hardlink), not a link
- # ...but is it ELF, and is it already stripped?
- elf_file = isELF(file)
- if elf_file & 1:
- if elf_file & 2:
- if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
- bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
- else:
- bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn))
- continue
-
- if s.st_ino in inodes:
- os.unlink(file)
- os.link(inodes[s.st_ino], file)
- else:
- inodes[s.st_ino] = file
- # break hardlink
- bb.utils.copyfile(file, file)
- elffiles[file] = elf_file
-
- #
- # Now strip them (in parallel)
- #
- strip = d.getVar("STRIP", True)
- sfiles = []
- for file in elffiles:
- elf_file = int(elffiles[file])
- #bb.note("Strip %s" % file)
- sfiles.append((file, elf_file, strip))
-
- oe.utils.multiprocess_exec(sfiles, oe.package.runstrip)
+ inhibit_sysroot = d.getVar('INHIBIT_SYSROOT_STRIP')
+ if inhibit_sysroot and oe.types.boolean(inhibit_sysroot):
+ return
+
+ dstdir = d.getVar('SYSROOT_DESTDIR')
+ pn = d.getVar('PN')
+ libdir = d.getVar("libdir")
+ base_libdir = d.getVar("base_libdir")
+ qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split()
+ strip_cmd = d.getVar("STRIP")
+
+ oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d,
+ qa_already_stripped=qa_already_stripped)
}
do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
@@ -171,54 +91,17 @@ addtask populate_sysroot after do_install
SYSROOT_PREPROCESS_FUNCS ?= ""
SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
-SYSROOT_LOCK = "${STAGING_DIR}/staging.lock"
-
-# We clean out any existing sstate from the sysroot if we rerun configure
-python sysroot_cleansstate () {
- ss = sstate_state_fromvars(d, "populate_sysroot")
- sstate_clean(ss, d)
-}
-do_configure[prefuncs] += "sysroot_cleansstate"
-
-
-BB_SETSCENE_VERIFY_FUNCTION2 = "sysroot_checkhashes2"
-
-def sysroot_checkhashes2(covered, tasknames, fns, d, invalidtasks):
- problems = set()
- configurefns = set()
- for tid in invalidtasks:
- if tasknames[tid] == "do_configure" and tid not in covered:
- configurefns.add(fns[tid])
- for tid in covered:
- if tasknames[tid] == "do_populate_sysroot" and fns[tid] in configurefns:
- problems.add(tid)
- return problems
-
-BB_SETSCENE_VERIFY_FUNCTION = "sysroot_checkhashes"
-
-def sysroot_checkhashes(covered, tasknames, fnids, fns, d, invalidtasks = None):
- problems = set()
- configurefnids = set()
- if not invalidtasks:
- invalidtasks = range(len(tasknames))
- for task in invalidtasks:
- if tasknames[task] == "do_configure" and task not in covered:
- configurefnids.add(fnids[task])
- for task in covered:
- if tasknames[task] == "do_populate_sysroot" and fnids[task] in configurefnids:
- problems.add(task)
- return problems
python do_populate_sysroot () {
bb.build.exec_func("sysroot_stage_all", d)
bb.build.exec_func("sysroot_strip", d)
- for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS', True) or '').split():
+ for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
bb.build.exec_func(f, d)
- pn = d.getVar("PN", True)
- multiprov = d.getVar("MULTI_PROVIDER_WHITELIST", True).split()
+ pn = d.getVar("PN")
+ multiprov = d.getVar("MULTI_PROVIDER_WHITELIST").split()
provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
bb.utils.mkdirhier(provdir)
- for p in d.getVar("PROVIDES", True).split():
+ for p in d.getVar("PROVIDES").split():
if p in multiprov:
continue
p = p.replace("/", "_")
@@ -229,15 +112,491 @@ python do_populate_sysroot () {
do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
do_populate_sysroot[vardepsexclude] += "MULTI_PROVIDER_WHITELIST"
+POPULATESYSROOTDEPS = ""
+POPULATESYSROOTDEPS_class-target = "virtual/${MLPREFIX}${TARGET_PREFIX}binutils:do_populate_sysroot"
+POPULATESYSROOTDEPS_class-nativesdk = "virtual/${TARGET_PREFIX}binutils-crosssdk:do_populate_sysroot"
+do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"
+
SSTATETASKS += "do_populate_sysroot"
do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
-do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_HOST}/"
-do_populate_sysroot[stamp-extra-info] = "${MACHINE}"
+do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
+do_populate_sysroot[sstate-fixmedir] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
python do_populate_sysroot_setscene () {
sstate_setscene(d)
}
addtask do_populate_sysroot_setscene
+def staging_copyfile(c, target, dest, postinsts, seendirs):
+ import errno
+
+ destdir = os.path.dirname(dest)
+ if destdir not in seendirs:
+ bb.utils.mkdirhier(destdir)
+ seendirs.add(destdir)
+ if "/usr/bin/postinst-" in c:
+ postinsts.append(dest)
+ if os.path.islink(c):
+ linkto = os.readlink(c)
+ if os.path.lexists(dest):
+ if not os.path.islink(dest):
+ raise OSError(errno.EEXIST, "Link %s already exists as a file" % dest, dest)
+ if os.readlink(dest) == linkto:
+ return dest
+ raise OSError(errno.EEXIST, "Link %s already exists to a different location? (%s vs %s)" % (dest, os.readlink(dest), linkto), dest)
+ os.symlink(linkto, dest)
+ #bb.warn(c)
+ else:
+ try:
+ os.link(c, dest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(c, dest)
+ else:
+ raise
+ return dest
+
+def staging_copydir(c, target, dest, seendirs):
+ if dest not in seendirs:
+ bb.utils.mkdirhier(dest)
+ seendirs.add(dest)
+
+def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
+ import subprocess
+
+ if not fixme:
+ return
+ cmd = "sed -e 's:^[^/]*/:%s/:g' %s | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (target, " ".join(fixme), recipesysroot, recipesysrootnative)
+ for fixmevar in ['PSEUDO_SYSROOT', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
+ fixme_path = d.getVar(fixmevar)
+ cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
+ bb.debug(2, cmd)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+
+def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
+ import glob
+ import subprocess
+ import errno
+
+ fixme = []
+ postinsts = []
+ seendirs = set()
+ stagingdir = d.getVar("STAGING_DIR")
+ if native:
+ pkgarchs = ['${BUILD_ARCH}', '${BUILD_ARCH}_*']
+ targetdir = nativesysroot
+ else:
+ pkgarchs = ['${MACHINE_ARCH}']
+ pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
+ pkgarchs.append('allarch')
+ targetdir = targetsysroot
+
+ bb.utils.mkdirhier(targetdir)
+ for pkgarch in pkgarchs:
+ for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.populate_sysroot" % pkgarch)):
+ if manifest.endswith("-initial.populate_sysroot"):
+ # skip libgcc-initial due to file overlap
+ continue
+ if not native and (manifest.endswith("-native.populate_sysroot") or "nativesdk-" in manifest):
+ continue
+ if native and not (manifest.endswith("-native.populate_sysroot") or manifest.endswith("-cross.populate_sysroot") or "-cross-" in manifest):
+ continue
+ tmanifest = targetdir + "/" + os.path.basename(manifest)
+ if os.path.exists(tmanifest):
+ continue
+ try:
+ os.link(manifest, tmanifest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(manifest, tmanifest)
+ else:
+ raise
+ with open(manifest, "r") as f:
+ for l in f:
+ l = l.strip()
+ if l.endswith("/fixmepath"):
+ fixme.append(l)
+ continue
+ if l.endswith("/fixmepath.cmd"):
+ continue
+ dest = l.replace(stagingdir, "")
+ dest = targetdir + "/" + "/".join(dest.split("/")[3:])
+ if l.endswith("/"):
+ staging_copydir(l, targetdir, dest, seendirs)
+ continue
+ try:
+ staging_copyfile(l, targetdir, dest, postinsts, seendirs)
+ except FileExistsError:
+ continue
+
+ staging_processfixme(fixme, targetdir, targetsysroot, nativesysroot, d)
+ for p in postinsts:
+ subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
+
+#
+# Manifests here are complicated. The main sysroot area has the unpacked sstate,
+# which is unrelocated and tracked by the main sstate manifests. Each recipe-
+# specific sysroot has manifests for each dependency that is installed there.
+# The task hash is used to tell whether the data needs to be reinstalled. We
+# use a symlink to point to the currently installed hash. There is also a
+# "complete" stamp file which is used to mark if installation completed. If
+# something fails (e.g. a postinst), this won't get written and we would
+# remove and reinstall the dependency. This also means partially installed
+# dependencies should get cleaned up correctly.
+#
+
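For illustration, the on-disk layout this scheme produces looks roughly like this (recipe name and hash hypothetical):

    recipe-sysroot-native/installeddeps/
        zlib -> zlib.3f2c9a...      # symlink naming the currently installed task hash
        zlib.3f2c9a...              # manifest of every file installed for zlib
        zlib.complete -> <manifest> # written last; its absence marks a partial install
        index.do_configure          # list of dependencies this task installed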
+python extend_recipe_sysroot() {
+ import copy
+ import subprocess
+ import errno
+ import collections
+ import glob
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ mytaskname = d.getVar("BB_RUNTASK")
+ if mytaskname.endswith("_setscene"):
+ mytaskname = mytaskname.replace("_setscene", "")
+ workdir = d.getVar("WORKDIR")
+ #bb.warn(str(taskdepdata))
+ pn = d.getVar("PN")
+ stagingdir = d.getVar("STAGING_DIR")
+ sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
+ recipesysroot = d.getVar("RECIPE_SYSROOT")
+ recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
+
+ # Detect bitbake -b usage
+ nodeps = d.getVar("BB_LIMITEDDEPS") or False
+ if nodeps:
+ lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
+ staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, True, d)
+ staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, False, d)
+ bb.utils.unlockfile(lock)
+ return
+
+ start = None
+ configuredeps = []
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == mytaskname and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+
+ # We need to figure out which sysroot files we need to expose to this task.
+ # This needs to match what would get restored from sstate, which is controlled
+ # ultimately by calls from bitbake to setscene_depvalid().
+ # That function expects a setscene dependency tree. We build a dependency tree
+ # condensed to inter-sstate task dependencies, similar to that used by setscene
+ # tasks. We can then call into setscene_depvalid() and decide
+ # which dependencies we can "see" and should expose in the recipe specific sysroot.
+ setscenedeps = copy.deepcopy(taskdepdata)
+
+ start = set([start])
+
+ sstatetasks = d.getVar("SSTATETASKS").split()
+ # Add recipe specific tasks referenced by setscene_depvalid()
+ sstatetasks.append("do_stash_locale")
+
+ def print_dep_tree(deptree):
+ data = ""
+ for dep in deptree:
+ deps = " " + "\n ".join(deptree[dep][3]) + "\n"
+ data = data + "%s:\n %s\n %s\n%s %s\n %s\n" % (deptree[dep][0], deptree[dep][1], deptree[dep][2], deps, deptree[dep][4], deptree[dep][5])
+ return data
+
+ #bb.note("Full dep tree is:\n%s" % print_dep_tree(taskdepdata))
+
+ #bb.note(" start2 is %s" % str(start))
+
+ # If start is an sstate task (like do_package) we need to add in its direct dependencies
+ # else the code below won't recurse into them.
+ for dep in set(start):
+ for dep2 in setscenedeps[dep][3]:
+ start.add(dep2)
+ start.remove(dep)
+
+ #bb.note(" start3 is %s" % str(start))
+
+ # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
+ for dep in taskdepdata:
+ data = setscenedeps[dep]
+ if data[1] not in sstatetasks:
+ for dep2 in setscenedeps:
+ data2 = setscenedeps[dep2]
+ if dep in data2[3]:
+ data2[3].update(setscenedeps[dep][3])
+ data2[3].remove(dep)
+ if dep in start:
+ start.update(setscenedeps[dep][3])
+ start.remove(dep)
+ del setscenedeps[dep]
+
+ # Remove circular references
+ for dep in setscenedeps:
+ if dep in setscenedeps[dep][3]:
+ setscenedeps[dep][3].remove(dep)
+
+ #bb.note("Computed dep tree is:\n%s" % print_dep_tree(setscenedeps))
+ #bb.note(" start is %s" % str(start))
+
+ # Direct dependencies should be present and can be depended upon
+ for dep in set(start):
+ if setscenedeps[dep][1] == "do_populate_sysroot":
+ if dep not in configuredeps:
+ configuredeps.append(dep)
+ bb.note("Direct dependencies are %s" % str(configuredeps))
+ #bb.note(" or %s" % str(start))
+
+ msgbuf = []
+ # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
+ # for ones that would be restored from sstate.
+ done = list(start)
+ next = list(start)
+ while next:
+ new = []
+ for dep in next:
+ data = setscenedeps[dep]
+ for datadep in data[3]:
+ if datadep in done:
+ continue
+ taskdeps = {}
+ taskdeps[dep] = setscenedeps[dep][:2]
+ taskdeps[datadep] = setscenedeps[datadep][:2]
+ retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
+ if retval:
+ msgbuf.append("Skipping setscene dependency %s for installation into the sysroot" % datadep)
+ continue
+ done.append(datadep)
+ new.append(datadep)
+ if datadep not in configuredeps and setscenedeps[datadep][1] == "do_populate_sysroot":
+ configuredeps.append(datadep)
+ msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
+ else:
+ msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
+ next = new
+
+ # This logging is too verbose for day-to-day use, sadly
+ #bb.debug(2, "\n".join(msgbuf))
+
+ depdir = recipesysrootnative + "/installeddeps"
+ bb.utils.mkdirhier(depdir)
+ bb.utils.mkdirhier(sharedmanifests)
+
+ lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
+
+ fixme = {}
+ seendirs = set()
+ postinsts = []
+ multilibs = {}
+ manifests = {}
+ # All files that we're going to be installing, to find conflicts.
+ fileset = {}
+
+ for f in os.listdir(depdir):
+ if not f.endswith(".complete"):
+ continue
+ f = depdir + "/" + f
+ if os.path.islink(f) and not os.path.exists(f):
+ bb.note("%s no longer exists, removing from sysroot" % f)
+ lnk = os.readlink(f.replace(".complete", ""))
+ sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ os.unlink(f)
+ os.unlink(f.replace(".complete", ""))
+
+ installed = []
+ for dep in configuredeps:
+ c = setscenedeps[dep][0]
+ if mytaskname in ["do_sdk_depends", "do_populate_sdk_ext"] and c.endswith("-initial"):
+ bb.note("Skipping initial setscene dependency %s for installation into the sysroot" % c)
+ continue
+ installed.append(c)
+
+ # We want to remove anything which this task previously installed but is no longer a dependency
+ taskindex = depdir + "/" + "index." + mytaskname
+ if os.path.exists(taskindex):
+ potential = []
+ with open(taskindex, "r") as f:
+ for l in f:
+ l = l.strip()
+ if l not in installed:
+ fl = depdir + "/" + l
+ if not os.path.exists(fl):
+ # Was likely already uninstalled
+ continue
+ potential.append(l)
+ # We need to ensure no other task needs this dependency. We hold the sysroot
+ # lock, so we can search the indexes to check.
+ if potential:
+ for i in glob.glob(depdir + "/index.*"):
+ if i.endswith("." + mytaskname):
+ continue
+ with open(i, "r") as f:
+ for l in f:
+ l = l.strip()
+ if l in potential:
+ potential.remove(l)
+ for l in potential:
+ fl = depdir + "/" + l
+ bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
+ lnk = os.readlink(fl)
+ sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ os.unlink(fl)
+ os.unlink(fl + ".complete")
+
+ msg_exists = []
+ msg_adding = []
+
+ # Handle all removals first since files may move between recipes
+ for dep in configuredeps:
+ c = setscenedeps[dep][0]
+ if c not in installed:
+ continue
+ taskhash = setscenedeps[dep][5]
+ taskmanifest = depdir + "/" + c + "." + taskhash
+
+ if os.path.exists(depdir + "/" + c):
+ lnk = os.readlink(depdir + "/" + c)
+ if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
+ continue
+ else:
+ bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
+ sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ os.unlink(depdir + "/" + c)
+ if os.path.lexists(depdir + "/" + c + ".complete"):
+ os.unlink(depdir + "/" + c + ".complete")
+ elif os.path.lexists(depdir + "/" + c):
+ os.unlink(depdir + "/" + c)
+
+ # Now handle installs
+ for dep in configuredeps:
+ c = setscenedeps[dep][0]
+ if c not in installed:
+ continue
+ taskhash = setscenedeps[dep][5]
+ taskmanifest = depdir + "/" + c + "." + taskhash
+
+ if os.path.exists(depdir + "/" + c):
+ lnk = os.readlink(depdir + "/" + c)
+ if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
+ msg_exists.append(c)
+ continue
+
+ msg_adding.append(c)
+
+ os.symlink(c + "." + taskhash, depdir + "/" + c)
+
+ manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "populate_sysroot", d, multilibs)
+ if d2 is not d:
+ # If we don't do this, the recipe sysroot will be placed in the wrong WORKDIR for multilibs
+ # We need a consistent WORKDIR for the image
+ d2.setVar("WORKDIR", d.getVar("WORKDIR"))
+ destsysroot = d2.getVar("RECIPE_SYSROOT")
+ # We put allarch recipes into the default sysroot
+ if manifest and "allarch" in manifest:
+ destsysroot = d.getVar("RECIPE_SYSROOT")
+
+ native = False
+ if c.endswith("-native") or "-cross-" in c or "-crosssdk" in c:
+ native = True
+
+ if manifest:
+ newmanifest = collections.OrderedDict()
+ targetdir = destsysroot
+ if native:
+ targetdir = recipesysrootnative
+ if targetdir not in fixme:
+ fixme[targetdir] = []
+ fm = fixme[targetdir]
+
+ with open(manifest, "r") as f:
+ manifests[dep] = manifest
+ for l in f:
+ l = l.strip()
+ if l.endswith("/fixmepath"):
+ fm.append(l)
+ continue
+ if l.endswith("/fixmepath.cmd"):
+ continue
+ dest = l.replace(stagingdir, "")
+ dest = "/" + "/".join(dest.split("/")[3:])
+ newmanifest[l] = targetdir + dest
+
+ # Check if files have already been installed by another
+ # recipe and abort if they have, explaining what recipes are
+ # conflicting.
+ hashname = targetdir + dest
+ if not hashname.endswith("/"):
+ if hashname in fileset:
+ bb.fatal("The file %s is installed by both %s and %s, aborting" % (dest, c, fileset[hashname]))
+ else:
+ fileset[hashname] = c
+
+ # Having multiple identical manifests in each sysroot eats disk space, so
+ # create a shared pool of them and hardlink if we can.
+ # We create the manifest in advance so that if something fails during installation,
+ # or the build is interrupted, subsequent execution can clean up.
+ sharedm = sharedmanifests + "/" + os.path.basename(taskmanifest)
+ if not os.path.exists(sharedm):
+ smlock = bb.utils.lockfile(sharedm + ".lock")
+ # We can race here. You'd think that would just mean we might not end up with all
+ # copies hardlinked to each other, but Python can lose file handles, so we need to
+ # do this under a lock.
+ if not os.path.exists(sharedm):
+ with open(sharedm, 'w') as m:
+ for l in newmanifest:
+ dest = newmanifest[l]
+ m.write(dest.replace(workdir + "/", "") + "\n")
+ bb.utils.unlockfile(smlock)
+ try:
+ os.link(sharedm, taskmanifest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(sharedm, taskmanifest)
+ else:
+ raise
+ # Finally actually install the files
+ for l in newmanifest:
+ dest = newmanifest[l]
+ if l.endswith("/"):
+ staging_copydir(l, targetdir, dest, seendirs)
+ continue
+ staging_copyfile(l, targetdir, dest, postinsts, seendirs)
+
+ bb.note("Installed into sysroot: %s" % str(msg_adding))
+ bb.note("Skipping as already exists in sysroot: %s" % str(msg_exists))
+
+ for f in fixme:
+ staging_processfixme(fixme[f], f, recipesysroot, recipesysrootnative, d)
+
+ for p in postinsts:
+ subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
+
+ for dep in manifests:
+ c = setscenedeps[dep][0]
+ os.symlink(manifests[dep], depdir + "/" + c + ".complete")
+
+ with open(taskindex, "w") as f:
+ for l in sorted(installed):
+ f.write(l + "\n")
+ bb.utils.unlockfile(lock)
+}
+extend_recipe_sysroot[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
+
+do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
+python do_prepare_recipe_sysroot () {
+ bb.build.exec_func("extend_recipe_sysroot", d)
+}
+addtask do_prepare_recipe_sysroot before do_configure after do_fetch
+
+python staging_taskhandler() {
+ bbtasks = e.tasklist
+ for task in bbtasks:
+ deps = d.getVarFlag(task, "depends")
+ if deps and "populate_sysroot" in deps:
+ d.appendVarFlag(task, "prefuncs", " extend_recipe_sysroot")
+}
+staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
+addhandler staging_taskhandler
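staging_copyfile() above prefers hardlinks and falls back to copying when source and destination sit on different filesystems. The core pattern as a standalone sketch (shutil.copyfile stands in for bb.utils.copyfile):

    import errno
    import os
    import shutil

    def link_or_copy(src, dest):
        # Hardlink when possible; EXDEV signals a cross-filesystem link attempt
        try:
            os.link(src, dest)
        except OSError as err:
            if err.errno == errno.EXDEV:
                shutil.copyfile(src, dest)
            else:
                raise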
diff --git a/meta/classes/syslinux.bbclass b/meta/classes/syslinux.bbclass
index 9b0c2c7fc6..894f6b3718 100644
--- a/meta/classes/syslinux.bbclass
+++ b/meta/classes/syslinux.bbclass
@@ -75,21 +75,16 @@ syslinux_hddimg_install() {
syslinux ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
}
-syslinux_hdddirect_install() {
- DEST=$1
- syslinux $DEST
-}
-
python build_syslinux_cfg () {
import copy
import sys
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
if not workdir:
bb.error("WORKDIR not defined, unable to package")
return
- labels = d.getVar('LABELS', True)
+ labels = d.getVar('LABELS')
if not labels:
bb.debug(1, "LABELS not defined, nothing to do")
return
@@ -98,50 +93,50 @@ python build_syslinux_cfg () {
bb.debug(1, "No labels, nothing to do")
return
- cfile = d.getVar('SYSLINUX_CFG', True)
+ cfile = d.getVar('SYSLINUX_CFG')
if not cfile:
- raise bb.build.FuncFailed('Unable to read SYSLINUX_CFG')
+ bb.fatal('Unable to read SYSLINUX_CFG')
try:
cfgfile = open(cfile, 'w')
except OSError:
- raise bb.build.FuncFailed('Unable to open %s' % (cfile))
+ bb.fatal('Unable to open %s' % cfile)
cfgfile.write('# Automatically created by OE\n')
- opts = d.getVar('SYSLINUX_OPTS', True)
+ opts = d.getVar('SYSLINUX_OPTS')
if opts:
for opt in opts.split(';'):
cfgfile.write('%s\n' % opt)
- allowoptions = d.getVar('SYSLINUX_ALLOWOPTIONS', True)
+ allowoptions = d.getVar('SYSLINUX_ALLOWOPTIONS')
if allowoptions:
cfgfile.write('ALLOWOPTIONS %s\n' % allowoptions)
else:
cfgfile.write('ALLOWOPTIONS 1\n')
- syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE', True)
- syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY', True)
- syslinux_serial = d.getVar('SYSLINUX_SERIAL', True)
+ syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE')
+ syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY')
+ syslinux_serial = d.getVar('SYSLINUX_SERIAL')
if syslinux_serial:
cfgfile.write('SERIAL %s\n' % syslinux_serial)
- menu = (d.getVar('AUTO_SYSLINUXMENU', True) == "1")
+ menu = (d.getVar('AUTO_SYSLINUXMENU') == "1")
if menu and syslinux_serial:
cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0]))
else:
cfgfile.write('DEFAULT %s\n' % (labels.split()[0]))
- timeout = d.getVar('SYSLINUX_TIMEOUT', True)
+ timeout = d.getVar('SYSLINUX_TIMEOUT')
if timeout:
cfgfile.write('TIMEOUT %s\n' % timeout)
else:
cfgfile.write('TIMEOUT 50\n')
- prompt = d.getVar('SYSLINUX_PROMPT', True)
+ prompt = d.getVar('SYSLINUX_PROMPT')
if prompt:
cfgfile.write('PROMPT %s\n' % prompt)
else:
@@ -151,38 +146,38 @@ python build_syslinux_cfg () {
cfgfile.write('ui vesamenu.c32\n')
cfgfile.write('menu title Select kernel options and boot kernel\n')
cfgfile.write('menu tabmsg Press [Tab] to edit, [Return] to select\n')
- splash = d.getVar('SYSLINUX_SPLASH', True)
+ splash = d.getVar('SYSLINUX_SPLASH')
if splash:
cfgfile.write('menu background splash.lss\n')
for label in labels.split():
localdata = bb.data.createCopy(d)
- overrides = localdata.getVar('OVERRIDES', True)
+ overrides = localdata.getVar('OVERRIDES')
if not overrides:
- raise bb.build.FuncFailed('OVERRIDES not defined')
+ bb.fatal('OVERRIDES not defined')
localdata.setVar('OVERRIDES', label + ':' + overrides)
- bb.data.update_data(localdata)
btypes = [ [ "", syslinux_default_console ] ]
if menu and syslinux_serial:
btypes = [ [ "Graphics console ", syslinux_default_console ],
[ "Serial console ", syslinux_serial_tty ] ]
- root= d.getVar('SYSLINUX_ROOT', True)
+ root= d.getVar('SYSLINUX_ROOT')
if not root:
- raise bb.build.FuncFailed('SYSLINUX_ROOT not defined')
+ bb.fatal('SYSLINUX_ROOT not defined')
+ kernel = localdata.getVar('KERNEL_IMAGETYPE')
for btype in btypes:
- cfgfile.write('LABEL %s%s\nKERNEL /vmlinuz\n' % (btype[0], label))
+ cfgfile.write('LABEL %s%s\nKERNEL /%s\n' % (btype[0], label, kernel))
- exargs = d.getVar('SYSLINUX_KERNEL_ARGS', True)
+ exargs = d.getVar('SYSLINUX_KERNEL_ARGS')
if exargs:
btype[1] += " " + exargs
- append = localdata.getVar('APPEND', True)
- initrd = localdata.getVar('INITRD', True)
+ append = localdata.getVar('APPEND')
+ initrd = localdata.getVar('INITRD')
append = root + " " + append
cfgfile.write('APPEND ')
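Put together, build_syslinux_cfg() emits a configuration along these lines (label, kernel and append values illustrative):

    # Automatically created by OE
    ALLOWOPTIONS 1
    DEFAULT boot
    TIMEOUT 50
    PROMPT 1
    LABEL boot
    KERNEL /bzImage
    APPEND root=/dev/sda2 console=ttyS0,115200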
diff --git a/meta/classes/systemd-boot-cfg.bbclass b/meta/classes/systemd-boot-cfg.bbclass
new file mode 100644
index 0000000000..b3e0e6ad41
--- /dev/null
+++ b/meta/classes/systemd-boot-cfg.bbclass
@@ -0,0 +1,71 @@
+SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
+SYSTEMD_BOOT_ENTRIES ?= ""
+SYSTEMD_BOOT_TIMEOUT ?= "10"
+
+# Uses MACHINE specific KERNEL_IMAGETYPE
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+# Need UUID utility code.
+inherit fs-uuid
+
+python build_efi_cfg() {
+ s = d.getVar("S")
+ labels = d.getVar('LABELS')
+ if not labels:
+ bb.debug(1, "LABELS not defined, nothing to do")
+ return
+
+ if labels == []:
+ bb.debug(1, "No labels, nothing to do")
+ return
+
+ cfile = d.getVar('SYSTEMD_BOOT_CFG')
+ cdir = os.path.dirname(cfile)
+ if not os.path.exists(cdir):
+ os.makedirs(cdir)
+ try:
+ cfgfile = open(cfile, 'w')
+ except OSError:
+ bb.fatal('Unable to open %s' % cfile)
+
+ cfgfile.write('# Automatically created by OE\n')
+ cfgfile.write('default %s\n' % (labels.split()[0]))
+ timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT')
+ if timeout:
+ cfgfile.write('timeout %s\n' % timeout)
+ else:
+ cfgfile.write('timeout 10\n')
+ cfgfile.close()
+
+ for label in labels.split():
+ localdata = d.createCopy()
+
+ entryfile = "%s/%s.conf" % (s, label)
+ if not os.path.exists(s):
+ os.makedirs(s)
+ d.appendVar("SYSTEMD_BOOT_ENTRIES", " " + entryfile)
+ try:
+ entrycfg = open(entryfile, "w")
+ except OSError:
+ bb.fatal('Unable to open %s' % entryfile)
+
+ entrycfg.write('title %s\n' % label)
+
+ kernel = localdata.getVar("KERNEL_IMAGETYPE")
+ entrycfg.write('linux /%s\n' % kernel)
+
+ append = localdata.getVar('APPEND')
+ initrd = localdata.getVar('INITRD')
+
+ if initrd:
+ entrycfg.write('initrd /initrd\n')
+ lb = label
+ if label == "install":
+ lb = "install-efi"
+ entrycfg.write('options LABEL=%s ' % lb)
+ if append:
+ append = replace_rootfs_uuid(d, append)
+ entrycfg.write('%s' % append)
+ entrycfg.write('\n')
+ entrycfg.close()
+}
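The result is a loader configuration like the following (label, kernel and options illustrative):

    loader/loader.conf:
        # Automatically created by OE
        default boot
        timeout 10

    loader/entries/boot.conf:
        title boot
        linux /bzImage
        initrd /initrd
        options LABEL=boot root=/dev/sda2 rootwait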
diff --git a/meta/classes/systemd-boot.bbclass b/meta/classes/systemd-boot.bbclass
index b550b61a7c..336c4c2ff5 100644
--- a/meta/classes/systemd-boot.bbclass
+++ b/meta/classes/systemd-boot.bbclass
@@ -4,121 +4,32 @@
# systemd-boot.bbclass - The "systemd-boot" is essentially the gummiboot merged into systemd.
# The original standalone gummiboot project is dead without any more
-# maintenance. As a start point, we replace all gummitboot occurrences
-# with systemd-boot in gummiboot.bbclass to have a base version of this
-# systemd-boot.bbclass.
+# maintenance.
#
# Set EFI_PROVIDER = "systemd-boot" to use systemd-boot on your live images instead of grub-efi
-# (images built by image-live.bbclass or image-vm.bbclass)
+# (images built by image-live.bbclass)
do_bootimg[depends] += "${MLPREFIX}systemd-boot:do_deploy"
-do_bootdirectdisk[depends] += "${MLPREFIX}systemd-boot:do_deploy"
-
-EFIDIR = "/EFI/BOOT"
-
-SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
-SYSTEMD_BOOT_ENTRIES ?= ""
-SYSTEMD_BOOT_TIMEOUT ?= "10"
+require conf/image-uefi.conf
# Need UUID utility code.
inherit fs-uuid
efi_populate() {
- DEST=$1
+ efi_populate_common "$1" systemd
- EFI_IMAGE="systemd-bootia32.efi"
- DEST_EFI_IMAGE="bootia32.efi"
- if [ "${TARGET_ARCH}" = "x86_64" ]; then
- EFI_IMAGE="systemd-bootx64.efi"
- DEST_EFI_IMAGE="bootx64.efi"
- fi
-
- install -d ${DEST}${EFIDIR}
# systemd-boot requires these paths for configuration files
# they are not customizable so no point in new vars
install -d ${DEST}/loader
install -d ${DEST}/loader/entries
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE}
install -m 0644 ${SYSTEMD_BOOT_CFG} ${DEST}/loader/loader.conf
for i in ${SYSTEMD_BOOT_ENTRIES}; do
install -m 0644 ${i} ${DEST}/loader/entries
done
}
-efi_iso_populate() {
- iso_dir=$1
- efi_populate $iso_dir
- mkdir -p ${EFIIMGDIR}/${EFIDIR}
- cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
- cp $iso_dir/vmlinuz ${EFIIMGDIR}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- echo "fs0:${EFIPATH}\\${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
- if [ -f "$iso_dir/initrd" ] ; then
- cp $iso_dir/initrd ${EFIIMGDIR}
- fi
-}
-
-efi_hddimg_populate() {
- efi_populate $1
+efi_iso_populate_append() {
+ cp -r $iso_dir/loader ${EFIIMGDIR}
}
-python build_efi_cfg() {
- s = d.getVar("S", True)
- labels = d.getVar('LABELS', True)
- if not labels:
- bb.debug(1, "LABELS not defined, nothing to do")
- return
-
- if labels == []:
- bb.debug(1, "No labels, nothing to do")
- return
-
- cfile = d.getVar('SYSTEMD_BOOT_CFG', True)
- try:
- cfgfile = open(cfile, 'w')
- except OSError:
- raise bb.build.FuncFailed('Unable to open %s' % (cfile))
-
- cfgfile.write('# Automatically created by OE\n')
- cfgfile.write('default %s\n' % (labels.split()[0]))
- timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT', True)
- if timeout:
- cfgfile.write('timeout %s\n' % timeout)
- else:
- cfgfile.write('timeout 10\n')
- cfgfile.close()
-
- for label in labels.split():
- localdata = d.createCopy()
-
- overrides = localdata.getVar('OVERRIDES', True)
- if not overrides:
- raise bb.build.FuncFailed('OVERRIDES not defined')
-
- entryfile = "%s/%s.conf" % (s, label)
- d.appendVar("SYSTEMD_BOOT_ENTRIES", " " + entryfile)
- try:
- entrycfg = open(entryfile, "w")
- except OSError:
- raise bb.build.FuncFailed('Unable to open %s' % (entryfile))
- localdata.setVar('OVERRIDES', label + ':' + overrides)
- bb.data.update_data(localdata)
-
- entrycfg.write('title %s\n' % label)
- entrycfg.write('linux /vmlinuz\n')
-
- append = localdata.getVar('APPEND', True)
- initrd = localdata.getVar('INITRD', True)
-
- if initrd:
- entrycfg.write('initrd /initrd\n')
- lb = label
- if label == "install":
- lb = "install-efi"
- entrycfg.write('options LABEL=%s ' % lb)
- if append:
- append = replace_rootfs_uuid(d, append)
- entrycfg.write('%s' % append)
- entrycfg.write('\n')
- entrycfg.close()
-}
+inherit systemd-boot-cfg
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
index db7873fbe2..9e8a82c9f1 100644
--- a/meta/classes/systemd.bbclass
+++ b/meta/classes/systemd.bbclass
@@ -17,39 +17,43 @@ python __anonymous() {
# files.
if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
d.appendVar("DEPENDS", " systemd-systemctl-native")
+ d.appendVar("PACKAGE_WRITE_DEPS", " systemd-systemctl-native")
if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d):
d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1")
}
systemd_postinst() {
-OPTS=""
+if type systemctl >/dev/null 2>/dev/null; then
+ OPTS=""
-if [ -n "$D" ]; then
- OPTS="--root=$D"
-fi
+ if [ -n "$D" ]; then
+ OPTS="--root=$D"
+ fi
-if type systemctl >/dev/null 2>/dev/null; then
- systemctl $OPTS ${SYSTEMD_AUTO_ENABLE} ${SYSTEMD_SERVICE}
+ if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
+ for service in ${SYSTEMD_SERVICE_ESCAPED}; do
+ systemctl ${OPTS} enable "$service"
+ done
+ fi
+
+ if [ -z "$D" ]; then
+ systemctl daemon-reload
+ systemctl preset ${SYSTEMD_SERVICE_ESCAPED}
- if [ -z "$D" -a "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
- systemctl restart ${SYSTEMD_SERVICE}
+ if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
+ systemctl --no-block restart ${SYSTEMD_SERVICE_ESCAPED}
+ fi
fi
fi
}
systemd_prerm() {
-OPTS=""
-
-if [ -n "$D" ]; then
- OPTS="--root=$D"
-fi
-
if type systemctl >/dev/null 2>/dev/null; then
if [ -z "$D" ]; then
- systemctl stop ${SYSTEMD_SERVICE}
- fi
+ systemctl stop ${SYSTEMD_SERVICE_ESCAPED}
- systemctl $OPTS disable ${SYSTEMD_SERVICE}
+ systemctl disable ${SYSTEMD_SERVICE_ESCAPED}
+ fi
fi
}
@@ -60,19 +64,20 @@ systemd_populate_packages[vardepsexclude] += "OVERRIDES"
python systemd_populate_packages() {
import re
+ import shlex
if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
return
def get_package_var(d, var, pkg):
- val = (d.getVar('%s_%s' % (var, pkg), True) or "").strip()
+ val = (d.getVar('%s_%s' % (var, pkg)) or "").strip()
if val == "":
- val = (d.getVar(var, True) or "").strip()
+ val = (d.getVar(var) or "").strip()
return val
# Check if systemd-packages already included in PACKAGES
def systemd_check_package(pkg_systemd):
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
if not pkg_systemd in packages.split():
bb.error('%s does not appear in package list, please add it' % pkg_systemd)
@@ -80,29 +85,31 @@ python systemd_populate_packages() {
def systemd_generate_package_scripts(pkg):
bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg)
+ paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE_' + pkg).split())
+ d.setVar('SYSTEMD_SERVICE_ESCAPED_' + pkg, paths_escaped)
+
# Add pkg to the overrides so that it finds the SYSTEMD_SERVICE_pkg
# variable.
localdata = d.createCopy()
localdata.prependVar("OVERRIDES", pkg + ":")
- bb.data.update_data(localdata)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += localdata.getVar('systemd_postinst', True)
+ postinst += localdata.getVar('systemd_postinst')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg, True)
+ prerm = d.getVar('pkg_prerm_%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
- prerm += localdata.getVar('systemd_prerm', True)
+ prerm += localdata.getVar('systemd_prerm')
d.setVar('pkg_prerm_%s' % pkg, prerm)
# Add files to FILES_*-systemd if existent and not already done
def systemd_append_file(pkg_systemd, file_append):
appended = False
- if os.path.exists(oe.path.join(d.getVar("D", True), file_append)):
+ if os.path.exists(oe.path.join(d.getVar("D"), file_append)):
var_name = "FILES_" + pkg_systemd
files = d.getVar(var_name, False) or ""
if file_append not in files.split():
@@ -114,7 +121,7 @@ python systemd_populate_packages() {
def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
# avoid infinite recursion
if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
- fullpath = oe.path.join(d.getVar("D", True), path, service)
+ fullpath = oe.path.join(d.getVar("D"), path, service)
if service.find('.service') != -1:
# for *.service add *@.service
service_base = service.replace('.service', '')
@@ -126,7 +133,7 @@ python systemd_populate_packages() {
systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
for key in keys.split():
# recurse all dependencies found in keys ('Also';'Conflicts';..) and add to files
- cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, fullpath, key)
+ cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, shlex.quote(fullpath), key)
pipe = os.popen(cmd, 'r')
line = pipe.readline()
while line:
@@ -137,9 +144,9 @@ python systemd_populate_packages() {
# Check service-files and call systemd_add_files_and_parse for each entry
def systemd_check_services():
- searchpaths = [oe.path.join(d.getVar("sysconfdir", True), "systemd", "system"),]
- searchpaths.append(d.getVar("systemd_system_unitdir", True))
- systemd_packages = d.getVar('SYSTEMD_PACKAGES', True)
+ searchpaths = [oe.path.join(d.getVar("sysconfdir"), "systemd", "system"),]
+ searchpaths.append(d.getVar("systemd_system_unitdir"))
+ systemd_packages = d.getVar('SYSTEMD_PACKAGES')
keys = 'Also'
# scan for all in SYSTEMD_SERVICE[]
@@ -150,30 +157,44 @@ python systemd_populate_packages() {
# Deal with adding, for example, 'ifplugd@eth0.service' from
# 'ifplugd@.service'
base = None
- if service.find('@') != -1:
- base = re.sub('@[^.]+.', '@.', service)
+ at = service.find('@')
+ if at != -1:
+ ext = service.rfind('.')
+ base = service[:at] + '@' + service[ext:]
for path in searchpaths:
- if os.path.exists(oe.path.join(d.getVar("D", True), path, service)):
+ if os.path.exists(oe.path.join(d.getVar("D"), path, service)):
path_found = path
break
elif base is not None:
- if os.path.exists(oe.path.join(d.getVar("D", True), path, base)):
+ if os.path.exists(oe.path.join(d.getVar("D"), path, base)):
path_found = path
break
if path_found != '':
systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
else:
- raise bb.build.FuncFailed("SYSTEMD_SERVICE_%s value %s does not exist" % \
- (pkg_systemd, service))
+ bb.fatal("SYSTEMD_SERVICE_%s value %s does not exist" % (pkg_systemd, service))
+
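The '@' handling above maps an instantiated template unit back to its template file before searching on disk; a quick sketch using the unit name from the comment:

    service = 'ifplugd@eth0.service'
    at, ext = service.find('@'), service.rfind('.')
    base = service[:at] + '@' + service[ext:]
    # -> 'ifplugd@.service', which is then looked up in the searchpaths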
+ def systemd_create_presets(pkg, action):
+ presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg)
+ bb.utils.mkdirhier(os.path.dirname(presetf))
+ with open(presetf, 'a') as fd:
+ for service in d.getVar('SYSTEMD_SERVICE_%s' % pkg).split():
+ fd.write("%s %s\n" % (action,service))
+ d.appendVar("FILES_%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg))
# Run all modifications once when creating package
- if os.path.exists(d.getVar("D", True)):
- for pkg in d.getVar('SYSTEMD_PACKAGES', True).split():
+ if os.path.exists(d.getVar("D")):
+ for pkg in d.getVar('SYSTEMD_PACKAGES').split():
systemd_check_package(pkg)
- if d.getVar('SYSTEMD_SERVICE_' + pkg, True):
+ if d.getVar('SYSTEMD_SERVICE_' + pkg):
systemd_generate_package_scripts(pkg)
+ action = get_package_var(d, 'SYSTEMD_AUTO_ENABLE', pkg)
+ if action in ("enable", "disable"):
+ systemd_create_presets(pkg, action)
+ elif action not in ("mask", "preset"):
+ bb.fatal("SYSTEMD_AUTO_ENABLE_%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action))
systemd_check_services()
}
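For reference, the fragment written by systemd_create_presets above follows the plain systemd.preset(5) format, one directive per unit. A sketch of a generated file for a hypothetical package "foo" with SYSTEMD_AUTO_ENABLE = "enable":

    # ${systemd_unitdir}/system-preset/98-foo.preset (illustrative)
    enable foo.service
    enable foo@.service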
@@ -182,26 +203,30 @@ PACKAGESPLITFUNCS_prepend = "systemd_populate_packages "
python rm_systemd_unitdir (){
import shutil
if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
- systemd_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_unitdir', True))
+ systemd_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_unitdir'))
if os.path.exists(systemd_unitdir):
shutil.rmtree(systemd_unitdir)
systemd_libdir = os.path.dirname(systemd_unitdir)
if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)):
os.rmdir(systemd_libdir)
}
-do_install[postfuncs] += "rm_systemd_unitdir "
python rm_sysvinit_initddir (){
import shutil
- sysv_initddir = oe.path.join(d.getVar("D", True), (d.getVar('INIT_D_DIR', True) or "/etc/init.d"))
+ sysv_initddir = oe.path.join(d.getVar("D"), (d.getVar('INIT_D_DIR') or "/etc/init.d"))
if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \
not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \
os.path.exists(sysv_initddir):
- systemd_system_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_system_unitdir', True))
+ systemd_system_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_system_unitdir'))
# If systemd_system_unitdir contains anything, delete sysv_initddir
if (os.path.exists(systemd_system_unitdir) and os.listdir(systemd_system_unitdir)):
shutil.rmtree(sysv_initddir)
}
-do_install[postfuncs] += "rm_sysvinit_initddir "
+
+do_install[postfuncs] += "${RMINITDIR} "
+RMINITDIR_class-target = " rm_sysvinit_initddir rm_systemd_unitdir "
+RMINITDIR_class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir "
+RMINITDIR = ""
+
diff --git a/meta/classes/terminal.bbclass b/meta/classes/terminal.bbclass
index a94f755a40..6059ae95e0 100644
--- a/meta/classes/terminal.bbclass
+++ b/meta/classes/terminal.bbclass
@@ -3,7 +3,7 @@ OE_TERMINAL[type] = 'choice'
OE_TERMINAL[choices] = 'auto none \
${@oe_terminal_prioritized()}'
-OE_TERMINAL_EXPORTS += 'EXTRA_OEMAKE'
+OE_TERMINAL_EXPORTS += 'EXTRA_OEMAKE CACHED_CONFIGUREVARS CONFIGUREOPTS EXTRA_OECONF'
OE_TERMINAL_EXPORTS[type] = 'list'
XAUTHORITY ?= "${HOME}/.Xauthority"
@@ -14,18 +14,19 @@ def oe_terminal_prioritized():
return " ".join(o.name for o in oe.terminal.prioritized())
def emit_terminal_func(command, envdata, d):
+ import bb.build
cmd_func = 'do_terminal'
envdata.setVar(cmd_func, 'exec ' + command)
envdata.setVarFlag(cmd_func, 'func', '1')
- runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
+ runfmt = d.getVar('BB_RUNFMT') or "run.{func}.{pid}"
runfile = runfmt.format(func=cmd_func, task=cmd_func, taskfunc=cmd_func, pid=os.getpid())
- runfile = os.path.join(d.getVar('T', True), runfile)
+ runfile = os.path.join(d.getVar('T'), runfile)
bb.utils.mkdirhier(os.path.dirname(runfile))
with open(runfile, 'w') as script:
- script.write('#!/bin/sh -e\n')
+ script.write(bb.build.shell_trap_code())
bb.data.emit_func(cmd_func, script, envdata)
script.write(cmd_func)
script.write("\n")
@@ -44,7 +45,7 @@ def oe_terminal(command, title, d):
envdata.setVarFlag(v, 'export', '1')
for export in oe.data.typed_value('OE_TERMINAL_EXPORTS', d):
- value = d.getVar(export, True)
+ value = d.getVar(export)
if value is not None:
os.environ[export] = str(value)
envdata.setVar(export, str(value))
@@ -60,12 +61,17 @@ def oe_terminal(command, title, d):
for key in origbbenv:
if key in envdata:
continue
- value = origbbenv.getVar(key, True)
+ value = origbbenv.getVar(key)
if value is not None:
os.environ[key] = str(value)
envdata.setVar(key, str(value))
envdata.setVarFlag(key, 'export', '1')
+ # Use original PATH as a fallback
+ path = d.getVar('PATH') + ":" + origbbenv.getVar('PATH')
+ os.environ['PATH'] = path
+ envdata.setVar('PATH', path)
+
# A complex PS1 might need more escaping of chars.
# Let's just not export PS1 instead.
envdata.delVar("PS1")
@@ -88,8 +94,12 @@ def oe_terminal(command, title, d):
try:
oe.terminal.spawn_preferred(command, title, None, d)
- except oe.terminal.NoSupportedTerminals:
- bb.fatal('No valid terminal found, unable to open devshell')
+ except oe.terminal.NoSupportedTerminals as nosup:
+ nosup.terms.remove("false")
+ cmds = '\n\t'.join(nosup.terms).replace("{command}",
+ "do_terminal").replace("{title}", title)
+ bb.fatal('No valid terminal found, unable to open devshell.\n' +
+ 'Tried the following commands:\n\t%s' % cmds)
except oe.terminal.ExecutionError as exc:
bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
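The PATH fallback added above is a plain concatenation, so terminal binaries installed on the host remain reachable from the stripped task environment; schematically, with illustrative paths:

    # d.getVar('PATH')         -> '/build/recipe-sysroot-native/usr/bin'
    # origbbenv.getVar('PATH') -> '/usr/local/bin:/usr/bin:/bin'
    # exported to the terminal:
    #   '/build/recipe-sysroot-native/usr/bin:/usr/local/bin:/usr/bin:/bin'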
diff --git a/meta/classes/testexport.bbclass b/meta/classes/testexport.bbclass
index 5147020820..59cbaefbf9 100644
--- a/meta/classes/testexport.bbclass
+++ b/meta/classes/testexport.bbclass
@@ -33,162 +33,141 @@ TEST_EXPORT_DEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-nativ
TEST_EXPORT_DEPENDS += "${@bb.utils.contains('TEST_EXPORT_SDK_ENABLED', '1', 'testexport-tarball:do_populate_sdk', '', d)}"
TEST_EXPORT_LOCK = "${TMPDIR}/testimage.lock"
-python do_testexport() {
- testexport_main(d)
-}
-
addtask testexport
do_testexport[nostamp] = "1"
do_testexport[depends] += "${TEST_EXPORT_DEPENDS} ${TESTIMAGEDEPENDS}"
do_testexport[lockfiles] += "${TEST_EXPORT_LOCK}"
-def exportTests(d,tc):
+python do_testexport() {
+ testexport_main(d)
+}
+
+def testexport_main(d):
import json
+ import logging
+
+ from oeqa.runtime.context import OERuntimeTestContext
+ from oeqa.runtime.context import OERuntimeTestContextExecutor
+
+ image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
+ d.getVar('IMAGE_LINK_NAME')))
+
+ tdname = "%s.testdata.json" % image_name
+ td = json.load(open(tdname, "r"))
+
+ logger = logging.getLogger("BitBake")
+
+ target = OERuntimeTestContextExecutor.getTarget(
+ d.getVar("TEST_TARGET"), None, d.getVar("TEST_TARGET_IP"),
+ d.getVar("TEST_SERVER_IP"))
+
+ host_dumper = OERuntimeTestContextExecutor.getHostDumper(
+ d.getVar("testimage_dump_host"), d.getVar("TESTIMAGE_DUMP_DIR"))
+
+ image_manifest = "%s.manifest" % image_name
+ image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
+
+ extract_dir = d.getVar("TEST_EXTRACTED_DIR")
+
+ tc = OERuntimeTestContext(td, logger, target, host_dumper,
+ image_packages, extract_dir)
+
+ copy_needed_files(d, tc)
+
+def copy_needed_files(d, tc):
import shutil
- import pkgutil
- import re
import oe.path
- exportpath = d.getVar("TEST_EXPORT_DIR", True)
-
- savedata = {}
- savedata["d"] = {}
- savedata["target"] = {}
- savedata["target"]["ip"] = tc.target.ip or d.getVar("TEST_TARGET_IP", True)
- savedata["target"]["server_ip"] = tc.target.server_ip or d.getVar("TEST_SERVER_IP", True)
-
- keys = [ key for key in d.keys() if not key.startswith("_") and not key.startswith("BB") \
- and not key.startswith("B_pn") and not key.startswith("do_") and not d.getVarFlag(key, "func", True)]
- for key in keys:
- try:
- savedata["d"][key] = d.getVar(key, True)
- except bb.data_smart.ExpansionError:
- # we don't care about those anyway
- pass
-
- json_file = os.path.join(exportpath, "testdata.json")
- with open(json_file, "w") as f:
- json.dump(savedata, f, skipkeys=True, indent=4, sort_keys=True)
-
- # Replace absolute path with relative in the file
- exclude_path = os.path.join(d.getVar("COREBASE", True),'meta','lib','oeqa')
- f1 = open(json_file,'r').read()
- f2 = open(json_file,'w')
- m = f1.replace(exclude_path,'oeqa')
- f2.write(m)
- f2.close()
-
- # now start copying files
- # we'll basically copy everything under meta/lib/oeqa, with these exceptions
- # - oeqa/targetcontrol.py - not needed
- # - oeqa/selftest - something else
- # That means:
- # - all tests from oeqa/runtime defined in TEST_SUITES (including from other layers)
- # - the contents of oeqa/utils and oeqa/runtime/files
- # - oeqa/oetest.py and oeqa/runexport.py (this will get copied to exportpath not exportpath/oeqa)
- # - __init__.py files
- bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/runtime/files"))
- bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/utils"))
- # copy test modules, this should cover tests in other layers too
- bbpath = d.getVar("BBPATH", True).split(':')
- for t in tc.testslist:
- isfolder = False
- if re.search("\w+\.\w+\.test_\S+", t):
- t = '.'.join(t.split('.')[:3])
- mod = pkgutil.get_loader(t)
- # More depth than usual?
- if (t.count('.') > 2):
- for p in bbpath:
- foldername = os.path.join(p, 'lib', os.sep.join(t.split('.')).rsplit(os.sep, 1)[0])
- if os.path.isdir(foldername):
- isfolder = True
- target_folder = os.path.join(exportpath, "oeqa", "runtime", os.path.basename(foldername))
- if not os.path.exists(target_folder):
- oe.path.copytree(foldername, target_folder)
- if not isfolder:
- shutil.copy2(mod.path, os.path.join(exportpath, "oeqa/runtime"))
- json_file = "%s.json" % mod.path.rsplit(".", 1)[0]
- if os.path.isfile(json_file):
- shutil.copy2(json_file, os.path.join(exportpath, "oeqa/runtime"))
- # Get meta layer
- for layer in d.getVar("BBLAYERS", True).split():
- if os.path.basename(layer) == "meta":
- meta_layer = layer
- break
- # copy oeqa/oetest.py and oeqa/runexported.py
- oeqadir = os.path.join(meta_layer, "lib/oeqa")
- shutil.copy2(os.path.join(oeqadir, "oetest.py"), os.path.join(exportpath, "oeqa"))
- shutil.copy2(os.path.join(oeqadir, "runexported.py"), exportpath)
- # copy oeqa/utils/*.py
- for root, dirs, files in os.walk(os.path.join(oeqadir, "utils")):
- for f in files:
- if f.endswith(".py"):
- shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/utils"))
- # copy oeqa/runtime/files/*
- for root, dirs, files in os.walk(os.path.join(oeqadir, "runtime/files")):
- for f in files:
- shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/runtime/files"))
+ from oeqa.utils.package_manager import _get_json_file
+ from oeqa.core.utils.test import getSuiteCasesFiles
+
+ export_path = d.getVar('TEST_EXPORT_DIR')
+ corebase_path = d.getVar('COREBASE')
+
+ # Clean everything before starting
+ oe.path.remove(export_path)
+ bb.utils.mkdirhier(os.path.join(export_path, 'lib', 'oeqa'))
+
+ # The source files to copy are relative to the 'COREBASE' directory,
+ # the destination is relative to 'TEST_EXPORT_DIR'.
+ # Because we are squashing the libraries, we need to drop the
+ # layer/script directory prefix
+ files_to_copy = [ os.path.join('meta', 'lib', 'oeqa', 'core'),
+ os.path.join('meta', 'lib', 'oeqa', 'runtime'),
+ os.path.join('meta', 'lib', 'oeqa', 'files'),
+ os.path.join('meta', 'lib', 'oeqa', 'utils'),
+ os.path.join('scripts', 'oe-test'),
+ os.path.join('scripts', 'lib', 'argparse_oe.py'),
+ os.path.join('scripts', 'lib', 'scriptutils.py'), ]
+
+ for f in files_to_copy:
+ src = os.path.join(corebase_path, f)
+ dst = os.path.join(export_path, f.split('/', 1)[-1])
+ if os.path.isdir(src):
+ oe.path.copytree(src, dst)
+ else:
+ shutil.copy2(src, dst)
+
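The loop above squashes the leading layer/script component via f.split('/', 1)[-1], so the copied tree lands directly under TEST_EXPORT_DIR:

    # 'meta/lib/oeqa/core'         -> <TEST_EXPORT_DIR>/lib/oeqa/core
    # 'scripts/oe-test'            -> <TEST_EXPORT_DIR>/oe-test
    # 'scripts/lib/scriptutils.py' -> <TEST_EXPORT_DIR>/lib/scriptutils.py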
+ # Remove cases and just copy the ones specified
+ cases_path = os.path.join(export_path, 'lib', 'oeqa', 'runtime', 'cases')
+ oe.path.remove(cases_path)
+ bb.utils.mkdirhier(cases_path)
+ test_paths = get_runtime_paths(d)
+ test_modules = d.getVar('TEST_SUITES').split()
+ tc.loadTests(test_paths, modules=test_modules)
+ for f in getSuiteCasesFiles(tc.suites):
+ shutil.copy2(f, cases_path)
+ json_file = _get_json_file(f)
+ if json_file:
+ shutil.copy2(json_file, cases_path)
+
+ # Copy test data
+ image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
+ d.getVar('IMAGE_LINK_NAME')))
+ image_manifest = "%s.manifest" % image_name
+ tdname = "%s.testdata.json" % image_name
+ test_data_path = os.path.join(export_path, 'data')
+ bb.utils.mkdirhier(test_data_path)
+ shutil.copy2(image_manifest, os.path.join(test_data_path, 'manifest'))
+ shutil.copy2(tdname, os.path.join(test_data_path, 'testdata.json'))
+
+ for subdir, dirs, files in os.walk(export_path):
+ for dir in dirs:
+ if dir == '__pycache__':
+ shutil.rmtree(os.path.join(subdir, dir))
# Create tar file for common parts of testexport
- create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR", True))
+ create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR"))
# Copy packages needed for runtime testing
- test_pkg_dir = d.getVar("TEST_NEEDED_PACKAGES_DIR", True)
- if os.listdir(test_pkg_dir):
- export_pkg_dir = os.path.join(d.getVar("TEST_EXPORT_DIR", True), "packages")
+ package_extraction(d, tc.suites)
+ test_pkg_dir = d.getVar("TEST_NEEDED_PACKAGES_DIR")
+ if os.path.isdir(test_pkg_dir) and os.listdir(test_pkg_dir):
+ export_pkg_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"), "packages")
oe.path.copytree(test_pkg_dir, export_pkg_dir)
# Create tar file for packages needed by the DUT
- create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE", True), export_pkg_dir)
+ create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE"), export_pkg_dir)
# Copy SDK
- if d.getVar("TEST_EXPORT_SDK_ENABLED", True) == "1":
- sdk_deploy = d.getVar("SDK_DEPLOY", True)
- tarball_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME", True)
+ if d.getVar("TEST_EXPORT_SDK_ENABLED") == "1":
+ sdk_deploy = d.getVar("SDK_DEPLOY")
+ tarball_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME")
tarball_path = os.path.join(sdk_deploy, tarball_name)
- export_sdk_dir = os.path.join(d.getVar("TEST_EXPORT_DIR", True),
- d.getVar("TEST_EXPORT_SDK_DIR", True))
+ export_sdk_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"),
+ d.getVar("TEST_EXPORT_SDK_DIR"))
bb.utils.mkdirhier(export_sdk_dir)
shutil.copy2(tarball_path, export_sdk_dir)
# Create tar file for the sdk
- create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH", True), export_sdk_dir)
-
- bb.plain("Exported tests to: %s" % exportpath)
+ create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH"), export_sdk_dir)
-def testexport_main(d):
- from oeqa.oetest import ExportTestContext
- from oeqa.targetcontrol import get_target_controller
- from oeqa.utils.dump import get_host_dumper
-
- test_create_extract_dirs(d)
- export_dir = d.getVar("TEST_EXPORT_DIR", True)
- bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
- bb.utils.remove(export_dir, recurse=True)
- bb.utils.mkdirhier(export_dir)
-
- # the robot dance
- target = get_target_controller(d)
-
- # test context
- tc = ExportTestContext(d, target)
-
- # this is a dummy load of tests
- # we are doing that to find compile errors in the tests themselves
- # before booting the image
- try:
- tc.loadTests()
- except Exception as e:
- import traceback
- bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
-
- tc.extract_packages()
- exportTests(d,tc)
+ bb.plain("Exported tests to: %s" % export_path)
def create_tarball(d, tar_name, src_dir):
import tarfile
- tar_path = os.path.join(d.getVar("TEST_EXPORT_DIR", True), tar_name)
+ tar_path = os.path.join(d.getVar("TEST_EXPORT_DIR"), tar_name)
current_dir = os.getcwd()
src_dir = src_dir.rstrip('/')
dir_name = os.path.dirname(src_dir)
@@ -200,7 +179,4 @@ def create_tarball(d, tar_name, src_dir):
tar.close()
os.chdir(current_dir)
-
-testexport_main[vardepsexclude] =+ "BB_ORIGENV"
-
inherit testimage
diff --git a/meta/classes/testimage-auto.bbclass b/meta/classes/testimage-auto.bbclass
deleted file mode 100644
index e0a22b773c..0000000000
--- a/meta/classes/testimage-auto.bbclass
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (C) 2013 Intel Corporation
-#
-# Released under the MIT license (see COPYING.MIT)
-
-
-# Run tests automatically on an image after the image is constructed
-# (as opposed to testimage.bbclass alone where tests must be called
-# manually using bitbake -c testimage <image>).
-#
-# NOTE: to use this class, simply set TEST_IMAGE = "1" - no need to
-# inherit it since that will be done in image.bbclass when this variable
-# has been set.
-#
-# See testimage.bbclass for the test implementation.
-
-inherit testimage
-
-python do_testimage_auto() {
- testimage_main(d)
-}
-addtask testimage_auto before do_build after do_image_complete
-do_testimage_auto[depends] += "${TESTIMAGEDEPENDS}"
-do_testimage_auto[lockfiles] += "${TESTIMAGELOCK}"
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
index a908f92fee..844ed87944 100644
--- a/meta/classes/testimage.bbclass
+++ b/meta/classes/testimage.bbclass
@@ -2,18 +2,23 @@
#
# Released under the MIT license (see COPYING.MIT)
-
+inherit metadata_scm
# testimage.bbclass enables testing of qemu images using python unittests.
# Most of the tests are commands run on target image over ssh.
# To use it add testimage to global inherit and call your target image with -c testimage
# You can try it out like this:
-# - first build a qemu core-image-sato
-# - add IMAGE_CLASSES += "testimage" in local.conf
+# - first add IMAGE_CLASSES += "testimage" in local.conf
+# - build a qemu core-image-sato
# - then bitbake core-image-sato -c testimage. That will run a standard suite of tests.
+#
+# The tests can be run automatically each time an image is built if you set
+# TESTIMAGE_AUTO = "1"
+
+TESTIMAGE_AUTO ??= "0"
# You can set (or append to) TEST_SUITES in local.conf to select the tests
# which you want to run for your target.
-# The test names are the module names in meta/lib/oeqa/runtime.
+# The test names are the module names in meta/lib/oeqa/runtime/cases.
# Each name in TEST_SUITES represents a required test for the image. (no skipping allowed)
# Appending "auto" means that it will try to run all tests that are suitable for the image (each test decides that on it's own).
# Note that order in TEST_SUITES is relevant: tests are run in an order such that
@@ -26,6 +31,8 @@
# TEST_LOG_DIR contains a command ssh log and may contain information about which command is running, its output and return codes, and for qemu a boot log up to login.
# Booting is handled by this class, and it's not a test in itself.
# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
+# TEST_QEMUPARAMS can be used to pass extra parameters to qemu, e.g. "-m 1024" for setting the amount of ram to 1 GB.
+# TEST_RUNQEMUPARAMS can be used to pass extra parameters to runqemu, e.g. "gl" to enable OpenGL acceleration.
TEST_LOG_DIR ?= "${WORKDIR}/testimage"
@@ -35,48 +42,46 @@ TEST_NEEDED_PACKAGES_DIR ?= "${WORKDIR}/testimage/packages"
TEST_EXTRACTED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/extracted"
TEST_PACKAGED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/packaged"
-RPMTESTSUITE = "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'smart rpm', '', d)}"
-MINTESTSUITE = "ping"
-NETTESTSUITE = "${MINTESTSUITE} ssh df date scp syslog"
-DEVTESTSUITE = "gcc kernelmodule ldd"
-
-DEFAULT_TEST_SUITES = "${MINTESTSUITE} auto"
-DEFAULT_TEST_SUITES_pn-core-image-minimal = "${MINTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-minimal-dev = "${MINTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-full-cmdline = "${NETTESTSUITE} perl python logrotate"
-DEFAULT_TEST_SUITES_pn-core-image-x11 = "${MINTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-lsb = "${NETTESTSUITE} pam parselogs ${RPMTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-sato = "${NETTESTSUITE} connman xorg parselogs ${RPMTESTSUITE} \
- ${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'python', '', d)}"
-DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "${NETTESTSUITE} connman xorg perl python \
- ${DEVTESTSUITE} parselogs ${RPMTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-lsb-dev = "${NETTESTSUITE} pam perl python parselogs ${RPMTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-lsb-sdk = "${NETTESTSUITE} buildcvs buildiptables buildgalculator \
- connman ${DEVTESTSUITE} pam perl python parselogs ${RPMTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-meta-toolchain = "auto"
+BASICTESTSUITE = "\
+ ping date df ssh scp python perl gi ptest parselogs \
+ logrotate connman systemd oe_syslog pam stap ldd xorg \
+ kernelmodule gcc buildcpio buildlzip buildgalculator \
+ dnf rpm opkg apt"
+
+DEFAULT_TEST_SUITES = "${BASICTESTSUITE}"
# aarch64 has no graphics
DEFAULT_TEST_SUITES_remove_aarch64 = "xorg"
+# musl doesn't support systemtap
+DEFAULT_TEST_SUITES_remove_libc-musl = "stap"
+
+# qemumips is quite slow and has reached the timeout limit several times on the YP build cluster,
+# mitigate this by removing build tests for qemumips machines.
+MIPSREMOVE ??= "buildcpio buildlzip buildgalculator"
+DEFAULT_TEST_SUITES_remove_qemumips = "${MIPSREMOVE}"
+DEFAULT_TEST_SUITES_remove_qemumips64 = "${MIPSREMOVE}"
TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
TEST_QEMUBOOT_TIMEOUT ?= "1000"
TEST_TARGET ?= "qemu"
+TEST_QEMUPARAMS ?= ""
+TEST_RUNQEMUPARAMS ?= ""
TESTIMAGEDEPENDS = ""
-TESTIMAGEDEPENDS_qemuall = "qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot"
+TESTIMAGEDEPENDS_append_qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS_qemuall += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS_qemuall += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'python-smartpm-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot', '', d)}"
-
+TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}"
+TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
+TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot package-index:do_package_index', '', d)}"
+TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot package-index:do_package_index', '', d)}"
TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
TESTIMAGELOCK_qemuall = ""
-TESTIMAGE_DUMP_DIR ?= "/tmp/oe-saved-tests/"
+TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
+
+TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR"
testimage_dump_target () {
top -bn1
@@ -114,72 +119,312 @@ do_testimage[nostamp] = "1"
do_testimage[depends] += "${TESTIMAGEDEPENDS}"
do_testimage[lockfiles] += "${TESTIMAGELOCK}"
+def testimage_sanity(d):
+ if (d.getVar('TEST_TARGET') == 'simpleremote'
+ and (not d.getVar('TEST_TARGET_IP')
+ or not d.getVar('TEST_SERVER_IP'))):
+ bb.fatal('When TEST_TARGET is set to "simpleremote" '
+ 'TEST_TARGET_IP and TEST_SERVER_IP are needed too.')
+
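testimage_sanity guards the hardware-target case; the local.conf settings it checks for would look like this (the IP addresses are illustrative):

    TEST_TARGET = "simpleremote"
    TEST_TARGET_IP = "192.168.7.2"
    TEST_SERVER_IP = "192.168.7.1"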
+def get_testimage_configuration(d, test_type, machine):
+ import platform
+ from oeqa.utils.metadata import get_layers
+ configuration = {'TEST_TYPE': test_type,
+ 'MACHINE': machine,
+ 'DISTRO': d.getVar("DISTRO"),
+ 'IMAGE_BASENAME': d.getVar("IMAGE_BASENAME"),
+ 'IMAGE_PKGTYPE': d.getVar("IMAGE_PKGTYPE"),
+ 'STARTTIME': d.getVar("DATETIME"),
+ 'HOST_DISTRO': oe.lsb.distro_identifier().replace(' ', '-'),
+ 'LAYERS': get_layers(d.getVar("BBLAYERS"))}
+ return configuration
+get_testimage_configuration[vardepsexclude] = "DATETIME"
+
+def get_testimage_json_result_dir(d):
+ json_result_dir = os.path.join(d.getVar("LOG_DIR"), 'oeqa')
+ custom_json_result_dir = d.getVar("OEQA_JSON_RESULT_DIR")
+ if custom_json_result_dir:
+ json_result_dir = custom_json_result_dir
+ return json_result_dir
+
+def get_testimage_result_id(configuration):
+ return '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'], configuration['MACHINE'], configuration['STARTTIME'])
+
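get_testimage_result_id simply joins four configuration fields with underscores, so a hypothetical run would yield an id such as:

    # 'runtime_core-image-sato_qemux86-64_20190101010101'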
def testimage_main(d):
- import unittest
import os
- import oeqa.runtime
- import time
+ import json
import signal
- from oeqa.oetest import ImageTestContext
- from oeqa.targetcontrol import get_target_controller
- from oeqa.utils.dump import get_host_dumper
+ import logging
+
+ from bb.utils import export_proxies
+ from oeqa.core.utils.misc import updateTestData
+ from oeqa.runtime.context import OERuntimeTestContext
+ from oeqa.runtime.context import OERuntimeTestContextExecutor
+ from oeqa.core.target.qemu import supported_fstypes
+ from oeqa.core.utils.test import getSuiteCases
+ from oeqa.utils import make_logger_bitbake_compatible
+
+ def sigterm_exception(signum, stackframe):
+ """
+ Catch SIGTERM from worker in order to stop qemu.
+ """
+ raise RuntimeError
+
+ testimage_sanity(d)
+
+ if (d.getVar('IMAGE_PKGTYPE') == 'rpm'
+ and ('dnf' in d.getVar('TEST_SUITES') or 'auto' in d.getVar('TEST_SUITES'))):
+ create_rpm_index(d)
+
+ logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
+ pn = d.getVar("PN")
+
+ bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR"))
+
+ image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
+ d.getVar('IMAGE_LINK_NAME')))
+
+ tdname = "%s.testdata.json" % image_name
+ try:
+ td = json.load(open(tdname, "r"))
+ except (FileNotFoundError) as err:
+ bb.fatal('File %s Not Found. Have you built the image with INHERIT+="testimage" in the conf/local.conf?' % tdname)
+
+ # Some variables need to be updated (mostly paths) with the
+ # ones of the current environment because some tests require them.
+ updateTestData(d, td, d.getVar('TESTIMAGE_UPDATE_VARS').split())
+
+ image_manifest = "%s.manifest" % image_name
+ image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
+
+ extract_dir = d.getVar("TEST_EXTRACTED_DIR")
+
+ # Get machine
+ machine = d.getVar("MACHINE")
+
+ # Get rootfs
+ fstypes = d.getVar('IMAGE_FSTYPES').split()
+ if d.getVar("TEST_TARGET") == "qemu":
+ fstypes = [fs for fs in fstypes if fs in supported_fstypes]
+ if not fstypes:
+ bb.fatal('Unsupported image type built. Add a compatible image to '
+ 'IMAGE_FSTYPES. Supported types: %s' %
+ ', '.join(supported_fstypes))
+ qfstype = fstypes[0]
+ qdeffstype = d.getVar("QB_DEFAULT_FSTYPE")
+ if qdeffstype:
+ qfstype = qdeffstype
+ rootfs = '%s.%s' % (image_name, qfstype)
+
+ # Get tmpdir (not really used, just for compatibility)
+ tmpdir = d.getVar("TMPDIR")
+
+ # Get deploy_dir_image (not really used, just for compatibility)
+ dir_image = d.getVar("DEPLOY_DIR_IMAGE")
+
+ # Get bootlog
+ bootlog = os.path.join(d.getVar("TEST_LOG_DIR"),
+ 'qemu_boot_log.%s' % d.getVar('DATETIME'))
+
+ # Get display
+ display = d.getVar("BB_ORIGENV").getVar("DISPLAY")
+
+ # Get kernel
+ kernel_name = ('%s-%s.bin' % (d.getVar("KERNEL_IMAGETYPE"), machine))
+ kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), kernel_name)
- pn = d.getVar("PN", True)
- bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
- test_create_extract_dirs(d)
+ # Get boottime
+ boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT"))
+
+ # Get use_kvm
+ kvm = oe.types.qemu_use_kvm(d.getVar('QEMU_USE_KVM'), d.getVar('TARGET_ARCH'))
+
+ slirp = False
+ if d.getVar("QEMU_USE_SLIRP"):
+ slirp = True
+
+ # TODO: We use the current implementation of the qemu runner because of
+ # time constraints; the qemu runner really needs a refactor too.
+ target_kwargs = { 'machine' : machine,
+ 'rootfs' : rootfs,
+ 'tmpdir' : tmpdir,
+ 'dir_image' : dir_image,
+ 'display' : display,
+ 'kernel' : kernel,
+ 'boottime' : boottime,
+ 'bootlog' : bootlog,
+ 'kvm' : kvm,
+ 'slirp' : slirp,
+ 'dump_dir' : d.getVar("TESTIMAGE_DUMP_DIR"),
+ }
+
+ # TODO: Currently BBPATH is needed for custom loading of targets.
+ # It would be better to find these modules using introspection.
+ target_kwargs['target_modules_path'] = d.getVar('BBPATH')
+
+ # hardware controlled targets might need further access
+ target_kwargs['powercontrol_cmd'] = d.getVar("TEST_POWERCONTROL_CMD") or None
+ target_kwargs['powercontrol_extra_args'] = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
+ target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None
+ target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
+
+ def export_ssh_agent(d):
+ import os
+
+ variables = ['SSH_AGENT_PID', 'SSH_AUTH_SOCK']
+ for v in variables:
+ if v not in os.environ.keys():
+ val = d.getVar(v)
+ if val is not None:
+ os.environ[v] = val
+
+ export_ssh_agent(d)
+
+ # runtime tests use the network to download the projects they build
+ export_proxies(d)
# we need the host dumper in test context
- host_dumper = get_host_dumper(d)
+ host_dumper = OERuntimeTestContextExecutor.getHostDumper(
+ d.getVar("testimage_dump_host"),
+ d.getVar("TESTIMAGE_DUMP_DIR"))
# the robot dance
- target = get_target_controller(d)
+ target = OERuntimeTestContextExecutor.getTarget(
+ d.getVar("TEST_TARGET"), logger, d.getVar("TEST_TARGET_IP"),
+ d.getVar("TEST_SERVER_IP"), **target_kwargs)
# test context
- tc = ImageTestContext(d, target, host_dumper)
+ tc = OERuntimeTestContext(td, logger, target, host_dumper,
+ image_packages, extract_dir)
- # this is a dummy load of tests
- # we are doing that to find compile errors in the tests themselves
- # before booting the image
- try:
- tc.loadTests()
- except Exception as e:
- import traceback
- bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
+ # Load tests before starting the target
+ test_paths = get_runtime_paths(d)
+ test_modules = d.getVar('TEST_SUITES').split()
+ if not test_modules:
+ bb.fatal('Empty test suite, please verify TEST_SUITES variable')
+
+ tc.loadTests(test_paths, modules=test_modules)
+
+ suitecases = getSuiteCases(tc.suites)
+ if not suitecases:
+ bb.fatal('Empty test suite, please verify TEST_SUITES variable')
+ else:
+ bb.debug(2, 'test suites:\n\t%s' % '\n\t'.join([str(c) for c in suitecases]))
- tc.extract_packages()
- target.deploy()
+ package_extraction(d, tc.suites)
+
+ results = None
+ orig_sigterm_handler = signal.signal(signal.SIGTERM, sigterm_exception)
try:
- bootparams = None
- if d.getVar('VIRTUAL-RUNTIME_init_manager', '') == 'systemd':
- bootparams = 'systemd.log_level=debug systemd.log_target=console'
- target.start(extra_bootparams=bootparams)
- starttime = time.time()
- result = tc.runTests()
- stoptime = time.time()
- if result.wasSuccessful():
- bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
- msg = "%s - OK - All required tests passed" % pn
- skipped = len(result.skipped)
- if skipped:
- msg += " (skipped=%d)" % skipped
- bb.plain(msg)
+ # We need to check if runqemu ends unexpectedly
+ # or if the worker sends us a SIGTERM
+ tc.target.start(params=d.getVar("TEST_QEMUPARAMS"), runqemuparams=d.getVar("TEST_RUNQEMUPARAMS"))
+ results = tc.runTests()
+ except (RuntimeError, BlockingIOError) as err:
+ if isinstance(err, RuntimeError):
+ bb.error('testimage received SIGTERM, shutting down...')
else:
- raise bb.build.FuncFailed("%s - FAILED - check the task log and the ssh log" % pn )
+ bb.error('runqemu failed, shutting down...')
+ if results:
+ results.stop()
+ results = None
finally:
- signal.signal(signal.SIGTERM, tc.origsigtermhandler)
- target.stop()
+ signal.signal(signal.SIGTERM, orig_sigterm_handler)
+ tc.target.stop()
+
+ # Show results (if we have them)
+ if not results:
+ bb.fatal('%s - FAILED - tests were interrupted during execution' % pn, forcelog=True)
+ configuration = get_testimage_configuration(d, 'runtime', machine)
+ results.logDetails(get_testimage_json_result_dir(d),
+ configuration,
+ get_testimage_result_id(configuration),
+ dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
+ results.logSummary(pn)
+ if not results.wasSuccessful():
+ bb.fatal('%s - FAILED - check the task log and the ssh log' % pn, forcelog=True)
+
+def get_runtime_paths(d):
+ """
+ Returns a list of paths where runtime tests must reside.
-def test_create_extract_dirs(d):
- install_path = d.getVar("TEST_INSTALL_TMP_DIR", True)
- package_path = d.getVar("TEST_PACKAGED_DIR", True)
- extracted_path = d.getVar("TEST_EXTRACTED_DIR", True)
- bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
- bb.utils.remove(package_path, recurse=True)
- bb.utils.mkdirhier(install_path)
- bb.utils.mkdirhier(package_path)
- bb.utils.mkdirhier(extracted_path)
+ Runtime tests are expected in <LAYER_DIR>/lib/oeqa/runtime/cases/
+ """
+ paths = []
+ for layer in d.getVar('BBLAYERS').split():
+ path = os.path.join(layer, 'lib/oeqa/runtime/cases')
+ if os.path.isdir(path):
+ paths.append(path)
+ return paths
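A sketch of what get_runtime_paths returns for a hypothetical BBLAYERS value (only directories that actually exist are kept):

    # BBLAYERS = "/srv/poky/meta /srv/meta-custom"
    # -> ['/srv/poky/meta/lib/oeqa/runtime/cases',
    #     '/srv/meta-custom/lib/oeqa/runtime/cases']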
-testimage_main[vardepsexclude] =+ "BB_ORIGENV"
+def create_index(arg):
+ import subprocess
+
+ index_cmd = arg
+ try:
+ bb.note("Executing '%s' ..." % index_cmd)
+ result = subprocess.check_output(index_cmd,
+ stderr=subprocess.STDOUT,
+ shell=True)
+ result = result.decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ return("Index creation command '%s' failed with return code "
+ '%d:\n%s' % (e.cmd, e.returncode, e.output.decode("utf-8")))
+ if result:
+ bb.note(result)
+ return None
+
+def create_rpm_index(d):
+ import glob
+ # Index RPMs
+ rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo_c")
+ index_cmds = []
+ archs = (d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or '').replace('-', '_')
+
+ for arch in archs.split():
+ rpm_dir = os.path.join(d.getVar('DEPLOY_DIR_RPM'), arch)
+ idx_path = os.path.join(d.getVar('WORKDIR'), 'oe-testimage-repo', arch)
+
+ if not os.path.isdir(rpm_dir):
+ continue
+
+ lockfilename = os.path.join(d.getVar('DEPLOY_DIR_RPM'), 'rpm.lock')
+ lf = bb.utils.lockfile(lockfilename, False)
+ oe.path.copyhardlinktree(rpm_dir, idx_path)
+ # Full indexes overload a 256MB image so reduce the number of rpms
+ # in the feed by filtering to specific packages needed by the tests.
+ package_list = glob.glob(idx_path + "*/*.rpm")
+
+ for pkg in package_list:
+ if not os.path.basename(pkg).startswith(("rpm", "run-postinsts", "busybox", "bash", "update-alternatives", "libc6", "curl", "musl")):
+ bb.utils.remove(pkg)
+
+ bb.utils.unlockfile(lf)
+ cmd = '%s --update -q %s' % (rpm_createrepo, idx_path)
+
+ # Create repodata
+ result = create_index(cmd)
+ if result:
+ bb.fatal(result)
+
+def package_extraction(d, test_suites):
+ from oeqa.utils.package_manager import find_packages_to_extract
+ from oeqa.utils.package_manager import extract_packages
+
+ bb.utils.remove(d.getVar("TEST_NEEDED_PACKAGES_DIR"), recurse=True)
+ packages = find_packages_to_extract(test_suites)
+ if packages:
+ bb.utils.mkdirhier(d.getVar("TEST_INSTALL_TMP_DIR"))
+ bb.utils.mkdirhier(d.getVar("TEST_PACKAGED_DIR"))
+ bb.utils.mkdirhier(d.getVar("TEST_EXTRACTED_DIR"))
+ extract_packages(d, packages)
+
+testimage_main[vardepsexclude] += "BB_ORIGENV DATETIME"
+
+python () {
+ if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
+ bb.build.addtask("testimage", "do_build", "do_image_complete", d)
+}
inherit testsdk
diff --git a/meta/classes/testsdk.bbclass b/meta/classes/testsdk.bbclass
index 0b8716edb5..758a23ac55 100644
--- a/meta/classes/testsdk.bbclass
+++ b/meta/classes/testsdk.bbclass
@@ -4,147 +4,47 @@
# testsdk.bbclass enables testing for SDK and Extensible SDK
#
-# For run SDK tests you need to do,
-# - bitbake core-image-sato -c populate_sdk
-# - bitbake core-image-sato -c testsdk
+# To run SDK tests, run the commands:
+# $ bitbake <image-name> -c populate_sdk
+# $ bitbake <image-name> -c testsdk
#
-# For run eSDK tests you need to do,
-# - bitbake core-image-sato -c populate_sdk_ext
-# - bitbake core-image-sato -c testsdkext
-
-TEST_LOG_DIR ?= "${WORKDIR}/testimage"
-TESTSDKLOCK = "${TMPDIR}/testsdk.lock"
-
-def run_test_context(CTestContext, d, testdir, tcname, pn, *args):
- import glob
- import time
-
- targets = glob.glob(d.expand(testdir + "/tc/environment-setup-*"))
- for sdkenv in targets:
- bb.plain("Testing %s" % sdkenv)
- tc = CTestContext(d, testdir, sdkenv, tcname, args)
-
- # this is a dummy load of tests
- # we are doing that to find compile errors in the tests themselves
- # before booting the image
- try:
- tc.loadTests()
- except Exception as e:
- import traceback
- bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
-
- starttime = time.time()
- result = tc.runTests()
- stoptime = time.time()
- if result.wasSuccessful():
- bb.plain("%s SDK(%s):%s - Ran %d test%s in %.3fs" % (pn, os.path.basename(tcname), os.path.basename(sdkenv),result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
- msg = "%s - OK - All required tests passed" % pn
- skipped = len(result.skipped)
- if skipped:
- msg += " (skipped=%d)" % skipped
- bb.plain(msg)
- else:
- raise bb.build.FuncFailed("%s - FAILED - check the task log and the commands log" % pn )
-
-def testsdk_main(d):
- import os
- import oeqa.sdk
- import subprocess
- from oeqa.oetest import SDKTestContext
-
- pn = d.getVar("PN", True)
- bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
+# To run eSDK tests, run the commands:
+# $ bitbake <image-name> -c populate_sdk_ext
+# $ bitbake <image-name> -c testsdkext
+#
+# where "<image-name>" is an image like core-image-sato.
- tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh")
- if not os.path.exists(tcname):
- bb.fatal("The toolchain is not built. Build it before running the tests: 'bitbake <image> -c populate_sdk' .")
+TESTSDK_CLASS_NAME ?= "oeqa.sdk.testsdk.TestSDK"
+TESTSDKEXT_CLASS_NAME ?= "oeqa.sdkext.testsdk.TestSDKExt"
- sdktestdir = d.expand("${WORKDIR}/testimage-sdk/")
- bb.utils.remove(sdktestdir, True)
- bb.utils.mkdirhier(sdktestdir)
- try:
- subprocess.check_output("cd %s; %s <<EOF\n./tc\nY\nEOF" % (sdktestdir, tcname), shell=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Couldn't install the SDK:\n%s" % e.output.decode("utf-8"))
+def import_and_run(name, d):
+ import importlib
- try:
- run_test_context(SDKTestContext, d, sdktestdir, tcname, pn)
- finally:
- bb.utils.remove(sdktestdir, True)
+ class_name = d.getVar(name)
+ if class_name:
+ module, cls = class_name.rsplit('.', 1)
+ m = importlib.import_module(module)
+ c = getattr(m, cls)()
+ c.run(d)
+ else:
+ bb.warn('No tests were run because %s did not define a class' % name)
-testsdk_main[vardepsexclude] =+ "BB_ORIGENV"
+import_and_run[vardepsexclude] = "DATETIME BB_ORIGENV"
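import_and_run makes the SDK test driver pluggable: the configured name is split into module and class, the module is imported, and run(d) is invoked on a fresh instance. A minimal sketch of a custom driver a layer could ship; the module path and class name here are hypothetical:

    # lib/oeqa/myvendor/testsdk.py, selected with
    #   TESTSDK_CLASS_NAME = "oeqa.myvendor.testsdk.MyTestSDK"
    import bb

    class MyTestSDK:
        def run(self, d):
            bb.plain("running vendor SDK tests for %s" % d.getVar("PN"))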
python do_testsdk() {
- testsdk_main(d)
+ import_and_run('TESTSDK_CLASS_NAME', d)
}
addtask testsdk
do_testsdk[nostamp] = "1"
-do_testsdk[lockfiles] += "${TESTSDKLOCK}"
-
-TEST_LOG_SDKEXT_DIR ?= "${WORKDIR}/testsdkext"
-TESTSDKEXTLOCK = "${TMPDIR}/testsdkext.lock"
-
-def testsdkext_main(d):
- import os
- import oeqa.sdkext
- import subprocess
- from bb.utils import export_proxies
- from oeqa.oetest import SDKTestContext, SDKExtTestContext
- from oeqa.utils import avoid_paths_in_environ
-
-
- # extensible sdk use network
- export_proxies(d)
-
- # extensible sdk can be contaminated if native programs are
- # in PATH, i.e. use perl-native instead of eSDK one.
- paths_to_avoid = [d.getVar('STAGING_DIR', True),
- d.getVar('BASE_WORKDIR', True)]
- os.environ['PATH'] = avoid_paths_in_environ(paths_to_avoid)
-
- pn = d.getVar("PN", True)
- bb.utils.mkdirhier(d.getVar("TEST_LOG_SDKEXT_DIR", True))
-
- tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.sh")
- if not os.path.exists(tcname):
- bb.fatal("The toolchain ext is not built. Build it before running the" \
- " tests: 'bitbake <image> -c populate_sdk_ext' .")
-
- testdir = d.expand("${WORKDIR}/testsdkext/")
- bb.utils.remove(testdir, True)
- bb.utils.mkdirhier(testdir)
- sdkdir = os.path.join(testdir, 'tc')
- try:
- subprocess.check_output("%s -y -d %s" % (tcname, sdkdir), shell=True)
- except subprocess.CalledProcessError as e:
- msg = "Couldn't install the extensible SDK:\n%s" % e.output.decode("utf-8")
- logfn = os.path.join(sdkdir, 'preparing_build_system.log')
- if os.path.exists(logfn):
- msg += '\n\nContents of preparing_build_system.log:\n'
- with open(logfn, 'r') as f:
- for line in f:
- msg += line
- bb.fatal(msg)
-
- try:
- bb.plain("Running SDK Compatibility tests ...")
- run_test_context(SDKExtTestContext, d, testdir, tcname, pn, True)
- finally:
- pass
-
- try:
- bb.plain("Running Extensible SDK tests ...")
- run_test_context(SDKExtTestContext, d, testdir, tcname, pn)
- finally:
- pass
-
- bb.utils.remove(testdir, True)
-
-testsdkext_main[vardepsexclude] =+ "BB_ORIGENV"
python do_testsdkext() {
- testsdkext_main(d)
+ import_and_run('TESTSDKEXT_CLASS_NAME', d)
}
addtask testsdkext
do_testsdkext[nostamp] = "1"
-do_testsdkext[lockfiles] += "${TESTSDKEXTLOCK}"
+
+python () {
+ if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
+ bb.build.addtask("testsdk", None, "do_populate_sdk", d)
+ bb.build.addtask("testsdkext", None, "do_populate_sdk_ext", d)
+}
diff --git a/meta/classes/texinfo.bbclass b/meta/classes/texinfo.bbclass
index 92efbccddf..6b0def0eac 100644
--- a/meta/classes/texinfo.bbclass
+++ b/meta/classes/texinfo.bbclass
@@ -1,10 +1,10 @@
# This class is inherited by recipes whose upstream packages invoke the
# texinfo utilities at build-time. Native and cross recipes are made to use the
-# dummy scripts provided by texinfo-dummy-native, for improved performance.
-# Target architecture recipes use the genuine Texinfo utilities. By default,
+# dummy scripts provided by texinfo-dummy-native, for improved performance.
+# Target architecture recipes use the genuine Texinfo utilities. By default,
# they use the Texinfo utilities on the host system. If you want to use the
-# Texinfo recipe shipped with yoco, you can remove texinfo-native from
-# ASSUME_PROVIDED and makeinfo from SANITY_REQUIRED_UTILITIES.
+# Texinfo recipe, you can remove texinfo-native from ASSUME_PROVIDED and
+# makeinfo from SANITY_REQUIRED_UTILITIES.
TEXDEP = "texinfo-native"
TEXDEP_class-native = "texinfo-dummy-native"
@@ -13,3 +13,6 @@ DEPENDS_append = " ${TEXDEP}"
PATH_prepend_class-native = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
PATH_prepend_class-cross = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
+# libtool-cross doesn't inherit cross
+TEXDEP_pn-libtool-cross = "texinfo-dummy-native"
+PATH_prepend_pn-libtool-cross = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
diff --git a/meta/classes/tinderclient.bbclass b/meta/classes/tinderclient.bbclass
deleted file mode 100644
index 917b74d887..0000000000
--- a/meta/classes/tinderclient.bbclass
+++ /dev/null
@@ -1,368 +0,0 @@
-def tinder_http_post(server, selector, content_type, body):
- import httplib
- # now post it
- for i in range(0,5):
- try:
- h = httplib.HTTP(server)
- h.putrequest('POST', selector)
- h.putheader('content-type', content_type)
- h.putheader('content-length', str(len(body)))
- h.endheaders()
- h.send(body)
- errcode, errmsg, headers = h.getreply()
- #print(errcode, errmsg, headers)
- return (errcode,errmsg, headers, h.file)
- except:
- print("Error sending the report!")
- # try again
- pass
-
- # return some garbage
- return (-1, "unknown", "unknown", None)
-
-def tinder_form_data(bound, dict, log):
- output = []
- # for each key in the dictionary
- for name in dict:
- assert dict[name]
- output.append( "--" + bound )
- output.append( 'Content-Disposition: form-data; name="%s"' % name )
- output.append( "" )
- output.append( dict[name] )
- if log:
- output.append( "--" + bound )
- output.append( 'Content-Disposition: form-data; name="log"; filename="log.txt"' )
- output.append( '' )
- output.append( log )
- output.append( '--' + bound + '--' )
- output.append( '' )
-
- return "\r\n".join(output)
-
-def tinder_time_string():
- """
- Return the time as GMT
- """
- return ""
-
-def tinder_format_http_post(d,status,log):
- """
- Format the Tinderbox HTTP post with the data needed
- for the tinderbox to be happy.
- """
-
- import random
-
- # the variables we will need to send on this form post
- variables = {
- "tree" : d.getVar('TINDER_TREE', True),
- "machine_name" : d.getVar('TINDER_MACHINE', True),
- "os" : os.uname()[0],
- "os_version" : os.uname()[2],
- "compiler" : "gcc",
- "clobber" : d.getVar('TINDER_CLOBBER', True) or "0",
- "srcdate" : d.getVar('SRCDATE', True),
- "PN" : d.getVar('PN', True),
- "PV" : d.getVar('PV', True),
- "PR" : d.getVar('PR', True),
- "FILE" : d.getVar('FILE', True) or "N/A",
- "TARGETARCH" : d.getVar('TARGET_ARCH', True),
- "TARGETFPU" : d.getVar('TARGET_FPU', True) or "Unknown",
- "TARGETOS" : d.getVar('TARGET_OS', True) or "Unknown",
- "MACHINE" : d.getVar('MACHINE', True) or "Unknown",
- "DISTRO" : d.getVar('DISTRO', True) or "Unknown",
- "zecke-rocks" : "sure",
- }
-
- # optionally add the status
- if status:
- variables["status"] = str(status)
-
- # try to load the machine id
- # we only need on build_status.pl but sending it
- # always does not hurt
- try:
- f = open(d.getVar('TMPDIR',True)+'/tinder-machine.id', 'r')
- id = f.read()
- variables['machine_id'] = id
- except:
- pass
-
- # the boundary we will need
- boundary = "----------------------------------%d" % int(random.random()*1000000000000)
-
- # now format the body
- body = tinder_form_data( boundary, variables, log )
-
- return ("multipart/form-data; boundary=%s" % boundary),body
-
-
-def tinder_build_start(d):
- """
- Inform the tinderbox that a build is starting. We do this
- by posting our name and tree to the build_start.pl script
- on the server.
- """
-
- # get the body and type
- content_type, body = tinder_format_http_post(d,None,None)
- server = d.getVar('TINDER_HOST', True )
- url = d.getVar('TINDER_URL', True )
-
- selector = url + "/xml/build_start.pl"
-
- #print("selector %s and url %s" % (selector, url))
-
- # now post it
- errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
- #print(errcode, errmsg, headers)
- report = h_file.read()
-
- # now let us find the machine id that was assigned to us
- search = "<machine id='"
- report = report[report.find(search)+len(search):]
- report = report[0:report.find("'")]
-
- bb.note("Machine ID assigned by tinderbox: %s" % report )
-
- # now we will need to save the machine number
- # we will override any previous numbers
- f = open(d.getVar('TMPDIR', True)+"/tinder-machine.id", 'w')
- f.write(report)
-
-
-def tinder_send_http(d, status, _log):
- """
- Send this log as build status
- """
-
- # get the body and type
- server = d.getVar('TINDER_HOST', True)
- url = d.getVar('TINDER_URL', True)
-
- selector = url + "/xml/build_status.pl"
-
- # now post it - in chunks of 10.000 characters
- new_log = _log
- while len(new_log) > 0:
- content_type, body = tinder_format_http_post(d,status,new_log[0:18000])
- errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
- #print(errcode, errmsg, headers)
- #print(h.file.read())
- new_log = new_log[18000:]
-
-
-def tinder_print_info(d):
- """
- Print the TinderBox Info
- Including informations of the BaseSystem and the Tree
- we use.
- """
-
- # get the local vars
- time = tinder_time_string()
- ops = os.uname()[0]
- version = os.uname()[2]
- url = d.getVar( 'TINDER_URL' , True )
- tree = d.getVar( 'TINDER_TREE', True )
- branch = d.getVar( 'TINDER_BRANCH', True )
- srcdate = d.getVar( 'SRCDATE', True )
- machine = d.getVar( 'MACHINE', True )
- distro = d.getVar( 'DISTRO', True )
- bbfiles = d.getVar( 'BBFILES', True )
- tarch = d.getVar( 'TARGET_ARCH', True )
- fpu = d.getVar( 'TARGET_FPU', True )
- oerev = d.getVar( 'OE_REVISION', True ) or "unknown"
-
- # there is a bug with tipple quoted strings
- # i will work around but will fix the original
- # bug as well
- output = []
- output.append("== Tinderbox Info" )
- output.append("Time: %(time)s" )
- output.append("OS: %(ops)s" )
- output.append("%(version)s" )
- output.append("Compiler: gcc" )
- output.append("Tinderbox Client: 0.1" )
- output.append("Tinderbox Client Last Modified: yesterday" )
- output.append("Tinderbox Protocol: 0.1" )
- output.append("URL: %(url)s" )
- output.append("Tree: %(tree)s" )
- output.append("Config:" )
- output.append("branch = '%(branch)s'" )
- output.append("TARGET_ARCH = '%(tarch)s'" )
- output.append("TARGET_FPU = '%(fpu)s'" )
- output.append("SRCDATE = '%(srcdate)s'" )
- output.append("MACHINE = '%(machine)s'" )
- output.append("DISTRO = '%(distro)s'" )
- output.append("BBFILES = '%(bbfiles)s'" )
- output.append("OEREV = '%(oerev)s'" )
- output.append("== End Tinderbox Client Info" )
-
- # now create the real output
- return "\n".join(output) % vars()
-
-
-def tinder_print_env():
- """
- Print the environment variables of this build
- """
- time_start = tinder_time_string()
- time_end = tinder_time_string()
-
- # build the environment
- env = ""
- for var in os.environ:
- env += "%s=%s\n" % (var, os.environ[var])
-
- output = []
- output.append( "---> TINDERBOX RUNNING env %(time_start)s" )
- output.append( env )
- output.append( "<--- TINDERBOX FINISHED (SUCCESS) %(time_end)s" )
-
- return "\n".join(output) % vars()
-
-def tinder_tinder_start(d, event):
- """
- PRINT the configuration of this build
- """
-
- time_start = tinder_time_string()
- config = tinder_print_info(d)
- #env = tinder_print_env()
- time_end = tinder_time_string()
- packages = " ".join( event.getPkgs() )
-
- output = []
- output.append( "---> TINDERBOX PRINTING CONFIGURATION %(time_start)s" )
- output.append( config )
- #output.append( env )
- output.append( "<--- TINDERBOX FINISHED PRINTING CONFIGURATION %(time_end)s" )
- output.append( "---> TINDERBOX BUILDING '%(packages)s'" )
- output.append( "<--- TINDERBOX STARTING BUILD NOW" )
-
- output.append( "" )
-
- return "\n".join(output) % vars()
-
-def tinder_do_tinder_report(event):
- """
- Report to the tinderbox:
- On the BuildStart we will inform the box directly
- On the other events we will write to the TINDER_LOG and
- when the Task is finished we will send the report.
-
- The above is not yet fully implemented. Currently we send
- information immediately. The caching/queuing needs to be
- implemented. Also sending more or less information is not
- implemented yet.
-
- We have two temporary files stored in the TMP directory. One file
- contains the assigned machine id for the tinderclient. This id gets
- assigned when we connect the box and start the build process the second
- file is used to workaround an EventHandler limitation. If BitBake is ran
- with the continue option we want the Build to fail even if we get the
- BuildCompleted Event. In this case we have to look up the status and
- send it instead of 100/success.
- """
- import glob
-
- # variables
- name = bb.event.getName(event)
- log = ""
- status = 1
- # Check what we need to do Build* shows we start or are done
- if name == "BuildStarted":
- tinder_build_start(event.data)
- log = tinder_tinder_start(event.data,event)
-
- try:
- # truncate the tinder log file
- f = open(event.data.getVar('TINDER_LOG', True), 'w')
- f.write("")
- f.close()
- except:
- pass
-
- try:
- # write a status to the file. This is needed for the -k option
- # of BitBake
- g = open(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
- g.write("")
- g.close()
- except IOError:
- pass
-
- # Append the Task-Log (compile,configure...) to the log file
- # we will send to the server
- if name == "TaskSucceeded" or name == "TaskFailed":
- log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
-
- if len(log_file) != 0:
- to_file = event.data.getVar('TINDER_LOG', True)
- log += "".join(open(log_file[0], 'r').readlines())
-
- # set the right 'HEADER'/Summary for the TinderBox
- if name == "TaskStarted":
- log += "---> TINDERBOX Task %s started\n" % event.task
- elif name == "TaskSucceeded":
- log += "<--- TINDERBOX Task %s done (SUCCESS)\n" % event.task
- elif name == "TaskFailed":
- log += "<--- TINDERBOX Task %s failed (FAILURE)\n" % event.task
- elif name == "PkgStarted":
- log += "---> TINDERBOX Package %s started\n" % event.data.getVar('PF', True)
- elif name == "PkgSucceeded":
- log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % event.data.getVar('PF', True)
- elif name == "PkgFailed":
- if not event.data.getVar('TINDER_AUTOBUILD', True) == "0":
- build.exec_task('do_clean', event.data)
- log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % event.data.getVar('PF', True)
- status = 200
- # remember the failure for the -k case
- h = open(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
- h.write("200")
- elif name == "BuildCompleted":
- log += "Build Completed\n"
- status = 100
- # Check if we have a old status...
- try:
- h = open(event.data.getVar('TMPDIR',True)+'/tinder-status', 'r')
- status = int(h.read())
- except:
- pass
-
- elif name == "MultipleProviders":
- log += "---> TINDERBOX Multiple Providers\n"
- log += "multiple providers are available (%s);\n" % ", ".join(event.getCandidates())
- log += "consider defining PREFERRED_PROVIDER_%s\n" % event.getItem()
- log += "is runtime: %d\n" % event.isRuntime()
- log += "<--- TINDERBOX Multiple Providers\n"
- elif name == "NoProvider":
- log += "Error: No Provider for: %s\n" % event.getItem()
- log += "Error:Was Runtime: %d\n" % event.isRuntime()
- status = 200
- # remember the failure for the -k case
- h = open(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
- h.write("200")
-
- # now post the log
- if len(log) == 0:
- return
-
- # for now we will use the http post method as it is the only one
- log_post_method = tinder_send_http
- log_post_method(event.data, status, log)
-
-
-# we want to be an event handler
-addhandler tinderclient_eventhandler
-python tinderclient_eventhandler() {
- if e.data is None or bb.event.getName(e) == "MsgNote":
- return
-
- do_tinder_report = e.data.getVar('TINDER_REPORT', True)
- if do_tinder_report and do_tinder_report == "1":
- tinder_do_tinder_report(e)
-
- return
-}
diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass
index 4bddf34e9c..6cef0b8f6e 100644
--- a/meta/classes/toaster.bbclass
+++ b/meta/classes/toaster.bbclass
@@ -80,7 +80,7 @@ python toaster_layerinfo_dumpdata() {
return layer_info
- bblayers = e.data.getVar("BBLAYERS", True)
+ bblayers = e.data.getVar("BBLAYERS")
llayerinfo = {}
@@ -119,10 +119,10 @@ python toaster_package_dumpdata() {
"""
# No need to try and dumpdata if the recipe isn't generating packages
- if not d.getVar('PACKAGES', True):
+ if not d.getVar('PACKAGES'):
return
- pkgdatadir = d.getVar('PKGDESTWORK', True)
+ pkgdatadir = d.getVar('PKGDESTWORK')
lpkgdata = {}
datadir = os.path.join(pkgdatadir, 'runtime')
@@ -142,7 +142,7 @@ python toaster_artifact_dumpdata() {
"""
event_data = {
- "TOOLCHAIN_OUTPUTNAME": d.getVar("TOOLCHAIN_OUTPUTNAME", True)
+ "TOOLCHAIN_OUTPUTNAME": d.getVar("TOOLCHAIN_OUTPUTNAME")
}
bb.event.fire(bb.event.MetadataEvent("SDKArtifactInfo", event_data), d)
@@ -157,11 +157,11 @@ python toaster_collect_task_stats() {
import bb.utils
import os
- toaster_statlist_file = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist")
-
- if not e.data.getVar('BUILDSTATS_BASE', True):
+ if not e.data.getVar('BUILDSTATS_BASE'):
return # if we don't have buildstats, we cannot collect stats
+ toaster_statlist_file = os.path.join(e.data.getVar('BUILDSTATS_BASE'), "toasterstatlist")
+
def stat_to_float(value):
return float(value.strip('% \n\r'))
@@ -246,7 +246,7 @@ python toaster_buildhistory_dump() {
import re
BUILDHISTORY_DIR = e.data.expand("${TOPDIR}/buildhistory")
BUILDHISTORY_DIR_IMAGE_BASE = e.data.expand("%s/images/${MACHINE_ARCH}/${TCLIBC}/"% BUILDHISTORY_DIR)
- pkgdata_dir = e.data.getVar("PKGDATA_DIR", True)
+ pkgdata_dir = e.data.getVar("PKGDATA_DIR")
# scan the build targets for this build
@@ -265,28 +265,33 @@ python toaster_buildhistory_dump() {
with open("%s/installed-package-sizes.txt" % installed_img_path, "r") as fin:
for line in fin:
line = line.rstrip(";")
- psize, px = line.split("\t")
- punit, pname = px.split(" ")
+ psize, punit, pname = line.split()
# this size is "installed-size" as it measures how much space it takes on disk
images[target][pname.strip()] = {'size':int(psize)*1024, 'depends' : []}
with open("%s/depends.dot" % installed_img_path, "r") as fin:
- p = re.compile(r' -> ')
- dot = re.compile(r'.*style=dotted')
+ p = re.compile(r'\s*"(?P<name>[^"]+)"\s*->\s*"(?P<dep>[^"]+)"(?P<rec>.*?\[style=dotted\])?')
for line in fin:
- line = line.rstrip(';')
- linesplit = p.split(line)
- if len(linesplit) == 2:
- pname = linesplit[0].rstrip('"').strip('"')
- dependsname = linesplit[1].split(" ")[0].strip().strip(";").strip('"').rstrip('"')
- deptype = "depends"
- if dot.match(line):
- deptype = "recommends"
- if not pname in images[target]:
- images[target][pname] = {'size': 0, 'depends' : []}
- if not dependsname in images[target]:
- images[target][dependsname] = {'size': 0, 'depends' : []}
- images[target][pname]['depends'].append((dependsname, deptype))
+ m = p.match(line)
+ if not m:
+ continue
+ pname = m.group('name')
+ dependsname = m.group('dep')
+ deptype = 'recommends' if m.group('rec') else 'depends'
+
+ # If RPM is used for packaging, then there may be
+ # dependencies such as "/bin/sh", which will confuse
+ # _toaster_load_pkgdatafile() later on. While at it, ignore
+ # any dependencies that contain parentheses, e.g.,
+ # "libc.so.6(GLIBC_2.7)".
+ if dependsname.startswith('/') or '(' in dependsname:
+ continue
+
+ if not pname in images[target]:
+ images[target][pname] = {'size': 0, 'depends' : []}
+ if not dependsname in images[target]:
+ images[target][dependsname] = {'size': 0, 'depends' : []}
+ images[target][pname]['depends'].append((dependsname, deptype))
# files-in-image.txt is only generated if an image file is created,
# so the file entries ('syms', 'dirs', 'files') for a target will be
@@ -329,8 +334,18 @@ python toaster_artifacts() {
if e.taskname in ["do_deploy", "do_image_complete", "do_populate_sdk", "do_populate_sdk_ext"]:
d2 = d.createCopy()
d2.setVar('FILE', e.taskfile)
- d2.setVar('SSTATE_MANMACH', d2.expand("${MACHINE}"))
+        # Use 'stamp-extra-info' if present, else fall back to a workaround
+        # to determine 'SSTATE_MANMACH'
+ extrainf = d2.getVarFlag(e.taskname, 'stamp-extra-info')
+ if extrainf:
+ d2.setVar('SSTATE_MANMACH', extrainf)
+ else:
+ if "do_populate_sdk" == e.taskname:
+ d2.setVar('SSTATE_MANMACH', d2.expand("${MACHINE}${SDKMACHINE}"))
+ else:
+ d2.setVar('SSTATE_MANMACH', d2.expand("${MACHINE}"))
manifest = oe.sstatesig.sstate_get_manifest_filename(e.taskname[3:], d2)[0]
+
if os.access(manifest, os.R_OK):
with open(manifest) as fmanifest:
artifacts = [fname.strip() for fname in fmanifest]
@@ -357,8 +372,9 @@ do_packagedata_setscene[vardepsexclude] += "toaster_package_dumpdata "
do_package[postfuncs] += "toaster_package_dumpdata "
do_package[vardepsexclude] += "toaster_package_dumpdata "
-do_populate_sdk[postfuncs] += "toaster_artifact_dumpdata "
-do_populate_sdk[vardepsexclude] += "toaster_artifact_dumpdata "
+#do_populate_sdk[postfuncs] += "toaster_artifact_dumpdata "
+#do_populate_sdk[vardepsexclude] += "toaster_artifact_dumpdata "
+
+#do_populate_sdk_ext[postfuncs] += "toaster_artifact_dumpdata "
+#do_populate_sdk_ext[vardepsexclude] += "toaster_artifact_dumpdata "
-do_populate_sdk_ext[postfuncs] += "toaster_artifact_dumpdata "
-do_populate_sdk_ext[vardepsexclude] += "toaster_artifact_dumpdata "
\ No newline at end of file
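For reference, a minimal standalone sketch of how the new depends.dot regex above classifies edges (the sample lines are hypothetical dot output, not taken from a real build):

    import re

    p = re.compile(r'\s*"(?P<name>[^"]+)"\s*->\s*"(?P<dep>[^"]+)"(?P<rec>.*?\[style=dotted\])?')

    samples = [
        '"busybox" -> "libc6"',                          # hard dependency
        '"busybox" -> "busybox-syslog" [style=dotted]',  # recommendation
        '"bash" -> "/bin/sh"',                           # RPM-style file dep
    ]
    for line in samples:
        m = p.match(line)
        if not m:
            continue
        dep = m.group('dep')
        # Skip file deps and versioned symbols, as the class now does
        if dep.startswith('/') or '(' in dep:
            continue
        print(m.group('name'), 'recommends' if m.group('rec') else 'depends', dep)

This prints one classified edge per usable line and silently drops the RPM-style file dependency, mirroring the filtering added above.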
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index 0e11f2d7a0..db1d3215ef 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -3,7 +3,6 @@ inherit toolchain-scripts-base siteinfo kernel-arch
# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
# doesn't always match our expectations... but we default to the stock value
REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
-TARGET_CC_ARCH_append_libc-uclibc = " -muclibc"
TARGET_CC_ARCH_append_libc-musl = " -mmusl"
# default debug prefix map isn't valid in the SDK
@@ -25,19 +24,37 @@ toolchain_create_sdk_env_script () {
script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-$multimach_target_sys}
rm -f $script
touch $script
+
+ echo '# Check for LD_LIBRARY_PATH being set, which can break SDK and generally is a bad practice' >> $script
+ echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script
+ echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script
+ echo '# Only disable this check if you absolutely know what you are doing!' >> $script
+ echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script
+ echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script
+ echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script
+ echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script
+ echo ' echo "For more references see:"' >> $script
+ echo ' echo " http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80"' >> $script
+ echo ' echo " http://xahlee.info/UnixResource_dir/_/ldpath.html"' >> $script
+ echo ' return 1' >> $script
+ echo 'fi' >> $script
+
echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
EXTRAPATH=""
for i in ${CANADIANEXTRAOS}; do
EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
done
echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
- echo "export CCACHE_PATH=$sdkpathnative$bindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$CCACHE_PATH' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
echo "export OECORE_NATIVE_SYSROOT=\"$sdkpathnative\"" >> $script
echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
echo "export OECORE_ACLOCAL_OPTS=\"-I $sdkpathnative/usr/share/aclocal\"" >> $script
+ echo 'export OECORE_BASELIB="${baselib}"' >> $script
+ echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script
+ echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script
+
echo 'unset command_not_found_handle' >> $script
toolchain_shared_env_script
@@ -49,8 +66,8 @@ toolchain_create_tree_env_script () {
script=${TMPDIR}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
rm -f $script
touch $script
- echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${PATH}' >> $script
- echo 'export CCACHE_PATH=${STAGING_DIR_NATIVE}/usr/bin:${CCACHE_PATH}' >> $script
+ echo 'orig=`pwd`; cd ${COREBASE}; . ./oe-init-build-env ${TOPDIR}; cd $orig' >> $script
+ echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${STAGING_BINDIR_TOOLCHAIN}:$PATH' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
@@ -73,6 +90,7 @@ toolchain_shared_env_script () {
echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
+ echo 'export READELF=${TARGET_PREFIX}readelf' >> $script
echo 'export AR=${TARGET_PREFIX}ar' >> $script
echo 'export NM=${TARGET_PREFIX}nm' >> $script
echo 'export M4=m4' >> $script
@@ -104,10 +122,46 @@ fi
EOF
}
+toolchain_create_post_relocate_script() {
+ relocate_script=$1
+ env_dir=$2
+ rm -f $relocate_script
+ touch $relocate_script
+
+ cat >> $relocate_script <<EOF
+if [ -d "${SDKPATHNATIVE}/post-relocate-setup.d/" ]; then
+ # Source top-level SDK env scripts in case they are needed for the relocate
+ # scripts.
+ for env_setup_script in ${env_dir}/environment-setup-*; do
+ . \$env_setup_script
+ status=\$?
+ if [ \$status != 0 ]; then
+ echo "\$0: Failed to source \$env_setup_script with status \$status"
+ exit \$status
+ fi
+
+ for s in ${SDKPATHNATIVE}/post-relocate-setup.d/*; do
+ if [ ! -x \$s ]; then
+ continue
+ fi
+ \$s "\$1"
+ status=\$?
+ if [ \$status != 0 ]; then
+ echo "post-relocate command \"\$s \$1\" failed with status \$status" >&2
+ exit \$status
+ fi
+ done
+ done
+ rm -rf "${SDKPATHNATIVE}/post-relocate-setup.d"
+fi
+EOF
+}
+
#we get the cached site config in the runtime
TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d)}"
TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
+DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}"
#This function creates a site config file
toolchain_create_sdk_siteconfig () {
@@ -139,9 +193,9 @@ toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTC
python __anonymous () {
import oe.classextend
deps = ""
- for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE', True) or "").split():
+ for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE') or "").split():
deps += " %s:do_populate_sysroot" % dep
- for variant in (d.getVar('MULTILIB_VARIANTS', True) or "").split():
+ for variant in (d.getVar('MULTILIB_VARIANTS') or "").split():
clsextend = oe.classextend.ClassExtender(variant, d)
newdep = clsextend.extend_name(dep)
deps += " %s:do_populate_sysroot" % newdep
diff --git a/meta/classes/typecheck.bbclass b/meta/classes/typecheck.bbclass
index 6bff7c7138..72da932232 100644
--- a/meta/classes/typecheck.bbclass
+++ b/meta/classes/typecheck.bbclass
@@ -5,7 +5,7 @@
python check_types() {
import oe.types
for key in e.data.keys():
- if e.data.getVarFlag(key, "type", True):
+ if e.data.getVarFlag(key, "type"):
oe.data.typed_value(key, e.data)
}
addhandler check_types
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
index 3f760f2fbe..89ff970fcc 100644
--- a/meta/classes/uboot-config.bbclass
+++ b/meta/classes/uboot-config.bbclass
@@ -14,37 +14,34 @@
UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}"
python () {
- ubootmachine = d.getVar("UBOOT_MACHINE", True)
+ ubootmachine = d.getVar("UBOOT_MACHINE")
ubootconfigflags = d.getVarFlags('UBOOT_CONFIG')
- ubootbinary = d.getVar('UBOOT_BINARY', True)
- ubootbinaries = d.getVar('UBOOT_BINARIES', True)
+ ubootbinary = d.getVar('UBOOT_BINARY')
+ ubootbinaries = d.getVar('UBOOT_BINARIES')
# The "doc" varflag is special, we don't want to see it here
ubootconfigflags.pop('doc', None)
+ ubootconfig = (d.getVar('UBOOT_CONFIG') or "").split()
- if not ubootmachine and not ubootconfigflags:
- PN = d.getVar("PN", True)
- FILE = os.path.basename(d.getVar("FILE", True))
+ if not ubootmachine and not ubootconfig:
+ PN = d.getVar("PN")
+ FILE = os.path.basename(d.getVar("FILE"))
bb.debug(1, "To build %s, see %s for instructions on \
setting up your machine config" % (PN, FILE))
- raise bb.parse.SkipPackage("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE", True))
+ raise bb.parse.SkipRecipe("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE"))
- if ubootmachine and ubootconfigflags:
- raise bb.parse.SkipPackage("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
+ if ubootmachine and ubootconfig:
+ raise bb.parse.SkipRecipe("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
if ubootconfigflags and ubootbinaries:
- raise bb.parse.SkipPackage("You cannot use UBOOT_BINARIES as it is internal to uboot_config.bbclass.")
+ raise bb.parse.SkipRecipe("You cannot use UBOOT_BINARIES as it is internal to uboot_config.bbclass.")
- if not ubootconfigflags:
- return
-
- ubootconfig = (d.getVar('UBOOT_CONFIG', True) or "").split()
if len(ubootconfig) > 0:
for config in ubootconfig:
for f, v in ubootconfigflags.items():
if config == f:
items = v.split(',')
if items[0] and len(items) > 3:
- raise bb.parse.SkipPackage('Only config,images,binary can be specified!')
+ raise bb.parse.SkipRecipe('Only config,images,binary can be specified!')
d.appendVar('UBOOT_MACHINE', ' ' + items[0])
# IMAGE_FSTYPES appending
if len(items) > 1 and items[1]:
@@ -57,6 +54,4 @@ python () {
bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % ubootbinary)
d.appendVar('UBOOT_BINARIES', ' ' + ubootbinary)
break
- elif len(ubootconfig) == 0:
- raise bb.parse.SkipPackage('You must set a default in UBOOT_CONFIG.')
}
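A hedged sketch of the UBOOT_CONFIG form this anonymous function parses, as it might appear in a machine configuration (the machine and defconfig names are made up):

    UBOOT_CONFIG ??= "sd nor"
    # Each flag value is "config[,images[,binary]]"; more than three items is rejected.
    UBOOT_CONFIG[sd]  = "mymachine_sd_defconfig,sdcard"
    UBOOT_CONFIG[nor] = "mymachine_nor_defconfig,,u-boot.rom"

The first item is appended to UBOOT_MACHINE, the optional second to IMAGE_FSTYPES, and the optional third names the binary appended to UBOOT_BINARIES (UBOOT_BINARY is the default).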
diff --git a/meta/classes/uboot-extlinux-config.bbclass b/meta/classes/uboot-extlinux-config.bbclass
new file mode 100644
index 0000000000..f4bf94be04
--- /dev/null
+++ b/meta/classes/uboot-extlinux-config.bbclass
@@ -0,0 +1,157 @@
+# uboot-extlinux-config.bbclass
+#
+# This class allows the generation of an extlinux.conf file for use by
+# U-Boot. U-Boot supports it so that OpenEmbedded-based products can use
+# the Generic Distribution Configuration specification.
+#
+# External variables:
+#
+# UBOOT_EXTLINUX_CONSOLE - Set to "console=ttyX" to change kernel boot
+# default console.
+# UBOOT_EXTLINUX_LABELS - A list of targets for the automatic config.
+# UBOOT_EXTLINUX_KERNEL_ARGS - Add additional kernel arguments.
+# UBOOT_EXTLINUX_KERNEL_IMAGE - Kernel image name.
+# UBOOT_EXTLINUX_FDTDIR - Device tree directory.
+# UBOOT_EXTLINUX_FDT - Device tree file.
+# UBOOT_EXTLINUX_INITRD - Indicates a list of filesystem images to
+# concatenate and use as an initrd (optional).
+# UBOOT_EXTLINUX_MENU_DESCRIPTION - Name to use as description.
+# UBOOT_EXTLINUX_ROOT - Root kernel cmdline.
+# UBOOT_EXTLINUX_TIMEOUT - Timeout before DEFAULT selection is made.
+# Measured in 1/10 of a second.
+# UBOOT_EXTLINUX_DEFAULT_LABEL - Target to be selected by default after
+# the timeout period
+#
+# If there is only one label, the system will boot automatically and no menu
+# will be created. If you want to use more than one label, e.g. linux and
+# alternate, use overrides to set the menu description, console and other
+# variables.
+#
+# Ex:
+#
+# UBOOT_EXTLINUX_LABELS ??= "default fallback"
+#
+# UBOOT_EXTLINUX_DEFAULT_LABEL ??= "Linux Default"
+# UBOOT_EXTLINUX_TIMEOUT ??= "30"
+#
+# UBOOT_EXTLINUX_KERNEL_IMAGE_default ??= "../zImage"
+# UBOOT_EXTLINUX_MENU_DESCRIPTION_default ??= "Linux Default"
+#
+# UBOOT_EXTLINUX_KERNEL_IMAGE_fallback ??= "../zImage-fallback"
+# UBOOT_EXTLINUX_MENU_DESCRIPTION_fallback ??= "Linux Fallback"
+#
+# Results:
+#
+# menu title Select the boot mode
+# TIMEOUT 30
+# DEFAULT Linux Default
+# LABEL Linux Default
+# KERNEL ../zImage
+# FDTDIR ../
+# APPEND root=/dev/mmcblk2p2 rootwait rw console=${console}
+# LABEL Linux Fallback
+# KERNEL ../zImage-fallback
+# FDTDIR ../
+# APPEND root=/dev/mmcblk2p2 rootwait rw console=${console}
+#
+# Copyright (C) 2016, O.S. Systems Software LTDA. All Rights Reserved
+# Released under the MIT license (see packages/COPYING)
+#
+# The kernel has an internal default console, which you can override with
+# a console=...some_tty...
+UBOOT_EXTLINUX_CONSOLE ??= "console=${console},${baudrate}"
+UBOOT_EXTLINUX_LABELS ??= "linux"
+UBOOT_EXTLINUX_FDT ??= ""
+UBOOT_EXTLINUX_FDTDIR ??= "../"
+UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}"
+UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw"
+UBOOT_EXTLINUX_MENU_DESCRIPTION_linux ??= "${DISTRO_NAME}"
+
+UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf"
+
+python do_create_extlinux_config() {
+ if d.getVar("UBOOT_EXTLINUX") != "1":
+ return
+
+ if not d.getVar('WORKDIR'):
+ bb.error("WORKDIR not defined, unable to package")
+
+ labels = d.getVar('UBOOT_EXTLINUX_LABELS')
+ if not labels:
+ bb.fatal("UBOOT_EXTLINUX_LABELS not defined, nothing to do")
+
+ if not labels.strip():
+ bb.fatal("No labels, nothing to do")
+
+ cfile = d.getVar('UBOOT_EXTLINUX_CONFIG')
+ if not cfile:
+ bb.fatal('Unable to read UBOOT_EXTLINUX_CONFIG')
+
+ localdata = bb.data.createCopy(d)
+
+ try:
+ with open(cfile, 'w') as cfgfile:
+ cfgfile.write('# Generic Distro Configuration file generated by OpenEmbedded\n')
+
+ if len(labels.split()) > 1:
+ cfgfile.write('menu title Select the boot mode\n')
+
+ timeout = localdata.getVar('UBOOT_EXTLINUX_TIMEOUT')
+ if timeout:
+ cfgfile.write('TIMEOUT %s\n' % (timeout))
+
+ if len(labels.split()) > 1:
+ default = localdata.getVar('UBOOT_EXTLINUX_DEFAULT_LABEL')
+ if default:
+ cfgfile.write('DEFAULT %s\n' % (default))
+
+ # Need to deconflict the labels with existing overrides
+ label_overrides = labels.split()
+ default_overrides = localdata.getVar('OVERRIDES').split(':')
+            # We're keeping all the existing overrides that aren't used as a label;
+            # an override for that label will be added back in while we're
+            # processing that label.
+ keep_overrides = list(filter(lambda x: x not in label_overrides, default_overrides))
+
+ for label in labels.split():
+
+ localdata.setVar('OVERRIDES', ':'.join(keep_overrides + [label]))
+
+ extlinux_console = localdata.getVar('UBOOT_EXTLINUX_CONSOLE')
+
+ menu_description = localdata.getVar('UBOOT_EXTLINUX_MENU_DESCRIPTION')
+ if not menu_description:
+ menu_description = label
+
+ root = localdata.getVar('UBOOT_EXTLINUX_ROOT')
+ if not root:
+ bb.fatal('UBOOT_EXTLINUX_ROOT not defined')
+
+ kernel_image = localdata.getVar('UBOOT_EXTLINUX_KERNEL_IMAGE')
+ fdtdir = localdata.getVar('UBOOT_EXTLINUX_FDTDIR')
+
+ fdt = localdata.getVar('UBOOT_EXTLINUX_FDT')
+
+ if fdt:
+ cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDT %s\n' %
+ (menu_description, kernel_image, fdt))
+ elif fdtdir:
+ cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDTDIR %s\n' %
+ (menu_description, kernel_image, fdtdir))
+ else:
+ cfgfile.write('LABEL %s\n\tKERNEL %s\n' % (menu_description, kernel_image))
+
+ kernel_args = localdata.getVar('UBOOT_EXTLINUX_KERNEL_ARGS')
+
+ initrd = localdata.getVar('UBOOT_EXTLINUX_INITRD')
+ if initrd:
+ cfgfile.write('\tINITRD %s\n'% initrd)
+
+ kernel_args = root + " " + kernel_args
+ cfgfile.write('\tAPPEND %s %s\n' % (kernel_args, extlinux_console))
+
+ except OSError:
+ bb.fatal('Unable to open %s' % (cfile))
+}
+UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
+do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s_%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
+
+addtask create_extlinux_config before do_install do_deploy after do_compile
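A minimal sketch of enabling the class, e.g. from a machine configuration used with a kernel recipe that inherits it (all values below are illustrative, not defaults):

    UBOOT_EXTLINUX = "1"
    UBOOT_EXTLINUX_ROOT = "root=/dev/mmcblk0p2"
    UBOOT_EXTLINUX_LABELS = "linux recovery"
    UBOOT_EXTLINUX_KERNEL_IMAGE_recovery = "../zImage-recovery"
    UBOOT_EXTLINUX_MENU_DESCRIPTION_recovery = "Recovery Mode"

With two labels, do_create_extlinux_config emits a menu title, a DEFAULT entry (when UBOOT_EXTLINUX_DEFAULT_LABEL is set) and one LABEL block per entry, using the per-label overrides resolved above.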
diff --git a/meta/classes/uboot-sign.bbclass b/meta/classes/uboot-sign.bbclass
index 3c56db8872..982ed46d01 100644
--- a/meta/classes/uboot-sign.bbclass
+++ b/meta/classes/uboot-sign.bbclass
@@ -19,13 +19,17 @@
# The tasks sequence is set as below, using DEPLOY_IMAGE_DIR as common place to
# treat the device tree blob:
#
-# u-boot:do_deploy_dtb
-# u-boot:do_deploy
-# virtual/kernel:do_assemble_fitimage
-# u-boot:do_concat_dtb
-# u-boot:do_install
+# * u-boot:do_install_append
+#   Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
+#   signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
#
-# For more details on signature process, please refer to U-boot documentation.
+# * virtual/kernel:do_assemble_fitimage
+# Sign the image
+#
+# * u-boot:do_deploy[postfuncs]
+# Deploy files like UBOOT_DTB_IMAGE, UBOOT_DTB_SYMLINK and others.
+#
+# For more details on the signature process, please refer to the U-Boot documentation.
# Signature activation.
UBOOT_SIGN_ENABLE ?= "0"
@@ -38,58 +42,90 @@ UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
UBOOT_NODTB_BINARY ?= "u-boot-nodtb.${UBOOT_SUFFIX}"
UBOOT_NODTB_SYMLINK ?= "u-boot-nodtb-${MACHINE}.${UBOOT_SUFFIX}"
-#
-# Following is relevant only for u-boot recipes:
-#
-
-do_deploy_dtb () {
- mkdir -p ${DEPLOYDIR}
- cd ${DEPLOYDIR}
+# The functions in this bbclass are for u-boot only
+UBOOT_PN = "${@d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'}"
- if [ -f ${B}/${UBOOT_DTB_BINARY} ]; then
- install ${B}/${UBOOT_DTB_BINARY} ${DEPLOYDIR}/${UBOOT_DTB_IMAGE}
- rm -f ${UBOOT_DTB_BINARY} ${UBOOT_DTB_SYMLINK}
- ln -sf ${UBOOT_DTB_IMAGE} ${UBOOT_DTB_SYMLINK}
- ln -sf ${UBOOT_DTB_IMAGE} ${UBOOT_DTB_BINARY}
+concat_dtb_helper() {
+ if [ -e "${UBOOT_DTB_BINARY}" ]; then
+ ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY}
+ ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK}
fi
- if [ -f ${B}/${UBOOT_NODTB_BINARY} ]; then
- install ${B}/${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
- rm -f ${UBOOT_NODTB_BINARY} ${UBOOT_NODTB_SYMLINK}
- ln -sf ${UBOOT_NODTB_IMAGE} ${UBOOT_NODTB_SYMLINK}
- ln -sf ${UBOOT_NODTB_IMAGE} ${UBOOT_NODTB_BINARY}
+
+ if [ -f "${UBOOT_NODTB_BINARY}" ]; then
+ install ${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
+ ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_SYMLINK}
+ ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_BINARY}
fi
-}
-do_concat_dtb () {
# Concatenate U-Boot w/o DTB & DTB with public key
# (cf. kernel-fitimage.bbclass for more details)
- if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ]; then
- if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
- [ -e "${DEPLOYDIR}/${UBOOT_DTB_IMAGE}" ]; then
+ deployed_uboot_dtb_binary='${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_IMAGE}'
+ if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
+ [ -e "$deployed_uboot_dtb_binary" ]; then
+ oe_runmake EXT_DTB=$deployed_uboot_dtb_binary
+ install ${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
+ elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "$deployed_uboot_dtb_binary" ]; then
+ cd ${DEPLOYDIR}
+ cat ${UBOOT_NODTB_IMAGE} $deployed_uboot_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
+ else
+ bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
+ fi
+}
+
+concat_dtb() {
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
+ mkdir -p ${DEPLOYDIR}
+ if [ -n "${UBOOT_CONFIG}" ]; then
+ for config in ${UBOOT_MACHINE}; do
+ CONFIG_B_PATH="${config}"
+ cd ${B}/${config}
+ concat_dtb_helper
+ done
+ else
+ CONFIG_B_PATH=""
cd ${B}
- oe_runmake EXT_DTB=${DEPLOYDIR}/${UBOOT_DTB_IMAGE}
- install ${S}/${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
- install ${S}/${UBOOT_BINARY} ${DEPLOY_DIR_IMAGE}/${UBOOT_IMAGE}
- elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "${DEPLOYDIR}/${UBOOT_DTB_IMAGE}" ]; then
- cd ${DEPLOYDIR}
- cat ${UBOOT_NODTB_IMAGE} ${UBOOT_DTB_IMAGE} | tee ${B}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
+ concat_dtb_helper
+ fi
+ fi
+}
+
+# Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
+# signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
+install_helper() {
+ if [ -f "${UBOOT_DTB_BINARY}" ]; then
+ install -d ${D}${datadir}
+ # UBOOT_DTB_BINARY is a symlink to UBOOT_DTB_IMAGE, so we
+ # need both of them.
+ install ${UBOOT_DTB_BINARY} ${D}${datadir}/${UBOOT_DTB_IMAGE}
+ ln -sf ${UBOOT_DTB_IMAGE} ${D}${datadir}/${UBOOT_DTB_BINARY}
+ else
+ bbwarn "${UBOOT_DTB_BINARY} not found"
+ fi
+}
+
+do_install_append() {
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
+ if [ -n "${UBOOT_CONFIG}" ]; then
+ for config in ${UBOOT_MACHINE}; do
+ cd ${B}/${config}
+ install_helper
+ done
else
- bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
+ cd ${B}
+ install_helper
fi
fi
}
python () {
- uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot', True) or 'u-boot'
- if d.getVar('UBOOT_SIGN_ENABLE', True) == '1' and d.getVar('PN', True) == uboot_pn:
- kernel_pn = d.getVar('PREFERRED_PROVIDER_virtual/kernel', True)
+ if d.getVar('UBOOT_SIGN_ENABLE') == '1' and d.getVar('PN') == d.getVar('UBOOT_PN') and d.getVar('UBOOT_DTB_BINARY'):
+ kernel_pn = d.getVar('PREFERRED_PROVIDER_virtual/kernel')
- # u-boot.dtb and u-boot-nodtb.bin are deployed _before_ do_deploy
- # Thus, do_deploy_setscene will also populate them in DEPLOY_IMAGE_DIR
- bb.build.addtask('do_deploy_dtb', 'do_deploy', 'do_compile', d)
+ # Make "bitbake u-boot -cdeploy" deploys the signed u-boot.dtb
+ d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % kernel_pn)
- # do_concat_dtb is scheduled _before_ do_install as it overwrite the
- # u-boot.bin in both DEPLOYDIR and DEPLOY_IMAGE_DIR.
- bb.build.addtask('do_concat_dtb', 'do_install', None, d)
- d.appendVarFlag('do_concat_dtb', 'depends', ' %s:do_assemble_fitimage' % kernel_pn)
+        # The kernel's do_deploy is a little special, so we can't use
+ # do_deploy_append, otherwise it would override
+ # kernel_do_deploy.
+ d.appendVarFlag('do_deploy', 'prefuncs', ' concat_dtb')
}
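A hedged configuration sketch for turning the signing flow on, e.g. from local.conf (the key directory and name are placeholders; the key variables are consumed by the kernel's fitimage signing, not by this class directly):

    UBOOT_SIGN_ENABLE = "1"
    UBOOT_SIGN_KEYDIR = "/path/to/keys"
    UBOOT_SIGN_KEYNAME = "dev"

With this set, the u-boot recipe installs u-boot.dtb into ${datadir}, the kernel signs it during do_assemble_fitimage, and concat_dtb later stitches the signed DTB back onto u-boot-nodtb as a do_deploy prefunc.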
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
index 89cec07d78..9f8645a36a 100644
--- a/meta/classes/uninative.bbclass
+++ b/meta/classes/uninative.bbclass
@@ -1,22 +1,16 @@
-UNINATIVE_LOADER ?= "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', 'ld-linux.so.2', d)}"
+UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}"
+UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
UNINATIVE_URL ?= "unset"
-UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.bz2"
+UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.xz"
# Example checksums
-#UNINATIVE_CHECKSUM[i586] = "dead"
+#UNINATIVE_CHECKSUM[aarch64] = "dead"
+#UNINATIVE_CHECKSUM[i686] = "dead"
#UNINATIVE_CHECKSUM[x86_64] = "dead"
UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
-# https://wiki.debian.org/GCC5
-# We may see binaries built with gcc5 run or linked into gcc4 environment
-# so use the older libstdc++ standard for now until we don't support gcc4
-# on the host system.
-BUILD_CXXFLAGS_append = " -D_GLIBCXX_USE_CXX11_ABI=0"
-
-#
-# icu configure defaults to CXX11 if no -std= option is passed in CXXFLAGS
-# therefore pass one
-BUILD_CXXFLAGS_append_pn-icu-native = " -std=c++98"
+# Enabling uninative will change the following variables, so they need to go on the parsing white list to prevent multiple recipe parsing
+BB_HASHCONFIG_WHITELIST += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
addhandler uninative_event_fetchloader
uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
@@ -30,11 +24,11 @@ python uninative_event_fetchloader() {
loader isn't already present.
"""
- chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH", True), True)
+ chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH"))
if not chksum:
- bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH", True))
+ bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH"))
- loader = d.getVar("UNINATIVE_LOADER", True)
+ loader = d.getVar("UNINATIVE_LOADER")
loaderchksum = loader + ".chksum"
if os.path.exists(loader) and os.path.exists(loaderchksum):
with open(loaderchksum, "r") as f:
@@ -47,18 +41,24 @@ python uninative_event_fetchloader() {
# Save and restore cwd as Fetch.download() does a chdir()
olddir = os.getcwd()
- tarball = d.getVar("UNINATIVE_TARBALL", True)
- tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR", True), chksum)
+ tarball = d.getVar("UNINATIVE_TARBALL")
+ tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
tarballpath = os.path.join(tarballdir, tarball)
- if not os.path.exists(tarballpath):
+ if not os.path.exists(tarballpath + ".done"):
bb.utils.mkdirhier(tarballdir)
- if d.getVar("UNINATIVE_URL", True) == "unset":
+ if d.getVar("UNINATIVE_URL") == "unset":
bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
localdata = bb.data.createCopy(d)
localdata.setVar('FILESPATH', "")
localdata.setVar('DL_DIR', tarballdir)
+ # Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work
+ # and we can't easily put 'chksum' into the url path from a url parameter with
+ # the current fetcher url handling
+ ownmirror = d.getVar('SOURCE_MIRROR_URL')
+ if ownmirror:
+ localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} ${SOURCE_MIRROR_URL}/uninative/%s/${UNINATIVE_TARBALL}" % chksum)
srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
bb.note("Fetching uninative binary shim from %s" % srcuri)
@@ -67,16 +67,44 @@ python uninative_event_fetchloader() {
fetcher.download()
localpath = fetcher.localpath(srcuri)
if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath):
+ # Follow the symlink behavior from the bitbake fetch2.
+ # This will cover the case where an existing symlink is broken
+ # as well as if there are two processes trying to create it
+ # at the same time.
+ if os.path.islink(tarballpath):
+ # Broken symbolic link
+ os.unlink(tarballpath)
+
+ # Deal with two processes trying to make symlink at once
+ try:
os.symlink(localpath, tarballpath)
-
- cmd = d.expand("mkdir -p ${STAGING_DIR}-uninative; cd ${STAGING_DIR}-uninative; tar -xjf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; ${STAGING_DIR}-uninative/relocate_sdk.py ${STAGING_DIR}-uninative/${BUILD_ARCH}-linux ${UNINATIVE_LOADER} ${UNINATIVE_LOADER} ${STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative ${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
- subprocess.check_call(cmd, shell=True)
+ except FileExistsError:
+ pass
+
+ # ldd output is "ldd (Ubuntu GLIBC 2.23-0ubuntu10) 2.23", extract last option from first line
+ glibcver = subprocess.check_output(["ldd", "--version"]).decode('utf-8').split('\n')[0].split()[-1]
+ if bb.utils.vercmp_string(d.getVar("UNINATIVE_MAXGLIBCVERSION"), glibcver) < 0:
+        raise RuntimeError("Your host glibc version (%s) is newer than that in uninative (%s). Disabling uninative so that sstate is not corrupted." % (glibcver, d.getVar("UNINATIVE_MAXGLIBCVERSION")))
+
+ cmd = d.expand("\
+mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; \
+cd ${UNINATIVE_STAGING_DIR}-uninative; \
+tar -xJf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; \
+${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
+ ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux \
+ ${UNINATIVE_LOADER} \
+ ${UNINATIVE_LOADER} \
+ ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
+ ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
+ subprocess.check_output(cmd, shell=True)
with open(loaderchksum, "w") as f:
f.write(chksum)
enable_uninative(d)
+ except RuntimeError as e:
+ bb.warn(str(e))
except bb.fetch2.BBFetchException as exc:
bb.warn("Disabling uninative as unable to fetch uninative tarball: %s" % str(exc))
bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
@@ -96,11 +124,15 @@ python uninative_event_enable() {
}
def enable_uninative(d):
- loader = d.getVar("UNINATIVE_LOADER", True)
+ loader = d.getVar("UNINATIVE_LOADER")
if os.path.exists(loader):
bb.debug(2, "Enabling uninative")
- d.setVar("NATIVELSBSTRING", "universal")
+ d.setVar("NATIVELSBSTRING", "universal%s" % oe.utils.host_gcc_version(d))
d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp")
+ d.appendVarFlag("SSTATEPOSTUNPACKFUNCS", "vardepvalueexclude", "| uninative_changeinterp")
+ d.appendVar("BUILD_LDFLAGS", " -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER}")
+ d.appendVarFlag("BUILD_LDFLAGS", "vardepvalueexclude", "| -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER}")
+ d.appendVarFlag("BUILD_LDFLAGS", "vardepsexclude", "UNINATIVE_LOADER")
d.prependVar("PATH", "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
python uninative_changeinterp () {
@@ -111,7 +143,7 @@ python uninative_changeinterp () {
if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)):
return
- sstateinst = d.getVar('SSTATE_INSTDIR', True)
+ sstateinst = d.getVar('SSTATE_INSTDIR')
for walkroot, dirs, files in os.walk(sstateinst):
for file in files:
if file.endswith(".so") or ".so." in file:
@@ -130,11 +162,5 @@ python uninative_changeinterp () {
if not elf.isDynamic():
continue
- try:
- subprocess.check_output(("patchelf-uninative", "--set-interpreter",
- d.getVar("UNINATIVE_LOADER", True), f),
- stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("'%s' failed with exit code %d and the following output:\n%s" %
- (e.cmd, e.returncode, e.output))
+ subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
}
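For context, a distro typically enables uninative by providing a download location and per-architecture checksums, roughly as below (the URL and checksum are placeholders; in practice these ship in a distro include file):

    UNINATIVE_URL = "http://example.com/uninative/2.0/"
    UNINATIVE_CHECKSUM[x86_64] = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
    UNINATIVE_MAXGLIBCVERSION = "2.27"

The checksum doubles as the download subdirectory and as the sha256sum passed to the fetcher above, and UNINATIVE_MAXGLIBCVERSION feeds the new host-glibc sanity check.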
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
index 1fdd681315..8c2b66e7f1 100644
--- a/meta/classes/update-alternatives.bbclass
+++ b/meta/classes/update-alternatives.bbclass
@@ -65,9 +65,11 @@ ALTERNATIVE_PRIORITY = "10"
 # and include that variable in the set.
UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
+PACKAGE_WRITE_DEPS += "virtual/update-alternatives-native"
+
def gen_updatealternativesvardeps(d):
- pkgs = (d.getVar("PACKAGES", True) or "").split()
- vars = (d.getVar("UPDALTVARS", True) or "").split()
+ pkgs = (d.getVar("PACKAGES") or "").split()
+ vars = (d.getVar("UPDALTVARS") or "").split()
# First compute them for non_pkg versions
for v in vars:
@@ -84,14 +86,24 @@ def gen_updatealternativesvardeps(d):
d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
def ua_extend_depends(d):
- if not 'virtual/update-alternatives' in d.getVar('PROVIDES', True):
+ if not 'virtual/update-alternatives' in d.getVar('PROVIDES'):
d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives')
-python __anonymous() {
+def update_alternatives_enabled(d):
# Update Alternatives only works on target packages...
if bb.data.inherits_class('native', d) or \
bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or \
bb.data.inherits_class('cross-canadian', d):
+ return False
+
+ # Disable when targeting mingw32 (no target support)
+ if d.getVar("TARGET_OS") == "mingw32":
+ return False
+
+ return True
+
+python __anonymous() {
+ if not update_alternatives_enabled(d):
return
# compute special vardeps
@@ -103,8 +115,8 @@ python __anonymous() {
def gen_updatealternativesvars(d):
ret = []
- pkgs = (d.getVar("PACKAGES", True) or "").split()
- vars = (d.getVar("UPDALTVARS", True) or "").split()
+ pkgs = (d.getVar("PACKAGES") or "").split()
+ vars = (d.getVar("UPDALTVARS") or "").split()
for v in vars:
ret.append(v + "_VARDEPS")
@@ -119,27 +131,43 @@ def gen_updatealternativesvars(d):
populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}"
# We need to do the rename after the image creation step, but before
-# the split and strip steps.. packagecopy seems to be the earliest reasonable
-# place.
-python perform_packagecopy_append () {
+# the split and strip steps.. PACKAGE_PREPROCESS_FUNCS is the right
+# place for that.
+PACKAGE_PREPROCESS_FUNCS += "apply_update_alternative_renames"
+python apply_update_alternative_renames () {
+ if not update_alternatives_enabled(d):
+ return
+
+ import re
+
+ def update_files(alt_target, alt_target_rename, pkg, d):
+ f = d.getVar('FILES_' + pkg)
+ if f:
+ f = re.sub(r'(^|\s)%s(\s|$)' % re.escape (alt_target), r'\1%s\2' % alt_target_rename, f)
+ d.setVar('FILES_' + pkg, f)
+
# Check for deprecated usage...
- pn = d.getVar('BPN', True)
- if d.getVar('ALTERNATIVE_LINKS', True) != None:
+ pn = d.getVar('BPN')
+ if d.getVar('ALTERNATIVE_LINKS') != None:
bb.fatal('%s: Use of ALTERNATIVE_LINKS/ALTERNATIVE_PATH/ALTERNATIVE_NAME is no longer supported, please convert to the updated syntax, see update-alternatives.bbclass for more info.' % pn)
# Do actual update alternatives processing
- pkgdest = d.getVar('PKGD', True)
- for pkg in (d.getVar('PACKAGES', True) or "").split():
+ pkgdest = d.getVar('PKGD')
+ for pkg in (d.getVar('PACKAGES') or "").split():
# If the src == dest, we know we need to rename the dest by appending ${BPN}
- link_rename = {}
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
- alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
+ link_rename = []
+ for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
if not alt_link:
- alt_link = "%s/%s" % (d.getVar('bindir', True), alt_name)
+ alt_link = "%s/%s" % (d.getVar('bindir'), alt_name)
d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link)
+ if alt_link.startswith(os.path.join(d.getVar('sysconfdir'), 'init.d')):
+ # Managing init scripts does not work (bug #10433), foremost
+ # because of a race with update-rc.d
+ bb.fatal("Using update-alternatives for managing SysV init scripts is not supported")
- alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
- alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
+ alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
+ alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
# Sometimes alt_target is specified as relative to the link name.
alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
@@ -153,10 +181,11 @@ python perform_packagecopy_append () {
elif os.path.lexists(src):
if os.path.islink(src):
# Delay rename of links
- link_rename[alt_target] = alt_target_rename
+ link_rename.append((alt_target, alt_target_rename))
else:
bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename))
os.rename(src, dest)
+ update_files(alt_target, alt_target_rename, pkg, d)
else:
bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename))
continue
@@ -164,92 +193,124 @@ python perform_packagecopy_append () {
# Process delayed link names
# Do these after other renames so we can correct broken links
- for alt_target in link_rename:
+ for (alt_target, alt_target_rename) in link_rename:
src = '%s/%s' % (pkgdest, alt_target)
- dest = '%s/%s' % (pkgdest, link_rename[alt_target])
- link = os.readlink(src)
+ dest = '%s/%s' % (pkgdest, alt_target_rename)
link_target = oe.path.realpath(src, pkgdest, True)
if os.path.lexists(link_target):
# Ok, the link_target exists, we can rename
- bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, link_rename[alt_target]))
+ bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, alt_target_rename))
os.rename(src, dest)
else:
# Try to resolve the broken link to link.${BPN}
link_maybe = '%s.%s' % (os.readlink(src), pn)
if os.path.lexists(os.path.join(os.path.dirname(src), link_maybe)):
# Ok, the renamed link target exists.. create a new link, and remove the original
- bb.note('%s: Creating new link %s -> %s' % (pn, link_rename[alt_target], link_maybe))
+ bb.note('%s: Creating new link %s -> %s' % (pn, alt_target_rename, link_maybe))
os.symlink(link_maybe, dest)
os.unlink(src)
else:
bb.warn('%s: Unable to resolve dangling symlink: %s' % (pn, alt_target))
+ continue
+ update_files(alt_target, alt_target_rename, pkg, d)
}
+def update_alternatives_alt_targets(d, pkg):
+ """
+ Returns the update-alternatives metadata for a package.
+
+ The returned format is a list of tuples where the tuple contains:
+ alt_name: The binary name
+ alt_link: The path for the binary (Shared by different packages)
+ alt_target: The path for the renamed binary (Unique per package)
+ alt_priority: The priority of the alt_target
+
+ All the alt_targets will be installed into the sysroot. The alt_link is
+ a symlink pointing to the alt_target with the highest priority.
+ """
+
+ pn = d.getVar('BPN')
+ pkgdest = d.getVar('PKGD')
+ updates = list()
+ for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
+ alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or \
+ d.getVarFlag('ALTERNATIVE_TARGET', alt_name) or \
+ d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
+ d.getVar('ALTERNATIVE_TARGET') or \
+ alt_link
+ alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name) or \
+ d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name) or \
+ d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg) or \
+ d.getVar('ALTERNATIVE_PRIORITY')
+
+ # This shouldn't trigger, as it should have been resolved earlier!
+ if alt_link == alt_target:
+ bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
+ alt_target = '%s.%s' % (alt_target, pn)
+
+ if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
+ bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
+ continue
+
+ alt_target = os.path.normpath(alt_target)
+ updates.append( (alt_name, alt_link, alt_target, alt_priority) )
+
+ return updates
+
PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives "
python populate_packages_updatealternatives () {
- pn = d.getVar('BPN', True)
+ if not update_alternatives_enabled(d):
+ return
# Do actual update alternatives processing
- pkgdest = d.getVar('PKGD', True)
- for pkg in (d.getVar('PACKAGES', True) or "").split():
+ for pkg in (d.getVar('PACKAGES') or "").split():
# Create post install/removal scripts
alt_setup_links = ""
alt_remove_links = ""
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
- alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
- alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
- alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
- # Sometimes alt_target is specified as relative to the link name.
- alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
-
- alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name, True)
- alt_priority = alt_priority or d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg, True) or d.getVar('ALTERNATIVE_PRIORITY', True)
-
- # This shouldn't trigger, as it should have been resolved earlier!
- if alt_link == alt_target:
- bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
- alt_target = '%s.%s' % (alt_target, pn)
-
- if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
- bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
- continue
-
- # Default to generate shell script.. eventually we may want to change this...
- alt_target = os.path.normpath(alt_target)
-
+ updates = update_alternatives_alt_targets(d, pkg)
+ for alt_name, alt_link, alt_target, alt_priority in updates:
alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target)
if alt_setup_links:
# RDEPENDS setup
- provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives', True)
+ provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives')
if provider:
#bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
bb.note('%s' % alt_setup_links)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True) or '#!/bin/sh\n'
- postinst += alt_setup_links
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
+ if postinst:
+ postinst = alt_setup_links + postinst
+ else:
+ postinst = '#!/bin/sh\n' + alt_setup_links
d.setVar('pkg_postinst_%s' % pkg, postinst)
bb.note('%s' % alt_remove_links)
- prerm = d.getVar('pkg_prerm_%s' % pkg, True) or '#!/bin/sh\n'
+ prerm = d.getVar('pkg_prerm_%s' % pkg) or '#!/bin/sh\n'
prerm += alt_remove_links
d.setVar('pkg_prerm_%s' % pkg, prerm)
}
python package_do_filedeps_append () {
- pn = d.getVar('BPN', True)
- pkgdest = d.getVar('PKGDEST', True)
+ if update_alternatives_enabled(d):
+ apply_update_alternative_provides(d)
+}
+
+def apply_update_alternative_provides(d):
+ pn = d.getVar('BPN')
+ pkgdest = d.getVar('PKGDEST')
- for pkg in packages.split():
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
- alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
- alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
- alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
+ for pkg in d.getVar('PACKAGES').split():
+ for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
+ alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
+ alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
if alt_link == alt_target:
bb.warn('%s: alt_link == alt_target: %s == %s' % (pn, alt_link, alt_target))
@@ -261,7 +322,6 @@ python package_do_filedeps_append () {
# Add file provide
trans_target = oe.package.file_translate(alt_target)
d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link)
- if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg, True) or ""):
+ if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg) or ""):
d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target)
-}
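A hedged recipe-side sketch of the metadata consumed above (busybox-style, with illustrative paths):

    ALTERNATIVE_${PN} = "vi"
    ALTERNATIVE_LINK_NAME[vi] = "${base_bindir}/vi"
    ALTERNATIVE_TARGET[vi] = "${base_bindir}/busybox.nosuid"
    ALTERNATIVE_PRIORITY = "100"

update_alternatives_alt_targets() resolves this to the tuple ('vi', '/bin/vi', '/bin/busybox.nosuid', '100'), which populate_packages_updatealternatives turns into the update-alternatives --install and --remove calls in the postinst and prerm scripts.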
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
index 82b80245d4..1366fee653 100644
--- a/meta/classes/update-rc.d.bbclass
+++ b/meta/classes/update-rc.d.bbclass
@@ -1,6 +1,6 @@
UPDATERCPN ?= "${PN}"
-DEPENDS_append_class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d-native update-rc.d initscripts', '', d)}"
+DEPENDS_append_class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}"
UPDATERCD = "update-rc.d"
UPDATERCD_class-cross = ""
@@ -11,22 +11,19 @@ INITSCRIPT_PARAMS ?= "defaults"
INIT_D_DIR = "${sysconfdir}/init.d"
-updatercd_preinst() {
-if [ -z "$D" -a -f "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
- ${INIT_D_DIR}/${INITSCRIPT_NAME} stop
-fi
-if type update-rc.d >/dev/null 2>/dev/null; then
- if [ -n "$D" ]; then
- OPT="-f -r $D"
- else
- OPT="-f"
- fi
- update-rc.d $OPT ${INITSCRIPT_NAME} remove
-fi
-}
+def use_updatercd(d):
+ # If the distro supports both sysvinit and systemd, and the current recipe
+ # supports systemd, only call update-rc.d on rootfs creation or if systemd
+ # is not running. That's because systemctl enable/disable will already call
+ # update-rc.d if it detects initscripts.
+ if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and bb.data.inherits_class('systemd', d):
+ return '[ -n "$D" -o ! -d /run/systemd/system ]'
+ return 'true'
+
+PACKAGE_WRITE_DEPS += "update-rc.d-native"
updatercd_postinst() {
-if type update-rc.d >/dev/null 2>/dev/null; then
+if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
if [ -n "$D" ]; then
OPT="-r $D"
else
@@ -37,13 +34,13 @@ fi
}
updatercd_prerm() {
-if [ -z "$D" ]; then
- ${INIT_D_DIR}/${INITSCRIPT_NAME} stop
+if ${@use_updatercd(d)} && [ -z "$D" -a -x "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
+ ${INIT_D_DIR}/${INITSCRIPT_NAME} stop || :
fi
}
updatercd_postrm() {
-if type update-rc.d >/dev/null 2>/dev/null; then
+if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
if [ -n "$D" ]; then
OPT="-f -r $D"
else
@@ -57,9 +54,9 @@ fi
def update_rc_after_parse(d):
if d.getVar('INITSCRIPT_PACKAGES', False) == None:
if d.getVar('INITSCRIPT_NAME', False) == None:
- raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE', False))
+ bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE', False))
if d.getVar('INITSCRIPT_PARAMS', False) == None:
- raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE', False))
+ bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE', False))
python __anonymous() {
update_rc_after_parse(d)
@@ -68,7 +65,7 @@ python __anonymous() {
PACKAGESPLITFUNCS_prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
PACKAGESPLITFUNCS_remove_class-nativesdk = "populate_packages_updatercd "
-populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_preinst updatercd_postinst"
+populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_postinst"
populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
python populate_packages_updatercd () {
@@ -80,52 +77,45 @@ python populate_packages_updatercd () {
return
statement = "grep -q -w '/etc/init.d/functions' %s" % path
if subprocess.call(statement, shell=True) == 0:
- mlprefix = d.getVar('MLPREFIX', True) or ""
- d.appendVar('RDEPENDS_' + pkg, ' %sinitscripts-functions' % (mlprefix))
+ mlprefix = d.getVar('MLPREFIX') or ""
+ d.appendVar('RDEPENDS_' + pkg, ' %sinitd-functions' % (mlprefix))
def update_rcd_package(pkg):
- bb.debug(1, 'adding update-rc.d calls to preinst/postinst/prerm/postrm for %s' % pkg)
+ bb.debug(1, 'adding update-rc.d calls to postinst/prerm/postrm for %s' % pkg)
localdata = bb.data.createCopy(d)
- overrides = localdata.getVar("OVERRIDES", True)
+ overrides = localdata.getVar("OVERRIDES")
localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides))
- bb.data.update_data(localdata)
update_rcd_auto_depend(pkg)
- preinst = d.getVar('pkg_preinst_%s' % pkg, True)
- if not preinst:
- preinst = '#!/bin/sh\n'
- preinst += localdata.getVar('updatercd_preinst', True)
- d.setVar('pkg_preinst_%s' % pkg, preinst)
-
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += localdata.getVar('updatercd_postinst', True)
+ postinst += localdata.getVar('updatercd_postinst')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg, True)
+ prerm = d.getVar('pkg_prerm_%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
- prerm += localdata.getVar('updatercd_prerm', True)
+ prerm += localdata.getVar('updatercd_prerm')
d.setVar('pkg_prerm_%s' % pkg, prerm)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += localdata.getVar('updatercd_postrm', True)
+ postrm += localdata.getVar('updatercd_postrm')
d.setVar('pkg_postrm_%s' % pkg, postrm)
d.appendVar('RRECOMMENDS_' + pkg, " ${MLPREFIX}${UPDATERCD}")
# Check that this class isn't being inhibited (generally, by
# systemd.bbclass) before doing any work.
- if not d.getVar("INHIBIT_UPDATERCD_BBCLASS", True):
- pkgs = d.getVar('INITSCRIPT_PACKAGES', True)
+ if not d.getVar("INHIBIT_UPDATERCD_BBCLASS"):
+ pkgs = d.getVar('INITSCRIPT_PACKAGES')
if pkgs == None:
- pkgs = d.getVar('UPDATERCPN', True)
- packages = (d.getVar('PACKAGES', True) or "").split()
+ pkgs = d.getVar('UPDATERCPN')
+ packages = (d.getVar('PACKAGES') or "").split()
if not pkgs in packages and packages != []:
pkgs = packages[0]
for pkg in pkgs.split():
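As a usage sketch, a recipe shipping a SysV init script sets something like this (names illustrative):

    inherit update-rc.d
    INITSCRIPT_NAME = "mydaemon"
    INITSCRIPT_PARAMS = "defaults 90 10"

The generated postinst then runs 'update-rc.d mydaemon defaults 90 10' guarded by the use_updatercd() test above, so on a running systemd image the call is skipped and left to systemctl.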
diff --git a/meta/classes/upstream-version-is-even.bbclass b/meta/classes/upstream-version-is-even.bbclass
index 89556ed7d4..256c752423 100644
--- a/meta/classes/upstream-version-is-even.bbclass
+++ b/meta/classes/upstream-version-is-even.bbclass
@@ -2,4 +2,4 @@
# accepts even minor versions (i.e. 3.0.x, 3.2.x, 3.4.x, etc.)
# This scheme is used by Gnome and a number of other projects
# to signify stable releases vs development releases.
-UPSTREAM_CHECK_REGEX = "(?P<pver>\d+\.(\d*[02468])+(\.\d+)+)"
+UPSTREAM_CHECK_REGEX = "[^\d\.](?P<pver>\d+\.(\d*[02468])+(\.\d+)+)\.tar"
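A quick standalone check of the tightened regex (the tarball names are made up):

    import re
    pat = re.compile(r"[^\d\.](?P<pver>\d+\.(\d*[02468])+(\.\d+)+)\.tar")
    for name in ("glib-2.54.3.tar.xz", "glib-2.55.1.tar.xz", "v2.54.3-rc1"):
        m = pat.search(name)
        print(name, "->", m.group("pver") if m else "no match")

Only the even-minor tarball matches; the odd development release is rejected as before, and the new leading guard plus the trailing '.tar' stop the pattern from latching onto version-like substrings elsewhere on the upstream page.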
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
index 46d4a4b3da..3a1b5f1320 100644
--- a/meta/classes/useradd-staticids.bbclass
+++ b/meta/classes/useradd-staticids.bbclass
@@ -1,22 +1,10 @@
# In order to support a deterministic set of 'dynamic' users/groups,
# we need a function to reformat the params based on a static file
def update_useradd_static_config(d):
- import argparse
import itertools
import re
import errno
-
- class myArgumentParser( argparse.ArgumentParser ):
- def _print_message(self, message, file=None):
- bb.warn("%s - %s: %s" % (d.getVar('PN', True), pkg, message))
-
- # This should never be called...
- def exit(self, status=0, message=None):
- message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN', True), pkg))
- error(message)
-
- def error(self, message):
- raise bb.build.FuncFailed(message)
+ import oe.useradd
def list_extend(iterable, length, obj = None):
"""Ensure that iterable is the specified length by extending with obj
@@ -50,64 +38,46 @@ def update_useradd_static_config(d):
return id_table
- def handle_missing_id(id, type, pkg):
+ def handle_missing_id(id, type, pkg, files, var, value):
# For backwards compatibility we accept "1" in addition to "error"
- if d.getVar('USERADD_ERROR_DYNAMIC', True) == 'error' or d.getVar('USERADD_ERROR_DYNAMIC', True) == '1':
- #bb.error("Skipping recipe %s, package %s which adds %sname %s does not have a static ID defined." % (d.getVar('PN', True), pkg, type, id))
- raise bb.build.FuncFailed("%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN', True), pkg, type, id))
- elif d.getVar('USERADD_ERROR_DYNAMIC', True) == 'warn':
- bb.warn("%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN', True), pkg, type, id))
+ error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC')
+ msg = "%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN'), pkg, type, id)
+ if files:
+ msg += " Add %s to one of these files: %s" % (id, files)
+ else:
+ msg += " %s file(s) not found in BBPATH: %s" % (var, value)
+ if error_dynamic == 'error' or error_dynamic == '1':
+ raise NotImplementedError(msg)
+ elif error_dynamic == 'warn':
+ bb.warn(msg)
+ elif error_dynamic == 'skip':
+ raise bb.parse.SkipRecipe(msg)
+
+ # Return a list of configuration files based on either the default
+ # Return a list of configuration files: either the defaults (files/passwd
+ # for USERADD_UID_TABLES, files/group for USERADD_GID_TABLES) or the
+ # explicit contents of those variables.
+ # Paths are resolved via BBPATH.
+ def get_table_list(d, var, default):
+ files = []
+ bbpath = d.getVar('BBPATH')
+ tables = d.getVar(var)
+ if not tables:
+ tables = default
+ for conf_file in tables.split():
+ files.append(bb.utils.which(bbpath, conf_file))
+ return (' '.join(files), var, default)
# We parse and rewrite the useradd components
- def rewrite_useradd(params):
- # The following comes from --help on useradd from shadow
- parser = myArgumentParser(prog='useradd')
- parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
- parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account")
- parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account")
- parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true")
- parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account")
- parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account")
- parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account")
- parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account")
- parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
- parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
- parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
- parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_const", const=True)
- parser.add_argument("-M", "--no-create-home", dest="create_home", help="do not create the user's home directory", action="store_const", const=False)
- parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
- parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
- parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
- parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
- parser.add_argument("-r", "--system", help="create a system account", action="store_true")
- parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
- parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
- parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_const", const=True)
- parser.add_argument("LOGIN", help="Login name of the new user")
-
- # Return a list of configuration files based on either the default
- # files/passwd or the contents of USERADD_UID_TABLES
- # paths are resolved via BBPATH
- def get_passwd_list(d):
- str = ""
- bbpath = d.getVar('BBPATH', True)
- passwd_tables = d.getVar('USERADD_UID_TABLES', True)
- if not passwd_tables:
- passwd_tables = 'files/passwd'
- for conf_file in passwd_tables.split():
- str += " %s" % bb.utils.which(bbpath, conf_file)
- return str
+ def rewrite_useradd(params, is_pkg):
+ parser = oe.useradd.build_useradd_parser()
newparams = []
users = None
- for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
- param = param.strip()
- if not param:
- continue
+ for param in oe.useradd.split_commands(params):
try:
- uaargs = parser.parse_args(re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
- except:
- raise bb.build.FuncFailed("%s: Unable to parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
+ uaargs = parser.parse_args(oe.useradd.split_args(param))
+ except Exception as e:
+ bb.fatal("%s: Unable to parse arguments for USERADD_PARAM_%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all passwd files specified in USERADD_UID_TABLES or files/passwd
# Use the standard passwd layout:
@@ -121,17 +91,19 @@ def update_useradd_static_config(d):
# all new users get the default ('*' which prevents login) until the user is
# specifically configured by the system admin.
if not users:
- users = merge_files(get_passwd_list(d), 7)
+ files, table_var, table_value = get_table_list(d, 'USERADD_UID_TABLES', 'files/passwd')
+ users = merge_files(files, 7)
+ type = 'system user' if uaargs.system else 'normal user'
if uaargs.LOGIN not in users:
- if not uaargs.uid or not uaargs.uid.isdigit() or not uaargs.gid:
- handle_missing_id(uaargs.LOGIN, 'user', pkg)
+ handle_missing_id(uaargs.LOGIN, type, pkg, files, table_var, table_value)
+ newparams.append(param)
continue
field = users[uaargs.LOGIN]
if uaargs.uid and field[2] and (uaargs.uid != field[2]):
- bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.uid, field[2]))
+ bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN'), uaargs.LOGIN, uaargs.uid, field[2]))
uaargs.uid = field[2] or uaargs.uid
# Determine the possible groupname
@@ -141,9 +113,13 @@ def update_useradd_static_config(d):
# So if the implicit username-group creation is on, then the implicit groupname (LOGIN)
# is used, and we disable the user_group option.
#
- user_group = uaargs.user_group is None or uaargs.user_group is True
- uaargs.groupname = uaargs.LOGIN if user_group else uaargs.gid
- uaargs.groupid = field[3] or uaargs.gid or uaargs.groupname
+ if uaargs.gid:
+ uaargs.groupname = uaargs.gid
+ elif uaargs.user_group is not False:
+ uaargs.groupname = uaargs.LOGIN
+ else:
+ uaargs.groupname = 'users'
+ uaargs.groupid = field[3] or uaargs.groupname
if uaargs.groupid and uaargs.gid != uaargs.groupid:
newgroup = None
@@ -159,14 +135,16 @@ def update_useradd_static_config(d):
# We want to add a group, but we don't know its name... so we can't add the group...
# We have to assume the group has previously been added or we'll fail on the adduser...
# Note: specifying the actual gid is very rare in OE, usually the group name is specified.
- bb.warn("%s: Changing gid for login %s to %s, verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.groupid))
+ bb.warn("%s: Changing gid for login %s to %s, verify configuration files!" % (d.getVar('PN'), uaargs.LOGIN, uaargs.groupid))
uaargs.gid = uaargs.groupid
uaargs.user_group = None
- if newgroup:
- groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg, True)
+ if newgroup and is_pkg:
+ groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg)
if groupadd:
- d.setVar("GROUPADD_PARAM_%s" % pkg, "%s; %s" % (groupadd, newgroup))
+ # Only add the group if not already specified
+ if not uaargs.groupname in groupadd:
+ d.setVar("GROUPADD_PARAM_%s" % pkg, "%s; %s" % (groupadd, newgroup))
else:
d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
@@ -176,14 +154,14 @@ def update_useradd_static_config(d):
# Should be an error if a specific option is set...
if not uaargs.uid or not uaargs.uid.isdigit() or not uaargs.gid:
- handle_missing_id(uaargs.LOGIN, 'user', pkg)
+ handle_missing_id(uaargs.LOGIN, type, pkg, files, table_var, table_value)
# Reconstruct the args...
newparam = ['', ' --defaults'][uaargs.defaults]
newparam += ['', ' --base-dir %s' % uaargs.base_dir][uaargs.base_dir != None]
newparam += ['', ' --comment %s' % uaargs.comment][uaargs.comment != None]
newparam += ['', ' --home-dir %s' % uaargs.home_dir][uaargs.home_dir != None]
- newparam += ['', ' --expiredata %s' % uaargs.expiredate][uaargs.expiredate != None]
+ newparam += ['', ' --expiredate %s' % uaargs.expiredate][uaargs.expiredate != None]
newparam += ['', ' --inactive %s' % uaargs.inactive][uaargs.inactive != None]
newparam += ['', ' --gid %s' % uaargs.gid][uaargs.gid != None]
newparam += ['', ' --groups %s' % uaargs.groups][uaargs.groups != None]
@@ -194,7 +172,10 @@ def update_useradd_static_config(d):
newparam += ['', ' --no-create-home'][uaargs.create_home is False]
newparam += ['', ' --no-user-group'][uaargs.user_group is False]
newparam += ['', ' --non-unique'][uaargs.non_unique]
- newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
+ if uaargs.password != None:
+ newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
+ elif uaargs.clear_password:
+ newparam += ['', ' --clear-password %s' % uaargs.clear_password][uaargs.clear_password != None]
newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
newparam += ['', ' --system'][uaargs.system]
newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
@@ -207,42 +188,17 @@ def update_useradd_static_config(d):
return ";".join(newparams).strip()
# We parse and rewrite the groupadd components
- def rewrite_groupadd(params):
- # The following comes from --help on groupadd from shadow
- parser = myArgumentParser(prog='groupadd')
- parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
- parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group")
- parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
- parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
- parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
- parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
- parser.add_argument("-r", "--system", help="create a system account", action="store_true")
- parser.add_argument("GROUP", help="Group name of the new group")
-
- # Return a list of configuration files based on either the default
- # files/group or the contents of USERADD_GID_TABLES
- # paths are resolved via BBPATH
- def get_group_list(d):
- str = ""
- bbpath = d.getVar('BBPATH', True)
- group_tables = d.getVar('USERADD_GID_TABLES', True)
- if not group_tables:
- group_tables = 'files/group'
- for conf_file in group_tables.split():
- str += " %s" % bb.utils.which(bbpath, conf_file)
- return str
+ def rewrite_groupadd(params, is_pkg):
+ parser = oe.useradd.build_groupadd_parser()
newparams = []
groups = None
- for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
- param = param.strip()
- if not param:
- continue
+ for param in oe.useradd.split_commands(params):
try:
# If we're processing multiple lines, we could have left over values here...
- gaargs = parser.parse_args(re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
- except:
- raise bb.build.FuncFailed("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
+ gaargs = parser.parse_args(oe.useradd.split_args(param))
+ except Exception as e:
+ bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM_%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all group files specified in USERADD_GID_TABLES or files/group
# Use the standard group layout:
@@ -254,29 +210,34 @@ def update_useradd_static_config(d):
# Note: similar to the passwd file, the 'password' field is ignored
# Note: group_members is ignored, group members must be configured with the GROUPMEMS_PARAM
if not groups:
- groups = merge_files(get_group_list(d), 4)
+ files, table_var, table_value = get_table_list(d, 'USERADD_GID_TABLES', 'files/group')
+ groups = merge_files(files, 4)
+ type = 'system group' if gaargs.system else 'normal group'
if gaargs.GROUP not in groups:
- if not gaargs.gid or not gaargs.gid.isdigit():
- handle_missing_id(gaargs.GROUP, 'group', pkg)
+ handle_missing_id(gaargs.GROUP, type, pkg, files, table_var, table_value)
+ newparams.append(param)
continue
field = groups[gaargs.GROUP]
if field[2]:
if gaargs.gid and (gaargs.gid != field[2]):
- bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), gaargs.GROUP, gaargs.gid, field[2]))
+ bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN'), gaargs.GROUP, gaargs.gid, field[2]))
gaargs.gid = field[2]
if not gaargs.gid or not gaargs.gid.isdigit():
- handle_missing_id(gaargs.GROUP, 'group', pkg)
+ handle_missing_id(gaargs.GROUP, type, pkg, files, table_var, table_value)
# Reconstruct the args...
newparam = ['', ' --force'][gaargs.force]
newparam += ['', ' --gid %s' % gaargs.gid][gaargs.gid != None]
newparam += ['', ' --key %s' % gaargs.key][gaargs.key != None]
newparam += ['', ' --non-unique'][gaargs.non_unique]
- newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
+ if gaargs.password != None:
+ newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
+ elif gaargs.clear_password:
+ newparam += ['', ' --clear-password %s' % gaargs.clear_password][gaargs.clear_password != None]
newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None]
newparam += ['', ' --system'][gaargs.system]
newparam += ' %s' % gaargs.GROUP
@@ -289,33 +250,54 @@ def update_useradd_static_config(d):
# the files listed in USERADD_UID/GID_TABLES. We need to tell bitbake
# about that explicitly to trigger re-parsing and thus re-execution of
# this code when the files change.
- bbpath = d.getVar('BBPATH', True)
+ bbpath = d.getVar('BBPATH')
for varname, default in (('USERADD_UID_TABLES', 'files/passwd'),
('USERADD_GID_TABLES', 'files/group')):
- tables = d.getVar(varname, True)
+ tables = d.getVar(varname)
if not tables:
tables = default
for conf_file in tables.split():
bb.parse.mark_dependency(d, bb.utils.which(bbpath, conf_file))
# Load and process the users and groups, rewriting the adduser/addgroup params
- useradd_packages = d.getVar('USERADD_PACKAGES', True)
+ useradd_packages = d.getVar('USERADD_PACKAGES') or ""
for pkg in useradd_packages.split():
# Groupmems doesn't have anything we might want to change, so simply validating
# is a bit of a waste -- only process useradd/groupadd
- useradd_param = d.getVar('USERADD_PARAM_%s' % pkg, True)
+ useradd_param = d.getVar('USERADD_PARAM_%s' % pkg)
if useradd_param:
#bb.warn("Before: 'USERADD_PARAM_%s' - '%s'" % (pkg, useradd_param))
- d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param))
- #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg, True)))
+ d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param, True))
+ #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg)))
- groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg, True)
+ groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg)
if groupadd_param:
#bb.warn("Before: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, groupadd_param))
- d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param))
- #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg, True)))
+ d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param, True))
+ #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg)))
+
+ # Load and process extra users and groups, rewriting only adduser/addgroup params
+ pkg = d.getVar('PN')
+ extrausers = d.getVar('EXTRA_USERS_PARAMS') or ""
+
+ #bb.warn("Before: 'EXTRA_USERS_PARAMS' - '%s'" % (d.getVar('EXTRA_USERS_PARAMS')))
+ new_extrausers = []
+ for cmd in oe.useradd.split_commands(extrausers):
+ if re.match('''useradd (.*)''', cmd):
+ useradd_param = re.match('''useradd (.*)''', cmd).group(1)
+ useradd_param = rewrite_useradd(useradd_param, False)
+ cmd = 'useradd %s' % useradd_param
+ elif re.match('''groupadd (.*)''', cmd):
+ groupadd_param = re.match('''groupadd (.*)''', cmd).group(1)
+ groupadd_param = rewrite_groupadd(groupadd_param, False)
+ cmd = 'groupadd %s' % groupadd_param
+
+ new_extrausers.append(cmd)
+ new_extrausers.append('')
+ d.setVar('EXTRA_USERS_PARAMS', ';'.join(new_extrausers))
+ #bb.warn("After: 'EXTRA_USERS_PARAMS' - '%s'" % (d.getVar('EXTRA_USERS_PARAMS')))
python __anonymous() {
@@ -323,7 +305,7 @@ python __anonymous() {
and not bb.data.inherits_class('native', d):
try:
update_useradd_static_config(d)
- except bb.build.FuncFailed as f:
- bb.debug(1, "Skipping recipe %s: %s" % (d.getVar('PN', True), f))
- raise bb.parse.SkipPackage(f)
+ except NotImplementedError as f:
+ bb.debug(1, "Skipping recipe %s: %s" % (d.getVar('PN'), f))
+ raise bb.parse.SkipRecipe(f)
}
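The rewritten class resolves each login against the merged USERADD_UID_TABLES files and forces the UID found there; only when no static entry exists does handle_missing_id() apply the USERADD_ERROR_DYNAMIC policy. A minimal sketch of the lookup, assuming a table in the standard seven-field passwd layout:

    # Hypothetical static table, standard passwd layout:
    # login:password:uid:gid:comment:home:shell
    table = [
        "messagebus:x:800:800::/var/lib/dbus:/bin/false",
        "sshd:x:801:801::/run/sshd:/bin/false",
    ]

    users = {}
    for line in table:
        fields = line.strip().split(":")
        if fields[0]:
            users[fields[0]] = fields

    login = "sshd"
    if login in users:
        # fields[2] is the statically assigned UID the rewritten
        # useradd parameters are forced to use
        print("%s -> uid %s" % (login, users[login][2]))
    else:
        print("%s has no static ID; USERADD_ERROR_DYNAMIC decides the outcome" % login)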
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
index bf62ada8b5..e5f3ba24f9 100644
--- a/meta/classes/useradd.bbclass
+++ b/meta/classes/useradd.bbclass
@@ -3,7 +3,8 @@ inherit useradd_base
# base-passwd-cross provides the default passwd and group files in the
# target sysroot, and shadow -native and -sysroot provide the utilities
# and support files needed to add and modify user and group accounts
-DEPENDS_append_class-target = " base-files shadow-native shadow-sysroot shadow"
+DEPENDS_append_class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
+PACKAGE_WRITE_DEPS += "shadow-native"
# This preinstall function can be run in four different contexts:
#
@@ -31,7 +32,7 @@ if test "x$D" != "x"; then
fi
# user/group lookups should match useradd/groupadd --root
- export PSEUDO_PASSWD="$SYSROOT:${STAGING_DIR_NATIVE}"
+ export PSEUDO_PASSWD="$SYSROOT"
fi
# If we're not doing a special SSTATE/SYSROOT install
@@ -96,15 +97,33 @@ fi
}
useradd_sysroot () {
- # Pseudo may (do_install) or may not (do_populate_sysroot_setscene) be running
+ # Pseudo may (do_prepare_recipe_sysroot) or may not (do_populate_sysroot_setscene) be running
# at this point so we're explicit about the environment so pseudo can load if
# not already present.
- export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir_native}/pseudo"
+ # PSEUDO_SYSROOT can contain references to the build architecture and COMPONENT_DIR
+ # so needs the STAGING_FIXME below
+ export PSEUDO="${FAKEROOTENV} ${PSEUDO_SYSROOT}${bindir_native}/pseudo"
# Explicitly set $D since it isn't set to anything
- # before do_install
+ # before do_prepare_recipe_sysroot
D=${STAGING_DIR_TARGET}
+ # base-passwd's postinst may not have run yet, in which case we'll get called later; just exit.
+ # Beware that in some cases we might see the fake pseudo passwd here, in which case we also must
+ # exit.
+ if [ ! -f $D${sysconfdir}/passwd ] ||
+ grep -q this-is-the-pseudo-passwd $D${sysconfdir}/passwd; then
+ exit 0
+ fi
+
+ # It is also possible we may be in a recipe which doesn't have useradd dependencies and hence the
+ # useradd/groupadd tools are unavailable. If there is no dependency, we assume we don't want to
+ # create users in the sysroot
+ if ! command -v useradd; then
+ bbwarn "command useradd not found!"
+ exit 0
+ fi
+
# Add groups and users defined for all recipe packages
GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
@@ -116,49 +135,40 @@ useradd_sysroot () {
useradd_preinst
}
-useradd_sysroot_sstate () {
- if [ "${BB_CURRENTTASK}" = "package_setscene" -o "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]
- then
- useradd_sysroot
- fi
-}
-
-userdel_sysroot_sstate () {
-if test "x${STAGING_DIR_TARGET}" != "x"; then
- if [ "${BB_CURRENTTASK}" = "clean" ]; then
- export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir_native}/pseudo"
- OPT="--root ${STAGING_DIR_TARGET}"
-
- # Remove groups and users defined for package
- GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
- USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
-
- user=`echo "$USERADD_PARAM" | cut -d ';' -f 1 | awk '{ print $NF }'`
- remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2- -s | sed -e 's#[ \t]*$##'`
- while test "x$user" != "x"; do
- perform_userdel "${STAGING_DIR_TARGET}" "$OPT $user"
- user=`echo "$remaining" | cut -d ';' -f 1 | awk '{ print $NF }'`
- remaining=`echo "$remaining" | cut -d ';' -f 2- -s | sed -e 's#[ \t]*$##'`
- done
-
- user=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1 | awk '{ print $NF }'`
- remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2- -s | sed -e 's#[ \t]*$##'`
- while test "x$user" != "x"; do
- perform_groupdel "${STAGING_DIR_TARGET}" "$OPT $user"
- user=`echo "$remaining" | cut -d ';' -f 1 | awk '{ print $NF }'`
- remaining=`echo "$remaining" | cut -d ';' -f 2- -s | sed -e 's#[ \t]*$##'`
- done
-
- fi
-fi
+# The export of PSEUDO in useradd_sysroot() above contains references to
+# ${PSEUDO_SYSROOT} and ${PSEUDO_LOCALSTATEDIR}. Additionally, the logging
+# shell functions use ${LOGFIFO}. These need to be handled when restoring
+# postinst-useradd-${PN} from the sstate cache.
+EXTRA_STAGING_FIXMES += "PSEUDO_SYSROOT PSEUDO_LOCALSTATEDIR LOGFIFO"
+
+python useradd_sysroot_sstate () {
+ scriptfile = None
+ task = d.getVar("BB_CURRENTTASK")
+ if task == "package_setscene":
+ bb.build.exec_func("useradd_sysroot", d)
+ elif task == "prepare_recipe_sysroot":
+ # Used to update this recipe's own sysroot so the users/groups are available to do_install
+ scriptfile = d.expand("${RECIPE_SYSROOT}${bindir}/postinst-useradd-${PN}")
+ bb.build.exec_func("useradd_sysroot", d)
+ elif task == "populate_sysroot":
+ # Used when installed in dependent task sysroots
+ scriptfile = d.expand("${SYSROOT_DESTDIR}${bindir}/postinst-useradd-${PN}")
+
+ if scriptfile:
+ bb.utils.mkdirhier(os.path.dirname(scriptfile))
+ with open(scriptfile, 'w') as script:
+ script.write("#!/bin/sh\n")
+ bb.data.emit_func("useradd_sysroot", script, d)
+ script.write("useradd_sysroot\n")
+ os.chmod(scriptfile, 0o755)
}
-SSTATECLEANFUNCS_append_class-target = " userdel_sysroot_sstate"
-
-do_install[prefuncs] += "${SYSROOTFUNC}"
-SYSROOTFUNC_class-target = "useradd_sysroot"
+do_prepare_recipe_sysroot[postfuncs] += "${SYSROOTFUNC}"
+SYSROOTFUNC_class-target = "useradd_sysroot_sstate"
SYSROOTFUNC = ""
+SYSROOT_PREPROCESS_FUNCS += "${SYSROOTFUNC}"
+
SSTATEPREINSTFUNCS_append_class-target = " useradd_sysroot_sstate"
do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
@@ -168,13 +178,14 @@ USERADDSETSCENEDEPS = ""
# Recipe parse-time sanity checks
def update_useradd_after_parse(d):
- useradd_packages = d.getVar('USERADD_PACKAGES', True)
+ useradd_packages = d.getVar('USERADD_PACKAGES')
if not useradd_packages:
- raise bb.build.FuncFailed("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
+ bb.fatal("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
for pkg in useradd_packages.split():
- if not d.getVar('USERADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg, True):
+ d.appendVarFlag("do_populate_sysroot", "vardeps", "USERADD_PARAM_%s GROUPADD_PARAM_%s GROUPMEMS_PARAM_%s" % (pkg, pkg, pkg))
+ if not d.getVar('USERADD_PARAM_%s' % pkg) and not d.getVar('GROUPADD_PARAM_%s' % pkg) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg):
bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE', False), pkg))
python __anonymous() {
@@ -191,9 +202,9 @@ def get_all_cmd_params(d, cmd_type):
param_type = cmd_type.upper() + "_PARAM_%s"
params = []
- useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
+ useradd_packages = d.getVar('USERADD_PACKAGES') or ""
for pkg in useradd_packages.split():
- param = d.getVar(param_type % pkg, True)
+ param = d.getVar(param_type % pkg)
if param:
params.append(param.rstrip(" ;"))
@@ -209,20 +220,20 @@ fakeroot python populate_packages_prepend () {
required to execute on the target. Not doing so may cause
useradd preinst to be invoked twice, causing unwanted warnings.
"""
- preinst = d.getVar('pkg_preinst_%s' % pkg, True) or d.getVar('pkg_preinst', True)
+ preinst = d.getVar('pkg_preinst_%s' % pkg) or d.getVar('pkg_preinst')
if not preinst:
preinst = '#!/bin/sh\n'
preinst += 'bbnote () {\n\techo "NOTE: $*"\n}\n'
preinst += 'bbwarn () {\n\techo "WARNING: $*"\n}\n'
preinst += 'bbfatal () {\n\techo "ERROR: $*"\n\texit 1\n}\n'
- preinst += 'perform_groupadd () {\n%s}\n' % d.getVar('perform_groupadd', True)
- preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd', True)
- preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems', True)
- preinst += d.getVar('useradd_preinst', True)
+ preinst += 'perform_groupadd () {\n%s}\n' % d.getVar('perform_groupadd')
+ preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd')
+ preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems')
+ preinst += d.getVar('useradd_preinst')
d.setVar('pkg_preinst_%s' % pkg, preinst)
# RDEPENDS setup
- rdepends = d.getVar("RDEPENDS_%s" % pkg, True) or ""
+ rdepends = d.getVar("RDEPENDS_%s" % pkg) or ""
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-passwd'
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'shadow'
# base-files is where the default /etc/skel is packaged
@@ -233,7 +244,7 @@ fakeroot python populate_packages_prepend () {
# to packages specified by USERADD_PACKAGES
if not bb.data.inherits_class('nativesdk', d) \
and not bb.data.inherits_class('native', d):
- useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
+ useradd_packages = d.getVar('USERADD_PACKAGES') or ""
for pkg in useradd_packages.split():
update_useradd_package(pkg)
}
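useradd_sysroot_sstate() now persists the user-creation logic as an executable postinst-useradd-${PN} script so dependent sysroots can replay it. A minimal standalone sketch of that emit-and-chmod pattern, with a hypothetical path and a literal function body standing in for bb.data.emit_func():

    import os

    # Hypothetical stand-in for ${RECIPE_SYSROOT}${bindir}/postinst-useradd-${PN}
    scriptfile = "/tmp/recipe-sysroot/usr/bin/postinst-useradd-example"
    body = 'useradd_sysroot () {\n    echo "would add users/groups here"\n}\n'

    os.makedirs(os.path.dirname(scriptfile), exist_ok=True)
    with open(scriptfile, "w") as script:
        script.write("#!/bin/sh\n")
        script.write(body)                 # emit the function definition
        script.write("useradd_sysroot\n")  # ...and call it when the script runs
    os.chmod(scriptfile, 0o755)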
diff --git a/meta/classes/useradd_base.bbclass b/meta/classes/useradd_base.bbclass
index ba87edc57a..0d0bdb80f5 100644
--- a/meta/classes/useradd_base.bbclass
+++ b/meta/classes/useradd_base.bbclass
@@ -51,10 +51,10 @@ perform_groupmems () {
local groupname=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-g" || $i == "--group") print $(i+1) }'`
local username=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-a" || $i == "--add") print $(i+1) }'`
bbnote "${PN}: Running groupmems command with group $groupname and user $username"
- local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
+ local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*$" $rootdir/etc/group || true`"
if test "x$mem_exists" = "x"; then
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupmems \$opts\" || true
- mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
+ mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*$" $rootdir/etc/group || true`"
if test "x$mem_exists" = "x"; then
bbfatal "${PN}: groupmems command did not succeed."
fi
@@ -69,11 +69,21 @@ perform_groupdel () {
bbnote "${PN}: Performing groupdel with [$opts]"
local groupname=`echo "$opts" | awk '{ print $NF }'`
local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
+
if test "x$group_exists" != "x"; then
- eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupdel \$opts\" || true
- group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
- if test "x$group_exists" != "x"; then
- bbfatal "${PN}: groupdel command did not succeed."
+ local awk_input='BEGIN {FS=":"}; $1=="'$groupname'" { print $3 }'
+ local groupid=`echo "$awk_input" | awk -f- $rootdir/etc/group`
+ local awk_check_users='BEGIN {FS=":"}; $4=="'$groupid'" {print $1}'
+ local other_users=`echo "$awk_check_users" | awk -f- $rootdir/etc/passwd`
+
+ if test "x$other_users" = "x"; then
+ eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupdel \$opts\" || true
+ group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
+ if test "x$group_exists" != "x"; then
+ bbfatal "${PN}: groupdel command did not succeed."
+ fi
+ else
+ bbnote "${PN}: '$groupname' is primary group for users '$other_users', not removing it"
fi
else
bbnote "${PN}: group $groupname doesn't exist, not removing it"
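perform_groupdel now looks up the group's GID in etc/group and scans etc/passwd for users whose primary group matches before deleting anything. A minimal Python sketch of that check, using hypothetical one-line file contents:

    # Hypothetical one-line /etc/group and /etc/passwd contents
    group_line = "render:x:997:"
    passwd_lines = ["weston:x:1000:997::/home/weston:/bin/sh"]

    groupname = group_line.split(":")[0]
    gid = group_line.split(":")[2]
    # Users whose fourth passwd field (primary GID) matches this group
    other_users = [l.split(":")[0] for l in passwd_lines if l.split(":")[3] == gid]

    if other_users:
        print("'%s' is primary group for users %s, not removing it" % (groupname, other_users))
    else:
        print("safe to run groupdel %s" % groupname)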
diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass
index 7bc584abb9..b1f27d3658 100644
--- a/meta/classes/utility-tasks.bbclass
+++ b/meta/classes/utility-tasks.bbclass
@@ -4,12 +4,12 @@ python do_listtasks() {
taskdescs = {}
maxlen = 0
for e in d.keys():
- if d.getVarFlag(e, 'task', True):
+ if d.getVarFlag(e, 'task'):
maxlen = max(maxlen, len(e))
if e.endswith('_setscene'):
- desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc', True) or '')
+ desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc') or '')
else:
- desc = d.getVarFlag(e, 'doc', True) or ''
+ desc = d.getVarFlag(e, 'doc') or ''
taskdescs[e] = desc
tasks = sorted(taskdescs.keys())
@@ -28,18 +28,18 @@ python do_clean() {
bb.note("Removing " + dir)
oe.path.remove(dir)
- dir = "%s.*" % bb.data.expand(d.getVar('STAMP', False), d)
+ dir = "%s.*" % d.getVar('STAMP')
bb.note("Removing " + dir)
oe.path.remove(dir)
- for f in (d.getVar('CLEANFUNCS', True) or '').split():
+ for f in (d.getVar('CLEANFUNCS') or '').split():
bb.build.exec_func(f, d)
}
addtask checkuri
do_checkuri[nostamp] = "1"
python do_checkuri() {
- src_uri = (d.getVar('SRC_URI', True) or "").split()
+ src_uri = (d.getVar('SRC_URI') or "").split()
if len(src_uri) == 0:
return
@@ -47,20 +47,7 @@ python do_checkuri() {
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.checkstatus()
except bb.fetch2.BBFetchException as e:
- raise bb.build.FuncFailed(e)
+ bb.fatal(str(e))
}
-addtask checkuriall after do_checkuri
-do_checkuriall[recrdeptask] = "do_checkuriall do_checkuri"
-do_checkuriall[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_checkuriall[nostamp] = "1"
-do_checkuriall() {
- :
-}
-addtask fetchall after do_fetch
-do_fetchall[recrdeptask] = "do_fetchall do_fetch"
-do_fetchall[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_fetchall() {
- :
-}
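do_listtasks walks the datastore keys, keeps those flagged as tasks, and derives each description from the 'doc' varflag, stripping the '_setscene' suffix to reuse the base task's text. A minimal sketch of the description logic, with a plain dict standing in for the varflags:

    # Plain dict standing in for the 'doc' varflags in the datastore
    docs = {
        "do_fetch": "Fetches the source code",
        "do_populate_sysroot": "Copies a subset of files into the sysroot",
    }

    def describe(task):
        if task.endswith("_setscene"):
            # Reuse the base task's doc text, as the hunk above does
            return "%s (setscene version)" % (docs.get(task[:-9], "") or "")
        return docs.get(task, "") or ""

    print(describe("do_populate_sysroot_setscene"))
    # -> Copies a subset of files into the sysroot (setscene version)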
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
index 800b56578c..cd3d05709e 100644
--- a/meta/classes/utils.bbclass
+++ b/meta/classes/utils.bbclass
@@ -1,49 +1,8 @@
-# For compatibility
-def base_path_join(a, *p):
- return oe.path.join(a, *p)
-
-def base_path_relative(src, dest):
- return oe.path.relative(src, dest)
-
-def base_path_out(path, d):
- return oe.path.format_display(path, d)
-
-def base_read_file(filename):
- return oe.utils.read_file(filename)
-
-def base_ifelse(condition, iftrue = True, iffalse = False):
- return oe.utils.ifelse(condition, iftrue, iffalse)
-
-def base_conditional(variable, checkvalue, truevalue, falsevalue, d):
- return oe.utils.conditional(variable, checkvalue, truevalue, falsevalue, d)
-
-def base_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- return oe.utils.less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
-
-def base_version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- return oe.utils.version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
-
-def base_contains(variable, checkvalues, truevalue, falsevalue, d):
- bb.note('base_contains is deprecated, please use bb.utils.contains instead.')
- return bb.utils.contains(variable, checkvalues, truevalue, falsevalue, d)
-
-def base_both_contain(variable1, variable2, checkvalue, d):
- return oe.utils.both_contain(variable1, variable2, checkvalue, d)
-
-def base_prune_suffix(var, suffixes, d):
- return oe.utils.prune_suffix(var, suffixes, d)
-
-def oe_filter(f, str, d):
- return oe.utils.str_filter(f, str, d)
-
-def oe_filter_out(f, str, d):
- return oe.utils.str_filter_out(f, str, d)
-
def machine_paths(d):
"""List any existing machine specific filespath directories"""
- machine = d.getVar("MACHINE", True)
- filespathpkg = d.getVar("FILESPATHPKG", True).split(":")
- for basepath in d.getVar("FILESPATHBASE", True).split(":"):
+ machine = d.getVar("MACHINE")
+ filespathpkg = d.getVar("FILESPATHPKG").split(":")
+ for basepath in d.getVar("FILESPATHBASE").split(":"):
for pkgpath in filespathpkg:
machinepath = os.path.join(basepath, pkgpath, machine)
if os.path.isdir(machinepath):
@@ -52,7 +11,7 @@ def machine_paths(d):
def is_machine_specific(d):
"""Determine whether the current recipe is machine specific"""
machinepaths = set(machine_paths(d))
- srcuri = d.getVar("SRC_URI", True).split()
+ srcuri = d.getVar("SRC_URI").split()
for url in srcuri:
fetcher = bb.fetch2.Fetch([srcuri], d)
if url.startswith("file://"):
@@ -264,10 +223,17 @@ create_cmdline_wrapper () {
mv $cmd $cmd.real
cmdname=`basename $cmd`
+ dirname=`dirname $cmd`
+ cmdoptions=$@
+ if [ "${base_prefix}" != "" ]; then
+ relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
+ cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
+ fi
cat <<END >$cmd
#!/bin/bash
realpath=\`readlink -fn \$0\`
-exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $@ "\$@"
+realdir=\`dirname \$realpath\`
+exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $cmdoptions "\$@"
END
chmod +x $cmd
}
@@ -287,11 +253,18 @@ create_wrapper () {
mv $cmd $cmd.real
cmdname=`basename $cmd`
+ dirname=`dirname $cmd`
+ exportstring=$@
+ if [ "${base_prefix}" != "" ]; then
+ relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
+ exportstring=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
+ fi
cat <<END >$cmd
#!/bin/bash
realpath=\`readlink -fn \$0\`
-export $@
-exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real "\$@"
+realdir=\`dirname \$realpath\`
+export $exportstring
+exec -a "\$0" \$realdir/$cmdname.real "\$@"
END
chmod +x $cmd
}
@@ -306,8 +279,8 @@ hardlinkdir () {
def check_app_exists(app, d):
- app = d.expand(app).strip()
- path = d.getVar('PATH', d, True)
+ app = d.expand(app).split()[0].strip()
+ path = d.getVar('PATH')
return bool(bb.utils.which(path, app))
def explode_deps(s):
@@ -315,14 +288,14 @@ def explode_deps(s):
def base_set_filespath(path, d):
filespath = []
- extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
+ extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
# Remove default flag which was used for checking
extrapaths = extrapaths.replace("__default:", "")
# Don't prepend empty strings to the path list
if extrapaths != "":
path = extrapaths.split(":") + path
# The ":" ensures we have an 'empty' override
- overrides = (":" + (d.getVar("FILESOVERRIDES", True) or "")).split(":")
+ overrides = (":" + (d.getVar("FILESOVERRIDES") or "")).split(":")
overrides.reverse()
for o in overrides:
for p in path:
@@ -333,7 +306,7 @@ def base_set_filespath(path, d):
def extend_variants(d, var, extend, delim=':'):
"""Return a string of all bb class extend variants for the given extend"""
variants = []
- whole = d.getVar(var, True) or ""
+ whole = d.getVar(var) or ""
for ext in whole.split():
eext = ext.split(delim)
if len(eext) > 1 and eext[0] == extend:
@@ -341,7 +314,7 @@ def extend_variants(d, var, extend, delim=':'):
return " ".join(variants)
def multilib_pkg_extend(d, pkg):
- variants = (d.getVar("MULTILIB_VARIANTS", True) or "").split()
+ variants = (d.getVar("MULTILIB_VARIANTS") or "").split()
if not variants:
return pkg
pkgs = pkg
@@ -349,24 +322,18 @@ def multilib_pkg_extend(d, pkg):
pkgs = pkgs + " " + v + "-" + pkg
return pkgs
+def get_multilib_datastore(variant, d):
+ return oe.utils.get_multilib_datastore(variant, d)
+
def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
"""Return a string of all ${var} in all multilib tune configuration"""
values = []
- value = d.getVar(var, True) or ""
- if value != "":
- if need_split:
- for item in value.split(delim):
- values.append(item)
- else:
- values.append(value)
- variants = d.getVar("MULTILIB_VARIANTS", True) or ""
- for item in variants.split():
- localdata = bb.data.createCopy(d)
- overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
- localdata.setVar("OVERRIDES", overrides)
- localdata.setVar("MLPREFIX", item + "-")
- bb.data.update_data(localdata)
- value = localdata.getVar(var, True) or ""
+ variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
+ for item in variants:
+ localdata = get_multilib_datastore(item, d)
+ # We need WORKDIR to be consistent with the original datastore
+ localdata.setVar("WORKDIR", d.getVar("WORKDIR"))
+ value = localdata.getVar(var) or ""
if value != "":
if need_split:
for item in value.split(delim):
@@ -393,29 +360,22 @@ def all_multilib_tune_list(vars, d):
values = {}
for v in vars:
values[v] = []
-
- localdata = bb.data.createCopy(d)
- overrides = localdata.getVar("OVERRIDES", False).split(":")
- newoverrides = []
- for o in overrides:
- if not o.startswith("virtclass-multilib-"):
- newoverrides.append(o)
- localdata.setVar("OVERRIDES", ":".join(newoverrides))
- localdata.setVar("MLPREFIX", "")
- origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL", True)
- if origdefault:
- localdata.setVar("DEFAULTTUNE", origdefault)
- bb.data.update_data(localdata)
values['ml'] = ['']
- for v in vars:
- values[v].append(localdata.getVar(v, True))
- variants = d.getVar("MULTILIB_VARIANTS", True) or ""
- for item in variants.split():
- localdata = bb.data.createCopy(d)
- overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
- localdata.setVar("OVERRIDES", overrides)
- localdata.setVar("MLPREFIX", item + "-")
- bb.data.update_data(localdata)
- values[v].append(localdata.getVar(v, True))
+
+ variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
+ for item in variants:
+ localdata = get_multilib_datastore(item, d)
+ values[v].append(localdata.getVar(v))
values['ml'].append(item)
return values
+all_multilib_tune_list[vardepsexclude] = "OVERRIDES"
+
+# If the user hasn't set up their name/email, set some defaults
+check_git_config() {
+ if ! git config user.email > /dev/null ; then
+ git config --local user.email "${PATCH_GIT_USER_EMAIL}"
+ fi
+ if ! git config user.name > /dev/null ; then
+ git config --local user.name "${PATCH_GIT_USER_NAME}"
+ fi
+}
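Both wrapper generators now rewrite any occurrence of ${base_prefix} in their arguments into a path relative to the wrapper's own directory ($realdir/$relpath), which is what keeps the native sysroot relocatable. A minimal sketch of the relpath computation the inline python3 call performs, with hypothetical paths:

    import os

    # Hypothetical stand-ins for ${D}${base_prefix} and `dirname $cmd`
    d_base_prefix = "/work/foo/image/usr"
    wrapper_dir = "/work/foo/image/usr/bin"

    relpath = os.path.relpath(d_base_prefix, wrapper_dir)
    print(relpath)  # ".." -- the wrapper then expands $realdir/.. at run time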
diff --git a/meta/classes/waf.bbclass b/meta/classes/waf.bbclass
index 5e55833ca4..900244004e 100644
--- a/meta/classes/waf.bbclass
+++ b/meta/classes/waf.bbclass
@@ -1,38 +1,65 @@
# avoids build breaks when using no-static-libs.inc
DISABLE_STATIC = ""
-def get_waf_parallel_make(d):
- pm = d.getVar('PARALLEL_MAKE', True)
- if pm:
- # look for '-j' and throw other options (e.g. '-l') away
- # because they might have different meaning in bjam
- pm = pm.split()
- while pm:
- v = None
- opt = pm.pop(0)
- if opt == '-j':
- v = pm.pop(0)
- elif opt.startswith('-j'):
- v = opt[2:].strip()
- else:
- v = None
-
- if v:
- v = min(64, int(v))
- return '-j' + str(v)
-
- return ""
+B = "${WORKDIR}/build"
+
+EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
+
+def waflock_hash(d):
+ # Calculates the hash used for the waf lock file. This should include
+ # all of the user controllable inputs passed to waf configure. Note
+ # that the full paths for ${B} and ${S} are used; this is OK and desired
+ # because a change to either of these should create a unique lock file
+ # to prevent collisions.
+ import hashlib
+ h = hashlib.sha512()
+ def update(name):
+ val = d.getVar(name)
+ if val is not None:
+ h.update(val.encode('utf-8'))
+ update('S')
+ update('B')
+ update('prefix')
+ update('EXTRA_OECONF')
+ return h.hexdigest()
+
+# Use WAFLOCK to specify a separate lock file. The build is already
+# sufficiently isolated by setting the output directory, this ensures that
+# bitbake won't step on the toes of any other configured context in the source
+# directory (e.g. if the source is coming from externalsrc and was previously
+# configured elsewhere).
+export WAFLOCK = ".lock-waf_oe_${@waflock_hash(d)}_build"
+BB_HASHBASE_WHITELIST += "WAFLOCK"
+
+python waf_preconfigure() {
+ import subprocess
+ from distutils.version import StrictVersion
+ subsrcdir = d.getVar('S')
+ wafbin = os.path.join(subsrcdir, 'waf')
+ try:
+ result = subprocess.check_output([wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
+ version = result.decode('utf-8').split()[1]
+ if StrictVersion(version) >= StrictVersion("1.8.7"):
+ d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
+ except subprocess.CalledProcessError as e:
+ bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % e.returncode)
+ except FileNotFoundError:
+ bb.fatal("waf does not exist in %s" % subsrcdir)
+}
+
+do_configure[prefuncs] += "waf_preconfigure"
waf_do_configure() {
- ${S}/waf configure --prefix=${prefix} ${EXTRA_OECONF}
+ (cd ${S} && ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF})
}
+do_compile[progress] = "outof:^\[\s*(\d+)/\s*(\d+)\]\s+"
waf_do_compile() {
- ${S}/waf build ${@get_waf_parallel_make(d)}
+ (cd ${S} && ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)})
}
waf_do_install() {
- ${S}/waf install --destdir=${D}
+ (cd ${S} && ./waf install --destdir=${D})
}
EXPORT_FUNCTIONS do_configure do_compile do_install
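waflock_hash() folds every user-controllable configure input into a sha512 digest so each (S, B, prefix, EXTRA_OECONF) combination gets its own lock file name. A minimal standalone sketch, with a plain dict of hypothetical values standing in for d.getVar():

    import hashlib

    # Hypothetical values standing in for d.getVar() lookups
    config = {
        "S": "/work/foo-1.0",
        "B": "/work/foo-1.0/build",
        "prefix": "/usr",
        "EXTRA_OECONF": "--disable-docs",
    }

    h = hashlib.sha512()
    for name in ("S", "B", "prefix", "EXTRA_OECONF"):
        val = config.get(name)
        if val is not None:
            h.update(val.encode("utf-8"))

    print(".lock-waf_oe_%s_build" % h.hexdigest())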
diff --git a/meta/classes/xmlcatalog.bbclass b/meta/classes/xmlcatalog.bbclass
new file mode 100644
index 0000000000..ae4811fdeb
--- /dev/null
+++ b/meta/classes/xmlcatalog.bbclass
@@ -0,0 +1,26 @@
+DEPENDS = "libxml2-native"
+
+# A whitespace-separated list of XML catalogs to be registered, for example
+# "${sysconfdir}/xml/docbook-xml.xml".
+XMLCATALOGS ?= ""
+
+SYSROOT_PREPROCESS_FUNCS_append = " xmlcatalog_sstate_postinst"
+
+xmlcatalog_complete() {
+ ROOTCATALOG="${STAGING_ETCDIR_NATIVE}/xml/catalog"
+ if [ ! -f $ROOTCATALOG ]; then
+ mkdir --parents $(dirname $ROOTCATALOG)
+ xmlcatalog --noout --create $ROOTCATALOG
+ fi
+ for CATALOG in ${XMLCATALOGS}; do
+ xmlcatalog --noout --add nextCatalog unused file://$CATALOG $ROOTCATALOG
+ done
+}
+
+xmlcatalog_sstate_postinst() {
+ mkdir -p ${SYSROOT_DESTDIR}${bindir}
+ dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}-xmlcatalog
+ echo '#!/bin/sh' > $dest
+ echo '${xmlcatalog_complete}' >> $dest
+ chmod 0755 $dest
+}
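xmlcatalog_complete() creates the native root catalog on first use and then registers every entry of XMLCATALOGS as a nextCatalog delegation. A minimal Python sketch of the same two xmlcatalog invocations, assuming libxml2's xmlcatalog tool is on PATH and using hypothetical paths:

    import os
    import subprocess

    # Hypothetical paths; the class uses ${STAGING_ETCDIR_NATIVE}/xml/catalog
    rootcatalog = "/tmp/sysroot-native/etc/xml/catalog"
    xmlcatalogs = ["/tmp/sysroot/etc/xml/docbook-xml.xml"]

    if not os.path.isfile(rootcatalog):
        os.makedirs(os.path.dirname(rootcatalog), exist_ok=True)
        subprocess.check_call(["xmlcatalog", "--noout", "--create", rootcatalog])

    for catalog in xmlcatalogs:
        subprocess.check_call(["xmlcatalog", "--noout", "--add", "nextCatalog",
                               "unused", "file://" + catalog, rootcatalog])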