Diffstat (limited to 'meta/classes')
-rw-r--r-- meta/classes/archiver.bbclass | 5
-rw-r--r-- meta/classes/baremetal-image.bbclass | 11
-rw-r--r-- meta/classes/base.bbclass | 5
-rw-r--r-- meta/classes/bin_package.bbclass | 3
-rw-r--r-- meta/classes/cargo.bbclass | 1
-rw-r--r-- meta/classes/cargo_common.bbclass | 10
-rw-r--r-- meta/classes/cmake.bbclass | 5
-rw-r--r-- meta/classes/cml1.bbclass | 2
-rw-r--r-- meta/classes/core-image.bbclass | 4
-rw-r--r-- meta/classes/create-spdx.bbclass | 39
-rw-r--r-- meta/classes/cve-check.bbclass | 53
-rw-r--r-- meta/classes/devshell.bbclass | 2
-rw-r--r-- meta/classes/externalsrc.bbclass | 46
-rw-r--r-- meta/classes/fontcache.bbclass | 1
-rw-r--r-- meta/classes/fs-uuid.bbclass | 2
-rw-r--r-- meta/classes/gnomebase.bbclass | 2
-rw-r--r-- meta/classes/go.bbclass | 2
-rw-r--r-- meta/classes/goarch.bbclass | 27
-rw-r--r-- meta/classes/gobject-introspection-data.bbclass | 5
-rw-r--r-- meta/classes/gtk-icon-cache.bbclass | 2
-rw-r--r-- meta/classes/image-live.bbclass | 2
-rw-r--r-- meta/classes/image.bbclass | 7
-rw-r--r-- meta/classes/image_types.bbclass | 8
-rw-r--r-- meta/classes/image_types_wic.bbclass | 4
-rw-r--r-- meta/classes/insane.bbclass | 14
-rw-r--r-- meta/classes/kernel-arch.bbclass | 5
-rw-r--r-- meta/classes/kernel-devicetree.bbclass | 22
-rw-r--r-- meta/classes/kernel-fitimage.bbclass | 31
-rw-r--r-- meta/classes/kernel-uboot.bbclass | 3
-rw-r--r-- meta/classes/kernel-uimage.bbclass | 2
-rw-r--r-- meta/classes/kernel-yocto.bbclass | 18
-rw-r--r-- meta/classes/kernel.bbclass | 96
-rw-r--r-- meta/classes/kernelsrc.bbclass | 1
-rw-r--r-- meta/classes/libc-package.bbclass | 1
-rw-r--r-- meta/classes/license.bbclass | 2
-rw-r--r-- meta/classes/license_image.bbclass | 2
-rw-r--r-- meta/classes/linux-kernel-base.bbclass | 15
-rw-r--r-- meta/classes/meson.bbclass | 1
-rw-r--r-- meta/classes/mirrors.bbclass | 4
-rw-r--r-- meta/classes/module-base.bbclass | 1
-rw-r--r-- meta/classes/multilib.bbclass | 1
-rw-r--r-- meta/classes/native.bbclass | 4
-rw-r--r-- meta/classes/nativesdk.bbclass | 1
-rw-r--r-- meta/classes/npm.bbclass | 63
-rw-r--r-- meta/classes/overlayfs-etc.bbclass | 5
-rw-r--r-- meta/classes/overlayfs.bbclass | 6
-rw-r--r-- meta/classes/own-mirrors.bbclass | 1
-rw-r--r-- meta/classes/package.bbclass | 78
-rw-r--r-- meta/classes/package_rpm.bbclass | 12
-rw-r--r-- meta/classes/populate_sdk_base.bbclass | 4
-rw-r--r-- meta/classes/populate_sdk_ext.bbclass | 9
-rw-r--r-- meta/classes/qemuboot.bbclass | 3
-rw-r--r-- meta/classes/recipe_sanity.bbclass | 2
-rw-r--r-- meta/classes/rm_work.bbclass | 19
-rw-r--r-- meta/classes/rootfs-postcommands.bbclass | 34
-rw-r--r-- meta/classes/sanity.bbclass | 22
-rw-r--r-- meta/classes/scons.bbclass | 8
-rw-r--r-- meta/classes/sstate.bbclass | 2
-rw-r--r-- meta/classes/staging.bbclass | 6
-rw-r--r-- meta/classes/systemd.bbclass | 1
-rw-r--r-- meta/classes/testimage.bbclass | 26
-rw-r--r-- meta/classes/toolchain-scripts.bbclass | 4
-rw-r--r-- meta/classes/uboot-config.bbclass | 4
-rw-r--r-- meta/classes/uboot-extlinux-config.bbclass | 10
-rw-r--r-- meta/classes/uboot-sign.bbclass | 5
-rw-r--r-- meta/classes/uninative.bbclass | 2
-rw-r--r-- meta/classes/update-alternatives.bbclass | 10
-rw-r--r-- meta/classes/useradd-staticids.bbclass | 2
68 files changed, 531 insertions(+), 279 deletions(-)
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index 33070cd17f..4a5865d7b5 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -69,7 +69,6 @@ SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}/mirror"
do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"
-do_deploy_archives[dirs] = "${WORKDIR}"
# This is a convenience for the shell script to use it
@@ -460,7 +459,9 @@ def create_diff_gz(d, src_orig, src, ar_outdir):
def is_work_shared(d):
pn = d.getVar('PN')
- return bb.data.inherits_class('kernel', d) or pn.startswith('gcc-source')
+ return pn.startswith('gcc-source') or \
+ bb.data.inherits_class('kernel', d) or \
+ (bb.data.inherits_class('kernelsrc', d) and d.expand("${TMPDIR}/work-shared") in d.getVar('S'))
# Run do_unpack and do_patch
python do_unpack_and_patch() {
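
The widened is_work_shared() test above also treats kernelsrc recipes whose ${S} lives under ${TMPDIR}/work-shared as shared. A minimal standalone sketch of the same predicate (plain Python stand-ins, not the real bb/oe datastore API):

    def is_work_shared(pn, inherited, s_dir, tmpdir):
        # Shared source trees: gcc-source, kernel recipes, and kernelsrc
        # recipes building out of ${TMPDIR}/work-shared.
        if pn.startswith("gcc-source"):
            return True
        if "kernel" in inherited:
            return True
        return "kernelsrc" in inherited and (tmpdir + "/work-shared") in s_dir

    assert is_work_shared("make-mod-scripts", {"kernelsrc"},
                          "/build/tmp/work-shared/qemux86/kernel-source", "/build/tmp")
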
diff --git a/meta/classes/baremetal-image.bbclass b/meta/classes/baremetal-image.bbclass
index cb9e250350..3a96df1f2d 100644
--- a/meta/classes/baremetal-image.bbclass
+++ b/meta/classes/baremetal-image.bbclass
@@ -95,6 +95,17 @@ QB_OPT_APPEND:append:qemuriscv32 = " -bios none"
CFLAGS:append:qemuriscv64 = " -mcmodel=medany"
+## Emulate image.bbclass
+# Handle inherits of any of the image classes we need
+IMAGE_CLASSES ??= ""
+IMGCLASSES = " ${IMAGE_CLASSES}"
+inherit ${IMGCLASSES}
+# Set defaults to satisfy IMAGE_FEATURES check
+IMAGE_FEATURES ?= ""
+IMAGE_FEATURES[type] = "list"
+IMAGE_FEATURES[validitems] += ""
+
+
# This next part is necessary to trick the build system into thinking
# it's building an image recipe so it generates the qemuboot.conf
addtask do_rootfs before do_image after do_install
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index 0cf27fbb91..ee26ee5597 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -12,7 +12,7 @@ inherit logging
OE_EXTRA_IMPORTS ?= ""
-OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license oe.qa oe.reproducible oe.rust ${OE_EXTRA_IMPORTS}"
+OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license oe.qa oe.reproducible oe.rust oe.go ${OE_EXTRA_IMPORTS}"
OE_IMPORTS[type] = "list"
PACKAGECONFIG_CONFARGS ??= ""
@@ -132,7 +132,7 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
# /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
# would return /usr/local/bin/ccache/gcc, but what we need is
# /usr/bin/gcc, this code can check and fix that.
- if "ccache" in srctool:
+ if os.path.islink(srctool) and os.path.basename(os.readlink(srctool)) == 'ccache':
srctool = bb.utils.which(path, tool, executable=True, direction=1)
if srctool:
os.symlink(srctool, desttool)
@@ -208,6 +208,7 @@ addtask do_deploy_source_date_epoch_setscene
addtask do_deploy_source_date_epoch before do_configure after do_patch
python create_source_date_epoch_stamp() {
+ # Version: 1
source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
oe.reproducible.epochfile_write(source_date_epoch, d.getVar('SDE_FILE'), d)
}
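
The ccache hunk replaces a loose substring match with an explicit symlink check; a hedged sketch of that test on its own:

    import os

    def is_ccache_wrapper(srctool):
        # Only re-resolve the tool when it is a symlink whose target is
        # the ccache binary, e.g. /usr/local/bin/ccache/gcc -> ccache.
        return os.path.islink(srctool) and \
            os.path.basename(os.readlink(srctool)) == "ccache"
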
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
index c3aca20443..f0407e1329 100644
--- a/meta/classes/bin_package.bbclass
+++ b/meta/classes/bin_package.bbclass
@@ -30,8 +30,9 @@ bin_package_do_install () {
bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
fi
cd ${S}
+ install -d ${D}${base_prefix}
tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
- | tar --no-same-owner -xpf - -C ${D}
+ | tar --no-same-owner -xpf - -C ${D}${base_prefix}
}
FILES:${PN} = "/"
diff --git a/meta/classes/cargo.bbclass b/meta/classes/cargo.bbclass
index 4a780a501f..6df3b19b00 100644
--- a/meta/classes/cargo.bbclass
+++ b/meta/classes/cargo.bbclass
@@ -49,7 +49,6 @@ oe_cargo_build () {
do_compile[progress] = "outof:\s+(\d+)/(\d+)"
cargo_do_compile () {
- oe_cargo_fix_env
oe_cargo_build
}
diff --git a/meta/classes/cargo_common.bbclass b/meta/classes/cargo_common.bbclass
index 90fad75415..c1bc142d85 100644
--- a/meta/classes/cargo_common.bbclass
+++ b/meta/classes/cargo_common.bbclass
@@ -45,12 +45,12 @@ cargo_common_do_configure () {
directory = "${CARGO_VENDORING_DIRECTORY}"
EOF
- if [ -z "${EXTERNALSRC}" ] && [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
+ if [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
cat <<- EOF >> ${CARGO_HOME}/config
[source.crates-io]
replace-with = "bitbake"
- local-registry = "/nonexistant"
+ local-registry = "/nonexistent"
EOF
fi
@@ -88,7 +88,7 @@ cargo_common_do_configure () {
cat <<- EOF >> ${CARGO_HOME}/config
[build]
- # Use out of tree build destination to avoid poluting the source tree
+ # Use out of tree build destination to avoid polluting the source tree
target-dir = "${B}/target"
EOF
fi
@@ -101,6 +101,10 @@ cargo_common_do_configure () {
EOF
}
+do_compile:prepend () {
+ oe_cargo_fix_env
+}
+
oe_cargo_fix_env () {
export CC="${RUST_TARGET_CC}"
export CXX="${RUST_TARGET_CXX}"
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
index d9bcddbdbb..7ec6ca58fc 100644
--- a/meta/classes/cmake.bbclass
+++ b/meta/classes/cmake.bbclass
@@ -85,9 +85,12 @@ def map_host_arch_to_uname_arch(host_arch):
return "ppc64"
return host_arch
+
cmake_do_generate_toolchain_file() {
if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
cmake_crosscompiling="set( CMAKE_CROSSCOMPILING FALSE )"
+ else
+ cmake_sysroot="set( CMAKE_SYSROOT \"${RECIPE_SYSROOT}\" )"
fi
cat > ${WORKDIR}/toolchain.cmake <<EOF
# CMake system name must be something like "Linux".
@@ -120,6 +123,8 @@ set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
set( CMAKE_PROGRAM_PATH "/" )
+$cmake_sysroot
+
# Use qt.conf settings
set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
index d319d66ab2..fd087c2a14 100644
--- a/meta/classes/cml1.bbclass
+++ b/meta/classes/cml1.bbclass
@@ -48,7 +48,7 @@ python do_menuconfig() {
# ensure that environment variables are overwritten with this tasks 'd' values
d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
- oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
+ oe_terminal("sh -c 'make %s; if [ \\$? -ne 0 ]; then echo \"Command failed.\"; printf \"Press any key to continue... \"; read r; fi'" % d.getVar('KCONFIG_CONFIG_COMMAND'),
d.getVar('PN') + ' Configuration', d)
# FIXME this check can be removed when the minimum bitbake version has been bumped
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
index 84fd3eeb38..803727da0e 100644
--- a/meta/classes/core-image.bbclass
+++ b/meta/classes/core-image.bbclass
@@ -59,6 +59,10 @@ FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}"
# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2'
# Including image feature foo would replace the image features bar1 and bar2
IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
+# Do not install openssh complementary packages if either packagegroup-core-ssh-dropbear or dropbear
+# is installed, to avoid an openssh/dropbear conflict
+# see [Yocto #14858] for more information
+PACKAGE_EXCLUDE_COMPLEMENTARY:append = "${@bb.utils.contains_any('PACKAGE_INSTALL', 'packagegroup-core-ssh-dropbear dropbear', ' openssh', '' , d)}"
# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2'
# An error exception would be raised if both image features foo and bar1(or bar2) are included
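
The PACKAGE_EXCLUDE_COMPLEMENTARY line relies on bb.utils.contains_any; a simplified stand-in (the real helper reads the variable from the datastore) showing the intended result:

    def contains_any(value, checkvalues, truevalue, falsevalue):
        # Return truevalue if any checkvalue appears in the space-separated value.
        if set(checkvalues.split()) & set(value.split()):
            return truevalue
        return falsevalue

    installed = "packagegroup-core-boot dropbear"
    assert contains_any(installed,
                        "packagegroup-core-ssh-dropbear dropbear",
                        " openssh", "") == " openssh"
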
diff --git a/meta/classes/create-spdx.bbclass b/meta/classes/create-spdx.bbclass
index 37b6b569a1..349ecfe6ab 100644
--- a/meta/classes/create-spdx.bbclass
+++ b/meta/classes/create-spdx.bbclass
@@ -19,12 +19,12 @@ SPDX_TOOL_VERSION ??= "1.0"
SPDXRUNTIMEDEPLOY = "${SPDXDIR}/runtime-deploy"
SPDX_INCLUDE_SOURCES ??= "0"
-SPDX_INCLUDE_PACKAGED ??= "0"
SPDX_ARCHIVE_SOURCES ??= "0"
SPDX_ARCHIVE_PACKAGED ??= "0"
SPDX_UUID_NAMESPACE ??= "sbom.openembedded.org"
SPDX_NAMESPACE_PREFIX ??= "http://spdx.org/spdxdoc"
+SPDX_PRETTY ??= "0"
SPDX_LICENSES ??= "${COREBASE}/meta/files/spdx-licenses.json"
@@ -76,6 +76,11 @@ def recipe_spdx_is_native(d, recipe):
def is_work_shared_spdx(d):
return bb.data.inherits_class('kernel', d) or ('work-shared' in d.getVar('WORKDIR'))
+def get_json_indent(d):
+ if d.getVar("SPDX_PRETTY") == "1":
+ return 2
+ return None
+
python() {
import json
if d.getVar("SPDX_LICENSE_DATA"):
@@ -210,7 +215,7 @@ def add_package_files(d, doc, spdx_pkg, topdir, get_spdxid, get_types, *, archiv
filepath = Path(subdir) / file
filename = str(filepath.relative_to(topdir))
- if filepath.is_file() and not filepath.is_symlink():
+ if not filepath.is_symlink() and filepath.is_file():
spdx_file = oe.spdx.SPDXFile()
spdx_file.SPDXID = get_spdxid(file_counter)
for t in get_types(filepath):
@@ -423,7 +428,6 @@ python do_create_spdx() {
deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
spdx_workdir = Path(d.getVar("SPDXWORK"))
- include_packaged = d.getVar("SPDX_INCLUDE_PACKAGED") == "1"
include_sources = d.getVar("SPDX_INCLUDE_SOURCES") == "1"
archive_sources = d.getVar("SPDX_ARCHIVE_SOURCES") == "1"
archive_packaged = d.getVar("SPDX_ARCHIVE_PACKAGED") == "1"
@@ -445,12 +449,13 @@ python do_create_spdx() {
recipe.name = d.getVar("PN")
recipe.versionInfo = d.getVar("PV")
recipe.SPDXID = oe.sbom.get_recipe_spdxid(d)
- recipe.packageSupplier = d.getVar("SPDX_SUPPLIER")
+ recipe.supplier = d.getVar("SPDX_SUPPLIER")
if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
recipe.annotations.append(create_annotation(d, "isNative"))
for s in d.getVar('SRC_URI').split():
if not s.startswith("file://"):
+ s = s.split(';')[0]
recipe.downloadLocation = s
break
else:
@@ -515,7 +520,7 @@ python do_create_spdx() {
dep_recipes = collect_dep_recipes(d, doc, recipe)
- doc_sha1 = oe.sbom.write_doc(d, doc, "recipes")
+ doc_sha1 = oe.sbom.write_doc(d, doc, "recipes", indent=get_json_indent(d))
dep_recipes.append(oe.sbom.DepRecipe(doc, doc_sha1, recipe))
recipe_ref = oe.spdx.SPDXExternalDocumentRef()
@@ -555,7 +560,7 @@ python do_create_spdx() {
spdx_package.name = pkg_name
spdx_package.versionInfo = d.getVar("PV")
spdx_package.licenseDeclared = convert_license_to_spdx(package_license, package_doc, d, found_licenses)
- spdx_package.packageSupplier = d.getVar("SPDX_SUPPLIER")
+ spdx_package.supplier = d.getVar("SPDX_SUPPLIER")
package_doc.packages.append(spdx_package)
@@ -571,6 +576,7 @@ python do_create_spdx() {
pkgdest / package,
lambda file_counter: oe.sbom.get_packaged_file_spdxid(pkg_name, file_counter),
lambda filepath: ["BINARY"],
+ ignore_top_level_dirs=['CONTROL', 'DEBIAN'],
archive=archive,
)
@@ -579,7 +585,7 @@ python do_create_spdx() {
add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources)
- oe.sbom.write_doc(d, package_doc, "packages")
+ oe.sbom.write_doc(d, package_doc, "packages", indent=get_json_indent(d))
}
# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies for archiving the source
addtask do_create_spdx after do_package do_packagedata do_unpack before do_populate_sdk do_build do_rm_work
@@ -743,7 +749,7 @@ python do_create_runtime_spdx() {
)
seen_deps.add(dep)
- oe.sbom.write_doc(d, runtime_doc, "runtime", spdx_deploy)
+ oe.sbom.write_doc(d, runtime_doc, "runtime", spdx_deploy, indent=get_json_indent(d))
}
addtask do_create_runtime_spdx after do_create_spdx before do_build do_rm_work
@@ -787,6 +793,7 @@ def spdx_get_src(d):
bb.build.exec_func('do_unpack', d)
# Copy source of kernel to spdx_workdir
if is_work_shared_spdx(d):
+ share_src = d.getVar('WORKDIR')
d.setVar('WORKDIR', spdx_workdir)
d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
src_dir = spdx_workdir + "/" + d.getVar('PN')+ "-" + d.getVar('PV') + "-" + d.getVar('PR')
@@ -794,8 +801,8 @@ def spdx_get_src(d):
if bb.data.inherits_class('kernel',d):
share_src = d.getVar('STAGING_KERNEL_DIR')
cmd_copy_share = "cp -rf " + share_src + "/* " + src_dir + "/"
- cmd_copy_kernel_result = os.popen(cmd_copy_share).read()
- bb.note("cmd_copy_kernel_result = " + cmd_copy_kernel_result)
+ cmd_copy_shared_res = os.popen(cmd_copy_share).read()
+ bb.note("cmd_copy_shared_result = " + cmd_copy_shared_res)
git_path = src_dir + "/.git"
if os.path.exists(git_path):
@@ -895,7 +902,7 @@ def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages):
image.name = d.getVar("PN")
image.versionInfo = d.getVar("PV")
image.SPDXID = rootfs_spdxid
- image.packageSupplier = d.getVar("SPDX_SUPPLIER")
+ image.supplier = d.getVar("SPDX_SUPPLIER")
doc.packages.append(image)
@@ -938,7 +945,7 @@ def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages):
image_spdx_path = rootfs_deploydir / (rootfs_name + ".spdx.json")
with image_spdx_path.open("wb") as f:
- doc.to_json(f, sort_keys=True)
+ doc.to_json(f, sort_keys=True, indent=get_json_indent(d))
num_threads = int(d.getVar("BB_NUMBER_THREADS"))
@@ -996,7 +1003,11 @@ def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages):
index["documents"].sort(key=lambda x: x["filename"])
- index_str = io.BytesIO(json.dumps(index, sort_keys=True).encode("utf-8"))
+ index_str = io.BytesIO(json.dumps(
+ index,
+ sort_keys=True,
+ indent=get_json_indent(d),
+ ).encode("utf-8"))
info = tarfile.TarInfo()
info.name = "index.json"
@@ -1010,4 +1021,4 @@ def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages):
spdx_index_path = rootfs_deploydir / (rootfs_name + ".spdx.index.json")
with spdx_index_path.open("w") as f:
- json.dump(index, f, sort_keys=True)
+ json.dump(index, f, sort_keys=True, indent=get_json_indent(d))
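
All of the json.dump()/to_json() call sites above funnel through the new get_json_indent() helper; a self-contained sketch of the toggle:

    import json

    def get_json_indent(pretty):
        # SPDX_PRETTY = "1" selects two-space pretty-printing; otherwise
        # indent=None keeps the compact single-line encoding.
        return 2 if pretty == "1" else None

    doc = {"spdxVersion": "SPDX-2.2", "name": "example"}
    print(json.dumps(doc, sort_keys=True, indent=get_json_indent("1")))
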
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index da7f93371c..f554150d94 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -26,7 +26,7 @@ CVE_PRODUCT ??= "${BPN}"
CVE_VERSION ??= "${PV}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
-CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db"
+CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_2.db"
CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock"
CVE_CHECK_LOG ?= "${T}/cve.log"
@@ -42,8 +42,8 @@ CVE_CHECK_LOG_JSON ?= "${T}/cve.json"
CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}"
CVE_CHECK_RECIPE_FILE_JSON ?= "${CVE_CHECK_DIR}/${PN}_cve.json"
-CVE_CHECK_MANIFEST ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
-CVE_CHECK_MANIFEST_JSON ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.json"
+CVE_CHECK_MANIFEST ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
+CVE_CHECK_MANIFEST_JSON ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.json"
CVE_CHECK_COPY_FILES ??= "1"
CVE_CHECK_CREATE_MANIFEST ??= "1"
@@ -98,6 +98,8 @@ def generate_json_report(d, out_path, link_path):
cve_check_merge_jsons(summary, data)
filename = f.readline()
+ summary["package"].sort(key=lambda d: d['name'])
+
with open(out_path, "w") as f:
json.dump(summary, f, indent=2)
@@ -139,22 +141,23 @@ python do_cve_check () {
"""
from oe.cve_check import get_patched_cves
- if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
- try:
- patched_cves = get_patched_cves(d)
- except FileNotFoundError:
- bb.fatal("Failure in searching patches")
- ignored, patched, unpatched, status = check_cves(d, patched_cves)
- if patched or unpatched or (d.getVar("CVE_CHECK_COVERAGE") == "1" and status):
- cve_data = get_cve_info(d, patched + unpatched + ignored)
- cve_write_data(d, patched, unpatched, ignored, cve_data, status)
- else:
- bb.note("No CVE database found, skipping CVE check")
+ with bb.utils.fileslocked([d.getVar("CVE_CHECK_DB_FILE_LOCK")], shared=True):
+ if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
+ try:
+ patched_cves = get_patched_cves(d)
+ except FileNotFoundError:
+ bb.fatal("Failure in searching patches")
+ ignored, patched, unpatched, status = check_cves(d, patched_cves)
+ if patched or unpatched or (d.getVar("CVE_CHECK_COVERAGE") == "1" and status):
+ cve_data = get_cve_info(d, patched + unpatched + ignored)
+ cve_write_data(d, patched, unpatched, ignored, cve_data, status)
+ else:
+ bb.note("No CVE database found, skipping CVE check")
}
addtask cve_check before do_build
-do_cve_check[depends] = "cve-update-db-native:do_fetch"
+do_cve_check[depends] = "cve-update-nvd2-native:do_fetch"
do_cve_check[nostamp] = "1"
python cve_check_cleanup () {
@@ -195,7 +198,7 @@ python cve_check_write_rootfs_manifest () {
recipies.add(pkg_data["PN"])
bb.note("Writing rootfs CVE manifest")
- deploy_dir = d.getVar("DEPLOY_DIR_IMAGE")
+ deploy_dir = d.getVar("IMGDEPLOYDIR")
link_name = d.getVar("IMAGE_LINK_NAME")
json_data = {"version":"1", "package": []}
@@ -253,7 +256,7 @@ def check_cves(d, patched_cves):
"""
Connect to the NVD database and find unpatched cves.
"""
- from oe.cve_check import Version
+ from oe.cve_check import Version, convert_cve_version
pn = d.getVar("PN")
real_pv = d.getVar("PV")
@@ -290,7 +293,8 @@ def check_cves(d, patched_cves):
vendor = "%"
# Find all relevant CVE IDs.
- for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)):
+ cve_cursor = conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor))
+ for cverow in cve_cursor:
cve = cverow[0]
if cve in cve_ignore:
@@ -309,12 +313,16 @@ def check_cves(d, patched_cves):
vulnerable = False
ignored = False
- for row in conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)):
+ product_cursor = conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor))
+ for row in product_cursor:
(_, _, _, version_start, operator_start, version_end, operator_end) = row
#bb.debug(2, "Evaluating row " + str(row))
if cve in cve_ignore:
ignored = True
+ version_start = convert_cve_version(version_start)
+ version_end = convert_cve_version(version_end)
+
if (operator_start == '=' and pv == version_start) or version_start == '-':
vulnerable = True
else:
@@ -353,10 +361,12 @@ def check_cves(d, patched_cves):
bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
cves_unpatched.append(cve)
break
+ product_cursor.close()
if not vulnerable:
bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve))
patched_cves.add(cve)
+ cve_cursor.close()
if not cves_in_product:
bb.note("No CVE records found for product %s, pn %s" % (product, pn))
@@ -381,14 +391,15 @@ def get_cve_info(d, cves):
conn = sqlite3.connect(db_file, uri=True)
for cve in cves:
- for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
+ cursor = conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,))
+ for row in cursor:
cve_data[row[0]] = {}
cve_data[row[0]]["summary"] = row[1]
cve_data[row[0]]["scorev2"] = row[2]
cve_data[row[0]]["scorev3"] = row[3]
cve_data[row[0]]["modified"] = row[4]
cve_data[row[0]]["vector"] = row[5]
-
+ cursor.close()
conn.close()
return cve_data
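
do_cve_check now holds a shared lock on CVE_CHECK_DB_FILE_LOCK for the whole query, so readers can run concurrently while the database updater holds the lock exclusively. A sketch of the same pattern with plain fcntl (bb.utils.fileslocked is BitBake-specific):

    import fcntl
    from contextlib import contextmanager

    @contextmanager
    def shared_lock(path):
        # LOCK_SH: many concurrent readers; the updater takes LOCK_EX.
        with open(path, "a") as lockfile:
            fcntl.flock(lockfile, fcntl.LOCK_SH)
            try:
                yield
            finally:
                fcntl.flock(lockfile, fcntl.LOCK_UN)

    # with shared_lock("nvdcve_2.db.lock"):
    #     ...query the database...
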
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
index 247d04478c..26c01c080a 100644
--- a/meta/classes/devshell.bbclass
+++ b/meta/classes/devshell.bbclass
@@ -2,8 +2,6 @@ inherit terminal
DEVSHELL = "${SHELL}"
-PATH:prepend:task-devshell = "${COREBASE}/scripts/git-intercept:"
-
python do_devshell () {
if d.getVarFlag("do_devshell", "manualfakeroot"):
d.prependVar("DEVSHELL", "pseudo ")
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index b2f216f361..a209730240 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -60,7 +60,11 @@ python () {
if externalsrcbuild:
d.setVar('B', externalsrcbuild)
else:
- d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
+ d.setVar('B', '${WORKDIR}/${BPN}-${PV}')
+
+ if d.getVar('SRCREV', "INVALID") != "INVALID":
+ # Ensure SRCREV has been processed before accessing SRC_URI
+ bb.fetch.get_srcrev(d)
local_srcuri = []
fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
@@ -68,7 +72,7 @@ python () {
url_data = fetch.ud[url]
parm = url_data.parm
if (url_data.type == 'file' or
- url_data.type == 'npmsw' or
+ url_data.type == 'npmsw' or url_data.type == 'crate' or
'type' in parm and parm['type'] == 'kmeta'):
local_srcuri.append(url)
@@ -76,6 +80,8 @@ python () {
# Dummy value because the default function can't be called with blank SRC_URI
d.setVar('SRCPV', '999')
+ # sstate is never going to work for external source trees, disable it
+ d.setVar('SSTATE_SKIP_CREATION', '1')
if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
d.setVar('CONFIGUREOPT_DEPTRACK', '')
@@ -83,23 +89,22 @@ python () {
tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
for task in tasks:
- if task.endswith("_setscene"):
- # sstate is never going to work for external source trees, disable it
- bb.build.deltask(task, d)
- elif os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
+ if os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
# Since configure will likely touch ${S}, ensure only we lock so one task has access at a time
d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
- for funcname in [task, "base_" + task, "kernel_" + task]:
+ for v in d.keys():
+ cleandirs = d.getVarFlag(v, "cleandirs", False)
+ if cleandirs:
# We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
- cleandirs = oe.recipeutils.split_var_value(d.getVarFlag(funcname, 'cleandirs', False) or '')
+ cleandirs = oe.recipeutils.split_var_value(cleandirs)
setvalue = False
for cleandir in cleandirs[:]:
if oe.path.is_path_parent(externalsrc, d.expand(cleandir)):
cleandirs.remove(cleandir)
setvalue = True
if setvalue:
- d.setVarFlag(funcname, 'cleandirs', ' '.join(cleandirs))
+ d.setVarFlag(v, 'cleandirs', ' '.join(cleandirs))
fetch_tasks = ['do_fetch', 'do_unpack']
# If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
@@ -209,8 +214,8 @@ def srctree_hash_files(d, srcdir=None):
try:
git_dir = os.path.join(s_dir,
subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
- top_git_dir = os.path.join(s_dir, subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'],
- stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ top_git_dir = os.path.join(d.getVar("TOPDIR"),
+ subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
if git_dir == top_git_dir:
git_dir = None
except subprocess.CalledProcessError:
@@ -227,15 +232,16 @@ def srctree_hash_files(d, srcdir=None):
env['GIT_INDEX_FILE'] = tmp_index.name
subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
- submodule_helper = subprocess.check_output(['git', 'submodule--helper', 'list'], cwd=s_dir, env=env).decode("utf-8")
- for line in submodule_helper.splitlines():
- module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
- if os.path.isdir(module_dir):
- proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
- proc.communicate()
- proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
- stdout, _ = proc.communicate()
- git_sha1 += stdout.decode("utf-8")
+ if os.path.exists(os.path.join(s_dir, ".gitmodules")) and os.path.getsize(os.path.join(s_dir, ".gitmodules")) > 0:
+ submodule_helper = subprocess.check_output(["git", "config", "--file", ".gitmodules", "--get-regexp", "path"], cwd=s_dir, env=env).decode("utf-8")
+ for line in submodule_helper.splitlines():
+ module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
+ if os.path.isdir(module_dir):
+ proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ proc.communicate()
+ proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
+ stdout, _ = proc.communicate()
+ git_sha1 += stdout.decode("utf-8")
sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
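
The submodule hashing now discovers paths by parsing .gitmodules with git config rather than the removed "git submodule--helper list"; the same discovery step in isolation:

    import os
    import subprocess

    def submodule_paths(s_dir):
        gm = os.path.join(s_dir, ".gitmodules")
        if not (os.path.exists(gm) and os.path.getsize(gm) > 0):
            return []
        out = subprocess.check_output(
            ["git", "config", "--file", ".gitmodules", "--get-regexp", "path"],
            cwd=s_dir).decode("utf-8")
        # Lines look like: "submodule.<name>.path <path>"
        return [line.rsplit(maxsplit=1)[1] for line in out.splitlines()]
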
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
index 442bfc7392..34688591f4 100644
--- a/meta/classes/fontcache.bbclass
+++ b/meta/classes/fontcache.bbclass
@@ -7,6 +7,7 @@ PACKAGE_WRITE_DEPS += "qemu-native"
inherit qemu
FONT_PACKAGES ??= "${PN}"
+FONT_PACKAGES:class-native = ""
FONT_EXTRA_RDEPENDS ?= "${MLPREFIX}fontconfig-utils"
FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
FONTCONFIG_CACHE_PARAMS ?= "-v"
diff --git a/meta/classes/fs-uuid.bbclass b/meta/classes/fs-uuid.bbclass
index 9b53dfba7a..731ea575bd 100644
--- a/meta/classes/fs-uuid.bbclass
+++ b/meta/classes/fs-uuid.bbclass
@@ -4,7 +4,7 @@
def get_rootfs_uuid(d):
import subprocess
rootfs = d.getVar('ROOTFS')
- output = subprocess.check_output(['tune2fs', '-l', rootfs])
+ output = subprocess.check_output(['tune2fs', '-l', rootfs], text=True)
for line in output.split('\n'):
if line.startswith('Filesystem UUID:'):
uuid = line.split()[-1]
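
With text=True, check_output() returns str, so the split('\n') below no longer needs a decode. The parse itself, run on canned sample output for illustration:

    def parse_fs_uuid(output):
        for line in output.split("\n"):
            if line.startswith("Filesystem UUID:"):
                return line.split()[-1]
        return None

    sample = ("Filesystem volume name:   <none>\n"
              "Filesystem UUID:          123e4567-e89b-12d3-a456-426614174000\n")
    assert parse_fs_uuid(sample) == "123e4567-e89b-12d3-a456-426614174000"
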
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
index 9a5bd9a232..99ac472080 100644
--- a/meta/classes/gnomebase.bbclass
+++ b/meta/classes/gnomebase.bbclass
@@ -1,5 +1,5 @@
def gnome_verdir(v):
- return ".".join(v.split(".")[:-1])
+ return ".".join(v.split(".")[:-1]) or v
GNOME_COMPRESS_TYPE ?= "xz"
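
The "or v" fallback matters for single-component versions: dropping the last component of "43" yields the empty string, which is falsy, so the original version is returned instead of an empty directory name:

    def gnome_verdir(v):
        return ".".join(v.split(".")[:-1]) or v

    assert gnome_verdir("3.38.2") == "3.38"
    assert gnome_verdir("43.1") == "43"
    assert gnome_verdir("43") == "43"   # previously returned ""
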
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass
index f3d83febbf..d944722309 100644
--- a/meta/classes/go.bbclass
+++ b/meta/classes/go.bbclass
@@ -122,7 +122,7 @@ go_do_install() {
tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
tar -C ${D}${libdir}/go --no-same-owner -xf -
- if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
+ if ls ${B}/${GO_BUILD_BINDIR}/* >/dev/null 2>/dev/null ; then
install -d ${D}${bindir}
install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
fi
diff --git a/meta/classes/goarch.bbclass b/meta/classes/goarch.bbclass
index 92fec16b82..394c0c5d84 100644
--- a/meta/classes/goarch.bbclass
+++ b/meta/classes/goarch.bbclass
@@ -61,31 +61,10 @@ SECURITY_NOPIE_CFLAGS ??= ""
CCACHE_DISABLE ?= "1"
def go_map_arch(a, d):
- import re
- if re.match('i.86', a):
- return '386'
- elif a == 'x86_64':
- return 'amd64'
- elif re.match('arm.*', a):
- return 'arm'
- elif re.match('aarch64.*', a):
- return 'arm64'
- elif re.match('mips64el.*', a):
- return 'mips64le'
- elif re.match('mips64.*', a):
- return 'mips64'
- elif a == 'mips':
- return 'mips'
- elif a == 'mipsel':
- return 'mipsle'
- elif re.match('p(pc|owerpc)(64le)', a):
- return 'ppc64le'
- elif re.match('p(pc|owerpc)(64)', a):
- return 'ppc64'
- elif a == 'riscv64':
- return 'riscv64'
- else:
+ arch = oe.go.map_arch(a)
+ if not arch:
raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a)
+ return arch
def go_map_arm(a, d):
if a.startswith("arm"):
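
go_map_arch() now delegates to a shared oe.go.map_arch() helper that returns a falsy value for unsupported architectures, letting the class raise SkipRecipe. A sketch reconstructed from the deleted chain (the real helper lives in meta/lib/oe/go.py):

    import re

    def map_arch(a):
        if re.match('i.86', a):
            return '386'
        elif a == 'x86_64':
            return 'amd64'
        elif re.match('arm.*', a):
            return 'arm'
        elif re.match('aarch64.*', a):
            return 'arm64'
        elif re.match('mips64el.*', a):
            return 'mips64le'
        elif re.match('mips64.*', a):
            return 'mips64'
        elif a == 'mips':
            return 'mips'
        elif a == 'mipsel':
            return 'mipsle'
        elif re.match('p(pc|owerpc)(64le)', a):
            return 'ppc64le'
        elif re.match('p(pc|owerpc)(64)', a):
            return 'ppc64'
        elif a == 'riscv64':
            return 'riscv64'
        return None
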
diff --git a/meta/classes/gobject-introspection-data.bbclass b/meta/classes/gobject-introspection-data.bbclass
index 2ef684626a..d90cdb4839 100644
--- a/meta/classes/gobject-introspection-data.bbclass
+++ b/meta/classes/gobject-introspection-data.bbclass
@@ -5,3 +5,8 @@
# so that qemu use can be avoided when necessary.
GI_DATA_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
+
+do_compile:prepend() {
+ # This prevents g-ir-scanner from writing cache data to $HOME
+ export GI_SCANNER_DISABLE_CACHE=1
+}
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
index 6808339b90..f999b891f3 100644
--- a/meta/classes/gtk-icon-cache.bbclass
+++ b/meta/classes/gtk-icon-cache.bbclass
@@ -3,7 +3,7 @@ FILES:${PN} += "${datadir}/icons/hicolor"
GTKIC_VERSION ??= '3'
GTKPN = "${@ 'gtk4' if d.getVar('GTKIC_VERSION') == '4' else 'gtk+3' }"
-GTKIC_CMD = "${@ 'gtk-update-icon-cache-3.0.0' if d.getVar('GTKIC_VERSION') == '4' else 'gtk4-update-icon-cache' }"
+GTKIC_CMD = "${@ 'gtk4-update-icon-cache' if d.getVar('GTKIC_VERSION') == '4' else 'gtk-update-icon-cache-3.0' }"
#gtk+3/gtk4 require GTK3DISTROFEATURES, DEPENDS on it make all the
#recipes inherit this class require GTK3DISTROFEATURES
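
The GTKIC_CMD fix swaps the two branches that had been reversed: version "4" now selects the gtk4 tool. As a plain conditional:

    def gtkic_cmd(version):
        # "4" -> gtk4-update-icon-cache; anything else -> the GTK3 tool.
        return "gtk4-update-icon-cache" if version == "4" \
            else "gtk-update-icon-cache-3.0"
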
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
index 2c948190cf..c0c1fb31ac 100644
--- a/meta/classes/image-live.bbclass
+++ b/meta/classes/image-live.bbclass
@@ -30,7 +30,7 @@ do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
virtual/kernel:do_deploy \
${MLPREFIX}syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
- ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')) if d.getVar('ROOTFS') else ''} \
+ ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_').split('.')[0]) if d.getVar('ROOTFS') else ''} \
"
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index 2139a7e576..00413d56d1 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -177,8 +177,7 @@ python () {
IMAGE_POSTPROCESS_COMMAND ?= ""
-# some default locales
-IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
+IMAGE_LINGUAS ??= ""
LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
@@ -314,7 +313,7 @@ fakeroot python do_image_qa () {
except oe.utils.ImageQAFailed as e:
qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
except Exception as e:
- qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
+ qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (cmd, e)
if qamsg:
imgname = d.getVar('IMAGE_NAME')
@@ -441,7 +440,7 @@ python () {
localdata.delVar('DATE')
localdata.delVar('TMPDIR')
localdata.delVar('IMAGE_VERSION_SUFFIX')
- vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude', True) or '').split()
+ vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude') or '').split()
for dep in vardepsexclude:
localdata.delVar(dep)
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
index 960dab1a60..9d5f8c68a4 100644
--- a/meta/classes/image_types.bbclass
+++ b/meta/classes/image_types.bbclass
@@ -130,10 +130,11 @@ IMAGE_CMD:cpio () {
if [ ! -L ${IMAGE_ROOTFS}/init ] && [ ! -e ${IMAGE_ROOTFS}/init ]; then
if [ -L ${IMAGE_ROOTFS}/sbin/init ] || [ -e ${IMAGE_ROOTFS}/sbin/init ]; then
ln -sf /sbin/init ${WORKDIR}/cpio_append/init
+ touch -h -r ${IMAGE_ROOTFS}/sbin/init ${WORKDIR}/cpio_append/init
else
- touch ${WORKDIR}/cpio_append/init
+ touch -r ${IMAGE_ROOTFS} ${WORKDIR}/cpio_append/init
fi
- (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
+ (cd ${WORKDIR}/cpio_append && echo ./init | cpio --reproducible -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
fi
fi
}
@@ -187,7 +188,10 @@ multiubi_mkfs() {
fi
}
+MULTIUBI_ARGS = "MKUBIFS_ARGS UBINIZE_ARGS"
+
IMAGE_CMD:multiubi () {
+ ${@' '.join(['%s_%s="%s";' % (arg, name, d.getVar('%s_%s' % (arg, name))) for arg in d.getVar('MULTIUBI_ARGS').split() for name in d.getVar('MULTIUBI_BUILD').split()])}
# Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name>
for name in ${MULTIUBI_BUILD}; do
eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\"
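
The inline ${@...} expansion added to IMAGE_CMD:multiubi emits one shell assignment per (argument, build) pair, making the per-name MKUBIFS_ARGS/UBINIZE_ARGS values visible to the task and its signature. A stand-in using a dict instead of the datastore (the argument values are illustrative):

    def multiubi_assignments(getvar):
        args = getvar('MULTIUBI_ARGS').split()
        names = getvar('MULTIUBI_BUILD').split()
        return ' '.join('%s_%s="%s";' % (arg, name, getvar('%s_%s' % (arg, name)))
                        for arg in args for name in names)

    conf = {'MULTIUBI_ARGS': 'MKUBIFS_ARGS UBINIZE_ARGS',
            'MULTIUBI_BUILD': 'boot data',
            'MKUBIFS_ARGS_boot': '-m 2048 -e 126976 -c 2047',
            'MKUBIFS_ARGS_data': '-m 2048 -e 126976 -c 8191',
            'UBINIZE_ARGS_boot': '-m 2048 -p 128KiB',
            'UBINIZE_ARGS_data': '-m 2048 -p 128KiB'}
    print(multiubi_assignments(conf.get))
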
diff --git a/meta/classes/image_types_wic.bbclass b/meta/classes/image_types_wic.bbclass
index e3863c88a9..8497916d48 100644
--- a/meta/classes/image_types_wic.bbclass
+++ b/meta/classes/image_types_wic.bbclass
@@ -83,7 +83,9 @@ do_image_wic[recrdeptask] += "do_deploy"
do_image_wic[deptask] += "do_image_complete"
WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
-WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native"
+WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native erofs-utils-native"
+# Unified kernel images need objcopy
+WKS_FILE_DEPENDS_DEFAULT += "virtual/${MLPREFIX}${TARGET_PREFIX}binutils"
WKS_FILE_DEPENDS_BOOTLOADERS = ""
WKS_FILE_DEPENDS_BOOTLOADERS:x86 = "syslinux grub-efi systemd-boot os-release"
WKS_FILE_DEPENDS_BOOTLOADERS:x86-64 = "syslinux grub-efi systemd-boot os-release"
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index 6f6dcb3dd5..dfda70bad6 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -444,12 +444,14 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
Check for build paths inside target files and error if paths are not
explicitly ignored.
"""
+ import stat
# Ignore .debug files, not interesting
if path.find(".debug") != -1:
return
- # Ignore symlinks
- if os.path.islink(path):
+ # Ignore symlinks/devs/fifos
+ mode = os.lstat(path).st_mode
+ if stat.S_ISLNK(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode) or stat.S_ISCHR(mode) or stat.S_ISSOCK(mode):
return
tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
@@ -550,7 +552,10 @@ python populate_lic_qa_checksum() {
import hashlib
lineno = 0
license = []
- m = hashlib.new('MD5', usedforsecurity=False)
+ try:
+ m = hashlib.new('MD5', usedforsecurity=False)
+ except TypeError:
+ m = hashlib.new('MD5')
for line in f:
lineno += 1
if (lineno >= beginline):
@@ -1194,11 +1199,12 @@ python do_qa_patch() {
import re
from oe import patch
+ coremeta_path = os.path.join(d.getVar('COREBASE'), 'meta', '')
for url in patch.src_patches(d):
(_, _, fullpath, _, _, _) = bb.fetch.decodeurl(url)
# skip patches not in oe-core
- if '/meta/' not in fullpath:
+ if not os.path.abspath(fullpath).startswith(coremeta_path):
continue
kinda_status_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
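
package_qa_check_buildpaths() now classifies the entry with lstat(), so devices, FIFOs, and sockets are skipped along with symlinks before their contents are read; the filter on its own:

    import os
    import stat

    def should_skip(path):
        mode = os.lstat(path).st_mode     # lstat: classify the entry itself
        return (stat.S_ISLNK(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode)
                or stat.S_ISCHR(mode) or stat.S_ISSOCK(mode))
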
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
index 07ec242e63..62c8211621 100644
--- a/meta/classes/kernel-arch.bbclass
+++ b/meta/classes/kernel-arch.bbclass
@@ -61,8 +61,7 @@ HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
TARGET_AR_KERNEL_ARCH ?= ""
HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
-KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH}"
+KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} -fdebug-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH}"
KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
-TOOLCHAIN = "gcc"
-
+TOOLCHAIN ?= "gcc"
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass
index b4338da1b1..18ab6b4c4f 100644
--- a/meta/classes/kernel-devicetree.bbclass
+++ b/meta/classes/kernel-devicetree.bbclass
@@ -6,7 +6,12 @@ python () {
d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle")
}
-FILES:${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
+# recursively search for devicetree files
+FILES:${KERNEL_PACKAGE_NAME}-devicetree = " \
+ /${KERNEL_DTBDEST}/**/*.dtb \
+ /${KERNEL_DTBDEST}/**/*.dtbo \
+"
+
FILES:${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
# Generate kernel+devicetree bundle
@@ -67,12 +72,16 @@ do_compile:append() {
}
do_install:append() {
+ install -d ${D}/${KERNEL_DTBDEST}
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
- dtb_ext=${dtb##*.}
- dtb_base_name=`basename $dtb .$dtb_ext`
dtb_path=`get_real_dtb_path_in_kernel "$dtb"`
- install -m 0644 $dtb_path ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext
+ if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then
+ dtb_ext=${dtb##*.}
+ dtb_base_name=`basename $dtb .$dtb_ext`
+ dtb=$dtb_base_name.$dtb_ext
+ fi
+ install -Dm 0644 $dtb_path ${D}/${KERNEL_DTBDEST}/$dtb
done
}
@@ -82,7 +91,10 @@ do_deploy:append() {
dtb_ext=${dtb##*.}
dtb_base_name=`basename $dtb .$dtb_ext`
install -d $deployDir
- install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
+ if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then
+ dtb=$dtb_base_name.$dtb_ext
+ fi
+ install -m 0644 ${D}/${KERNEL_DTBDEST}/$dtb $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
fi
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
index 7e09b075ff..27e17db951 100644
--- a/meta/classes/kernel-fitimage.bbclass
+++ b/meta/classes/kernel-fitimage.bbclass
@@ -67,6 +67,9 @@ FIT_CONF_PREFIX[doc] = "Prefix to use for FIT configuration node name"
FIT_SUPPORTED_INITRAMFS_FSTYPES ?= "cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst cpio.gz ext2.gz cpio"
+# Allow the user to select the default DTB for the FIT image when multiple DTBs exist.
+FIT_CONF_DEFAULT_DTB ?= ""
+
# Keys used to sign individually image nodes.
# The keys to sign image nodes must be different from those used to sign
# configuration nodes, otherwise the "required" property, from
@@ -148,7 +151,7 @@ fitimage_emit_section_kernel() {
kernel-$2 {
description = "Linux kernel";
data = /incbin/("$3");
- type = "kernel";
+ type = "${UBOOT_MKIMAGE_KERNEL_TYPE}";
arch = "${UBOOT_ARCH}";
os = "linux";
compression = "$4";
@@ -346,6 +349,7 @@ fitimage_emit_section_config() {
conf_csum="${FIT_HASH_ALG}"
conf_sign_algo="${FIT_SIGN_ALG}"
+ conf_padding_algo="${FIT_PAD_ALG}"
if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
fi
@@ -368,6 +372,7 @@ fitimage_emit_section_config() {
bootscr_line=""
setup_line=""
default_line=""
+ default_dtb_image="${FIT_CONF_DEFAULT_DTB}"
# conf node name is selected based on dtb ID if it is present,
# otherwise its selected based on kernel ID
@@ -410,7 +415,17 @@ fitimage_emit_section_config() {
# default node is selected based on dtb ID if it is present,
# otherwise its selected based on kernel ID
if [ -n "$dtb_image" ]; then
- default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
+ # Select the default node as the user-specified dtb when
+ # multiple dtbs exist.
+ if [ -n "$default_dtb_image" ]; then
+ if [ -s "${EXTERNAL_KERNEL_DEVICETREE}/$default_dtb_image" ]; then
+ default_line="default = \"${FIT_CONF_PREFIX}$default_dtb_image\";"
+ else
+ bbwarn "Couldn't find a valid user specified dtb in ${EXTERNAL_KERNEL_DEVICETREE}/$default_dtb_image"
+ fi
+ else
+ default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
+ fi
else
default_line="default = \"${FIT_CONF_PREFIX}$kernel_id\";"
fi
@@ -465,6 +480,7 @@ EOF
signature-1 {
algo = "$conf_csum,$conf_sign_algo";
key-name-hint = "$conf_sign_keyname";
+ padding = "$conf_padding_algo";
$sign_line
};
EOF
@@ -527,6 +543,10 @@ fitimage_assemble() {
fi
DTB=$(echo "$DTB" | tr '/' '_')
+
+ # Skip DTB if we've picked it up previously
+ echo "$DTBS" | tr ' ' '\n' | grep -xq "$DTB" && continue
+
DTBS="$DTBS $DTB"
fitimage_emit_section_dtb $1 $DTB $DTB_PATH
done
@@ -534,8 +554,13 @@ fitimage_assemble() {
if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
dtbcount=1
- for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" \( -name '*.dtb' -o -name '*.dtbo' \) -printf '%P\n' | sort); do
+ for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" -name '*.dtb' -printf '%P\n' | sort) \
+ $(find "${EXTERNAL_KERNEL_DEVICETREE}" -name '*.dtbo' -printf '%P\n' | sort); do
DTB=$(echo "$DTB" | tr '/' '_')
+
+ # Skip DTB/DTBO if we've picked it up previously
+ echo "$DTBS" | tr ' ' '\n' | grep -xq "$DTB" && continue
+
DTBS="$DTBS $DTB"
fitimage_emit_section_dtb $1 $DTB "${EXTERNAL_KERNEL_DEVICETREE}/$DTB"
done
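
Both DTB loops above gain the same duplicate guard: names are normalized (path separators become "_") and a DTB already present in $DTBS is skipped. The shell "grep -xq" test, expressed in Python:

    def collect_dtbs(candidates):
        seen = []
        for dtb in candidates:
            dtb = dtb.replace("/", "_")   # the tr '/' '_' step in the class
            if dtb in seen:               # picked up previously: skip
                continue
            seen.append(dtb)
        return seen

    assert collect_dtbs(["ti/am65x.dtb", "ti_am65x.dtb"]) == ["ti_am65x.dtb"]
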
diff --git a/meta/classes/kernel-uboot.bbclass b/meta/classes/kernel-uboot.bbclass
index 2facade818..1bc98e042d 100644
--- a/meta/classes/kernel-uboot.bbclass
+++ b/meta/classes/kernel-uboot.bbclass
@@ -2,6 +2,9 @@
FIT_KERNEL_COMP_ALG ?= "gzip"
FIT_KERNEL_COMP_ALG_EXTENSION ?= ".gz"
+# Kernel image type passed to mkimage (i.e. kernel kernel_noload...)
+UBOOT_MKIMAGE_KERNEL_TYPE ?= "kernel"
+
uboot_prep_kimage() {
if [ -e arch/${ARCH}/boot/compressed/vmlinux ]; then
vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
diff --git a/meta/classes/kernel-uimage.bbclass b/meta/classes/kernel-uimage.bbclass
index cedb4fa070..2e661ea916 100644
--- a/meta/classes/kernel-uimage.bbclass
+++ b/meta/classes/kernel-uimage.bbclass
@@ -30,6 +30,6 @@ do_uboot_mkimage() {
awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
fi
- uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage
+ uboot-mkimage -A ${UBOOT_ARCH} -O linux -T ${UBOOT_MKIMAGE_KERNEL_TYPE} -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage
rm -f linux.bin
}
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index afccffcf17..4f8e391428 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -206,7 +206,7 @@ do_kernel_metadata() {
# SRC_URI. If they were supplied, we convert them into include directives
# for the update part of the process
for f in ${feat_dirs}; do
- if [ -d "${WORKDIR}/$f/meta" ]; then
+ if [ -d "${WORKDIR}/$f/kernel-meta" ]; then
includes="$includes -I${WORKDIR}/$f/kernel-meta"
elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
includes="$includes -I${WORKDIR}/../oe-local-files/$f"
@@ -322,7 +322,11 @@ do_patch() {
meta_dir=$(kgit --meta)
(cd ${meta_dir}; ln -sf patch.queue series)
if [ -f "${meta_dir}/series" ]; then
- kgit-s2q --gen -v --patches .kernel-meta/
+ kgit_extra_args=""
+ if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
+ kgit_extra_args="--commit-sha author"
+ fi
+ kgit-s2q --gen -v $kgit_extra_args --patches .kernel-meta/
if [ $? -ne 0 ]; then
bberror "Could not apply patches for ${KMACHINE}."
bbfatal_log "Patch failures can be resolved in the linux source directory ${S})"
@@ -496,7 +500,7 @@ python do_config_analysis() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--blame', c], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
outfile = d.getVar( 'CONFIG_ANALYSIS_FILE' )
@@ -504,7 +508,7 @@ python do_config_analysis() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--summary', '--extended', '--sanity', c], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
outfile = d.getVar( 'CONFIG_AUDIT_FILE' )
@@ -565,7 +569,7 @@ python do_kernel_configcheck() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--mismatches', extra_params], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
if analysis:
outfile = "{}/{}/cfg/mismatch.txt".format( s, kmeta )
@@ -587,7 +591,7 @@ python do_kernel_configcheck() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--invalid', extra_params], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
if analysis:
outfile = "{}/{}/cfg/invalid.txt".format(s,kmeta)
@@ -606,7 +610,7 @@ python do_kernel_configcheck() {
try:
analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--sanity'], cwd=s, env=env ).decode('utf-8')
except subprocess.CalledProcessError as e:
- bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+ bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))
if analysis:
outfile = "{}/{}/cfg/redefinition.txt".format(s,kmeta)
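
Each analysis failure above now names the exact command: subprocess.CalledProcessError carries the argv in e.cmd. Demonstrated with a command that always fails:

    import subprocess

    try:
        subprocess.check_output(["false", "--mismatches"])
    except subprocess.CalledProcessError as e:
        print("config analysis failed when running '%s'" % " ".join(e.cmd))
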
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index 8299b394a7..dbd89057f3 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -176,13 +176,14 @@ do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILD
do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
python do_symlink_kernsrc () {
s = d.getVar("S")
- if s[-1] == '/':
- # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
- s=s[:-1]
kernsrc = d.getVar("STAGING_KERNEL_DIR")
if s != kernsrc:
bb.utils.mkdirhier(kernsrc)
bb.utils.remove(kernsrc, recurse=True)
+ if s[-1] == '/':
+ # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as
+ # directory name and fail
+ s = s[:-1]
if d.getVar("EXTERNALSRC"):
# With EXTERNALSRC S will not be wiped so we can symlink to it
os.symlink(s, kernsrc)
@@ -204,15 +205,14 @@ PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-firmware-.*"
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
-export KBUILD_BUILD_VERSION = "1"
-export KBUILD_BUILD_USER ?= "oe-user"
-export KBUILD_BUILD_HOST ?= "oe-host"
KERNEL_RELEASE ?= "${KERNEL_VERSION}"
# The directory where built kernel lies in the kernel tree
KERNEL_OUTPUT_DIR ?= "arch/${ARCH}/boot"
KERNEL_IMAGEDEST ?= "boot"
+KERNEL_DTBDEST ?= "${KERNEL_IMAGEDEST}"
+KERNEL_DTBVENDORED ?= "0"
#
# configuration
@@ -231,8 +231,9 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
# Some Linux kernel configurations need additional parameters on the command line
KERNEL_EXTRA_ARGS ?= ""
-EXTRA_OEMAKE = " HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
-EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}" PAHOLE=false"
+EXTRA_OEMAKE += ' CC="${KERNEL_CC}" LD="${KERNEL_LD}"'
+EXTRA_OEMAKE += ' HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}"'
+EXTRA_OEMAKE += ' HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}"'
KERNEL_ALT_IMAGETYPE ??= ""
@@ -360,6 +361,10 @@ kernel_do_compile() {
export KBUILD_BUILD_TIMESTAMP="$ts"
export KCONFIG_NOTIMESTAMP=1
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ else
+ ts=`LC_ALL=C date`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
# The $use_alternate_initrd is only set from
# do_bundle_initramfs() This variable is specifically for the
@@ -375,7 +380,7 @@ kernel_do_compile() {
use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
fi
for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
- oe_runmake ${typeformake} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
+ oe_runmake ${PARALLEL_MAKE} ${typeformake} ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
done
}
@@ -391,6 +396,13 @@ addtask transform_kernel after do_compile before do_install
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
+
+    # setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
+ export PKG_CONFIG_DIR="${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig"
+ export PKG_CONFIG_PATH="$PKG_CONFIG_DIR:${STAGING_DATADIR_NATIVE}/pkgconfig"
+ export PKG_CONFIG_LIBDIR="$PKG_CONFIG_DIR"
+ export PKG_CONFIG_SYSROOT_DIR=""
+
if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
# be set....
@@ -405,11 +417,15 @@ do_compile_kernelmodules() {
export KBUILD_BUILD_TIMESTAMP="$ts"
export KCONFIG_NOTIMESTAMP=1
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ else
+ ts=`LC_ALL=C date`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
- oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
+ oe_runmake -C ${B} ${PARALLEL_MAKE} modules ${KERNEL_EXTRA_ARGS}
- # Module.symvers gets updated during the
+ # Module.symvers gets updated during the
# building of the kernel modules. We need to
# update this in the shared workdir since some
# external kernel modules have a dependency on
@@ -433,10 +449,10 @@ kernel_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
- # If the kernel/ directory is empty remove it to prevent QA issues
- rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel"
+ rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+ rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
+ # Remove empty module directories to prevent QA issues
+ find "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel" -type d -empty -delete
else
bbnote "no modules to install"
fi
@@ -532,6 +548,7 @@ do_shared_workdir () {
#
echo "${KERNEL_VERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-abiversion
+ echo "${KERNEL_LOCALVERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-localversion
# Copy files required for module builds
cp System.map $kerneldir/System.map-${KERNEL_VERSION}
@@ -584,14 +601,28 @@ do_shared_workdir () {
cp tools/objtool/objtool ${kerneldir}/tools/objtool/
fi
fi
+
+ # When building with CONFIG_MODVERSIONS=y and CONFIG_RANDSTRUCT=y we need
+ # to copy the build assets generated for the randstruct seed to
+ # STAGING_KERNEL_BUILDDIR, otherwise the out-of-tree modules build will
+ # generate those assets which will result in a different
+ # RANDSTRUCT_HASHED_SEED
+ if [ -d scripts/basic ]; then
+ mkdir -p ${kerneldir}/scripts
+ cp -r scripts/basic ${kerneldir}/scripts
+ fi
+
+ if [ -d scripts/gcc-plugins ]; then
+ mkdir -p ${kerneldir}/scripts
+ cp -r scripts/gcc-plugins ${kerneldir}/scripts
+ fi
+
}
# We don't need to stage anything, not the modules/firmware since those would clash with linux-firmware
-sysroot_stage_all () {
- :
-}
+SYSROOT_DIRS = ""
-KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} CC="${KERNEL_CC}" LD="${KERNEL_LD}" O=${B} olddefconfig || oe_runmake -C ${S} O=${B} CC="${KERNEL_CC}" LD="${KERNEL_LD}" oldnoconfig"
+KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} O=${B} olddefconfig || oe_runmake -C ${S} O=${B} oldnoconfig"
python check_oldest_kernel() {
oldest_kernel = d.getVar('OLDEST_KERNEL')
@@ -607,12 +638,31 @@ python check_oldest_kernel() {
check_oldest_kernel[vardepsexclude] += "OLDEST_KERNEL KERNEL_VERSION"
do_configure[prefuncs] += "check_oldest_kernel"
+KERNEL_LOCALVERSION ??= ""
+
+# 6.3+ requires the variable LOCALVERSION to be set to not get a "+" in
+# the local version. Having it empty means nothing will be added, and any
+# value will be appended to the local kernel version. This replaces the
+# use of .scmversion file for setting a localversion without using
+# the CONFIG_LOCALVERSION option.
+#
+# Note: This class saves the value of localversion to a file
+# so other recipes like make-mod-scripts can restore it via the
+# helper function get_kernellocalversion_file
+export LOCALVERSION="${KERNEL_LOCALVERSION}"
+
kernel_do_configure() {
# fixes extra + in /lib/modules/2.6.37+
# $ scripts/setlocalversion . => +
# $ make kernelversion => 2.6.37
# $ make kernelrelease => 2.6.37+
- touch ${B}/.scmversion ${S}/.scmversion
+ # See kernel-arch.bbclass for post v6.3 removal of the extra
+ # + in localversion. .scmversion is no longer used, and the
+ # variable LOCALVERSION must be used
+ if [ ! -e ${B}/.scmversion -a ! -e ${S}/.scmversion ]; then
+ echo ${KERNEL_LOCALVERSION} > ${B}/.scmversion
+ echo ${KERNEL_LOCALVERSION} > ${S}/.scmversion
+ fi
if [ "${S}" != "${B}" ] && [ -f "${S}/.config" ] && [ ! -f "${B}/.config" ]; then
mv "${S}/.config" "${B}/.config"
@@ -634,9 +684,10 @@ do_savedefconfig() {
do_savedefconfig[nostamp] = "1"
addtask savedefconfig after do_configure
-inherit cml1
+inherit cml1 pkgconfig
-KCONFIG_CONFIG_COMMAND:append = " PAHOLE=false LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
+# Need LD, HOSTLDFLAGS and more for config operations
+KCONFIG_CONFIG_COMMAND:append = " ${EXTRA_OEMAKE}"
EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
@@ -649,6 +700,7 @@ FILES:${KERNEL_PACKAGE_NAME}-image = ""
FILES:${KERNEL_PACKAGE_NAME}-dev = "/${KERNEL_IMAGEDEST}/System.map* /${KERNEL_IMAGEDEST}/Module.symvers* /${KERNEL_IMAGEDEST}/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
FILES:${KERNEL_PACKAGE_NAME}-vmlinux = "/${KERNEL_IMAGEDEST}/vmlinux-${KERNEL_VERSION_NAME}"
FILES:${KERNEL_PACKAGE_NAME}-modules = ""
+FILES:${KERNEL_PACKAGE_NAME}-dbg = "/usr/lib/debug /usr/src/debug"
RDEPENDS:${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base (= ${EXTENDPKGV})"
# Allow machines to override this dependency if kernel image files are
# not wanted in images as standard
diff --git a/meta/classes/kernelsrc.bbclass b/meta/classes/kernelsrc.bbclass
index a951ba3325..a79bf18b09 100644
--- a/meta/classes/kernelsrc.bbclass
+++ b/meta/classes/kernelsrc.bbclass
@@ -5,6 +5,7 @@ do_patch[depends] += "virtual/kernel:do_shared_workdir"
do_patch[noexec] = "1"
do_package[depends] += "virtual/kernel:do_populate_sysroot"
KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
+LOCAL_VERSION = "${@get_kernellocalversion_file("${STAGING_KERNEL_BUILDDIR}")}"
inherit linux-kernel-base
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index 13ef8cdc0d..baab8fc9a9 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -45,6 +45,7 @@ PACKAGE_NO_GCONV ?= "0"
OVERRIDES:append = ":${TARGET_ARCH}-${TARGET_OS}"
locale_base_postinst_ontarget() {
+mkdir ${libdir}/locale
localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
}
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index 4ebfc4fb92..b92838c030 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -223,7 +223,7 @@ def find_license_files(d):
bb.fatal('%s: %s' % (d.getVar('PF'), exc))
except SyntaxError:
oe.qa.handle_error("license-syntax",
- "%s: Failed to parse it's LICENSE field." % (d.getVar('PF')), d)
+ "%s: Failed to parse LICENSE: %s" % (d.getVar('PF'), d.getVar('LICENSE')), d)
# Add files from LIC_FILES_CHKSUM to list of license files
lic_chksum_paths = defaultdict(OrderedDict)
for path, data in sorted(lic_chksums.items()):
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
index 3213ea758e..1c06a02951 100644
--- a/meta/classes/license_image.bbclass
+++ b/meta/classes/license_image.bbclass
@@ -229,7 +229,7 @@ def get_deployed_dependencies(d):
deploy = {}
# Get all the dependencies for the current task (rootfs).
taskdata = d.getVar("BB_TASKDEPDATA", False)
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
depends = list(set([dep[0] for dep
in list(taskdata.values())
if not dep[0].endswith("-native") and not dep[0] == pn]))
diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass
index ba59222c24..0e2a4a4abe 100644
--- a/meta/classes/linux-kernel-base.bbclass
+++ b/meta/classes/linux-kernel-base.bbclass
@@ -33,9 +33,24 @@ def get_kernelversion_file(p):
except IOError:
return None
+def get_kernellocalversion_file(p):
+ fn = p + '/kernel-localversion'
+
+ try:
+ with open(fn, 'r') as f:
+ return f.readlines()[0].strip()
+ except IOError:
+ return ""
+
+ return ""
+
def linux_module_packages(s, d):
suffix = ""
return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
+export KBUILD_BUILD_VERSION = "1"
+export KBUILD_BUILD_USER ?= "oe-user"
+export KBUILD_BUILD_HOST ?= "oe-host"
+
# that's all
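A minimal usage sketch for the helper added above (the staging directory path is a placeholder):

    # Sketch: restore the saved localversion the way a consumer such as
    # make-mod-scripts would. Returns the first line of the
    # kernel-localversion file, or "" if the file does not exist yet.
    localversion = get_kernellocalversion_file("/path/to/work-shared/kernel-build-artifacts")
    print(localversion)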
diff --git a/meta/classes/meson.bbclass b/meta/classes/meson.bbclass
index 19b54e0fdc..fb6660c1a2 100644
--- a/meta/classes/meson.bbclass
+++ b/meta/classes/meson.bbclass
@@ -105,6 +105,7 @@ nm = ${@meson_array('BUILD_NM', d)}
strip = ${@meson_array('BUILD_STRIP', d)}
readelf = ${@meson_array('BUILD_READELF', d)}
objcopy = ${@meson_array('BUILD_OBJCOPY', d)}
+llvm-config = '${STAGING_BINDIR_NATIVE}/llvm-config'
pkgconfig = 'pkg-config-native'
${@rust_tool(d, "BUILD_SYS")}
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
index ffdccff5fb..3720c00ae5 100644
--- a/meta/classes/mirrors.bbclass
+++ b/meta/classes/mirrors.bbclass
@@ -61,8 +61,7 @@ osc://.*/.* http://sources.openembedded.org/ \
https?://.*/.* http://sources.openembedded.org/ \
ftp://.*/.* http://sources.openembedded.org/ \
npm://.*/?.* http://sources.openembedded.org/ \
-${CPAN_MIRROR} http://cpan.metacpan.org/ \
-${CPAN_MIRROR} http://search.cpan.org/CPAN/ \
+${CPAN_MIRROR} https://cpan.metacpan.org/ \
https?://downloads.yoctoproject.org/releases/uninative/ https://mirrors.kernel.org/yocto/uninative/ \
https?://downloads.yoctoproject.org/mirror/sources/ https://mirrors.kernel.org/yocto-sources/ \
"
@@ -84,6 +83,7 @@ BB_GIT_SHALLOW:pn-binutils-cross-${TARGET_ARCH} = "1"
BB_GIT_SHALLOW:pn-binutils-cross-canadian-${TRANSLATED_TARGET_ARCH} = "1"
BB_GIT_SHALLOW:pn-binutils-cross-testsuite = "1"
BB_GIT_SHALLOW:pn-binutils-crosssdk-${SDK_SYS} = "1"
+BB_GIT_SHALLOW:pn-binutils-native = "1"
BB_GIT_SHALLOW:pn-glibc = "1"
PREMIRRORS += "git://sourceware.org/git/glibc.git https://downloads.yoctoproject.org/mirror/sources/ \
git://sourceware.org/git/binutils-gdb.git https://downloads.yoctoproject.org/mirror/sources/"
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
index 27bd69ff33..5b2fde8144 100644
--- a/meta/classes/module-base.bbclass
+++ b/meta/classes/module-base.bbclass
@@ -14,6 +14,7 @@ export CROSS_COMPILE = "${TARGET_PREFIX}"
export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}"
export KERNEL_VERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
+export LOCALVERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-localversion')}"
KERNEL_OBJECT_SUFFIX = ".ko"
# kernel modules are generally machine specific
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index 5859ca8d21..a0be559970 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -45,6 +45,7 @@ python multilib_virtclass_handler () {
e.data.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot")
e.data.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot")
e.data.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot")
+ e.data.setVar("RECIPE_SYSROOT_MANIFEST_SUBDIR", "nativesdk-" + variant)
e.data.setVar("MLPREFIX", variant + "-")
override = ":virtclass-multilib-" + variant
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
index fc7422c5d7..56726301bd 100644
--- a/meta/classes/native.bbclass
+++ b/meta/classes/native.bbclass
@@ -23,6 +23,8 @@ TARGET_CFLAGS = "${BUILD_CFLAGS}"
TARGET_CXXFLAGS = "${BUILD_CXXFLAGS}"
TARGET_LDFLAGS = "${BUILD_LDFLAGS}"
TARGET_FPU = ""
+TUNE_FEATURES = ""
+ABIEXTENSION = ""
HOST_ARCH = "${BUILD_ARCH}"
HOST_OS = "${BUILD_OS}"
@@ -153,7 +155,7 @@ python native_virtclass_handler () {
newdeps.append(dep.replace(pn, bpn) + "-native")
else:
newdeps.append(dep)
- d.setVar(varname, " ".join(newdeps), parsing=True)
+ d.setVar(varname, " ".join(newdeps))
map_dependencies("DEPENDS", e.data, selfref=False)
for pkg in e.data.getVar("PACKAGES", False).split():
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index f8e9607513..e46739e325 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -55,6 +55,7 @@ TARGET_CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
TARGET_LDFLAGS = "${BUILDSDK_LDFLAGS}"
TARGET_FPU = ""
EXTRA_OECONF_GCC_FLOAT = ""
+TUNE_FEATURES = ""
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
CFLAGS = "${BUILDSDK_CFLAGS}"
diff --git a/meta/classes/npm.bbclass b/meta/classes/npm.bbclass
index ba50fcac20..45e6b4fac7 100644
--- a/meta/classes/npm.bbclass
+++ b/meta/classes/npm.bbclass
@@ -19,7 +19,7 @@
inherit python3native
-DEPENDS:prepend = "nodejs-native "
+DEPENDS:prepend = "nodejs-native nodejs-oe-cache-native "
RDEPENDS:${PN}:append:class-target = " nodejs"
EXTRA_OENPM = ""
@@ -46,6 +46,7 @@ NPM_ARCH ?= "${@npm_target_arch_map(d.getVar("TARGET_ARCH"))}"
NPM_PACKAGE = "${WORKDIR}/npm-package"
NPM_CACHE = "${WORKDIR}/npm-cache"
NPM_BUILD = "${WORKDIR}/npm-build"
+NPM_REGISTRY = "${WORKDIR}/npm-registry"
def npm_global_configs(d):
"""Get the npm global configuration"""
@@ -57,13 +58,36 @@ def npm_global_configs(d):
configs.append(("cache", d.getVar("NPM_CACHE")))
return configs
+## 'npm pack' runs the 'prepare' and 'prepack' scripts. Support for
+## 'ignore-scripts', which prevented this behavior, was removed
+## from nodejs 16. Use a simple 'tar' invocation instead.
def npm_pack(env, srcdir, workdir):
- """Run 'npm pack' on a specified directory"""
- import shlex
- cmd = "npm pack %s" % shlex.quote(srcdir)
- args = [("ignore-scripts", "true")]
- tarball = env.run(cmd, args=args, workdir=workdir).strip("\n")
- return os.path.join(workdir, tarball)
+ """Emulate 'npm pack' on a specified directory"""
+ import subprocess
+ import os
+ import json
+
+ src = os.path.join(srcdir, 'package.json')
+ with open(src) as f:
+ j = json.load(f)
+
+ # base does not really matter and is for documentation purposes
+ # only. But the 'version' part must exist because other parts of
+ # the bbclass rely on it.
+ base = j['name'].split('/')[-1]
+ tarball = os.path.join(workdir, "%s-%s.tgz" % (base, j['version']))
+
+ # TODO: real 'npm pack' does not include directories while 'tar'
+ # does. But this does not seem to matter...
+ subprocess.run(['tar', 'czf', tarball,
+ '--exclude', './node-modules',
+ '--exclude-vcs',
+ '--transform', r's,^\./,package/,',
+ '--mtime', '1985-10-26T08:15:00.000Z',
+ '.'],
+ check = True, cwd = srcdir)
+
+ return (tarball, j)
python npm_do_configure() {
"""
@@ -86,27 +110,24 @@ python npm_do_configure() {
from bb.fetch2.npm import npm_unpack
from bb.fetch2.npmsw import foreach_dependencies
from bb.progress import OutOfProgressHandler
+ from oe.npm_registry import NpmRegistry
bb.utils.remove(d.getVar("NPM_CACHE"), recurse=True)
bb.utils.remove(d.getVar("NPM_PACKAGE"), recurse=True)
env = NpmEnvironment(d, configs=npm_global_configs(d))
+ registry = NpmRegistry(d.getVar('NPM_REGISTRY'), d.getVar('NPM_CACHE'))
- def _npm_cache_add(tarball):
- """Run 'npm cache add' for a specified tarball"""
- cmd = "npm cache add %s" % shlex.quote(tarball)
- env.run(cmd)
+ def _npm_cache_add(tarball, pkg):
+ """Add tarball to local registry and register it in the
+ cache"""
+ registry.add_pkg(tarball, pkg)
def _npm_integrity(tarball):
"""Return the npm integrity of a specified tarball"""
sha512 = bb.utils.sha512_file(tarball)
return "sha512-" + base64.b64encode(bytes.fromhex(sha512)).decode()
- def _npm_version(tarball):
- """Return the version of a specified tarball"""
- regex = r"-(\d+\.\d+\.\d+(-.*)?(\+.*)?)\.tgz"
- return re.search(regex, tarball).group(1)
-
def _npmsw_dependency_dict(orig, deptree):
"""
Return the sub dictionary in the 'orig' dictionary corresponding to the
@@ -163,11 +184,11 @@ python npm_do_configure() {
with tempfile.TemporaryDirectory() as tmpdir:
# Add the dependency to the npm cache
destdir = os.path.join(d.getVar("S"), destsuffix)
- tarball = npm_pack(env, destdir, tmpdir)
- _npm_cache_add(tarball)
+ (tarball, pkg) = npm_pack(env, destdir, tmpdir)
+ _npm_cache_add(tarball, pkg)
# Add its signature to the cached shrinkwrap
dep = _npmsw_dependency_dict(cached_shrinkwrap, deptree)
- dep["version"] = _npm_version(tarball)
+ dep["version"] = pkg['version']
dep["integrity"] = _npm_integrity(tarball)
if params.get("dev", False):
dep["dev"] = True
@@ -184,7 +205,7 @@ python npm_do_configure() {
# Configure the main package
with tempfile.TemporaryDirectory() as tmpdir:
- tarball = npm_pack(env, d.getVar("S"), tmpdir)
+ (tarball, _) = npm_pack(env, d.getVar("S"), tmpdir)
npm_unpack(tarball, d.getVar("NPM_PACKAGE"), d)
# Configure the cached manifest file and cached shrinkwrap file
@@ -257,7 +278,7 @@ python npm_do_compile() {
args.append(("build-from-source", "true"))
# Pack and install the main package
- tarball = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
+ (tarball, _) = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
cmd = "npm install %s %s" % (shlex.quote(tarball), d.getVar("EXTRA_OENPM"))
env.run(cmd, args=args)
}
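For reference, the integrity string written into the cached shrinkwrap follows npm's SRI format; a standalone sketch of the same computation as _npm_integrity() above:

    # Sketch: "sha512-" + base64 of the raw (not hex) sha512 digest.
    import base64
    import hashlib

    def npm_integrity(tarball_path):
        with open(tarball_path, "rb") as f:
            digest = hashlib.sha512(f.read()).digest()
        return "sha512-" + base64.b64encode(digest).decode()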
diff --git a/meta/classes/overlayfs-etc.bbclass b/meta/classes/overlayfs-etc.bbclass
index 91afee695c..40116e4c6e 100644
--- a/meta/classes/overlayfs-etc.bbclass
+++ b/meta/classes/overlayfs-etc.bbclass
@@ -34,6 +34,7 @@ OVERLAYFS_ETC_DEVICE ??= ""
OVERLAYFS_ETC_USE_ORIG_INIT_NAME ??= "1"
OVERLAYFS_ETC_MOUNT_OPTIONS ??= "defaults"
OVERLAYFS_ETC_INIT_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-etc-preinit.sh.in"
+OVERLAYFS_ETC_EXPOSE_LOWER ??= "0"
python create_overlayfs_etc_preinit() {
overlayEtcMountPoint = d.getVar("OVERLAYFS_ETC_MOUNT_POINT")
@@ -54,13 +55,15 @@ python create_overlayfs_etc_preinit() {
preinitPath = oe.path.join(d.getVar("IMAGE_ROOTFS"), d.getVar("base_sbindir"), "preinit")
initBaseName = oe.path.join(d.getVar("base_sbindir"), "init")
origInitNameSuffix = ".orig"
+ exposeLower = oe.types.boolean(d.getVar('OVERLAYFS_ETC_EXPOSE_LOWER'))
args = {
'OVERLAYFS_ETC_MOUNT_POINT': overlayEtcMountPoint,
'OVERLAYFS_ETC_MOUNT_OPTIONS': d.getVar('OVERLAYFS_ETC_MOUNT_OPTIONS'),
'OVERLAYFS_ETC_FSTYPE': overlayEtcFsType,
'OVERLAYFS_ETC_DEVICE': overlayEtcDevice,
- 'SBIN_INIT_NAME': initBaseName + origInitNameSuffix if useOrigInit else initBaseName
+ 'SBIN_INIT_NAME': initBaseName + origInitNameSuffix if useOrigInit else initBaseName,
+ 'OVERLAYFS_ETC_EXPOSE_LOWER': "true" if exposeLower else "false"
}
if useOrigInit:
diff --git a/meta/classes/overlayfs.bbclass b/meta/classes/overlayfs.bbclass
index f7069edd41..c3564b6ec1 100644
--- a/meta/classes/overlayfs.bbclass
+++ b/meta/classes/overlayfs.bbclass
@@ -96,7 +96,11 @@ python do_create_overlayfs_units() {
overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")
for mountPoint in overlayMountPoints:
bb.debug(1, "Process variable flag %s" % mountPoint)
- for lower in d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint).split():
+ lowerList = d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint)
+ if not lowerList:
+ bb.note("No mount points defined for %s flag, skipping" % (mountPoint))
+ continue
+ for lower in lowerList.split():
bb.debug(1, "Prepare mount unit for %s with data mount point %s" %
(lower, d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint)))
prepareUnits(d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint), lower)
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
index ef972740ce..30c7ccd8e7 100644
--- a/meta/classes/own-mirrors.bbclass
+++ b/meta/classes/own-mirrors.bbclass
@@ -11,4 +11,5 @@ https?://.*/.* ${SOURCE_MIRROR_URL} \
ftp://.*/.* ${SOURCE_MIRROR_URL} \
npm://.*/?.* ${SOURCE_MIRROR_URL} \
s3://.*/.* ${SOURCE_MIRROR_URL} \
+crate://.*/.* ${SOURCE_MIRROR_URL} \
"
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index 62050a18b8..67351b2510 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -262,7 +262,7 @@ def files_from_filevars(filevars):
f = '.' + f
if not f.startswith("./"):
f = './' + f
- globbed = glob.glob(f)
+ globbed = glob.glob(f, recursive=True)
if globbed:
if [ f ] != globbed:
files += globbed
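The recursive=True flag is what allows "**" patterns in FILES to match across directory levels; a small sketch (the pattern is illustrative):

    # Sketch: with recursive=True, "**" matches zero or more directory
    # levels; without it, "**" behaves like a plain "*".
    import glob

    matches = glob.glob("./usr/lib/**/*.so", recursive=True)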
@@ -382,6 +382,11 @@ def splitdebuginfo(file, dvar, dv, d):
debugfile = dvar + dest
sources = []
+ if file.endswith(".ko") and file.find("/lib/modules/") != -1:
+ if oe.package.is_kernel_module_signed(file):
+ bb.debug(1, "Skip strip on signed module %s" % file)
+ return (file, sources)
+
# Split the file...
bb.utils.mkdirhier(os.path.dirname(debugfile))
#bb.note("Split %s -> %s" % (file, debugfile))
@@ -479,16 +484,31 @@ def inject_minidebuginfo(file, dvar, dv, d):
bb.debug(1, 'ELF file {} has no debuginfo, skipping minidebuginfo injection'.format(file))
return
+ # minidebuginfo does not make sense for ELF objects other than
+ # executables and shared libraries, so skip minidebuginfo
+ # generation for objects such as kernel modules.
+ for line in subprocess.check_output([readelf, '-h', debugfile], universal_newlines=True).splitlines():
+ if not line.strip().startswith("Type:"):
+ continue
+ elftype = line.split(":")[1].strip()
+ if not any(elftype.startswith(i) for i in ["EXEC", "DYN"]):
+ bb.debug(1, 'ELF file {} is not executable/shared, skipping minidebuginfo injection'.format(file))
+ return
+ break
+
# Find non-allocated PROGBITS, NOTE, and NOBITS sections in the debuginfo.
# We will exclude all of these from minidebuginfo to save space.
remove_section_names = []
for line in subprocess.check_output([readelf, '-W', '-S', debugfile], universal_newlines=True).splitlines():
- fields = line.split()
- if len(fields) < 8:
+ # strip the leading " [ 1]" section index to allow splitting on space
+ if ']' not in line:
+ continue
+ fields = line[line.index(']') + 1:].split()
+ if len(fields) < 7:
continue
name = fields[0]
type = fields[1]
- flags = fields[7]
+ flags = fields[6]
# .debug_ sections will be removed by objcopy -S so no need to explicitly remove them
if name.startswith('.debug_'):
continue
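To see why the index prefix must be stripped before splitting, consider a typical "readelf -W -S" row (layout assumed from binutils output):

    # Sketch: drop the " [ 1]" prefix, then split on whitespace so that
    # name, type and flags land at stable positions.
    line = "  [ 1] .text  PROGBITS  0000000000001000 001000 000100 00 AX 0 0 16"
    fields = line[line.index(']') + 1:].split()
    name, elftype, flags = fields[0], fields[1], fields[6]
    assert (name, elftype, flags) == (".text", "PROGBITS", "AX")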
@@ -553,13 +573,25 @@ def copydebugsources(debugsrcdir, sources, d):
strip = d.getVar("STRIP")
objcopy = d.getVar("OBJCOPY")
workdir = d.getVar("WORKDIR")
+ sdir = d.getVar("S")
+ sparentdir = os.path.dirname(os.path.dirname(sdir))
+ sbasedir = os.path.basename(os.path.dirname(sdir)) + "/" + os.path.basename(sdir)
workparentdir = os.path.dirname(os.path.dirname(workdir))
workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
+ # If S isn't based on WORKDIR we can infer our sources are located elsewhere,
+ # e.g. using externalsrc; use S as the base for our dirs
+ if workdir in sdir or 'work-shared' in sdir:
+ basedir = workbasedir
+ parentdir = workparentdir
+ else:
+ basedir = sbasedir
+ parentdir = sparentdir
+
# If build path exists in sourcefile, it means toolchain did not use
# -fdebug-prefix-map to compile
if checkbuildpath(sourcefile, d):
- localsrc_prefix = workparentdir + "/"
+ localsrc_prefix = parentdir + "/"
else:
localsrc_prefix = "/usr/src/debug/"
@@ -581,7 +613,7 @@ def copydebugsources(debugsrcdir, sources, d):
processdebugsrc += "sed 's#%s##g' | "
processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
- cmd = processdebugsrc % (sourcefile, workbasedir, localsrc_prefix, workparentdir, dvar, debugsrcdir)
+ cmd = processdebugsrc % (sourcefile, basedir, localsrc_prefix, parentdir, dvar, debugsrcdir)
try:
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
@@ -591,9 +623,29 @@ def copydebugsources(debugsrcdir, sources, d):
# cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced.
# Work around this by manually finding and copying any symbolic links that made it through.
cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s')" % \
- (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
+ (dvar, debugsrcdir, dvar, debugsrcdir, parentdir, dvar, debugsrcdir)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+ # debugsources.list may be polluted from the host if we used externalsrc;
+ # cpio uses copy-pass and may have just created a directory structure
+ # matching the one from the host. If that's the case, move those files to
+ # debugsrcdir to avoid host contamination.
+ # The empty dir structure will be deleted in the next step.
+
+ # Same check as above for externalsrc
+ if workdir not in sdir:
+ if os.path.exists(dvar + debugsrcdir + sdir):
+ # Special case for /build since we need to move into
+ # /usr/src/debug/build so rename sdir to build.build
+ if sdir == "/build" or sdir.find("/build/") == 0:
+ cmd = "mv %s%s%s %s%s%s" % (dvar, debugsrcdir, "/build", dvar, debugsrcdir, "/build.build")
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+ sdir = sdir.replace("/build", "/build.build", 1)
+
+ cmd = "mv %s%s%s/* %s%s" % (dvar, debugsrcdir, sdir, dvar,debugsrcdir)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
# The copy by cpio may have resulted in some empty directories! Remove these
cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
@@ -2126,18 +2178,18 @@ python package_do_pkgconfig () {
with open(file, 'r') as f:
lines = f.readlines()
for l in lines:
- m = var_re.match(l)
- if m:
- name = m.group(1)
- val = m.group(2)
- pd.setVar(name, pd.expand(val))
- continue
m = field_re.match(l)
if m:
hdr = m.group(1)
exp = pd.expand(m.group(2))
if hdr == 'Requires':
pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
+ continue
+ m = var_re.match(l)
+ if m:
+ name = m.group(1)
+ val = m.group(2)
+ pd.setVar(name, pd.expand(val))
for pkg in packages.split():
pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index e9ff1f7e65..f403af5343 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -4,6 +4,7 @@ IMAGE_PKGTYPE ?= "rpm"
RPM="rpm"
RPMBUILD="rpmbuild"
+RPMBUILD_COMPMODE ?= "${@'w19T%d.zstdio' % int(d.getVar('ZSTD_THREADS'))}"
PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
@@ -193,8 +194,6 @@ python write_specfile () {
if path.endswith("DEBIAN") or path.endswith("CONTROL"):
continue
path = path.replace("%", "%%%%%%%%")
- path = path.replace("[", "?")
- path = path.replace("]", "?")
# Treat all symlinks to directories as normal files.
# os.walk() lists them as directories.
@@ -214,8 +213,6 @@ python write_specfile () {
if dir == "CONTROL" or dir == "DEBIAN":
continue
dir = dir.replace("%", "%%%%%%%%")
- dir = dir.replace("[", "?")
- dir = dir.replace("]", "?")
# All packages own the directories their files are in...
target.append('%dir "' + path + '/' + dir + '"')
else:
@@ -230,8 +227,6 @@ python write_specfile () {
if file == "CONTROL" or file == "DEBIAN":
continue
file = file.replace("%", "%%%%%%%%")
- file = file.replace("[", "?")
- file = file.replace("]", "?")
if conffiles.count(path + '/' + file):
target.append('%config "' + path + '/' + file + '"')
else:
@@ -658,6 +653,7 @@ python do_package_rpm () {
# Setup the rpmbuild arguments...
rpmbuild = d.getVar('RPMBUILD')
+ rpmbuild_compmode = d.getVar('RPMBUILD_COMPMODE')
targetsys = d.getVar('TARGET_SYS')
targetvendor = d.getVar('HOST_VENDOR')
@@ -684,8 +680,8 @@ python do_package_rpm () {
cmd = cmd + " --define '_use_internal_dependency_generator 0'"
cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
cmd = cmd + " --define '_build_id_links none'"
- cmd = cmd + " --define '_binary_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
- cmd = cmd + " --define '_source_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
+ cmd = cmd + " --define '_source_payload %s'" % rpmbuild_compmode
+ cmd = cmd + " --define '_binary_payload %s'" % rpmbuild_compmode
cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
cmd = cmd + " --define '_buildhost reproducible'"
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
index 16f929bf59..fb00460172 100644
--- a/meta/classes/populate_sdk_base.bbclass
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -53,6 +53,8 @@ TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
SDK_ARCHIVE_TYPE ?= "tar.xz"
SDK_XZ_COMPRESSION_LEVEL ?= "-9"
SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
+SDK_ZIP_OPTIONS ?= "-y"
+
# To support different sdk type according to SDK_ARCHIVE_TYPE, now support zip and tar.xz
python () {
@@ -60,7 +62,7 @@ python () {
d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
# SDK_ARCHIVE_CMD used to generate archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from input dir ${SDK_OUTPUT}/${SDKPATH} to output dir ${SDKDEPLOYDIR}
# recommended to cd into the input dir first to avoid archiving with buildpath
- d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
+ d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r ${SDK_ZIP_OPTIONS} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
else:
d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
index e2019f9bbf..bdd86863c6 100644
--- a/meta/classes/populate_sdk_ext.bbclass
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -114,7 +114,7 @@ python write_host_sdk_ext_manifest () {
f.write("%s %s %s\n" % (info[1], info[2], info[3]))
}
-SDK_POSTPROCESS_COMMAND:append:task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
+SDK_POSTPROCESS_COMMAND:append:task-populate-sdk-ext = " write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
SDK_TITLE:task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
@@ -227,7 +227,7 @@ python copy_buildsystem () {
# Write out config file for devtool
import configparser
- config = configparser.SafeConfigParser()
+ config = configparser.ConfigParser()
config.add_section('General')
config.set('General', 'bitbake_subdir', conf_bbpath)
config.set('General', 'init_path', conf_initpath)
@@ -363,7 +363,8 @@ python copy_buildsystem () {
f.write('BUILDCFG_HEADER = ""\n\n')
# Write METADATA_REVISION
- f.write('METADATA_REVISION = "%s"\n\n' % d.getVar('METADATA_REVISION'))
+ # Needs a distro override so it can override the value set in the bbclass code (parsed later than local.conf)
+ f.write('METADATA_REVISION:%s = "%s"\n\n' % (d.getVar('DISTRO'), d.getVar('METADATA_REVISION')))
f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
f.write('WITHIN_EXT_SDK = "1"\n\n')
@@ -714,7 +715,7 @@ sdk_ext_postinst() {
# A bit of another hack, but we need this in the path only for devtool
# so put it at the end of $PATH.
- echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script
+ echo "export PATH=\"$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH\"" >> $env_setup_script
echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass
index ad8489902a..f2ebe94ca4 100644
--- a/meta/classes/qemuboot.bbclass
+++ b/meta/classes/qemuboot.bbclass
@@ -7,6 +7,7 @@
# QB_OPT_APPEND: options to append to qemu, e.g., "-device usb-mouse"
#
# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
+# e.g., "bzImage-initramfs-qemux86-64.bin" if INITRAMFS_IMAGE_BUNDLE is set to 1.
#
# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
#
@@ -87,7 +88,7 @@
QB_MEM ?= "-m 256"
QB_SMP ?= ""
QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
-QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
+QB_DEFAULT_KERNEL ?= "${@bb.utils.contains("INITRAMFS_IMAGE_BUNDLE", "1", "${KERNEL_IMAGETYPE}-${INITRAMFS_LINK_NAME}.bin", "${KERNEL_IMAGETYPE}", d)}"
QB_DEFAULT_FSTYPE ?= "ext4"
QB_RNG ?= "-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0"
QB_OPT_APPEND ?= ""
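The bb.utils.contains() expression above reduces to a simple selection; a sketch using the values from the comment:

    # Sketch of the QB_DEFAULT_KERNEL selection.
    def default_kernel(initramfs_image_bundle, kernel_imagetype, initramfs_link_name):
        if initramfs_image_bundle == "1":
            return "%s-%s.bin" % (kernel_imagetype, initramfs_link_name)
        return kernel_imagetype

    assert default_kernel("1", "bzImage", "initramfs-qemux86-64") == "bzImage-initramfs-qemux86-64.bin"
    assert default_kernel("0", "bzImage", "initramfs-qemux86-64") == "bzImage"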
diff --git a/meta/classes/recipe_sanity.bbclass b/meta/classes/recipe_sanity.bbclass
index 7fa4a849ea..df6e9a7db9 100644
--- a/meta/classes/recipe_sanity.bbclass
+++ b/meta/classes/recipe_sanity.bbclass
@@ -10,7 +10,7 @@ def bad_runtime_vars(cfgdata, d):
for var in d.getVar("__recipe_sanity_badruntimevars").split():
val = d.getVar(var, False)
if val and val != cfgdata.get(var):
- __note("%s should be %s_${PN}" % (var, var), d)
+ __note("%s should be %s:${PN}" % (var, var), d)
__recipe_sanity_reqvars = "DESCRIPTION"
__recipe_sanity_reqdiffvars = ""
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index 5f12d5aaeb..f7ededff26 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -27,6 +27,13 @@ BB_SCHEDULER ?= "completion"
BB_TASK_IONICE_LEVEL:task-rm_work = "3.0"
do_rm_work () {
+ # Force using the HOSTTOOLS 'rm' - otherwise the SYSROOT_NATIVE 'rm' can be selected depending on PATH
+ # Avoids a race condition when accessing 'rm' while deleting WORKDIR folders at the end of this function
+ RM_BIN="$(PATH=${HOSTTOOLS_DIR} command -v rm)"
+ if [ -z "${RM_BIN}" ]; then
+ bbfatal "Binary 'rm' not found in HOSTTOOLS_DIR, cannot remove WORKDIR data."
+ fi
+
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
for p in ${RM_WORK_EXCLUDE}; do
if [ "$p" = "${PN}" ]; then
@@ -73,7 +80,7 @@ do_rm_work () {
# sstate version since otherwise we'd need to leave 'plaindirs' around
# such as 'packages' and 'packages-split' and these can be large. No end
# of chain tasks depend directly on do_package anymore.
- rm -f -- $i;
+ "${RM_BIN}" -f -- $i;
;;
*_setscene*)
# Skip stamps which are already setscene versions
@@ -90,7 +97,7 @@ do_rm_work () {
;;
esac
done
- rm -f -- $i
+ "${RM_BIN}" -f -- $i
esac
done
@@ -100,12 +107,14 @@ do_rm_work () {
# Retain only logs and other files in temp, safely ignore
# failures of removing pseudo folders on NFS2/3 servers.
if [ $dir = 'pseudo' ]; then
- rm -rf -- $dir 2> /dev/null || true
+ "${RM_BIN}" -rf -- $dir 2> /dev/null || true
elif ! echo "$excludes" | grep -q -w "$dir"; then
- rm -rf -- $dir
+ "${RM_BIN}" -rf -- $dir
fi
done
}
+do_rm_work[vardepsexclude] += "SSTATETASKS"
+
do_rm_work_all () {
:
}
@@ -172,7 +181,7 @@ python inject_rm_work() {
# other recipes and thus will typically run much later than completion of
# work in the recipe itself.
# In practice, addtask() here merely updates the dependencies.
- bb.build.addtask('do_rm_work', 'do_build', ' '.join(deps), d)
+ bb.build.addtask('do_rm_work', 'do_rm_work_all do_build', ' '.join(deps), d)
# Always update do_build_without_rm_work dependencies.
bb.build.addtask('do_build_without_rm_work', '', ' '.join(deps), d)
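The PATH pinning in do_rm_work has a direct Python analogue; a sketch (the hosttools path is a placeholder):

    # Sketch: resolve "rm" against HOSTTOOLS_DIR only, so the recipe
    # sysroot's own "rm" can never be picked up mid-removal.
    import shutil

    rm_bin = shutil.which("rm", path="/path/to/build/tmp/hosttools")
    if not rm_bin:
        raise RuntimeError("rm not found in HOSTTOOLS_DIR")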
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
index fc179613fb..f7517c66dc 100644
--- a/meta/classes/rootfs-postcommands.bbclass
+++ b/meta/classes/rootfs-postcommands.bbclass
@@ -14,7 +14,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'deb
# Create /etc/timestamp during image construction to give a reasonably sane default time setting
ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; "
-# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
+# Tweak files in /etc if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
# We also need to do the same for the kernel boot parameters,
@@ -103,20 +103,26 @@ read_only_rootfs_hook () {
# If we're using openssh and the /etc/ssh directory has no pre-generated keys,
# we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
# and the keys under /var/run/ssh.
- if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
- if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
- echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
- echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
- else
- echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
- echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ # If overlayfs-etc is used, this is not done, as /etc is treated as writable
+ # If stateless-rootfs is enabled, this is always done, as we don't want to save keys then
+ if ${@ 'true' if not bb.utils.contains('IMAGE_FEATURES', 'overlayfs-etc', True, False, d) or bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else 'false'}; then
+ if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
+ if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
+ echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ else
+ echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ fi
fi
- fi
- # Also tweak the key location for dropbear in the same way.
- if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
- if [ ! -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
- echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
+ # Also tweak the key location for dropbear in the same way.
+ if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
+ if [ ! -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
+ if ! grep -q "^DROPBEAR_RSAKEY_DIR=" ${IMAGE_ROOTFS}/etc/default/dropbear ; then
+ echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
+ fi
+ fi
fi
fi
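The inline Python guard added to read_only_rootfs_hook emits the literal shell word true or false at parse time; its truth table as a sketch:

    # Sketch: run the ssh/dropbear tweaks unless overlayfs-etc is in
    # IMAGE_FEATURES, but always run them for stateless-rootfs.
    def emit_guard(image_features):
        overlayfs_etc = "overlayfs-etc" in image_features
        stateless = "stateless-rootfs" in image_features
        return "true" if (not overlayfs_etc or stateless) else "false"

    assert emit_guard({"read-only-rootfs"}) == "true"
    assert emit_guard({"overlayfs-etc"}) == "false"
    assert emit_guard({"overlayfs-etc", "stateless-rootfs"}) == "true"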
@@ -305,7 +311,7 @@ rootfs_trim_schemas () {
}
rootfs_check_host_user_contaminated () {
- contaminated="${WORKDIR}/host-user-contaminated.txt"
+ contaminated="${S}/host-user-contaminated.txt"
HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index b1fac107d5..293e405f62 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -351,6 +351,7 @@ def check_connectivity(d):
if len(msg) == 0:
msg = "%s.\n" % err
msg += " Please ensure your host's network is configured correctly.\n"
+ msg += " Please ensure CONNECTIVITY_CHECK_URIS is correct and specified URIs are available.\n"
msg += " If your ISP or network is blocking the above URL,\n"
msg += " try with another domain name, for example by setting:\n"
msg += " CONNECTIVITY_CHECK_URIS = \"https://www.example.com/\""
@@ -497,6 +498,14 @@ def check_tar_version(sanity_data):
version = result.split()[3]
if bb.utils.vercmp_string_op(version, "1.28", "<"):
return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
+
+ try:
+ result = subprocess.check_output(["tar", "--help"], stderr=subprocess.STDOUT).decode('utf-8')
+ if "--xattrs" not in result:
+ return "Your tar doesn't support --xattrs, please use GNU tar.\n"
+ except subprocess.CalledProcessError as e:
+ return "Unable to execute tar --help, exit code %d\n%s\n" % (e.returncode, e.output)
+
return None
# We use git parameters and functionality only found in 1.7.8 or later
@@ -858,7 +867,7 @@ def check_sanity_everybuild(status, d):
mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
protocols = ['http', 'ftp', 'file', 'https', \
'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
- 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3', 'az', 'ftps']
+ 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3', 'az', 'ftps', 'crate']
for mirror_var in mirror_vars:
mirrors = (d.getVar(mirror_var) or '').replace('\\n', ' ').split()
@@ -990,13 +999,6 @@ def check_sanity(sanity_data):
if status.messages != "":
raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error)
-# Create a copy of the datastore and finalise it to ensure appends and
-# overrides are set - the datastore has yet to be finalised at ConfigParsed
-def copy_data(e):
- sanity_data = bb.data.createCopy(e.data)
- sanity_data.finalize()
- return sanity_data
-
addhandler config_reparse_eventhandler
config_reparse_eventhandler[eventmask] = "bb.event.ConfigParsed"
python config_reparse_eventhandler() {
@@ -1007,13 +1009,13 @@ addhandler check_sanity_eventhandler
check_sanity_eventhandler[eventmask] = "bb.event.SanityCheck bb.event.NetworkTest"
python check_sanity_eventhandler() {
if bb.event.getName(e) == "SanityCheck":
- sanity_data = copy_data(e)
+ sanity_data = bb.data.createCopy(e.data)
check_sanity(sanity_data)
if e.generateevents:
sanity_data.setVar("SANITY_USE_EVENTS", "1")
bb.event.fire(bb.event.SanityCheckPassed(), e.data)
elif bb.event.getName(e) == "NetworkTest":
- sanity_data = copy_data(e)
+ sanity_data = bb.data.createCopy(e.data)
if e.generateevents:
sanity_data.setVar("SANITY_USE_EVENTS", "1")
bb.event.fire(bb.event.NetworkTestFailed() if check_connectivity(sanity_data) else bb.event.NetworkTestPassed(), e.data)
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
index 80f8382107..ffe43bb7c9 100644
--- a/meta/classes/scons.bbclass
+++ b/meta/classes/scons.bbclass
@@ -3,7 +3,9 @@ inherit python3native
DEPENDS += "python3-scons-native"
EXTRA_OESCONS ?= ""
-
+# The value below is derived from $(getconf ARG_MAX)
+SCONS_MAXLINELENGTH ?= "MAXLINELENGTH=2097152"
+EXTRA_OESCONS:append = " ${SCONS_MAXLINELENGTH}"
do_configure() {
if [ -n "${CONFIGURESTAMPFILE}" -a "${S}" = "${B}" ]; then
if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
@@ -25,4 +27,8 @@ scons_do_install() {
die "scons install execution failed."
}
+do_configure[vardepsexclude] = "SCONS_MAXLINELENGTH"
+do_compile[vardepsexclude] = "SCONS_MAXLINELENGTH"
+do_install[vardepsexclude] = "SCONS_MAXLINELENGTH"
+
EXPORT_FUNCTIONS do_compile do_install
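The 2097152 default matches what getconf ARG_MAX commonly reports on Linux hosts; the same limit can be queried from Python:

    # Sketch: the host's argument-length limit, as "getconf ARG_MAX"
    # reports it (commonly 2097152 on Linux).
    import os

    print(os.sysconf("SC_ARG_MAX"))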
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index 3513269bca..dd6cf12920 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -1084,7 +1084,7 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
- directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx"]
+ directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx", "do_deploy_archives"]
def isNativeCross(x):
return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
index bf8ca58b0b..a78839bdc2 100644
--- a/meta/classes/staging.bbclass
+++ b/meta/classes/staging.bbclass
@@ -269,6 +269,10 @@ python extend_recipe_sysroot() {
pn = d.getVar("PN")
stagingdir = d.getVar("STAGING_DIR")
sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
+ # only needed by multilib cross-canadian since it redefines RECIPE_SYSROOT
+ manifestprefix = d.getVar("RECIPE_SYSROOT_MANIFEST_SUBDIR")
+ if manifestprefix:
+ sharedmanifests = sharedmanifests + "/" + manifestprefix
recipesysroot = d.getVar("RECIPE_SYSROOT")
recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
@@ -644,7 +648,7 @@ python staging_taskhandler() {
bbtasks = e.tasklist
for task in bbtasks:
deps = d.getVarFlag(task, "depends")
- if task == "do_configure" or (deps and "populate_sysroot" in deps):
+ if task != 'do_prepare_recipe_sysroot' and (task == "do_configure" or (deps and "populate_sysroot" in deps)):
d.prependVarFlag(task, "prefuncs", "extend_recipe_sysroot ")
}
staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
index 09ec52792d..c07332d5b6 100644
--- a/meta/classes/systemd.bbclass
+++ b/meta/classes/systemd.bbclass
@@ -146,6 +146,7 @@ python systemd_populate_packages() {
def systemd_check_services():
searchpaths = [oe.path.join(d.getVar("sysconfdir"), "systemd", "system"),]
searchpaths.append(d.getVar("systemd_system_unitdir"))
+ searchpaths.append(d.getVar("systemd_user_unitdir"))
systemd_packages = d.getVar('SYSTEMD_PACKAGES')
keys = 'Also'
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
index 8ffaeab284..0241f29dfb 100644
--- a/meta/classes/testimage.bbclass
+++ b/meta/classes/testimage.bbclass
@@ -101,36 +101,12 @@ TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR"
testimage_dump_target () {
- top -bn1
- ps
- free
- df
- # The next command will export the default gateway IP
- export DEFAULT_GATEWAY=$(ip route | awk '/default/ { print $3}')
- ping -c3 $DEFAULT_GATEWAY
- dmesg
- netstat -an
- ip address
- # Next command will dump logs from /var/log/
- find /var/log/ -type f 2>/dev/null -exec echo "====================" \; -exec echo {} \; -exec echo "====================" \; -exec cat {} \; -exec echo "" \;
}
testimage_dump_host () {
- top -bn1
- iostat -x -z -N -d -p ALL 20 2
- ps -ef
- free
- df
- memstat
- dmesg
- ip -s link
- netstat -an
}
testimage_dump_monitor () {
- query-status
- query-block
- dump-guest-memory {"paging":false,"protocol":"file:%s.img"}
}
python do_testimage() {
@@ -240,7 +216,7 @@ def testimage_main(d):
with open(tdname, "r") as f:
td = json.load(f)
except FileNotFoundError as err:
- bb.fatal('File %s not found (%s).\nHave you built the image with INHERIT += "testimage" in the conf/local.conf?' % (tdname, err))
+ bb.fatal('File %s not found (%s).\nHave you built the image with IMAGE_CLASSES += "testimage" in conf/local.conf?' % (tdname, err))
# Some variables need to be updated (mostly paths) with the
# ones of the current environment because some tests require them.
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index 1d7c703748..d735d434e6 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -31,7 +31,7 @@ toolchain_create_sdk_env_script () {
echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script
echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script
echo '# Only disable this check if you absolutely know what you are doing!' >> $script
- echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script
+ echo 'if [ ! -z "${LD_LIBRARY_PATH:-}" ]; then' >> $script
echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script
echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script
echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script
@@ -47,7 +47,7 @@ toolchain_create_sdk_env_script () {
for i in ${CANADIANEXTRAOS}; do
EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
done
- echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
+ echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':"$PATH"' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
index b9ad35821a..fe85521877 100644
--- a/meta/classes/uboot-config.bbclass
+++ b/meta/classes/uboot-config.bbclass
@@ -59,10 +59,6 @@ UBOOT_ENV_BINARY ?= "${UBOOT_ENV}.${UBOOT_ENV_SUFFIX}"
UBOOT_ENV_IMAGE ?= "${UBOOT_ENV}-${MACHINE}-${PV}-${PR}.${UBOOT_ENV_SUFFIX}"
UBOOT_ENV_SYMLINK ?= "${UBOOT_ENV}-${MACHINE}.${UBOOT_ENV_SUFFIX}"
-# Default name of u-boot initial env, but enable individual recipes to change
-# this value.
-UBOOT_INITIAL_ENV ?= "${PN}-initial-env"
-
# U-Boot EXTLINUX variables. U-Boot searches for /boot/extlinux/extlinux.conf
# to find EXTLINUX conf file.
UBOOT_EXTLINUX_INSTALL_DIR ?= "/boot/extlinux"
diff --git a/meta/classes/uboot-extlinux-config.bbclass b/meta/classes/uboot-extlinux-config.bbclass
index dcebe7ff31..ba7a213ea2 100644
--- a/meta/classes/uboot-extlinux-config.bbclass
+++ b/meta/classes/uboot-extlinux-config.bbclass
@@ -33,11 +33,11 @@
# UBOOT_EXTLINUX_DEFAULT_LABEL ??= "Linux Default"
# UBOOT_EXTLINUX_TIMEOUT ??= "30"
#
-# UBOOT_EXTLINUX_KERNEL_IMAGE_default ??= "../zImage"
-# UBOOT_EXTLINUX_MENU_DESCRIPTION_default ??= "Linux Default"
+# UBOOT_EXTLINUX_KERNEL_IMAGE:default ??= "../zImage"
+# UBOOT_EXTLINUX_MENU_DESCRIPTION:default ??= "Linux Default"
#
-# UBOOT_EXTLINUX_KERNEL_IMAGE_fallback ??= "../zImage-fallback"
-# UBOOT_EXTLINUX_MENU_DESCRIPTION_fallback ??= "Linux Fallback"
+# UBOOT_EXTLINUX_KERNEL_IMAGE:fallback ??= "../zImage-fallback"
+# UBOOT_EXTLINUX_MENU_DESCRIPTION:fallback ??= "Linux Fallback"
#
# Results:
#
@@ -152,7 +152,7 @@ python do_create_extlinux_config() {
bb.fatal('Unable to open %s' % (cfile))
}
UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
-do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s_%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
+do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s:%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
do_create_extlinux_config[vardepsexclude] += "OVERRIDES"
addtask create_extlinux_config before do_install do_deploy after do_compile
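The vardeps expression expands to one entry per variable/label pair; a sketch with two hypothetical labels:

    # Sketch: what the inline expression above produces for
    # UBOOT_EXTLINUX_LABELS = "default fallback".
    UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
    labels = "default fallback"
    deps = ' '.join('UBOOT_EXTLINUX_%s:%s' % (v, l)
                    for v in UBOOT_EXTLINUX_VARS.split()
                    for l in labels.split())
    # deps begins: "UBOOT_EXTLINUX_CONSOLE:default UBOOT_EXTLINUX_CONSOLE:fallback ..."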
diff --git a/meta/classes/uboot-sign.bbclass b/meta/classes/uboot-sign.bbclass
index 31ffe1f472..6bb4ddc600 100644
--- a/meta/classes/uboot-sign.bbclass
+++ b/meta/classes/uboot-sign.bbclass
@@ -73,6 +73,9 @@ UBOOT_FIT_HASH_ALG ?= "sha256"
FIT_SIGN_ALG ?= "rsa2048"
UBOOT_FIT_SIGN_ALG ?= "rsa2048"
+# Kernel / U-Boot fitImage Padding Algo
+FIT_PAD_ALG ?= "pkcs-1.5"
+
# Generate keys for signing Kernel / U-Boot fitImage
FIT_GENERATE_KEYS ?= "0"
UBOOT_FIT_GENERATE_KEYS ?= "0"
@@ -289,7 +292,7 @@ do_uboot_generate_rsa_keys() {
"${UBOOT_FIT_SIGN_NUMBITS}"
echo "Generating certificate for signing U-Boot fitImage"
- openssl req ${FIT_KEY_REQ_ARGS} "${UBOOT_FIT_KEY_SIGN_PKCS}" \
+ openssl req ${UBOOT_FIT_KEY_REQ_ARGS} "${UBOOT_FIT_KEY_SIGN_PKCS}" \
-key "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \
-out "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt
fi
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
index 6a9e862bcd..7f0591d49a 100644
--- a/meta/classes/uninative.bbclass
+++ b/meta/classes/uninative.bbclass
@@ -167,5 +167,7 @@ python uninative_changeinterp () {
if not elf.isDynamic():
continue
+ os.chmod(f, s[stat.ST_MODE] | stat.S_IWUSR)
subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
+ os.chmod(f, s[stat.ST_MODE])
}
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
index fc1ffd828c..2804299fc4 100644
--- a/meta/classes/update-alternatives.bbclass
+++ b/meta/classes/update-alternatives.bbclass
@@ -1,5 +1,5 @@
# This class is used to help the alternatives system which is useful when
-# multiple sources provide same command. You can use update-alternatives
+# multiple sources provide the same command. You can use update-alternatives
# command directly in your recipe, but in most cases this class simplifies
# that job.
#
@@ -29,7 +29,7 @@
# A non-default link to create for a target
# ALTERNATIVE_TARGET[name] = "target"
#
-# This is the name of the binary as it's been install by do_install
+# This is the name of the binary as it's been installed by do_install
# i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash"
#
# A package specific link for a target
@@ -62,7 +62,7 @@ ALTERNATIVE_PRIORITY = "10"
# We need special processing for vardeps because it can not work on
# modified flag values. So we aggregate the flags into a new variable
-# and include that vairable in the set.
+# and include that variable in the set.
UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
PACKAGE_WRITE_DEPS += "virtual/update-alternatives-native"
@@ -80,10 +80,10 @@ def gen_updatealternativesvardeps(d):
for p in pkgs:
for v in vars:
- for flag in sorted((d.getVarFlags("%s_%s" % (v,p)) or {}).keys()):
+ for flag in sorted((d.getVarFlags("%s:%s" % (v,p)) or {}).keys()):
if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
continue
- d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
+ d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s:%s' % (v,p), flag, False)))
def ua_extend_depends(d):
if not 'virtual/update-alternatives' in d.getVar('PROVIDES'):
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
index 3acf59cd46..e5527f0529 100644
--- a/meta/classes/useradd-staticids.bbclass
+++ b/meta/classes/useradd-staticids.bbclass
@@ -41,7 +41,7 @@ def update_useradd_static_config(d):
def handle_missing_id(id, type, pkg, files, var, value):
# For backwards compatibility we accept "1" in addition to "error"
error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC')
- msg = "%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN'), pkg, type, id)
+ msg = 'Recipe %s, package %s: %sname "%s" does not have a static ID defined.' % (d.getVar('PN'), pkg, type, id)
if files:
msg += " Add %s to one of these files: %s" % (id, files)
else: