Diffstat (limited to 'meta/lib/oe')
-rw-r--r--   meta/lib/oe/__init__.py                          |   10
-rw-r--r--   meta/lib/oe/buildcfg.py                          |   79
-rw-r--r--   meta/lib/oe/buildhistory_analysis.py             |  203
-rw-r--r--   meta/lib/oe/cachedpath.py                        |    4
-rw-r--r--   meta/lib/oe/classextend.py                       |   51
-rw-r--r--   meta/lib/oe/classutils.py                        |    5
-rw-r--r--   meta/lib/oe/copy_buildsystem.py                  |   55
-rw-r--r--   meta/lib/oe/cve_check.py                         |  245
-rw-r--r--   meta/lib/oe/data.py                              |    6
-rw-r--r--   meta/lib/oe/distro_check.py                      |    8
-rw-r--r--   meta/lib/oe/elf.py                               |  145
-rw-r--r--   meta/lib/oe/go.py                                |   34
-rw-r--r--   meta/lib/oe/gpg_sign.py                          |  110
-rw-r--r--   meta/lib/oe/license.py                           |   64
-rw-r--r--   meta/lib/oe/lsb.py                               |   14
-rw-r--r--   meta/lib/oe/maketype.py                          |    9
-rw-r--r--   meta/lib/oe/manifest.py                          |  154
-rw-r--r--   meta/lib/oe/npm_registry.py                      |  175
-rw-r--r--   meta/lib/oe/overlayfs.py                         |   54
-rw-r--r--   meta/lib/oe/package.py                           | 1899
-rw-r--r--   meta/lib/oe/package_manager.py                   | 1595
-rw-r--r--   meta/lib/oe/package_manager/__init__.py          |  562
-rw-r--r--   meta/lib/oe/package_manager/deb/__init__.py      |  522
-rw-r--r--   meta/lib/oe/package_manager/deb/manifest.py      |   28
-rw-r--r--   meta/lib/oe/package_manager/deb/rootfs.py        |  212
-rw-r--r--   meta/lib/oe/package_manager/deb/sdk.py           |  107
-rw-r--r--   meta/lib/oe/package_manager/ipk/__init__.py      |  511
-rw-r--r--   meta/lib/oe/package_manager/ipk/manifest.py      |   76
-rw-r--r--   meta/lib/oe/package_manager/ipk/rootfs.py        |  352
-rw-r--r--   meta/lib/oe/package_manager/ipk/sdk.py           |  113
-rw-r--r--   meta/lib/oe/package_manager/rpm/__init__.py      |  422
-rw-r--r--   meta/lib/oe/package_manager/rpm/manifest.py      |   56
-rw-r--r--   meta/lib/oe/package_manager/rpm/rootfs.py        |  150
-rw-r--r--   meta/lib/oe/package_manager/rpm/sdk.py           |  122
-rw-r--r--   meta/lib/oe/packagedata.py                       |  285
-rw-r--r--   meta/lib/oe/packagegroup.py                      |   14
-rw-r--r--   meta/lib/oe/patch.py                             |  297
-rw-r--r--   meta/lib/oe/path.py                              |  120
-rw-r--r--   meta/lib/oe/prservice.py                         |   99
-rw-r--r--   meta/lib/oe/qa.py                                |   71
-rw-r--r--   meta/lib/oe/recipeutils.py                       |  362
-rw-r--r--   meta/lib/oe/reproducible.py                      |  197
-rw-r--r--   meta/lib/oe/rootfs.py                            |  817
-rw-r--r--   meta/lib/oe/rust.py                              |   13
-rw-r--r--   meta/lib/oe/sbom.py                              |  120
-rw-r--r--   meta/lib/oe/sdk.py                               |  315
-rw-r--r--   meta/lib/oe/spdx.py                              |  357
-rw-r--r--   meta/lib/oe/sstatesig.py                         |  540
-rw-r--r--   meta/lib/oe/terminal.py                          |   60
-rw-r--r--   meta/lib/oe/types.py                             |   39
-rw-r--r--   meta/lib/oe/useradd.py                           |    9
-rw-r--r--   meta/lib/oe/utils.py                             |  421
52 files changed, 8840 insertions, 3448 deletions
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py
index 3ad9513f40..6eb536ad28 100644
--- a/meta/lib/oe/__init__.py
+++ b/meta/lib/oe/__init__.py
@@ -1,2 +1,12 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
+
+BBIMPORTS = ["data", "path", "utils", "types", "package", "packagedata", \
+ "packagegroup", "sstatesig", "lsb", "cachedpath", "license", \
+ "qa", "reproducible", "rust", "buildcfg", "go"]
diff --git a/meta/lib/oe/buildcfg.py b/meta/lib/oe/buildcfg.py
new file mode 100644
index 0000000000..27b059b834
--- /dev/null
+++ b/meta/lib/oe/buildcfg.py
@@ -0,0 +1,79 @@
+
+import os
+import subprocess
+import bb.process
+
+def detect_revision(d):
+ path = get_scmbasepath(d)
+ return get_metadata_git_revision(path)
+
+def detect_branch(d):
+ path = get_scmbasepath(d)
+ return get_metadata_git_branch(path)
+
+def get_scmbasepath(d):
+ return os.path.join(d.getVar('COREBASE'), 'meta')
+
+def get_metadata_git_branch(path):
+ try:
+ rev, _ = bb.process.run('git rev-parse --abbrev-ref HEAD', cwd=path)
+ except bb.process.ExecutionError:
+ rev = '<unknown>'
+ return rev.strip()
+
+def get_metadata_git_revision(path):
+ try:
+ rev, _ = bb.process.run('git rev-parse HEAD', cwd=path)
+ except bb.process.ExecutionError:
+ rev = '<unknown>'
+ return rev.strip()
+
+def get_metadata_git_toplevel(path):
+ try:
+ toplevel, _ = bb.process.run('git rev-parse --show-toplevel', cwd=path)
+ except bb.process.ExecutionError:
+ return ""
+ return toplevel.strip()
+
+def get_metadata_git_remotes(path):
+ try:
+ remotes_list, _ = bb.process.run('git remote', cwd=path)
+ remotes = remotes_list.split()
+ except bb.process.ExecutionError:
+ remotes = []
+ return remotes
+
+def get_metadata_git_remote_url(path, remote):
+ try:
+ uri, _ = bb.process.run('git remote get-url {remote}'.format(remote=remote), cwd=path)
+ except bb.process.ExecutionError:
+ return ""
+ return uri.strip()
+
+def get_metadata_git_describe(path):
+ try:
+ describe, _ = bb.process.run('git describe --tags', cwd=path)
+ except bb.process.ExecutionError:
+ return ""
+ return describe.strip()
+
+def is_layer_modified(path):
+ try:
+ subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
+ git diff --quiet --no-ext-diff
+ git diff --quiet --no-ext-diff --cached""" % path,
+ shell=True,
+ stderr=subprocess.STDOUT)
+ return ""
+ except subprocess.CalledProcessError as ex:
+ # Silently treat errors as "modified", without checking for the
+ # (expected) return code 1 in a modified git repo. For example, we get
+ # output and a 129 return code when a layer isn't a git repo at all.
+ return " -- modified"
+
+def get_layer_revisions(d):
+ layers = (d.getVar("BBLAYERS") or "").split()
+ revisions = []
+ for i in layers:
+ revisions.append((i, os.path.basename(i), get_metadata_git_branch(i).strip(), get_metadata_git_revision(i), is_layer_modified(i)))
+ return revisions
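An illustrative sketch (not taken from the patch) of how the new oe.buildcfg helpers might be called; the path below is a placeholder, and get_layer_revisions() additionally needs a BitBake datastore d with BBLAYERS set:

import oe.buildcfg

# Query any git checkout directly; "<unknown>" is returned outside a git repo.
path = "/srv/work/poky/meta"                            # placeholder path
branch = oe.buildcfg.get_metadata_git_branch(path)      # e.g. "master" or "<unknown>"
revision = oe.buildcfg.get_metadata_git_revision(path)  # full commit SHA or "<unknown>"

# From a BitBake python task, per-layer data can be gathered in one call:
# for layer, name, branch, rev, modified in oe.buildcfg.get_layer_revisions(d):
#     bb.note("%s %s:%s%s" % (name, branch, rev, modified))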
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py
index 3e86a46a3f..4edad01580 100644
--- a/meta/lib/oe/buildhistory_analysis.py
+++ b/meta/lib/oe/buildhistory_analysis.py
@@ -3,6 +3,8 @@
# Copyright (C) 2012-2013, 2016-2017 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Note: requires GitPython 0.3.1+
#
# You can use this from the command line by running scripts/buildhistory-diff
@@ -13,6 +15,7 @@ import os.path
import difflib
import git
import re
+import shlex
import hashlib
import collections
import bb.utils
@@ -31,15 +34,27 @@ ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
-# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
-related_fields = {}
-related_fields['RDEPENDS'] = ['DEPENDS']
-related_fields['RRECOMMENDS'] = ['DEPENDS']
-related_fields['FILELIST'] = ['FILES']
-related_fields['PKGSIZE'] = ['FILELIST']
-related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
-related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+colours = {
+ 'colour_default': '',
+ 'colour_add': '',
+ 'colour_remove': '',
+}
+
+def init_colours(use_colours):
+ global colours
+ if use_colours:
+ colours = {
+ 'colour_default': '\033[0m',
+ 'colour_add': '\033[1;32m',
+ 'colour_remove': '\033[1;31m',
+ }
+ else:
+ colours = {
+ 'colour_default': '',
+ 'colour_add': '',
+ 'colour_remove': '',
+ }
class ChangeRecord:
def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
@@ -48,7 +63,6 @@ class ChangeRecord:
self.oldvalue = oldvalue
self.newvalue = newvalue
self.monitored = monitored
- self.related = []
self.filechanges = None
def __str__(self):
@@ -79,7 +93,17 @@ class ChangeRecord:
for name in adirs - bdirs]
files_ba = [(name, sorted(os.path.basename(item) for item in bitems if os.path.dirname(item) == name)) \
for name in bdirs - adirs]
- renamed_dirs = [(dir1, dir2) for dir1, files1 in files_ab for dir2, files2 in files_ba if files1 == files2]
+ renamed_dirs = []
+ for dir1, files1 in files_ab:
+ rename = False
+ for dir2, files2 in files_ba:
+ if files1 == files2 and not rename:
+ renamed_dirs.append((dir1,dir2))
+ # Make sure that we don't use this (dir, files) pair again.
+ files_ba.remove((dir2,files2))
+ # If a dir has already been found to have a rename, stop and go no further.
+ rename = True
+
# remove files that belong to renamed dirs from aitems and bitems
for dir1, dir2 in renamed_dirs:
aitems = [item for item in aitems if os.path.dirname(item) not in (dir1, dir2)]
@@ -88,35 +112,50 @@ class ChangeRecord:
if self.fieldname in list_fields or self.fieldname in list_order_fields:
renamed_dirs = []
+ changed_order = False
if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
(depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
aitems = pkglist_combine(depvera)
bitems = pkglist_combine(depverb)
else:
- aitems = self.oldvalue.split()
- bitems = self.newvalue.split()
if self.fieldname == 'FILELIST':
+ aitems = shlex.split(self.oldvalue)
+ bitems = shlex.split(self.newvalue)
renamed_dirs, aitems, bitems = detect_renamed_dirs(aitems, bitems)
+ else:
+ aitems = self.oldvalue.split()
+ bitems = self.newvalue.split()
removed = list(set(aitems) - set(bitems))
added = list(set(bitems) - set(aitems))
+ if not removed and not added and self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
+ depvera = bb.utils.explode_dep_versions2(self.oldvalue, sort=False)
+ depverb = bb.utils.explode_dep_versions2(self.newvalue, sort=False)
+ for i, j in zip(depvera.items(), depverb.items()):
+ if i[0] != j[0]:
+ changed_order = True
+ break
+
lines = []
if renamed_dirs:
for dfrom, dto in renamed_dirs:
- lines.append('directory renamed %s -> %s' % (dfrom, dto))
+ lines.append('directory renamed {colour_remove}{}{colour_default} -> {colour_add}{}{colour_default}'.format(dfrom, dto, **colours))
if removed or added:
if removed and not bitems:
- lines.append('removed all items "%s"' % ' '.join(removed))
+ lines.append('removed all items "{colour_remove}{}{colour_default}"'.format(' '.join(removed), **colours))
else:
if removed:
- lines.append('removed "%s"' % ' '.join(removed))
+ lines.append('removed "{colour_remove}{value}{colour_default}"'.format(value=' '.join(removed), **colours))
if added:
- lines.append('added "%s"' % ' '.join(added))
+ lines.append('added "{colour_add}{value}{colour_default}"'.format(value=' '.join(added), **colours))
else:
lines.append('changed order')
- out = '%s: %s' % (self.fieldname, ', '.join(lines))
+ if not (removed or added or changed_order):
+ out = ''
+ else:
+ out = '%s: %s' % (self.fieldname, ', '.join(lines))
elif self.fieldname in numeric_fields:
aval = int(self.oldvalue or 0)
@@ -125,9 +164,9 @@ class ChangeRecord:
percentchg = ((bval - aval) / float(aval)) * 100
else:
percentchg = 100
- out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
+ out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default} ({}{:.0f}%)'.format(self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg, **colours)
elif self.fieldname in defaultval_map:
- out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
+ out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default}'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
if self.fieldname == 'PKG' and '[default]' in self.newvalue:
out += ' - may indicate debian renaming failure'
elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
@@ -142,7 +181,7 @@ class ChangeRecord:
diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
out += '\n '.join(list(diff)[2:])
out += '\n --'
- elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
+ elif self.fieldname in img_monitor_files or '/image-files/' in self.path or self.fieldname == "sysroot":
if self.filechanges or (self.oldvalue and self.newvalue):
fieldname = self.fieldname
if '/image-files/' in self.path:
@@ -163,14 +202,7 @@ class ChangeRecord:
else:
out = ''
else:
- out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)
-
- if self.related:
- for chg in self.related:
- if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
- continue
- for line in chg._str_internal(False).splitlines():
- out += '\n * %s' % line
+ out = '{} changed from "{colour_remove}{}{colour_default}" to "{colour_add}{}{colour_default}"'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
return '%s%s' % (prefix, out) if out else ''
@@ -181,6 +213,7 @@ class FileChange:
changetype_perms = 'P'
changetype_ownergroup = 'O'
changetype_link = 'L'
+ changetype_move = 'M'
def __init__(self, path, changetype, oldvalue = None, newvalue = None):
self.path = path
@@ -219,10 +252,11 @@ class FileChange:
return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue)
elif self.changetype == self.changetype_link:
return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+ elif self.changetype == self.changetype_move:
+ return '%s moved to %s' % (self.path, self.oldvalue)
else:
return '%s changed (unknown)' % self.path
-
def blob_to_dict(blob):
alines = [line for line in blob.data_stream.read().decode('utf-8').splitlines()]
adict = {}
@@ -249,11 +283,14 @@ def file_list_to_dict(lines):
adict[path] = splitv[0:3]
return adict
+numeric_removal = str.maketrans('0123456789', 'XXXXXXXXXX')
-def compare_file_lists(alines, blines):
+def compare_file_lists(alines, blines, compare_ownership=True):
adict = file_list_to_dict(alines)
bdict = file_list_to_dict(blines)
filechanges = []
+ additions = []
+ removals = []
for path, splitv in adict.items():
newsplitv = bdict.pop(path, None)
if newsplitv:
@@ -262,16 +299,20 @@ def compare_file_lists(alines, blines):
newvalue = newsplitv[0][0]
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
+
# Check permissions
oldvalue = splitv[0][1:]
newvalue = newsplitv[0][1:]
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
- # Check owner/group
- oldvalue = '%s/%s' % (splitv[1], splitv[2])
- newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
- if oldvalue != newvalue:
- filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
+
+ if compare_ownership:
+ # Check owner/group
+ oldvalue = '%s/%s' % (splitv[1], splitv[2])
+ newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
+ if oldvalue != newvalue:
+ filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
+
# Check symlink target
if newsplitv[0][0] == 'l':
if len(splitv) > 3:
@@ -282,11 +323,67 @@ def compare_file_lists(alines, blines):
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
else:
- filechanges.append(FileChange(path, FileChange.changetype_remove))
+ removals.append(path)
# Whatever is left over has been added
for path in bdict:
- filechanges.append(FileChange(path, FileChange.changetype_add))
+ additions.append(path)
+
+    # Rather than print additions and removals, it's nicer to print file 'moves'
+ # where names or paths are similar.
+ revmap_remove = {}
+ for removal in removals:
+ translated = removal.translate(numeric_removal)
+ if translated not in revmap_remove:
+ revmap_remove[translated] = []
+ revmap_remove[translated].append(removal)
+
+ #
+ # We want to detect renames of large trees of files like
+ # /lib/modules/5.4.40-yocto-standard to /lib/modules/5.4.43-yocto-standard
+ #
+ renames = {}
+ for addition in additions.copy():
+ if addition not in additions:
+ continue
+ translated = addition.translate(numeric_removal)
+ if translated in revmap_remove:
+ if len(revmap_remove[translated]) != 1:
+ continue
+ removal = revmap_remove[translated][0]
+ commondir = addition.split("/")
+ commondir2 = removal.split("/")
+ idx = None
+ for i in range(len(commondir)):
+ if commondir[i] != commondir2[i]:
+ idx = i
+ break
+ commondir = "/".join(commondir[:i+1])
+ commondir2 = "/".join(commondir2[:i+1])
+            # If the common parent is in one dict and not the other, it's likely a rename
+ # so iterate through those files and process as such
+ if commondir2 not in bdict and commondir not in adict:
+ if commondir not in renames:
+ renames[commondir] = commondir2
+ for addition2 in additions.copy():
+ if addition2.startswith(commondir):
+ removal2 = addition2.replace(commondir, commondir2)
+ if removal2 in removals:
+ additions.remove(addition2)
+ removals.remove(removal2)
+ continue
+ filechanges.append(FileChange(removal, FileChange.changetype_move, addition))
+ if addition in additions:
+ additions.remove(addition)
+ if removal in removals:
+ removals.remove(removal)
+ for rename in renames:
+ filechanges.append(FileChange(renames[rename], FileChange.changetype_move, rename))
+
+ for addition in additions:
+ filechanges.append(FileChange(addition, FileChange.changetype_add))
+ for removal in removals:
+ filechanges.append(FileChange(removal, FileChange.changetype_remove))
return filechanges
@@ -377,15 +474,19 @@ def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
if abs(percentchg) < monitor_numeric_threshold:
continue
elif (not report_all) and key in list_fields:
- if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
+ if key == "FILELIST" and (path.endswith("-dbg") or path.endswith("-src")) and bstr.strip() != '':
continue
if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
(depvera, depverb) = compare_pkg_lists(astr, bstr)
if depvera == depverb:
continue
- alist = astr.split()
+ if key == 'FILELIST':
+ alist = shlex.split(astr)
+ blist = shlex.split(bstr)
+ else:
+ alist = astr.split()
+ blist = bstr.split()
alist.sort()
- blist = bstr.split()
blist.sort()
# We don't care about the removal of self-dependencies
if pkgname in alist and not pkgname in blist:
@@ -461,7 +562,7 @@ def compare_siglists(a_blob, b_blob, taskdiff=False):
elif not hash2 in hashfiles:
out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash2))
else:
- out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, collapsed=True)
+ out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb, collapsed=True)
for line in out2:
m = hashlib.sha256()
m.update(line.encode('utf-8'))
@@ -535,6 +636,15 @@ def process_changes(repopath, revision1, revision2='HEAD', report_all=False, rep
elif filename.startswith('latest.'):
chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
changes.append(chg)
+ elif filename == 'sysroot':
+ alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
+ blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
+ filechanges = compare_file_lists(alines,blines, compare_ownership=False)
+ if filechanges:
+ chg = ChangeRecord(path, filename, None, None, True)
+ chg.filechanges = filechanges
+ changes.append(chg)
+
elif path.startswith('images/'):
filename = os.path.basename(d.a_blob.path)
if filename in img_monitor_files:
@@ -594,17 +704,6 @@ def process_changes(repopath, revision1, revision2='HEAD', report_all=False, rep
chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read().decode('utf-8'), '', True)
changes.append(chg)
- # Link related changes
- for chg in changes:
- if chg.monitored:
- for chg2 in changes:
- # (Check dirname in the case of fields from recipe info files)
- if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
- if chg2.fieldname in related_fields.get(chg.fieldname, []):
- chg.related.append(chg2)
- elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
- chg.related.append(chg2)
-
# filter out unwanted paths
if exclude_path:
for chg in changes:
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py
index 0840cc4c3f..0138b791d4 100644
--- a/meta/lib/oe/cachedpath.py
+++ b/meta/lib/oe/cachedpath.py
@@ -1,4 +1,8 @@
#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Based on standard python library functions but avoid
# repeated stat calls. Its assumed the files will not change from under us
# so we can cache stat calls.
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py
index d2eeaf0e5c..5161d33d2d 100644
--- a/meta/lib/oe/classextend.py
+++ b/meta/lib/oe/classextend.py
@@ -1,10 +1,26 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import collections
+def get_packages(d):
+ pkgs = d.getVar("PACKAGES_NONML")
+ extcls = d.getVar("EXTENDERCLASS")
+ return extcls.rename_packages_internal(pkgs)
+
+def get_depends(varprefix, d):
+ extcls = d.getVar("EXTENDERCLASS")
+ return extcls.map_depends_variable(varprefix + "_NONML")
+
class ClassExtender(object):
def __init__(self, extname, d):
self.extname = extname
self.d = d
self.pkgs_mapping = []
+ self.d.setVar("EXTENDERCLASS", self)
def extend_name(self, name):
if name.startswith("kernel-") or name == "virtual/kernel":
@@ -16,10 +32,15 @@ class ClassExtender(object):
if name.endswith("-" + self.extname):
name = name.replace("-" + self.extname, "")
if name.startswith("virtual/"):
+            # Assume a large number of dashes means a triplet is present and we don't need to convert
+ if name.count("-") >= 3 and name.endswith(("-go", "-binutils", "-gcc", "-g++")):
+ return name
subs = name.split("/", 1)[1]
if not subs.startswith(self.extname):
return "virtual/" + self.extname + "-" + subs
return name
+ if name.startswith("/") or (name.startswith("${") and name.endswith("}")):
+ return name
if not name.startswith(self.extname):
return self.extname + "-" + name
return name
@@ -71,7 +92,7 @@ class ClassExtender(object):
def map_depends_variable(self, varname, suffix = ""):
# We need to preserve EXTENDPKGV so it can be expanded correctly later
if suffix:
- varname = varname + "_" + suffix
+ varname = varname + ":" + suffix
orig = self.d.getVar("EXTENDPKGV", False)
self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
deps = self.d.getVar(varname)
@@ -83,8 +104,13 @@ class ClassExtender(object):
for dep in deps:
newdeps[self.map_depends(dep)] = deps[dep]
- self.d.setVar(varname, bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}"))
+ if not varname.endswith("_NONML"):
+ self.d.renameVar(varname, varname + "_NONML")
+ self.d.setVar(varname, "${@oe.classextend.get_depends('%s', d)}" % varname)
+ self.d.appendVarFlag(varname, "vardeps", " " + varname + "_NONML")
+ ret = bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}")
self.d.setVar("EXTENDPKGV", orig)
+ return ret
def map_packagevars(self):
for pkg in (self.d.getVar("PACKAGES").split() + [""]):
@@ -103,20 +129,31 @@ class ClassExtender(object):
continue
self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
- self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping]))
+ self.d.renameVar("PACKAGES", "PACKAGES_NONML")
+ self.d.setVar("PACKAGES", "${@oe.classextend.get_packages(d)}")
+
+ def rename_packages_internal(self, pkgs):
+ self.pkgs_mapping = []
+ for pkg in (self.d.expand(pkgs) or "").split():
+ if pkg.startswith(self.extname):
+ self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
+ continue
+ self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
+
+ return " ".join([row[1] for row in self.pkgs_mapping])
def rename_package_variables(self, variables):
for pkg_mapping in self.pkgs_mapping:
+ if pkg_mapping[0].startswith("${") and pkg_mapping[0].endswith("}"):
+ continue
for subs in variables:
- self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))
+ self.d.renameVar("%s:%s" % (subs, pkg_mapping[0]), "%s:%s" % (subs, pkg_mapping[1]))
class NativesdkClassExtender(ClassExtender):
def map_depends(self, dep):
if dep.startswith(self.extname):
return dep
- if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
- return dep + "-crosssdk"
- elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
+ if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
return dep
else:
return self.extend_name(dep)
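A sketch of what the extended name mapping does for the nativesdk case (assumes it runs in a BitBake python context where a datastore d is in scope; the results shown in comments follow the extend_name() logic above):

import oe.classextend

ext = oe.classextend.ClassExtender("nativesdk", d)
ext.extend_name("zlib")             # -> "nativesdk-zlib"
ext.extend_name("virtual/libc")     # -> "virtual/nativesdk-libc"
ext.extend_name("virtual/kernel")   # -> "virtual/kernel"  (kernel names are left untouched)
ext.extend_name("/usr/bin/env")     # -> "/usr/bin/env"    (absolute paths now pass through)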
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py
index 45cd5249be..ec3f6ad720 100644
--- a/meta/lib/oe/classutils.py
+++ b/meta/lib/oe/classutils.py
@@ -1,3 +1,8 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
class ClassRegistryMeta(type):
"""Give each ClassRegistry their own registry"""
diff --git a/meta/lib/oe/copy_buildsystem.py b/meta/lib/oe/copy_buildsystem.py
index ac2fae1ed1..81abfbf9e2 100644
--- a/meta/lib/oe/copy_buildsystem.py
+++ b/meta/lib/oe/copy_buildsystem.py
@@ -1,5 +1,17 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# This class should provide easy access to the different aspects of the
# buildsystem such as layers, bitbake location, etc.
+#
+# SDK_LAYERS_EXCLUDE: Layers which will be excluded from SDK layers.
+# SDK_LAYERS_EXCLUDE_PATTERN: Similar to SDK_LAYERS_EXCLUDE, but supports
+#                             Python regular expressions, separated by spaces,
+# e.g.: ".*-downloads closed-.*"
+#
+
import stat
import shutil
@@ -10,7 +22,7 @@ def _smart_copy(src, dest):
mode = os.stat(src).st_mode
if stat.S_ISDIR(mode):
bb.utils.mkdirhier(dest)
- cmd = "tar --exclude='.git' --xattrs --xattrs-include='*' -chf - -C %s -p . \
+ cmd = "tar --exclude='.git' --exclude='__pycache__' --xattrs --xattrs-include='*' -cf - -C %s -p . \
| tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
else:
@@ -23,9 +35,12 @@ class BuildSystem(object):
self.context = context
self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS').split()]
self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE') or "").split()
+ self.layers_exclude_pattern = d.getVar('SDK_LAYERS_EXCLUDE_PATTERN')
def copy_bitbake_and_layers(self, destdir, workspace_name=None):
+ import re
# Copy in all metadata layers + bitbake (as repositories)
+ copied_corebase = None
layers_copied = []
bb.utils.mkdirhier(destdir)
layers = list(self.layerdirs)
@@ -40,8 +55,17 @@ class BuildSystem(object):
# Exclude layers
for layer_exclude in self.layers_exclude:
if layer_exclude in layers:
+ bb.note('Excluded %s from sdk layers since it is in SDK_LAYERS_EXCLUDE' % layer_exclude)
layers.remove(layer_exclude)
+ if self.layers_exclude_pattern:
+ layers_cp = layers[:]
+ for pattern in self.layers_exclude_pattern.split():
+ for layer in layers_cp:
+ if re.match(pattern, layer):
+ bb.note('Excluded %s from sdk layers since matched SDK_LAYERS_EXCLUDE_PATTERN' % layer)
+ layers.remove(layer)
+
workspace_newname = workspace_name
if workspace_newname:
layernames = [os.path.basename(layer) for layer in layers]
@@ -75,8 +99,10 @@ class BuildSystem(object):
layerdestpath = destdir
if corebase == os.path.dirname(layer):
layerdestpath += '/' + os.path.basename(corebase)
- else:
- layer_relative = os.path.basename(corebase) + '/' + os.path.relpath(layer, corebase)
+ # If the layer is located somewhere under the same parent directory
+ # as corebase we keep the layer structure.
+ elif os.path.commonpath([layer, corebase]) == os.path.dirname(corebase):
+ layer_relative = os.path.relpath(layer, os.path.dirname(corebase))
if os.path.dirname(layer_relative) != layernewname:
layerdestpath += '/' + os.path.dirname(layer_relative)
@@ -84,18 +110,19 @@ class BuildSystem(object):
layer_relative = os.path.relpath(layerdestpath,
destdir)
- layers_copied.append(layer_relative)
-
# Treat corebase as special since it typically will contain
# build directories or other custom items.
if corebase == layer:
+ copied_corebase = layer_relative
bb.utils.mkdirhier(layerdestpath)
for f in corebase_files:
f_basename = os.path.basename(f)
destname = os.path.join(layerdestpath, f_basename)
_smart_copy(f, destname)
else:
- if os.path.exists(layerdestpath):
+ layers_copied.append(layer_relative)
+
+ if os.path.exists(os.path.join(layerdestpath, 'conf/layer.conf')):
bb.note("Skipping layer %s, already handled" % layer)
else:
_smart_copy(layer, layerdestpath)
@@ -140,15 +167,15 @@ class BuildSystem(object):
layers_copied.remove(layer)
break
- return layers_copied
+ return copied_corebase, layers_copied
def generate_locked_sigs(sigfile, d):
bb.utils.mkdirhier(os.path.dirname(sigfile))
depd = d.getVar('BB_TASKDEPDATA', False)
- tasks = ['%s.%s' % (v[2], v[1]) for v in depd.values()]
+ tasks = ['%s:%s' % (v[2], v[1]) for v in depd.values()]
bb.parse.siggen.dump_lockedsigs(sigfile, tasks)
-def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output):
+def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, onlynative, pruned_output):
with open(lockedsigs, 'r') as infile:
bb.utils.mkdirhier(os.path.dirname(pruned_output))
with open(pruned_output, 'w') as f:
@@ -158,7 +185,11 @@ def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output
if line.endswith('\\\n'):
splitval = line.strip().split(':')
if not splitval[1] in excluded_tasks and not splitval[0] in excluded_targets:
- f.write(line)
+ if onlynative:
+ if 'nativesdk' in splitval[0]:
+ f.write(line)
+ else:
+ f.write(line)
else:
f.write(line)
invalue = False
@@ -226,7 +257,7 @@ def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cac
bb.note('Generating sstate-cache...')
nativelsbstring = d.getVar('NATIVELSBSTRING')
- bb.process.run("gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
+ bb.process.run("PYTHONDONTWRITEBYTECODE=1 gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
if fixedlsbstring and nativelsbstring != fixedlsbstring:
nativedir = output_sstate_cache + '/' + nativelsbstring
if os.path.isdir(nativedir):
@@ -253,7 +284,7 @@ def check_sstate_task_list(d, targets, filteroutfile, cmdprefix='', cwd=None, lo
logparam = '-l %s' % logfile
else:
logparam = ''
- cmd = "%sBB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
+ cmd = "%sPYTHONDONTWRITEBYTECODE=1 BB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
env = dict(d.getVar('BB_ORIGENV', False))
env.pop('BUILDDIR', '')
env.pop('BBPATH', '')
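A rough sketch of driving the updated BuildSystem API, whose copy_bitbake_and_layers() now returns the copied corebase separately from the other layers (assumes a BitBake datastore d with BBLAYERS, COREBASE and the SDK_LAYERS_EXCLUDE* variables set; destdir is a placeholder):

from oe.copy_buildsystem import BuildSystem

destdir = "/tmp/esdk-layers"                     # placeholder output directory
buildsystem = BuildSystem("extensible SDK", d)
corebase, layers = buildsystem.copy_bitbake_and_layers(destdir, "workspace")
# 'corebase' is the relative path of the copied core layer tree,
# 'layers' the relative paths of every other layer that was copied.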
diff --git a/meta/lib/oe/cve_check.py b/meta/lib/oe/cve_check.py
new file mode 100644
index 0000000000..ed5c714cb8
--- /dev/null
+++ b/meta/lib/oe/cve_check.py
@@ -0,0 +1,245 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import collections
+import re
+import itertools
+import functools
+
+_Version = collections.namedtuple(
+ "_Version", ["release", "patch_l", "pre_l", "pre_v"]
+)
+
+@functools.total_ordering
+class Version():
+
+ def __init__(self, version, suffix=None):
+
+ suffixes = ["alphabetical", "patch"]
+
+ if str(suffix) == "alphabetical":
+ version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(?P<patch_l>[a-z]))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
+ elif str(suffix) == "patch":
+ version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(p|patch)(?P<patch_l>[0-9]+))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
+ else:
+ version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
+ regex = re.compile(r"^\s*" + version_pattern + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ match = regex.search(version)
+ if not match:
+ raise Exception("Invalid version: '{0}'".format(version))
+
+ self._version = _Version(
+ release=tuple(int(i) for i in match.group("release").replace("-",".").split(".")),
+ patch_l=match.group("patch_l") if str(suffix) in suffixes and match.group("patch_l") else "",
+ pre_l=match.group("pre_l"),
+ pre_v=match.group("pre_v")
+ )
+
+ self._key = _cmpkey(
+ self._version.release,
+ self._version.patch_l,
+ self._version.pre_l,
+ self._version.pre_v
+ )
+
+ def __eq__(self, other):
+ if not isinstance(other, Version):
+ return NotImplemented
+ return self._key == other._key
+
+ def __gt__(self, other):
+ if not isinstance(other, Version):
+ return NotImplemented
+ return self._key > other._key
+
+def _cmpkey(release, patch_l, pre_l, pre_v):
+ # remove leading 0
+ _release = tuple(
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+ )
+
+ _patch = patch_l.upper()
+
+ if pre_l is None and pre_v is None:
+ _pre = float('inf')
+ else:
+ _pre = float(pre_v) if pre_v else float('-inf')
+ return _release, _patch, _pre
+
+
+def get_patched_cves(d):
+ """
+ Get patches that solve CVEs using the "CVE: " tag.
+ """
+
+ import re
+ import oe.patch
+
+ cve_match = re.compile(r"CVE:( CVE-\d{4}-\d+)+")
+
+ # Matches the last "CVE-YYYY-ID" in the file name, also if written
+ # in lowercase. Possible to have multiple CVE IDs in a single
+ # file name, but only the last one will be detected from the file name.
+    # However, patch file contents addressing multiple CVE IDs are supported
+ # (cve_match regular expression)
+ cve_file_name_match = re.compile(r".*(CVE-\d{4}-\d+)", re.IGNORECASE)
+
+ patched_cves = set()
+ patches = oe.patch.src_patches(d)
+ bb.debug(2, "Scanning %d patches for CVEs" % len(patches))
+ for url in patches:
+ patch_file = bb.fetch.decodeurl(url)[2]
+
+ # Check patch file name for CVE ID
+ fname_match = cve_file_name_match.search(patch_file)
+ if fname_match:
+ cve = fname_match.group(1).upper()
+ patched_cves.add(cve)
+ bb.debug(2, "Found %s from patch file name %s" % (cve, patch_file))
+
+ # Remote patches won't be present and compressed patches won't be
+ # unpacked, so say we're not scanning them
+ if not os.path.isfile(patch_file):
+ bb.note("%s is remote or compressed, not scanning content" % patch_file)
+ continue
+
+ with open(patch_file, "r", encoding="utf-8") as f:
+ try:
+ patch_text = f.read()
+ except UnicodeDecodeError:
+ bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
+ " trying with iso8859-1" % patch_file)
+ f.close()
+ with open(patch_file, "r", encoding="iso8859-1") as f:
+ patch_text = f.read()
+
+ # Search for one or more "CVE: " lines
+ text_match = False
+ for match in cve_match.finditer(patch_text):
+ # Get only the CVEs without the "CVE: " tag
+ cves = patch_text[match.start()+5:match.end()]
+ for cve in cves.split():
+ bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
+ patched_cves.add(cve)
+ text_match = True
+
+ if not fname_match and not text_match:
+ bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
+
+ # Search for additional patched CVEs
+ for cve in (d.getVarFlags("CVE_STATUS") or {}):
+ decoded_status, _, _ = decode_cve_status(d, cve)
+ if decoded_status == "Patched":
+ bb.debug(2, "CVE %s is additionally patched" % cve)
+ patched_cves.add(cve)
+
+ return patched_cves
+
+
+def get_cpe_ids(cve_product, version):
+ """
+ Get list of CPE identifiers for the given product and version
+ """
+
+ version = version.split("+git")[0]
+
+ cpe_ids = []
+ for product in cve_product.split():
+ # CVE_PRODUCT in recipes may include vendor information for CPE identifiers. If not,
+ # use wildcard for vendor.
+ if ":" in product:
+ vendor, product = product.split(":", 1)
+ else:
+ vendor = "*"
+
+ cpe_id = 'cpe:2.3:*:{}:{}:{}:*:*:*:*:*:*:*'.format(vendor, product, version)
+ cpe_ids.append(cpe_id)
+
+ return cpe_ids
+
+def cve_check_merge_jsons(output, data):
+ """
+ Merge the data in the "package" property to the main data file
+ output
+ """
+ if output["version"] != data["version"]:
+ bb.error("Version mismatch when merging JSON outputs")
+ return
+
+ for product in output["package"]:
+ if product["name"] == data["package"][0]["name"]:
+ bb.error("Error adding the same package %s twice" % product["name"])
+ return
+
+ output["package"].append(data["package"][0])
+
+def update_symlinks(target_path, link_path):
+ """
+ Update a symbolic link link_path to point to target_path.
+    Remove the link and recreate it if it exists and is different.
+ """
+ if link_path != target_path and os.path.exists(target_path):
+ if os.path.exists(os.path.realpath(link_path)):
+ os.remove(link_path)
+ os.symlink(os.path.basename(target_path), link_path)
+
+
+def convert_cve_version(version):
+ """
+ This function converts from CVE format to Yocto version format.
+    e.g. 8.3_p1 -> 8.3p1, 6.2_rc1 -> 6.2-rc1
+
+ Unless it is redefined using CVE_VERSION in the recipe,
+ cve_check uses the version in the name of the recipe (${PV})
+ to check vulnerabilities against a CVE in the database downloaded from NVD.
+
+    When the version has an update, e.g.
+ "p1" in OpenSSH 8.3p1,
+ "-rc1" in linux kernel 6.2-rc1,
+ the database stores the version as version_update (8.3_p1, 6.2_rc1).
+ Therefore, we must transform this version before comparing to the
+ recipe version.
+
+ In this case, the parameter of the function is 8.3_p1.
+ If the version uses the Release Candidate format, "rc",
+ this function replaces the '_' by '-'.
+ If the version uses the Update format, "p",
+ this function removes the '_' completely.
+ """
+ import re
+
+ matches = re.match('^([0-9.]+)_((p|rc)[0-9]+)$', version)
+
+ if not matches:
+ return version
+
+ version = matches.group(1)
+ update = matches.group(2)
+
+ if matches.group(3) == "rc":
+ return version + '-' + update
+
+ return version + update
+
+def decode_cve_status(d, cve):
+ """
+ Convert CVE_STATUS into status, detail and description.
+ """
+ status = d.getVarFlag("CVE_STATUS", cve)
+ if not status:
+ return ("", "", "")
+
+ status_split = status.split(':', 1)
+ detail = status_split[0]
+ description = status_split[1].strip() if (len(status_split) > 1) else ""
+
+ status_mapping = d.getVarFlag("CVE_CHECK_STATUSMAP", detail)
+ if status_mapping is None:
+ bb.warn('Invalid detail "%s" for CVE_STATUS[%s] = "%s", fallback to Unpatched' % (detail, cve, status))
+ status_mapping = "Unpatched"
+
+ return (status_mapping, detail, description)
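A short sketch (not from the patch) of how the new version helpers behave; it is standalone and needs no datastore:

from oe.cve_check import Version, convert_cve_version

# NVD stores "update" suffixes as version_update; convert to the recipe form first.
assert convert_cve_version("8.3_p1") == "8.3p1"
assert convert_cve_version("6.2_rc1") == "6.2-rc1"

# Then compare with the suffix-aware Version class.
assert Version("8.3p1", suffix="patch") > Version("8.3", suffix="patch")
assert Version("1.2-rc1") < Version("1.2")   # pre-releases sort before the final release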
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py
index b8901e63f5..37121cfad2 100644
--- a/meta/lib/oe/data.py
+++ b/meta/lib/oe/data.py
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import json
import oe.maketype
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py
index e775c3a6ec..3494520f40 100644
--- a/meta/lib/oe/distro_check.py
+++ b/meta/lib/oe/distro_check.py
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
def create_socket(url, d):
import urllib
from bb.utils import export_proxies
@@ -22,7 +28,7 @@ def find_latest_numeric_release(url, d):
maxstr=""
for link in get_links_from_url(url, d):
try:
- # TODO use LooseVersion
+ # TODO use bb.utils.vercmp_string_op()
release = float(link)
except:
release = 0
diff --git a/meta/lib/oe/elf.py b/meta/lib/oe/elf.py
new file mode 100644
index 0000000000..eab2349a4f
--- /dev/null
+++ b/meta/lib/oe/elf.py
@@ -0,0 +1,145 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+def machine_dict(d):
+# TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
+ machdata = {
+ "darwin9" : {
+ "arm" : (40, 0, 0, True, 32),
+ },
+ "eabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ },
+ "elf" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "i586" : (3, 0, 0, True, 32),
+ "i686" : (3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ "epiphany": (4643, 0, 0, True, 32),
+ "lm32": (138, 0, 0, False, 32),
+ "loongarch64":(258, 0, 0, True, 64),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ "powerpc": (20, 0, 0, False, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
+ },
+ "linux" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "arm" : (40, 97, 0, True, 32),
+ "armeb": (40, 97, 0, False, 32),
+ "powerpc": (20, 0, 0, False, 32),
+ "powerpc64": (21, 0, 0, False, 64),
+ "powerpc64le": (21, 0, 0, True, 64),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ "ia64": (50, 0, 0, True, 64),
+ "alpha": (36902, 0, 0, True, 64),
+ "hppa": (15, 3, 0, False, 32),
+ "loongarch64":(258, 0, 0, True, 64),
+ "m68k": ( 4, 0, 0, False, 32),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "mips64": ( 8, 0, 0, False, 64),
+ "mips64el": ( 8, 0, 0, True, 64),
+ "mipsisa32r6": ( 8, 0, 0, False, 32),
+ "mipsisa32r6el": ( 8, 0, 0, True, 32),
+ "mipsisa64r6": ( 8, 0, 0, False, 64),
+ "mipsisa64r6el": ( 8, 0, 0, True, 64),
+ "nios2": (113, 0, 0, True, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
+ "s390": (22, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+ "sparc": ( 2, 0, 0, False, 32),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ },
+ "linux-android" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ },
+ "linux-androideabi" : {
+ "arm" : (40, 97, 0, True, 32),
+ },
+ "linux-musl" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "arm" : ( 40, 97, 0, True, 32),
+ "armeb": ( 40, 97, 0, False, 32),
+ "powerpc": ( 20, 0, 0, False, 32),
+ "powerpc64": ( 21, 0, 0, False, 64),
+ "powerpc64le": (21, 0, 0, True, 64),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": ( 62, 0, 0, True, 64),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "mips64": ( 8, 0, 0, False, 64),
+ "mips64el": ( 8, 0, 0, True, 64),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
+ "sh4": ( 42, 0, 0, True, 32),
+ },
+ "uclinux-uclibc" : {
+ "bfin": ( 106, 0, 0, True, 32),
+ },
+ "linux-gnueabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
+ },
+ "linux-musleabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
+ },
+ "linux-gnuspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-muslspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-gnu" : {
+ "powerpc": (20, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+ },
+ "linux-gnu_ilp32" : {
+ "aarch64" : (183, 0, 0, True, 32),
+ },
+ "linux-gnux32" : {
+ "x86_64": (62, 0, 0, True, 32),
+ },
+ "linux-muslx32" : {
+ "x86_64": (62, 0, 0, True, 32),
+ },
+ "linux-gnun32" : {
+ "mips64": ( 8, 0, 0, False, 32),
+ "mips64el": ( 8, 0, 0, True, 32),
+ "mipsisa64r6": ( 8, 0, 0, False, 32),
+ "mipsisa64r6el":( 8, 0, 0, True, 32),
+ },
+ }
+
+ # Add in any extra user supplied data which may come from a BSP layer, removing the
+ # need to always change this class directly
+    extra_machdata = (d and d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS") or "").split()
+ for m in extra_machdata:
+ call = m + "(machdata, d)"
+ locs = { "machdata" : machdata, "d" : d}
+ machdata = bb.utils.better_eval(call, locs)
+
+ return machdata
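machine_dict() maps (TARGET_OS, TARGET_ARCH) to the expected ELF header fields; a small lookup sketch, where passing None skips the PACKAGEQA_EXTRA_MACHDEFFUNCS extension hook:

import oe.elf

machdata = oe.elf.machine_dict(None)
machine, osabi, abiversion, littleendian, bits = machdata["linux"]["aarch64"]
# -> (183, 0, 0, True, 64): EM_AARCH64, default OSABI/ABI version, little endian, 64-bit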
diff --git a/meta/lib/oe/go.py b/meta/lib/oe/go.py
new file mode 100644
index 0000000000..dfd957d157
--- /dev/null
+++ b/meta/lib/oe/go.py
@@ -0,0 +1,34 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import re
+
+def map_arch(a):
+ if re.match('i.86', a):
+ return '386'
+ elif a == 'x86_64':
+ return 'amd64'
+ elif re.match('arm.*', a):
+ return 'arm'
+ elif re.match('aarch64.*', a):
+ return 'arm64'
+ elif re.match('mips64el.*', a):
+ return 'mips64le'
+ elif re.match('mips64.*', a):
+ return 'mips64'
+ elif a == 'mips':
+ return 'mips'
+ elif a == 'mipsel':
+ return 'mipsle'
+ elif re.match('p(pc|owerpc)(64le)', a):
+ return 'ppc64le'
+ elif re.match('p(pc|owerpc)(64)', a):
+ return 'ppc64'
+ elif a == 'riscv64':
+ return 'riscv64'
+ elif a == 'loongarch64':
+ return 'loong64'
+ return ''
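map_arch() converts an OE target architecture name into the corresponding GOARCH value; a few illustrative calls:

import oe.go

assert oe.go.map_arch("x86_64") == "amd64"
assert oe.go.map_arch("i686") == "386"
assert oe.go.map_arch("aarch64") == "arm64"
assert oe.go.map_arch("mips64el") == "mips64le"
assert oe.go.map_arch("unmapped-arch") == ""   # unknown architectures yield an empty string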
diff --git a/meta/lib/oe/gpg_sign.py b/meta/lib/oe/gpg_sign.py
index 9cc88f020c..ede6186c84 100644
--- a/meta/lib/oe/gpg_sign.py
+++ b/meta/lib/oe/gpg_sign.py
@@ -1,37 +1,49 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
"""Helper module for GPG signing"""
-import os
import bb
-import oe.utils
+import os
+import shlex
+import subprocess
+import tempfile
class LocalSigner(object):
"""Class for handling local (on the build host) signing"""
def __init__(self, d):
self.gpg_bin = d.getVar('GPG_BIN') or \
bb.utils.which(os.getenv('PATH'), 'gpg')
+ self.gpg_cmd = [self.gpg_bin]
+ self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent")
+ # Without this we see "Cannot allocate memory" errors when running processes in parallel
+ # It needs to be set for any gpg command since any agent launched can stick around in memory
+ # and this parameter must be set.
+ if self.gpg_agent_bin:
+ self.gpg_cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)]
self.gpg_path = d.getVar('GPG_PATH')
- self.gpg_version = self.get_gpg_version()
self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
+ self.gpg_version = self.get_gpg_version()
+
def export_pubkey(self, output_file, keyid, armor=True):
"""Export GPG public key to a file"""
- cmd = '%s --no-permission-warning --batch --yes --export -o %s ' % \
- (self.gpg_bin, output_file)
+ cmd = self.gpg_cmd + ["--no-permission-warning", "--batch", "--yes", "--export", "-o", output_file]
if self.gpg_path:
- cmd += "--homedir %s " % self.gpg_path
+ cmd += ["--homedir", self.gpg_path]
if armor:
- cmd += "--armor "
- cmd += keyid
- status, output = oe.utils.getstatusoutput(cmd)
- if status:
- raise bb.build.FuncFailed('Failed to export gpg public key (%s): %s' %
- (keyid, output))
+ cmd += ["--armor"]
+ cmd += [keyid]
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def sign_rpms(self, files, keyid, passphrase, digest, sign_chunk, fsk=None, fsk_password=None):
"""Sign RPM files"""
cmd = self.rpm_bin + " --addsign --define '_gpg_name %s' " % keyid
- gpg_args = '--no-permission-warning --batch --passphrase=%s' % passphrase
+ gpg_args = '--no-permission-warning --batch --passphrase=%s --agent-program=%s|--auto-expand-secmem' % (passphrase, self.gpg_agent_bin)
if self.gpg_version > (2,1,):
gpg_args += ' --pinentry-mode=loopback'
cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
@@ -47,44 +59,50 @@ class LocalSigner(object):
# Sign in chunks
for i in range(0, len(files), sign_chunk):
- status, output = oe.utils.getstatusoutput(cmd + ' '.join(files[i:i+sign_chunk]))
- if status:
- raise bb.build.FuncFailed("Failed to sign RPM packages: %s" % output)
+ subprocess.check_output(shlex.split(cmd + ' '.join(files[i:i+sign_chunk])), stderr=subprocess.STDOUT)
- def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True):
+ def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True, output_suffix=None, use_sha256=False):
"""Create a detached signature of a file"""
- import subprocess
if passphrase_file and passphrase:
raise Exception("You should use either passphrase_file of passphrase, not both")
- cmd = [self.gpg_bin, '--detach-sign', '--no-permission-warning', '--batch',
+ cmd = self.gpg_cmd + ['--detach-sign', '--no-permission-warning', '--batch',
'--no-tty', '--yes', '--passphrase-fd', '0', '-u', keyid]
if self.gpg_path:
cmd += ['--homedir', self.gpg_path]
if armor:
cmd += ['--armor']
+ if use_sha256:
+ cmd += ['--digest-algo', "SHA256"]
#gpg > 2.1 supports password pipes only through the loopback interface
#gpg < 2.1 errors out if given unknown parameters
if self.gpg_version > (2,1,):
cmd += ['--pinentry-mode', 'loopback']
- cmd += [input_file]
-
try:
if passphrase_file:
with open(passphrase_file) as fobj:
passphrase = fobj.readline();
- job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
- (_, stderr) = job.communicate(passphrase.encode("utf-8"))
+ if not output_suffix:
+ output_suffix = 'asc' if armor else 'sig'
+ output_file = input_file + "." + output_suffix
+ with tempfile.TemporaryDirectory(dir=os.path.dirname(output_file)) as tmp_dir:
+ tmp_file = os.path.join(tmp_dir, os.path.basename(output_file))
+ cmd += ['-o', tmp_file]
+
+ cmd += [input_file]
+
+ job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+ (_, stderr) = job.communicate(passphrase.encode("utf-8"))
- if job.returncode:
- raise bb.build.FuncFailed("GPG exited with code %d: %s" %
- (job.returncode, stderr.decode("utf-8")))
+ if job.returncode:
+ bb.fatal("GPG exited with code %d: %s" % (job.returncode, stderr.decode("utf-8")))
+ os.rename(tmp_file, output_file)
except IOError as e:
bb.error("IO error (%s): %s" % (e.errno, e.strerror))
raise Exception("Failed to sign '%s'" % input_file)
@@ -96,23 +114,41 @@ class LocalSigner(object):
def get_gpg_version(self):
"""Return the gpg version as a tuple of ints"""
- import subprocess
try:
- ver_str = subprocess.check_output((self.gpg_bin, "--version", "--no-permission-warning")).split()[2].decode("utf-8")
- return tuple([int(i) for i in ver_str.split('.')])
+ cmd = self.gpg_cmd + ["--version", "--no-permission-warning"]
+ ver_str = subprocess.check_output(cmd).split()[2].decode("utf-8")
+ return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
except subprocess.CalledProcessError as e:
- raise bb.build.FuncFailed("Could not get gpg version: %s" % e)
+ bb.fatal("Could not get gpg version: %s" % e)
- def verify(self, sig_file):
+ def verify(self, sig_file, valid_sigs = ''):
"""Verify signature"""
- cmd = self.gpg_bin + " --verify --no-permission-warning "
+ cmd = self.gpg_cmd + ["--verify", "--no-permission-warning", "--status-fd", "1"]
if self.gpg_path:
- cmd += "--homedir %s " % self.gpg_path
- cmd += sig_file
- status, _ = oe.utils.getstatusoutput(cmd)
- ret = False if status else True
- return ret
+ cmd += ["--homedir", self.gpg_path]
+
+ cmd += [sig_file]
+ status = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ # Valid if any key matches if unspecified
+ if not valid_sigs:
+ ret = False if status.returncode else True
+ return ret
+
+ import re
+ goodsigs = []
+ sigre = re.compile(r'^\[GNUPG:\] GOODSIG (\S+)\s(.*)$')
+ for l in status.stdout.decode("utf-8").splitlines():
+ s = sigre.match(l)
+ if s:
+ goodsigs += [s.group(1)]
+
+ for sig in valid_sigs.split():
+ if sig in goodsigs:
+ return True
+ if len(goodsigs):
+ bb.warn('No accepted signatures found. Good signatures found: %s.' % ' '.join(goodsigs))
+ return False
def get_signer(d, backend):
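A hedged usage sketch for the reworked signer (assumes a BitBake datastore d and that the "local" backend resolves to LocalSigner; the key ID and paths are placeholders). detach_sign() now writes <input>.<suffix> itself, and verify() can be restricted to an accepted key list:

from oe.gpg_sign import get_signer

signer = get_signer(d, "local")
signer.detach_sign("/tmp/image.manifest", "EXAMPLEKEYID", None,
                   passphrase="secret", armor=True, use_sha256=True)
# produces /tmp/image.manifest.asc next to the input file
ok = signer.verify("/tmp/image.manifest.asc", valid_sigs="EXAMPLEKEYID")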
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py
index ca385d5187..d9c8d94da4 100644
--- a/meta/lib/oe/license.py
+++ b/meta/lib/oe/license.py
@@ -1,4 +1,8 @@
-# vi:sts=4:sw=4:et
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
"""Code for parsing OpenEmbedded license strings"""
import ast
@@ -8,17 +12,20 @@ from fnmatch import fnmatchcase as fnmatch
def license_ok(license, dont_want_licenses):
""" Return False if License exist in dont_want_licenses else True """
for dwl in dont_want_licenses:
- # If you want to exclude license named generically 'X', we
- # surely want to exclude 'X+' as well. In consequence, we
- # will exclude a trailing '+' character from LICENSE in
- # case INCOMPATIBLE_LICENSE is not a 'X+' license.
- lic = license
- if not re.search('\+$', dwl):
- lic = re.sub('\+', '', license)
- if fnmatch(lic, dwl):
+ if fnmatch(license, dwl):
return False
return True
+def obsolete_license_list():
+ return ["AGPL-3", "AGPL-3+", "AGPLv3", "AGPLv3+", "AGPLv3.0", "AGPLv3.0+", "AGPL-3.0", "AGPL-3.0+", "BSD-0-Clause",
+ "GPL-1", "GPL-1+", "GPLv1", "GPLv1+", "GPLv1.0", "GPLv1.0+", "GPL-1.0", "GPL-1.0+", "GPL-2", "GPL-2+", "GPLv2",
+ "GPLv2+", "GPLv2.0", "GPLv2.0+", "GPL-2.0", "GPL-2.0+", "GPL-3", "GPL-3+", "GPLv3", "GPLv3+", "GPLv3.0", "GPLv3.0+",
+ "GPL-3.0", "GPL-3.0+", "LGPLv2", "LGPLv2+", "LGPLv2.0", "LGPLv2.0+", "LGPL-2.0", "LGPL-2.0+", "LGPL2.1", "LGPL2.1+",
+ "LGPLv2.1", "LGPLv2.1+", "LGPL-2.1", "LGPL-2.1+", "LGPLv3", "LGPLv3+", "LGPL-3.0", "LGPL-3.0+", "MPL-1", "MPLv1",
+ "MPLv1.1", "MPLv2", "MIT-X", "MIT-style", "openssl", "PSF", "PSFv2", "Python-2", "Apachev2", "Apache-2", "Artisticv1",
+ "Artistic-1", "AFL-2", "AFL-1", "AFLv2", "AFLv1", "CDDLv1", "CDDL-1", "EPLv1.0", "FreeType", "Nauman",
+ "tcl", "vim", "SGIv1"]
+
class LicenseError(Exception):
pass
@@ -40,8 +47,8 @@ class InvalidLicense(LicenseError):
return "invalid characters in license '%s'" % self.license
license_operator_chars = '&|() '
-license_operator = re.compile('([' + license_operator_chars + '])')
-license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$')
+license_operator = re.compile(r'([' + license_operator_chars + '])')
+license_pattern = re.compile(r'[a-zA-Z0-9.+_\-]+$')
class LicenseVisitor(ast.NodeVisitor):
"""Get elements based on OpenEmbedded license strings"""
@@ -79,6 +86,9 @@ class FlattenVisitor(LicenseVisitor):
def visit_Str(self, node):
self.licenses.append(node.s)
+ def visit_Constant(self, node):
+ self.licenses.append(node.value)
+
def visit_BinOp(self, node):
if isinstance(node.op, ast.BitOr):
left = FlattenVisitor(self.choose_licenses)
@@ -101,26 +111,26 @@ def flattened_licenses(licensestr, choose_licenses):
raise LicenseSyntaxError(licensestr, exc)
return flatten.licenses
-def is_included(licensestr, whitelist=None, blacklist=None):
- """Given a license string and whitelist and blacklist, determine if the
- license string matches the whitelist and does not match the blacklist.
+def is_included(licensestr, include_licenses=None, exclude_licenses=None):
+ """Given a license string, a list of licenses to include and a list of
+ licenses to exclude, determine if the license string matches the include
+ list and does not match the exclude list.
Returns a tuple holding the boolean state and a list of the applicable
licenses that were excluded if state is False, or the licenses that were
- included if the state is True.
- """
+ included if the state is True."""
def include_license(license):
- return any(fnmatch(license, pattern) for pattern in whitelist)
+ return any(fnmatch(license, pattern) for pattern in include_licenses)
def exclude_license(license):
- return any(fnmatch(license, pattern) for pattern in blacklist)
+ return any(fnmatch(license, pattern) for pattern in exclude_licenses)
def choose_licenses(alpha, beta):
"""Select the option in an OR which is the 'best' (has the most
included licenses and no excluded licenses)."""
# The factor 1000 below is arbitrary, just expected to be much larger
- # that the number of licenses actually specified. That way the weight
+ # than the number of licenses actually specified. That way the weight
# will be negative if the list of licenses contains an excluded license,
# but still gives a higher weight to the list with the most included
# licenses.
@@ -133,11 +143,11 @@ def is_included(licensestr, whitelist=None, blacklist=None):
else:
return beta
- if not whitelist:
- whitelist = ['*']
+ if not include_licenses:
+ include_licenses = ['*']
- if not blacklist:
- blacklist = []
+ if not exclude_licenses:
+ exclude_licenses = []
licenses = flattened_licenses(licensestr, choose_licenses)
excluded = [lic for lic in licenses if exclude_license(lic)]
@@ -232,6 +242,9 @@ class ListVisitor(LicenseVisitor):
def visit_Str(self, node):
self.licenses.add(node.s)
+ def visit_Constant(self, node):
+ self.licenses.add(node.value)
+
def list_licenses(licensestr):
"""Simply get a list of all licenses mentioned in a license string.
Binary operators are not applied or taken into account in any way"""
@@ -241,3 +254,8 @@ def list_licenses(licensestr):
except SyntaxError as exc:
raise LicenseSyntaxError(licensestr, exc)
return visitor.licenses
+
+def apply_pkg_license_exception(pkg, bad_licenses, exceptions):
+ """Return remaining bad licenses after removing any package exceptions"""
+
+ return [lic for lic in bad_licenses if pkg + ':' + lic not in exceptions]
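A sketch of the renamed is_included() parameters (formerly whitelist/blacklist); the license expression is illustrative:

import oe.license

ok, lics = oe.license.is_included("GPL-3.0-only | (MIT & Apache-2.0)",
                                  include_licenses=["*"],
                                  exclude_licenses=["GPL-3.0*"])
# ok is True and lics lists the included licenses (MIT, Apache-2.0): the OR
# branch that avoids the excluded GPL-3.0-only license is chosen.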
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py
index 71c0992c5d..3ec03e5042 100644
--- a/meta/lib/oe/lsb.py
+++ b/meta/lib/oe/lsb.py
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
def get_os_release():
"""Get all key-value pairs from /etc/os-release as a dict"""
from collections import OrderedDict
@@ -106,12 +112,12 @@ def distro_identifier(adjust_hook=None):
if adjust_hook:
distro_id, release = adjust_hook(distro_id, release)
if not distro_id:
- return "Unknown"
- # Filter out any non-alphanumerics
- distro_id = re.sub(r'\W', '', distro_id)
+ return "unknown"
+ # Filter out any non-alphanumerics and convert to lowercase
+ distro_id = re.sub(r'\W', '', distro_id).lower()
if release:
- id_str = '{0}-{1}'.format(distro_id.lower(), release)
+ id_str = '{0}-{1}'.format(distro_id, release)
else:
id_str = distro_id
return id_str.replace(' ','-').replace('/','-')
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py
index f88981dd90..7a83bdf602 100644
--- a/meta/lib/oe/maketype.py
+++ b/meta/lib/oe/maketype.py
@@ -1,3 +1,8 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
"""OpenEmbedded variable typing support
Types are defined in the metadata by name, using the 'type' flag on a
@@ -7,7 +12,7 @@ the arguments of the type's factory for details.
import inspect
import oe.types as types
-import collections
+from collections.abc import Callable
available_types = {}
@@ -96,7 +101,7 @@ for name in dir(types):
continue
obj = getattr(types, name)
- if not isinstance(obj, collections.Callable):
+ if not isinstance(obj, Callable):
continue
register(name, obj)
diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py
index 60c49be0e9..61f18adc4a 100644
--- a/meta/lib/oe/manifest.py
+++ b/meta/lib/oe/manifest.py
@@ -1,9 +1,14 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from abc import ABCMeta, abstractmethod
import os
import re
import bb
-
class Manifest(object, metaclass=ABCMeta):
"""
This is an abstract class. Do not instantiate this directly.
@@ -185,154 +190,11 @@ class Manifest(object, metaclass=ABCMeta):
return installed_pkgs
-class RpmManifest(Manifest):
- """
- Returns a dictionary object with mip and mlp packages.
- """
- def _split_multilib(self, pkg_list):
- pkgs = dict()
-
- for pkg in pkg_list.split():
- pkg_type = self.PKG_TYPE_MUST_INSTALL
-
- ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
-
- for ml_variant in ml_variants:
- if pkg.startswith(ml_variant + '-'):
- pkg_type = self.PKG_TYPE_MULTILIB
-
- if not pkg_type in pkgs:
- pkgs[pkg_type] = pkg
- else:
- pkgs[pkg_type] += " " + pkg
-
- return pkgs
-
- def create_initial(self):
- pkgs = dict()
-
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for var in self.var_maps[self.manifest_type]:
- if var in self.vars_to_split:
- split_pkgs = self._split_multilib(self.d.getVar(var))
- if split_pkgs is not None:
- pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
- else:
- pkg_list = self.d.getVar(var)
- if pkg_list is not None:
- pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
-
- for pkg_type in pkgs:
- for pkg in pkgs[pkg_type].split():
- manifest.write("%s,%s\n" % (pkg_type, pkg))
-
- def create_final(self):
- pass
-
- def create_full(self, pm):
- pass
-
-
-class OpkgManifest(Manifest):
- """
- Returns a dictionary object with mip and mlp packages.
- """
- def _split_multilib(self, pkg_list):
- pkgs = dict()
-
- for pkg in pkg_list.split():
- pkg_type = self.PKG_TYPE_MUST_INSTALL
-
- ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
-
- for ml_variant in ml_variants:
- if pkg.startswith(ml_variant + '-'):
- pkg_type = self.PKG_TYPE_MULTILIB
-
- if not pkg_type in pkgs:
- pkgs[pkg_type] = pkg
- else:
- pkgs[pkg_type] += " " + pkg
-
- return pkgs
-
- def create_initial(self):
- pkgs = dict()
-
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for var in self.var_maps[self.manifest_type]:
- if var in self.vars_to_split:
- split_pkgs = self._split_multilib(self.d.getVar(var))
- if split_pkgs is not None:
- pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
- else:
- pkg_list = self.d.getVar(var)
- if pkg_list is not None:
- pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
-
- for pkg_type in pkgs:
- for pkg in pkgs[pkg_type].split():
- manifest.write("%s,%s\n" % (pkg_type, pkg))
-
- def create_final(self):
- pass
-
- def create_full(self, pm):
- if not os.path.exists(self.initial_manifest):
- self.create_initial()
-
- initial_manifest = self.parse_initial_manifest()
- pkgs_to_install = list()
- for pkg_type in initial_manifest:
- pkgs_to_install += initial_manifest[pkg_type]
- if len(pkgs_to_install) == 0:
- return
-
- output = pm.dummy_install(pkgs_to_install)
-
- with open(self.full_manifest, 'w+') as manifest:
- pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
- for line in set(output.split('\n')):
- m = pkg_re.match(line)
- if m:
- manifest.write(m.group(1) + '\n')
-
- return
-
-
-class DpkgManifest(Manifest):
- def create_initial(self):
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for var in self.var_maps[self.manifest_type]:
- pkg_list = self.d.getVar(var)
-
- if pkg_list is None:
- continue
-
- for pkg in pkg_list.split():
- manifest.write("%s,%s\n" %
- (self.var_maps[self.manifest_type][var], pkg))
-
- def create_final(self):
- pass
-
- def create_full(self, pm):
- pass
-
def create_manifest(d, final_manifest=False, manifest_dir=None,
manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
- manifest_map = {'rpm': RpmManifest,
- 'ipk': OpkgManifest,
- 'deb': DpkgManifest}
-
- manifest = manifest_map[d.getVar('IMAGE_PKGTYPE')](d, manifest_dir, manifest_type)
+ import importlib
+ manifest = importlib.import_module('oe.package_manager.' + d.getVar('IMAGE_PKGTYPE') + '.manifest').PkgManifest(d, manifest_dir, manifest_type)
if final_manifest:
manifest.create_final()
diff --git a/meta/lib/oe/npm_registry.py b/meta/lib/oe/npm_registry.py
new file mode 100644
index 0000000000..d97ced7cda
--- /dev/null
+++ b/meta/lib/oe/npm_registry.py
@@ -0,0 +1,175 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import bb
+import json
+import os
+import subprocess
+
+_ALWAYS_SAFE = frozenset('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ 'abcdefghijklmnopqrstuvwxyz'
+ '0123456789'
+ '_.-~()')
+
+MISSING_OK = object()
+
+REGISTRY = "https://registry.npmjs.org"
+
+# we can not use urllib.parse here because npm expects lowercase
+# hex-chars but urllib generates uppercase ones
+def uri_quote(s, safe = '/'):
+ res = ""
+ safe_set = set(safe)
+ for c in s:
+ if c in _ALWAYS_SAFE or c in safe_set:
+ res += c
+ else:
+ res += '%%%02x' % ord(c)
+ return res
+
+class PackageJson:
+ def __init__(self, spec):
+ self.__spec = spec
+
+ @property
+ def name(self):
+ return self.__spec['name']
+
+ @property
+ def version(self):
+ return self.__spec['version']
+
+ @property
+ def empty_manifest(self):
+ return {
+ 'name': self.name,
+ 'description': self.__spec.get('description', ''),
+ 'versions': {},
+ }
+
+ def base_filename(self):
+ return uri_quote(self.name, safe = '@')
+
+ def as_manifest_entry(self, tarball_uri):
+ res = {}
+
+ ## NOTE: 'npm install' requires more than basic meta information;
+ ## e.g. it takes 'bin' from this manifest entry but not the actual
+ ## 'package.json'
+ for (idx,dflt) in [('name', None),
+ ('description', ""),
+ ('version', None),
+ ('bin', MISSING_OK),
+ ('man', MISSING_OK),
+ ('scripts', MISSING_OK),
+ ('directories', MISSING_OK),
+ ('dependencies', MISSING_OK),
+ ('devDependencies', MISSING_OK),
+ ('optionalDependencies', MISSING_OK),
+ ('license', "unknown")]:
+ if idx in self.__spec:
+ res[idx] = self.__spec[idx]
+ elif dflt == MISSING_OK:
+ pass
+ elif dflt != None:
+ res[idx] = dflt
+ else:
+ raise Exception("%s-%s: missing key %s" % (self.name,
+ self.version,
+ idx))
+
+ res['dist'] = {
+ 'tarball': tarball_uri,
+ }
+
+ return res
+
+class ManifestImpl:
+ def __init__(self, base_fname, spec):
+ self.__base = base_fname
+ self.__spec = spec
+
+ def load(self):
+ try:
+ with open(self.filename, "r") as f:
+ res = json.load(f)
+ except IOError:
+ res = self.__spec.empty_manifest
+
+ return res
+
+ def save(self, meta):
+ with open(self.filename, "w") as f:
+ json.dump(meta, f, indent = 2)
+
+ @property
+ def filename(self):
+ return self.__base + ".meta"
+
+class Manifest:
+ def __init__(self, base_fname, spec):
+ self.__base = base_fname
+ self.__spec = spec
+ self.__lockf = None
+ self.__impl = None
+
+ def __enter__(self):
+ self.__lockf = bb.utils.lockfile(self.__base + ".lock")
+ self.__impl = ManifestImpl(self.__base, self.__spec)
+ return self.__impl
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ bb.utils.unlockfile(self.__lockf)
+
+class NpmCache:
+ def __init__(self, cache):
+ self.__cache = cache
+
+ @property
+ def path(self):
+ return self.__cache
+
+ def run(self, type, key, fname):
+ subprocess.run(['oe-npm-cache', self.__cache, type, key, fname],
+ check = True)
+
+class NpmRegistry:
+ def __init__(self, path, cache):
+ self.__path = path
+ self.__cache = NpmCache(cache + '/_cacache')
+ bb.utils.mkdirhier(self.__path)
+ bb.utils.mkdirhier(self.__cache.path)
+
+ @staticmethod
+ ## This function is critical and must match nodejs expectations
+ def _meta_uri(spec):
+ return REGISTRY + '/' + uri_quote(spec.name, safe = '@')
+
+ @staticmethod
+ ## Exact return value does not matter; just make it look like a
+ ## usual registry url
+ def _tarball_uri(spec):
+ return '%s/%s/-/%s-%s.tgz' % (REGISTRY,
+ uri_quote(spec.name, safe = '@'),
+ uri_quote(spec.name, safe = '@/'),
+ spec.version)
+
+ def add_pkg(self, tarball, pkg_json):
+ pkg_json = PackageJson(pkg_json)
+ base = os.path.join(self.__path, pkg_json.base_filename())
+
+ with Manifest(base, pkg_json) as manifest:
+ meta = manifest.load()
+ tarball_uri = self._tarball_uri(pkg_json)
+
+ meta['versions'][pkg_json.version] = pkg_json.as_manifest_entry(tarball_uri)
+
+ manifest.save(meta)
+
+ ## Cache entries are a little bit dependent on the nodejs
+ ## version; version specific cache implementation must
+ ## mitigate differences
+ self.__cache.run('meta', self._meta_uri(pkg_json), manifest.filename)
+ self.__cache.run('tgz', tarball_uri, tarball)
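A small sketch of the lowercase percent-encoding and the registry URIs the new module builds (the scoped package name and version are hypothetical):

    # Illustrative only; '@scope/pkg' and '1.0.0' are made-up values.
    uri_quote('@scope/pkg', safe='@')    # -> '@scope%2fpkg' (lowercase hex, unlike urllib)
    uri_quote('@scope/pkg', safe='@/')   # -> '@scope/pkg'   ('/' kept when listed as safe)
    # NpmRegistry then derives, from the same helpers:
    #   metadata URI: <REGISTRY>/@scope%2fpkg
    #   tarball URI:  <REGISTRY>/@scope%2fpkg/-/@scope/pkg-1.0.0.tgz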
diff --git a/meta/lib/oe/overlayfs.py b/meta/lib/oe/overlayfs.py
new file mode 100644
index 0000000000..8b88900f71
--- /dev/null
+++ b/meta/lib/oe/overlayfs.py
@@ -0,0 +1,54 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file contains common functions for overlayfs and its QA check
+
+# this function is based on https://github.com/systemd/systemd/blob/main/src/basic/unit-name.c
+def escapeSystemdUnitName(path):
+ escapeMap = {
+ '/': '-',
+ '-': "\\x2d",
+ '\\': "\\x5c"
+ }
+ return "".join([escapeMap.get(c, c) for c in path.strip('/')])
+
+def strForBash(s):
+ return s.replace('\\', '\\\\')
+
+def allOverlaysUnitName(d):
+ return d.getVar('PN') + '-overlays.service'
+
+def mountUnitName(unit):
+ return escapeSystemdUnitName(unit) + '.mount'
+
+def helperUnitName(unit):
+ return escapeSystemdUnitName(unit) + '-create-upper-dir.service'
+
+def unitFileList(d):
+ fileList = []
+ overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")
+
+ if not overlayMountPoints:
+ bb.fatal("A recipe uses overlayfs class but there is no OVERLAYFS_MOUNT_POINT set in your MACHINE configuration")
+
+ # check that we have required mount points set first
+ requiredMountPoints = d.getVarFlags('OVERLAYFS_WRITABLE_PATHS')
+ for mountPoint in requiredMountPoints:
+ if mountPoint not in overlayMountPoints:
+ bb.fatal("Missing required mount point for OVERLAYFS_MOUNT_POINT[%s] in your MACHINE configuration" % mountPoint)
+
+ for mountPoint in overlayMountPoints:
+ mountPointList = d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint)
+ if not mountPointList:
+ bb.debug(1, "No mount points defined for %s flag, don't add to file list", mountPoint)
+ continue
+ for path in mountPointList.split():
+ fileList.append(mountUnitName(path))
+ fileList.append(helperUnitName(path))
+
+ fileList.append(allOverlaysUnitName(d))
+
+ return fileList
+
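For reference, a sketch of the systemd unit names these helpers generate for a hypothetical OVERLAYFS_WRITABLE_PATHS entry:

    # Illustrative only; '/mnt/over-lay' is a made-up writable path.
    escapeSystemdUnitName('/mnt/over-lay')  # -> 'mnt-over\x2dlay'
    mountUnitName('/mnt/over-lay')          # -> 'mnt-over\x2dlay.mount'
    helperUnitName('/mnt/over-lay')         # -> 'mnt-over\x2dlay-create-upper-dir.service'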
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py
index 1e5c3aa8e1..1511ba47c4 100644
--- a/meta/lib/oe/package.py
+++ b/meta/lib/oe/package.py
@@ -1,16 +1,37 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import errno
+import fnmatch
+import itertools
+import os
+import shlex
+import re
+import glob
+import stat
+import mmap
+import shutil
+import subprocess
+
+import oe.cachedpath
+
def runstrip(arg):
# Function to strip a single file, called from split_and_strip_files below
# A working 'file' (one which works on the target architecture)
#
- # The elftype is a bit pattern (explained in split_and_strip_files) to tell
+ # The elftype is a bit pattern (explained in is_elf below) to tell
# us what type of file we're processing...
# 4 - executable
# 8 - shared library
# 16 - kernel module
- import stat, subprocess
-
- (file, elftype, strip) = arg
+ if len(arg) == 3:
+ (file, elftype, strip) = arg
+ extra_strip_sections = ''
+ else:
+ (file, elftype, strip, extra_strip_sections) = arg
newmode = None
if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
@@ -19,33 +40,81 @@ def runstrip(arg):
os.chmod(file, newmode)
stripcmd = [strip]
-
- # kernel module
+ skip_strip = False
+ # kernel module
if elftype & 16:
- stripcmd.extend(["--strip-debug", "--remove-section=.comment",
- "--remove-section=.note", "--preserve-dates"])
+ if is_kernel_module_signed(file):
+ bb.debug(1, "Skip strip on signed module %s" % file)
+ skip_strip = True
+ else:
+ stripcmd.extend(["--strip-debug", "--remove-section=.comment",
+ "--remove-section=.note", "--preserve-dates"])
# .so and shared library
elif ".so" in file and elftype & 8:
stripcmd.extend(["--remove-section=.comment", "--remove-section=.note", "--strip-unneeded"])
# shared or executable:
elif elftype & 8 or elftype & 4:
stripcmd.extend(["--remove-section=.comment", "--remove-section=.note"])
+ if extra_strip_sections != '':
+ for section in extra_strip_sections.split():
+ stripcmd.extend(["--remove-section=" + section])
stripcmd.append(file)
bb.debug(1, "runstrip: %s" % stripcmd)
- try:
+ if not skip_strip:
output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.error("runstrip: '%s' strip command failed with %s (%s)" % (stripcmd, e.returncode, e.output))
if newmode:
os.chmod(file, origmode)
- return
+# Detect .ko module by searching for "vermagic=" string
+def is_kernel_module(path):
+ with open(path) as f:
+ return mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ).find(b"vermagic=") >= 0
+# Detect if .ko module is signed
+def is_kernel_module_signed(path):
+ with open(path, "rb") as f:
+ f.seek(-28, 2)
+ module_tail = f.read()
+ return "Module signature appended" in "".join(chr(c) for c in bytearray(module_tail))
-def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, qa_already_stripped=False):
+# Return type (bits):
+# 0 - not elf
+# 1 - ELF
+# 2 - stripped
+# 4 - executable
+# 8 - shared library
+# 16 - kernel module
+def is_elf(path):
+ exec_type = 0
+ result = subprocess.check_output(["file", "-b", path], stderr=subprocess.STDOUT).decode("utf-8")
+
+ if "ELF" in result:
+ exec_type |= 1
+ if "not stripped" not in result:
+ exec_type |= 2
+ if "executable" in result:
+ exec_type |= 4
+ if "shared" in result:
+ exec_type |= 8
+ if "relocatable" in result:
+ if path.endswith(".ko") and path.find("/lib/modules/") != -1 and is_kernel_module(path):
+ exec_type |= 16
+ return (path, exec_type)
+
+def is_static_lib(path):
+ if path.endswith('.a') and not os.path.islink(path):
+ with open(path, 'rb') as fh:
+ # The magic must include the first slash to avoid
+ # matching golang static libraries
+ magic = b'!<arch>\x0a/'
+ start = fh.read(len(magic))
+ return start == magic
+ return False
+
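A rough sketch of how the is_elf() bit pattern is consumed by callers such as strip_execs() below (the file path is hypothetical):

    # Illustrative only; '/path/to/libfoo.so.1' is a made-up file.
    path, elf_type = is_elf('/path/to/libfoo.so.1')
    if elf_type & 1:                      # it is an ELF file at all
        stripped   = bool(elf_type & 2)
        executable = bool(elf_type & 4)
        shared_lib = bool(elf_type & 8)
        kernel_mod = bool(elf_type & 16)
    # e.g. an unstripped shared library reports elf_type == 1 | 8 == 9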
+def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, max_process, qa_already_stripped=False):
"""
Strip executable code (like executables, shared libraries) _in_place_
- Based on sysroot_strip in staging.bbclass
@@ -53,43 +122,11 @@ def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, qa_already_stripped=
:param strip_cmd: Strip command (usually ${STRIP})
:param libdir: ${libdir} - strip .so files in this directory
:param base_libdir: ${base_libdir} - strip .so files in this directory
+ :param max_process: number of stripping processes started in parallel
:param qa_already_stripped: Set to True if already-stripped' in ${INSANE_SKIP}
This is for proper logging and messages only.
"""
- import stat, errno, oe.path, oe.utils, mmap
-
- # Detect .ko module by searching for "vermagic=" string
- def is_kernel_module(path):
- with open(path) as f:
- return mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ).find(b"vermagic=") >= 0
-
- # Return type (bits):
- # 0 - not elf
- # 1 - ELF
- # 2 - stripped
- # 4 - executable
- # 8 - shared library
- # 16 - kernel module
- def is_elf(path):
- exec_type = 0
- ret, result = oe.utils.getstatusoutput(
- "file \"%s\"" % path.replace("\"", "\\\""))
-
- if ret:
- bb.error("split_and_strip_files: 'file %s' failed" % path)
- return exec_type
-
- if "ELF" in result:
- exec_type |= 1
- if "not stripped" not in result:
- exec_type |= 2
- if "executable" in result:
- exec_type |= 4
- if "shared" in result:
- exec_type |= 8
- if "relocatable" in result and is_kernel_module(path):
- exec_type |= 16
- return exec_type
+ import stat, errno, oe.path, oe.utils
elffiles = {}
inodes = {}
@@ -99,6 +136,8 @@ def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, qa_already_stripped=
#
# First lets figure out all of the files we may have to process
#
+ checkelf = []
+ inodecache = {}
for root, dirs, files in os.walk(dstdir):
for f in files:
file = os.path.join(root, f)
@@ -124,7 +163,11 @@ def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, qa_already_stripped=
# It's a file (or hardlink), not a link
# ...but is it ELF, and is it already stripped?
- elf_file = is_elf(file)
+ checkelf.append(file)
+ inodecache[file] = s.st_ino
+ results = oe.utils.multiprocess_launch_mp(is_elf, checkelf, max_process)
+ for (file, elf_file) in results:
+ #elf_file = is_elf(file)
if elf_file & 1:
if elf_file & 2:
if qa_already_stripped:
@@ -133,13 +176,13 @@ def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, qa_already_stripped=
bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dstdir):], pn))
continue
- if s.st_ino in inodes:
+ if inodecache[file] in inodes:
os.unlink(file)
- os.link(inodes[s.st_ino], file)
+ os.link(inodes[inodecache[file]], file)
else:
# break hardlinks so that we do not strip the original.
- inodes[s.st_ino] = file
- bb.utils.copyfile(file, file)
+ inodes[inodecache[file]] = file
+ bb.utils.break_hardlinks(file)
elffiles[file] = elf_file
#
@@ -150,8 +193,7 @@ def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, qa_already_stripped=
elf_file = int(elffiles[file])
sfiles.append((file, elf_file, strip_cmd))
- oe.utils.multiprocess_exec(sfiles, runstrip)
-
+ oe.utils.multiprocess_launch_mp(runstrip, sfiles, max_process)
def file_translate(file):
@@ -236,13 +278,13 @@ def read_shlib_providers(d):
shlib_provider = {}
shlibs_dirs = d.getVar('SHLIBSDIRS').split()
- list_re = re.compile('^(.*)\.list$')
+ list_re = re.compile(r'^(.*)\.list$')
# Go from least to most specific since the last one found wins
for dir in reversed(shlibs_dirs):
bb.debug(2, "Reading shlib providers in %s" % (dir))
if not os.path.exists(dir):
continue
- for file in os.listdir(dir):
+ for file in sorted(os.listdir(dir)):
m = list_re.match(file)
if m:
dep_pkg = m.group(1)
@@ -261,35 +303,1724 @@ def read_shlib_providers(d):
shlib_provider[s[0]][s[1]] = (dep_pkg, s[2])
return shlib_provider
+# We generate a master list of directories to process; we start by
+# seeding this list with reasonable defaults, then load from
+# the fs-perms.txt files
+def fixup_perms(d):
+ import pwd, grp
+
+ cpath = oe.cachedpath.CachedPath()
+ dvar = d.getVar('PKGD')
+
+ # init using a string with the same format as a line as documented in
+ # the fs-perms.txt file
+ # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
+ # <path> link <link target>
+ #
+ # __str__ can be used to print out an entry in the input format
+ #
+ # if fs_perms_entry.path is None:
+ # an error occurred
+ # if fs_perms_entry.link, you can retrieve:
+ # fs_perms_entry.path = path
+ # fs_perms_entry.link = target of link
+ # if not fs_perms_entry.link, you can retrieve:
+ # fs_perms_entry.path = path
+ # fs_perms_entry.mode = expected dir mode or None
+ # fs_perms_entry.uid = expected uid or -1
+ # fs_perms_entry.gid = expected gid or -1
+ # fs_perms_entry.walk = 'true' or something else
+ # fs_perms_entry.fmode = expected file mode or None
+ # fs_perms_entry.fuid = expected file uid or -1
+ # fs_perms_entry.fgid = expected file gid or -1
+ class fs_perms_entry():
+ def __init__(self, line):
+ lsplit = line.split()
+ if len(lsplit) == 3 and lsplit[1].lower() == "link":
+ self._setlink(lsplit[0], lsplit[2])
+ elif len(lsplit) == 8:
+ self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
+ else:
+ msg = "Fixup Perms: invalid config line %s" % line
+ oe.qa.handle_error("perm-config", msg, d)
+ self.path = None
+ self.link = None
+
+ def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
+ self.path = os.path.normpath(path)
+ self.link = None
+ self.mode = self._procmode(mode)
+ self.uid = self._procuid(uid)
+ self.gid = self._procgid(gid)
+ self.walk = walk.lower()
+ self.fmode = self._procmode(fmode)
+ self.fuid = self._procuid(fuid)
+ self.fgid = self._procgid(fgid)
+
+ def _setlink(self, path, link):
+ self.path = os.path.normpath(path)
+ self.link = link
+
+ def _procmode(self, mode):
+ if not mode or (mode and mode == "-"):
+ return None
+ else:
+ return int(mode,8)
+
+ # Note uid/gid -1 has special significance in os.lchown
+ def _procuid(self, uid):
+ if uid is None or uid == "-":
+ return -1
+ elif uid.isdigit():
+ return int(uid)
+ else:
+ return pwd.getpwnam(uid).pw_uid
+
+ def _procgid(self, gid):
+ if gid is None or gid == "-":
+ return -1
+ elif gid.isdigit():
+ return int(gid)
+ else:
+ return grp.getgrnam(gid).gr_gid
+
+ # Use for debugging the entries
+ def __str__(self):
+ if self.link:
+ return "%s link %s" % (self.path, self.link)
+ else:
+ mode = "-"
+ if self.mode:
+ mode = "0%o" % self.mode
+ fmode = "-"
+ if self.fmode:
+ fmode = "0%o" % self.fmode
+ uid = self._mapugid(self.uid)
+ gid = self._mapugid(self.gid)
+ fuid = self._mapugid(self.fuid)
+ fgid = self._mapugid(self.fgid)
+ return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)
+
+ def _mapugid(self, id):
+ if id is None or id == -1:
+ return "-"
+ else:
+ return "%d" % id
+
+ # Fix the permission, owner and group of path
+ def fix_perms(path, mode, uid, gid, dir):
+ if mode and not os.path.islink(path):
+ #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
+ os.chmod(path, mode)
+ # -1 is a special value that means don't change the uid/gid
+ # if they are BOTH -1, don't bother to lchown
+ if not (uid == -1 and gid == -1):
+ #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
+ os.lchown(path, uid, gid)
+
+ # Return a list of configuration files based on either the default
+ # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES
+ # paths are resolved via BBPATH
+ def get_fs_perms_list(d):
+ str = ""
+ bbpath = d.getVar('BBPATH')
+ fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES') or ""
+ for conf_file in fs_perms_tables.split():
+ confpath = bb.utils.which(bbpath, conf_file)
+ if confpath:
+ str += " %s" % bb.utils.which(bbpath, conf_file)
+ else:
+ bb.warn("cannot find %s specified in FILESYSTEM_PERMS_TABLES" % conf_file)
+ return str
+
+ fs_perms_table = {}
+ fs_link_table = {}
+
+ # By default all of the standard directories specified in
+ # bitbake.conf will get 0755 root:root.
+ target_path_vars = [ 'base_prefix',
+ 'prefix',
+ 'exec_prefix',
+ 'base_bindir',
+ 'base_sbindir',
+ 'base_libdir',
+ 'datadir',
+ 'sysconfdir',
+ 'servicedir',
+ 'sharedstatedir',
+ 'localstatedir',
+ 'infodir',
+ 'mandir',
+ 'docdir',
+ 'bindir',
+ 'sbindir',
+ 'libexecdir',
+ 'libdir',
+ 'includedir' ]
+
+ for path in target_path_vars:
+ dir = d.getVar(path) or ""
+ if dir == "":
+ continue
+ fs_perms_table[dir] = fs_perms_entry(d.expand("%s 0755 root root false - - -" % (dir)))
+
+ # Now we actually load from the configuration files
+ for conf in get_fs_perms_list(d).split():
+ if not os.path.exists(conf):
+ continue
+ with open(conf) as f:
+ for line in f:
+ if line.startswith('#'):
+ continue
+ lsplit = line.split()
+ if len(lsplit) == 0:
+ continue
+ if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
+ msg = "Fixup perms: %s invalid line: %s" % (conf, line)
+ oe.qa.handle_error("perm-line", msg, d)
+ continue
+ entry = fs_perms_entry(d.expand(line))
+ if entry and entry.path:
+ if entry.link:
+ fs_link_table[entry.path] = entry
+ if entry.path in fs_perms_table:
+ fs_perms_table.pop(entry.path)
+ else:
+ fs_perms_table[entry.path] = entry
+ if entry.path in fs_link_table:
+ fs_link_table.pop(entry.path)
+
+ # Debug -- list out in-memory table
+ #for dir in fs_perms_table:
+ # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
+ #for link in fs_link_table:
+ # bb.note("Fixup Perms: %s: %s" % (link, str(fs_link_table[link])))
+
+ # We process links first, so we can go back and fixup directory ownership
+ # for any newly created directories
+ # Process in sorted order so /run gets created before /run/lock, etc.
+ for entry in sorted(fs_link_table.values(), key=lambda x: x.link):
+ link = entry.link
+ dir = entry.path
+ origin = dvar + dir
+ if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
+ continue
+
+ if link[0] == "/":
+ target = dvar + link
+ ptarget = link
+ else:
+ target = os.path.join(os.path.dirname(origin), link)
+ ptarget = os.path.join(os.path.dirname(dir), link)
+ if os.path.exists(target):
+ msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
+ oe.qa.handle_error("perm-link", msg, d)
+ continue
+
+ # Create path to move directory to, move it, and then setup the symlink
+ bb.utils.mkdirhier(os.path.dirname(target))
+ #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
+ bb.utils.rename(origin, target)
+ #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
+ os.symlink(link, origin)
+
+ for dir in fs_perms_table:
+ origin = dvar + dir
+ if not (cpath.exists(origin) and cpath.isdir(origin)):
+ continue
+
+ fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
+
+ if fs_perms_table[dir].walk == 'true':
+ for root, dirs, files in os.walk(origin):
+ for dr in dirs:
+ each_dir = os.path.join(root, dr)
+ fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
+ for f in files:
+ each_file = os.path.join(root, f)
+ fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
+
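A sketch of the two line formats fixup_perms() accepts from FILESYSTEM_PERMS_TABLES files (the paths and values are only examples, not entries taken from the shipped fs-perms.txt):

    # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
    /usr/share/doc    0755  root  root  true  0644  root  root
    # <path> link <link target>
    /var/lock         link  /run/lock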
+# Get a list of files from file vars by searching files under current working directory
+# The list contains symlinks, directories and normal files.
+def files_from_filevars(filevars):
+ cpath = oe.cachedpath.CachedPath()
+ files = []
+ for f in filevars:
+ if os.path.isabs(f):
+ f = '.' + f
+ if not f.startswith("./"):
+ f = './' + f
+ globbed = glob.glob(f, recursive=True)
+ if globbed:
+ if [ f ] != globbed:
+ files += globbed
+ continue
+ files.append(f)
+
+ symlink_paths = []
+ for ind, f in enumerate(files):
+ # Handle directory symlinks. Truncate path to the lowest level symlink
+ parent = ''
+ for dirname in f.split('/')[:-1]:
+ parent = os.path.join(parent, dirname)
+ if dirname == '.':
+ continue
+ if cpath.islink(parent):
+ bb.warn("FILES contains file '%s' which resides under a "
+ "directory symlink. Please fix the recipe and use the "
+ "real path for the file." % f[1:])
+ symlink_paths.append(f)
+ files[ind] = parent
+ f = parent
+ break
+
+ if not cpath.islink(f):
+ if cpath.isdir(f):
+ newfiles = [ os.path.join(f,x) for x in os.listdir(f) ]
+ if newfiles:
+ files += newfiles
+
+ return files, symlink_paths
+
+# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
+def get_conffiles(pkg, d):
+ pkgdest = d.getVar('PKGDEST')
+ root = os.path.join(pkgdest, pkg)
+ cwd = os.getcwd()
+ os.chdir(root)
+
+ conffiles = d.getVar('CONFFILES:%s' % pkg)
+ if conffiles == None:
+ conffiles = d.getVar('CONFFILES')
+ if conffiles == None:
+ conffiles = ""
+ conffiles = conffiles.split()
+ conf_orig_list = files_from_filevars(conffiles)[0]
+
+ # Remove links and directories from conf_orig_list to get conf_list which only contains normal files
+ conf_list = []
+ for f in conf_orig_list:
+ if os.path.isdir(f):
+ continue
+ if os.path.islink(f):
+ continue
+ if not os.path.exists(f):
+ continue
+ conf_list.append(f)
+
+ # Remove the leading './'
+ for i in range(0, len(conf_list)):
+ conf_list[i] = conf_list[i][1:]
+
+ os.chdir(cwd)
+ return sorted(conf_list)
+
+def legitimize_package_name(s):
+ """
+ Make sure package names are legitimate strings
+ """
+
+ def fixutf(m):
+ cp = m.group(1)
+ if cp:
+ return ('\\u%s' % cp).encode('latin-1').decode('unicode_escape')
+
+ # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
+ s = re.sub(r'<U([0-9A-Fa-f]{1,4})>', fixutf, s)
+
+ # Remaining package name validity fixes
+ return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
+
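A few representative inputs for legitimize_package_name() (all made up) and what the transformations above produce:

    legitimize_package_name('Foo_Bar')             # -> 'foo-bar'
    legitimize_package_name('lib@2,x/y')           # -> 'lib+2+x-y'
    legitimize_package_name('locale<U002D>extra')  # -> 'locale-extra'  (<U....> codepoints decoded first)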
+def split_locales(d):
+ cpath = oe.cachedpath.CachedPath()
+ if (d.getVar('PACKAGE_NO_LOCALE') == '1'):
+ bb.debug(1, "package requested not splitting locales")
+ return
+
+ packages = (d.getVar('PACKAGES') or "").split()
+
+ dvar = d.getVar('PKGD')
+ pn = d.getVar('LOCALEBASEPN')
+
+ try:
+ locale_index = packages.index(pn + '-locale')
+ packages.pop(locale_index)
+ except ValueError:
+ locale_index = len(packages)
+
+ localepaths = []
+ locales = set()
+ for localepath in (d.getVar('LOCALE_PATHS') or "").split():
+ localedir = dvar + localepath
+ if not cpath.isdir(localedir):
+ bb.debug(1, 'No locale files in %s' % localepath)
+ continue
+
+ localepaths.append(localepath)
+ with os.scandir(localedir) as it:
+ for entry in it:
+ if entry.is_dir():
+ locales.add(entry.name)
+
+ if len(locales) == 0:
+ bb.debug(1, "No locale files in this package")
+ return
+
+ summary = d.getVar('SUMMARY') or pn
+ description = d.getVar('DESCRIPTION') or ""
+ locale_section = d.getVar('LOCALE_SECTION')
+ mlprefix = d.getVar('MLPREFIX') or ""
+ for l in sorted(locales):
+ ln = legitimize_package_name(l)
+ pkg = pn + '-locale-' + ln
+ packages.insert(locale_index, pkg)
+ locale_index += 1
+ files = []
+ for localepath in localepaths:
+ files.append(os.path.join(localepath, l))
+ d.setVar('FILES:' + pkg, " ".join(files))
+ d.setVar('RRECOMMENDS:' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
+ d.setVar('RPROVIDES:' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
+ d.setVar('SUMMARY:' + pkg, '%s - %s translations' % (summary, l))
+ d.setVar('DESCRIPTION:' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
+ if locale_section:
+ d.setVar('SECTION:' + pkg, locale_section)
+
+ d.setVar('PACKAGES', ' '.join(packages))
+
+ # Disabled by RP 18/06/07
+ # Wildcards aren't supported in debian
+ # They break with ipkg since glibc-locale* will mean that
+ # glibc-localedata-translit* won't install as a dependency
+ # for some other package which breaks meta-toolchain
+ # Probably breaks since virtual-locale- isn't provided anywhere
+ #rdep = (d.getVar('RDEPENDS:%s' % pn) or "").split()
+ #rdep.append('%s-locale*' % pn)
+ #d.setVar('RDEPENDS:%s' % pn, ' '.join(rdep))
+
+def package_debug_vars(d):
+ # We default to '.debug' style
+ if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
+ # Single debug-file-directory style debug info
+ debug_vars = {
+ "append": ".debug",
+ "staticappend": "",
+ "dir": "",
+ "staticdir": "",
+ "libdir": "/usr/lib/debug",
+ "staticlibdir": "/usr/lib/debug-static",
+ "srcdir": "/usr/src/debug",
+ }
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
+ # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "",
+ }
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "/usr/src/debug",
+ }
+ else:
+ # Original OE-core, a.k.a. ".debug", style debug info
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "/usr/src/debug",
+ }
+
+ return debug_vars
+
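As an illustration of how the chosen split style feeds into the debug file path built later in splitdebuginfo(), for a hypothetical file ${PKGD}/usr/bin/foo:

    # Illustrative only; reproduces the dest expression used in splitdebuginfo().
    dv = package_debug_vars(d)
    src = '/usr/bin/foo'
    dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
    # default '.debug' style   -> '/usr/bin/.debug/foo'      (sources under /usr/src/debug)
    # 'debug-without-src'      -> '/usr/bin/.debug/foo'      (no source copy)
    # 'debug-with-srcpkg'      -> '/usr/bin/.debug/foo'      (sources shipped in ${PN}-src)
    # 'debug-file-directory'   -> '/usr/lib/debug/usr/bin/foo.debug'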
+
+def parse_debugsources_from_dwarfsrcfiles_output(dwarfsrcfiles_output):
+ debugfiles = {}
+
+ for line in dwarfsrcfiles_output.splitlines():
+ if line.startswith("\t"):
+ debugfiles[os.path.normpath(line.split()[0])] = ""
+
+ return debugfiles.keys()
+
+def source_info(file, d, fatal=True):
+ cmd = ["dwarfsrcfiles", file]
+ try:
+ output = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.STDOUT)
+ retval = 0
+ except subprocess.CalledProcessError as exc:
+ output = exc.output
+ retval = exc.returncode
+
+ # 255 means a specific file wasn't fully parsed to get the debug file list, which is not a fatal failure
+ if retval != 0 and retval != 255:
+ msg = "dwarfsrcfiles failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")
+ if fatal:
+ bb.fatal(msg)
+ bb.note(msg)
+
+ debugsources = parse_debugsources_from_dwarfsrcfiles_output(output)
+
+ return list(debugsources)
+
+def splitdebuginfo(file, dvar, dv, d):
+ # Function to split a single file into two components, one is the stripped
+ # target system binary, the other contains any debugging information. The
+ # two files are linked to reference each other.
+ #
+ # return a mapping of files:debugsources
+
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
+ debugfile = dvar + dest
+ sources = []
+
+ if file.endswith(".ko") and file.find("/lib/modules/") != -1:
+ if oe.package.is_kernel_module_signed(file):
+ bb.debug(1, "Skip strip on signed module %s" % file)
+ return (file, sources)
+
+ # Split the file...
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+ #bb.note("Split %s -> %s" % (file, debugfile))
+ # Only store off the hard link reference if we successfully split!
+
+ dvar = d.getVar('PKGD')
+ objcopy = d.getVar("OBJCOPY")
+
+ newmode = None
+ if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
+ origmode = os.stat(file)[stat.ST_MODE]
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+ os.chmod(file, newmode)
+
+ # We need to extract the debug src information here...
+ if dv["srcdir"]:
+ sources = source_info(file, d)
+
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+
+ subprocess.check_output([objcopy, '--only-keep-debug', file, debugfile], stderr=subprocess.STDOUT)
+
+ # Set the debuglink to have the view of the file path on the target
+ subprocess.check_output([objcopy, '--add-gnu-debuglink', debugfile, file], stderr=subprocess.STDOUT)
+
+ if newmode:
+ os.chmod(file, origmode)
+
+ return (file, sources)
+
+def splitstaticdebuginfo(file, dvar, dv, d):
+ # Unlike the function above, there is no way to split a static library
+ # two components. So to get similar results we will copy the unmodified
+ # static library (containing the debug symbols) into a new directory.
+ # We will then strip (preserving symbols) the static library in the
+ # typical location.
+ #
+ # return a mapping of files:debugsources
+
+ src = file[len(dvar):]
+ dest = dv["staticlibdir"] + os.path.dirname(src) + dv["staticdir"] + "/" + os.path.basename(src) + dv["staticappend"]
+ debugfile = dvar + dest
+ sources = []
+
+ # Copy the file...
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+ #bb.note("Copy %s -> %s" % (file, debugfile))
+
+ dvar = d.getVar('PKGD')
+
+ newmode = None
+ if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
+ origmode = os.stat(file)[stat.ST_MODE]
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+ os.chmod(file, newmode)
+
+ # We need to extract the debug src information here...
+ if dv["srcdir"]:
+ sources = source_info(file, d)
+
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+
+ # Copy the unmodified item to the debug directory
+ shutil.copy2(file, debugfile)
+
+ if newmode:
+ os.chmod(file, origmode)
+
+ return (file, sources)
+
+def inject_minidebuginfo(file, dvar, dv, d):
+ # Extract just the symbols from debuginfo into minidebuginfo,
+ # compress it with xz and inject it back into the binary in a .gnu_debugdata section.
+ # https://sourceware.org/gdb/onlinedocs/gdb/MiniDebugInfo.html
+
+ readelf = d.getVar('READELF')
+ nm = d.getVar('NM')
+ objcopy = d.getVar('OBJCOPY')
+
+ minidebuginfodir = d.expand('${WORKDIR}/minidebuginfo')
+
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
+ debugfile = dvar + dest
+ minidebugfile = minidebuginfodir + src + '.minidebug'
+ bb.utils.mkdirhier(os.path.dirname(minidebugfile))
+
+ # If we didn't produce debuginfo for any reason, we can't produce minidebuginfo either
+ # so skip it.
+ if not os.path.exists(debugfile):
+ bb.debug(1, 'ELF file {} has no debuginfo, skipping minidebuginfo injection'.format(file))
+ return
+
+ # minidebuginfo does not make sense to apply to ELF objects other than
+ # executables and shared libraries, skip applying the minidebuginfo
+ # generation for objects like kernel modules.
+ for line in subprocess.check_output([readelf, '-h', debugfile], universal_newlines=True).splitlines():
+ if not line.strip().startswith("Type:"):
+ continue
+ elftype = line.split(":")[1].strip()
+ if not any(elftype.startswith(i) for i in ["EXEC", "DYN"]):
+ bb.debug(1, 'ELF file {} is not executable/shared, skipping minidebuginfo injection'.format(file))
+ return
+ break
+
+ # Find non-allocated PROGBITS, NOTE, and NOBITS sections in the debuginfo.
+ # We will exclude all of these from minidebuginfo to save space.
+ remove_section_names = []
+ for line in subprocess.check_output([readelf, '-W', '-S', debugfile], universal_newlines=True).splitlines():
+ # strip the leading " [ 1]" section index to allow splitting on space
+ if ']' not in line:
+ continue
+ fields = line[line.index(']') + 1:].split()
+ if len(fields) < 7:
+ continue
+ name = fields[0]
+ type = fields[1]
+ flags = fields[6]
+ # .debug_ sections will be removed by objcopy -S so no need to explicitly remove them
+ if name.startswith('.debug_'):
+ continue
+ if 'A' not in flags and type in ['PROGBITS', 'NOTE', 'NOBITS']:
+ remove_section_names.append(name)
+
+ # List dynamic symbols in the binary. We can exclude these from minidebuginfo
+ # because they are always present in the binary.
+ dynsyms = set()
+ for line in subprocess.check_output([nm, '-D', file, '--format=posix', '--defined-only'], universal_newlines=True).splitlines():
+ dynsyms.add(line.split()[0])
+
+ # Find all function symbols from debuginfo which aren't in the dynamic symbols table.
+ # These are the ones we want to keep in minidebuginfo.
+ keep_symbols_file = minidebugfile + '.symlist'
+ found_any_symbols = False
+ with open(keep_symbols_file, 'w') as f:
+ for line in subprocess.check_output([nm, debugfile, '--format=sysv', '--defined-only'], universal_newlines=True).splitlines():
+ fields = line.split('|')
+ if len(fields) < 7:
+ continue
+ name = fields[0].strip()
+ type = fields[3].strip()
+ if type == 'FUNC' and name not in dynsyms:
+ f.write('{}\n'.format(name))
+ found_any_symbols = True
+
+ if not found_any_symbols:
+ bb.debug(1, 'ELF file {} contains no symbols, skipping minidebuginfo injection'.format(file))
+ return
+
+ bb.utils.remove(minidebugfile)
+ bb.utils.remove(minidebugfile + '.xz')
+
+ subprocess.check_call([objcopy, '-S'] +
+ ['--remove-section={}'.format(s) for s in remove_section_names] +
+ ['--keep-symbols={}'.format(keep_symbols_file), debugfile, minidebugfile])
+
+ subprocess.check_call(['xz', '--keep', minidebugfile])
+
+ subprocess.check_call([objcopy, '--add-section', '.gnu_debugdata={}.xz'.format(minidebugfile), file])
+
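One way to sanity-check the injection on a produced binary is to look for the .gnu_debugdata section it adds (the binary path is hypothetical):

    # Illustrative only.
    out = subprocess.check_output(['readelf', '-S', '/path/to/stripped/foo'],
                                  universal_newlines=True)
    '.gnu_debugdata' in out   # expect True after a successful injection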
+def copydebugsources(debugsrcdir, sources, d):
+ # The debug src information written out to sourcefile is further processed
+ # and copied to the destination here.
+
+ cpath = oe.cachedpath.CachedPath()
+
+ if debugsrcdir and sources:
+ sourcefile = d.expand("${WORKDIR}/debugsources.list")
+ bb.utils.remove(sourcefile)
+
+ # filenames are null-separated - this is an artefact of the previous use
+ # of rpm's debugedit, which was writing them out that way, and the code elsewhere
+ # is still assuming that.
+ debuglistoutput = '\0'.join(sources) + '\0'
+ with open(sourcefile, 'a') as sf:
+ sf.write(debuglistoutput)
+
+ dvar = d.getVar('PKGD')
+ strip = d.getVar("STRIP")
+ objcopy = d.getVar("OBJCOPY")
+ workdir = d.getVar("WORKDIR")
+ sdir = d.getVar("S")
+ cflags = d.expand("${CFLAGS}")
+
+ prefixmap = {}
+ for flag in cflags.split():
+ if not flag.startswith("-fdebug-prefix-map"):
+ continue
+ if "recipe-sysroot" in flag:
+ continue
+ flag = flag.split("=")
+ prefixmap[flag[1]] = flag[2]
+
+ nosuchdir = []
+ basepath = dvar
+ for p in debugsrcdir.split("/"):
+ basepath = basepath + "/" + p
+ if not cpath.exists(basepath):
+ nosuchdir.append(basepath)
+ bb.utils.mkdirhier(basepath)
+ cpath.updatecache(basepath)
+
+ for pmap in prefixmap:
+ # Ignore files from the recipe sysroots (target and native)
+ cmd = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '((<internal>|<built-in>)$|/.*recipe-sysroot.*/)' | " % sourcefile
+ # We need to ignore files that are not actually ours
+ # we do this by only paying attention to items from this package
+ cmd += "fgrep -zw '%s' | " % prefixmap[pmap]
+ # Remove prefix in the source paths
+ cmd += "sed 's#%s/##g' | " % (prefixmap[pmap])
+ cmd += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)" % (pmap, dvar, prefixmap[pmap])
+
+ try:
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError:
+ # Can "fail" if internal headers/transient sources are attempted
+ pass
+ # cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced.
+ # Work around this by manually finding and copying any symbolic links that made it through.
+ cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s')" % \
+ (dvar, prefixmap[pmap], dvar, prefixmap[pmap], pmap, dvar, prefixmap[pmap])
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+ # debugsources.list may be polluted from the host if we used externalsrc;
+ # cpio uses copy-pass and may have just created a directory structure
+ # matching the one from the host. If that's the case, move those files to
+ # debugsrcdir to avoid host contamination.
+ # Empty dir structure will be deleted in the next step.
+
+ # Same check as above for externalsrc
+ if workdir not in sdir:
+ if os.path.exists(dvar + debugsrcdir + sdir):
+ cmd = "mv %s%s%s/* %s%s" % (dvar, debugsrcdir, sdir, dvar,debugsrcdir)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+ # The copy by cpio may have resulted in some empty directories! Remove these
+ cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+ # Also remove debugsrcdir if its empty
+ for p in nosuchdir[::-1]:
+ if os.path.exists(p) and not os.listdir(p):
+ os.rmdir(p)
+
+
+def process_split_and_strip_files(d):
+ cpath = oe.cachedpath.CachedPath()
+
+ dvar = d.getVar('PKGD')
+ pn = d.getVar('PN')
+ hostos = d.getVar('HOST_OS')
+
+ oldcwd = os.getcwd()
+ os.chdir(dvar)
+
+ dv = package_debug_vars(d)
+
+ #
+ # First lets figure out all of the files we may have to process ... do this only once!
+ #
+ elffiles = {}
+ symlinks = {}
+ staticlibs = []
+ inodes = {}
+ libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
+ baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
+ skipfiles = (d.getVar("INHIBIT_PACKAGE_STRIP_FILES") or "").split()
+ if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1' or \
+ d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
+ checkelf = {}
+ checkelflinks = {}
+ for root, dirs, files in cpath.walk(dvar):
+ for f in files:
+ file = os.path.join(root, f)
+
+ # Skip debug files
+ if dv["append"] and file.endswith(dv["append"]):
+ continue
+ if dv["dir"] and dv["dir"] in os.path.dirname(file[len(dvar):]):
+ continue
+
+ if file in skipfiles:
+ continue
+
+ if oe.package.is_static_lib(file):
+ staticlibs.append(file)
+ continue
+
+ try:
+ ltarget = cpath.realpath(file, dvar, False)
+ s = cpath.lstat(ltarget)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ # Skip broken symlinks
+ continue
+ if not s:
+ continue
+ # Check its an executable
+ if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) \
+ or (s[stat.ST_MODE] & stat.S_IXOTH) \
+ or ((file.startswith(libdir) or file.startswith(baselibdir)) \
+ and (".so" in f or ".node" in f)) \
+ or (f.startswith('vmlinux') or ".ko" in f):
+
+ if cpath.islink(file):
+ checkelflinks[file] = ltarget
+ continue
+ # Use a reference of device ID and inode number to identify files
+ file_reference = "%d_%d" % (s.st_dev, s.st_ino)
+ checkelf[file] = (file, file_reference)
+
+ results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelflinks.values(), d)
+ results_map = {}
+ for (ltarget, elf_file) in results:
+ results_map[ltarget] = elf_file
+ for file in checkelflinks:
+ ltarget = checkelflinks[file]
+ # If it's a symlink, and points to an ELF file, we capture the readlink target
+ if results_map[ltarget]:
+ target = os.readlink(file)
+ #bb.note("Sym: %s (%d)" % (ltarget, results_map[ltarget]))
+ symlinks[file] = target
+
+ results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelf.keys(), d)
+
+ # Sort results by file path. This ensures that the files are always
+ # processed in the same order, which is important to make sure builds
+ # are reproducible when dealing with hardlinks
+ results.sort(key=lambda x: x[0])
+
+ for (file, elf_file) in results:
+ # It's a file (or hardlink), not a link
+ # ...but is it ELF, and is it already stripped?
+ if elf_file & 1:
+ if elf_file & 2:
+ if 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split():
+ bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
+ else:
+ msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
+ oe.qa.handle_error("already-stripped", msg, d)
+ continue
+
+ # At this point we have an unstripped elf file. We need to:
+ # a) Make sure any file we strip is not hardlinked to anything else outside this tree
+ # b) Only strip any hardlinked file once (no races)
+ # c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
+
+ # Use a reference of device ID and inode number to identify files
+ file_reference = checkelf[file][1]
+ if file_reference in inodes:
+ os.unlink(file)
+ os.link(inodes[file_reference][0], file)
+ inodes[file_reference].append(file)
+ else:
+ inodes[file_reference] = [file]
+ # break hardlink
+ bb.utils.break_hardlinks(file)
+ elffiles[file] = elf_file
+ # Modified the file so clear the cache
+ cpath.updatecache(file)
+
+ def strip_pkgd_prefix(f):
+ nonlocal dvar
+
+ if f.startswith(dvar):
+ return f[len(dvar):]
+
+ return f
+
+ #
+ # First lets process debug splitting
+ #
+ if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
+ results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, dv, d))
+
+ if dv["srcdir"] and not hostos.startswith("mingw"):
+ if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
+ results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, dv, d))
+ else:
+ for file in staticlibs:
+ results.append( (file,source_info(file, d)) )
+
+ d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results})
+
+ sources = set()
+ for r in results:
+ sources.update(r[1])
+
+ # Hardlink our debug symbols to the other hardlink copies
+ for ref in inodes:
+ if len(inodes[ref]) == 1:
+ continue
+
+ target = inodes[ref][0][len(dvar):]
+ for file in inodes[ref][1:]:
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
+ fpath = dvar + dest
+ ftarget = dvar + dv["libdir"] + os.path.dirname(target) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
+ bb.utils.mkdirhier(os.path.dirname(fpath))
+ # Only one hardlink of separated debug info file in each directory
+ if not os.access(fpath, os.R_OK):
+ #bb.note("Link %s -> %s" % (fpath, ftarget))
+ os.link(ftarget, fpath)
+
+ # Create symlinks for all cases we were able to split symbols
+ for file in symlinks:
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
+ fpath = dvar + dest
+ # Skip it if the target doesn't exist
+ try:
+ s = os.stat(fpath)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ continue
+
+ ltarget = symlinks[file]
+ lpath = os.path.dirname(ltarget)
+ lbase = os.path.basename(ltarget)
+ ftarget = ""
+ if lpath and lpath != ".":
+ ftarget += lpath + dv["dir"] + "/"
+ ftarget += lbase + dv["append"]
+ if lpath.startswith(".."):
+ ftarget = os.path.join("..", ftarget)
+ bb.utils.mkdirhier(os.path.dirname(fpath))
+ #bb.note("Symlink %s -> %s" % (fpath, ftarget))
+ os.symlink(ftarget, fpath)
+
+ # Process the dv["srcdir"] if requested...
+ # This copies and places the referenced sources for later debugging...
+ copydebugsources(dv["srcdir"], sources, d)
+ #
+ # End of debug splitting
+ #
+
+ #
+ # Now lets go back over things and strip them
+ #
+ if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1'):
+ strip = d.getVar("STRIP")
+ sfiles = []
+ for file in elffiles:
+ elf_file = int(elffiles[file])
+ #bb.note("Strip %s" % file)
+ sfiles.append((file, elf_file, strip))
+ if (d.getVar('PACKAGE_STRIP_STATIC') == '1' or d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
+ for f in staticlibs:
+ sfiles.append((f, 16, strip))
+
+ oe.utils.multiprocess_launch(oe.package.runstrip, sfiles, d)
+
+ # Build "minidebuginfo" and reinject it back into the stripped binaries
+ if bb.utils.contains('DISTRO_FEATURES', 'minidebuginfo', True, False, d):
+ oe.utils.multiprocess_launch(inject_minidebuginfo, list(elffiles), d,
+ extraargs=(dvar, dv, d))
+
+ #
+ # End of strip
+ #
+ os.chdir(oldcwd)
+
+
+def populate_packages(d):
+ cpath = oe.cachedpath.CachedPath()
+
+ workdir = d.getVar('WORKDIR')
+ outdir = d.getVar('DEPLOY_DIR')
+ dvar = d.getVar('PKGD')
+ packages = d.getVar('PACKAGES').split()
+ pn = d.getVar('PN')
+
+ bb.utils.mkdirhier(outdir)
+ os.chdir(dvar)
+
+ autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG") or False)
+
+ split_source_package = (d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg')
+
+ # If debug-with-srcpkg mode is enabled then add the source package if it
+ # doesn't exist and add the source file contents to the source package.
+ if split_source_package:
+ src_package_name = ('%s-src' % d.getVar('PN'))
+ if not src_package_name in packages:
+ packages.append(src_package_name)
+ d.setVar('FILES:%s' % src_package_name, '/usr/src/debug')
+
+ # Sanity check PACKAGES for duplicates
+ # Sanity should be moved to sanity.bbclass once we have the infrastructure
+ package_dict = {}
+
+ for i, pkg in enumerate(packages):
+ if pkg in package_dict:
+ msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
+ oe.qa.handle_error("packages-list", msg, d)
+ # Ensure the source package gets the chance to pick up the source files
+ # before the debug package by ordering it first in PACKAGES. Whether it
+ # actually picks up any source files is controlled by
+ # PACKAGE_DEBUG_SPLIT_STYLE.
+ elif pkg.endswith("-src"):
+ package_dict[pkg] = (10, i)
+ elif autodebug and pkg.endswith("-dbg"):
+ package_dict[pkg] = (30, i)
+ else:
+ package_dict[pkg] = (50, i)
+ packages = sorted(package_dict.keys(), key=package_dict.get)
+ d.setVar('PACKAGES', ' '.join(packages))
+ pkgdest = d.getVar('PKGDEST')
+
+ seen = []
+
+ # os.mkdir masks the permissions with umask so we have to unset it first
+ oldumask = os.umask(0)
+
+ debug = []
+ for root, dirs, files in cpath.walk(dvar):
+ dir = root[len(dvar):]
+ if not dir:
+ dir = os.sep
+ for f in (files + dirs):
+ path = "." + os.path.join(dir, f)
+ if "/.debug/" in path or "/.debug-static/" in path or path.endswith("/.debug"):
+ debug.append(path)
+
+ for pkg in packages:
+ root = os.path.join(pkgdest, pkg)
+ bb.utils.mkdirhier(root)
+
+ filesvar = d.getVar('FILES:%s' % pkg) or ""
+ if "//" in filesvar:
+ msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
+ oe.qa.handle_error("files-invalid", msg, d)
+ filesvar = filesvar.replace("//", "/")
+
+ origfiles = filesvar.split()
+ files, symlink_paths = oe.package.files_from_filevars(origfiles)
+
+ if autodebug and pkg.endswith("-dbg"):
+ files.extend(debug)
+
+ for file in files:
+ if (not cpath.islink(file)) and (not cpath.exists(file)):
+ continue
+ if file in seen:
+ continue
+ seen.append(file)
+
+ def mkdir(src, dest, p):
+ src = os.path.join(src, p)
+ dest = os.path.join(dest, p)
+ fstat = cpath.stat(src)
+ os.mkdir(dest)
+ os.chmod(dest, fstat.st_mode)
+ os.chown(dest, fstat.st_uid, fstat.st_gid)
+ if p not in seen:
+ seen.append(p)
+ cpath.updatecache(dest)
+
+ def mkdir_recurse(src, dest, paths):
+ if cpath.exists(dest + '/' + paths):
+ return
+ while paths.startswith("./"):
+ paths = paths[2:]
+ p = "."
+ for c in paths.split("/"):
+ p = os.path.join(p, c)
+ if not cpath.exists(os.path.join(dest, p)):
+ mkdir(src, dest, p)
+
+ if cpath.isdir(file) and not cpath.islink(file):
+ mkdir_recurse(dvar, root, file)
+ continue
-def npm_split_package_dirs(pkgdir):
+ mkdir_recurse(dvar, root, os.path.dirname(file))
+ fpath = os.path.join(root,file)
+ if not cpath.islink(file):
+ os.link(file, fpath)
+ continue
+ ret = bb.utils.copyfile(file, fpath)
+ if ret is False or ret == 0:
+ bb.fatal("File population failed")
+
+ # Check if symlink paths exist
+ for file in symlink_paths:
+ if not os.path.exists(os.path.join(root,file)):
+ bb.fatal("File '%s' cannot be packaged into '%s' because its "
+ "parent directory structure does not exist. One of "
+ "its parent directories is a symlink whose target "
+ "directory is not included in the package." %
+ (file, pkg))
+
+ os.umask(oldumask)
+ os.chdir(workdir)
+
+ # Handle excluding packages with incompatible licenses
+ package_list = []
+ for pkg in packages:
+ licenses = d.getVar('_exclude_incompatible-' + pkg)
+ if licenses:
+ msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
+ oe.qa.handle_error("incompatible-license", msg, d)
+ else:
+ package_list.append(pkg)
+ d.setVar('PACKAGES', ' '.join(package_list))
+
+ unshipped = []
+ for root, dirs, files in cpath.walk(dvar):
+ dir = root[len(dvar):]
+ if not dir:
+ dir = os.sep
+ for f in (files + dirs):
+ path = os.path.join(dir, f)
+ if ('.' + path) not in seen:
+ unshipped.append(path)
+
+ if unshipped != []:
+ msg = pn + ": Files/directories were installed but not shipped in any package:"
+ if "installed-vs-shipped" in (d.getVar('INSANE_SKIP:' + pn) or "").split():
+ bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
+ else:
+ for f in unshipped:
+ msg = msg + "\n " + f
+ msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
+ msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
+ oe.qa.handle_error("installed-vs-shipped", msg, d)
+
+def process_fixsymlinks(pkgfiles, d):
+ cpath = oe.cachedpath.CachedPath()
+ pkgdest = d.getVar('PKGDEST')
+ packages = d.getVar("PACKAGES", False).split()
+
+ dangling_links = {}
+ pkg_files = {}
+ for pkg in packages:
+ dangling_links[pkg] = []
+ pkg_files[pkg] = []
+ inst_root = os.path.join(pkgdest, pkg)
+ for path in pkgfiles[pkg]:
+ rpath = path[len(inst_root):]
+ pkg_files[pkg].append(rpath)
+ rtarget = cpath.realpath(path, inst_root, True, assume_dir = True)
+ if not cpath.lexists(rtarget):
+ dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):]))
+
+ newrdepends = {}
+ for pkg in dangling_links:
+ for l in dangling_links[pkg]:
+ found = False
+ bb.debug(1, "%s contains dangling link %s" % (pkg, l))
+ for p in packages:
+ if l in pkg_files[p]:
+ found = True
+ bb.debug(1, "target found in %s" % p)
+ if p == pkg:
+ break
+ if pkg not in newrdepends:
+ newrdepends[pkg] = []
+ newrdepends[pkg].append(p)
+ break
+            if not found:
+ bb.note("%s contains dangling symlink to %s" % (pkg, l))
+
+ for pkg in newrdepends:
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
+ for p in newrdepends[pkg]:
+ if p not in rdepends:
+ rdepends[p] = []
+ d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+
+def process_filedeps(pkgfiles, d):
"""
- Work out the packages fetched and unpacked by BitBake's npm fetcher
- Returns a dict of packagename -> (relpath, package.json) ordered
- such that it is suitable for use in PACKAGES and FILES
+    Collect per-file run-time dependency metadata
+ Output:
+ FILERPROVIDESFLIST:pkg - list of all files w/ deps
+ FILERPROVIDES:filepath:pkg - per file dep
+
+ FILERDEPENDSFLIST:pkg - list of all files w/ deps
+ FILERDEPENDS:filepath:pkg - per file dep
"""
- from collections import OrderedDict
- import json
- packages = {}
- for root, dirs, files in os.walk(pkgdir):
- if os.path.basename(root) == 'node_modules':
- for dn in dirs:
- relpth = os.path.relpath(os.path.join(root, dn), pkgdir)
- pkgitems = ['${PN}']
- for pathitem in relpth.split('/'):
- if pathitem == 'node_modules':
+ if d.getVar('SKIP_FILEDEPS') == '1':
+ return
+
+ pkgdest = d.getVar('PKGDEST')
+ packages = d.getVar('PACKAGES')
+ rpmdeps = d.getVar('RPMDEPS')
+
+ def chunks(files, n):
+ return [files[i:i+n] for i in range(0, len(files), n)]
+
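+    # Build the work list in batches of 100 files per package so the per-file
+    # dependency scanner can be fanned out over multiple processes below.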
+ pkglist = []
+ for pkg in packages.split():
+ if d.getVar('SKIP_FILEDEPS:' + pkg) == '1':
+ continue
+ if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-') or pkg.endswith('-src'):
+ continue
+ for files in chunks(pkgfiles[pkg], 100):
+ pkglist.append((pkg, files, rpmdeps, pkgdest))
+
+ processed = oe.utils.multiprocess_launch(oe.package.filedeprunner, pkglist, d)
+
+ provides_files = {}
+ requires_files = {}
+
+ for result in processed:
+ (pkg, provides, requires) = result
+
+ if pkg not in provides_files:
+ provides_files[pkg] = []
+ if pkg not in requires_files:
+ requires_files[pkg] = []
+
+ for file in sorted(provides):
+ provides_files[pkg].append(file)
+ key = "FILERPROVIDES:" + file + ":" + pkg
+ d.appendVar(key, " " + " ".join(provides[file]))
+
+ for file in sorted(requires):
+ requires_files[pkg].append(file)
+ key = "FILERDEPENDS:" + file + ":" + pkg
+ d.appendVar(key, " " + " ".join(requires[file]))
+
+ for pkg in requires_files:
+ d.setVar("FILERDEPENDSFLIST:" + pkg, " ".join(sorted(requires_files[pkg])))
+ for pkg in provides_files:
+ d.setVar("FILERPROVIDESFLIST:" + pkg, " ".join(sorted(provides_files[pkg])))
+
+def process_shlibs(pkgfiles, d):
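+    # Record the shared libraries each package provides (written to
+    # SHLIBSWORKDIR so other recipes can resolve against them) and the shared
+    # libraries it needs, emitting the resulting package dependencies to
+    # <pkg>.shlibdeps.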
+ cpath = oe.cachedpath.CachedPath()
+
+ exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', False)
+ if exclude_shlibs:
+ bb.note("not generating shlibs")
+ return
+
+ lib_re = re.compile(r"^.*\.so")
+ libdir_re = re.compile(r".*/%s$" % d.getVar('baselib'))
+
+ packages = d.getVar('PACKAGES')
+
+ shlib_pkgs = []
+ exclusion_list = d.getVar("EXCLUDE_PACKAGES_FROM_SHLIBS")
+ if exclusion_list:
+ for pkg in packages.split():
+ if pkg not in exclusion_list.split():
+ shlib_pkgs.append(pkg)
+ else:
+ bb.note("not generating shlibs for %s" % pkg)
+ else:
+ shlib_pkgs = packages.split()
+
+ hostos = d.getVar('HOST_OS')
+
+ workdir = d.getVar('WORKDIR')
+
+ ver = d.getVar('PKGV')
+ if not ver:
+ msg = "PKGV not defined"
+ oe.qa.handle_error("pkgv-undefined", msg, d)
+ return
+
+ pkgdest = d.getVar('PKGDEST')
+
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR')
+
+ def linux_so(file, pkg, pkgver, d):
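+        # Parse "objdump -p" output for this ELF file to collect its RPATH,
+        # NEEDED entries and SONAME.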
+ needs_ldconfig = False
+ needed = set()
+ sonames = set()
+ renames = []
+ ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
+ cmd = d.getVar('OBJDUMP') + " -p " + shlex.quote(file) + " 2>/dev/null"
+ fd = os.popen(cmd)
+ lines = fd.readlines()
+ fd.close()
+ rpath = tuple()
+ for l in lines:
+ m = re.match(r"\s+RPATH\s+([^\s]*)", l)
+ if m:
+ rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
+ rpath = tuple(map(os.path.normpath, rpaths))
+ for l in lines:
+ m = re.match(r"\s+NEEDED\s+([^\s]*)", l)
+ if m:
+ dep = m.group(1)
+                needed.add((dep, file, rpath))
+ m = re.match(r"\s+SONAME\s+([^\s]*)", l)
+ if m:
+ this_soname = m.group(1)
+ prov = (this_soname, ldir, pkgver)
+ if not prov in sonames:
+ # if library is private (only used by package) then do not build shlib for it
+ if not private_libs or len([i for i in private_libs if fnmatch.fnmatch(this_soname, i)]) == 0:
+ sonames.add(prov)
+ if libdir_re.match(os.path.dirname(file)):
+ needs_ldconfig = True
+ if needs_ldconfig and snap_symlinks and (os.path.basename(file) != this_soname):
+ renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
+ return (needs_ldconfig, needed, sonames, renames)
+
+ def darwin_so(file, needed, sonames, renames, pkgver):
+ if not os.path.exists(file):
+ return
+ ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
+
+ def get_combinations(base):
+ #
+ # Given a base library name, find all combinations of this split by "." and "-"
+ #
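+            # e.g. (illustrative) "libfoo.1.2" yields "libfoo", "libfoo.1" and "libfoo.1.2"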
+ combos = []
+ options = base.split(".")
+ for i in range(1, len(options) + 1):
+ combos.append(".".join(options[0:i]))
+ options = base.split("-")
+ for i in range(1, len(options) + 1):
+ combos.append("-".join(options[0:i]))
+ return combos
+
+ if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg') and not pkg.endswith('-src'):
+ # Drop suffix
+ name = os.path.basename(file).rsplit(".",1)[0]
+ # Find all combinations
+ combos = get_combinations(name)
+ for combo in combos:
+ if not combo in sonames:
+ prov = (combo, ldir, pkgver)
+ sonames.add(prov)
+ if file.endswith('.dylib') or file.endswith('.so'):
+ rpath = []
+ p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+ out, err = p.communicate()
+ # If returned successfully, process stdout for results
+ if p.returncode == 0:
+ for l in out.split("\n"):
+ l = l.strip()
+ if l.startswith('path '):
+ rpath.append(l.split()[1])
+
+ p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+ out, err = p.communicate()
+ # If returned successfully, process stdout for results
+ if p.returncode == 0:
+ for l in out.split("\n"):
+ l = l.strip()
+ if not l or l.endswith(":"):
+ continue
+ if "is not an object file" in l:
+ continue
+ name = os.path.basename(l.split()[0]).rsplit(".", 1)[0]
+ if name and name not in needed[pkg]:
+ needed[pkg].add((name, file, tuple()))
+
+ def mingw_dll(file, needed, sonames, renames, pkgver):
+ if not os.path.exists(file):
+ return
+
+ if file.endswith(".dll"):
+ # assume all dlls are shared objects provided by the package
+ sonames.add((os.path.basename(file), os.path.dirname(file).replace(pkgdest + "/" + pkg, ''), pkgver))
+
+ if (file.endswith(".dll") or file.endswith(".exe")):
+ # use objdump to search for "DLL Name: .*\.dll"
+ p = subprocess.Popen([d.expand("${OBJDUMP}"), "-p", file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ # process the output, grabbing all .dll names
+ if p.returncode == 0:
+ for m in re.finditer(r"DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE):
+ dllname = m.group(1)
+ if dllname:
+ needed[pkg].add((dllname, file, tuple()))
+
+ if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS') == "1":
+ snap_symlinks = True
+ else:
+ snap_symlinks = False
+
+ needed = {}
+
+ shlib_provider = oe.package.read_shlib_providers(d)
+
+ for pkg in shlib_pkgs:
+ private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = private_libs.split()
+ needs_ldconfig = False
+ bb.debug(2, "calculating shlib provides for %s" % pkg)
+
+ pkgver = d.getVar('PKGV:' + pkg)
+ if not pkgver:
+ pkgver = d.getVar('PV_' + pkg)
+ if not pkgver:
+ pkgver = ver
+
+ needed[pkg] = set()
+ sonames = set()
+ renames = []
+ linuxlist = []
+ for file in pkgfiles[pkg]:
+ soname = None
+ if cpath.islink(file):
+ continue
+ if hostos.startswith("darwin"):
+ darwin_so(file, needed, sonames, renames, pkgver)
+ elif hostos.startswith("mingw"):
+ mingw_dll(file, needed, sonames, renames, pkgver)
+ elif os.access(file, os.X_OK) or lib_re.match(file):
+ linuxlist.append(file)
+
+ if linuxlist:
+ results = oe.utils.multiprocess_launch(linux_so, linuxlist, d, extraargs=(pkg, pkgver, d))
+ for r in results:
+ ldconfig = r[0]
+ needed[pkg] |= r[1]
+ sonames |= r[2]
+ renames.extend(r[3])
+ needs_ldconfig = needs_ldconfig or ldconfig
+
+ for (old, new) in renames:
+ bb.note("Renaming %s to %s" % (old, new))
+ bb.utils.rename(old, new)
+ pkgfiles[pkg].remove(old)
+
+ shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
+ if len(sonames):
+ with open(shlibs_file, 'w') as fd:
+ for s in sorted(sonames):
+ if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
+ (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
+ if old_pkg != pkg:
+ bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
+ bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
+ fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
+ if s[0] not in shlib_provider:
+ shlib_provider[s[0]] = {}
+ shlib_provider[s[0]][s[1]] = (pkg, pkgver)
+ if needs_ldconfig:
+ bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('ldconfig_postinst_fragment')
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+ bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
+
+ assumed_libs = d.getVar('ASSUME_SHLIBS')
+ if assumed_libs:
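+        # Each entry takes the form <library>:<package>[_<version>], e.g.
+        # (illustrative) "libEGL.so.1:libegl-implementation"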
+ libdir = d.getVar("libdir")
+ for e in assumed_libs.split():
+ l, dep_pkg = e.split(":")
+ lib_ver = None
+ dep_pkg = dep_pkg.rsplit("_", 1)
+ if len(dep_pkg) == 2:
+ lib_ver = dep_pkg[1]
+ dep_pkg = dep_pkg[0]
+ if l not in shlib_provider:
+ shlib_provider[l] = {}
+ shlib_provider[l][libdir] = (dep_pkg, lib_ver)
+
+ libsearchpath = [d.getVar('libdir'), d.getVar('base_libdir')]
+
+ for pkg in shlib_pkgs:
+ bb.debug(2, "calculating shlib requirements for %s" % pkg)
+
+ private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = private_libs.split()
+
+ deps = list()
+ for n in needed[pkg]:
+            # If n is in the private libraries, don't try to search for a provider.
+            # This could cause a problem if some abc.bb provided a private
+            # /opt/abc/lib/libfoo.so.1 and contained /usr/bin/abc depending on the system library libfoo.so.1,
+            # but skipping it is still a better alternative than providing our own
+            # version and then adding a runtime dependency on the same system library.
+ if private_libs and len([i for i in private_libs if fnmatch.fnmatch(n[0], i)]) > 0:
+ bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
+ continue
+ if n[0] in shlib_provider.keys():
+ shlib_provider_map = shlib_provider[n[0]]
+ matches = set()
+ for p in itertools.chain(list(n[2]), sorted(shlib_provider_map.keys()), libsearchpath):
+ if p in shlib_provider_map:
+ matches.add(p)
+ if len(matches) > 1:
+ matchpkgs = ', '.join([shlib_provider_map[match][0] for match in matches])
+ bb.error("%s: Multiple shlib providers for %s: %s (used by files: %s)" % (pkg, n[0], matchpkgs, n[1]))
+ elif len(matches) == 1:
+ (dep_pkg, ver_needed) = shlib_provider_map[matches.pop()]
+
+ bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
+
+ if dep_pkg == pkg:
+ continue
+
+ if ver_needed:
+ dep = "%s (>= %s)" % (dep_pkg, ver_needed)
+ else:
+ dep = dep_pkg
+ if not dep in deps:
+ deps.append(dep)
+ continue
+ bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n[0], n[1]))
+
+ deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
+ if os.path.exists(deps_file):
+ os.remove(deps_file)
+ if deps:
+ with open(deps_file, 'w') as fd:
+ for dep in sorted(deps):
+ fd.write(dep + '\n')
+
+def process_pkgconfig(pkgfiles, d):
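+    # Parse the .pc files shipped in each package to record which pkg-config
+    # modules it provides (written to <pkg>.pclist in SHLIBSWORKDIR) and which
+    # it requires, then turn cross-package requirements into dependencies
+    # written to <pkg>.pcdeps.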
+ packages = d.getVar('PACKAGES')
+ workdir = d.getVar('WORKDIR')
+ pkgdest = d.getVar('PKGDEST')
+
+ shlibs_dirs = d.getVar('SHLIBSDIRS').split()
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR')
+
+ pc_re = re.compile(r'(.*)\.pc$')
+ var_re = re.compile(r'(.*)=(.*)')
+ field_re = re.compile(r'(.*): (.*)')
+
+ pkgconfig_provided = {}
+ pkgconfig_needed = {}
+ for pkg in packages.split():
+ pkgconfig_provided[pkg] = []
+ pkgconfig_needed[pkg] = []
+ for file in sorted(pkgfiles[pkg]):
+ m = pc_re.match(file)
+ if m:
+ pd = bb.data.init()
+ name = m.group(1)
+ pkgconfig_provided[pkg].append(os.path.basename(name))
+ if not os.access(file, os.R_OK):
continue
- pkgitems.append(pathitem)
- pkgname = '-'.join(pkgitems).replace('_', '-')
- pkgname = pkgname.replace('@', '')
- pkgfile = os.path.join(root, dn, 'package.json')
- data = None
- if os.path.exists(pkgfile):
- with open(pkgfile, 'r') as f:
- data = json.loads(f.read())
- packages[pkgname] = (relpth, data)
- # We want the main package for a module sorted *after* its subpackages
- # (so that it doesn't otherwise steal the files for the subpackage), so
- # this is a cheap way to do that whilst still having an otherwise
- # alphabetical sort
- return OrderedDict((key, packages[key]) for key in sorted(packages, key=lambda pkg: pkg + '~'))
+ with open(file, 'r') as f:
+ lines = f.readlines()
+ for l in lines:
+ m = field_re.match(l)
+ if m:
+ hdr = m.group(1)
+ exp = pd.expand(m.group(2))
+ if hdr == 'Requires':
+ pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
+ continue
+ m = var_re.match(l)
+ if m:
+ name = m.group(1)
+ val = m.group(2)
+ pd.setVar(name, pd.expand(val))
+
+ for pkg in packages.split():
+ pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
+ if pkgconfig_provided[pkg] != []:
+ with open(pkgs_file, 'w') as f:
+ for p in sorted(pkgconfig_provided[pkg]):
+ f.write('%s\n' % p)
+
+ # Go from least to most specific since the last one found wins
+ for dir in reversed(shlibs_dirs):
+ if not os.path.exists(dir):
+ continue
+ for file in sorted(os.listdir(dir)):
+ m = re.match(r'^(.*)\.pclist$', file)
+ if m:
+ pkg = m.group(1)
+ with open(os.path.join(dir, file)) as fd:
+ lines = fd.readlines()
+ pkgconfig_provided[pkg] = []
+ for l in lines:
+ pkgconfig_provided[pkg].append(l.rstrip())
+
+ for pkg in packages.split():
+ deps = []
+ for n in pkgconfig_needed[pkg]:
+ found = False
+ for k in pkgconfig_provided.keys():
+ if n in pkgconfig_provided[k]:
+ if k != pkg and not (k in deps):
+ deps.append(k)
+ found = True
+            if not found:
+ bb.note("couldn't find pkgconfig module '%s' in any package" % n)
+ deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
+ if len(deps):
+ with open(deps_file, 'w') as fd:
+ for dep in deps:
+ fd.write(dep + '\n')
+
+def read_libdep_files(d):
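+    # Gather the per-package library dependencies recorded earlier
+    # (.shlibdeps from process_shlibs, .pcdeps from process_pkgconfig, plus
+    # any .clilibdeps) into a dict of package -> {dependency: version list}.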
+ pkglibdeps = {}
+ packages = d.getVar('PACKAGES').split()
+ for pkg in packages:
+ pkglibdeps[pkg] = {}
+ for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
+ depsfile = d.expand("${PKGDEST}/" + pkg + extension)
+ if os.access(depsfile, os.R_OK):
+ with open(depsfile) as fd:
+ lines = fd.readlines()
+ for l in lines:
+                    l = l.rstrip()
+ deps = bb.utils.explode_dep_versions2(l)
+ for dep in deps:
+ if not dep in pkglibdeps[pkg]:
+ pkglibdeps[pkg][dep] = deps[dep]
+ return pkglibdeps
+
+def process_depchains(pkgfiles, d):
+ """
+ For a given set of prefix and postfix modifiers, make those packages
+ RRECOMMENDS on the corresponding packages for its RDEPENDS.
+
+ Example: If package A depends upon package B, and A's .bb emits an
+ A-dev package, this would make A-dev Recommends: B-dev.
+
+ If only one of a given suffix is specified, it will take the RRECOMMENDS
+ based on the RDEPENDS of *all* other packages. If more than one of a given
+    suffix is specified, it will only use the RDEPENDS of the single parent
+ package.
+ """
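+    # DEPCHAIN_POST typically holds suffixes such as "-dev -dbg" and
+    # DEPCHAIN_PRE is usually empty; both are space-separated lists of
+    # package-name modifiers.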
+
+ packages = d.getVar('PACKAGES')
+ postfixes = (d.getVar('DEPCHAIN_POST') or '').split()
+ prefixes = (d.getVar('DEPCHAIN_PRE') or '').split()
+
+ def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
+
+ #bb.note('depends for %s is %s' % (base, depends))
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
+
+ for depend in sorted(depends):
+ if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
+ #bb.note("Skipping %s" % depend)
+ continue
+ if depend.endswith('-dev'):
+ depend = depend[:-4]
+ if depend.endswith('-dbg'):
+ depend = depend[:-4]
+ pkgname = getname(depend, suffix)
+ #bb.note("Adding %s for %s" % (pkgname, depend))
+ if pkgname not in rreclist and pkgname != pkg:
+ rreclist[pkgname] = []
+
+ #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+
+ def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
+
+ #bb.note('rdepends for %s is %s' % (base, rdepends))
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
+
+ for depend in sorted(rdepends):
+ if depend.find('virtual-locale-') != -1:
+ #bb.note("Skipping %s" % depend)
+ continue
+ if depend.endswith('-dev'):
+ depend = depend[:-4]
+ if depend.endswith('-dbg'):
+ depend = depend[:-4]
+ pkgname = getname(depend, suffix)
+ #bb.note("Adding %s for %s" % (pkgname, depend))
+ if pkgname not in rreclist and pkgname != pkg:
+ rreclist[pkgname] = []
+
+ #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+
+ def add_dep(list, dep):
+ if dep not in list:
+ list.append(dep)
+
+ depends = []
+ for dep in bb.utils.explode_deps(d.getVar('DEPENDS') or ""):
+ add_dep(depends, dep)
+
+ rdepends = []
+ for pkg in packages.split():
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + pkg) or ""):
+ add_dep(rdepends, dep)
+
+ #bb.note('rdepends is %s' % rdepends)
+
+ def post_getname(name, suffix):
+ return '%s%s' % (name, suffix)
+ def pre_getname(name, suffix):
+ return '%s%s' % (suffix, name)
+
+ pkgs = {}
+ for pkg in packages.split():
+ for postfix in postfixes:
+ if pkg.endswith(postfix):
+ if not postfix in pkgs:
+ pkgs[postfix] = {}
+ pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname)
+
+ for prefix in prefixes:
+ if pkg.startswith(prefix):
+ if not prefix in pkgs:
+ pkgs[prefix] = {}
+ pkgs[prefix][pkg] = (pkg[:-len(prefix)], pre_getname)
+
+ if "-dbg" in pkgs:
+ pkglibdeps = read_libdep_files(d)
+ pkglibdeplist = []
+ for pkg in pkglibdeps:
+ for k in pkglibdeps[pkg]:
+ add_dep(pkglibdeplist, k)
+ dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS') == '1') or (bb.data.inherits_class('packagegroup', d)))
+
+ for suffix in pkgs:
+ for pkg in pkgs[suffix]:
+ if d.getVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs'):
+ continue
+ (base, func) = pkgs[suffix][pkg]
+ if suffix == "-dev":
+ pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
+ elif suffix == "-dbg":
+ if not dbgdefaultdeps:
+ pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d)
+ continue
+ if len(pkgs[suffix]) == 1:
+ pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
+ else:
+ rdeps = []
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + base) or ""):
+ add_dep(rdeps, dep)
+ pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py
deleted file mode 100644
index ef69b18b64..0000000000
--- a/meta/lib/oe/package_manager.py
+++ /dev/null
@@ -1,1595 +0,0 @@
-from abc import ABCMeta, abstractmethod
-import os
-import glob
-import subprocess
-import shutil
-import multiprocessing
-import re
-import collections
-import bb
-import tempfile
-import oe.utils
-import oe.path
-import string
-from oe.gpg_sign import get_signer
-
-# this can be used by all PM backends to create the index files in parallel
-def create_index(arg):
- index_cmd = arg
-
- bb.note("Executing '%s' ..." % index_cmd)
- result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- if result:
- bb.note(result)
-
-"""
-This method parses the output from the package manager and returns
-a dictionary with the package information. This is used
-when the packages are in deb or ipk format.
-"""
-def opkg_query(cmd_output):
- verregex = re.compile(' \([=<>]* [^ )]*\)')
- output = dict()
- pkg = ""
- arch = ""
- ver = ""
- filename = ""
- dep = []
- pkgarch = ""
- for line in cmd_output.splitlines():
- line = line.rstrip()
- if ':' in line:
- if line.startswith("Package: "):
- pkg = line.split(": ")[1]
- elif line.startswith("Architecture: "):
- arch = line.split(": ")[1]
- elif line.startswith("Version: "):
- ver = line.split(": ")[1]
- elif line.startswith("File: ") or line.startswith("Filename:"):
- filename = line.split(": ")[1]
- if "/" in filename:
- filename = os.path.basename(filename)
- elif line.startswith("Depends: "):
- depends = verregex.sub('', line.split(": ")[1])
- for depend in depends.split(", "):
- dep.append(depend)
- elif line.startswith("Recommends: "):
- recommends = verregex.sub('', line.split(": ")[1])
- for recommend in recommends.split(", "):
- dep.append("%s [REC]" % recommend)
- elif line.startswith("PackageArch: "):
- pkgarch = line.split(": ")[1]
-
- # When there is a blank line save the package information
- elif not line:
- # IPK doesn't include the filename
- if not filename:
- filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
- if pkg:
- output[pkg] = {"arch":arch, "ver":ver,
- "filename":filename, "deps": dep, "pkgarch":pkgarch }
- pkg = ""
- arch = ""
- ver = ""
- filename = ""
- dep = []
- pkgarch = ""
-
- if pkg:
- if not filename:
- filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
- output[pkg] = {"arch":arch, "ver":ver,
- "filename":filename, "deps": dep }
-
- return output
-
-
-class Indexer(object, metaclass=ABCMeta):
- def __init__(self, d, deploy_dir):
- self.d = d
- self.deploy_dir = deploy_dir
-
- @abstractmethod
- def write_index(self):
- pass
-
-
-class RpmIndexer(Indexer):
- def write_index(self):
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
- else:
- signer = None
-
- createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
- result = create_index("%s --update -q %s" % (createrepo_c, self.deploy_dir))
- if result:
- bb.fatal(result)
-
- # Sign repomd
- if signer:
- sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
- is_ascii_sig = (sig_type.upper() != "BIN")
- signer.detach_sign(os.path.join(self.deploy_dir, 'repodata', 'repomd.xml'),
- self.d.getVar('PACKAGE_FEED_GPG_NAME'),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
- armor=is_ascii_sig)
-
-
-class OpkgIndexer(Indexer):
- def write_index(self):
- arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
- "SDK_PACKAGE_ARCHS",
- "MULTILIB_ARCHS"]
-
- opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
- else:
- signer = None
-
- if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
- open(os.path.join(self.deploy_dir, "Packages"), "w").close()
-
- index_cmds = set()
- index_sign_files = set()
- for arch_var in arch_vars:
- archs = self.d.getVar(arch_var)
- if archs is None:
- continue
-
- for arch in archs.split():
- pkgs_dir = os.path.join(self.deploy_dir, arch)
- pkgs_file = os.path.join(pkgs_dir, "Packages")
-
- if not os.path.isdir(pkgs_dir):
- continue
-
- if not os.path.exists(pkgs_file):
- open(pkgs_file, "w").close()
-
- index_cmds.add('%s -r %s -p %s -m %s' %
- (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
-
- index_sign_files.add(pkgs_file)
-
- if len(index_cmds) == 0:
- bb.note("There are no packages in %s!" % self.deploy_dir)
- return
-
- oe.utils.multiprocess_exec(index_cmds, create_index)
-
- if signer:
- feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
- is_ascii_sig = (feed_sig_type.upper() != "BIN")
- for f in index_sign_files:
- signer.detach_sign(f,
- self.d.getVar('PACKAGE_FEED_GPG_NAME'),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
- armor=is_ascii_sig)
-
-
-class DpkgIndexer(Indexer):
- def _create_configs(self):
- bb.utils.mkdirhier(self.apt_conf_dir)
- bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
- bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
- bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))
-
- with open(os.path.join(self.apt_conf_dir, "preferences"),
- "w") as prefs_file:
- pass
- with open(os.path.join(self.apt_conf_dir, "sources.list"),
- "w+") as sources_file:
- pass
-
- with open(self.apt_conf_file, "w") as apt_conf:
- with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
- "apt", "apt.conf.sample")) as apt_conf_sample:
- for line in apt_conf_sample.read().split("\n"):
- line = re.sub("#ROOTFS#", "/dev/null", line)
- line = re.sub("#APTCONF#", self.apt_conf_dir, line)
- apt_conf.write(line + "\n")
-
- def write_index(self):
- self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
- "apt-ftparchive")
- self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
- self._create_configs()
-
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- pkg_archs = self.d.getVar('PACKAGE_ARCHS')
- if pkg_archs is not None:
- arch_list = pkg_archs.split()
- sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
- if sdk_pkg_archs is not None:
- for a in sdk_pkg_archs.split():
- if a not in pkg_archs:
- arch_list.append(a)
-
- all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
- arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
-
- apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
- gzip = bb.utils.which(os.getenv('PATH'), "gzip")
-
- index_cmds = []
- deb_dirs_found = False
- for arch in arch_list:
- arch_dir = os.path.join(self.deploy_dir, arch)
- if not os.path.isdir(arch_dir):
- continue
-
- cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
-
- cmd += "%s -fcn Packages > Packages.gz;" % gzip
-
- with open(os.path.join(arch_dir, "Release"), "w+") as release:
- release.write("Label: %s\n" % arch)
-
- cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
-
- index_cmds.append(cmd)
-
- deb_dirs_found = True
-
- if not deb_dirs_found:
- bb.note("There are no packages in %s" % self.deploy_dir)
- return
-
- oe.utils.multiprocess_exec(index_cmds, create_index)
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
-            raise NotImplementedError('Package feed signing not implemented for dpkg')
-
-
-
-class PkgsList(object, metaclass=ABCMeta):
- def __init__(self, d, rootfs_dir):
- self.d = d
- self.rootfs_dir = rootfs_dir
-
- @abstractmethod
- def list_pkgs(self):
- pass
-
-class RpmPkgsList(PkgsList):
- def list_pkgs(self):
- return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR')).list_installed()
-
-class OpkgPkgsList(PkgsList):
- def __init__(self, d, rootfs_dir, config_file):
- super(OpkgPkgsList, self).__init__(d, rootfs_dir)
-
- self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
- self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
- self.opkg_args += self.d.getVar("OPKG_ARGS")
-
- def list_pkgs(self, format=None):
- cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
-
- # opkg returns success even when it printed some
- # "Collected errors:" report to stderr. Mixing stderr into
- # stdout then leads to random failures later on when
- # parsing the output. To avoid this we need to collect both
- # output streams separately and check for empty stderr.
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
- cmd_output, cmd_stderr = p.communicate()
- cmd_output = cmd_output.decode("utf-8")
- cmd_stderr = cmd_stderr.decode("utf-8")
- if p.returncode or cmd_stderr:
- bb.fatal("Cannot get the installed packages list. Command '%s' "
- "returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr))
-
- return opkg_query(cmd_output)
-
-
-class DpkgPkgsList(PkgsList):
-
- def list_pkgs(self):
- cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
- "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
- "-W"]
-
- cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
-
- try:
- cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get the installed packages list. Command '%s' "
- "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- return opkg_query(cmd_output)
-
-
-class PackageManager(object, metaclass=ABCMeta):
- """
- This is an abstract class. Do not instantiate this directly.
- """
-
- def __init__(self, d):
- self.d = d
- self.deploy_dir = None
- self.deploy_lock = None
-
- """
- Update the package manager package database.
- """
- @abstractmethod
- def update(self):
- pass
-
- """
- Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
- True, installation failures are ignored.
- """
- @abstractmethod
- def install(self, pkgs, attempt_only=False):
- pass
-
- """
- Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
-    is False, any dependencies are left in place.
- """
- @abstractmethod
- def remove(self, pkgs, with_dependencies=True):
- pass
-
- """
- This function creates the index files
- """
- @abstractmethod
- def write_index(self):
- pass
-
- @abstractmethod
- def remove_packaging_data(self):
- pass
-
- @abstractmethod
- def list_installed(self):
- pass
-
- """
-    Returns the path to a tmpdir containing the contents of a package.
-
-    Deleting the tmpdir is the responsibility of the caller.
-
- """
- @abstractmethod
- def extract(self, pkg):
- pass
-
- """
- Add remote package feeds into repository manager configuration. The parameters
- for the feeds are set by feed_uris, feed_base_paths and feed_archs.
- See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
- for their description.
- """
- @abstractmethod
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- pass
-
- """
- Install complementary packages based upon the list of currently installed
-    packages, e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
-    these packages; if they don't exist then no error will occur. Note: every
-    backend needs to call this function explicitly after the normal package
-    installation.
- """
- def install_complementary(self, globs=None):
- if globs is None:
- globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
- split_linguas = set()
-
- for translation in self.d.getVar('IMAGE_LINGUAS').split():
- split_linguas.add(translation)
- split_linguas.add(translation.split('-')[0])
-
- split_linguas = sorted(split_linguas)
-
- for lang in split_linguas:
- globs += " *-locale-%s" % lang
-
- if globs is None:
- return
-
-        # We need to write the list of installed packages to a file because
-        # oe-pkgdata-util reads it from a file
- with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
- pkgs = self.list_installed()
- output = oe.utils.format_pkg_list(pkgs, "arch")
- installed_pkgs.write(output)
- installed_pkgs.flush()
-
- cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
- "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
- globs]
- exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
- if exclude:
- cmd.extend(['--exclude=' + '|'.join(exclude.split())])
- try:
- bb.note("Installing complementary packages ...")
- bb.note('Running %s' % cmd)
- complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not compute complementary packages list. Command "
- "'%s' returned %d:\n%s" %
- (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- self.install(complementary_pkgs.split(), attempt_only=True)
-
- def deploy_dir_lock(self):
- if self.deploy_dir is None:
- raise RuntimeError("deploy_dir is not set!")
-
- lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
-
- self.deploy_lock = bb.utils.lockfile(lock_file_name)
-
- def deploy_dir_unlock(self):
- if self.deploy_lock is None:
- return
-
- bb.utils.unlockfile(self.deploy_lock)
-
- self.deploy_lock = None
-
- """
- Construct URIs based on the following pattern: uri/base_path where 'uri'
- and 'base_path' correspond to each element of the corresponding array
-    argument, leading to len(uris) x len(base_paths) elements in the returned
-    array.
- """
- def construct_uris(self, uris, base_paths):
- def _append(arr1, arr2, sep='/'):
- res = []
- narr1 = [a.rstrip(sep) for a in arr1]
- narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
- for a1 in narr1:
- if arr2:
- for a2 in narr2:
- res.append("%s%s%s" % (a1, sep, a2))
- else:
- res.append(a1)
- return res
- return _append(uris, base_paths)
-
-class RpmPM(PackageManager):
- def __init__(self,
- d,
- target_rootfs,
- target_vendor,
- task_name='target',
- providename=None,
- arch_var=None,
- os_var=None):
- super(RpmPM, self).__init__(d)
- self.target_rootfs = target_rootfs
- self.target_vendor = target_vendor
- self.task_name = task_name
- if arch_var == None:
- self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_")
- else:
- self.archs = self.d.getVar(arch_var).replace("-","_")
- if task_name == "host":
- self.primary_arch = self.d.getVar('SDK_ARCH')
- else:
- self.primary_arch = self.d.getVar('MACHINE_ARCH')
-
- self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), "oe-rootfs-repo")
- bb.utils.mkdirhier(self.rpm_repo_dir)
- oe.path.symlink(self.d.getVar('DEPLOY_DIR_RPM'), oe.path.join(self.rpm_repo_dir, "rpm"), True)
-
- self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
- if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
- bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
- self.packaging_data_dirs = ['var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
- self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
- self.task_name)
- if not os.path.exists(self.d.expand('${T}/saved')):
- bb.utils.mkdirhier(self.d.expand('${T}/saved'))
-
- def _configure_dnf(self):
- # libsolv handles 'noarch' internally, we don't need to specify it explicitly
- archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]]
- # This prevents accidental matching against libsolv's built-in policies
- if len(archs) <= 1:
- archs = archs + ["bogusarch"]
- confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
- bb.utils.mkdirhier(confdir)
- open(confdir + "arch", 'w').write(":".join(archs))
- distro_codename = self.d.getVar('DISTRO_CODENAME')
- open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '')
-
- open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
-
-
- def _configure_rpm(self):
- # We need to configure rpm to use our primary package architecture as the installation architecture,
- # and to make it compatible with other package architectures that we use.
-        # Otherwise it will refuse to proceed with package installation.
- platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
- rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
- bb.utils.mkdirhier(platformconfdir)
- open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
- open(rpmrcconfdir + "rpmrc", 'w').write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
-
- open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
- if self.d.getVar('RPM_PREFER_ELF_ARCH'):
- open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
- else:
- open(platformconfdir + "macros", 'a').write("%_prefer_color 7")
-
- if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
- signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
- pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
- signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
- rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
- cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
- try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Importing GPG key failed. Command '%s' "
- "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- def create_configs(self):
- self._configure_dnf()
- self._configure_rpm()
-
- def write_index(self):
- lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
- lf = bb.utils.lockfile(lockfilename, False)
- RpmIndexer(self.d, self.rpm_repo_dir).write_index()
- bb.utils.unlockfile(lf)
-
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- from urllib.parse import urlparse
-
- if feed_uris == "":
- return
-
- gpg_opts = ''
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- gpg_opts += 'repo_gpgcheck=1\n'
- gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))
-
- if self.d.getVar('RPM_SIGN_PACKAGES') == '0':
- gpg_opts += 'gpgcheck=0\n'
-
- bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
- remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
- for uri in remote_uris:
- repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
- if feed_archs is not None:
- for arch in feed_archs.split():
- repo_uri = uri + "/" + arch
- repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
- repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write(
- "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
- else:
- repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
- repo_uri = uri
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write(
- "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
-
- def _prepare_pkg_transaction(self):
- os.environ['D'] = self.target_rootfs
- os.environ['OFFLINE_ROOT'] = self.target_rootfs
- os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = oe.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
-
-
- def install(self, pkgs, attempt_only = False):
- if len(pkgs) == 0:
- return
- self._prepare_pkg_transaction()
-
- bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
- package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
- exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])
-
- output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
- (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
- (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) +
- (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
- ["install"] +
- pkgs)
-
- failed_scriptlets_pkgnames = collections.OrderedDict()
- for line in output.splitlines():
- if line.startswith("Non-fatal POSTIN scriptlet failure in rpm package"):
- failed_scriptlets_pkgnames[line.split()[-1]] = True
-
- for pkg in failed_scriptlets_pkgnames.keys():
- self.save_rpmpostinst(pkg)
-
- def remove(self, pkgs, with_dependencies = True):
- if len(pkgs) == 0:
- return
- self._prepare_pkg_transaction()
-
- if with_dependencies:
- self._invoke_dnf(["remove"] + pkgs)
- else:
- cmd = bb.utils.which(os.getenv('PATH'), "rpm")
- args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs]
-
- try:
- bb.note("Running %s" % ' '.join([cmd] + args + pkgs))
- output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not invoke rpm. Command "
- "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))
-
- def upgrade(self):
- self._prepare_pkg_transaction()
- self._invoke_dnf(["upgrade"])
-
- def autoremove(self):
- self._prepare_pkg_transaction()
- self._invoke_dnf(["autoremove"])
-
- def remove_packaging_data(self):
- self._invoke_dnf(["clean", "all"])
- for dir in self.packaging_data_dirs:
- bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)
-
- def backup_packaging_data(self):
-        # Save the packaging dirs for incremental rpm image generation
- if os.path.exists(self.saved_packaging_data):
- bb.utils.remove(self.saved_packaging_data, True)
- for i in self.packaging_data_dirs:
- source_dir = oe.path.join(self.target_rootfs, i)
- target_dir = oe.path.join(self.saved_packaging_data, i)
- shutil.copytree(source_dir, target_dir, symlinks=True)
-
- def recovery_packaging_data(self):
- # Move the rpmlib back
- if os.path.exists(self.saved_packaging_data):
- for i in self.packaging_data_dirs:
- target_dir = oe.path.join(self.target_rootfs, i)
- if os.path.exists(target_dir):
- bb.utils.remove(target_dir, True)
- source_dir = oe.path.join(self.saved_packaging_data, i)
- shutil.copytree(source_dir,
- target_dir,
- symlinks=True)
-
- def list_installed(self):
- output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
- print_output = False)
- packages = {}
- current_package = None
- current_deps = None
- current_state = "initial"
- for line in output.splitlines():
- if line.startswith("Package:"):
- package_info = line.split(" ")[1:]
- current_package = package_info[0]
- package_arch = package_info[1]
- package_version = package_info[2]
- package_rpm = package_info[3]
- packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm}
- current_deps = []
- elif line.startswith("Dependencies:"):
- current_state = "dependencies"
- elif line.startswith("Recommendations"):
- current_state = "recommendations"
- elif line.startswith("DependenciesEndHere:"):
- current_state = "initial"
- packages[current_package]["deps"] = current_deps
- elif len(line) > 0:
- if current_state == "dependencies":
- current_deps.append(line)
- elif current_state == "recommendations":
- current_deps.append("%s [REC]" % line)
-
- return packages
-
- def update(self):
- self._invoke_dnf(["makecache", "--refresh"])
-
- def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
- os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
-
- dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
- standard_dnf_args = (["-v", "--rpmverbosity=debug"] if self.d.getVar('ROOTFS_RPM_DEBUG') else []) + ["-y",
- "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
- "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
- "--repofrompath=oe-repo,%s" % (self.rpm_repo_dir),
- "--installroot=%s" % (self.target_rootfs),
- "--setopt=logdir=%s" % (self.d.getVar('T'))
- ]
- cmd = [dnf_cmd] + standard_dnf_args + dnf_args
- try:
- output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
- if print_output:
- bb.note(output)
- return output
- except subprocess.CalledProcessError as e:
- if print_output:
- (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
- "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- else:
- (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
- "'%s' returned %d:" % (' '.join(cmd), e.returncode))
- return e.output.decode("utf-8")
-
- def dump_install_solution(self, pkgs):
- open(self.solution_manifest, 'w').write(" ".join(pkgs))
- return pkgs
-
- def load_old_install_solution(self):
- if not os.path.exists(self.solution_manifest):
- return []
-
- return open(self.solution_manifest, 'r').read().split()
-
- def _script_num_prefix(self, path):
- files = os.listdir(path)
- numbers = set()
- numbers.add(99)
- for f in files:
- numbers.add(int(f.split("-")[0]))
- return max(numbers) + 1
-
- def save_rpmpostinst(self, pkg):
- bb.note("Saving postinstall script of %s" % (pkg))
- cmd = bb.utils.which(os.getenv('PATH'), "rpm")
- args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]
-
- try:
- output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not invoke rpm. Command "
- "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))
-
- # may need to prepend #!/bin/sh to output
-
- target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
- bb.utils.mkdirhier(target_path)
- num = self._script_num_prefix(target_path)
- saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
- open(saved_script_name, 'w').write(output)
- os.chmod(saved_script_name, 0o755)
-
- def extract(self, pkg):
- output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
- pkg_name = output.splitlines()[-1]
- if not pkg_name.endswith(".rpm"):
- bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
- pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
-
- cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
- rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
-
- if not os.path.isfile(pkg_path):
-            bb.fatal("Unable to extract package for '%s'. "
-                     "File %s doesn't exist" % (pkg, pkg_path))
-
- tmp_dir = tempfile.mkdtemp()
- current_dir = os.getcwd()
- os.chdir(tmp_dir)
-
- try:
- cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
- except OSError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))
-
- bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
- os.chdir(current_dir)
-
- return tmp_dir
-
-
-class OpkgDpkgPM(PackageManager):
- """
- This is an abstract class. Do not instantiate this directly.
- """
- def __init__(self, d):
- super(OpkgDpkgPM, self).__init__(d)
-
- """
- Returns a dictionary with the package info.
-
- This method extracts the common parts for Opkg and Dpkg
- """
- def package_info(self, pkg, cmd):
-
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to list available packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
- return opkg_query(output)
-
- """
-    Returns the path to a tmpdir containing the contents of a package.
-
-    Deleting the tmpdir is the responsibility of the caller.
-
- This method extracts the common parts for Opkg and Dpkg
- """
- def extract(self, pkg, pkg_info):
-
- ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
- tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
- pkg_path = pkg_info[pkg]["filepath"]
-
- if not os.path.isfile(pkg_path):
-            bb.fatal("Unable to extract package for '%s'. "
-                     "File %s doesn't exist" % (pkg, pkg_path))
-
- tmp_dir = tempfile.mkdtemp()
- current_dir = os.getcwd()
- os.chdir(tmp_dir)
- if self.d.getVar('IMAGE_PKGTYPE') == 'deb':
- data_tar = 'data.tar.xz'
- else:
- data_tar = 'data.tar.gz'
-
- try:
- cmd = [ar_cmd, 'x', pkg_path]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- cmd = [tar_cmd, 'xf', data_tar]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- except OSError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
-
- bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
- bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
- bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
- os.chdir(current_dir)
-
- return tmp_dir
-
-
-class OpkgPM(OpkgDpkgPM):
- def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
- super(OpkgPM, self).__init__(d)
-
- self.target_rootfs = target_rootfs
- self.config_file = config_file
- self.pkg_archs = archs
- self.task_name = task_name
-
- self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK")
- self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
- self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
- self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
- self.opkg_args += self.d.getVar("OPKG_ARGS")
-
- opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
- if opkg_lib_dir[0] == "/":
- opkg_lib_dir = opkg_lib_dir[1:]
-
- self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
-
- bb.utils.mkdirhier(self.opkg_dir)
-
- self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
- if not os.path.exists(self.d.expand('${T}/saved')):
- bb.utils.mkdirhier(self.d.expand('${T}/saved'))
-
- self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
- if self.from_feeds:
- self._create_custom_config()
- else:
- self._create_config()
-
- self.indexer = OpkgIndexer(self.d, self.deploy_dir)
-
- """
-    This function will change a package's status in the /var/lib/opkg/status file.
-    If 'packages' is None then the new_status will be applied to all
-    packages.
- """
- def mark_packages(self, status_tag, packages=None):
- status_file = os.path.join(self.opkg_dir, "status")
-
- with open(status_file, "r") as sf:
- with open(status_file + ".tmp", "w+") as tmp_sf:
- if packages is None:
- tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
- r"Package: \1\n\2Status: \3%s" % status_tag,
- sf.read()))
- else:
- if type(packages).__name__ != "list":
- raise TypeError("'packages' should be a list object")
-
- status = sf.read()
- for pkg in packages:
- status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
- r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
- status)
-
- tmp_sf.write(status)
-
- os.rename(status_file + ".tmp", status_file)
-
- def _create_custom_config(self):
- bb.note("Building from feeds activated!")
-
- with open(self.config_file, "w+") as config_file:
- priority = 1
- for arch in self.pkg_archs.split():
- config_file.write("arch %s %d\n" % (arch, priority))
- priority += 5
-
- for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
- feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
-
- if feed_match is not None:
- feed_name = feed_match.group(1)
- feed_uri = feed_match.group(2)
-
- bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
-
- config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
-
- """
-    Allow using the package deploy directory contents as a quick devel-testing
- feed. This creates individual feed configs for each arch subdir of those
- specified as compatible for the current machine.
- NOTE: Development-helper feature, NOT a full-fledged feed.
- """
- if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
- for arch in self.pkg_archs.split():
- cfg_file_name = os.path.join(self.target_rootfs,
- self.d.getVar("sysconfdir"),
- "opkg",
- "local-%s-feed.conf" % arch)
-
- with open(cfg_file_name, "w+") as cfg_file:
- cfg_file.write("src/gz local-%s %s/%s" %
- (arch,
- self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
- arch))
-
- if self.d.getVar('OPKGLIBDIR') != '/var/lib':
- # There is no command line option for this anymore, we need to add
- # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
- # the default value of "/var/lib" as defined in opkg:
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
- cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
- cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
- cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
-
-
- def _create_config(self):
- with open(self.config_file, "w+") as config_file:
- priority = 1
- for arch in self.pkg_archs.split():
- config_file.write("arch %s %d\n" % (arch, priority))
- priority += 5
-
- config_file.write("src oe file:%s\n" % self.deploy_dir)
-
- for arch in self.pkg_archs.split():
- pkgs_dir = os.path.join(self.deploy_dir, arch)
- if os.path.isdir(pkgs_dir):
- config_file.write("src oe-%s file:%s\n" %
- (arch, pkgs_dir))
-
- if self.d.getVar('OPKGLIBDIR') != '/var/lib':
- # There is no command line option for this anymore, we need to add
- # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
- # the default value of "/var/lib" as defined in opkg:
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
- config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
- config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
- config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
-
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- if feed_uris == "":
- return
-
- rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
- % self.target_rootfs)
-
- feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
- archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
-
- with open(rootfs_config, "w+") as config_file:
- uri_iterator = 0
- for uri in feed_uris:
- if archs:
- for arch in archs:
- if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
- continue
- bb.note('Adding opkg feed url-%s-%d (%s)' %
- (arch, uri_iterator, uri))
- config_file.write("src/gz uri-%s-%d %s/%s\n" %
- (arch, uri_iterator, uri, arch))
- else:
- bb.note('Adding opkg feed url-%d (%s)' %
- (uri_iterator, uri))
- config_file.write("src/gz uri-%d %s\n" %
- (uri_iterator, uri))
-
- uri_iterator += 1
-
- def update(self):
- self.deploy_dir_lock()
-
- cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- self.deploy_dir_unlock()
- bb.fatal("Unable to update the package index files. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- self.deploy_dir_unlock()
-
- def install(self, pkgs, attempt_only=False):
- if not pkgs:
- return
-
- cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
-
- os.environ['D'] = self.target_rootfs
- os.environ['OFFLINE_ROOT'] = self.target_rootfs
- os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
-
- try:
- bb.note("Installing the following packages: %s" % ' '.join(pkgs))
- bb.note(cmd)
- output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
- "Command '%s' returned %d:\n%s" %
- (cmd, e.returncode, e.output.decode("utf-8")))
-
- def remove(self, pkgs, with_dependencies=True):
- if with_dependencies:
- cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
- (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
- else:
- cmd = "%s %s --force-depends remove %s" % \
- (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
-
- try:
- bb.note(cmd)
- output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to remove packages. Command '%s' "
- "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- def write_index(self):
- self.deploy_dir_lock()
-
- result = self.indexer.write_index()
-
- self.deploy_dir_unlock()
-
- if result is not None:
- bb.fatal(result)
-
- def remove_packaging_data(self):
- bb.utils.remove(self.opkg_dir, True)
- # create the directory back, it's needed by PM lock
- bb.utils.mkdirhier(self.opkg_dir)
-
- def remove_lists(self):
- if not self.from_feeds:
- bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True)
-
- def list_installed(self):
- return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs()
-
- def handle_bad_recommendations(self):
- bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS") or ""
- if bad_recommendations.strip() == "":
- return
-
- status_file = os.path.join(self.opkg_dir, "status")
-
- # If status file existed, it means the bad recommendations has already
- # been handled
- if os.path.exists(status_file):
- return
-
- cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
-
- with open(status_file, "w+") as status:
- for pkg in bad_recommendations.split():
- pkg_info = cmd + pkg
-
- try:
- output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get package info. Command '%s' "
- "returned %d:\n%s" % (pkg_info, e.returncode, e.output.decode("utf-8")))
-
- if output == "":
- bb.note("Ignored bad recommendation: '%s' is "
- "not a package" % pkg)
- continue
-
- for line in output.split('\n'):
- if line.startswith("Status:"):
- status.write("Status: deinstall hold not-installed\n")
- else:
- status.write(line + "\n")
-
- # Append a blank line after each package entry to ensure that it
- # is separated from the following entry
- status.write("\n")
-
- '''
- The following function dummy installs pkgs and returns the log of output.
- '''
- def dummy_install(self, pkgs):
- if len(pkgs) == 0:
- return
-
- # Create an temp dir as opkg root for dummy installation
- temp_rootfs = self.d.expand('${T}/opkg')
- opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
- if opkg_lib_dir[0] == "/":
- opkg_lib_dir = opkg_lib_dir[1:]
- temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg')
- bb.utils.mkdirhier(temp_opkg_dir)
-
- opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
- opkg_args += self.d.getVar("OPKG_ARGS")
-
- cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
- try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to update. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- # Dummy installation
- cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
- opkg_args,
- ' '.join(pkgs))
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to dummy install packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- bb.utils.remove(temp_rootfs, True)
-
- return output
-
- def backup_packaging_data(self):
- # Save the opkglib for increment ipk image generation
- if os.path.exists(self.saved_opkg_dir):
- bb.utils.remove(self.saved_opkg_dir, True)
- shutil.copytree(self.opkg_dir,
- self.saved_opkg_dir,
- symlinks=True)
-
- def recover_packaging_data(self):
- # Move the opkglib back
- if os.path.exists(self.saved_opkg_dir):
- if os.path.exists(self.opkg_dir):
- bb.utils.remove(self.opkg_dir, True)
-
- bb.note('Recover packaging data')
- shutil.copytree(self.saved_opkg_dir,
- self.opkg_dir,
- symlinks=True)
-
- """
- Returns a dictionary with the package info.
- """
- def package_info(self, pkg):
- cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
- pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
-
- pkg_arch = pkg_info[pkg]["arch"]
- pkg_filename = pkg_info[pkg]["filename"]
- pkg_info[pkg]["filepath"] = \
- os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
-
- return pkg_info
-
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
- """
- def extract(self, pkg):
- pkg_info = self.package_info(pkg)
- if not pkg_info:
- bb.fatal("Unable to get information for package '%s' while "
- "trying to extract the package." % pkg)
-
- tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.gz"))
-
- return tmp_dir
-
-class DpkgPM(OpkgDpkgPM):
- def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
- super(DpkgPM, self).__init__(d)
- self.target_rootfs = target_rootfs
- self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB')
- if apt_conf_dir is None:
- self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
- else:
- self.apt_conf_dir = apt_conf_dir
- self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
- self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
- self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")
-
- self.apt_args = d.getVar("APT_ARGS")
-
- self.all_arch_list = archs.split()
- all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
- self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
-
- self._create_configs(archs, base_archs)
-
- self.indexer = DpkgIndexer(self.d, self.deploy_dir)
-
- """
- This function will change a package's status in /var/lib/dpkg/status file.
- If 'packages' is None then the new_status will be applied to all
- packages
- """
- def mark_packages(self, status_tag, packages=None):
- status_file = self.target_rootfs + "/var/lib/dpkg/status"
-
- with open(status_file, "r") as sf:
- with open(status_file + ".tmp", "w+") as tmp_sf:
- if packages is None:
- tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
- r"Package: \1\n\2Status: \3%s" % status_tag,
- sf.read()))
- else:
- if type(packages).__name__ != "list":
- raise TypeError("'packages' should be a list object")
-
- status = sf.read()
- for pkg in packages:
- status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
- r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
- status)
-
- tmp_sf.write(status)
-
- os.rename(status_file + ".tmp", status_file)
-
- """
- Run the pre/post installs for package "package_name". If package_name is
- None, then run all pre/post install scriptlets.
- """
- def run_pre_post_installs(self, package_name=None):
- info_dir = self.target_rootfs + "/var/lib/dpkg/info"
- ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
- control_scripts = [
- ControlScript(".preinst", "Preinstall", "install"),
- ControlScript(".postinst", "Postinstall", "configure")]
- status_file = self.target_rootfs + "/var/lib/dpkg/status"
- installed_pkgs = []
-
- with open(status_file, "r") as status:
- for line in status.read().split('\n'):
- m = re.match("^Package: (.*)", line)
- if m is not None:
- installed_pkgs.append(m.group(1))
-
- if package_name is not None and not package_name in installed_pkgs:
- return
-
- os.environ['D'] = self.target_rootfs
- os.environ['OFFLINE_ROOT'] = self.target_rootfs
- os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
-
- failed_pkgs = []
- for pkg_name in installed_pkgs:
- for control_script in control_scripts:
- p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
- if os.path.exists(p_full):
- try:
- bb.note("Executing %s for package: %s ..." %
- (control_script.name.lower(), pkg_name))
- output = subprocess.check_output([p_full, control_script.argument],
- stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.note("%s for package %s failed with %d:\n%s" %
- (control_script.name, pkg_name, e.returncode,
- e.output.decode("utf-8")))
- failed_pkgs.append(pkg_name)
- break
-
- if len(failed_pkgs):
- self.mark_packages("unpacked", failed_pkgs)
-
- def update(self):
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- self.deploy_dir_lock()
-
- cmd = "%s update" % self.apt_get_cmd
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to update the package index files. Command '%s' "
- "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- self.deploy_dir_unlock()
-
- def install(self, pkgs, attempt_only=False):
- if attempt_only and len(pkgs) == 0:
- return
-
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \
- (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
-
- try:
- bb.note("Installing the following packages: %s" % ' '.join(pkgs))
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
- "Command '%s' returned %d:\n%s" %
- (cmd, e.returncode, e.output.decode("utf-8")))
-
- # rename *.dpkg-new files/dirs
- for root, dirs, files in os.walk(self.target_rootfs):
- for dir in dirs:
- new_dir = re.sub("\.dpkg-new", "", dir)
- if dir != new_dir:
- os.rename(os.path.join(root, dir),
- os.path.join(root, new_dir))
-
- for file in files:
- new_file = re.sub("\.dpkg-new", "", file)
- if file != new_file:
- os.rename(os.path.join(root, file),
- os.path.join(root, new_file))
-
-
- def remove(self, pkgs, with_dependencies=True):
- if with_dependencies:
- os.environ['APT_CONFIG'] = self.apt_conf_file
- cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
- else:
- cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
- " -P --force-depends %s" % \
- (bb.utils.which(os.getenv('PATH'), "dpkg"),
- self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to remove packages. Command '%s' "
- "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- def write_index(self):
- self.deploy_dir_lock()
-
- result = self.indexer.write_index()
-
- self.deploy_dir_unlock()
-
- if result is not None:
- bb.fatal(result)
-
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- if feed_uris == "":
- return
-
- sources_conf = os.path.join("%s/etc/apt/sources.list"
- % self.target_rootfs)
- arch_list = []
-
- if feed_archs is None:
- for arch in self.all_arch_list:
- if not os.path.exists(os.path.join(self.deploy_dir, arch)):
- continue
- arch_list.append(arch)
- else:
- arch_list = feed_archs.split()
-
- feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
-
- with open(sources_conf, "w+") as sources_file:
- for uri in feed_uris:
- if arch_list:
- for arch in arch_list:
- bb.note('Adding dpkg channel at (%s)' % uri)
- sources_file.write("deb %s/%s ./\n" %
- (uri, arch))
- else:
- bb.note('Adding dpkg channel at (%s)' % uri)
- sources_file.write("deb %s ./\n" % uri)
-
- def _create_configs(self, archs, base_archs):
- base_archs = re.sub("_", "-", base_archs)
-
- if os.path.exists(self.apt_conf_dir):
- bb.utils.remove(self.apt_conf_dir, True)
-
- bb.utils.mkdirhier(self.apt_conf_dir)
- bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
- bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
- bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")
-
- arch_list = []
- for arch in self.all_arch_list:
- if not os.path.exists(os.path.join(self.deploy_dir, arch)):
- continue
- arch_list.append(arch)
-
- with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
- priority = 801
- for arch in arch_list:
- prefs_file.write(
- "Package: *\n"
- "Pin: release l=%s\n"
- "Pin-Priority: %d\n\n" % (arch, priority))
-
- priority += 5
-
- pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
- for pkg in pkg_exclude.split():
- prefs_file.write(
- "Package: %s\n"
- "Pin: release *\n"
- "Pin-Priority: -1\n\n" % pkg)
-
- arch_list.reverse()
-
- with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
- for arch in arch_list:
- sources_file.write("deb file:%s/ ./\n" %
- os.path.join(self.deploy_dir, arch))
-
- base_arch_list = base_archs.split()
- multilib_variants = self.d.getVar("MULTILIB_VARIANTS");
- for variant in multilib_variants.split():
- localdata = bb.data.createCopy(self.d)
- variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False)
- orig_arch = localdata.getVar("DPKG_ARCH")
- localdata.setVar("DEFAULTTUNE", variant_tune)
- variant_arch = localdata.getVar("DPKG_ARCH")
- if variant_arch not in base_arch_list:
- base_arch_list.append(variant_arch)
-
- with open(self.apt_conf_file, "w+") as apt_conf:
- with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
- for line in apt_conf_sample.read().split("\n"):
- match_arch = re.match(" Architecture \".*\";$", line)
- architectures = ""
- if match_arch:
- for base_arch in base_arch_list:
- architectures += "\"%s\";" % base_arch
- apt_conf.write(" Architectures {%s};\n" % architectures);
- apt_conf.write(" Architecture \"%s\";\n" % base_archs)
- else:
- line = re.sub("#ROOTFS#", self.target_rootfs, line)
- line = re.sub("#APTCONF#", self.apt_conf_dir, line)
- apt_conf.write(line + "\n")
-
- target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
- bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
-
- bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
-
- if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
- open(os.path.join(target_dpkg_dir, "status"), "w+").close()
- if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
- open(os.path.join(target_dpkg_dir, "available"), "w+").close()
-
- def remove_packaging_data(self):
- bb.utils.remove(os.path.join(self.target_rootfs,
- self.d.getVar('opkglibdir')), True)
- bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
-
- def fix_broken_dependencies(self):
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args)
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot fix broken dependencies. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- def list_installed(self):
- return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs()
-
- """
- Returns a dictionary with the package info.
- """
- def package_info(self, pkg):
- cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
- pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
-
- pkg_arch = pkg_info[pkg]["pkgarch"]
- pkg_filename = pkg_info[pkg]["filename"]
- pkg_info[pkg]["filepath"] = \
- os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
-
- return pkg_info
-
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
- """
- def extract(self, pkg):
- pkg_info = self.package_info(pkg)
- if not pkg_info:
- bb.fatal("Unable to get information for package '%s' while "
- "trying to extract the package." % pkg)
-
- tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
-
- return tmp_dir
-
-def generate_index_files(d):
- classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
-
- indexer_map = {
- "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM')),
- "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
- "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
- }
-
- result = None
-
- for pkg_class in classes:
- if not pkg_class in indexer_map:
- continue
-
- if os.path.exists(indexer_map[pkg_class][1]):
- result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
-
- if result is not None:
- bb.fatal(result)
-
-if __name__ == "__main__":
- """
- We should be able to run this as a standalone script, from outside bitbake
- environment.
- """
- """
- TBD
- """
diff --git a/meta/lib/oe/package_manager/__init__.py b/meta/lib/oe/package_manager/__init__.py
new file mode 100644
index 0000000000..6774cdb794
--- /dev/null
+++ b/meta/lib/oe/package_manager/__init__.py
@@ -0,0 +1,562 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from abc import ABCMeta, abstractmethod
+import os
+import glob
+import subprocess
+import shutil
+import re
+import collections
+import bb
+import tempfile
+import oe.utils
+import oe.path
+import string
+from oe.gpg_sign import get_signer
+import hashlib
+import fnmatch
+
+# this can be used by all PM backends to create the index files in parallel
+def create_index(arg):
+ index_cmd = arg
+
+ bb.note("Executing '%s' ..." % index_cmd)
+ result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
+ if result:
+ bb.note(result)
+
+def opkg_query(cmd_output):
+ """
+ This method parses the output from the package manager and returns
+ a dictionary with information about the packages. This is used
+ when the packages are in deb or ipk format.
+ """
+ verregex = re.compile(r' \([=<>]* [^ )]*\)')
+ output = dict()
+ pkg = ""
+ arch = ""
+ ver = ""
+ filename = ""
+ dep = []
+ prov = []
+ pkgarch = ""
+ for line in cmd_output.splitlines()+['']:
+ line = line.rstrip()
+ if ':' in line:
+ if line.startswith("Package: "):
+ pkg = line.split(": ")[1]
+ elif line.startswith("Architecture: "):
+ arch = line.split(": ")[1]
+ elif line.startswith("Version: "):
+ ver = line.split(": ")[1]
+ elif line.startswith("File: ") or line.startswith("Filename:"):
+ filename = line.split(": ")[1]
+ if "/" in filename:
+ filename = os.path.basename(filename)
+ elif line.startswith("Depends: "):
+ depends = verregex.sub('', line.split(": ")[1])
+ for depend in depends.split(", "):
+ dep.append(depend)
+ elif line.startswith("Recommends: "):
+ recommends = verregex.sub('', line.split(": ")[1])
+ for recommend in recommends.split(", "):
+ dep.append("%s [REC]" % recommend)
+ elif line.startswith("PackageArch: "):
+ pkgarch = line.split(": ")[1]
+ elif line.startswith("Provides: "):
+ provides = verregex.sub('', line.split(": ")[1])
+ for provide in provides.split(", "):
+ prov.append(provide)
+
+ # When there is a blank line save the package information
+ elif not line:
+ # IPK doesn't include the filename
+ if not filename:
+ filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
+ if pkg:
+ output[pkg] = {"arch":arch, "ver":ver,
+ "filename":filename, "deps": dep, "pkgarch":pkgarch, "provs": prov}
+ pkg = ""
+ arch = ""
+ ver = ""
+ filename = ""
+ dep = []
+ prov = []
+ pkgarch = ""
+
+ return output
+
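A minimal sketch of what opkg_query() above returns for a single stanza (the package data here is made up purely for illustration):

    sample = ("Package: libfoo\n"
              "Version: 1.0-r0\n"
              "Architecture: armv7a\n"
              "Depends: libc6 (>= 2.35), libbar\n\n")
    info = opkg_query(sample)
    # info["libfoo"] == {"arch": "armv7a", "ver": "1.0-r0",
    #                    "filename": "libfoo_1.0-r0_armv7a.ipk",
    #                    "deps": ["libc6", "libbar"],
    #                    "pkgarch": "", "provs": []}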
+def failed_postinsts_abort(pkgs, log_path):
+ bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
+then please place them into pkg_postinst_ontarget:${PN} ().
+Deferring to first boot via 'exit 1' is no longer supported.
+Details of the failure are in %s.""" %(pkgs, log_path))
+
+def generate_locale_archive(d, rootfs, target_arch, localedir):
+ # Pretty sure we don't need this for locale archive generation but
+ # keeping it to be safe...
+ locale_arch_options = { \
+ "arc": ["--uint32-align=4", "--little-endian"],
+ "arceb": ["--uint32-align=4", "--big-endian"],
+ "arm": ["--uint32-align=4", "--little-endian"],
+ "armeb": ["--uint32-align=4", "--big-endian"],
+ "aarch64": ["--uint32-align=4", "--little-endian"],
+ "aarch64_be": ["--uint32-align=4", "--big-endian"],
+ "sh4": ["--uint32-align=4", "--big-endian"],
+ "powerpc": ["--uint32-align=4", "--big-endian"],
+ "powerpc64": ["--uint32-align=4", "--big-endian"],
+ "powerpc64le": ["--uint32-align=4", "--little-endian"],
+ "mips": ["--uint32-align=4", "--big-endian"],
+ "mipsisa32r6": ["--uint32-align=4", "--big-endian"],
+ "mips64": ["--uint32-align=4", "--big-endian"],
+ "mipsisa64r6": ["--uint32-align=4", "--big-endian"],
+ "mipsel": ["--uint32-align=4", "--little-endian"],
+ "mipsisa32r6el": ["--uint32-align=4", "--little-endian"],
+ "mips64el": ["--uint32-align=4", "--little-endian"],
+ "mipsisa64r6el": ["--uint32-align=4", "--little-endian"],
+ "riscv64": ["--uint32-align=4", "--little-endian"],
+ "riscv32": ["--uint32-align=4", "--little-endian"],
+ "i586": ["--uint32-align=4", "--little-endian"],
+ "i686": ["--uint32-align=4", "--little-endian"],
+ "x86_64": ["--uint32-align=4", "--little-endian"],
+ "loongarch64": ["--uint32-align=4", "--little-endian"]
+ }
+ if target_arch in locale_arch_options:
+ arch_options = locale_arch_options[target_arch]
+ else:
+ bb.error("locale_arch_options not found for target_arch=" + target_arch)
+ bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
+
+ # Need to set this so cross-localedef knows where the archive is
+ env = dict(os.environ)
+ env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")
+
+ for name in sorted(os.listdir(localedir)):
+ path = os.path.join(localedir, name)
+ if os.path.isdir(path):
+ cmd = ["cross-localedef", "--verbose"]
+ cmd += arch_options
+ cmd += ["--add-to-archive", path]
+ subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
+
+class Indexer(object, metaclass=ABCMeta):
+ def __init__(self, d, deploy_dir):
+ self.d = d
+ self.deploy_dir = deploy_dir
+
+ @abstractmethod
+ def write_index(self):
+ pass
+
+class PkgsList(object, metaclass=ABCMeta):
+ def __init__(self, d, rootfs_dir):
+ self.d = d
+ self.rootfs_dir = rootfs_dir
+
+ @abstractmethod
+ def list_pkgs(self):
+ pass
+
+class PackageManager(object, metaclass=ABCMeta):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+
+ def __init__(self, d, target_rootfs):
+ self.d = d
+ self.target_rootfs = target_rootfs
+ self.deploy_dir = None
+ self.deploy_lock = None
+ self._initialize_intercepts()
+
+ def _initialize_intercepts(self):
+ bb.note("Initializing intercept dir for %s" % self.target_rootfs)
+ # As there might be more than one instance of PackageManager operating at the same time
+ # we need to isolate the intercept_scripts directories from each other,
+ # hence the ugly hash digest in dir name.
+ self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" %
+ (hashlib.sha256(self.target_rootfs.encode()).hexdigest()))
+
+ postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split()
+ if not postinst_intercepts:
+ postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH")
+ if not postinst_intercepts_path:
+ postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts")
+ postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path)
+
+ bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts))
+ bb.utils.remove(self.intercepts_dir, True)
+ bb.utils.mkdirhier(self.intercepts_dir)
+ for intercept in postinst_intercepts:
+ shutil.copy(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
+
+ @abstractmethod
+ def _handle_intercept_failure(self, failed_script):
+ pass
+
+ def _postpone_to_first_boot(self, postinst_intercept_hook):
+ with open(postinst_intercept_hook) as intercept:
+ registered_pkgs = None
+ for line in intercept.read().split("\n"):
+ m = re.match(r"^##PKGS:(.*)", line)
+ if m is not None:
+ registered_pkgs = m.group(1).strip()
+ break
+
+ if registered_pkgs is not None:
+ bb.note("If an image is being built, the postinstalls for the following packages "
+ "will be postponed for first boot: %s" %
+ registered_pkgs)
+
+ # call the backend dependent handler
+ self._handle_intercept_failure(registered_pkgs)
+
+
+ def run_intercepts(self, populate_sdk=None):
+ intercepts_dir = self.intercepts_dir
+
+ bb.note("Running intercept scripts:")
+ os.environ['D'] = self.target_rootfs
+ os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
+ for script in os.listdir(intercepts_dir):
+ script_full = os.path.join(intercepts_dir, script)
+
+ if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
+ continue
+
+ # we do not want to run any multilib variant of this
+ if script.startswith("delay_to_first_boot"):
+ self._postpone_to_first_boot(script_full)
+ continue
+
+ if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ continue
+
+ bb.note("> Executing %s intercept ..." % script)
+
+ try:
+ output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
+ if output: bb.note(output.decode("utf-8"))
+ except subprocess.CalledProcessError as e:
+ bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
+ if populate_sdk == 'host':
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ elif populate_sdk == 'target':
+ if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ else:
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ else:
+ if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ self._postpone_to_first_boot(script_full)
+ else:
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+
+ @abstractmethod
+ def update(self):
+ """
+ Update the package manager package database.
+ """
+ pass
+
+ @abstractmethod
+ def install(self, pkgs, attempt_only=False, hard_depends_only=False):
+ """
+ Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
+ True, installation failures are ignored.
+ """
+ pass
+
+ @abstractmethod
+ def remove(self, pkgs, with_dependencies=True):
+ """
+ Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
+ is False, then any dependencies are left in place.
+ """
+ pass
+
+ @abstractmethod
+ def write_index(self):
+ """
+ This function creates the index files
+ """
+ pass
+
+ @abstractmethod
+ def remove_packaging_data(self):
+ pass
+
+ @abstractmethod
+ def list_installed(self):
+ pass
+
+ @abstractmethod
+ def extract(self, pkg):
+ """
+ Returns the path to a tmpdir containing the contents of a package.
+ Deleting the tmpdir is the responsibility of the caller.
+ """
+ pass
+
+ @abstractmethod
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ """
+ Add remote package feeds into repository manager configuration. The parameters
+ for the feeds are set by feed_uris, feed_base_paths and feed_archs.
+ See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
+ for their description.
+ """
+ pass
+
+ def install_glob(self, globs, sdk=False):
+ """
+ Install all packages that match a glob.
+ """
+ # TODO don't have sdk here but have a property on the superclass
+ # (and respect in install_complementary)
+ if sdk:
+ pkgdatadir = self.d.getVar("PKGDATA_DIR_SDK")
+ else:
+ pkgdatadir = self.d.getVar("PKGDATA_DIR")
+
+ try:
+ bb.note("Installing globbed packages...")
+ cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
+ bb.note('Running %s' % cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = proc.communicate()
+ if stderr: bb.note(stderr.decode("utf-8"))
+ pkgs = stdout.decode("utf-8")
+ self.install(pkgs.split(), attempt_only=True)
+ except subprocess.CalledProcessError as e:
+ # Return code 1 means no packages matched
+ if e.returncode != 1:
+ bb.fatal("Could not compute globbed packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ def install_complementary(self, globs=None):
+ """
+ Install complementary packages based upon the list of currently installed
+ packages, e.g. locales, *-dev, *-dbg, etc. Note: every backend needs to
+ call this function explicitly after the normal package installation.
+ """
+ if globs is None:
+ globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
+ split_linguas = set()
+
+ for translation in self.d.getVar('IMAGE_LINGUAS').split():
+ split_linguas.add(translation)
+ split_linguas.add(translation.split('-')[0])
+
+ split_linguas = sorted(split_linguas)
+
+ for lang in split_linguas:
+ globs += " *-locale-%s" % lang
+ for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split():
+ globs += (" " + complementary_linguas) % lang
+
+ if globs is None:
+ return
+
+ # we need to write the list of installed packages to a file because the
+ # oe-pkgdata-util reads it from a file
+ with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
+ pkgs = self.list_installed()
+
+ provided_pkgs = set()
+ for pkg in pkgs.values():
+ provided_pkgs |= set(pkg.get('provs', []))
+
+ output = oe.utils.format_pkg_list(pkgs, "arch")
+ installed_pkgs.write(output)
+ installed_pkgs.flush()
+
+ cmd = ["oe-pkgdata-util",
+ "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
+ globs]
+ exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
+ if exclude:
+ cmd.extend(['--exclude=' + '|'.join(exclude.split())])
+ try:
+ bb.note('Running %s' % cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = proc.communicate()
+ if stderr: bb.note(stderr.decode("utf-8"))
+ complementary_pkgs = stdout.decode("utf-8")
+ complementary_pkgs = set(complementary_pkgs.split())
+ skip_pkgs = sorted(complementary_pkgs & provided_pkgs)
+ install_pkgs = sorted(complementary_pkgs - provided_pkgs)
+ bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % (
+ ' '.join(install_pkgs),
+ ' '.join(skip_pkgs)))
+ self.install(install_pkgs, hard_depends_only=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not compute complementary packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ if self.d.getVar('IMAGE_LOCALES_ARCHIVE') == '1':
+ target_arch = self.d.getVar('TARGET_ARCH')
+ localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
+ if os.path.exists(localedir) and os.listdir(localedir):
+ generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
+ # And now delete the binary locales
+ self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
+
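A worked example of the glob string built above, assuming IMAGE_INSTALL_COMPLEMENTARY = "*-dev *-dbg", IMAGE_LINGUAS = "en-gb" and an empty IMAGE_LINGUAS_COMPLEMENTARY:

    # split_linguas == ["en", "en-gb"]
    # globs == "*-dev *-dbg *-locale-en *-locale-en-gb"
    # oe-pkgdata-util then matches these patterns against the installed package list.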
+ def deploy_dir_lock(self):
+ if self.deploy_dir is None:
+ raise RuntimeError("deploy_dir is not set!")
+
+ lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
+
+ self.deploy_lock = bb.utils.lockfile(lock_file_name)
+
+ def deploy_dir_unlock(self):
+ if self.deploy_lock is None:
+ return
+
+ bb.utils.unlockfile(self.deploy_lock)
+
+ self.deploy_lock = None
+
+ def construct_uris(self, uris, base_paths):
+ """
+ Construct URIs of the form uri/base_path, pairing every element of
+ 'uris' with every element of 'base_paths', so the returned list
+ contains len(uris) x len(base_paths) elements.
+ """
+ def _append(arr1, arr2, sep='/'):
+ res = []
+ narr1 = [a.rstrip(sep) for a in arr1]
+ narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
+ for a1 in narr1:
+ if arr2:
+ for a2 in narr2:
+ res.append("%s%s%s" % (a1, sep, a2))
+ else:
+ res.append(a1)
+ return res
+ return _append(uris, base_paths)
+
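A worked example of construct_uris() with made-up feed values:

    # uris       == ["http://feed.example.com/ipk"]
    # base_paths == ["all", "armv7a"]
    # construct_uris(uris, base_paths) ==
    #     ["http://feed.example.com/ipk/all",
    #      "http://feed.example.com/ipk/armv7a"]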
+def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies):
+ """
+ Go through our do_package_write_X dependencies and hardlink the packages we depend
+ upon into the repo directory. This prevents us from seeing other packages that may
+ have been built but that we don't depend upon, as well as packages for architectures we don't
+ support.
+ """
+ import errno
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ mytaskname = d.getVar("BB_RUNTASK")
+ pn = d.getVar("PN")
+ seendirs = set()
+ multilibs = {}
+
+ bb.utils.remove(subrepo_dir, recurse=True)
+ bb.utils.mkdirhier(subrepo_dir)
+
+ # Detect bitbake -b usage
+ nodeps = d.getVar("BB_LIMITEDDEPS") or False
+ if nodeps or not filterbydependencies:
+ for arch in d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").split() + d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").replace("-", "_").split():
+ target = os.path.join(deploydir + "/" + arch)
+ if os.path.exists(target):
+ oe.path.symlink(target, subrepo_dir + "/" + arch, True)
+ return
+
+ start = None
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == mytaskname and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+ pkgdeps = set()
+ start = [start]
+ seen = set(start)
+ # Support direct dependencies (do_rootfs -> do_package_write_X)
+ # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X)
+ while start:
+ next = []
+ for dep2 in start:
+ for dep in taskdepdata[dep2][3]:
+ if taskdepdata[dep][0] != pn:
+ if "do_" + taskname in dep:
+ pkgdeps.add(dep)
+ elif dep not in seen:
+ next.append(dep)
+ seen.add(dep)
+ start = next
+
+ for dep in pkgdeps:
+ c = taskdepdata[dep][0]
+ manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
+ if not manifest:
+ bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
+ if not os.path.exists(manifest):
+ continue
+ with open(manifest, "r") as f:
+ for l in f:
+ l = l.strip()
+ deploydir = os.path.normpath(deploydir)
+ if bb.data.inherits_class('packagefeed-stability', d):
+ dest = l.replace(deploydir + "-prediff", "")
+ else:
+ dest = l.replace(deploydir, "")
+ dest = subrepo_dir + dest
+ if l.endswith("/"):
+ if dest not in seendirs:
+ bb.utils.mkdirhier(dest)
+ seendirs.add(dest)
+ continue
+ # Try to hardlink the file, copy if that fails
+ destdir = os.path.dirname(dest)
+ if destdir not in seendirs:
+ bb.utils.mkdirhier(destdir)
+ seendirs.add(destdir)
+ try:
+ os.link(l, dest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(l, dest)
+ else:
+ raise
+
+
+def generate_index_files(d):
+ from oe.package_manager.rpm import RpmSubdirIndexer
+ from oe.package_manager.ipk import OpkgIndexer
+ from oe.package_manager.deb import DpkgIndexer
+
+ classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
+
+ indexer_map = {
+ "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
+ "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
+ "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
+ }
+
+ result = None
+
+ for pkg_class in classes:
+ if not pkg_class in indexer_map:
+ continue
+
+ if os.path.exists(indexer_map[pkg_class][1]):
+ result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
+
+ if result is not None:
+ bb.fatal(result)
diff --git a/meta/lib/oe/package_manager/deb/__init__.py b/meta/lib/oe/package_manager/deb/__init__.py
new file mode 100644
index 0000000000..0c23c884c1
--- /dev/null
+++ b/meta/lib/oe/package_manager/deb/__init__.py
@@ -0,0 +1,522 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import subprocess
+from oe.package_manager import *
+
+class DpkgIndexer(Indexer):
+ def _create_configs(self):
+ bb.utils.mkdirhier(self.apt_conf_dir)
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))
+
+ with open(os.path.join(self.apt_conf_dir, "preferences"),
+ "w") as prefs_file:
+ pass
+ with open(os.path.join(self.apt_conf_dir, "sources.list"),
+ "w+") as sources_file:
+ pass
+
+ with open(self.apt_conf_file, "w") as apt_conf:
+ with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
+ "apt", "apt.conf.sample")) as apt_conf_sample:
+ for line in apt_conf_sample.read().split("\n"):
+ line = re.sub(r"#ROOTFS#", "/dev/null", line)
+ line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
+ apt_conf.write(line + "\n")
+
+ def write_index(self):
+ self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
+ "apt-ftparchive")
+ self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
+ self._create_configs()
+
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ pkg_archs = self.d.getVar('PACKAGE_ARCHS')
+ if pkg_archs is not None:
+ arch_list = pkg_archs.split()
+ sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
+ if sdk_pkg_archs is not None:
+ for a in sdk_pkg_archs.split():
+ if a not in pkg_archs:
+ arch_list.append(a)
+
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
+ arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
+
+ apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
+ gzip = bb.utils.which(os.getenv('PATH'), "gzip")
+
+ index_cmds = []
+ deb_dirs_found = False
+ index_sign_files = set()
+ for arch in arch_list:
+ arch_dir = os.path.join(self.deploy_dir, arch)
+ if not os.path.isdir(arch_dir):
+ continue
+
+ cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
+
+ cmd += "%s -fcn Packages > Packages.gz;" % gzip
+
+ release_file = os.path.join(arch_dir, "Release")
+ index_sign_files.add(release_file)
+
+ with open(release_file, "w+") as release:
+ release.write("Label: %s\n" % arch)
+
+ cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
+
+ index_cmds.append(cmd)
+
+ deb_dirs_found = True
+
+ if not deb_dirs_found:
+ bb.note("There are no packages in %s" % self.deploy_dir)
+ return
+
+ oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
+ else:
+ signer = None
+ if signer:
+ for f in index_sign_files:
+ signer.detach_sign(f,
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
+ output_suffix="gpg",
+ use_sha256=True)
+
+class PMPkgsList(PkgsList):
+
+ def list_pkgs(self):
+ cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
+ "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
+ "-W"]
+
+ cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\nProvides: ${Provides}\n\n")
+
+ try:
+ cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot get the installed packages list. Command '%s' "
+ "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ return opkg_query(cmd_output)
+
+class OpkgDpkgPM(PackageManager):
+ def __init__(self, d, target_rootfs):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ super(OpkgDpkgPM, self).__init__(d, target_rootfs)
+
+ def package_info(self, pkg, cmd):
+ """
+ Returns a dictionary with the package info.
+
+ This method extracts the common parts for Opkg and Dpkg
+ """
+
+ try:
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to list available packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ return opkg_query(output)
+
+ def extract(self, pkg, pkg_info):
+ """
+ Returns the path to a tmpdir containing the contents of a package.
+
+ Deleting the tmpdir is the responsibility of the caller.
+
+ This method extracts the common parts for Opkg and Dpkg
+ """
+
+ ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
+ tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
+ pkg_path = pkg_info[pkg]["filepath"]
+
+ if not os.path.isfile(pkg_path):
+ bb.fatal("Unable to extract package for '%s'."
+ "File %s doesn't exists" % (pkg, pkg_path))
+
+ tmp_dir = tempfile.mkdtemp()
+ current_dir = os.getcwd()
+ os.chdir(tmp_dir)
+ data_tar = 'data.tar.xz'
+
+ try:
+ cmd = [ar_cmd, 'x', pkg_path]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ cmd = [tar_cmd, 'xf', data_tar]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ except OSError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
+
+ bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
+ bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
+ bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
+ os.chdir(current_dir)
+
+ return tmp_dir
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ self.mark_packages("unpacked", registered_pkgs.split())
+
+class DpkgPM(OpkgDpkgPM):
+ def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True):
+ super(DpkgPM, self).__init__(d, target_rootfs)
+ self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir)
+
+ create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies)
+
+ if apt_conf_dir is None:
+ self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
+ else:
+ self.apt_conf_dir = apt_conf_dir
+ self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
+ self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
+ self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")
+
+ self.apt_args = d.getVar("APT_ARGS")
+
+ self.all_arch_list = archs.split()
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
+ self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
+
+ self._create_configs(archs, base_archs)
+
+ self.indexer = DpkgIndexer(self.d, self.deploy_dir)
+
+ def mark_packages(self, status_tag, packages=None):
+ """
+ This function will change a package's status in the /var/lib/dpkg/status file.
+ If 'packages' is None then 'status_tag' is applied to all
+ packages.
+ """
+ status_file = self.target_rootfs + "/var/lib/dpkg/status"
+
+ with open(status_file, "r") as sf:
+ with open(status_file + ".tmp", "w+") as tmp_sf:
+ if packages is None:
+ tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+ r"Package: \1\n\2Status: \3%s" % status_tag,
+ sf.read()))
+ else:
+ if type(packages).__name__ != "list":
+ raise TypeError("'packages' should be a list object")
+
+ status = sf.read()
+ for pkg in packages:
+ status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
+ r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
+ status)
+
+ tmp_sf.write(status)
+
+ bb.utils.rename(status_file + ".tmp", status_file)
+
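A worked example of the rewrite mark_packages() performs, using an illustrative package name:

    # Before:                            After mark_packages("unpacked", ["libfoo"]):
    #   Package: libfoo                    Package: libfoo
    #   Status: install ok installed       Status: install ok unpacked
    # All other stanzas in the status file are left untouched.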
+ def run_pre_post_installs(self, package_name=None):
+ """
+ Run the pre/post installs for package "package_name". If package_name is
+ None, then run all pre/post install scriptlets.
+ """
+ info_dir = self.target_rootfs + "/var/lib/dpkg/info"
+ ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
+ control_scripts = [
+ ControlScript(".preinst", "Preinstall", "install"),
+ ControlScript(".postinst", "Postinstall", "configure")]
+ status_file = self.target_rootfs + "/var/lib/dpkg/status"
+ installed_pkgs = []
+
+ with open(status_file, "r") as status:
+ for line in status.read().split('\n'):
+ m = re.match(r"^Package: (.*)", line)
+ if m is not None:
+ installed_pkgs.append(m.group(1))
+
+ if package_name is not None and not package_name in installed_pkgs:
+ return
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+ for pkg_name in installed_pkgs:
+ for control_script in control_scripts:
+ p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
+ if os.path.exists(p_full):
+ try:
+ bb.note("Executing %s for package: %s ..." %
+ (control_script.name.lower(), pkg_name))
+ output = subprocess.check_output([p_full, control_script.argument],
+ stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.warn("%s for package %s failed with %d:\n%s" %
+ (control_script.name, pkg_name, e.returncode,
+ e.output.decode("utf-8")))
+ failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+
+ def update(self):
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ self.deploy_dir_lock()
+
+ cmd = "%s update" % self.apt_get_cmd
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to update the package index files. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
+
+ self.deploy_dir_unlock()
+
+ def install(self, pkgs, attempt_only=False, hard_depends_only=False):
+ if attempt_only and len(pkgs) == 0:
+ return
+
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ extra_args = ""
+ if hard_depends_only:
+ extra_args = "--no-install-recommends"
+
+ cmd = "%s %s install --allow-downgrades --allow-remove-essential --allow-change-held-packages --allow-unauthenticated --no-remove %s %s" % \
+ (self.apt_get_cmd, self.apt_args, extra_args, ' '.join(pkgs))
+
+ try:
+ bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ bb.note(output.decode("utf-8"))
+ except subprocess.CalledProcessError as e:
+ (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
+ "Command '%s' returned %d:\n%s" %
+ (cmd, e.returncode, e.output.decode("utf-8")))
+
+ # rename *.dpkg-new files/dirs
+ for root, dirs, files in os.walk(self.target_rootfs):
+ for dir in dirs:
+ new_dir = re.sub(r"\.dpkg-new", "", dir)
+ if dir != new_dir:
+ bb.utils.rename(os.path.join(root, dir),
+ os.path.join(root, new_dir))
+
+ for file in files:
+ new_file = re.sub(r"\.dpkg-new", "", file)
+ if file != new_file:
+ bb.utils.rename(os.path.join(root, file),
+ os.path.join(root, new_file))
+
+
+ def remove(self, pkgs, with_dependencies=True):
+ if not pkgs:
+ return
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+
+ if with_dependencies:
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+ cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
+ else:
+ cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
+ " -P --force-depends %s" % \
+ (bb.utils.which(os.getenv('PATH'), "dpkg"),
+ self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to remove packages. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
+
+ def write_index(self):
+ self.deploy_dir_lock()
+
+ result = self.indexer.write_index()
+
+ self.deploy_dir_unlock()
+
+ if result is not None:
+ bb.fatal(result)
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ if feed_uris == "":
+ return
+
+
+ sources_conf = os.path.join("%s/etc/apt/sources.list"
+ % self.target_rootfs)
+ if not os.path.exists(os.path.dirname(sources_conf)):
+ return
+
+ arch_list = []
+
+ if feed_archs is None:
+ for arch in self.all_arch_list:
+ if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+ continue
+ arch_list.append(arch)
+ else:
+ arch_list = feed_archs.split()
+
+ feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+
+ with open(sources_conf, "w+") as sources_file:
+ for uri in feed_uris:
+ if arch_list:
+ for arch in arch_list:
+ bb.note('Adding dpkg channel at (%s)' % uri)
+ sources_file.write("deb [trusted=yes] %s/%s ./\n" %
+ (uri, arch))
+ else:
+ bb.note('Adding dpkg channel at (%s)' % uri)
+ sources_file.write("deb [trusted=yes] %s ./\n" % uri)
+
+ def _create_configs(self, archs, base_archs):
+ base_archs = re.sub(r"_", r"-", base_archs)
+
+ if os.path.exists(self.apt_conf_dir):
+ bb.utils.remove(self.apt_conf_dir, True)
+
+ bb.utils.mkdirhier(self.apt_conf_dir)
+ bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
+ bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
+ bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")
+
+ arch_list = []
+ for arch in self.all_arch_list:
+ if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+ continue
+ arch_list.append(arch)
+
+ with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
+ priority = 801
+ for arch in arch_list:
+ prefs_file.write(
+ "Package: *\n"
+ "Pin: release l=%s\n"
+ "Pin-Priority: %d\n\n" % (arch, priority))
+
+ priority += 5
+
+ pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
+ for pkg in pkg_exclude.split():
+ prefs_file.write(
+ "Package: %s\n"
+ "Pin: release *\n"
+ "Pin-Priority: -1\n\n" % pkg)
+
+ arch_list.reverse()
+
+ with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
+ for arch in arch_list:
+ sources_file.write("deb [trusted=yes] file:%s/ ./\n" %
+ os.path.join(self.deploy_dir, arch))
+
+ base_arch_list = base_archs.split()
+ multilib_variants = self.d.getVar("MULTILIB_VARIANTS")
+ for variant in multilib_variants.split():
+ localdata = bb.data.createCopy(self.d)
+ variant_tune = localdata.getVar("DEFAULTTUNE:virtclass-multilib-" + variant, False)
+ orig_arch = localdata.getVar("DPKG_ARCH")
+ localdata.setVar("DEFAULTTUNE", variant_tune)
+ variant_arch = localdata.getVar("DPKG_ARCH")
+ if variant_arch not in base_arch_list:
+ base_arch_list.append(variant_arch)
+
+ with open(self.apt_conf_file, "w+") as apt_conf:
+ with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
+ for line in apt_conf_sample.read().split("\n"):
+ match_arch = re.match(r" Architecture \".*\";$", line)
+ architectures = ""
+ if match_arch:
+ for base_arch in base_arch_list:
+ architectures += "\"%s\";" % base_arch
+ apt_conf.write(" Architectures {%s};\n" % architectures)
+ apt_conf.write(" Architecture \"%s\";\n" % base_archs)
+ else:
+ line = re.sub(r"#ROOTFS#", self.target_rootfs, line)
+ line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
+ apt_conf.write(line + "\n")
+
+ target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
+ bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
+
+ bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
+
+ if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
+ open(os.path.join(target_dpkg_dir, "status"), "w+").close()
+ if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
+ open(os.path.join(target_dpkg_dir, "available"), "w+").close()
+
+ def remove_packaging_data(self):
+ bb.utils.remove(self.target_rootfs + self.d.getVar('opkglibdir'), True)
+ bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
+
+ def fix_broken_dependencies(self):
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ cmd = "%s %s --allow-unauthenticated -f install" % (self.apt_get_cmd, self.apt_args)
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot fix broken dependencies. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ def list_installed(self):
+ return PMPkgsList(self.d, self.target_rootfs).list_pkgs()
+
+ def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
+ cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
+ pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
+
+ pkg_arch = pkg_info[pkg]["pkgarch"]
+ pkg_filename = pkg_info[pkg]["filename"]
+ pkg_info[pkg]["filepath"] = \
+ os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
+
+ return pkg_info
+
+ def extract(self, pkg):
+ """
+ Returns the path to a tmpdir containing the contents of a package.
+
+ Deleting the tmpdir is the responsibility of the caller.
+ """
+ pkg_info = self.package_info(pkg)
+ if not pkg_info:
+ bb.fatal("Unable to get information for package '%s' while "
+ "trying to extract the package." % pkg)
+
+ tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info)
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
+
+ return tmp_dir
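A minimal sketch of how DpkgPM is typically driven from a rootfs task (it mirrors the PkgRootfs code further down; 'd' is the task datastore and the package name is illustrative):

    pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
                d.getVar('PACKAGE_ARCHS'),
                d.getVar('DPKG_ARCH'))
    pm.write_index()                        # generate Packages/Release files
    pm.update()                             # apt-get update against the local repo
    pm.install(["packagegroup-core-boot"])  # install into the target rootfs
    pm.run_pre_post_installs()              # run preinst/postinst scriptlets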
diff --git a/meta/lib/oe/package_manager/deb/manifest.py b/meta/lib/oe/package_manager/deb/manifest.py
new file mode 100644
index 0000000000..72983bae98
--- /dev/null
+++ b/meta/lib/oe/package_manager/deb/manifest.py
@@ -0,0 +1,28 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from oe.manifest import Manifest
+
+class PkgManifest(Manifest):
+ def create_initial(self):
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ pkg_list = self.d.getVar(var)
+
+ if pkg_list is None:
+ continue
+
+ for pkg in pkg_list.split():
+ manifest.write("%s,%s\n" %
+ (self.var_maps[self.manifest_type][var], pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ pass
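For reference, create_initial() above writes one "<type>,<package>" line per package, where <type> is the short tag that Manifest.var_maps assigns to the originating variable. Assuming the usual oe.manifest tags ("mip" for PACKAGE_INSTALL, "aop" for PACKAGE_INSTALL_ATTEMPTONLY), a hypothetical initial manifest might contain:

    # mip,busybox
    # mip,base-files
    # aop,kernel-module-foo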
diff --git a/meta/lib/oe/package_manager/deb/rootfs.py b/meta/lib/oe/package_manager/deb/rootfs.py
new file mode 100644
index 0000000000..1e25b64ed9
--- /dev/null
+++ b/meta/lib/oe/package_manager/deb/rootfs.py
@@ -0,0 +1,212 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import shutil
+from oe.rootfs import Rootfs
+from oe.manifest import Manifest
+from oe.utils import execute_pre_post_process
+from oe.package_manager.deb.manifest import PkgManifest
+from oe.package_manager.deb import DpkgPM
+
+class DpkgOpkgRootfs(Rootfs):
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
+ super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+
+ def _get_pkgs_postinsts(self, status_file):
+ def _get_pkg_depends_list(pkg_depends):
+ pkg_depends_list = []
+ # filter version requirements like libc (>= 1.1)
+ for dep in pkg_depends.split(', '):
+ m_dep = re.match(r"^(.*) \(.*\)$", dep)
+ if m_dep:
+ dep = m_dep.group(1)
+ pkg_depends_list.append(dep)
+
+ return pkg_depends_list
+
+ pkgs = {}
+ pkg_name = ""
+ pkg_status_match = False
+ pkg_depends = ""
+
+ with open(status_file) as status:
+ data = status.read()
+ status.close()
+ for line in data.split('\n'):
+ m_pkg = re.match(r"^Package: (.*)", line)
+ m_status = re.match(r"^Status:.*unpacked", line)
+ m_depends = re.match(r"^Depends: (.*)", line)
+
+                #Only one of m_pkg, m_status or m_depends is not None at a time
+ #If m_pkg is not None, we started a new package
+ if m_pkg is not None:
+ #Get Package name
+ pkg_name = m_pkg.group(1)
+ #Make sure we reset other variables
+ pkg_status_match = False
+ pkg_depends = ""
+ elif m_status is not None:
+ #New status matched
+ pkg_status_match = True
+ elif m_depends is not None:
+                    #New depends matched
+ pkg_depends = m_depends.group(1)
+ else:
+ pass
+
+ #Now check if we can process package depends and postinst
+ if "" != pkg_name and pkg_status_match:
+ pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
+ else:
+ #Not enough information
+ pass
+
+ # remove package dependencies not in postinsts
+ pkg_names = list(pkgs.keys())
+ for pkg_name in pkg_names:
+ deps = pkgs[pkg_name][:]
+
+ for d in deps:
+ if d not in pkg_names:
+ pkgs[pkg_name].remove(d)
+
+ return pkgs
+
+ def _get_delayed_postinsts_common(self, status_file):
+ def _dep_resolve(graph, node, resolved, seen):
+ seen.append(node)
+
+ for edge in graph[node]:
+ if edge not in resolved:
+ if edge in seen:
+ raise RuntimeError("Packages %s and %s have " \
+ "a circular dependency in postinsts scripts." \
+ % (node, edge))
+ _dep_resolve(graph, edge, resolved, seen)
+
+ resolved.append(node)
+
+ pkg_list = []
+
+ pkgs = None
+ if not self.d.getVar('PACKAGE_INSTALL').strip():
+ bb.note("Building empty image")
+ else:
+ pkgs = self._get_pkgs_postinsts(status_file)
+ if pkgs:
+ root = "__packagegroup_postinst__"
+ pkgs[root] = list(pkgs.keys())
+ _dep_resolve(pkgs, root, pkg_list, [])
+ pkg_list.remove(root)
+
+ if len(pkg_list) == 0:
+ return None
+
+ return pkg_list
+
+ def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management",
+ True, False, self.d):
+ return
+ num = 0
+ for p in self._get_delayed_postinsts():
+ bb.utils.mkdirhier(dst_postinst_dir)
+
+ if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
+ shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
+ os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
+
+ num += 1
+
+class PkgRootfs(DpkgOpkgRootfs):
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = '^E:'
+ self.log_check_expected_regexes = \
+ [
+ "^E: Unmet dependencies."
+ ]
+
+ bb.utils.remove(self.image_rootfs, True)
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
+ self.manifest = PkgManifest(d, manifest_dir)
+ self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
+ d.getVar('PACKAGE_ARCHS'),
+ d.getVar('DPKG_ARCH'))
+
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
+ deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')
+
+ alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
+ bb.utils.mkdirhier(alt_dir)
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, deb_pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+ # Don't support incremental, so skip that
+ self.progress_reporter.next_stage()
+
+ self.pm.update()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ self.pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+ self.pm.fix_broken_dependencies()
+
+ if self.progress_reporter:
+ # Don't support attemptonly, so skip that
+ self.progress_reporter.next_stage()
+ self.progress_reporter.next_stage()
+
+ self.pm.install_complementary()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self._setup_dbg_rootfs(['/var/lib/dpkg'])
+
+ self.pm.fix_broken_dependencies()
+
+ self.pm.mark_packages("installed")
+
+ self.pm.run_pre_post_installs()
+
+ execute_pre_post_process(self.d, deb_post_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ @staticmethod
+ def _depends_list():
+ return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']
+
+ def _get_delayed_postinsts(self):
+ status_file = self.image_rootfs + "/var/lib/dpkg/status"
+ return self._get_delayed_postinsts_common(status_file)
+
+ def _save_postinsts(self):
+ dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
+ src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
+ return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ pass
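
_get_delayed_postinsts_common() above orders delayed postinsts with a depth-first walk that emits dependencies before dependents and raises on cycles. The same algorithm in isolation, with a toy graph (the node names are illustrative only):

def dep_resolve(graph, node, resolved, seen):
    seen.append(node)
    for edge in graph[node]:
        if edge not in resolved:
            if edge in seen:
                raise RuntimeError("circular dependency: %s <-> %s" % (node, edge))
            dep_resolve(graph, edge, resolved, seen)
    resolved.append(node)

graph = {"root": ["a", "b", "c"], "a": ["b"], "b": ["c"], "c": []}
order = []
dep_resolve(graph, "root", order, [])
print(order)  # ['c', 'b', 'a', 'root'] -- dependencies run before dependents
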
diff --git a/meta/lib/oe/package_manager/deb/sdk.py b/meta/lib/oe/package_manager/deb/sdk.py
new file mode 100644
index 0000000000..6f3005053e
--- /dev/null
+++ b/meta/lib/oe/package_manager/deb/sdk.py
@@ -0,0 +1,107 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import glob
+import shutil
+from oe.utils import execute_pre_post_process
+from oe.sdk import Sdk
+from oe.manifest import Manifest
+from oe.package_manager.deb import DpkgPM
+from oe.package_manager.deb.manifest import PkgManifest
+
+class PkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None):
+ super(PkgSdk, self).__init__(d, manifest_dir)
+
+ self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
+ self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")
+
+
+ self.target_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ deb_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ deb_repo_workdir = "oe-sdk-ext-repo"
+
+ self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
+ self.d.getVar("PACKAGE_ARCHS"),
+ self.d.getVar("DPKG_ARCH"),
+ self.target_conf_dir,
+ deb_repo_workdir=deb_repo_workdir)
+
+ self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
+ self.d.getVar("SDK_PACKAGE_ARCHS"),
+ self.d.getVar("DEB_SDK_ARCH"),
+ self.host_conf_dir,
+ deb_repo_workdir=deb_repo_workdir)
+
+ def _copy_apt_dir_to(self, dst_dir):
+ staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")
+
+ self.remove(dst_dir, True)
+
+ shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ pm.write_index()
+ pm.update()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ self.target_pm.run_pre_post_installs()
+
+ env_bkp = os.environ.copy()
+ os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
+ os.pathsep + os.environ["PATH"]
+
+ self.target_pm.run_intercepts(populate_sdk='target')
+ os.environ.update(env_bkp)
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
+
+ self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_pre_post_installs()
+
+ self.host_pm.run_intercepts(populate_sdk='host')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
+
+ self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
+ "etc", "apt"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.host_pm.remove_packaging_data()
+
+ native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
+ "var", "lib", "dpkg")
+ self.mkdirhier(native_dpkg_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
+ self.movefile(f, native_dpkg_state_dir)
+ self.remove(os.path.join(self.sdk_output, "var"), True)
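
The env_bkp save/restore around run_intercepts() above temporarily puts the nativesdk-intercept scripts first on PATH. A hypothetical context-manager equivalent (a sketch, not the code the class uses; it restores the whole environment rather than only PATH):

import os
from contextlib import contextmanager

@contextmanager
def prepended_path(directory):
    saved = os.environ.copy()
    os.environ["PATH"] = directory + os.pathsep + os.environ["PATH"]
    try:
        yield
    finally:
        # Restore the saved environment, dropping anything added in between
        os.environ.clear()
        os.environ.update(saved)
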
diff --git a/meta/lib/oe/package_manager/ipk/__init__.py b/meta/lib/oe/package_manager/ipk/__init__.py
new file mode 100644
index 0000000000..8cc9953a02
--- /dev/null
+++ b/meta/lib/oe/package_manager/ipk/__init__.py
@@ -0,0 +1,511 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import shutil
+import subprocess
+from oe.package_manager import *
+
+class OpkgIndexer(Indexer):
+ def write_index(self):
+ arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
+ "SDK_PACKAGE_ARCHS",
+ ]
+
+ opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
+ opkg_index_cmd_extra_params = self.d.getVar('OPKG_MAKE_INDEX_EXTRA_PARAMS') or ""
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
+ else:
+ signer = None
+
+ if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
+ open(os.path.join(self.deploy_dir, "Packages"), "w").close()
+
+ index_cmds = set()
+ index_sign_files = set()
+ for arch_var in arch_vars:
+ archs = self.d.getVar(arch_var)
+ if archs is None:
+ continue
+
+ for arch in archs.split():
+ pkgs_dir = os.path.join(self.deploy_dir, arch)
+ pkgs_file = os.path.join(pkgs_dir, "Packages")
+
+ if not os.path.isdir(pkgs_dir):
+ continue
+
+ if not os.path.exists(pkgs_file):
+ open(pkgs_file, "w").close()
+
+ index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s %s' %
+ (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir, opkg_index_cmd_extra_params))
+
+ index_sign_files.add(pkgs_file)
+
+ if len(index_cmds) == 0:
+ bb.note("There are no packages in %s!" % self.deploy_dir)
+ return
+
+ oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
+
+ if signer:
+ feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
+ is_ascii_sig = (feed_sig_type.upper() != "BIN")
+ for f in index_sign_files:
+ signer.detach_sign(f,
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
+ armor=is_ascii_sig)
+
+class PMPkgsList(PkgsList):
+ def __init__(self, d, rootfs_dir):
+ super(PMPkgsList, self).__init__(d, rootfs_dir)
+ config_file = d.getVar("IPKGCONF_TARGET")
+
+ self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
+ self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
+ self.opkg_args += self.d.getVar("OPKG_ARGS")
+
+ def list_pkgs(self, format=None):
+ cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
+
+ # opkg returns success even when it printed some
+ # "Collected errors:" report to stderr. Mixing stderr into
+ # stdout then leads to random failures later on when
+ # parsing the output. To avoid this we need to collect both
+ # output streams separately and check for empty stderr.
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ cmd_output, cmd_stderr = p.communicate()
+ cmd_output = cmd_output.decode("utf-8")
+ cmd_stderr = cmd_stderr.decode("utf-8")
+ if p.returncode or cmd_stderr:
+ bb.fatal("Cannot get the installed packages list. Command '%s' "
+ "returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr))
+
+ return opkg_query(cmd_output)
+
+
+
+class OpkgDpkgPM(PackageManager):
+ def __init__(self, d, target_rootfs):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ super(OpkgDpkgPM, self).__init__(d, target_rootfs)
+
+ def package_info(self, pkg, cmd):
+ """
+ Returns a dictionary with the package info.
+
+ This method extracts the common parts for Opkg and Dpkg
+ """
+
+ proc = subprocess.run(cmd, capture_output=True, encoding="utf-8", shell=True)
+ if proc.returncode:
+ bb.fatal("Unable to list available packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, proc.returncode, proc.stderr))
+ elif proc.stderr:
+ bb.note("Command '%s' returned stderr: %s" % (cmd, proc.stderr))
+
+ return opkg_query(proc.stdout)
+
+ def extract(self, pkg, pkg_info):
+ """
+        Returns the path to a tmpdir containing the contents of a package.
+
+        Deleting the tmpdir is the responsibility of the caller.
+
+ This method extracts the common parts for Opkg and Dpkg
+ """
+
+ ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
+ tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
+ pkg_path = pkg_info[pkg]["filepath"]
+
+ if not os.path.isfile(pkg_path):
+            bb.fatal("Unable to extract package for '%s'. "
+                     "File %s doesn't exist" % (pkg, pkg_path))
+
+ tmp_dir = tempfile.mkdtemp()
+ current_dir = os.getcwd()
+ os.chdir(tmp_dir)
+ data_tar = 'data.tar.zst'
+
+ try:
+ cmd = [ar_cmd, 'x', pkg_path]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ cmd = [tar_cmd, 'xf', data_tar]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ except OSError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
+
+ bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
+ bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
+ bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
+ os.chdir(current_dir)
+
+ return tmp_dir
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ self.mark_packages("unpacked", registered_pkgs.split())
+
+class OpkgPM(OpkgDpkgPM):
+ def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True):
+ super(OpkgPM, self).__init__(d, target_rootfs)
+
+ self.config_file = config_file
+ self.pkg_archs = archs
+ self.task_name = task_name
+
+ self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir)
+ self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
+ self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
+ self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
+ self.opkg_args += self.d.getVar("OPKG_ARGS")
+
+ if prepare_index:
+ create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"), "package_write_ipk", filterbydependencies)
+
+ self.opkg_dir = oe.path.join(target_rootfs, self.d.getVar('OPKGLIBDIR'), "opkg")
+ bb.utils.mkdirhier(self.opkg_dir)
+
+ self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved'))
+
+ self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
+ if self.from_feeds:
+ self._create_custom_config()
+ else:
+ self._create_config()
+
+ self.indexer = OpkgIndexer(self.d, self.deploy_dir)
+
+ def mark_packages(self, status_tag, packages=None):
+ """
+        Change a package's status in the /var/lib/opkg/status file.
+        If 'packages' is None, the new status_tag is applied to all
+        packages.
+ """
+ status_file = os.path.join(self.opkg_dir, "status")
+
+ with open(status_file, "r") as sf:
+ with open(status_file + ".tmp", "w+") as tmp_sf:
+ if packages is None:
+ tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+ r"Package: \1\n\2Status: \3%s" % status_tag,
+ sf.read()))
+ else:
+ if type(packages).__name__ != "list":
+ raise TypeError("'packages' should be a list object")
+
+ status = sf.read()
+ for pkg in packages:
+ status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
+ r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
+ status)
+
+ tmp_sf.write(status)
+
+ bb.utils.rename(status_file + ".tmp", status_file)
+
+ def _create_custom_config(self):
+ bb.note("Building from feeds activated!")
+
+ with open(self.config_file, "w+") as config_file:
+ priority = 1
+ for arch in self.pkg_archs.split():
+ config_file.write("arch %s %d\n" % (arch, priority))
+ priority += 5
+
+ for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
+ feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
+
+ if feed_match is not None:
+ feed_name = feed_match.group(1)
+ feed_uri = feed_match.group(2)
+
+ bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
+
+ config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
+
+ """
+        Allow using the package deploy directory contents as a quick devel-testing
+        feed. This creates individual feed configs for each arch subdir of those
+ specified as compatible for the current machine.
+ NOTE: Development-helper feature, NOT a full-fledged feed.
+ """
+ if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
+ for arch in self.pkg_archs.split():
+ cfg_file_name = oe.path.join(self.target_rootfs,
+ self.d.getVar("sysconfdir"),
+ "opkg",
+ "local-%s-feed.conf" % arch)
+
+ with open(cfg_file_name, "w+") as cfg_file:
+ cfg_file.write("src/gz local-%s %s/%s" %
+ (arch,
+ self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
+ arch))
+
+ if self.d.getVar('OPKGLIBDIR') != '/var/lib':
+ # There is no command line option for this anymore, we need to add
+ # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
+ # the default value of "/var/lib" as defined in opkg:
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
+ cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+ cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+ cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
+
+
+ def _create_config(self):
+ with open(self.config_file, "w+") as config_file:
+ priority = 1
+ for arch in self.pkg_archs.split():
+ config_file.write("arch %s %d\n" % (arch, priority))
+ priority += 5
+
+ config_file.write("src oe file:%s\n" % self.deploy_dir)
+
+ for arch in self.pkg_archs.split():
+ pkgs_dir = os.path.join(self.deploy_dir, arch)
+ if os.path.isdir(pkgs_dir):
+ config_file.write("src oe-%s file:%s\n" %
+ (arch, pkgs_dir))
+
+ if self.d.getVar('OPKGLIBDIR') != '/var/lib':
+ # There is no command line option for this anymore, we need to add
+ # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
+ # the default value of "/var/lib" as defined in opkg:
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
+ config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+ config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+ config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ if feed_uris == "":
+ return
+
+ rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
+ % self.target_rootfs)
+
+ os.makedirs('%s/etc/opkg' % self.target_rootfs, exist_ok=True)
+
+ feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+ archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
+
+ with open(rootfs_config, "w+") as config_file:
+ uri_iterator = 0
+ for uri in feed_uris:
+ if archs:
+ for arch in archs:
+ if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
+ continue
+ bb.note('Adding opkg feed url-%s-%d (%s)' %
+ (arch, uri_iterator, uri))
+ config_file.write("src/gz uri-%s-%d %s/%s\n" %
+ (arch, uri_iterator, uri, arch))
+ else:
+ bb.note('Adding opkg feed url-%d (%s)' %
+ (uri_iterator, uri))
+ config_file.write("src/gz uri-%d %s\n" %
+ (uri_iterator, uri))
+
+ uri_iterator += 1
+
+ def update(self):
+ self.deploy_dir_lock()
+
+ cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ self.deploy_dir_unlock()
+ bb.fatal("Unable to update the package index files. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ self.deploy_dir_unlock()
+
+ def install(self, pkgs, attempt_only=False, hard_depends_only=False):
+ if not pkgs:
+ return
+
+ cmd = "%s %s" % (self.opkg_cmd, self.opkg_args)
+ for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split():
+ cmd += " --add-exclude %s" % exclude
+ for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split():
+ cmd += " --add-ignore-recommends %s" % bad_recommendation
+ if hard_depends_only:
+ cmd += " --no-install-recommends"
+ cmd += " install "
+ cmd += " ".join(pkgs)
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+ try:
+ bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+ bb.note(cmd)
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ failed_pkgs = []
+ for line in output.split('\n'):
+ if line.endswith("configuration required on target."):
+ bb.warn(line)
+ failed_pkgs.append(line.split(".")[0])
+ if failed_pkgs:
+ failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+ except subprocess.CalledProcessError as e:
+ (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
+ "Command '%s' returned %d:\n%s" %
+ (cmd, e.returncode, e.output.decode("utf-8")))
+
+ def remove(self, pkgs, with_dependencies=True):
+ if not pkgs:
+ return
+
+ if with_dependencies:
+ cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
+ (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+ else:
+ cmd = "%s %s --force-depends remove %s" % \
+ (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+
+ try:
+ bb.note(cmd)
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to remove packages. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
+
+ def write_index(self):
+ self.deploy_dir_lock()
+
+ result = self.indexer.write_index()
+
+ self.deploy_dir_unlock()
+
+ if result is not None:
+ bb.fatal(result)
+
+ def remove_packaging_data(self):
+ cachedir = oe.path.join(self.target_rootfs, self.d.getVar("localstatedir"), "cache", "opkg")
+ bb.utils.remove(self.opkg_dir, True)
+ bb.utils.remove(cachedir, True)
+
+ def remove_lists(self):
+ if not self.from_feeds:
+ bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True)
+
+ def list_installed(self):
+ return PMPkgsList(self.d, self.target_rootfs).list_pkgs()
+
+ def dummy_install(self, pkgs):
+ """
+        Dummy-install pkgs (opkg --noaction) and return the log of the output.
+ """
+ if len(pkgs) == 0:
+ return
+
+        # Create a temp dir as the opkg root for the dummy installation
+ temp_rootfs = self.d.expand('${T}/opkg')
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ if opkg_lib_dir[0] == "/":
+ opkg_lib_dir = opkg_lib_dir[1:]
+ temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg')
+ bb.utils.mkdirhier(temp_opkg_dir)
+
+ opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
+ opkg_args += self.d.getVar("OPKG_ARGS")
+
+ cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to update. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ # Dummy installation
+ cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
+ opkg_args,
+ ' '.join(pkgs))
+ proc = subprocess.run(cmd, capture_output=True, encoding="utf-8", shell=True)
+ if proc.returncode:
+ bb.fatal("Unable to dummy install packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, proc.returncode, proc.stderr))
+ elif proc.stderr:
+ bb.note("Command '%s' returned stderr: %s" % (cmd, proc.stderr))
+
+ bb.utils.remove(temp_rootfs, True)
+
+ return proc.stdout
+
+ def backup_packaging_data(self):
+        # Save the opkglib for incremental ipk image generation
+ if os.path.exists(self.saved_opkg_dir):
+ bb.utils.remove(self.saved_opkg_dir, True)
+ shutil.copytree(self.opkg_dir,
+ self.saved_opkg_dir,
+ symlinks=True)
+
+ def recover_packaging_data(self):
+ # Move the opkglib back
+ if os.path.exists(self.saved_opkg_dir):
+ if os.path.exists(self.opkg_dir):
+ bb.utils.remove(self.opkg_dir, True)
+
+ bb.note('Recover packaging data')
+ shutil.copytree(self.saved_opkg_dir,
+ self.opkg_dir,
+ symlinks=True)
+
+ def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
+ cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
+ pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
+
+ pkg_arch = pkg_info[pkg]["arch"]
+ pkg_filename = pkg_info[pkg]["filename"]
+ pkg_info[pkg]["filepath"] = \
+ os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
+
+ return pkg_info
+
+ def extract(self, pkg):
+ """
+        Returns the path to a tmpdir containing the contents of a package.
+
+        Deleting the tmpdir is the responsibility of the caller.
+ """
+ pkg_info = self.package_info(pkg)
+ if not pkg_info:
+ bb.fatal("Unable to get information for package '%s' while "
+ "trying to extract the package." % pkg)
+
+ tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.zst"))
+
+ return tmp_dir
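
mark_packages() above rewrites the opkg status file purely with regular expressions, swapping the trailing "unpacked"/"installed" word of each matching Status line. A small hypothetical demonstration of that substitution on a single made-up stanza:

import re

status = (
    "Package: busybox\n"
    "Version: 1.36.1\n"
    "Status: install ok unpacked\n"
)
new_status = re.sub(
    r"Package: busybox\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
    r"Package: busybox\n\1Status: \2installed",
    status,
)
print(new_status)  # the Status line now ends in "installed"
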
diff --git a/meta/lib/oe/package_manager/ipk/manifest.py b/meta/lib/oe/package_manager/ipk/manifest.py
new file mode 100644
index 0000000000..3549d7428d
--- /dev/null
+++ b/meta/lib/oe/package_manager/ipk/manifest.py
@@ -0,0 +1,76 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from oe.manifest import Manifest
+import re
+
+class PkgManifest(Manifest):
+ """
+    Returns a dictionary object with must-install ('mip') and multilib ('mlp') packages.
+ """
+ def _split_multilib(self, pkg_list):
+ pkgs = dict()
+
+ for pkg in pkg_list.split():
+ pkg_type = self.PKG_TYPE_MUST_INSTALL
+
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
+
+ for ml_variant in ml_variants:
+ if pkg.startswith(ml_variant + '-'):
+ pkg_type = self.PKG_TYPE_MULTILIB
+
+ if not pkg_type in pkgs:
+ pkgs[pkg_type] = pkg
+ else:
+ pkgs[pkg_type] += " " + pkg
+
+ return pkgs
+
+ def create_initial(self):
+ pkgs = dict()
+
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ if var in self.vars_to_split:
+ split_pkgs = self._split_multilib(self.d.getVar(var))
+ if split_pkgs is not None:
+ pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
+ else:
+ pkg_list = self.d.getVar(var)
+ if pkg_list is not None:
+ pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
+
+ for pkg_type in sorted(pkgs):
+ for pkg in sorted(pkgs[pkg_type].split()):
+ manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ if not os.path.exists(self.initial_manifest):
+ self.create_initial()
+
+ initial_manifest = self.parse_initial_manifest()
+ pkgs_to_install = list()
+ for pkg_type in initial_manifest:
+ pkgs_to_install += initial_manifest[pkg_type]
+ if len(pkgs_to_install) == 0:
+ return
+
+ output = pm.dummy_install(pkgs_to_install)
+
+ with open(self.full_manifest, 'w+') as manifest:
+ pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
+ for line in set(output.split('\n')):
+ m = pkg_re.match(line)
+ if m:
+ manifest.write(m.group(1) + '\n')
+
+ return
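
create_full() above derives the full package set from a --noaction opkg run, keeping only the package name out of each "Installing ..." line. A hypothetical sample of that parsing (the output lines and package names are made up for illustration):

import re

output = """Installing busybox (1.36.1) on root
Downloading file:/oe-rootfs-repo/core2-64/busybox_1.36.1-r0_core2-64.ipk
Installing libc6 (2.38) on root"""

pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
names = sorted({m.group(1) for m in map(pkg_re.match, output.split('\n')) if m})
print(names)  # ['busybox', 'libc6']
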
diff --git a/meta/lib/oe/package_manager/ipk/rootfs.py b/meta/lib/oe/package_manager/ipk/rootfs.py
new file mode 100644
index 0000000000..ba93eb62ea
--- /dev/null
+++ b/meta/lib/oe/package_manager/ipk/rootfs.py
@@ -0,0 +1,352 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import filecmp
+import shutil
+from oe.rootfs import Rootfs
+from oe.manifest import Manifest
+from oe.utils import execute_pre_post_process
+from oe.package_manager.ipk.manifest import PkgManifest
+from oe.package_manager.ipk import OpkgPM
+
+class DpkgOpkgRootfs(Rootfs):
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
+ super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+
+ def _get_pkgs_postinsts(self, status_file):
+ def _get_pkg_depends_list(pkg_depends):
+ pkg_depends_list = []
+ # filter version requirements like libc (>= 1.1)
+ for dep in pkg_depends.split(', '):
+ m_dep = re.match(r"^(.*) \(.*\)$", dep)
+ if m_dep:
+ dep = m_dep.group(1)
+ pkg_depends_list.append(dep)
+
+ return pkg_depends_list
+
+ pkgs = {}
+ pkg_name = ""
+ pkg_status_match = False
+ pkg_depends = ""
+
+ with open(status_file) as status:
+ data = status.read()
+ status.close()
+ for line in data.split('\n'):
+ m_pkg = re.match(r"^Package: (.*)", line)
+ m_status = re.match(r"^Status:.*unpacked", line)
+ m_depends = re.match(r"^Depends: (.*)", line)
+
+                #Only one of m_pkg, m_status or m_depends is not None at a time
+ #If m_pkg is not None, we started a new package
+ if m_pkg is not None:
+ #Get Package name
+ pkg_name = m_pkg.group(1)
+ #Make sure we reset other variables
+ pkg_status_match = False
+ pkg_depends = ""
+ elif m_status is not None:
+ #New status matched
+ pkg_status_match = True
+ elif m_depends is not None:
+                    #New depends matched
+ pkg_depends = m_depends.group(1)
+ else:
+ pass
+
+ #Now check if we can process package depends and postinst
+ if "" != pkg_name and pkg_status_match:
+ pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
+ else:
+ #Not enough information
+ pass
+
+ # remove package dependencies not in postinsts
+ pkg_names = list(pkgs.keys())
+ for pkg_name in pkg_names:
+ deps = pkgs[pkg_name][:]
+
+ for d in deps:
+ if d not in pkg_names:
+ pkgs[pkg_name].remove(d)
+
+ return pkgs
+
+ def _get_delayed_postinsts_common(self, status_file):
+ def _dep_resolve(graph, node, resolved, seen):
+ seen.append(node)
+
+ for edge in graph[node]:
+ if edge not in resolved:
+ if edge in seen:
+ raise RuntimeError("Packages %s and %s have " \
+ "a circular dependency in postinsts scripts." \
+ % (node, edge))
+ _dep_resolve(graph, edge, resolved, seen)
+
+ resolved.append(node)
+
+ pkg_list = []
+
+ pkgs = None
+ if not self.d.getVar('PACKAGE_INSTALL').strip():
+ bb.note("Building empty image")
+ else:
+ pkgs = self._get_pkgs_postinsts(status_file)
+ if pkgs:
+ root = "__packagegroup_postinst__"
+ pkgs[root] = list(pkgs.keys())
+ _dep_resolve(pkgs, root, pkg_list, [])
+ pkg_list.remove(root)
+
+ if len(pkg_list) == 0:
+ return None
+
+ return pkg_list
+
+ def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management",
+ True, False, self.d):
+ return
+ num = 0
+ for p in self._get_delayed_postinsts():
+ bb.utils.mkdirhier(dst_postinst_dir)
+
+ if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
+ shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
+ os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
+
+ num += 1
+
+class PkgRootfs(DpkgOpkgRootfs):
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = '(exit 1|Collected errors)'
+
+ self.manifest = PkgManifest(d, manifest_dir)
+ self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
+ self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")
+
+ self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
+ if self._remove_old_rootfs():
+ bb.utils.remove(self.image_rootfs, True)
+ self.pm = OpkgPM(d,
+ self.image_rootfs,
+ self.opkg_conf,
+ self.pkg_archs)
+ else:
+ self.pm = OpkgPM(d,
+ self.image_rootfs,
+ self.opkg_conf,
+ self.pkg_archs)
+ self.pm.recover_packaging_data()
+
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
+ '''
+ Compare two files with the same key twice to see if they are equal.
+    Compare two files that share the same key to see if they are equal.
+    If they are not equal, they are duplicates that come from
+ '''
+ def _file_equal(self, key, f1, f2):
+ if filecmp.cmp(f1, f2):
+ return True
+ # Not equal
+ return False
+
+ """
+ This function was reused from the old implementation.
+ See commit: "image.bbclass: Added variables for multilib support." by
+ Lianhao Lu.
+ """
+ def _multilib_sanity_test(self, dirs):
+
+ allow_replace = "|".join((self.d.getVar("MULTILIBRE_ALLOW_REP") or "").split())
+ if allow_replace is None:
+ allow_replace = ""
+
+ allow_rep = re.compile(re.sub(r"\|$", r"", allow_replace))
+ error_prompt = "Multilib check error:"
+
+ files = {}
+ for dir in dirs:
+ for root, subfolders, subfiles in os.walk(dir):
+ for file in subfiles:
+ item = os.path.join(root, file)
+ key = str(os.path.join("/", os.path.relpath(item, dir)))
+
+ valid = True
+ if key in files:
+                    #check whether the file is allowed to be replaced
+ if allow_rep.match(key):
+ valid = True
+ else:
+ if os.path.exists(files[key]) and \
+ os.path.exists(item) and \
+ not self._file_equal(key, files[key], item):
+ valid = False
+                            bb.fatal("%s duplicate files %s %s are not the same\n" %
+ (error_prompt, item, files[key]))
+
+ #pass the check, add to list
+ if valid:
+ files[key] = item
+
+ def _multilib_test_install(self, pkgs):
+ ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
+ bb.utils.mkdirhier(ml_temp)
+
+ dirs = [self.image_rootfs]
+
+ for variant in self.d.getVar("MULTILIB_VARIANTS").split():
+ ml_target_rootfs = os.path.join(ml_temp, variant)
+
+ bb.utils.remove(ml_target_rootfs, True)
+
+ ml_opkg_conf = os.path.join(ml_temp,
+ variant + "-" + os.path.basename(self.opkg_conf))
+
+ ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False)
+
+ ml_pm.update()
+ ml_pm.install(pkgs)
+
+ dirs.append(ml_target_rootfs)
+
+ self._multilib_sanity_test(dirs)
+
+ '''
+    When ipk incremental image generation is enabled, remove the unneeded
+    packages by comparing the old full manifest from the previously existing
+    image with the new full manifest of the current image.
+ '''
+ def _remove_extra_packages(self, pkgs_initial_install):
+ if self.inc_opkg_image_gen == "1":
+ # Parse full manifest in previous existing image creation session
+ old_full_manifest = self.manifest.parse_full_manifest()
+
+ # Create full manifest for the current image session, the old one
+ # will be replaced by the new one.
+ self.manifest.create_full(self.pm)
+
+ # Parse full manifest in current image creation session
+ new_full_manifest = self.manifest.parse_full_manifest()
+
+ pkg_to_remove = list()
+ for pkg in old_full_manifest:
+ if pkg not in new_full_manifest:
+ pkg_to_remove.append(pkg)
+
+ if pkg_to_remove != []:
+ bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
+ self.pm.remove(pkg_to_remove)
+
+ '''
+    Compare with the previous image creation: if certain conditions are
+    triggered, the previous (old) image should be removed.
+    The conditions are that any of PACKAGE_EXCLUDE, NO_RECOMMENDATIONS
+    or BAD_RECOMMENDATIONS has been changed.
+ '''
+ def _remove_old_rootfs(self):
+ if self.inc_opkg_image_gen != "1":
+ return True
+
+ vars_list_file = self.d.expand('${T}/vars_list')
+
+ old_vars_list = ""
+ if os.path.exists(vars_list_file):
+ old_vars_list = open(vars_list_file, 'r+').read()
+
+ new_vars_list = '%s:%s:%s\n' % \
+ ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
+ (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
+ (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
+ open(vars_list_file, 'w+').write(new_vars_list)
+
+ if old_vars_list != new_vars_list:
+ return True
+
+ return False
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
+ opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, opkg_pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+ # Steps are a bit different in order, skip next
+ self.progress_reporter.next_stage()
+
+ self.pm.update()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ if self.inc_opkg_image_gen == "1":
+ self._remove_extra_packages(pkgs_to_install)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ # For multilib, we perform a sanity test before final install
+ # If sanity test fails, it will automatically do a bb.fatal()
+ # and the installation will stop
+ if pkg_type == Manifest.PKG_TYPE_MULTILIB:
+ self._multilib_test_install(pkgs_to_install[pkg_type])
+
+ self.pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install_complementary()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ opkg_dir = os.path.join(opkg_lib_dir, 'opkg')
+ self._setup_dbg_rootfs([opkg_dir])
+
+ execute_pre_post_process(self.d, opkg_post_process_cmds)
+
+ if self.inc_opkg_image_gen == "1":
+ self.pm.backup_packaging_data()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ @staticmethod
+ def _depends_list():
+        return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS']
+
+ def _get_delayed_postinsts(self):
+ status_file = os.path.join(self.image_rootfs,
+ self.d.getVar('OPKGLIBDIR').strip('/'),
+ "opkg", "status")
+ return self._get_delayed_postinsts_common(status_file)
+
+ def _save_postinsts(self):
+ dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
+ src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
+ return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ self.pm.remove_lists()
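
_remove_old_rootfs() above decides whether the previous incremental rootfs can be reused by snapshotting BAD_RECOMMENDATIONS, NO_RECOMMENDATIONS and PACKAGE_EXCLUDE into ${T}/vars_list and comparing against the previous snapshot. A standalone sketch of the same check (a hypothetical helper; the real method also returns True whenever incremental generation is disabled):

import os

def rootfs_must_be_rebuilt(vars_list_file, bad_recs, no_recs, excludes):
    old_vars_list = ""
    if os.path.exists(vars_list_file):
        with open(vars_list_file) as f:
            old_vars_list = f.read()
    new_vars_list = "%s:%s:%s\n" % (bad_recs.strip(), no_recs.strip(), excludes.strip())
    with open(vars_list_file, "w") as f:
        f.write(new_vars_list)
    return old_vars_list != new_vars_list
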
diff --git a/meta/lib/oe/package_manager/ipk/sdk.py b/meta/lib/oe/package_manager/ipk/sdk.py
new file mode 100644
index 0000000000..3acd55f548
--- /dev/null
+++ b/meta/lib/oe/package_manager/ipk/sdk.py
@@ -0,0 +1,113 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import glob
+import shutil
+from oe.utils import execute_pre_post_process
+from oe.sdk import Sdk
+from oe.package_manager.ipk.manifest import PkgManifest
+from oe.manifest import Manifest
+from oe.package_manager.ipk import OpkgPM
+
+class PkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None):
+ super(PkgSdk, self).__init__(d, manifest_dir)
+
+ # In sdk_list_installed_packages the call to opkg is hardcoded to
+ # always use IPKGCONF_TARGET and there's no exposed API to change this
+ # so simply override IPKGCONF_TARGET to use this separated config file.
+ ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK_TARGET")
+ d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target)
+
+ self.target_conf = self.d.getVar("IPKGCONF_TARGET")
+ self.host_conf = self.d.getVar("IPKGCONF_SDK")
+
+ self.target_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ ipk_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ ipk_repo_workdir = "oe-sdk-ext-repo"
+
+ self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
+ self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"),
+ ipk_repo_workdir=ipk_repo_workdir)
+
+ self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
+ self.d.getVar("SDK_PACKAGE_ARCHS"),
+ ipk_repo_workdir=ipk_repo_workdir)
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
+ pm.write_index()
+
+ pm.update()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ env_bkp = os.environ.copy()
+ os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
+ os.pathsep + os.environ["PATH"]
+
+ self.target_pm.run_intercepts(populate_sdk='target')
+ os.environ.update(env_bkp)
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.target_pm.remove_packaging_data()
+ else:
+ self.target_pm.remove_lists()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts(populate_sdk='host')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.host_pm.remove_packaging_data()
+ else:
+ self.host_pm.remove_lists()
+
+ target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
+ host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
+
+ self.mkdirhier(target_sysconfdir)
+ shutil.copy(self.target_conf, target_sysconfdir)
+ os.chmod(os.path.join(target_sysconfdir,
+ os.path.basename(self.target_conf)), 0o644)
+
+ self.mkdirhier(host_sysconfdir)
+ shutil.copy(self.host_conf, host_sysconfdir)
+ os.chmod(os.path.join(host_sysconfdir,
+ os.path.basename(self.host_conf)), 0o644)
+
+ native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
+ self.d.getVar('localstatedir_nativesdk').strip('/'),
+ "lib", "opkg")
+ self.mkdirhier(native_opkg_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
+ self.movefile(f, native_opkg_state_dir)
+
+ self.remove(os.path.join(self.sdk_output, "var"), True)
diff --git a/meta/lib/oe/package_manager/rpm/__init__.py b/meta/lib/oe/package_manager/rpm/__init__.py
new file mode 100644
index 0000000000..f40c880af4
--- /dev/null
+++ b/meta/lib/oe/package_manager/rpm/__init__.py
@@ -0,0 +1,422 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import shutil
+import subprocess
+from oe.package_manager import *
+
+class RpmIndexer(Indexer):
+ def write_index(self):
+ self.do_write_index(self.deploy_dir)
+
+ def do_write_index(self, deploy_dir):
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
+ else:
+ signer = None
+
+ createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
+ result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir))
+ if result:
+ bb.fatal(result)
+
+ # Sign repomd
+ if signer:
+ sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
+ is_ascii_sig = (sig_type.upper() != "BIN")
+ signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'),
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
+ armor=is_ascii_sig)
+
+class RpmSubdirIndexer(RpmIndexer):
+ def write_index(self):
+ bb.note("Generating package index for %s" %(self.deploy_dir))
+ # Remove the existing repodata to ensure that we re-generate it no matter what
+ bb.utils.remove(os.path.join(self.deploy_dir, "repodata"), recurse=True)
+
+ self.do_write_index(self.deploy_dir)
+ for entry in os.walk(self.deploy_dir):
+ if os.path.samefile(self.deploy_dir, entry[0]):
+ for dir in entry[1]:
+ if dir != 'repodata':
+ dir_path = oe.path.join(self.deploy_dir, dir)
+ bb.note("Generating package index for %s" %(dir_path))
+ self.do_write_index(dir_path)
+
+
+class PMPkgsList(PkgsList):
+ def list_pkgs(self):
+ return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False).list_installed()
+
+class RpmPM(PackageManager):
+ def __init__(self,
+ d,
+ target_rootfs,
+ target_vendor,
+ task_name='target',
+ arch_var=None,
+ os_var=None,
+ rpm_repo_workdir="oe-rootfs-repo",
+ filterbydependencies=True,
+ needfeed=True):
+ super(RpmPM, self).__init__(d, target_rootfs)
+ self.target_vendor = target_vendor
+ self.task_name = task_name
+ if arch_var == None:
+ self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_")
+ else:
+ self.archs = self.d.getVar(arch_var).replace("-","_")
+ if task_name == "host":
+ self.primary_arch = self.d.getVar('SDK_ARCH')
+ else:
+ self.primary_arch = self.d.getVar('MACHINE_ARCH')
+
+ if needfeed:
+ self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
+ create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
+
+ self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
+ self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
+ self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
+ self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved'))
+
+ def _configure_dnf(self):
+ # libsolv handles 'noarch' internally, we don't need to specify it explicitly
+ archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]]
+ # This prevents accidental matching against libsolv's built-in policies
+ if len(archs) <= 1:
+ archs = archs + ["bogusarch"]
+ # This architecture needs to be upfront so that packages using it are properly prioritized
+ archs = ["sdk_provides_dummy_target"] + archs
+ confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
+ bb.utils.mkdirhier(confdir)
+ with open(confdir + "arch", 'w') as f:
+ f.write(":".join(archs))
+
+ distro_codename = self.d.getVar('DISTRO_CODENAME')
+ with open(confdir + "releasever", 'w') as f:
+ f.write(distro_codename if distro_codename is not None else '')
+
+ with open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w') as f:
+ f.write("")
+
+
+ def _configure_rpm(self):
+ # We need to configure rpm to use our primary package architecture as the installation architecture,
+ # and to make it compatible with other package architectures that we use.
+        # Otherwise it will refuse to proceed with package installation.
+ platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
+ rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
+ bb.utils.mkdirhier(platformconfdir)
+ with open(platformconfdir + "platform", 'w') as f:
+ f.write("%s-pc-linux" % self.primary_arch)
+ with open(rpmrcconfdir + "rpmrc", 'w') as f:
+ f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
+ f.write("buildarch_compat: %s: noarch\n" % self.primary_arch)
+
+ with open(platformconfdir + "macros", 'w') as f:
+ f.write("%_transaction_color 7\n")
+ if self.d.getVar('RPM_PREFER_ELF_ARCH'):
+ with open(platformconfdir + "macros", 'a') as f:
+ f.write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
+
+ if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
+ signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
+ pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
+ signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
+ rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
+ cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Importing GPG key failed. Command '%s' "
+ "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ def create_configs(self):
+ self._configure_dnf()
+ self._configure_rpm()
+
+ def write_index(self):
+ lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
+ lf = bb.utils.lockfile(lockfilename, False)
+ RpmIndexer(self.d, self.rpm_repo_dir).write_index()
+ bb.utils.unlockfile(lf)
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ from urllib.parse import urlparse
+
+ if feed_uris == "":
+ return
+
+ gpg_opts = ''
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ gpg_opts += 'repo_gpgcheck=1\n'
+ gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))
+
+ if self.d.getVar('RPM_SIGN_PACKAGES') != '1':
+ gpg_opts += 'gpgcheck=0\n'
+
+ bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
+ remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+ for uri in remote_uris:
+ repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
+ if feed_archs is not None:
+ for arch in feed_archs.split():
+ repo_uri = uri + "/" + arch
+ repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
+ repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
+ with open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a') as f:
+ f.write("[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
+ else:
+ repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
+ repo_uri = uri
+ with open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w') as f:
+ f.write("[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
+
+ def _prepare_pkg_transaction(self):
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+
+ def install(self, pkgs, attempt_only=False, hard_depends_only=False):
+ if len(pkgs) == 0:
+ return
+ self._prepare_pkg_transaction()
+
+ bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
+ package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
+ exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])
+
+ output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
+ (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
+ (["--setopt=install_weak_deps=False"] if (hard_depends_only or self.d.getVar('NO_RECOMMENDATIONS') == "1") else []) +
+ (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
+ ["install"] +
+ pkgs)
+
+ failed_scriptlets_pkgnames = collections.OrderedDict()
+ for line in output.splitlines():
+ if line.startswith("Error: Systemctl"):
+ bb.error(line)
+
+ if line.startswith("Error in POSTIN scriptlet in rpm package"):
+ failed_scriptlets_pkgnames[line.split()[-1]] = True
+
+ if len(failed_scriptlets_pkgnames) > 0:
+ failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+
+ def remove(self, pkgs, with_dependencies = True):
+ if not pkgs:
+ return
+
+ self._prepare_pkg_transaction()
+
+ if with_dependencies:
+ self._invoke_dnf(["remove"] + pkgs)
+ else:
+ cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs]
+
+ try:
+ bb.note("Running %s" % ' '.join([cmd] + args + pkgs))
+ output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not invoke rpm. Command "
+ "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))
+
+ def upgrade(self):
+ self._prepare_pkg_transaction()
+ self._invoke_dnf(["upgrade"])
+
+ def autoremove(self):
+ self._prepare_pkg_transaction()
+ self._invoke_dnf(["autoremove"])
+
+ def remove_packaging_data(self):
+ self._invoke_dnf(["clean", "all"])
+ for dir in self.packaging_data_dirs:
+ bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)
+
+ def backup_packaging_data(self):
+        # Save the packaging dirs for incremental rpm image generation
+ if os.path.exists(self.saved_packaging_data):
+ bb.utils.remove(self.saved_packaging_data, True)
+ for i in self.packaging_data_dirs:
+ source_dir = oe.path.join(self.target_rootfs, i)
+ target_dir = oe.path.join(self.saved_packaging_data, i)
+ if os.path.isdir(source_dir):
+ shutil.copytree(source_dir, target_dir, symlinks=True)
+ elif os.path.isfile(source_dir):
+ shutil.copy2(source_dir, target_dir)
+
+ def recovery_packaging_data(self):
+ # Move the rpmlib back
+ if os.path.exists(self.saved_packaging_data):
+ for i in self.packaging_data_dirs:
+ target_dir = oe.path.join(self.target_rootfs, i)
+ if os.path.exists(target_dir):
+ bb.utils.remove(target_dir, True)
+ source_dir = oe.path.join(self.saved_packaging_data, i)
+ if os.path.isdir(source_dir):
+ shutil.copytree(source_dir, target_dir, symlinks=True)
+ elif os.path.isfile(source_dir):
+ shutil.copy2(source_dir, target_dir)
+
+ def list_installed(self):
+ output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
+ print_output = False)
+ packages = {}
+ current_package = None
+ current_deps = None
+ current_state = "initial"
+ for line in output.splitlines():
+ if line.startswith("Package:"):
+ package_info = line.split(" ")[1:]
+ current_package = package_info[0]
+ package_arch = package_info[1]
+ package_version = package_info[2]
+ package_rpm = package_info[3]
+ packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm}
+ current_deps = []
+ elif line.startswith("Dependencies:"):
+ current_state = "dependencies"
+ elif line.startswith("Recommendations"):
+ current_state = "recommendations"
+ elif line.startswith("DependenciesEndHere:"):
+ current_state = "initial"
+ packages[current_package]["deps"] = current_deps
+ elif len(line) > 0:
+ if current_state == "dependencies":
+ current_deps.append(line)
+ elif current_state == "recommendations":
+ current_deps.append("%s [REC]" % line)
+
+ return packages
+
+ def update(self):
+ self._invoke_dnf(["makecache", "--refresh"])
+
+ def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
+ os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
+
+ dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
+ standard_dnf_args = ["-v", "--rpmverbosity=info", "-y",
+ "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
+ "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
+ "--installroot=%s" % (self.target_rootfs),
+ "--setopt=logdir=%s" % (self.d.getVar('T'))
+ ]
+ if hasattr(self, "rpm_repo_dir"):
+ standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir))
+ cmd = [dnf_cmd] + standard_dnf_args + dnf_args
+ bb.note('Running %s' % ' '.join(cmd))
+ try:
+ output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
+ if print_output:
+ bb.debug(1, output)
+ return output
+ except subprocess.CalledProcessError as e:
+ if print_output:
+ (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+ "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ else:
+ (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+ "'%s' returned %d:" % (' '.join(cmd), e.returncode))
+ return e.output.decode("utf-8")
+
+ def dump_install_solution(self, pkgs):
+ with open(self.solution_manifest, 'w') as f:
+ f.write(" ".join(pkgs))
+ return pkgs
+
+ def load_old_install_solution(self):
+ if not os.path.exists(self.solution_manifest):
+ return []
+ with open(self.solution_manifest, 'r') as fd:
+ return fd.read().split()
+
+ def _script_num_prefix(self, path):
+ files = os.listdir(path)
+ numbers = set()
+ numbers.add(99)
+ for f in files:
+ numbers.add(int(f.split("-")[0]))
+ return max(numbers) + 1
+
+ def save_rpmpostinst(self, pkg):
+ bb.note("Saving postinstall script of %s" % (pkg))
+ cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]
+
+ try:
+ output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not invoke rpm. Command "
+ "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))
+
+ # may need to prepend #!/bin/sh to output
+
+ target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
+ bb.utils.mkdirhier(target_path)
+ num = self._script_num_prefix(target_path)
+ saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
+ with open(saved_script_name, 'w') as f:
+ f.write(output)
+ os.chmod(saved_script_name, 0o755)
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
+ bb.utils.mkdirhier(rpm_postinsts_dir)
+
+ # Save the package postinstalls in /etc/rpm-postinsts
+ for pkg in registered_pkgs.split():
+ self.save_rpmpostinst(pkg)
+
+ def extract(self, pkg):
+ output = self._invoke_dnf(["repoquery", "--location", pkg])
+ pkg_name = output.splitlines()[-1]
+ if not pkg_name.endswith(".rpm"):
+ bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
+ # Strip file: prefix
+ pkg_path = pkg_name[5:]
+
+ cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
+ rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
+
+ if not os.path.isfile(pkg_path):
+ bb.fatal("Unable to extract package for '%s'."
+ "File %s doesn't exists" % (pkg, pkg_path))
+
+ tmp_dir = tempfile.mkdtemp()
+ current_dir = os.getcwd()
+ os.chdir(tmp_dir)
+
+ try:
+ cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
+ except OSError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))
+
+ bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
+ os.chdir(current_dir)
+
+ return tmp_dir
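
The extract() method above locates the package with "dnf repoquery --location" and then unpacks the .rpm by piping rpm2cpio into cpio inside a throwaway directory. A minimal standalone sketch of that pipeline, assuming rpm2cpio and cpio are on PATH (the helper name is invented):

import os
import shutil
import subprocess
import tempfile

def extract_rpm(pkg_path):
    # Unpack a single .rpm into a fresh temporary directory using the same
    # rpm2cpio | cpio pipeline as RpmPM.extract(); returns the directory path.
    if not os.path.isfile(pkg_path):
        raise FileNotFoundError(pkg_path)
    tmp_dir = tempfile.mkdtemp()
    try:
        subprocess.check_output("rpm2cpio %s | cpio -idm" % pkg_path,
                                shell=True, stderr=subprocess.STDOUT, cwd=tmp_dir)
    except (subprocess.CalledProcessError, OSError):
        shutil.rmtree(tmp_dir)
        raise
    return tmp_dir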
diff --git a/meta/lib/oe/package_manager/rpm/manifest.py b/meta/lib/oe/package_manager/rpm/manifest.py
new file mode 100644
index 0000000000..6ee7c329f0
--- /dev/null
+++ b/meta/lib/oe/package_manager/rpm/manifest.py
@@ -0,0 +1,56 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from oe.manifest import Manifest
+
+class PkgManifest(Manifest):
+ """
+ Returns a dictionary object with must-install (mip) and multilib (mlp) packages.
+ """
+ def _split_multilib(self, pkg_list):
+ pkgs = dict()
+
+ for pkg in pkg_list.split():
+ pkg_type = self.PKG_TYPE_MUST_INSTALL
+
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
+
+ for ml_variant in ml_variants:
+ if pkg.startswith(ml_variant + '-'):
+ pkg_type = self.PKG_TYPE_MULTILIB
+
+ if not pkg_type in pkgs:
+ pkgs[pkg_type] = pkg
+ else:
+ pkgs[pkg_type] += " " + pkg
+
+ return pkgs
+
+ def create_initial(self):
+ pkgs = dict()
+
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ if var in self.vars_to_split:
+ split_pkgs = self._split_multilib(self.d.getVar(var))
+ if split_pkgs is not None:
+ pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
+ else:
+ pkg_list = self.d.getVar(var)
+ if pkg_list is not None:
+ pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
+
+ for pkg_type in pkgs:
+ for pkg in pkgs[pkg_type].split():
+ manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ pass
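
create_initial() above writes one "pkg_type,pkg" pair per line into the initial manifest. A hypothetical reader for that format (package names in the comment are invented), roughly what Manifest.parse_initial_manifest() later consumes:

def read_initial_manifest(path):
    # One "<pkg_type>,<pkg>" entry per line, e.g. "mip,packagegroup-core-boot";
    # header/comment lines start with "#".
    pkgs = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            pkg_type, pkg = line.split(",", 1)
            pkgs.setdefault(pkg_type, []).append(pkg)
    return pkgs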
diff --git a/meta/lib/oe/package_manager/rpm/rootfs.py b/meta/lib/oe/package_manager/rpm/rootfs.py
new file mode 100644
index 0000000000..3ba5396320
--- /dev/null
+++ b/meta/lib/oe/package_manager/rpm/rootfs.py
@@ -0,0 +1,150 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from oe.rootfs import Rootfs
+from oe.manifest import Manifest
+from oe.utils import execute_pre_post_process
+from oe.package_manager.rpm.manifest import PkgManifest
+from oe.package_manager.rpm import RpmPM
+
+class PkgRootfs(Rootfs):
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\
+ r'|exit 1|ERROR: |Error: |Error |ERROR '\
+ r'|Failed |Failed: |Failed$|Failed\(\d+\):)'
+
+ self.manifest = PkgManifest(d, manifest_dir)
+
+ self.pm = RpmPM(d,
+ d.getVar('IMAGE_ROOTFS'),
+ self.d.getVar('TARGET_VENDOR')
+ )
+
+ self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
+ if self.inc_rpm_image_gen != "1":
+ bb.utils.remove(self.image_rootfs, True)
+ else:
+ self.pm.recovery_packaging_data()
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
+
+ self.pm.create_configs()
+
+ '''
+ When rpm incremental image generation is enabled, remove packages that
+ are no longer needed by comparing the new install solution manifest
+ with the old installed manifest.
+ '''
+ def _create_incremental(self, pkgs_initial_install):
+ if self.inc_rpm_image_gen == "1":
+
+ pkgs_to_install = list()
+ for pkg_type in pkgs_initial_install:
+ pkgs_to_install += pkgs_initial_install[pkg_type]
+
+ installed_manifest = self.pm.load_old_install_solution()
+ solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
+
+ pkg_to_remove = list()
+ for pkg in installed_manifest:
+ if pkg not in solution_manifest:
+ pkg_to_remove.append(pkg)
+
+ self.pm.update()
+
+ bb.note('incremental update -- upgrade packages in place ')
+ self.pm.upgrade()
+ if pkg_to_remove != []:
+ bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
+ self.pm.remove(pkg_to_remove)
+
+ self.pm.autoremove()
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
+ rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, rpm_pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ if self.inc_rpm_image_gen == "1":
+ self._create_incremental(pkgs_to_install)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.update()
+
+ pkgs = []
+ pkgs_attempt = []
+ for pkg_type in pkgs_to_install:
+ if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+ pkgs_attempt += pkgs_to_install[pkg_type]
+ else:
+ pkgs += pkgs_to_install[pkg_type]
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install(pkgs)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install(pkgs_attempt, True)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install_complementary()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self._setup_dbg_rootfs(['/etc/rpm', '/etc/rpmrc', '/etc/dnf', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
+
+ execute_pre_post_process(self.d, rpm_post_process_cmds)
+
+ if self.inc_rpm_image_gen == "1":
+ self.pm.backup_packaging_data()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+
+ @staticmethod
+ def _depends_list():
+ return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
+ 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
+
+ def _get_delayed_postinsts(self):
+ postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
+ if os.path.isdir(postinst_dir):
+ files = os.listdir(postinst_dir)
+ for f in files:
+ bb.note('Delayed package scriptlet: %s' % f)
+ return files
+
+ return None
+
+ def _save_postinsts(self):
+ # this is just a stub. For RPM, the failed postinstalls are
+ # already saved in /etc/rpm-postinsts
+ pass
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d):
+ self.pm._invoke_dnf(["clean", "all"])
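
When INC_RPM_IMAGE_GEN is "1", _create_incremental() above compares the new install solution against the previously installed manifest and removes whatever dropped out. The core of that comparison, with invented package names:

# Packages present in the old installed manifest but absent from the new
# install solution are the ones handed to pm.remove().
old_manifest = ["base-files", "busybox", "dropbear"]
new_solution = ["base-files", "busybox", "openssh"]
pkgs_to_remove = [p for p in old_manifest if p not in new_solution]
assert pkgs_to_remove == ["dropbear"]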
diff --git a/meta/lib/oe/package_manager/rpm/sdk.py b/meta/lib/oe/package_manager/rpm/sdk.py
new file mode 100644
index 0000000000..ea79fe050b
--- /dev/null
+++ b/meta/lib/oe/package_manager/rpm/sdk.py
@@ -0,0 +1,122 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import glob
+from oe.utils import execute_pre_post_process
+from oe.sdk import Sdk
+from oe.manifest import Manifest
+from oe.package_manager.rpm.manifest import PkgManifest
+from oe.package_manager.rpm import RpmPM
+
+class PkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"):
+ super(PkgSdk, self).__init__(d, manifest_dir)
+
+ self.target_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ rpm_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ rpm_repo_workdir = "oe-sdk-ext-repo"
+
+ self.target_pm = RpmPM(d,
+ self.sdk_target_sysroot,
+ self.d.getVar('TARGET_VENDOR'),
+ 'target',
+ rpm_repo_workdir=rpm_repo_workdir
+ )
+
+ self.host_pm = RpmPM(d,
+ self.sdk_host_sysroot,
+ self.d.getVar('SDK_VENDOR'),
+ 'host',
+ "SDK_PACKAGE_ARCHS",
+ "SDK_OS",
+ rpm_repo_workdir=rpm_repo_workdir
+ )
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ pm.create_configs()
+ pm.write_index()
+ pm.update()
+
+ pkgs = []
+ pkgs_attempt = []
+ for pkg_type in pkgs_to_install:
+ if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+ pkgs_attempt += pkgs_to_install[pkg_type]
+ else:
+ pkgs += pkgs_to_install[pkg_type]
+
+ pm.install(pkgs)
+
+ pm.install(pkgs_attempt, True)
+
+ def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ env_bkp = os.environ.copy()
+ os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
+ os.pathsep + os.environ["PATH"]
+
+ self.target_pm.run_intercepts(populate_sdk='target')
+ os.environ.update(env_bkp)
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts(populate_sdk='host')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.host_pm.remove_packaging_data()
+
+ # Move host RPM library data
+ native_rpm_state_dir = os.path.join(self.sdk_output,
+ self.sdk_native_path,
+ self.d.getVar('localstatedir_nativesdk').strip('/'),
+ "lib",
+ "rpm"
+ )
+ self.mkdirhier(native_rpm_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output,
+ "var",
+ "lib",
+ "rpm",
+ "*")):
+ self.movefile(f, native_rpm_state_dir)
+
+ self.remove(os.path.join(self.sdk_output, "var"), True)
+
+ # Move host sysconfig data
+ native_sysconf_dir = os.path.join(self.sdk_output,
+ self.sdk_native_path,
+ self.d.getVar('sysconfdir',
+ True).strip('/'),
+ )
+ self.mkdirhier(native_sysconf_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
+ self.movefile(f, native_sysconf_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
+ self.mkdirhier(native_sysconf_dir + "/dnf")
+ self.movefile(f, native_sysconf_dir + "/dnf")
+ self.remove(os.path.join(self.sdk_output, "etc"), True)
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py
index 32e5c82a94..2d1d6ddeb7 100644
--- a/meta/lib/oe/packagedata.py
+++ b/meta/lib/oe/packagedata.py
@@ -1,5 +1,16 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import codecs
import os
+import json
+import bb.compress.zstd
+import oe.path
+
+from glob import glob
def packaged(pkg, d):
return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK)
@@ -13,10 +24,9 @@ def read_pkgdatafile(fn):
if os.access(fn, os.R_OK):
import re
- f = open(fn, 'r')
- lines = f.readlines()
- f.close()
- r = re.compile("([^:]+):\s*(.*)")
+ with open(fn, 'r') as f:
+ lines = f.readlines()
+ r = re.compile(r"(^.+?):\s+(.*)")
for l in lines:
m = r.match(l)
if m:
@@ -42,18 +52,30 @@ def read_pkgdata(pn, d):
return read_pkgdatafile(fn)
#
-# Collapse FOO_pkg variables into FOO
+# Collapse FOO:pkg variables into FOO
#
def read_subpkgdata_dict(pkg, d):
ret = {}
subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
for var in subd:
- newvar = var.replace("_" + pkg, "")
- if newvar == var and var + "_" + pkg in subd:
+ newvar = var.replace(":" + pkg, "")
+ if newvar == var and var + ":" + pkg in subd:
continue
ret[newvar] = subd[var]
return ret
+def read_subpkgdata_extended(pkg, d):
+ import json
+ import bb.compress.zstd
+
+ fn = d.expand("${PKGDATA_DIR}/extended/%s.json.zstd" % pkg)
+ try:
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+ with bb.compress.zstd.open(fn, "rt", encoding="utf-8", num_threads=num_threads) as f:
+ return json.load(f)
+ except FileNotFoundError:
+ return None
+
def _pkgmap(d):
"""Return a dictionary mapping package to recipe name."""
@@ -93,3 +115,252 @@ def recipename(pkg, d):
"""Return the recipe name for the given binary package name."""
return pkgmap(d).get(pkg)
+
+def foreach_runtime_provider_pkgdata(d, rdep, include_rdep=False):
+ pkgdata_dir = d.getVar("PKGDATA_DIR")
+ possibles = set()
+ try:
+ possibles |= set(os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdep)))
+ except OSError:
+ pass
+
+ if include_rdep:
+ possibles.add(rdep)
+
+ for p in sorted(list(possibles)):
+ rdep_data = read_subpkgdata(p, d)
+ yield p, rdep_data
+
+def get_package_mapping(pkg, basepkg, d, depversions=None):
+ import oe.packagedata
+
+ data = oe.packagedata.read_subpkgdata(pkg, d)
+ key = "PKG:%s" % pkg
+
+ if key in data:
+ if bb.data.inherits_class('allarch', d) and bb.data.inherits_class('packagegroup', d) and pkg != data[key]:
+ bb.error("An allarch packagegroup shouldn't depend on packages which are dynamically renamed (%s to %s)" % (pkg, data[key]))
+ # Have to avoid undoing the write_extra_pkgs(global_variants...)
+ if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
+ and data[key] == basepkg:
+ return pkg
+ if depversions == []:
+ # Avoid returning a mapping if the renamed package rprovides its original name
+ rprovkey = "RPROVIDES:%s" % pkg
+ if rprovkey in data:
+ if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
+ bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
+ return pkg
+ # Do map to rewritten package name
+ return data[key]
+
+ return pkg
+
+def get_package_additional_metadata(pkg_type, d):
+ base_key = "PACKAGE_ADD_METADATA"
+ for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key):
+ if d.getVar(key, False) is None:
+ continue
+ d.setVarFlag(key, "type", "list")
+ if d.getVarFlag(key, "separator") is None:
+ d.setVarFlag(key, "separator", "\\n")
+ metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
+ return "\n".join(metadata_fields).strip()
+
+def runtime_mapping_rename(varname, pkg, d):
+ #bb.note("%s before: %s" % (varname, d.getVar(varname)))
+
+ new_depends = {}
+ deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
+ for depend, depversions in deps.items():
+ new_depend = get_package_mapping(depend, pkg, d, depversions)
+ if depend != new_depend:
+ bb.note("package name mapping done: %s -> %s" % (depend, new_depend))
+ new_depends[new_depend] = deps[depend]
+
+ d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
+
+ #bb.note("%s after: %s" % (varname, d.getVar(varname)))
+
+def emit_pkgdata(pkgfiles, d):
+ def process_postinst_on_target(pkg, mlprefix):
+ pkgval = d.getVar('PKG:%s' % pkg)
+ if pkgval is None:
+ pkgval = pkg
+
+ defer_fragment = """
+if [ -n "$D" ]; then
+ $INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
+ exit 0
+fi
+""" % (pkgval, mlprefix)
+
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
+ postinst_ontarget = d.getVar('pkg_postinst_ontarget:%s' % pkg)
+
+ if postinst_ontarget:
+ bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += defer_fragment
+ postinst += postinst_ontarget
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+
+ def add_set_e_to_scriptlets(pkg):
+ for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
+ scriptlet = d.getVar('%s:%s' % (scriptlet_name, pkg))
+ if scriptlet:
+ scriptlet_split = scriptlet.split('\n')
+ if scriptlet_split[0].startswith("#!"):
+ scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
+ else:
+ scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
+ d.setVar('%s:%s' % (scriptlet_name, pkg), scriptlet)
+
+ def write_if_exists(f, pkg, var):
+ def encode(str):
+ import codecs
+ c = codecs.getencoder("unicode_escape")
+ return c(str)[0].decode("latin1")
+
+ val = d.getVar('%s:%s' % (var, pkg))
+ if val:
+ f.write('%s:%s: %s\n' % (var, pkg, encode(val)))
+ return val
+ val = d.getVar('%s' % (var))
+ if val:
+ f.write('%s: %s\n' % (var, encode(val)))
+ return val
+
+ def write_extra_pkgs(variants, pn, packages, pkgdatadir):
+ for variant in variants:
+ with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
+ fd.write("PACKAGES: %s\n" % ' '.join(
+ map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))
+
+ def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
+ for variant in variants:
+ for pkg in packages.split():
+ ml_pkg = "%s-%s" % (variant, pkg)
+ subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
+ with open(subdata_file, 'w') as fd:
+ fd.write("PKG:%s: %s" % (ml_pkg, pkg))
+
+ packages = d.getVar('PACKAGES')
+ pkgdest = d.getVar('PKGDEST')
+ pkgdatadir = d.getVar('PKGDESTWORK')
+
+ data_file = pkgdatadir + d.expand("/${PN}")
+ with open(data_file, 'w') as fd:
+ fd.write("PACKAGES: %s\n" % packages)
+
+ pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []
+
+ pn = d.getVar('PN')
+ global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
+ variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
+
+ if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
+ write_extra_pkgs(variants, pn, packages, pkgdatadir)
+
+ if bb.data.inherits_class('allarch', d) and not variants \
+ and not bb.data.inherits_class('packagegroup', d):
+ write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
+
+ workdir = d.getVar('WORKDIR')
+
+ for pkg in packages.split():
+ pkgval = d.getVar('PKG:%s' % pkg)
+ if pkgval is None:
+ pkgval = pkg
+ d.setVar('PKG:%s' % pkg, pkg)
+
+ extended_data = {
+ "files_info": {}
+ }
+
+ pkgdestpkg = os.path.join(pkgdest, pkg)
+ files = {}
+ files_extra = {}
+ total_size = 0
+ seen = set()
+ for f in pkgfiles[pkg]:
+ fpath = os.sep + os.path.relpath(f, pkgdestpkg)
+
+ fstat = os.lstat(f)
+ files[fpath] = fstat.st_size
+
+ extended_data["files_info"].setdefault(fpath, {})
+ extended_data["files_info"][fpath]['size'] = fstat.st_size
+
+ if fstat.st_ino not in seen:
+ seen.add(fstat.st_ino)
+ total_size += fstat.st_size
+
+ if fpath in pkgdebugsource:
+ extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
+ del pkgdebugsource[fpath]
+
+ d.setVar('FILES_INFO:' + pkg , json.dumps(files, sort_keys=True))
+
+ process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
+ add_set_e_to_scriptlets(pkg)
+
+ subdata_file = pkgdatadir + "/runtime/%s" % pkg
+ with open(subdata_file, 'w') as sf:
+ for var in (d.getVar('PKGDATA_VARS') or "").split():
+ val = write_if_exists(sf, pkg, var)
+
+ write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
+ for dfile in sorted((d.getVar('FILERPROVIDESFLIST:' + pkg) or "").split()):
+ write_if_exists(sf, pkg, 'FILERPROVIDES:' + dfile)
+
+ write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
+ for dfile in sorted((d.getVar('FILERDEPENDSFLIST:' + pkg) or "").split()):
+ write_if_exists(sf, pkg, 'FILERDEPENDS:' + dfile)
+
+ sf.write('%s:%s: %d\n' % ('PKGSIZE', pkg, total_size))
+
+ subdata_extended_file = pkgdatadir + "/extended/%s.json.zstd" % pkg
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+ with bb.compress.zstd.open(subdata_extended_file, "wt", encoding="utf-8", num_threads=num_threads) as f:
+ json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))
+
+ # Symlinks needed for rprovides lookup
+ rprov = d.getVar('RPROVIDES:%s' % pkg) or d.getVar('RPROVIDES')
+ if rprov:
+ for p in bb.utils.explode_deps(rprov):
+ subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
+ bb.utils.mkdirhier(os.path.dirname(subdata_sym))
+ oe.path.relsymlink(subdata_file, subdata_sym, True)
+
+ allow_empty = d.getVar('ALLOW_EMPTY:%s' % pkg)
+ if not allow_empty:
+ allow_empty = d.getVar('ALLOW_EMPTY')
+ root = "%s/%s" % (pkgdest, pkg)
+ os.chdir(root)
+ g = glob('*')
+ if g or allow_empty == "1":
+ # Symlinks needed for reverse lookups (from the final package name)
+ subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
+ oe.path.relsymlink(subdata_file, subdata_sym, True)
+
+ packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
+ open(packagedfile, 'w').close()
+
+ if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
+ write_extra_runtime_pkgs(variants, packages, pkgdatadir)
+
+ if bb.data.inherits_class('allarch', d) and not variants \
+ and not bb.data.inherits_class('packagegroup', d):
+ write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
+
+def mapping_rename_hook(d):
+ """
+ Rewrite variables to account for package renaming in things
+ like debian.bbclass or manual PKG variable name changes
+ """
+ pkg = d.getVar("PKG")
+ oe.packagedata.runtime_mapping_rename("RDEPENDS", pkg, d)
+ oe.packagedata.runtime_mapping_rename("RRECOMMENDS", pkg, d)
+ oe.packagedata.runtime_mapping_rename("RSUGGESTS", pkg, d)
diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py
index 4bc5d3e4bb..7b7594751a 100644
--- a/meta/lib/oe/packagegroup.py
+++ b/meta/lib/oe/packagegroup.py
@@ -1,17 +1,17 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import itertools
def is_optional(feature, d):
- packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
- if packages:
- return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
- else:
- return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional"))
+ return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
def packages(features, d):
for feature in features:
packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
- if not packages:
- packages = d.getVar("PACKAGE_GROUP_%s" % feature)
for pkg in (packages or "").split():
yield pkg
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py
index f1ab3dd809..60a0cc8291 100644
--- a/meta/lib/oe/patch.py
+++ b/meta/lib/oe/patch.py
@@ -1,4 +1,14 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import shlex
+import subprocess
import oe.path
+import oe.types
class NotFoundError(bb.BBHandledException):
def __init__(self, path):
@@ -19,8 +29,6 @@ class CmdError(bb.BBHandledException):
def runcmd(args, dir = None):
- import pipes
-
if dir:
olddir = os.path.abspath(os.curdir)
if not os.path.exists(dir):
@@ -29,18 +37,28 @@ def runcmd(args, dir = None):
# print("cwd: %s -> %s" % (olddir, dir))
try:
- args = [ pipes.quote(str(arg)) for arg in args ]
+ args = [ shlex.quote(str(arg)) for arg in args ]
cmd = " ".join(args)
# print("cmd: %s" % cmd)
- (exitstatus, output) = oe.utils.getstatusoutput(cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ stdout, stderr = proc.communicate()
+ stdout = stdout.decode('utf-8')
+ stderr = stderr.decode('utf-8')
+ exitstatus = proc.returncode
if exitstatus != 0:
- raise CmdError(cmd, exitstatus >> 8, output)
- return output
+ raise CmdError(cmd, exitstatus >> 8, "stdout: %s\nstderr: %s" % (stdout, stderr))
+ if " fuzz " in stdout and "Hunk " in stdout:
+ # Log the patch fuzz info, wrapped in a header and footer, so that
+ # insane.bbclass can detect it and raise an error or warning
+ bb.note("--- Patch fuzz start ---\n%s\n--- Patch fuzz end ---" % format(stdout))
+
+ return stdout
finally:
if dir:
os.chdir(olddir)
+
class PatchError(Exception):
def __init__(self, msg):
self.msg = msg
@@ -199,7 +217,7 @@ class PatchTree(PatchSet):
with open(self.seriespath, 'w') as f:
for p in patches:
f.write(p)
-
+
def Import(self, patch, force = None):
""""""
PatchSet.Import(self, patch, force)
@@ -211,7 +229,7 @@ class PatchTree(PatchSet):
self.patches.insert(i, patch)
def _applypatch(self, patch, force = False, reverse = False, run = True):
- shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
+ shellcmd = ["cat", patch['file'], "|", "patch", "--no-backup-if-mismatch", "-p", patch['strippath']]
if reverse:
shellcmd.append('-R')
@@ -276,13 +294,32 @@ class PatchTree(PatchSet):
self.Pop(all=True)
class GitApplyTree(PatchTree):
- patch_line_prefix = '%% original patch'
- ignore_commit_prefix = '%% ignore'
+ notes_ref = "refs/notes/devtool"
+ original_patch = 'original patch'
+ ignore_commit = 'ignore'
def __init__(self, dir, d):
PatchTree.__init__(self, dir, d)
self.commituser = d.getVar('PATCH_GIT_USER_NAME')
self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
+ if not self._isInitialized(d):
+ self._initRepo()
+
+ def _isInitialized(self, d):
+ cmd = "git rev-parse --show-toplevel"
+ try:
+ output = runcmd(cmd.split(), self.dir).strip()
+ except CmdError as err:
+ ## runcmd returned non-zero which most likely means 128
+ ## Not a git directory
+ return False
+ ## Make sure the repo is the build dir itself or under workdir, so we don't break top-level git repos
+ return os.path.samefile(output, self.dir) or oe.path.is_path_parent(d.getVar('WORKDIR'), output)
+
+ def _initRepo(self):
+ runcmd("git init".split(), self.dir)
+ runcmd("git add .".split(), self.dir)
+ runcmd("git commit -a --allow-empty -m bitbake_patching_started".split(), self.dir)
@staticmethod
def extractPatchHeader(patchfile):
@@ -316,8 +353,8 @@ class GitApplyTree(PatchTree):
@staticmethod
def interpretPatchHeader(headerlines):
import re
- author_re = re.compile('[\S ]+ <\S+@\S+\.\S+>')
- from_commit_re = re.compile('^From [a-z0-9]{40} .*')
+ author_re = re.compile(r'[\S ]+ <\S+@\S+\.\S+>')
+ from_commit_re = re.compile(r'^From [a-z0-9]{40} .*')
outlines = []
author = None
date = None
@@ -405,7 +442,7 @@ class GitApplyTree(PatchTree):
date = newdate
if not subject:
subject = newsubject
- if subject and outlines and not outlines[0].strip() == subject:
+ if subject and not (outlines and outlines[0].strip() == subject):
outlines.insert(0, '%s\n\n' % subject.strip())
# Write out commit message to a file
@@ -416,7 +453,7 @@ class GitApplyTree(PatchTree):
# Prepare git command
cmd = ["git"]
GitApplyTree.gitCommandUserOptions(cmd, commituser, commitemail)
- cmd += ["commit", "-F", tmpfile]
+ cmd += ["commit", "-F", tmpfile, "--no-verify"]
# git doesn't like plain email addresses as authors
if author and '<' in author:
cmd.append('--author="%s"' % author)
@@ -425,48 +462,131 @@ class GitApplyTree(PatchTree):
return (tmpfile, cmd)
@staticmethod
- def extractPatches(tree, startcommit, outdir, paths=None):
+ def addNote(repo, ref, key, value=None):
+ note = key + (": %s" % value if value else "")
+ notes_ref = GitApplyTree.notes_ref
+ runcmd(["git", "config", "notes.rewriteMode", "ignore"], repo)
+ runcmd(["git", "config", "notes.displayRef", notes_ref, notes_ref], repo)
+ runcmd(["git", "config", "notes.rewriteRef", notes_ref, notes_ref], repo)
+ runcmd(["git", "notes", "--ref", notes_ref, "append", "-m", note, ref], repo)
+
+ @staticmethod
+ def removeNote(repo, ref, key):
+ notes = GitApplyTree.getNotes(repo, ref)
+ notes = {k: v for k, v in notes.items() if k != key and not k.startswith(key + ":")}
+ runcmd(["git", "notes", "--ref", GitApplyTree.notes_ref, "remove", "--ignore-missing", ref], repo)
+ for note, value in notes.items():
+ GitApplyTree.addNote(repo, ref, note, value)
+
+ @staticmethod
+ def getNotes(repo, ref):
+ import re
+
+ note = None
+ try:
+ note = runcmd(["git", "notes", "--ref", GitApplyTree.notes_ref, "show", ref], repo)
+ prefix = ""
+ except CmdError:
+ note = runcmd(['git', 'show', '-s', '--format=%B', ref], repo)
+ prefix = "%% "
+
+ note_re = re.compile(r'^%s(.*?)(?::\s*(.*))?$' % prefix)
+ notes = dict()
+ for line in note.splitlines():
+ m = note_re.match(line)
+ if m:
+ notes[m.group(1)] = m.group(2)
+
+ return notes
+
+ @staticmethod
+ def commitIgnored(subject, dir=None, files=None, d=None):
+ if files:
+ runcmd(['git', 'add'] + files, dir)
+ cmd = ["git"]
+ GitApplyTree.gitCommandUserOptions(cmd, d=d)
+ cmd += ["commit", "-m", subject, "--no-verify"]
+ runcmd(cmd, dir)
+ GitApplyTree.addNote(dir, "HEAD", GitApplyTree.ignore_commit)
+
+ @staticmethod
+ def extractPatches(tree, startcommits, outdir, paths=None):
import tempfile
import shutil
- import re
tempdir = tempfile.mkdtemp(prefix='oepatch')
try:
- shellcmd = ["git", "format-patch", startcommit, "-o", tempdir]
- if paths:
- shellcmd.append('--')
- shellcmd.extend(paths)
- out = runcmd(["sh", "-c", " ".join(shellcmd)], tree)
- if out:
- for srcfile in out.split():
- for encoding in ['utf-8', 'latin-1']:
- patchlines = []
- outfile = None
- try:
- with open(srcfile, 'r', encoding=encoding) as f:
- for line in f:
- checkline = line
- if checkline.startswith('Subject: '):
- checkline = re.sub(r'\[.+?\]\s*', '', checkline[9:])
- if checkline.startswith(GitApplyTree.patch_line_prefix):
- outfile = line.split()[-1].strip()
- continue
- if checkline.startswith(GitApplyTree.ignore_commit_prefix):
- continue
- patchlines.append(line)
- except UnicodeDecodeError:
+ for name, rev in startcommits.items():
+ shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", rev, "-o", tempdir]
+ if paths:
+ shellcmd.append('--')
+ shellcmd.extend(paths)
+ out = runcmd(["sh", "-c", " ".join(shellcmd)], os.path.join(tree, name))
+ if out:
+ for srcfile in out.split():
+ # This loop, which is used to remove any line that
+ # starts with "%% original patch", is kept for backwards
+ # compatibility. If/when that compatibility is dropped,
+ # it can be replaced with code to just read the first
+ # line of the patch file to get the SHA-1, and the code
+ # below that writes the modified patch file can be
+ # replaced with a simple file move.
+ for encoding in ['utf-8', 'latin-1']:
+ patchlines = []
+ try:
+ with open(srcfile, 'r', encoding=encoding, newline='') as f:
+ for line in f:
+ if line.startswith("%% " + GitApplyTree.original_patch):
+ continue
+ patchlines.append(line)
+ except UnicodeDecodeError:
+ continue
+ break
+ else:
+ raise PatchError('Unable to find a character encoding to decode %s' % srcfile)
+
+ sha1 = patchlines[0].split()[1]
+ notes = GitApplyTree.getNotes(os.path.join(tree, name), sha1)
+ if GitApplyTree.ignore_commit in notes:
continue
- break
- else:
- raise PatchError('Unable to find a character encoding to decode %s' % srcfile)
-
- if not outfile:
- outfile = os.path.basename(srcfile)
- with open(os.path.join(outdir, outfile), 'w') as of:
- for line in patchlines:
- of.write(line)
+ outfile = notes.get(GitApplyTree.original_patch, os.path.basename(srcfile))
+
+ bb.utils.mkdirhier(os.path.join(outdir, name))
+ with open(os.path.join(outdir, name, outfile), 'w') as of:
+ for line in patchlines:
+ of.write(line)
finally:
shutil.rmtree(tempdir)
+ def _need_dirty_check(self):
+ fetch = bb.fetch2.Fetch([], self.d)
+ check_dirtyness = False
+ for url in fetch.urls:
+ url_data = fetch.ud[url]
+ parm = url_data.parm
+ # a git url with subpath param will surely be dirty
+ # since the git tree from which we clone will be emptied
+ # of all files that are not in the subpath
+ if url_data.type == 'git' and parm.get('subpath'):
+ check_dirtyness = True
+ return check_dirtyness
+
+ def _commitpatch(self, patch, patchfilevar):
+ output = ""
+ # Add all files
+ shellcmd = ["git", "add", "-f", "-A", "."]
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Exclude the patches directory
+ shellcmd = ["git", "reset", "HEAD", self.patchdir]
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Commit the result
+ (tmpfile, shellcmd) = self.prepareCommit(patch['file'], self.commituser, self.commitemail)
+ try:
+ shellcmd.insert(0, patchfilevar)
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ finally:
+ os.remove(tmpfile)
+ return output
+
def _applypatch(self, patch, force = False, reverse = False, run = True):
import shutil
@@ -481,32 +601,30 @@ class GitApplyTree(PatchTree):
return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- # Add hooks which add a pointer to the original patch file name in the commit message
reporoot = (runcmd("git rev-parse --show-toplevel".split(), self.dir) or '').strip()
if not reporoot:
raise Exception("Cannot get repository root for directory %s" % self.dir)
- hooks_dir = os.path.join(reporoot, '.git', 'hooks')
- hooks_dir_backup = hooks_dir + '.devtool-orig'
- if os.path.lexists(hooks_dir_backup):
- raise Exception("Git hooks backup directory already exists: %s" % hooks_dir_backup)
- if os.path.lexists(hooks_dir):
- shutil.move(hooks_dir, hooks_dir_backup)
- os.mkdir(hooks_dir)
- commithook = os.path.join(hooks_dir, 'commit-msg')
- applyhook = os.path.join(hooks_dir, 'applypatch-msg')
- with open(commithook, 'w') as f:
- # NOTE: the formatting here is significant; if you change it you'll also need to
- # change other places which read it back
- f.write('echo >> $1\n')
- f.write('echo "%s: $PATCHFILE" >> $1\n' % GitApplyTree.patch_line_prefix)
- os.chmod(commithook, 0o755)
- shutil.copy2(commithook, applyhook)
+
+ patch_applied = True
try:
patchfilevar = 'PATCHFILE="%s"' % os.path.basename(patch['file'])
+ if self._need_dirty_check():
+ # Check dirtyness of the tree
+ try:
+ output = runcmd(["git", "--work-tree=%s" % reporoot, "status", "--short"])
+ except CmdError:
+ pass
+ else:
+ if output:
+ # The tree is dirty, no need to try to apply patches with git anymore
+ # since they fail, fallback directly to patch
+ output = PatchTree._applypatch(self, patch, force, reverse, run)
+ output += self._commitpatch(patch, patchfilevar)
+ return output
try:
shellcmd = [patchfilevar, "git", "--work-tree=%s" % reporoot]
self.gitCommandUserOptions(shellcmd, self.commituser, self.commitemail)
- shellcmd += ["am", "-3", "--keep-cr", "-p%s" % patch['strippath']]
+ shellcmd += ["am", "-3", "--keep-cr", "--no-scissors", "-p%s" % patch['strippath']]
return _applypatchhelper(shellcmd, patch, force, reverse, run)
except CmdError:
# Need to abort the git am, or we'll still be within it at the end
@@ -529,24 +647,14 @@ class GitApplyTree(PatchTree):
except CmdError:
# Fall back to patch
output = PatchTree._applypatch(self, patch, force, reverse, run)
- # Add all files
- shellcmd = ["git", "add", "-f", "-A", "."]
- output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- # Exclude the patches directory
- shellcmd = ["git", "reset", "HEAD", self.patchdir]
- output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- # Commit the result
- (tmpfile, shellcmd) = self.prepareCommit(patch['file'], self.commituser, self.commitemail)
- try:
- shellcmd.insert(0, patchfilevar)
- output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- finally:
- os.remove(tmpfile)
+ output += self._commitpatch(patch, patchfilevar)
return output
+ except:
+ patch_applied = False
+ raise
finally:
- shutil.rmtree(hooks_dir)
- if os.path.lexists(hooks_dir_backup):
- shutil.move(hooks_dir_backup, hooks_dir)
+ if patch_applied:
+ GitApplyTree.addNote(self.dir, "HEAD", GitApplyTree.original_patch, os.path.basename(patch['file']))
class QuiltTree(PatchSet):
@@ -569,6 +677,8 @@ class QuiltTree(PatchSet):
def Clean(self):
try:
+ # make sure that patches/series file exists before quilt pop to keep quilt-0.67 happy
+ open(os.path.join(self.dir, "patches","series"), 'a').close()
self._runcmd(["pop", "-a", "-f"])
oe.path.remove(os.path.join(self.dir, "patches","series"))
except Exception:
@@ -705,8 +815,9 @@ class NOOPResolver(Resolver):
self.patchset.Push()
except Exception:
import sys
- os.chdir(olddir)
raise
+ finally:
+ os.chdir(olddir)
# Patch resolver which relies on the user doing all the work involved in the
# resolution, with the exception of refreshing the remote copy of the patch
@@ -766,15 +877,17 @@ class UserResolver(Resolver):
# User did not fix the problem. Abort.
raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
except Exception:
- os.chdir(olddir)
raise
- os.chdir(olddir)
+ finally:
+ os.chdir(olddir)
def patch_path(url, fetch, workdir, expand=True):
- """Return the local path of a patch, or None if this isn't a patch"""
+ """Return the local path of a patch, or return nothing if this isn't a patch"""
local = fetch.localpath(url)
+ if os.path.isdir(local):
+ return
base, ext = os.path.splitext(os.path.basename(local))
if ext in ('.gz', '.bz2', '.xz', '.Z'):
if expand:
@@ -838,6 +951,7 @@ def src_patches(d, all=False, expand=True):
def should_apply(parm, d):
+ import bb.utils
if "mindate" in parm or "maxdate" in parm:
pn = d.getVar('PN')
srcdate = d.getVar('SRCDATE_%s' % pn)
@@ -874,5 +988,14 @@ def should_apply(parm, d):
if srcrev and parm["notrev"] in srcrev:
return False, "doesn't apply to revision"
- return True, None
+ if "maxver" in parm:
+ pv = d.getVar('PV')
+ if bb.utils.vercmp_string_op(pv, parm["maxver"], ">"):
+ return False, "applies to earlier version"
+
+ if "minver" in parm:
+ pv = d.getVar('PV')
+ if bb.utils.vercmp_string_op(pv, parm["minver"], "<"):
+ return False, "applies to later version"
+ return True, None
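
The new minver/maxver SRC_URI parameters gate a patch on the recipe's PV, alongside the existing mindate/maxdate and minrev/maxrev checks. A sketch of the comparison, using bitbake's bb.utils.vercmp_string_op with invented values (this only runs in a bitbake-aware environment):

import bb.utils

# e.g. SRC_URI += "file://backport.patch;minver=2.0;maxver=2.4"  (file name invented)
pv = "2.5"
parm = {"minver": "2.0", "maxver": "2.4"}
if "maxver" in parm and bb.utils.vercmp_string_op(pv, parm["maxver"], ">"):
    print("not applying: applies to earlier version")
if "minver" in parm and bb.utils.vercmp_string_op(pv, parm["minver"], "<"):
    print("not applying: applies to later version")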
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py
index 1ea03d5d56..5d21cdcbdf 100644
--- a/meta/lib/oe/path.py
+++ b/meta/lib/oe/path.py
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import errno
import glob
import shutil
@@ -86,25 +92,41 @@ def copytree(src, dst):
# This way we also preserve hardlinks between files in the tree.
bb.utils.mkdirhier(dst)
- cmd = "tar --xattrs --xattrs-include='*' -cf - -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
+ cmd = "tar --xattrs --xattrs-include='*' -cf - -S -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
def copyhardlinktree(src, dst):
- """ Make the hard link when possible, otherwise copy. """
+ """Make a tree of hard links when possible, otherwise copy."""
bb.utils.mkdirhier(dst)
if os.path.isdir(src) and not len(os.listdir(src)):
return
- if (os.stat(src).st_dev == os.stat(dst).st_dev):
+ canhard = False
+ testfile = None
+ for root, dirs, files in os.walk(src):
+ if len(files):
+ testfile = os.path.join(root, files[0])
+ break
+
+ if testfile is not None:
+ try:
+ os.link(testfile, os.path.join(dst, 'testfile'))
+ os.unlink(os.path.join(dst, 'testfile'))
+ canhard = True
+ except Exception as e:
+ bb.debug(2, "Hardlink test failed with " + str(e))
+
+ if (canhard):
# Need to copy directories only with tar first since cp will error if two
# writers try and create a directory at the same time
- cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst)
+ cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -S -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
source = ''
if os.path.isdir(src):
if len(glob.glob('%s/.??*' % src)) > 0:
source = './.??* '
- source += './*'
+ if len(glob.glob('%s/**' % src)) > 0:
+ source += './*'
s_dir = src
else:
source = src
@@ -114,6 +136,14 @@ def copyhardlinktree(src, dst):
else:
copytree(src, dst)
+def copyhardlink(src, dst):
+ """Make a hard link when possible, otherwise copy."""
+
+ try:
+ os.link(src, dst)
+ except OSError:
+ shutil.copy(src, dst)
+
def remove(path, recurse=True):
"""
Equivalent to rm -f or rm -rf
@@ -142,6 +172,9 @@ def symlink(source, destination, force=False):
if e.errno != errno.EEXIST or os.readlink(destination) != source:
raise
+def relsymlink(target, name, force=False):
+ symlink(os.path.relpath(target, os.path.dirname(name)), name, force=force)
+
def find(dir, **walkoptions):
""" Given a directory, recurses into that directory,
returning all files as absolute paths. """
@@ -237,3 +270,80 @@ def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False)
raise
return file
+
+def is_path_parent(possible_parent, *paths):
+ """
+ Return True if a path is the parent of another, False otherwise.
+ Multiple paths to test can be specified in which case all
+ specified test paths must be under the parent in order to
+ return True.
+ """
+ def abs_path_trailing(pth):
+ pth_abs = os.path.abspath(pth)
+ if not pth_abs.endswith(os.sep):
+ pth_abs += os.sep
+ return pth_abs
+
+ possible_parent_abs = abs_path_trailing(possible_parent)
+ if not paths:
+ return False
+ for path in paths:
+ path_abs = abs_path_trailing(path)
+ if not path_abs.startswith(possible_parent_abs):
+ return False
+ return True
+
+def which_wild(pathname, path=None, mode=os.F_OK, *, reverse=False, candidates=False):
+ """Search a search path for pathname, supporting wildcards.
+
+ Return all paths in the specified search path matching the wildcard pattern
+ in pathname, returning only the first encountered for each file. If
+ candidates is True, information on all potential candidate paths is
+ included.
+ """
+ paths = (path or os.environ.get('PATH', os.defpath)).split(':')
+ if reverse:
+ paths.reverse()
+
+ seen, files = set(), []
+ for index, element in enumerate(paths):
+ if not os.path.isabs(element):
+ element = os.path.abspath(element)
+
+ candidate = os.path.join(element, pathname)
+ globbed = glob.glob(candidate)
+ if globbed:
+ for found_path in sorted(globbed):
+ if not os.access(found_path, mode):
+ continue
+ rel = os.path.relpath(found_path, element)
+ if rel not in seen:
+ seen.add(rel)
+ if candidates:
+ files.append((found_path, [os.path.join(p, rel) for p in paths[:index+1]]))
+ else:
+ files.append(found_path)
+
+ return files
+
+def canonicalize(paths, sep=','):
+ """Given a string with paths (separated by commas by default), expand
+ each path using os.path.realpath() and return the resulting paths as a
+ string (separated using the same separator as the original string).
+ """
+ # Ignore paths containing "$" as they are assumed to be unexpanded bitbake
+ # variables. Normally they would be ignored, e.g., when passing the paths
+ # through the shell they would expand to empty strings. However, when they
+ # are passed through os.path.realpath(), it will cause them to be prefixed
+ # with the absolute path to the current directory and thus not be empty
+ # anymore.
+ #
+ # Also maintain trailing slashes, as the paths may actually be used as
+ # prefixes in string compares later on, where the slashes then are important.
+ canonical_paths = []
+ for path in (paths or '').split(sep):
+ if '$' not in path:
+ trailing_slash = path.endswith('/') and '/' or ''
+ canonical_paths.append(os.path.realpath(path) + trailing_slash)
+
+ return sep.join(canonical_paths)
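
A short usage sketch for the helpers added above (illustrative only; it assumes meta/lib is on PYTHONPATH, as it is inside a bitbake environment):

import oe.path

# is_path_parent() compares paths lexically after os.path.abspath(), and all
# test paths must fall under the candidate parent.
assert oe.path.is_path_parent("/usr", "/usr/lib", "/usr/bin/gcc")
assert not oe.path.is_path_parent("/usr", "/etc/passwd")

# canonicalize() realpath()s each comma-separated entry, keeps trailing
# slashes, and drops entries containing "$" (unexpanded bitbake variables).
print(oe.path.canonicalize("/usr/../usr/,${TOPDIR}/conf"))   # -> "/usr/" on a typical layout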
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py
index 32dfc15e88..c41242c878 100644
--- a/meta/lib/oe/prservice.py
+++ b/meta/lib/oe/prservice.py
@@ -1,14 +1,18 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
def prserv_make_conn(d, check = False):
import prserv.serv
host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
try:
conn = None
- conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
+ conn = prserv.serv.connect(host_params[0], int(host_params[1]))
if check:
if not conn.ping():
raise Exception('service not available')
- d.setVar("__PRSERV_CONN",conn)
except Exception as exc:
bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))
@@ -19,31 +23,29 @@ def prserv_dump_db(d):
bb.error("Not using network based PR service")
return None
- conn = d.getVar("__PRSERV_CONN")
+ conn = prserv_make_conn(d)
if conn is None:
- conn = prserv_make_conn(d)
- if conn is None:
- bb.error("Making connection failed to remote PR service")
- return None
+ bb.error("Making connection failed to remote PR service")
+ return None
#dump db
opt_version = d.getVar('PRSERV_DUMPOPT_VERSION')
opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH')
opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM')
opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL'))
- return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
+ d = conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
+ conn.close()
+ return d
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
if not d.getVar('PRSERV_HOST'):
bb.error("Not using network based PR service")
return None
- conn = d.getVar("__PRSERV_CONN")
+ conn = prserv_make_conn(d)
if conn is None:
- conn = prserv_make_conn(d)
- if conn is None:
- bb.error("Making connection failed to remote PR service")
- return None
+ bb.error("Making connection failed to remote PR service")
+ return None
#get the entry values
imported = []
prefix = "PRAUTO$"
@@ -67,6 +69,7 @@ def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksu
bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
else:
imported.append((version,pkgarch,checksum,value))
+ conn.close()
return imported
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
@@ -75,43 +78,40 @@ def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR'))
df = d.getVar('PRSERV_DUMPFILE')
#write data
- lf = bb.utils.lockfile("%s.lock" % df)
- f = open(df, "a")
- if metainfo:
- #dump column info
- f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']);
- f.write("#Table: %s\n" % metainfo['tbl_name'])
- f.write("#Columns:\n")
- f.write("#name \t type \t notn \t dflt \t pk\n")
- f.write("#----------\t --------\t --------\t --------\t ----\n")
- for i in range(len(metainfo['col_info'])):
- f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
- (metainfo['col_info'][i]['name'],
- metainfo['col_info'][i]['type'],
- metainfo['col_info'][i]['notnull'],
- metainfo['col_info'][i]['dflt_value'],
- metainfo['col_info'][i]['pk']))
- f.write("\n")
+ with open(df, "a") as f, bb.utils.fileslocked(["%s.lock" % df]) as locks:
+ if metainfo:
+ #dump column info
+ f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']);
+ f.write("#Table: %s\n" % metainfo['tbl_name'])
+ f.write("#Columns:\n")
+ f.write("#name \t type \t notn \t dflt \t pk\n")
+ f.write("#----------\t --------\t --------\t --------\t ----\n")
+ for i in range(len(metainfo['col_info'])):
+ f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
+ (metainfo['col_info'][i]['name'],
+ metainfo['col_info'][i]['type'],
+ metainfo['col_info'][i]['notnull'],
+ metainfo['col_info'][i]['dflt_value'],
+ metainfo['col_info'][i]['pk']))
+ f.write("\n")
- if lockdown:
- f.write("PRSERV_LOCKDOWN = \"1\"\n\n")
+ if lockdown:
+ f.write("PRSERV_LOCKDOWN = \"1\"\n\n")
- if datainfo:
- idx = {}
- for i in range(len(datainfo)):
- pkgarch = datainfo[i]['pkgarch']
- value = datainfo[i]['value']
- if pkgarch not in idx:
- idx[pkgarch] = i
- elif value > datainfo[idx[pkgarch]]['value']:
- idx[pkgarch] = i
- f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
- (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
- if not nomax:
- for i in idx:
- f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
- f.close()
- bb.utils.unlockfile(lf)
+ if datainfo:
+ idx = {}
+ for i in range(len(datainfo)):
+ pkgarch = datainfo[i]['pkgarch']
+ value = datainfo[i]['value']
+ if pkgarch not in idx:
+ idx[pkgarch] = i
+ elif value > datainfo[idx[pkgarch]]['value']:
+ idx[pkgarch] = i
+ f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
+ (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
+ if not nomax:
+ for i in idx:
+ f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
def prserv_check_avail(d):
host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
@@ -123,4 +123,5 @@ def prserv_check_avail(d):
except TypeError:
bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
else:
- prserv_make_conn(d, True)
+ conn = prserv_make_conn(d, True)
+ conn.close()
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py
index 3231e60cea..f8ae3c743f 100644
--- a/meta/lib/oe/qa.py
+++ b/meta/lib/oe/qa.py
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import os, struct, mmap
class NotELFFileError(Exception):
@@ -37,13 +43,18 @@ class ELFFile:
def __init__(self, name):
self.name = name
self.objdump_output = {}
+ self.data = None
# Context Manager functions to close the mmap explicitly
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
- self.data.close()
+ self.close()
+
+ def close(self):
+ if self.data:
+ self.data.close()
def open(self):
with open(self.name, "rb") as f:
@@ -122,6 +133,9 @@ class ELFFile:
"""
return self.getShort(ELFFile.E_MACHINE)
+ def set_objdump(self, cmd, output):
+ self.objdump_output[cmd] = output
+
def run_objdump(self, cmd, d):
import bb.process
import sys
@@ -150,6 +164,7 @@ def elf_machine_to_string(machine):
"""
try:
return {
+ 0x00: "Unset",
0x02: "SPARC",
0x03: "x86",
0x08: "MIPS",
@@ -158,11 +173,63 @@ def elf_machine_to_string(machine):
0x2A: "SuperH",
0x32: "IA-64",
0x3E: "x86-64",
- 0xB7: "AArch64"
+ 0xB7: "AArch64",
+ 0xF7: "BPF"
}[machine]
except:
return "Unknown (%s)" % repr(machine)
+def write_error(type, error, d):
+ logfile = d.getVar('QA_LOGFILE')
+ if logfile:
+ p = d.getVar('P')
+ with open(logfile, "a+") as f:
+ f.write("%s: %s [%s]\n" % (p, error, type))
+
+def handle_error(error_class, error_msg, d):
+ if error_class in (d.getVar("ERROR_QA") or "").split():
+ write_error(error_class, error_msg, d)
+ bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
+ d.setVar("QA_ERRORS_FOUND", "True")
+ return False
+ elif error_class in (d.getVar("WARN_QA") or "").split():
+ write_error(error_class, error_msg, d)
+ bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
+ else:
+ bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
+ return True
+
+def add_message(messages, section, new_msg):
+ if section not in messages:
+ messages[section] = new_msg
+ else:
+ messages[section] = messages[section] + "\n" + new_msg
+
+def exit_with_message_if_errors(message, d):
+ qa_fatal_errors = bb.utils.to_boolean(d.getVar("QA_ERRORS_FOUND"), False)
+ if qa_fatal_errors:
+ bb.fatal(message)
+
+def exit_if_errors(d):
+ exit_with_message_if_errors("Fatal QA errors were found, failing task.", d)
+
+def check_upstream_status(fullpath):
+ import re
+ kinda_status_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
+ strict_status_re = re.compile(r"^Upstream-Status: (Pending|Submitted|Denied|Inappropriate|Backport|Inactive-Upstream)( .+)?$", re.MULTILINE)
+ guidelines = "https://docs.yoctoproject.org/contributor-guide/recipe-style-guide.html#patch-upstream-status"
+
+ with open(fullpath, encoding='utf-8', errors='ignore') as f:
+ file_content = f.read()
+ match_kinda = kinda_status_re.search(file_content)
+ match_strict = strict_status_re.search(file_content)
+
+ if not match_strict:
+ if match_kinda:
+ return "Malformed Upstream-Status in patch\n%s\nPlease correct according to %s :\n%s" % (fullpath, guidelines, match_kinda.group(0))
+ else:
+ return "Missing Upstream-Status in patch\n%s\nPlease add according to %s ." % (fullpath, guidelines)
+
if __name__ == "__main__":
import sys
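
check_upstream_status() above returns None for a well-formed Upstream-Status line and a diagnostic string otherwise. A small sketch against a temporary patch file with invented contents:

import tempfile
import oe.qa

# Write a patch header containing a valid status line, then check it.
with tempfile.NamedTemporaryFile("w", suffix=".patch", delete=False) as f:
    f.write("Upstream-Status: Backport [https://example.com/commit]\n---\n")
    path = f.name

assert oe.qa.check_upstream_status(path) is None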
diff --git a/meta/lib/oe/recipeutils.py b/meta/lib/oe/recipeutils.py
index 4e0859e6d9..de1fbdd3a8 100644
--- a/meta/lib/oe/recipeutils.py
+++ b/meta/lib/oe/recipeutils.py
@@ -4,6 +4,8 @@
#
# Copyright (C) 2013-2017 Intel Corporation
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys
import os
@@ -16,40 +18,40 @@ import shutil
import re
import fnmatch
import glob
-from collections import OrderedDict, defaultdict
+import bb.tinfoil
+from collections import OrderedDict, defaultdict
+from bb.utils import vercmp_string
# Help us to find places to insert values
-recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()']
+recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()', 'BBCLASSEXTEND']
# Variables that sometimes are a bit long but shouldn't be wrapped
-nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', 'SRC_URI\[(.+\.)?md5sum\]', 'SRC_URI\[(.+\.)?sha256sum\]']
+nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha[0-9]+sum\]']
list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
-def pn_to_recipe(cooker, pn, mc=''):
- """Convert a recipe name (PN) to the path to the recipe file"""
-
- best = cooker.findBestProvider(pn, mc)
- return best[3]
-
-
-def get_unavailable_reasons(cooker, pn):
- """If a recipe could not be found, find out why if possible"""
- import bb.taskdata
- taskdata = bb.taskdata.TaskData(None, skiplist=cooker.skiplist)
- return taskdata.get_reasons(pn)
-
-
-def parse_recipe(cooker, fn, appendfiles):
+def simplify_history(history, d):
"""
- Parse an individual recipe file, optionally with a list of
- bbappend files.
+ Eliminate any irrelevant events from a variable history
"""
- import bb.cache
- parser = bb.cache.NoCache(cooker.databuilder)
- envdata = parser.loadDataFull(fn, appendfiles)
- return envdata
+ ret_history = []
+ has_set = False
+ # Go backwards through the history and remove any immediate operations
+ # before the most recent set
+ for event in reversed(history):
+ if 'flag' in event or not 'file' in event:
+ continue
+ if event['op'] == 'set':
+ if has_set:
+ continue
+ has_set = True
+ elif event['op'] in ('append', 'prepend', 'postdot', 'predot'):
+ # Reminder: "append" and "prepend" mean += and =+ respectively, NOT :append / :prepend
+ if has_set:
+ continue
+ ret_history.insert(0, event)
+ return ret_history
def get_var_files(fn, varlist, d):
@@ -58,11 +60,19 @@ def get_var_files(fn, varlist, d):
"""
varfiles = {}
for v in varlist:
- history = d.varhistory.variable(v)
files = []
- for event in history:
- if 'file' in event and not 'flag' in event:
- files.append(event['file'])
+ if '[' in v:
+ varsplit = v.split('[')
+ varflag = varsplit[1].split(']')[0]
+ history = d.varhistory.variable(varsplit[0])
+ for event in history:
+ if 'file' in event and event.get('flag', '') == varflag:
+ files.append(event['file'])
+ else:
+ history = d.varhistory.variable(v)
+ for event in history:
+ if 'file' in event and not 'flag' in event:
+ files.append(event['file'])
if files:
actualfile = files[-1]
else:
@@ -153,7 +163,7 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
key = item[:-2]
else:
key = item
- restr = '%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key
+ restr = r'%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key
if item.endswith('()'):
recipe_progression_restrs.append(restr + '()')
else:
@@ -176,7 +186,14 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
def outputvalue(name, lines, rewindcomments=False):
if values[name] is None:
return
- rawtext = '%s = "%s"%s' % (name, values[name], newline)
+ if isinstance(values[name], tuple):
+ op, value = values[name]
+ if op == '+=' and value.strip() == '':
+ return
+ else:
+ value = values[name]
+ op = '='
+ rawtext = '%s %s "%s"%s' % (name, op, value, newline)
addlines = []
nowrap = False
for nowrap_re in nowrap_vars_res:
@@ -186,10 +203,10 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
if nowrap:
addlines.append(rawtext)
elif name in list_vars:
- splitvalue = split_var_value(values[name], assignment=False)
+ splitvalue = split_var_value(value, assignment=False)
if len(splitvalue) > 1:
linesplit = ' \\\n' + (' ' * (len(name) + 4))
- addlines.append('%s = "%s%s"%s' % (name, linesplit.join(splitvalue), linesplit, newline))
+ addlines.append('%s %s "%s%s"%s' % (name, op, linesplit.join(splitvalue), linesplit, newline))
else:
addlines.append(rawtext)
else:
@@ -321,12 +338,47 @@ def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None
"""Modify a list of variable values in the specified recipe. Handles inc files if
used by the recipe.
"""
+ overrides = d.getVar('OVERRIDES').split(':')
+ def override_applicable(hevent):
+ op = hevent['op']
+ if '[' in op:
+ opoverrides = op.split('[')[1].split(']')[0].split(':')
+ for opoverride in opoverrides:
+ if not opoverride in overrides:
+ return False
+ return True
+
varlist = varvalues.keys()
+ fn = os.path.abspath(fn)
varfiles = get_var_files(fn, varlist, d)
locs = localise_file_vars(fn, varfiles, varlist)
patches = []
for f,v in locs.items():
vals = {k: varvalues[k] for k in v}
+ f = os.path.abspath(f)
+ if f == fn:
+ extravals = {}
+ for var, value in vals.items():
+ if var in list_vars:
+ history = simplify_history(d.varhistory.variable(var), d)
+ recipe_set = False
+ for event in history:
+ if os.path.abspath(event['file']) == fn:
+ if event['op'] == 'set':
+ recipe_set = True
+ if not recipe_set:
+ for event in history:
+ if event['op'].startswith(':remove'):
+ continue
+ if not override_applicable(event):
+ continue
+ newvalue = value.replace(event['detail'], '')
+ if newvalue == value and os.path.abspath(event['file']) == fn and event['op'].startswith(':'):
+ op = event['op'].replace('[', ':').replace(']', '')
+ extravals[var + op] = None
+ value = newvalue
+ vals[var] = ('+=', value)
+ vals.update(extravals)
patchdata = patch_recipe_file(f, vals, patch, relpath, redirect_output)
if patch:
patches.append(patchdata)
@@ -357,12 +409,12 @@ def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True, all_variants=F
fetch.download()
for pth in fetch.localpaths():
if pth not in localpaths:
- localpaths.append(pth)
+ localpaths.append(os.path.abspath(pth))
uri_values.append(srcuri)
fetch_urls(d)
if all_variants:
- # Get files for other variants e.g. in the case of a SRC_URI_append
+ # Get files for other variants e.g. in the case of a SRC_URI:append
localdata = bb.data.createCopy(d)
variants = (localdata.getVar('BBCLASSEXTEND') or '').split()
if variants:
@@ -408,7 +460,7 @@ def get_recipe_local_files(d, patches=False, archives=False):
# fetcher) though note that this only encompasses actual container formats
# i.e. that can contain multiple files as opposed to those that only
# contain a compressed stream (i.e. .tar.gz as opposed to just .gz)
- archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
+ archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.txz', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
ret = {}
for uri in uris:
if fetch.ud[uri].type == 'file':
@@ -432,7 +484,14 @@ def get_recipe_local_files(d, patches=False, archives=False):
unpack = fetch.ud[uri].parm.get('unpack', True)
if unpack:
continue
- ret[fname] = localpath
+ if os.path.isdir(localpath):
+ for root, dirs, files in os.walk(localpath):
+ for fname in files:
+ fileabspath = os.path.join(root,fname)
+ srcdir = os.path.dirname(localpath)
+ ret[os.path.relpath(fileabspath,srcdir)] = fileabspath
+ else:
+ ret[fname] = localpath
return ret
@@ -502,6 +561,23 @@ def get_bbfile_path(d, destdir, extrapathhint=None):
confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
pn = d.getVar('PN')
+ # Parse BBFILES_DYNAMIC and append to BBFILES
+ bbfiles_dynamic = (confdata.getVar('BBFILES_DYNAMIC') or "").split()
+ collections = (confdata.getVar('BBFILE_COLLECTIONS') or "").split()
+ invalid = []
+ for entry in bbfiles_dynamic:
+ parts = entry.split(":", 1)
+ if len(parts) != 2:
+ invalid.append(entry)
+ continue
+ l, f = parts
+ invert = l[0] == "!"
+ if invert:
+ l = l[1:]
+ if (l in collections and not invert) or (l not in collections and invert):
+ confdata.appendVar("BBFILES", " " + f)
+ if invalid:
+ return None
bbfilespecs = (confdata.getVar('BBFILES') or '').split()
if destdir == destlayerdir:
for bbfilespec in bbfilespecs:
@@ -588,19 +664,23 @@ def get_bbappend_path(d, destlayerdir, wildcardver=False):
return (appendpath, pathok)
-def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None):
+def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None, params=None, update_original_recipe=False):
"""
Writes a bbappend file for a recipe
Parameters:
rd: data dictionary for the recipe
destlayerdir: base directory of the layer to place the bbappend in
(subdirectory path from there will be determined automatically)
- srcfiles: dict of source files to add to SRC_URI, where the value
- is the full path to the file to be added, and the value is the
- original filename as it would appear in SRC_URI or None if it
- isn't already present. You may pass None for this parameter if
- you simply want to specify your own content via the extralines
- parameter.
+ srcfiles: dict of source files to add to SRC_URI, where the key
+ is the full path to the file to be added, and the value is a
+        dict with the following optional keys:
+ path: the original filename as it would appear in SRC_URI
+ or None if it isn't already present.
+ patchdir: the patchdir parameter
+ newname: the name to give to the new added file. None to use
+ the default value: basename(path)
+ You may pass None for this parameter if you simply want to specify
+ your own content via the extralines parameter.
install: dict mapping entries in srcfiles to a tuple of two elements:
install path (*without* ${D} prefix) and permission value (as a
string, e.g. '0644').
@@ -618,18 +698,32 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
redirect_output:
If specified, redirects writing the output file to the
specified directory (for dry-run purposes)
+ params:
+ Parameters to use when adding entries to SRC_URI. If specified,
+ should be a list of dicts with the same length as srcfiles.
+ update_original_recipe:
+        Force updating the original recipe instead of creating/updating
+        a bbappend. destlayerdir must contain the original recipe
"""
if not removevalues:
removevalues = {}
- # Determine how the bbappend should be named
- appendpath, pathok = get_bbappend_path(rd, destlayerdir, wildcardver)
- if not appendpath:
- bb.error('Unable to determine layer directory containing %s' % recipefile)
- return (None, None)
- if not pathok:
- bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
+ recipefile = rd.getVar('FILE')
+ if update_original_recipe:
+ if destlayerdir not in recipefile:
+ bb.error("destlayerdir %s doesn't contain the original recipe (%s), cannot update it" % (destlayerdir, recipefile))
+ return (None, None)
+
+ appendpath = recipefile
+ else:
+ # Determine how the bbappend should be named
+ appendpath, pathok = get_bbappend_path(rd, destlayerdir, wildcardver)
+ if not appendpath:
+ bb.error('Unable to determine layer directory containing %s' % recipefile)
+ return (None, None)
+ if not pathok:
+ bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
appenddir = os.path.dirname(appendpath)
if not redirect_output:
@@ -674,30 +768,48 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
bbappendlines.append((varname, op, value))
destsubdir = rd.getVar('PN')
- if srcfiles:
- bbappendlines.append(('FILESEXTRAPATHS_prepend', ':=', '${THISDIR}/${PN}:'))
+ if not update_original_recipe and srcfiles:
+ bbappendlines.append(('FILESEXTRAPATHS:prepend', ':=', '${THISDIR}/${PN}:'))
appendoverride = ''
if machine:
bbappendlines.append(('PACKAGE_ARCH', '=', '${MACHINE_ARCH}'))
- appendoverride = '_%s' % machine
+ appendoverride = ':%s' % machine
copyfiles = {}
if srcfiles:
instfunclines = []
- for newfile, origsrcfile in srcfiles.items():
- srcfile = origsrcfile
+ for i, (newfile, param) in enumerate(srcfiles.items()):
srcurientry = None
- if not srcfile:
- srcfile = os.path.basename(newfile)
+ if not 'path' in param or not param['path']:
+ if 'newname' in param and param['newname']:
+ srcfile = param['newname']
+ else:
+ srcfile = os.path.basename(newfile)
srcurientry = 'file://%s' % srcfile
+ oldentry = None
+ for uri in rd.getVar('SRC_URI').split():
+ if srcurientry in uri:
+ oldentry = uri
+ if params and params[i]:
+ srcurientry = '%s;%s' % (srcurientry, ';'.join('%s=%s' % (k,v) for k,v in params[i].items()))
# Double-check it's not there already
# FIXME do we care if the entry is added by another bbappend that might go away?
if not srcurientry in rd.getVar('SRC_URI').split():
if machine:
- appendline('SRC_URI_append%s' % appendoverride, '=', ' ' + srcurientry)
+ if oldentry:
+ appendline('SRC_URI:remove%s' % appendoverride, '=', ' ' + oldentry)
+ appendline('SRC_URI:append%s' % appendoverride, '=', ' ' + srcurientry)
else:
+ if oldentry:
+ if update_original_recipe:
+ removevalues['SRC_URI'] = oldentry
+ else:
+ appendline('SRC_URI:remove', '=', oldentry)
appendline('SRC_URI', '+=', srcurientry)
- copyfiles[newfile] = srcfile
+ param['path'] = srcfile
+ else:
+ srcfile = param['path']
+ copyfiles[newfile] = param
if install:
institem = install.pop(newfile, None)
if institem:
@@ -708,7 +820,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
instfunclines.append(instdirline)
instfunclines.append('install -m %s ${WORKDIR}/%s ${D}%s' % (perms, os.path.basename(srcfile), instdestpath))
if instfunclines:
- bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines))
+ bbappendlines.append(('do_install:append%s()' % appendoverride, '', instfunclines))
if redirect_output:
bb.note('Writing append file %s (dry-run)' % appendpath)
@@ -717,6 +829,8 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
# multiple times per operation when we're handling overrides)
if os.path.exists(appendpath) and not os.path.exists(outfile):
shutil.copy2(appendpath, outfile)
+ elif update_original_recipe:
+ outfile = recipefile
else:
bb.note('Writing append file %s' % appendpath)
outfile = appendpath
@@ -726,15 +840,15 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
extvars = {'destsubdir': destsubdir}
def appendfile_varfunc(varname, origvalue, op, newlines):
- if varname == 'FILESEXTRAPATHS_prepend':
+ if varname == 'FILESEXTRAPATHS:prepend':
if origvalue.startswith('${THISDIR}/'):
- popline('FILESEXTRAPATHS_prepend')
+ popline('FILESEXTRAPATHS:prepend')
extvars['destsubdir'] = rd.expand(origvalue.split('${THISDIR}/', 1)[1].rstrip(':'))
elif varname == 'PACKAGE_ARCH':
if machine:
popline('PACKAGE_ARCH')
return (machine, None, 4, False)
- elif varname.startswith('do_install_append'):
+ elif varname.startswith('do_install:append'):
func = popline(varname)
if func:
instfunclines = [line.strip() for line in origvalue.strip('\n').splitlines()]
@@ -746,7 +860,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
splitval = split_var_value(origvalue, assignment=False)
changed = False
removevar = varname
- if varname in ['SRC_URI', 'SRC_URI_append%s' % appendoverride]:
+ if varname in ['SRC_URI', 'SRC_URI:append%s' % appendoverride]:
removevar = 'SRC_URI'
line = popline(varname)
if line:
@@ -775,11 +889,11 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
newvalue = splitval
if len(newvalue) == 1:
# Ensure it's written out as one line
- if '_append' in varname:
+ if ':append' in varname:
newvalue = ' ' + newvalue[0]
else:
newvalue = newvalue[0]
- if not newvalue and (op in ['+=', '.='] or '_append' in varname):
+ if not newvalue and (op in ['+=', '.='] or ':append' in varname):
# There's no point appending nothing
newvalue = None
if varname.endswith('()'):
@@ -820,7 +934,12 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
outdir = redirect_output
else:
outdir = appenddir
- for newfile, srcfile in copyfiles.items():
+ for newfile, param in copyfiles.items():
+ srcfile = param['path']
+ patchdir = param.get('patchdir', ".")
+
+ if patchdir != ".":
+ newfile = os.path.join(os.path.split(newfile)[0], patchdir, os.path.split(newfile)[1])
filedest = os.path.join(outdir, destsubdir, os.path.basename(srcfile))
if os.path.abspath(newfile) != os.path.abspath(filedest):
if newfile.startswith(tempfile.gettempdir()):
@@ -864,10 +983,9 @@ def replace_dir_vars(path, d):
path = path.replace(dirpath, '${%s}' % dirvars[dirpath])
return path
-def get_recipe_pv_without_srcpv(pv, uri_type):
+def get_recipe_pv_with_pfx_sfx(pv, uri_type):
"""
- Get PV without SRCPV common in SCM's for now only
- support git.
+ Get PV separating prefix and suffix components.
Returns tuple with pv, prefix and suffix.
"""
@@ -875,7 +993,7 @@ def get_recipe_pv_without_srcpv(pv, uri_type):
sfx = ''
if uri_type == 'git':
- git_regex = re.compile("(?P<pfx>v?)(?P<ver>[^\+]*)((?P<sfx>\+(git)?r?(AUTOINC\+))(?P<rev>.*))?")
+ git_regex = re.compile(r"(?P<pfx>v?)(?P<ver>.*?)(?P<sfx>\+[^\+]*(git)?r?(AUTOINC\+)?)(?P<rev>.*)")
m = git_regex.match(pv)
if m:
@@ -883,7 +1001,7 @@ def get_recipe_pv_without_srcpv(pv, uri_type):
pfx = m.group('pfx')
sfx = m.group('sfx')
else:
- regex = re.compile("(?P<pfx>(v|r)?)(?P<ver>.*)")
+ regex = re.compile(r"(?P<pfx>(v|r)?)(?P<ver>.*)")
m = regex.match(pv)
if m:
pv = m.group('ver')
@@ -927,7 +1045,7 @@ def get_recipe_upstream_version(rd):
src_uri = src_uris.split()[0]
uri_type, _, _, _, _, _ = decodeurl(src_uri)
- (pv, pfx, sfx) = get_recipe_pv_without_srcpv(rd.getVar('PV'), uri_type)
+ (pv, pfx, sfx) = get_recipe_pv_with_pfx_sfx(rd.getVar('PV'), uri_type)
ru['current_version'] = pv
manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION")
@@ -951,10 +1069,11 @@ def get_recipe_upstream_version(rd):
else:
ud = bb.fetch2.FetchData(src_uri, rd)
if rd.getVar("UPSTREAM_CHECK_COMMITS") == "1":
+ bb.fetch2.get_srcrev(rd)
revision = ud.method.latest_revision(ud, rd, 'default')
upversion = pv
if revision != rd.getVar("SRCREV"):
- upversion = upversion + "-new-commits-available"
+ upversion = upversion + "-new-commits-available"
else:
pupver = ud.method.latest_versionstring(ud, rd)
(upversion, revision) = pupver
@@ -969,3 +1088,98 @@ def get_recipe_upstream_version(rd):
ru['datetime'] = datetime.now()
return ru
+
+def _get_recipe_upgrade_status(data):
+ uv = get_recipe_upstream_version(data)
+
+ pn = data.getVar('PN')
+ cur_ver = uv['current_version']
+
+ upstream_version_unknown = data.getVar('UPSTREAM_VERSION_UNKNOWN')
+ if not uv['version']:
+ status = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
+ else:
+ cmp = vercmp_string(uv['current_version'], uv['version'])
+ if cmp == -1:
+ status = "UPDATE" if not upstream_version_unknown else "KNOWN_BROKEN"
+ elif cmp == 0:
+ status = "MATCH" if not upstream_version_unknown else "KNOWN_BROKEN"
+ else:
+ status = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
+
+ next_ver = uv['version'] if uv['version'] else "N/A"
+ revision = uv['revision'] if uv['revision'] else "N/A"
+ maintainer = data.getVar('RECIPE_MAINTAINER')
+ no_upgrade_reason = data.getVar('RECIPE_NO_UPDATE_REASON')
+
+ return (pn, status, cur_ver, next_ver, maintainer, revision, no_upgrade_reason)
+
+def get_recipe_upgrade_status(recipes=None):
+ pkgs_list = []
+ data_copy_list = []
+ copy_vars = ('SRC_URI',
+ 'PV',
+ 'DL_DIR',
+ 'PN',
+ 'CACHE',
+ 'PERSISTENT_DIR',
+ 'BB_URI_HEADREVS',
+ 'UPSTREAM_CHECK_COMMITS',
+ 'UPSTREAM_CHECK_GITTAGREGEX',
+ 'UPSTREAM_CHECK_REGEX',
+ 'UPSTREAM_CHECK_URI',
+ 'UPSTREAM_VERSION_UNKNOWN',
+ 'RECIPE_MAINTAINER',
+ 'RECIPE_NO_UPDATE_REASON',
+ 'RECIPE_UPSTREAM_VERSION',
+ 'RECIPE_UPSTREAM_DATE',
+ 'CHECK_DATE',
+ 'FETCHCMD_bzr',
+ 'FETCHCMD_ccrc',
+ 'FETCHCMD_cvs',
+ 'FETCHCMD_git',
+ 'FETCHCMD_hg',
+ 'FETCHCMD_npm',
+ 'FETCHCMD_osc',
+ 'FETCHCMD_p4',
+ 'FETCHCMD_repo',
+ 'FETCHCMD_s3',
+ 'FETCHCMD_svn',
+ 'FETCHCMD_wget',
+ )
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False)
+
+ if not recipes:
+ recipes = tinfoil.all_recipe_files(variants=False)
+
+ for fn in recipes:
+ try:
+ if fn.startswith("/"):
+ data = tinfoil.parse_recipe_file(fn)
+ else:
+ data = tinfoil.parse_recipe(fn)
+ except bb.providers.NoProvider:
+ bb.note(" No provider for %s" % fn)
+ continue
+
+ unreliable = data.getVar('UPSTREAM_CHECK_UNRELIABLE')
+ if unreliable == "1":
+                bb.note(" Skip package %s as upstream check unreliable" % data.getVar('PN'))
+ continue
+
+ data_copy = bb.data.init()
+ for var in copy_vars:
+ data_copy.setVar(var, data.getVar(var))
+ for k in data:
+ if k.startswith('SRCREV'):
+ data_copy.setVar(k, data.getVar(k))
+
+ data_copy_list.append(data_copy)
+
+ from concurrent.futures import ProcessPoolExecutor
+ with ProcessPoolExecutor(max_workers=utils.cpu_count()) as executor:
+ pkgs_list = executor.map(_get_recipe_upgrade_status, data_copy_list)
+
+ return pkgs_list
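For the reshaped srcfiles/params arguments of bbappend_recipe() documented above, a minimal caller sketch may help; the paths, layer directory and the striplevel parameter are hypothetical, only the dict shape follows the docstring, and rd is assumed to come from e.g. tinfoil.parse_recipe():

    # Hypothetical example: add one new patch to a recipe via a bbappend.
    srcfiles = {
        "/tmp/0001-fix-build.patch": {
            "path": None,        # not already present in SRC_URI
            "patchdir": ".",     # default patch directory
            "newname": None,     # keep the basename of the source file
        },
    }
    params = [{"striplevel": "2"}]   # optional per-entry SRC_URI parameters

    bbappend_recipe(rd, "/path/to/meta-custom", srcfiles,
                    params=params, update_original_recipe=False)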
diff --git a/meta/lib/oe/reproducible.py b/meta/lib/oe/reproducible.py
new file mode 100644
index 0000000000..448befce33
--- /dev/null
+++ b/meta/lib/oe/reproducible.py
@@ -0,0 +1,197 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+import os
+import subprocess
+import bb
+
+# For reproducible builds, this code sets the default SOURCE_DATE_EPOCH in each
+# component's build environment. The format is the number of seconds since the
+# system epoch.
+#
+# Upstream components (generally) respect this environment variable,
+# using it in place of the "current" date and time.
+# See https://reproducible-builds.org/specs/source-date-epoch/
+#
+# The default value of SOURCE_DATE_EPOCH comes from the function
+# get_source_date_epoch_value which reads from the SDE_FILE, or if the file
+# is not available will use the fallback of SOURCE_DATE_EPOCH_FALLBACK.
+#
+# The SDE_FILE is normally constructed from the function
+# create_source_date_epoch_stamp which is typically added as a postfuncs to
+# the do_unpack task. If a recipe does NOT have do_unpack, it should be added
+# to a task that runs after the source is available and before the
+# do_deploy_source_date_epoch task is executed.
+#
+# If a recipe wishes to override the default behavior, it should set its own
+# SOURCE_DATE_EPOCH or override the do_deploy_source_date_epoch_stamp task
+# with recipe-specific functionality to write the appropriate
+# SOURCE_DATE_EPOCH into the SDE_FILE.
+#
+# SOURCE_DATE_EPOCH is intended to be a reproducible value. This value should
+# be reproducible for anyone who builds the same revision from the same
+# sources.
+#
+# There are 5 ways the create_source_date_epoch_stamp function determines what
+# becomes SOURCE_DATE_EPOCH:
+#
+# 1. Use the value from __source_date_epoch.txt file if this file exists.
+# This file was most likely created in the previous build by one of the
+# following methods 2,3,4.
+# Alternatively, it can be provided by a recipe via SRC_URI.
+#
+# If the file does not exist:
+#
+# 2. If there is a git checkout, use the last git commit timestamp.
+# Git does not preserve file timestamps on checkout.
+#
+# 3. Use the mtime of "known" files such as NEWS, CHANGELOG, ...
+# This works for well-kept repositories distributed via tarball.
+#
+# 4. Use the modification time of the youngest file in the source tree, if
+# there is one.
+# This will be the newest file from the distribution tarball, if any.
+#
+# 5. Fall back to a fixed timestamp (SOURCE_DATE_EPOCH_FALLBACK).
+#
+# Once the value is determined, it is stored in the recipe's SDE_FILE.
+
+def get_source_date_epoch_from_known_files(d, sourcedir):
+ source_date_epoch = None
+ newest_file = None
+ known_files = set(["NEWS", "ChangeLog", "Changelog", "CHANGES"])
+ for file in known_files:
+ filepath = os.path.join(sourcedir, file)
+ if os.path.isfile(filepath):
+ mtime = int(os.lstat(filepath).st_mtime)
+            # There may be more than one "known_file" present; if so, use the youngest one
+ if not source_date_epoch or mtime > source_date_epoch:
+ source_date_epoch = mtime
+ newest_file = filepath
+ if newest_file:
+ bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % newest_file)
+ return source_date_epoch
+
+def find_git_folder(d, sourcedir):
+ # First guess: WORKDIR/git
+ # This is the default git fetcher unpack path
+ workdir = d.getVar('WORKDIR')
+ gitpath = os.path.join(workdir, "git/.git")
+ if os.path.isdir(gitpath):
+ return gitpath
+
+ # Second guess: ${S}
+ gitpath = os.path.join(sourcedir, ".git")
+ if os.path.isdir(gitpath):
+ return gitpath
+
+ # Perhaps there was a subpath or destsuffix specified.
+ # Go looking in the WORKDIR
+ exclude = set(["build", "image", "license-destdir", "patches", "pseudo",
+ "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"])
+ for root, dirs, files in os.walk(workdir, topdown=True):
+ dirs[:] = [d for d in dirs if d not in exclude]
+ if '.git' in dirs:
+ return os.path.join(root, ".git")
+
+ bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
+ return None
+
+def get_source_date_epoch_from_git(d, sourcedir):
+ if not "git://" in d.getVar('SRC_URI') and not "gitsm://" in d.getVar('SRC_URI'):
+ return None
+
+ gitpath = find_git_folder(d, sourcedir)
+ if not gitpath:
+ return None
+
+ # Check that the repository has a valid HEAD; it may not if subdir is used
+ # in SRC_URI
+ p = subprocess.run(['git', '--git-dir', gitpath, 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ if p.returncode != 0:
+ bb.debug(1, "%s does not have a valid HEAD: %s" % (gitpath, p.stdout.decode('utf-8')))
+ return None
+
+ bb.debug(1, "git repository: %s" % gitpath)
+ p = subprocess.run(['git', '-c', 'log.showSignature=false', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'],
+ check=True, stdout=subprocess.PIPE)
+ return int(p.stdout.decode('utf-8'))
+
+def get_source_date_epoch_from_youngest_file(d, sourcedir):
+ if sourcedir == d.getVar('WORKDIR'):
+ # These sources are almost certainly not from a tarball
+ return None
+
+ # Do it the hard way: check all files and find the youngest one...
+ source_date_epoch = None
+ newest_file = None
+ for root, dirs, files in os.walk(sourcedir, topdown=True):
+ files = [f for f in files if not f[0] == '.']
+
+ for fname in files:
+ if fname == "singletask.lock":
+ # Ignore externalsrc/devtool lockfile [YOCTO #14921]
+ continue
+ filename = os.path.join(root, fname)
+ try:
+ mtime = int(os.lstat(filename).st_mtime)
+ except ValueError:
+ mtime = 0
+ if not source_date_epoch or mtime > source_date_epoch:
+ source_date_epoch = mtime
+ newest_file = filename
+
+ if newest_file:
+ bb.debug(1, "Newest file found: %s" % newest_file)
+ return source_date_epoch
+
+def fixed_source_date_epoch(d):
+ bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
+ source_date_epoch = d.getVar('SOURCE_DATE_EPOCH_FALLBACK')
+ if source_date_epoch:
+ bb.debug(1, "Using SOURCE_DATE_EPOCH_FALLBACK")
+ return int(source_date_epoch)
+ return 0
+
+def get_source_date_epoch(d, sourcedir):
+ return (
+ get_source_date_epoch_from_git(d, sourcedir) or
+ get_source_date_epoch_from_youngest_file(d, sourcedir) or
+ fixed_source_date_epoch(d) # Last resort
+ )
+
+def epochfile_read(epochfile, d):
+ cached, efile = d.getVar('__CACHED_SOURCE_DATE_EPOCH') or (None, None)
+ if cached and efile == epochfile:
+ return cached
+
+ if cached and epochfile != efile:
+ bb.debug(1, "Epoch file changed from %s to %s" % (efile, epochfile))
+
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
+ try:
+ with open(epochfile, 'r') as f:
+ s = f.read()
+ try:
+ source_date_epoch = int(s)
+ except ValueError:
+ bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % s)
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
+ bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
+ except FileNotFoundError:
+ bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
+
+ d.setVar('__CACHED_SOURCE_DATE_EPOCH', (str(source_date_epoch), epochfile))
+ return str(source_date_epoch)
+
+def epochfile_write(source_date_epoch, epochfile, d):
+
+ bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
+ bb.utils.mkdirhier(os.path.dirname(epochfile))
+
+ tmp_file = "%s.new" % epochfile
+ with open(tmp_file, 'w') as f:
+ f.write(str(source_date_epoch))
+ os.rename(tmp_file, epochfile)
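get_source_date_epoch() above implements the fallback chain described in the comment block at the top of the file; a minimal usage sketch, assuming d is a recipe datastore with S, WORKDIR and SOURCE_DATE_EPOCH_FALLBACK set:

    import oe.reproducible

    # Tries the git checkout first, then the youngest file in the source
    # tree, and finally the fixed SOURCE_DATE_EPOCH_FALLBACK value.
    sde = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
    d.setVar('SOURCE_DATE_EPOCH', str(sde))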
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
index 754ef563ab..8cd48f9450 100644
--- a/meta/lib/oe/rootfs.py
+++ b/meta/lib/oe/rootfs.py
@@ -1,15 +1,18 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.package_manager import *
from oe.manifest import *
import oe.path
-import filecmp
import shutil
import os
import subprocess
import re
-
class Rootfs(object, metaclass=ABCMeta):
"""
This is an abstract class. Do not instantiate this directly.
@@ -48,6 +51,8 @@ class Rootfs(object, metaclass=ABCMeta):
excludes = [ 'log_check', r'^\+' ]
if hasattr(self, 'log_check_expected_regexes'):
excludes.extend(self.log_check_expected_regexes)
+ # Insert custom log_check excludes
+ excludes += [x for x in (self.d.getVar("IMAGE_LOG_CHECK_EXCLUDES") or "").split(" ") if x]
excludes = [re.compile(x) for x in excludes]
r = re.compile(match)
log_path = self.d.expand("${T}/log.do_rootfs")
@@ -92,10 +97,6 @@ class Rootfs(object, metaclass=ABCMeta):
self.d.getVar('PACKAGE_FEED_ARCHS'))
- @abstractmethod
- def _handle_intercept_failure(self, failed_script):
- pass
-
"""
The _cleanup() method should be used to clean-up stuff that we don't really
want to end up on target. For example, in the case of RPM, the DB locks.
@@ -105,7 +106,7 @@ class Rootfs(object, metaclass=ABCMeta):
def _cleanup(self):
pass
- def _setup_dbg_rootfs(self, dirs):
+ def _setup_dbg_rootfs(self, package_paths):
gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
if gen_debugfs != '1':
return
@@ -115,29 +116,29 @@ class Rootfs(object, metaclass=ABCMeta):
shutil.rmtree(self.image_rootfs + '-orig')
except:
pass
- os.rename(self.image_rootfs, self.image_rootfs + '-orig')
+ bb.utils.rename(self.image_rootfs, self.image_rootfs + '-orig')
bb.note(" Creating debug rootfs...")
bb.utils.mkdirhier(self.image_rootfs)
bb.note(" Copying back package database...")
- for dir in dirs:
- if not os.path.isdir(self.image_rootfs + '-orig' + dir):
- continue
- bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(dir))
- shutil.copytree(self.image_rootfs + '-orig' + dir, self.image_rootfs + dir, symlinks=True)
+ for path in package_paths:
+ bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(path))
+ if os.path.isdir(self.image_rootfs + '-orig' + path):
+ shutil.copytree(self.image_rootfs + '-orig' + path, self.image_rootfs + path, symlinks=True)
+ elif os.path.isfile(self.image_rootfs + '-orig' + path):
+ shutil.copyfile(self.image_rootfs + '-orig' + path, self.image_rootfs + path)
- cpath = oe.cachedpath.CachedPath()
# Copy files located in /usr/lib/debug or /usr/src/debug
for dir in ["/usr/lib/debug", "/usr/src/debug"]:
src = self.image_rootfs + '-orig' + dir
- if cpath.exists(src):
+ if os.path.exists(src):
dst = self.image_rootfs + dir
bb.utils.mkdirhier(os.path.dirname(dst))
shutil.copytree(src, dst)
# Copy files with suffix '.debug' or located in '.debug' dir.
- for root, dirs, files in cpath.walk(self.image_rootfs + '-orig'):
+ for root, dirs, files in os.walk(self.image_rootfs + '-orig'):
relative_dir = root[len(self.image_rootfs + '-orig'):]
for f in files:
if f.endswith('.debug') or '/.debug' in relative_dir:
@@ -148,25 +149,40 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note(" Install complementary '*-dbg' packages...")
self.pm.install_complementary('*-dbg')
+ if self.d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ bb.note(" Install complementary '*-src' packages...")
+ self.pm.install_complementary('*-src')
+
+ """
+        Install additional debug packages. This makes it possible to install
+        extra packages that are not automatically installed as a complementary
+        package of the standard one, e.g. the debug package of static libraries.
+ """
+ extra_debug_pkgs = self.d.getVar('IMAGE_INSTALL_DEBUGFS')
+ if extra_debug_pkgs:
+ bb.note(" Install extra debug packages...")
+ self.pm.install(extra_debug_pkgs.split(), True)
+
+ bb.note(" Removing package database...")
+ for path in package_paths:
+ if os.path.isdir(self.image_rootfs + path):
+ shutil.rmtree(self.image_rootfs + path)
+ elif os.path.isfile(self.image_rootfs + path):
+ os.remove(self.image_rootfs + path)
+
bb.note(" Rename debug rootfs...")
try:
shutil.rmtree(self.image_rootfs + '-dbg')
except:
pass
- os.rename(self.image_rootfs, self.image_rootfs + '-dbg')
+ bb.utils.rename(self.image_rootfs, self.image_rootfs + '-dbg')
- bb.note(" Restoreing original rootfs...")
- os.rename(self.image_rootfs + '-orig', self.image_rootfs)
+ bb.note(" Restoring original rootfs...")
+ bb.utils.rename(self.image_rootfs + '-orig', self.image_rootfs)
def _exec_shell_cmd(self, cmd):
- fakerootcmd = self.d.getVar('FAKEROOT')
- if fakerootcmd is not None:
- exec_cmd = [fakerootcmd, cmd]
- else:
- exec_cmd = cmd
-
try:
- subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT)
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))
@@ -178,19 +194,17 @@ class Rootfs(object, metaclass=ABCMeta):
post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
- postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR")
- if not postinst_intercepts_dir:
- postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
- intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
-
- bb.utils.remove(intercepts_dir, True)
-
- bb.utils.mkdirhier(self.image_rootfs)
-
- bb.utils.mkdirhier(self.deploydir)
+ def make_last(command, commands):
+ commands = commands.split()
+ if command in commands:
+ commands.remove(command)
+ commands.append(command)
+            return " ".join(commands)
- shutil.copytree(postinst_intercepts_dir, intercepts_dir)
+ # We want this to run as late as possible, in particular after
+ # systemd_sysusers_create and set_user_group. Using :append is not enough
+        post_process_cmds = make_last("tidy_shadowutils_files", post_process_cmds)
+        post_process_cmds = make_last("rootfs_reproducible", post_process_cmds)
execute_pre_post_process(self.d, pre_process_cmds)
@@ -207,7 +221,7 @@ class Rootfs(object, metaclass=ABCMeta):
execute_pre_post_process(self.d, rootfs_post_install_cmds)
- self._run_intercepts()
+ self.pm.run_intercepts()
execute_pre_post_process(self.d, post_process_cmds)
@@ -215,6 +229,9 @@ class Rootfs(object, metaclass=ABCMeta):
self.progress_reporter.next_stage()
if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d) and \
+ not bb.utils.contains("IMAGE_FEATURES",
+ "read-only-rootfs-delayed-postinsts",
True, False, self.d):
delayed_postinsts = self._get_delayed_postinsts()
if delayed_postinsts is not None:
@@ -245,13 +262,11 @@ class Rootfs(object, metaclass=ABCMeta):
def _uninstall_unneeded(self):
- # Remove unneeded init script symlinks
+ # Remove the run-postinsts package if no delayed postinsts are found
delayed_postinsts = self._get_delayed_postinsts()
if delayed_postinsts is None:
- if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
- self._exec_shell_cmd(["update-rc.d", "-f", "-r",
- self.d.getVar('IMAGE_ROOTFS'),
- "run-postinsts", "remove"])
+ if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")) or os.path.exists(self.d.expand("${IMAGE_ROOTFS}${systemd_system_unitdir}/run-postinsts.service")):
+ self.pm.remove(["run-postinsts"])
image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
True, False, self.d)
@@ -293,54 +308,26 @@ class Rootfs(object, metaclass=ABCMeta):
# Remove the package manager data files
self.pm.remove_packaging_data()
- def _run_intercepts(self):
- intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
-
- bb.note("Running intercept scripts:")
- os.environ['D'] = self.image_rootfs
- os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
- for script in os.listdir(intercepts_dir):
- script_full = os.path.join(intercepts_dir, script)
-
- if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
- continue
-
- bb.note("> Executing %s intercept ..." % script)
-
- try:
- output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
- if output: bb.note(output.decode("utf-8"))
- except subprocess.CalledProcessError as e:
- bb.warn("The postinstall intercept hook '%s' failed, details in log.do_rootfs" % script)
- bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
-
- with open(script_full) as intercept:
- registered_pkgs = None
- for line in intercept.read().split("\n"):
- m = re.match("^##PKGS:(.*)", line)
- if m is not None:
- registered_pkgs = m.group(1).strip()
- break
-
- if registered_pkgs is not None:
- bb.warn("The postinstalls for the following packages "
- "will be postponed for first boot: %s" %
- registered_pkgs)
-
- # call the backend dependent handler
- self._handle_intercept_failure(registered_pkgs)
-
def _run_ldconfig(self):
if self.d.getVar('LDCONFIGDEPEND'):
- bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v")
+ bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v -X")
self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
- 'new', '-v'])
+ 'new', '-v', '-X'])
+
+ image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d)
+ ldconfig_in_features = bb.utils.contains("DISTRO_FEATURES", "ldconfig",
+ True, False, self.d)
+ if image_rorfs or not ldconfig_in_features:
+ ldconfig_cache_dir = os.path.join(self.image_rootfs, "var/cache/ldconfig")
+ if os.path.exists(ldconfig_cache_dir):
+ bb.note("Removing ldconfig auxiliary cache...")
+ shutil.rmtree(ldconfig_cache_dir)
def _check_for_kernel_modules(self, modules_dir):
for root, dirs, files in os.walk(modules_dir, topdown=True):
for name in files:
- found_ko = name.endswith(".ko")
+ found_ko = name.endswith((".ko", ".ko.gz", ".ko.xz", ".ko.zst"))
if found_ko:
return found_ko
return False
@@ -352,17 +339,30 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note("No Kernel Modules found, not running depmod")
return
- kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR'), "kernel-depmod",
- 'kernel-abiversion')
- if not os.path.exists(kernel_abi_ver_file):
- bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
+ pkgdatadir = self.d.getVar('PKGDATA_DIR')
- kernel_ver = open(kernel_abi_ver_file).read().strip(' \n')
- versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)
+ # PKGDATA_DIR can include multiple kernels so we run depmod for each
+ # one of them.
+ for direntry in os.listdir(pkgdatadir):
+ match = re.match('(.*)-depmod', direntry)
+ if not match:
+ continue
+ kernel_package_name = match.group(1)
- bb.utils.mkdirhier(versioned_modules_dir)
+ kernel_abi_ver_file = oe.path.join(pkgdatadir, direntry, kernel_package_name + '-abiversion')
+ if not os.path.exists(kernel_abi_ver_file):
+ bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
- self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver])
+ with open(kernel_abi_ver_file) as f:
+ kernel_ver = f.read().strip(' \n')
+
+ versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)
+
+ bb.utils.mkdirhier(versioned_modules_dir)
+
+ bb.note("Running depmodwrapper for %s ..." % versioned_modules_dir)
+ if self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver, kernel_package_name]):
+ bb.fatal("Kernel modules dependency generation failed")
"""
Create devfs:
@@ -389,613 +389,10 @@ class Rootfs(object, metaclass=ABCMeta):
self.image_rootfs, "-D", devtable])
-class RpmRootfs(Rootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
- super(RpmRootfs, self).__init__(d, progress_reporter, logcatcher)
- self.log_check_regex = '(unpacking of archive failed|Cannot find package'\
- '|exit 1|ERROR: |Error: |Error |ERROR '\
- '|Failed |Failed: |Failed$|Failed\(\d+\):)'
- self.manifest = RpmManifest(d, manifest_dir)
-
- self.pm = RpmPM(d,
- d.getVar('IMAGE_ROOTFS'),
- self.d.getVar('TARGET_VENDOR')
- )
-
- self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
- if self.inc_rpm_image_gen != "1":
- bb.utils.remove(self.image_rootfs, True)
- else:
- self.pm.recovery_packaging_data()
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
-
- self.pm.create_configs()
-
- '''
- While rpm incremental image generation is enabled, it will remove the
- unneeded pkgs by comparing the new install solution manifest and the
- old installed manifest.
- '''
- def _create_incremental(self, pkgs_initial_install):
- if self.inc_rpm_image_gen == "1":
-
- pkgs_to_install = list()
- for pkg_type in pkgs_initial_install:
- pkgs_to_install += pkgs_initial_install[pkg_type]
-
- installed_manifest = self.pm.load_old_install_solution()
- solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
-
- pkg_to_remove = list()
- for pkg in installed_manifest:
- if pkg not in solution_manifest:
- pkg_to_remove.append(pkg)
-
- self.pm.update()
-
- bb.note('incremental update -- upgrade packages in place ')
- self.pm.upgrade()
- if pkg_to_remove != []:
- bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
- self.pm.remove(pkg_to_remove)
-
- self.pm.autoremove()
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
- rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')
-
- # update PM index files
- self.pm.write_index()
-
- execute_pre_post_process(self.d, rpm_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- if self.inc_rpm_image_gen == "1":
- self._create_incremental(pkgs_to_install)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- pkgs = []
- pkgs_attempt = []
- for pkg_type in pkgs_to_install:
- if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
- pkgs_attempt += pkgs_to_install[pkg_type]
- else:
- pkgs += pkgs_to_install[pkg_type]
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install(pkgs)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install(pkgs_attempt, True)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
-
- execute_pre_post_process(self.d, rpm_post_process_cmds)
-
- if self.inc_rpm_image_gen == "1":
- self.pm.backup_packaging_data()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
-
- @staticmethod
- def _depends_list():
- return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
- 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
-
- def _get_delayed_postinsts(self):
- postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
- if os.path.isdir(postinst_dir):
- files = os.listdir(postinst_dir)
- for f in files:
- bb.note('Delayed package scriptlet: %s' % f)
- return files
-
- return None
-
- def _save_postinsts(self):
- # this is just a stub. For RPM, the failed postinstalls are
- # already saved in /etc/rpm-postinsts
- pass
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _handle_intercept_failure(self, registered_pkgs):
- rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
- bb.utils.mkdirhier(rpm_postinsts_dir)
-
- # Save the package postinstalls in /etc/rpm-postinsts
- for pkg in registered_pkgs.split():
- self.pm.save_rpmpostinst(pkg)
-
- def _cleanup(self):
- self.pm._invoke_dnf(["clean", "all"])
-
-
-class DpkgOpkgRootfs(Rootfs):
- def __init__(self, d, progress_reporter=None, logcatcher=None):
- super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
-
- def _get_pkgs_postinsts(self, status_file):
- def _get_pkg_depends_list(pkg_depends):
- pkg_depends_list = []
- # filter version requirements like libc (>= 1.1)
- for dep in pkg_depends.split(', '):
- m_dep = re.match("^(.*) \(.*\)$", dep)
- if m_dep:
- dep = m_dep.group(1)
- pkg_depends_list.append(dep)
-
- return pkg_depends_list
-
- pkgs = {}
- pkg_name = ""
- pkg_status_match = False
- pkg_depends = ""
-
- with open(status_file) as status:
- data = status.read()
- status.close()
- for line in data.split('\n'):
- m_pkg = re.match("^Package: (.*)", line)
- m_status = re.match("^Status:.*unpacked", line)
- m_depends = re.match("^Depends: (.*)", line)
-
- if m_pkg is not None:
- if pkg_name and pkg_status_match:
- pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
-
- pkg_name = m_pkg.group(1)
- pkg_status_match = False
- pkg_depends = ""
- elif m_status is not None:
- pkg_status_match = True
- elif m_depends is not None:
- pkg_depends = m_depends.group(1)
-
- # remove package dependencies not in postinsts
- pkg_names = list(pkgs.keys())
- for pkg_name in pkg_names:
- deps = pkgs[pkg_name][:]
-
- for d in deps:
- if d not in pkg_names:
- pkgs[pkg_name].remove(d)
-
- return pkgs
-
- def _get_delayed_postinsts_common(self, status_file):
- def _dep_resolve(graph, node, resolved, seen):
- seen.append(node)
-
- for edge in graph[node]:
- if edge not in resolved:
- if edge in seen:
- raise RuntimeError("Packages %s and %s have " \
- "a circular dependency in postinsts scripts." \
- % (node, edge))
- _dep_resolve(graph, edge, resolved, seen)
-
- resolved.append(node)
-
- pkg_list = []
-
- pkgs = None
- if not self.d.getVar('PACKAGE_INSTALL').strip():
- bb.note("Building empty image")
- else:
- pkgs = self._get_pkgs_postinsts(status_file)
- if pkgs:
- root = "__packagegroup_postinst__"
- pkgs[root] = list(pkgs.keys())
- _dep_resolve(pkgs, root, pkg_list, [])
- pkg_list.remove(root)
-
- if len(pkg_list) == 0:
- return None
-
- return pkg_list
-
- def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
- num = 0
- for p in self._get_delayed_postinsts():
- bb.utils.mkdirhier(dst_postinst_dir)
-
- if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
- shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
- os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
-
- num += 1
-
-class DpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
- super(DpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
- self.log_check_regex = '^E:'
- self.log_check_expected_regexes = \
- [
- "^E: Unmet dependencies."
- ]
-
- bb.utils.remove(self.image_rootfs, True)
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
- self.manifest = DpkgManifest(d, manifest_dir)
- self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
- d.getVar('PACKAGE_ARCHS'),
- d.getVar('DPKG_ARCH'))
-
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
- deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')
-
- alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
- bb.utils.mkdirhier(alt_dir)
-
- # update PM index files
- self.pm.write_index()
-
- execute_pre_post_process(self.d, deb_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
- # Don't support incremental, so skip that
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- self.pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- if self.progress_reporter:
- # Don't support attemptonly, so skip that
- self.progress_reporter.next_stage()
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._setup_dbg_rootfs(['/var/lib/dpkg'])
-
- self.pm.fix_broken_dependencies()
-
- self.pm.mark_packages("installed")
-
- self.pm.run_pre_post_installs()
-
- execute_pre_post_process(self.d, deb_post_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- @staticmethod
- def _depends_list():
- return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']
-
- def _get_delayed_postinsts(self):
- status_file = self.image_rootfs + "/var/lib/dpkg/status"
- return self._get_delayed_postinsts_common(status_file)
-
- def _save_postinsts(self):
- dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
- src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
- return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
-
- def _handle_intercept_failure(self, registered_pkgs):
- self.pm.mark_packages("unpacked", registered_pkgs.split())
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _cleanup(self):
- pass
-
-
-class OpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
- super(OpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
- self.log_check_regex = '(exit 1|Collected errors)'
-
- self.manifest = OpkgManifest(d, manifest_dir)
- self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
- self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")
-
- self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
- if self._remove_old_rootfs():
- bb.utils.remove(self.image_rootfs, True)
- self.pm = OpkgPM(d,
- self.image_rootfs,
- self.opkg_conf,
- self.pkg_archs)
- else:
- self.pm = OpkgPM(d,
- self.image_rootfs,
- self.opkg_conf,
- self.pkg_archs)
- self.pm.recover_packaging_data()
-
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
-
- def _prelink_file(self, root_dir, filename):
- bb.note('prelink %s in %s' % (filename, root_dir))
- prelink_cfg = oe.path.join(root_dir,
- self.d.expand('${sysconfdir}/prelink.conf'))
- if not os.path.exists(prelink_cfg):
- shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
- prelink_cfg)
-
- cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
- self._exec_shell_cmd([cmd_prelink,
- '--root',
- root_dir,
- '-amR',
- '-N',
- '-c',
- self.d.expand('${sysconfdir}/prelink.conf')])
-
- '''
- Compare two files with the same key twice to see if they are equal.
- If they are not equal, it means they are duplicated and come from
- different packages.
- 1st: Comapre them directly;
- 2nd: While incremental image creation is enabled, one of the
- files could be probaly prelinked in the previous image
- creation and the file has been changed, so we need to
- prelink the other one and compare them.
- '''
- def _file_equal(self, key, f1, f2):
-
- # Both of them are not prelinked
- if filecmp.cmp(f1, f2):
- return True
-
- if self.image_rootfs not in f1:
- self._prelink_file(f1.replace(key, ''), f1)
-
- if self.image_rootfs not in f2:
- self._prelink_file(f2.replace(key, ''), f2)
-
- # Both of them are prelinked
- if filecmp.cmp(f1, f2):
- return True
-
- # Not equal
- return False
-
- """
- This function was reused from the old implementation.
- See commit: "image.bbclass: Added variables for multilib support." by
- Lianhao Lu.
- """
- def _multilib_sanity_test(self, dirs):
-
- allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP")
- if allow_replace is None:
- allow_replace = ""
-
- allow_rep = re.compile(re.sub("\|$", "", allow_replace))
- error_prompt = "Multilib check error:"
-
- files = {}
- for dir in dirs:
- for root, subfolders, subfiles in os.walk(dir):
- for file in subfiles:
- item = os.path.join(root, file)
- key = str(os.path.join("/", os.path.relpath(item, dir)))
-
- valid = True
- if key in files:
- #check whether the file is allow to replace
- if allow_rep.match(key):
- valid = True
- else:
- if os.path.exists(files[key]) and \
- os.path.exists(item) and \
- not self._file_equal(key, files[key], item):
- valid = False
- bb.fatal("%s duplicate files %s %s is not the same\n" %
- (error_prompt, item, files[key]))
-
- #pass the check, add to list
- if valid:
- files[key] = item
-
- def _multilib_test_install(self, pkgs):
- ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
- bb.utils.mkdirhier(ml_temp)
-
- dirs = [self.image_rootfs]
-
- for variant in self.d.getVar("MULTILIB_VARIANTS").split():
- ml_target_rootfs = os.path.join(ml_temp, variant)
-
- bb.utils.remove(ml_target_rootfs, True)
-
- ml_opkg_conf = os.path.join(ml_temp,
- variant + "-" + os.path.basename(self.opkg_conf))
-
- ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)
-
- ml_pm.update()
- ml_pm.install(pkgs)
-
- dirs.append(ml_target_rootfs)
-
- self._multilib_sanity_test(dirs)
-
- '''
- While ipk incremental image generation is enabled, it will remove the
- unneeded pkgs by comparing the old full manifest in previous existing
- image and the new full manifest in the current image.
- '''
- def _remove_extra_packages(self, pkgs_initial_install):
- if self.inc_opkg_image_gen == "1":
- # Parse full manifest in previous existing image creation session
- old_full_manifest = self.manifest.parse_full_manifest()
-
- # Create full manifest for the current image session, the old one
- # will be replaced by the new one.
- self.manifest.create_full(self.pm)
-
- # Parse full manifest in current image creation session
- new_full_manifest = self.manifest.parse_full_manifest()
-
- pkg_to_remove = list()
- for pkg in old_full_manifest:
- if pkg not in new_full_manifest:
- pkg_to_remove.append(pkg)
-
- if pkg_to_remove != []:
- bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
- self.pm.remove(pkg_to_remove)
-
- '''
- Compare with previous existing image creation, if some conditions
- triggered, the previous old image should be removed.
- The conditions include any of 'PACKAGE_EXCLUDE, NO_RECOMMENDATIONS
- and BAD_RECOMMENDATIONS' has been changed.
- '''
- def _remove_old_rootfs(self):
- if self.inc_opkg_image_gen != "1":
- return True
-
- vars_list_file = self.d.expand('${T}/vars_list')
-
- old_vars_list = ""
- if os.path.exists(vars_list_file):
- old_vars_list = open(vars_list_file, 'r+').read()
-
- new_vars_list = '%s:%s:%s\n' % \
- ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
- (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
- (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
- open(vars_list_file, 'w+').write(new_vars_list)
-
- if old_vars_list != new_vars_list:
- return True
-
- return False
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
- opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')
-
- # update PM index files, unless users provide their own feeds
- if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
- self.pm.write_index()
-
- execute_pre_post_process(self.d, opkg_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
- # Steps are a bit different in order, skip next
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- self.pm.handle_bad_recommendations()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- if self.inc_opkg_image_gen == "1":
- self._remove_extra_packages(pkgs_to_install)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- # For multilib, we perform a sanity test before final install
- # If sanity test fails, it will automatically do a bb.fatal()
- # and the installation will stop
- if pkg_type == Manifest.PKG_TYPE_MULTILIB:
- self._multilib_test_install(pkgs_to_install[pkg_type])
-
- self.pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
- opkg_dir = os.path.join(opkg_lib_dir, 'opkg')
- self._setup_dbg_rootfs([opkg_dir])
-
- execute_pre_post_process(self.d, opkg_post_process_cmds)
-
- if self.inc_opkg_image_gen == "1":
- self.pm.backup_packaging_data()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- @staticmethod
- def _depends_list():
- return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR']
-
- def _get_delayed_postinsts(self):
- status_file = os.path.join(self.image_rootfs,
- self.d.getVar('OPKGLIBDIR').strip('/'),
- "opkg", "status")
- return self._get_delayed_postinsts_common(status_file)
-
- def _save_postinsts(self):
- dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
- src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
- return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
-
- def _handle_intercept_failure(self, registered_pkgs):
- self.pm.mark_packages("unpacked", registered_pkgs.split())
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _cleanup(self):
- self.pm.remove_lists()
-
def get_class_for_type(imgtype):
- return {"rpm": RpmRootfs,
- "ipk": OpkgRootfs,
- "deb": DpkgRootfs}[imgtype]
+ import importlib
+ mod = importlib.import_module('oe.package_manager.' + imgtype + '.rootfs')
+ return mod.PkgRootfs
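As a reference for the importlib-based dispatch above, a minimal sketch of the same pattern; the helper name and the error handling are illustrative only, not part of the patch:

    import importlib

    def load_rootfs_backend(imgtype):
        # e.g. "ipk" -> oe.package_manager.ipk.rootfs.PkgRootfs
        try:
            mod = importlib.import_module('oe.package_manager.%s.rootfs' % imgtype)
        except ModuleNotFoundError:
            raise ValueError('Unsupported IMAGE_PKGTYPE: %s' % imgtype)
        return mod.PkgRootfs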
def variable_depends(d, manifest_dir=None):
img_type = d.getVar('IMAGE_PKGTYPE')
@@ -1006,28 +403,26 @@ def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None)
env_bkp = os.environ.copy()
img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- RpmRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
- elif img_type == "ipk":
- OpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
- elif img_type == "deb":
- DpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
+ cls = get_class_for_type(img_type)
+ cls(d, manifest_dir, progress_reporter, logcatcher).create()
os.environ.clear()
os.environ.update(env_bkp)
def image_list_installed_packages(d, rootfs_dir=None):
+ # There's no rootfs for baremetal images
+ if bb.data.inherits_class('baremetal-image', d):
+ return ""
+
if not rootfs_dir:
rootfs_dir = d.getVar('IMAGE_ROOTFS')
img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- return RpmPkgsList(d, rootfs_dir).list_pkgs()
- elif img_type == "ipk":
- return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET")).list_pkgs()
- elif img_type == "deb":
- return DpkgPkgsList(d, rootfs_dir).list_pkgs()
+
+ import importlib
+ cls = importlib.import_module('oe.package_manager.' + img_type)
+ return cls.PMPkgsList(d, rootfs_dir).list_pkgs()
if __name__ == "__main__":
"""
diff --git a/meta/lib/oe/rust.py b/meta/lib/oe/rust.py
new file mode 100644
index 0000000000..185553eeeb
--- /dev/null
+++ b/meta/lib/oe/rust.py
@@ -0,0 +1,13 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# Handle mismatches between `uname -m`-style output and Rust's arch names
+def arch_to_rust_arch(arch):
+ if arch == "ppc64le":
+ return "powerpc64le"
+ if arch in ('riscv32', 'riscv64'):
+ return arch + 'gc'
+ return arch
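A quick usage sketch for arch_to_rust_arch, with the expected results taken directly from the function above:

    assert arch_to_rust_arch("ppc64le") == "powerpc64le"
    assert arch_to_rust_arch("riscv64") == "riscv64gc"
    assert arch_to_rust_arch("x86_64") == "x86_64"  # everything else passes through unchanged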
diff --git a/meta/lib/oe/sbom.py b/meta/lib/oe/sbom.py
new file mode 100644
index 0000000000..fd4b6895d8
--- /dev/null
+++ b/meta/lib/oe/sbom.py
@@ -0,0 +1,120 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import collections
+import os
+
+DepRecipe = collections.namedtuple("DepRecipe", ("doc", "doc_sha1", "recipe"))
+DepSource = collections.namedtuple("DepSource", ("doc", "doc_sha1", "recipe", "file"))
+
+
+def get_recipe_spdxid(d):
+ return "SPDXRef-%s-%s" % ("Recipe", d.getVar("PN"))
+
+
+def get_download_spdxid(d, idx):
+ return "SPDXRef-Download-%s-%d" % (d.getVar("PN"), idx)
+
+
+def get_package_spdxid(pkg):
+ return "SPDXRef-Package-%s" % pkg
+
+
+def get_source_file_spdxid(d, idx):
+ return "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), idx)
+
+
+def get_packaged_file_spdxid(pkg, idx):
+ return "SPDXRef-PackagedFile-%s-%d" % (pkg, idx)
+
+
+def get_image_spdxid(img):
+ return "SPDXRef-Image-%s" % img
+
+
+def get_sdk_spdxid(sdk):
+ return "SPDXRef-SDK-%s" % sdk
+
+
+def _doc_path_by_namespace(spdx_deploy, arch, doc_namespace):
+ return spdx_deploy / "by-namespace" / arch / doc_namespace.replace("/", "_")
+
+
+def doc_find_by_namespace(spdx_deploy, search_arches, doc_namespace):
+ for pkgarch in search_arches:
+ p = _doc_path_by_namespace(spdx_deploy, pkgarch, doc_namespace)
+ if os.path.exists(p):
+ return p
+ return None
+
+
+def _doc_path_by_hashfn(spdx_deploy, arch, doc_name, hashfn):
+ return (
+ spdx_deploy / "by-hash" / arch / hashfn.split()[1] / (doc_name + ".spdx.json")
+ )
+
+
+def doc_find_by_hashfn(spdx_deploy, search_arches, doc_name, hashfn):
+ for pkgarch in search_arches:
+ p = _doc_path_by_hashfn(spdx_deploy, pkgarch, doc_name, hashfn)
+ if os.path.exists(p):
+ return p
+ return None
+
+
+def doc_path(spdx_deploy, doc_name, arch, subdir):
+ return spdx_deploy / arch / subdir / (doc_name + ".spdx.json")
+
+
+def write_doc(d, spdx_doc, arch, subdir, spdx_deploy=None, indent=None):
+ from pathlib import Path
+
+ if spdx_deploy is None:
+ spdx_deploy = Path(d.getVar("SPDXDEPLOY"))
+
+ dest = doc_path(spdx_deploy, spdx_doc.name, arch, subdir)
+ dest.parent.mkdir(exist_ok=True, parents=True)
+ with dest.open("wb") as f:
+ doc_sha1 = spdx_doc.to_json(f, sort_keys=True, indent=indent)
+
+ l = _doc_path_by_namespace(spdx_deploy, arch, spdx_doc.documentNamespace)
+ l.parent.mkdir(exist_ok=True, parents=True)
+ l.symlink_to(os.path.relpath(dest, l.parent))
+
+ l = _doc_path_by_hashfn(
+ spdx_deploy, arch, spdx_doc.name, d.getVar("BB_HASHFILENAME")
+ )
+ l.parent.mkdir(exist_ok=True, parents=True)
+ l.symlink_to(os.path.relpath(dest, l.parent))
+
+ return doc_sha1
+
+
+def read_doc(fn):
+ import hashlib
+ import oe.spdx
+ import io
+ import contextlib
+
+ @contextlib.contextmanager
+ def get_file():
+ if isinstance(fn, io.IOBase):
+ yield fn
+ else:
+ with fn.open("rb") as f:
+ yield f
+
+ with get_file() as f:
+ sha1 = hashlib.sha1()
+ while True:
+ chunk = f.read(4096)
+ if not chunk:
+ break
+ sha1.update(chunk)
+
+ f.seek(0)
+ doc = oe.spdx.SPDXDocument.from_json(f)
+
+ return (doc, sha1.hexdigest())
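For orientation, a rough sketch of the deploy layout these helpers produce; the concrete names are invented, only the structure follows doc_path(), _doc_path_by_namespace() and _doc_path_by_hashfn() above:

    from pathlib import Path

    deploy = Path("tmp/deploy/spdx")
    # primary document written by write_doc():
    doc = deploy / "core2-64" / "recipes" / "recipe-busybox.spdx.json"
    # symlink keyed by the document namespace, with "/" flattened to "_":
    ns_link = deploy / "by-namespace" / "core2-64" / "http:__spdx.org_spdxdocs_recipe-busybox"
    # symlink keyed by the second field of BB_HASHFILENAME:
    hash_link = deploy / "by-hash" / "core2-64" / "<taskhash>" / "recipe-busybox.spdx.json"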
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py
index 30e1fb5316..3dc3672210 100644
--- a/meta/lib/oe/sdk.py
+++ b/meta/lib/oe/sdk.py
@@ -1,13 +1,16 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.manifest import *
from oe.package_manager import *
import os
-import shutil
-import glob
import traceback
-
class Sdk(object, metaclass=ABCMeta):
def __init__(self, d, manifest_dir):
self.d = d
@@ -67,7 +70,7 @@ class Sdk(object, metaclass=ABCMeta):
#FIXME: using umbrella exc catching because bb.utils method raises it
except Exception as e:
bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
- bb.error("unable to place %s in final SDK location" % sourcefile)
+ bb.fatal("unable to place %s in final SDK location" % sourcefile)
def mkdirhier(self, dirpath):
try:
@@ -84,266 +87,27 @@ class Sdk(object, metaclass=ABCMeta):
bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
bb.warn("cannot remove SDK dir: %s" % path)
-class RpmSdk(Sdk):
- def __init__(self, d, manifest_dir=None):
- super(RpmSdk, self).__init__(d, manifest_dir)
-
- self.target_manifest = RpmManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_TARGET)
- self.host_manifest = RpmManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_HOST)
-
- target_providename = ['/bin/sh',
- '/bin/bash',
- '/usr/bin/env',
- '/usr/bin/perl',
- 'pkgconfig'
- ]
-
- self.target_pm = RpmPM(d,
- self.sdk_target_sysroot,
- self.d.getVar('TARGET_VENDOR'),
- 'target',
- target_providename
- )
-
- sdk_providename = ['/bin/sh',
- '/bin/bash',
- '/usr/bin/env',
- '/usr/bin/perl',
- 'pkgconfig',
- 'libGL.so()(64bit)',
- 'libGL.so'
- ]
-
- self.host_pm = RpmPM(d,
- self.sdk_host_sysroot,
- self.d.getVar('SDK_VENDOR'),
- 'host',
- sdk_providename,
- "SDK_PACKAGE_ARCHS",
- "SDK_OS"
- )
-
- def _populate_sysroot(self, pm, manifest):
- pkgs_to_install = manifest.parse_initial_manifest()
-
- pm.create_configs()
- pm.write_index()
- pm.update()
-
- pkgs = []
- pkgs_attempt = []
- for pkg_type in pkgs_to_install:
- if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
- pkgs_attempt += pkgs_to_install[pkg_type]
+ def install_locales(self, pm):
+ linguas = self.d.getVar("SDKIMAGE_LINGUAS")
+ if linguas:
+ import fnmatch
+ # Install the binary locales
+ if linguas == "all":
+ pm.install_glob("nativesdk-glibc-binary-localedata-*.utf-8", sdk=True)
else:
- pkgs += pkgs_to_install[pkg_type]
-
- pm.install(pkgs)
-
- pm.install(pkgs_attempt, True)
-
- def _populate(self):
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
-
- bb.note("Installing TARGET packages")
- self._populate_sysroot(self.target_pm, self.target_manifest)
-
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.target_pm.remove_packaging_data()
-
- bb.note("Installing NATIVESDK packages")
- self._populate_sysroot(self.host_pm, self.host_manifest)
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.host_pm.remove_packaging_data()
-
- # Move host RPM library data
- native_rpm_state_dir = os.path.join(self.sdk_output,
- self.sdk_native_path,
- self.d.getVar('localstatedir_nativesdk').strip('/'),
- "lib",
- "rpm"
- )
- self.mkdirhier(native_rpm_state_dir)
- for f in glob.glob(os.path.join(self.sdk_output,
- "var",
- "lib",
- "rpm",
- "*")):
- self.movefile(f, native_rpm_state_dir)
-
- self.remove(os.path.join(self.sdk_output, "var"), True)
-
- # Move host sysconfig data
- native_sysconf_dir = os.path.join(self.sdk_output,
- self.sdk_native_path,
- self.d.getVar('sysconfdir',
- True).strip('/'),
- )
- self.mkdirhier(native_sysconf_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
- self.movefile(f, native_sysconf_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
- self.movefile(f, native_sysconf_dir)
- self.remove(os.path.join(self.sdk_output, "etc"), True)
-
-
-class OpkgSdk(Sdk):
- def __init__(self, d, manifest_dir=None):
- super(OpkgSdk, self).__init__(d, manifest_dir)
-
- self.target_conf = self.d.getVar("IPKGCONF_TARGET")
- self.host_conf = self.d.getVar("IPKGCONF_SDK")
-
- self.target_manifest = OpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_TARGET)
- self.host_manifest = OpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_HOST)
-
- self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
- self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
-
- self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
- self.d.getVar("SDK_PACKAGE_ARCHS"))
-
- def _populate_sysroot(self, pm, manifest):
- pkgs_to_install = manifest.parse_initial_manifest()
-
- if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
- pm.write_index()
-
- pm.update()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- def _populate(self):
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
-
- bb.note("Installing TARGET packages")
- self._populate_sysroot(self.target_pm, self.target_manifest)
-
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.target_pm.remove_packaging_data()
-
- bb.note("Installing NATIVESDK packages")
- self._populate_sysroot(self.host_pm, self.host_manifest)
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.host_pm.remove_packaging_data()
-
- target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
- host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
-
- self.mkdirhier(target_sysconfdir)
- shutil.copy(self.target_conf, target_sysconfdir)
- os.chmod(os.path.join(target_sysconfdir,
- os.path.basename(self.target_conf)), 0o644)
-
- self.mkdirhier(host_sysconfdir)
- shutil.copy(self.host_conf, host_sysconfdir)
- os.chmod(os.path.join(host_sysconfdir,
- os.path.basename(self.host_conf)), 0o644)
-
- native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('localstatedir_nativesdk').strip('/'),
- "lib", "opkg")
- self.mkdirhier(native_opkg_state_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
- self.movefile(f, native_opkg_state_dir)
-
- self.remove(os.path.join(self.sdk_output, "var"), True)
-
-
-class DpkgSdk(Sdk):
- def __init__(self, d, manifest_dir=None):
- super(DpkgSdk, self).__init__(d, manifest_dir)
-
- self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
- self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")
-
- self.target_manifest = DpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_TARGET)
- self.host_manifest = DpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_HOST)
-
- self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
- self.d.getVar("PACKAGE_ARCHS"),
- self.d.getVar("DPKG_ARCH"),
- self.target_conf_dir)
-
- self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
- self.d.getVar("SDK_PACKAGE_ARCHS"),
- self.d.getVar("DEB_SDK_ARCH"),
- self.host_conf_dir)
-
- def _copy_apt_dir_to(self, dst_dir):
- staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")
-
- self.remove(dst_dir, True)
-
- shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
-
- def _populate_sysroot(self, pm, manifest):
- pkgs_to_install = manifest.parse_initial_manifest()
-
- pm.write_index()
- pm.update()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- def _populate(self):
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
-
- bb.note("Installing TARGET packages")
- self._populate_sysroot(self.target_pm, self.target_manifest)
-
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
-
- self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.target_pm.remove_packaging_data()
-
- bb.note("Installing NATIVESDK packages")
- self._populate_sysroot(self.host_pm, self.host_manifest)
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
-
- self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
- "etc", "apt"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.host_pm.remove_packaging_data()
-
- native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
- "var", "lib", "dpkg")
- self.mkdirhier(native_dpkg_state_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
- self.movefile(f, native_dpkg_state_dir)
- self.remove(os.path.join(self.sdk_output, "var"), True)
-
+ pm.install(["nativesdk-glibc-binary-localedata-%s.utf-8" % \
+ lang for lang in linguas.split()])
+ # Generate a locale archive of them
+ target_arch = self.d.getVar('SDK_ARCH')
+ rootfs = oe.path.join(self.sdk_host_sysroot, self.sdk_native_path)
+ localedir = oe.path.join(rootfs, self.d.getVar("libdir_nativesdk"), "locale")
+ generate_locale_archive(self.d, rootfs, target_arch, localedir)
+ # And now delete the binary locales
+ pkgs = fnmatch.filter(pm.list_installed(), "nativesdk-glibc-binary-localedata-*.utf-8")
+ pm.remove(pkgs)
+ else:
+ # No linguas so do nothing
+ pass
def sdk_list_installed_packages(d, target, rootfs_dir=None):
@@ -353,27 +117,22 @@ def sdk_list_installed_packages(d, target, rootfs_dir=None):
rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
+ if target is False:
+ ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK")
+ d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target)
+
img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- arch_var = ["SDK_PACKAGE_ARCHS", None][target is True]
- os_var = ["SDK_OS", None][target is True]
- return RpmPkgsList(d, rootfs_dir).list_pkgs()
- elif img_type == "ipk":
- conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True]
- return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var)).list_pkgs()
- elif img_type == "deb":
- return DpkgPkgsList(d, rootfs_dir).list_pkgs()
+ import importlib
+ cls = importlib.import_module('oe.package_manager.' + img_type)
+ return cls.PMPkgsList(d, rootfs_dir).list_pkgs()
def populate_sdk(d, manifest_dir=None):
env_bkp = os.environ.copy()
img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- RpmSdk(d, manifest_dir).populate()
- elif img_type == "ipk":
- OpkgSdk(d, manifest_dir).populate()
- elif img_type == "deb":
- DpkgSdk(d, manifest_dir).populate()
+ import importlib
+ cls = importlib.import_module('oe.package_manager.' + img_type + '.sdk')
+ cls.PkgSdk(d, manifest_dir).populate()
os.environ.clear()
os.environ.update(env_bkp)
diff --git a/meta/lib/oe/spdx.py b/meta/lib/oe/spdx.py
new file mode 100644
index 0000000000..7aaf2af5ed
--- /dev/null
+++ b/meta/lib/oe/spdx.py
@@ -0,0 +1,357 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+#
+# This library is intended to capture the JSON SPDX specification in a
+# type-safe manner. It is not intended to encode any particular OE-specific
+# behaviors; see sbom.py for that.
+#
+# The SPDX specification document doesn't cover the JSON syntax for
+# particular configurations, which can make it hard to determine what the
+# JSON syntax should be. I've found it is much simpler to read the official
+# SPDX JSON schema, found in schemas/spdx-schema.json at
+# https://github.com/spdx/spdx-spec
+#
+
+import hashlib
+import itertools
+import json
+
+SPDX_VERSION = "2.2"
+
+
+#
+# The following are the support classes used to implement SPDX objects
+#
+
+class _Property(object):
+ """
+ A generic SPDX object property. The different types will derive from this
+ class
+ """
+
+ def __init__(self, *, default=None):
+ self.default = default
+
+ def setdefault(self, dest, name):
+ if self.default is not None:
+ dest.setdefault(name, self.default)
+
+
+class _String(_Property):
+ """
+ A scalar string property for an SPDX object
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = value
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return source
+
+
+class _Object(_Property):
+ """
+ A scalar SPDX object property of a SPDX object
+ """
+
+ def __init__(self, cls, **kwargs):
+ super().__init__(**kwargs)
+ self.cls = cls
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ if not name in obj._spdx:
+ obj._spdx[name] = self.cls()
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = value
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper)
+
+ def init(self, source):
+ return self.cls(**source)
+
+
+class _ListProperty(_Property):
+ """
+ A list of SPDX properties
+ """
+
+ def __init__(self, prop, **kwargs):
+ super().__init__(**kwargs)
+ self.prop = prop
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ if not name in obj._spdx:
+ obj._spdx[name] = []
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = list(value)
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return [self.prop.init(o) for o in source]
+
+
+class _StringList(_ListProperty):
+ """
+ A list of strings as a property for an SPDX object
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(_String(), **kwargs)
+
+
+class _ObjectList(_ListProperty):
+ """
+ A list of SPDX objects as a property for an SPDX object
+ """
+
+ def __init__(self, cls, **kwargs):
+ super().__init__(_Object(cls), **kwargs)
+
+
+class MetaSPDXObject(type):
+ """
+ A metaclass that allows properties (anything derived from a _Property
+ class) to be defined for a SPDX object
+ """
+ def __new__(mcls, name, bases, attrs):
+ attrs["_properties"] = {}
+
+ for key in attrs.keys():
+ if isinstance(attrs[key], _Property):
+ prop = attrs[key]
+ attrs["_properties"][key] = prop
+ prop.set_property(attrs, key)
+
+ return super().__new__(mcls, name, bases, attrs)
+
+
+class SPDXObject(metaclass=MetaSPDXObject):
+ """
+ The base SPDX object; all SPDX spec classes must derive from this class
+ """
+ def __init__(self, **d):
+ self._spdx = {}
+
+ for name, prop in self._properties.items():
+ prop.setdefault(self._spdx, name)
+ if name in d:
+ self._spdx[name] = prop.init(d[name])
+
+ def serializer(self):
+ return self._spdx
+
+ def __setattr__(self, name, value):
+ if name in self._properties or name == "_spdx":
+ super().__setattr__(name, value)
+ return
+ raise KeyError("%r is not a valid SPDX property" % name)
+
+#
+# These are the SPDX objects implemented from the spec. The *only* properties
+# that can be added to these objects are ones directly specified in the SPDX
+# spec; however, you may add helper functions to make operations easier.
+#
+# Defaults should *only* be specified if the SPDX spec says there is a certain
+# required value for a field (e.g. dataLicense), or if the field is mandatory
+# and has some sane "this field is unknown" value (e.g. "NOASSERTION")
+#
+
+class SPDXAnnotation(SPDXObject):
+ annotationDate = _String()
+ annotationType = _String()
+ annotator = _String()
+ comment = _String()
+
+class SPDXChecksum(SPDXObject):
+ algorithm = _String()
+ checksumValue = _String()
+
+
+class SPDXRelationship(SPDXObject):
+ spdxElementId = _String()
+ relatedSpdxElement = _String()
+ relationshipType = _String()
+ comment = _String()
+ annotations = _ObjectList(SPDXAnnotation)
+
+
+class SPDXExternalReference(SPDXObject):
+ referenceCategory = _String()
+ referenceType = _String()
+ referenceLocator = _String()
+
+
+class SPDXPackageVerificationCode(SPDXObject):
+ packageVerificationCodeValue = _String()
+ packageVerificationCodeExcludedFiles = _StringList()
+
+
+class SPDXPackage(SPDXObject):
+ ALLOWED_CHECKSUMS = [
+ "SHA1",
+ "SHA224",
+ "SHA256",
+ "SHA384",
+ "SHA512",
+ "MD2",
+ "MD4",
+ "MD5",
+ "MD6",
+ ]
+
+ name = _String()
+ SPDXID = _String()
+ versionInfo = _String()
+ downloadLocation = _String(default="NOASSERTION")
+ supplier = _String(default="NOASSERTION")
+ homepage = _String()
+ licenseConcluded = _String(default="NOASSERTION")
+ licenseDeclared = _String(default="NOASSERTION")
+ summary = _String()
+ description = _String()
+ sourceInfo = _String()
+ copyrightText = _String(default="NOASSERTION")
+ licenseInfoFromFiles = _StringList(default=["NOASSERTION"])
+ externalRefs = _ObjectList(SPDXExternalReference)
+ packageVerificationCode = _Object(SPDXPackageVerificationCode)
+ hasFiles = _StringList()
+ packageFileName = _String()
+ annotations = _ObjectList(SPDXAnnotation)
+ checksums = _ObjectList(SPDXChecksum)
+
+
+class SPDXFile(SPDXObject):
+ SPDXID = _String()
+ fileName = _String()
+ licenseConcluded = _String(default="NOASSERTION")
+ copyrightText = _String(default="NOASSERTION")
+ licenseInfoInFiles = _StringList(default=["NOASSERTION"])
+ checksums = _ObjectList(SPDXChecksum)
+ fileTypes = _StringList()
+
+
+class SPDXCreationInfo(SPDXObject):
+ created = _String()
+ licenseListVersion = _String()
+ comment = _String()
+ creators = _StringList()
+
+
+class SPDXExternalDocumentRef(SPDXObject):
+ externalDocumentId = _String()
+ spdxDocument = _String()
+ checksum = _Object(SPDXChecksum)
+
+
+class SPDXExtractedLicensingInfo(SPDXObject):
+ name = _String()
+ comment = _String()
+ licenseId = _String()
+ extractedText = _String()
+
+
+class SPDXDocument(SPDXObject):
+ spdxVersion = _String(default="SPDX-" + SPDX_VERSION)
+ dataLicense = _String(default="CC0-1.0")
+ SPDXID = _String(default="SPDXRef-DOCUMENT")
+ name = _String()
+ documentNamespace = _String()
+ creationInfo = _Object(SPDXCreationInfo)
+ packages = _ObjectList(SPDXPackage)
+ files = _ObjectList(SPDXFile)
+ relationships = _ObjectList(SPDXRelationship)
+ externalDocumentRefs = _ObjectList(SPDXExternalDocumentRef)
+ hasExtractedLicensingInfos = _ObjectList(SPDXExtractedLicensingInfo)
+
+ def __init__(self, **d):
+ super().__init__(**d)
+
+ def to_json(self, f, *, sort_keys=False, indent=None, separators=None):
+ class Encoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, SPDXObject):
+ return o.serializer()
+
+ return super().default(o)
+
+ sha1 = hashlib.sha1()
+ for chunk in Encoder(
+ sort_keys=sort_keys,
+ indent=indent,
+ separators=separators,
+ ).iterencode(self):
+ chunk = chunk.encode("utf-8")
+ f.write(chunk)
+ sha1.update(chunk)
+
+ return sha1.hexdigest()
+
+ @classmethod
+ def from_json(cls, f):
+ return cls(**json.load(f))
+
+ def add_relationship(self, _from, relationship, _to, *, comment=None, annotation=None):
+ if isinstance(_from, SPDXObject):
+ from_spdxid = _from.SPDXID
+ else:
+ from_spdxid = _from
+
+ if isinstance(_to, SPDXObject):
+ to_spdxid = _to.SPDXID
+ else:
+ to_spdxid = _to
+
+ r = SPDXRelationship(
+ spdxElementId=from_spdxid,
+ relatedSpdxElement=to_spdxid,
+ relationshipType=relationship,
+ )
+
+ if comment is not None:
+ r.comment = comment
+
+ if annotation is not None:
+ r.annotations.append(annotation)
+
+ self.relationships.append(r)
+
+ def find_by_spdxid(self, spdxid):
+ for o in itertools.chain(self.packages, self.files):
+ if o.SPDXID == spdxid:
+ return o
+ return None
+
+ def find_external_document_ref(self, namespace):
+ for r in self.externalDocumentRefs:
+ if r.spdxDocument == namespace:
+ return r
+ return None
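A minimal sketch of how these classes fit together; the field values are invented, but the calls (property assignment, add_relationship(), to_json()) are the ones defined above:

    import io

    doc = SPDXDocument(name="example-doc")
    doc.documentNamespace = "http://spdx.org/spdxdocs/example-doc"
    doc.creationInfo.created = "2023-01-01T00:00:00Z"
    doc.creationInfo.creators.append("Tool: example")

    pkg = SPDXPackage(name="example", SPDXID="SPDXRef-Package-example")
    doc.packages.append(pkg)
    doc.add_relationship(doc, "DESCRIBES", pkg)

    buf = io.BytesIO()
    sha1 = doc.to_json(buf, sort_keys=True, indent=2)  # returns the SHA-1 of the serialized JSON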
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
index 3a8778eae0..a46e5502ab 100644
--- a/meta/lib/oe/sstatesig.py
+++ b/meta/lib/oe/sstatesig.py
@@ -1,6 +1,14 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import bb.siggen
+import bb.runqueue
+import oe
+import netrc
-def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
+def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
# Return True if we should keep the dependency, False to drop it
def isNative(x):
return x.endswith("-native")
@@ -8,35 +16,43 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
return "-cross-" in x
def isNativeSDK(x):
return x.startswith("nativesdk-")
- def isKernel(fn):
- inherits = " ".join(dataCache.inherits[fn])
+ def isKernel(mc, fn):
+ inherits = " ".join(dataCaches[mc].inherits[fn])
return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
- def isPackageGroup(fn):
- inherits = " ".join(dataCache.inherits[fn])
+ def isPackageGroup(mc, fn):
+ inherits = " ".join(dataCaches[mc].inherits[fn])
return "/packagegroup.bbclass" in inherits
- def isAllArch(fn):
- inherits = " ".join(dataCache.inherits[fn])
+ def isAllArch(mc, fn):
+ inherits = " ".join(dataCaches[mc].inherits[fn])
return "/allarch.bbclass" in inherits
- def isImage(fn):
- return "/image.bbclass" in " ".join(dataCache.inherits[fn])
-
- # (Almost) always include our own inter-task dependencies.
- # The exception is the special do_kernel_configme->do_unpack_and_patch
- # dependency from archiver.bbclass.
- if recipename == depname:
- if task == "do_kernel_configme" and dep.endswith(".do_unpack_and_patch"):
- return False
- return True
+ def isImage(mc, fn):
+ return "/image.bbclass" in " ".join(dataCaches[mc].inherits[fn])
- # Quilt (patch application) changing isn't likely to affect anything
- excludelist = ['quilt-native', 'subversion-native', 'git-native', 'ccache-native']
- if depname in excludelist and recipename != depname:
+ depmc, _, deptaskname, depmcfn = bb.runqueue.split_tid_mcfn(dep)
+ mc, _ = bb.runqueue.split_mc(fn)
+
+ # We can skip the rm_work task signature to avoid running the task
+ # when we remove some tasks from the dependency chain,
+ # e.g. INHERIT:remove = "create-spdx" will trigger do_rm_work
+ if task == "do_rm_work":
return False
+ # (Almost) always include our own inter-task dependencies (unless they come
+ # from an mcdepends). The exception is the special
+ # do_kernel_configme->do_unpack_and_patch dependency from archiver.bbclass.
+ if recipename == depname and depmc == mc:
+ if task == "do_kernel_configme" and deptaskname == "do_unpack_and_patch":
+ return False
+ return True
+
# Exclude well defined recipe->dependency
if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
return False
+ # Check for special wildcard
+ if "*->%s" % depname in siggen.saferecipedeps and recipename != depname:
+ return False
+
# Don't change native/cross/nativesdk recipe dependencies any further
if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
return True
@@ -44,22 +60,21 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
# Only target packages beyond here
 # allarch packagegroups are assumed to have well-behaved names which don't change between architectures/tunes
- if isPackageGroup(fn) and isAllArch(fn) and not isNative(depname):
- return False
+ if isPackageGroup(mc, fn) and isAllArch(mc, fn) and not isNative(depname):
+ return False
# Exclude well defined machine specific configurations which don't change ABI
- if depname in siggen.abisaferecipes and not isImage(fn):
+ if depname in siggen.abisaferecipes and not isImage(mc, fn):
return False
# Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
- # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum
+ # if we're just doing an RRECOMMENDS:xxx = "kernel-module-*", not least because the checksum
# is machine specific.
# Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
 # and we recommend a kernel-module, we exclude the dependency.
- depfn = dep.rsplit(".", 1)[0]
- if dataCache and isKernel(depfn) and not isKernel(fn):
- for pkg in dataCache.runrecs[fn]:
- if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1:
+ if dataCaches and isKernel(depmc, depmcfn) and not isKernel(mc, fn):
+ for pkg in dataCaches[mc].runrecs[fn]:
+ if " ".join(dataCaches[mc].runrecs[fn][pkg]).find("kernel-module-") != -1:
return False
# Default to keep dependencies
@@ -78,17 +93,9 @@ def sstate_lockedsigs(d):
sigs[pn][task] = [h, siggen_lockedsigs_var]
return sigs
-class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
- name = "OEBasic"
- def init_rundepcheck(self, data):
- self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
- self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
- pass
- def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
- return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
+class SignatureGeneratorOEBasicHashMixIn(object):
+ supports_multiconfig_datacaches = True
-class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
- name = "OEBasicHash"
def init_rundepcheck(self, data):
self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
@@ -98,9 +105,12 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
self.lockedhashfn = {}
self.machine = data.getVar("MACHINE")
self.mismatch_msgs = []
+ self.mismatch_number = 0
+ self.lockedsigs_msgs = ""
self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
"").split()
self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
+ self._internal = False
pass
def tasks_resolved(self, virtmap, virtpnmap, dataCache):
@@ -122,125 +132,170 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
newsafedeps.append(a1 + "->" + a2)
self.saferecipedeps = newsafedeps
- def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
- return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
+ def rundep_check(self, fn, recipename, task, dep, depname, dataCaches = None):
+ return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCaches)
def get_taskdata(self):
- data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata()
- return (data, self.lockedpnmap, self.lockedhashfn)
+ return (self.lockedpnmap, self.lockedhashfn, self.lockedhashes) + super().get_taskdata()
def set_taskdata(self, data):
- coredata, self.lockedpnmap, self.lockedhashfn = data
- super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata)
+ self.lockedpnmap, self.lockedhashfn, self.lockedhashes = data[:3]
+ super().set_taskdata(data[3:])
def dump_sigs(self, dataCache, options):
- sigfile = os.getcwd() + "/locked-sigs.inc"
- bb.plain("Writing locked sigs to %s" % sigfile)
- self.dump_lockedsigs(sigfile)
+ if 'lockedsigs' in options:
+ sigfile = os.getcwd() + "/locked-sigs.inc"
+ bb.plain("Writing locked sigs to %s" % sigfile)
+ self.dump_lockedsigs(sigfile)
return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
- def get_taskhash(self, fn, task, deps, dataCache):
- h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(fn, task, deps, dataCache)
- recipename = dataCache.pkg_fn[fn]
+ def get_taskhash(self, tid, deps, dataCaches):
+ if tid in self.lockedhashes:
+ if self.lockedhashes[tid]:
+ return self.lockedhashes[tid]
+ else:
+ return super().get_taskhash(tid, deps, dataCaches)
+
+ h = super().get_taskhash(tid, deps, dataCaches)
+
+ (mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
+
+ recipename = dataCaches[mc].pkg_fn[fn]
self.lockedpnmap[fn] = recipename
- self.lockedhashfn[fn] = dataCache.hashfn[fn]
+ self.lockedhashfn[fn] = dataCaches[mc].hashfn[fn]
unlocked = False
if recipename in self.unlockedrecipes:
unlocked = True
else:
def recipename_from_dep(dep):
- # The dep entry will look something like
- # /path/path/recipename.bb.task, virtual:native:/p/foo.bb.task,
- # ...
- fn = dep.rsplit('.', 1)[0]
- return dataCache.pkg_fn[fn]
+ (depmc, _, _, depfn) = bb.runqueue.split_tid_mcfn(dep)
+ return dataCaches[depmc].pkg_fn[depfn]
# If any unlocked recipe is in the direct dependencies then the
# current recipe should be unlocked as well.
- depnames = [ recipename_from_dep(x) for x in deps ]
+ depnames = [ recipename_from_dep(x) for x in deps if mc == bb.runqueue.mc_from_tid(x)]
if any(x in y for y in depnames for x in self.unlockedrecipes):
self.unlockedrecipes[recipename] = ''
unlocked = True
if not unlocked and recipename in self.lockedsigs:
if task in self.lockedsigs[recipename]:
- k = fn + "." + task
h_locked = self.lockedsigs[recipename][task][0]
var = self.lockedsigs[recipename][task][1]
- self.lockedhashes[k] = h_locked
- self.taskhash[k] = h_locked
+ self.lockedhashes[tid] = h_locked
+ self._internal = True
+ unihash = self.get_unihash(tid)
+ self._internal = False
#bb.warn("Using %s %s %s" % (recipename, task, h))
- if h != h_locked:
+ if h != h_locked and h_locked != unihash:
+ self.mismatch_number += 1
self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
% (recipename, task, h, h_locked, var))
return h_locked
+
+ self.lockedhashes[tid] = False
#bb.warn("%s %s %s" % (recipename, task, h))
return h
+ def get_stampfile_hash(self, tid):
+ if tid in self.lockedhashes and self.lockedhashes[tid]:
+ return self.lockedhashes[tid]
+ return super().get_stampfile_hash(tid)
+
+ def get_cached_unihash(self, tid):
+ if tid in self.lockedhashes and self.lockedhashes[tid] and not self._internal:
+ return self.lockedhashes[tid]
+ return super().get_cached_unihash(tid)
+
def dump_sigtask(self, fn, task, stampbase, runtime):
- k = fn + "." + task
- if k in self.lockedhashes:
+ tid = fn + ":" + task
+ if tid in self.lockedhashes and self.lockedhashes[tid]:
return
super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)
def dump_lockedsigs(self, sigfile, taskfilter=None):
types = {}
- for k in self.runtaskdeps:
+ for tid in self.runtaskdeps:
+ # Bitbake changed this to a tuple in newer versions
+ if isinstance(tid, tuple):
+ tid = tid[1]
if taskfilter:
- if not k in taskfilter:
+ if not tid in taskfilter:
continue
- fn = k.rsplit(".",1)[0]
+ fn = bb.runqueue.fn_from_tid(tid)
t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
t = 't-' + t.replace('_', '-')
if t not in types:
types[t] = []
- types[t].append(k)
+ types[t].append(tid)
with open(sigfile, "w") as f:
l = sorted(types)
for t in l:
f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
types[t].sort()
- sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".",1)[0]])
- for k in sortedk:
- fn = k.rsplit(".",1)[0]
- task = k.rsplit(".",1)[1]
- if k not in self.taskhash:
+ sortedtid = sorted(types[t], key=lambda tid: self.lockedpnmap[bb.runqueue.fn_from_tid(tid)])
+ for tid in sortedtid:
+ (_, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
+ if tid not in self.taskhash:
continue
- f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n")
+ f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.get_unihash(tid) + " \\\n")
f.write(' "\n')
- f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(l)))
+ f.write('SIGGEN_LOCKEDSIGS_TYPES:%s = "%s"' % (self.machine, " ".join(l)))
+
+ def dump_siglist(self, sigfile, path_prefix_strip=None):
+ def strip_fn(fn):
+ nonlocal path_prefix_strip
+ if not path_prefix_strip:
+ return fn
+
+ fn_exp = fn.split(":")
+ if fn_exp[-1].startswith(path_prefix_strip):
+ fn_exp[-1] = fn_exp[-1][len(path_prefix_strip):]
+
+ return ":".join(fn_exp)
- def dump_siglist(self, sigfile):
with open(sigfile, "w") as f:
tasks = []
for taskitem in self.taskhash:
- (fn, task) = taskitem.rsplit(".", 1)
+ (fn, task) = taskitem.rsplit(":", 1)
pn = self.lockedpnmap[fn]
- tasks.append((pn, task, fn, self.taskhash[taskitem]))
+ tasks.append((pn, task, strip_fn(fn), self.taskhash[taskitem]))
for (pn, task, fn, taskhash) in sorted(tasks):
- f.write('%s.%s %s %s\n' % (pn, task, fn, taskhash))
+ f.write('%s:%s %s %s\n' % (pn, task, fn, taskhash))
- def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d):
+ def checkhashes(self, sq_data, missed, found, d):
warn_msgs = []
error_msgs = []
sstate_missing_msgs = []
+ info_msgs = None
+
+ if self.lockedsigs:
+ if len(self.lockedsigs) > 10:
+ self.lockedsigs_msgs = "There are %s recipes with locked tasks (%s task(s) have non-matching signatures)" % (len(self.lockedsigs), self.mismatch_number)
+ else:
+ self.lockedsigs_msgs = "The following recipes have locked tasks:"
+ for pn in self.lockedsigs:
+ self.lockedsigs_msgs += " %s" % (pn)
- for task in range(len(sq_fn)):
- if task not in ret:
+ for tid in sq_data['hash']:
+ if tid not in found:
for pn in self.lockedsigs:
- if sq_hash[task] in iter(self.lockedsigs[pn].values()):
- if sq_task[task] == 'do_shared_workdir':
+ taskname = bb.runqueue.taskname_from_tid(tid)
+ if sq_data['hash'][tid] in iter(self.lockedsigs[pn].values()):
+ if taskname == 'do_shared_workdir':
continue
sstate_missing_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
- % (pn, sq_task[task], sq_hash[task]))
+ % (pn, taskname, sq_data['hash'][tid]))
checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK")
- if checklevel == 'warn':
+ if checklevel == 'info':
+ info_msgs = self.lockedsigs_msgs
+ if checklevel == 'warn' or checklevel == 'info':
warn_msgs += self.mismatch_msgs
elif checklevel == 'error':
error_msgs += self.mismatch_msgs
@@ -251,15 +306,44 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
elif checklevel == 'error':
error_msgs += sstate_missing_msgs
+ if info_msgs:
+ bb.note(info_msgs)
if warn_msgs:
bb.warn("\n".join(warn_msgs))
if error_msgs:
bb.fatal("\n".join(error_msgs))
+class SignatureGeneratorOEBasicHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
+ name = "OEBasicHash"
+
+class SignatureGeneratorOEEquivHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorUniHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
+ name = "OEEquivHash"
+
+ def init_rundepcheck(self, data):
+ super().init_rundepcheck(data)
+ self.server = data.getVar('BB_HASHSERVE')
+ if not self.server:
+ bb.fatal("OEEquivHash requires BB_HASHSERVE to be set")
+ self.method = data.getVar('SSTATE_HASHEQUIV_METHOD')
+ if not self.method:
+ bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_METHOD to be set")
+ self.max_parallel = int(data.getVar('BB_HASHSERVE_MAX_PARALLEL') or 1)
+ self.username = data.getVar("BB_HASHSERVE_USERNAME")
+ self.password = data.getVar("BB_HASHSERVE_PASSWORD")
+ if not self.username or not self.password:
+ try:
+ n = netrc.netrc()
+ auth = n.authenticators(self.server)
+ if auth is not None:
+ self.username, _, self.password = auth
+ except FileNotFoundError:
+ pass
+ except netrc.NetrcParseError as e:
+ bb.warn("Error parsing %s:%s: %s" % (e.filename, str(e.lineno), e.msg))
# Insert these classes into siggen's namespace so it can see and select them
-bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
+bb.siggen.SignatureGeneratorOEEquivHash = SignatureGeneratorOEEquivHash
def find_siginfo(pn, taskname, taskhashlist, d):
@@ -271,14 +355,14 @@ def find_siginfo(pn, taskname, taskhashlist, d):
if not taskname:
# We have to derive pn and taskname
key = pn
- splitit = key.split('.bb.')
- taskname = splitit[1]
- pn = os.path.basename(splitit[0]).split('_')[0]
- if key.startswith('virtual:native:'):
- pn = pn + '-native'
+ if key.startswith("mc:"):
+ # mc:<mc>:<pn>:<task>
+ _, _, pn, taskname = key.split(':', 3)
+ else:
+ # <pn>:<task>
+ pn, taskname = key.split(':', 1)
hashfiles = {}
- filedates = {}
def get_hashval(siginfo):
if siginfo.endswith('.siginfo'):
@@ -286,6 +370,9 @@ def find_siginfo(pn, taskname, taskhashlist, d):
else:
return siginfo.rpartition('.')[2]
+ def get_time(fullpath):
+ return os.stat(fullpath).st_mtime
+
# First search in stamps dir
localdata = d.createCopy()
localdata.setVar('MULTIMACH_TARGET_SYS', '*')
@@ -301,61 +388,55 @@ def find_siginfo(pn, taskname, taskhashlist, d):
filespec = '%s.%s.sigdata.*' % (stamp, taskname)
foundall = False
import glob
+ bb.debug(1, "Calling glob.glob on {}".format(filespec))
for fullpath in glob.glob(filespec):
match = False
if taskhashlist:
for taskhash in taskhashlist:
if fullpath.endswith('.%s' % taskhash):
- hashfiles[taskhash] = fullpath
+ hashfiles[taskhash] = {'path':fullpath, 'sstate':False, 'time':get_time(fullpath)}
if len(hashfiles) == len(taskhashlist):
foundall = True
break
else:
- try:
- filedates[fullpath] = os.stat(fullpath).st_mtime
- except OSError:
- continue
hashval = get_hashval(fullpath)
- hashfiles[hashval] = fullpath
+ hashfiles[hashval] = {'path':fullpath, 'sstate':False, 'time':get_time(fullpath)}
- if not taskhashlist or (len(filedates) < 2 and not foundall):
+ if not taskhashlist or (len(hashfiles) < 2 and not foundall):
# That didn't work, look in sstate-cache
- hashes = taskhashlist or ['?' * 32]
+ hashes = taskhashlist or ['?' * 64]
localdata = bb.data.createCopy(d)
for hashval in hashes:
localdata.setVar('PACKAGE_ARCH', '*')
localdata.setVar('TARGET_VENDOR', '*')
localdata.setVar('TARGET_OS', '*')
localdata.setVar('PN', pn)
+ # gcc-source is a special case, same as with local stamps above
+ if pn.startswith("gcc-source"):
+ localdata.setVar('PN', "gcc")
localdata.setVar('PV', '*')
localdata.setVar('PR', '*')
localdata.setVar('BB_TASKHASH', hashval)
+ localdata.setVar('SSTATE_CURRTASK', taskname[3:])
swspec = localdata.getVar('SSTATE_SWSPEC')
if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic', 'do_preconfigure'] and swspec:
localdata.setVar('SSTATE_PKGSPEC', '${SSTATE_SWSPEC}')
elif pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
- sstatename = taskname[3:]
- filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG'), sstatename)
+ filespec = '%s.siginfo' % localdata.getVar('SSTATE_PKG')
+ bb.debug(1, "Calling glob.glob on {}".format(filespec))
matchedfiles = glob.glob(filespec)
for fullpath in matchedfiles:
actual_hashval = get_hashval(fullpath)
if actual_hashval in hashfiles:
continue
- hashfiles[hashval] = fullpath
- if not taskhashlist:
- try:
- filedates[fullpath] = os.stat(fullpath).st_mtime
- except:
- continue
+ hashfiles[actual_hashval] = {'path':fullpath, 'sstate':True, 'time':get_time(fullpath)}
- if taskhashlist:
- return hashfiles
- else:
- return filedates
+ return hashfiles
bb.siggen.find_siginfo = find_siginfo
+bb.siggen.find_siginfo_version = 2
def sstate_get_manifest_filename(task, d):
@@ -368,3 +449,238 @@ def sstate_get_manifest_filename(task, d):
if extrainf:
d2.setVar("SSTATE_MANMACH", extrainf)
return (d2.expand("${SSTATE_MANFILEPREFIX}.%s" % task), d2)
+
+def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
+ d2 = d
+ variant = ''
+ curr_variant = ''
+ if d.getVar("BBEXTENDCURR") == "multilib":
+ curr_variant = d.getVar("BBEXTENDVARIANT")
+ if "virtclass-multilib" not in d.getVar("OVERRIDES"):
+ curr_variant = "invalid"
+ if taskdata2.startswith("virtual:multilib"):
+ variant = taskdata2.split(":")[2]
+ if curr_variant != variant:
+ if variant not in multilibcache:
+ multilibcache[variant] = oe.utils.get_multilib_datastore(variant, d)
+ d2 = multilibcache[variant]
+
+ if taskdata.endswith("-native"):
+ pkgarchs = ["${BUILD_ARCH}", "${BUILD_ARCH}_${ORIGNATIVELSBSTRING}"]
+ elif taskdata.startswith("nativesdk-"):
+ pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
+ elif "-cross-canadian" in taskdata:
+ pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
+ elif "-cross-" in taskdata:
+ pkgarchs = ["${BUILD_ARCH}"]
+ elif "-crosssdk" in taskdata:
+ pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
+ else:
+ pkgarchs = ['${MACHINE_ARCH}']
+ pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
+ pkgarchs.append('allarch')
+ pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')
+
+ searched_manifests = []
+
+ for pkgarch in pkgarchs:
+ manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
+ if os.path.exists(manifest):
+ return manifest, d2
+ searched_manifests.append(manifest)
+ bb.fatal("The sstate manifest for task '%s:%s' (multilib variant '%s') could not be found.\nThe pkgarchs considered were: %s.\nBut none of these manifests exists:\n %s"
+ % (taskdata, taskname, variant, d2.expand(", ".join(pkgarchs)),"\n ".join(searched_manifests)))
+ return None, d2
+
+def OEOuthashBasic(path, sigfile, task, d):
+ """
+ Basic output hash function
+
+ Calculates the output hash of a task by hashing all output file metadata
+ and file contents.
+ """
+ import hashlib
+ import stat
+ import pwd
+ import grp
+ import re
+ import fnmatch
+
+ def update_hash(s):
+ s = s.encode('utf-8')
+ h.update(s)
+ if sigfile:
+ sigfile.write(s)
+
+ h = hashlib.sha256()
+ prev_dir = os.getcwd()
+ corebase = d.getVar("COREBASE")
+ tmpdir = d.getVar("TMPDIR")
+ include_owners = os.environ.get('PSEUDO_DISABLED') == '0'
+ if "package_write_" in task or task == "package_qa":
+ include_owners = False
+ include_timestamps = False
+ include_root = True
+ if task == "package":
+ include_timestamps = True
+ include_root = False
+ hash_version = d.getVar('HASHEQUIV_HASH_VERSION')
+ extra_sigdata = d.getVar("HASHEQUIV_EXTRA_SIGDATA")
+
+ filemaps = {}
+ for m in (d.getVar('SSTATE_HASHEQUIV_FILEMAP') or '').split():
+ entry = m.split(":")
+ if len(entry) != 3 or entry[0] != task:
+ continue
+ filemaps.setdefault(entry[1], [])
+ filemaps[entry[1]].append(entry[2])
+
+ try:
+ os.chdir(path)
+ basepath = os.path.normpath(path)
+
+ update_hash("OEOuthashBasic\n")
+ if hash_version:
+ update_hash(hash_version + "\n")
+
+ if extra_sigdata:
+ update_hash(extra_sigdata + "\n")
+
+ # It is only currently useful to get equivalent hashes for things that
+ # can be restored from sstate. Since the sstate object is named using
+ # SSTATE_PKGSPEC and the task name, those should be included in the
+ # output hash calculation.
+ update_hash("SSTATE_PKGSPEC=%s\n" % d.getVar('SSTATE_PKGSPEC'))
+ update_hash("task=%s\n" % task)
+
+ for root, dirs, files in os.walk('.', topdown=True):
+ # Sort directories to ensure consistent ordering when recursing
+ dirs.sort()
+ files.sort()
+
+ def process(path):
+ s = os.lstat(path)
+
+ if stat.S_ISDIR(s.st_mode):
+ update_hash('d')
+ elif stat.S_ISCHR(s.st_mode):
+ update_hash('c')
+ elif stat.S_ISBLK(s.st_mode):
+ update_hash('b')
+ elif stat.S_ISSOCK(s.st_mode):
+ update_hash('s')
+ elif stat.S_ISLNK(s.st_mode):
+ update_hash('l')
+ elif stat.S_ISFIFO(s.st_mode):
+ update_hash('p')
+ else:
+ update_hash('-')
+
+ def add_perm(mask, on, off='-'):
+ if mask & s.st_mode:
+ update_hash(on)
+ else:
+ update_hash(off)
+
+ add_perm(stat.S_IRUSR, 'r')
+ add_perm(stat.S_IWUSR, 'w')
+ if stat.S_ISUID & s.st_mode:
+ add_perm(stat.S_IXUSR, 's', 'S')
+ else:
+ add_perm(stat.S_IXUSR, 'x')
+
+ if include_owners:
+ # Group/other permissions are only relevant in pseudo context
+ add_perm(stat.S_IRGRP, 'r')
+ add_perm(stat.S_IWGRP, 'w')
+ if stat.S_ISGID & s.st_mode:
+ add_perm(stat.S_IXGRP, 's', 'S')
+ else:
+ add_perm(stat.S_IXGRP, 'x')
+
+ add_perm(stat.S_IROTH, 'r')
+ add_perm(stat.S_IWOTH, 'w')
+ if stat.S_ISVTX & s.st_mode:
+ update_hash('t')
+ else:
+ add_perm(stat.S_IXOTH, 'x')
+
+ try:
+ update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
+ update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
+ except KeyError as e:
+ msg = ("KeyError: %s\nPath %s is owned by uid %d, gid %d, which doesn't match "
+ "any user/group on target. This may be due to host contamination." %
+ (e, os.path.abspath(path), s.st_uid, s.st_gid))
+ raise Exception(msg).with_traceback(e.__traceback__)
+
+ if include_timestamps:
+ update_hash(" %10d" % s.st_mtime)
+
+ update_hash(" ")
+ if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
+ update_hash("%9s" % ("%d.%d" % (os.major(s.st_rdev), os.minor(s.st_rdev))))
+ else:
+ update_hash(" " * 9)
+
+ filterfile = False
+ for entry in filemaps:
+ if fnmatch.fnmatch(path, entry):
+ filterfile = True
+
+ update_hash(" ")
+ if stat.S_ISREG(s.st_mode) and not filterfile:
+ update_hash("%10d" % s.st_size)
+ else:
+ update_hash(" " * 10)
+
+ update_hash(" ")
+ fh = hashlib.sha256()
+ if stat.S_ISREG(s.st_mode):
+ # Hash file contents
+ if filterfile:
+ # Need to ignore paths in crossscripts and postinst-useradd files.
+ with open(path, 'rb') as d:
+ chunk = d.read()
+ chunk = chunk.replace(bytes(basepath, encoding='utf8'), b'')
+ for entry in filemaps:
+ if not fnmatch.fnmatch(path, entry):
+ continue
+ for r in filemaps[entry]:
+ if r.startswith("regex-"):
+ chunk = re.sub(bytes(r[6:], encoding='utf8'), b'', chunk)
+ else:
+ chunk = chunk.replace(bytes(r, encoding='utf8'), b'')
+ fh.update(chunk)
+ else:
+ with open(path, 'rb') as d:
+ for chunk in iter(lambda: d.read(4096), b""):
+ fh.update(chunk)
+ update_hash(fh.hexdigest())
+ else:
+ update_hash(" " * len(fh.hexdigest()))
+
+ update_hash(" %s" % path)
+
+ if stat.S_ISLNK(s.st_mode):
+ update_hash(" -> %s" % os.readlink(path))
+
+ update_hash("\n")
+
+ # Process this directory and all its child files
+ if include_root or root != ".":
+ process(root)
+ for f in files:
+ if f == 'fixmepath':
+ continue
+ process(os.path.join(root, f))
+
+ for dir in dirs:
+ if os.path.islink(os.path.join(root, dir)):
+ process(os.path.join(root, dir))
+ finally:
+ os.chdir(prev_dir)
+
+ return h.hexdigest()
+
+
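For context, a standalone sketch of the ls-style mode prefix OEOuthashBasic emits for each entry; this is simplified to the user permission bits only, whereas the real function also covers group/other bits, ownership, timestamps, sizes and content hashes:

    import os
    import stat

    def mode_prefix(path):
        s = os.lstat(path)
        out = 'd' if stat.S_ISDIR(s.st_mode) else 'l' if stat.S_ISLNK(s.st_mode) else '-'
        out += 'r' if s.st_mode & stat.S_IRUSR else '-'
        out += 'w' if s.st_mode & stat.S_IWUSR else '-'
        if s.st_mode & stat.S_ISUID:
            out += 's' if s.st_mode & stat.S_IXUSR else 'S'
        else:
            out += 'x' if s.st_mode & stat.S_IXUSR else '-'
        return out

    print(mode_prefix("/tmp"))  # typically "drwx"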
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
index 94afe394ed..4412bc14c1 100644
--- a/meta/lib/oe/terminal.py
+++ b/meta/lib/oe/terminal.py
@@ -1,8 +1,12 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import logging
import oe.classutils
import shlex
from bb.process import Popen, ExecutionError
-from distutils.version import LooseVersion
logger = logging.getLogger('BitBake.OE.Terminal')
@@ -28,9 +32,10 @@ class Registry(oe.classutils.ClassRegistry):
class Terminal(Popen, metaclass=Registry):
def __init__(self, sh_cmd, title=None, env=None, d=None):
+ from subprocess import STDOUT
fmt_sh_cmd = self.format_command(sh_cmd, title)
try:
- Popen.__init__(self, fmt_sh_cmd, env=env)
+ Popen.__init__(self, fmt_sh_cmd, env=env, stderr=STDOUT)
except OSError as exc:
import errno
if exc.errno == errno.ENOENT:
@@ -39,7 +44,7 @@ class Terminal(Popen, metaclass=Registry):
raise
def format_command(self, sh_cmd, title):
- fmt = {'title': title or 'Terminal', 'command': sh_cmd}
+ fmt = {'title': title or 'Terminal', 'command': sh_cmd, 'cwd': os.getcwd() }
if isinstance(self.command, str):
return shlex.split(self.command.format(**fmt))
else:
@@ -52,7 +57,7 @@ class XTerminal(Terminal):
raise UnsupportedTerminal(self.name)
class Gnome(XTerminal):
- command = 'gnome-terminal -t "{title}" -x {command}'
+ command = 'gnome-terminal -t "{title}" -- {command}'
priority = 2
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -83,10 +88,10 @@ class Konsole(XTerminal):
def __init__(self, sh_cmd, title=None, env=None, d=None):
# Check version
vernum = check_terminal_version("konsole")
- if vernum and LooseVersion(vernum) < '2.0.0':
+ if vernum and bb.utils.vercmp_string_op(vernum, "2.0.0", "<"):
# Konsole from KDE 3.x
self.command = 'konsole -T "{title}" -e {command}'
- elif vernum and LooseVersion(vernum) < '16.08.1':
+ elif vernum and bb.utils.vercmp_string_op(vernum, "16.08.1", "<"):
# Konsole pre 16.08.01 Has nofork
self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
XTerminal.__init__(self, sh_cmd, title, env, d)
@@ -99,6 +104,10 @@ class Rxvt(XTerminal):
command = 'rxvt -T "{title}" -e {command}'
priority = 1
+class URxvt(XTerminal):
+ command = 'urxvt -T "{title}" -e {command}'
+ priority = 1
+
class Screen(Terminal):
command = 'screen -D -m -t "{title}" -S devshell {command}'
@@ -112,12 +121,12 @@ class Screen(Terminal):
bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
0.5, 10), d)
else:
- logger.warn(msg)
+ logger.warning(msg)
class TmuxRunning(Terminal):
"""Open a new pane in the current running tmux window"""
name = 'tmux-running'
- command = 'tmux split-window "{command}"'
+ command = 'tmux split-window -c "{cwd}" "{command}"'
priority = 2.75
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -135,7 +144,7 @@ class TmuxRunning(Terminal):
class TmuxNewWindow(Terminal):
"""Open a new window in the current running tmux session"""
name = 'tmux-new-window'
- command = 'tmux new-window -n "{title}" "{command}"'
+ command = 'tmux new-window -c "{cwd}" -n "{title}" "{command}"'
priority = 2.70
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -149,7 +158,7 @@ class TmuxNewWindow(Terminal):
class Tmux(Terminal):
"""Start a new tmux session and window"""
- command = 'tmux new -d -s devshell -n devshell "{command}"'
+ command = 'tmux new -c "{cwd}" -d -s devshell -n devshell "{command}"'
priority = 0.75
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -160,7 +169,12 @@ class Tmux(Terminal):
# devshells, if it's already there, add a new window to it.
window_name = 'devshell-%i' % os.getpid()
- self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name)
+ self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'
+ if not check_tmux_version('1.9'):
+ # `tmux new-session -c` was added in 1.9;
+ # older versions fail with that flag
+ self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'
+ self.command = self.command.format(window_name)
Terminal.__init__(self, sh_cmd, title, env, d)
attach_cmd = 'tmux att -t {0}'.format(window_name)
@@ -168,7 +182,7 @@ class Tmux(Terminal):
if d:
bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
else:
- logger.warn(msg)
+ logger.warning(msg)
class Custom(Terminal):
command = 'false' # This is a placeholder
@@ -180,9 +194,9 @@ class Custom(Terminal):
if not '{command}' in self.command:
self.command += ' {command}'
Terminal.__init__(self, sh_cmd, title, env, d)
- logger.warn('Custom terminal was started.')
+ logger.warning('Custom terminal was started.')
else:
- logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
+ logger.debug('No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
@@ -204,13 +218,16 @@ def spawn_preferred(sh_cmd, title=None, env=None, d=None):
spawn(terminal.name, sh_cmd, title, env, d)
break
except UnsupportedTerminal:
- continue
+ pass
+ except:
+ bb.warn("Terminal %s is supported but did not start" % (terminal.name))
+ # when we've run out of options
else:
raise NoSupportedTerminals(get_cmd_list())
def spawn(name, sh_cmd, title=None, env=None, d=None):
"""Spawn the specified terminal, by name"""
- logger.debug(1, 'Attempting to spawn terminal "%s"', name)
+ logger.debug('Attempting to spawn terminal "%s"', name)
try:
terminal = Registry.registry[name]
except KeyError:
@@ -247,13 +264,18 @@ def spawn(name, sh_cmd, title=None, env=None, d=None):
except OSError:
return
+def check_tmux_version(desired):
+ vernum = check_terminal_version("tmux")
+ if vernum and bb.utils.vercmp_string_op(vernum, desired, "<"):
+ return False
+ return vernum
+
def check_tmux_pane_size(tmux):
import subprocess as sub
# On older tmux versions (<1.9), return False. The reason
# is that there is no easy way to get the height of the active pane
# in the current window without nested formats (available from version 1.9)
- vernum = check_terminal_version("tmux")
- if vernum and LooseVersion(vernum) < '1.9':
+ if not check_tmux_version('1.9'):
return False
try:
p = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux,
@@ -296,6 +318,8 @@ def check_terminal_version(terminalName):
vernum = ver.split(' ')[-1]
if ver.startswith('tmux'):
vernum = ver.split()[-1]
+ if ver.startswith('tmux next-'):
+ vernum = ver.split()[-1][5:]
return vernum
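Development builds of tmux report versions like "tmux next-3.4", so the new branch strips the "next-" prefix before the numeric comparison. A standalone sketch of just that string handling (parse_tmux_version is a hypothetical name; the logic mirrors the lines above):

def parse_tmux_version(ver):
    # 'ver' is the version banner tmux prints, e.g. "tmux 3.3a" or "tmux next-3.4"
    vernum = ver.split()[-1]
    if ver.startswith('tmux next-'):
        vernum = vernum[5:]    # drop the "next-" prefix
    return vernum

assert parse_tmux_version("tmux 3.3a") == "3.3a"
assert parse_tmux_version("tmux next-3.4") == "3.4"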
def distro_name():
diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py
index 4ae58acfac..b929afb1f3 100644
--- a/meta/lib/oe/types.py
+++ b/meta/lib/oe/types.py
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import errno
import re
import os
@@ -103,8 +109,13 @@ def boolean(value):
"""OpenEmbedded 'boolean' type
Valid values for true: 'yes', 'y', 'true', 't', '1'
- Valid values for false: 'no', 'n', 'false', 'f', '0'
+ Valid values for false: 'no', 'n', 'false', 'f', '0', None
"""
+ if value is None:
+ return False
+
+ if isinstance(value, bool):
+ return value
if not isinstance(value, str):
raise TypeError("boolean accepts a string, not '%s'" % type(value))
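The practical effect of the boolean() changes, as a minimal sketch (assumes meta/lib is on sys.path so oe.types can be imported outside a full build):

import oe.types

assert oe.types.boolean("yes") is True
assert oe.types.boolean("F") is False
assert oe.types.boolean(None) is False   # new: unset/None now reads as false
assert oe.types.boolean(True) is True    # new: real bools pass straight through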
@@ -145,9 +156,33 @@ def path(value, relativeto='', normalize='true', mustexist='false'):
if boolean(mustexist):
try:
- open(value, 'r')
+ with open(value, 'r'):
+ pass
except IOError as exc:
if exc.errno == errno.ENOENT:
raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))
return value
+
+def is_x86(arch):
+ """
+ Check whether arch is x86 or x86_64
+ """
+ if arch.startswith('x86_') or re.match('i.*86', arch):
+ return True
+ else:
+ return False
+
+def qemu_use_kvm(kvm, target_arch):
+ """
+ Enable kvm if target_arch == build_arch or both of them are x86 archs.
+ """
+
+ use_kvm = False
+ if kvm and boolean(kvm):
+ build_arch = os.uname()[4]
+ if is_x86(build_arch) and is_x86(target_arch):
+ use_kvm = True
+ elif build_arch == target_arch:
+ use_kvm = True
+ return use_kvm
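A hedged usage sketch for the new helpers; the values are illustrative, and in a recipe context kvm would typically come from d.getVar('QEMU_USE_KVM') and target_arch from d.getVar('TARGET_ARCH'):

import os
import oe.types

target_arch = "x86_64"                          # illustrative target
print(oe.types.is_x86(os.uname()[4]))           # True on an x86/x86_64 build host
print(oe.types.qemu_use_kvm("1", target_arch))
# True if the build host is also x86, or if build and target arches match exactly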
diff --git a/meta/lib/oe/useradd.py b/meta/lib/oe/useradd.py
index 179ac76b5e..54aa86feb5 100644
--- a/meta/lib/oe/useradd.py
+++ b/meta/lib/oe/useradd.py
@@ -1,3 +1,8 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import argparse
import re
@@ -11,7 +16,7 @@ class myArgumentParser(argparse.ArgumentParser):
error(message)
def error(self, message):
- raise bb.build.FuncFailed(message)
+ bb.fatal(message)
def split_commands(params):
params = re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
@@ -42,7 +47,6 @@ def build_useradd_parser():
parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
- parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new account")
parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
parser.add_argument("-r", "--system", help="create a system account", action="store_true")
parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
@@ -60,7 +64,6 @@ def build_groupadd_parser():
parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
- parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new group")
parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
parser.add_argument("-r", "--system", help="create a system account", action="store_true")
parser.add_argument("GROUP", help="Group name of the new group")
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
index 643ab78df7..14a7d07ef0 100644
--- a/meta/lib/oe/utils.py
+++ b/meta/lib/oe/utils.py
@@ -1,4 +1,13 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import subprocess
+import multiprocessing
+import traceback
+import errno
def read_file(filename):
try:
@@ -23,6 +32,13 @@ def conditional(variable, checkvalue, truevalue, falsevalue, d):
else:
return falsevalue
+def vartrue(var, iftrue, iffalse, d):
+ import oe.types
+ if oe.types.boolean(d.getVar(var)):
+ return iftrue
+ else:
+ return iffalse
+
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
if float(d.getVar(variable)) <= float(checkvalue):
return truevalue
@@ -69,12 +85,12 @@ def prune_suffix(var, suffixes, d):
# See if var ends with any of the suffixes listed and
# remove it if found
for suffix in suffixes:
- if var.endswith(suffix):
- var = var.replace(suffix, "")
+ if suffix and var.endswith(suffix):
+ var = var[:-len(suffix)]
prefix = d.getVar("MLPREFIX")
if prefix and var.startswith(prefix):
- var = var.replace(prefix, "")
+ var = var[len(prefix):]
return var
@@ -86,17 +102,6 @@ def str_filter_out(f, str, d):
from re import match
return " ".join([x for x in str.split() if not match(f, x, 0)])
-def param_bool(cfg, field, dflt = None):
- """Lookup <field> in <cfg> map and convert it to a boolean; take
- <dflt> when this <field> does not exist"""
- value = cfg.get(field, dflt)
- strvalue = str(value).lower()
- if strvalue in ('yes', 'y', 'true', 't', '1'):
- return True
- elif strvalue in ('no', 'n', 'false', 'f', '0'):
- return False
- raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
-
def build_depends_string(depends, task):
"""Append a taskname to a string of dependencies as used by the [depends] flag"""
return " ".join(dep + ":" + task for dep in depends.split())
@@ -167,18 +172,64 @@ def any_distro_features(d, features, truevalue="1", falsevalue=""):
"""
return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
+def parallel_make(d, makeinst=False):
+ """
+ Return the integer value for the number of parallel threads to use when
+ building, scraped out of PARALLEL_MAKE. If no parallelization option is
+    found, returns an empty string
+
+ e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
+ """
+ if makeinst:
+ pm = (d.getVar('PARALLEL_MAKEINST') or '').split()
+ else:
+ pm = (d.getVar('PARALLEL_MAKE') or '').split()
+ # look for '-j' and throw other options (e.g. '-l') away
+ while pm:
+ opt = pm.pop(0)
+ if opt == '-j':
+ v = pm.pop(0)
+ elif opt.startswith('-j'):
+ v = opt[2:].strip()
+ else:
+ continue
+
+ return int(v)
+
+ return ''
+
+def parallel_make_argument(d, fmt, limit=None, makeinst=False):
+ """
+ Helper utility to construct a parallel make argument from the number of
+ parallel threads specified in PARALLEL_MAKE.
+
+ Returns the input format string `fmt` where a single '%d' will be expanded
+ with the number of parallel threads to use. If `limit` is specified, the
+    number of parallel threads will be capped at that limit. If no parallelization
+    option is found in PARALLEL_MAKE, returns an empty string.
+
+ e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return
+ "-n 10"
+ """
+ v = parallel_make(d, makeinst)
+ if v:
+ if limit:
+ v = min(limit, v)
+ return fmt % v
+ return ''
+
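A self-contained illustration of the parsing above; FakeData is only a stand-in for the BitBake datastore and is not part of this patch:

import oe.utils

class FakeData:
    """Minimal datastore stand-in for illustration."""
    def __init__(self, values):
        self.values = values
    def getVar(self, name):
        return self.values.get(name)

d = FakeData({"PARALLEL_MAKE": "-j 10 -l 8"})
print(oe.utils.parallel_make(d))                            # 10
print(oe.utils.parallel_make_argument(d, "-n %d"))          # -n 10
print(oe.utils.parallel_make_argument(d, "-j%d", limit=4))  # -j4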
def packages_filter_out_system(d):
"""
Return a list of packages from PACKAGES with the "system" packages such as
PN-dbg PN-doc PN-locale-en-gb removed.
"""
pn = d.getVar('PN')
- blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev')]
+ pkgfilter = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')]
localepkg = pn + "-locale-"
pkgs = []
for pkg in d.getVar('PACKAGES').split():
- if pkg not in blacklist and localepkg not in pkg:
+ if pkg not in pkgfilter and localepkg not in pkg:
pkgs.append(pkg)
return pkgs
@@ -200,59 +251,131 @@ def trim_version(version, num_parts=2):
trimmed = ".".join(parts[:num_parts])
return trimmed
-def cpu_count():
- import multiprocessing
- return multiprocessing.cpu_count()
+def cpu_count(at_least=1, at_most=64):
+ cpus = len(os.sched_getaffinity(0))
+ return max(min(cpus, at_most), at_least)
def execute_pre_post_process(d, cmds):
if cmds is None:
return
- for cmd in cmds.strip().split(';'):
- cmd = cmd.strip()
- if cmd != '':
- bb.note("Executing %s ..." % cmd)
- bb.build.exec_func(cmd, d)
-
-def multiprocess_exec(commands, function):
- import signal
- import multiprocessing
-
- if not commands:
- return []
-
- def init_worker():
- signal.signal(signal.SIGINT, signal.SIG_IGN)
+ cmds = cmds.replace(";", " ")
- fails = []
+ for cmd in cmds.split():
+ bb.note("Executing %s ..." % cmd)
+ bb.build.exec_func(cmd, d)
- def failures(res):
- fails.append(res)
+def get_bb_number_threads(d):
+ return int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
- nproc = min(multiprocessing.cpu_count(), len(commands))
- pool = bb.utils.multiprocessingpool(nproc, init_worker)
+def multiprocess_launch(target, items, d, extraargs=None):
+ max_process = get_bb_number_threads(d)
+ return multiprocess_launch_mp(target, items, max_process, extraargs)
- try:
- mapresult = pool.map_async(function, commands, error_callback=failures)
-
- pool.close()
- pool.join()
- results = mapresult.get()
- except KeyboardInterrupt:
- pool.terminate()
- pool.join()
- raise
+# For each item in items, call the function 'target' with the item as the first
+# argument and extraargs as the remaining arguments, handling any exceptions in
+# the parent process
+def multiprocess_launch_mp(target, items, max_process, extraargs=None):
- if fails:
- raise fails[0]
+ class ProcessLaunch(multiprocessing.Process):
+ def __init__(self, *args, **kwargs):
+ multiprocessing.Process.__init__(self, *args, **kwargs)
+ self._pconn, self._cconn = multiprocessing.Pipe()
+ self._exception = None
+ self._result = None
+ def run(self):
+ try:
+ ret = self._target(*self._args, **self._kwargs)
+ self._cconn.send((None, ret))
+ except Exception as e:
+ tb = traceback.format_exc()
+ self._cconn.send((e, tb))
+
+ def update(self):
+ if self._pconn.poll():
+ (e, tb) = self._pconn.recv()
+ if e is not None:
+ self._exception = (e, tb)
+ else:
+ self._result = tb
+
+ @property
+ def exception(self):
+ self.update()
+ return self._exception
+
+ @property
+ def result(self):
+ self.update()
+ return self._result
+
+ launched = []
+ errors = []
+ results = []
+ items = list(items)
+ while (items and not errors) or launched:
+ if not errors and items and len(launched) < max_process:
+ args = (items.pop(),)
+ if extraargs is not None:
+ args = args + extraargs
+ p = ProcessLaunch(target=target, args=args)
+ p.start()
+ launched.append(p)
+ for q in launched:
+            # Have to manually call update() to avoid deadlocks. The pipe can fill up and
+            # stall the transfer until we try to read the result object, but the subprocess
+            # won't exit while it still has data to write (https://bugs.python.org/issue8426)
+ q.update()
+ # The finished processes are joined when calling is_alive()
+ if not q.is_alive():
+ if q.exception:
+ errors.append(q.exception)
+ if q.result:
+ results.append(q.result)
+ launched.remove(q)
+ # Paranoia doesn't hurt
+ for p in launched:
+ p.join()
+ if errors:
+ msg = ""
+ for (e, tb) in errors:
+ if isinstance(e, subprocess.CalledProcessError) and e.output:
+ msg = msg + str(e) + "\n"
+ msg = msg + "Subprocess output:"
+ msg = msg + e.output.decode("utf-8", errors="ignore")
+ else:
+ msg = msg + str(e) + ": " + str(tb) + "\n"
+ bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg)
return results
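A hedged, self-contained sketch of the datastore-free variant; the worker function and file names are invented, and the example relies on Linux's default fork start method so locally defined functions can be used as targets:

import oe.utils

def checksum(path, algo):
    # stands in for real per-item work (hashing, compressing, stripping, ...)
    return "%s:%s" % (algo, path)

if __name__ == "__main__":
    results = oe.utils.multiprocess_launch_mp(checksum,
                                              ["a.img", "b.img", "c.img"],
                                              max_process=2,
                                              extraargs=("sha256",))
    # Any exception raised in a child would have been collected and reported
    # in the parent via bb.fatal(); on success we just get the return values.
    print(sorted(results))   # ['sha256:a.img', 'sha256:b.img', 'sha256:c.img']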
def squashspaces(string):
import re
- return re.sub("\s+", " ", string).strip()
-
-def format_pkg_list(pkg_dict, ret_format=None):
+ return re.sub(r"\s+", " ", string).strip()
+
+def rprovides_map(pkgdata_dir, pkg_dict):
+ # Map file -> pkg provider
+ rprov_map = {}
+
+ for pkg in pkg_dict:
+ path_to_pkgfile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
+ if not os.path.isfile(path_to_pkgfile):
+ continue
+ with open(path_to_pkgfile) as f:
+ for line in f:
+ if line.startswith('RPROVIDES') or line.startswith('FILERPROVIDES'):
+ # List all components provided by pkg.
+ # Exclude version strings, i.e. those starting with (
+ provides = [x for x in line.split()[1:] if not x.startswith('(')]
+ for prov in provides:
+ if prov in rprov_map:
+ rprov_map[prov].append(pkg)
+ else:
+ rprov_map[prov] = [pkg]
+
+ return rprov_map
+
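A hedged sketch of the pkgdata layout rprovides_map() expects; the directory and the RPROVIDES line below are fabricated (real data lives under PKGDATA_DIR/runtime-reverse and the key also carries the package name):

import os
import tempfile
import oe.utils

pkgdata_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(pkgdata_dir, "runtime-reverse"))
with open(os.path.join(pkgdata_dir, "runtime-reverse", "busybox"), "w") as f:
    f.write("RPROVIDES: /bin/sh sh\n")       # fabricated, unversioned entries

print(oe.utils.rprovides_map(pkgdata_dir, {"busybox": {}}))
# {'/bin/sh': ['busybox'], 'sh': ['busybox']}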
+def format_pkg_list(pkg_dict, ret_format=None, pkgdata_dir=None):
output = []
if ret_format == "arch":
@@ -265,109 +388,101 @@ def format_pkg_list(pkg_dict, ret_format=None):
for pkg in sorted(pkg_dict):
output.append("%s %s %s" % (pkg, pkg_dict[pkg]["arch"], pkg_dict[pkg]["ver"]))
elif ret_format == "deps":
+ rprov_map = rprovides_map(pkgdata_dir, pkg_dict)
for pkg in sorted(pkg_dict):
for dep in pkg_dict[pkg]["deps"]:
- output.append("%s|%s" % (pkg, dep))
+ if dep in rprov_map:
+ # There could be multiple providers within the image
+ for pkg_provider in rprov_map[dep]:
+ output.append("%s|%s * %s [RPROVIDES]" % (pkg, pkg_provider, dep))
+ else:
+ output.append("%s|%s" % (pkg, dep))
else:
for pkg in sorted(pkg_dict):
output.append(pkg)
- return '\n'.join(output)
+ output_str = '\n'.join(output)
+
+ if output_str:
+ # make sure last line is newline terminated
+ output_str += '\n'
+
+ return output_str
-def host_gcc_version(d):
+
+# Helper function to get the host compiler version
+# Do not assume the compiler is gcc
+def get_host_compiler_version(d, taskcontextonly=False):
import re, subprocess
+ if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
+ return
+
compiler = d.getVar("BUILD_CC")
+ # Get rid of ccache since it is not present when parsing.
+ if compiler.startswith('ccache '):
+ compiler = compiler[7:]
try:
env = os.environ.copy()
- env["PATH"] = d.getVar("PATH")
- output = subprocess.check_output("%s --version" % compiler, shell=True, env=env).decode("utf-8")
+ # datastore PATH does not contain session PATH as set by environment-setup-...
+ # this breaks the install-buildtools use-case
+ # env["PATH"] = d.getVar("PATH")
+ output = subprocess.check_output("%s --version" % compiler, \
+ shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
except subprocess.CalledProcessError as e:
bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
- match = re.match(".* (\d\.\d)\.\d.*", output.split('\n')[0])
+ match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
if not match:
bb.fatal("Can't get compiler version from %s --version output" % compiler)
version = match.group(1)
- return "-%s" % version if version in ("4.8", "4.9") else ""
+ return compiler, version
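A hedged sketch of calling the new helper outside a task context; FakeData again stands in for the datastore, and a compiler named by BUILD_CC (plain gcc here) must be on PATH:

import oe.utils

class FakeData:
    def __init__(self, values):
        self.values = values
    def getVar(self, name):
        return self.values.get(name)

d = FakeData({"BUILD_CC": "ccache gcc"})     # the "ccache " prefix is stripped
compiler, version = oe.utils.get_host_compiler_version(d)
print(compiler, version)                     # e.g. "gcc 13.2"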
-#
-# Python 2.7 doesn't have threaded pools (just multiprocessing)
-# so implement a version here
-#
-from queue import Queue
-from threading import Thread
+def host_gcc_version(d, taskcontextonly=False):
+ import re, subprocess
-class ThreadedWorker(Thread):
- """Thread executing tasks from a given tasks queue"""
- def __init__(self, tasks, worker_init, worker_end):
- Thread.__init__(self)
- self.tasks = tasks
- self.daemon = True
+ if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
+ return
- self.worker_init = worker_init
- self.worker_end = worker_end
+ compiler = d.getVar("BUILD_CC")
+ # Get rid of ccache since it is not present when parsing.
+ if compiler.startswith('ccache '):
+ compiler = compiler[7:]
+ try:
+ env = os.environ.copy()
+ env["PATH"] = d.getVar("PATH")
+ output = subprocess.check_output("%s --version" % compiler, \
+ shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
- def run(self):
- from queue import Empty
+ match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
+ if not match:
+ bb.fatal("Can't get compiler version from %s --version output" % compiler)
- if self.worker_init is not None:
- self.worker_init(self)
+ version = match.group(1)
+ return "-%s" % version if version in ("4.8", "4.9") else ""
- while True:
- try:
- func, args, kargs = self.tasks.get(block=False)
- except Empty:
- if self.worker_end is not None:
- self.worker_end(self)
- break
- try:
- func(self, *args, **kargs)
- except Exception as e:
- print(e)
- finally:
- self.tasks.task_done()
-
-class ThreadedPool:
- """Pool of threads consuming tasks from a queue"""
- def __init__(self, num_workers, num_tasks, worker_init=None,
- worker_end=None):
- self.tasks = Queue(num_tasks)
- self.workers = []
-
- for _ in range(num_workers):
- worker = ThreadedWorker(self.tasks, worker_init, worker_end)
- self.workers.append(worker)
-
- def start(self):
- for worker in self.workers:
- worker.start()
-
- def add_task(self, func, *args, **kargs):
- """Add a task to the queue"""
- self.tasks.put((func, args, kargs))
-
- def wait_completion(self):
- """Wait for completion of all the tasks in the queue"""
- self.tasks.join()
- for worker in self.workers:
- worker.join()
-
-def write_ld_so_conf(d):
- # Some utils like prelink may not have the correct target library paths
- # so write an ld.so.conf to help them
- ldsoconf = d.expand("${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf")
- if os.path.exists(ldsoconf):
- bb.utils.remove(ldsoconf)
- bb.utils.mkdirhier(os.path.dirname(ldsoconf))
- with open(ldsoconf, "w") as f:
- f.write(d.getVar("base_libdir") + '\n')
- f.write(d.getVar("libdir") + '\n')
-
-class ImageQAFailed(bb.build.FuncFailed):
+def get_multilib_datastore(variant, d):
+ localdata = bb.data.createCopy(d)
+ if variant:
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", variant + "-")
+ else:
+ origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
+ if origdefault:
+ localdata.setVar("DEFAULTTUNE", origdefault)
+ overrides = localdata.getVar("OVERRIDES", False).split(":")
+ overrides = ":".join([x for x in overrides if not x.startswith("virtclass-multilib-")])
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", "")
+ return localdata
+
+class ImageQAFailed(Exception):
def __init__(self, description, name=None, logfile=None):
self.description = description
self.name = name
@@ -379,3 +494,49 @@ class ImageQAFailed(bb.build.FuncFailed):
msg = msg + ' (%s)' % self.description
return msg
+
+def sh_quote(string):
+ import shlex
+ return shlex.quote(string)
+
+def directory_size(root, blocksize=4096):
+ """
+ Calculate the size of the directory, taking into account hard links,
+ rounding up every size to multiples of the blocksize.
+ """
+ def roundup(size):
+ """
+ Round the size up to the nearest multiple of the block size.
+ """
+ import math
+ return math.ceil(size / blocksize) * blocksize
+
+ def getsize(filename):
+ """
+ Get the size of the filename, not following symlinks, taking into
+ account hard links.
+ """
+ stat = os.lstat(filename)
+ if stat.st_ino not in inodes:
+ inodes.add(stat.st_ino)
+ return stat.st_size
+ else:
+ return 0
+
+ inodes = set()
+ total = 0
+ for root, dirs, files in os.walk(root):
+ total += sum(roundup(getsize(os.path.join(root, name))) for name in files)
+ total += roundup(getsize(root))
+ return total
+
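A quick self-contained check of the hard-link handling; file names and sizes are arbitrary:

import os
import tempfile
import oe.utils

root = tempfile.mkdtemp()
with open(os.path.join(root, "data"), "w") as f:
    f.write("x" * 5000)                 # 5000 bytes round up to two 4k blocks
os.link(os.path.join(root, "data"), os.path.join(root, "alias"))   # same inode, counted once

print(oe.utils.directory_size(root))
# typically 12288: 8192 for the file data plus the directory's own size rounded
# up to one block; the hard link contributes nothing extra.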
+# Update the mtime of a file, skipping permission and read-only-filesystem errors
+def touch(filename):
+ try:
+ os.utime(filename, None)
+ except PermissionError:
+ pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ if e.errno != errno.EROFS:
+ raise e