Diffstat (limited to 'meta/lib/oe')
-rw-r--r--  meta/lib/oe/__init__.py | 4
-rw-r--r--  meta/lib/oe/buildhistory_analysis.py | 357
-rw-r--r--  meta/lib/oe/cachedpath.py | 2
-rw-r--r--  meta/lib/oe/classextend.py | 44
-rw-r--r--  meta/lib/oe/classutils.py | 5
-rw-r--r--  meta/lib/oe/copy_buildsystem.py | 67
-rw-r--r--  meta/lib/oe/cve_check.py | 148
-rw-r--r--  meta/lib/oe/data.py | 10
-rw-r--r--  meta/lib/oe/distro_check.py | 22
-rw-r--r--  meta/lib/oe/elf.py | 141
-rw-r--r--  meta/lib/oe/gpg_sign.py | 100
-rw-r--r--  meta/lib/oe/license.py | 76
-rw-r--r--  meta/lib/oe/lsb.py | 45
-rw-r--r--  meta/lib/oe/maketype.py | 7
-rw-r--r--  meta/lib/oe/manifest.py | 152
-rw-r--r--  meta/lib/oe/overlayfs.py | 48
-rw-r--r--  meta/lib/oe/package.py | 265
-rw-r--r--  meta/lib/oe/package_manager.py | 1568
-rw-r--r--  meta/lib/oe/package_manager/__init__.py | 556
-rw-r--r--  meta/lib/oe/package_manager/deb/__init__.py | 503
-rw-r--r--  meta/lib/oe/package_manager/deb/manifest.py | 26
-rw-r--r--  meta/lib/oe/package_manager/deb/rootfs.py | 210
-rw-r--r--  meta/lib/oe/package_manager/deb/sdk.py | 100
-rw-r--r--  meta/lib/oe/package_manager/ipk/__init__.py | 503
-rw-r--r--  meta/lib/oe/package_manager/ipk/manifest.py | 74
-rw-r--r--  meta/lib/oe/package_manager/ipk/rootfs.py | 350
-rw-r--r--  meta/lib/oe/package_manager/ipk/sdk.py | 102
-rw-r--r--  meta/lib/oe/package_manager/rpm/__init__.py | 410
-rw-r--r--  meta/lib/oe/package_manager/rpm/manifest.py | 54
-rw-r--r--  meta/lib/oe/package_manager/rpm/rootfs.py | 148
-rw-r--r--  meta/lib/oe/package_manager/rpm/sdk.py | 114
-rw-r--r--  meta/lib/oe/packagedata.py | 29
-rw-r--r--  meta/lib/oe/packagegroup.py | 12
-rw-r--r--  meta/lib/oe/patch.py | 80
-rw-r--r--  meta/lib/oe/path.py | 112
-rw-r--r--  meta/lib/oe/prservice.py | 95
-rw-r--r--  meta/lib/oe/qa.py | 52
-rw-r--r--  meta/lib/oe/recipeutils.py | 419
-rw-r--r--  meta/lib/oe/reproducible.py | 192
-rw-r--r--  meta/lib/oe/rootfs.py | 759
-rw-r--r--  meta/lib/oe/rust.py | 5
-rw-r--r--  meta/lib/oe/sbom.py | 78
-rw-r--r--  meta/lib/oe/sdk.py | 324
-rw-r--r--  meta/lib/oe/spdx.py | 342
-rw-r--r--  meta/lib/oe/sstatesig.py | 472
-rw-r--r--  meta/lib/oe/terminal.py | 114
-rw-r--r--  meta/lib/oe/types.py | 37
-rw-r--r--  meta/lib/oe/useradd.py | 69
-rw-r--r--  meta/lib/oe/utils.py | 394
49 files changed, 6477 insertions(+), 3319 deletions(-)
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py
index 3ad9513f40..4e7c09da04 100644
--- a/meta/lib/oe/__init__.py
+++ b/meta/lib/oe/__init__.py
@@ -1,2 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py
index 19b3bc437c..b1856846b6 100644
--- a/meta/lib/oe/buildhistory_analysis.py
+++ b/meta/lib/oe/buildhistory_analysis.py
@@ -1,8 +1,10 @@
# Report significant differences in the buildhistory repository since a specific revision
#
-# Copyright (C) 2012 Intel Corporation
+# Copyright (C) 2012-2013, 2016-2017 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Note: requires GitPython 0.3.1+
#
# You can use this from the command line by running scripts/buildhistory-diff
@@ -13,7 +15,11 @@ import os.path
import difflib
import git
import re
+import shlex
+import hashlib
+import collections
import bb.utils
+import bb.tinfoil
# How to display fields
@@ -28,15 +34,27 @@ ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
-# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
-related_fields = {}
-related_fields['RDEPENDS'] = ['DEPENDS']
-related_fields['RRECOMMENDS'] = ['DEPENDS']
-related_fields['FILELIST'] = ['FILES']
-related_fields['PKGSIZE'] = ['FILELIST']
-related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
-related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+colours = {
+ 'colour_default': '',
+ 'colour_add': '',
+ 'colour_remove': '',
+}
+
+def init_colours(use_colours):
+ global colours
+ if use_colours:
+ colours = {
+ 'colour_default': '\033[0m',
+ 'colour_add': '\033[1;32m',
+ 'colour_remove': '\033[1;31m',
+ }
+ else:
+ colours = {
+ 'colour_default': '',
+ 'colour_add': '',
+ 'colour_remove': '',
+ }
class ChangeRecord:
def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
@@ -45,7 +63,6 @@ class ChangeRecord:
self.oldvalue = oldvalue
self.newvalue = newvalue
self.monitored = monitored
- self.related = []
self.filechanges = None
def __str__(self):
@@ -76,7 +93,17 @@ class ChangeRecord:
for name in adirs - bdirs]
files_ba = [(name, sorted(os.path.basename(item) for item in bitems if os.path.dirname(item) == name)) \
for name in bdirs - adirs]
- renamed_dirs = [(dir1, dir2) for dir1, files1 in files_ab for dir2, files2 in files_ba if files1 == files2]
+ renamed_dirs = []
+ for dir1, files1 in files_ab:
+ rename = False
+ for dir2, files2 in files_ba:
+ if files1 == files2 and not rename:
+ renamed_dirs.append((dir1,dir2))
+ # Make sure that we don't use this (dir, files) pair again.
+ files_ba.remove((dir2,files2))
+ # If a dir has already been found to have a rename, stop and go no further.
+ rename = True
+
# remove files that belong to renamed dirs from aitems and bitems
for dir1, dir2 in renamed_dirs:
aitems = [item for item in aitems if os.path.dirname(item) not in (dir1, dir2)]
@@ -85,35 +112,50 @@ class ChangeRecord:
if self.fieldname in list_fields or self.fieldname in list_order_fields:
renamed_dirs = []
+ changed_order = False
if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
(depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
aitems = pkglist_combine(depvera)
bitems = pkglist_combine(depverb)
else:
- aitems = self.oldvalue.split()
- bitems = self.newvalue.split()
if self.fieldname == 'FILELIST':
+ aitems = shlex.split(self.oldvalue)
+ bitems = shlex.split(self.newvalue)
renamed_dirs, aitems, bitems = detect_renamed_dirs(aitems, bitems)
+ else:
+ aitems = self.oldvalue.split()
+ bitems = self.newvalue.split()
removed = list(set(aitems) - set(bitems))
added = list(set(bitems) - set(aitems))
+ if not removed and not added and self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
+ depvera = bb.utils.explode_dep_versions2(self.oldvalue, sort=False)
+ depverb = bb.utils.explode_dep_versions2(self.newvalue, sort=False)
+ for i, j in zip(depvera.items(), depverb.items()):
+ if i[0] != j[0]:
+ changed_order = True
+ break
+
lines = []
if renamed_dirs:
for dfrom, dto in renamed_dirs:
- lines.append('directory renamed %s -> %s' % (dfrom, dto))
+ lines.append('directory renamed {colour_remove}{}{colour_default} -> {colour_add}{}{colour_default}'.format(dfrom, dto, **colours))
if removed or added:
if removed and not bitems:
- lines.append('removed all items "%s"' % ' '.join(removed))
+ lines.append('removed all items "{colour_remove}{}{colour_default}"'.format(' '.join(removed), **colours))
else:
if removed:
- lines.append('removed "%s"' % ' '.join(removed))
+ lines.append('removed "{colour_remove}{value}{colour_default}"'.format(value=' '.join(removed), **colours))
if added:
- lines.append('added "%s"' % ' '.join(added))
+ lines.append('added "{colour_add}{value}{colour_default}"'.format(value=' '.join(added), **colours))
else:
lines.append('changed order')
- out = '%s: %s' % (self.fieldname, ', '.join(lines))
+ if not (removed or added or changed_order):
+ out = ''
+ else:
+ out = '%s: %s' % (self.fieldname, ', '.join(lines))
elif self.fieldname in numeric_fields:
aval = int(self.oldvalue or 0)
@@ -122,9 +164,9 @@ class ChangeRecord:
percentchg = ((bval - aval) / float(aval)) * 100
else:
percentchg = 100
- out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
+ out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default} ({}{:.0f}%)'.format(self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg, **colours)
elif self.fieldname in defaultval_map:
- out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
+ out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default}'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
if self.fieldname == 'PKG' and '[default]' in self.newvalue:
out += ' - may indicate debian renaming failure'
elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
@@ -139,34 +181,30 @@ class ChangeRecord:
diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
out += '\n '.join(list(diff)[2:])
out += '\n --'
- elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
- fieldname = self.fieldname
- if '/image-files/' in self.path:
- fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
- out = 'Changes to %s:\n ' % fieldname
- else:
- if outer:
- prefix = 'Changes to %s ' % self.path
- out = '(%s):\n ' % self.fieldname
- if self.filechanges:
- out += '\n '.join(['%s' % i for i in self.filechanges])
+ elif self.fieldname in img_monitor_files or '/image-files/' in self.path or self.fieldname == "sysroot":
+ if self.filechanges or (self.oldvalue and self.newvalue):
+ fieldname = self.fieldname
+ if '/image-files/' in self.path:
+ fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
+ out = 'Changes to %s:\n ' % fieldname
+ else:
+ if outer:
+ prefix = 'Changes to %s ' % self.path
+ out = '(%s):\n ' % self.fieldname
+ if self.filechanges:
+ out += '\n '.join(['%s' % i for i in self.filechanges])
+ else:
+ alines = self.oldvalue.splitlines()
+ blines = self.newvalue.splitlines()
+ diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
+ out += '\n '.join(list(diff))
+ out += '\n --'
else:
- alines = self.oldvalue.splitlines()
- blines = self.newvalue.splitlines()
- diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
- out += '\n '.join(list(diff))
- out += '\n --'
+ out = ''
else:
- out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)
+ out = '{} changed from "{colour_remove}{}{colour_default}" to "{colour_add}{}{colour_default}"'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
- if self.related:
- for chg in self.related:
- if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
- continue
- for line in chg._str_internal(False).splitlines():
- out += '\n * %s' % line
-
- return '%s%s' % (prefix, out)
+ return '%s%s' % (prefix, out) if out else ''
class FileChange:
changetype_add = 'A'
@@ -175,6 +213,7 @@ class FileChange:
changetype_perms = 'P'
changetype_ownergroup = 'O'
changetype_link = 'L'
+ changetype_move = 'M'
def __init__(self, path, changetype, oldvalue = None, newvalue = None):
self.path = path
@@ -213,10 +252,11 @@ class FileChange:
return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue)
elif self.changetype == self.changetype_link:
return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+ elif self.changetype == self.changetype_move:
+ return '%s moved to %s' % (self.path, self.oldvalue)
else:
return '%s changed (unknown)' % self.path
-
def blob_to_dict(blob):
alines = [line for line in blob.data_stream.read().decode('utf-8').splitlines()]
adict = {}
@@ -243,11 +283,14 @@ def file_list_to_dict(lines):
adict[path] = splitv[0:3]
return adict
+numeric_removal = str.maketrans('0123456789', 'XXXXXXXXXX')
-def compare_file_lists(alines, blines):
+def compare_file_lists(alines, blines, compare_ownership=True):
adict = file_list_to_dict(alines)
bdict = file_list_to_dict(blines)
filechanges = []
+ additions = []
+ removals = []
for path, splitv in adict.items():
newsplitv = bdict.pop(path, None)
if newsplitv:
@@ -256,16 +299,20 @@ def compare_file_lists(alines, blines):
newvalue = newsplitv[0][0]
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
+
# Check permissions
oldvalue = splitv[0][1:]
newvalue = newsplitv[0][1:]
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
- # Check owner/group
- oldvalue = '%s/%s' % (splitv[1], splitv[2])
- newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
- if oldvalue != newvalue:
- filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
+
+ if compare_ownership:
+ # Check owner/group
+ oldvalue = '%s/%s' % (splitv[1], splitv[2])
+ newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
+ if oldvalue != newvalue:
+ filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
+
# Check symlink target
if newsplitv[0][0] == 'l':
if len(splitv) > 3:
@@ -276,11 +323,67 @@ def compare_file_lists(alines, blines):
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
else:
- filechanges.append(FileChange(path, FileChange.changetype_remove))
+ removals.append(path)
# Whatever is left over has been added
for path in bdict:
- filechanges.append(FileChange(path, FileChange.changetype_add))
+ additions.append(path)
+
+ # Rather than print additions and removals, it's nicer to print file 'moves'
+ # where names or paths are similar.
+ revmap_remove = {}
+ for removal in removals:
+ translated = removal.translate(numeric_removal)
+ if translated not in revmap_remove:
+ revmap_remove[translated] = []
+ revmap_remove[translated].append(removal)
+
+ #
+ # We want to detect renames of large trees of files like
+ # /lib/modules/5.4.40-yocto-standard to /lib/modules/5.4.43-yocto-standard
+ #
+ renames = {}
+ for addition in additions.copy():
+ if addition not in additions:
+ continue
+ translated = addition.translate(numeric_removal)
+ if translated in revmap_remove:
+ if len(revmap_remove[translated]) != 1:
+ continue
+ removal = revmap_remove[translated][0]
+ commondir = addition.split("/")
+ commondir2 = removal.split("/")
+ idx = None
+ for i in range(len(commondir)):
+ if commondir[i] != commondir2[i]:
+ idx = i
+ break
+ commondir = "/".join(commondir[:i+1])
+ commondir2 = "/".join(commondir2[:i+1])
+ # If the common parent is in one dict and not the other it's likely a rename
+ # so iterate through those files and process as such
+ if commondir2 not in bdict and commondir not in adict:
+ if commondir not in renames:
+ renames[commondir] = commondir2
+ for addition2 in additions.copy():
+ if addition2.startswith(commondir):
+ removal2 = addition2.replace(commondir, commondir2)
+ if removal2 in removals:
+ additions.remove(addition2)
+ removals.remove(removal2)
+ continue
+ filechanges.append(FileChange(removal, FileChange.changetype_move, addition))
+ if addition in additions:
+ additions.remove(addition)
+ if removal in removals:
+ removals.remove(removal)
+ for rename in renames:
+ filechanges.append(FileChange(renames[rename], FileChange.changetype_move, rename))
+
+ for addition in additions:
+ filechanges.append(FileChange(addition, FileChange.changetype_add))
+ for removal in removals:
+ filechanges.append(FileChange(removal, FileChange.changetype_remove))
return filechanges
@@ -371,15 +474,19 @@ def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
if abs(percentchg) < monitor_numeric_threshold:
continue
elif (not report_all) and key in list_fields:
- if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
+ if key == "FILELIST" and (path.endswith("-dbg") or path.endswith("-src")) and bstr.strip() != '':
continue
if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
(depvera, depverb) = compare_pkg_lists(astr, bstr)
if depvera == depverb:
continue
- alist = astr.split()
+ if key == 'FILELIST':
+ alist = shlex.split(astr)
+ blist = shlex.split(bstr)
+ else:
+ alist = astr.split()
+ blist = bstr.split()
alist.sort()
- blist = bstr.split()
blist.sort()
# We don't care about the removal of self-dependencies
if pkgname in alist and not pkgname in blist:
@@ -410,13 +517,116 @@ def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
return changes
-def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False):
+def compare_siglists(a_blob, b_blob, taskdiff=False):
+ # FIXME collapse down a recipe's tasks?
+ alines = a_blob.data_stream.read().decode('utf-8').splitlines()
+ blines = b_blob.data_stream.read().decode('utf-8').splitlines()
+ keys = []
+ pnmap = {}
+ def readsigs(lines):
+ sigs = {}
+ for line in lines:
+ linesplit = line.split()
+ if len(linesplit) > 2:
+ sigs[linesplit[0]] = linesplit[2]
+ if not linesplit[0] in keys:
+ keys.append(linesplit[0])
+ pnmap[linesplit[1]] = linesplit[0].rsplit('.', 1)[0]
+ return sigs
+ adict = readsigs(alines)
+ bdict = readsigs(blines)
+ out = []
+
+ changecount = 0
+ addcount = 0
+ removecount = 0
+ if taskdiff:
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+
+ changes = collections.OrderedDict()
+
+ def compare_hashfiles(pn, taskname, hash1, hash2):
+ hashes = [hash1, hash2]
+ hashfiles = bb.siggen.find_siginfo(pn, taskname, hashes, tinfoil.config_data)
+
+ if not taskname:
+ (pn, taskname) = pn.rsplit('.', 1)
+ pn = pnmap.get(pn, pn)
+ desc = '%s.%s' % (pn, taskname)
+
+ if len(hashfiles) == 0:
+ out.append("Unable to find matching sigdata for %s with hashes %s or %s" % (desc, hash1, hash2))
+ elif not hash1 in hashfiles:
+ out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash1))
+ elif not hash2 in hashfiles:
+ out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash2))
+ else:
+ out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, collapsed=True)
+ for line in out2:
+ m = hashlib.sha256()
+ m.update(line.encode('utf-8'))
+ entry = changes.get(m.hexdigest(), (line, []))
+ if desc not in entry[1]:
+ changes[m.hexdigest()] = (line, entry[1] + [desc])
+
+ # Define recursion callback
+ def recursecb(key, hash1, hash2):
+ compare_hashfiles(key, None, hash1, hash2)
+ return []
+
+ for key in keys:
+ siga = adict.get(key, None)
+ sigb = bdict.get(key, None)
+ if siga is not None and sigb is not None and siga != sigb:
+ changecount += 1
+ (pn, taskname) = key.rsplit('.', 1)
+ compare_hashfiles(pn, taskname, siga, sigb)
+ elif siga is None:
+ addcount += 1
+ elif sigb is None:
+ removecount += 1
+ for key, item in changes.items():
+ line, tasks = item
+ if len(tasks) == 1:
+ desc = tasks[0]
+ elif len(tasks) == 2:
+ desc = '%s and %s' % (tasks[0], tasks[1])
+ else:
+ desc = '%s and %d others' % (tasks[-1], len(tasks)-1)
+ out.append('%s: %s' % (desc, line))
+ else:
+ for key in keys:
+ siga = adict.get(key, None)
+ sigb = bdict.get(key, None)
+ if siga is not None and sigb is not None and siga != sigb:
+ out.append('%s changed from %s to %s' % (key, siga, sigb))
+ changecount += 1
+ elif siga is None:
+ out.append('%s was added' % key)
+ addcount += 1
+ elif sigb is None:
+ out.append('%s was removed' % key)
+ removecount += 1
+ out.append('Summary: %d tasks added, %d tasks removed, %d tasks modified (%.1f%%)' % (addcount, removecount, changecount, (changecount / float(len(bdict)) * 100)))
+ return '\n'.join(out)
+
+
+def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False,
+ sigs=False, sigsdiff=False, exclude_path=None):
repo = git.Repo(repopath)
assert repo.bare == False
commit = repo.commit(revision1)
diff = commit.diff(revision2)
changes = []
+
+ if sigs or sigsdiff:
+ for d in diff.iter_change_type('M'):
+ if d.a_blob.path == 'siglist.txt':
+ changes.append(compare_siglists(d.a_blob, d.b_blob, taskdiff=sigsdiff))
+ return changes
+
for d in diff.iter_change_type('M'):
path = os.path.dirname(d.a_blob.path)
if path.startswith('packages/'):
@@ -426,6 +636,15 @@ def process_changes(repopath, revision1, revision2='HEAD', report_all=False, rep
elif filename.startswith('latest.'):
chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
changes.append(chg)
+ elif filename == 'sysroot':
+ alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
+ blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
+ filechanges = compare_file_lists(alines,blines, compare_ownership=False)
+ if filechanges:
+ chg = ChangeRecord(path, filename, None, None, True)
+ chg.filechanges = filechanges
+ changes.append(chg)
+
elif path.startswith('images/'):
filename = os.path.basename(d.a_blob.path)
if filename in img_monitor_files:
@@ -485,16 +704,18 @@ def process_changes(repopath, revision1, revision2='HEAD', report_all=False, rep
chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read().decode('utf-8'), '', True)
changes.append(chg)
- # Link related changes
- for chg in changes:
- if chg.monitored:
- for chg2 in changes:
- # (Check dirname in the case of fields from recipe info files)
- if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
- if chg2.fieldname in related_fields.get(chg.fieldname, []):
- chg.related.append(chg2)
- elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
- chg.related.append(chg2)
+ # filter out unwanted paths
+ if exclude_path:
+ for chg in changes:
+ if chg.filechanges:
+ fchgs = []
+ for fchg in chg.filechanges:
+ for epath in exclude_path:
+ if fchg.path.startswith(epath):
+ break
+ else:
+ fchgs.append(fchg)
+ chg.filechanges = fchgs
if report_all:
return changes
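
For reference, a minimal sketch of how the reworked entry points above could be driven from a standalone script, much as scripts/buildhistory-diff does (the buildhistory path, revisions and excluded path are illustrative assumptions; GitPython plus bitbake/lib and meta/lib on PYTHONPATH are required):

    import oe.buildhistory_analysis as bha

    # Enable the new ANSI-coloured add/remove markup in ChangeRecord output
    bha.init_colours(True)

    # Compare two buildhistory revisions; exclude_path drops file changes under
    # the listed prefixes, while sigs/sigsdiff switch to signature comparison
    changes = bha.process_changes('/path/to/buildhistory', 'HEAD~1', 'HEAD',
                                  exclude_path=['/usr/src/debug'])
    for chg in changes:
        text = str(chg)
        if text:
            print(text)
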
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py
index 0840cc4c3f..254257a83f 100644
--- a/meta/lib/oe/cachedpath.py
+++ b/meta/lib/oe/cachedpath.py
@@ -1,4 +1,6 @@
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Based on standard python library functions but avoid
# repeated stat calls. Its assumed the files will not change from under us
# so we can cache stat calls.
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py
index d2eeaf0e5c..e08d788b75 100644
--- a/meta/lib/oe/classextend.py
+++ b/meta/lib/oe/classextend.py
@@ -1,10 +1,24 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import collections
+def get_packages(d):
+ pkgs = d.getVar("PACKAGES_NONML")
+ extcls = d.getVar("EXTENDERCLASS")
+ return extcls.rename_packages_internal(pkgs)
+
+def get_depends(varprefix, d):
+ extcls = d.getVar("EXTENDERCLASS")
+ return extcls.map_depends_variable(varprefix + "_NONML")
+
class ClassExtender(object):
def __init__(self, extname, d):
self.extname = extname
self.d = d
self.pkgs_mapping = []
+ self.d.setVar("EXTENDERCLASS", self)
def extend_name(self, name):
if name.startswith("kernel-") or name == "virtual/kernel":
@@ -20,6 +34,8 @@ class ClassExtender(object):
if not subs.startswith(self.extname):
return "virtual/" + self.extname + "-" + subs
return name
+ if name.startswith("/") or (name.startswith("${") and name.endswith("}")):
+ return name
if not name.startswith(self.extname):
return self.extname + "-" + name
return name
@@ -71,7 +87,7 @@ class ClassExtender(object):
def map_depends_variable(self, varname, suffix = ""):
# We need to preserve EXTENDPKGV so it can be expanded correctly later
if suffix:
- varname = varname + "_" + suffix
+ varname = varname + ":" + suffix
orig = self.d.getVar("EXTENDPKGV", False)
self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
deps = self.d.getVar(varname)
@@ -83,8 +99,13 @@ class ClassExtender(object):
for dep in deps:
newdeps[self.map_depends(dep)] = deps[dep]
- self.d.setVar(varname, bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}"))
+ if not varname.endswith("_NONML"):
+ self.d.renameVar(varname, varname + "_NONML")
+ self.d.setVar(varname, "${@oe.classextend.get_depends('%s', d)}" % varname)
+ self.d.appendVarFlag(varname, "vardeps", " " + varname + "_NONML")
+ ret = bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}")
self.d.setVar("EXTENDPKGV", orig)
+ return ret
def map_packagevars(self):
for pkg in (self.d.getVar("PACKAGES").split() + [""]):
@@ -103,18 +124,31 @@ class ClassExtender(object):
continue
self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
- self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping]))
+ self.d.renameVar("PACKAGES", "PACKAGES_NONML")
+ self.d.setVar("PACKAGES", "${@oe.classextend.get_packages(d)}")
+
+ def rename_packages_internal(self, pkgs):
+ self.pkgs_mapping = []
+ for pkg in (self.d.expand(pkgs) or "").split():
+ if pkg.startswith(self.extname):
+ self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
+ continue
+ self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
+
+ return " ".join([row[1] for row in self.pkgs_mapping])
def rename_package_variables(self, variables):
for pkg_mapping in self.pkgs_mapping:
+ if pkg_mapping[0].startswith("${") and pkg_mapping[0].endswith("}"):
+ continue
for subs in variables:
- self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))
+ self.d.renameVar("%s:%s" % (subs, pkg_mapping[0]), "%s:%s" % (subs, pkg_mapping[1]))
class NativesdkClassExtender(ClassExtender):
def map_depends(self, dep):
if dep.startswith(self.extname):
return dep
- if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
+ if dep.endswith(("-gcc", "-g++")):
return dep + "-crosssdk"
elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
return dep
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py
index e7856c86f2..08bb66b365 100644
--- a/meta/lib/oe/classutils.py
+++ b/meta/lib/oe/classutils.py
@@ -1,3 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
class ClassRegistryMeta(type):
"""Give each ClassRegistry their own registry"""
@@ -36,7 +39,7 @@ abstract base classes out of the registry)."""
@classmethod
def prioritized(tcls):
return sorted(list(tcls.registry.values()),
- key=lambda v: v.priority, reverse=True)
+ key=lambda v: (v.priority, v.name), reverse=True)
def unregister(cls):
for key in cls.registry.keys():
diff --git a/meta/lib/oe/copy_buildsystem.py b/meta/lib/oe/copy_buildsystem.py
index a372904183..79642fd76a 100644
--- a/meta/lib/oe/copy_buildsystem.py
+++ b/meta/lib/oe/copy_buildsystem.py
@@ -1,5 +1,15 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# This class should provide easy access to the different aspects of the
# buildsystem such as layers, bitbake location, etc.
+#
+# SDK_LAYERS_EXCLUDE: Layers which will be excluded from SDK layers.
+# SDK_LAYERS_EXCLUDE_PATTERN: Similar to SDK_LAYERS_EXCLUDE, but supports
+#                             Python regular expressions, separated by spaces,
+# e.g.: ".*-downloads closed-.*"
+#
+
import stat
import shutil
@@ -10,7 +20,7 @@ def _smart_copy(src, dest):
mode = os.stat(src).st_mode
if stat.S_ISDIR(mode):
bb.utils.mkdirhier(dest)
- cmd = "tar --exclude='.git' --xattrs --xattrs-include='*' -chf - -C %s -p . \
+ cmd = "tar --exclude='.git' --exclude='__pycache__' --xattrs --xattrs-include='*' -chf - -C %s -p . \
| tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
else:
@@ -23,21 +33,37 @@ class BuildSystem(object):
self.context = context
self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS').split()]
self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE') or "").split()
+ self.layers_exclude_pattern = d.getVar('SDK_LAYERS_EXCLUDE_PATTERN')
def copy_bitbake_and_layers(self, destdir, workspace_name=None):
+ import re
# Copy in all metadata layers + bitbake (as repositories)
+ copied_corebase = None
layers_copied = []
bb.utils.mkdirhier(destdir)
layers = list(self.layerdirs)
corebase = os.path.abspath(self.d.getVar('COREBASE'))
layers.append(corebase)
+ # The bitbake build system uses the meta-skeleton layer as a layout
+ # for common recipes, e.g. the recipetool script to create kernel recipes
+ # Add the meta-skeleton layer to be included as part of the eSDK installation
+ layers.append(os.path.join(corebase, 'meta-skeleton'))
# Exclude layers
for layer_exclude in self.layers_exclude:
if layer_exclude in layers:
+ bb.note('Excluded %s from sdk layers since it is in SDK_LAYERS_EXCLUDE' % layer_exclude)
layers.remove(layer_exclude)
+ if self.layers_exclude_pattern:
+ layers_cp = layers[:]
+ for pattern in self.layers_exclude_pattern.split():
+ for layer in layers_cp:
+ if re.match(pattern, layer):
+ bb.note('Excluded %s from sdk layers since matched SDK_LAYERS_EXCLUDE_PATTERN' % layer)
+ layers.remove(layer)
+
workspace_newname = workspace_name
if workspace_newname:
layernames = [os.path.basename(layer) for layer in layers]
@@ -71,22 +97,30 @@ class BuildSystem(object):
layerdestpath = destdir
if corebase == os.path.dirname(layer):
layerdestpath += '/' + os.path.basename(corebase)
+ # If the layer is located somewhere under the same parent directory
+ # as corebase we keep the layer structure.
+ elif os.path.commonpath([layer, corebase]) == os.path.dirname(corebase):
+ layer_relative = os.path.relpath(layer, os.path.dirname(corebase))
+ if os.path.dirname(layer_relative) != layernewname:
+ layerdestpath += '/' + os.path.dirname(layer_relative)
+
layerdestpath += '/' + layernewname
layer_relative = os.path.relpath(layerdestpath,
destdir)
- layers_copied.append(layer_relative)
-
# Treat corebase as special since it typically will contain
# build directories or other custom items.
if corebase == layer:
+ copied_corebase = layer_relative
bb.utils.mkdirhier(layerdestpath)
for f in corebase_files:
f_basename = os.path.basename(f)
destname = os.path.join(layerdestpath, f_basename)
_smart_copy(f, destname)
else:
- if os.path.exists(layerdestpath):
+ layers_copied.append(layer_relative)
+
+ if os.path.exists(os.path.join(layerdestpath, 'conf/layer.conf')):
bb.note("Skipping layer %s, already handled" % layer)
else:
_smart_copy(layer, layerdestpath)
@@ -123,15 +157,23 @@ class BuildSystem(object):
line = line.replace('workspacelayer', workspace_newname)
f.write(line)
- return layers_copied
+ # meta-skeleton layer is added as part of the build system
+ # but not as a layer included in the build, therefore it is
+ # not reported to the function caller.
+ for layer in layers_copied:
+ if layer.endswith('/meta-skeleton'):
+ layers_copied.remove(layer)
+ break
+
+ return copied_corebase, layers_copied
def generate_locked_sigs(sigfile, d):
bb.utils.mkdirhier(os.path.dirname(sigfile))
depd = d.getVar('BB_TASKDEPDATA', False)
- tasks = ['%s.%s' % (v[2], v[1]) for v in depd.values()]
+ tasks = ['%s:%s' % (v[2], v[1]) for v in depd.values()]
bb.parse.siggen.dump_lockedsigs(sigfile, tasks)
-def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output):
+def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, onlynative, pruned_output):
with open(lockedsigs, 'r') as infile:
bb.utils.mkdirhier(os.path.dirname(pruned_output))
with open(pruned_output, 'w') as f:
@@ -141,7 +183,11 @@ def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output
if line.endswith('\\\n'):
splitval = line.strip().split(':')
if not splitval[1] in excluded_tasks and not splitval[0] in excluded_targets:
- f.write(line)
+ if onlynative:
+ if 'nativesdk' in splitval[0]:
+ f.write(line)
+ else:
+ f.write(line)
else:
f.write(line)
invalue = False
@@ -209,7 +255,7 @@ def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cac
bb.note('Generating sstate-cache...')
nativelsbstring = d.getVar('NATIVELSBSTRING')
- bb.process.run("gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
+ bb.process.run("PYTHONDONTWRITEBYTECODE=1 gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
if fixedlsbstring and nativelsbstring != fixedlsbstring:
nativedir = output_sstate_cache + '/' + nativelsbstring
if os.path.isdir(nativedir):
@@ -236,9 +282,10 @@ def check_sstate_task_list(d, targets, filteroutfile, cmdprefix='', cwd=None, lo
logparam = '-l %s' % logfile
else:
logparam = ''
- cmd = "%sBB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
+ cmd = "%sPYTHONDONTWRITEBYTECODE=1 BB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
env = dict(d.getVar('BB_ORIGENV', False))
env.pop('BUILDDIR', '')
+ env.pop('BBPATH', '')
pathitems = env['PATH'].split(':')
env['PATH'] = ':'.join([item for item in pathitems if not item.endswith('/bitbake/bin')])
bb.process.run(cmd, stderr=subprocess.STDOUT, env=env, cwd=cwd, executable='/bin/bash')
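
The new SDK_LAYERS_EXCLUDE_PATTERN handling is a plain re.match() against each absolute layer path; a self-contained sketch of that filtering with made-up layer paths and patterns:

    import re

    layers = ['/srv/poky/meta', '/srv/poky/meta-downloads', '/srv/layers/closed-bsp']
    # Space-separated regular expressions, as in SDK_LAYERS_EXCLUDE_PATTERN
    exclude_pattern = '.*-downloads .*closed-.*'

    for pattern in exclude_pattern.split():
        for layer in layers[:]:          # iterate over a copy while removing
            if re.match(pattern, layer):
                layers.remove(layer)

    print(layers)                        # ['/srv/poky/meta']
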
diff --git a/meta/lib/oe/cve_check.py b/meta/lib/oe/cve_check.py
new file mode 100644
index 0000000000..0302beeb4a
--- /dev/null
+++ b/meta/lib/oe/cve_check.py
@@ -0,0 +1,148 @@
+import collections
+import re
+import itertools
+import functools
+
+_Version = collections.namedtuple(
+ "_Version", ["release", "patch_l", "pre_l", "pre_v"]
+)
+
+@functools.total_ordering
+class Version():
+
+ def __init__(self, version, suffix=None):
+
+ suffixes = ["alphabetical", "patch"]
+
+ if str(suffix) == "alphabetical":
+ version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(?P<patch_l>[a-z]))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
+ elif str(suffix) == "patch":
+ version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(p|patch)(?P<patch_l>[0-9]+))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
+ else:
+ version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
+ regex = re.compile(r"^\s*" + version_pattern + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ match = regex.search(version)
+ if not match:
+ raise Exception("Invalid version: '{0}'".format(version))
+
+ self._version = _Version(
+ release=tuple(int(i) for i in match.group("release").replace("-",".").split(".")),
+ patch_l=match.group("patch_l") if str(suffix) in suffixes and match.group("patch_l") else "",
+ pre_l=match.group("pre_l"),
+ pre_v=match.group("pre_v")
+ )
+
+ self._key = _cmpkey(
+ self._version.release,
+ self._version.patch_l,
+ self._version.pre_l,
+ self._version.pre_v
+ )
+
+ def __eq__(self, other):
+ if not isinstance(other, Version):
+ return NotImplemented
+ return self._key == other._key
+
+ def __gt__(self, other):
+ if not isinstance(other, Version):
+ return NotImplemented
+ return self._key > other._key
+
+def _cmpkey(release, patch_l, pre_l, pre_v):
+ # remove leading 0
+ _release = tuple(
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+ )
+
+ _patch = patch_l.upper()
+
+ if pre_l is None and pre_v is None:
+ _pre = float('inf')
+ else:
+ _pre = float(pre_v) if pre_v else float('-inf')
+ return _release, _patch, _pre
+
+
+def get_patched_cves(d):
+ """
+ Get patches that solve CVEs using the "CVE: " tag.
+ """
+
+ import re
+ import oe.patch
+
+ pn = d.getVar("PN")
+ cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
+
+ # Matches the last "CVE-YYYY-ID" in the file name, also if written
+ # in lowercase. Possible to have multiple CVE IDs in a single
+ # file name, but only the last one will be detected from the file name.
+ # However, patch file contents addressing multiple CVE IDs are supported
+ # (cve_match regular expression)
+
+ cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
+
+ patched_cves = set()
+ bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
+ for url in oe.patch.src_patches(d):
+ patch_file = bb.fetch.decodeurl(url)[2]
+
+ if not os.path.isfile(patch_file):
+ bb.error("File Not found: %s" % patch_file)
+ raise FileNotFoundError
+
+ # Check patch file name for CVE ID
+ fname_match = cve_file_name_match.search(patch_file)
+ if fname_match:
+ cve = fname_match.group(1).upper()
+ patched_cves.add(cve)
+ bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
+
+ with open(patch_file, "r", encoding="utf-8") as f:
+ try:
+ patch_text = f.read()
+ except UnicodeDecodeError:
+ bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
+ " trying with iso8859-1" % patch_file)
+ f.close()
+ with open(patch_file, "r", encoding="iso8859-1") as f:
+ patch_text = f.read()
+
+ # Search for one or more "CVE: " lines
+ text_match = False
+ for match in cve_match.finditer(patch_text):
+ # Get only the CVEs without the "CVE: " tag
+ cves = patch_text[match.start()+5:match.end()]
+ for cve in cves.split():
+ bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
+ patched_cves.add(cve)
+ text_match = True
+
+ if not fname_match and not text_match:
+ bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
+
+ return patched_cves
+
+
+def get_cpe_ids(cve_product, version):
+ """
+ Get list of CPE identifiers for the given product and version
+ """
+
+ version = version.split("+git")[0]
+
+ cpe_ids = []
+ for product in cve_product.split():
+ # CVE_PRODUCT in recipes may include vendor information for CPE identifiers. If not,
+ # use wildcard for vendor.
+ if ":" in product:
+ vendor, product = product.split(":", 1)
+ else:
+ vendor = "*"
+
+ cpe_id = f'cpe:2.3:a:{vendor}:{product}:{version}:*:*:*:*:*:*:*'
+ cpe_ids.append(cpe_id)
+
+ return cpe_ids
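
A quick illustration of the new version-comparison and CPE helpers (run with meta/lib on PYTHONPATH; the products and versions below are arbitrary examples):

    from oe.cve_check import Version, get_cpe_ids

    # Upstream-style version ordering, including pre-release and letter suffixes
    assert Version("1.2.10") > Version("1.2.3")
    assert Version("2.0-rc1") < Version("2.0")
    assert Version("1.1.1g", suffix="alphabetical") < Version("1.1.1k", suffix="alphabetical")

    # CVE_PRODUCT entries may carry a vendor as "vendor:product"; otherwise '*' is used
    print(get_cpe_ids("openssl:openssl", "1.1.1k"))
    # ['cpe:2.3:a:openssl:openssl:1.1.1k:*:*:*:*:*:*:*']
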
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py
index 80bba2b9d2..602130a904 100644
--- a/meta/lib/oe/data.py
+++ b/meta/lib/oe/data.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import json
import oe.maketype
@@ -17,7 +21,7 @@ def typed_value(key, d):
except (TypeError, ValueError) as exc:
bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
-def export2json(d, json_file, expand=True):
+def export2json(d, json_file, expand=True, searchString="",replaceString=""):
data2export = {}
keys2export = []
@@ -37,9 +41,11 @@ def export2json(d, json_file, expand=True):
for key in keys2export:
try:
- data2export[key] = d.getVar(key, expand)
+ data2export[key] = d.getVar(key, expand).replace(searchString,replaceString)
except bb.data_smart.ExpansionError:
data2export[key] = ''
+ except AttributeError:
+ pass
with open(json_file, "w") as f:
json.dump(data2export, f, skipkeys=True, indent=4, sort_keys=True)
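
The new searchString/replaceString arguments let callers rewrite values while exporting, for example to strip build-host paths; a hedged sketch for use inside a BitBake python task where d is available (the variable and file names are illustrative):

    import oe.data

    # Replace absolute TOPDIR references so the exported JSON is host independent
    topdir = d.getVar('TOPDIR')
    oe.data.export2json(d, 'testdata.json', searchString=topdir, replaceString='<topdir>')
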
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py
index 37f04ed359..4b2a9bec01 100644
--- a/meta/lib/oe/distro_check.py
+++ b/meta/lib/oe/distro_check.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
def create_socket(url, d):
import urllib
from bb.utils import export_proxies
@@ -22,7 +26,7 @@ def find_latest_numeric_release(url, d):
maxstr=""
for link in get_links_from_url(url, d):
try:
- # TODO use LooseVersion
+ # TODO use bb.utils.vercmp_string_op()
release = float(link)
except:
release = 0
@@ -77,17 +81,10 @@ def get_latest_released_fedora_source_package_list(d):
def get_latest_released_opensuse_source_package_list(d):
"Returns list of all the name os packages in the latest opensuse distro"
- latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/",d)
-
- package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main", d)
- package_names |= get_source_package_list_from_url("http://download.opensuse.org/update/%s/src/" % latest, "updates", d)
- return latest, package_names
+ latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/leap", d)
-def get_latest_released_mandriva_source_package_list(d):
- "Returns list of all the name os packages in the latest mandriva distro"
- latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/", d)
- package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main", d)
- package_names |= get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates", d)
+ package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/leap/%s/repo/oss/suse/src/" % latest, "main", d)
+ package_names |= get_source_package_list_from_url("http://download.opensuse.org/update/leap/%s/oss/src/" % latest, "updates", d)
return latest, package_names
def get_latest_released_clear_source_package_list(d):
@@ -161,8 +158,7 @@ def create_distro_packages_list(distro_check_dir, d):
("Debian", get_latest_released_debian_source_package_list),
("Ubuntu", get_latest_released_ubuntu_source_package_list),
("Fedora", get_latest_released_fedora_source_package_list),
- ("OpenSuSE", get_latest_released_opensuse_source_package_list),
- ("Mandriva", get_latest_released_mandriva_source_package_list),
+ ("openSUSE", get_latest_released_opensuse_source_package_list),
("Clear", get_latest_released_clear_source_package_list),
)
diff --git a/meta/lib/oe/elf.py b/meta/lib/oe/elf.py
new file mode 100644
index 0000000000..46c884a775
--- /dev/null
+++ b/meta/lib/oe/elf.py
@@ -0,0 +1,141 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+def machine_dict(d):
+# TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
+ machdata = {
+ "darwin9" : {
+ "arm" : (40, 0, 0, True, 32),
+ },
+ "eabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ },
+ "elf" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "i586" : (3, 0, 0, True, 32),
+ "i686" : (3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ "epiphany": (4643, 0, 0, True, 32),
+ "lm32": (138, 0, 0, False, 32),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ "powerpc": (20, 0, 0, False, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
+ },
+ "linux" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "arm" : (40, 97, 0, True, 32),
+ "armeb": (40, 97, 0, False, 32),
+ "powerpc": (20, 0, 0, False, 32),
+ "powerpc64": (21, 0, 0, False, 64),
+ "powerpc64le": (21, 0, 0, True, 64),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ "ia64": (50, 0, 0, True, 64),
+ "alpha": (36902, 0, 0, True, 64),
+ "hppa": (15, 3, 0, False, 32),
+ "m68k": ( 4, 0, 0, False, 32),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "mips64": ( 8, 0, 0, False, 64),
+ "mips64el": ( 8, 0, 0, True, 64),
+ "mipsisa32r6": ( 8, 0, 0, False, 32),
+ "mipsisa32r6el": ( 8, 0, 0, True, 32),
+ "mipsisa64r6": ( 8, 0, 0, False, 64),
+ "mipsisa64r6el": ( 8, 0, 0, True, 64),
+ "nios2": (113, 0, 0, True, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
+ "s390": (22, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+ "sparc": ( 2, 0, 0, False, 32),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ },
+ "linux-android" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ },
+ "linux-androideabi" : {
+ "arm" : (40, 97, 0, True, 32),
+ },
+ "linux-musl" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "arm" : ( 40, 97, 0, True, 32),
+ "armeb": ( 40, 97, 0, False, 32),
+ "powerpc": ( 20, 0, 0, False, 32),
+ "powerpc64": ( 21, 0, 0, False, 64),
+ "powerpc64le": (21, 0, 0, True, 64),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": ( 62, 0, 0, True, 64),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "mips64": ( 8, 0, 0, False, 64),
+ "mips64el": ( 8, 0, 0, True, 64),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
+ "sh4": ( 42, 0, 0, True, 32),
+ },
+ "uclinux-uclibc" : {
+ "bfin": ( 106, 0, 0, True, 32),
+ },
+ "linux-gnueabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
+ },
+ "linux-musleabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
+ },
+ "linux-gnuspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-muslspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-gnu" : {
+ "powerpc": (20, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+ },
+ "linux-gnu_ilp32" : {
+ "aarch64" : (183, 0, 0, True, 32),
+ },
+ "linux-gnux32" : {
+ "x86_64": (62, 0, 0, True, 32),
+ },
+ "linux-muslx32" : {
+ "x86_64": (62, 0, 0, True, 32),
+ },
+ "linux-gnun32" : {
+ "mips64": ( 8, 0, 0, False, 32),
+ "mips64el": ( 8, 0, 0, True, 32),
+ "mipsisa64r6": ( 8, 0, 0, False, 32),
+ "mipsisa64r6el":( 8, 0, 0, True, 32),
+ },
+ }
+
+ # Add in any extra user supplied data which may come from a BSP layer, removing the
+ # need to always change this class directly
+ extra_machdata = (d and d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS" or None) or "").split()
+ for m in extra_machdata:
+ call = m + "(machdata, d)"
+ locs = { "machdata" : machdata, "d" : d}
+ machdata = bb.utils.better_eval(call, locs)
+
+ return machdata
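
Each entry in the table above is (ELF machine, OSABI, ABI version, little-endian, bit size), keyed by TARGET_OS then TARGET_ARCH; a small lookup example (passing d=None simply skips the PACKAGEQA_EXTRA_MACHDEFFUNCS extension hook):

    import oe.elf

    machdata = oe.elf.machine_dict(None)
    machine, osabi, abiversion, littleendian, bits = machdata["linux"]["x86_64"]
    print(machine, littleendian, bits)   # 62 True 64  (EM_X86_64, little-endian, 64-bit)
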
diff --git a/meta/lib/oe/gpg_sign.py b/meta/lib/oe/gpg_sign.py
index 7ce767ee0a..1bce6cb792 100644
--- a/meta/lib/oe/gpg_sign.py
+++ b/meta/lib/oe/gpg_sign.py
@@ -1,60 +1,71 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
"""Helper module for GPG signing"""
import os
import bb
-import oe.utils
+import subprocess
+import shlex
class LocalSigner(object):
"""Class for handling local (on the build host) signing"""
def __init__(self, d):
self.gpg_bin = d.getVar('GPG_BIN') or \
bb.utils.which(os.getenv('PATH'), 'gpg')
+ self.gpg_cmd = [self.gpg_bin]
+ self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent")
+ # Without this we see "Cannot allocate memory" errors when running processes in parallel
+ # It needs to be set for any gpg command since any agent launched can stick around in memory
+ # and this parameter must be set.
+ if self.gpg_agent_bin:
+ self.gpg_cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)]
self.gpg_path = d.getVar('GPG_PATH')
- self.gpg_version = self.get_gpg_version()
self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
+ self.gpg_version = self.get_gpg_version()
+
def export_pubkey(self, output_file, keyid, armor=True):
"""Export GPG public key to a file"""
- cmd = '%s --batch --yes --export -o %s ' % \
- (self.gpg_bin, output_file)
+ cmd = self.gpg_cmd + ["--no-permission-warning", "--batch", "--yes", "--export", "-o", output_file]
if self.gpg_path:
- cmd += "--homedir %s " % self.gpg_path
+ cmd += ["--homedir", self.gpg_path]
if armor:
- cmd += "--armor "
- cmd += keyid
- status, output = oe.utils.getstatusoutput(cmd)
- if status:
- raise bb.build.FuncFailed('Failed to export gpg public key (%s): %s' %
- (keyid, output))
-
- def sign_rpms(self, files, keyid, passphrase):
+ cmd += ["--armor"]
+ cmd += [keyid]
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+
+ def sign_rpms(self, files, keyid, passphrase, digest, sign_chunk, fsk=None, fsk_password=None):
"""Sign RPM files"""
cmd = self.rpm_bin + " --addsign --define '_gpg_name %s' " % keyid
- gpg_args = '--batch --passphrase=%s' % passphrase
+ gpg_args = '--no-permission-warning --batch --passphrase=%s --agent-program=%s|--auto-expand-secmem' % (passphrase, self.gpg_agent_bin)
if self.gpg_version > (2,1,):
gpg_args += ' --pinentry-mode=loopback'
cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
+ cmd += "--define '_binary_filedigest_algorithm %s' " % digest
if self.gpg_bin:
- cmd += "--define '%%__gpg %s' " % self.gpg_bin
+ cmd += "--define '__gpg %s' " % self.gpg_bin
if self.gpg_path:
cmd += "--define '_gpg_path %s' " % self.gpg_path
+ if fsk:
+ cmd += "--signfiles --fskpath %s " % fsk
+ if fsk_password:
+ cmd += "--define '_file_signing_key_password %s' " % fsk_password
- # Sign in chunks of 100 packages
- for i in range(0, len(files), 100):
- status, output = oe.utils.getstatusoutput(cmd + ' '.join(files[i:i+100]))
- if status:
- raise bb.build.FuncFailed("Failed to sign RPM packages: %s" % output)
+ # Sign in chunks
+ for i in range(0, len(files), sign_chunk):
+ subprocess.check_output(shlex.split(cmd + ' '.join(files[i:i+sign_chunk])), stderr=subprocess.STDOUT)
def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True):
"""Create a detached signature of a file"""
- import subprocess
if passphrase_file and passphrase:
raise Exception("You should use either passphrase_file of passphrase, not both")
- cmd = [self.gpg_bin, '--detach-sign', '--batch', '--no-tty', '--yes',
- '--passphrase-fd', '0', '-u', keyid]
+ cmd = self.gpg_cmd + ['--detach-sign', '--no-permission-warning', '--batch',
+ '--no-tty', '--yes', '--passphrase-fd', '0', '-u', keyid]
if self.gpg_path:
cmd += ['--homedir', self.gpg_path]
@@ -77,8 +88,7 @@ class LocalSigner(object):
(_, stderr) = job.communicate(passphrase.encode("utf-8"))
if job.returncode:
- raise bb.build.FuncFailed("GPG exited with code %d: %s" %
- (job.returncode, stderr.decode("utf-8")))
+ bb.fatal("GPG exited with code %d: %s" % (job.returncode, stderr.decode("utf-8")))
except IOError as e:
bb.error("IO error (%s): %s" % (e.errno, e.strerror))
@@ -91,23 +101,41 @@ class LocalSigner(object):
def get_gpg_version(self):
"""Return the gpg version as a tuple of ints"""
- import subprocess
try:
- ver_str = subprocess.check_output((self.gpg_bin, "--version")).split()[2].decode("utf-8")
- return tuple([int(i) for i in ver_str.split('.')])
+ cmd = self.gpg_cmd + ["--version", "--no-permission-warning"]
+ ver_str = subprocess.check_output(cmd).split()[2].decode("utf-8")
+ return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
except subprocess.CalledProcessError as e:
- raise bb.build.FuncFailed("Could not get gpg version: %s" % e)
+ bb.fatal("Could not get gpg version: %s" % e)
- def verify(self, sig_file):
+ def verify(self, sig_file, valid_sigs = ''):
"""Verify signature"""
- cmd = self.gpg_bin + " --verify "
+ cmd = self.gpg_cmd + ["--verify", "--no-permission-warning", "--status-fd", "1"]
if self.gpg_path:
- cmd += "--homedir %s " % self.gpg_path
- cmd += sig_file
- status, _ = oe.utils.getstatusoutput(cmd)
- ret = False if status else True
- return ret
+ cmd += ["--homedir", self.gpg_path]
+
+ cmd += [sig_file]
+ status = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ # Valid if any key matches if unspecified
+ if not valid_sigs:
+ ret = False if status.returncode else True
+ return ret
+
+ import re
+ goodsigs = []
+ sigre = re.compile(r'^\[GNUPG:\] GOODSIG (\S+)\s(.*)$')
+ for l in status.stdout.decode("utf-8").splitlines():
+ s = sigre.match(l)
+ if s:
+ goodsigs += [s.group(1)]
+
+ for sig in valid_sigs.split():
+ if sig in goodsigs:
+ return True
+ if len(goodsigs):
+ bb.warn('No accepted signatures found. Good signatures found: %s.' % ' '.join(goodsigs))
+ return False
def get_signer(d, backend):
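
With the reworked verify(), callers can insist on a specific set of key IDs instead of accepting any good signature; a hedged sketch for a BitBake python task where d is available (the 'local' backend, key ID and path are assumptions for illustration):

    signer = get_signer(d, 'local')
    if not signer.verify('/path/to/feed/Packages.sig', valid_sigs='0123456789ABCDEF'):
        bb.fatal('Signature was not made by an accepted key')
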
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py
index 8d2fd1709c..99cfa5f733 100644
--- a/meta/lib/oe/license.py
+++ b/meta/lib/oe/license.py
@@ -1,4 +1,6 @@
-# vi:sts=4:sw=4:et
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
"""Code for parsing OpenEmbedded license strings"""
import ast
@@ -8,17 +10,20 @@ from fnmatch import fnmatchcase as fnmatch
def license_ok(license, dont_want_licenses):
""" Return False if License exist in dont_want_licenses else True """
for dwl in dont_want_licenses:
- # If you want to exclude license named generically 'X', we
- # surely want to exclude 'X+' as well. In consequence, we
- # will exclude a trailing '+' character from LICENSE in
- # case INCOMPATIBLE_LICENSE is not a 'X+' license.
- lic = license
- if not re.search('\+$', dwl):
- lic = re.sub('\+', '', license)
- if fnmatch(lic, dwl):
+ if fnmatch(license, dwl):
return False
return True
+def obsolete_license_list():
+ return ["AGPL-3", "AGPL-3+", "AGPLv3", "AGPLv3+", "AGPLv3.0", "AGPLv3.0+", "AGPL-3.0", "AGPL-3.0+", "BSD-0-Clause",
+ "GPL-1", "GPL-1+", "GPLv1", "GPLv1+", "GPLv1.0", "GPLv1.0+", "GPL-1.0", "GPL-1.0+", "GPL-2", "GPL-2+", "GPLv2",
+ "GPLv2+", "GPLv2.0", "GPLv2.0+", "GPL-2.0", "GPL-2.0+", "GPL-3", "GPL-3+", "GPLv3", "GPLv3+", "GPLv3.0", "GPLv3.0+",
+ "GPL-3.0", "GPL-3.0+", "LGPLv2", "LGPLv2+", "LGPLv2.0", "LGPLv2.0+", "LGPL-2.0", "LGPL-2.0+", "LGPL2.1", "LGPL2.1+",
+ "LGPLv2.1", "LGPLv2.1+", "LGPL-2.1", "LGPL-2.1+", "LGPLv3", "LGPLv3+", "LGPL-3.0", "LGPL-3.0+", "MPL-1", "MPLv1",
+ "MPLv1.1", "MPLv2", "MIT-X", "MIT-style", "openssl", "PSF", "PSFv2", "Python-2", "Apachev2", "Apache-2", "Artisticv1",
+ "Artistic-1", "AFL-2", "AFL-1", "AFLv2", "AFLv1", "CDDLv1", "CDDL-1", "EPLv1.0", "FreeType", "Nauman",
+ "tcl", "vim", "SGIv1"]
+
class LicenseError(Exception):
pass
@@ -40,8 +45,8 @@ class InvalidLicense(LicenseError):
return "invalid characters in license '%s'" % self.license
license_operator_chars = '&|() '
-license_operator = re.compile('([' + license_operator_chars + '])')
-license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$')
+license_operator = re.compile(r'([' + license_operator_chars + '])')
+license_pattern = re.compile(r'[a-zA-Z0-9.+_\-]+$')
class LicenseVisitor(ast.NodeVisitor):
"""Get elements based on OpenEmbedded license strings"""
@@ -79,6 +84,9 @@ class FlattenVisitor(LicenseVisitor):
def visit_Str(self, node):
self.licenses.append(node.s)
+ def visit_Constant(self, node):
+ self.licenses.append(node.value)
+
def visit_BinOp(self, node):
if isinstance(node.op, ast.BitOr):
left = FlattenVisitor(self.choose_licenses)
@@ -101,35 +109,43 @@ def flattened_licenses(licensestr, choose_licenses):
raise LicenseSyntaxError(licensestr, exc)
return flatten.licenses
-def is_included(licensestr, whitelist=None, blacklist=None):
- """Given a license string and whitelist and blacklist, determine if the
- license string matches the whitelist and does not match the blacklist.
+def is_included(licensestr, include_licenses=None, exclude_licenses=None):
+ """Given a license string, a list of licenses to include and a list of
+ licenses to exclude, determine if the license string matches the include
+ list and does not match the exclude list.
Returns a tuple holding the boolean state and a list of the applicable
- licenses which were excluded (or None, if the state is True)
- """
+ licenses that were excluded if state is False, or the licenses that were
+ included if the state is True."""
def include_license(license):
- return any(fnmatch(license, pattern) for pattern in whitelist)
+ return any(fnmatch(license, pattern) for pattern in include_licenses)
def exclude_license(license):
- return any(fnmatch(license, pattern) for pattern in blacklist)
+ return any(fnmatch(license, pattern) for pattern in exclude_licenses)
def choose_licenses(alpha, beta):
"""Select the option in an OR which is the 'best' (has the most
- included licenses)."""
- alpha_weight = len(list(filter(include_license, alpha)))
- beta_weight = len(list(filter(include_license, beta)))
- if alpha_weight > beta_weight:
+ included licenses and no excluded licenses)."""
+ # The factor 1000 below is arbitrary, just expected to be much larger
+ # than the number of licenses actually specified. That way the weight
+ # will be negative if the list of licenses contains an excluded license,
+ # but still gives a higher weight to the list with the most included
+ # licenses.
+ alpha_weight = (len(list(filter(include_license, alpha))) -
+ 1000 * (len(list(filter(exclude_license, alpha))) > 0))
+ beta_weight = (len(list(filter(include_license, beta))) -
+ 1000 * (len(list(filter(exclude_license, beta))) > 0))
+ if alpha_weight >= beta_weight:
return alpha
else:
return beta
- if not whitelist:
- whitelist = ['*']
+ if not include_licenses:
+ include_licenses = ['*']
- if not blacklist:
- blacklist = []
+ if not exclude_licenses:
+ exclude_licenses = []
licenses = flattened_licenses(licensestr, choose_licenses)
excluded = [lic for lic in licenses if exclude_license(lic)]
@@ -224,6 +240,9 @@ class ListVisitor(LicenseVisitor):
def visit_Str(self, node):
self.licenses.add(node.s)
+ def visit_Constant(self, node):
+ self.licenses.add(node.value)
+
def list_licenses(licensestr):
"""Simply get a list of all licenses mentioned in a license string.
Binary operators are not applied or taken into account in any way"""
@@ -233,3 +252,8 @@ def list_licenses(licensestr):
except SyntaxError as exc:
raise LicenseSyntaxError(licensestr, exc)
return visitor.licenses
+
+def apply_pkg_license_exception(pkg, bad_licenses, exceptions):
+ """Return remaining bad licenses after removing any package exceptions"""
+
+ return [lic for lic in bad_licenses if pkg + ':' + lic not in exceptions]
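
The reworked is_included() above scores each alternative of an OR expression so that any alternative containing an excluded license always loses to one that has none. A standalone sketch of that weighting, using made-up license names rather than importing oe.license:

    from fnmatch import fnmatch

    def weight(licenses, include, exclude):
        # An excluded license makes the weight strongly negative, so an alternative
        # with no exclusions always beats one that has any; otherwise the alternative
        # matching the most include patterns wins.
        included = sum(1 for lic in licenses if any(fnmatch(lic, p) for p in include))
        excluded = any(any(fnmatch(lic, p) for p in exclude) for lic in licenses)
        return included - 1000 * excluded

    def choose(alpha, beta, include=('*',), exclude=()):
        return alpha if weight(alpha, include, exclude) >= weight(beta, include, exclude) else beta

    # "GPL-3.0-only | MIT" with GPL-3.0* excluded resolves to the MIT alternative.
    print(choose(['GPL-3.0-only'], ['MIT'], exclude=('GPL-3.0*',)))   # ['MIT']
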
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py
index 3a945e0fce..43e46380d7 100644
--- a/meta/lib/oe/lsb.py
+++ b/meta/lib/oe/lsb.py
@@ -1,19 +1,30 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+def get_os_release():
+ """Get all key-value pairs from /etc/os-release as a dict"""
+ from collections import OrderedDict
+
+ data = OrderedDict()
+ if os.path.exists('/etc/os-release'):
+ with open('/etc/os-release') as f:
+ for line in f:
+ try:
+ key, val = line.rstrip().split('=', 1)
+ except ValueError:
+ continue
+ data[key.strip()] = val.strip('"')
+ return data
+
def release_dict_osr():
""" Populate a dict with pertinent values from /etc/os-release """
- if not os.path.exists('/etc/os-release'):
- return None
-
data = {}
- with open('/etc/os-release') as f:
- for line in f:
- try:
- key, val = line.rstrip().split('=', 1)
- except ValueError:
- continue
- if key == 'ID':
- data['DISTRIB_ID'] = val.strip('"')
- if key == 'VERSION_ID':
- data['DISTRIB_RELEASE'] = val.strip('"')
+ os_release = get_os_release()
+ if 'ID' in os_release:
+ data['DISTRIB_ID'] = os_release['ID']
+ if 'VERSION_ID' in os_release:
+ data['DISTRIB_RELEASE'] = os_release['VERSION_ID']
return data
@@ -99,12 +110,12 @@ def distro_identifier(adjust_hook=None):
if adjust_hook:
distro_id, release = adjust_hook(distro_id, release)
if not distro_id:
- return "Unknown"
- # Filter out any non-alphanumerics
- distro_id = re.sub(r'\W', '', distro_id)
+ return "unknown"
+ # Filter out any non-alphanumerics and convert to lowercase
+ distro_id = re.sub(r'\W', '', distro_id).lower()
if release:
- id_str = '{0}-{1}'.format(distro_id.lower(), release)
+ id_str = '{0}-{1}'.format(distro_id, release)
else:
id_str = distro_id
return id_str.replace(' ','-').replace('/','-')
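
get_os_release() now exposes every key/value pair from /etc/os-release, and distro_identifier() lowercases the filtered ID. A minimal, self-contained sketch of the same parsing and normalisation (not the oe.lsb module itself; the example distro and release values are illustrative):

    import os
    import re

    def parse_os_release(path='/etc/os-release'):
        data = {}
        if os.path.exists(path):
            with open(path) as f:
                for line in f:
                    try:
                        key, val = line.rstrip().split('=', 1)
                    except ValueError:
                        continue          # skip blank or malformed lines
                    data[key.strip()] = val.strip('"')
        return data

    def identifier(distro_id, release=None):
        if not distro_id:
            return 'unknown'
        # Drop non-alphanumerics and lowercase, as distro_identifier() now does
        distro_id = re.sub(r'\W', '', distro_id).lower()
        return '{0}-{1}'.format(distro_id, release) if release else distro_id

    print(identifier('Ubuntu', '22.04'))   # ubuntu-22.04
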
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py
index f88981dd90..d36082c535 100644
--- a/meta/lib/oe/maketype.py
+++ b/meta/lib/oe/maketype.py
@@ -1,3 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
"""OpenEmbedded variable typing support
Types are defined in the metadata by name, using the 'type' flag on a
@@ -7,7 +10,7 @@ the arguments of the type's factory for details.
import inspect
import oe.types as types
-import collections
+from collections.abc import Callable
available_types = {}
@@ -96,7 +99,7 @@ for name in dir(types):
continue
obj = getattr(types, name)
- if not isinstance(obj, collections.Callable):
+ if not isinstance(obj, Callable):
continue
register(name, obj)
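
collections.Callable was removed as an alias in Python 3.10, which is why the registration loop now tests against collections.abc.Callable. A small runnable sketch of the same pattern, using the stdlib math module as a hypothetical stand-in for oe.types:

    from collections.abc import Callable
    import math

    available = {}

    def register(name, factory):
        available[name] = factory

    for name in dir(math):
        obj = getattr(math, name)
        if not isinstance(obj, Callable):   # collections.Callable is gone in Python 3.10+
            continue
        register(name, obj)

    print('sqrt' in available)   # True
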
diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py
index 60c49be0e9..1a058dcd73 100644
--- a/meta/lib/oe/manifest.py
+++ b/meta/lib/oe/manifest.py
@@ -1,9 +1,12 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from abc import ABCMeta, abstractmethod
import os
import re
import bb
-
class Manifest(object, metaclass=ABCMeta):
"""
This is an abstract class. Do not instantiate this directly.
@@ -185,154 +188,11 @@ class Manifest(object, metaclass=ABCMeta):
return installed_pkgs
-class RpmManifest(Manifest):
- """
- Returns a dictionary object with mip and mlp packages.
- """
- def _split_multilib(self, pkg_list):
- pkgs = dict()
-
- for pkg in pkg_list.split():
- pkg_type = self.PKG_TYPE_MUST_INSTALL
-
- ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
-
- for ml_variant in ml_variants:
- if pkg.startswith(ml_variant + '-'):
- pkg_type = self.PKG_TYPE_MULTILIB
-
- if not pkg_type in pkgs:
- pkgs[pkg_type] = pkg
- else:
- pkgs[pkg_type] += " " + pkg
-
- return pkgs
-
- def create_initial(self):
- pkgs = dict()
-
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for var in self.var_maps[self.manifest_type]:
- if var in self.vars_to_split:
- split_pkgs = self._split_multilib(self.d.getVar(var))
- if split_pkgs is not None:
- pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
- else:
- pkg_list = self.d.getVar(var)
- if pkg_list is not None:
- pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
-
- for pkg_type in pkgs:
- for pkg in pkgs[pkg_type].split():
- manifest.write("%s,%s\n" % (pkg_type, pkg))
-
- def create_final(self):
- pass
-
- def create_full(self, pm):
- pass
-
-
-class OpkgManifest(Manifest):
- """
- Returns a dictionary object with mip and mlp packages.
- """
- def _split_multilib(self, pkg_list):
- pkgs = dict()
-
- for pkg in pkg_list.split():
- pkg_type = self.PKG_TYPE_MUST_INSTALL
-
- ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
-
- for ml_variant in ml_variants:
- if pkg.startswith(ml_variant + '-'):
- pkg_type = self.PKG_TYPE_MULTILIB
-
- if not pkg_type in pkgs:
- pkgs[pkg_type] = pkg
- else:
- pkgs[pkg_type] += " " + pkg
-
- return pkgs
-
- def create_initial(self):
- pkgs = dict()
-
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for var in self.var_maps[self.manifest_type]:
- if var in self.vars_to_split:
- split_pkgs = self._split_multilib(self.d.getVar(var))
- if split_pkgs is not None:
- pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
- else:
- pkg_list = self.d.getVar(var)
- if pkg_list is not None:
- pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
-
- for pkg_type in pkgs:
- for pkg in pkgs[pkg_type].split():
- manifest.write("%s,%s\n" % (pkg_type, pkg))
-
- def create_final(self):
- pass
-
- def create_full(self, pm):
- if not os.path.exists(self.initial_manifest):
- self.create_initial()
-
- initial_manifest = self.parse_initial_manifest()
- pkgs_to_install = list()
- for pkg_type in initial_manifest:
- pkgs_to_install += initial_manifest[pkg_type]
- if len(pkgs_to_install) == 0:
- return
-
- output = pm.dummy_install(pkgs_to_install)
-
- with open(self.full_manifest, 'w+') as manifest:
- pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
- for line in set(output.split('\n')):
- m = pkg_re.match(line)
- if m:
- manifest.write(m.group(1) + '\n')
-
- return
-
-
-class DpkgManifest(Manifest):
- def create_initial(self):
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for var in self.var_maps[self.manifest_type]:
- pkg_list = self.d.getVar(var)
-
- if pkg_list is None:
- continue
-
- for pkg in pkg_list.split():
- manifest.write("%s,%s\n" %
- (self.var_maps[self.manifest_type][var], pkg))
-
- def create_final(self):
- pass
-
- def create_full(self, pm):
- pass
-
def create_manifest(d, final_manifest=False, manifest_dir=None,
manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
- manifest_map = {'rpm': RpmManifest,
- 'ipk': OpkgManifest,
- 'deb': DpkgManifest}
-
- manifest = manifest_map[d.getVar('IMAGE_PKGTYPE')](d, manifest_dir, manifest_type)
+ import importlib
+ manifest = importlib.import_module('oe.package_manager.' + d.getVar('IMAGE_PKGTYPE') + '.manifest').PkgManifest(d, manifest_dir, manifest_type)
if final_manifest:
manifest.create_final()
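
create_manifest() now dispatches by importing oe.package_manager.<IMAGE_PKGTYPE>.manifest at runtime and instantiating its PkgManifest class. A reduced sketch of that importlib dispatch, with a stdlib module standing in for the real backend:

    import importlib

    def get_class(module_path, class_name):
        # create_manifest() resolves 'oe.package_manager.<IMAGE_PKGTYPE>.manifest'
        # and takes its PkgManifest class; here a stdlib module stands in for it.
        return getattr(importlib.import_module(module_path), class_name)

    OrderedDict = get_class('collections', 'OrderedDict')
    print(OrderedDict([('rpm', 'PkgManifest')]))
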
diff --git a/meta/lib/oe/overlayfs.py b/meta/lib/oe/overlayfs.py
new file mode 100644
index 0000000000..b5d5e88e80
--- /dev/null
+++ b/meta/lib/oe/overlayfs.py
@@ -0,0 +1,48 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file contains common functions for overlayfs and its QA check
+
+# this function is based on https://github.com/systemd/systemd/blob/main/src/basic/unit-name.c
+def escapeSystemdUnitName(path):
+ escapeMap = {
+ '/': '-',
+ '-': "\\x2d",
+ '\\': "\\x5d"
+ }
+ return "".join([escapeMap.get(c, c) for c in path.strip('/')])
+
+def strForBash(s):
+ return s.replace('\\', '\\\\')
+
+def allOverlaysUnitName(d):
+ return d.getVar('PN') + '-overlays.service'
+
+def mountUnitName(unit):
+ return escapeSystemdUnitName(unit) + '.mount'
+
+def helperUnitName(unit):
+ return escapeSystemdUnitName(unit) + '-create-upper-dir.service'
+
+def unitFileList(d):
+ fileList = []
+ overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")
+
+ if not overlayMountPoints:
+ bb.fatal("A recipe uses overlayfs class but there is no OVERLAYFS_MOUNT_POINT set in your MACHINE configuration")
+
+ # check that we have required mount points set first
+ requiredMountPoints = d.getVarFlags('OVERLAYFS_WRITABLE_PATHS')
+ for mountPoint in requiredMountPoints:
+ if mountPoint not in overlayMountPoints:
+ bb.fatal("Missing required mount point for OVERLAYFS_MOUNT_POINT[%s] in your MACHINE configuration" % mountPoint)
+
+ for mountPoint in overlayMountPoints:
+ for path in d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint).split():
+ fileList.append(mountUnitName(path))
+ fileList.append(helperUnitName(path))
+
+ fileList.append(allOverlaysUnitName(d))
+
+ return fileList
+
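The escapeSystemdUnitName() helper mirrors systemd's unit-name escaping so each writable path maps to a predictable .mount unit name. A standalone copy of the mapping above, applied to a hypothetical path:

    def escape_unit_name(path):
        escape_map = {'/': '-', '-': "\\x2d", '\\': "\\x5d"}
        return ''.join(escape_map.get(c, c) for c in path.strip('/'))

    print(escape_unit_name('/var/my-data') + '.mount')   # var-my\x2ddata.mount
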
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py
index 52c5f16cf8..7d387ee81d 100644
--- a/meta/lib/oe/package.py
+++ b/meta/lib/oe/package.py
@@ -1,16 +1,26 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import stat
+import mmap
+import subprocess
+
def runstrip(arg):
# Function to strip a single file, called from split_and_strip_files below
# A working 'file' (one which works on the target architecture)
#
- # The elftype is a bit pattern (explained in split_and_strip_files) to tell
+ # The elftype is a bit pattern (explained in is_elf below) to tell
# us what type of file we're processing...
# 4 - executable
# 8 - shared library
# 16 - kernel module
- import stat, subprocess
-
- (file, elftype, strip) = arg
+ if len(arg) == 3:
+ (file, elftype, strip) = arg
+ extra_strip_sections = ''
+ else:
+ (file, elftype, strip, extra_strip_sections) = arg
newmode = None
if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
@@ -19,30 +29,159 @@ def runstrip(arg):
os.chmod(file, newmode)
stripcmd = [strip]
-
+ skip_strip = False
# kernel module
if elftype & 16:
- stripcmd.extend(["--strip-debug", "--remove-section=.comment",
- "--remove-section=.note", "--preserve-dates"])
+ if is_kernel_module_signed(file):
+ bb.debug(1, "Skip strip on signed module %s" % file)
+ skip_strip = True
+ else:
+ stripcmd.extend(["--strip-debug", "--remove-section=.comment",
+ "--remove-section=.note", "--preserve-dates"])
# .so and shared library
elif ".so" in file and elftype & 8:
stripcmd.extend(["--remove-section=.comment", "--remove-section=.note", "--strip-unneeded"])
# shared or executable:
elif elftype & 8 or elftype & 4:
stripcmd.extend(["--remove-section=.comment", "--remove-section=.note"])
+ if extra_strip_sections != '':
+ for section in extra_strip_sections.split():
+ stripcmd.extend(["--remove-section=" + section])
stripcmd.append(file)
bb.debug(1, "runstrip: %s" % stripcmd)
- try:
+ if not skip_strip:
output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.error("runstrip: '%s' strip command failed with %s (%s)" % (stripcmd, e.returncode, e.output))
if newmode:
os.chmod(file, origmode)
- return
+# Detect .ko module by searching for "vermagic=" string
+def is_kernel_module(path):
+ with open(path) as f:
+ return mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ).find(b"vermagic=") >= 0
+
+# Detect if .ko module is signed
+def is_kernel_module_signed(path):
+ with open(path, "rb") as f:
+ f.seek(-28, 2)
+ module_tail = f.read()
+ return "Module signature appended" in "".join(chr(c) for c in bytearray(module_tail))
+
+# Return type (bits):
+# 0 - not elf
+# 1 - ELF
+# 2 - stripped
+# 4 - executable
+# 8 - shared library
+# 16 - kernel module
+def is_elf(path):
+ exec_type = 0
+ result = subprocess.check_output(["file", "-b", path], stderr=subprocess.STDOUT).decode("utf-8")
+
+ if "ELF" in result:
+ exec_type |= 1
+ if "not stripped" not in result:
+ exec_type |= 2
+ if "executable" in result:
+ exec_type |= 4
+ if "shared" in result:
+ exec_type |= 8
+ if "relocatable" in result:
+ if path.endswith(".ko") and path.find("/lib/modules/") != -1 and is_kernel_module(path):
+ exec_type |= 16
+ return (path, exec_type)
+
+def is_static_lib(path):
+ if path.endswith('.a') and not os.path.islink(path):
+ with open(path, 'rb') as fh:
+ # The magic must include the first slash to avoid
+ # matching golang static libraries
+ magic = b'!<arch>\x0a/'
+ start = fh.read(len(magic))
+ return start == magic
+ return False
+
+def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d, qa_already_stripped=False):
+ """
+ Strip executable code (like executables, shared libraries) _in_place_
+ - Based on sysroot_strip in staging.bbclass
+ :param dstdir: directory in which to strip files
+ :param strip_cmd: Strip command (usually ${STRIP})
+ :param libdir: ${libdir} - strip .so files in this directory
+ :param base_libdir: ${base_libdir} - strip .so files in this directory
+ :param qa_already_stripped: Set to True if 'already-stripped' is in ${INSANE_SKIP}
+ This is for proper logging and messages only.
+ """
+ import stat, errno, oe.path, oe.utils
+
+ elffiles = {}
+ inodes = {}
+ libdir = os.path.abspath(dstdir + os.sep + libdir)
+ base_libdir = os.path.abspath(dstdir + os.sep + base_libdir)
+ exec_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+ #
+ # First lets figure out all of the files we may have to process
+ #
+ checkelf = []
+ inodecache = {}
+ for root, dirs, files in os.walk(dstdir):
+ for f in files:
+ file = os.path.join(root, f)
+
+ try:
+ ltarget = oe.path.realpath(file, dstdir, False)
+ s = os.lstat(ltarget)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ # Skip broken symlinks
+ continue
+ if not s:
+ continue
+ # Check it's an executable
+ if s[stat.ST_MODE] & exec_mask \
+ or ((file.startswith(libdir) or file.startswith(base_libdir)) and ".so" in f) \
+ or file.endswith('.ko'):
+ # If it's a symlink, and points to an ELF file, we capture the readlink target
+ if os.path.islink(file):
+ continue
+
+ # It's a file (or hardlink), not a link
+ # ...but is it ELF, and is it already stripped?
+ checkelf.append(file)
+ inodecache[file] = s.st_ino
+ results = oe.utils.multiprocess_launch(is_elf, checkelf, d)
+ for (file, elf_file) in results:
+ #elf_file = is_elf(file)
+ if elf_file & 1:
+ if elf_file & 2:
+ if qa_already_stripped:
+ bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dstdir):], pn))
+ else:
+ bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dstdir):], pn))
+ continue
+
+ if inodecache[file] in inodes:
+ os.unlink(file)
+ os.link(inodes[inodecache[file]], file)
+ else:
+ # break hardlinks so that we do not strip the original.
+ inodes[inodecache[file]] = file
+ bb.utils.break_hardlinks(file)
+ elffiles[file] = elf_file
+
+ #
+ # Now strip them (in parallel)
+ #
+ sfiles = []
+ for file in elffiles:
+ elf_file = int(elffiles[file])
+ sfiles.append((file, elf_file, strip_cmd))
+
+ oe.utils.multiprocess_launch(runstrip, sfiles, d)
def file_translate(file):
@@ -57,48 +196,67 @@ def file_translate(file):
def filedeprunner(arg):
import re, subprocess, shlex
- (pkg, pkgfiles, rpmdeps, pkgdest, magic) = arg
+ (pkg, pkgfiles, rpmdeps, pkgdest) = arg
provides = {}
requires = {}
- r = re.compile(r'[<>=]+ +[^ ]*')
+ file_re = re.compile(r'\s+\d+\s(.*)')
+ dep_re = re.compile(r'\s+(\S)\s+(.*)')
+ r = re.compile(r'[<>=]+\s+\S*')
def process_deps(pipe, pkg, pkgdest, provides, requires):
- for line in pipe:
- f = line.decode("utf-8").split(" ", 1)[0].strip()
- line = line.decode("utf-8").split(" ", 1)[1].strip()
+ file = None
+ for line in pipe.split("\n"):
+
+ m = file_re.match(line)
+ if m:
+ file = m.group(1)
+ file = file.replace(pkgdest + "/" + pkg, "")
+ file = file_translate(file)
+ continue
- if line.startswith("Requires:"):
+ m = dep_re.match(line)
+ if not m or not file:
+ continue
+
+ type, dep = m.groups()
+
+ if type == 'R':
i = requires
- elif line.startswith("Provides:"):
+ elif type == 'P':
i = provides
else:
- continue
+ continue
- file = f.replace(pkgdest + "/" + pkg, "")
- file = file_translate(file)
- value = line.split(":", 1)[1].strip()
- value = r.sub(r'(\g<0>)', value)
+ if dep.startswith("python("):
+ continue
- if value.startswith("rpmlib("):
+ # Ignore all perl(VMS::...) and perl(Mac::...) dependencies. These
+ # are typically used conditionally from the Perl code, but are
+ # generated as unconditional dependencies.
+ if dep.startswith('perl(VMS::') or dep.startswith('perl(Mac::'):
continue
- if value == "python":
+
+ # Ignore perl dependencies on .pl files.
+ if dep.startswith('perl(') and dep.endswith('.pl)'):
continue
+
+ # Remove perl versions and perl module versions since they typically
+ # do not make sense when used as package versions.
+ if dep.startswith('perl') and r.search(dep):
+ dep = dep.split()[0]
+
+ # Put parentheses around any version specifications.
+ dep = r.sub(r'(\g<0>)',dep)
+
if file not in i:
i[file] = []
- i[file].append(value)
+ i[file].append(dep)
return provides, requires
- env = os.environ.copy()
- env["MAGIC"] = magic
-
- try:
- dep_popen = subprocess.Popen(shlex.split(rpmdeps) + pkgfiles, stdout=subprocess.PIPE, env=env)
- provides, requires = process_deps(dep_popen.stdout, pkg, pkgdest, provides, requires)
- except OSError as e:
- bb.error("rpmdeps: '%s' command failed, '%s'" % (shlex.split(rpmdeps) + pkgfiles, e))
- raise e
+ output = subprocess.check_output(shlex.split(rpmdeps) + pkgfiles, stderr=subprocess.STDOUT).decode("utf-8")
+ provides, requires = process_deps(output, pkg, pkgdest, provides, requires)
return (pkg, provides, requires)
@@ -108,13 +266,13 @@ def read_shlib_providers(d):
shlib_provider = {}
shlibs_dirs = d.getVar('SHLIBSDIRS').split()
- list_re = re.compile('^(.*)\.list$')
+ list_re = re.compile(r'^(.*)\.list$')
# Go from least to most specific since the last one found wins
for dir in reversed(shlibs_dirs):
bb.debug(2, "Reading shlib providers in %s" % (dir))
if not os.path.exists(dir):
continue
- for file in os.listdir(dir):
+ for file in sorted(os.listdir(dir)):
m = list_re.match(file)
if m:
dep_pkg = m.group(1)
@@ -132,36 +290,3 @@ def read_shlib_providers(d):
shlib_provider[s[0]] = {}
shlib_provider[s[0]][s[1]] = (dep_pkg, s[2])
return shlib_provider
-
-
-def npm_split_package_dirs(pkgdir):
- """
- Work out the packages fetched and unpacked by BitBake's npm fetcher
- Returns a dict of packagename -> (relpath, package.json) ordered
- such that it is suitable for use in PACKAGES and FILES
- """
- from collections import OrderedDict
- import json
- packages = {}
- for root, dirs, files in os.walk(pkgdir):
- if os.path.basename(root) == 'node_modules':
- for dn in dirs:
- relpth = os.path.relpath(os.path.join(root, dn), pkgdir)
- pkgitems = ['${PN}']
- for pathitem in relpth.split('/'):
- if pathitem == 'node_modules':
- continue
- pkgitems.append(pathitem)
- pkgname = '-'.join(pkgitems).replace('_', '-')
- pkgname = pkgname.replace('@', '')
- pkgfile = os.path.join(root, dn, 'package.json')
- data = None
- if os.path.exists(pkgfile):
- with open(pkgfile, 'r') as f:
- data = json.loads(f.read())
- packages[pkgname] = (relpth, data)
- # We want the main package for a module sorted *after* its subpackages
- # (so that it doesn't otherwise steal the files for the subpackage), so
- # this is a cheap way to do that whilst still having an otherwise
- # alphabetical sort
- return OrderedDict((key, packages[key]) for key in sorted(packages, key=lambda pkg: pkg + '~'))
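
runstrip() and strip_execs() key their behaviour off the bit pattern returned by is_elf(). A small sketch that decodes those bits (the bit values come from the comment in oe/package.py above; the sample inputs are made up):

    ELF, STRIPPED, EXECUTABLE, SHARED, KERNEL_MODULE = 1, 2, 4, 8, 16

    def describe(exec_type):
        if not exec_type & ELF:
            return 'not ELF'
        flags = [name for bit, name in ((STRIPPED, 'stripped'),
                                        (EXECUTABLE, 'executable'),
                                        (SHARED, 'shared library'),
                                        (KERNEL_MODULE, 'kernel module'))
                 if exec_type & bit]
        return 'ELF: ' + ', '.join(flags) if flags else 'ELF'

    print(describe(1 | 4))       # ELF: executable
    print(describe(1 | 2 | 8))   # ELF: stripped, shared library
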
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py
deleted file mode 100644
index 2a4df61998..0000000000
--- a/meta/lib/oe/package_manager.py
+++ /dev/null
@@ -1,1568 +0,0 @@
-from abc import ABCMeta, abstractmethod
-import os
-import glob
-import subprocess
-import shutil
-import multiprocessing
-import re
-import collections
-import bb
-import tempfile
-import oe.utils
-import oe.path
-import string
-from oe.gpg_sign import get_signer
-
-# this can be used by all PM backends to create the index files in parallel
-def create_index(arg):
- index_cmd = arg
-
- try:
- bb.note("Executing '%s' ..." % index_cmd)
- result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- except subprocess.CalledProcessError as e:
- return("Index creation command '%s' failed with return code %d:\n%s" %
- (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- if result:
- bb.note(result)
-
- return None
-
-"""
-This method parse the output from the package managerand return
-a dictionary with the information of the packages. This is used
-when the packages are in deb or ipk format.
-"""
-def opkg_query(cmd_output):
- verregex = re.compile(' \([=<>]* [^ )]*\)')
- output = dict()
- pkg = ""
- arch = ""
- ver = ""
- filename = ""
- dep = []
- pkgarch = ""
- for line in cmd_output.splitlines():
- line = line.rstrip()
- if ':' in line:
- if line.startswith("Package: "):
- pkg = line.split(": ")[1]
- elif line.startswith("Architecture: "):
- arch = line.split(": ")[1]
- elif line.startswith("Version: "):
- ver = line.split(": ")[1]
- elif line.startswith("File: ") or line.startswith("Filename:"):
- filename = line.split(": ")[1]
- if "/" in filename:
- filename = os.path.basename(filename)
- elif line.startswith("Depends: "):
- depends = verregex.sub('', line.split(": ")[1])
- for depend in depends.split(", "):
- dep.append(depend)
- elif line.startswith("Recommends: "):
- recommends = verregex.sub('', line.split(": ")[1])
- for recommend in recommends.split(", "):
- dep.append("%s [REC]" % recommend)
- elif line.startswith("PackageArch: "):
- pkgarch = line.split(": ")[1]
-
- # When there is a blank line save the package information
- elif not line:
- # IPK doesn't include the filename
- if not filename:
- filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
- if pkg:
- output[pkg] = {"arch":arch, "ver":ver,
- "filename":filename, "deps": dep, "pkgarch":pkgarch }
- pkg = ""
- arch = ""
- ver = ""
- filename = ""
- dep = []
- pkgarch = ""
-
- if pkg:
- if not filename:
- filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
- output[pkg] = {"arch":arch, "ver":ver,
- "filename":filename, "deps": dep }
-
- return output
-
-
-class Indexer(object, metaclass=ABCMeta):
- def __init__(self, d, deploy_dir):
- self.d = d
- self.deploy_dir = deploy_dir
-
- @abstractmethod
- def write_index(self):
- pass
-
-
-class RpmIndexer(Indexer):
- def write_index(self):
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- raise NotImplementedError('Package feed signing not yet implementd for rpm')
-
- createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
- result = create_index("%s --update -q %s" % (createrepo_c, self.deploy_dir))
- if result:
- bb.fatal(result)
-
-class OpkgIndexer(Indexer):
- def write_index(self):
- arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
- "SDK_PACKAGE_ARCHS",
- "MULTILIB_ARCHS"]
-
- opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
- else:
- signer = None
-
- if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
- open(os.path.join(self.deploy_dir, "Packages"), "w").close()
-
- index_cmds = set()
- index_sign_files = set()
- for arch_var in arch_vars:
- archs = self.d.getVar(arch_var)
- if archs is None:
- continue
-
- for arch in archs.split():
- pkgs_dir = os.path.join(self.deploy_dir, arch)
- pkgs_file = os.path.join(pkgs_dir, "Packages")
-
- if not os.path.isdir(pkgs_dir):
- continue
-
- if not os.path.exists(pkgs_file):
- open(pkgs_file, "w").close()
-
- index_cmds.add('%s -r %s -p %s -m %s' %
- (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
-
- index_sign_files.add(pkgs_file)
-
- if len(index_cmds) == 0:
- bb.note("There are no packages in %s!" % self.deploy_dir)
- return
-
- result = oe.utils.multiprocess_exec(index_cmds, create_index)
- if result:
- bb.fatal('%s' % ('\n'.join(result)))
-
- if signer:
- feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
- is_ascii_sig = (feed_sig_type.upper() != "BIN")
- for f in index_sign_files:
- signer.detach_sign(f,
- self.d.getVar('PACKAGE_FEED_GPG_NAME'),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
- armor=is_ascii_sig)
-
-
-class DpkgIndexer(Indexer):
- def _create_configs(self):
- bb.utils.mkdirhier(self.apt_conf_dir)
- bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
- bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
- bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))
-
- with open(os.path.join(self.apt_conf_dir, "preferences"),
- "w") as prefs_file:
- pass
- with open(os.path.join(self.apt_conf_dir, "sources.list"),
- "w+") as sources_file:
- pass
-
- with open(self.apt_conf_file, "w") as apt_conf:
- with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
- "apt", "apt.conf.sample")) as apt_conf_sample:
- for line in apt_conf_sample.read().split("\n"):
- line = re.sub("#ROOTFS#", "/dev/null", line)
- line = re.sub("#APTCONF#", self.apt_conf_dir, line)
- apt_conf.write(line + "\n")
-
- def write_index(self):
- self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
- "apt-ftparchive")
- self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
- self._create_configs()
-
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- pkg_archs = self.d.getVar('PACKAGE_ARCHS')
- if pkg_archs is not None:
- arch_list = pkg_archs.split()
- sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
- if sdk_pkg_archs is not None:
- for a in sdk_pkg_archs.split():
- if a not in pkg_archs:
- arch_list.append(a)
-
- all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
- arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
-
- apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
- gzip = bb.utils.which(os.getenv('PATH'), "gzip")
-
- index_cmds = []
- deb_dirs_found = False
- for arch in arch_list:
- arch_dir = os.path.join(self.deploy_dir, arch)
- if not os.path.isdir(arch_dir):
- continue
-
- cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
-
- cmd += "%s -fc Packages > Packages.gz;" % gzip
-
- with open(os.path.join(arch_dir, "Release"), "w+") as release:
- release.write("Label: %s\n" % arch)
-
- cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
-
- index_cmds.append(cmd)
-
- deb_dirs_found = True
-
- if not deb_dirs_found:
- bb.note("There are no packages in %s" % self.deploy_dir)
- return
-
- result = oe.utils.multiprocess_exec(index_cmds, create_index)
- if result:
- bb.fatal('%s' % ('\n'.join(result)))
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- raise NotImplementedError('Package feed signing not implementd for dpkg')
-
-
-
-class PkgsList(object, metaclass=ABCMeta):
- def __init__(self, d, rootfs_dir):
- self.d = d
- self.rootfs_dir = rootfs_dir
-
- @abstractmethod
- def list_pkgs(self):
- pass
-
-class RpmPkgsList(PkgsList):
- def list_pkgs(self):
- return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR')).list_installed()
-
-class OpkgPkgsList(PkgsList):
- def __init__(self, d, rootfs_dir, config_file):
- super(OpkgPkgsList, self).__init__(d, rootfs_dir)
-
- self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
- self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
- self.opkg_args += self.d.getVar("OPKG_ARGS")
-
- def list_pkgs(self, format=None):
- cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
-
- # opkg returns success even when it printed some
- # "Collected errors:" report to stderr. Mixing stderr into
- # stdout then leads to random failures later on when
- # parsing the output. To avoid this we need to collect both
- # output streams separately and check for empty stderr.
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
- cmd_output, cmd_stderr = p.communicate()
- cmd_output = cmd_output.decode("utf-8")
- cmd_stderr = cmd_stderr.decode("utf-8")
- if p.returncode or cmd_stderr:
- bb.fatal("Cannot get the installed packages list. Command '%s' "
- "returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr))
-
- return opkg_query(cmd_output)
-
-
-class DpkgPkgsList(PkgsList):
-
- def list_pkgs(self):
- cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
- "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
- "-W"]
-
- cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
-
- try:
- cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get the installed packages list. Command '%s' "
- "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- return opkg_query(cmd_output)
-
-
-class PackageManager(object, metaclass=ABCMeta):
- """
- This is an abstract class. Do not instantiate this directly.
- """
-
- def __init__(self, d):
- self.d = d
- self.deploy_dir = None
- self.deploy_lock = None
-
- """
- Update the package manager package database.
- """
- @abstractmethod
- def update(self):
- pass
-
- """
- Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
- True, installation failures are ignored.
- """
- @abstractmethod
- def install(self, pkgs, attempt_only=False):
- pass
-
- """
- Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
- is False, the any dependencies are left in place.
- """
- @abstractmethod
- def remove(self, pkgs, with_dependencies=True):
- pass
-
- """
- This function creates the index files
- """
- @abstractmethod
- def write_index(self):
- pass
-
- @abstractmethod
- def remove_packaging_data(self):
- pass
-
- @abstractmethod
- def list_installed(self):
- pass
-
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
-
- """
- @abstractmethod
- def extract(self, pkg):
- pass
-
- """
- Add remote package feeds into repository manager configuration. The parameters
- for the feeds are set by feed_uris, feed_base_paths and feed_archs.
- See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
- for their description.
- """
- @abstractmethod
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- pass
-
- """
- Install complementary packages based upon the list of currently installed
- packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
- these packages, if they don't exist then no error will occur. Note: every
- backend needs to call this function explicitly after the normal package
- installation
- """
- def install_complementary(self, globs=None):
- # we need to write the list of installed packages to a file because the
- # oe-pkgdata-util reads it from a file
- installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR'),
- "installed_pkgs.txt")
- with open(installed_pkgs_file, "w+") as installed_pkgs:
- pkgs = self.list_installed()
- output = oe.utils.format_pkg_list(pkgs, "arch")
- installed_pkgs.write(output)
-
- if globs is None:
- globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
- split_linguas = set()
-
- for translation in self.d.getVar('IMAGE_LINGUAS').split():
- split_linguas.add(translation)
- split_linguas.add(translation.split('-')[0])
-
- split_linguas = sorted(split_linguas)
-
- for lang in split_linguas:
- globs += " *-locale-%s" % lang
-
- if globs is None:
- return
-
- cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
- "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs_file,
- globs]
- exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
- if exclude:
- cmd.extend(['--exclude=' + '|'.join(exclude.split())])
- try:
- bb.note("Installing complementary packages ...")
- bb.note('Running %s' % cmd)
- complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not compute complementary packages list. Command "
- "'%s' returned %d:\n%s" %
- (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- self.install(complementary_pkgs.split(), attempt_only=True)
- os.remove(installed_pkgs_file)
-
- def deploy_dir_lock(self):
- if self.deploy_dir is None:
- raise RuntimeError("deploy_dir is not set!")
-
- lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
-
- self.deploy_lock = bb.utils.lockfile(lock_file_name)
-
- def deploy_dir_unlock(self):
- if self.deploy_lock is None:
- return
-
- bb.utils.unlockfile(self.deploy_lock)
-
- self.deploy_lock = None
-
- """
- Construct URIs based on the following pattern: uri/base_path where 'uri'
- and 'base_path' correspond to each element of the corresponding array
- argument leading to len(uris) x len(base_paths) elements on the returned
- array
- """
- def construct_uris(self, uris, base_paths):
- def _append(arr1, arr2, sep='/'):
- res = []
- narr1 = [a.rstrip(sep) for a in arr1]
- narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
- for a1 in narr1:
- if arr2:
- for a2 in narr2:
- res.append("%s%s%s" % (a1, sep, a2))
- else:
- res.append(a1)
- return res
- return _append(uris, base_paths)
-
-class RpmPM(PackageManager):
- def __init__(self,
- d,
- target_rootfs,
- target_vendor,
- task_name='target',
- providename=None,
- arch_var=None,
- os_var=None):
- super(RpmPM, self).__init__(d)
- self.target_rootfs = target_rootfs
- self.target_vendor = target_vendor
- self.task_name = task_name
- if arch_var == None:
- self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_")
- else:
- self.archs = self.d.getVar(arch_var).replace("-","_")
- if task_name == "host":
- self.primary_arch = self.d.getVar('SDK_ARCH')
- else:
- self.primary_arch = self.d.getVar('MACHINE_ARCH')
-
- self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), "oe-rootfs-repo")
- bb.utils.mkdirhier(self.rpm_repo_dir)
- oe.path.symlink(self.d.getVar('DEPLOY_DIR_RPM'), oe.path.join(self.rpm_repo_dir, "rpm"), True)
-
- self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
- if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
- bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
- self.packaging_data_dirs = ['var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
- self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
- self.task_name)
- if not os.path.exists(self.d.expand('${T}/saved')):
- bb.utils.mkdirhier(self.d.expand('${T}/saved'))
-
- def _configure_dnf(self):
- # libsolv handles 'noarch' internally, we don't need to specify it explicitly
- archs = [i for i in self.archs.split() if i not in ["any", "all", "noarch"]]
- # This prevents accidental matching against libsolv's built-in policies
- if len(archs) <= 1:
- archs = archs + ["bogusarch"]
- archconfdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
- bb.utils.mkdirhier(archconfdir)
- open(archconfdir + "arch", 'w').write(":".join(archs))
-
- open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
-
-
- def _configure_rpm(self):
- # We need to configure rpm to use our primary package architecture as the installation architecture,
- # and to make it compatible with other package architectures that we use.
- # Otherwise it will refuse to proceed with packages installation.
- platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
- rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
- bb.utils.mkdirhier(platformconfdir)
- open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
- open(rpmrcconfdir + "rpmrc", 'w').write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
-
- open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
- if self.d.getVar('RPM_PREFER_ELF_ARCH'):
- open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
- else:
- open(platformconfdir + "macros", 'a').write("%_prefer_color 7")
-
- if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
- pubkey_path = self.d.getVar('RPM_GPG_PUBKEY')
- rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
- cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
- try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Importing GPG key failed. Command '%s' "
- "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- def create_configs(self):
- self._configure_dnf()
- self._configure_rpm()
-
- def write_index(self):
- lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
- lf = bb.utils.lockfile(lockfilename, False)
- RpmIndexer(self.d, self.rpm_repo_dir).write_index()
- bb.utils.unlockfile(lf)
-
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- from urllib.parse import urlparse
-
- if feed_uris == "":
- return
-
- bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
- remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
- for uri in remote_uris:
- repo_name = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
- if feed_archs is not None:
- repo_uris = [uri + "/" + arch for arch in feed_archs]
- else:
- repo_uris = [uri]
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_name + ".repo"), 'w').write("[%s]\nbaseurl=%s\n" % (repo_name, " ".join(repo_uris)))
-
- def _prepare_pkg_transaction(self):
- os.environ['D'] = self.target_rootfs
- os.environ['OFFLINE_ROOT'] = self.target_rootfs
- os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = oe.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
-
-
- def install(self, pkgs, attempt_only = False):
- if len(pkgs) == 0:
- return
- self._prepare_pkg_transaction()
-
- bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
- package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
- exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])
-
- output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
- (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
- (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == 1 else []) +
- (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
- ["install"] +
- pkgs)
-
- failed_scriptlets_pkgnames = collections.OrderedDict()
- for line in output.splitlines():
- if line.startswith("Non-fatal POSTIN scriptlet failure in rpm package"):
- failed_scriptlets_pkgnames[line.split()[-1]] = True
-
- for pkg in failed_scriptlets_pkgnames.keys():
- self.save_rpmpostinst(pkg)
-
- def remove(self, pkgs, with_dependencies = True):
- if len(pkgs) == 0:
- return
- self._prepare_pkg_transaction()
-
- if with_dependencies:
- self._invoke_dnf(["remove"] + pkgs)
- else:
- cmd = bb.utils.which(os.getenv('PATH'), "rpm")
- args = ["-e", "--nodeps", "--root=%s" %self.target_rootfs]
-
- try:
- output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not invoke rpm. Command "
- "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))
-
- def upgrade(self):
- self._prepare_pkg_transaction()
- self._invoke_dnf(["upgrade"])
-
- def autoremove(self):
- self._prepare_pkg_transaction()
- self._invoke_dnf(["autoremove"])
-
- def remove_packaging_data(self):
- self._invoke_dnf(["clean", "all"])
- for dir in self.packaging_data_dirs:
- bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)
-
- def backup_packaging_data(self):
- # Save the packaging dirs for increment rpm image generation
- if os.path.exists(self.saved_packaging_data):
- bb.utils.remove(self.saved_packaging_data, True)
- for i in self.packaging_data_dirs:
- source_dir = oe.path.join(self.target_rootfs, i)
- target_dir = oe.path.join(self.saved_packaging_data, i)
- shutil.copytree(source_dir, target_dir, symlinks=True)
-
- def recovery_packaging_data(self):
- # Move the rpmlib back
- if os.path.exists(self.saved_packaging_data):
- for i in self.packaging_data_dirs:
- target_dir = oe.path.join(self.target_rootfs, i)
- if os.path.exists(target_dir):
- bb.utils.remove(target_dir, True)
- source_dir = oe.path.join(self.saved_packaging_data, i)
- shutil.copytree(source_dir,
- target_dir,
- symlinks=True)
-
- def list_installed(self):
- output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{sourcerpm}\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
- print_output = False)
- packages = {}
- current_package = None
- current_deps = None
- current_state = "initial"
- for line in output.splitlines():
- if line.startswith("Package:"):
- package_info = line.split(" ")[1:]
- current_package = package_info[0]
- package_arch = package_info[1]
- package_version = package_info[2]
- package_srpm = package_info[3]
- packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_srpm}
- current_deps = []
- elif line.startswith("Dependencies:"):
- current_state = "dependencies"
- elif line.startswith("Recommendations"):
- current_state = "recommendations"
- elif line.startswith("DependenciesEndHere:"):
- current_state = "initial"
- packages[current_package]["deps"] = current_deps
- elif len(line) > 0:
- if current_state == "dependencies":
- current_deps.append(line)
- elif current_state == "recommendations":
- current_deps.append("%s [REC]" % line)
-
- return packages
-
- def update(self):
- self._invoke_dnf(["makecache"])
-
- def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
- os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
-
- dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
- standard_dnf_args = (["-v", "--rpmverbosity=debug"] if self.d.getVar('ROOTFS_RPM_DEBUG') else []) + ["-y",
- "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
- "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
- "--repofrompath=oe-repo,%s" % (self.rpm_repo_dir),
- "--installroot=%s" % (self.target_rootfs),
- "--setopt=logdir=%s" % (self.d.getVar('T'))
- ]
- cmd = [dnf_cmd] + standard_dnf_args + dnf_args
- try:
- output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
- if print_output:
- bb.note(output)
- return output
- except subprocess.CalledProcessError as e:
- if print_output:
- (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
- "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- else:
- (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
- "'%s' returned %d:" % (' '.join(cmd), e.returncode))
- return e.output.decode("utf-8")
-
- def dump_install_solution(self, pkgs):
- open(self.solution_manifest, 'w').write(" ".join(pkgs))
- return pkgs
-
- def load_old_install_solution(self):
- if not os.path.exists(self.solution_manifest):
- return []
-
- return open(self.solution_manifest, 'r').read().split()
-
- def _script_num_prefix(self, path):
- files = os.listdir(path)
- numbers = set()
- numbers.add(99)
- for f in files:
- numbers.add(int(f.split("-")[0]))
- return max(numbers) + 1
-
- def save_rpmpostinst(self, pkg):
- bb.note("Saving postinstall script of %s" % (pkg))
- cmd = bb.utils.which(os.getenv('PATH'), "rpm")
- args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]
-
- try:
- output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not invoke rpm. Command "
- "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))
-
- # may need to prepend #!/bin/sh to output
-
- target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
- bb.utils.mkdirhier(target_path)
- num = self._script_num_prefix(target_path)
- saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
- open(saved_script_name, 'w').write(output)
- os.chmod(saved_script_name, 0o755)
-
- def extract(self, pkg):
- output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
- pkg_name = output.splitlines()[-1]
- if not pkg_name.endswith(".rpm"):
- bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
- pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
-
- cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
- rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
-
- if not os.path.isfile(pkg_path):
- bb.fatal("Unable to extract package for '%s'."
- "File %s doesn't exists" % (pkg, pkg_path))
-
- tmp_dir = tempfile.mkdtemp()
- current_dir = os.getcwd()
- os.chdir(tmp_dir)
-
- try:
- cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
- except OSError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))
-
- bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
- os.chdir(current_dir)
-
- return tmp_dir
-
-
-class OpkgDpkgPM(PackageManager):
- """
- This is an abstract class. Do not instantiate this directly.
- """
- def __init__(self, d):
- super(OpkgDpkgPM, self).__init__(d)
-
- """
- Returns a dictionary with the package info.
-
- This method extracts the common parts for Opkg and Dpkg
- """
- def package_info(self, pkg, cmd):
-
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to list available packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
- return opkg_query(output)
-
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
-
- This method extracts the common parts for Opkg and Dpkg
- """
- def extract(self, pkg, pkg_info):
-
- ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
- tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
- pkg_path = pkg_info[pkg]["filepath"]
-
- if not os.path.isfile(pkg_path):
- bb.fatal("Unable to extract package for '%s'."
- "File %s doesn't exists" % (pkg, pkg_path))
-
- tmp_dir = tempfile.mkdtemp()
- current_dir = os.getcwd()
- os.chdir(tmp_dir)
- if self.d.getVar('IMAGE_PKGTYPE') == 'deb':
- data_tar = 'data.tar.xz'
- else:
- data_tar = 'data.tar.gz'
-
- try:
- cmd = [ar_cmd, 'x', pkg_path]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- cmd = [tar_cmd, 'xf', data_tar]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- except OSError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
-
- bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
- bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
- bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
- os.chdir(current_dir)
-
- return tmp_dir
-
-
-class OpkgPM(OpkgDpkgPM):
- def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
- super(OpkgPM, self).__init__(d)
-
- self.target_rootfs = target_rootfs
- self.config_file = config_file
- self.pkg_archs = archs
- self.task_name = task_name
-
- self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK")
- self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
- self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
- self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
- self.opkg_args += self.d.getVar("OPKG_ARGS")
-
- opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
- if opkg_lib_dir[0] == "/":
- opkg_lib_dir = opkg_lib_dir[1:]
-
- self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
-
- bb.utils.mkdirhier(self.opkg_dir)
-
- self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
- if not os.path.exists(self.d.expand('${T}/saved')):
- bb.utils.mkdirhier(self.d.expand('${T}/saved'))
-
- self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
- if self.from_feeds:
- self._create_custom_config()
- else:
- self._create_config()
-
- self.indexer = OpkgIndexer(self.d, self.deploy_dir)
-
- """
- This function will change a package's status in /var/lib/opkg/status file.
- If 'packages' is None then the new_status will be applied to all
- packages
- """
- def mark_packages(self, status_tag, packages=None):
- status_file = os.path.join(self.opkg_dir, "status")
-
- with open(status_file, "r") as sf:
- with open(status_file + ".tmp", "w+") as tmp_sf:
- if packages is None:
- tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
- r"Package: \1\n\2Status: \3%s" % status_tag,
- sf.read()))
- else:
- if type(packages).__name__ != "list":
- raise TypeError("'packages' should be a list object")
-
- status = sf.read()
- for pkg in packages:
- status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
- r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
- status)
-
- tmp_sf.write(status)
-
- os.rename(status_file + ".tmp", status_file)
-
- def _create_custom_config(self):
- bb.note("Building from feeds activated!")
-
- with open(self.config_file, "w+") as config_file:
- priority = 1
- for arch in self.pkg_archs.split():
- config_file.write("arch %s %d\n" % (arch, priority))
- priority += 5
-
- for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
- feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
-
- if feed_match is not None:
- feed_name = feed_match.group(1)
- feed_uri = feed_match.group(2)
-
- bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
-
- config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
-
- """
- Allow to use package deploy directory contents as quick devel-testing
- feed. This creates individual feed configs for each arch subdir of those
- specified as compatible for the current machine.
- NOTE: Development-helper feature, NOT a full-fledged feed.
- """
- if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
- for arch in self.pkg_archs.split():
- cfg_file_name = os.path.join(self.target_rootfs,
- self.d.getVar("sysconfdir"),
- "opkg",
- "local-%s-feed.conf" % arch)
-
- with open(cfg_file_name, "w+") as cfg_file:
- cfg_file.write("src/gz local-%s %s/%s" %
- (arch,
- self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
- arch))
-
- if self.opkg_dir != '/var/lib/opkg':
- # There is no command line option for this anymore, we need to add
- # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
- # the default value of "/var/lib" as defined in opkg:
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR "/var/lib/opkg/info"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status"
- cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
- cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
-
-
- def _create_config(self):
- with open(self.config_file, "w+") as config_file:
- priority = 1
- for arch in self.pkg_archs.split():
- config_file.write("arch %s %d\n" % (arch, priority))
- priority += 5
-
- config_file.write("src oe file:%s\n" % self.deploy_dir)
-
- for arch in self.pkg_archs.split():
- pkgs_dir = os.path.join(self.deploy_dir, arch)
- if os.path.isdir(pkgs_dir):
- config_file.write("src oe-%s file:%s\n" %
- (arch, pkgs_dir))
-
- if self.opkg_dir != '/var/lib/opkg':
- # There is no command line option for this anymore, we need to add
- # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
- # the default value of "/var/lib" as defined in opkg:
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR "/var/lib/opkg/info"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status"
- config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
- config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
-
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- if feed_uris == "":
- return
-
- rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
- % self.target_rootfs)
-
- feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
- archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
-
- with open(rootfs_config, "w+") as config_file:
- uri_iterator = 0
- for uri in feed_uris:
- if archs:
- for arch in archs:
- if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
- continue
- bb.note('Adding opkg feed url-%s-%d (%s)' %
- (arch, uri_iterator, uri))
- config_file.write("src/gz uri-%s-%d %s/%s\n" %
- (arch, uri_iterator, uri, arch))
- else:
- bb.note('Adding opkg feed url-%d (%s)' %
- (uri_iterator, uri))
- config_file.write("src/gz uri-%d %s\n" %
- (uri_iterator, uri))
-
- uri_iterator += 1
-
- def update(self):
- self.deploy_dir_lock()
-
- cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- self.deploy_dir_unlock()
- bb.fatal("Unable to update the package index files. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- self.deploy_dir_unlock()
-
- def install(self, pkgs, attempt_only=False):
- if not pkgs:
- return
-
- cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
-
- os.environ['D'] = self.target_rootfs
- os.environ['OFFLINE_ROOT'] = self.target_rootfs
- os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
-
- try:
- bb.note("Installing the following packages: %s" % ' '.join(pkgs))
- bb.note(cmd)
- output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
- "Command '%s' returned %d:\n%s" %
- (cmd, e.returncode, e.output.decode("utf-8")))
-
- def remove(self, pkgs, with_dependencies=True):
- if with_dependencies:
- cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
- (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
- else:
- cmd = "%s %s --force-depends remove %s" % \
- (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
-
- try:
- bb.note(cmd)
- output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to remove packages. Command '%s' "
- "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- def write_index(self):
- self.deploy_dir_lock()
-
- result = self.indexer.write_index()
-
- self.deploy_dir_unlock()
-
- if result is not None:
- bb.fatal(result)
-
- def remove_packaging_data(self):
- bb.utils.remove(self.opkg_dir, True)
- # create the directory back, it's needed by PM lock
- bb.utils.mkdirhier(self.opkg_dir)
-
- def remove_lists(self):
- if not self.from_feeds:
- bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True)
-
- def list_installed(self):
- return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs()
-
- def handle_bad_recommendations(self):
- bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS") or ""
- if bad_recommendations.strip() == "":
- return
-
- status_file = os.path.join(self.opkg_dir, "status")
-
- # If status file existed, it means the bad recommendations has already
- # been handled
- if os.path.exists(status_file):
- return
-
- cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
-
- with open(status_file, "w+") as status:
- for pkg in bad_recommendations.split():
- pkg_info = cmd + pkg
-
- try:
- output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get package info. Command '%s' "
- "returned %d:\n%s" % (pkg_info, e.returncode, e.output.decode("utf-8")))
-
- if output == "":
- bb.note("Ignored bad recommendation: '%s' is "
- "not a package" % pkg)
- continue
-
- for line in output.split('\n'):
- if line.startswith("Status:"):
- status.write("Status: deinstall hold not-installed\n")
- else:
- status.write(line + "\n")
-
- # Append a blank line after each package entry to ensure that it
- # is separated from the following entry
- status.write("\n")
-
- '''
- The following function dummy installs pkgs and returns the log of output.
- '''
- def dummy_install(self, pkgs):
- if len(pkgs) == 0:
- return
-
- # Create an temp dir as opkg root for dummy installation
- temp_rootfs = self.d.expand('${T}/opkg')
- temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
- bb.utils.mkdirhier(temp_opkg_dir)
-
- opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
- opkg_args += self.d.getVar("OPKG_ARGS")
-
- cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
- try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to update. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- # Dummy installation
- cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
- opkg_args,
- ' '.join(pkgs))
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to dummy install packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- bb.utils.remove(temp_rootfs, True)
-
- return output
-
- def backup_packaging_data(self):
- # Save the opkglib for increment ipk image generation
- if os.path.exists(self.saved_opkg_dir):
- bb.utils.remove(self.saved_opkg_dir, True)
- shutil.copytree(self.opkg_dir,
- self.saved_opkg_dir,
- symlinks=True)
-
- def recover_packaging_data(self):
- # Move the opkglib back
- if os.path.exists(self.saved_opkg_dir):
- if os.path.exists(self.opkg_dir):
- bb.utils.remove(self.opkg_dir, True)
-
- bb.note('Recover packaging data')
- shutil.copytree(self.saved_opkg_dir,
- self.opkg_dir,
- symlinks=True)
-
- """
- Returns a dictionary with the package info.
- """
- def package_info(self, pkg):
- cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
- pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
-
- pkg_arch = pkg_info[pkg]["arch"]
- pkg_filename = pkg_info[pkg]["filename"]
- pkg_info[pkg]["filepath"] = \
- os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
-
- return pkg_info
-
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
- """
- def extract(self, pkg):
- pkg_info = self.package_info(pkg)
- if not pkg_info:
- bb.fatal("Unable to get information for package '%s' while "
- "trying to extract the package." % pkg)
-
- tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.gz"))
-
- return tmp_dir
-
-class DpkgPM(OpkgDpkgPM):
- def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
- super(DpkgPM, self).__init__(d)
- self.target_rootfs = target_rootfs
- self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB')
- if apt_conf_dir is None:
- self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
- else:
- self.apt_conf_dir = apt_conf_dir
- self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
- self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
- self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")
-
- self.apt_args = d.getVar("APT_ARGS")
-
- self.all_arch_list = archs.split()
- all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
- self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
-
- self._create_configs(archs, base_archs)
-
- self.indexer = DpkgIndexer(self.d, self.deploy_dir)
-
- """
- This function will change a package's status in /var/lib/dpkg/status file.
- If 'packages' is None then the new_status will be applied to all
- packages
- """
- def mark_packages(self, status_tag, packages=None):
- status_file = self.target_rootfs + "/var/lib/dpkg/status"
-
- with open(status_file, "r") as sf:
- with open(status_file + ".tmp", "w+") as tmp_sf:
- if packages is None:
- tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
- r"Package: \1\n\2Status: \3%s" % status_tag,
- sf.read()))
- else:
- if type(packages).__name__ != "list":
- raise TypeError("'packages' should be a list object")
-
- status = sf.read()
- for pkg in packages:
- status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
- r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
- status)
-
- tmp_sf.write(status)
-
- os.rename(status_file + ".tmp", status_file)
-
- """
- Run the pre/post installs for package "package_name". If package_name is
- None, then run all pre/post install scriptlets.
- """
- def run_pre_post_installs(self, package_name=None):
- info_dir = self.target_rootfs + "/var/lib/dpkg/info"
- ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
- control_scripts = [
- ControlScript(".preinst", "Preinstall", "install"),
- ControlScript(".postinst", "Postinstall", "configure")]
- status_file = self.target_rootfs + "/var/lib/dpkg/status"
- installed_pkgs = []
-
- with open(status_file, "r") as status:
- for line in status.read().split('\n'):
- m = re.match("^Package: (.*)", line)
- if m is not None:
- installed_pkgs.append(m.group(1))
-
- if package_name is not None and not package_name in installed_pkgs:
- return
-
- os.environ['D'] = self.target_rootfs
- os.environ['OFFLINE_ROOT'] = self.target_rootfs
- os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
-
- failed_pkgs = []
- for pkg_name in installed_pkgs:
- for control_script in control_scripts:
- p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
- if os.path.exists(p_full):
- try:
- bb.note("Executing %s for package: %s ..." %
- (control_script.name.lower(), pkg_name))
- output = subprocess.check_output([p_full, control_script.argument],
- stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.note("%s for package %s failed with %d:\n%s" %
- (control_script.name, pkg_name, e.returncode,
- e.output.decode("utf-8")))
- failed_pkgs.append(pkg_name)
- break
-
- if len(failed_pkgs):
- self.mark_packages("unpacked", failed_pkgs)
-
- def update(self):
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- self.deploy_dir_lock()
-
- cmd = "%s update" % self.apt_get_cmd
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to update the package index files. Command '%s' "
- "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- self.deploy_dir_unlock()
-
- def install(self, pkgs, attempt_only=False):
- if attempt_only and len(pkgs) == 0:
- return
-
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \
- (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
-
- try:
- bb.note("Installing the following packages: %s" % ' '.join(pkgs))
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
- "Command '%s' returned %d:\n%s" %
- (cmd, e.returncode, e.output.decode("utf-8")))
-
- # rename *.dpkg-new files/dirs
- for root, dirs, files in os.walk(self.target_rootfs):
- for dir in dirs:
- new_dir = re.sub("\.dpkg-new", "", dir)
- if dir != new_dir:
- os.rename(os.path.join(root, dir),
- os.path.join(root, new_dir))
-
- for file in files:
- new_file = re.sub("\.dpkg-new", "", file)
- if file != new_file:
- os.rename(os.path.join(root, file),
- os.path.join(root, new_file))
-
-
- def remove(self, pkgs, with_dependencies=True):
- if with_dependencies:
- os.environ['APT_CONFIG'] = self.apt_conf_file
- cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
- else:
- cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
- " -P --force-depends %s" % \
- (bb.utils.which(os.getenv('PATH'), "dpkg"),
- self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to remove packages. Command '%s' "
- "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- def write_index(self):
- self.deploy_dir_lock()
-
- result = self.indexer.write_index()
-
- self.deploy_dir_unlock()
-
- if result is not None:
- bb.fatal(result)
-
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- if feed_uris == "":
- return
-
- sources_conf = os.path.join("%s/etc/apt/sources.list"
- % self.target_rootfs)
- arch_list = []
-
- if feed_archs is None:
- for arch in self.all_arch_list:
- if not os.path.exists(os.path.join(self.deploy_dir, arch)):
- continue
- arch_list.append(arch)
- else:
- arch_list = feed_archs.split()
-
- feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
-
- with open(sources_conf, "w+") as sources_file:
- for uri in feed_uris:
- if arch_list:
- for arch in arch_list:
- bb.note('Adding dpkg channel at (%s)' % uri)
- sources_file.write("deb %s/%s ./\n" %
- (uri, arch))
- else:
- bb.note('Adding dpkg channel at (%s)' % uri)
- sources_file.write("deb %s ./\n" % uri)
-
- def _create_configs(self, archs, base_archs):
- base_archs = re.sub("_", "-", base_archs)
-
- if os.path.exists(self.apt_conf_dir):
- bb.utils.remove(self.apt_conf_dir, True)
-
- bb.utils.mkdirhier(self.apt_conf_dir)
- bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
- bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
- bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")
-
- arch_list = []
- for arch in self.all_arch_list:
- if not os.path.exists(os.path.join(self.deploy_dir, arch)):
- continue
- arch_list.append(arch)
-
- with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
- priority = 801
- for arch in arch_list:
- prefs_file.write(
- "Package: *\n"
- "Pin: release l=%s\n"
- "Pin-Priority: %d\n\n" % (arch, priority))
-
- priority += 5
-
- pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
- for pkg in pkg_exclude.split():
- prefs_file.write(
- "Package: %s\n"
- "Pin: release *\n"
- "Pin-Priority: -1\n\n" % pkg)
-
- arch_list.reverse()
-
- with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
- for arch in arch_list:
- sources_file.write("deb file:%s/ ./\n" %
- os.path.join(self.deploy_dir, arch))
-
- base_arch_list = base_archs.split()
- multilib_variants = self.d.getVar("MULTILIB_VARIANTS");
- for variant in multilib_variants.split():
- localdata = bb.data.createCopy(self.d)
- variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False)
- orig_arch = localdata.getVar("DPKG_ARCH")
- localdata.setVar("DEFAULTTUNE", variant_tune)
- variant_arch = localdata.getVar("DPKG_ARCH")
- if variant_arch not in base_arch_list:
- base_arch_list.append(variant_arch)
-
- with open(self.apt_conf_file, "w+") as apt_conf:
- with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
- for line in apt_conf_sample.read().split("\n"):
- match_arch = re.match(" Architecture \".*\";$", line)
- architectures = ""
- if match_arch:
- for base_arch in base_arch_list:
- architectures += "\"%s\";" % base_arch
- apt_conf.write(" Architectures {%s};\n" % architectures);
- apt_conf.write(" Architecture \"%s\";\n" % base_archs)
- else:
- line = re.sub("#ROOTFS#", self.target_rootfs, line)
- line = re.sub("#APTCONF#", self.apt_conf_dir, line)
- apt_conf.write(line + "\n")
-
- target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
- bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
-
- bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
-
- if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
- open(os.path.join(target_dpkg_dir, "status"), "w+").close()
- if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
- open(os.path.join(target_dpkg_dir, "available"), "w+").close()
-
- def remove_packaging_data(self):
- bb.utils.remove(os.path.join(self.target_rootfs,
- self.d.getVar('opkglibdir')), True)
- bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
-
- def fix_broken_dependencies(self):
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args)
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot fix broken dependencies. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- def list_installed(self):
- return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs()
-
- """
- Returns a dictionary with the package info.
- """
- def package_info(self, pkg):
- cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
- pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
-
- pkg_arch = pkg_info[pkg]["pkgarch"]
- pkg_filename = pkg_info[pkg]["filename"]
- pkg_info[pkg]["filepath"] = \
- os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
-
- return pkg_info
-
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
- """
- def extract(self, pkg):
- pkg_info = self.package_info(pkg)
- if not pkg_info:
- bb.fatal("Unable to get information for package '%s' while "
- "trying to extract the package." % pkg)
-
- tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
-
- return tmp_dir
-
-def generate_index_files(d):
- classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
-
- indexer_map = {
- "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM')),
- "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
- "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
- }
-
- result = None
-
- for pkg_class in classes:
- if not pkg_class in indexer_map:
- continue
-
- if os.path.exists(indexer_map[pkg_class][1]):
- result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
-
- if result is not None:
- bb.fatal(result)
-
-if __name__ == "__main__":
- """
- We should be able to run this as a standalone script, from outside bitbake
- environment.
- """
- """
- TBD
- """
diff --git a/meta/lib/oe/package_manager/__init__.py b/meta/lib/oe/package_manager/__init__.py
new file mode 100644
index 0000000000..80bc1a6bc6
--- /dev/null
+++ b/meta/lib/oe/package_manager/__init__.py
@@ -0,0 +1,556 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from abc import ABCMeta, abstractmethod
+import os
+import glob
+import subprocess
+import shutil
+import re
+import collections
+import bb
+import tempfile
+import oe.utils
+import oe.path
+import string
+from oe.gpg_sign import get_signer
+import hashlib
+import fnmatch
+
+# this can be used by all PM backends to create the index files in parallel
+def create_index(arg):
+ index_cmd = arg
+
+ bb.note("Executing '%s' ..." % index_cmd)
+ result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
+ if result:
+ bb.note(result)
+
+def opkg_query(cmd_output):
+ """
+    This method parses the output from the package manager and returns
+    a dictionary with the information of the packages. This is used
+ when the packages are in deb or ipk format.
+ """
+ verregex = re.compile(r' \([=<>]* [^ )]*\)')
+ output = dict()
+ pkg = ""
+ arch = ""
+ ver = ""
+ filename = ""
+ dep = []
+ prov = []
+ pkgarch = ""
+ for line in cmd_output.splitlines()+['']:
+ line = line.rstrip()
+ if ':' in line:
+ if line.startswith("Package: "):
+ pkg = line.split(": ")[1]
+ elif line.startswith("Architecture: "):
+ arch = line.split(": ")[1]
+ elif line.startswith("Version: "):
+ ver = line.split(": ")[1]
+ elif line.startswith("File: ") or line.startswith("Filename:"):
+ filename = line.split(": ")[1]
+ if "/" in filename:
+ filename = os.path.basename(filename)
+ elif line.startswith("Depends: "):
+ depends = verregex.sub('', line.split(": ")[1])
+ for depend in depends.split(", "):
+ dep.append(depend)
+ elif line.startswith("Recommends: "):
+ recommends = verregex.sub('', line.split(": ")[1])
+ for recommend in recommends.split(", "):
+ dep.append("%s [REC]" % recommend)
+ elif line.startswith("PackageArch: "):
+ pkgarch = line.split(": ")[1]
+ elif line.startswith("Provides: "):
+ provides = verregex.sub('', line.split(": ")[1])
+ for provide in provides.split(", "):
+ prov.append(provide)
+
+ # When there is a blank line save the package information
+ elif not line:
+ # IPK doesn't include the filename
+ if not filename:
+ filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
+ if pkg:
+ output[pkg] = {"arch":arch, "ver":ver,
+ "filename":filename, "deps": dep, "pkgarch":pkgarch, "provs": prov}
+ pkg = ""
+ arch = ""
+ ver = ""
+ filename = ""
+ dep = []
+ prov = []
+ pkgarch = ""
+
+ return output
+
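# Editor's illustration (not part of this change): a minimal sketch of what
# opkg_query() returns for a typical status/info stanza. The package name,
# version and architecture below are made-up example values.
#
#   sample = (
#       "Package: libfoo\n"            # hypothetical package
#       "Architecture: core2-64\n"
#       "Version: 1.0-r0\n"
#       "Depends: libc6 (>= 2.31)\n"
#       "\n"
#   )
#   opkg_query(sample)
#   # -> {'libfoo': {'arch': 'core2-64', 'ver': '1.0-r0',
#   #                'filename': 'libfoo_1.0-r0_core2-64.ipk',
#   #                'deps': ['libc6'], 'pkgarch': '', 'provs': []}}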
+def failed_postinsts_abort(pkgs, log_path):
+ bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
+then please place them into pkg_postinst_ontarget:${PN} ().
+Deferring to first boot via 'exit 1' is no longer supported.
+Details of the failure are in %s.""" %(pkgs, log_path))
+
+def generate_locale_archive(d, rootfs, target_arch, localedir):
+ # Pretty sure we don't need this for locale archive generation but
+ # keeping it to be safe...
+ locale_arch_options = { \
+ "arc": ["--uint32-align=4", "--little-endian"],
+ "arceb": ["--uint32-align=4", "--big-endian"],
+ "arm": ["--uint32-align=4", "--little-endian"],
+ "armeb": ["--uint32-align=4", "--big-endian"],
+ "aarch64": ["--uint32-align=4", "--little-endian"],
+ "aarch64_be": ["--uint32-align=4", "--big-endian"],
+ "sh4": ["--uint32-align=4", "--big-endian"],
+ "powerpc": ["--uint32-align=4", "--big-endian"],
+ "powerpc64": ["--uint32-align=4", "--big-endian"],
+ "powerpc64le": ["--uint32-align=4", "--little-endian"],
+ "mips": ["--uint32-align=4", "--big-endian"],
+ "mipsisa32r6": ["--uint32-align=4", "--big-endian"],
+ "mips64": ["--uint32-align=4", "--big-endian"],
+ "mipsisa64r6": ["--uint32-align=4", "--big-endian"],
+ "mipsel": ["--uint32-align=4", "--little-endian"],
+ "mipsisa32r6el": ["--uint32-align=4", "--little-endian"],
+ "mips64el": ["--uint32-align=4", "--little-endian"],
+ "mipsisa64r6el": ["--uint32-align=4", "--little-endian"],
+ "riscv64": ["--uint32-align=4", "--little-endian"],
+ "riscv32": ["--uint32-align=4", "--little-endian"],
+ "i586": ["--uint32-align=4", "--little-endian"],
+ "i686": ["--uint32-align=4", "--little-endian"],
+ "x86_64": ["--uint32-align=4", "--little-endian"]
+ }
+ if target_arch in locale_arch_options:
+ arch_options = locale_arch_options[target_arch]
+ else:
+ bb.error("locale_arch_options not found for target_arch=" + target_arch)
+ bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
+
+ # Need to set this so cross-localedef knows where the archive is
+ env = dict(os.environ)
+ env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")
+
+ for name in sorted(os.listdir(localedir)):
+ path = os.path.join(localedir, name)
+ if os.path.isdir(path):
+ cmd = ["cross-localedef", "--verbose"]
+ cmd += arch_options
+ cmd += ["--add-to-archive", path]
+ subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
+
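# Editor's illustration (not part of this change): for an aarch64 target the
# loop above effectively runs, per locale directory (paths are placeholders):
#
#   LOCALEARCHIVE=<rootfs>/usr/lib/locale/locale-archive \
#       cross-localedef --verbose --uint32-align=4 --little-endian \
#       --add-to-archive <rootfs>/usr/lib/locale/en_US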
+class Indexer(object, metaclass=ABCMeta):
+ def __init__(self, d, deploy_dir):
+ self.d = d
+ self.deploy_dir = deploy_dir
+
+ @abstractmethod
+ def write_index(self):
+ pass
+
+class PkgsList(object, metaclass=ABCMeta):
+ def __init__(self, d, rootfs_dir):
+ self.d = d
+ self.rootfs_dir = rootfs_dir
+
+ @abstractmethod
+ def list_pkgs(self):
+ pass
+
+class PackageManager(object, metaclass=ABCMeta):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+
+ def __init__(self, d, target_rootfs):
+ self.d = d
+ self.target_rootfs = target_rootfs
+ self.deploy_dir = None
+ self.deploy_lock = None
+ self._initialize_intercepts()
+
+ def _initialize_intercepts(self):
+ bb.note("Initializing intercept dir for %s" % self.target_rootfs)
+ # As there might be more than one instance of PackageManager operating at the same time
+ # we need to isolate the intercept_scripts directories from each other,
+ # hence the ugly hash digest in dir name.
+ self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" %
+ (hashlib.sha256(self.target_rootfs.encode()).hexdigest()))
+
+ postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split()
+ if not postinst_intercepts:
+ postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH")
+ if not postinst_intercepts_path:
+ postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts")
+ postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path)
+
+ bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts))
+ bb.utils.remove(self.intercepts_dir, True)
+ bb.utils.mkdirhier(self.intercepts_dir)
+ for intercept in postinst_intercepts:
+ shutil.copy(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
+
+ @abstractmethod
+ def _handle_intercept_failure(self, failed_script):
+ pass
+
+ def _postpone_to_first_boot(self, postinst_intercept_hook):
+ with open(postinst_intercept_hook) as intercept:
+ registered_pkgs = None
+ for line in intercept.read().split("\n"):
+ m = re.match(r"^##PKGS:(.*)", line)
+ if m is not None:
+ registered_pkgs = m.group(1).strip()
+ break
+
+ if registered_pkgs is not None:
+ bb.note("If an image is being built, the postinstalls for the following packages "
+ "will be postponed for first boot: %s" %
+ registered_pkgs)
+
+ # call the backend dependent handler
+ self._handle_intercept_failure(registered_pkgs)
+
+
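# Editor's illustration (not part of this change): _postpone_to_first_boot()
# scans a delay_to_first_boot hook for a "##PKGS:" marker line. A hook
# containing (package names are hypothetical):
#
#   #!/bin/sh
#   ##PKGS: foo bar
#
# would hand "foo bar" to the backend's _handle_intercept_failure(), deferring
# the postinstalls of those packages to first boot.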
+ def run_intercepts(self, populate_sdk=None):
+ intercepts_dir = self.intercepts_dir
+
+ bb.note("Running intercept scripts:")
+ os.environ['D'] = self.target_rootfs
+ os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
+ for script in os.listdir(intercepts_dir):
+ script_full = os.path.join(intercepts_dir, script)
+
+ if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
+ continue
+
+ # we do not want to run any multilib variant of this
+ if script.startswith("delay_to_first_boot"):
+ self._postpone_to_first_boot(script_full)
+ continue
+
+ if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ continue
+
+ bb.note("> Executing %s intercept ..." % script)
+
+ try:
+ output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
+ if output: bb.note(output.decode("utf-8"))
+ except subprocess.CalledProcessError as e:
+ bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
+ if populate_sdk == 'host':
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ elif populate_sdk == 'target':
+ if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ else:
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ else:
+ if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ self._postpone_to_first_boot(script_full)
+ else:
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+
+ @abstractmethod
+ def update(self):
+ """
+ Update the package manager package database.
+ """
+ pass
+
+ @abstractmethod
+ def install(self, pkgs, attempt_only=False):
+ """
+ Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
+ True, installation failures are ignored.
+ """
+ pass
+
+ @abstractmethod
+ def remove(self, pkgs, with_dependencies=True):
+ """
+ Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
+ is False, then any dependencies are left in place.
+ """
+ pass
+
+ @abstractmethod
+ def write_index(self):
+ """
+ This function creates the index files
+ """
+ pass
+
+ @abstractmethod
+ def remove_packaging_data(self):
+ pass
+
+ @abstractmethod
+ def list_installed(self):
+ pass
+
+ @abstractmethod
+ def extract(self, pkg):
+ """
+        Returns the path to a tmpdir containing the contents of a package.
+        Deleting the tmpdir is the responsibility of the caller.
+ """
+ pass
+
+ @abstractmethod
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ """
+ Add remote package feeds into repository manager configuration. The parameters
+ for the feeds are set by feed_uris, feed_base_paths and feed_archs.
+ See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
+ for their description.
+ """
+ pass
+
+ def install_glob(self, globs, sdk=False):
+ """
+ Install all packages that match a glob.
+ """
+ # TODO don't have sdk here but have a property on the superclass
+ # (and respect in install_complementary)
+ if sdk:
+ pkgdatadir = self.d.getVar("PKGDATA_DIR_SDK")
+ else:
+ pkgdatadir = self.d.getVar("PKGDATA_DIR")
+
+ try:
+ bb.note("Installing globbed packages...")
+ cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
+ bb.note('Running %s' % cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = proc.communicate()
+ if stderr: bb.note(stderr.decode("utf-8"))
+ pkgs = stdout.decode("utf-8")
+ self.install(pkgs.split(), attempt_only=True)
+ except subprocess.CalledProcessError as e:
+ # Return code 1 means no packages matched
+ if e.returncode != 1:
+ bb.fatal("Could not compute globbed packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ def install_complementary(self, globs=None):
+ """
+ Install complementary packages based upon the list of currently installed
+ packages e.g. locales, *-dev, *-dbg, etc. Note: every backend needs to
+ call this function explicitly after the normal package installation.
+ """
+ if globs is None:
+ globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
+ split_linguas = set()
+
+ for translation in self.d.getVar('IMAGE_LINGUAS').split():
+ split_linguas.add(translation)
+ split_linguas.add(translation.split('-')[0])
+
+ split_linguas = sorted(split_linguas)
+
+ for lang in split_linguas:
+ globs += " *-locale-%s" % lang
+ for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split():
+ globs += (" " + complementary_linguas) % lang
+
+ if globs is None:
+ return
+
+        # we need to write the list of installed packages to a file because
+        # oe-pkgdata-util reads it from a file
+ with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
+ pkgs = self.list_installed()
+
+ provided_pkgs = set()
+ for pkg in pkgs.values():
+ provided_pkgs |= set(pkg.get('provs', []))
+
+ output = oe.utils.format_pkg_list(pkgs, "arch")
+ installed_pkgs.write(output)
+ installed_pkgs.flush()
+
+ cmd = ["oe-pkgdata-util",
+ "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
+ globs]
+ exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
+ if exclude:
+ cmd.extend(['--exclude=' + '|'.join(exclude.split())])
+ try:
+ bb.note('Running %s' % cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = proc.communicate()
+ if stderr: bb.note(stderr.decode("utf-8"))
+ complementary_pkgs = stdout.decode("utf-8")
+ complementary_pkgs = set(complementary_pkgs.split())
+ skip_pkgs = sorted(complementary_pkgs & provided_pkgs)
+ install_pkgs = sorted(complementary_pkgs - provided_pkgs)
+ bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % (
+ ' '.join(install_pkgs),
+ ' '.join(skip_pkgs)))
+ self.install(install_pkgs)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not compute complementary packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ if self.d.getVar('IMAGE_LOCALES_ARCHIVE') == '1':
+ target_arch = self.d.getVar('TARGET_ARCH')
+ localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
+ if os.path.exists(localedir) and os.listdir(localedir):
+ generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
+ # And now delete the binary locales
+ self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
+
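# Editor's illustration (not part of this change): in install_complementary()
# above, IMAGE_LINGUAS = "en-us pt-br" splits into the sorted set
# ("en", "en-us", "pt", "pt-br"), so the globs gain
#
#   " *-locale-en *-locale-en-us *-locale-pt *-locale-pt-br"
#
# which oe-pkgdata-util then matches against the installed package list.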
+ def deploy_dir_lock(self):
+ if self.deploy_dir is None:
+ raise RuntimeError("deploy_dir is not set!")
+
+ lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
+
+ self.deploy_lock = bb.utils.lockfile(lock_file_name)
+
+ def deploy_dir_unlock(self):
+ if self.deploy_lock is None:
+ return
+
+ bb.utils.unlockfile(self.deploy_lock)
+
+ self.deploy_lock = None
+
+ def construct_uris(self, uris, base_paths):
+ """
+        Construct URIs based on the pattern uri/base_path, where 'uri' and
+        'base_path' correspond to each element of the respective list
+        arguments, leading to len(uris) x len(base_paths) elements in the
+        returned list.
+ """
+ def _append(arr1, arr2, sep='/'):
+ res = []
+ narr1 = [a.rstrip(sep) for a in arr1]
+ narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
+ for a1 in narr1:
+ if arr2:
+ for a2 in narr2:
+ res.append("%s%s%s" % (a1, sep, a2))
+ else:
+ res.append(a1)
+ return res
+ return _append(uris, base_paths)
+
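# Editor's illustration (not part of this change): construct_uris() builds the
# cross product of URIs and base paths, e.g. (URL is hypothetical):
#
#   uris       = ["http://example.com/feed/"]
#   base_paths = ["ipk/all", "ipk/core2-64"]
#
# yields
#
#   ["http://example.com/feed/ipk/all", "http://example.com/feed/ipk/core2-64"]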
+def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies):
+ """
+ Go through our do_package_write_X dependencies and hardlink the packages we depend
+    upon into the repo directory. This prevents us from seeing other packages that may
+    have been built but that we don't depend upon, as well as packages for architectures
+    we don't support.
+ """
+ import errno
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ mytaskname = d.getVar("BB_RUNTASK")
+ pn = d.getVar("PN")
+ seendirs = set()
+ multilibs = {}
+
+ bb.utils.remove(subrepo_dir, recurse=True)
+ bb.utils.mkdirhier(subrepo_dir)
+
+ # Detect bitbake -b usage
+ nodeps = d.getVar("BB_LIMITEDDEPS") or False
+ if nodeps or not filterbydependencies:
+ oe.path.symlink(deploydir, subrepo_dir, True)
+ return
+
+ start = None
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == mytaskname and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+ pkgdeps = set()
+ start = [start]
+ seen = set(start)
+ # Support direct dependencies (do_rootfs -> do_package_write_X)
+ # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X)
+ while start:
+ next = []
+ for dep2 in start:
+ for dep in taskdepdata[dep2][3]:
+ if taskdepdata[dep][0] != pn:
+ if "do_" + taskname in dep:
+ pkgdeps.add(dep)
+ elif dep not in seen:
+ next.append(dep)
+ seen.add(dep)
+ start = next
+
+ for dep in pkgdeps:
+ c = taskdepdata[dep][0]
+ manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
+ if not manifest:
+ bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
+ if not os.path.exists(manifest):
+ continue
+ with open(manifest, "r") as f:
+ for l in f:
+ l = l.strip()
+ deploydir = os.path.normpath(deploydir)
+ if bb.data.inherits_class('packagefeed-stability', d):
+ dest = l.replace(deploydir + "-prediff", "")
+ else:
+ dest = l.replace(deploydir, "")
+ dest = subrepo_dir + dest
+ if l.endswith("/"):
+ if dest not in seendirs:
+ bb.utils.mkdirhier(dest)
+ seendirs.add(dest)
+ continue
+ # Try to hardlink the file, copy if that fails
+ destdir = os.path.dirname(dest)
+ if destdir not in seendirs:
+ bb.utils.mkdirhier(destdir)
+ seendirs.add(destdir)
+ try:
+ os.link(l, dest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(l, dest)
+ else:
+ raise
+
+
+def generate_index_files(d):
+ from oe.package_manager.rpm import RpmSubdirIndexer
+ from oe.package_manager.ipk import OpkgIndexer
+ from oe.package_manager.deb import DpkgIndexer
+
+ classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
+
+ indexer_map = {
+ "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
+ "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
+ "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
+ }
+
+ result = None
+
+ for pkg_class in classes:
+ if not pkg_class in indexer_map:
+ continue
+
+ if os.path.exists(indexer_map[pkg_class][1]):
+ result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
+
+ if result is not None:
+ bb.fatal(result)
diff --git a/meta/lib/oe/package_manager/deb/__init__.py b/meta/lib/oe/package_manager/deb/__init__.py
new file mode 100644
index 0000000000..9f112ae25b
--- /dev/null
+++ b/meta/lib/oe/package_manager/deb/__init__.py
@@ -0,0 +1,503 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import subprocess
+from oe.package_manager import *
+
+class DpkgIndexer(Indexer):
+ def _create_configs(self):
+ bb.utils.mkdirhier(self.apt_conf_dir)
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))
+
+ with open(os.path.join(self.apt_conf_dir, "preferences"),
+ "w") as prefs_file:
+ pass
+ with open(os.path.join(self.apt_conf_dir, "sources.list"),
+ "w+") as sources_file:
+ pass
+
+ with open(self.apt_conf_file, "w") as apt_conf:
+ with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
+ "apt", "apt.conf.sample")) as apt_conf_sample:
+ for line in apt_conf_sample.read().split("\n"):
+ line = re.sub(r"#ROOTFS#", "/dev/null", line)
+ line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
+ apt_conf.write(line + "\n")
+
+ def write_index(self):
+ self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
+ "apt-ftparchive")
+ self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
+ self._create_configs()
+
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ pkg_archs = self.d.getVar('PACKAGE_ARCHS')
+ if pkg_archs is not None:
+ arch_list = pkg_archs.split()
+ sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
+ if sdk_pkg_archs is not None:
+ for a in sdk_pkg_archs.split():
+ if a not in pkg_archs:
+ arch_list.append(a)
+
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
+ arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
+
+ apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
+ gzip = bb.utils.which(os.getenv('PATH'), "gzip")
+
+ index_cmds = []
+ deb_dirs_found = False
+ for arch in arch_list:
+ arch_dir = os.path.join(self.deploy_dir, arch)
+ if not os.path.isdir(arch_dir):
+ continue
+
+ cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
+
+ cmd += "%s -fcn Packages > Packages.gz;" % gzip
+
+ with open(os.path.join(arch_dir, "Release"), "w+") as release:
+ release.write("Label: %s\n" % arch)
+
+ cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
+
+ index_cmds.append(cmd)
+
+ deb_dirs_found = True
+
+ if not deb_dirs_found:
+ bb.note("There are no packages in %s" % self.deploy_dir)
+ return
+
+ oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+            raise NotImplementedError('Package feed signing not implemented for dpkg')
+
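# Editor's illustration (not part of this change): for each per-architecture
# deploy directory, the indexer above queues a shell command of the form
# (paths are placeholders):
#
#   cd <DEPLOY_DIR_DEB>/<arch>; PSEUDO_UNLOAD=1 apt-ftparchive packages . > Packages; \
#       gzip -fcn Packages > Packages.gz; PSEUDO_UNLOAD=1 apt-ftparchive release . >> Release
#
# and runs the commands in parallel via oe.utils.multiprocess_launch(create_index, ...).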
+class PMPkgsList(PkgsList):
+
+ def list_pkgs(self):
+ cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
+ "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
+ "-W"]
+
+ cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\nProvides: ${Provides}\n\n")
+
+ try:
+ cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot get the installed packages list. Command '%s' "
+ "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ return opkg_query(cmd_output)
+
+class OpkgDpkgPM(PackageManager):
+ def __init__(self, d, target_rootfs):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ super(OpkgDpkgPM, self).__init__(d, target_rootfs)
+
+ def package_info(self, pkg, cmd):
+ """
+ Returns a dictionary with the package info.
+
+ This method extracts the common parts for Opkg and Dpkg
+ """
+
+ try:
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to list available packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ return opkg_query(output)
+
+ def extract(self, pkg, pkg_info):
+ """
+        Returns the path to a tmpdir containing the contents of a package.
+
+        Deleting the tmpdir is the responsibility of the caller.
+
+ This method extracts the common parts for Opkg and Dpkg
+ """
+
+ ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
+ tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
+ pkg_path = pkg_info[pkg]["filepath"]
+
+ if not os.path.isfile(pkg_path):
+            bb.fatal("Unable to extract package for '%s'. "
+                     "File %s doesn't exist" % (pkg, pkg_path))
+
+ tmp_dir = tempfile.mkdtemp()
+ current_dir = os.getcwd()
+ os.chdir(tmp_dir)
+ data_tar = 'data.tar.xz'
+
+ try:
+ cmd = [ar_cmd, 'x', pkg_path]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ cmd = [tar_cmd, 'xf', data_tar]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ except OSError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
+
+ bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
+ bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
+ bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
+ os.chdir(current_dir)
+
+ return tmp_dir
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ self.mark_packages("unpacked", registered_pkgs.split())
+
+class DpkgPM(OpkgDpkgPM):
+ def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True):
+ super(DpkgPM, self).__init__(d, target_rootfs)
+ self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir)
+
+ create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies)
+
+ if apt_conf_dir is None:
+ self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
+ else:
+ self.apt_conf_dir = apt_conf_dir
+ self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
+ self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
+ self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")
+
+ self.apt_args = d.getVar("APT_ARGS")
+
+ self.all_arch_list = archs.split()
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
+ self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
+
+ self._create_configs(archs, base_archs)
+
+ self.indexer = DpkgIndexer(self.d, self.deploy_dir)
+
+ def mark_packages(self, status_tag, packages=None):
+ """
+ This function will change a package's status in /var/lib/dpkg/status file.
+ If 'packages' is None then the new_status will be applied to all
+ packages
+ """
+ status_file = self.target_rootfs + "/var/lib/dpkg/status"
+
+ with open(status_file, "r") as sf:
+ with open(status_file + ".tmp", "w+") as tmp_sf:
+ if packages is None:
+ tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+ r"Package: \1\n\2Status: \3%s" % status_tag,
+ sf.read()))
+ else:
+ if type(packages).__name__ != "list":
+ raise TypeError("'packages' should be a list object")
+
+ status = sf.read()
+ for pkg in packages:
+ status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
+ r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
+ status)
+
+ tmp_sf.write(status)
+
+ bb.utils.rename(status_file + ".tmp", status_file)
+
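# Editor's illustration (not part of this change): mark_packages("installed")
# rewrites a dpkg status entry such as (package name is hypothetical)
#
#   Package: libfoo
#   Status: install ok unpacked
#
# into
#
#   Package: libfoo
#   Status: install ok installed
#
# leaving every other field of the entry untouched.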
+ def run_pre_post_installs(self, package_name=None):
+ """
+ Run the pre/post installs for package "package_name". If package_name is
+ None, then run all pre/post install scriptlets.
+ """
+ info_dir = self.target_rootfs + "/var/lib/dpkg/info"
+ ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
+ control_scripts = [
+ ControlScript(".preinst", "Preinstall", "install"),
+ ControlScript(".postinst", "Postinstall", "configure")]
+ status_file = self.target_rootfs + "/var/lib/dpkg/status"
+ installed_pkgs = []
+
+ with open(status_file, "r") as status:
+ for line in status.read().split('\n'):
+ m = re.match(r"^Package: (.*)", line)
+ if m is not None:
+ installed_pkgs.append(m.group(1))
+
+ if package_name is not None and not package_name in installed_pkgs:
+ return
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+ for pkg_name in installed_pkgs:
+ for control_script in control_scripts:
+ p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
+ if os.path.exists(p_full):
+ try:
+ bb.note("Executing %s for package: %s ..." %
+ (control_script.name.lower(), pkg_name))
+ output = subprocess.check_output([p_full, control_script.argument],
+ stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.warn("%s for package %s failed with %d:\n%s" %
+ (control_script.name, pkg_name, e.returncode,
+ e.output.decode("utf-8")))
+ failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+
+ def update(self):
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ self.deploy_dir_lock()
+
+ cmd = "%s update" % self.apt_get_cmd
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to update the package index files. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
+
+ self.deploy_dir_unlock()
+
+ def install(self, pkgs, attempt_only=False):
+ if attempt_only and len(pkgs) == 0:
+ return
+
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ cmd = "%s %s install --allow-downgrades --allow-remove-essential --allow-change-held-packages --allow-unauthenticated --no-remove %s" % \
+ (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
+
+ try:
+ bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ bb.note(output.decode("utf-8"))
+ except subprocess.CalledProcessError as e:
+ (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
+ "Command '%s' returned %d:\n%s" %
+ (cmd, e.returncode, e.output.decode("utf-8")))
+
+ # rename *.dpkg-new files/dirs
+ for root, dirs, files in os.walk(self.target_rootfs):
+ for dir in dirs:
+ new_dir = re.sub(r"\.dpkg-new", "", dir)
+ if dir != new_dir:
+ bb.utils.rename(os.path.join(root, dir),
+ os.path.join(root, new_dir))
+
+ for file in files:
+ new_file = re.sub(r"\.dpkg-new", "", file)
+ if file != new_file:
+ bb.utils.rename(os.path.join(root, file),
+ os.path.join(root, new_file))
+
+
+ def remove(self, pkgs, with_dependencies=True):
+ if not pkgs:
+ return
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+
+ if with_dependencies:
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+ cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
+ else:
+ cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
+ " -P --force-depends %s" % \
+ (bb.utils.which(os.getenv('PATH'), "dpkg"),
+ self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to remove packages. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
+
+ def write_index(self):
+ self.deploy_dir_lock()
+
+ result = self.indexer.write_index()
+
+ self.deploy_dir_unlock()
+
+ if result is not None:
+ bb.fatal(result)
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ if feed_uris == "":
+ return
+
+
+ sources_conf = os.path.join("%s/etc/apt/sources.list"
+ % self.target_rootfs)
+ if not os.path.exists(os.path.dirname(sources_conf)):
+ return
+
+ arch_list = []
+
+ if feed_archs is None:
+ for arch in self.all_arch_list:
+ if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+ continue
+ arch_list.append(arch)
+ else:
+ arch_list = feed_archs.split()
+
+ feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+
+ with open(sources_conf, "w+") as sources_file:
+ for uri in feed_uris:
+ if arch_list:
+ for arch in arch_list:
+ bb.note('Adding dpkg channel at (%s)' % uri)
+ sources_file.write("deb [trusted=yes] %s/%s ./\n" %
+ (uri, arch))
+ else:
+ bb.note('Adding dpkg channel at (%s)' % uri)
+ sources_file.write("deb [trusted=yes] %s ./\n" % uri)
+
+ def _create_configs(self, archs, base_archs):
+ base_archs = re.sub(r"_", r"-", base_archs)
+
+ if os.path.exists(self.apt_conf_dir):
+ bb.utils.remove(self.apt_conf_dir, True)
+
+ bb.utils.mkdirhier(self.apt_conf_dir)
+ bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
+ bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
+ bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")
+
+ arch_list = []
+ for arch in self.all_arch_list:
+ if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+ continue
+ arch_list.append(arch)
+
+ with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
+ priority = 801
+ for arch in arch_list:
+ prefs_file.write(
+ "Package: *\n"
+ "Pin: release l=%s\n"
+ "Pin-Priority: %d\n\n" % (arch, priority))
+
+ priority += 5
+
+ pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
+ for pkg in pkg_exclude.split():
+ prefs_file.write(
+ "Package: %s\n"
+ "Pin: release *\n"
+ "Pin-Priority: -1\n\n" % pkg)
+
+ arch_list.reverse()
+
+ with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
+ for arch in arch_list:
+ sources_file.write("deb [trusted=yes] file:%s/ ./\n" %
+ os.path.join(self.deploy_dir, arch))
+
+ base_arch_list = base_archs.split()
+ multilib_variants = self.d.getVar("MULTILIB_VARIANTS");
+ for variant in multilib_variants.split():
+ localdata = bb.data.createCopy(self.d)
+ variant_tune = localdata.getVar("DEFAULTTUNE:virtclass-multilib-" + variant, False)
+ orig_arch = localdata.getVar("DPKG_ARCH")
+ localdata.setVar("DEFAULTTUNE", variant_tune)
+ variant_arch = localdata.getVar("DPKG_ARCH")
+ if variant_arch not in base_arch_list:
+ base_arch_list.append(variant_arch)
+
+ with open(self.apt_conf_file, "w+") as apt_conf:
+ with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
+ for line in apt_conf_sample.read().split("\n"):
+ match_arch = re.match(r" Architecture \".*\";$", line)
+ architectures = ""
+ if match_arch:
+ for base_arch in base_arch_list:
+ architectures += "\"%s\";" % base_arch
+ apt_conf.write(" Architectures {%s};\n" % architectures);
+ apt_conf.write(" Architecture \"%s\";\n" % base_archs)
+ else:
+ line = re.sub(r"#ROOTFS#", self.target_rootfs, line)
+ line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
+ apt_conf.write(line + "\n")
+
+ target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
+ bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
+
+ bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
+
+ if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
+ open(os.path.join(target_dpkg_dir, "status"), "w+").close()
+ if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
+ open(os.path.join(target_dpkg_dir, "available"), "w+").close()
+
+ def remove_packaging_data(self):
+ bb.utils.remove(self.target_rootfs + self.d.getVar('opkglibdir'), True)
+ bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
+
+ def fix_broken_dependencies(self):
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ cmd = "%s %s --allow-unauthenticated -f install" % (self.apt_get_cmd, self.apt_args)
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot fix broken dependencies. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ def list_installed(self):
+ return PMPkgsList(self.d, self.target_rootfs).list_pkgs()
+
+ def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
+ cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
+ pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
+
+ pkg_arch = pkg_info[pkg]["pkgarch"]
+ pkg_filename = pkg_info[pkg]["filename"]
+ pkg_info[pkg]["filepath"] = \
+ os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
+
+ return pkg_info
+
+ def extract(self, pkg):
+ """
+        Returns the path to a tmpdir containing the contents of a package.
+
+        Deleting the tmpdir is the responsibility of the caller.
+ """
+ pkg_info = self.package_info(pkg)
+ if not pkg_info:
+ bb.fatal("Unable to get information for package '%s' while "
+ "trying to extract the package." % pkg)
+
+ tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info)
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
+
+ return tmp_dir
diff --git a/meta/lib/oe/package_manager/deb/manifest.py b/meta/lib/oe/package_manager/deb/manifest.py
new file mode 100644
index 0000000000..d8eab24a06
--- /dev/null
+++ b/meta/lib/oe/package_manager/deb/manifest.py
@@ -0,0 +1,26 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from oe.manifest import Manifest
+
+class PkgManifest(Manifest):
+ def create_initial(self):
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ pkg_list = self.d.getVar(var)
+
+ if pkg_list is None:
+ continue
+
+ for pkg in pkg_list.split():
+ manifest.write("%s,%s\n" %
+ (self.var_maps[self.manifest_type][var], pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ pass
diff --git a/meta/lib/oe/package_manager/deb/rootfs.py b/meta/lib/oe/package_manager/deb/rootfs.py
new file mode 100644
index 0000000000..8fbaca11d6
--- /dev/null
+++ b/meta/lib/oe/package_manager/deb/rootfs.py
@@ -0,0 +1,210 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import shutil
+from oe.rootfs import Rootfs
+from oe.manifest import Manifest
+from oe.utils import execute_pre_post_process
+from oe.package_manager.deb.manifest import PkgManifest
+from oe.package_manager.deb import DpkgPM
+
+class DpkgOpkgRootfs(Rootfs):
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
+ super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+
+ def _get_pkgs_postinsts(self, status_file):
+ def _get_pkg_depends_list(pkg_depends):
+ pkg_depends_list = []
+ # filter version requirements like libc (>= 1.1)
+ for dep in pkg_depends.split(', '):
+ m_dep = re.match(r"^(.*) \(.*\)$", dep)
+ if m_dep:
+ dep = m_dep.group(1)
+ pkg_depends_list.append(dep)
+
+ return pkg_depends_list
+
+ pkgs = {}
+ pkg_name = ""
+ pkg_status_match = False
+ pkg_depends = ""
+
+ with open(status_file) as status:
+ data = status.read()
+ status.close()
+ for line in data.split('\n'):
+ m_pkg = re.match(r"^Package: (.*)", line)
+ m_status = re.match(r"^Status:.*unpacked", line)
+ m_depends = re.match(r"^Depends: (.*)", line)
+
+                #Only one of m_pkg, m_status or m_depends is not None at a time
+ #If m_pkg is not None, we started a new package
+ if m_pkg is not None:
+ #Get Package name
+ pkg_name = m_pkg.group(1)
+ #Make sure we reset other variables
+ pkg_status_match = False
+ pkg_depends = ""
+ elif m_status is not None:
+ #New status matched
+ pkg_status_match = True
+ elif m_depends is not None:
+                    #New depends matched
+ pkg_depends = m_depends.group(1)
+ else:
+ pass
+
+ #Now check if we can process package depends and postinst
+ if "" != pkg_name and pkg_status_match:
+ pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
+ else:
+ #Not enough information
+ pass
+
+ # remove package dependencies not in postinsts
+ pkg_names = list(pkgs.keys())
+ for pkg_name in pkg_names:
+ deps = pkgs[pkg_name][:]
+
+ for d in deps:
+ if d not in pkg_names:
+ pkgs[pkg_name].remove(d)
+
+ return pkgs
+
+ def _get_delayed_postinsts_common(self, status_file):
+ def _dep_resolve(graph, node, resolved, seen):
+ seen.append(node)
+
+ for edge in graph[node]:
+ if edge not in resolved:
+ if edge in seen:
+ raise RuntimeError("Packages %s and %s have " \
+ "a circular dependency in postinsts scripts." \
+ % (node, edge))
+ _dep_resolve(graph, edge, resolved, seen)
+
+ resolved.append(node)
+
+ pkg_list = []
+
+ pkgs = None
+ if not self.d.getVar('PACKAGE_INSTALL').strip():
+ bb.note("Building empty image")
+ else:
+ pkgs = self._get_pkgs_postinsts(status_file)
+ if pkgs:
+ root = "__packagegroup_postinst__"
+ pkgs[root] = list(pkgs.keys())
+ _dep_resolve(pkgs, root, pkg_list, [])
+ pkg_list.remove(root)
+
+ if len(pkg_list) == 0:
+ return None
+
+ return pkg_list
+
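# Editor's illustration (not part of this change): _dep_resolve() orders the
# delayed postinsts so that dependencies come first. With hypothetical
# packages where "app" depends on "lib" (and "lib" on nothing):
#
#   pkgs = {"app": ["lib"], "lib": []}
#   pkgs["__packagegroup_postinst__"] = ["app", "lib"]
#
# the resolved list, after removing the synthetic root, is ["lib", "app"],
# i.e. lib's postinst is scheduled before app's.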
+ def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management",
+ True, False, self.d):
+ return
+ num = 0
+ for p in self._get_delayed_postinsts():
+ bb.utils.mkdirhier(dst_postinst_dir)
+
+ if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
+ shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
+ os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
+
+ num += 1
+
+class PkgRootfs(DpkgOpkgRootfs):
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = '^E:'
+ self.log_check_expected_regexes = \
+ [
+ "^E: Unmet dependencies."
+ ]
+
+ bb.utils.remove(self.image_rootfs, True)
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
+ self.manifest = PkgManifest(d, manifest_dir)
+ self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
+ d.getVar('PACKAGE_ARCHS'),
+ d.getVar('DPKG_ARCH'))
+
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
+ deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')
+
+ alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
+ bb.utils.mkdirhier(alt_dir)
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, deb_pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+ # Don't support incremental, so skip that
+ self.progress_reporter.next_stage()
+
+ self.pm.update()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ self.pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+ self.pm.fix_broken_dependencies()
+
+ if self.progress_reporter:
+ # Don't support attemptonly, so skip that
+ self.progress_reporter.next_stage()
+ self.progress_reporter.next_stage()
+
+ self.pm.install_complementary()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self._setup_dbg_rootfs(['/var/lib/dpkg'])
+
+ self.pm.fix_broken_dependencies()
+
+ self.pm.mark_packages("installed")
+
+ self.pm.run_pre_post_installs()
+
+ execute_pre_post_process(self.d, deb_post_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ @staticmethod
+ def _depends_list():
+ return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']
+
+ def _get_delayed_postinsts(self):
+ status_file = self.image_rootfs + "/var/lib/dpkg/status"
+ return self._get_delayed_postinsts_common(status_file)
+
+ def _save_postinsts(self):
+ dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
+ src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
+ return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ pass
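
For readers following the delayed-postinst machinery above: _get_delayed_postinsts_common() turns the dpkg/opkg status file into a dependency graph and orders the saved postinst scripts with a depth-first resolution that aborts on cycles. The snippet below is a standalone sketch of just that resolution step, using made-up package names; it is illustrative only and not part of the change itself.

# Standalone sketch of the postinst ordering used by DpkgOpkgRootfs: a
# depth-first topological sort that raises on circular dependencies.
# Package names are hypothetical; the real graph is built from the
# dpkg/opkg status file.
def dep_resolve(graph, node, resolved, seen):
    seen.append(node)
    for edge in graph[node]:
        if edge not in resolved:
            if edge in seen:
                raise RuntimeError("Packages %s and %s have a circular "
                                   "dependency in postinst scripts." % (node, edge))
            dep_resolve(graph, edge, resolved, seen)
    resolved.append(node)

graph = {
    "base-files": [],
    "busybox": ["base-files"],
    "my-app": ["busybox"],
    "__packagegroup_postinst__": ["base-files", "busybox", "my-app"],
}
order = []
dep_resolve(graph, "__packagegroup_postinst__", order, [])
order.remove("__packagegroup_postinst__")
print(order)   # ['base-files', 'busybox', 'my-app']
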
diff --git a/meta/lib/oe/package_manager/deb/sdk.py b/meta/lib/oe/package_manager/deb/sdk.py
new file mode 100644
index 0000000000..f4b0b6510a
--- /dev/null
+++ b/meta/lib/oe/package_manager/deb/sdk.py
@@ -0,0 +1,100 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import glob
+import shutil
+from oe.utils import execute_pre_post_process
+from oe.sdk import Sdk
+from oe.manifest import Manifest
+from oe.package_manager.deb import DpkgPM
+from oe.package_manager.deb.manifest import PkgManifest
+
+class PkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None):
+ super(PkgSdk, self).__init__(d, manifest_dir)
+
+ self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
+ self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")
+
+
+ self.target_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ deb_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ deb_repo_workdir = "oe-sdk-ext-repo"
+
+ self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
+ self.d.getVar("PACKAGE_ARCHS"),
+ self.d.getVar("DPKG_ARCH"),
+ self.target_conf_dir,
+ deb_repo_workdir=deb_repo_workdir)
+
+ self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
+ self.d.getVar("SDK_PACKAGE_ARCHS"),
+ self.d.getVar("DEB_SDK_ARCH"),
+ self.host_conf_dir,
+ deb_repo_workdir=deb_repo_workdir)
+
+ def _copy_apt_dir_to(self, dst_dir):
+ staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")
+
+ self.remove(dst_dir, True)
+
+ shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ pm.write_index()
+ pm.update()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ self.target_pm.run_pre_post_installs()
+
+ self.target_pm.run_intercepts(populate_sdk='target')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
+
+ self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_pre_post_installs()
+
+ self.host_pm.run_intercepts(populate_sdk='host')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
+
+ self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
+ "etc", "apt"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.host_pm.remove_packaging_data()
+
+ native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
+ "var", "lib", "dpkg")
+ self.mkdirhier(native_dpkg_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
+ self.movefile(f, native_dpkg_state_dir)
+ self.remove(os.path.join(self.sdk_output, "var"), True)
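
The _populate_sysroot() helper above (and its ipk counterpart later in this series) is a manifest-driven install loop: packages are grouped by type, installed in a fixed order, and only the attempt-only group tolerates failures. Below is a minimal stand-alone sketch of that loop; FakePM and the "mip"/"aop" strings are stand-ins for the real package manager classes and the oe.manifest constants.

# Minimal sketch of the manifest-driven install loop used by _populate_sysroot().
PKG_TYPE_MUST_INSTALL = "mip"   # stand-in for Manifest.PKG_TYPE_MUST_INSTALL
PKG_TYPE_ATTEMPT_ONLY = "aop"   # stand-in for Manifest.PKG_TYPE_ATTEMPT_ONLY
install_order = [PKG_TYPE_MUST_INSTALL, PKG_TYPE_ATTEMPT_ONLY]

class FakePM:
    # stand-in for DpkgPM/OpkgPM; only the call shape matters here
    def install(self, pkgs, attempt_only=False):
        print("install %s (attempt_only=%s)" % (" ".join(pkgs), attempt_only))

pkgs_to_install = {
    PKG_TYPE_MUST_INSTALL: ["base-files", "busybox"],
    PKG_TYPE_ATTEMPT_ONLY: ["some-optional-pkg"],
}

pm = FakePM()
for pkg_type in install_order:
    if pkg_type in pkgs_to_install:
        # same effect as the [False, True][...] indexing used in the patch
        pm.install(pkgs_to_install[pkg_type],
                   attempt_only=(pkg_type == PKG_TYPE_ATTEMPT_ONLY))
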
diff --git a/meta/lib/oe/package_manager/ipk/__init__.py b/meta/lib/oe/package_manager/ipk/__init__.py
new file mode 100644
index 0000000000..4cd3963111
--- /dev/null
+++ b/meta/lib/oe/package_manager/ipk/__init__.py
@@ -0,0 +1,503 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import shutil
+import subprocess
+from oe.package_manager import *
+
+class OpkgIndexer(Indexer):
+ def write_index(self):
+ arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
+ "SDK_PACKAGE_ARCHS",
+ ]
+
+ opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
+ else:
+ signer = None
+
+ if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
+ open(os.path.join(self.deploy_dir, "Packages"), "w").close()
+
+ index_cmds = set()
+ index_sign_files = set()
+ for arch_var in arch_vars:
+ archs = self.d.getVar(arch_var)
+ if archs is None:
+ continue
+
+ for arch in archs.split():
+ pkgs_dir = os.path.join(self.deploy_dir, arch)
+ pkgs_file = os.path.join(pkgs_dir, "Packages")
+
+ if not os.path.isdir(pkgs_dir):
+ continue
+
+ if not os.path.exists(pkgs_file):
+ open(pkgs_file, "w").close()
+
+ index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s' %
+ (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
+
+ index_sign_files.add(pkgs_file)
+
+ if len(index_cmds) == 0:
+ bb.note("There are no packages in %s!" % self.deploy_dir)
+ return
+
+ oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
+
+ if signer:
+ feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
+ is_ascii_sig = (feed_sig_type.upper() != "BIN")
+ for f in index_sign_files:
+ signer.detach_sign(f,
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
+ armor=is_ascii_sig)
+
+class PMPkgsList(PkgsList):
+ def __init__(self, d, rootfs_dir):
+ super(PMPkgsList, self).__init__(d, rootfs_dir)
+ config_file = d.getVar("IPKGCONF_TARGET")
+
+ self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
+ self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
+ self.opkg_args += self.d.getVar("OPKG_ARGS")
+
+ def list_pkgs(self, format=None):
+ cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
+
+ # opkg returns success even when it printed some
+ # "Collected errors:" report to stderr. Mixing stderr into
+ # stdout then leads to random failures later on when
+ # parsing the output. To avoid this we need to collect both
+ # output streams separately and check for empty stderr.
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ cmd_output, cmd_stderr = p.communicate()
+ cmd_output = cmd_output.decode("utf-8")
+ cmd_stderr = cmd_stderr.decode("utf-8")
+ if p.returncode or cmd_stderr:
+ bb.fatal("Cannot get the installed packages list. Command '%s' "
+ "returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr))
+
+ return opkg_query(cmd_output)
+
+
+
+class OpkgDpkgPM(PackageManager):
+ def __init__(self, d, target_rootfs):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ super(OpkgDpkgPM, self).__init__(d, target_rootfs)
+
+ def package_info(self, pkg, cmd):
+ """
+ Returns a dictionary with the package info.
+
+ This method extracts the common parts for Opkg and Dpkg
+ """
+
+ try:
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to list available packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ return opkg_query(output)
+
+ def extract(self, pkg, pkg_info):
+ """
+        Returns the path to a tmpdir containing the contents of a package.
+
+        Deleting the tmpdir is the responsibility of the caller.
+
+ This method extracts the common parts for Opkg and Dpkg
+ """
+
+ ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
+ tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
+ pkg_path = pkg_info[pkg]["filepath"]
+
+ if not os.path.isfile(pkg_path):
+            bb.fatal("Unable to extract package for '%s'. "
+                     "File %s doesn't exist" % (pkg, pkg_path))
+
+ tmp_dir = tempfile.mkdtemp()
+ current_dir = os.getcwd()
+ os.chdir(tmp_dir)
+ data_tar = 'data.tar.xz'
+
+ try:
+ cmd = [ar_cmd, 'x', pkg_path]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ cmd = [tar_cmd, 'xf', data_tar]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ except OSError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
+
+ bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
+ bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
+ bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
+ os.chdir(current_dir)
+
+ return tmp_dir
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ self.mark_packages("unpacked", registered_pkgs.split())
+
+class OpkgPM(OpkgDpkgPM):
+ def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True):
+ super(OpkgPM, self).__init__(d, target_rootfs)
+
+ self.config_file = config_file
+ self.pkg_archs = archs
+ self.task_name = task_name
+
+ self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir)
+ self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
+ self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
+ self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
+ self.opkg_args += self.d.getVar("OPKG_ARGS")
+
+ if prepare_index:
+ create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"), "package_write_ipk", filterbydependencies)
+
+ self.opkg_dir = oe.path.join(target_rootfs, self.d.getVar('OPKGLIBDIR'), "opkg")
+ bb.utils.mkdirhier(self.opkg_dir)
+
+ self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved'))
+
+ self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
+ if self.from_feeds:
+ self._create_custom_config()
+ else:
+ self._create_config()
+
+ self.indexer = OpkgIndexer(self.d, self.deploy_dir)
+
+ def mark_packages(self, status_tag, packages=None):
+ """
+ This function will change a package's status in /var/lib/opkg/status file.
+ If 'packages' is None then the new_status will be applied to all
+ packages
+ """
+ status_file = os.path.join(self.opkg_dir, "status")
+
+ with open(status_file, "r") as sf:
+ with open(status_file + ".tmp", "w+") as tmp_sf:
+ if packages is None:
+ tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+ r"Package: \1\n\2Status: \3%s" % status_tag,
+ sf.read()))
+ else:
+ if type(packages).__name__ != "list":
+ raise TypeError("'packages' should be a list object")
+
+ status = sf.read()
+ for pkg in packages:
+ status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
+ r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
+ status)
+
+ tmp_sf.write(status)
+
+ bb.utils.rename(status_file + ".tmp", status_file)
+
+ def _create_custom_config(self):
+ bb.note("Building from feeds activated!")
+
+ with open(self.config_file, "w+") as config_file:
+ priority = 1
+ for arch in self.pkg_archs.split():
+ config_file.write("arch %s %d\n" % (arch, priority))
+ priority += 5
+
+ for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
+ feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
+
+ if feed_match is not None:
+ feed_name = feed_match.group(1)
+ feed_uri = feed_match.group(2)
+
+ bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
+
+ config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
+
+ """
+        Allow using the package deploy directory contents as a quick devel-testing
+        feed. This creates individual feed configs for each arch subdir of those
+        specified as compatible for the current machine.
+ NOTE: Development-helper feature, NOT a full-fledged feed.
+ """
+ if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
+ for arch in self.pkg_archs.split():
+ cfg_file_name = os.path.join(self.target_rootfs,
+ self.d.getVar("sysconfdir"),
+ "opkg",
+ "local-%s-feed.conf" % arch)
+
+ with open(cfg_file_name, "w+") as cfg_file:
+ cfg_file.write("src/gz local-%s %s/%s" %
+ (arch,
+ self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
+ arch))
+
+ if self.d.getVar('OPKGLIBDIR') != '/var/lib':
+ # There is no command line option for this anymore, we need to add
+ # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
+ # the default value of "/var/lib" as defined in opkg:
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
+ cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+ cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+ cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
+
+
+ def _create_config(self):
+ with open(self.config_file, "w+") as config_file:
+ priority = 1
+ for arch in self.pkg_archs.split():
+ config_file.write("arch %s %d\n" % (arch, priority))
+ priority += 5
+
+ config_file.write("src oe file:%s\n" % self.deploy_dir)
+
+ for arch in self.pkg_archs.split():
+ pkgs_dir = os.path.join(self.deploy_dir, arch)
+ if os.path.isdir(pkgs_dir):
+ config_file.write("src oe-%s file:%s\n" %
+ (arch, pkgs_dir))
+
+ if self.d.getVar('OPKGLIBDIR') != '/var/lib':
+ # There is no command line option for this anymore, we need to add
+ # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
+ # the default value of "/var/lib" as defined in opkg:
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
+ config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+ config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+ config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ if feed_uris == "":
+ return
+
+ rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
+ % self.target_rootfs)
+
+ os.makedirs('%s/etc/opkg' % self.target_rootfs, exist_ok=True)
+
+ feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+ archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
+
+ with open(rootfs_config, "w+") as config_file:
+ uri_iterator = 0
+ for uri in feed_uris:
+ if archs:
+ for arch in archs:
+ if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
+ continue
+ bb.note('Adding opkg feed url-%s-%d (%s)' %
+ (arch, uri_iterator, uri))
+ config_file.write("src/gz uri-%s-%d %s/%s\n" %
+ (arch, uri_iterator, uri, arch))
+ else:
+ bb.note('Adding opkg feed url-%d (%s)' %
+ (uri_iterator, uri))
+ config_file.write("src/gz uri-%d %s\n" %
+ (uri_iterator, uri))
+
+ uri_iterator += 1
+
+ def update(self):
+ self.deploy_dir_lock()
+
+ cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ self.deploy_dir_unlock()
+ bb.fatal("Unable to update the package index files. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ self.deploy_dir_unlock()
+
+ def install(self, pkgs, attempt_only=False):
+ if not pkgs:
+ return
+
+ cmd = "%s %s" % (self.opkg_cmd, self.opkg_args)
+ for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split():
+ cmd += " --add-exclude %s" % exclude
+ for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split():
+ cmd += " --add-ignore-recommends %s" % bad_recommendation
+ cmd += " install "
+ cmd += " ".join(pkgs)
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+ try:
+ bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+ bb.note(cmd)
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ failed_pkgs = []
+ for line in output.split('\n'):
+ if line.endswith("configuration required on target."):
+ bb.warn(line)
+ failed_pkgs.append(line.split(".")[0])
+ if failed_pkgs:
+ failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+ except subprocess.CalledProcessError as e:
+ (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
+ "Command '%s' returned %d:\n%s" %
+ (cmd, e.returncode, e.output.decode("utf-8")))
+
+ def remove(self, pkgs, with_dependencies=True):
+ if not pkgs:
+ return
+
+ if with_dependencies:
+ cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
+ (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+ else:
+ cmd = "%s %s --force-depends remove %s" % \
+ (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+
+ try:
+ bb.note(cmd)
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to remove packages. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
+
+ def write_index(self):
+ self.deploy_dir_lock()
+
+ result = self.indexer.write_index()
+
+ self.deploy_dir_unlock()
+
+ if result is not None:
+ bb.fatal(result)
+
+ def remove_packaging_data(self):
+ cachedir = oe.path.join(self.target_rootfs, self.d.getVar("localstatedir"), "cache", "opkg")
+ bb.utils.remove(self.opkg_dir, True)
+ bb.utils.remove(cachedir, True)
+
+ def remove_lists(self):
+ if not self.from_feeds:
+ bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True)
+
+ def list_installed(self):
+ return PMPkgsList(self.d, self.target_rootfs).list_pkgs()
+
+ def dummy_install(self, pkgs):
+ """
+        This function dummy-installs pkgs and returns the log of the output.
+ """
+ if len(pkgs) == 0:
+ return
+
+        # Create a temp dir as the opkg root for the dummy installation
+ temp_rootfs = self.d.expand('${T}/opkg')
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ if opkg_lib_dir[0] == "/":
+ opkg_lib_dir = opkg_lib_dir[1:]
+ temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg')
+ bb.utils.mkdirhier(temp_opkg_dir)
+
+ opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
+ opkg_args += self.d.getVar("OPKG_ARGS")
+
+ cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to update. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ # Dummy installation
+ cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
+ opkg_args,
+ ' '.join(pkgs))
+ try:
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to dummy install packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ bb.utils.remove(temp_rootfs, True)
+
+ return output
+
+ def backup_packaging_data(self):
+        # Save the opkglib for incremental ipk image generation
+ if os.path.exists(self.saved_opkg_dir):
+ bb.utils.remove(self.saved_opkg_dir, True)
+ shutil.copytree(self.opkg_dir,
+ self.saved_opkg_dir,
+ symlinks=True)
+
+ def recover_packaging_data(self):
+        # Copy the saved opkglib back
+ if os.path.exists(self.saved_opkg_dir):
+ if os.path.exists(self.opkg_dir):
+ bb.utils.remove(self.opkg_dir, True)
+
+ bb.note('Recover packaging data')
+ shutil.copytree(self.saved_opkg_dir,
+ self.opkg_dir,
+ symlinks=True)
+
+ def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
+ cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
+ pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
+
+ pkg_arch = pkg_info[pkg]["arch"]
+ pkg_filename = pkg_info[pkg]["filename"]
+ pkg_info[pkg]["filepath"] = \
+ os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
+
+ return pkg_info
+
+ def extract(self, pkg):
+ """
+ Returns the path to a tmpdir where resides the contents of a package.
+
+ Deleting the tmpdir is responsability of the caller.
+ """
+ pkg_info = self.package_info(pkg)
+ if not pkg_info:
+ bb.fatal("Unable to get information for package '%s' while "
+ "trying to extract the package." % pkg)
+
+ tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
+
+ return tmp_dir
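
OpkgPM.mark_packages() above flips the state recorded in the opkg status file with a single regular-expression substitution over the Package/Status stanzas. The sketch below applies the same pattern to an invented two-package status snippet, marking everything "installed"; the status text is fabricated purely for illustration.

# Illustration of the status-file rewrite performed by OpkgPM.mark_packages().
import re

status = (
    "Package: busybox\n"
    "Version: 1.33.1\n"
    "Status: install user unpacked\n"
    "\n"
    "Package: base-files\n"
    "Status: install user unpacked\n"
)

marked = re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                r"Package: \1\n\2Status: \3installed",
                status)
print(marked)   # both stanzas now end in "Status: install user installed"
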
diff --git a/meta/lib/oe/package_manager/ipk/manifest.py b/meta/lib/oe/package_manager/ipk/manifest.py
new file mode 100644
index 0000000000..ae451c5c70
--- /dev/null
+++ b/meta/lib/oe/package_manager/ipk/manifest.py
@@ -0,0 +1,74 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from oe.manifest import Manifest
+import re
+
+class PkgManifest(Manifest):
+ """
+    Returns a dictionary object with mip (must install) and mlp (multilib) packages.
+ """
+ def _split_multilib(self, pkg_list):
+ pkgs = dict()
+
+ for pkg in pkg_list.split():
+ pkg_type = self.PKG_TYPE_MUST_INSTALL
+
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
+
+ for ml_variant in ml_variants:
+ if pkg.startswith(ml_variant + '-'):
+ pkg_type = self.PKG_TYPE_MULTILIB
+
+ if not pkg_type in pkgs:
+ pkgs[pkg_type] = pkg
+ else:
+ pkgs[pkg_type] += " " + pkg
+
+ return pkgs
+
+ def create_initial(self):
+ pkgs = dict()
+
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ if var in self.vars_to_split:
+ split_pkgs = self._split_multilib(self.d.getVar(var))
+ if split_pkgs is not None:
+ pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
+ else:
+ pkg_list = self.d.getVar(var)
+ if pkg_list is not None:
+ pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
+
+ for pkg_type in sorted(pkgs):
+ for pkg in sorted(pkgs[pkg_type].split()):
+ manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ if not os.path.exists(self.initial_manifest):
+ self.create_initial()
+
+ initial_manifest = self.parse_initial_manifest()
+ pkgs_to_install = list()
+ for pkg_type in initial_manifest:
+ pkgs_to_install += initial_manifest[pkg_type]
+ if len(pkgs_to_install) == 0:
+ return
+
+ output = pm.dummy_install(pkgs_to_install).decode('utf-8')
+
+ with open(self.full_manifest, 'w+') as manifest:
+ pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
+ for line in set(output.split('\n')):
+ m = pkg_re.match(line)
+ if m:
+ manifest.write(m.group(1) + '\n')
+
+ return
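
create_full() above recovers the full package set by dry-running an install and matching the "Installing ..." lines of the log. A small illustration with invented log lines (real opkg --noaction output may differ in detail):

# Demo of the line filter used by PkgManifest.create_full().
import re

output = (
    "Installing busybox (1.33.1) on root\n"
    "Installing base-files (3.0.14) on root\n"
    "Downloading file:/example/repo/busybox_1.33.1-r0_core2-64.ipk\n"
)

pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
for line in set(output.split('\n')):
    m = pkg_re.match(line)
    if m:
        # prints busybox and base-files (order not guaranteed because of set())
        print(m.group(1))
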
diff --git a/meta/lib/oe/package_manager/ipk/rootfs.py b/meta/lib/oe/package_manager/ipk/rootfs.py
new file mode 100644
index 0000000000..10a831994e
--- /dev/null
+++ b/meta/lib/oe/package_manager/ipk/rootfs.py
@@ -0,0 +1,350 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import filecmp
+import shutil
+from oe.rootfs import Rootfs
+from oe.manifest import Manifest
+from oe.utils import execute_pre_post_process
+from oe.package_manager.ipk.manifest import PkgManifest
+from oe.package_manager.ipk import OpkgPM
+
+class DpkgOpkgRootfs(Rootfs):
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
+ super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+
+ def _get_pkgs_postinsts(self, status_file):
+ def _get_pkg_depends_list(pkg_depends):
+ pkg_depends_list = []
+ # filter version requirements like libc (>= 1.1)
+ for dep in pkg_depends.split(', '):
+ m_dep = re.match(r"^(.*) \(.*\)$", dep)
+ if m_dep:
+ dep = m_dep.group(1)
+ pkg_depends_list.append(dep)
+
+ return pkg_depends_list
+
+ pkgs = {}
+ pkg_name = ""
+ pkg_status_match = False
+ pkg_depends = ""
+
+ with open(status_file) as status:
+ data = status.read()
+ status.close()
+ for line in data.split('\n'):
+ m_pkg = re.match(r"^Package: (.*)", line)
+ m_status = re.match(r"^Status:.*unpacked", line)
+ m_depends = re.match(r"^Depends: (.*)", line)
+
+                #Only one of m_pkg, m_status or m_depends is not None at a time
+ #If m_pkg is not None, we started a new package
+ if m_pkg is not None:
+ #Get Package name
+ pkg_name = m_pkg.group(1)
+ #Make sure we reset other variables
+ pkg_status_match = False
+ pkg_depends = ""
+ elif m_status is not None:
+ #New status matched
+ pkg_status_match = True
+ elif m_depends is not None:
+                    #New depends matched
+ pkg_depends = m_depends.group(1)
+ else:
+ pass
+
+ #Now check if we can process package depends and postinst
+ if "" != pkg_name and pkg_status_match:
+ pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
+ else:
+ #Not enough information
+ pass
+
+ # remove package dependencies not in postinsts
+ pkg_names = list(pkgs.keys())
+ for pkg_name in pkg_names:
+ deps = pkgs[pkg_name][:]
+
+ for d in deps:
+ if d not in pkg_names:
+ pkgs[pkg_name].remove(d)
+
+ return pkgs
+
+ def _get_delayed_postinsts_common(self, status_file):
+ def _dep_resolve(graph, node, resolved, seen):
+ seen.append(node)
+
+ for edge in graph[node]:
+ if edge not in resolved:
+ if edge in seen:
+ raise RuntimeError("Packages %s and %s have " \
+ "a circular dependency in postinsts scripts." \
+ % (node, edge))
+ _dep_resolve(graph, edge, resolved, seen)
+
+ resolved.append(node)
+
+ pkg_list = []
+
+ pkgs = None
+ if not self.d.getVar('PACKAGE_INSTALL').strip():
+ bb.note("Building empty image")
+ else:
+ pkgs = self._get_pkgs_postinsts(status_file)
+ if pkgs:
+ root = "__packagegroup_postinst__"
+ pkgs[root] = list(pkgs.keys())
+ _dep_resolve(pkgs, root, pkg_list, [])
+ pkg_list.remove(root)
+
+ if len(pkg_list) == 0:
+ return None
+
+ return pkg_list
+
+ def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management",
+ True, False, self.d):
+ return
+ num = 0
+ for p in self._get_delayed_postinsts():
+ bb.utils.mkdirhier(dst_postinst_dir)
+
+ if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
+ shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
+ os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
+
+ num += 1
+
+class PkgRootfs(DpkgOpkgRootfs):
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = '(exit 1|Collected errors)'
+
+ self.manifest = PkgManifest(d, manifest_dir)
+ self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
+ self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")
+
+ self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
+ if self._remove_old_rootfs():
+ bb.utils.remove(self.image_rootfs, True)
+ self.pm = OpkgPM(d,
+ self.image_rootfs,
+ self.opkg_conf,
+ self.pkg_archs)
+ else:
+ self.pm = OpkgPM(d,
+ self.image_rootfs,
+ self.opkg_conf,
+ self.pkg_archs)
+ self.pm.recover_packaging_data()
+
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
+ '''
+    Compare two files that share the same key to see if they are equal.
+    If they are not equal, they are duplicates that come from
+    different packages.
+ '''
+ def _file_equal(self, key, f1, f2):
+ if filecmp.cmp(f1, f2):
+ return True
+ # Not equal
+ return False
+
+ """
+ This function was reused from the old implementation.
+ See commit: "image.bbclass: Added variables for multilib support." by
+ Lianhao Lu.
+ """
+ def _multilib_sanity_test(self, dirs):
+
+ allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP")
+ if allow_replace is None:
+ allow_replace = ""
+
+ allow_rep = re.compile(re.sub(r"\|$", r"", allow_replace))
+ error_prompt = "Multilib check error:"
+
+ files = {}
+ for dir in dirs:
+ for root, subfolders, subfiles in os.walk(dir):
+ for file in subfiles:
+ item = os.path.join(root, file)
+ key = str(os.path.join("/", os.path.relpath(item, dir)))
+
+ valid = True
+ if key in files:
+                        #check whether the file is allowed to be replaced
+ if allow_rep.match(key):
+ valid = True
+ else:
+ if os.path.exists(files[key]) and \
+ os.path.exists(item) and \
+ not self._file_equal(key, files[key], item):
+ valid = False
+ bb.fatal("%s duplicate files %s %s is not the same\n" %
+ (error_prompt, item, files[key]))
+
+ #pass the check, add to list
+ if valid:
+ files[key] = item
+
+ def _multilib_test_install(self, pkgs):
+ ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
+ bb.utils.mkdirhier(ml_temp)
+
+ dirs = [self.image_rootfs]
+
+ for variant in self.d.getVar("MULTILIB_VARIANTS").split():
+ ml_target_rootfs = os.path.join(ml_temp, variant)
+
+ bb.utils.remove(ml_target_rootfs, True)
+
+ ml_opkg_conf = os.path.join(ml_temp,
+ variant + "-" + os.path.basename(self.opkg_conf))
+
+ ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False)
+
+ ml_pm.update()
+ ml_pm.install(pkgs)
+
+ dirs.append(ml_target_rootfs)
+
+ self._multilib_sanity_test(dirs)
+
+ '''
+    When ipk incremental image generation is enabled, this removes unneeded
+    packages by comparing the old full manifest from the previous image with
+    the new full manifest of the current image.
+ '''
+ def _remove_extra_packages(self, pkgs_initial_install):
+ if self.inc_opkg_image_gen == "1":
+ # Parse full manifest in previous existing image creation session
+ old_full_manifest = self.manifest.parse_full_manifest()
+
+ # Create full manifest for the current image session, the old one
+ # will be replaced by the new one.
+ self.manifest.create_full(self.pm)
+
+ # Parse full manifest in current image creation session
+ new_full_manifest = self.manifest.parse_full_manifest()
+
+ pkg_to_remove = list()
+ for pkg in old_full_manifest:
+ if pkg not in new_full_manifest:
+ pkg_to_remove.append(pkg)
+
+ if pkg_to_remove != []:
+ bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
+ self.pm.remove(pkg_to_remove)
+
+ '''
+    Compare with the previous image creation session; if certain conditions
+    are triggered, the previous old image should be removed.
+    The conditions include a change to any of PACKAGE_EXCLUDE,
+    NO_RECOMMENDATIONS or BAD_RECOMMENDATIONS.
+ '''
+ def _remove_old_rootfs(self):
+ if self.inc_opkg_image_gen != "1":
+ return True
+
+ vars_list_file = self.d.expand('${T}/vars_list')
+
+ old_vars_list = ""
+ if os.path.exists(vars_list_file):
+ old_vars_list = open(vars_list_file, 'r+').read()
+
+ new_vars_list = '%s:%s:%s\n' % \
+ ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
+ (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
+ (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
+ open(vars_list_file, 'w+').write(new_vars_list)
+
+ if old_vars_list != new_vars_list:
+ return True
+
+ return False
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
+ opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, opkg_pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+ # Steps are a bit different in order, skip next
+ self.progress_reporter.next_stage()
+
+ self.pm.update()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ if self.inc_opkg_image_gen == "1":
+ self._remove_extra_packages(pkgs_to_install)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ # For multilib, we perform a sanity test before final install
+ # If sanity test fails, it will automatically do a bb.fatal()
+ # and the installation will stop
+ if pkg_type == Manifest.PKG_TYPE_MULTILIB:
+ self._multilib_test_install(pkgs_to_install[pkg_type])
+
+ self.pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install_complementary()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ opkg_dir = os.path.join(opkg_lib_dir, 'opkg')
+ self._setup_dbg_rootfs([opkg_dir])
+
+ execute_pre_post_process(self.d, opkg_post_process_cmds)
+
+ if self.inc_opkg_image_gen == "1":
+ self.pm.backup_packaging_data()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ @staticmethod
+ def _depends_list():
+        return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS']
+
+ def _get_delayed_postinsts(self):
+ status_file = os.path.join(self.image_rootfs,
+ self.d.getVar('OPKGLIBDIR').strip('/'),
+ "opkg", "status")
+ return self._get_delayed_postinsts_common(status_file)
+
+ def _save_postinsts(self):
+ dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
+ src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
+ return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ self.pm.remove_lists()
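
_multilib_sanity_test() above guards against a multilib variant shipping a different copy of a file that the main rootfs already provides. The sketch below isolates that duplicate-file check; the directory paths are placeholders and the MULTILIBRE_ALLOW_REP whitelist handling is left out for brevity.

# Condensed sketch of the duplicate-file check behind _multilib_sanity_test():
# key every file by its path relative to its rootfs and flag two different
# files under the same key as a conflict.
import filecmp
import os

def find_conflicts(dirs):
    files = {}
    conflicts = []
    for d in dirs:
        for root, _, names in os.walk(d):
            for name in names:
                item = os.path.join(root, name)
                key = os.path.join("/", os.path.relpath(item, d))
                if key in files and not filecmp.cmp(files[key], item):
                    conflicts.append((files[key], item))
                else:
                    files[key] = item
    return conflicts

# e.g. find_conflicts(["/tmp/rootfs", "/tmp/multilib-temp/lib32"])
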
diff --git a/meta/lib/oe/package_manager/ipk/sdk.py b/meta/lib/oe/package_manager/ipk/sdk.py
new file mode 100644
index 0000000000..e2ca415c8e
--- /dev/null
+++ b/meta/lib/oe/package_manager/ipk/sdk.py
@@ -0,0 +1,102 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import glob
+import shutil
+from oe.utils import execute_pre_post_process
+from oe.sdk import Sdk
+from oe.package_manager.ipk.manifest import PkgManifest
+from oe.manifest import Manifest
+from oe.package_manager.ipk import OpkgPM
+
+class PkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None):
+ super(PkgSdk, self).__init__(d, manifest_dir)
+
+ # In sdk_list_installed_packages the call to opkg is hardcoded to
+ # always use IPKGCONF_TARGET and there's no exposed API to change this
+ # so simply override IPKGCONF_TARGET to use this separated config file.
+ ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK_TARGET")
+ d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target)
+
+ self.target_conf = self.d.getVar("IPKGCONF_TARGET")
+ self.host_conf = self.d.getVar("IPKGCONF_SDK")
+
+ self.target_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ ipk_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ ipk_repo_workdir = "oe-sdk-ext-repo"
+
+ self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
+ self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"),
+ ipk_repo_workdir=ipk_repo_workdir)
+
+ self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
+ self.d.getVar("SDK_PACKAGE_ARCHS"),
+ ipk_repo_workdir=ipk_repo_workdir)
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
+ pm.write_index()
+
+ pm.update()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ self.target_pm.run_intercepts(populate_sdk='target')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts(populate_sdk='host')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.host_pm.remove_packaging_data()
+
+ target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
+ host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
+
+ self.mkdirhier(target_sysconfdir)
+ shutil.copy(self.target_conf, target_sysconfdir)
+ os.chmod(os.path.join(target_sysconfdir,
+ os.path.basename(self.target_conf)), 0o644)
+
+ self.mkdirhier(host_sysconfdir)
+ shutil.copy(self.host_conf, host_sysconfdir)
+ os.chmod(os.path.join(host_sysconfdir,
+ os.path.basename(self.host_conf)), 0o644)
+
+ native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
+ self.d.getVar('localstatedir_nativesdk').strip('/'),
+ "lib", "opkg")
+ self.mkdirhier(native_opkg_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
+ self.movefile(f, native_opkg_state_dir)
+
+ self.remove(os.path.join(self.sdk_output, "var"), True)
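
Both SDK back-ends finish _populate() by relocating the package manager's bookkeeping from the target-style /var layout into the nativesdk prefix inside the SDK output. Below is a rough stdlib-only equivalent of that final step; the paths are placeholders and shutil.move/os.makedirs stand in for the Sdk movefile()/mkdirhier() helpers.

# Rough sketch of the state-directory relocation at the end of _populate().
import glob
import os
import shutil

sdk_output = "/tmp/sdk-output"                                             # placeholder
native_state_dir = os.path.join(sdk_output, "opt/nativesdk/var/lib/opkg")  # placeholder

os.makedirs(native_state_dir, exist_ok=True)
for entry in glob.glob(os.path.join(sdk_output, "var", "lib", "opkg", "*")):
    shutil.move(entry, native_state_dir)
shutil.rmtree(os.path.join(sdk_output, "var"), ignore_errors=True)
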
diff --git a/meta/lib/oe/package_manager/rpm/__init__.py b/meta/lib/oe/package_manager/rpm/__init__.py
new file mode 100644
index 0000000000..b392581069
--- /dev/null
+++ b/meta/lib/oe/package_manager/rpm/__init__.py
@@ -0,0 +1,410 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import shutil
+import subprocess
+from oe.package_manager import *
+
+class RpmIndexer(Indexer):
+ def write_index(self):
+ self.do_write_index(self.deploy_dir)
+
+ def do_write_index(self, deploy_dir):
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
+ else:
+ signer = None
+
+ createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
+ result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir))
+ if result:
+ bb.fatal(result)
+
+ # Sign repomd
+ if signer:
+ sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
+ is_ascii_sig = (sig_type.upper() != "BIN")
+ signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'),
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
+ armor=is_ascii_sig)
+
+class RpmSubdirIndexer(RpmIndexer):
+ def write_index(self):
+ bb.note("Generating package index for %s" %(self.deploy_dir))
+ # Remove the existing repodata to ensure that we re-generate it no matter what
+ bb.utils.remove(os.path.join(self.deploy_dir, "repodata"), recurse=True)
+
+ self.do_write_index(self.deploy_dir)
+ for entry in os.walk(self.deploy_dir):
+ if os.path.samefile(self.deploy_dir, entry[0]):
+ for dir in entry[1]:
+ if dir != 'repodata':
+ dir_path = oe.path.join(self.deploy_dir, dir)
+ bb.note("Generating package index for %s" %(dir_path))
+ self.do_write_index(dir_path)
+
+
+class PMPkgsList(PkgsList):
+ def list_pkgs(self):
+ return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False).list_installed()
+
+class RpmPM(PackageManager):
+ def __init__(self,
+ d,
+ target_rootfs,
+ target_vendor,
+ task_name='target',
+ arch_var=None,
+ os_var=None,
+ rpm_repo_workdir="oe-rootfs-repo",
+ filterbydependencies=True,
+ needfeed=True):
+ super(RpmPM, self).__init__(d, target_rootfs)
+ self.target_vendor = target_vendor
+ self.task_name = task_name
+        if arch_var is None:
+ self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_")
+ else:
+ self.archs = self.d.getVar(arch_var).replace("-","_")
+ if task_name == "host":
+ self.primary_arch = self.d.getVar('SDK_ARCH')
+ else:
+ self.primary_arch = self.d.getVar('MACHINE_ARCH')
+
+ if needfeed:
+ self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
+ create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
+
+ self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
+ self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
+ self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
+ self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved'))
+
+ def _configure_dnf(self):
+        # libsolv handles 'noarch' internally; we don't need to specify it explicitly
+ archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]]
+ # This prevents accidental matching against libsolv's built-in policies
+ if len(archs) <= 1:
+ archs = archs + ["bogusarch"]
+ # This architecture needs to be upfront so that packages using it are properly prioritized
+ archs = ["sdk_provides_dummy_target"] + archs
+ confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
+ bb.utils.mkdirhier(confdir)
+ open(confdir + "arch", 'w').write(":".join(archs))
+ distro_codename = self.d.getVar('DISTRO_CODENAME')
+ open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '')
+
+ open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
+
+
+ def _configure_rpm(self):
+ # We need to configure rpm to use our primary package architecture as the installation architecture,
+ # and to make it compatible with other package architectures that we use.
+ # Otherwise it will refuse to proceed with packages installation.
+ platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
+ rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
+ bb.utils.mkdirhier(platformconfdir)
+ open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
+ with open(rpmrcconfdir + "rpmrc", 'w') as f:
+ f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
+ f.write("buildarch_compat: %s: noarch\n" % self.primary_arch)
+
+ open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
+ if self.d.getVar('RPM_PREFER_ELF_ARCH'):
+ open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
+
+ if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
+ signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
+ pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
+ signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
+ rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
+ cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Importing GPG key failed. Command '%s' "
+ "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ def create_configs(self):
+ self._configure_dnf()
+ self._configure_rpm()
+
+ def write_index(self):
+ lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
+ lf = bb.utils.lockfile(lockfilename, False)
+ RpmIndexer(self.d, self.rpm_repo_dir).write_index()
+ bb.utils.unlockfile(lf)
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ from urllib.parse import urlparse
+
+ if feed_uris == "":
+ return
+
+ gpg_opts = ''
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ gpg_opts += 'repo_gpgcheck=1\n'
+ gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))
+
+ if self.d.getVar('RPM_SIGN_PACKAGES') != '1':
+ gpg_opts += 'gpgcheck=0\n'
+
+ bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
+ remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+ for uri in remote_uris:
+ repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
+ if feed_archs is not None:
+ for arch in feed_archs.split():
+ repo_uri = uri + "/" + arch
+ repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
+ repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
+ open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write(
+ "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
+ else:
+ repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
+ repo_uri = uri
+ open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write(
+ "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
+
+ def _prepare_pkg_transaction(self):
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+
+ def install(self, pkgs, attempt_only = False):
+ if len(pkgs) == 0:
+ return
+ self._prepare_pkg_transaction()
+
+ bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
+ package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
+ exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])
+
+ output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
+ (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
+ (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) +
+ (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
+ ["install"] +
+ pkgs)
+
+ failed_scriptlets_pkgnames = collections.OrderedDict()
+ for line in output.splitlines():
+ if line.startswith("Error: Systemctl"):
+ bb.error(line)
+
+ if line.startswith("Error in POSTIN scriptlet in rpm package"):
+ failed_scriptlets_pkgnames[line.split()[-1]] = True
+
+ if len(failed_scriptlets_pkgnames) > 0:
+ failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+
+ def remove(self, pkgs, with_dependencies = True):
+ if not pkgs:
+ return
+
+ self._prepare_pkg_transaction()
+
+ if with_dependencies:
+ self._invoke_dnf(["remove"] + pkgs)
+ else:
+ cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs]
+
+ try:
+ bb.note("Running %s" % ' '.join([cmd] + args + pkgs))
+ output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not invoke rpm. Command "
+ "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))
+
+ def upgrade(self):
+ self._prepare_pkg_transaction()
+ self._invoke_dnf(["upgrade"])
+
+ def autoremove(self):
+ self._prepare_pkg_transaction()
+ self._invoke_dnf(["autoremove"])
+
+ def remove_packaging_data(self):
+ self._invoke_dnf(["clean", "all"])
+ for dir in self.packaging_data_dirs:
+ bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)
+
+ def backup_packaging_data(self):
+        # Save the packaging dirs for incremental rpm image generation
+ if os.path.exists(self.saved_packaging_data):
+ bb.utils.remove(self.saved_packaging_data, True)
+ for i in self.packaging_data_dirs:
+ source_dir = oe.path.join(self.target_rootfs, i)
+ target_dir = oe.path.join(self.saved_packaging_data, i)
+ if os.path.isdir(source_dir):
+ shutil.copytree(source_dir, target_dir, symlinks=True)
+ elif os.path.isfile(source_dir):
+ shutil.copy2(source_dir, target_dir)
+
+ def recovery_packaging_data(self):
+        # Copy the saved packaging data back
+ if os.path.exists(self.saved_packaging_data):
+ for i in self.packaging_data_dirs:
+ target_dir = oe.path.join(self.target_rootfs, i)
+ if os.path.exists(target_dir):
+ bb.utils.remove(target_dir, True)
+ source_dir = oe.path.join(self.saved_packaging_data, i)
+ if os.path.isdir(source_dir):
+ shutil.copytree(source_dir, target_dir, symlinks=True)
+ elif os.path.isfile(source_dir):
+ shutil.copy2(source_dir, target_dir)
+
+ def list_installed(self):
+ output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
+ print_output = False)
+ packages = {}
+ current_package = None
+ current_deps = None
+ current_state = "initial"
+ for line in output.splitlines():
+ if line.startswith("Package:"):
+ package_info = line.split(" ")[1:]
+ current_package = package_info[0]
+ package_arch = package_info[1]
+ package_version = package_info[2]
+ package_rpm = package_info[3]
+ packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm}
+ current_deps = []
+ elif line.startswith("Dependencies:"):
+ current_state = "dependencies"
+ elif line.startswith("Recommendations"):
+ current_state = "recommendations"
+ elif line.startswith("DependenciesEndHere:"):
+ current_state = "initial"
+ packages[current_package]["deps"] = current_deps
+ elif len(line) > 0:
+ if current_state == "dependencies":
+ current_deps.append(line)
+ elif current_state == "recommendations":
+ current_deps.append("%s [REC]" % line)
+
+ return packages
+
+ def update(self):
+ self._invoke_dnf(["makecache", "--refresh"])
+
+ def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
+ os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
+
+ dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
+ standard_dnf_args = ["-v", "--rpmverbosity=info", "-y",
+ "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
+ "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
+ "--installroot=%s" % (self.target_rootfs),
+ "--setopt=logdir=%s" % (self.d.getVar('T'))
+ ]
+ if hasattr(self, "rpm_repo_dir"):
+ standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir))
+ cmd = [dnf_cmd] + standard_dnf_args + dnf_args
+ bb.note('Running %s' % ' '.join(cmd))
+ try:
+ output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
+ if print_output:
+ bb.debug(1, output)
+ return output
+ except subprocess.CalledProcessError as e:
+ if print_output:
+ (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+ "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ else:
+ (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+ "'%s' returned %d:" % (' '.join(cmd), e.returncode))
+ return e.output.decode("utf-8")
+
+ def dump_install_solution(self, pkgs):
+ open(self.solution_manifest, 'w').write(" ".join(pkgs))
+ return pkgs
+
+ def load_old_install_solution(self):
+ if not os.path.exists(self.solution_manifest):
+ return []
+ with open(self.solution_manifest, 'r') as fd:
+ return fd.read().split()
+
+ def _script_num_prefix(self, path):
+ files = os.listdir(path)
+ numbers = set()
+ numbers.add(99)
+ for f in files:
+ numbers.add(int(f.split("-")[0]))
+ return max(numbers) + 1
+
+ def save_rpmpostinst(self, pkg):
+ bb.note("Saving postinstall script of %s" % (pkg))
+ cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]
+
+ try:
+ output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not invoke rpm. Command "
+ "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))
+
+ # may need to prepend #!/bin/sh to output
+
+ target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
+ bb.utils.mkdirhier(target_path)
+ num = self._script_num_prefix(target_path)
+ saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
+ open(saved_script_name, 'w').write(output)
+ os.chmod(saved_script_name, 0o755)
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
+ bb.utils.mkdirhier(rpm_postinsts_dir)
+
+ # Save the package postinstalls in /etc/rpm-postinsts
+ for pkg in registered_pkgs.split():
+ self.save_rpmpostinst(pkg)
+
+ def extract(self, pkg):
+ output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
+ pkg_name = output.splitlines()[-1]
+ if not pkg_name.endswith(".rpm"):
+ bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
+ pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
+
+ cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
+ rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
+
+ if not os.path.isfile(pkg_path):
+            bb.fatal("Unable to extract package for '%s'. "
+                     "File %s doesn't exist" % (pkg, pkg_path))
+
+ tmp_dir = tempfile.mkdtemp()
+ current_dir = os.getcwd()
+ os.chdir(tmp_dir)
+
+ try:
+ cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
+ except OSError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))
+
+ bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
+ os.chdir(current_dir)
+
+ return tmp_dir
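
RpmPM.list_installed() above drives "dnf repoquery" with a custom --queryformat and then parses the result with a small line-oriented state machine. The sketch below runs the same parsing logic over a fabricated sample in that format:

# Standalone sketch of the list_installed() parser; the sample text mimics the
# custom --queryformat output with made-up package data.
sample = """Package: busybox core2_64 1.33.1 busybox-1.33.1-r0.core2_64.rpm
Dependencies:
libc.so.6()(64bit)
Recommendations:
busybox-syslog
DependenciesEndHere:
"""

packages = {}
current = None
deps = []
state = "initial"
for line in sample.splitlines():
    if line.startswith("Package:"):
        name, arch, ver, rpm = line.split(" ")[1:]
        current = name
        packages[current] = {"arch": arch, "ver": ver, "filename": rpm}
        deps = []
    elif line.startswith("Dependencies:"):
        state = "dependencies"
    elif line.startswith("Recommendations"):
        state = "recommendations"
    elif line.startswith("DependenciesEndHere:"):
        state = "initial"
        packages[current]["deps"] = deps
    elif line:
        if state == "dependencies":
            deps.append(line)
        elif state == "recommendations":
            deps.append("%s [REC]" % line)

print(packages["busybox"]["deps"])   # ['libc.so.6()(64bit)', 'busybox-syslog [REC]']
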
diff --git a/meta/lib/oe/package_manager/rpm/manifest.py b/meta/lib/oe/package_manager/rpm/manifest.py
new file mode 100644
index 0000000000..e6604b301f
--- /dev/null
+++ b/meta/lib/oe/package_manager/rpm/manifest.py
@@ -0,0 +1,54 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from oe.manifest import Manifest
+
+class PkgManifest(Manifest):
+ """
+    Returns a dictionary object with mip (must install) and mlp (multilib) packages.
+ """
+ def _split_multilib(self, pkg_list):
+ pkgs = dict()
+
+ for pkg in pkg_list.split():
+ pkg_type = self.PKG_TYPE_MUST_INSTALL
+
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
+
+ for ml_variant in ml_variants:
+ if pkg.startswith(ml_variant + '-'):
+ pkg_type = self.PKG_TYPE_MULTILIB
+
+ if not pkg_type in pkgs:
+ pkgs[pkg_type] = pkg
+ else:
+ pkgs[pkg_type] += " " + pkg
+
+ return pkgs
+
+ def create_initial(self):
+ pkgs = dict()
+
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ if var in self.vars_to_split:
+ split_pkgs = self._split_multilib(self.d.getVar(var))
+ if split_pkgs is not None:
+ pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
+ else:
+                pkg_list = self.d.getVar(var)
+                if pkg_list is not None:
+                    pkgs[self.var_maps[self.manifest_type][var]] = pkg_list
+
+ for pkg_type in pkgs:
+ for pkg in pkgs[pkg_type].split():
+ manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ pass
diff --git a/meta/lib/oe/package_manager/rpm/rootfs.py b/meta/lib/oe/package_manager/rpm/rootfs.py
new file mode 100644
index 0000000000..00d07cd9cc
--- /dev/null
+++ b/meta/lib/oe/package_manager/rpm/rootfs.py
@@ -0,0 +1,148 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from oe.rootfs import Rootfs
+from oe.manifest import Manifest
+from oe.utils import execute_pre_post_process
+from oe.package_manager.rpm.manifest import PkgManifest
+from oe.package_manager.rpm import RpmPM
+
+class PkgRootfs(Rootfs):
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\
+ r'|exit 1|ERROR: |Error: |Error |ERROR '\
+ r'|Failed |Failed: |Failed$|Failed\(\d+\):)'
+
+ self.manifest = PkgManifest(d, manifest_dir)
+
+ self.pm = RpmPM(d,
+ d.getVar('IMAGE_ROOTFS'),
+ self.d.getVar('TARGET_VENDOR')
+ )
+
+ self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
+ if self.inc_rpm_image_gen != "1":
+ bb.utils.remove(self.image_rootfs, True)
+ else:
+ self.pm.recovery_packaging_data()
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
+
+ self.pm.create_configs()
+
+    '''
+    When rpm incremental image generation is enabled, remove packages that are
+    no longer needed by comparing the new install solution manifest with the
+    previously installed manifest.
+    '''
+ def _create_incremental(self, pkgs_initial_install):
+ if self.inc_rpm_image_gen == "1":
+
+ pkgs_to_install = list()
+ for pkg_type in pkgs_initial_install:
+ pkgs_to_install += pkgs_initial_install[pkg_type]
+
+ installed_manifest = self.pm.load_old_install_solution()
+ solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
+
+ pkg_to_remove = list()
+ for pkg in installed_manifest:
+ if pkg not in solution_manifest:
+ pkg_to_remove.append(pkg)
+
+ self.pm.update()
+
+            bb.note('incremental update -- upgrade packages in place')
+            self.pm.upgrade()
+            if pkg_to_remove:
+                bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
+                self.pm.remove(pkg_to_remove)
+
+ self.pm.autoremove()
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
+ rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, rpm_pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ if self.inc_rpm_image_gen == "1":
+ self._create_incremental(pkgs_to_install)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.update()
+
+ pkgs = []
+ pkgs_attempt = []
+ for pkg_type in pkgs_to_install:
+ if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+ pkgs_attempt += pkgs_to_install[pkg_type]
+ else:
+ pkgs += pkgs_to_install[pkg_type]
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install(pkgs)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install(pkgs_attempt, True)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install_complementary()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
+
+ execute_pre_post_process(self.d, rpm_post_process_cmds)
+
+ if self.inc_rpm_image_gen == "1":
+ self.pm.backup_packaging_data()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+
+ @staticmethod
+ def _depends_list():
+ return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
+ 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
+
+ def _get_delayed_postinsts(self):
+ postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
+ if os.path.isdir(postinst_dir):
+ files = os.listdir(postinst_dir)
+ for f in files:
+ bb.note('Delayed package scriptlet: %s' % f)
+ return files
+
+ return None
+
+ def _save_postinsts(self):
+ # this is just a stub. For RPM, the failed postinstalls are
+ # already saved in /etc/rpm-postinsts
+ pass
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d):
+ self.pm._invoke_dnf(["clean", "all"])
diff --git a/meta/lib/oe/package_manager/rpm/sdk.py b/meta/lib/oe/package_manager/rpm/sdk.py
new file mode 100644
index 0000000000..c5f232431f
--- /dev/null
+++ b/meta/lib/oe/package_manager/rpm/sdk.py
@@ -0,0 +1,114 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import glob
+from oe.utils import execute_pre_post_process
+from oe.sdk import Sdk
+from oe.manifest import Manifest
+from oe.package_manager.rpm.manifest import PkgManifest
+from oe.package_manager.rpm import RpmPM
+
+class PkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"):
+ super(PkgSdk, self).__init__(d, manifest_dir)
+
+ self.target_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ rpm_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ rpm_repo_workdir = "oe-sdk-ext-repo"
+
+ self.target_pm = RpmPM(d,
+ self.sdk_target_sysroot,
+ self.d.getVar('TARGET_VENDOR'),
+ 'target',
+ rpm_repo_workdir=rpm_repo_workdir
+ )
+
+ self.host_pm = RpmPM(d,
+ self.sdk_host_sysroot,
+ self.d.getVar('SDK_VENDOR'),
+ 'host',
+ "SDK_PACKAGE_ARCHS",
+ "SDK_OS",
+ rpm_repo_workdir=rpm_repo_workdir
+ )
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ pm.create_configs()
+ pm.write_index()
+ pm.update()
+
+ pkgs = []
+ pkgs_attempt = []
+ for pkg_type in pkgs_to_install:
+ if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+ pkgs_attempt += pkgs_to_install[pkg_type]
+ else:
+ pkgs += pkgs_to_install[pkg_type]
+
+ pm.install(pkgs)
+
+ pm.install(pkgs_attempt, True)
+
+ def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ self.target_pm.run_intercepts(populate_sdk='target')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts(populate_sdk='host')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.host_pm.remove_packaging_data()
+
+ # Move host RPM library data
+ native_rpm_state_dir = os.path.join(self.sdk_output,
+ self.sdk_native_path,
+ self.d.getVar('localstatedir_nativesdk').strip('/'),
+ "lib",
+ "rpm"
+ )
+ self.mkdirhier(native_rpm_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output,
+ "var",
+ "lib",
+ "rpm",
+ "*")):
+ self.movefile(f, native_rpm_state_dir)
+
+ self.remove(os.path.join(self.sdk_output, "var"), True)
+
+ # Move host sysconfig data
+ native_sysconf_dir = os.path.join(self.sdk_output,
+ self.sdk_native_path,
+ self.d.getVar('sysconfdir',
+ True).strip('/'),
+ )
+ self.mkdirhier(native_sysconf_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
+ self.movefile(f, native_sysconf_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
+ self.movefile(f, native_sysconf_dir)
+ self.remove(os.path.join(self.sdk_output, "etc"), True)
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py
index 32e5c82a94..212f048bc6 100644
--- a/meta/lib/oe/packagedata.py
+++ b/meta/lib/oe/packagedata.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import codecs
import os
@@ -13,10 +17,9 @@ def read_pkgdatafile(fn):
if os.access(fn, os.R_OK):
import re
- f = open(fn, 'r')
- lines = f.readlines()
- f.close()
- r = re.compile("([^:]+):\s*(.*)")
+ with open(fn, 'r') as f:
+ lines = f.readlines()
+ r = re.compile(r"(^.+?):\s+(.*)")
for l in lines:
m = r.match(l)
if m:
@@ -42,18 +45,30 @@ def read_pkgdata(pn, d):
return read_pkgdatafile(fn)
#
-# Collapse FOO_pkg variables into FOO
+# Collapse FOO:pkg variables into FOO
#
def read_subpkgdata_dict(pkg, d):
ret = {}
subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
for var in subd:
- newvar = var.replace("_" + pkg, "")
- if newvar == var and var + "_" + pkg in subd:
+ newvar = var.replace(":" + pkg, "")
+ if newvar == var and var + ":" + pkg in subd:
continue
ret[newvar] = subd[var]
return ret
+def read_subpkgdata_extended(pkg, d):
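+    """
+    Return the extended (zstd-compressed json) pkgdata for a package as a
+    dictionary, or None if no extended pkgdata file exists for it.
+    """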
+ import json
+ import bb.compress.zstd
+
+ fn = d.expand("${PKGDATA_DIR}/extended/%s.json.zstd" % pkg)
+ try:
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+ with bb.compress.zstd.open(fn, "rt", encoding="utf-8", num_threads=num_threads) as f:
+ return json.load(f)
+ except FileNotFoundError:
+ return None
+
def _pkgmap(d):
"""Return a dictionary mapping package to recipe name."""
diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py
index 4bc5d3e4bb..8fcaecde82 100644
--- a/meta/lib/oe/packagegroup.py
+++ b/meta/lib/oe/packagegroup.py
@@ -1,17 +1,15 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import itertools
def is_optional(feature, d):
- packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
- if packages:
- return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
- else:
- return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional"))
+ return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
def packages(features, d):
for feature in features:
packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
- if not packages:
- packages = d.getVar("PACKAGE_GROUP_%s" % feature)
for pkg in (packages or "").split():
yield pkg
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py
index aaa2d3aabd..9034fcae03 100644
--- a/meta/lib/oe/patch.py
+++ b/meta/lib/oe/patch.py
@@ -1,4 +1,10 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import oe.path
+import oe.types
+import subprocess
class NotFoundError(bb.BBHandledException):
def __init__(self, path):
@@ -32,15 +38,25 @@ def runcmd(args, dir = None):
args = [ pipes.quote(str(arg)) for arg in args ]
cmd = " ".join(args)
# print("cmd: %s" % cmd)
- (exitstatus, output) = oe.utils.getstatusoutput(cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ stdout, stderr = proc.communicate()
+ stdout = stdout.decode('utf-8')
+ stderr = stderr.decode('utf-8')
+ exitstatus = proc.returncode
if exitstatus != 0:
- raise CmdError(cmd, exitstatus >> 8, output)
- return output
+            raise CmdError(cmd, exitstatus, "stdout: %s\nstderr: %s" % (stdout, stderr))
+ if " fuzz " in stdout and "Hunk " in stdout:
+ # Drop patch fuzz info with header and footer to log file so
+ # insane.bbclass can handle to throw error/warning
+ bb.note("--- Patch fuzz start ---\n%s\n--- Patch fuzz end ---" % format(stdout))
+
+ return stdout
finally:
if dir:
os.chdir(olddir)
+
class PatchError(Exception):
def __init__(self, msg):
self.msg = msg
@@ -81,7 +97,7 @@ class PatchSet(object):
patch[param] = PatchSet.defaults[param]
if patch.get("remote"):
- patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)
+ patch["file"] = self.d.expand(bb.fetch2.localpath(patch["remote"], self.d))
patch["filemd5"] = bb.utils.md5_file(patch["file"])
@@ -211,7 +227,7 @@ class PatchTree(PatchSet):
self.patches.insert(i, patch)
def _applypatch(self, patch, force = False, reverse = False, run = True):
- shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
+ shellcmd = ["cat", patch['file'], "|", "patch", "--no-backup-if-mismatch", "-p", patch['strippath']]
if reverse:
shellcmd.append('-R')
@@ -283,6 +299,24 @@ class GitApplyTree(PatchTree):
PatchTree.__init__(self, dir, d)
self.commituser = d.getVar('PATCH_GIT_USER_NAME')
self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
+ if not self._isInitialized():
+ self._initRepo()
+
+ def _isInitialized(self):
+ cmd = "git rev-parse --show-toplevel"
+ try:
+ output = runcmd(cmd.split(), self.dir).strip()
+        except CmdError:
+            # runcmd returned non-zero, which most likely means exit code 128:
+            # not a git repository
+            return False
+        # Make sure the repo is inside the build directory so we don't break
+        # top-level git repos
+        return os.path.samefile(output, self.dir)
+
+ def _initRepo(self):
+ runcmd("git init".split(), self.dir)
+ runcmd("git add .".split(), self.dir)
+ runcmd("git commit -a --allow-empty -m bitbake_patching_started".split(), self.dir)
@staticmethod
def extractPatchHeader(patchfile):
@@ -316,8 +350,8 @@ class GitApplyTree(PatchTree):
@staticmethod
def interpretPatchHeader(headerlines):
import re
- author_re = re.compile('[\S ]+ <\S+@\S+\.\S+>')
- from_commit_re = re.compile('^From [a-z0-9]{40} .*')
+ author_re = re.compile(r'[\S ]+ <\S+@\S+\.\S+>')
+ from_commit_re = re.compile(r'^From [a-z0-9]{40} .*')
outlines = []
author = None
date = None
@@ -405,7 +439,7 @@ class GitApplyTree(PatchTree):
date = newdate
if not subject:
subject = newsubject
- if subject and outlines and not outlines[0].strip() == subject:
+ if subject and not (outlines and outlines[0].strip() == subject):
outlines.insert(0, '%s\n\n' % subject.strip())
# Write out commit message to a file
@@ -428,10 +462,9 @@ class GitApplyTree(PatchTree):
def extractPatches(tree, startcommit, outdir, paths=None):
import tempfile
import shutil
- import re
tempdir = tempfile.mkdtemp(prefix='oepatch')
try:
- shellcmd = ["git", "format-patch", startcommit, "-o", tempdir]
+ shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", startcommit, "-o", tempdir]
if paths:
shellcmd.append('--')
shellcmd.extend(paths)
@@ -444,13 +477,10 @@ class GitApplyTree(PatchTree):
try:
with open(srcfile, 'r', encoding=encoding) as f:
for line in f:
- checkline = line
- if checkline.startswith('Subject: '):
- checkline = re.sub(r'\[.+?\]\s*', '', checkline[9:])
- if checkline.startswith(GitApplyTree.patch_line_prefix):
+ if line.startswith(GitApplyTree.patch_line_prefix):
outfile = line.split()[-1].strip()
continue
- if checkline.startswith(GitApplyTree.ignore_commit_prefix):
+ if line.startswith(GitApplyTree.ignore_commit_prefix):
continue
patchlines.append(line)
except UnicodeDecodeError:
@@ -497,8 +527,7 @@ class GitApplyTree(PatchTree):
with open(commithook, 'w') as f:
# NOTE: the formatting here is significant; if you change it you'll also need to
# change other places which read it back
- f.write('echo >> $1\n')
- f.write('echo "%s: $PATCHFILE" >> $1\n' % GitApplyTree.patch_line_prefix)
+ f.write('echo "\n%s: $PATCHFILE" >> $1' % GitApplyTree.patch_line_prefix)
os.chmod(commithook, 0o755)
shutil.copy2(commithook, applyhook)
try:
@@ -506,7 +535,7 @@ class GitApplyTree(PatchTree):
try:
shellcmd = [patchfilevar, "git", "--work-tree=%s" % reporoot]
self.gitCommandUserOptions(shellcmd, self.commituser, self.commitemail)
- shellcmd += ["am", "-3", "--keep-cr", "-p%s" % patch['strippath']]
+ shellcmd += ["am", "-3", "--keep-cr", "--no-scissors", "-p%s" % patch['strippath']]
return _applypatchhelper(shellcmd, patch, force, reverse, run)
except CmdError:
# Need to abort the git am, or we'll still be within it at the end
@@ -772,9 +801,11 @@ class UserResolver(Resolver):
def patch_path(url, fetch, workdir, expand=True):
- """Return the local path of a patch, or None if this isn't a patch"""
+ """Return the local path of a patch, or return nothing if this isn't a patch"""
local = fetch.localpath(url)
+ if os.path.isdir(local):
+ return
base, ext = os.path.splitext(os.path.basename(local))
if ext in ('.gz', '.bz2', '.xz', '.Z'):
if expand:
@@ -838,6 +869,7 @@ def src_patches(d, all=False, expand=True):
def should_apply(parm, d):
+ import bb.utils
if "mindate" in parm or "maxdate" in parm:
pn = d.getVar('PN')
srcdate = d.getVar('SRCDATE_%s' % pn)
@@ -874,5 +906,15 @@ def should_apply(parm, d):
if srcrev and parm["notrev"] in srcrev:
return False, "doesn't apply to revision"
+ if "maxver" in parm:
+ pv = d.getVar('PV')
+ if bb.utils.vercmp_string_op(pv, parm["maxver"], ">"):
+ return False, "applies to earlier version"
+
+ if "minver" in parm:
+ pv = d.getVar('PV')
+ if bb.utils.vercmp_string_op(pv, parm["minver"], "<"):
+ return False, "applies to later version"
+
return True, None
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py
index 448a2b944e..c8d8ad05b9 100644
--- a/meta/lib/oe/path.py
+++ b/meta/lib/oe/path.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import errno
import glob
import shutil
@@ -86,19 +90,34 @@ def copytree(src, dst):
# This way we also preserve hardlinks between files in the tree.
bb.utils.mkdirhier(dst)
- cmd = "tar --xattrs --xattrs-include='*' -cf - -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
+ cmd = "tar --xattrs --xattrs-include='*' -cf - -S -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
def copyhardlinktree(src, dst):
- """ Make the hard link when possible, otherwise copy. """
+ """Make a tree of hard links when possible, otherwise copy."""
bb.utils.mkdirhier(dst)
if os.path.isdir(src) and not len(os.listdir(src)):
return
- if (os.stat(src).st_dev == os.stat(dst).st_dev):
+ canhard = False
+ testfile = None
+ for root, dirs, files in os.walk(src):
+ if len(files):
+ testfile = os.path.join(root, files[0])
+ break
+
+ if testfile is not None:
+ try:
+ os.link(testfile, os.path.join(dst, 'testfile'))
+ os.unlink(os.path.join(dst, 'testfile'))
+ canhard = True
+ except Exception as e:
+ bb.debug(2, "Hardlink test failed with " + str(e))
+
+    if canhard:
# Need to copy directories only with tar first since cp will error if two
# writers try and create a directory at the same time
- cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, src, dst)
+ cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -S -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
source = ''
if os.path.isdir(src):
@@ -114,6 +133,14 @@ def copyhardlinktree(src, dst):
else:
copytree(src, dst)
+def copyhardlink(src, dst):
+ """Make a hard link when possible, otherwise copy."""
+
+ try:
+ os.link(src, dst)
+ except OSError:
+ shutil.copy(src, dst)
+
def remove(path, recurse=True):
"""
Equivalent to rm -f or rm -rf
@@ -237,3 +264,80 @@ def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False)
raise
return file
+
+def is_path_parent(possible_parent, *paths):
+ """
+ Return True if a path is the parent of another, False otherwise.
+ Multiple paths to test can be specified in which case all
+ specified test paths must be under the parent in order to
+ return True.
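+
+    For example (illustrative), is_path_parent('/usr', '/usr/lib', '/usr/bin')
+    returns True, while is_path_parent('/usr', '/usr/lib', '/etc') returns False.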
+ """
+ def abs_path_trailing(pth):
+ pth_abs = os.path.abspath(pth)
+ if not pth_abs.endswith(os.sep):
+ pth_abs += os.sep
+ return pth_abs
+
+ possible_parent_abs = abs_path_trailing(possible_parent)
+ if not paths:
+ return False
+ for path in paths:
+ path_abs = abs_path_trailing(path)
+ if not path_abs.startswith(possible_parent_abs):
+ return False
+ return True
+
+def which_wild(pathname, path=None, mode=os.F_OK, *, reverse=False, candidates=False):
+ """Search a search path for pathname, supporting wildcards.
+
+    Return all paths in the specified search path matching the wildcard pattern
+    in pathname, returning only the first encountered for each file. If
+    candidates is True, information on all potential candidate paths is
+    included.
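+
+    For example (illustrative), which_wild('zlib*.pc', '/a:/b') returns the
+    files matching that pattern under /a and /b, keeping only the first
+    directory in which each relative match is found.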
+ """
+ paths = (path or os.environ.get('PATH', os.defpath)).split(':')
+ if reverse:
+ paths.reverse()
+
+ seen, files = set(), []
+ for index, element in enumerate(paths):
+ if not os.path.isabs(element):
+ element = os.path.abspath(element)
+
+ candidate = os.path.join(element, pathname)
+ globbed = glob.glob(candidate)
+ if globbed:
+ for found_path in sorted(globbed):
+ if not os.access(found_path, mode):
+ continue
+ rel = os.path.relpath(found_path, element)
+ if rel not in seen:
+ seen.add(rel)
+ if candidates:
+ files.append((found_path, [os.path.join(p, rel) for p in paths[:index+1]]))
+ else:
+ files.append(found_path)
+
+ return files
+
+def canonicalize(paths, sep=','):
+ """Given a string with paths (separated by commas by default), expand
+ each path using os.path.realpath() and return the resulting paths as a
+    string (separated using the same separator as the original string).
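+
+    For example (illustrative, assuming no symlinks are involved),
+    canonicalize('/usr//bin/,/tmp/../etc') returns '/usr/bin/,/etc'.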
+ """
+ # Ignore paths containing "$" as they are assumed to be unexpanded bitbake
+ # variables. Normally they would be ignored, e.g., when passing the paths
+ # through the shell they would expand to empty strings. However, when they
+ # are passed through os.path.realpath(), it will cause them to be prefixed
+ # with the absolute path to the current directory and thus not be empty
+ # anymore.
+ #
+ # Also maintain trailing slashes, as the paths may actually be used as
+    # prefixes in string comparisons later on, where the slashes are significant.
+ canonical_paths = []
+ for path in (paths or '').split(sep):
+ if '$' not in path:
+ trailing_slash = path.endswith('/') and '/' or ''
+ canonical_paths.append(os.path.realpath(path) + trailing_slash)
+
+ return sep.join(canonical_paths)
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py
index 32dfc15e88..339f7aebca 100644
--- a/meta/lib/oe/prservice.py
+++ b/meta/lib/oe/prservice.py
@@ -1,14 +1,16 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
def prserv_make_conn(d, check = False):
import prserv.serv
host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
try:
conn = None
- conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
+ conn = prserv.serv.connect(host_params[0], int(host_params[1]))
if check:
if not conn.ping():
raise Exception('service not available')
- d.setVar("__PRSERV_CONN",conn)
except Exception as exc:
bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))
@@ -19,31 +21,29 @@ def prserv_dump_db(d):
bb.error("Not using network based PR service")
return None
- conn = d.getVar("__PRSERV_CONN")
+ conn = prserv_make_conn(d)
if conn is None:
- conn = prserv_make_conn(d)
- if conn is None:
- bb.error("Making connection failed to remote PR service")
- return None
+ bb.error("Making connection failed to remote PR service")
+ return None
#dump db
opt_version = d.getVar('PRSERV_DUMPOPT_VERSION')
opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH')
opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM')
opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL'))
- return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
+    exported = conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
+    conn.close()
+    return exported
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
if not d.getVar('PRSERV_HOST'):
bb.error("Not using network based PR service")
return None
- conn = d.getVar("__PRSERV_CONN")
+ conn = prserv_make_conn(d)
if conn is None:
- conn = prserv_make_conn(d)
- if conn is None:
- bb.error("Making connection failed to remote PR service")
- return None
+ bb.error("Making connection failed to remote PR service")
+ return None
#get the entry values
imported = []
prefix = "PRAUTO$"
@@ -67,6 +67,7 @@ def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksu
bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
else:
imported.append((version,pkgarch,checksum,value))
+ conn.close()
return imported
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
@@ -76,41 +77,40 @@ def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
df = d.getVar('PRSERV_DUMPFILE')
#write data
lf = bb.utils.lockfile("%s.lock" % df)
- f = open(df, "a")
- if metainfo:
- #dump column info
- f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']);
- f.write("#Table: %s\n" % metainfo['tbl_name'])
- f.write("#Columns:\n")
- f.write("#name \t type \t notn \t dflt \t pk\n")
- f.write("#----------\t --------\t --------\t --------\t ----\n")
- for i in range(len(metainfo['col_info'])):
- f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
- (metainfo['col_info'][i]['name'],
- metainfo['col_info'][i]['type'],
- metainfo['col_info'][i]['notnull'],
- metainfo['col_info'][i]['dflt_value'],
- metainfo['col_info'][i]['pk']))
- f.write("\n")
+ with open(df, "a") as f:
+ if metainfo:
+ #dump column info
+ f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']);
+ f.write("#Table: %s\n" % metainfo['tbl_name'])
+ f.write("#Columns:\n")
+ f.write("#name \t type \t notn \t dflt \t pk\n")
+ f.write("#----------\t --------\t --------\t --------\t ----\n")
+ for i in range(len(metainfo['col_info'])):
+ f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
+ (metainfo['col_info'][i]['name'],
+ metainfo['col_info'][i]['type'],
+ metainfo['col_info'][i]['notnull'],
+ metainfo['col_info'][i]['dflt_value'],
+ metainfo['col_info'][i]['pk']))
+ f.write("\n")
- if lockdown:
- f.write("PRSERV_LOCKDOWN = \"1\"\n\n")
+ if lockdown:
+ f.write("PRSERV_LOCKDOWN = \"1\"\n\n")
- if datainfo:
- idx = {}
- for i in range(len(datainfo)):
- pkgarch = datainfo[i]['pkgarch']
- value = datainfo[i]['value']
- if pkgarch not in idx:
- idx[pkgarch] = i
- elif value > datainfo[idx[pkgarch]]['value']:
- idx[pkgarch] = i
- f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
- (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
- if not nomax:
- for i in idx:
- f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
- f.close()
+ if datainfo:
+ idx = {}
+ for i in range(len(datainfo)):
+ pkgarch = datainfo[i]['pkgarch']
+ value = datainfo[i]['value']
+ if pkgarch not in idx:
+ idx[pkgarch] = i
+ elif value > datainfo[idx[pkgarch]]['value']:
+ idx[pkgarch] = i
+ f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
+ (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
+ if not nomax:
+ for i in idx:
+ f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
bb.utils.unlockfile(lf)
def prserv_check_avail(d):
@@ -123,4 +123,5 @@ def prserv_check_avail(d):
except TypeError:
bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
else:
- prserv_make_conn(d, True)
+ conn = prserv_make_conn(d, True)
+ conn.close()
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py
index 3231e60cea..89acd3ead0 100644
--- a/meta/lib/oe/qa.py
+++ b/meta/lib/oe/qa.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import os, struct, mmap
class NotELFFileError(Exception):
@@ -37,13 +41,18 @@ class ELFFile:
def __init__(self, name):
self.name = name
self.objdump_output = {}
+ self.data = None
# Context Manager functions to close the mmap explicitly
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
- self.data.close()
+ self.close()
+
+ def close(self):
+ if self.data:
+ self.data.close()
def open(self):
with open(self.name, "rb") as f:
@@ -122,6 +131,9 @@ class ELFFile:
"""
return self.getShort(ELFFile.E_MACHINE)
+ def set_objdump(self, cmd, output):
+ self.objdump_output[cmd] = output
+
def run_objdump(self, cmd, d):
import bb.process
import sys
@@ -150,6 +162,7 @@ def elf_machine_to_string(machine):
"""
try:
return {
+ 0x00: "Unset",
0x02: "SPARC",
0x03: "x86",
0x08: "MIPS",
@@ -158,11 +171,46 @@ def elf_machine_to_string(machine):
0x2A: "SuperH",
0x32: "IA-64",
0x3E: "x86-64",
- 0xB7: "AArch64"
+ 0xB7: "AArch64",
+ 0xF7: "BPF"
}[machine]
except:
return "Unknown (%s)" % repr(machine)
+def write_error(type, error, d):
+ logfile = d.getVar('QA_LOGFILE')
+ if logfile:
+ p = d.getVar('P')
+ with open(logfile, "a+") as f:
+ f.write("%s: %s [%s]\n" % (p, error, type))
+
+def handle_error(error_class, error_msg, d):
+ if error_class in (d.getVar("ERROR_QA") or "").split():
+ write_error(error_class, error_msg, d)
+ bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
+ d.setVar("QA_ERRORS_FOUND", "True")
+ return False
+ elif error_class in (d.getVar("WARN_QA") or "").split():
+ write_error(error_class, error_msg, d)
+ bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
+ else:
+ bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
+ return True
+
+def add_message(messages, section, new_msg):
+ if section not in messages:
+ messages[section] = new_msg
+ else:
+ messages[section] = messages[section] + "\n" + new_msg
+
+def exit_with_message_if_errors(message, d):
+ qa_fatal_errors = bb.utils.to_boolean(d.getVar("QA_ERRORS_FOUND"), False)
+ if qa_fatal_errors:
+ bb.fatal(message)
+
+def exit_if_errors(d):
+ exit_with_message_if_errors("Fatal QA errors were found, failing task.", d)
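+
+# Typical flow (illustrative): a QA check calls handle_error() once per issue
+# it finds, then the task calls exit_if_errors() at the end so that every
+# issue is reported before the build fails.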
+
if __name__ == "__main__":
import sys
diff --git a/meta/lib/oe/recipeutils.py b/meta/lib/oe/recipeutils.py
index a7fdd36e40..872ff97b89 100644
--- a/meta/lib/oe/recipeutils.py
+++ b/meta/lib/oe/recipeutils.py
@@ -2,7 +2,9 @@
#
# Some code borrowed from the OE layer index
#
-# Copyright (C) 2013-2016 Intel Corporation
+# Copyright (C) 2013-2017 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
@@ -16,40 +18,40 @@ import shutil
import re
import fnmatch
import glob
-from collections import OrderedDict, defaultdict
+import bb.tinfoil
+from collections import OrderedDict, defaultdict
+from bb.utils import vercmp_string
# Help us to find places to insert values
-recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()']
+recipe_progression = ['SUMMARY', 'DESCRIPTION', 'AUTHOR', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()', 'BBCLASSEXTEND']
# Variables that sometimes are a bit long but shouldn't be wrapped
-nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', 'SRC_URI[md5sum]', 'SRC_URI[sha256sum]']
+nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha256sum\]']
list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
-def pn_to_recipe(cooker, pn, mc=''):
- """Convert a recipe name (PN) to the path to the recipe file"""
-
- best = cooker.findBestProvider(pn, mc)
- return best[3]
-
-
-def get_unavailable_reasons(cooker, pn):
- """If a recipe could not be found, find out why if possible"""
- import bb.taskdata
- taskdata = bb.taskdata.TaskData(None, skiplist=cooker.skiplist)
- return taskdata.get_reasons(pn)
-
-
-def parse_recipe(cooker, fn, appendfiles):
+def simplify_history(history, d):
"""
- Parse an individual recipe file, optionally with a list of
- bbappend files.
+ Eliminate any irrelevant events from a variable history
"""
- import bb.cache
- parser = bb.cache.NoCache(cooker.databuilder)
- envdata = parser.loadDataFull(fn, appendfiles)
- return envdata
+ ret_history = []
+ has_set = False
+ # Go backwards through the history and remove any immediate operations
+ # before the most recent set
+ for event in reversed(history):
+ if 'flag' in event or not 'file' in event:
+ continue
+ if event['op'] == 'set':
+ if has_set:
+ continue
+ has_set = True
+ elif event['op'] in ('append', 'prepend', 'postdot', 'predot'):
+ # Reminder: "append" and "prepend" mean += and =+ respectively, NOT :append / :prepend
+ if has_set:
+ continue
+ ret_history.insert(0, event)
+ return ret_history
def get_var_files(fn, varlist, d):
@@ -58,11 +60,19 @@ def get_var_files(fn, varlist, d):
"""
varfiles = {}
for v in varlist:
- history = d.varhistory.variable(v)
files = []
- for event in history:
- if 'file' in event and not 'flag' in event:
- files.append(event['file'])
+ if '[' in v:
+ varsplit = v.split('[')
+ varflag = varsplit[1].split(']')[0]
+ history = d.varhistory.variable(varsplit[0])
+ for event in history:
+ if 'file' in event and event.get('flag', '') == varflag:
+ files.append(event['file'])
+ else:
+ history = d.varhistory.variable(v)
+ for event in history:
+ if 'file' in event and not 'flag' in event:
+ files.append(event['file'])
if files:
actualfile = files[-1]
else:
@@ -142,6 +152,10 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
else:
newline = ''
+ nowrap_vars_res = []
+ for item in nowrap_vars:
+ nowrap_vars_res.append(re.compile('^%s$' % item))
+
recipe_progression_res = []
recipe_progression_restrs = []
for item in recipe_progression:
@@ -149,7 +163,7 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
key = item[:-2]
else:
key = item
- restr = '%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key
+ restr = r'%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key
if item.endswith('()'):
recipe_progression_restrs.append(restr + '()')
else:
@@ -172,15 +186,27 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
def outputvalue(name, lines, rewindcomments=False):
if values[name] is None:
return
- rawtext = '%s = "%s"%s' % (name, values[name], newline)
+ if isinstance(values[name], tuple):
+ op, value = values[name]
+ if op == '+=' and value.strip() == '':
+ return
+ else:
+ value = values[name]
+ op = '='
+ rawtext = '%s %s "%s"%s' % (name, op, value, newline)
addlines = []
- if name in nowrap_vars:
+ nowrap = False
+ for nowrap_re in nowrap_vars_res:
+ if nowrap_re.match(name):
+ nowrap = True
+ break
+ if nowrap:
addlines.append(rawtext)
elif name in list_vars:
- splitvalue = split_var_value(values[name], assignment=False)
+ splitvalue = split_var_value(value, assignment=False)
if len(splitvalue) > 1:
linesplit = ' \\\n' + (' ' * (len(name) + 4))
- addlines.append('%s = "%s%s"%s' % (name, linesplit.join(splitvalue), linesplit, newline))
+ addlines.append('%s %s "%s%s"%s' % (name, op, linesplit.join(splitvalue), linesplit, newline))
else:
addlines.append(rawtext)
else:
@@ -188,6 +214,11 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
for wrapline in wrapped[:-1]:
addlines.append('%s \\%s' % (wrapline, newline))
addlines.append('%s%s' % (wrapped[-1], newline))
+
+ # Split on newlines - this isn't strictly necessary if you are only
+ # going to write the output to disk, but if you want to compare it
+ # (as patch_recipe_file() will do if patch=True) then it's important.
+ addlines = [line for l in addlines for line in l.splitlines(True)]
if rewindcomments:
# Ensure we insert the lines before any leading comments
# (that we'd want to ensure remain leading the next value)
@@ -237,7 +268,7 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
return changed, tolines
-def patch_recipe_file(fn, values, patch=False, relpath=''):
+def patch_recipe_file(fn, values, patch=False, relpath='', redirect_output=None):
"""Update or insert variable values into a recipe file (assuming you
have already identified the exact file you want to update.)
Note that some manual inspection/intervention may be required
@@ -249,7 +280,11 @@ def patch_recipe_file(fn, values, patch=False, relpath=''):
_, tolines = patch_recipe_lines(fromlines, values)
- if patch:
+ if redirect_output:
+ with open(os.path.join(redirect_output, os.path.basename(fn)), 'w') as f:
+ f.writelines(tolines)
+ return None
+ elif patch:
relfn = os.path.relpath(fn, relpath)
diff = difflib.unified_diff(fromlines, tolines, 'a/%s' % relfn, 'b/%s' % relfn)
return diff
@@ -299,17 +334,52 @@ def localise_file_vars(fn, varfiles, varlist):
return filevars
-def patch_recipe(d, fn, varvalues, patch=False, relpath=''):
+def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None):
"""Modify a list of variable values in the specified recipe. Handles inc files if
used by the recipe.
"""
+ overrides = d.getVar('OVERRIDES').split(':')
+ def override_applicable(hevent):
+ op = hevent['op']
+ if '[' in op:
+ opoverrides = op.split('[')[1].split(']')[0].split(':')
+ for opoverride in opoverrides:
+ if not opoverride in overrides:
+ return False
+ return True
+
varlist = varvalues.keys()
+ fn = os.path.abspath(fn)
varfiles = get_var_files(fn, varlist, d)
locs = localise_file_vars(fn, varfiles, varlist)
patches = []
for f,v in locs.items():
vals = {k: varvalues[k] for k in v}
- patchdata = patch_recipe_file(f, vals, patch, relpath)
+ f = os.path.abspath(f)
+ if f == fn:
+ extravals = {}
+ for var, value in vals.items():
+ if var in list_vars:
+ history = simplify_history(d.varhistory.variable(var), d)
+ recipe_set = False
+ for event in history:
+ if os.path.abspath(event['file']) == fn:
+ if event['op'] == 'set':
+ recipe_set = True
+ if not recipe_set:
+ for event in history:
+ if event['op'].startswith(':remove'):
+ continue
+ if not override_applicable(event):
+ continue
+ newvalue = value.replace(event['detail'], '')
+ if newvalue == value and os.path.abspath(event['file']) == fn and event['op'].startswith(':'):
+ op = event['op'].replace('[', ':').replace(']', '')
+ extravals[var + op] = None
+ value = newvalue
+ vals[var] = ('+=', value)
+ vals.update(extravals)
+ patchdata = patch_recipe_file(f, vals, patch, relpath, redirect_output)
if patch:
patches.append(patchdata)
@@ -320,7 +390,7 @@ def patch_recipe(d, fn, varvalues, patch=False, relpath=''):
-def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True):
+def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True, all_variants=False):
"""Copy (local) recipe files, including both files included via include/require,
and files referred to in the SRC_URI variable."""
import bb.fetch2
@@ -328,18 +398,43 @@ def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True):
# FIXME need a warning if the unexpanded SRC_URI value contains variable references
- uris = (d.getVar('SRC_URI') or "").split()
- fetch = bb.fetch2.Fetch(uris, d)
- if download:
- fetch.download()
+ uri_values = []
+ localpaths = []
+ def fetch_urls(rdata):
+ # Collect the local paths from SRC_URI
+ srcuri = rdata.getVar('SRC_URI') or ""
+ if srcuri not in uri_values:
+ fetch = bb.fetch2.Fetch(srcuri.split(), rdata)
+ if download:
+ fetch.download()
+ for pth in fetch.localpaths():
+ if pth not in localpaths:
+ localpaths.append(os.path.abspath(pth))
+ uri_values.append(srcuri)
+
+ fetch_urls(d)
+ if all_variants:
+ # Get files for other variants e.g. in the case of a SRC_URI:append
+ localdata = bb.data.createCopy(d)
+ variants = (localdata.getVar('BBCLASSEXTEND') or '').split()
+ if variants:
+ # Ensure we handle class-target if we're dealing with one of the variants
+ variants.append('target')
+ for variant in variants:
+ if variant.startswith("devupstream"):
+ localdata.setVar('SRCPV', 'git')
+ localdata.setVar('CLASSOVERRIDE', 'class-%s' % variant)
+ fetch_urls(localdata)
# Copy local files to target directory and gather any remote files
- bb_dir = os.path.dirname(d.getVar('FILE')) + os.sep
+ bb_dir = os.path.abspath(os.path.dirname(d.getVar('FILE'))) + os.sep
remotes = []
copied = []
- includes = [path for path in d.getVar('BBINCLUDED').split() if
- path.startswith(bb_dir) and os.path.exists(path)]
- for path in fetch.localpaths() + includes:
+ # Need to do this in two steps since we want to check against the absolute path
+ includes = [os.path.abspath(path) for path in d.getVar('BBINCLUDED').split() if os.path.exists(path)]
+ # We also check this below, but we don't want any items in this list being considered remotes
+ includes = [path for path in includes if path.startswith(bb_dir)]
+ for path in localpaths + includes:
# Only import files that are under the meta directory
if path.startswith(bb_dir):
if not whole_dir:
@@ -367,7 +462,7 @@ def get_recipe_local_files(d, patches=False, archives=False):
# fetcher) though note that this only encompasses actual container formats
# i.e. that can contain multiple files as opposed to those that only
# contain a compressed stream (i.e. .tar.gz as opposed to just .gz)
- archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
+ archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.txz', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
ret = {}
for uri in uris:
if fetch.ud[uri].type == 'file':
@@ -391,7 +486,14 @@ def get_recipe_local_files(d, patches=False, archives=False):
unpack = fetch.ud[uri].parm.get('unpack', True)
if unpack:
continue
- ret[fname] = localpath
+ if os.path.isdir(localpath):
+ for root, dirs, files in os.walk(localpath):
+ for fname in files:
+ fileabspath = os.path.join(root,fname)
+ srcdir = os.path.dirname(localpath)
+ ret[os.path.relpath(fileabspath,srcdir)] = fileabspath
+ else:
+ ret[fname] = localpath
return ret
@@ -461,6 +563,23 @@ def get_bbfile_path(d, destdir, extrapathhint=None):
confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
pn = d.getVar('PN')
+ # Parse BBFILES_DYNAMIC and append to BBFILES
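+    # BBFILES_DYNAMIC entries are of the form "collection:filename-pattern"
+    # (or "!collection:filename-pattern"), e.g. (illustrative)
+    #   meta-python:${LAYERDIR}/dynamic-layers/meta-python/recipes-*/*/*.bb
+    # The pattern is appended to BBFILES only when the named collection is
+    # (or, with "!", is not) listed in BBFILE_COLLECTIONS.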
+ bbfiles_dynamic = (confdata.getVar('BBFILES_DYNAMIC') or "").split()
+ collections = (confdata.getVar('BBFILE_COLLECTIONS') or "").split()
+ invalid = []
+ for entry in bbfiles_dynamic:
+ parts = entry.split(":", 1)
+ if len(parts) != 2:
+ invalid.append(entry)
+ continue
+ l, f = parts
+ invert = l[0] == "!"
+ if invert:
+ l = l[1:]
+ if (l in collections and not invert) or (l not in collections and invert):
+ confdata.appendVar("BBFILES", " " + f)
+ if invalid:
+ return None
bbfilespecs = (confdata.getVar('BBFILES') or '').split()
if destdir == destlayerdir:
for bbfilespec in bbfilespecs:
@@ -547,7 +666,7 @@ def get_bbappend_path(d, destlayerdir, wildcardver=False):
return (appendpath, pathok)
-def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None):
+def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None):
"""
Writes a bbappend file for a recipe
Parameters:
@@ -574,6 +693,9 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
value pairs, or simply a list of the lines.
removevalues:
Variable values to remove - a dict of names/values.
+ redirect_output:
+ If specified, redirects writing the output file to the
+ specified directory (for dry-run purposes)
"""
if not removevalues:
@@ -588,7 +710,8 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
appenddir = os.path.dirname(appendpath)
- bb.utils.mkdirhier(appenddir)
+ if not redirect_output:
+ bb.utils.mkdirhier(appenddir)
# FIXME check if the bbappend doesn't get overridden by a higher priority layer?
@@ -630,12 +753,12 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
destsubdir = rd.getVar('PN')
if srcfiles:
- bbappendlines.append(('FILESEXTRAPATHS_prepend', ':=', '${THISDIR}/${PN}:'))
+ bbappendlines.append(('FILESEXTRAPATHS:prepend', ':=', '${THISDIR}/${PN}:'))
appendoverride = ''
if machine:
bbappendlines.append(('PACKAGE_ARCH', '=', '${MACHINE_ARCH}'))
- appendoverride = '_%s' % machine
+ appendoverride = ':%s' % machine
copyfiles = {}
if srcfiles:
instfunclines = []
@@ -649,7 +772,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
# FIXME do we care if the entry is added by another bbappend that might go away?
if not srcurientry in rd.getVar('SRC_URI').split():
if machine:
- appendline('SRC_URI_append%s' % appendoverride, '=', ' ' + srcurientry)
+ appendline('SRC_URI:append%s' % appendoverride, '=', ' ' + srcurientry)
else:
appendline('SRC_URI', '+=', srcurientry)
copyfiles[newfile] = srcfile
@@ -663,24 +786,33 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
instfunclines.append(instdirline)
instfunclines.append('install -m %s ${WORKDIR}/%s ${D}%s' % (perms, os.path.basename(srcfile), instdestpath))
if instfunclines:
- bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines))
-
- bb.note('Writing append file %s' % appendpath)
+ bbappendlines.append(('do_install:append%s()' % appendoverride, '', instfunclines))
+
+ if redirect_output:
+ bb.note('Writing append file %s (dry-run)' % appendpath)
+ outfile = os.path.join(redirect_output, os.path.basename(appendpath))
+ # Only take a copy if the file isn't already there (this function may be called
+ # multiple times per operation when we're handling overrides)
+ if os.path.exists(appendpath) and not os.path.exists(outfile):
+ shutil.copy2(appendpath, outfile)
+ else:
+ bb.note('Writing append file %s' % appendpath)
+ outfile = appendpath
- if os.path.exists(appendpath):
+ if os.path.exists(outfile):
# Work around lack of nonlocal in python 2
extvars = {'destsubdir': destsubdir}
def appendfile_varfunc(varname, origvalue, op, newlines):
- if varname == 'FILESEXTRAPATHS_prepend':
+ if varname == 'FILESEXTRAPATHS:prepend':
if origvalue.startswith('${THISDIR}/'):
- popline('FILESEXTRAPATHS_prepend')
+ popline('FILESEXTRAPATHS:prepend')
extvars['destsubdir'] = rd.expand(origvalue.split('${THISDIR}/', 1)[1].rstrip(':'))
elif varname == 'PACKAGE_ARCH':
if machine:
popline('PACKAGE_ARCH')
return (machine, None, 4, False)
- elif varname.startswith('do_install_append'):
+ elif varname.startswith('do_install:append'):
func = popline(varname)
if func:
instfunclines = [line.strip() for line in origvalue.strip('\n').splitlines()]
@@ -692,7 +824,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
splitval = split_var_value(origvalue, assignment=False)
changed = False
removevar = varname
- if varname in ['SRC_URI', 'SRC_URI_append%s' % appendoverride]:
+ if varname in ['SRC_URI', 'SRC_URI:append%s' % appendoverride]:
removevar = 'SRC_URI'
line = popline(varname)
if line:
@@ -721,11 +853,11 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
newvalue = splitval
if len(newvalue) == 1:
# Ensure it's written out as one line
- if '_append' in varname:
+ if ':append' in varname:
newvalue = ' ' + newvalue[0]
else:
newvalue = newvalue[0]
- if not newvalue and (op in ['+=', '.='] or '_append' in varname):
+ if not newvalue and (op in ['+=', '.='] or ':append' in varname):
# There's no point appending nothing
newvalue = None
if varname.endswith('()'):
@@ -739,7 +871,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
if removevalues:
varnames.extend(list(removevalues.keys()))
- with open(appendpath, 'r') as f:
+ with open(outfile, 'r') as f:
(updated, newlines) = bb.utils.edit_metadata(f, varnames, appendfile_varfunc)
destsubdir = extvars['destsubdir']
@@ -756,20 +888,27 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
updated = True
if updated:
- with open(appendpath, 'w') as f:
+ with open(outfile, 'w') as f:
f.writelines(newlines)
if copyfiles:
if machine:
destsubdir = os.path.join(destsubdir, machine)
+ if redirect_output:
+ outdir = redirect_output
+ else:
+ outdir = appenddir
for newfile, srcfile in copyfiles.items():
- filedest = os.path.join(appenddir, destsubdir, os.path.basename(srcfile))
+ filedest = os.path.join(outdir, destsubdir, os.path.basename(srcfile))
if os.path.abspath(newfile) != os.path.abspath(filedest):
if newfile.startswith(tempfile.gettempdir()):
newfiledisp = os.path.basename(newfile)
else:
newfiledisp = newfile
- bb.note('Copying %s to %s' % (newfiledisp, filedest))
+ if redirect_output:
+ bb.note('Copying %s to %s (dry-run)' % (newfiledisp, os.path.join(appenddir, destsubdir, os.path.basename(srcfile))))
+ else:
+ bb.note('Copying %s to %s' % (newfiledisp, filedest))
bb.utils.mkdirhier(os.path.dirname(filedest))
shutil.copyfile(newfile, filedest)
@@ -778,7 +917,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
def find_layerdir(fn):
""" Figure out the path to the base of the layer containing a file (e.g. a recipe)"""
- pth = fn
+ pth = os.path.abspath(fn)
layerdir = ''
while pth:
if os.path.exists(os.path.join(pth, 'conf', 'layer.conf')):
@@ -814,7 +953,7 @@ def get_recipe_pv_without_srcpv(pv, uri_type):
sfx = ''
if uri_type == 'git':
- git_regex = re.compile("(?P<pfx>v?)(?P<ver>[^\+]*)((?P<sfx>\+(git)?r?(AUTOINC\+))(?P<rev>.*))?")
+ git_regex = re.compile(r"(?P<pfx>v?)(?P<ver>.*?)(?P<sfx>\+[^\+]*(git)?r?(AUTOINC\+))(?P<rev>.*)")
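+        # e.g. (illustrative) PV "1.2+gitAUTOINC+abcdef123456" is split into
+        # ver "1.2", sfx "+gitAUTOINC+" and rev "abcdef123456"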
m = git_regex.match(pv)
if m:
@@ -822,7 +961,7 @@ def get_recipe_pv_without_srcpv(pv, uri_type):
pfx = m.group('pfx')
sfx = m.group('sfx')
else:
- regex = re.compile("(?P<pfx>(v|r)?)(?P<ver>.*)")
+ regex = re.compile(r"(?P<pfx>(v|r)?)(?P<ver>.*)")
m = regex.match(pv)
if m:
pv = m.group('ver')
@@ -839,25 +978,25 @@ def get_recipe_upstream_version(rd):
FetchError when don't have network access or upstream site don't response.
NoMethodError when uri latest_versionstring method isn't implemented.
- Returns a dictonary with version, type and datetime.
+    Returns a dictionary with version, repository revision, current_version, type and datetime.
Type can be A for Automatic, M for Manual and U for Unknown.
"""
from bb.fetch2 import decodeurl
from datetime import datetime
ru = {}
+ ru['current_version'] = rd.getVar('PV')
ru['version'] = ''
ru['type'] = 'U'
ru['datetime'] = ''
-
- pv = rd.getVar('PV')
+ ru['revision'] = ''
# XXX: If don't have SRC_URI means that don't have upstream sources so
# returns the current recipe version, so that upstream version check
# declares a match.
src_uris = rd.getVar('SRC_URI')
if not src_uris:
- ru['version'] = pv
+ ru['version'] = ru['current_version']
ru['type'] = 'M'
ru['datetime'] = datetime.now()
return ru
@@ -866,6 +1005,9 @@ def get_recipe_upstream_version(rd):
src_uri = src_uris.split()[0]
uri_type, _, _, _, _, _ = decodeurl(src_uri)
+ (pv, pfx, sfx) = get_recipe_pv_without_srcpv(rd.getVar('PV'), uri_type)
+ ru['current_version'] = pv
+
manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION")
if manual_upstream_version:
# manual tracking of upstream version.
@@ -886,33 +1028,118 @@ def get_recipe_upstream_version(rd):
ru['datetime'] = datetime.now()
else:
ud = bb.fetch2.FetchData(src_uri, rd)
- pupver = ud.method.latest_versionstring(ud, rd)
- (upversion, revision) = pupver
+ if rd.getVar("UPSTREAM_CHECK_COMMITS") == "1":
+ bb.fetch2.get_srcrev(rd)
+ revision = ud.method.latest_revision(ud, rd, 'default')
+ upversion = pv
+ if revision != rd.getVar("SRCREV"):
+ upversion = upversion + "-new-commits-available"
+ else:
+ pupver = ud.method.latest_versionstring(ud, rd)
+ (upversion, revision) = pupver
+
+ if upversion:
+ ru['version'] = upversion
+ ru['type'] = 'A'
+
+ if revision:
+ ru['revision'] = revision
+
+ ru['datetime'] = datetime.now()
- # format git version version+gitAUTOINC+HASH
- if uri_type == 'git':
- (pv, pfx, sfx) = get_recipe_pv_without_srcpv(pv, uri_type)
+ return ru
- # if contains revision but not upversion use current pv
- if upversion == '' and revision:
- upversion = pv
+def _get_recipe_upgrade_status(data):
+ uv = get_recipe_upstream_version(data)
- if upversion:
- tmp = upversion
- upversion = ''
+ pn = data.getVar('PN')
+ cur_ver = uv['current_version']
- if pfx:
- upversion = pfx + tmp
+ upstream_version_unknown = data.getVar('UPSTREAM_VERSION_UNKNOWN')
+ if not uv['version']:
+ status = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
+ else:
+ cmp = vercmp_string(uv['current_version'], uv['version'])
+ if cmp == -1:
+ status = "UPDATE" if not upstream_version_unknown else "KNOWN_BROKEN"
+ elif cmp == 0:
+ status = "MATCH" if not upstream_version_unknown else "KNOWN_BROKEN"
+ else:
+ status = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
+
+ next_ver = uv['version'] if uv['version'] else "N/A"
+ revision = uv['revision'] if uv['revision'] else "N/A"
+ maintainer = data.getVar('RECIPE_MAINTAINER')
+ no_upgrade_reason = data.getVar('RECIPE_NO_UPDATE_REASON')
+
+ return (pn, status, cur_ver, next_ver, maintainer, revision, no_upgrade_reason)
+
+def get_recipe_upgrade_status(recipes=None):
+ pkgs_list = []
+ data_copy_list = []
+ copy_vars = ('SRC_URI',
+ 'PV',
+ 'DL_DIR',
+ 'PN',
+ 'CACHE',
+ 'PERSISTENT_DIR',
+ 'BB_URI_HEADREVS',
+ 'UPSTREAM_CHECK_COMMITS',
+ 'UPSTREAM_CHECK_GITTAGREGEX',
+ 'UPSTREAM_CHECK_REGEX',
+ 'UPSTREAM_CHECK_URI',
+ 'UPSTREAM_VERSION_UNKNOWN',
+ 'RECIPE_MAINTAINER',
+ 'RECIPE_NO_UPDATE_REASON',
+ 'RECIPE_UPSTREAM_VERSION',
+ 'RECIPE_UPSTREAM_DATE',
+ 'CHECK_DATE',
+ 'FETCHCMD_bzr',
+ 'FETCHCMD_ccrc',
+ 'FETCHCMD_cvs',
+ 'FETCHCMD_git',
+ 'FETCHCMD_hg',
+ 'FETCHCMD_npm',
+ 'FETCHCMD_osc',
+ 'FETCHCMD_p4',
+ 'FETCHCMD_repo',
+ 'FETCHCMD_s3',
+ 'FETCHCMD_svn',
+ 'FETCHCMD_wget',
+ )
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False)
+
+ if not recipes:
+ recipes = tinfoil.all_recipe_files(variants=False)
+
+ for fn in recipes:
+ try:
+ if fn.startswith("/"):
+ data = tinfoil.parse_recipe_file(fn)
else:
- upversion = tmp
+ data = tinfoil.parse_recipe(fn)
+ except bb.providers.NoProvider:
+ bb.note(" No provider for %s" % fn)
+ continue
+
+ unreliable = data.getVar('UPSTREAM_CHECK_UNRELIABLE')
+ if unreliable == "1":
+ bb.note(" Skip package %s as upstream check unreliable" % pn)
+ continue
- if sfx:
- upversion = upversion + sfx + revision[:10]
+ data_copy = bb.data.init()
+ for var in copy_vars:
+ data_copy.setVar(var, data.getVar(var))
+ for k in data:
+ if k.startswith('SRCREV'):
+ data_copy.setVar(k, data.getVar(k))
- if upversion:
- ru['version'] = upversion
- ru['type'] = 'A'
+ data_copy_list.append(data_copy)
- ru['datetime'] = datetime.now()
+ from concurrent.futures import ProcessPoolExecutor
+ with ProcessPoolExecutor(max_workers=utils.cpu_count()) as executor:
+ pkgs_list = executor.map(_get_recipe_upgrade_status, data_copy_list)
- return ru
+ return pkgs_list
diff --git a/meta/lib/oe/reproducible.py b/meta/lib/oe/reproducible.py
new file mode 100644
index 0000000000..35b8be6d08
--- /dev/null
+++ b/meta/lib/oe/reproducible.py
@@ -0,0 +1,192 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+import os
+import subprocess
+import bb
+
+# For reproducible builds, this code sets the default SOURCE_DATE_EPOCH in each
+# component's build environment. The format is number of seconds since the
+# system epoch.
+#
+# Upstream components (generally) respect this environment variable,
+# using it in place of the "current" date and time.
+# See https://reproducible-builds.org/specs/source-date-epoch/
+#
+# The default value of SOURCE_DATE_EPOCH comes from the function
+# get_source_date_epoch_value which reads from the SDE_FILE, or if the file
+# is not available will use the fallback of SOURCE_DATE_EPOCH_FALLBACK.
+#
+# The SDE_FILE is normally constructed from the function
+# create_source_date_epoch_stamp which is typically added as a postfunc to
+# the do_unpack task. If a recipe does NOT have do_unpack, it should be added
+# to a task that runs after the source is available and before the
+# do_deploy_source_date_epoch task is executed.
+#
+# If a recipe wishes to override the default behavior it should set its own
+# SOURCE_DATE_EPOCH or override the do_deploy_source_date_epoch_stamp task
+# with recipe-specific functionality to write the appropriate
+# SOURCE_DATE_EPOCH into the SDE_FILE.
+#
+# SOURCE_DATE_EPOCH is intended to be a reproducible value. This value should
+# be reproducible for anyone who builds the same revision from the same
+# sources.
+#
+# There are 5 ways the create_source_date_epoch_stamp function determines what
+# becomes SOURCE_DATE_EPOCH:
+#
+# 1. Use the value from the __source_date_epoch.txt file if this file exists.
+# This file was most likely created in the previous build by one of the
+# following methods 2,3,4.
+# Alternatively, it can be provided by a recipe via SRC_URI.
+#
+# If the file does not exist:
+#
+# 2. If there is a git checkout, use the last git commit timestamp.
+# Git does not preserve file timestamps on checkout.
+#
+# 3. Use the mtime of "known" files such as NEWS, CHANGELOG, ...
+# This works for well-kept repositories distributed via tarball.
+#
+# 4. Use the modification time of the youngest file in the source tree, if
+# there is one.
+# This will be the newest file from the distribution tarball, if any.
+#
+# 5. Fall back to a fixed timestamp (SOURCE_DATE_EPOCH_FALLBACK).
+#
+# Once the value is determined, it is stored in the recipe's SDE_FILE.
+
+def get_source_date_epoch_from_known_files(d, sourcedir):
+ source_date_epoch = None
+ newest_file = None
+ known_files = set(["NEWS", "ChangeLog", "Changelog", "CHANGES"])
+ for file in known_files:
+ filepath = os.path.join(sourcedir, file)
+ if os.path.isfile(filepath):
+ mtime = int(os.lstat(filepath).st_mtime)
+            # There may be more than one "known_file" present; if so, use the youngest one
+ if not source_date_epoch or mtime > source_date_epoch:
+ source_date_epoch = mtime
+ newest_file = filepath
+ if newest_file:
+ bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % newest_file)
+ return source_date_epoch
+
+def find_git_folder(d, sourcedir):
+ # First guess: WORKDIR/git
+ # This is the default git fetcher unpack path
+ workdir = d.getVar('WORKDIR')
+ gitpath = os.path.join(workdir, "git/.git")
+ if os.path.isdir(gitpath):
+ return gitpath
+
+ # Second guess: ${S}
+ gitpath = os.path.join(sourcedir, ".git")
+ if os.path.isdir(gitpath):
+ return gitpath
+
+ # Perhaps there was a subpath or destsuffix specified.
+ # Go looking in the WORKDIR
+ exclude = set(["build", "image", "license-destdir", "patches", "pseudo",
+ "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"])
+ for root, dirs, files in os.walk(workdir, topdown=True):
+ dirs[:] = [d for d in dirs if d not in exclude]
+ if '.git' in dirs:
+ return os.path.join(root, ".git")
+
+ bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
+ return None
+
+def get_source_date_epoch_from_git(d, sourcedir):
+ if not "git://" in d.getVar('SRC_URI') and not "gitsm://" in d.getVar('SRC_URI'):
+ return None
+
+ gitpath = find_git_folder(d, sourcedir)
+ if not gitpath:
+ return None
+
+ # Check that the repository has a valid HEAD; it may not if subdir is used
+ # in SRC_URI
+ p = subprocess.run(['git', '--git-dir', gitpath, 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ if p.returncode != 0:
+ bb.debug(1, "%s does not have a valid HEAD: %s" % (gitpath, p.stdout.decode('utf-8')))
+ return None
+
+ bb.debug(1, "git repository: %s" % gitpath)
+ p = subprocess.run(['git', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'], check=True, stdout=subprocess.PIPE)
+ return int(p.stdout.decode('utf-8'))
+
+def get_source_date_epoch_from_youngest_file(d, sourcedir):
+ if sourcedir == d.getVar('WORKDIR'):
+ # These sources are almost certainly not from a tarball
+ return None
+
+ # Do it the hard way: check all files and find the youngest one...
+ source_date_epoch = None
+ newest_file = None
+ for root, dirs, files in os.walk(sourcedir, topdown=True):
+ files = [f for f in files if not f[0] == '.']
+
+ for fname in files:
+ filename = os.path.join(root, fname)
+ try:
+ mtime = int(os.lstat(filename).st_mtime)
+ except ValueError:
+ mtime = 0
+ if not source_date_epoch or mtime > source_date_epoch:
+ source_date_epoch = mtime
+ newest_file = filename
+
+ if newest_file:
+ bb.debug(1, "Newest file found: %s" % newest_file)
+ return source_date_epoch
+
+def fixed_source_date_epoch(d):
+ bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
+ source_date_epoch = d.getVar('SOURCE_DATE_EPOCH_FALLBACK')
+ if source_date_epoch:
+ bb.debug(1, "Using SOURCE_DATE_EPOCH_FALLBACK")
+ return int(source_date_epoch)
+ return 0
+
+def get_source_date_epoch(d, sourcedir):
+ return (
+ get_source_date_epoch_from_git(d, sourcedir) or
+ get_source_date_epoch_from_known_files(d, sourcedir) or
+ get_source_date_epoch_from_youngest_file(d, sourcedir) or
+ fixed_source_date_epoch(d) # Last resort
+ )
+
+def epochfile_read(epochfile, d):
+ cached, efile = d.getVar('__CACHED_SOURCE_DATE_EPOCH') or (None, None)
+ if cached and efile == epochfile:
+ return cached
+
+ if cached and epochfile != efile:
+ bb.debug(1, "Epoch file changed from %s to %s" % (efile, epochfile))
+
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
+ try:
+ with open(epochfile, 'r') as f:
+ s = f.read()
+ try:
+ source_date_epoch = int(s)
+ except ValueError:
+ bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % s)
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
+ bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
+ except FileNotFoundError:
+ bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
+
+ d.setVar('__CACHED_SOURCE_DATE_EPOCH', (str(source_date_epoch), epochfile))
+ return str(source_date_epoch)
+
+def epochfile_write(source_date_epoch, epochfile, d):
+
+ bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
+ bb.utils.mkdirhier(os.path.dirname(epochfile))
+
+ tmp_file = "%s.new" % epochfile
+ with open(tmp_file, 'w') as f:
+ f.write(str(source_date_epoch))
+ os.rename(tmp_file, epochfile)
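A minimal sketch of how the helpers above are meant to chain together, assuming a BitBake datastore d, an unpacked source tree in S and an SDE_FILE path as described in the comment block; the deploy_sde wrapper name is invented for illustration.

import oe.reproducible

def deploy_sde(d):
    # Hypothetical wrapper for illustration: resolve SOURCE_DATE_EPOCH for the
    # unpacked sources and persist it to the recipe's SDE_FILE stamp.
    sourcedir = d.getVar('S')
    sde_file = d.getVar('SDE_FILE')
    sde = oe.reproducible.get_source_date_epoch(d, sourcedir)
    oe.reproducible.epochfile_write(sde, sde_file, d)
    # Consumers read it back later; a missing file falls back to
    # SOURCE_DATE_EPOCH_FALLBACK inside epochfile_read().
    return oe.reproducible.epochfile_read(sde_file, d)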
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
index 5e1c09762a..98cf3f244d 100644
--- a/meta/lib/oe/rootfs.py
+++ b/meta/lib/oe/rootfs.py
@@ -1,15 +1,16 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.package_manager import *
from oe.manifest import *
import oe.path
-import filecmp
import shutil
import os
import subprocess
import re
-
class Rootfs(object, metaclass=ABCMeta):
"""
This is an abstract class. Do not instantiate this directly.
@@ -48,6 +49,8 @@ class Rootfs(object, metaclass=ABCMeta):
excludes = [ 'log_check', r'^\+' ]
if hasattr(self, 'log_check_expected_regexes'):
excludes.extend(self.log_check_expected_regexes)
+ # Insert custom log_check excludes
+ excludes += [x for x in (self.d.getVar("IMAGE_LOG_CHECK_EXCLUDES") or "").split(" ") if x]
excludes = [re.compile(x) for x in excludes]
r = re.compile(match)
log_path = self.d.expand("${T}/log.do_rootfs")
@@ -92,10 +95,6 @@ class Rootfs(object, metaclass=ABCMeta):
self.d.getVar('PACKAGE_FEED_ARCHS'))
- @abstractmethod
- def _handle_intercept_failure(self, failed_script):
- pass
-
"""
The _cleanup() method should be used to clean-up stuff that we don't really
want to end up on target. For example, in the case of RPM, the DB locks.
@@ -115,7 +114,7 @@ class Rootfs(object, metaclass=ABCMeta):
shutil.rmtree(self.image_rootfs + '-orig')
except:
pass
- os.rename(self.image_rootfs, self.image_rootfs + '-orig')
+ bb.utils.rename(self.image_rootfs, self.image_rootfs + '-orig')
bb.note(" Creating debug rootfs...")
bb.utils.mkdirhier(self.image_rootfs)
@@ -127,17 +126,16 @@ class Rootfs(object, metaclass=ABCMeta):
bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(dir))
shutil.copytree(self.image_rootfs + '-orig' + dir, self.image_rootfs + dir, symlinks=True)
- cpath = oe.cachedpath.CachedPath()
# Copy files located in /usr/lib/debug or /usr/src/debug
for dir in ["/usr/lib/debug", "/usr/src/debug"]:
src = self.image_rootfs + '-orig' + dir
- if cpath.exists(src):
+ if os.path.exists(src):
dst = self.image_rootfs + dir
bb.utils.mkdirhier(os.path.dirname(dst))
shutil.copytree(src, dst)
# Copy files with suffix '.debug' or located in '.debug' dir.
- for root, dirs, files in cpath.walk(self.image_rootfs + '-orig'):
+ for root, dirs, files in os.walk(self.image_rootfs + '-orig'):
relative_dir = root[len(self.image_rootfs + '-orig'):]
for f in files:
if f.endswith('.debug') or '/.debug' in relative_dir:
@@ -148,15 +146,29 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note(" Install complementary '*-dbg' packages...")
self.pm.install_complementary('*-dbg')
+ if self.d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ bb.note(" Install complementary '*-src' packages...")
+ self.pm.install_complementary('*-src')
+
+ """
+        Install additional debug packages: packages that are not automatically
+        installed as complementary packages of the standard ones, e.g. the
+        debug packages of static libraries.
+ """
+ extra_debug_pkgs = self.d.getVar('IMAGE_INSTALL_DEBUGFS')
+ if extra_debug_pkgs:
+ bb.note(" Install extra debug packages...")
+ self.pm.install(extra_debug_pkgs.split(), True)
+
bb.note(" Rename debug rootfs...")
try:
shutil.rmtree(self.image_rootfs + '-dbg')
except:
pass
- os.rename(self.image_rootfs, self.image_rootfs + '-dbg')
+ bb.utils.rename(self.image_rootfs, self.image_rootfs + '-dbg')
- bb.note(" Restoreing original rootfs...")
- os.rename(self.image_rootfs + '-orig', self.image_rootfs)
+ bb.note(" Restoring original rootfs...")
+ bb.utils.rename(self.image_rootfs + '-orig', self.image_rootfs)
def _exec_shell_cmd(self, cmd):
fakerootcmd = self.d.getVar('FAKEROOT')
@@ -178,20 +190,6 @@ class Rootfs(object, metaclass=ABCMeta):
post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
- postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR")
- if not postinst_intercepts_dir:
- postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
- intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
-
- bb.utils.remove(intercepts_dir, True)
-
- bb.utils.mkdirhier(self.image_rootfs)
-
- bb.utils.mkdirhier(self.deploydir)
-
- shutil.copytree(postinst_intercepts_dir, intercepts_dir)
-
execute_pre_post_process(self.d, pre_process_cmds)
if self.progress_reporter:
@@ -207,7 +205,7 @@ class Rootfs(object, metaclass=ABCMeta):
execute_pre_post_process(self.d, rootfs_post_install_cmds)
- self._run_intercepts()
+ self.pm.run_intercepts()
execute_pre_post_process(self.d, post_process_cmds)
@@ -215,6 +213,9 @@ class Rootfs(object, metaclass=ABCMeta):
self.progress_reporter.next_stage()
if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d) and \
+ not bb.utils.contains("IMAGE_FEATURES",
+ "read-only-rootfs-delayed-postinsts",
True, False, self.d):
delayed_postinsts = self._get_delayed_postinsts()
if delayed_postinsts is not None:
@@ -245,13 +246,11 @@ class Rootfs(object, metaclass=ABCMeta):
def _uninstall_unneeded(self):
- # Remove unneeded init script symlinks
+ # Remove the run-postinsts package if no delayed postinsts are found
delayed_postinsts = self._get_delayed_postinsts()
if delayed_postinsts is None:
- if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
- self._exec_shell_cmd(["update-rc.d", "-f", "-r",
- self.d.getVar('IMAGE_ROOTFS'),
- "run-postinsts", "remove"])
+ if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")) or os.path.exists(self.d.expand("${IMAGE_ROOTFS}${systemd_system_unitdir}/run-postinsts.service")):
+ self.pm.remove(["run-postinsts"])
image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
True, False, self.d)
@@ -261,15 +260,22 @@ class Rootfs(object, metaclass=ABCMeta):
# Remove components that we don't need if it's a read-only rootfs
unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED").split()
pkgs_installed = image_list_installed_packages(self.d)
- # Make sure update-alternatives is last on the command line, so
- # that it is removed last. This makes sure that its database is
- # available while uninstalling packages, allowing alternative
- # symlinks of packages to be uninstalled to be managed correctly.
+            # Make sure update-alternatives is removed last. This is
+            # because its database has to be available while uninstalling
+            # other packages, allowing the alternative symlinks of packages
+            # being uninstalled to be managed correctly.
provider = self.d.getVar("VIRTUAL-RUNTIME_update-alternatives")
pkgs_to_remove = sorted([pkg for pkg in pkgs_installed if pkg in unneeded_pkgs], key=lambda x: x == provider)
+            # The update-alternatives provider is removed in its own remove()
+            # call because not all package managers guarantee that packages
+            # are removed in the order they are given in the list (which is
+            # passed to the command line). The sorting done earlier is
+            # used to implement the 2-stage removal.
+ if len(pkgs_to_remove) > 1:
+ self.pm.remove(pkgs_to_remove[:-1], False)
if len(pkgs_to_remove) > 0:
- self.pm.remove(pkgs_to_remove, False)
+ self.pm.remove([pkgs_to_remove[-1]], False)
if delayed_postinsts:
self._save_postinsts()
@@ -286,53 +292,26 @@ class Rootfs(object, metaclass=ABCMeta):
# Remove the package manager data files
self.pm.remove_packaging_data()
- def _run_intercepts(self):
- intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
-
- bb.note("Running intercept scripts:")
- os.environ['D'] = self.image_rootfs
- os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
- for script in os.listdir(intercepts_dir):
- script_full = os.path.join(intercepts_dir, script)
-
- if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
- continue
-
- bb.note("> Executing %s intercept ..." % script)
-
- try:
- subprocess.check_output(script_full)
- except subprocess.CalledProcessError as e:
- bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details! (Output: %s)" %
- (script, e.returncode, e.output))
-
- with open(script_full) as intercept:
- registered_pkgs = None
- for line in intercept.read().split("\n"):
- m = re.match("^##PKGS:(.*)", line)
- if m is not None:
- registered_pkgs = m.group(1).strip()
- break
-
- if registered_pkgs is not None:
- bb.warn("The postinstalls for the following packages "
- "will be postponed for first boot: %s" %
- registered_pkgs)
-
- # call the backend dependent handler
- self._handle_intercept_failure(registered_pkgs)
-
def _run_ldconfig(self):
if self.d.getVar('LDCONFIGDEPEND'):
- bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v")
+ bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v -X")
self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
- 'new', '-v'])
+ 'new', '-v', '-X'])
+
+ image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d)
+ ldconfig_in_features = bb.utils.contains("DISTRO_FEATURES", "ldconfig",
+ True, False, self.d)
+ if image_rorfs or not ldconfig_in_features:
+ ldconfig_cache_dir = os.path.join(self.image_rootfs, "var/cache/ldconfig")
+ if os.path.exists(ldconfig_cache_dir):
+ bb.note("Removing ldconfig auxiliary cache...")
+ shutil.rmtree(ldconfig_cache_dir)
def _check_for_kernel_modules(self, modules_dir):
for root, dirs, files in os.walk(modules_dir, topdown=True):
for name in files:
- found_ko = name.endswith(".ko")
+ found_ko = name.endswith((".ko", ".ko.gz", ".ko.xz"))
if found_ko:
return found_ko
return False
@@ -381,610 +360,10 @@ class Rootfs(object, metaclass=ABCMeta):
self.image_rootfs, "-D", devtable])
-class RpmRootfs(Rootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
- super(RpmRootfs, self).__init__(d, progress_reporter, logcatcher)
- self.log_check_regex = '(unpacking of archive failed|Cannot find package'\
- '|exit 1|ERROR: |Error: |Error |ERROR '\
- '|Failed |Failed: |Failed$|Failed\(\d+\):)'
- self.manifest = RpmManifest(d, manifest_dir)
-
- self.pm = RpmPM(d,
- d.getVar('IMAGE_ROOTFS'),
- self.d.getVar('TARGET_VENDOR')
- )
-
- self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
- if self.inc_rpm_image_gen != "1":
- bb.utils.remove(self.image_rootfs, True)
- else:
- self.pm.recovery_packaging_data()
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
-
- self.pm.create_configs()
-
- '''
- While rpm incremental image generation is enabled, it will remove the
- unneeded pkgs by comparing the new install solution manifest and the
- old installed manifest.
- '''
- def _create_incremental(self, pkgs_initial_install):
- if self.inc_rpm_image_gen == "1":
-
- pkgs_to_install = list()
- for pkg_type in pkgs_initial_install:
- pkgs_to_install += pkgs_initial_install[pkg_type]
-
- installed_manifest = self.pm.load_old_install_solution()
- solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
-
- pkg_to_remove = list()
- for pkg in installed_manifest:
- if pkg not in solution_manifest:
- pkg_to_remove.append(pkg)
-
- self.pm.update()
-
- bb.note('incremental update -- upgrade packages in place ')
- self.pm.upgrade()
- if pkg_to_remove != []:
- bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
- self.pm.remove(pkg_to_remove)
-
- self.pm.autoremove()
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
- rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')
-
- # update PM index files
- self.pm.write_index()
-
- execute_pre_post_process(self.d, rpm_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- if self.inc_rpm_image_gen == "1":
- self._create_incremental(pkgs_to_install)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- pkgs = []
- pkgs_attempt = []
- for pkg_type in pkgs_to_install:
- if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
- pkgs_attempt += pkgs_to_install[pkg_type]
- else:
- pkgs += pkgs_to_install[pkg_type]
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install(pkgs)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install(pkgs_attempt, True)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
-
- execute_pre_post_process(self.d, rpm_post_process_cmds)
-
- if self.inc_rpm_image_gen == "1":
- self.pm.backup_packaging_data()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
-
- @staticmethod
- def _depends_list():
- return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
- 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
-
- def _get_delayed_postinsts(self):
- postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
- if os.path.isdir(postinst_dir):
- files = os.listdir(postinst_dir)
- for f in files:
- bb.note('Delayed package scriptlet: %s' % f)
- return files
-
- return None
-
- def _save_postinsts(self):
- # this is just a stub. For RPM, the failed postinstalls are
- # already saved in /etc/rpm-postinsts
- pass
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _handle_intercept_failure(self, registered_pkgs):
- rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
- bb.utils.mkdirhier(rpm_postinsts_dir)
-
- # Save the package postinstalls in /etc/rpm-postinsts
- for pkg in registered_pkgs.split():
- self.pm.save_rpmpostinst(pkg)
-
- def _cleanup(self):
- pass
-
-class DpkgOpkgRootfs(Rootfs):
- def __init__(self, d, progress_reporter=None, logcatcher=None):
- super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
-
- def _get_pkgs_postinsts(self, status_file):
- def _get_pkg_depends_list(pkg_depends):
- pkg_depends_list = []
- # filter version requirements like libc (>= 1.1)
- for dep in pkg_depends.split(', '):
- m_dep = re.match("^(.*) \(.*\)$", dep)
- if m_dep:
- dep = m_dep.group(1)
- pkg_depends_list.append(dep)
-
- return pkg_depends_list
-
- pkgs = {}
- pkg_name = ""
- pkg_status_match = False
- pkg_depends = ""
-
- with open(status_file) as status:
- data = status.read()
- status.close()
- for line in data.split('\n'):
- m_pkg = re.match("^Package: (.*)", line)
- m_status = re.match("^Status:.*unpacked", line)
- m_depends = re.match("^Depends: (.*)", line)
-
- if m_pkg is not None:
- if pkg_name and pkg_status_match:
- pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
-
- pkg_name = m_pkg.group(1)
- pkg_status_match = False
- pkg_depends = ""
- elif m_status is not None:
- pkg_status_match = True
- elif m_depends is not None:
- pkg_depends = m_depends.group(1)
-
- # remove package dependencies not in postinsts
- pkg_names = list(pkgs.keys())
- for pkg_name in pkg_names:
- deps = pkgs[pkg_name][:]
-
- for d in deps:
- if d not in pkg_names:
- pkgs[pkg_name].remove(d)
-
- return pkgs
-
- def _get_delayed_postinsts_common(self, status_file):
- def _dep_resolve(graph, node, resolved, seen):
- seen.append(node)
-
- for edge in graph[node]:
- if edge not in resolved:
- if edge in seen:
- raise RuntimeError("Packages %s and %s have " \
- "a circular dependency in postinsts scripts." \
- % (node, edge))
- _dep_resolve(graph, edge, resolved, seen)
-
- resolved.append(node)
-
- pkg_list = []
-
- pkgs = None
- if not self.d.getVar('PACKAGE_INSTALL').strip():
- bb.note("Building empty image")
- else:
- pkgs = self._get_pkgs_postinsts(status_file)
- if pkgs:
- root = "__packagegroup_postinst__"
- pkgs[root] = list(pkgs.keys())
- _dep_resolve(pkgs, root, pkg_list, [])
- pkg_list.remove(root)
-
- if len(pkg_list) == 0:
- return None
-
- return pkg_list
-
- def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
- num = 0
- for p in self._get_delayed_postinsts():
- bb.utils.mkdirhier(dst_postinst_dir)
-
- if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
- shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
- os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
-
- num += 1
-
-class DpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
- super(DpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
- self.log_check_regex = '^E:'
- self.log_check_expected_regexes = \
- [
- "^E: Unmet dependencies."
- ]
-
- bb.utils.remove(self.image_rootfs, True)
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
- self.manifest = DpkgManifest(d, manifest_dir)
- self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
- d.getVar('PACKAGE_ARCHS'),
- d.getVar('DPKG_ARCH'))
-
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
- deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')
-
- alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
- bb.utils.mkdirhier(alt_dir)
-
- # update PM index files
- self.pm.write_index()
-
- execute_pre_post_process(self.d, deb_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
- # Don't support incremental, so skip that
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- self.pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- if self.progress_reporter:
- # Don't support attemptonly, so skip that
- self.progress_reporter.next_stage()
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._setup_dbg_rootfs(['/var/lib/dpkg'])
-
- self.pm.fix_broken_dependencies()
-
- self.pm.mark_packages("installed")
-
- self.pm.run_pre_post_installs()
-
- execute_pre_post_process(self.d, deb_post_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- @staticmethod
- def _depends_list():
- return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']
-
- def _get_delayed_postinsts(self):
- status_file = self.image_rootfs + "/var/lib/dpkg/status"
- return self._get_delayed_postinsts_common(status_file)
-
- def _save_postinsts(self):
- dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
- src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
- return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
-
- def _handle_intercept_failure(self, registered_pkgs):
- self.pm.mark_packages("unpacked", registered_pkgs.split())
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _cleanup(self):
- pass
-
-
-class OpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
- super(OpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
- self.log_check_regex = '(exit 1|Collected errors)'
-
- self.manifest = OpkgManifest(d, manifest_dir)
- self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
- self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")
-
- self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
- if self._remove_old_rootfs():
- bb.utils.remove(self.image_rootfs, True)
- self.pm = OpkgPM(d,
- self.image_rootfs,
- self.opkg_conf,
- self.pkg_archs)
- else:
- self.pm = OpkgPM(d,
- self.image_rootfs,
- self.opkg_conf,
- self.pkg_archs)
- self.pm.recover_packaging_data()
-
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
-
- def _prelink_file(self, root_dir, filename):
- bb.note('prelink %s in %s' % (filename, root_dir))
- prelink_cfg = oe.path.join(root_dir,
- self.d.expand('${sysconfdir}/prelink.conf'))
- if not os.path.exists(prelink_cfg):
- shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
- prelink_cfg)
-
- cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
- self._exec_shell_cmd([cmd_prelink,
- '--root',
- root_dir,
- '-amR',
- '-N',
- '-c',
- self.d.expand('${sysconfdir}/prelink.conf')])
-
- '''
- Compare two files with the same key twice to see if they are equal.
- If they are not equal, it means they are duplicated and come from
- different packages.
- 1st: Comapre them directly;
- 2nd: While incremental image creation is enabled, one of the
- files could be probaly prelinked in the previous image
- creation and the file has been changed, so we need to
- prelink the other one and compare them.
- '''
- def _file_equal(self, key, f1, f2):
-
- # Both of them are not prelinked
- if filecmp.cmp(f1, f2):
- return True
-
- if self.image_rootfs not in f1:
- self._prelink_file(f1.replace(key, ''), f1)
-
- if self.image_rootfs not in f2:
- self._prelink_file(f2.replace(key, ''), f2)
-
- # Both of them are prelinked
- if filecmp.cmp(f1, f2):
- return True
-
- # Not equal
- return False
-
- """
- This function was reused from the old implementation.
- See commit: "image.bbclass: Added variables for multilib support." by
- Lianhao Lu.
- """
- def _multilib_sanity_test(self, dirs):
-
- allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP")
- if allow_replace is None:
- allow_replace = ""
-
- allow_rep = re.compile(re.sub("\|$", "", allow_replace))
- error_prompt = "Multilib check error:"
-
- files = {}
- for dir in dirs:
- for root, subfolders, subfiles in os.walk(dir):
- for file in subfiles:
- item = os.path.join(root, file)
- key = str(os.path.join("/", os.path.relpath(item, dir)))
-
- valid = True
- if key in files:
- #check whether the file is allow to replace
- if allow_rep.match(key):
- valid = True
- else:
- if os.path.exists(files[key]) and \
- os.path.exists(item) and \
- not self._file_equal(key, files[key], item):
- valid = False
- bb.fatal("%s duplicate files %s %s is not the same\n" %
- (error_prompt, item, files[key]))
-
- #pass the check, add to list
- if valid:
- files[key] = item
-
- def _multilib_test_install(self, pkgs):
- ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
- bb.utils.mkdirhier(ml_temp)
-
- dirs = [self.image_rootfs]
-
- for variant in self.d.getVar("MULTILIB_VARIANTS").split():
- ml_target_rootfs = os.path.join(ml_temp, variant)
-
- bb.utils.remove(ml_target_rootfs, True)
-
- ml_opkg_conf = os.path.join(ml_temp,
- variant + "-" + os.path.basename(self.opkg_conf))
-
- ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)
-
- ml_pm.update()
- ml_pm.install(pkgs)
-
- dirs.append(ml_target_rootfs)
-
- self._multilib_sanity_test(dirs)
-
- '''
- While ipk incremental image generation is enabled, it will remove the
- unneeded pkgs by comparing the old full manifest in previous existing
- image and the new full manifest in the current image.
- '''
- def _remove_extra_packages(self, pkgs_initial_install):
- if self.inc_opkg_image_gen == "1":
- # Parse full manifest in previous existing image creation session
- old_full_manifest = self.manifest.parse_full_manifest()
-
- # Create full manifest for the current image session, the old one
- # will be replaced by the new one.
- self.manifest.create_full(self.pm)
-
- # Parse full manifest in current image creation session
- new_full_manifest = self.manifest.parse_full_manifest()
-
- pkg_to_remove = list()
- for pkg in old_full_manifest:
- if pkg not in new_full_manifest:
- pkg_to_remove.append(pkg)
-
- if pkg_to_remove != []:
- bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
- self.pm.remove(pkg_to_remove)
-
- '''
- Compare with previous existing image creation, if some conditions
- triggered, the previous old image should be removed.
- The conditions include any of 'PACKAGE_EXCLUDE, NO_RECOMMENDATIONS
- and BAD_RECOMMENDATIONS' has been changed.
- '''
- def _remove_old_rootfs(self):
- if self.inc_opkg_image_gen != "1":
- return True
-
- vars_list_file = self.d.expand('${T}/vars_list')
-
- old_vars_list = ""
- if os.path.exists(vars_list_file):
- old_vars_list = open(vars_list_file, 'r+').read()
-
- new_vars_list = '%s:%s:%s\n' % \
- ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
- (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
- (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
- open(vars_list_file, 'w+').write(new_vars_list)
-
- if old_vars_list != new_vars_list:
- return True
-
- return False
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
- opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')
-
- # update PM index files, unless users provide their own feeds
- if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
- self.pm.write_index()
-
- execute_pre_post_process(self.d, opkg_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
- # Steps are a bit different in order, skip next
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- self.pm.handle_bad_recommendations()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- if self.inc_opkg_image_gen == "1":
- self._remove_extra_packages(pkgs_to_install)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- # For multilib, we perform a sanity test before final install
- # If sanity test fails, it will automatically do a bb.fatal()
- # and the installation will stop
- if pkg_type == Manifest.PKG_TYPE_MULTILIB:
- self._multilib_test_install(pkgs_to_install[pkg_type])
-
- self.pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._setup_dbg_rootfs(['/var/lib/opkg'])
-
- execute_pre_post_process(self.d, opkg_post_process_cmds)
-
- if self.inc_opkg_image_gen == "1":
- self.pm.backup_packaging_data()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- @staticmethod
- def _depends_list():
- return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR']
-
- def _get_delayed_postinsts(self):
- status_file = os.path.join(self.image_rootfs,
- self.d.getVar('OPKGLIBDIR').strip('/'),
- "opkg", "status")
- return self._get_delayed_postinsts_common(status_file)
-
- def _save_postinsts(self):
- dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
- src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
- return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
-
- def _handle_intercept_failure(self, registered_pkgs):
- self.pm.mark_packages("unpacked", registered_pkgs.split())
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _cleanup(self):
- self.pm.remove_lists()
-
def get_class_for_type(imgtype):
- return {"rpm": RpmRootfs,
- "ipk": OpkgRootfs,
- "deb": DpkgRootfs}[imgtype]
+ import importlib
+ mod = importlib.import_module('oe.package_manager.' + imgtype + '.rootfs')
+ return mod.PkgRootfs
def variable_depends(d, manifest_dir=None):
img_type = d.getVar('IMAGE_PKGTYPE')
@@ -995,13 +374,9 @@ def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None)
env_bkp = os.environ.copy()
img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- RpmRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
- elif img_type == "ipk":
- OpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
- elif img_type == "deb":
- DpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
+ cls = get_class_for_type(img_type)
+ cls(d, manifest_dir, progress_reporter, logcatcher).create()
os.environ.clear()
os.environ.update(env_bkp)
@@ -1011,12 +386,10 @@ def image_list_installed_packages(d, rootfs_dir=None):
rootfs_dir = d.getVar('IMAGE_ROOTFS')
img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- return RpmPkgsList(d, rootfs_dir).list_pkgs()
- elif img_type == "ipk":
- return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET")).list_pkgs()
- elif img_type == "deb":
- return DpkgPkgsList(d, rootfs_dir).list_pkgs()
+
+ import importlib
+ cls = importlib.import_module('oe.package_manager.' + img_type)
+ return cls.PMPkgsList(d, rootfs_dir).list_pkgs()
if __name__ == "__main__":
"""
diff --git a/meta/lib/oe/rust.py b/meta/lib/oe/rust.py
new file mode 100644
index 0000000000..ec70b34805
--- /dev/null
+++ b/meta/lib/oe/rust.py
@@ -0,0 +1,5 @@
+# Handle mismatches between `uname -m`-style output and Rust's arch names
+def arch_to_rust_arch(arch):
+ if arch == "ppc64le":
+ return "powerpc64le"
+ return arch
diff --git a/meta/lib/oe/sbom.py b/meta/lib/oe/sbom.py
new file mode 100644
index 0000000000..3372f13a9d
--- /dev/null
+++ b/meta/lib/oe/sbom.py
@@ -0,0 +1,78 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import collections
+
+DepRecipe = collections.namedtuple("DepRecipe", ("doc", "doc_sha1", "recipe"))
+DepSource = collections.namedtuple("DepSource", ("doc", "doc_sha1", "recipe", "file"))
+
+
+def get_recipe_spdxid(d):
+ return "SPDXRef-%s-%s" % ("Recipe", d.getVar("PN"))
+
+
+def get_package_spdxid(pkg):
+ return "SPDXRef-Package-%s" % pkg
+
+
+def get_source_file_spdxid(d, idx):
+ return "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), idx)
+
+
+def get_packaged_file_spdxid(pkg, idx):
+ return "SPDXRef-PackagedFile-%s-%d" % (pkg, idx)
+
+
+def get_image_spdxid(img):
+ return "SPDXRef-Image-%s" % img
+
+
+def get_sdk_spdxid(sdk):
+ return "SPDXRef-SDK-%s" % sdk
+
+
+def write_doc(d, spdx_doc, subdir, spdx_deploy=None):
+ from pathlib import Path
+
+ if spdx_deploy is None:
+ spdx_deploy = Path(d.getVar("SPDXDEPLOY"))
+
+ dest = spdx_deploy / subdir / (spdx_doc.name + ".spdx.json")
+ dest.parent.mkdir(exist_ok=True, parents=True)
+ with dest.open("wb") as f:
+ doc_sha1 = spdx_doc.to_json(f, sort_keys=True)
+
+ l = spdx_deploy / "by-namespace" / spdx_doc.documentNamespace.replace("/", "_")
+ l.parent.mkdir(exist_ok=True, parents=True)
+ l.symlink_to(os.path.relpath(dest, l.parent))
+
+ return doc_sha1
+
+
+def read_doc(fn):
+ import hashlib
+ import oe.spdx
+ import io
+ import contextlib
+
+ @contextlib.contextmanager
+ def get_file():
+ if isinstance(fn, io.IOBase):
+ yield fn
+ else:
+ with fn.open("rb") as f:
+ yield f
+
+ with get_file() as f:
+ sha1 = hashlib.sha1()
+ while True:
+ chunk = f.read(4096)
+ if not chunk:
+ break
+ sha1.update(chunk)
+
+ f.seek(0)
+ doc = oe.spdx.SPDXDocument.from_json(f)
+
+ return (doc, sha1.hexdigest())
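A short, hypothetical sketch of reading a deployed document back with the helpers above; the busybox file name and the datastore d are assumptions for illustration, and SPDXDEPLOY is expected to point at the SPDX deploy directory.

from pathlib import Path
import oe.sbom

# Illustrative sketch only: re-read a previously written *.spdx.json document.
doc_path = Path(d.getVar("SPDXDEPLOY")) / "recipes" / "recipe-busybox.spdx.json"
doc, sha1 = oe.sbom.read_doc(doc_path)
print(doc.name, sha1)
print(oe.sbom.get_recipe_spdxid(d))   # e.g. "SPDXRef-Recipe-busybox"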
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py
index deb823b6ec..27347667e8 100644
--- a/meta/lib/oe/sdk.py
+++ b/meta/lib/oe/sdk.py
@@ -1,13 +1,14 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.manifest import *
from oe.package_manager import *
import os
-import shutil
-import glob
import traceback
-
class Sdk(object, metaclass=ABCMeta):
def __init__(self, d, manifest_dir):
self.d = d
@@ -84,260 +85,27 @@ class Sdk(object, metaclass=ABCMeta):
bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
bb.warn("cannot remove SDK dir: %s" % path)
-class RpmSdk(Sdk):
- def __init__(self, d, manifest_dir=None):
- super(RpmSdk, self).__init__(d, manifest_dir)
-
- self.target_manifest = RpmManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_TARGET)
- self.host_manifest = RpmManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_HOST)
-
- target_providename = ['/bin/sh',
- '/bin/bash',
- '/usr/bin/env',
- '/usr/bin/perl',
- 'pkgconfig'
- ]
-
- self.target_pm = RpmPM(d,
- self.sdk_target_sysroot,
- self.d.getVar('TARGET_VENDOR'),
- 'target',
- target_providename
- )
-
- sdk_providename = ['/bin/sh',
- '/bin/bash',
- '/usr/bin/env',
- '/usr/bin/perl',
- 'pkgconfig',
- 'libGL.so()(64bit)',
- 'libGL.so'
- ]
-
- self.host_pm = RpmPM(d,
- self.sdk_host_sysroot,
- self.d.getVar('SDK_VENDOR'),
- 'host',
- sdk_providename,
- "SDK_PACKAGE_ARCHS",
- "SDK_OS"
- )
-
- def _populate_sysroot(self, pm, manifest):
- pkgs_to_install = manifest.parse_initial_manifest()
-
- pm.create_configs()
- pm.write_index()
- pm.update()
-
- pkgs = []
- pkgs_attempt = []
- for pkg_type in pkgs_to_install:
- if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
- pkgs_attempt += pkgs_to_install[pkg_type]
+ def install_locales(self, pm):
+ linguas = self.d.getVar("SDKIMAGE_LINGUAS")
+ if linguas:
+ import fnmatch
+ # Install the binary locales
+ if linguas == "all":
+ pm.install_glob("nativesdk-glibc-binary-localedata-*.utf-8", sdk=True)
else:
- pkgs += pkgs_to_install[pkg_type]
-
- pm.install(pkgs)
-
- pm.install(pkgs_attempt, True)
-
- def _populate(self):
- bb.note("Installing TARGET packages")
- self._populate_sysroot(self.target_pm, self.target_manifest)
-
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.target_pm.remove_packaging_data()
-
- bb.note("Installing NATIVESDK packages")
- self._populate_sysroot(self.host_pm, self.host_manifest)
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.host_pm.remove_packaging_data()
-
- # Move host RPM library data
- native_rpm_state_dir = os.path.join(self.sdk_output,
- self.sdk_native_path,
- self.d.getVar('localstatedir_nativesdk').strip('/'),
- "lib",
- "rpm"
- )
- self.mkdirhier(native_rpm_state_dir)
- for f in glob.glob(os.path.join(self.sdk_output,
- "var",
- "lib",
- "rpm",
- "*")):
- self.movefile(f, native_rpm_state_dir)
-
- self.remove(os.path.join(self.sdk_output, "var"), True)
-
- # Move host sysconfig data
- native_sysconf_dir = os.path.join(self.sdk_output,
- self.sdk_native_path,
- self.d.getVar('sysconfdir',
- True).strip('/'),
- )
- self.mkdirhier(native_sysconf_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
- self.movefile(f, native_sysconf_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
- self.movefile(f, native_sysconf_dir)
- self.remove(os.path.join(self.sdk_output, "etc"), True)
-
-
-class OpkgSdk(Sdk):
- def __init__(self, d, manifest_dir=None):
- super(OpkgSdk, self).__init__(d, manifest_dir)
-
- self.target_conf = self.d.getVar("IPKGCONF_TARGET")
- self.host_conf = self.d.getVar("IPKGCONF_SDK")
-
- self.target_manifest = OpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_TARGET)
- self.host_manifest = OpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_HOST)
-
- self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
- self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
-
- self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
- self.d.getVar("SDK_PACKAGE_ARCHS"))
-
- def _populate_sysroot(self, pm, manifest):
- pkgs_to_install = manifest.parse_initial_manifest()
-
- if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
- pm.write_index()
-
- pm.update()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- def _populate(self):
- bb.note("Installing TARGET packages")
- self._populate_sysroot(self.target_pm, self.target_manifest)
-
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.target_pm.remove_packaging_data()
-
- bb.note("Installing NATIVESDK packages")
- self._populate_sysroot(self.host_pm, self.host_manifest)
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.host_pm.remove_packaging_data()
-
- target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
- host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
-
- self.mkdirhier(target_sysconfdir)
- shutil.copy(self.target_conf, target_sysconfdir)
- os.chmod(os.path.join(target_sysconfdir,
- os.path.basename(self.target_conf)), 0o644)
-
- self.mkdirhier(host_sysconfdir)
- shutil.copy(self.host_conf, host_sysconfdir)
- os.chmod(os.path.join(host_sysconfdir,
- os.path.basename(self.host_conf)), 0o644)
-
- native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('localstatedir_nativesdk').strip('/'),
- "lib", "opkg")
- self.mkdirhier(native_opkg_state_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
- self.movefile(f, native_opkg_state_dir)
-
- self.remove(os.path.join(self.sdk_output, "var"), True)
-
-
-class DpkgSdk(Sdk):
- def __init__(self, d, manifest_dir=None):
- super(DpkgSdk, self).__init__(d, manifest_dir)
-
- self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
- self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")
-
- self.target_manifest = DpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_TARGET)
- self.host_manifest = DpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_HOST)
-
- self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
- self.d.getVar("PACKAGE_ARCHS"),
- self.d.getVar("DPKG_ARCH"),
- self.target_conf_dir)
-
- self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
- self.d.getVar("SDK_PACKAGE_ARCHS"),
- self.d.getVar("DEB_SDK_ARCH"),
- self.host_conf_dir)
-
- def _copy_apt_dir_to(self, dst_dir):
- staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")
-
- self.remove(dst_dir, True)
-
- shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
-
- def _populate_sysroot(self, pm, manifest):
- pkgs_to_install = manifest.parse_initial_manifest()
-
- pm.write_index()
- pm.update()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- def _populate(self):
- bb.note("Installing TARGET packages")
- self._populate_sysroot(self.target_pm, self.target_manifest)
-
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
-
- self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.target_pm.remove_packaging_data()
-
- bb.note("Installing NATIVESDK packages")
- self._populate_sysroot(self.host_pm, self.host_manifest)
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
-
- self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
- "etc", "apt"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.host_pm.remove_packaging_data()
-
- native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
- "var", "lib", "dpkg")
- self.mkdirhier(native_dpkg_state_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
- self.movefile(f, native_dpkg_state_dir)
- self.remove(os.path.join(self.sdk_output, "var"), True)
-
+ pm.install(["nativesdk-glibc-binary-localedata-%s.utf-8" % \
+ lang for lang in linguas.split()])
+ # Generate a locale archive of them
+ target_arch = self.d.getVar('SDK_ARCH')
+ rootfs = oe.path.join(self.sdk_host_sysroot, self.sdk_native_path)
+ localedir = oe.path.join(rootfs, self.d.getVar("libdir_nativesdk"), "locale")
+ generate_locale_archive(self.d, rootfs, target_arch, localedir)
+ # And now delete the binary locales
+ pkgs = fnmatch.filter(pm.list_installed(), "nativesdk-glibc-binary-localedata-*.utf-8")
+ pm.remove(pkgs)
+ else:
+ # No linguas so do nothing
+ pass
def sdk_list_installed_packages(d, target, rootfs_dir=None):
@@ -347,30 +115,44 @@ def sdk_list_installed_packages(d, target, rootfs_dir=None):
rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
+ if target is False:
+ ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK")
+ d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target)
+
img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- arch_var = ["SDK_PACKAGE_ARCHS", None][target is True]
- os_var = ["SDK_OS", None][target is True]
- return RpmPkgsList(d, rootfs_dir).list_pkgs()
- elif img_type == "ipk":
- conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True]
- return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var)).list_pkgs()
- elif img_type == "deb":
- return DpkgPkgsList(d, rootfs_dir).list_pkgs()
+ import importlib
+ cls = importlib.import_module('oe.package_manager.' + img_type)
+ return cls.PMPkgsList(d, rootfs_dir).list_pkgs()
def populate_sdk(d, manifest_dir=None):
env_bkp = os.environ.copy()
img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- RpmSdk(d, manifest_dir).populate()
- elif img_type == "ipk":
- OpkgSdk(d, manifest_dir).populate()
- elif img_type == "deb":
- DpkgSdk(d, manifest_dir).populate()
+ import importlib
+ cls = importlib.import_module('oe.package_manager.' + img_type + '.sdk')
+ cls.PkgSdk(d, manifest_dir).populate()
os.environ.clear()
os.environ.update(env_bkp)
+def get_extra_sdkinfo(sstate_dir):
+ """
+    This function is used for generating the target and host package manifest files of the eSDK.
+ """
+ import math
+
+ extra_info = {}
+ extra_info['tasksizes'] = {}
+ extra_info['filesizes'] = {}
+ for root, _, files in os.walk(sstate_dir):
+ for fn in files:
+ if fn.endswith('.tgz'):
+ fsize = int(math.ceil(float(os.path.getsize(os.path.join(root, fn))) / 1024))
+ task = fn.rsplit(':',1)[1].split('_',1)[1].split(',')[0]
+ origtotal = extra_info['tasksizes'].get(task, 0)
+ extra_info['tasksizes'][task] = origtotal + fsize
+ extra_info['filesizes'][fn] = fsize
+ return extra_info
+
if __name__ == "__main__":
pass
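As a usage sketch, get_extra_sdkinfo() only needs a path to a populated sstate cache; the path below is hypothetical.

from oe.sdk import get_extra_sdkinfo

# Illustrative sketch only: summarise per-task sstate sizes (in KiB) as used
# when generating the eSDK manifest information.
info = get_extra_sdkinfo("/path/to/sstate-cache")
for task, ksize in sorted(info['tasksizes'].items()):
    print("%s: %d KiB" % (task, ksize))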
diff --git a/meta/lib/oe/spdx.py b/meta/lib/oe/spdx.py
new file mode 100644
index 0000000000..14ca706895
--- /dev/null
+++ b/meta/lib/oe/spdx.py
@@ -0,0 +1,342 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+#
+# This library is intended to capture the JSON SPDX specification in a
+# type-safe manner. It is not intended to encode any OE-specific behaviors;
+# see sbom.py for that.
+#
+# The SPDX specification document doesn't cover the JSON syntax for
+# particular configurations, which can make it hard to determine what the JSON
+# syntax should be. I've found it is actually much simpler to read the official
+# SPDX JSON schema which can be found here: https://github.com/spdx/spdx-spec
+# in schemas/spdx-schema.json
+#
+
+import hashlib
+import itertools
+import json
+
+SPDX_VERSION = "2.2"
+
+
+#
+# The following are the support classes that are used to implement SPDX objects
+#
+
+class _Property(object):
+ """
+ A generic SPDX object property. The different types will derive from this
+ class
+ """
+
+ def __init__(self, *, default=None):
+ self.default = default
+
+ def setdefault(self, dest, name):
+ if self.default is not None:
+ dest.setdefault(name, self.default)
+
+
+class _String(_Property):
+ """
+ A scalar string property for an SPDX object
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = value
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return source
+
+
+class _Object(_Property):
+ """
+ A scalar SPDX object property of a SPDX object
+ """
+
+ def __init__(self, cls, **kwargs):
+ super().__init__(**kwargs)
+ self.cls = cls
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ if not name in obj._spdx:
+ obj._spdx[name] = self.cls()
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = value
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper)
+
+ def init(self, source):
+ return self.cls(**source)
+
+
+class _ListProperty(_Property):
+ """
+ A list of SPDX properties
+ """
+
+ def __init__(self, prop, **kwargs):
+ super().__init__(**kwargs)
+ self.prop = prop
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ if not name in obj._spdx:
+ obj._spdx[name] = []
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = list(value)
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return [self.prop.init(o) for o in source]
+
+
+class _StringList(_ListProperty):
+ """
+ A list of strings as a property for an SPDX object
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(_String(), **kwargs)
+
+
+class _ObjectList(_ListProperty):
+ """
+ A list of SPDX objects as a property for an SPDX object
+ """
+
+ def __init__(self, cls, **kwargs):
+ super().__init__(_Object(cls), **kwargs)
+
+
+class MetaSPDXObject(type):
+ """
+ A metaclass that allows properties (anything derived from a _Property
+ class) to be defined for a SPDX object
+ """
+ def __new__(mcls, name, bases, attrs):
+ attrs["_properties"] = {}
+
+ for key in attrs.keys():
+ if isinstance(attrs[key], _Property):
+ prop = attrs[key]
+ attrs["_properties"][key] = prop
+ prop.set_property(attrs, key)
+
+ return super().__new__(mcls, name, bases, attrs)
+
+
+class SPDXObject(metaclass=MetaSPDXObject):
+ """
+ The base SPDX object; all SPDX spec classes must derive from this class
+ """
+ def __init__(self, **d):
+ self._spdx = {}
+
+ for name, prop in self._properties.items():
+ prop.setdefault(self._spdx, name)
+ if name in d:
+ self._spdx[name] = prop.init(d[name])
+
+ def serializer(self):
+ return self._spdx
+
+ def __setattr__(self, name, value):
+ if name in self._properties or name == "_spdx":
+ super().__setattr__(name, value)
+ return
+ raise KeyError("%r is not a valid SPDX property" % name)
+
+#
+# These are the SPDX objects implemented from the spec. The *only* properties
+# that can be added to these objects are ones directly specified in the SPDX
+# spec, however you may add helper functions to make operations easier.
+#
+# Defaults should *only* be specified if the SPDX spec says there is a certain
+# required value for a field (e.g. dataLicense), or if the field is mandatory
+# and has a sane "this field is unknown" value (e.g. "NOASSERTION")
+#
+
+class SPDXAnnotation(SPDXObject):
+ annotationDate = _String()
+ annotationType = _String()
+ annotator = _String()
+ comment = _String()
+
+class SPDXChecksum(SPDXObject):
+ algorithm = _String()
+ checksumValue = _String()
+
+
+class SPDXRelationship(SPDXObject):
+ spdxElementId = _String()
+ relatedSpdxElement = _String()
+ relationshipType = _String()
+ comment = _String()
+ annotations = _ObjectList(SPDXAnnotation)
+
+
+class SPDXExternalReference(SPDXObject):
+ referenceCategory = _String()
+ referenceType = _String()
+ referenceLocator = _String()
+
+
+class SPDXPackageVerificationCode(SPDXObject):
+ packageVerificationCodeValue = _String()
+ packageVerificationCodeExcludedFiles = _StringList()
+
+
+class SPDXPackage(SPDXObject):
+ name = _String()
+ SPDXID = _String()
+ versionInfo = _String()
+ downloadLocation = _String(default="NOASSERTION")
+ packageSupplier = _String(default="NOASSERTION")
+ homepage = _String()
+ licenseConcluded = _String(default="NOASSERTION")
+ licenseDeclared = _String(default="NOASSERTION")
+ summary = _String()
+ description = _String()
+ sourceInfo = _String()
+ copyrightText = _String(default="NOASSERTION")
+ licenseInfoFromFiles = _StringList(default=["NOASSERTION"])
+ externalRefs = _ObjectList(SPDXExternalReference)
+ packageVerificationCode = _Object(SPDXPackageVerificationCode)
+ hasFiles = _StringList()
+ packageFileName = _String()
+ annotations = _ObjectList(SPDXAnnotation)
+
+
+class SPDXFile(SPDXObject):
+ SPDXID = _String()
+ fileName = _String()
+ licenseConcluded = _String(default="NOASSERTION")
+ copyrightText = _String(default="NOASSERTION")
+ licenseInfoInFiles = _StringList(default=["NOASSERTION"])
+ checksums = _ObjectList(SPDXChecksum)
+ fileTypes = _StringList()
+
+
+class SPDXCreationInfo(SPDXObject):
+ created = _String()
+ licenseListVersion = _String()
+ comment = _String()
+ creators = _StringList()
+
+
+class SPDXExternalDocumentRef(SPDXObject):
+ externalDocumentId = _String()
+ spdxDocument = _String()
+ checksum = _Object(SPDXChecksum)
+
+
+class SPDXExtractedLicensingInfo(SPDXObject):
+ name = _String()
+ comment = _String()
+ licenseId = _String()
+ extractedText = _String()
+
+
+class SPDXDocument(SPDXObject):
+ spdxVersion = _String(default="SPDX-" + SPDX_VERSION)
+ dataLicense = _String(default="CC0-1.0")
+ SPDXID = _String(default="SPDXRef-DOCUMENT")
+ name = _String()
+ documentNamespace = _String()
+ creationInfo = _Object(SPDXCreationInfo)
+ packages = _ObjectList(SPDXPackage)
+ files = _ObjectList(SPDXFile)
+ relationships = _ObjectList(SPDXRelationship)
+ externalDocumentRefs = _ObjectList(SPDXExternalDocumentRef)
+ hasExtractedLicensingInfos = _ObjectList(SPDXExtractedLicensingInfo)
+
+ def __init__(self, **d):
+ super().__init__(**d)
+
+ def to_json(self, f, *, sort_keys=False, indent=None, separators=None):
+ class Encoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, SPDXObject):
+ return o.serializer()
+
+ return super().default(o)
+
+ sha1 = hashlib.sha1()
+ for chunk in Encoder(
+ sort_keys=sort_keys,
+ indent=indent,
+ separators=separators,
+ ).iterencode(self):
+ chunk = chunk.encode("utf-8")
+ f.write(chunk)
+ sha1.update(chunk)
+
+ return sha1.hexdigest()
+
+ @classmethod
+ def from_json(cls, f):
+ return cls(**json.load(f))
+
+ def add_relationship(self, _from, relationship, _to, *, comment=None, annotation=None):
+ if isinstance(_from, SPDXObject):
+ from_spdxid = _from.SPDXID
+ else:
+ from_spdxid = _from
+
+ if isinstance(_to, SPDXObject):
+ to_spdxid = _to.SPDXID
+ else:
+ to_spdxid = _to
+
+ r = SPDXRelationship(
+ spdxElementId=from_spdxid,
+ relatedSpdxElement=to_spdxid,
+ relationshipType=relationship,
+ )
+
+ if comment is not None:
+ r.comment = comment
+
+ if annotation is not None:
+ r.annotations.append(annotation)
+
+ self.relationships.append(r)
+
+ def find_by_spdxid(self, spdxid):
+ for o in itertools.chain(self.packages, self.files):
+ if o.SPDXID == spdxid:
+ return o
+ return None
+
+ def find_external_document_ref(self, namespace):
+ for r in self.externalDocumentRefs:
+ if r.spdxDocument == namespace:
+ return r
+ return None
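As a usage illustration (not part of the patch itself), a small document can be built and serialized with these classes roughly as follows; it assumes the module is importable as oe.spdx, and the names, namespace and output path are made up:

import oe.spdx

doc = oe.spdx.SPDXDocument()
doc.name = "example-recipe"
doc.documentNamespace = "http://spdx.example.com/example-recipe"  # hypothetical

pkg = oe.spdx.SPDXPackage()
pkg.name = "example"
pkg.SPDXID = "SPDXRef-example"
doc.packages.append(pkg)

# DESCRIBES relationship between the document and the package
doc.add_relationship(doc, "DESCRIBES", pkg)

with open("example.spdx.json", "w") as f:
    # to_json() streams the JSON to f and returns the SHA-1 of what was written
    digest = doc.to_json(f, sort_keys=True, indent=2)
print(digest)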
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
index 13fd3bd633..2cf858e201 100644
--- a/meta/lib/oe/sstatesig.py
+++ b/meta/lib/oe/sstatesig.py
@@ -1,6 +1,11 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import bb.siggen
+import bb.runqueue
+import oe
-def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
+def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
# Return True if we should keep the dependency, False to drop it
def isNative(x):
return x.endswith("-native")
@@ -8,31 +13,37 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
return "-cross-" in x
def isNativeSDK(x):
return x.startswith("nativesdk-")
- def isKernel(fn):
- inherits = " ".join(dataCache.inherits[fn])
+ def isKernel(mc, fn):
+ inherits = " ".join(dataCaches[mc].inherits[fn])
return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
- def isPackageGroup(fn):
- inherits = " ".join(dataCache.inherits[fn])
+ def isPackageGroup(mc, fn):
+ inherits = " ".join(dataCaches[mc].inherits[fn])
return "/packagegroup.bbclass" in inherits
- def isAllArch(fn):
- inherits = " ".join(dataCache.inherits[fn])
+ def isAllArch(mc, fn):
+ inherits = " ".join(dataCaches[mc].inherits[fn])
return "/allarch.bbclass" in inherits
- def isImage(fn):
- return "/image.bbclass" in " ".join(dataCache.inherits[fn])
-
- # Always include our own inter-task dependencies
- if recipename == depname:
+ def isImage(mc, fn):
+ return "/image.bbclass" in " ".join(dataCaches[mc].inherits[fn])
+
+ depmc, _, deptaskname, depmcfn = bb.runqueue.split_tid_mcfn(dep)
+ mc, _ = bb.runqueue.split_mc(fn)
+
+ # (Almost) always include our own inter-task dependencies (unless they
+ # come from an mcdepends). The exception is the special
+ # do_kernel_configme->do_unpack_and_patch dependency from archiver.bbclass.
+ if recipename == depname and depmc == mc:
+ if task == "do_kernel_configme" and deptaskname == "do_unpack_and_patch":
+ return False
return True
- # Quilt (patch application) changing isn't likely to affect anything
- excludelist = ['quilt-native', 'subversion-native', 'git-native']
- if depname in excludelist and recipename != depname:
- return False
-
# Exclude well defined recipe->dependency
if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
return False
+ # Check for special wildcard
+ if "*->%s" % depname in siggen.saferecipedeps and recipename != depname:
+ return False
+
# Don't change native/cross/nativesdk recipe dependencies any further
if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
return True
@@ -40,22 +51,21 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
# Only target packages beyond here
# allarch packagegroups are assumed to have well behaved names which don't change between architectures/tunes
- if isPackageGroup(fn) and isAllArch(fn) and not isNative(depname):
- return False
+ if isPackageGroup(mc, fn) and isAllArch(mc, fn) and not isNative(depname):
+ return False
# Exclude well defined machine specific configurations which don't change ABI
- if depname in siggen.abisaferecipes and not isImage(fn):
+ if depname in siggen.abisaferecipes and not isImage(mc, fn):
return False
# Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
- # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum
+ # if we're just doing an RRECOMMENDS:xxx = "kernel-module-*", not least because the checksum
# is machine specific.
# Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
# and we recommend a kernel-module, we exclude the dependency.
- depfn = dep.rsplit(".", 1)[0]
- if dataCache and isKernel(depfn) and not isKernel(fn):
- for pkg in dataCache.runrecs[fn]:
- if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1:
+ if dataCaches and isKernel(depmc, depmcfn) and not isKernel(mc, fn):
+ for pkg in dataCaches[mc].runrecs[fn]:
+ if " ".join(dataCaches[mc].runrecs[fn][pkg]).find("kernel-module-") != -1:
return False
# Default to keep dependencies
@@ -80,11 +90,12 @@ class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
pass
- def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
- return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
+ def rundep_check(self, fn, recipename, task, dep, depname, dataCaches = None):
+ return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCaches)
+
+class SignatureGeneratorOEBasicHashMixIn(object):
+ supports_multiconfig_datacaches = True
-class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
- name = "OEBasicHash"
def init_rundepcheck(self, data):
self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
@@ -97,6 +108,7 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
"").split()
self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
+ self._internal = False
pass
def tasks_resolved(self, virtmap, virtpnmap, dataCache):
@@ -118,16 +130,15 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
newsafedeps.append(a1 + "->" + a2)
self.saferecipedeps = newsafedeps
- def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
- return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
+ def rundep_check(self, fn, recipename, task, dep, depname, dataCaches = None):
+ return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCaches)
def get_taskdata(self):
- data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata()
- return (data, self.lockedpnmap, self.lockedhashfn)
+ return (self.lockedpnmap, self.lockedhashfn, self.lockedhashes) + super().get_taskdata()
def set_taskdata(self, data):
- coredata, self.lockedpnmap, self.lockedhashfn = data
- super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata)
+ self.lockedpnmap, self.lockedhashfn, self.lockedhashes = data[:3]
+ super().set_taskdata(data[3:])
def dump_sigs(self, dataCache, options):
sigfile = os.getcwd() + "/locked-sigs.inc"
@@ -135,95 +146,135 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
self.dump_lockedsigs(sigfile)
return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
- def get_taskhash(self, fn, task, deps, dataCache):
- h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(fn, task, deps, dataCache)
- recipename = dataCache.pkg_fn[fn]
+ def get_taskhash(self, tid, deps, dataCaches):
+ if tid in self.lockedhashes:
+ if self.lockedhashes[tid]:
+ return self.lockedhashes[tid]
+ else:
+ return super().get_taskhash(tid, deps, dataCaches)
+
+ h = super().get_taskhash(tid, deps, dataCaches)
+
+ (mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
+
+ recipename = dataCaches[mc].pkg_fn[fn]
self.lockedpnmap[fn] = recipename
- self.lockedhashfn[fn] = dataCache.hashfn[fn]
+ self.lockedhashfn[fn] = dataCaches[mc].hashfn[fn]
unlocked = False
if recipename in self.unlockedrecipes:
unlocked = True
else:
def recipename_from_dep(dep):
- # The dep entry will look something like
- # /path/path/recipename.bb.task, virtual:native:/p/foo.bb.task,
- # ...
- fn = dep.rsplit('.', 1)[0]
- return dataCache.pkg_fn[fn]
+ (depmc, _, _, depfn) = bb.runqueue.split_tid_mcfn(dep)
+ return dataCaches[depmc].pkg_fn[depfn]
# If any unlocked recipe is in the direct dependencies then the
# current recipe should be unlocked as well.
- depnames = [ recipename_from_dep(x) for x in deps ]
+ depnames = [ recipename_from_dep(x) for x in deps if mc == bb.runqueue.mc_from_tid(x)]
if any(x in y for y in depnames for x in self.unlockedrecipes):
self.unlockedrecipes[recipename] = ''
unlocked = True
if not unlocked and recipename in self.lockedsigs:
if task in self.lockedsigs[recipename]:
- k = fn + "." + task
h_locked = self.lockedsigs[recipename][task][0]
var = self.lockedsigs[recipename][task][1]
- self.lockedhashes[k] = h_locked
- self.taskhash[k] = h_locked
+ self.lockedhashes[tid] = h_locked
+ self._internal = True
+ unihash = self.get_unihash(tid)
+ self._internal = False
#bb.warn("Using %s %s %s" % (recipename, task, h))
- if h != h_locked:
+ if h != h_locked and h_locked != unihash:
self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
% (recipename, task, h, h_locked, var))
return h_locked
+
+ self.lockedhashes[tid] = False
#bb.warn("%s %s %s" % (recipename, task, h))
return h
+ def get_stampfile_hash(self, tid):
+ if tid in self.lockedhashes and self.lockedhashes[tid]:
+ return self.lockedhashes[tid]
+ return super().get_stampfile_hash(tid)
+
+ def get_unihash(self, tid):
+ if tid in self.lockedhashes and self.lockedhashes[tid] and not self._internal:
+ return self.lockedhashes[tid]
+ return super().get_unihash(tid)
+
def dump_sigtask(self, fn, task, stampbase, runtime):
- k = fn + "." + task
- if k in self.lockedhashes:
+ tid = fn + ":" + task
+ if tid in self.lockedhashes and self.lockedhashes[tid]:
return
super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)
def dump_lockedsigs(self, sigfile, taskfilter=None):
types = {}
- for k in self.runtaskdeps:
+ for tid in self.runtaskdeps:
if taskfilter:
- if not k in taskfilter:
+ if not tid in taskfilter:
continue
- fn = k.rsplit(".",1)[0]
+ fn = bb.runqueue.fn_from_tid(tid)
t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
t = 't-' + t.replace('_', '-')
if t not in types:
types[t] = []
- types[t].append(k)
+ types[t].append(tid)
with open(sigfile, "w") as f:
l = sorted(types)
for t in l:
f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
types[t].sort()
- sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".",1)[0]])
- for k in sortedk:
- fn = k.rsplit(".",1)[0]
- task = k.rsplit(".",1)[1]
- if k not in self.taskhash:
+ sortedtid = sorted(types[t], key=lambda tid: self.lockedpnmap[bb.runqueue.fn_from_tid(tid)])
+ for tid in sortedtid:
+ (_, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
+ if tid not in self.taskhash:
continue
- f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n")
+ f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.get_unihash(tid) + " \\\n")
f.write(' "\n')
- f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(l)))
+ f.write('SIGGEN_LOCKEDSIGS_TYPES:%s = "%s"' % (self.machine, " ".join(l)))
+
+ def dump_siglist(self, sigfile, path_prefix_strip=None):
+ def strip_fn(fn):
+ nonlocal path_prefix_strip
+ if not path_prefix_strip:
+ return fn
- def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d):
+ fn_exp = fn.split(":")
+ if fn_exp[-1].startswith(path_prefix_strip):
+ fn_exp[-1] = fn_exp[-1][len(path_prefix_strip):]
+
+ return ":".join(fn_exp)
+
+ with open(sigfile, "w") as f:
+ tasks = []
+ for taskitem in self.taskhash:
+ (fn, task) = taskitem.rsplit(":", 1)
+ pn = self.lockedpnmap[fn]
+ tasks.append((pn, task, strip_fn(fn), self.taskhash[taskitem]))
+ for (pn, task, fn, taskhash) in sorted(tasks):
+ f.write('%s:%s %s %s\n' % (pn, task, fn, taskhash))
+
+ def checkhashes(self, sq_data, missed, found, d):
warn_msgs = []
error_msgs = []
sstate_missing_msgs = []
- for task in range(len(sq_fn)):
- if task not in ret:
+ for tid in sq_data['hash']:
+ if tid not in found:
for pn in self.lockedsigs:
- if sq_hash[task] in iter(self.lockedsigs[pn].values()):
- if sq_task[task] == 'do_shared_workdir':
+ taskname = bb.runqueue.taskname_from_tid(tid)
+ if sq_data['hash'][tid] in iter(self.lockedsigs[pn].values()):
+ if taskname == 'do_shared_workdir':
continue
sstate_missing_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
- % (pn, sq_task[task], sq_hash[task]))
+ % (pn, taskname, sq_data['hash'][tid]))
checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK")
if checklevel == 'warn':
@@ -242,10 +293,25 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
if error_msgs:
bb.fatal("\n".join(error_msgs))
+class SignatureGeneratorOEBasicHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
+ name = "OEBasicHash"
+
+class SignatureGeneratorOEEquivHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorUniHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
+ name = "OEEquivHash"
+
+ def init_rundepcheck(self, data):
+ super().init_rundepcheck(data)
+ self.server = data.getVar('BB_HASHSERVE')
+ if not self.server:
+ bb.fatal("OEEquivHash requires BB_HASHSERVE to be set")
+ self.method = data.getVar('SSTATE_HASHEQUIV_METHOD')
+ if not self.method:
+ bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_METHOD to be set")
# Insert these classes into siggen's namespace so it can see and select them
bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
+bb.siggen.SignatureGeneratorOEEquivHash = SignatureGeneratorOEEquivHash
def find_siginfo(pn, taskname, taskhashlist, d):
@@ -254,20 +320,24 @@ def find_siginfo(pn, taskname, taskhashlist, d):
import fnmatch
import glob
- if taskhashlist:
- hashfiles = {}
-
if not taskname:
# We have to derive pn and taskname
key = pn
- splitit = key.split('.bb.')
+ splitit = key.split('.bb:')
taskname = splitit[1]
pn = os.path.basename(splitit[0]).split('_')[0]
if key.startswith('virtual:native:'):
pn = pn + '-native'
+ hashfiles = {}
filedates = {}
+ def get_hashval(siginfo):
+ if siginfo.endswith('.siginfo'):
+ return siginfo.rpartition(':')[2].partition('_')[0]
+ else:
+ return siginfo.rpartition('.')[2]
+
# First search in stamps dir
localdata = d.createCopy()
localdata.setVar('MULTIMACH_TARGET_SYS', '*')
@@ -297,10 +367,12 @@ def find_siginfo(pn, taskname, taskhashlist, d):
filedates[fullpath] = os.stat(fullpath).st_mtime
except OSError:
continue
+ hashval = get_hashval(fullpath)
+ hashfiles[hashval] = fullpath
if not taskhashlist or (len(filedates) < 2 and not foundall):
# That didn't work, look in sstate-cache
- hashes = taskhashlist or ['*']
+ hashes = taskhashlist or ['?' * 64]
localdata = bb.data.createCopy(d)
for hashval in hashes:
localdata.setVar('PACKAGE_ARCH', '*')
@@ -318,22 +390,17 @@ def find_siginfo(pn, taskname, taskhashlist, d):
sstatename = taskname[3:]
filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG'), sstatename)
- if hashval != '*':
- sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR'), hashval[:2])
- else:
- sstatedir = d.getVar('SSTATE_DIR')
-
- for root, dirs, files in os.walk(sstatedir):
- for fn in files:
- fullpath = os.path.join(root, fn)
- if fnmatch.fnmatch(fullpath, filespec):
- if taskhashlist:
- hashfiles[hashval] = fullpath
- else:
- try:
- filedates[fullpath] = os.stat(fullpath).st_mtime
- except:
- continue
+ matchedfiles = glob.glob(filespec)
+ for fullpath in matchedfiles:
+ actual_hashval = get_hashval(fullpath)
+ if actual_hashval in hashfiles:
+ continue
+ hashfiles[hashval] = fullpath
+ if not taskhashlist:
+ try:
+ filedates[fullpath] = os.stat(fullpath).st_mtime
+ except:
+ continue
if taskhashlist:
return hashfiles
@@ -353,3 +420,230 @@ def sstate_get_manifest_filename(task, d):
if extrainf:
d2.setVar("SSTATE_MANMACH", extrainf)
return (d2.expand("${SSTATE_MANFILEPREFIX}.%s" % task), d2)
+
+def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
+ d2 = d
+ variant = ''
+ curr_variant = ''
+ if d.getVar("BBEXTENDCURR") == "multilib":
+ curr_variant = d.getVar("BBEXTENDVARIANT")
+ if "virtclass-multilib" not in d.getVar("OVERRIDES"):
+ curr_variant = "invalid"
+ if taskdata2.startswith("virtual:multilib"):
+ variant = taskdata2.split(":")[2]
+ if curr_variant != variant:
+ if variant not in multilibcache:
+ multilibcache[variant] = oe.utils.get_multilib_datastore(variant, d)
+ d2 = multilibcache[variant]
+
+ if taskdata.endswith("-native"):
+ pkgarchs = ["${BUILD_ARCH}", "${BUILD_ARCH}_${ORIGNATIVELSBSTRING}"]
+ elif taskdata.startswith("nativesdk-"):
+ pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
+ elif "-cross-canadian" in taskdata:
+ pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
+ elif "-cross-" in taskdata:
+ pkgarchs = ["${BUILD_ARCH}"]
+ elif "-crosssdk" in taskdata:
+ pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
+ else:
+ pkgarchs = ['${MACHINE_ARCH}']
+ pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
+ pkgarchs.append('allarch')
+ pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')
+
+ for pkgarch in pkgarchs:
+ manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
+ if os.path.exists(manifest):
+ return manifest, d2
+ bb.fatal("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant))
+ return None, d2
+
+def OEOuthashBasic(path, sigfile, task, d):
+ """
+ Basic output hash function
+
+ Calculates the output hash of a task by hashing all output file metadata,
+ and file contents.
+ """
+ import hashlib
+ import stat
+ import pwd
+ import grp
+ import re
+ import fnmatch
+
+ def update_hash(s):
+ s = s.encode('utf-8')
+ h.update(s)
+ if sigfile:
+ sigfile.write(s)
+
+ h = hashlib.sha256()
+ prev_dir = os.getcwd()
+ corebase = d.getVar("COREBASE")
+ tmpdir = d.getVar("TMPDIR")
+ include_owners = os.environ.get('PSEUDO_DISABLED') == '0'
+ if "package_write_" in task or task == "package_qa":
+ include_owners = False
+ include_timestamps = False
+ include_root = True
+ if task == "package":
+ include_timestamps = True
+ include_root = False
+ hash_version = d.getVar('HASHEQUIV_HASH_VERSION')
+ extra_sigdata = d.getVar("HASHEQUIV_EXTRA_SIGDATA")
+
+ filemaps = {}
+ for m in (d.getVar('SSTATE_HASHEQUIV_FILEMAP') or '').split():
+ entry = m.split(":")
+ if len(entry) != 3 or entry[0] != task:
+ continue
+ filemaps.setdefault(entry[1], [])
+ filemaps[entry[1]].append(entry[2])
+
+ try:
+ os.chdir(path)
+ basepath = os.path.normpath(path)
+
+ update_hash("OEOuthashBasic\n")
+ if hash_version:
+ update_hash(hash_version + "\n")
+
+ if extra_sigdata:
+ update_hash(extra_sigdata + "\n")
+
+ # It is only currently useful to get equivalent hashes for things that
+ # can be restored from sstate. Since the sstate object is named using
+ # SSTATE_PKGSPEC and the task name, those should be included in the
+ # output hash calculation.
+ update_hash("SSTATE_PKGSPEC=%s\n" % d.getVar('SSTATE_PKGSPEC'))
+ update_hash("task=%s\n" % task)
+
+ for root, dirs, files in os.walk('.', topdown=True):
+ # Sort directories to ensure consistent ordering when recursing
+ dirs.sort()
+ files.sort()
+
+ def process(path):
+ s = os.lstat(path)
+
+ if stat.S_ISDIR(s.st_mode):
+ update_hash('d')
+ elif stat.S_ISCHR(s.st_mode):
+ update_hash('c')
+ elif stat.S_ISBLK(s.st_mode):
+ update_hash('b')
+ elif stat.S_ISSOCK(s.st_mode):
+ update_hash('s')
+ elif stat.S_ISLNK(s.st_mode):
+ update_hash('l')
+ elif stat.S_ISFIFO(s.st_mode):
+ update_hash('p')
+ else:
+ update_hash('-')
+
+ def add_perm(mask, on, off='-'):
+ if mask & s.st_mode:
+ update_hash(on)
+ else:
+ update_hash(off)
+
+ add_perm(stat.S_IRUSR, 'r')
+ add_perm(stat.S_IWUSR, 'w')
+ if stat.S_ISUID & s.st_mode:
+ add_perm(stat.S_IXUSR, 's', 'S')
+ else:
+ add_perm(stat.S_IXUSR, 'x')
+
+ if include_owners:
+ # Group/other permissions are only relevant in pseudo context
+ add_perm(stat.S_IRGRP, 'r')
+ add_perm(stat.S_IWGRP, 'w')
+ if stat.S_ISGID & s.st_mode:
+ add_perm(stat.S_IXGRP, 's', 'S')
+ else:
+ add_perm(stat.S_IXGRP, 'x')
+
+ add_perm(stat.S_IROTH, 'r')
+ add_perm(stat.S_IWOTH, 'w')
+ if stat.S_ISVTX & s.st_mode:
+ update_hash('t')
+ else:
+ add_perm(stat.S_IXOTH, 'x')
+
+ try:
+ update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
+ update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
+ except KeyError as e:
+ bb.warn("KeyError in %s" % path)
+ msg = ("KeyError: %s\nPath %s is owned by uid %d, gid %d, which doesn't match "
+ "any user/group on target. This may be due to host contamination." % (e, path, s.st_uid, s.st_gid))
+ raise Exception(msg).with_traceback(e.__traceback__)
+
+ if include_timestamps:
+ update_hash(" %10d" % s.st_mtime)
+
+ update_hash(" ")
+ if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
+ update_hash("%9s" % ("%d.%d" % (os.major(s.st_rdev), os.minor(s.st_rdev))))
+ else:
+ update_hash(" " * 9)
+
+ filterfile = False
+ for entry in filemaps:
+ if fnmatch.fnmatch(path, entry):
+ filterfile = True
+
+ update_hash(" ")
+ if stat.S_ISREG(s.st_mode) and not filterfile:
+ update_hash("%10d" % s.st_size)
+ else:
+ update_hash(" " * 10)
+
+ update_hash(" ")
+ fh = hashlib.sha256()
+ if stat.S_ISREG(s.st_mode):
+ # Hash file contents
+ if filterfile:
+ # Need to ignore paths in crossscripts and postinst-useradd files.
+ with open(path, 'rb') as d:
+ chunk = d.read()
+ chunk = chunk.replace(bytes(basepath, encoding='utf8'), b'')
+ for entry in filemaps:
+ if not fnmatch.fnmatch(path, entry):
+ continue
+ for r in filemaps[entry]:
+ if r.startswith("regex-"):
+ chunk = re.sub(bytes(r[6:], encoding='utf8'), b'', chunk)
+ else:
+ chunk = chunk.replace(bytes(r, encoding='utf8'), b'')
+ fh.update(chunk)
+ else:
+ with open(path, 'rb') as d:
+ for chunk in iter(lambda: d.read(4096), b""):
+ fh.update(chunk)
+ update_hash(fh.hexdigest())
+ else:
+ update_hash(" " * len(fh.hexdigest()))
+
+ update_hash(" %s" % path)
+
+ if stat.S_ISLNK(s.st_mode):
+ update_hash(" -> %s" % os.readlink(path))
+
+ update_hash("\n")
+
+ # Process this directory and all its child files
+ if include_root or root != ".":
+ process(root)
+ for f in files:
+ if f == 'fixmepath':
+ continue
+ process(os.path.join(root, f))
+ finally:
+ os.chdir(prev_dir)
+
+ return h.hexdigest()
+
+
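For context, the SSTATE_HASHEQUIV_FILEMAP entries consumed above take the form "<task>:<file glob>:<replacement>", where a replacement prefixed with "regex-" is treated as a regular expression. A self-contained sketch of that stripping step, with entirely hypothetical values:

import fnmatch
import re

# hypothetical filemap, already split per task as the code above does
filemaps = {"*/postinst-useradd-*": ["${COMPONENTS_DIR}", "regex-\\s+--root[^;]+"]}

path = "./usr/lib/opkg/info/postinst-useradd-foo"
chunk = b"useradd --root ${COMPONENTS_DIR}/sysroot -r foo\n"

for pattern in filemaps:
    if not fnmatch.fnmatch(path, pattern):
        continue
    for r in filemaps[pattern]:
        if r.startswith("regex-"):
            chunk = re.sub(bytes(r[6:], encoding="utf8"), b"", chunk)
        else:
            chunk = chunk.replace(bytes(r, encoding="utf8"), b"")

print(chunk)   # build-specific paths removed before the content is hashed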
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
index 0426e15834..53186c4a3e 100644
--- a/meta/lib/oe/terminal.py
+++ b/meta/lib/oe/terminal.py
@@ -1,8 +1,10 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import logging
import oe.classutils
import shlex
from bb.process import Popen, ExecutionError
-from distutils.version import LooseVersion
logger = logging.getLogger('BitBake.OE.Terminal')
@@ -39,7 +41,7 @@ class Terminal(Popen, metaclass=Registry):
raise
def format_command(self, sh_cmd, title):
- fmt = {'title': title or 'Terminal', 'command': sh_cmd}
+ fmt = {'title': title or 'Terminal', 'command': sh_cmd, 'cwd': os.getcwd() }
if isinstance(self.command, str):
return shlex.split(self.command.format(**fmt))
else:
@@ -52,7 +54,7 @@ class XTerminal(Terminal):
raise UnsupportedTerminal(self.name)
class Gnome(XTerminal):
- command = 'gnome-terminal -t "{title}" -x {command}'
+ command = 'gnome-terminal -t "{title}" -- {command}'
priority = 2
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -62,31 +64,10 @@ class Gnome(XTerminal):
# Once fixed on the gnome-terminal project, this should be removed.
if os.getenv('LC_ALL'): os.putenv('LC_ALL','')
- # We need to know when the command completes but gnome-terminal gives us no way
- # to do this. We therefore write the pid to a file using a "phonehome" wrapper
- # script, then monitor the pid until it exits. Thanks gnome!
- import tempfile
- pidfile = tempfile.NamedTemporaryFile(delete = False).name
- try:
- sh_cmd = "oe-gnome-terminal-phonehome " + pidfile + " " + sh_cmd
- XTerminal.__init__(self, sh_cmd, title, env, d)
- while os.stat(pidfile).st_size <= 0:
- continue
- with open(pidfile, "r") as f:
- pid = int(f.readline())
- finally:
- os.unlink(pidfile)
-
- import time
- while True:
- try:
- os.kill(pid, 0)
- time.sleep(0.1)
- except OSError:
- return
+ XTerminal.__init__(self, sh_cmd, title, env, d)
class Mate(XTerminal):
- command = 'mate-terminal -t "{title}" -x {command}'
+ command = 'mate-terminal --disable-factory -t "{title}" -x {command}'
priority = 2
class Xfce(XTerminal):
@@ -104,10 +85,10 @@ class Konsole(XTerminal):
def __init__(self, sh_cmd, title=None, env=None, d=None):
# Check version
vernum = check_terminal_version("konsole")
- if vernum and LooseVersion(vernum) < '2.0.0':
+ if vernum and bb.utils.vercmp_string_op(vernum, "2.0.0", "<"):
# Konsole from KDE 3.x
self.command = 'konsole -T "{title}" -e {command}'
- elif vernum and LooseVersion(vernum) < '16.08.1':
+ elif vernum and bb.utils.vercmp_string_op(vernum, "16.08.1", "<"):
# Konsole pre 16.08.01 Has nofork
self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
XTerminal.__init__(self, sh_cmd, title, env, d)
@@ -133,12 +114,12 @@ class Screen(Terminal):
bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
0.5, 10), d)
else:
- logger.warn(msg)
+ logger.warning(msg)
class TmuxRunning(Terminal):
"""Open a new pane in the current running tmux window"""
name = 'tmux-running'
- command = 'tmux split-window "{command}"'
+ command = 'tmux split-window -c "{cwd}" "{command}"'
priority = 2.75
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -156,7 +137,7 @@ class TmuxRunning(Terminal):
class TmuxNewWindow(Terminal):
"""Open a new window in the current running tmux session"""
name = 'tmux-new-window'
- command = 'tmux new-window -n "{title}" "{command}"'
+ command = 'tmux new-window -c "{cwd}" -n "{title}" "{command}"'
priority = 2.70
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -170,7 +151,7 @@ class TmuxNewWindow(Terminal):
class Tmux(Terminal):
"""Start a new tmux session and window"""
- command = 'tmux new -d -s devshell -n devshell "{command}"'
+ command = 'tmux new -c "{cwd}" -d -s devshell -n devshell "{command}"'
priority = 0.75
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -181,7 +162,12 @@ class Tmux(Terminal):
# devshells, if it's already there, add a new window to it.
window_name = 'devshell-%i' % os.getpid()
- self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name)
+ self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'
+ if not check_tmux_version('1.9'):
+ # `tmux new-session -c` was added in 1.9;
+ # older versions fail with that flag
+ self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'
+ self.command = self.command.format(window_name)
Terminal.__init__(self, sh_cmd, title, env, d)
attach_cmd = 'tmux att -t {0}'.format(window_name)
@@ -189,7 +175,7 @@ class Tmux(Terminal):
if d:
bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
else:
- logger.warn(msg)
+ logger.warning(msg)
class Custom(Terminal):
command = 'false' # This is a placeholder
@@ -201,9 +187,9 @@ class Custom(Terminal):
if not '{command}' in self.command:
self.command += ' {command}'
Terminal.__init__(self, sh_cmd, title, env, d)
- logger.warn('Custom terminal was started.')
+ logger.warning('Custom terminal was started.')
else:
- logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
+ logger.debug('No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
@@ -225,32 +211,64 @@ def spawn_preferred(sh_cmd, title=None, env=None, d=None):
spawn(terminal.name, sh_cmd, title, env, d)
break
except UnsupportedTerminal:
- continue
+ pass
+ except:
+ bb.warn("Terminal %s is supported but did not start" % (terminal.name))
+ # when we've run out of options
else:
raise NoSupportedTerminals(get_cmd_list())
def spawn(name, sh_cmd, title=None, env=None, d=None):
"""Spawn the specified terminal, by name"""
- logger.debug(1, 'Attempting to spawn terminal "%s"', name)
+ logger.debug('Attempting to spawn terminal "%s"', name)
try:
terminal = Registry.registry[name]
except KeyError:
raise UnsupportedTerminal(name)
- pipe = terminal(sh_cmd, title, env, d)
- output = pipe.communicate()[0]
- if output:
- output = output.decode("utf-8")
- if pipe.returncode != 0:
- raise ExecutionError(sh_cmd, pipe.returncode, output)
+ # We need to know when the command completes but some terminals (at least
+ # gnome and tmux) give us no way to do this. We therefore write the pid
+ # to a file using a "phonehome" wrapper script, then monitor the pid
+ # until it exits.
+ import tempfile
+ import time
+ pidfile = tempfile.NamedTemporaryFile(delete = False).name
+ try:
+ sh_cmd = bb.utils.which(os.getenv('PATH'), "oe-gnome-terminal-phonehome") + " " + pidfile + " " + sh_cmd
+ pipe = terminal(sh_cmd, title, env, d)
+ output = pipe.communicate()[0]
+ if output:
+ output = output.decode("utf-8")
+ if pipe.returncode != 0:
+ raise ExecutionError(sh_cmd, pipe.returncode, output)
+
+ while os.stat(pidfile).st_size <= 0:
+ time.sleep(0.01)
+ continue
+ with open(pidfile, "r") as f:
+ pid = int(f.readline())
+ finally:
+ os.unlink(pidfile)
+
+ while True:
+ try:
+ os.kill(pid, 0)
+ time.sleep(0.1)
+ except OSError:
+ return
+
+def check_tmux_version(desired):
+ vernum = check_terminal_version("tmux")
+ if vernum and bb.utils.vercmp_string_op(vernum, desired, "<"):
+ return False
+ return vernum
def check_tmux_pane_size(tmux):
import subprocess as sub
# On older tmux versions (<1.9), return false. The reason
# is that there is no easy way to get the height of the active panel
# on current window without nested formats (available from version 1.9)
- vernum = check_terminal_version("tmux")
- if vernum and LooseVersion(vernum) < '1.9':
+ if not check_tmux_version('1.9'):
return False
try:
p = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux,
@@ -289,8 +307,12 @@ def check_terminal_version(terminalName):
vernum = ver.split(' ')[-1]
if ver.startswith('GNOME Terminal'):
vernum = ver.split(' ')[-1]
+ if ver.startswith('MATE Terminal'):
+ vernum = ver.split(' ')[-1]
if ver.startswith('tmux'):
vernum = ver.split()[-1]
+ if ver.startswith('tmux next-'):
+ vernum = ver.split()[-1][5:]
return vernum
def distro_name():
diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py
index 4ae58acfac..bbbabafbf6 100644
--- a/meta/lib/oe/types.py
+++ b/meta/lib/oe/types.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import errno
import re
import os
@@ -103,8 +107,13 @@ def boolean(value):
"""OpenEmbedded 'boolean' type
Valid values for true: 'yes', 'y', 'true', 't', '1'
- Valid values for false: 'no', 'n', 'false', 'f', '0'
+ Valid values for false: 'no', 'n', 'false', 'f', '0', None
"""
+ if value is None:
+ return False
+
+ if isinstance(value, bool):
+ return value
if not isinstance(value, str):
raise TypeError("boolean accepts a string, not '%s'" % type(value))
@@ -145,9 +154,33 @@ def path(value, relativeto='', normalize='true', mustexist='false'):
if boolean(mustexist):
try:
- open(value, 'r')
+ with open(value, 'r'):
+ pass
except IOError as exc:
if exc.errno == errno.ENOENT:
raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))
return value
+
+def is_x86(arch):
+ """
+ Check whether arch is x86 or x86_64
+ """
+ if arch.startswith('x86_') or re.match('i.*86', arch):
+ return True
+ else:
+ return False
+
+def qemu_use_kvm(kvm, target_arch):
+ """
+ Enable kvm if target_arch == build_arch or both of them are x86 archs.
+ """
+
+ use_kvm = False
+ if kvm and boolean(kvm):
+ build_arch = os.uname()[4]
+ if is_x86(build_arch) and is_x86(target_arch):
+ use_kvm = True
+ elif build_arch == target_arch:
+ use_kvm = True
+ return use_kvm
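A quick illustration of the intended behaviour, assuming the module is importable as oe.types (the architecture strings are only examples):

import oe.types

print(oe.types.is_x86("i686"))       # True
print(oe.types.is_x86("aarch64"))    # False

# On an x86_64 build host, KVM can be used for an i586 target, or for any
# target whose architecture matches the host exactly.
print(oe.types.qemu_use_kvm("1", "i586"))
print(oe.types.qemu_use_kvm("", "x86_64"))   # False: kvm not requested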
diff --git a/meta/lib/oe/useradd.py b/meta/lib/oe/useradd.py
new file mode 100644
index 0000000000..3caa3f851a
--- /dev/null
+++ b/meta/lib/oe/useradd.py
@@ -0,0 +1,69 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+import argparse
+import re
+
+class myArgumentParser(argparse.ArgumentParser):
+ def _print_message(self, message, file=None):
+ bb.warn("%s - %s: %s" % (d.getVar('PN'), pkg, message))
+
+ # This should never be called...
+ def exit(self, status=0, message=None):
+ message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN'), pkg))
+ error(message)
+
+ def error(self, message):
+ bb.fatal(message)
+
+def split_commands(params):
+ params = re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
+ # Remove any empty items
+ return [x for x in params if x]
+
+def split_args(params):
+ params = re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
+ # Remove any empty items
+ return [x for x in params if x]
+
+def build_useradd_parser():
+ # The following comes from --help on useradd from shadow
+ parser = myArgumentParser(prog='useradd')
+ parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
+ parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account")
+ parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account")
+ parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true")
+ parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account")
+ parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account")
+ parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account")
+ parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account")
+ parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
+ parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
+ parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
+ parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_const", const=True)
+ parser.add_argument("-M", "--no-create-home", dest="create_home", help="do not create the user's home directory", action="store_const", const=False)
+ parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
+ parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
+ parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
+ parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
+ parser.add_argument("-r", "--system", help="create a system account", action="store_true")
+ parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
+ parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
+ parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_const", const=True)
+ parser.add_argument("LOGIN", help="Login name of the new user")
+
+ return parser
+
+def build_groupadd_parser():
+ # The following comes from --help on groupadd from shadow
+ parser = myArgumentParser(prog='groupadd')
+ parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
+ parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group")
+ parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
+ parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
+ parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
+ parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
+ parser.add_argument("-r", "--system", help="create a system account", action="store_true")
+ parser.add_argument("GROUP", help="Group name of the new group")
+
+ return parser
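A minimal sketch of how these parsers are meant to be driven; the parameter string below is made up, and it assumes a bbclass-style context where oe.useradd is importable and the arguments are valid (parse errors are reported through bb.fatal):

import oe.useradd

# hypothetical USERADD_PARAM value for one package
params = "--system --home-dir /var/lib/foo --shell /bin/false foo; -u 1200 bar"

parser = oe.useradd.build_useradd_parser()
for cmd in oe.useradd.split_commands(params):
    opts = parser.parse_args(oe.useradd.split_args(cmd))
    print(opts.LOGIN, opts.system, opts.uid)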
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
index 330a5ff94a..46fc76c261 100644
--- a/meta/lib/oe/utils.py
+++ b/meta/lib/oe/utils.py
@@ -1,4 +1,10 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import subprocess
+import multiprocessing
+import traceback
def read_file(filename):
try:
@@ -23,6 +29,13 @@ def conditional(variable, checkvalue, truevalue, falsevalue, d):
else:
return falsevalue
+def vartrue(var, iftrue, iffalse, d):
+ import oe.types
+ if oe.types.boolean(d.getVar(var)):
+ return iftrue
+ else:
+ return iffalse
+
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
if float(d.getVar(variable)) <= float(checkvalue):
return truevalue
@@ -69,12 +82,12 @@ def prune_suffix(var, suffixes, d):
# See if var ends with any of the suffixes listed and
# remove it if found
for suffix in suffixes:
- if var.endswith(suffix):
- var = var.replace(suffix, "")
+ if suffix and var.endswith(suffix):
+ var = var[:-len(suffix)]
prefix = d.getVar("MLPREFIX")
if prefix and var.startswith(prefix):
- var = var.replace(prefix, "")
+ var = var[len(prefix):]
return var
@@ -86,17 +99,6 @@ def str_filter_out(f, str, d):
from re import match
return " ".join([x for x in str.split() if not match(f, x, 0)])
-def param_bool(cfg, field, dflt = None):
- """Lookup <field> in <cfg> map and convert it to a boolean; take
- <dflt> when this <field> does not exist"""
- value = cfg.get(field, dflt)
- strvalue = str(value).lower()
- if strvalue in ('yes', 'y', 'true', 't', '1'):
- return True
- elif strvalue in ('no', 'n', 'false', 'f', '0'):
- return False
- raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
-
def build_depends_string(depends, task):
"""Append a taskname to a string of dependencies as used by the [depends] flag"""
return " ".join(dep + ":" + task for dep in depends.split())
@@ -126,6 +128,92 @@ def features_backfill(var,d):
if addfeatures:
d.appendVar(var, " " + " ".join(addfeatures))
+def all_distro_features(d, features, truevalue="1", falsevalue=""):
+ """
+ Returns truevalue if *all* given features are set in DISTRO_FEATURES,
+ else falsevalue. The features can be given as single string or anything
+ that can be turned into a set.
+
+ This is a shorter, more flexible version of
+ bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d).
+
+ Without explicit true/false values it can be used directly where
+ Python expects a boolean:
+ if oe.utils.all_distro_features(d, "foo bar"):
+ bb.fatal("foo and bar are mutually exclusive DISTRO_FEATURES")
+
+ With just a truevalue, it can be used to include files that are meant to be
+ used only when requested via DISTRO_FEATURES:
+ require ${@ oe.utils.all_distro_features(d, "foo bar", "foo-and-bar.inc")}
+ """
+ return bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d)
+
+def any_distro_features(d, features, truevalue="1", falsevalue=""):
+ """
+ Returns truevalue if at least *one* of the given features is set in DISTRO_FEATURES,
+ else falsevalue. The features can be given as a single string or anything
+ that can be turned into a set.
+
+ This is a shorter, more flexible version of
+ bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d).
+
+ Without explicit true/false values it can be used directly where
+ Python expects a boolean:
+ if not oe.utils.any_distro_features(d, "foo bar"):
+ bb.fatal("foo, bar or both must be set in DISTRO_FEATURES")
+
+ With just a truevalue, it can be used to include files that are meant to be
+ used only when requested via DISTRO_FEATURES:
+ require ${@ oe.utils.any_distro_features(d, "foo bar", "foo-or-bar.inc")}
+
+ """
+ return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
+
+def parallel_make(d, makeinst=False):
+ """
+ Return the integer value for the number of parallel threads to use when
+ building, scraped out of PARALLEL_MAKE. If no parallelization option is
+ found, returns an empty string.
+
+ e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
+ """
+ if makeinst:
+ pm = (d.getVar('PARALLEL_MAKEINST') or '').split()
+ else:
+ pm = (d.getVar('PARALLEL_MAKE') or '').split()
+ # look for '-j' and throw other options (e.g. '-l') away
+ while pm:
+ opt = pm.pop(0)
+ if opt == '-j':
+ v = pm.pop(0)
+ elif opt.startswith('-j'):
+ v = opt[2:].strip()
+ else:
+ continue
+
+ return int(v)
+
+ return ''
+
+def parallel_make_argument(d, fmt, limit=None, makeinst=False):
+ """
+ Helper utility to construct a parallel make argument from the number of
+ parallel threads specified in PARALLEL_MAKE.
+
+ Returns the input format string `fmt` where a single '%d' will be expanded
+ with the number of parallel threads to use. If `limit` is specified, the
+ number of parallel threads will be no larger than it. If no parallelization
+ option is found in PARALLEL_MAKE, returns an empty string
+
+ e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return
+ "-n 10"
+ """
+ v = parallel_make(d, makeinst)
+ if v:
+ if limit:
+ v = min(limit, v)
+ return fmt % v
+ return ''
def packages_filter_out_system(d):
"""
@@ -133,12 +221,12 @@ def packages_filter_out_system(d):
PN-dbg PN-doc PN-locale-eb-gb removed.
"""
pn = d.getVar('PN')
- blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev')]
+ pkgfilter = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')]
localepkg = pn + "-locale-"
pkgs = []
for pkg in d.getVar('PACKAGES').split():
- if pkg not in blacklist and localepkg not in pkg:
+ if pkg not in pkgfilter and localepkg not in pkg:
pkgs.append(pkg)
return pkgs
@@ -160,9 +248,9 @@ def trim_version(version, num_parts=2):
trimmed = ".".join(parts[:num_parts])
return trimmed
-def cpu_count():
- import multiprocessing
- return multiprocessing.cpu_count()
+def cpu_count(at_least=1, at_most=64):
+ cpus = len(os.sched_getaffinity(0))
+ return max(min(cpus, at_most), at_least)
def execute_pre_post_process(d, cmds):
if cmds is None:
@@ -174,40 +262,111 @@ def execute_pre_post_process(d, cmds):
bb.note("Executing %s ..." % cmd)
bb.build.exec_func(cmd, d)
-def multiprocess_exec(commands, function):
- import signal
- import multiprocessing
-
- if not commands:
- return []
+# For each item in items, call the function 'target' with item as the first
+# argument, extraargs as the other arguments, and handle any exceptions in the
+# parent process
+def multiprocess_launch(target, items, d, extraargs=None):
- def init_worker():
- signal.signal(signal.SIGINT, signal.SIG_IGN)
+ class ProcessLaunch(multiprocessing.Process):
+ def __init__(self, *args, **kwargs):
+ multiprocessing.Process.__init__(self, *args, **kwargs)
+ self._pconn, self._cconn = multiprocessing.Pipe()
+ self._exception = None
+ self._result = None
- nproc = min(multiprocessing.cpu_count(), len(commands))
- pool = bb.utils.multiprocessingpool(nproc, init_worker)
- imap = pool.imap(function, commands)
-
- try:
- res = list(imap)
- pool.close()
- pool.join()
- results = []
- for result in res:
- if result is not None:
- results.append(result)
- return results
-
- except KeyboardInterrupt:
- pool.terminate()
- pool.join()
- raise
+ def run(self):
+ try:
+ ret = self._target(*self._args, **self._kwargs)
+ self._cconn.send((None, ret))
+ except Exception as e:
+ tb = traceback.format_exc()
+ self._cconn.send((e, tb))
+
+ def update(self):
+ if self._pconn.poll():
+ (e, tb) = self._pconn.recv()
+ if e is not None:
+ self._exception = (e, tb)
+ else:
+ self._result = tb
+
+ @property
+ def exception(self):
+ self.update()
+ return self._exception
+
+ @property
+ def result(self):
+ self.update()
+ return self._result
+
+ max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
+ launched = []
+ errors = []
+ results = []
+ items = list(items)
+ while (items and not errors) or launched:
+ if not errors and items and len(launched) < max_process:
+ args = (items.pop(),)
+ if extraargs is not None:
+ args = args + extraargs
+ p = ProcessLaunch(target=target, args=args)
+ p.start()
+ launched.append(p)
+ for q in launched:
+ # Have to manually call update() to avoid deadlocks. The pipe can be full and
+ # transfer stalled until we try and read the results object but the subprocess won't exit
+ # as it still has data to write (https://bugs.python.org/issue8426)
+ q.update()
+ # The finished processes are joined when calling is_alive()
+ if not q.is_alive():
+ if q.exception:
+ errors.append(q.exception)
+ if q.result:
+ results.append(q.result)
+ launched.remove(q)
+ # Paranoia doesn't hurt
+ for p in launched:
+ p.join()
+ if errors:
+ msg = ""
+ for (e, tb) in errors:
+ if isinstance(e, subprocess.CalledProcessError) and e.output:
+ msg = msg + str(e) + "\n"
+ msg = msg + "Subprocess output:"
+ msg = msg + e.output.decode("utf-8", errors="ignore")
+ else:
+ msg = msg + str(e) + ": " + str(tb) + "\n"
+ bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg)
+ return results
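A hedged usage sketch: each item is handed to the target in its own process, results are collected, and any child exception is reported via a single bb.fatal in the parent. The target function and file list below are made up; 'd' is the usual datastore available inside a task:

import hashlib
import oe.utils

def checksum_file(path):
    # runs in a child process; exceptions travel back to the parent over the pipe
    with open(path, "rb") as f:
        return (path, hashlib.sha256(f.read()).hexdigest())

results = oe.utils.multiprocess_launch(checksum_file, ["/etc/hostname", "/etc/hosts"], d)
for path, digest in results:
    bb.note("%s %s" % (digest, path))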
def squashspaces(string):
import re
- return re.sub("\s+", " ", string).strip()
-
-def format_pkg_list(pkg_dict, ret_format=None):
+ return re.sub(r"\s+", " ", string).strip()
+
+def rprovides_map(pkgdata_dir, pkg_dict):
+ # Map file -> pkg provider
+ rprov_map = {}
+
+ for pkg in pkg_dict:
+ path_to_pkgfile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
+ if not os.path.isfile(path_to_pkgfile):
+ continue
+ with open(path_to_pkgfile) as f:
+ for line in f:
+ if line.startswith('RPROVIDES') or line.startswith('FILERPROVIDES'):
+ # List all components provided by pkg.
+ # Exclude version strings, i.e. those starting with (
+ provides = [x for x in line.split()[1:] if not x.startswith('(')]
+ for prov in provides:
+ if prov in rprov_map:
+ rprov_map[prov].append(pkg)
+ else:
+ rprov_map[prov] = [pkg]
+
+ return rprov_map
+
+def format_pkg_list(pkg_dict, ret_format=None, pkgdata_dir=None):
output = []
if ret_format == "arch":
@@ -220,33 +379,100 @@ def format_pkg_list(pkg_dict, ret_format=None):
for pkg in sorted(pkg_dict):
output.append("%s %s %s" % (pkg, pkg_dict[pkg]["arch"], pkg_dict[pkg]["ver"]))
elif ret_format == "deps":
+ rprov_map = rprovides_map(pkgdata_dir, pkg_dict)
for pkg in sorted(pkg_dict):
for dep in pkg_dict[pkg]["deps"]:
- output.append("%s|%s" % (pkg, dep))
+ if dep in rprov_map:
+ # There could be multiple providers within the image
+ for pkg_provider in rprov_map[dep]:
+ output.append("%s|%s * %s [RPROVIDES]" % (pkg, pkg_provider, dep))
+ else:
+ output.append("%s|%s" % (pkg, dep))
else:
for pkg in sorted(pkg_dict):
output.append(pkg)
- return '\n'.join(output)
+ output_str = '\n'.join(output)
+
+ if output_str:
+ # make sure last line is newline terminated
+ output_str += '\n'
-def host_gcc_version(d):
+ return output_str
+
+
+# Helper function to get the host compiler version
+# Do not assume the compiler is gcc
+def get_host_compiler_version(d, taskcontextonly=False):
import re, subprocess
+ if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
+ return
+
compiler = d.getVar("BUILD_CC")
+ # Get rid of ccache since it is not present when parsing.
+ if compiler.startswith('ccache '):
+ compiler = compiler[7:]
+ try:
+ env = os.environ.copy()
+ # datastore PATH does not contain session PATH as set by environment-setup-...
+ # this breaks the install-buildtools use-case
+ # env["PATH"] = d.getVar("PATH")
+ output = subprocess.check_output("%s --version" % compiler, \
+ shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
+
+ match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
+ if not match:
+ bb.fatal("Can't get compiler version from %s --version output" % compiler)
+
+ version = match.group(1)
+ return compiler, version
+
+
+def host_gcc_version(d, taskcontextonly=False):
+ import re, subprocess
+
+ if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
+ return
+
+ compiler = d.getVar("BUILD_CC")
+ # Get rid of ccache since it is not present when parsing.
+ if compiler.startswith('ccache '):
+ compiler = compiler[7:]
try:
env = os.environ.copy()
env["PATH"] = d.getVar("PATH")
- output = subprocess.check_output("%s --version" % compiler, shell=True, env=env).decode("utf-8")
+ output = subprocess.check_output("%s --version" % compiler, \
+ shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
except subprocess.CalledProcessError as e:
bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
- match = re.match(".* (\d\.\d)\.\d.*", output.split('\n')[0])
+ match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
if not match:
bb.fatal("Can't get compiler version from %s --version output" % compiler)
version = match.group(1)
return "-%s" % version if version in ("4.8", "4.9") else ""
+
+def get_multilib_datastore(variant, d):
+ localdata = bb.data.createCopy(d)
+ if variant:
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", variant + "-")
+ else:
+ origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
+ if origdefault:
+ localdata.setVar("DEFAULTTUNE", origdefault)
+ overrides = localdata.getVar("OVERRIDES", False).split(":")
+ overrides = ":".join([x for x in overrides if not x.startswith("virtclass-multilib-")])
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", "")
+ return localdata
+
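A small example of what this helper is for, assuming a datastore 'd' from a multilib-enabled build ('lib32' is only an example variant):

import oe.utils

lib32_d = oe.utils.get_multilib_datastore("lib32", d)
print(lib32_d.getVar("MLPREFIX"))   # "lib32-"

# An empty variant returns a datastore with the multilib overrides removed
base_d = oe.utils.get_multilib_datastore("", d)
print(base_d.getVar("MLPREFIX"))    # ""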
#
# Python 2.7 doesn't have threaded pools (just multiprocessing)
# so implement a version here
@@ -257,8 +483,8 @@ from threading import Thread
class ThreadedWorker(Thread):
"""Thread executing tasks from a given tasks queue"""
- def __init__(self, tasks, worker_init, worker_end):
- Thread.__init__(self)
+ def __init__(self, tasks, worker_init, worker_end, name=None):
+ Thread.__init__(self, name=name)
self.tasks = tasks
self.daemon = True
@@ -282,19 +508,19 @@ class ThreadedWorker(Thread):
try:
func(self, *args, **kargs)
except Exception as e:
- print(e)
+ # Eat all exceptions
+ bb.mainlogger.debug("Worker task raised %s" % e, exc_info=e)
finally:
self.tasks.task_done()
class ThreadedPool:
"""Pool of threads consuming tasks from a queue"""
- def __init__(self, num_workers, num_tasks, worker_init=None,
- worker_end=None):
+ def __init__(self, num_workers, num_tasks, worker_init=None, worker_end=None, name="ThreadedPool-"):
self.tasks = Queue(num_tasks)
self.workers = []
- for _ in range(num_workers):
- worker = ThreadedWorker(self.tasks, worker_init, worker_end)
+ for i in range(num_workers):
+ worker = ThreadedWorker(self.tasks, worker_init, worker_end, name=name + str(i))
self.workers.append(worker)
def start(self):
@@ -311,18 +537,7 @@ class ThreadedPool:
for worker in self.workers:
worker.join()
-def write_ld_so_conf(d):
- # Some utils like prelink may not have the correct target library paths
- # so write an ld.so.conf to help them
- ldsoconf = d.expand("${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf")
- if os.path.exists(ldsoconf):
- bb.utils.remove(ldsoconf)
- bb.utils.mkdirhier(os.path.dirname(ldsoconf))
- with open(ldsoconf, "w") as f:
- f.write(d.getVar("base_libdir") + '\n')
- f.write(d.getVar("libdir") + '\n')
-
-class ImageQAFailed(bb.build.FuncFailed):
+class ImageQAFailed(Exception):
def __init__(self, description, name=None, logfile=None):
self.description = description
self.name = name
@@ -334,3 +549,38 @@ class ImageQAFailed(bb.build.FuncFailed):
msg = msg + ' (%s)' % self.description
return msg
+
+def sh_quote(string):
+ import shlex
+ return shlex.quote(string)
+
+def directory_size(root, blocksize=4096):
+ """
+ Calculate the size of the directory, taking into account hard links,
+ rounding up every size to multiples of the blocksize.
+ """
+ def roundup(size):
+ """
+ Round the size up to the nearest multiple of the block size.
+ """
+ import math
+ return math.ceil(size / blocksize) * blocksize
+
+ def getsize(filename):
+ """
+ Get the size of the filename, not following symlinks, taking into
+ account hard links.
+ """
+ stat = os.lstat(filename)
+ if stat.st_ino not in inodes:
+ inodes.add(stat.st_ino)
+ return stat.st_size
+ else:
+ return 0
+
+ inodes = set()
+ total = 0
+ for root, dirs, files in os.walk(root):
+ total += sum(roundup(getsize(os.path.join(root, name))) for name in files)
+ total += roundup(getsize(root))
+ return total
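For example (the path is hypothetical):

import oe.utils

# Total size in bytes of a rootfs directory, counting each hard-linked file
# only once and rounding every entry up to whole 4 KiB blocks.
size = oe.utils.directory_size("/tmp/deploy/rootfs")
print("%d KiB" % (size // 1024))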