Diffstat (limited to 'meta/lib/oe')
-rw-r--r--  meta/lib/oe/__init__.py                 4
-rw-r--r--  meta/lib/oe/buildhistory_analysis.py  312
-rw-r--r--  meta/lib/oe/cachedpath.py               2
-rw-r--r--  meta/lib/oe/classextend.py             20
-rw-r--r--  meta/lib/oe/classutils.py               5
-rw-r--r--  meta/lib/oe/copy_buildsystem.py        83
-rw-r--r--  meta/lib/oe/data.py                    38
-rw-r--r--  meta/lib/oe/distro_check.py           313
-rw-r--r--  meta/lib/oe/elf.py                    133
-rw-r--r--  meta/lib/oe/gpg_sign.py                89
-rw-r--r--  meta/lib/oe/license.py                 30
-rw-r--r--  meta/lib/oe/lsb.py                     91
-rw-r--r--  meta/lib/oe/maketype.py                12
-rw-r--r--  meta/lib/oe/manifest.py                36
-rw-r--r--  meta/lib/oe/package.py                230
-rw-r--r--  meta/lib/oe/package_manager.py       2077
-rw-r--r--  meta/lib/oe/packagedata.py             11
-rw-r--r--  meta/lib/oe/packagegroup.py            14
-rw-r--r--  meta/lib/oe/patch.py                  163
-rw-r--r--  meta/lib/oe/path.py                   120
-rw-r--r--  meta/lib/oe/prservice.py               29
-rw-r--r--  meta/lib/oe/qa.py                     111
-rw-r--r--  meta/lib/oe/recipeutils.py            453
-rw-r--r--  meta/lib/oe/rootfs.py                 330
-rw-r--r--  meta/lib/oe/sdk.py                    193
-rw-r--r--  meta/lib/oe/sstatesig.py              373
-rw-r--r--  meta/lib/oe/terminal.py               104
-rw-r--r--  meta/lib/oe/tests/__init__.py           0
-rw-r--r--  meta/lib/oe/tests/test_license.py      68
-rw-r--r--  meta/lib/oe/tests/test_path.py         89
-rw-r--r--  meta/lib/oe/tests/test_types.py        62
-rw-r--r--  meta/lib/oe/tests/test_utils.py        51
-rw-r--r--  meta/lib/oe/types.py                   37
-rw-r--r--  meta/lib/oe/useradd.py                 71
-rw-r--r--  meta/lib/oe/utils.py                  315
35 files changed, 3476 insertions, 2593 deletions
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py
index 3ad9513f40..4e7c09da04 100644
--- a/meta/lib/oe/__init__.py
+++ b/meta/lib/oe/__init__.py
@@ -1,2 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
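The two lines kept by this hunk are what make `oe` a path-extensible package: pkgutil.extend_path() merges every `oe/` directory found on sys.path into one package search path, which is how several layers can each ship modules under their own meta/lib/oe. A minimal sketch of the mechanism, with hypothetical layer paths:

    import sys
    import pkgutil

    # Hypothetical lib directories that two different layers would put on sys.path
    sys.path[:0] = ['/srv/poky/meta/lib', '/srv/meta-custom/lib']

    # extend_path() scans sys.path for directories containing an 'oe' subdirectory
    # and returns the combined package search path
    oe_path = pkgutil.extend_path(['/srv/poky/meta/lib/oe'], 'oe')
    print(oe_path)   # also lists '/srv/meta-custom/lib/oe' when that directory exists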
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py
index b6c0265c15..5b28774c98 100644
--- a/meta/lib/oe/buildhistory_analysis.py
+++ b/meta/lib/oe/buildhistory_analysis.py
@@ -1,8 +1,10 @@
# Report significant differences in the buildhistory repository since a specific revision
#
-# Copyright (C) 2012 Intel Corporation
+# Copyright (C) 2012-2013, 2016-2017 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Note: requires GitPython 0.3.1+
#
# You can use this from the command line by running scripts/buildhistory-diff
@@ -13,7 +15,11 @@ import os.path
import difflib
import git
import re
+import shlex
+import hashlib
+import collections
import bb.utils
+import bb.tinfoil
# How to display fields
@@ -28,15 +34,27 @@ ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
-# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
-related_fields = {}
-related_fields['RDEPENDS'] = ['DEPENDS']
-related_fields['RRECOMMENDS'] = ['DEPENDS']
-related_fields['FILELIST'] = ['FILES']
-related_fields['PKGSIZE'] = ['FILELIST']
-related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
-related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+colours = {
+ 'colour_default': '',
+ 'colour_add': '',
+ 'colour_remove': '',
+}
+
+def init_colours(use_colours):
+ global colours
+ if use_colours:
+ colours = {
+ 'colour_default': '\033[0m',
+ 'colour_add': '\033[1;32m',
+ 'colour_remove': '\033[1;31m',
+ }
+ else:
+ colours = {
+ 'colour_default': '',
+ 'colour_add': '',
+ 'colour_remove': '',
+ }
class ChangeRecord:
def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
@@ -45,7 +63,6 @@ class ChangeRecord:
self.oldvalue = oldvalue
self.newvalue = newvalue
self.monitored = monitored
- self.related = []
self.filechanges = None
def __str__(self):
@@ -69,24 +86,77 @@ class ChangeRecord:
pkglist.append(k)
return pkglist
+ def detect_renamed_dirs(aitems, bitems):
+ adirs = set(map(os.path.dirname, aitems))
+ bdirs = set(map(os.path.dirname, bitems))
+ files_ab = [(name, sorted(os.path.basename(item) for item in aitems if os.path.dirname(item) == name)) \
+ for name in adirs - bdirs]
+ files_ba = [(name, sorted(os.path.basename(item) for item in bitems if os.path.dirname(item) == name)) \
+ for name in bdirs - adirs]
+ renamed_dirs = []
+ for dir1, files1 in files_ab:
+ rename = False
+ for dir2, files2 in files_ba:
+ if files1 == files2 and not rename:
+ renamed_dirs.append((dir1,dir2))
+ # Make sure that we don't use this (dir, files) pair again.
+ files_ba.remove((dir2,files2))
+ # If a dir has already been found to have a rename, stop and go no further.
+ rename = True
+
+ # remove files that belong to renamed dirs from aitems and bitems
+ for dir1, dir2 in renamed_dirs:
+ aitems = [item for item in aitems if os.path.dirname(item) not in (dir1, dir2)]
+ bitems = [item for item in bitems if os.path.dirname(item) not in (dir1, dir2)]
+ return renamed_dirs, aitems, bitems
+
if self.fieldname in list_fields or self.fieldname in list_order_fields:
+ renamed_dirs = []
+ changed_order = False
if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
(depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
aitems = pkglist_combine(depvera)
bitems = pkglist_combine(depverb)
else:
- aitems = self.oldvalue.split()
- bitems = self.newvalue.split()
+ if self.fieldname == 'FILELIST':
+ aitems = shlex.split(self.oldvalue)
+ bitems = shlex.split(self.newvalue)
+ renamed_dirs, aitems, bitems = detect_renamed_dirs(aitems, bitems)
+ else:
+ aitems = self.oldvalue.split()
+ bitems = self.newvalue.split()
+
removed = list(set(aitems) - set(bitems))
added = list(set(bitems) - set(aitems))
+ if not removed and not added and self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
+ depvera = bb.utils.explode_dep_versions2(self.oldvalue, sort=False)
+ depverb = bb.utils.explode_dep_versions2(self.newvalue, sort=False)
+ for i, j in zip(depvera.items(), depverb.items()):
+ if i[0] != j[0]:
+ changed_order = True
+ break
+
+ lines = []
+ if renamed_dirs:
+ for dfrom, dto in renamed_dirs:
+ lines.append('directory renamed {colour_remove}{}{colour_default} -> {colour_add}{}{colour_default}'.format(dfrom, dto, **colours))
if removed or added:
if removed and not bitems:
- out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed))
+ lines.append('removed all items "{colour_remove}{}{colour_default}"'.format(' '.join(removed), **colours))
else:
- out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '')
+ if removed:
+ lines.append('removed "{colour_remove}{value}{colour_default}"'.format(value=' '.join(removed), **colours))
+ if added:
+ lines.append('added "{colour_add}{value}{colour_default}"'.format(value=' '.join(added), **colours))
+ else:
+ lines.append('changed order')
+
+ if not (removed or added or changed_order):
+ out = ''
else:
- out = '%s changed order' % self.fieldname
+ out = '%s: %s' % (self.fieldname, ', '.join(lines))
+
elif self.fieldname in numeric_fields:
aval = int(self.oldvalue or 0)
bval = int(self.newvalue or 0)
@@ -94,9 +164,9 @@ class ChangeRecord:
percentchg = ((bval - aval) / float(aval)) * 100
else:
percentchg = 100
- out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
+ out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default} ({}{:.0f}%)'.format(self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg, **colours)
elif self.fieldname in defaultval_map:
- out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
+ out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default}'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
if self.fieldname == 'PKG' and '[default]' in self.newvalue:
out += ' - may indicate debian renaming failure'
elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
@@ -111,34 +181,30 @@ class ChangeRecord:
diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
out += '\n '.join(list(diff)[2:])
out += '\n --'
- elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
- fieldname = self.fieldname
- if '/image-files/' in self.path:
- fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
- out = 'Changes to %s:\n ' % fieldname
- else:
- if outer:
- prefix = 'Changes to %s ' % self.path
- out = '(%s):\n ' % self.fieldname
- if self.filechanges:
- out += '\n '.join(['%s' % i for i in self.filechanges])
+ elif self.fieldname in img_monitor_files or '/image-files/' in self.path or self.fieldname == "sysroot":
+ if self.filechanges or (self.oldvalue and self.newvalue):
+ fieldname = self.fieldname
+ if '/image-files/' in self.path:
+ fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
+ out = 'Changes to %s:\n ' % fieldname
+ else:
+ if outer:
+ prefix = 'Changes to %s ' % self.path
+ out = '(%s):\n ' % self.fieldname
+ if self.filechanges:
+ out += '\n '.join(['%s' % i for i in self.filechanges])
+ else:
+ alines = self.oldvalue.splitlines()
+ blines = self.newvalue.splitlines()
+ diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
+ out += '\n '.join(list(diff))
+ out += '\n --'
else:
- alines = self.oldvalue.splitlines()
- blines = self.newvalue.splitlines()
- diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
- out += '\n '.join(list(diff))
- out += '\n --'
+ out = ''
else:
- out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)
-
- if self.related:
- for chg in self.related:
- if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
- continue
- for line in chg._str_internal(False).splitlines():
- out += '\n * %s' % line
+ out = '{} changed from "{colour_remove}{}{colour_default}" to "{colour_add}{}{colour_default}"'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
- return '%s%s' % (prefix, out)
+ return '%s%s' % (prefix, out) if out else ''
class FileChange:
changetype_add = 'A'
@@ -216,7 +282,7 @@ def file_list_to_dict(lines):
return adict
-def compare_file_lists(alines, blines):
+def compare_file_lists(alines, blines, compare_ownership=True):
adict = file_list_to_dict(alines)
bdict = file_list_to_dict(blines)
filechanges = []
@@ -228,16 +294,20 @@ def compare_file_lists(alines, blines):
newvalue = newsplitv[0][0]
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
+
# Check permissions
oldvalue = splitv[0][1:]
newvalue = newsplitv[0][1:]
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
- # Check owner/group
- oldvalue = '%s/%s' % (splitv[1], splitv[2])
- newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
- if oldvalue != newvalue:
- filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
+
+ if compare_ownership:
+ # Check owner/group
+ oldvalue = '%s/%s' % (splitv[1], splitv[2])
+ newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
+ if oldvalue != newvalue:
+ filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
+
# Check symlink target
if newsplitv[0][0] == 'l':
if len(splitv) > 3:
@@ -343,15 +413,19 @@ def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
if abs(percentchg) < monitor_numeric_threshold:
continue
elif (not report_all) and key in list_fields:
- if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
+ if key == "FILELIST" and (path.endswith("-dbg") or path.endswith("-src")) and bstr.strip() != '':
continue
if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
(depvera, depverb) = compare_pkg_lists(astr, bstr)
if depvera == depverb:
continue
- alist = astr.split()
+ if key == 'FILELIST':
+ alist = shlex.split(astr)
+ blist = shlex.split(bstr)
+ else:
+ alist = astr.split()
+ blist = bstr.split()
alist.sort()
- blist = bstr.split()
blist.sort()
# We don't care about the removal of self-dependencies
if pkgname in alist and not pkgname in blist:
@@ -382,13 +456,116 @@ def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
return changes
-def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False):
+def compare_siglists(a_blob, b_blob, taskdiff=False):
+ # FIXME collapse down a recipe's tasks?
+ alines = a_blob.data_stream.read().decode('utf-8').splitlines()
+ blines = b_blob.data_stream.read().decode('utf-8').splitlines()
+ keys = []
+ pnmap = {}
+ def readsigs(lines):
+ sigs = {}
+ for line in lines:
+ linesplit = line.split()
+ if len(linesplit) > 2:
+ sigs[linesplit[0]] = linesplit[2]
+ if not linesplit[0] in keys:
+ keys.append(linesplit[0])
+ pnmap[linesplit[1]] = linesplit[0].rsplit('.', 1)[0]
+ return sigs
+ adict = readsigs(alines)
+ bdict = readsigs(blines)
+ out = []
+
+ changecount = 0
+ addcount = 0
+ removecount = 0
+ if taskdiff:
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+
+ changes = collections.OrderedDict()
+
+ def compare_hashfiles(pn, taskname, hash1, hash2):
+ hashes = [hash1, hash2]
+ hashfiles = bb.siggen.find_siginfo(pn, taskname, hashes, tinfoil.config_data)
+
+ if not taskname:
+ (pn, taskname) = pn.rsplit('.', 1)
+ pn = pnmap.get(pn, pn)
+ desc = '%s.%s' % (pn, taskname)
+
+ if len(hashfiles) == 0:
+ out.append("Unable to find matching sigdata for %s with hashes %s or %s" % (desc, hash1, hash2))
+ elif not hash1 in hashfiles:
+ out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash1))
+ elif not hash2 in hashfiles:
+ out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash2))
+ else:
+ out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, collapsed=True)
+ for line in out2:
+ m = hashlib.sha256()
+ m.update(line.encode('utf-8'))
+ entry = changes.get(m.hexdigest(), (line, []))
+ if desc not in entry[1]:
+ changes[m.hexdigest()] = (line, entry[1] + [desc])
+
+ # Define recursion callback
+ def recursecb(key, hash1, hash2):
+ compare_hashfiles(key, None, hash1, hash2)
+ return []
+
+ for key in keys:
+ siga = adict.get(key, None)
+ sigb = bdict.get(key, None)
+ if siga is not None and sigb is not None and siga != sigb:
+ changecount += 1
+ (pn, taskname) = key.rsplit('.', 1)
+ compare_hashfiles(pn, taskname, siga, sigb)
+ elif siga is None:
+ addcount += 1
+ elif sigb is None:
+ removecount += 1
+ for key, item in changes.items():
+ line, tasks = item
+ if len(tasks) == 1:
+ desc = tasks[0]
+ elif len(tasks) == 2:
+ desc = '%s and %s' % (tasks[0], tasks[1])
+ else:
+ desc = '%s and %d others' % (tasks[-1], len(tasks)-1)
+ out.append('%s: %s' % (desc, line))
+ else:
+ for key in keys:
+ siga = adict.get(key, None)
+ sigb = bdict.get(key, None)
+ if siga is not None and sigb is not None and siga != sigb:
+ out.append('%s changed from %s to %s' % (key, siga, sigb))
+ changecount += 1
+ elif siga is None:
+ out.append('%s was added' % key)
+ addcount += 1
+ elif sigb is None:
+ out.append('%s was removed' % key)
+ removecount += 1
+ out.append('Summary: %d tasks added, %d tasks removed, %d tasks modified (%.1f%%)' % (addcount, removecount, changecount, (changecount / float(len(bdict)) * 100)))
+ return '\n'.join(out)
+
+
+def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False,
+ sigs=False, sigsdiff=False, exclude_path=None):
repo = git.Repo(repopath)
assert repo.bare == False
commit = repo.commit(revision1)
diff = commit.diff(revision2)
changes = []
+
+ if sigs or sigsdiff:
+ for d in diff.iter_change_type('M'):
+ if d.a_blob.path == 'siglist.txt':
+ changes.append(compare_siglists(d.a_blob, d.b_blob, taskdiff=sigsdiff))
+ return changes
+
for d in diff.iter_change_type('M'):
path = os.path.dirname(d.a_blob.path)
if path.startswith('packages/'):
@@ -398,6 +575,15 @@ def process_changes(repopath, revision1, revision2='HEAD', report_all=False, rep
elif filename.startswith('latest.'):
chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
changes.append(chg)
+ elif filename == 'sysroot':
+ alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
+ blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
+ filechanges = compare_file_lists(alines,blines, compare_ownership=False)
+ if filechanges:
+ chg = ChangeRecord(path, filename, None, None, True)
+ chg.filechanges = filechanges
+ changes.append(chg)
+
elif path.startswith('images/'):
filename = os.path.basename(d.a_blob.path)
if filename in img_monitor_files:
@@ -457,16 +643,18 @@ def process_changes(repopath, revision1, revision2='HEAD', report_all=False, rep
chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read().decode('utf-8'), '', True)
changes.append(chg)
- # Link related changes
- for chg in changes:
- if chg.monitored:
- for chg2 in changes:
- # (Check dirname in the case of fields from recipe info files)
- if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
- if chg2.fieldname in related_fields.get(chg.fieldname, []):
- chg.related.append(chg2)
- elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
- chg.related.append(chg2)
+ # filter out unwanted paths
+ if exclude_path:
+ for chg in changes:
+ if chg.filechanges:
+ fchgs = []
+ for fchg in chg.filechanges:
+ for epath in exclude_path:
+ if fchg.path.startswith(epath):
+ break
+ else:
+ fchgs.append(fchg)
+ chg.filechanges = fchgs
if report_all:
return changes
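The new detect_renamed_dirs() helper above turns many per-file removals and additions into a single "directory renamed" line: a directory present only in the old FILELIST is paired with one present only in the new FILELIST when both contain exactly the same basenames. A simplified, standalone rendering of that heuristic (not a drop-in copy of the nested function):

    import os

    def pair_renamed_dirs(old_files, new_files):
        old_dirs = set(map(os.path.dirname, old_files))
        new_dirs = set(map(os.path.dirname, new_files))
        def basenames(d, files):
            return sorted(os.path.basename(f) for f in files if os.path.dirname(f) == d)
        # Directories that only appear in the new list are rename candidates
        candidates = [(d, basenames(d, new_files)) for d in new_dirs - old_dirs]
        renames = []
        for gone in old_dirs - new_dirs:
            for cand in candidates:
                if basenames(gone, old_files) == cand[1]:
                    renames.append((gone, cand[0]))
                    candidates.remove(cand)   # never reuse a target directory
                    break
        return renames

    print(pair_renamed_dirs(
        ['/usr/lib/python3.7/os.py', '/usr/lib/python3.7/re.py'],
        ['/usr/lib/python3.8/os.py', '/usr/lib/python3.8/re.py']))
    # [('/usr/lib/python3.7', '/usr/lib/python3.8')]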
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py
index 0840cc4c3f..254257a83f 100644
--- a/meta/lib/oe/cachedpath.py
+++ b/meta/lib/oe/cachedpath.py
@@ -1,4 +1,6 @@
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Based on standard python library functions but avoid
# repeated stat calls. It's assumed the files will not change out from under us
# so we can cache stat calls.
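The header comment states the whole premise of cachedpath.py: stat() results can be memoized because the tree is assumed not to change during a build. A toy illustration of that caching pattern (not the real CachedPath API):

    import os
    import stat

    class TinyStatCache:
        """Cache os.stat() per path; only valid while the tree is unchanging."""
        def __init__(self):
            self._cache = {}

        def stat(self, path):
            if path not in self._cache:
                try:
                    self._cache[path] = os.stat(path)
                except OSError:
                    self._cache[path] = None   # remember misses as well
            return self._cache[path]

        def isdir(self, path):
            st = self.stat(path)
            return st is not None and stat.S_ISDIR(st.st_mode)

    c = TinyStatCache()
    c.isdir('/tmp')   # hits the filesystem
    c.isdir('/tmp')   # answered from the cache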
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py
index 4c8a00070c..f02fbe9fba 100644
--- a/meta/lib/oe/classextend.py
+++ b/meta/lib/oe/classextend.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import collections
class ClassExtender(object):
@@ -20,12 +24,14 @@ class ClassExtender(object):
if not subs.startswith(self.extname):
return "virtual/" + self.extname + "-" + subs
return name
+ if name.startswith("/"):
+ return name
if not name.startswith(self.extname):
return self.extname + "-" + name
return name
def map_variable(self, varname, setvar = True):
- var = self.d.getVar(varname, True)
+ var = self.d.getVar(varname)
if not var:
return ""
var = var.split()
@@ -38,7 +44,7 @@ class ClassExtender(object):
return newdata
def map_regexp_variable(self, varname, setvar = True):
- var = self.d.getVar(varname, True)
+ var = self.d.getVar(varname)
if not var:
return ""
var = var.split()
@@ -60,7 +66,7 @@ class ClassExtender(object):
return dep
else:
# Do not extend those that already have a multilib prefix
- var = self.d.getVar("MULTILIB_VARIANTS", True)
+ var = self.d.getVar("MULTILIB_VARIANTS")
if var:
var = var.split()
for v in var:
@@ -74,7 +80,7 @@ class ClassExtender(object):
varname = varname + "_" + suffix
orig = self.d.getVar("EXTENDPKGV", False)
self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
- deps = self.d.getVar(varname, True)
+ deps = self.d.getVar(varname)
if not deps:
self.d.setVar("EXTENDPKGV", orig)
return
@@ -87,7 +93,7 @@ class ClassExtender(object):
self.d.setVar("EXTENDPKGV", orig)
def map_packagevars(self):
- for pkg in (self.d.getVar("PACKAGES", True).split() + [""]):
+ for pkg in (self.d.getVar("PACKAGES").split() + [""]):
self.map_depends_variable("RDEPENDS", pkg)
self.map_depends_variable("RRECOMMENDS", pkg)
self.map_depends_variable("RSUGGESTS", pkg)
@@ -97,7 +103,7 @@ class ClassExtender(object):
self.map_depends_variable("PKG", pkg)
def rename_packages(self):
- for pkg in (self.d.getVar("PACKAGES", True) or "").split():
+ for pkg in (self.d.getVar("PACKAGES") or "").split():
if pkg.startswith(self.extname):
self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
continue
@@ -114,7 +120,7 @@ class NativesdkClassExtender(ClassExtender):
def map_depends(self, dep):
if dep.startswith(self.extname):
return dep
- if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
+ if dep.endswith(("-gcc", "-g++")):
return dep + "-crosssdk"
elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
return dep
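One behavioural change here is easy to miss: extend_name() now passes absolute paths through untouched instead of prefixing them. A condensed rendering of the mapping rule as it reads after this patch, using 'nativesdk' as an example extension name (behaviour inferred from the hunks above):

    def extend_name(name, extname='nativesdk'):
        if name.startswith('virtual/'):
            subs = name.split('/', 1)[1]
            if not subs.startswith(extname):
                return 'virtual/' + extname + '-' + subs
            return name
        if name.startswith('/'):            # new: file paths are left alone
            return name
        if not name.startswith(extname):
            return extname + '-' + name
        return name

    assert extend_name('zlib') == 'nativesdk-zlib'
    assert extend_name('virtual/libc') == 'virtual/nativesdk-libc'
    assert extend_name('/usr/bin/perl') == '/usr/bin/perl'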
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py
index e7856c86f2..08bb66b365 100644
--- a/meta/lib/oe/classutils.py
+++ b/meta/lib/oe/classutils.py
@@ -1,3 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
class ClassRegistryMeta(type):
"""Give each ClassRegistry their own registry"""
@@ -36,7 +39,7 @@ abstract base classes out of the registry)."""
@classmethod
def prioritized(tcls):
return sorted(list(tcls.registry.values()),
- key=lambda v: v.priority, reverse=True)
+ key=lambda v: (v.priority, v.name), reverse=True)
def unregister(cls):
for key in cls.registry.keys():
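Adding the class name as a secondary sort key makes prioritized() deterministic when two registered classes share a priority; with the priority alone, ties came back in whatever order the registry dict happened to yield. A two-line demonstration of the effect:

    classes = [('screen', 1), ('tmux', 1), ('xterm', 2)]
    ordered = sorted(classes, key=lambda v: (v[1], v[0]), reverse=True)
    print(ordered)   # [('xterm', 2), ('tmux', 1), ('screen', 1)], same order every run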
diff --git a/meta/lib/oe/copy_buildsystem.py b/meta/lib/oe/copy_buildsystem.py
index afaff68598..31a84f5b06 100644
--- a/meta/lib/oe/copy_buildsystem.py
+++ b/meta/lib/oe/copy_buildsystem.py
@@ -1,14 +1,28 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# This class should provide easy access to the different aspects of the
# buildsystem such as layers, bitbake location, etc.
+#
+# SDK_LAYERS_EXCLUDE: Layers which will be excluded from the SDK layers.
+# SDK_LAYERS_EXCLUDE_PATTERN: Similar to SDK_LAYERS_EXCLUDE, but supports
+#                             Python regular expressions, space separated,
+#                             e.g.: ".*-downloads closed-.*"
+#
+
import stat
import shutil
def _smart_copy(src, dest):
+ import subprocess
# smart_copy will choose the correct function depending on whether the
# source is a file or a directory.
mode = os.stat(src).st_mode
if stat.S_ISDIR(mode):
- shutil.copytree(src, dest, symlinks=True, ignore=shutil.ignore_patterns('.git'))
+ bb.utils.mkdirhier(dest)
+ cmd = "tar --exclude='.git' --xattrs --xattrs-include='*' -chf - -C %s -p . \
+ | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
else:
shutil.copyfile(src, dest)
shutil.copymode(src, dest)
@@ -17,23 +31,42 @@ class BuildSystem(object):
def __init__(self, context, d):
self.d = d
self.context = context
- self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS', True).split()]
- self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE', True) or "").split()
+ self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS').split()]
+ self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE') or "").split()
+ self.layers_exclude_pattern = d.getVar('SDK_LAYERS_EXCLUDE_PATTERN')
def copy_bitbake_and_layers(self, destdir, workspace_name=None):
+ import re
# Copy in all metadata layers + bitbake (as repositories)
+ copied_corebase = None
layers_copied = []
bb.utils.mkdirhier(destdir)
layers = list(self.layerdirs)
- corebase = os.path.abspath(self.d.getVar('COREBASE', True))
+ corebase = os.path.abspath(self.d.getVar('COREBASE'))
layers.append(corebase)
+ # Get relationship between TOPDIR and COREBASE
+ # Layers should respect it
+ corebase_relative = os.path.dirname(os.path.relpath(os.path.abspath(self.d.getVar('TOPDIR')), corebase))
+ # The bitbake build system uses the meta-skeleton layer as a layout
+ # for common recipes, e.g. the recipetool script to create kernel recipes.
+ # Add the meta-skeleton layer to be included as part of the eSDK installation
+ layers.append(os.path.join(corebase, 'meta-skeleton'))
# Exclude layers
for layer_exclude in self.layers_exclude:
if layer_exclude in layers:
+ bb.note('Excluded %s from sdk layers since it is in SDK_LAYERS_EXCLUDE' % layer_exclude)
layers.remove(layer_exclude)
+ if self.layers_exclude_pattern:
+ layers_cp = layers[:]
+ for pattern in self.layers_exclude_pattern.split():
+ for layer in layers_cp:
+ if re.match(pattern, layer):
+ bb.note('Excluded %s from sdk layers since it matched SDK_LAYERS_EXCLUDE_PATTERN' % layer)
+ layers.remove(layer)
+
workspace_newname = workspace_name
if workspace_newname:
layernames = [os.path.basename(layer) for layer in layers]
@@ -42,7 +75,7 @@ class BuildSystem(object):
extranum += 1
workspace_newname = '%s-%d' % (workspace_name, extranum)
- corebase_files = self.d.getVar('COREBASE_FILES', True).split()
+ corebase_files = self.d.getVar('COREBASE_FILES').split()
corebase_files = [corebase + '/' +x for x in corebase_files]
# Make sure bitbake goes in
bitbake_dir = bb.__file__.rsplit('/', 3)[0]
@@ -67,22 +100,31 @@ class BuildSystem(object):
layerdestpath = destdir
if corebase == os.path.dirname(layer):
layerdestpath += '/' + os.path.basename(corebase)
+ else:
+ layer_relative = os.path.relpath(layer, corebase)
+ if os.path.dirname(layer_relative) == corebase_relative:
+ layer_relative = os.path.dirname(corebase_relative) + '/' + layernewname
+ layer_relative = os.path.basename(corebase) + '/' + layer_relative
+ if os.path.dirname(layer_relative) != layernewname:
+ layerdestpath += '/' + os.path.dirname(layer_relative)
+
layerdestpath += '/' + layernewname
layer_relative = os.path.relpath(layerdestpath,
destdir)
- layers_copied.append(layer_relative)
-
# Treat corebase as special since it typically will contain
# build directories or other custom items.
if corebase == layer:
+ copied_corebase = layer_relative
bb.utils.mkdirhier(layerdestpath)
for f in corebase_files:
f_basename = os.path.basename(f)
destname = os.path.join(layerdestpath, f_basename)
_smart_copy(f, destname)
else:
- if os.path.exists(layerdestpath):
+ layers_copied.append(layer_relative)
+
+ if os.path.exists(os.path.join(layerdestpath, 'conf/layer.conf')):
bb.note("Skipping layer %s, already handled" % layer)
else:
_smart_copy(layer, layerdestpath)
@@ -96,7 +138,7 @@ class BuildSystem(object):
# Drop all bbappends except the one for the image the SDK is being built for
# (because of externalsrc, the workspace bbappends will interfere with the
# locked signatures if present, and we don't need them anyway)
- image_bbappend = os.path.splitext(os.path.basename(self.d.getVar('FILE', True)))[0] + '.bbappend'
+ image_bbappend = os.path.splitext(os.path.basename(self.d.getVar('FILE')))[0] + '.bbappend'
appenddir = os.path.join(layerdestpath, 'appends')
if os.path.isdir(appenddir):
for fn in os.listdir(appenddir):
@@ -119,15 +161,23 @@ class BuildSystem(object):
line = line.replace('workspacelayer', workspace_newname)
f.write(line)
- return layers_copied
+ # meta-skeleton layer is added as part of the build system
+ # but not as a layer included in the build, therefore it is
+ # not reported to the function caller.
+ for layer in layers_copied:
+ if layer.endswith('/meta-skeleton'):
+ layers_copied.remove(layer)
+ break
+
+ return copied_corebase, layers_copied
def generate_locked_sigs(sigfile, d):
bb.utils.mkdirhier(os.path.dirname(sigfile))
depd = d.getVar('BB_TASKDEPDATA', False)
- tasks = ['%s.%s' % (v[2], v[1]) for v in depd.values()]
+ tasks = ['%s:%s' % (v[2], v[1]) for v in depd.values()]
bb.parse.siggen.dump_lockedsigs(sigfile, tasks)
-def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output):
+def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, onlynative, pruned_output):
with open(lockedsigs, 'r') as infile:
bb.utils.mkdirhier(os.path.dirname(pruned_output))
with open(pruned_output, 'w') as f:
@@ -137,7 +187,11 @@ def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output
if line.endswith('\\\n'):
splitval = line.strip().split(':')
if not splitval[1] in excluded_tasks and not splitval[0] in excluded_targets:
- f.write(line)
+ if onlynative:
+ if 'nativesdk' in splitval[0]:
+ f.write(line)
+ else:
+ f.write(line)
else:
f.write(line)
invalue = False
@@ -204,7 +258,7 @@ def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cac
import shutil
bb.note('Generating sstate-cache...')
- nativelsbstring = d.getVar('NATIVELSBSTRING', True)
+ nativelsbstring = d.getVar('NATIVELSBSTRING')
bb.process.run("gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
if fixedlsbstring and nativelsbstring != fixedlsbstring:
nativedir = output_sstate_cache + '/' + nativelsbstring
@@ -235,6 +289,7 @@ def check_sstate_task_list(d, targets, filteroutfile, cmdprefix='', cwd=None, lo
cmd = "%sBB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
env = dict(d.getVar('BB_ORIGENV', False))
env.pop('BUILDDIR', '')
+ env.pop('BBPATH', '')
pathitems = env['PATH'].split(':')
env['PATH'] = ':'.join([item for item in pathitems if not item.endswith('/bitbake/bin')])
bb.process.run(cmd, stderr=subprocess.STDOUT, env=env, cwd=cwd, executable='/bin/bash')
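Note how the SDK_LAYERS_EXCLUDE_PATTERN loop above iterates over a copy of the layer list (layers[:]) so that removing entries during iteration stays safe, and that re.match() anchors each pattern at the start of the full layer path. A standalone sketch of the same filtering with hypothetical paths:

    import re

    layers = ['/srv/poky/meta', '/srv/poky/meta-downloads', '/srv/poky/meta-closed-bsp']
    patterns = '.*-downloads .*closed-.*'.split()

    for pattern in patterns:
        for layer in layers[:]:        # iterate over a copy while removing
            if re.match(pattern, layer):
                layers.remove(layer)

    print(layers)   # ['/srv/poky/meta']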
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py
index ee48950a82..602130a904 100644
--- a/meta/lib/oe/data.py
+++ b/meta/lib/oe/data.py
@@ -1,9 +1,14 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import json
import oe.maketype
def typed_value(key, d):
"""Construct a value for the specified metadata variable, using its flags
to determine the type and parameters for construction."""
- var_type = d.getVarFlag(key, 'type', True)
+ var_type = d.getVarFlag(key, 'type')
flags = d.getVarFlags(key)
if flags is not None:
flags = dict((flag, d.expand(value))
@@ -12,6 +17,35 @@ def typed_value(key, d):
flags = {}
try:
- return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
+ return oe.maketype.create(d.getVar(key) or '', var_type, **flags)
except (TypeError, ValueError) as exc:
bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
+
+def export2json(d, json_file, expand=True, searchString="",replaceString=""):
+ data2export = {}
+ keys2export = []
+
+ for key in d.keys():
+ if key.startswith("_"):
+ continue
+ elif key.startswith("BB"):
+ continue
+ elif key.startswith("B_pn"):
+ continue
+ elif key.startswith("do_"):
+ continue
+ elif d.getVarFlag(key, "func"):
+ continue
+
+ keys2export.append(key)
+
+ for key in keys2export:
+ try:
+ data2export[key] = d.getVar(key, expand).replace(searchString,replaceString)
+ except bb.data_smart.ExpansionError:
+ data2export[key] = ''
+ except AttributeError:
+ pass
+
+ with open(json_file, "w") as f:
+ json.dump(data2export, f, skipkeys=True, indent=4, sort_keys=True)
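Before serialising, export2json() drops private, BitBake-internal and function variables. The key filter is simple enough to show standalone (a sketch of the logic above, not the function itself):

    import json

    def filter_keys(keys, is_func=lambda k: False):
        keep = []
        for key in keys:
            # same exclusions as export2json(): private vars, BitBake internals,
            # per-recipe build state and shell/python functions
            if key.startswith(('_', 'BB', 'B_pn', 'do_')) or is_func(key):
                continue
            keep.append(key)
        return keep

    keys = ['PN', 'PV', 'BB_NUMBER_THREADS', '_hidden', 'do_compile']
    print(json.dumps(filter_keys(keys)))   # ["PN", "PV"]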
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py
index 87c52fae9c..88e46c354d 100644
--- a/meta/lib/oe/distro_check.py
+++ b/meta/lib/oe/distro_check.py
@@ -1,32 +1,21 @@
-from contextlib import contextmanager
-
-from bb.utils import export_proxies
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
def create_socket(url, d):
import urllib
+ from bb.utils import export_proxies
- socket = None
- try:
- export_proxies(d)
- socket = urllib.request.urlopen(url)
- except:
- bb.warn("distro_check: create_socket url %s can't access" % url)
-
- return socket
+ export_proxies(d)
+ return urllib.request.urlopen(url)
def get_links_from_url(url, d):
"Return all the href links found on the web location"
from bs4 import BeautifulSoup, SoupStrainer
+ soup = BeautifulSoup(create_socket(url,d), "html.parser", parse_only=SoupStrainer("a"))
hyperlinks = []
-
- webpage = ''
- sock = create_socket(url,d)
- if sock:
- webpage = sock.read()
-
- soup = BeautifulSoup(webpage, "html.parser", parse_only=SoupStrainer("a"))
for line in soup.find_all('a', href=True):
hyperlinks.append(line['href'].strip('/'))
return hyperlinks
@@ -37,6 +26,7 @@ def find_latest_numeric_release(url, d):
maxstr=""
for link in get_links_from_url(url, d):
try:
+ # TODO use LooseVersion
release = float(link)
except:
release = 0
@@ -47,144 +37,105 @@ def find_latest_numeric_release(url, d):
def is_src_rpm(name):
"Check if the link is pointing to a src.rpm file"
- if name[-8:] == ".src.rpm":
- return True
- else:
- return False
+ return name.endswith(".src.rpm")
def package_name_from_srpm(srpm):
"Strip out the package name from the src.rpm filename"
- strings = srpm.split('-')
- package_name = strings[0]
- for i in range(1, len (strings) - 1):
- str = strings[i]
- if not str[0].isdigit():
- package_name += '-' + str
- return package_name
-
-def clean_package_list(package_list):
- "Removes multiple entries of packages and sorts the list"
- set = {}
- map(set.__setitem__, package_list, [])
- return set.keys()
-
-def get_latest_released_meego_source_package_list(d):
- "Returns list of all the name os packages in the latest meego distro"
-
- package_names = []
- try:
- f = open("/tmp/Meego-1.1", "r")
- for line in f:
- package_names.append(line[:-1] + ":" + "main") # Also strip the '\n' at the end
- except IOError: pass
- package_list=clean_package_list(package_names)
- return "1.0", package_list
+ # ca-certificates-2016.2.7-1.0.fc24.src.rpm
+ # ^name ^ver ^release^removed
+ (name, version, release) = srpm.replace(".src.rpm", "").rsplit("-", 2)
+ return name
def get_source_package_list_from_url(url, section, d):
"Return a sectioned list of package names from a URL list"
bb.note("Reading %s: %s" % (url, section))
links = get_links_from_url(url, d)
- srpms = list(filter(is_src_rpm, links))
- names_list = list(map(package_name_from_srpm, srpms))
+ srpms = filter(is_src_rpm, links)
+ names_list = map(package_name_from_srpm, srpms)
- new_pkgs = []
+ new_pkgs = set()
for pkgs in names_list:
- new_pkgs.append(pkgs + ":" + section)
-
+ new_pkgs.add(pkgs + ":" + section)
return new_pkgs
+def get_source_package_list_from_url_by_letter(url, section, d):
+ import string
+ from urllib.error import HTTPError
+ packages = set()
+ for letter in (string.ascii_lowercase + string.digits):
+ # Not all subfolders may exist, so silently handle 404
+ try:
+ packages |= get_source_package_list_from_url(url + "/" + letter, section, d)
+ except HTTPError as e:
+ if e.code != 404: raise
+ return packages
+
def get_latest_released_fedora_source_package_list(d):
"Returns list of all the name os packages in the latest fedora distro"
latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/", d)
-
- package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main", d)
-
-# package_names += get_source_package_list_from_url("http://download.fedora.redhat.com/pub/fedora/linux/releases/%s/Everything/source/SPRMS/" % latest, "everything")
- package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates", d)
-
- package_list=clean_package_list(package_names)
-
- return latest, package_list
+ package_names = get_source_package_list_from_url_by_letter("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Everything/source/tree/Packages/" % latest, "main", d)
+ package_names |= get_source_package_list_from_url_by_letter("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates", d)
+ return latest, package_names
def get_latest_released_opensuse_source_package_list(d):
"Returns list of all the name os packages in the latest opensuse distro"
- latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/",d)
-
- package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main", d)
- package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates", d)
-
- package_list=clean_package_list(package_names)
- return latest, package_list
+ latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/leap", d)
-def get_latest_released_mandriva_source_package_list(d):
- "Returns list of all the name os packages in the latest mandriva distro"
- latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/", d)
- package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main", d)
-# package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/contrib/release/" % latest, "contrib")
- package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates", d)
+ package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/leap/%s/repo/oss/suse/src/" % latest, "main", d)
+ package_names |= get_source_package_list_from_url("http://download.opensuse.org/update/leap/%s/oss/src/" % latest, "updates", d)
+ return latest, package_names
- package_list=clean_package_list(package_names)
- return latest, package_list
+def get_latest_released_clear_source_package_list(d):
+ latest = find_latest_numeric_release("https://download.clearlinux.org/releases/", d)
+ package_names = get_source_package_list_from_url("https://download.clearlinux.org/releases/%s/clear/source/SRPMS/" % latest, "main", d)
+ return latest, package_names
def find_latest_debian_release(url, d):
"Find the latest listed debian release on the given url"
- releases = []
- for link in get_links_from_url(url, d):
- if link[:6] == "Debian":
- if ';' not in link:
- releases.append(link)
+ releases = [link.replace("Debian", "")
+ for link in get_links_from_url(url, d)
+ if link.startswith("Debian")]
releases.sort()
try:
- return releases.pop()[6:]
+ return releases[-1]
except:
return "_NotFound_"
def get_debian_style_source_package_list(url, section, d):
"Return the list of package-names stored in the debian style Sources.gz file"
- import tempfile
import gzip
- webpage = ''
- sock = create_socket(url,d)
- if sock:
- webpage = sock.read()
-
- tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
- tmpfilename=tmpfile.name
- tmpfile.write(sock.read())
- tmpfile.close()
- bb.note("Reading %s: %s" % (url, section))
-
- f = gzip.open(tmpfilename)
- package_names = []
- for line in f:
- if line[:9] == "Package: ":
- package_names.append(line[9:-1] + ":" + section) # Also strip the '\n' at the end
- os.unlink(tmpfilename)
-
+ package_names = set()
+ for line in gzip.open(create_socket(url, d), mode="rt"):
+ if line.startswith("Package:"):
+ pkg = line.split(":", 1)[1].strip()
+ package_names.add(pkg + ":" + section)
return package_names
def get_latest_released_debian_source_package_list(d):
- "Returns list of all the name os packages in the latest debian distro"
+ "Returns list of all the name of packages in the latest debian distro"
latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/", d)
- url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
+ url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
package_names = get_debian_style_source_package_list(url, "main", d)
-# url = "http://ftp.debian.org/debian/dists/stable/contrib/source/Sources.gz"
-# package_names += get_debian_style_source_package_list(url, "contrib")
- url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
- package_names += get_debian_style_source_package_list(url, "updates", d)
- package_list=clean_package_list(package_names)
- return latest, package_list
+ url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
+ package_names |= get_debian_style_source_package_list(url, "updates", d)
+ return latest, package_names
def find_latest_ubuntu_release(url, d):
- "Find the latest listed ubuntu release on the given url"
+ """
+ Find the latest listed Ubuntu release on the given ubuntu/dists/ URL.
+
+ To avoid matching development releases look for distributions that have
+ updates, so the resulting distro could be any supported release.
+ """
url += "?C=M;O=D" # Descending Sort by Last Modified
for link in get_links_from_url(url, d):
- if link[-8:] == "-updates":
- return link[:-8]
+ if "-updates" in link:
+ distro = link.replace("-updates", "")
+ return distro
return "_NotFound_"
def get_latest_released_ubuntu_source_package_list(d):
@@ -192,52 +143,44 @@ def get_latest_released_ubuntu_source_package_list(d):
latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/", d)
url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
package_names = get_debian_style_source_package_list(url, "main", d)
-# url = "http://archive.ubuntu.com/ubuntu/dists/%s/multiverse/source/Sources.gz" % latest
-# package_names += get_debian_style_source_package_list(url, "multiverse")
-# url = "http://archive.ubuntu.com/ubuntu/dists/%s/universe/source/Sources.gz" % latest
-# package_names += get_debian_style_source_package_list(url, "universe")
url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
- package_names += get_debian_style_source_package_list(url, "updates", d)
- package_list=clean_package_list(package_names)
- return latest, package_list
+ package_names |= get_debian_style_source_package_list(url, "updates", d)
+ return latest, package_names
def create_distro_packages_list(distro_check_dir, d):
+ import shutil
+
pkglst_dir = os.path.join(distro_check_dir, "package_lists")
- if not os.path.isdir (pkglst_dir):
- os.makedirs(pkglst_dir)
- # first clear old stuff
- for file in os.listdir(pkglst_dir):
- os.unlink(os.path.join(pkglst_dir, file))
-
- per_distro_functions = [
- ["Debian", get_latest_released_debian_source_package_list],
- ["Ubuntu", get_latest_released_ubuntu_source_package_list],
- ["Fedora", get_latest_released_fedora_source_package_list],
- ["OpenSuSE", get_latest_released_opensuse_source_package_list],
- ["Mandriva", get_latest_released_mandriva_source_package_list],
- ["Meego", get_latest_released_meego_source_package_list]
- ]
-
- from datetime import datetime
- begin = datetime.now()
- for distro in per_distro_functions:
- name = distro[0]
- release, package_list = distro[1](d)
+ bb.utils.remove(pkglst_dir, True)
+ bb.utils.mkdirhier(pkglst_dir)
+
+ per_distro_functions = (
+ ("Debian", get_latest_released_debian_source_package_list),
+ ("Ubuntu", get_latest_released_ubuntu_source_package_list),
+ ("Fedora", get_latest_released_fedora_source_package_list),
+ ("openSUSE", get_latest_released_opensuse_source_package_list),
+ ("Clear", get_latest_released_clear_source_package_list),
+ )
+
+ for name, fetcher_func in per_distro_functions:
+ try:
+ release, package_list = fetcher_func(d)
+ except Exception as e:
+ bb.warn("Cannot fetch packages for %s: %s" % (name, e))
bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
+ if len(package_list) == 0:
+ bb.error("Didn't fetch any packages for %s %s" % (name, release))
+
package_list_file = os.path.join(pkglst_dir, name + "-" + release)
- f = open(package_list_file, "w+b")
- for pkg in package_list:
- f.write(pkg + "\n")
- f.close()
- end = datetime.now()
- delta = end - begin
- bb.note("package_list generatiosn took this much time: %d seconds" % delta.seconds)
+ with open(package_list_file, 'w') as f:
+ for pkg in sorted(package_list):
+ f.write(pkg + "\n")
def update_distro_data(distro_check_dir, datetime, d):
"""
- If distro packages list data is old then rebuild it.
- The operations has to be protected by a lock so that
- only one thread performes it at a time.
+ If the distro package list data is old then rebuild it.
+ The operation has to be protected by a lock so that
+ only one thread performs it at a time.
"""
if not os.path.isdir (distro_check_dir):
try:
@@ -264,71 +207,59 @@ def update_distro_data(distro_check_dir, datetime, d):
f.seek(0)
f.write(datetime)
- except OSError:
- raise Exception('Unable to read/write this file: %s' % (datetime_file))
+ except OSError as e:
+ raise Exception('Unable to open timestamp: %s' % e)
finally:
fcntl.lockf(f, fcntl.LOCK_UN)
f.close()
-
+
def compare_in_distro_packages_list(distro_check_dir, d):
if not os.path.isdir(distro_check_dir):
raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")
-
+
localdata = bb.data.createCopy(d)
pkglst_dir = os.path.join(distro_check_dir, "package_lists")
matching_distros = []
- pn = d.getVar('PN', True)
- recipe_name = d.getVar('PN', True)
+ pn = recipe_name = d.getVar('PN')
bb.note("Checking: %s" % pn)
- trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"})
-
if pn.find("-native") != -1:
pnstripped = pn.split("-native")
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
recipe_name = pnstripped[0]
if pn.startswith("nativesdk-"):
pnstripped = pn.split("nativesdk-")
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES'))
recipe_name = pnstripped[1]
if pn.find("-cross") != -1:
pnstripped = pn.split("-cross")
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
recipe_name = pnstripped[0]
if pn.find("-initial") != -1:
pnstripped = pn.split("-initial")
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
recipe_name = pnstripped[0]
bb.note("Recipe: %s" % recipe_name)
- tmp = localdata.getVar('DISTRO_PN_ALIAS', True)
distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})
-
- if tmp:
- list = tmp.split(' ')
- for str in list:
- if str and str.find("=") == -1 and distro_exceptions[str]:
- matching_distros.append(str)
+ tmp = localdata.getVar('DISTRO_PN_ALIAS') or ""
+ for str in tmp.split():
+ if str and str.find("=") == -1 and distro_exceptions[str]:
+ matching_distros.append(str)
distro_pn_aliases = {}
- if tmp:
- list = tmp.split(' ')
- for str in list:
- if str.find("=") != -1:
- (dist, pn_alias) = str.split('=')
- distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
-
+ for str in tmp.split():
+ if "=" in str:
+ (dist, pn_alias) = str.split('=')
+ distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
+
for file in os.listdir(pkglst_dir):
(distro, distro_release) = file.split("-")
- f = open(os.path.join(pkglst_dir, file), "rb")
+ f = open(os.path.join(pkglst_dir, file), "r")
for line in f:
(pkg, section) = line.split(":")
if distro.lower() in distro_pn_aliases:
@@ -341,38 +272,34 @@ def compare_in_distro_packages_list(distro_check_dir, d):
break
f.close()
-
- if tmp != None:
- list = tmp.split(' ')
- for item in list:
- matching_distros.append(item)
+ for item in tmp.split():
+ matching_distros.append(item)
bb.note("Matching: %s" % matching_distros)
return matching_distros
def create_log_file(d, logname):
- import subprocess
- logpath = d.getVar('LOG_DIR', True)
+ logpath = d.getVar('LOG_DIR')
bb.utils.mkdirhier(logpath)
logfn, logsuffix = os.path.splitext(logname)
- logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
+ logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME'), logsuffix))
if not os.path.exists(logfile):
slogfile = os.path.join(logpath, logname)
if os.path.exists(slogfile):
os.remove(slogfile)
- subprocess.call("touch %s" % logfile, shell=True)
+ open(logfile, 'w+').close()
os.symlink(logfile, slogfile)
d.setVar('LOG_FILE', logfile)
return logfile
def save_distro_check_result(result, datetime, result_file, d):
- pn = d.getVar('PN', True)
- logdir = d.getVar('LOG_DIR', True)
+ pn = d.getVar('PN')
+ logdir = d.getVar('LOG_DIR')
if not logdir:
bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
return
- if not os.path.isdir(logdir):
- os.makedirs(logdir)
+ bb.utils.mkdirhier(logdir)
+
line = pn
for i in result:
line = line + "," + i
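package_name_from_srpm() relies on an SRPM filename always ending in name-version-release.src.rpm, so splitting on the last two dashes is enough even when the name itself contains dashes. The worked example from its own comment:

    srpm = "ca-certificates-2016.2.7-1.0.fc24.src.rpm"
    (name, version, release) = srpm.replace(".src.rpm", "").rsplit("-", 2)
    print(name, version, release)   # ca-certificates 2016.2.7 1.0.fc24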
diff --git a/meta/lib/oe/elf.py b/meta/lib/oe/elf.py
new file mode 100644
index 0000000000..2562cea1dd
--- /dev/null
+++ b/meta/lib/oe/elf.py
@@ -0,0 +1,133 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+def machine_dict(d):
+# TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
+ machdata = {
+ "darwin9" : {
+ "arm" : (40, 0, 0, True, 32),
+ },
+ "eabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ },
+ "elf" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "i586" : (3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ "epiphany": (4643, 0, 0, True, 32),
+ "lm32": (138, 0, 0, False, 32),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeeb":(189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ "powerpc": (20, 0, 0, False, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
+ },
+ "linux" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "arm" : (40, 97, 0, True, 32),
+ "armeb": (40, 97, 0, False, 32),
+ "powerpc": (20, 0, 0, False, 32),
+ "powerpc64": (21, 0, 0, False, 64),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ "ia64": (50, 0, 0, True, 64),
+ "alpha": (36902, 0, 0, True, 64),
+ "hppa": (15, 3, 0, False, 32),
+ "m68k": ( 4, 0, 0, False, 32),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "mips64": ( 8, 0, 0, False, 64),
+ "mips64el": ( 8, 0, 0, True, 64),
+ "mipsisa32r6": ( 8, 0, 0, False, 32),
+ "mipsisa32r6el": ( 8, 0, 0, True, 32),
+ "mipsisa64r6": ( 8, 0, 0, False, 64),
+ "mipsisa64r6el": ( 8, 0, 0, True, 64),
+ "nios2": (113, 0, 0, True, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
+ "s390": (22, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+ "sparc": ( 2, 0, 0, False, 32),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeeb":(189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ },
+ "linux-musl" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "arm" : ( 40, 97, 0, True, 32),
+ "armeb": ( 40, 97, 0, False, 32),
+ "powerpc": ( 20, 0, 0, False, 32),
+ "powerpc64": ( 21, 0, 0, False, 64),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": ( 62, 0, 0, True, 64),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "mips64": ( 8, 0, 0, False, 64),
+ "mips64el": ( 8, 0, 0, True, 64),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeeb":(189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
+ "sh4": ( 42, 0, 0, True, 32),
+ },
+ "uclinux-uclibc" : {
+ "bfin": ( 106, 0, 0, True, 32),
+ },
+ "linux-gnueabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
+ },
+ "linux-musleabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
+ },
+ "linux-gnuspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-muslspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-gnu" : {
+ "powerpc": (20, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+ },
+ "linux-gnu_ilp32" : {
+ "aarch64" : (183, 0, 0, True, 32),
+ },
+ "linux-gnux32" : {
+ "x86_64": (62, 0, 0, True, 32),
+ },
+ "linux-muslx32" : {
+ "x86_64": (62, 0, 0, True, 32),
+ },
+ "linux-gnun32" : {
+ "mips64": ( 8, 0, 0, False, 32),
+ "mips64el": ( 8, 0, 0, True, 32),
+ "mipsisa64r6": ( 8, 0, 0, False, 32),
+ "mipsisa64r6el":( 8, 0, 0, True, 32),
+ },
+ }
+
+ # Add in any extra user supplied data which may come from a BSP layer, removing the
+ # need to always change this class directly
+ extra_machdata = (d and d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS") or "").split()
+ for m in extra_machdata:
+ call = m + "(machdata, d)"
+ locs = { "machdata" : machdata, "d" : d}
+ machdata = bb.utils.better_eval(call, locs)
+
+ return machdata
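Each tuple in machine_dict() is (e_machine, osabi, abiversion, little-endian, bits) and is intended to be checked against the header of a built binary. A small sketch of pulling those same five fields out of an ELF file; '/bin/ls' is only an example path, and on a typical x86-64 Linux host e_machine should come back as 62 (EM_X86_64):

    import struct

    def read_elf_ident(path):
        with open(path, 'rb') as f:
            hdr = f.read(20)
        assert hdr[:4] == b'\x7fELF'
        bits = 32 if hdr[4] == 1 else 64        # EI_CLASS
        littleendian = (hdr[5] == 1)            # EI_DATA
        osabi, abiversion = hdr[7], hdr[8]      # EI_OSABI, EI_ABIVERSION
        fmt = '<H' if littleendian else '>H'
        machine = struct.unpack(fmt, hdr[18:20])[0]   # e_machine
        return (machine, osabi, abiversion, littleendian, bits)

    print(read_elf_ident('/bin/ls'))   # e.g. (62, 0, 0, True, 64) on x86-64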
diff --git a/meta/lib/oe/gpg_sign.py b/meta/lib/oe/gpg_sign.py
index 38eb0cb137..7634d7ef1d 100644
--- a/meta/lib/oe/gpg_sign.py
+++ b/meta/lib/oe/gpg_sign.py
@@ -1,55 +1,71 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
"""Helper module for GPG signing"""
import os
import bb
-import oe.utils
+import subprocess
+import shlex
class LocalSigner(object):
"""Class for handling local (on the build host) signing"""
def __init__(self, d):
- self.gpg_bin = d.getVar('GPG_BIN', True) or \
+ self.gpg_bin = d.getVar('GPG_BIN') or \
bb.utils.which(os.getenv('PATH'), 'gpg')
- self.gpg_path = d.getVar('GPG_PATH', True)
- self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpm")
+ self.gpg_cmd = [self.gpg_bin]
+ self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent")
+ # Without this we see "Cannot allocate memory" errors when running processes in parallel.
+ # The parameter has to be passed on every gpg invocation, since any agent that gets
+ # launched can stick around in memory with the old setting.
+ if self.gpg_agent_bin:
+ self.gpg_cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)]
+ self.gpg_path = d.getVar('GPG_PATH')
+ self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
+ self.gpg_version = self.get_gpg_version()
+
def export_pubkey(self, output_file, keyid, armor=True):
"""Export GPG public key to a file"""
- cmd = '%s --batch --yes --export -o %s ' % \
- (self.gpg_bin, output_file)
+ cmd = self.gpg_cmd + ["--no-permission-warning", "--batch", "--yes", "--export", "-o", output_file]
if self.gpg_path:
- cmd += "--homedir %s " % self.gpg_path
+ cmd += ["--homedir", self.gpg_path]
if armor:
- cmd += "--armor "
- cmd += keyid
- status, output = oe.utils.getstatusoutput(cmd)
- if status:
- raise bb.build.FuncFailed('Failed to export gpg public key (%s): %s' %
- (keyid, output))
-
- def sign_rpms(self, files, keyid, passphrase):
+ cmd += ["--armor"]
+ cmd += [keyid]
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+
+ def sign_rpms(self, files, keyid, passphrase, digest, sign_chunk, fsk=None, fsk_password=None):
"""Sign RPM files"""
cmd = self.rpm_bin + " --addsign --define '_gpg_name %s' " % keyid
- cmd += "--define '_gpg_passphrase %s' " % passphrase
+ gpg_args = '--no-permission-warning --batch --passphrase=%s --agent-program=%s|--auto-expand-secmem' % (passphrase, self.gpg_agent_bin)
+ if self.gpg_version > (2,1,):
+ gpg_args += ' --pinentry-mode=loopback'
+ cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
+ cmd += "--define '_binary_filedigest_algorithm %s' " % digest
if self.gpg_bin:
- cmd += "--define '%%__gpg %s' " % self.gpg_bin
+ cmd += "--define '__gpg %s' " % self.gpg_bin
if self.gpg_path:
cmd += "--define '_gpg_path %s' " % self.gpg_path
- cmd += ' '.join(files)
+ if fsk:
+ cmd += "--signfiles --fskpath %s " % fsk
+ if fsk_password:
+ cmd += "--define '_file_signing_key_password %s' " % fsk_password
- status, output = oe.utils.getstatusoutput(cmd)
- if status:
- raise bb.build.FuncFailed("Failed to sign RPM packages: %s" % output)
+ # Sign in chunks
+ for i in range(0, len(files), sign_chunk):
+ subprocess.check_output(shlex.split(cmd + ' '.join(files[i:i+sign_chunk])), stderr=subprocess.STDOUT)
def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True):
"""Create a detached signature of a file"""
- import subprocess
if passphrase_file and passphrase:
raise Exception("You should use either passphrase_file of passphrase, not both")
- cmd = [self.gpg_bin, '--detach-sign', '--batch', '--no-tty', '--yes',
- '--passphrase-fd', '0', '-u', keyid]
+ cmd = self.gpg_cmd + ['--detach-sign', '--no-permission-warning', '--batch',
+ '--no-tty', '--yes', '--passphrase-fd', '0', '-u', keyid]
if self.gpg_path:
cmd += ['--homedir', self.gpg_path]
@@ -58,9 +74,7 @@ class LocalSigner(object):
#gpg > 2.1 supports password pipes only through the loopback interface
#gpg < 2.1 errors out if given unknown parameters
- dots = self.get_gpg_version().split('.')
- assert len(dots) >= 2
- if int(dots[0]) >= 2 and int(dots[1]) >= 1:
+ if self.gpg_version > (2,1,):
cmd += ['--pinentry-mode', 'loopback']
cmd += [input_file]
@@ -74,8 +88,7 @@ class LocalSigner(object):
(_, stderr) = job.communicate(passphrase.encode("utf-8"))
if job.returncode:
- raise bb.build.FuncFailed("GPG exited with code %d: %s" %
- (job.returncode, stderr.decode("utf-8")))
+ bb.fatal("GPG exited with code %d: %s" % (job.returncode, stderr.decode("utf-8")))
except IOError as e:
bb.error("IO error (%s): %s" % (e.errno, e.strerror))
@@ -87,21 +100,23 @@ class LocalSigner(object):
def get_gpg_version(self):
- """Return the gpg version"""
- import subprocess
+ """Return the gpg version as a tuple of ints"""
try:
- return subprocess.check_output((self.gpg_bin, "--version")).split()[2].decode("utf-8")
+ cmd = self.gpg_cmd + ["--version", "--no-permission-warning"]
+ ver_str = subprocess.check_output(cmd).split()[2].decode("utf-8")
+ return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
except subprocess.CalledProcessError as e:
- raise bb.build.FuncFailed("Could not get gpg version: %s" % e)
+ bb.fatal("Could not get gpg version: %s" % e)
def verify(self, sig_file):
"""Verify signature"""
- cmd = self.gpg_bin + " --verify "
+        cmd = self.gpg_cmd + ["--verify", "--no-permission-warning"]
if self.gpg_path:
- cmd += "--homedir %s " % self.gpg_path
- cmd += sig_file
- status, _ = oe.utils.getstatusoutput(cmd)
+ cmd += ["--homedir", self.gpg_path]
+
+ cmd += [sig_file]
+ status = subprocess.call(cmd)
ret = False if status else True
return ret
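
The switch from a version string to an int tuple fixes a latent comparison bug: the old check (int(dots[0]) >= 2 and int(dots[1]) >= 1) would reject a hypothetical gpg 3.0, whereas Python tuple comparison handles that correctly. A minimal standalone sketch of the parse-and-compare, assuming only that gpg is on PATH and prints its version as the third whitespace-separated token of `gpg --version` (the helper name gpg_version_tuple is illustrative):

    import subprocess

    def gpg_version_tuple(gpg_bin="gpg"):
        # "gpg (GnuPG) 2.2.27" -> the third token is the version string
        ver_str = subprocess.check_output([gpg_bin, "--version"]).split()[2].decode("utf-8")
        # Drop any "-suffix" and split on dots: "2.2.27" -> (2, 2, 27)
        return tuple(int(i) for i in ver_str.split("-")[0].split("."))

    ver = gpg_version_tuple()
    print(ver, "supports loopback pinentry:", ver > (2, 1))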
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py
index 8d2fd1709c..c1274a61de 100644
--- a/meta/lib/oe/license.py
+++ b/meta/lib/oe/license.py
@@ -1,4 +1,6 @@
-# vi:sts=4:sw=4:et
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
"""Code for parsing OpenEmbedded license strings"""
import ast
@@ -13,8 +15,8 @@ def license_ok(license, dont_want_licenses):
# will exclude a trailing '+' character from LICENSE in
# case INCOMPATIBLE_LICENSE is not a 'X+' license.
lic = license
- if not re.search('\+$', dwl):
- lic = re.sub('\+', '', license)
+ if not re.search(r'\+$', dwl):
+ lic = re.sub(r'\+', '', license)
if fnmatch(lic, dwl):
return False
return True
@@ -40,8 +42,8 @@ class InvalidLicense(LicenseError):
return "invalid characters in license '%s'" % self.license
license_operator_chars = '&|() '
-license_operator = re.compile('([' + license_operator_chars + '])')
-license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$')
+license_operator = re.compile(r'([' + license_operator_chars + '])')
+license_pattern = re.compile(r'[a-zA-Z0-9.+_\-]+$')
class LicenseVisitor(ast.NodeVisitor):
"""Get elements based on OpenEmbedded license strings"""
@@ -106,7 +108,8 @@ def is_included(licensestr, whitelist=None, blacklist=None):
license string matches the whitelist and does not match the blacklist.
Returns a tuple holding the boolean state and a list of the applicable
- licenses which were excluded (or None, if the state is True)
+ licenses that were excluded if state is False, or the licenses that were
+ included if the state is True.
"""
def include_license(license):
@@ -117,10 +120,17 @@ def is_included(licensestr, whitelist=None, blacklist=None):
def choose_licenses(alpha, beta):
"""Select the option in an OR which is the 'best' (has the most
- included licenses)."""
- alpha_weight = len(list(filter(include_license, alpha)))
- beta_weight = len(list(filter(include_license, beta)))
- if alpha_weight > beta_weight:
+ included licenses and no excluded licenses)."""
+ # The factor 1000 below is arbitrary, just expected to be much larger
+        # than the number of licenses actually specified. That way the weight
+ # will be negative if the list of licenses contains an excluded license,
+ # but still gives a higher weight to the list with the most included
+ # licenses.
+ alpha_weight = (len(list(filter(include_license, alpha))) -
+ 1000 * (len(list(filter(exclude_license, alpha))) > 0))
+ beta_weight = (len(list(filter(include_license, beta))) -
+ 1000 * (len(list(filter(exclude_license, beta))) > 0))
+ if alpha_weight >= beta_weight:
return alpha
else:
return beta
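
To see why the factor of 1000 works, here is a self-contained sketch of the weighting with hypothetical include/exclude predicates (the real code closes over include_license and exclude_license): any branch containing an excluded license goes strongly negative, so a clean branch always wins even if it includes fewer licenses.

    include = lambda lic: lic in ("MIT", "BSD-3-Clause")   # hypothetical whitelist
    exclude = lambda lic: lic == "GPL-3.0-only"            # hypothetical blacklist

    def weight(licenses):
        # Count of included licenses, pushed far below zero if anything
        # in the list is excluded.
        return (len(list(filter(include, licenses))) -
                1000 * (len(list(filter(exclude, licenses))) > 0))

    print(weight(["MIT", "BSD-3-Clause"]))   # 2
    print(weight(["MIT", "GPL-3.0-only"]))   # -999: never beats a clean branch
    print(weight(["Proprietary"]))           # 0: still preferred over -999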
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py
index e0bdfba255..43e46380d7 100644
--- a/meta/lib/oe/lsb.py
+++ b/meta/lib/oe/lsb.py
@@ -1,26 +1,65 @@
-def release_dict():
- """Return the output of lsb_release -ir as a dictionary"""
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+def get_os_release():
+ """Get all key-value pairs from /etc/os-release as a dict"""
+ from collections import OrderedDict
+
+ data = OrderedDict()
+ if os.path.exists('/etc/os-release'):
+ with open('/etc/os-release') as f:
+ for line in f:
+ try:
+ key, val = line.rstrip().split('=', 1)
+ except ValueError:
+ continue
+ data[key.strip()] = val.strip('"')
+ return data
+
+def release_dict_osr():
+ """ Populate a dict with pertinent values from /etc/os-release """
+ data = {}
+ os_release = get_os_release()
+ if 'ID' in os_release:
+ data['DISTRIB_ID'] = os_release['ID']
+ if 'VERSION_ID' in os_release:
+ data['DISTRIB_RELEASE'] = os_release['VERSION_ID']
+
+ return data
+
+def release_dict_lsb():
+ """ Return the output of lsb_release -ir as a dictionary """
from subprocess import PIPE
try:
output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
except bb.process.CmdError as exc:
- return None
+ return {}
+
+ lsb_map = { 'Distributor ID': 'DISTRIB_ID',
+ 'Release': 'DISTRIB_RELEASE'}
+ lsb_keys = lsb_map.keys()
data = {}
for line in output.splitlines():
- if line.startswith("-e"): line = line[3:]
+ if line.startswith("-e"):
+ line = line[3:]
try:
key, value = line.split(":\t", 1)
except ValueError:
continue
- else:
- data[key] = value
+ if key in lsb_keys:
+ data[lsb_map[key]] = value
+
+ if len(data.keys()) != 2:
+ return None
+
return data
def release_dict_file():
- """ Try to gather LSB release information manually when lsb_release tool is unavailable """
- data = None
+ """ Try to gather release information manually when other methods fail """
+ data = {}
try:
if os.path.exists('/etc/lsb-release'):
data = {}
@@ -37,14 +76,6 @@ def release_dict_file():
if match:
data['DISTRIB_ID'] = match.group(1)
data['DISTRIB_RELEASE'] = match.group(2)
- elif os.path.exists('/etc/os-release'):
- data = {}
- with open('/etc/os-release') as f:
- for line in f:
- if line.startswith('NAME='):
- data['DISTRIB_ID'] = line[5:].rstrip().strip('"')
- if line.startswith('VERSION_ID='):
- data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"')
elif os.path.exists('/etc/SuSE-release'):
data = {}
data['DISTRIB_ID'] = 'SUSE LINUX'
@@ -55,7 +86,7 @@ def release_dict_file():
break
except IOError:
- return None
+ return {}
return data
def distro_identifier(adjust_hook=None):
@@ -64,22 +95,24 @@ def distro_identifier(adjust_hook=None):
import re
- lsb_data = release_dict()
- if lsb_data:
- distro_id, release = lsb_data['Distributor ID'], lsb_data['Release']
- else:
- lsb_data_file = release_dict_file()
- if lsb_data_file:
- distro_id, release = lsb_data_file['DISTRIB_ID'], lsb_data_file.get('DISTRIB_RELEASE', None)
- else:
- distro_id, release = None, None
+ # Try /etc/os-release first, then the output of `lsb_release -ir` and
+ # finally fall back on parsing various release files in order to determine
+ # host distro name and version.
+ distro_data = release_dict_osr()
+ if not distro_data:
+ distro_data = release_dict_lsb()
+ if not distro_data:
+ distro_data = release_dict_file()
+
+ distro_id = distro_data.get('DISTRIB_ID', '')
+ release = distro_data.get('DISTRIB_RELEASE', '')
if adjust_hook:
distro_id, release = adjust_hook(distro_id, release)
if not distro_id:
- return "Unknown"
- # Filter out any non-alphanumerics
- distro_id = re.sub(r'\W', '', distro_id)
+ return "unknown"
+ # Filter out any non-alphanumerics and convert to lowercase
+ distro_id = re.sub(r'\W', '', distro_id).lower()
if release:
id_str = '{0}-{1}'.format(distro_id, release)
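
The refactored control flow reduces to "first probe that returns a non-empty dict wins". A runnable sketch of that chain, with stubs standing in for the three helpers above (the real helpers return {} or None on failure; both are falsy):

    def release_dict_osr():                      # stub: /etc/os-release missing
        return {}

    def release_dict_lsb():                      # stub: lsb_release succeeded
        return {"DISTRIB_ID": "ubuntu", "DISTRIB_RELEASE": "20.04"}

    def release_dict_file():                     # stub: never reached here
        return {}

    def distro_data():
        # Probe in order of preference; plain truthiness drives the fallback.
        for probe in (release_dict_osr, release_dict_lsb, release_dict_file):
            data = probe()
            if data:
                return data
        return {}

    print(distro_data())   # {'DISTRIB_ID': 'ubuntu', 'DISTRIB_RELEASE': '20.04'}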
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py
index f88981dd90..d929c8b3e5 100644
--- a/meta/lib/oe/maketype.py
+++ b/meta/lib/oe/maketype.py
@@ -1,3 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
"""OpenEmbedded variable typing support
Types are defined in the metadata by name, using the 'type' flag on a
@@ -7,7 +10,12 @@ the arguments of the type's factory for details.
import inspect
import oe.types as types
-import collections
+try:
+ # Python 3.7+
+ from collections.abc import Callable
+except ImportError:
+ # Python < 3.7
+ from collections import Callable
available_types = {}
@@ -96,7 +104,7 @@ for name in dir(types):
continue
obj = getattr(types, name)
- if not isinstance(obj, collections.Callable):
+ if not isinstance(obj, Callable):
continue
register(name, obj)
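
The import dance matters because the collections.Callable alias has been deprecated since Python 3.3 and was removed outright in 3.10; collections.abc is its stable home. A short demonstration of the check the registration loop performs:

    try:
        from collections.abc import Callable   # Python 3.3+
    except ImportError:
        from collections import Callable       # alias removed in Python 3.10

    print(isinstance(len, Callable))    # True  -> would be registered
    print(isinstance("str", Callable))  # False -> skipped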
diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py
index 95f8eb2df3..f7c88f9a09 100644
--- a/meta/lib/oe/manifest.py
+++ b/meta/lib/oe/manifest.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from abc import ABCMeta, abstractmethod
import os
import re
@@ -59,9 +63,9 @@ class Manifest(object, metaclass=ABCMeta):
if manifest_dir is None:
if manifest_type != self.MANIFEST_TYPE_IMAGE:
- self.manifest_dir = self.d.getVar('SDK_DIR', True)
+ self.manifest_dir = self.d.getVar('SDK_DIR')
else:
- self.manifest_dir = self.d.getVar('WORKDIR', True)
+ self.manifest_dir = self.d.getVar('WORKDIR')
else:
self.manifest_dir = manifest_dir
@@ -82,7 +86,7 @@ class Manifest(object, metaclass=ABCMeta):
This will be used for testing until the class is implemented properly!
"""
def _create_dummy_initial(self):
- image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
+ image_rootfs = self.d.getVar('IMAGE_ROOTFS')
pkg_list = dict()
if image_rootfs.find("core-image-sato-sdk") > 0:
pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
@@ -104,7 +108,7 @@ class Manifest(object, metaclass=ABCMeta):
pkg_list['lgp'] = \
"locale-base-en-us locale-base-en-gb"
elif image_rootfs.find("core-image-minimal") > 0:
- pkg_list[self.PKG_TYPE_MUST_INSTALL] = "run-postinsts packagegroup-core-boot"
+ pkg_list[self.PKG_TYPE_MUST_INSTALL] = "packagegroup-core-boot"
with open(self.initial_manifest, "w+") as manifest:
manifest.write(self.initial_manifest_file_header)
@@ -195,7 +199,7 @@ class RpmManifest(Manifest):
for pkg in pkg_list.split():
pkg_type = self.PKG_TYPE_MUST_INSTALL
- ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
for ml_variant in ml_variants:
if pkg.startswith(ml_variant + '-'):
@@ -216,13 +220,13 @@ class RpmManifest(Manifest):
for var in self.var_maps[self.manifest_type]:
if var in self.vars_to_split:
- split_pkgs = self._split_multilib(self.d.getVar(var, True))
+ split_pkgs = self._split_multilib(self.d.getVar(var))
if split_pkgs is not None:
pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
else:
- pkg_list = self.d.getVar(var, True)
+ pkg_list = self.d.getVar(var)
if pkg_list is not None:
- pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
+ pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
for pkg_type in pkgs:
for pkg in pkgs[pkg_type].split():
@@ -245,7 +249,7 @@ class OpkgManifest(Manifest):
for pkg in pkg_list.split():
pkg_type = self.PKG_TYPE_MUST_INSTALL
- ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
for ml_variant in ml_variants:
if pkg.startswith(ml_variant + '-'):
@@ -266,16 +270,16 @@ class OpkgManifest(Manifest):
for var in self.var_maps[self.manifest_type]:
if var in self.vars_to_split:
- split_pkgs = self._split_multilib(self.d.getVar(var, True))
+ split_pkgs = self._split_multilib(self.d.getVar(var))
if split_pkgs is not None:
pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
else:
- pkg_list = self.d.getVar(var, True)
+ pkg_list = self.d.getVar(var)
if pkg_list is not None:
- pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
+ pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
- for pkg_type in pkgs:
- for pkg in pkgs[pkg_type].split():
+ for pkg_type in sorted(pkgs):
+ for pkg in sorted(pkgs[pkg_type].split()):
manifest.write("%s,%s\n" % (pkg_type, pkg))
def create_final(self):
@@ -310,7 +314,7 @@ class DpkgManifest(Manifest):
manifest.write(self.initial_manifest_file_header)
for var in self.var_maps[self.manifest_type]:
- pkg_list = self.d.getVar(var, True)
+ pkg_list = self.d.getVar(var)
if pkg_list is None:
continue
@@ -332,7 +336,7 @@ def create_manifest(d, final_manifest=False, manifest_dir=None,
'ipk': OpkgManifest,
'deb': DpkgManifest}
- manifest = manifest_map[d.getVar('IMAGE_PKGTYPE', True)](d, manifest_dir, manifest_type)
+ manifest = manifest_map[d.getVar('IMAGE_PKGTYPE')](d, manifest_dir, manifest_type)
if final_manifest:
manifest.create_final()
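
Most of the churn in this file (and throughout the series) is one mechanical change: bitbake's datastore made expansion the default, so d.getVar('X', True) becomes d.getVar('X'). A stub datastore, purely illustrative and API-shape only, showing that the two call sites are equivalent:

    class DataStub:
        """Hypothetical stand-in for the bitbake datastore, API shape only."""
        def __init__(self, values):
            self._values = values

        def getVar(self, var, expand=True):   # 'expand' now defaults to True
            # The real implementation substitutes ${VARS} when expanding;
            # elided here since only the default matters.
            return self._values.get(var)

    d = DataStub({"IMAGE_PKGTYPE": "rpm"})
    assert d.getVar("IMAGE_PKGTYPE") == d.getVar("IMAGE_PKGTYPE", True) == "rpm"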
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py
index 02642f29f0..b8585d4253 100644
--- a/meta/lib/oe/package.py
+++ b/meta/lib/oe/package.py
@@ -1,15 +1,21 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import stat
+import mmap
+import subprocess
+
def runstrip(arg):
# Function to strip a single file, called from split_and_strip_files below
# A working 'file' (one which works on the target architecture)
#
- # The elftype is a bit pattern (explained in split_and_strip_files) to tell
+ # The elftype is a bit pattern (explained in is_elf below) to tell
# us what type of file we're processing...
# 4 - executable
# 8 - shared library
# 16 - kernel module
- import stat, subprocess
-
(file, elftype, strip) = arg
newmode = None
@@ -18,30 +24,157 @@ def runstrip(arg):
newmode = origmode | stat.S_IWRITE | stat.S_IREAD
os.chmod(file, newmode)
- extraflags = ""
-
+ stripcmd = [strip]
+ skip_strip = False
# kernel module
if elftype & 16:
- extraflags = "--strip-debug --remove-section=.comment --remove-section=.note --preserve-dates"
+ if is_kernel_module_signed(file):
+ bb.debug(1, "Skip strip on signed module %s" % file)
+ skip_strip = True
+ else:
+ stripcmd.extend(["--strip-debug", "--remove-section=.comment",
+ "--remove-section=.note", "--preserve-dates"])
# .so and shared library
elif ".so" in file and elftype & 8:
- extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded"
+ stripcmd.extend(["--remove-section=.comment", "--remove-section=.note", "--strip-unneeded"])
# shared or executable:
elif elftype & 8 or elftype & 4:
- extraflags = "--remove-section=.comment --remove-section=.note"
+ stripcmd.extend(["--remove-section=.comment", "--remove-section=.note"])
- stripcmd = "'%s' %s '%s'" % (strip, extraflags, file)
+ stripcmd.append(file)
bb.debug(1, "runstrip: %s" % stripcmd)
- try:
- output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.error("runstrip: '%s' strip command failed with %s (%s)" % (stripcmd, e.returncode, e.output))
+ if not skip_strip:
+ output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT)
if newmode:
os.chmod(file, origmode)
- return
+# Detect .ko module by searching for "vermagic=" string
+def is_kernel_module(path):
+ with open(path) as f:
+ return mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ).find(b"vermagic=") >= 0
+
+# Detect if .ko module is signed
+def is_kernel_module_signed(path):
+ with open(path, "rb") as f:
+ f.seek(-28, 2)
+ module_tail = f.read()
+ return "Module signature appended" in "".join(chr(c) for c in bytearray(module_tail))
+
+# Return type (bits):
+# 0 - not elf
+# 1 - ELF
+# 2 - stripped
+# 4 - executable
+# 8 - shared library
+# 16 - kernel module
+def is_elf(path):
+ exec_type = 0
+ result = subprocess.check_output(["file", "-b", path], stderr=subprocess.STDOUT).decode("utf-8")
+
+ if "ELF" in result:
+ exec_type |= 1
+ if "not stripped" not in result:
+ exec_type |= 2
+ if "executable" in result:
+ exec_type |= 4
+ if "shared" in result:
+ exec_type |= 8
+ if "relocatable" in result:
+ if path.endswith(".ko") and path.find("/lib/modules/") != -1 and is_kernel_module(path):
+ exec_type |= 16
+ return (path, exec_type)
+
+def is_static_lib(path):
+ if path.endswith('.a') and not os.path.islink(path):
+ with open(path, 'rb') as fh:
+ # The magic must include the first slash to avoid
+ # matching golang static libraries
+ magic = b'!<arch>\x0a/'
+ start = fh.read(len(magic))
+ return start == magic
+ return False
+
+def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d, qa_already_stripped=False):
+ """
+ Strip executable code (like executables, shared libraries) _in_place_
+ - Based on sysroot_strip in staging.bbclass
+ :param dstdir: directory in which to strip files
+ :param strip_cmd: Strip command (usually ${STRIP})
+ :param libdir: ${libdir} - strip .so files in this directory
+ :param base_libdir: ${base_libdir} - strip .so files in this directory
+ :param qa_already_stripped: Set to True if already-stripped' in ${INSANE_SKIP}
+ This is for proper logging and messages only.
+ """
+ import stat, errno, oe.path, oe.utils
+
+ elffiles = {}
+ inodes = {}
+ libdir = os.path.abspath(dstdir + os.sep + libdir)
+ base_libdir = os.path.abspath(dstdir + os.sep + base_libdir)
+ exec_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+ #
+ # First lets figure out all of the files we may have to process
+ #
+ checkelf = []
+ inodecache = {}
+ for root, dirs, files in os.walk(dstdir):
+ for f in files:
+ file = os.path.join(root, f)
+
+ try:
+ ltarget = oe.path.realpath(file, dstdir, False)
+ s = os.lstat(ltarget)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ # Skip broken symlinks
+ continue
+ if not s:
+ continue
+            # Check if it's an executable
+ if s[stat.ST_MODE] & exec_mask \
+ or ((file.startswith(libdir) or file.startswith(base_libdir)) and ".so" in f) \
+ or file.endswith('.ko'):
+ # If it's a symlink, and points to an ELF file, we capture the readlink target
+ if os.path.islink(file):
+ continue
+
+ # It's a file (or hardlink), not a link
+ # ...but is it ELF, and is it already stripped?
+ checkelf.append(file)
+ inodecache[file] = s.st_ino
+ results = oe.utils.multiprocess_launch(is_elf, checkelf, d)
+ for (file, elf_file) in results:
+ #elf_file = is_elf(file)
+ if elf_file & 1:
+ if elf_file & 2:
+ if qa_already_stripped:
+ bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dstdir):], pn))
+ else:
+ bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dstdir):], pn))
+ continue
+
+ if inodecache[file] in inodes:
+ os.unlink(file)
+ os.link(inodes[inodecache[file]], file)
+ else:
+ # break hardlinks so that we do not strip the original.
+ inodes[inodecache[file]] = file
+ bb.utils.break_hardlinks(file)
+ elffiles[file] = elf_file
+
+ #
+ # Now strip them (in parallel)
+ #
+ sfiles = []
+ for file in elffiles:
+ elf_file = int(elffiles[file])
+ sfiles.append((file, elf_file, strip_cmd))
+
+ oe.utils.multiprocess_launch(runstrip, sfiles, d)
def file_translate(file):
@@ -60,41 +193,63 @@ def filedeprunner(arg):
provides = {}
requires = {}
- r = re.compile(r'[<>=]+ +[^ ]*')
+ file_re = re.compile(r'\s+\d+\s(.*)')
+ dep_re = re.compile(r'\s+(\S)\s+(.*)')
+ r = re.compile(r'[<>=]+\s+\S*')
def process_deps(pipe, pkg, pkgdest, provides, requires):
- for line in pipe:
- f = line.decode("utf-8").split(" ", 1)[0].strip()
- line = line.decode("utf-8").split(" ", 1)[1].strip()
+ file = None
+ for line in pipe.split("\n"):
+
+ m = file_re.match(line)
+ if m:
+ file = m.group(1)
+ file = file.replace(pkgdest + "/" + pkg, "")
+ file = file_translate(file)
+ continue
- if line.startswith("Requires:"):
+ m = dep_re.match(line)
+ if not m or not file:
+ continue
+
+ type, dep = m.groups()
+
+ if type == 'R':
i = requires
- elif line.startswith("Provides:"):
+ elif type == 'P':
i = provides
else:
- continue
+ continue
- file = f.replace(pkgdest + "/" + pkg, "")
- file = file_translate(file)
- value = line.split(":", 1)[1].strip()
- value = r.sub(r'(\g<0>)', value)
+ if dep.startswith("python("):
+ continue
- if value.startswith("rpmlib("):
+ # Ignore all perl(VMS::...) and perl(Mac::...) dependencies. These
+ # are typically used conditionally from the Perl code, but are
+ # generated as unconditional dependencies.
+ if dep.startswith('perl(VMS::') or dep.startswith('perl(Mac::'):
continue
- if value == "python":
+
+ # Ignore perl dependencies on .pl files.
+ if dep.startswith('perl(') and dep.endswith('.pl)'):
continue
+
+ # Remove perl versions and perl module versions since they typically
+ # do not make sense when used as package versions.
+ if dep.startswith('perl') and r.search(dep):
+ dep = dep.split()[0]
+
+ # Put parentheses around any version specifications.
+ dep = r.sub(r'(\g<0>)',dep)
+
if file not in i:
i[file] = []
- i[file].append(value)
+ i[file].append(dep)
return provides, requires
- try:
- dep_popen = subprocess.Popen(shlex.split(rpmdeps) + pkgfiles, stdout=subprocess.PIPE)
- provides, requires = process_deps(dep_popen.stdout, pkg, pkgdest, provides, requires)
- except OSError as e:
- bb.error("rpmdeps: '%s' command failed, '%s'" % (shlex.split(rpmdeps) + pkgfiles, e))
- raise e
+ output = subprocess.check_output(shlex.split(rpmdeps) + pkgfiles, stderr=subprocess.STDOUT).decode("utf-8")
+ provides, requires = process_deps(output, pkg, pkgdest, provides, requires)
return (pkg, provides, requires)
@@ -103,14 +258,14 @@ def read_shlib_providers(d):
import re
shlib_provider = {}
- shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
- list_re = re.compile('^(.*)\.list$')
+ shlibs_dirs = d.getVar('SHLIBSDIRS').split()
+ list_re = re.compile(r'^(.*)\.list$')
# Go from least to most specific since the last one found wins
for dir in reversed(shlibs_dirs):
bb.debug(2, "Reading shlib providers in %s" % (dir))
if not os.path.exists(dir):
continue
- for file in os.listdir(dir):
+ for file in sorted(os.listdir(dir)):
m = list_re.match(file)
if m:
dep_pkg = m.group(1)
@@ -149,6 +304,7 @@ def npm_split_package_dirs(pkgdir):
continue
pkgitems.append(pathitem)
pkgname = '-'.join(pkgitems).replace('_', '-')
+ pkgname = pkgname.replace('@', '')
pkgfile = os.path.join(root, dn, 'package.json')
data = None
if os.path.exists(pkgfile):
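
The exec_type bit pattern composes, so a single int carries everything runstrip and strip_execs need to know about a file. A small decoder, assuming the same bit assignments as is_elf() above (the helper name describe is illustrative):

    ELF, STRIPPED, EXECUTABLE, SHARED, KMOD = 1, 2, 4, 8, 16

    def describe(exec_type):
        # Reverse the bit pattern produced by is_elf()
        if not exec_type & ELF:
            return "not ELF"
        parts = []
        if exec_type & EXECUTABLE:
            parts.append("executable")
        if exec_type & SHARED:
            parts.append("shared library")
        if exec_type & KMOD:
            parts.append("kernel module")
        if exec_type & STRIPPED:
            parts.append("already stripped")
        return ", ".join(parts) or "ELF object"

    print(describe(1 | 4))        # executable
    print(describe(1 | 8 | 2))    # shared library, already stripped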
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py
index 434b898d3d..4ff19cf09c 100644
--- a/meta/lib/oe/package_manager.py
+++ b/meta/lib/oe/package_manager.py
@@ -1,9 +1,12 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from abc import ABCMeta, abstractmethod
import os
import glob
import subprocess
import shutil
-import multiprocessing
import re
import collections
import bb
@@ -12,30 +15,25 @@ import oe.utils
import oe.path
import string
from oe.gpg_sign import get_signer
+import hashlib
+import fnmatch
# this can be used by all PM backends to create the index files in parallel
def create_index(arg):
index_cmd = arg
- try:
- bb.note("Executing '%s' ..." % index_cmd)
- result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- except subprocess.CalledProcessError as e:
- return("Index creation command '%s' failed with return code %d:\n%s" %
- (e.cmd, e.returncode, e.output.decode("utf-8")))
-
+ bb.note("Executing '%s' ..." % index_cmd)
+ result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
if result:
bb.note(result)
- return None
-
-"""
-This method parse the output from the package managerand return
-a dictionary with the information of the packages. This is used
-when the packages are in deb or ipk format.
-"""
def opkg_query(cmd_output):
- verregex = re.compile(' \([=<>]* [^ )]*\)')
+ """
+    This method parses the output from the package manager and returns
+    a dictionary with information about the packages. This is used
+    when the packages are in deb or ipk format.
+ """
+ verregex = re.compile(r' \([=<>]* [^ )]*\)')
output = dict()
pkg = ""
arch = ""
@@ -90,6 +88,56 @@ def opkg_query(cmd_output):
return output
+def failed_postinsts_abort(pkgs, log_path):
+ bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
+then please place them into pkg_postinst_ontarget_${PN}().
+Deferring to first boot via 'exit 1' is no longer supported.
+Details of the failure are in %s.""" %(pkgs, log_path))
+
+def generate_locale_archive(d, rootfs, target_arch, localedir):
+ # Pretty sure we don't need this for locale archive generation but
+ # keeping it to be safe...
+ locale_arch_options = { \
+ "arc": ["--uint32-align=4", "--little-endian"],
+ "arceb": ["--uint32-align=4", "--big-endian"],
+ "arm": ["--uint32-align=4", "--little-endian"],
+ "armeb": ["--uint32-align=4", "--big-endian"],
+ "aarch64": ["--uint32-align=4", "--little-endian"],
+ "aarch64_be": ["--uint32-align=4", "--big-endian"],
+ "sh4": ["--uint32-align=4", "--big-endian"],
+ "powerpc": ["--uint32-align=4", "--big-endian"],
+ "powerpc64": ["--uint32-align=4", "--big-endian"],
+ "mips": ["--uint32-align=4", "--big-endian"],
+ "mipsisa32r6": ["--uint32-align=4", "--big-endian"],
+ "mips64": ["--uint32-align=4", "--big-endian"],
+ "mipsisa64r6": ["--uint32-align=4", "--big-endian"],
+ "mipsel": ["--uint32-align=4", "--little-endian"],
+ "mipsisa32r6el": ["--uint32-align=4", "--little-endian"],
+ "mips64el": ["--uint32-align=4", "--little-endian"],
+ "mipsisa64r6el": ["--uint32-align=4", "--little-endian"],
+ "riscv64": ["--uint32-align=4", "--little-endian"],
+ "riscv32": ["--uint32-align=4", "--little-endian"],
+ "i586": ["--uint32-align=4", "--little-endian"],
+ "i686": ["--uint32-align=4", "--little-endian"],
+ "x86_64": ["--uint32-align=4", "--little-endian"]
+ }
+ if target_arch in locale_arch_options:
+ arch_options = locale_arch_options[target_arch]
+ else:
+ bb.error("locale_arch_options not found for target_arch=" + target_arch)
+ bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
+
+ # Need to set this so cross-localedef knows where the archive is
+ env = dict(os.environ)
+ env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")
+
+ for name in os.listdir(localedir):
+ path = os.path.join(localedir, name)
+ if os.path.isdir(path):
+ cmd = ["cross-localedef", "--verbose"]
+ cmd += arch_options
+ cmd += ["--add-to-archive", path]
+ subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
class Indexer(object, metaclass=ABCMeta):
def __init__(self, d, deploy_dir):
@@ -102,118 +150,50 @@ class Indexer(object, metaclass=ABCMeta):
class RpmIndexer(Indexer):
- def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None):
- package_archs = collections.OrderedDict()
- target_os = collections.OrderedDict()
-
- if arch_var is not None and os_var is not None:
- package_archs['default'] = self.d.getVar(arch_var, True).split()
- package_archs['default'].reverse()
- target_os['default'] = self.d.getVar(os_var, True).strip()
- else:
- package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split()
- # arch order is reversed. This ensures the -best- match is
- # listed first!
- package_archs['default'].reverse()
- target_os['default'] = self.d.getVar("TARGET_OS", True).strip()
- multilibs = self.d.getVar('MULTILIBS', True) or ""
- for ext in multilibs.split():
- eext = ext.split(':')
- if len(eext) > 1 and eext[0] == 'multilib':
- localdata = bb.data.createCopy(self.d)
- default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1]
- default_tune = localdata.getVar(default_tune_key, False)
- if default_tune is None:
- default_tune_key = "DEFAULTTUNE_ML_" + eext[1]
- default_tune = localdata.getVar(default_tune_key, False)
- if default_tune:
- localdata.setVar("DEFAULTTUNE", default_tune)
- bb.data.update_data(localdata)
- package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS',
- True).split()
- package_archs[eext[1]].reverse()
- target_os[eext[1]] = localdata.getVar("TARGET_OS",
- True).strip()
-
- ml_prefix_list = collections.OrderedDict()
- for mlib in package_archs:
- if mlib == 'default':
- ml_prefix_list[mlib] = package_archs[mlib]
- else:
- ml_prefix_list[mlib] = list()
- for arch in package_archs[mlib]:
- if arch in ['all', 'noarch', 'any']:
- ml_prefix_list[mlib].append(arch)
- else:
- ml_prefix_list[mlib].append(mlib + "_" + arch)
-
- return (ml_prefix_list, target_os)
-
def write_index(self):
- sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
- all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
+ self.do_write_index(self.deploy_dir)
- mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]
-
- archs = set()
- for item in mlb_prefix_list:
- archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))
-
- if len(archs) == 0:
- archs = archs.union(set(all_mlb_pkg_archs))
-
- archs = archs.union(set(sdk_pkg_archs))
-
- rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
- if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND', True))
+ def do_write_index(self, deploy_dir):
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
else:
signer = None
- index_cmds = []
- repomd_files = []
- rpm_dirs_found = False
- for arch in archs:
- dbpath = os.path.join(self.d.getVar('WORKDIR', True), 'rpmdb', arch)
- if os.path.exists(dbpath):
- bb.utils.remove(dbpath, True)
- arch_dir = os.path.join(self.deploy_dir, arch)
- if not os.path.isdir(arch_dir):
- continue
-
- index_cmds.append("%s --dbpath %s --update -q %s" % \
- (rpm_createrepo, dbpath, arch_dir))
- repomd_files.append(os.path.join(arch_dir, 'repodata', 'repomd.xml'))
-
- rpm_dirs_found = True
- if not rpm_dirs_found:
- bb.note("There are no packages in %s" % self.deploy_dir)
- return
-
- # Create repodata
- result = oe.utils.multiprocess_exec(index_cmds, create_index)
+ createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
+ result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir))
if result:
- bb.fatal('%s' % ('\n'.join(result)))
+ bb.fatal(result)
+
# Sign repomd
if signer:
- for repomd in repomd_files:
- feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE', True)
- is_ascii_sig = (feed_sig_type.upper() != "BIN")
- signer.detach_sign(repomd,
- self.d.getVar('PACKAGE_FEED_GPG_NAME', True),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE', True),
- armor=is_ascii_sig)
-
+ sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
+ is_ascii_sig = (sig_type.upper() != "BIN")
+ signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'),
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
+ armor=is_ascii_sig)
+
+class RpmSubdirIndexer(RpmIndexer):
+ def write_index(self):
+ bb.note("Generating package index for %s" %(self.deploy_dir))
+ self.do_write_index(self.deploy_dir)
+ for entry in os.walk(self.deploy_dir):
+ if os.path.samefile(self.deploy_dir, entry[0]):
+ for dir in entry[1]:
+ if dir != 'repodata':
+ dir_path = oe.path.join(self.deploy_dir, dir)
+ bb.note("Generating package index for %s" %(dir_path))
+ self.do_write_index(dir_path)
class OpkgIndexer(Indexer):
def write_index(self):
arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
"SDK_PACKAGE_ARCHS",
- "MULTILIB_ARCHS"]
+ ]
opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
- if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND', True))
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
else:
signer = None
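
RpmSubdirIndexer's walk looks odd at first glance: it only cares about the iteration whose dirpath is the deploy dir itself, i.e. the immediate subdirectories, each of which gets its own repodata. A distilled sketch of that pattern (index_roots is an illustrative name, not the real API):

    import os

    def index_roots(deploy_dir):
        # os.walk yields (dirpath, dirnames, filenames); the entry whose
        # dirpath is deploy_dir itself lists the immediate subdirectories.
        roots = [deploy_dir]
        for dirpath, dirnames, _ in os.walk(deploy_dir):
            if os.path.samefile(deploy_dir, dirpath):
                roots += [os.path.join(deploy_dir, d) for d in dirnames
                          if d != 'repodata']
                break   # deeper levels are createrepo_c's concern
        return roots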
@@ -223,7 +203,7 @@ class OpkgIndexer(Indexer):
index_cmds = set()
index_sign_files = set()
for arch_var in arch_vars:
- archs = self.d.getVar(arch_var, True)
+ archs = self.d.getVar(arch_var)
if archs is None:
continue
@@ -237,7 +217,7 @@ class OpkgIndexer(Indexer):
if not os.path.exists(pkgs_file):
open(pkgs_file, "w").close()
- index_cmds.add('%s -r %s -p %s -m %s' %
+ index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s' %
(opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
index_sign_files.add(pkgs_file)
@@ -246,17 +226,15 @@ class OpkgIndexer(Indexer):
bb.note("There are no packages in %s!" % self.deploy_dir)
return
- result = oe.utils.multiprocess_exec(index_cmds, create_index)
- if result:
- bb.fatal('%s' % ('\n'.join(result)))
+ oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
if signer:
- feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE', True)
+ feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
is_ascii_sig = (feed_sig_type.upper() != "BIN")
for f in index_sign_files:
signer.detach_sign(f,
- self.d.getVar('PACKAGE_FEED_GPG_NAME', True),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE', True),
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
armor=is_ascii_sig)
@@ -278,8 +256,8 @@ class DpkgIndexer(Indexer):
with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
"apt", "apt.conf.sample")) as apt_conf_sample:
for line in apt_conf_sample.read().split("\n"):
- line = re.sub("#ROOTFS#", "/dev/null", line)
- line = re.sub("#APTCONF#", self.apt_conf_dir, line)
+ line = re.sub(r"#ROOTFS#", "/dev/null", line)
+ line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
apt_conf.write(line + "\n")
def write_index(self):
@@ -290,16 +268,16 @@ class DpkgIndexer(Indexer):
os.environ['APT_CONFIG'] = self.apt_conf_file
- pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
+ pkg_archs = self.d.getVar('PACKAGE_ARCHS')
if pkg_archs is not None:
arch_list = pkg_archs.split()
- sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
+ sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
if sdk_pkg_archs is not None:
for a in sdk_pkg_archs.split():
if a not in pkg_archs:
arch_list.append(a)
- all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").split()
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
@@ -314,13 +292,13 @@ class DpkgIndexer(Indexer):
cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
- cmd += "%s -fc Packages > Packages.gz;" % gzip
+ cmd += "%s -fcn Packages > Packages.gz;" % gzip
with open(os.path.join(arch_dir, "Release"), "w+") as release:
release.write("Label: %s\n" % arch)
cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
-
+
index_cmds.append(cmd)
deb_dirs_found = True
@@ -329,10 +307,8 @@ class DpkgIndexer(Indexer):
bb.note("There are no packages in %s" % self.deploy_dir)
return
- result = oe.utils.multiprocess_exec(index_cmds, create_index)
- if result:
- bb.fatal('%s' % ('\n'.join(result)))
- if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
+ oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
        raise NotImplementedError('Package feed signing not implemented for dpkg')
@@ -346,119 +322,9 @@ class PkgsList(object, metaclass=ABCMeta):
def list_pkgs(self):
pass
-
class RpmPkgsList(PkgsList):
- def __init__(self, d, rootfs_dir, arch_var=None, os_var=None):
- super(RpmPkgsList, self).__init__(d, rootfs_dir)
-
- self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
- self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm')
-
- self.ml_prefix_list, self.ml_os_list = \
- RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var)
-
- # Determine rpm version
- cmd = "%s --version" % self.rpm_cmd
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Getting rpm version failed. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- '''
- Translate the RPM/Smart format names to the OE multilib format names
- '''
- def _pkg_translate_smart_to_oe(self, pkg, arch):
- new_pkg = pkg
- new_arch = arch
- fixed_arch = arch.replace('_', '-')
- found = 0
- for mlib in self.ml_prefix_list:
- for cmp_arch in self.ml_prefix_list[mlib]:
- fixed_cmp_arch = cmp_arch.replace('_', '-')
- if fixed_arch == fixed_cmp_arch:
- if mlib == 'default':
- new_pkg = pkg
- new_arch = cmp_arch
- else:
- new_pkg = mlib + '-' + pkg
- # We need to strip off the ${mlib}_ prefix on the arch
- new_arch = cmp_arch.replace(mlib + '_', '')
-
- # Workaround for bug 3565. Simply look to see if we
- # know of a package with that name, if not try again!
- filename = os.path.join(self.d.getVar('PKGDATA_DIR', True),
- 'runtime-reverse',
- new_pkg)
- if os.path.exists(filename):
- found = 1
- break
-
- if found == 1 and fixed_arch == fixed_cmp_arch:
- break
- #bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch))
- return new_pkg, new_arch
-
- def _list_pkg_deps(self):
- cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"),
- "-t", self.image_rpmlib]
-
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get the package dependencies. Command '%s' "
- "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- return output
-
def list_pkgs(self):
- cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir
- cmd += ' -D "_dbpath /var/lib/rpm" -qa'
- cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'"
-
- try:
- # bb.note(cmd)
- tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get the installed packages list. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- output = dict()
- deps = dict()
- dependencies = self._list_pkg_deps()
-
- # Populate deps dictionary for better manipulation
- for line in dependencies.splitlines():
- try:
- pkg, dep = line.split("|")
- if not pkg in deps:
- deps[pkg] = list()
- if not dep in deps[pkg]:
- deps[pkg].append(dep)
- except:
- # Ignore any other lines they're debug or errors
- pass
-
- for line in tmp_output.split('\n'):
- if len(line.strip()) == 0:
- continue
- pkg = line.split()[0]
- arch = line.split()[1]
- ver = line.split()[2]
- dep = deps.get(pkg, [])
-
- # Skip GPG keys
- if pkg == 'gpg-pubkey':
- continue
-
- pkgorigin = line.split()[3]
- new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch)
-
- output[new_pkg] = {"arch":new_arch, "ver":ver,
- "filename":pkgorigin, "deps":dep}
-
- return output
-
+ return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False).list_installed()
class OpkgPkgsList(PkgsList):
def __init__(self, d, rootfs_dir, config_file):
@@ -466,7 +332,7 @@ class OpkgPkgsList(PkgsList):
self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
- self.opkg_args += self.d.getVar("OPKG_ARGS", True)
+ self.opkg_args += self.d.getVar("OPKG_ARGS")
def list_pkgs(self, format=None):
cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
@@ -510,42 +376,129 @@ class PackageManager(object, metaclass=ABCMeta):
This is an abstract class. Do not instantiate this directly.
"""
- def __init__(self, d):
+ def __init__(self, d, target_rootfs):
self.d = d
+ self.target_rootfs = target_rootfs
self.deploy_dir = None
self.deploy_lock = None
- self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or ""
- self.feed_base_paths = self.d.getVar('PACKAGE_FEED_BASE_PATHS', True) or ""
- self.feed_archs = self.d.getVar('PACKAGE_FEED_ARCHS', True)
+ self._initialize_intercepts()
+
+ def _initialize_intercepts(self):
+ bb.note("Initializing intercept dir for %s" % self.target_rootfs)
+ # As there might be more than one instance of PackageManager operating at the same time
+ # we need to isolate the intercept_scripts directories from each other,
+ # hence the ugly hash digest in dir name.
+ self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" %
+ (hashlib.sha256(self.target_rootfs.encode()).hexdigest()))
+
+ postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split()
+ if not postinst_intercepts:
+ postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH")
+ if not postinst_intercepts_path:
+ postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts")
+ postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path)
+
+ bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts))
+ bb.utils.remove(self.intercepts_dir, True)
+ bb.utils.mkdirhier(self.intercepts_dir)
+ for intercept in postinst_intercepts:
+ bb.utils.copyfile(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
+
+ @abstractmethod
+ def _handle_intercept_failure(self, failed_script):
+ pass
+
+ def _postpone_to_first_boot(self, postinst_intercept_hook):
+ with open(postinst_intercept_hook) as intercept:
+ registered_pkgs = None
+ for line in intercept.read().split("\n"):
+ m = re.match(r"^##PKGS:(.*)", line)
+ if m is not None:
+ registered_pkgs = m.group(1).strip()
+ break
+
+ if registered_pkgs is not None:
+ bb.note("If an image is being built, the postinstalls for the following packages "
+ "will be postponed for first boot: %s" %
+ registered_pkgs)
+
+ # call the backend dependent handler
+ self._handle_intercept_failure(registered_pkgs)
+
+
+ def run_intercepts(self, populate_sdk=None):
+ intercepts_dir = self.intercepts_dir
+
+ bb.note("Running intercept scripts:")
+ os.environ['D'] = self.target_rootfs
+ os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
+ for script in os.listdir(intercepts_dir):
+ script_full = os.path.join(intercepts_dir, script)
+
+ if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
+ continue
+
+ # we do not want to run any multilib variant of this
+ if script.startswith("delay_to_first_boot"):
+ self._postpone_to_first_boot(script_full)
+ continue
+
+ if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ continue
+
+ bb.note("> Executing %s intercept ..." % script)
+
+ try:
+ output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
+ if output: bb.note(output.decode("utf-8"))
+ except subprocess.CalledProcessError as e:
+ bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
+ if populate_sdk == 'host':
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ elif populate_sdk == 'target':
+ if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ else:
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ else:
+ if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ self._postpone_to_first_boot(script_full)
+ else:
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
- """
- Update the package manager package database.
- """
@abstractmethod
def update(self):
+ """
+ Update the package manager package database.
+ """
pass
- """
- Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
- True, installation failures are ignored.
- """
@abstractmethod
def install(self, pkgs, attempt_only=False):
+ """
+ Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
+ True, installation failures are ignored.
+ """
pass
- """
- Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
- is False, the any dependencies are left in place.
- """
@abstractmethod
def remove(self, pkgs, with_dependencies=True):
+ """
+ Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
+ is False, then any dependencies are left in place.
+ """
pass
- """
- This function creates the index files
- """
@abstractmethod
def write_index(self):
+ """
+ This function creates the index files
+ """
pass
@abstractmethod
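
The delay-to-first-boot path above hinges on a marker convention: each intercept script embeds the packages that registered it on a "##PKGS:" line. A self-contained sketch of the scan _postpone_to_first_boot performs (registered_pkgs is an illustrative name):

    import re

    def registered_pkgs(script_text):
        # First "##PKGS: pkg-a pkg-b ..." line wins; None if absent.
        for line in script_text.split("\n"):
            m = re.match(r"^##PKGS:(.*)", line)
            if m is not None:
                return m.group(1).strip()
        return None

    script = "#!/bin/sh\n##PKGS: ncurses base-files\nexit 0\n"
    print(registered_pkgs(script))   # ncurses base-files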
@@ -557,31 +510,59 @@ class PackageManager(object, metaclass=ABCMeta):
pass
@abstractmethod
- def insert_feeds_uris(self):
+ def extract(self, pkg):
+ """
+        Returns the path to a tmpdir containing the extracted contents of a package.
+        Deleting the tmpdir is the responsibility of the caller.
+ """
pass
- """
- Install complementary packages based upon the list of currently installed
- packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
- these packages, if they don't exist then no error will occur. Note: every
- backend needs to call this function explicitly after the normal package
- installation
- """
- def install_complementary(self, globs=None):
- # we need to write the list of installed packages to a file because the
- # oe-pkgdata-util reads it from a file
- installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True),
- "installed_pkgs.txt")
- with open(installed_pkgs_file, "w+") as installed_pkgs:
- pkgs = self.list_installed()
- output = oe.utils.format_pkg_list(pkgs, "arch")
- installed_pkgs.write(output)
+ @abstractmethod
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ """
+ Add remote package feeds into repository manager configuration. The parameters
+ for the feeds are set by feed_uris, feed_base_paths and feed_archs.
+ See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
+ for their description.
+ """
+ pass
+
+ def install_glob(self, globs, sdk=False):
+ """
+ Install all packages that match a glob.
+ """
+ # TODO don't have sdk here but have a property on the superclass
+ # (and respect in install_complementary)
+ if sdk:
+ pkgdatadir = self.d.expand("${TMPDIR}/pkgdata/${SDK_SYS}")
+ else:
+ pkgdatadir = self.d.getVar("PKGDATA_DIR")
+
+ try:
+ bb.note("Installing globbed packages...")
+ cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
+ pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
+ self.install(pkgs.split(), attempt_only=True)
+ except subprocess.CalledProcessError as e:
+ # Return code 1 means no packages matched
+ if e.returncode != 1:
+ bb.fatal("Could not compute globbed packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ def install_complementary(self, globs=None):
+ """
+        Install complementary packages based upon the list of currently installed
+        packages, e.g. locales, *-dev, *-dbg, etc. Installation is attempted only;
+        if a package does not exist, no error occurs. Note: every backend needs to
+        call this function explicitly after the normal package installation.
+ """
if globs is None:
- globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True)
+ globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
split_linguas = set()
- for translation in self.d.getVar('IMAGE_LINGUAS', True).split():
+ for translation in self.d.getVar('IMAGE_LINGUAS').split():
split_linguas.add(translation)
split_linguas.add(translation.split('-')[0])
@@ -589,26 +570,42 @@ class PackageManager(object, metaclass=ABCMeta):
for lang in split_linguas:
globs += " *-locale-%s" % lang
+ for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split():
+ globs += (" " + complementary_linguas) % lang
if globs is None:
return
- cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
- "-p", self.d.getVar('PKGDATA_DIR', True), "glob", installed_pkgs_file,
- globs]
- exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY', True)
- if exclude:
- cmd.extend(['-x', exclude])
- try:
- bb.note("Installing complementary packages ...")
- bb.note('Running %s' % cmd)
- complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not compute complementary packages list. Command "
- "'%s' returned %d:\n%s" %
- (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- self.install(complementary_pkgs.split(), attempt_only=True)
- os.remove(installed_pkgs_file)
+ # we need to write the list of installed packages to a file because the
+ # oe-pkgdata-util reads it from a file
+ with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
+ pkgs = self.list_installed()
+ output = oe.utils.format_pkg_list(pkgs, "arch")
+ installed_pkgs.write(output)
+ installed_pkgs.flush()
+
+ cmd = ["oe-pkgdata-util",
+ "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
+ globs]
+ exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
+ if exclude:
+ cmd.extend(['--exclude=' + '|'.join(exclude.split())])
+ try:
+ bb.note("Installing complementary packages ...")
+ bb.note('Running %s' % cmd)
+ complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
+ self.install(complementary_pkgs.split(), attempt_only=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not compute complementary packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ target_arch = self.d.getVar('TARGET_ARCH')
+ localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
+ if os.path.exists(localedir) and os.listdir(localedir):
+ generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
+ # And now delete the binary locales
+ self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
def deploy_dir_lock(self):
if self.deploy_dir is None:
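
Once the locale archive is generated, the package-backed binary locales become redundant, and fnmatch.filter is all it takes to pick them out of the installed list for removal. For example:

    import fnmatch

    installed = ["bash", "glibc-binary-localedata-en-us",
                 "glibc-binary-localedata-de-de", "glibc-locale"]
    doomed = fnmatch.filter(installed, "glibc-binary-localedata-*")
    print(doomed)
    # ['glibc-binary-localedata-en-us', 'glibc-binary-localedata-de-de']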
@@ -626,13 +623,13 @@ class PackageManager(object, metaclass=ABCMeta):
self.deploy_lock = None
- """
- Construct URIs based on the following pattern: uri/base_path where 'uri'
- and 'base_path' correspond to each element of the corresponding array
- argument leading to len(uris) x len(base_paths) elements on the returned
- array
- """
def construct_uris(self, uris, base_paths):
+ """
+ Construct URIs based on the following pattern: uri/base_path where 'uri'
+ and 'base_path' correspond to each element of the corresponding array
+ argument leading to len(uris) x len(base_paths) elements on the returned
+ array
+ """
def _append(arr1, arr2, sep='/'):
res = []
narr1 = [a.rstrip(sep) for a in arr1]
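
construct_uris is a small cartesian product with separator hygiene. An equivalent standalone sketch of the documented behaviour (not the exact upstream body, which continues beyond this hunk):

    def construct_uris(uris, base_paths, sep='/'):
        # len(uris) x len(base_paths) results; an empty base path yields
        # the bare URI rather than one with a trailing separator.
        uris = [u.rstrip(sep) for u in uris]
        base_paths = [p.strip(sep) for p in base_paths]
        return [u + (sep + p if p else '') for u in uris for p in base_paths]

    print(construct_uris(['http://example.com/feed/'], ['rpm/aarch64', '']))
    # ['http://example.com/feed/rpm/aarch64', 'http://example.com/feed']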
@@ -646,837 +643,416 @@ class PackageManager(object, metaclass=ABCMeta):
return res
return _append(uris, base_paths)
+def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies):
+ """
+ Go through our do_package_write_X dependencies and hardlink the packages we depend
+    upon into the repo directory. This prevents us from seeing other packages that
+    may have been built but that we don't depend upon, as well as packages for
+    architectures we don't support.
+ """
+ import errno
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ mytaskname = d.getVar("BB_RUNTASK")
+ pn = d.getVar("PN")
+ seendirs = set()
+ multilibs = {}
+
+ bb.utils.remove(subrepo_dir, recurse=True)
+ bb.utils.mkdirhier(subrepo_dir)
+
+ # Detect bitbake -b usage
+ nodeps = d.getVar("BB_LIMITEDDEPS") or False
+ if nodeps or not filterbydependencies:
+ oe.path.symlink(deploydir, subrepo_dir, True)
+ return
+
+ start = None
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == mytaskname and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+ pkgdeps = set()
+ start = [start]
+ seen = set(start)
+ # Support direct dependencies (do_rootfs -> do_package_write_X)
+ # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X)
+ while start:
+ next = []
+ for dep2 in start:
+ for dep in taskdepdata[dep2][3]:
+ if taskdepdata[dep][0] != pn:
+ if "do_" + taskname in dep:
+ pkgdeps.add(dep)
+ elif dep not in seen:
+ next.append(dep)
+ seen.add(dep)
+ start = next
+
+ for dep in pkgdeps:
+ c = taskdepdata[dep][0]
+ manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
+ if not manifest:
+ bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
+ if not os.path.exists(manifest):
+ continue
+ with open(manifest, "r") as f:
+ for l in f:
+ l = l.strip()
+ deploydir = os.path.normpath(deploydir)
+ if bb.data.inherits_class('packagefeed-stability', d):
+ dest = l.replace(deploydir + "-prediff", "")
+ else:
+ dest = l.replace(deploydir, "")
+ dest = subrepo_dir + dest
+ if l.endswith("/"):
+ if dest not in seendirs:
+ bb.utils.mkdirhier(dest)
+ seendirs.add(dest)
+ continue
+ # Try to hardlink the file, copy if that fails
+ destdir = os.path.dirname(dest)
+ if destdir not in seendirs:
+ bb.utils.mkdirhier(destdir)
+ seendirs.add(destdir)
+ try:
+ os.link(l, dest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(l, dest)
+ else:
+ raise
+
class RpmPM(PackageManager):
def __init__(self,
d,
target_rootfs,
target_vendor,
task_name='target',
- providename=None,
arch_var=None,
- os_var=None):
- super(RpmPM, self).__init__(d)
- self.target_rootfs = target_rootfs
+ os_var=None,
+ rpm_repo_workdir="oe-rootfs-repo",
+ filterbydependencies=True,
+ needfeed=True):
+ super(RpmPM, self).__init__(d, target_rootfs)
self.target_vendor = target_vendor
self.task_name = task_name
- self.providename = providename
- self.fullpkglist = list()
- self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True)
- self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm")
- self.install_dir_name = "oe_install"
- self.install_dir_path = os.path.join(self.target_rootfs, self.install_dir_name)
- self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
- self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart")
- # 0 = default, only warnings
- # 1 = --log-level=info (includes information about executing scriptlets and their output)
- # 2 = --log-level=debug
- # 3 = --log-level=debug plus dumps of scriplet content and command invocation
- self.debug_level = int(d.getVar('ROOTFS_RPM_DEBUG', True) or "0")
- self.smart_opt = "--log-level=%s --data-dir=%s" % \
- ("warning" if self.debug_level == 0 else
- "info" if self.debug_level == 1 else
- "debug",
- os.path.join(target_rootfs, 'var/lib/smart'))
- self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper')
+ if arch_var == None:
+ self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_")
+ else:
+ self.archs = self.d.getVar(arch_var).replace("-","_")
+ if task_name == "host":
+ self.primary_arch = self.d.getVar('SDK_ARCH')
+ else:
+ self.primary_arch = self.d.getVar('MACHINE_ARCH')
+
+ if needfeed:
+ self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
+ create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
+
+ self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
+ self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
self.task_name)
- self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name)
- self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm')
-
if not os.path.exists(self.d.expand('${T}/saved')):
bb.utils.mkdirhier(self.d.expand('${T}/saved'))
- packageindex_dir = os.path.join(self.d.getVar('WORKDIR', True), 'rpms')
- self.indexer = RpmIndexer(self.d, packageindex_dir)
- self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var)
+ def _configure_dnf(self):
+        # libsolv handles 'noarch' internally; we don't need to specify it explicitly
+ archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]]
+ # This prevents accidental matching against libsolv's built-in policies
+ if len(archs) <= 1:
+ archs = archs + ["bogusarch"]
+ confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
+ bb.utils.mkdirhier(confdir)
+ open(confdir + "arch", 'w').write(":".join(archs))
+ distro_codename = self.d.getVar('DISTRO_CODENAME')
+ open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '')
+
+ open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
+
+
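
As an illustration of what _configure_dnf writes (the arch values here are assumptions, not taken from a real build): for ALL_MULTILIB_PACKAGE_ARCHS = "all any noarch x86_64 core2-64" the reversed, filtered list is ['core2_64', 'x86_64'], so the rootfs ends up with roughly:

    etc/dnf/vars/arch         core2_64:x86_64
    etc/dnf/vars/releasever   <DISTRO_CODENAME, or empty if unset>
    etc/dnf/dnf.conf          (empty file)

With only a single usable arch, the 'bogusarch' padding keeps dnf/libsolv from falling back to its built-in arch-compatibility policies.
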
+ def _configure_rpm(self):
+ # We need to configure rpm to use our primary package architecture as the installation architecture,
+ # and to make it compatible with other package architectures that we use.
+ # Otherwise it will refuse to proceed with package installation.
+ platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
+ rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
+ bb.utils.mkdirhier(platformconfdir)
+ open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
+ with open(rpmrcconfdir + "rpmrc", 'w') as f:
+ f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
+ f.write("buildarch_compat: %s: noarch\n" % self.primary_arch)
+
+ open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
+ if self.d.getVar('RPM_PREFER_ELF_ARCH'):
+ open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
+ else:
+ open(platformconfdir + "macros", 'a').write("%_prefer_color 7")
+
+ if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
+ signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
+ pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
+ signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
+ rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
+ cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Importing GPG key failed. Command '%s' "
+ "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var)
+ def create_configs(self):
+ self._configure_dnf()
+ self._configure_rpm()
- def insert_feeds_uris(self):
- if self.feed_uris == "":
- return
+ def write_index(self):
+ lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
+ lf = bb.utils.lockfile(lockfilename, False)
+ RpmIndexer(self.d, self.rpm_repo_dir).write_index()
+ bb.utils.unlockfile(lf)
- arch_list = []
- if self.feed_archs is not None:
- # User define feed architectures
- arch_list = self.feed_archs.split()
- else:
- # List must be prefered to least preferred order
- default_platform_extra = list()
- platform_extra = list()
- bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
- for mlib in self.ml_os_list:
- for arch in self.ml_prefix_list[mlib]:
- plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
- if mlib == bbextendvariant:
- if plt not in default_platform_extra:
- default_platform_extra.append(plt)
- else:
- if plt not in platform_extra:
- platform_extra.append(plt)
- platform_extra = default_platform_extra + platform_extra
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ from urllib.parse import urlparse
- for canonical_arch in platform_extra:
- arch = canonical_arch.split('-')[0]
- if not os.path.exists(os.path.join(self.deploy_dir, arch)):
- continue
- arch_list.append(arch)
+ if feed_uris == "":
+ return
- feed_uris = self.construct_uris(self.feed_uris.split(), self.feed_base_paths.split())
-
- uri_iterator = 0
- channel_priority = 10 + 5 * len(feed_uris) * (len(arch_list) if arch_list else 1)
-
- for uri in feed_uris:
- if arch_list:
- for arch in arch_list:
- bb.note('Adding Smart channel url%d%s (%s)' %
- (uri_iterator, arch, channel_priority))
- self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/%s -y'
- % (uri_iterator, arch, uri, arch))
- self._invoke_smart('channel --set url%d-%s priority=%d' %
- (uri_iterator, arch, channel_priority))
- channel_priority -= 5
+ gpg_opts = ''
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ gpg_opts += 'repo_gpgcheck=1\n'
+ gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))
+
+ if self.d.getVar('RPM_SIGN_PACKAGES') != '1':
+ gpg_opts += 'gpgcheck=0\n'
+
+ bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
+ remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+ for uri in remote_uris:
+ repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
+ if feed_archs is not None:
+ for arch in feed_archs.split():
+ repo_uri = uri + "/" + arch
+ repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
+ repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
+ open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write(
+ "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
else:
- bb.note('Adding Smart channel url%d (%s)' %
- (uri_iterator, channel_priority))
- self._invoke_smart('channel --add url%d type=rpm-md baseurl=%s -y'
- % (uri_iterator, uri))
- self._invoke_smart('channel --set url%d priority=%d' %
- (uri_iterator, channel_priority))
- channel_priority -= 5
-
- uri_iterator += 1
-
- '''
- Create configs for rpm and smart, and multilib is supported
- '''
- def create_configs(self):
- target_arch = self.d.getVar('TARGET_ARCH', True)
- platform = '%s%s-%s' % (target_arch.replace('-', '_'),
- self.target_vendor,
- self.ml_os_list['default'])
-
- # List must be prefered to least preferred order
- default_platform_extra = list()
- platform_extra = list()
- bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
- for mlib in self.ml_os_list:
- for arch in self.ml_prefix_list[mlib]:
- plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
- if mlib == bbextendvariant:
- if plt not in default_platform_extra:
- default_platform_extra.append(plt)
- else:
- if plt not in platform_extra:
- platform_extra.append(plt)
- platform_extra = default_platform_extra + platform_extra
-
- self._create_configs(platform, platform_extra)
+ repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
+ repo_uri = uri
+ open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write(
+ "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
- def _invoke_smart(self, args):
- cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args)
- # bb.note(cmd)
- try:
- complementary_pkgs = subprocess.check_output(cmd,
- stderr=subprocess.STDOUT,
- shell=True).decode("utf-8")
- # bb.note(complementary_pkgs)
- return complementary_pkgs
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not invoke smart. Command "
- "'%s' returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- def _search_pkg_name_in_feeds(self, pkg, feed_archs):
- for arch in feed_archs:
- arch = arch.replace('-', '_')
- regex_match = re.compile(r"^%s-[^-]*-[^-]*@%s$" % \
- (re.escape(pkg), re.escape(arch)))
- for p in self.fullpkglist:
- if regex_match.match(p) is not None:
- # First found is best match
- # bb.note('%s -> %s' % (pkg, pkg + '@' + arch))
- return pkg + '@' + arch
-
- # Search provides if not found by pkgname.
- bb.note('Not found %s by name, searching provides ...' % pkg)
- cmd = "%s %s query --provides %s --show-format='$name-$version'" % \
- (self.smart_cmd, self.smart_opt, pkg)
- cmd += " | sed -ne 's/ *Provides://p'"
- bb.note('cmd: %s' % cmd)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- # Found a provider
- if output:
- bb.note('Found providers for %s: %s' % (pkg, output))
- for p in output.split():
- for arch in feed_archs:
- arch = arch.replace('-', '_')
- if p.rstrip().endswith('@' + arch):
- return p
-
- return ""
-
- '''
- Translate the OE multilib format names to the RPM/Smart format names
- It searched the RPM/Smart format names in probable multilib feeds first,
- and then searched the default base feed.
- '''
- def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False):
- new_pkgs = list()
-
- for pkg in pkgs:
- new_pkg = pkg
- # Search new_pkg in probable multilibs first
- for mlib in self.ml_prefix_list:
- # Jump the default archs
- if mlib == 'default':
- continue
-
- subst = pkg.replace(mlib + '-', '')
- # if the pkg in this multilib feed
- if subst != pkg:
- feed_archs = self.ml_prefix_list[mlib]
- new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs)
- if not new_pkg:
- # Failed to translate, package not found!
- err_msg = '%s not found in the %s feeds (%s) in %s.' % \
- (pkg, mlib, " ".join(feed_archs), self.d.getVar('DEPLOY_DIR_RPM', True))
- if not attempt_only:
- bb.error(err_msg)
- bb.fatal("This is often caused by an empty package declared " \
- "in a recipe's PACKAGES variable. (Empty packages are " \
- "not constructed unless ALLOW_EMPTY_<pkg> = '1' is used.)")
- bb.warn(err_msg)
- else:
- new_pkgs.append(new_pkg)
+ def _prepare_pkg_transaction(self):
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
- break
- # Apparently not a multilib package...
- if pkg == new_pkg:
- # Search new_pkg in default archs
- default_archs = self.ml_prefix_list['default']
- new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs)
- if not new_pkg:
- err_msg = '%s not found in the feeds (%s) in %s.' % \
- (pkg, " ".join(default_archs), self.d.getVar('DEPLOY_DIR_RPM', True))
- if not attempt_only:
- bb.error(err_msg)
- bb.fatal("This is often caused by an empty package declared " \
- "in a recipe's PACKAGES variable. (Empty packages are " \
- "not constructed unless ALLOW_EMPTY_<pkg> = '1' is used.)")
- bb.warn(err_msg)
- else:
- new_pkgs.append(new_pkg)
-
- return new_pkgs
-
- def _create_configs(self, platform, platform_extra):
- # Setup base system configuration
- bb.note("configuring RPM platform settings")
-
- # Configure internal RPM environment when using Smart
- os.environ['RPM_ETCRPM'] = self.etcrpm_dir
- bb.utils.mkdirhier(self.etcrpm_dir)
-
- # Setup temporary directory -- install...
- if os.path.exists(self.install_dir_path):
- bb.utils.remove(self.install_dir_path, True)
- bb.utils.mkdirhier(os.path.join(self.install_dir_path, 'tmp'))
-
- channel_priority = 5
- platform_dir = os.path.join(self.etcrpm_dir, "platform")
- sdkos = self.d.getVar("SDK_OS", True)
- with open(platform_dir, "w+") as platform_fd:
- platform_fd.write(platform + '\n')
- for pt in platform_extra:
- channel_priority += 5
- if sdkos:
- tmp = re.sub("-%s$" % sdkos, "-%s\n" % sdkos, pt)
- tmp = re.sub("-linux.*$", "-linux.*\n", tmp)
- platform_fd.write(tmp)
-
- # Tell RPM that the "/" directory exist and is available
- bb.note("configuring RPM system provides")
- sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo")
- bb.utils.mkdirhier(sysinfo_dir)
- with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames:
- dirnames.write("/\n")
-
- if self.providename:
- providename_dir = os.path.join(sysinfo_dir, "Providename")
- if not os.path.exists(providename_dir):
- providename_content = '\n'.join(self.providename)
- providename_content += '\n'
- open(providename_dir, "w+").write(providename_content)
-
- # Configure RPM... we enforce these settings!
- bb.note("configuring RPM DB settings")
- # After change the __db.* cache size, log file will not be
- # generated automatically, that will raise some warnings,
- # so touch a bare log for rpm write into it.
- rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001')
- if not os.path.exists(rpmlib_log):
- bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log'))
- open(rpmlib_log, 'w+').close()
-
- DB_CONFIG_CONTENT = "# ================ Environment\n" \
- "set_data_dir .\n" \
- "set_create_dir .\n" \
- "set_lg_dir ./log\n" \
- "set_tmp_dir ./tmp\n" \
- "set_flags db_log_autoremove on\n" \
- "\n" \
- "# -- thread_count must be >= 8\n" \
- "set_thread_count 64\n" \
- "\n" \
- "# ================ Logging\n" \
- "\n" \
- "# ================ Memory Pool\n" \
- "set_cachesize 0 1048576 0\n" \
- "set_mp_mmapsize 268435456\n" \
- "\n" \
- "# ================ Locking\n" \
- "set_lk_max_locks 16384\n" \
- "set_lk_max_lockers 16384\n" \
- "set_lk_max_objects 16384\n" \
- "mutex_set_max 163840\n" \
- "\n" \
- "# ================ Replication\n"
-
- db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG')
- if not os.path.exists(db_config_dir):
- open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT)
-
- # Create database so that smart doesn't complain (lazy init)
- opt = "-qa"
- cmd = "%s --root %s --dbpath /var/lib/rpm %s > /dev/null" % (
- self.rpm_cmd, self.target_rootfs, opt)
- try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Create rpm database failed. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
- # Import GPG key to RPM database of the target system
- if self.d.getVar('RPM_SIGN_PACKAGES', True) == '1':
- pubkey_path = self.d.getVar('RPM_GPG_PUBKEY', True)
- cmd = "%s --root %s --dbpath /var/lib/rpm --import %s > /dev/null" % (
- self.rpm_cmd, self.target_rootfs, pubkey_path)
- subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ def install(self, pkgs, attempt_only = False):
+ if len(pkgs) == 0:
+ return
+ self._prepare_pkg_transaction()
- # Configure smart
- bb.note("configuring Smart settings")
- bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
- True)
- self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs)
- self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm')
- self._invoke_smart('config --set rpm-extra-macros._var=%s' %
- self.d.getVar('localstatedir', True))
- cmd = "config --set rpm-extra-macros._tmppath=/%s/tmp" % (self.install_dir_name)
-
- prefer_color = self.d.getVar('RPM_PREFER_ELF_ARCH', True)
- if prefer_color:
- if prefer_color not in ['0', '1', '2', '4']:
- bb.fatal("Invalid RPM_PREFER_ELF_ARCH: %s, it should be one of:\n"
- "\t1: ELF32 wins\n"
- "\t2: ELF64 wins\n"
- "\t4: ELF64 N32 wins (mips64 or mips64el only)" %
- prefer_color)
- if prefer_color == "4" and self.d.getVar("TUNE_ARCH", True) not in \
- ['mips64', 'mips64el']:
- bb.fatal("RPM_PREFER_ELF_ARCH = \"4\" is for mips64 or mips64el "
- "only.")
- self._invoke_smart('config --set rpm-extra-macros._prefer_color=%s'
- % prefer_color)
-
- self._invoke_smart(cmd)
- self._invoke_smart('config --set rpm-ignoresize=1')
-
- # Write common configuration for host and target usage
- self._invoke_smart('config --set rpm-nolinktos=1')
- self._invoke_smart('config --set rpm-noparentdirs=1')
- check_signature = self.d.getVar('RPM_CHECK_SIGNATURES', True)
- if check_signature and check_signature.strip() == "0":
- self._invoke_smart('config --set rpm-check-signatures=false')
- for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
- self._invoke_smart('flag --set ignore-recommends %s' % i)
-
- # Do the following configurations here, to avoid them being
- # saved for field upgrade
- if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1":
- self._invoke_smart('config --set ignore-all-recommends=1')
- pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
- for i in pkg_exclude.split():
- self._invoke_smart('flag --set exclude-packages %s' % i)
-
- # Optional debugging
- # self._invoke_smart('config --set rpm-log-level=debug')
- # cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile'
- # self._invoke_smart(cmd)
- ch_already_added = []
- for canonical_arch in platform_extra:
- arch = canonical_arch.split('-')[0]
- arch_channel = os.path.join(self.d.getVar('WORKDIR', True), 'rpms', arch)
- oe.path.remove(arch_channel)
- deploy_arch_dir = os.path.join(self.deploy_dir, arch)
- if not os.path.exists(deploy_arch_dir):
- continue
+ bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
+ package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
+ exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])
- lockfilename = self.d.getVar('DEPLOY_DIR_RPM', True) + "/rpm.lock"
- lf = bb.utils.lockfile(lockfilename, False)
- oe.path.copyhardlinktree(deploy_arch_dir, arch_channel)
- bb.utils.unlockfile(lf)
-
- if not arch in ch_already_added:
- bb.note('Adding Smart channel %s (%s)' %
- (arch, channel_priority))
- self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y'
- % (arch, arch_channel))
- self._invoke_smart('channel --set %s priority=%d' %
- (arch, channel_priority))
- channel_priority -= 5
-
- ch_already_added.append(arch)
-
- bb.note('adding Smart RPM DB channel')
- self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
-
- # Construct install scriptlet wrapper.
- # Scripts need to be ordered when executed, this ensures numeric order.
- # If we ever run into needing more the 899 scripts, we'll have to.
- # change num to start with 1000.
- #
- scriptletcmd = "$2 $1/$3 $4\n"
- scriptpath = "$1/$3"
-
- # When self.debug_level >= 3, also dump the content of the
- # executed scriptlets and how they get invoked. We have to
- # replace "exit 1" and "ERR" because printing those as-is
- # would trigger a log analysis failure.
- if self.debug_level >= 3:
- dump_invocation = 'echo "Executing ${name} ${kind} with: ' + scriptletcmd + '"\n'
- dump_script = 'cat ' + scriptpath + '| sed -e "s/exit 1/exxxit 1/g" -e "s/ERR/IRR/g"; echo\n'
- else:
- dump_invocation = 'echo "Executing ${name} ${kind}"\n'
- dump_script = ''
-
- SCRIPTLET_FORMAT = "#!/bin/bash\n" \
- "\n" \
- "export PATH=%s\n" \
- "export D=%s\n" \
- 'export OFFLINE_ROOT="$D"\n' \
- 'export IPKG_OFFLINE_ROOT="$D"\n' \
- 'export OPKG_OFFLINE_ROOT="$D"\n' \
- "export INTERCEPT_DIR=%s\n" \
- "export NATIVE_ROOT=%s\n" \
- "\n" \
- "name=`head -1 " + scriptpath + " | cut -d\' \' -f 2`\n" \
- "kind=`head -1 " + scriptpath + " | cut -d\' \' -f 4`\n" \
- + dump_invocation \
- + dump_script \
- + scriptletcmd + \
- "ret=$?\n" \
- "echo Result of ${name} ${kind}: ${ret}\n" \
- "if [ ${ret} -ne 0 ]; then\n" \
- " if [ $4 -eq 1 ]; then\n" \
- " mkdir -p $1/etc/rpm-postinsts\n" \
- " num=100\n" \
- " while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \
- ' echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \
- ' echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \
- " cat " + scriptpath + " >> $1/etc/rpm-postinsts/${num}-${name}\n" \
- " chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \
- ' echo "Info: deferring ${name} ${kind} install scriptlet to first boot"\n' \
- " else\n" \
- ' echo "Error: ${name} ${kind} remove scriptlet failed"\n' \
- " fi\n" \
- "fi\n"
-
- intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts')
- native_root = self.d.getVar('STAGING_DIR_NATIVE', True)
- scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'],
- self.target_rootfs,
- intercept_dir,
- native_root)
- open(self.scriptlet_wrapper, 'w+').write(scriptlet_content)
-
- bb.note("configuring RPM cross-install scriptlet_wrapper")
- os.chmod(self.scriptlet_wrapper, 0o755)
- cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \
- self.scriptlet_wrapper
- self._invoke_smart(cmd)
-
- # Debug to show smart config info
- # bb.note(self._invoke_smart('config --show'))
+ output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
+ (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
+ (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) +
+ (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
+ ["install"] +
+ pkgs)
- def update(self):
- self._invoke_smart('update rpmsys')
-
- def get_rdepends_recursively(self, pkgs):
- # pkgs will be changed during the loop, so use [:] to make a copy.
- for pkg in pkgs[:]:
- sub_data = oe.packagedata.read_subpkgdata(pkg, self.d)
- sub_rdep = sub_data.get("RDEPENDS_" + pkg)
- if not sub_rdep:
- continue
- done = list(bb.utils.explode_dep_versions2(sub_rdep).keys())
- next = done
- # Find all the rdepends on dependency chain
- while next:
- new = []
- for sub_pkg in next:
- sub_data = oe.packagedata.read_subpkgdata(sub_pkg, self.d)
- sub_pkg_rdep = sub_data.get("RDEPENDS_" + sub_pkg)
- if not sub_pkg_rdep:
- continue
- for p in bb.utils.explode_dep_versions2(sub_pkg_rdep):
- # Already handled, skip it.
- if p in done or p in pkgs:
- continue
- # It's a new dep
- if oe.packagedata.has_subpkgdata(p, self.d):
- done.append(p)
- new.append(p)
- next = new
- pkgs.extend(done)
- return pkgs
+ failed_scriptlets_pkgnames = collections.OrderedDict()
+ for line in output.splitlines():
+ if line.startswith("Error in POSTIN scriptlet in rpm package"):
+ failed_scriptlets_pkgnames[line.split()[-1]] = True
- '''
- Install pkgs with smart, the pkg name is oe format
- '''
- def install(self, pkgs, attempt_only=False):
+ if len(failed_scriptlets_pkgnames) > 0:
+ failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
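
As a sketch of the resulting invocation (package and exclude names are hypothetical): with attempt_only set, NO_RECOMMENDATIONS = "1" and unsigned packages, the argument list handed to _invoke_dnf is roughly:

    ["--skip-broken", "-x", "badpkg1,badpkg2",
     "--setopt=install_weak_deps=False", "--nogpgcheck",
     "install", "base-files", "busybox"]

Any "Error in POSTIN scriptlet in rpm package <pkg>" lines in the dnf output are then collected and abort the build via failed_postinsts_abort.
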
+ def remove(self, pkgs, with_dependencies = True):
if not pkgs:
- bb.note("There are no packages to install")
return
- bb.note("Installing the following packages: %s" % ' '.join(pkgs))
- if not attempt_only:
- # Pull in multilib requires since rpm may not pull in them
- # correctly, for example,
- # lib32-packagegroup-core-standalone-sdk-target requires
- # lib32-libc6, but rpm may pull in libc6 rather than lib32-libc6
- # since it doesn't know mlprefix (lib32-), bitbake knows it and
- # can handle it well, find out the RDEPENDS on the chain will
- # fix the problem. Both do_rootfs and do_populate_sdk have this
- # issue.
- # The attempt_only packages don't need this since they are
- # based on the installed ones.
- #
- # Separate pkgs into two lists, one is multilib, the other one
- # is non-multilib.
- ml_pkgs = []
- non_ml_pkgs = pkgs[:]
- for pkg in pkgs:
- for mlib in (self.d.getVar("MULTILIB_VARIANTS", True) or "").split():
- if pkg.startswith(mlib + '-'):
- ml_pkgs.append(pkg)
- non_ml_pkgs.remove(pkg)
-
- if len(ml_pkgs) > 0 and len(non_ml_pkgs) > 0:
- # Found both foo and lib-foo
- ml_pkgs = self.get_rdepends_recursively(ml_pkgs)
- non_ml_pkgs = self.get_rdepends_recursively(non_ml_pkgs)
- # Longer list makes smart slower, so only keep the pkgs
- # which have the same BPN, and smart can handle others
- # correctly.
- pkgs_new = []
- for pkg in non_ml_pkgs:
- for mlib in (self.d.getVar("MULTILIB_VARIANTS", True) or "").split():
- mlib_pkg = mlib + "-" + pkg
- if mlib_pkg in ml_pkgs:
- pkgs_new.append(pkg)
- pkgs_new.append(mlib_pkg)
- for pkg in pkgs:
- if pkg not in pkgs_new:
- pkgs_new.append(pkg)
- pkgs = pkgs_new
- new_depends = {}
- deps = bb.utils.explode_dep_versions2(" ".join(pkgs))
- for depend in deps:
- data = oe.packagedata.read_subpkgdata(depend, self.d)
- key = "PKG_%s" % depend
- if key in data:
- new_depend = data[key]
- else:
- new_depend = depend
- new_depends[new_depend] = deps[depend]
- pkgs = bb.utils.join_deps(new_depends, commasep=True).split(', ')
- pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only)
- if not pkgs:
- bb.note("There are no packages to install")
- return
- if not attempt_only:
- bb.note('to be installed: %s' % ' '.join(pkgs))
- cmd = "%s %s install -y %s" % \
- (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
- bb.note(cmd)
- else:
- bb.note('installing attempt only packages...')
- bb.note('Attempting %s' % ' '.join(pkgs))
- cmd = "%s %s install --attempt -y %s" % \
- (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
- try:
- output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to install packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
- '''
- Remove pkgs with smart, the pkg name is smart/rpm format
- '''
- def remove(self, pkgs, with_dependencies=True):
- bb.note('to be removed: ' + ' '.join(pkgs))
-
- if not with_dependencies:
- cmd = "%s -e --nodeps " % self.rpm_cmd
- cmd += "--root=%s " % self.target_rootfs
- cmd += "--dbpath=/var/lib/rpm "
- cmd += "--define='_cross_scriptlet_wrapper %s' " % \
- self.scriptlet_wrapper
- cmd += "--define='_tmppath /%s/tmp' %s" % (self.install_dir_name, ' '.join(pkgs))
- else:
- # for pkg in pkgs:
- # bb.note('Debug: What required: %s' % pkg)
- # bb.note(self._invoke_smart('query %s --show-requiredby' % pkg))
+ self._prepare_pkg_transaction()
- cmd = "%s %s remove -y %s" % (self.smart_cmd,
- self.smart_opt,
- ' '.join(pkgs))
+ if with_dependencies:
+ self._invoke_dnf(["remove"] + pkgs)
+ else:
+ cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs]
- try:
- bb.note(cmd)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.note("Unable to remove packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ try:
+ bb.note("Running %s" % ' '.join([cmd] + args + pkgs))
+ output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not invoke rpm. Command "
+ "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))
def upgrade(self):
- bb.note('smart upgrade')
- self._invoke_smart('upgrade')
+ self._prepare_pkg_transaction()
+ self._invoke_dnf(["upgrade"])
- def write_index(self):
- result = self.indexer.write_index()
-
- if result is not None:
- bb.fatal(result)
+ def autoremove(self):
+ self._prepare_pkg_transaction()
+ self._invoke_dnf(["autoremove"])
def remove_packaging_data(self):
- bb.utils.remove(self.image_rpmlib, True)
- bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
- True)
- bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True)
-
- # remove temp directory
- bb.utils.remove(self.install_dir_path, True)
+ self._invoke_dnf(["clean", "all"])
+ for dir in self.packaging_data_dirs:
+ bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)
def backup_packaging_data(self):
- # Save the rpmlib for increment rpm image generation
- if os.path.exists(self.saved_rpmlib):
- bb.utils.remove(self.saved_rpmlib, True)
- shutil.copytree(self.image_rpmlib,
- self.saved_rpmlib,
- symlinks=True)
+ # Save the packaging dirs for incremental rpm image generation
+ if os.path.exists(self.saved_packaging_data):
+ bb.utils.remove(self.saved_packaging_data, True)
+ for i in self.packaging_data_dirs:
+ source_dir = oe.path.join(self.target_rootfs, i)
+ target_dir = oe.path.join(self.saved_packaging_data, i)
+ if os.path.isdir(source_dir):
+ shutil.copytree(source_dir, target_dir, symlinks=True)
+ elif os.path.isfile(source_dir):
+ shutil.copy2(source_dir, target_dir)
def recovery_packaging_data(self):
# Move the saved packaging data back
- if os.path.exists(self.saved_rpmlib):
- if os.path.exists(self.image_rpmlib):
- bb.utils.remove(self.image_rpmlib, True)
-
- bb.note('Recovery packaging data')
- shutil.copytree(self.saved_rpmlib,
- self.image_rpmlib,
- symlinks=True)
+ if os.path.exists(self.saved_packaging_data):
+ for i in self.packaging_data_dirs:
+ target_dir = oe.path.join(self.target_rootfs, i)
+ if os.path.exists(target_dir):
+ bb.utils.remove(target_dir, True)
+ source_dir = oe.path.join(self.saved_packaging_data, i)
+ if os.path.isdir(source_dir):
+ shutil.copytree(source_dir, target_dir, symlinks=True)
+ elif os.path.isfile(source_dir):
+ shutil.copy2(source_dir, target_dir)
def list_installed(self):
- return self.pkgs_list.list_pkgs()
-
- '''
- If incremental install, we need to determine what we've got,
- what we need to add, and what to remove...
- The dump_install_solution will dump and save the new install
- solution.
- '''
- def dump_install_solution(self, pkgs):
- bb.note('creating new install solution for incremental install')
- if len(pkgs) == 0:
- return
-
- pkgs = self._pkg_translate_oe_to_smart(pkgs, False)
- install_pkgs = list()
+ output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
+ print_output = False)
+ packages = {}
+ current_package = None
+ current_deps = None
+ current_state = "initial"
+ for line in output.splitlines():
+ if line.startswith("Package:"):
+ package_info = line.split(" ")[1:]
+ current_package = package_info[0]
+ package_arch = package_info[1]
+ package_version = package_info[2]
+ package_rpm = package_info[3]
+ packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm}
+ current_deps = []
+ elif line.startswith("Dependencies:"):
+ current_state = "dependencies"
+ elif line.startswith("Recommendations"):
+ current_state = "recommendations"
+ elif line.startswith("DependenciesEndHere:"):
+ current_state = "initial"
+ packages[current_package]["deps"] = current_deps
+ elif len(line) > 0:
+ if current_state == "dependencies":
+ current_deps.append(line)
+ elif current_state == "recommendations":
+ current_deps.append("%s [REC]" % line)
+
+ return packages
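
A worked example of the state-machine parser above, with made-up package data. Given repoquery output of:

    Package: busybox core2_64 1.27.2 busybox-1.27.2-r0.core2_64.rpm
    Dependencies:
    libc.so.6()(64bit)
    Recommendations:
    busybox-syslog
    DependenciesEndHere:

list_installed() would return:

    {'busybox': {'arch': 'core2_64', 'ver': '1.27.2',
                 'filename': 'busybox-1.27.2-r0.core2_64.rpm',
                 'deps': ['libc.so.6()(64bit)', 'busybox-syslog [REC]']}}
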
- cmd = "%s %s install -y --dump %s 2>%s" % \
- (self.smart_cmd,
- self.smart_opt,
- ' '.join(pkgs),
- self.solution_manifest)
+ def update(self):
+ self._invoke_dnf(["makecache", "--refresh"])
+
+ def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
+ os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
+
+ dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
+ standard_dnf_args = ["-v", "--rpmverbosity=info", "-y",
+ "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
+ "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
+ "--installroot=%s" % (self.target_rootfs),
+ "--setopt=logdir=%s" % (self.d.getVar('T'))
+ ]
+ if hasattr(self, "rpm_repo_dir"):
+ standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir))
+ cmd = [dnf_cmd] + standard_dnf_args + dnf_args
+ bb.note('Running %s' % ' '.join(cmd))
try:
- # Disable rpmsys channel for the fake install
- self._invoke_smart('channel --disable rpmsys')
-
- subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- with open(self.solution_manifest, 'r') as manifest:
- for pkg in manifest.read().split('\n'):
- if '@' in pkg:
- install_pkgs.append(pkg)
+ output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
+ if print_output:
+ bb.debug(1, output)
+ return output
except subprocess.CalledProcessError as e:
- bb.note("Unable to dump install packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
- # Recovery rpmsys channel
- self._invoke_smart('channel --enable rpmsys')
- return install_pkgs
-
- '''
- If incremental install, we need to determine what we've got,
- what we need to add, and what to remove...
- The load_old_install_solution will load the previous install
- solution
- '''
- def load_old_install_solution(self):
- bb.note('load old install solution for incremental install')
- installed_pkgs = list()
- if not os.path.exists(self.solution_manifest):
- bb.note('old install solution not exist')
- return installed_pkgs
-
- with open(self.solution_manifest, 'r') as manifest:
- for pkg in manifest.read().split('\n'):
- if '@' in pkg:
- installed_pkgs.append(pkg.strip())
-
- return installed_pkgs
-
- '''
- Dump all available packages in feeds, it should be invoked after the
- newest rpm index was created
- '''
- def dump_all_available_pkgs(self):
- available_manifest = self.d.expand('${T}/saved/available_pkgs.txt')
- available_pkgs = list()
- cmd = "%s %s query --output %s" % \
- (self.smart_cmd, self.smart_opt, available_manifest)
- try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- with open(available_manifest, 'r') as manifest:
- for pkg in manifest.read().split('\n'):
- if '@' in pkg:
- available_pkgs.append(pkg.strip())
- except subprocess.CalledProcessError as e:
- bb.note("Unable to list all available packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ if print_output:
+ (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+ "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ else:
+ (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+ "'%s' returned %d:" % (' '.join(cmd), e.returncode))
+ return e.output.decode("utf-8")
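
Putting the standard arguments together, a typical command assembled by _invoke_dnf looks roughly like this (the rootfs path is hypothetical; ${T} and ${WORKDIR} stand for the usual BitBake locations):

    dnf -v --rpmverbosity=info -y \
        -c /path/to/rootfs/etc/dnf/dnf.conf \
        --setopt=reposdir=/path/to/rootfs/etc/yum.repos.d \
        --installroot=/path/to/rootfs \
        --setopt=logdir=${T} \
        --repofrompath=oe-repo,${WORKDIR}/oe-rootfs-repo \
        install <packages>
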
- self.fullpkglist = available_pkgs
+ def dump_install_solution(self, pkgs):
+ open(self.solution_manifest, 'w').write(" ".join(pkgs))
+ return pkgs
- return
+ def load_old_install_solution(self):
+ if not os.path.exists(self.solution_manifest):
+ return []
+ with open(self.solution_manifest, 'r') as fd:
+ return fd.read().split()
+
+ def _script_num_prefix(self, path):
+ files = os.listdir(path)
+ numbers = set()
+ numbers.add(99)
+ for f in files:
+ numbers.add(int(f.split("-")[0]))
+ return max(numbers) + 1
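
For example, if the target directory already holds 100-pkga and 101-pkgb, the next prefix is 102; for an empty directory the seeded 99 makes the numbering start at 100.
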
def save_rpmpostinst(self, pkg):
- mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS', False) or "").split()
-
- new_pkg = pkg
- # Remove any multilib prefix from the package name
- for mlib in mlibs:
- if mlib in pkg:
- new_pkg = pkg.replace(mlib + '-', '')
- break
-
- bb.note(' * postponing %s' % new_pkg)
- saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg
-
- cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs
- cmd += ' --dbpath=/var/lib/rpm ' + new_pkg
- cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"'
- cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"'
- cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir
+ bb.note("Saving postinstall script of %s" % (pkg))
+ cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]
try:
- bb.note(cmd)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip().decode("utf-8")
- bb.note(output)
- os.chmod(saved_dir, 0o755)
- except subprocess.CalledProcessError as e:
- bb.fatal("Invoke save_rpmpostinst failed. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- '''Write common configuration for target usage'''
- def rpm_setup_smart_target_config(self):
- bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
- True)
-
- self._invoke_smart('config --set rpm-nolinktos=1')
- self._invoke_smart('config --set rpm-noparentdirs=1')
- for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
- self._invoke_smart('flag --set ignore-recommends %s' % i)
- self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
-
- '''
- The rpm db lock files were produced after invoking rpm to query on
- build system, and they caused the rpm on target didn't work, so we
- need to unlock the rpm db by removing the lock files.
- '''
- def unlock_rpm_db(self):
- # Remove rpm db lock files
- rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs)
- for f in rpm_db_locks:
- bb.utils.remove(f, True)
-
- """
- Returns a dictionary with the package info.
- """
- def package_info(self, pkg):
- cmd = "%s %s info --urls %s" % (self.smart_cmd, self.smart_opt, pkg)
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
+ output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8")
except subprocess.CalledProcessError as e:
- bb.fatal("Unable to list available packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ bb.fatal("Could not invoke rpm. Command "
+ "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))
- # Set default values to avoid UnboundLocalError
- arch = ""
- ver = ""
- filename = ""
+ # may need to prepend #!/bin/sh to output
- #Parse output
- for line in output.splitlines():
- line = line.rstrip()
- if line.startswith("Name:"):
- pkg = line.split(": ")[1]
- elif line.startswith("Version:"):
- tmp_str = line.split(": ")[1]
- ver, arch = tmp_str.split("@")
- break
-
- # Get filename
- index = re.search("^URLs", output, re.MULTILINE)
- tmp_str = output[index.end():]
- for line in tmp_str.splitlines():
- if "/" in line:
- line = line.lstrip()
- filename = line.split(" ")[0]
- break
-
- # To have the same data type than other package_info methods
- filepath = os.path.join(self.deploy_dir, arch, filename)
- pkg_dict = {}
- pkg_dict[pkg] = {"arch":arch, "ver":ver, "filename":filename,
- "filepath": filepath}
-
- return pkg_dict
+ target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
+ bb.utils.mkdirhier(target_path)
+ num = self._script_num_prefix(target_path)
+ saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
+ open(saved_script_name, 'w').write(output)
+ os.chmod(saved_script_name, 0o755)
- """
- Returns the path to a tmpdir where resides the contents of a package.
+ def _handle_intercept_failure(self, registered_pkgs):
+ rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
+ bb.utils.mkdirhier(rpm_postinsts_dir)
- Deleting the tmpdir is responsability of the caller.
+ # Save the package postinstalls in /etc/rpm-postinsts
+ for pkg in registered_pkgs.split():
+ self.save_rpmpostinst(pkg)
- """
def extract(self, pkg):
- pkg_info = self.package_info(pkg)
- if not pkg_info:
- bb.fatal("Unable to get information for package '%s' while "
- "trying to extract the package." % pkg)
-
- pkg_path = pkg_info[pkg]["filepath"]
+ output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
+ pkg_name = output.splitlines()[-1]
+ if not pkg_name.endswith(".rpm"):
+ bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
+ pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
@@ -1508,18 +1084,18 @@ class RpmPM(PackageManager):
class OpkgDpkgPM(PackageManager):
- """
- This is an abstract class. Do not instantiate this directly.
- """
- def __init__(self, d):
- super(OpkgDpkgPM, self).__init__(d)
+ def __init__(self, d, target_rootfs):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ super(OpkgDpkgPM, self).__init__(d, target_rootfs)
- """
- Returns a dictionary with the package info.
-
- This method extracts the common parts for Opkg and Dpkg
- """
def package_info(self, pkg, cmd):
+ """
+ Returns a dictionary with the package info.
+
+ This method extracts the common parts for Opkg and Dpkg.
+ """
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
@@ -1528,14 +1104,14 @@ class OpkgDpkgPM(PackageManager):
"returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
return opkg_query(output)
- """
- Returns the path to a tmpdir where resides the contents of a package.
+ def extract(self, pkg, pkg_info):
+ """
+ Returns the path to a tmpdir where the contents of a package reside.
- Deleting the tmpdir is responsability of the caller.
+ Deleting the tmpdir is the responsibility of the caller.
- This method extracts the common parts for Opkg and Dpkg
- """
- def extract(self, pkg, pkg_info):
+ This method extracts the common parts for Opkg and Dpkg.
+ """
ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
@@ -1548,20 +1124,21 @@ class OpkgDpkgPM(PackageManager):
tmp_dir = tempfile.mkdtemp()
current_dir = os.getcwd()
os.chdir(tmp_dir)
+ data_tar = 'data.tar.xz'
try:
- cmd = "%s x %s" % (ar_cmd, pkg_path)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- cmd = "%s xf data.tar.*" % tar_cmd
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ cmd = [ar_cmd, 'x', pkg_path]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ cmd = [tar_cmd, 'xf', data_tar]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.utils.remove(tmp_dir, recurse=True)
bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
+ "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
except OSError as e:
bb.utils.remove(tmp_dir, recurse=True)
bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))
+ "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
@@ -1570,23 +1147,27 @@ class OpkgDpkgPM(PackageManager):
return tmp_dir
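
The steps above rely on the standard .deb/.ipk layout: an ar archive holding debian-binary, control.tar.* and data.tar.xz. A self-contained sketch of the same extraction, assuming ar and tar are on PATH and pkg_path names a hypothetical package file:

    import subprocess, tempfile

    def extract_data_tar(pkg_path):
        # Unpack the outer ar archive, then the data tarball inside it.
        tmp_dir = tempfile.mkdtemp()
        subprocess.check_call(["ar", "x", pkg_path], cwd=tmp_dir)
        subprocess.check_call(["tar", "xf", "data.tar.xz"], cwd=tmp_dir)
        return tmp_dir  # caller removes tmp_dir
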
+ def _handle_intercept_failure(self, registered_pkgs):
+ self.mark_packages("unpacked", registered_pkgs.split())
class OpkgPM(OpkgDpkgPM):
- def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
- super(OpkgPM, self).__init__(d)
+ def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True):
+ super(OpkgPM, self).__init__(d, target_rootfs)
- self.target_rootfs = target_rootfs
self.config_file = config_file
self.pkg_archs = archs
self.task_name = task_name
- self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True)
+ self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir)
self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
- self.opkg_args += self.d.getVar("OPKG_ARGS", True)
+ self.opkg_args += self.d.getVar("OPKG_ARGS")
- opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
+ if prepare_index:
+ create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"), "package_write_ipk", filterbydependencies)
+
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
if opkg_lib_dir[0] == "/":
opkg_lib_dir = opkg_lib_dir[1:]
@@ -1598,7 +1179,7 @@ class OpkgPM(OpkgDpkgPM):
if not os.path.exists(self.d.expand('${T}/saved')):
bb.utils.mkdirhier(self.d.expand('${T}/saved'))
- self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") == "1"
+ self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
if self.from_feeds:
self._create_custom_config()
else:
@@ -1606,12 +1187,12 @@ class OpkgPM(OpkgDpkgPM):
self.indexer = OpkgIndexer(self.d, self.deploy_dir)
- """
- This function will change a package's status in /var/lib/opkg/status file.
- If 'packages' is None then the new_status will be applied to all
- packages
- """
def mark_packages(self, status_tag, packages=None):
+ """
+ This function will change a package's status in the /var/lib/opkg/status file.
+ If 'packages' is None then the new status will be applied to all
+ packages.
+ """
status_file = os.path.join(self.opkg_dir, "status")
with open(status_file, "r") as sf:
@@ -1643,8 +1224,8 @@ class OpkgPM(OpkgDpkgPM):
config_file.write("arch %s %d\n" % (arch, priority))
priority += 5
- for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split():
- feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
+ for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
+ feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
if feed_match is not None:
feed_name = feed_match.group(1)
@@ -1660,27 +1241,29 @@ class OpkgPM(OpkgDpkgPM):
specified as compatible for the current machine.
NOTE: Development-helper feature, NOT a full-fledged feed.
"""
- if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "":
+ if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
for arch in self.pkg_archs.split():
cfg_file_name = os.path.join(self.target_rootfs,
- self.d.getVar("sysconfdir", True),
+ self.d.getVar("sysconfdir"),
"opkg",
"local-%s-feed.conf" % arch)
with open(cfg_file_name, "w+") as cfg_file:
cfg_file.write("src/gz local-%s %s/%s" %
(arch,
- self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
+ self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
arch))
- if self.opkg_dir != '/var/lib/opkg':
+ if self.d.getVar('OPKGLIBDIR') != '/var/lib':
# There is no command line option for this anymore, we need to add
# info_dir and status_file to config file, if OPKGLIBDIR doesn't have
# the default value of "/var/lib" as defined in opkg:
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR "/var/lib/opkg/info"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status"
- cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'info'))
- cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'status'))
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
+ cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+ cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+ cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
def _create_config(self):
@@ -1698,31 +1281,35 @@ class OpkgPM(OpkgDpkgPM):
config_file.write("src oe-%s file:%s\n" %
(arch, pkgs_dir))
- if self.opkg_dir != '/var/lib/opkg':
+ if self.d.getVar('OPKGLIBDIR') != '/var/lib':
# There is no command line option for this anymore, we need to add
# info_dir and status_file to config file, if OPKGLIBDIR doesn't have
# the default value of "/var/lib" as defined in opkg:
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR "/var/lib/opkg/info"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status"
- config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'info'))
- config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'status'))
-
- def insert_feeds_uris(self):
- if self.feed_uris == "":
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
+ config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+ config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+ config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ if feed_uris == "":
return
rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
% self.target_rootfs)
- feed_uris = self.construct_uris(self.feed_uris.split(), self.feed_base_paths.split())
- archs = self.pkg_archs.split() if self.feed_archs is None else self.feed_archs.split()
+ os.makedirs('%s/etc/opkg' % self.target_rootfs, exist_ok=True)
+
+ feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+ archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
with open(rootfs_config, "w+") as config_file:
uri_iterator = 0
for uri in feed_uris:
if archs:
for arch in archs:
- if (self.feed_archs is None) and (not os.path.exists(os.path.join(self.deploy_dir, arch))):
+ if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
continue
bb.note('Adding opkg feed url-%s-%d (%s)' %
(arch, uri_iterator, uri))
@@ -1754,29 +1341,44 @@ class OpkgPM(OpkgDpkgPM):
if not pkgs:
return
- cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+ cmd = "%s %s" % (self.opkg_cmd, self.opkg_args)
+ for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split():
+ cmd += " --add-exclude %s" % exclude
+ for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split():
+ cmd += " --add-ignore-recommends %s" % bad_recommendation
+ cmd += " install "
+ cmd += " ".join(pkgs)
os.environ['D'] = self.target_rootfs
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
- "intercept_scripts")
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
try:
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
bb.note(cmd)
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
bb.note(output)
+ failed_pkgs = []
+ for line in output.split('\n'):
+ if line.endswith("configuration required on target."):
+ bb.warn(line)
+ failed_pkgs.append(line.split(".")[0])
+ if failed_pkgs:
+ failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
except subprocess.CalledProcessError as e:
- (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
+ (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
"Command '%s' returned %d:\n%s" %
(cmd, e.returncode, e.output.decode("utf-8")))
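
With hypothetical values PACKAGE_EXCLUDE = "badpkg" and BAD_RECOMMENDATIONS = "bigdoc", the command assembled above expands to roughly:

    opkg --volatile-cache -f <config_file> -t ${T}/ipktemp/ -o <rootfs> ${OPKG_ARGS} \
         --add-exclude badpkg --add-ignore-recommends bigdoc install pkg1 pkg2
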
def remove(self, pkgs, with_dependencies=True):
+ if not pkgs:
+ return
+
if with_dependencies:
- cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
+ cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
(self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
else:
cmd = "%s %s --force-depends remove %s" % \
@@ -1812,59 +1414,23 @@ class OpkgPM(OpkgDpkgPM):
def list_installed(self):
return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs()
- def handle_bad_recommendations(self):
- bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or ""
- if bad_recommendations.strip() == "":
- return
-
- status_file = os.path.join(self.opkg_dir, "status")
-
- # If status file existed, it means the bad recommendations has already
- # been handled
- if os.path.exists(status_file):
- return
-
- cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
-
- with open(status_file, "w+") as status:
- for pkg in bad_recommendations.split():
- pkg_info = cmd + pkg
-
- try:
- output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get package info. Command '%s' "
- "returned %d:\n%s" % (pkg_info, e.returncode, e.output.decode("utf-8")))
-
- if output == "":
- bb.note("Ignored bad recommendation: '%s' is "
- "not a package" % pkg)
- continue
-
- for line in output.split('\n'):
- if line.startswith("Status:"):
- status.write("Status: deinstall hold not-installed\n")
- else:
- status.write(line + "\n")
-
- # Append a blank line after each package entry to ensure that it
- # is separated from the following entry
- status.write("\n")
-
- '''
- The following function dummy installs pkgs and returns the log of output.
- '''
def dummy_install(self, pkgs):
+ """
+ Dummy-installs pkgs and returns the output log.
+ """
if len(pkgs) == 0:
return
# Create an temp dir as opkg root for dummy installation
temp_rootfs = self.d.expand('${T}/opkg')
- temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ if opkg_lib_dir[0] == "/":
+ opkg_lib_dir = opkg_lib_dir[1:]
+ temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg')
bb.utils.mkdirhier(temp_opkg_dir)
opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
- opkg_args += self.d.getVar("OPKG_ARGS", True)
+ opkg_args += self.d.getVar("OPKG_ARGS")
cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
try:
@@ -1906,10 +1472,10 @@ class OpkgPM(OpkgDpkgPM):
self.opkg_dir,
symlinks=True)
- """
- Returns a dictionary with the package info.
- """
def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
@@ -1920,27 +1486,29 @@ class OpkgPM(OpkgDpkgPM):
return pkg_info
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
- """
def extract(self, pkg):
+ """
+ Returns the path to a tmpdir where the contents of a package reside.
+
+ Deleting the tmpdir is the responsibility of the caller.
+ """
pkg_info = self.package_info(pkg)
if not pkg_info:
bb.fatal("Unable to get information for package '%s' while "
"trying to extract the package." % pkg)
tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.gz"))
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
return tmp_dir
class DpkgPM(OpkgDpkgPM):
- def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
- super(DpkgPM, self).__init__(d)
- self.target_rootfs = target_rootfs
- self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True)
+ def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True):
+ super(DpkgPM, self).__init__(d, target_rootfs)
+ self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir)
+
+ create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies)
+
if apt_conf_dir is None:
self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
else:
@@ -1949,22 +1517,22 @@ class DpkgPM(OpkgDpkgPM):
self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")
- self.apt_args = d.getVar("APT_ARGS", True)
+ self.apt_args = d.getVar("APT_ARGS")
self.all_arch_list = archs.split()
- all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").split()
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
self._create_configs(archs, base_archs)
self.indexer = DpkgIndexer(self.d, self.deploy_dir)
- """
- This function will change a package's status in /var/lib/dpkg/status file.
- If 'packages' is None then the new_status will be applied to all
- packages
- """
def mark_packages(self, status_tag, packages=None):
+ """
+ This function will change a package's status in the /var/lib/dpkg/status file.
+ If 'packages' is None then the new status will be applied to all
+ packages.
+ """
status_file = self.target_rootfs + "/var/lib/dpkg/status"
with open(status_file, "r") as sf:
@@ -1987,19 +1555,22 @@ class DpkgPM(OpkgDpkgPM):
os.rename(status_file + ".tmp", status_file)
- """
- Run the pre/post installs for package "package_name". If package_name is
- None, then run all pre/post install scriptlets.
- """
def run_pre_post_installs(self, package_name=None):
+ """
+ Run the pre/post installs for package "package_name". If package_name is
+ None, then run all pre/post install scriptlets.
+ """
info_dir = self.target_rootfs + "/var/lib/dpkg/info"
- suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")]
+ ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
+ control_scripts = [
+ ControlScript(".preinst", "Preinstall", "install"),
+ ControlScript(".postinst", "Postinstall", "configure")]
status_file = self.target_rootfs + "/var/lib/dpkg/status"
installed_pkgs = []
with open(status_file, "r") as status:
for line in status.read().split('\n'):
- m = re.match("^Package: (.*)", line)
+ m = re.match(r"^Package: (.*)", line)
if m is not None:
installed_pkgs.append(m.group(1))
@@ -2010,27 +1581,24 @@ class DpkgPM(OpkgDpkgPM):
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
- "intercept_scripts")
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
- failed_pkgs = []
for pkg_name in installed_pkgs:
- for suffix in suffixes:
- p_full = os.path.join(info_dir, pkg_name + suffix[0])
+ for control_script in control_scripts:
+ p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
if os.path.exists(p_full):
try:
bb.note("Executing %s for package: %s ..." %
- (suffix[1].lower(), pkg_name))
- subprocess.check_output(p_full, stderr=subprocess.STDOUT)
+ (control_script.name.lower(), pkg_name))
+ output = subprocess.check_output([p_full, control_script.argument],
+ stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
except subprocess.CalledProcessError as e:
- bb.note("%s for package %s failed with %d:\n%s" %
- (suffix[1], pkg_name, e.returncode, e.output.decode("utf-8")))
- failed_pkgs.append(pkg_name)
- break
-
- if len(failed_pkgs):
- self.mark_packages("unpacked", failed_pkgs)
+ bb.warn("%s for package %s failed with %d:\n%s" %
+ (control_script.name, pkg_name, e.returncode,
+ e.output.decode("utf-8")))
+ failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
def update(self):
os.environ['APT_CONFIG'] = self.apt_conf_file
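
The rewritten run_pre_post_installs() above passes each maintainer script the
argument dpkg itself would use: preinst scripts get "install" and postinst
scripts get "configure". A minimal standalone sketch of that dispatch,
assuming a dpkg-style info directory, might look like:

    import collections
    import os
    import subprocess

    ControlScript = collections.namedtuple("ControlScript",
                                           ["suffix", "name", "argument"])
    CONTROL_SCRIPTS = [ControlScript(".preinst", "Preinstall", "install"),
                       ControlScript(".postinst", "Postinstall", "configure")]

    def run_scriptlets(info_dir, pkg):
        # Run whichever maintainer scripts exist for pkg, in dpkg order.
        for script in CONTROL_SCRIPTS:
            path = os.path.join(info_dir, pkg + script.suffix)
            if os.path.exists(path):
                subprocess.check_output([path, script.argument],
                                        stderr=subprocess.STDOUT)
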
@@ -2060,26 +1628,29 @@ class DpkgPM(OpkgDpkgPM):
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
- (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
+ (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
"Command '%s' returned %d:\n%s" %
(cmd, e.returncode, e.output.decode("utf-8")))
# rename *.dpkg-new files/dirs
for root, dirs, files in os.walk(self.target_rootfs):
for dir in dirs:
- new_dir = re.sub("\.dpkg-new", "", dir)
+ new_dir = re.sub(r"\.dpkg-new", "", dir)
if dir != new_dir:
os.rename(os.path.join(root, dir),
os.path.join(root, new_dir))
for file in files:
- new_file = re.sub("\.dpkg-new", "", file)
+ new_file = re.sub(r"\.dpkg-new", "", file)
if file != new_file:
os.rename(os.path.join(root, file),
os.path.join(root, new_file))
def remove(self, pkgs, with_dependencies=True):
+ if not pkgs:
+ return
+
if with_dependencies:
os.environ['APT_CONFIG'] = self.apt_conf_file
cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
@@ -2105,23 +1676,23 @@ class DpkgPM(OpkgDpkgPM):
if result is not None:
bb.fatal(result)
- def insert_feeds_uris(self):
- if self.feed_uris == "":
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ if feed_uris == "":
return
sources_conf = os.path.join("%s/etc/apt/sources.list"
% self.target_rootfs)
arch_list = []
- if self.feed_archs is None:
+ if feed_archs is None:
for arch in self.all_arch_list:
if not os.path.exists(os.path.join(self.deploy_dir, arch)):
continue
arch_list.append(arch)
else:
- arch_list = self.feed_archs.split()
+ arch_list = feed_archs.split()
- feed_uris = self.construct_uris(self.feed_uris.split(), self.feed_base_paths.split())
+ feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
with open(sources_conf, "w+") as sources_file:
for uri in feed_uris:
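
Each feed URI ends up as a "deb <uri> ./" line in the target's sources.list.
The pairing of URIs with base paths happens in construct_uris() on the base
class, which is not shown in this hunk; the sketch below is only a hedged
stand-in for that behaviour, using made-up feed values:

    def construct_feed_uris(uris, base_paths):
        # Assumed behaviour: pair every feed URI with every base path,
        # or keep the URIs unchanged when there are no base paths.
        return ["%s/%s" % (u.rstrip("/"), p.lstrip("/"))
                for u in uris for p in base_paths] or list(uris)

    for uri in construct_feed_uris(["http://feeds.example.com"], ["deb"]):
        print("deb %s ./" % uri)    # one sources.list entry per feed
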
@@ -2135,7 +1706,7 @@ class DpkgPM(OpkgDpkgPM):
sources_file.write("deb %s ./\n" % uri)
def _create_configs(self, archs, base_archs):
- base_archs = re.sub("_", "-", base_archs)
+ base_archs = re.sub(r"_", r"-", base_archs)
if os.path.exists(self.apt_conf_dir):
bb.utils.remove(self.apt_conf_dir, True)
@@ -2161,7 +1732,7 @@ class DpkgPM(OpkgDpkgPM):
priority += 5
- pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
+ pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
for pkg in pkg_exclude.split():
prefs_file.write(
"Package: %s\n"
@@ -2176,21 +1747,20 @@ class DpkgPM(OpkgDpkgPM):
os.path.join(self.deploy_dir, arch))
base_arch_list = base_archs.split()
- multilib_variants = self.d.getVar("MULTILIB_VARIANTS", True);
+ multilib_variants = self.d.getVar("MULTILIB_VARIANTS");
for variant in multilib_variants.split():
localdata = bb.data.createCopy(self.d)
variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False)
- orig_arch = localdata.getVar("DPKG_ARCH", True)
+ orig_arch = localdata.getVar("DPKG_ARCH")
localdata.setVar("DEFAULTTUNE", variant_tune)
- bb.data.update_data(localdata)
- variant_arch = localdata.getVar("DPKG_ARCH", True)
+ variant_arch = localdata.getVar("DPKG_ARCH")
if variant_arch not in base_arch_list:
base_arch_list.append(variant_arch)
with open(self.apt_conf_file, "w+") as apt_conf:
with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
for line in apt_conf_sample.read().split("\n"):
- match_arch = re.match(" Architecture \".*\";$", line)
+ match_arch = re.match(r" Architecture \".*\";$", line)
architectures = ""
if match_arch:
for base_arch in base_arch_list:
@@ -2198,8 +1768,8 @@ class DpkgPM(OpkgDpkgPM):
apt_conf.write(" Architectures {%s};\n" % architectures);
apt_conf.write(" Architecture \"%s\";\n" % base_archs)
else:
- line = re.sub("#ROOTFS#", self.target_rootfs, line)
- line = re.sub("#APTCONF#", self.apt_conf_dir, line)
+ line = re.sub(r"#ROOTFS#", self.target_rootfs, line)
+ line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
apt_conf.write(line + "\n")
target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
@@ -2214,7 +1784,7 @@ class DpkgPM(OpkgDpkgPM):
def remove_packaging_data(self):
bb.utils.remove(os.path.join(self.target_rootfs,
- self.d.getVar('opkglibdir', True)), True)
+ self.d.getVar('opkglibdir')), True)
bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
def fix_broken_dependencies(self):
@@ -2231,10 +1801,10 @@ class DpkgPM(OpkgDpkgPM):
def list_installed(self):
return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs()
- """
- Returns a dictionary with the package info.
- """
def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
@@ -2245,12 +1815,12 @@ class DpkgPM(OpkgDpkgPM):
return pkg_info
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
- """
def extract(self, pkg):
+ """
+ Returns the path to a tmpdir where the contents of a package reside.
+
+ Deleting the tmpdir is the responsibility of the caller.
+ """
pkg_info = self.package_info(pkg)
if not pkg_info:
bb.fatal("Unable to get information for package '%s' while "
@@ -2262,12 +1832,12 @@ class DpkgPM(OpkgDpkgPM):
return tmp_dir
def generate_index_files(d):
- classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split()
+ classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
indexer_map = {
- "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)),
- "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)),
- "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True))
+ "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
+ "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
+ "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
}
result = None
@@ -2281,12 +1851,3 @@ def generate_index_files(d):
if result is not None:
bb.fatal(result)
-
-if __name__ == "__main__":
- """
- We should be able to run this as a standalone script, from outside bitbake
- environment.
- """
- """
- TBD
- """
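
generate_index_files() is a small table-driven dispatch: each backend named in
PACKAGE_CLASSES maps to an indexer class plus the deploy directory it should
index. A minimal sketch of the same pattern, with a stand-in indexer class and
a made-up deploy path:

    class FakeIndexer:
        # Stand-in for RpmSubdirIndexer/OpkgIndexer/DpkgIndexer.
        def __init__(self, d, deploy_dir):
            self.deploy_dir = deploy_dir
        def write_index(self):
            print("indexing %s" % self.deploy_dir)

    indexer_map = {"ipk": (FakeIndexer, "/tmp/deploy/ipk")}

    for pkg_class in "package_ipk".replace("package_", "").split():
        if pkg_class in indexer_map:
            cls, deploy_dir = indexer_map[pkg_class]
            cls(None, deploy_dir).write_index()
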
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py
index 21d4de914f..a82085a792 100644
--- a/meta/lib/oe/packagedata.py
+++ b/meta/lib/oe/packagedata.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import codecs
import os
@@ -13,9 +17,8 @@ def read_pkgdatafile(fn):
if os.access(fn, os.R_OK):
import re
- f = open(fn, 'r')
- lines = f.readlines()
- f.close()
+ with open(fn, 'r') as f:
+ lines = f.readlines()
r = re.compile("([^:]+):\s*(.*)")
for l in lines:
m = r.match(l)
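
read_pkgdatafile() treats a pkgdata file as a sequence of "VAR: value" lines
and collects them into a dict with a two-group regular expression. A
simplified sketch with sample lines (the real function also unescapes each
value through codecs):

    import re

    r = re.compile(r"([^:]+):\s*(.*)")
    pkgdata = {}
    for line in ["PN: zlib", "PKGSIZE_zlib: 159744"]:
        m = r.match(line)
        if m:
            pkgdata[m.group(1)] = m.group(2)
    print(pkgdata)   # {'PN': 'zlib', 'PKGSIZE_zlib': '159744'}
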
@@ -57,7 +60,7 @@ def read_subpkgdata_dict(pkg, d):
def _pkgmap(d):
"""Return a dictionary mapping package to recipe name."""
- pkgdatadir = d.getVar("PKGDATA_DIR", True)
+ pkgdatadir = d.getVar("PKGDATA_DIR")
pkgmap = {}
try:
diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py
index 97819279b7..2419cbb6d3 100644
--- a/meta/lib/oe/packagegroup.py
+++ b/meta/lib/oe/packagegroup.py
@@ -1,17 +1,21 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import itertools
def is_optional(feature, d):
- packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
+ packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
if packages:
- return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional", True))
+ return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
else:
- return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional", True))
+ return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional"))
def packages(features, d):
for feature in features:
- packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
+ packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
if not packages:
- packages = d.getVar("PACKAGE_GROUP_%s" % feature, True)
+ packages = d.getVar("PACKAGE_GROUP_%s" % feature)
for pkg in (packages or "").split():
yield pkg
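
packages() resolves each image feature through FEATURE_PACKAGES_<feature>,
falling back to the legacy PACKAGE_GROUP_<feature> name. A minimal sketch
using a plain dict in place of the datastore:

    def packages(features, getvar):
        # getvar stands in for d.getVar; the FEATURE_PACKAGES name wins,
        # with the legacy PACKAGE_GROUP name as the fallback.
        for feature in features:
            pkgs = (getvar("FEATURE_PACKAGES_%s" % feature)
                    or getvar("PACKAGE_GROUP_%s" % feature))
            for pkg in (pkgs or "").split():
                yield pkg

    conf = {"FEATURE_PACKAGES_ssh-server-openssh": "openssh-sshd openssh-scp"}
    print(list(packages(["ssh-server-openssh"], conf.get)))
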
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py
index 0332f100f1..2b1eee1003 100644
--- a/meta/lib/oe/patch.py
+++ b/meta/lib/oe/patch.py
@@ -1,4 +1,9 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import oe.path
+import oe.types
class NotFoundError(bb.BBHandledException):
def __init__(self, path):
@@ -20,6 +25,7 @@ class CmdError(bb.BBHandledException):
def runcmd(args, dir = None):
import pipes
+ import subprocess
if dir:
olddir = os.path.abspath(os.curdir)
@@ -32,9 +38,14 @@ def runcmd(args, dir = None):
args = [ pipes.quote(str(arg)) for arg in args ]
cmd = " ".join(args)
# print("cmd: %s" % cmd)
- (exitstatus, output) = oe.utils.getstatusoutput(cmd)
+ (exitstatus, output) = subprocess.getstatusoutput(cmd)
if exitstatus != 0:
raise CmdError(cmd, exitstatus >> 8, output)
+ if " fuzz " in output:
+ # Drop the patch fuzz info, with a header and footer, into the log so
+ # that insane.bbclass can pick it up and raise an error or warning
+ bb.note("--- Patch fuzz start ---\n%s\n--- Patch fuzz end ---" % output)
+
return output
finally:
@@ -81,7 +92,7 @@ class PatchSet(object):
patch[param] = PatchSet.defaults[param]
if patch.get("remote"):
- patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)
+ patch["file"] = self.d.expand(bb.fetch2.localpath(patch["remote"], self.d))
patch["filemd5"] = bb.utils.md5_file(patch["file"])
@@ -211,7 +222,7 @@ class PatchTree(PatchSet):
self.patches.insert(i, patch)
def _applypatch(self, patch, force = False, reverse = False, run = True):
- shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
+ shellcmd = ["cat", patch['file'], "|", "patch", "--no-backup-if-mismatch", "-p", patch['strippath']]
if reverse:
shellcmd.append('-R')
@@ -281,8 +292,8 @@ class GitApplyTree(PatchTree):
def __init__(self, dir, d):
PatchTree.__init__(self, dir, d)
- self.commituser = d.getVar('PATCH_GIT_USER_NAME', True)
- self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL', True)
+ self.commituser = d.getVar('PATCH_GIT_USER_NAME')
+ self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
@staticmethod
def extractPatchHeader(patchfile):
@@ -316,8 +327,8 @@ class GitApplyTree(PatchTree):
@staticmethod
def interpretPatchHeader(headerlines):
import re
- author_re = re.compile('[\S ]+ <\S+@\S+\.\S+>')
- from_commit_re = re.compile('^From [a-z0-9]{40} .*')
+ author_re = re.compile(r'[\S ]+ <\S+@\S+\.\S+>')
+ from_commit_re = re.compile(r'^From [a-z0-9]{40} .*')
outlines = []
author = None
date = None
@@ -371,8 +382,8 @@ class GitApplyTree(PatchTree):
@staticmethod
def gitCommandUserOptions(cmd, commituser=None, commitemail=None, d=None):
if d:
- commituser = d.getVar('PATCH_GIT_USER_NAME', True)
- commitemail = d.getVar('PATCH_GIT_USER_EMAIL', True)
+ commituser = d.getVar('PATCH_GIT_USER_NAME')
+ commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
if commituser:
cmd += ['-c', 'user.name="%s"' % commituser]
if commitemail:
@@ -428,9 +439,10 @@ class GitApplyTree(PatchTree):
def extractPatches(tree, startcommit, outdir, paths=None):
import tempfile
import shutil
+ import re
tempdir = tempfile.mkdtemp(prefix='oepatch')
try:
- shellcmd = ["git", "format-patch", startcommit, "-o", tempdir]
+ shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", startcommit, "-o", tempdir]
if paths:
shellcmd.append('--')
shellcmd.extend(paths)
@@ -443,10 +455,13 @@ class GitApplyTree(PatchTree):
try:
with open(srcfile, 'r', encoding=encoding) as f:
for line in f:
- if line.startswith(GitApplyTree.patch_line_prefix):
+ checkline = line
+ if checkline.startswith('Subject: '):
+ checkline = re.sub(r'\[.+?\]\s*', '', checkline[9:])
+ if checkline.startswith(GitApplyTree.patch_line_prefix):
outfile = line.split()[-1].strip()
continue
- if line.startswith(GitApplyTree.ignore_commit_prefix):
+ if checkline.startswith(GitApplyTree.ignore_commit_prefix):
continue
patchlines.append(line)
except UnicodeDecodeError:
@@ -547,7 +562,7 @@ class GitApplyTree(PatchTree):
class QuiltTree(PatchSet):
def _runcmd(self, args, run = True):
- quiltrc = self.d.getVar('QUILTRCFILE', True)
+ quiltrc = self.d.getVar('QUILTRCFILE')
if not run:
return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)
@@ -723,7 +738,7 @@ class UserResolver(Resolver):
# Patch application failed
patchcmd = self.patchset.Push(True, False, False)
- t = self.patchset.d.getVar('T', True)
+ t = self.patchset.d.getVar('T')
if not t:
bb.msg.fatal("Build", "T not set")
bb.utils.mkdirhier(t)
@@ -765,3 +780,123 @@ class UserResolver(Resolver):
os.chdir(olddir)
raise
os.chdir(olddir)
+
+
+def patch_path(url, fetch, workdir, expand=True):
+ """Return the local path of a patch, or return nothing if this isn't a patch"""
+
+ local = fetch.localpath(url)
+ if os.path.isdir(local):
+ return
+ base, ext = os.path.splitext(os.path.basename(local))
+ if ext in ('.gz', '.bz2', '.xz', '.Z'):
+ if expand:
+ local = os.path.join(workdir, base)
+ ext = os.path.splitext(base)[1]
+
+ urldata = fetch.ud[url]
+ if "apply" in urldata.parm:
+ apply = oe.types.boolean(urldata.parm["apply"])
+ if not apply:
+ return
+ elif ext not in (".diff", ".patch"):
+ return
+
+ return local
+
+def src_patches(d, all=False, expand=True):
+ workdir = d.getVar('WORKDIR')
+ fetch = bb.fetch2.Fetch([], d)
+ patches = []
+ sources = []
+ for url in fetch.urls:
+ local = patch_path(url, fetch, workdir, expand)
+ if not local:
+ if all:
+ local = fetch.localpath(url)
+ sources.append(local)
+ continue
+
+ urldata = fetch.ud[url]
+ parm = urldata.parm
+ patchname = parm.get('pname') or os.path.basename(local)
+
+ apply, reason = should_apply(parm, d)
+ if not apply:
+ if reason:
+ bb.note("Patch %s %s" % (patchname, reason))
+ continue
+
+ patchparm = {'patchname': patchname}
+ if "striplevel" in parm:
+ striplevel = parm["striplevel"]
+ elif "pnum" in parm:
+ #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
+ striplevel = parm["pnum"]
+ else:
+ striplevel = '1'
+ patchparm['striplevel'] = striplevel
+
+ patchdir = parm.get('patchdir')
+ if patchdir:
+ patchparm['patchdir'] = patchdir
+
+ localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm))
+ patches.append(localurl)
+
+ if all:
+ return sources
+
+ return patches
+
+
+def should_apply(parm, d):
+ import bb.utils
+ if "mindate" in parm or "maxdate" in parm:
+ pn = d.getVar('PN')
+ srcdate = d.getVar('SRCDATE_%s' % pn)
+ if not srcdate:
+ srcdate = d.getVar('SRCDATE')
+
+ if srcdate == "now":
+ srcdate = d.getVar('DATE')
+
+ if "maxdate" in parm and parm["maxdate"] < srcdate:
+ return False, 'is outdated'
+
+ if "mindate" in parm and parm["mindate"] > srcdate:
+ return False, 'is predated'
+
+
+ if "minrev" in parm:
+ srcrev = d.getVar('SRCREV')
+ if srcrev and srcrev < parm["minrev"]:
+ return False, 'applies to later revisions'
+
+ if "maxrev" in parm:
+ srcrev = d.getVar('SRCREV')
+ if srcrev and srcrev > parm["maxrev"]:
+ return False, 'applies to earlier revisions'
+
+ if "rev" in parm:
+ srcrev = d.getVar('SRCREV')
+ if srcrev and parm["rev"] not in srcrev:
+ return False, "doesn't apply to revision"
+
+ if "notrev" in parm:
+ srcrev = d.getVar('SRCREV')
+ if srcrev and parm["notrev"] in srcrev:
+ return False, "doesn't apply to revision"
+
+ if "maxver" in parm:
+ pv = d.getVar('PV')
+ if bb.utils.vercmp_string_op(pv, parm["maxver"], ">"):
+ return False, "applies to earlier version"
+
+ if "minver" in parm:
+ pv = d.getVar('PV')
+ if bb.utils.vercmp_string_op(pv, parm["minver"], "<"):
+ return False, "applies to later version"
+
+ return True, None
+
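
should_apply() gates each patch on its SRC_URI parameters against SRCDATE,
SRCREV and PV; the version checks go through bb.utils.vercmp_string_op(). A
self-contained sketch of just the minver/maxver gates, with a toy comparator
standing in for the real version comparison:

    def version_gates(parm, pv, vercmp):
        # vercmp(a, b) returns <0, 0 or >0, like bb.utils.vercmp_string.
        if "maxver" in parm and vercmp(pv, parm["maxver"]) > 0:
            return False, "applies to earlier version"
        if "minver" in parm and vercmp(pv, parm["minver"]) < 0:
            return False, "applies to later version"
        return True, None

    naive = lambda a, b: (a > b) - (a < b)   # toy string comparator
    print(version_gates({"minver": "1.2"}, "1.1", naive))
    # -> (False, 'applies to later version')
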
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py
index 06a5af2659..fa209b9795 100644
--- a/meta/lib/oe/path.py
+++ b/meta/lib/oe/path.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import errno
import glob
import shutil
@@ -50,9 +54,30 @@ def make_relative_symlink(path):
os.remove(path)
os.symlink(base, path)
+def replace_absolute_symlinks(basedir, d):
+ """
+ Walk basedir looking for absolute symlinks and replacing them with relative ones.
+ The absolute links are assumed to be relative to basedir
+ (compared to make_relative_symlink above which tries to compute common ancestors
+ using pattern matching instead)
+ """
+ for walkroot, dirs, files in os.walk(basedir):
+ for file in files + dirs:
+ path = os.path.join(walkroot, file)
+ if not os.path.islink(path):
+ continue
+ link = os.readlink(path)
+ if not os.path.isabs(link):
+ continue
+ walkdir = os.path.dirname(path.rpartition(basedir)[2])
+ base = os.path.relpath(link, walkdir)
+ bb.debug(2, "Replacing absolute path %s with relative path %s" % (link, base))
+ os.remove(path)
+ os.symlink(base, path)
+
def format_display(path, metadata):
""" Prepare a path for display to the user. """
- rel = relative(metadata.getVar("TOPDIR", True), path)
+ rel = relative(metadata.getVar("TOPDIR"), path)
if len(rel) > len(path):
return path
else:
@@ -65,11 +90,11 @@ def copytree(src, dst):
# This way we also preserve hardlinks between files in the tree.
bb.utils.mkdirhier(dst)
- cmd = "tar --xattrs --xattrs-include='*' -cf - -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
+ cmd = "tar --xattrs --xattrs-include='*' -cf - -S -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
def copyhardlinktree(src, dst):
- """ Make the hard link when possible, otherwise copy. """
+ """Make a tree of hard links when possible, otherwise copy."""
bb.utils.mkdirhier(dst)
if os.path.isdir(src) and not len(os.listdir(src)):
return
@@ -77,23 +102,42 @@ def copyhardlinktree(src, dst):
if (os.stat(src).st_dev == os.stat(dst).st_dev):
# Need to copy directories only with tar first since cp will error if two
# writers try and create a directory at the same time
- cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, src, dst)
+ cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -S -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
source = ''
if os.path.isdir(src):
- import glob
if len(glob.glob('%s/.??*' % src)) > 0:
- source = '%s/.??* ' % src
- source = source + '%s/*' % src
+ source = './.??* '
+ source += './*'
+ s_dir = src
else:
source = src
- cmd = 'cp -afl --preserve=xattr %s %s' % (source, dst)
- subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+ s_dir = os.getcwd()
+ cmd = 'cp -afl --preserve=xattr %s %s' % (source, os.path.realpath(dst))
+ subprocess.check_output(cmd, shell=True, cwd=s_dir, stderr=subprocess.STDOUT)
else:
copytree(src, dst)
+def copyhardlink(src, dst):
+ """Make a hard link when possible, otherwise copy."""
+
+ # We need to stat the destination directory as the destination file probably
+ # doesn't exist yet.
+ dstdir = os.path.dirname(dst)
+ if os.stat(src).st_dev == os.stat(dstdir).st_dev:
+ os.link(src, dst)
+ else:
+ shutil.copy(src, dst)
+
def remove(path, recurse=True):
- """Equivalent to rm -f or rm -rf"""
+ """
+ Equivalent to rm -f or rm -rf
+ NOTE: be careful about passing paths that may contain filenames with
+ wildcards in them (as opposed to passing an actual wildcarded path) -
+ since we use glob.glob() to expand the path. Filenames containing
+ square brackets are particularly problematic since they may not
+ actually expand to match the original filename.
+ """
for name in glob.glob(path):
try:
os.unlink(name)
@@ -208,3 +252,59 @@ def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False)
raise
return file
+
+def is_path_parent(possible_parent, *paths):
+ """
+ Return True if a path is the parent of another, False otherwise.
+ Multiple paths to test can be specified in which case all
+ specified test paths must be under the parent in order to
+ return True.
+ """
+ def abs_path_trailing(pth):
+ pth_abs = os.path.abspath(pth)
+ if not pth_abs.endswith(os.sep):
+ pth_abs += os.sep
+ return pth_abs
+
+ possible_parent_abs = abs_path_trailing(possible_parent)
+ if not paths:
+ return False
+ for path in paths:
+ path_abs = abs_path_trailing(path)
+ if not path_abs.startswith(possible_parent_abs):
+ return False
+ return True
+
+def which_wild(pathname, path=None, mode=os.F_OK, *, reverse=False, candidates=False):
+ """Search a search path for pathname, supporting wildcards.
+
+ Return all paths in the specified search path matching the wildcard pattern
+ in pathname, returning only the first encountered for each file. If
+ candidates is True, information on all potential candidate paths is
+ included.
+ """
+ paths = (path or os.environ.get('PATH', os.defpath)).split(':')
+ if reverse:
+ paths.reverse()
+
+ seen, files = set(), []
+ for index, element in enumerate(paths):
+ if not os.path.isabs(element):
+ element = os.path.abspath(element)
+
+ candidate = os.path.join(element, pathname)
+ globbed = glob.glob(candidate)
+ if globbed:
+ for found_path in sorted(globbed):
+ if not os.access(found_path, mode):
+ continue
+ rel = os.path.relpath(found_path, element)
+ if rel not in seen:
+ seen.add(rel)
+ if candidates:
+ files.append((found_path, [os.path.join(p, rel) for p in paths[:index+1]]))
+ else:
+ files.append(found_path)
+
+ return files
+
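
which_wild() walks the search path in order and keeps only the first hit for
each relative filename, so earlier PATH entries shadow later ones. A minimal
sketch of that first-match-wins lookup (the real function also honours the
mode, reverse and candidates arguments):

    import glob
    import os

    def which_wild(pattern, path):
        seen, found = set(), []
        for element in path.split(":"):
            for hit in sorted(glob.glob(os.path.join(element, pattern))):
                rel = os.path.relpath(hit, element)
                if rel not in seen:     # first match wins
                    seen.add(rel)
                    found.append(hit)
        return found

    print(which_wild("python3*", "/usr/local/bin:/usr/bin"))
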
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py
index 0054f954cc..b1132ccb11 100644
--- a/meta/lib/oe/prservice.py
+++ b/meta/lib/oe/prservice.py
@@ -1,7 +1,10 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
def prserv_make_conn(d, check = False):
import prserv.serv
- host_params = list([_f for _f in (d.getVar("PRSERV_HOST", True) or '').split(':') if _f])
+ host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
try:
conn = None
conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
@@ -15,11 +18,11 @@ def prserv_make_conn(d, check = False):
return conn
def prserv_dump_db(d):
- if not d.getVar('PRSERV_HOST', True):
+ if not d.getVar('PRSERV_HOST'):
bb.error("Not using network based PR service")
return None
- conn = d.getVar("__PRSERV_CONN", True)
+ conn = d.getVar("__PRSERV_CONN")
if conn is None:
conn = prserv_make_conn(d)
if conn is None:
@@ -27,18 +30,18 @@ def prserv_dump_db(d):
return None
#dump db
- opt_version = d.getVar('PRSERV_DUMPOPT_VERSION', True)
- opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH', True)
- opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM', True)
- opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL', True))
+ opt_version = d.getVar('PRSERV_DUMPOPT_VERSION')
+ opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH')
+ opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM')
+ opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL'))
return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
- if not d.getVar('PRSERV_HOST', True):
+ if not d.getVar('PRSERV_HOST'):
bb.error("Not using network based PR service")
return None
- conn = d.getVar("__PRSERV_CONN", True)
+ conn = d.getVar("__PRSERV_CONN")
if conn is None:
conn = prserv_make_conn(d)
if conn is None:
@@ -58,7 +61,7 @@ def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksu
(filter_checksum and filter_checksum != checksum):
continue
try:
- value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum, True))
+ value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum))
except BaseException as exc:
bb.debug("Not valid value of %s:%s" % (v,str(exc)))
continue
@@ -72,8 +75,8 @@ def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksu
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
import bb.utils
#initialize the output file
- bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR', True))
- df = d.getVar('PRSERV_DUMPFILE', True)
+ bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR'))
+ df = d.getVar('PRSERV_DUMPFILE')
#write data
lf = bb.utils.lockfile("%s.lock" % df)
f = open(df, "a")
@@ -114,7 +117,7 @@ def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
bb.utils.unlockfile(lf)
def prserv_check_avail(d):
- host_params = list([_f for _f in (d.getVar("PRSERV_HOST", True) or '').split(':') if _f])
+ host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
try:
if len(host_params) != 2:
raise TypeError
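
Both prserv_make_conn() and prserv_check_avail() split PRSERV_HOST into a
"host:port" pair and reject anything else. A small sketch of that validation:

    def parse_prserv_host(value):
        params = [p for p in (value or "").split(":") if p]
        if len(params) != 2:
            raise TypeError("PRSERV_HOST must be 'host:port'")
        return params[0], int(params[1])

    print(parse_prserv_host("localhost:8585"))   # ('localhost', 8585)
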
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py
index 75e7df8546..21066c4dc3 100644
--- a/meta/lib/oe/qa.py
+++ b/meta/lib/oe/qa.py
@@ -1,4 +1,8 @@
-import os, struct
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os, struct, mmap
class NotELFFileError(Exception):
pass
@@ -23,9 +27,9 @@ class ELFFile:
EV_CURRENT = 1
# possible values for EI_DATA
- ELFDATANONE = 0
- ELFDATA2LSB = 1
- ELFDATA2MSB = 2
+ EI_DATA_NONE = 0
+ EI_DATA_LSB = 1
+ EI_DATA_MSB = 2
PT_INTERP = 3
@@ -34,51 +38,46 @@ class ELFFile:
#print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
raise NotELFFileError("%s is not an ELF" % self.name)
- def __init__(self, name, bits = 0):
+ def __init__(self, name):
self.name = name
- self.bits = bits
self.objdump_output = {}
- def open(self):
- if not os.path.isfile(self.name):
- raise NotELFFileError("%s is not a normal file" % self.name)
+ # Context Manager functions to close the mmap explicitly
+ def __enter__(self):
+ return self
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.data.close()
+
+ def open(self):
with open(self.name, "rb") as f:
- # Read 4k which should cover most of the headers we're after
- self.data = f.read(4096)
+ try:
+ self.data = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
+ except ValueError:
+ # This means the file is empty
+ raise NotELFFileError("%s is empty" % self.name)
+ # Check the file has the minimum number of ELF table entries
if len(self.data) < ELFFile.EI_NIDENT + 4:
raise NotELFFileError("%s is not an ELF" % self.name)
+ # ELF header
self.my_assert(self.data[0], 0x7f)
self.my_assert(self.data[1], ord('E'))
self.my_assert(self.data[2], ord('L'))
self.my_assert(self.data[3], ord('F'))
- if self.bits == 0:
- if self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS32:
- self.bits = 32
- elif self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS64:
- self.bits = 64
- else:
- # Not 32-bit or 64.. lets assert
- raise NotELFFileError("ELF but not 32 or 64 bit.")
- elif self.bits == 32:
- self.my_assert(self.data[ELFFile.EI_CLASS], ELFFile.ELFCLASS32)
- elif self.bits == 64:
- self.my_assert(self.data[ELFFile.EI_CLASS], ELFFile.ELFCLASS64)
+ if self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS32:
+ self.bits = 32
+ elif self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS64:
+ self.bits = 64
else:
- raise NotELFFileError("Must specify unknown, 32 or 64 bit size.")
+ # Not 32-bit or 64-bit, so raise an error
+ raise NotELFFileError("ELF but not 32 or 64 bit.")
self.my_assert(self.data[ELFFile.EI_VERSION], ELFFile.EV_CURRENT)
- self.sex = self.data[ELFFile.EI_DATA]
- if self.sex == ELFFile.ELFDATANONE:
- raise NotELFFileError("self.sex == ELFDATANONE")
- elif self.sex == ELFFile.ELFDATA2LSB:
- self.sex = "<"
- elif self.sex == ELFFile.ELFDATA2MSB:
- self.sex = ">"
- else:
- raise NotELFFileError("Unknown self.sex")
+ self.endian = self.data[ELFFile.EI_DATA]
+ if self.endian not in (ELFFile.EI_DATA_LSB, ELFFile.EI_DATA_MSB):
+ raise NotELFFileError("Unexpected EI_DATA %x" % self.endian)
def osAbi(self):
return self.data[ELFFile.EI_OSABI]
@@ -90,16 +89,20 @@ class ELFFile:
return self.bits
def isLittleEndian(self):
- return self.sex == "<"
+ return self.endian == ELFFile.EI_DATA_LSB
def isBigEndian(self):
- return self.sex == ">"
+ return self.endian == ELFFile.EI_DATA_MSB
+
+ def getStructEndian(self):
+ return {ELFFile.EI_DATA_LSB: "<",
+ ELFFile.EI_DATA_MSB: ">"}[self.endian]
def getShort(self, offset):
- return struct.unpack_from(self.sex+"H", self.data, offset)[0]
+ return struct.unpack_from(self.getStructEndian() + "H", self.data, offset)[0]
def getWord(self, offset):
- return struct.unpack_from(self.sex+"i", self.data, offset)[0]
+ return struct.unpack_from(self.getStructEndian() + "i", self.data, offset)[0]
def isDynamic(self):
"""
@@ -118,7 +121,7 @@ class ELFFile:
def machine(self):
"""
- We know the sex stored in self.sex and we
+ We know the endian stored in self.endian and we
know the position
"""
return self.getShort(ELFFile.E_MACHINE)
@@ -130,11 +133,11 @@ class ELFFile:
if cmd in self.objdump_output:
return self.objdump_output[cmd]
- objdump = d.getVar('OBJDUMP', True)
+ objdump = d.getVar('OBJDUMP')
env = os.environ.copy()
env["LC_ALL"] = "C"
- env["PATH"] = d.getVar('PATH', True)
+ env["PATH"] = d.getVar('PATH')
try:
bb.note("%s %s %s" % (objdump, cmd, self.name))
@@ -144,8 +147,30 @@ class ELFFile:
bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
return ""
+def elf_machine_to_string(machine):
+ """
+ Return the name of a given ELF e_machine field or the hex value as a string
+ if it isn't recognised.
+ """
+ try:
+ return {
+ 0x02: "SPARC",
+ 0x03: "x86",
+ 0x08: "MIPS",
+ 0x14: "PowerPC",
+ 0x28: "ARM",
+ 0x2A: "SuperH",
+ 0x32: "IA-64",
+ 0x3E: "x86-64",
+ 0xB7: "AArch64",
+ 0xF7: "BPF"
+ }[machine]
+ except:
+ return "Unknown (%s)" % repr(machine)
+
if __name__ == "__main__":
import sys
- elf = ELFFile(sys.argv[1])
- elf.open()
- print(elf.isDynamic())
+
+ with ELFFile(sys.argv[1]) as elf:
+ elf.open()
+ print(elf.isDynamic())
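
The reworked class stores EI_DATA as an integer and only maps it to a struct
byte-order prefix at unpack time. A standalone sketch of that mapping, using
the x86-64 e_machine value as sample data:

    import struct

    EI_DATA_LSB, EI_DATA_MSB = 1, 2

    def get_short(data, offset, endian):
        # Unpack a 16-bit field using the EI_DATA byte-order flag, as
        # getStructEndian()/getShort() do above.
        prefix = {EI_DATA_LSB: "<", EI_DATA_MSB: ">"}[endian]
        return struct.unpack_from(prefix + "H", data, offset)[0]

    print(hex(get_short(b"\x3e\x00", 0, EI_DATA_LSB)))   # 0x3e: x86-64
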
diff --git a/meta/lib/oe/recipeutils.py b/meta/lib/oe/recipeutils.py
index 58e4028aed..630ae967af 100644
--- a/meta/lib/oe/recipeutils.py
+++ b/meta/lib/oe/recipeutils.py
@@ -2,7 +2,9 @@
#
# Some code borrowed from the OE layer index
#
-# Copyright (C) 2013-2016 Intel Corporation
+# Copyright (C) 2013-2017 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
@@ -16,71 +18,40 @@ import shutil
import re
import fnmatch
import glob
-from collections import OrderedDict, defaultdict
+import bb.tinfoil
+from collections import OrderedDict, defaultdict
+from bb.utils import vercmp_string
# Help us to find places to insert values
recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()']
# Variables that sometimes are a bit long but shouldn't be wrapped
-nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', 'SRC_URI[md5sum]', 'SRC_URI[sha256sum]']
+nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha256sum\]']
list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
-def pn_to_recipe(cooker, pn, mc=''):
- """Convert a recipe name (PN) to the path to the recipe file"""
- import bb.providers
-
- if pn in cooker.recipecaches[mc].pkg_pn:
- best = bb.providers.findBestProvider(pn, cooker.data, cooker.recipecaches[mc], cooker.recipecaches[mc].pkg_pn)
- return best[3]
- elif pn in cooker.recipecaches[mc].providers:
- filenames = cooker.recipecaches[mc].providers[pn]
- eligible, foundUnique = bb.providers.filterProviders(filenames, pn, cooker.expanded_data, cooker.recipecaches[mc])
- filename = eligible[0]
- return filename
- else:
- return None
-
-
-def get_unavailable_reasons(cooker, pn):
- """If a recipe could not be found, find out why if possible"""
- import bb.taskdata
- taskdata = bb.taskdata.TaskData(None, skiplist=cooker.skiplist)
- return taskdata.get_reasons(pn)
-
-
-def parse_recipe(cooker, fn, appendfiles):
- """
- Parse an individual recipe file, optionally with a list of
- bbappend files.
- """
- import bb.cache
- parser = bb.cache.NoCache(cooker.databuilder)
- envdata = parser.loadDataFull(fn, appendfiles)
- return envdata
-
-
-def parse_recipe_simple(cooker, pn, d, appends=True):
+def simplify_history(history, d):
"""
- Parse a recipe and optionally all bbappends that apply to it
- in the current configuration.
+ Eliminate any irrelevant events from a variable history
"""
- import bb.providers
-
- recipefile = pn_to_recipe(cooker, pn)
- if not recipefile:
- skipreasons = get_unavailable_reasons(cooker, pn)
- # We may as well re-use bb.providers.NoProvider here
- if skipreasons:
- raise bb.providers.NoProvider(skipreasons)
- else:
- raise bb.providers.NoProvider('Unable to find any recipe file matching %s' % pn)
- if appends:
- appendfiles = cooker.collection.get_file_appends(recipefile)
- else:
- appendfiles = None
- return parse_recipe(cooker, recipefile, appendfiles)
+ ret_history = []
+ has_set = False
+ # Go backwards through the history and remove any immediate operations
+ # before the most recent set
+ for event in reversed(history):
+ if 'flag' in event or not 'file' in event:
+ continue
+ if event['op'] == 'set':
+ if has_set:
+ continue
+ has_set = True
+ elif event['op'] in ('append', 'prepend', 'postdot', 'predot'):
+ # Reminder: "append" and "prepend" mean += and =+ respectively, NOT _append / _prepend
+ if has_set:
+ continue
+ ret_history.insert(0, event)
+ return ret_history
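
Run on a toy history, the effect of simplify_history() is that operations
overwritten by the most recent unconditional set are dropped, while anything
after that set survives. A standalone copy of the core loop for illustration:

    def simplify(history):
        ret, has_set = [], False
        for event in reversed(history):
            if 'flag' in event or 'file' not in event:
                continue
            if event['op'] == 'set':
                if has_set:
                    continue
                has_set = True
            elif event['op'] in ('append', 'prepend', 'postdot', 'predot'):
                if has_set:
                    continue
            ret.insert(0, event)
        return ret

    history = [{'op': 'append', 'file': 'a.inc'},   # lost: predates the set
               {'op': 'set',    'file': 'a.bb'},
               {'op': 'append', 'file': 'b.inc'}]   # kept: follows the set
    print([e['op'] for e in simplify(history)])     # ['set', 'append']
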
def get_var_files(fn, varlist, d):
@@ -89,11 +60,19 @@ def get_var_files(fn, varlist, d):
"""
varfiles = {}
for v in varlist:
- history = d.varhistory.variable(v)
files = []
- for event in history:
- if 'file' in event and not 'flag' in event:
- files.append(event['file'])
+ if '[' in v:
+ varsplit = v.split('[')
+ varflag = varsplit[1].split(']')[0]
+ history = d.varhistory.variable(varsplit[0])
+ for event in history:
+ if 'file' in event and event.get('flag', '') == varflag:
+ files.append(event['file'])
+ else:
+ history = d.varhistory.variable(v)
+ for event in history:
+ if 'file' in event and not 'flag' in event:
+ files.append(event['file'])
if files:
actualfile = files[-1]
else:
@@ -173,6 +152,10 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
else:
newline = ''
+ nowrap_vars_res = []
+ for item in nowrap_vars:
+ nowrap_vars_res.append(re.compile('^%s$' % item))
+
recipe_progression_res = []
recipe_progression_restrs = []
for item in recipe_progression:
@@ -180,7 +163,7 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
key = item[:-2]
else:
key = item
- restr = '%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key
+ restr = r'%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key
if item.endswith('()'):
recipe_progression_restrs.append(restr + '()')
else:
@@ -203,15 +186,27 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
def outputvalue(name, lines, rewindcomments=False):
if values[name] is None:
return
- rawtext = '%s = "%s"%s' % (name, values[name], newline)
+ if isinstance(values[name], tuple):
+ op, value = values[name]
+ if op == '+=' and value.strip() == '':
+ return
+ else:
+ value = values[name]
+ op = '='
+ rawtext = '%s %s "%s"%s' % (name, op, value, newline)
addlines = []
- if name in nowrap_vars:
+ nowrap = False
+ for nowrap_re in nowrap_vars_res:
+ if nowrap_re.match(name):
+ nowrap = True
+ break
+ if nowrap:
addlines.append(rawtext)
elif name in list_vars:
- splitvalue = split_var_value(values[name], assignment=False)
+ splitvalue = split_var_value(value, assignment=False)
if len(splitvalue) > 1:
linesplit = ' \\\n' + (' ' * (len(name) + 4))
- addlines.append('%s = "%s%s"%s' % (name, linesplit.join(splitvalue), linesplit, newline))
+ addlines.append('%s %s "%s%s"%s' % (name, op, linesplit.join(splitvalue), linesplit, newline))
else:
addlines.append(rawtext)
else:
@@ -219,6 +214,11 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
for wrapline in wrapped[:-1]:
addlines.append('%s \\%s' % (wrapline, newline))
addlines.append('%s%s' % (wrapped[-1], newline))
+
+ # Split on newlines - this isn't strictly necessary if you are only
+ # going to write the output to disk, but if you want to compare it
+ # (as patch_recipe_file() will do if patch=True) then it's important.
+ addlines = [line for l in addlines for line in l.splitlines(True)]
if rewindcomments:
# Ensure we insert the lines before any leading comments
# (that we'd want to ensure remain leading the next value)
@@ -268,7 +268,7 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
return changed, tolines
-def patch_recipe_file(fn, values, patch=False, relpath=''):
+def patch_recipe_file(fn, values, patch=False, relpath='', redirect_output=None):
"""Update or insert variable values into a recipe file (assuming you
have already identified the exact file you want to update.)
Note that some manual inspection/intervention may be required
@@ -280,7 +280,11 @@ def patch_recipe_file(fn, values, patch=False, relpath=''):
_, tolines = patch_recipe_lines(fromlines, values)
- if patch:
+ if redirect_output:
+ with open(os.path.join(redirect_output, os.path.basename(fn)), 'w') as f:
+ f.writelines(tolines)
+ return None
+ elif patch:
relfn = os.path.relpath(fn, relpath)
diff = difflib.unified_diff(fromlines, tolines, 'a/%s' % relfn, 'b/%s' % relfn)
return diff
@@ -330,17 +334,52 @@ def localise_file_vars(fn, varfiles, varlist):
return filevars
-def patch_recipe(d, fn, varvalues, patch=False, relpath=''):
+def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None):
"""Modify a list of variable values in the specified recipe. Handles inc files if
used by the recipe.
"""
+ overrides = d.getVar('OVERRIDES').split(':')
+ def override_applicable(hevent):
+ op = hevent['op']
+ if '[' in op:
+ opoverrides = op.split('[')[1].split(']')[0].split('_')
+ for opoverride in opoverrides:
+ if not opoverride in overrides:
+ return False
+ return True
+
varlist = varvalues.keys()
+ fn = os.path.abspath(fn)
varfiles = get_var_files(fn, varlist, d)
locs = localise_file_vars(fn, varfiles, varlist)
patches = []
for f,v in locs.items():
vals = {k: varvalues[k] for k in v}
- patchdata = patch_recipe_file(f, vals, patch, relpath)
+ f = os.path.abspath(f)
+ if f == fn:
+ extravals = {}
+ for var, value in vals.items():
+ if var in list_vars:
+ history = simplify_history(d.varhistory.variable(var), d)
+ recipe_set = False
+ for event in history:
+ if os.path.abspath(event['file']) == fn:
+ if event['op'] == 'set':
+ recipe_set = True
+ if not recipe_set:
+ for event in history:
+ if event['op'].startswith('_remove'):
+ continue
+ if not override_applicable(event):
+ continue
+ newvalue = value.replace(event['detail'], '')
+ if newvalue == value and os.path.abspath(event['file']) == fn and event['op'].startswith('_'):
+ op = event['op'].replace('[', '_').replace(']', '')
+ extravals[var + op] = None
+ value = newvalue
+ vals[var] = ('+=', value)
+ vals.update(extravals)
+ patchdata = patch_recipe_file(f, vals, patch, relpath, redirect_output)
if patch:
patches.append(patchdata)
@@ -351,7 +390,7 @@ def patch_recipe(d, fn, varvalues, patch=False, relpath=''):
-def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True):
+def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True, all_variants=False):
"""Copy (local) recipe files, including both files included via include/require,
and files referred to in the SRC_URI variable."""
import bb.fetch2
@@ -359,18 +398,41 @@ def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True):
# FIXME need a warning if the unexpanded SRC_URI value contains variable references
- uris = (d.getVar('SRC_URI', True) or "").split()
- fetch = bb.fetch2.Fetch(uris, d)
- if download:
- fetch.download()
+ uri_values = []
+ localpaths = []
+ def fetch_urls(rdata):
+ # Collect the local paths from SRC_URI
+ srcuri = rdata.getVar('SRC_URI') or ""
+ if srcuri not in uri_values:
+ fetch = bb.fetch2.Fetch(srcuri.split(), rdata)
+ if download:
+ fetch.download()
+ for pth in fetch.localpaths():
+ if pth not in localpaths:
+ localpaths.append(pth)
+ uri_values.append(srcuri)
+
+ fetch_urls(d)
+ if all_variants:
+ # Get files for other variants e.g. in the case of a SRC_URI_append
+ localdata = bb.data.createCopy(d)
+ variants = (localdata.getVar('BBCLASSEXTEND') or '').split()
+ if variants:
+ # Ensure we handle class-target if we're dealing with one of the variants
+ variants.append('target')
+ for variant in variants:
+ localdata.setVar('CLASSOVERRIDE', 'class-%s' % variant)
+ fetch_urls(localdata)
# Copy local files to target directory and gather any remote files
- bb_dir = os.path.dirname(d.getVar('FILE', True)) + os.sep
+ bb_dir = os.path.abspath(os.path.dirname(d.getVar('FILE'))) + os.sep
remotes = []
copied = []
- includes = [path for path in d.getVar('BBINCLUDED', True).split() if
- path.startswith(bb_dir) and os.path.exists(path)]
- for path in fetch.localpaths() + includes:
+ # Need to do this in two steps since we want to check against the absolute path
+ includes = [os.path.abspath(path) for path in d.getVar('BBINCLUDED').split() if os.path.exists(path)]
+ # We also check this below, but we don't want any items in this list being considered remotes
+ includes = [path for path in includes if path.startswith(bb_dir)]
+ for path in localpaths + includes:
# Only import files that are under the meta directory
if path.startswith(bb_dir):
if not whole_dir:
@@ -389,15 +451,21 @@ def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True):
return copied, remotes
-def get_recipe_local_files(d, patches=False):
+def get_recipe_local_files(d, patches=False, archives=False):
"""Get a list of local files in SRC_URI within a recipe."""
- uris = (d.getVar('SRC_URI', True) or "").split()
+ import oe.patch
+ uris = (d.getVar('SRC_URI') or "").split()
fetch = bb.fetch2.Fetch(uris, d)
+ # FIXME this list should be factored out somewhere else (such as the
+ # fetcher) though note that this only encompasses actual container formats
+ # i.e. that can contain multiple files as opposed to those that only
+ # contain a compressed stream (i.e. .tar.gz as opposed to just .gz)
+ archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.txz', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
ret = {}
for uri in uris:
if fetch.ud[uri].type == 'file':
if (not patches and
- bb.utils.exec_flat_python_func('patch_path', uri, fetch, '')):
+ oe.patch.patch_path(uri, fetch, '', expand=False)):
continue
# Skip files that are referenced by absolute path
fname = fetch.ud[uri].basepath
@@ -409,16 +477,29 @@ def get_recipe_local_files(d, patches=False):
if os.path.isabs(subdir):
continue
fname = os.path.join(subdir, fname)
- ret[fname] = fetch.localpath(uri)
+ localpath = fetch.localpath(uri)
+ if not archives:
+ # Ignore archives that will be unpacked
+ if localpath.endswith(tuple(archive_exts)):
+ unpack = fetch.ud[uri].parm.get('unpack', True)
+ if unpack:
+ continue
+ if os.path.isdir(localpath):
+ for root, dirs, files in os.walk(localpath):
+ for fname in files:
+ fileabspath = os.path.join(root,fname)
+ srcdir = os.path.dirname(localpath)
+ ret[os.path.relpath(fileabspath,srcdir)] = fileabspath
+ else:
+ ret[fname] = localpath
return ret
def get_recipe_patches(d):
"""Get a list of the patches included in SRC_URI within a recipe."""
+ import oe.patch
+ patches = oe.patch.src_patches(d, expand=False)
patchfiles = []
- # Execute src_patches() defined in patch.bbclass - this works since that class
- # is inherited globally
- patches = bb.utils.exec_flat_python_func('src_patches', d)
for patch in patches:
_, _, local, _, _, parm = bb.fetch.decodeurl(patch)
patchfiles.append(local)
@@ -435,14 +516,12 @@ def get_recipe_patched_files(d):
change mode ('A' for add, 'D' for delete or 'M' for modify)
"""
import oe.patch
- # Execute src_patches() defined in patch.bbclass - this works since that class
- # is inherited globally
- patches = bb.utils.exec_flat_python_func('src_patches', d)
+ patches = oe.patch.src_patches(d, expand=False)
patchedfiles = {}
for patch in patches:
_, _, patchfile, _, _, parm = bb.fetch.decodeurl(patch)
striplevel = int(parm['striplevel'])
- patchedfiles[patchfile] = oe.patch.PatchSet.getPatchedFiles(patchfile, striplevel, os.path.join(d.getVar('S', True), parm.get('patchdir', '')))
+ patchedfiles[patchfile] = oe.patch.PatchSet.getPatchedFiles(patchfile, striplevel, os.path.join(d.getVar('S'), parm.get('patchdir', '')))
return patchedfiles
@@ -480,9 +559,9 @@ def get_bbfile_path(d, destdir, extrapathhint=None):
confdata.setVar('LAYERDIR', destlayerdir)
destlayerconf = os.path.join(destlayerdir, "conf", "layer.conf")
confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
- bbfilespecs = (confdata.getVar('BBFILES', True) or '').split()
+ bbfilespecs = (confdata.getVar('BBFILES') or '').split()
if destdir == destlayerdir:
for bbfilespec in bbfilespecs:
if not bbfilespec.endswith('.bbappend'):
@@ -495,8 +574,8 @@ def get_bbfile_path(d, destdir, extrapathhint=None):
# Try to make up a path that matches BBFILES
# this is a little crude, but better than nothing
- bpn = d.getVar('BPN', True)
- recipefn = os.path.basename(d.getVar('FILE', True))
+ bpn = d.getVar('BPN')
+ recipefn = os.path.basename(d.getVar('FILE'))
pathoptions = [destdir]
if extrapathhint:
pathoptions.append(os.path.join(destdir, extrapathhint))
@@ -520,7 +599,7 @@ def get_bbappend_path(d, destlayerdir, wildcardver=False):
import bb.cookerdata
destlayerdir = os.path.abspath(destlayerdir)
- recipefile = d.getVar('FILE', True)
+ recipefile = d.getVar('FILE')
recipefn = os.path.splitext(os.path.basename(recipefile))[0]
if wildcardver and '_' in recipefn:
recipefn = recipefn.split('_', 1)[0] + '_%'
@@ -540,7 +619,7 @@ def get_bbappend_path(d, destlayerdir, wildcardver=False):
appendpath = os.path.join(destlayerdir, os.path.relpath(os.path.dirname(recipefile), origlayerdir), appendfn)
closepath = ''
pathok = True
- for bbfilespec in confdata.getVar('BBFILES', True).split():
+ for bbfilespec in confdata.getVar('BBFILES').split():
if fnmatch.fnmatchcase(appendpath, bbfilespec):
# Our append path works, we're done
break
@@ -568,7 +647,7 @@ def get_bbappend_path(d, destlayerdir, wildcardver=False):
return (appendpath, pathok)
-def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None):
+def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None):
"""
Writes a bbappend file for a recipe
Parameters:
@@ -595,6 +674,9 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
value pairs, or simply a list of the lines.
removevalues:
Variable values to remove - a dict of names/values.
+ redirect_output:
+ If specified, redirects writing the output file to the
+ specified directory (for dry-run purposes)
"""
if not removevalues:
@@ -609,11 +691,12 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
appenddir = os.path.dirname(appendpath)
- bb.utils.mkdirhier(appenddir)
+ if not redirect_output:
+ bb.utils.mkdirhier(appenddir)
# FIXME check if the bbappend doesn't get overridden by a higher priority layer?
- layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS', True).split()]
+ layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
if not os.path.abspath(destlayerdir) in layerdirs:
bb.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
@@ -649,7 +732,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
else:
bbappendlines.append((varname, op, value))
- destsubdir = rd.getVar('PN', True)
+ destsubdir = rd.getVar('PN')
if srcfiles:
bbappendlines.append(('FILESEXTRAPATHS_prepend', ':=', '${THISDIR}/${PN}:'))
@@ -668,7 +751,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
srcurientry = 'file://%s' % srcfile
# Double-check it's not there already
# FIXME do we care if the entry is added by another bbappend that might go away?
- if not srcurientry in rd.getVar('SRC_URI', True).split():
+ if not srcurientry in rd.getVar('SRC_URI').split():
if machine:
appendline('SRC_URI_append%s' % appendoverride, '=', ' ' + srcurientry)
else:
@@ -686,9 +769,18 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
if instfunclines:
bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines))
- bb.note('Writing append file %s' % appendpath)
+ if redirect_output:
+ bb.note('Writing append file %s (dry-run)' % appendpath)
+ outfile = os.path.join(redirect_output, os.path.basename(appendpath))
+ # Only take a copy if the file isn't already there (this function may be called
+ # multiple times per operation when we're handling overrides)
+ if os.path.exists(appendpath) and not os.path.exists(outfile):
+ shutil.copy2(appendpath, outfile)
+ else:
+ bb.note('Writing append file %s' % appendpath)
+ outfile = appendpath
- if os.path.exists(appendpath):
+ if os.path.exists(outfile):
# Work around lack of nonlocal in python 2
extvars = {'destsubdir': destsubdir}
@@ -760,7 +852,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
if removevalues:
varnames.extend(list(removevalues.keys()))
- with open(appendpath, 'r') as f:
+ with open(outfile, 'r') as f:
(updated, newlines) = bb.utils.edit_metadata(f, varnames, appendfile_varfunc)
destsubdir = extvars['destsubdir']
@@ -777,16 +869,27 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
updated = True
if updated:
- with open(appendpath, 'w') as f:
+ with open(outfile, 'w') as f:
f.writelines(newlines)
if copyfiles:
if machine:
destsubdir = os.path.join(destsubdir, machine)
+ if redirect_output:
+ outdir = redirect_output
+ else:
+ outdir = appenddir
for newfile, srcfile in copyfiles.items():
- filedest = os.path.join(appenddir, destsubdir, os.path.basename(srcfile))
+ filedest = os.path.join(outdir, destsubdir, os.path.basename(srcfile))
if os.path.abspath(newfile) != os.path.abspath(filedest):
- bb.note('Copying %s to %s' % (newfile, filedest))
+ if newfile.startswith(tempfile.gettempdir()):
+ newfiledisp = os.path.basename(newfile)
+ else:
+ newfiledisp = newfile
+ if redirect_output:
+ bb.note('Copying %s to %s (dry-run)' % (newfiledisp, os.path.join(appenddir, destsubdir, os.path.basename(srcfile))))
+ else:
+ bb.note('Copying %s to %s' % (newfiledisp, filedest))
bb.utils.mkdirhier(os.path.dirname(filedest))
shutil.copyfile(newfile, filedest)
@@ -795,7 +898,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
def find_layerdir(fn):
""" Figure out the path to the base of the layer containing a file (e.g. a recipe)"""
- pth = fn
+ pth = os.path.abspath(fn)
layerdir = ''
while pth:
if os.path.exists(os.path.join(pth, 'conf', 'layer.conf')):
@@ -813,7 +916,7 @@ def replace_dir_vars(path, d):
# Sort by length so we get the variables we're interested in first
for var in sorted(list(d.keys()), key=len):
if var.endswith('dir') and var.lower() == var:
- value = d.getVar(var, True)
+ value = d.getVar(var)
if value.startswith('/') and not '\n' in value and value not in dirvars:
dirvars[value] = var
for dirpath in sorted(list(dirvars.keys()), reverse=True):
@@ -831,7 +934,7 @@ def get_recipe_pv_without_srcpv(pv, uri_type):
sfx = ''
if uri_type == 'git':
- git_regex = re.compile("(?P<pfx>v?)(?P<ver>[^\+]*)((?P<sfx>\+(git)?r?(AUTOINC\+))(?P<rev>.*))?")
+ git_regex = re.compile(r"(?P<pfx>v?)(?P<ver>.*?)(?P<sfx>\+[^\+]*(git)?r?(AUTOINC\+))(?P<rev>.*)")
m = git_regex.match(pv)
if m:
@@ -839,7 +942,7 @@ def get_recipe_pv_without_srcpv(pv, uri_type):
pfx = m.group('pfx')
sfx = m.group('sfx')
else:
- regex = re.compile("(?P<pfx>(v|r)?)(?P<ver>.*)")
+ regex = re.compile(r"(?P<pfx>(v|r)?)(?P<ver>.*)")
m = regex.match(pv)
if m:
pv = m.group('ver')
@@ -856,25 +959,25 @@ def get_recipe_upstream_version(rd):
FetchError when we don't have network access or the upstream site doesn't respond.
NoMethodError when the fetcher's latest_versionstring method isn't implemented.
- Returns a dictonary with version, type and datetime.
+ Returns a dictionary with version, repository revision, current_version, type and datetime.
Type can be A for Automatic, M for Manual and U for Unknown.
"""
from bb.fetch2 import decodeurl
from datetime import datetime
ru = {}
+ ru['current_version'] = rd.getVar('PV')
ru['version'] = ''
ru['type'] = 'U'
ru['datetime'] = ''
-
- pv = rd.getVar('PV', True)
+ ru['revision'] = ''
# XXX: If we don't have SRC_URI, we don't have upstream sources, so
# return the current recipe version, so that the upstream version check
# declares a match.
- src_uris = rd.getVar('SRC_URI', True)
+ src_uris = rd.getVar('SRC_URI')
if not src_uris:
- ru['version'] = pv
+ ru['version'] = ru['current_version']
ru['type'] = 'M'
ru['datetime'] = datetime.now()
return ru
@@ -883,13 +986,16 @@ def get_recipe_upstream_version(rd):
src_uri = src_uris.split()[0]
uri_type, _, _, _, _, _ = decodeurl(src_uri)
- manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION", True)
+ (pv, pfx, sfx) = get_recipe_pv_without_srcpv(rd.getVar('PV'), uri_type)
+ ru['current_version'] = pv
+
+ manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION")
if manual_upstream_version:
# manual tracking of upstream version.
ru['version'] = manual_upstream_version
ru['type'] = 'M'
- manual_upstream_date = rd.getVar("CHECK_DATE", True)
+ manual_upstream_date = rd.getVar("CHECK_DATE")
if manual_upstream_date:
date = datetime.strptime(manual_upstream_date, "%b %d, %Y")
else:
@@ -903,33 +1009,106 @@ def get_recipe_upstream_version(rd):
ru['datetime'] = datetime.now()
else:
ud = bb.fetch2.FetchData(src_uri, rd)
- pupver = ud.method.latest_versionstring(ud, rd)
- (upversion, revision) = pupver
+ if rd.getVar("UPSTREAM_CHECK_COMMITS") == "1":
+ revision = ud.method.latest_revision(ud, rd, 'default')
+ upversion = pv
+ if revision != rd.getVar("SRCREV"):
+ upversion = upversion + "-new-commits-available"
+ else:
+ pupver = ud.method.latest_versionstring(ud, rd)
+ (upversion, revision) = pupver
+
+ if upversion:
+ ru['version'] = upversion
+ ru['type'] = 'A'
- # format git version version+gitAUTOINC+HASH
- if uri_type == 'git':
- (pv, pfx, sfx) = get_recipe_pv_without_srcpv(pv, uri_type)
+ if revision:
+ ru['revision'] = revision
- # if contains revision but not upversion use current pv
- if upversion == '' and revision:
- upversion = pv
+ ru['datetime'] = datetime.now()
+
+ return ru
+
+def _get_recipe_upgrade_status(data):
+ uv = get_recipe_upstream_version(data)
- if upversion:
- tmp = upversion
- upversion = ''
+ pn = data.getVar('PN')
+ cur_ver = uv['current_version']
- if pfx:
- upversion = pfx + tmp
+ upstream_version_unknown = data.getVar('UPSTREAM_VERSION_UNKNOWN')
+ if not uv['version']:
+ status = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
+ else:
+ cmp = vercmp_string(uv['current_version'], uv['version'])
+ if cmp == -1:
+ status = "UPDATE" if not upstream_version_unknown else "KNOWN_BROKEN"
+ elif cmp == 0:
+ status = "MATCH" if not upstream_version_unknown else "KNOWN_BROKEN"
+ else:
+ status = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
+
+ next_ver = uv['version'] if uv['version'] else "N/A"
+ revision = uv['revision'] if uv['revision'] else "N/A"
+ maintainer = data.getVar('RECIPE_MAINTAINER')
+ no_upgrade_reason = data.getVar('RECIPE_NO_UPDATE_REASON')
+
+ return (pn, status, cur_ver, next_ver, maintainer, revision, no_upgrade_reason)
+
+def get_recipe_upgrade_status(recipes=None):
+ pkgs_list = []
+ data_copy_list = []
+ copy_vars = ('SRC_URI',
+ 'PV',
+ 'GITDIR',
+ 'DL_DIR',
+ 'PN',
+ 'CACHE',
+ 'PERSISTENT_DIR',
+ 'BB_URI_HEADREVS',
+ 'UPSTREAM_CHECK_COMMITS',
+ 'UPSTREAM_CHECK_GITTAGREGEX',
+ 'UPSTREAM_CHECK_REGEX',
+ 'UPSTREAM_CHECK_URI',
+ 'UPSTREAM_VERSION_UNKNOWN',
+ 'RECIPE_MAINTAINER',
+ 'RECIPE_NO_UPDATE_REASON',
+ 'RECIPE_UPSTREAM_VERSION',
+ 'RECIPE_UPSTREAM_DATE',
+ 'CHECK_DATE',
+ )
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False)
+
+ if not recipes:
+ recipes = tinfoil.all_recipe_files(variants=False)
+
+ for fn in recipes:
+ try:
+ if fn.startswith("/"):
+ data = tinfoil.parse_recipe_file(fn)
else:
- upversion = tmp
+ data = tinfoil.parse_recipe(fn)
+ except bb.providers.NoProvider:
+ bb.note(" No provider for %s" % fn)
+ continue
- if sfx:
- upversion = upversion + sfx + revision[:10]
+ unreliable = data.getVar('UPSTREAM_CHECK_UNRELIABLE')
+ if unreliable == "1":
+ bb.note(" Skip package %s as upstream check unreliable" % pn)
+ continue
- if upversion:
- ru['version'] = upversion
- ru['type'] = 'A'
+ data_copy = bb.data.init()
+ for var in copy_vars:
+ data_copy.setVar(var, data.getVar(var))
+ for k in data:
+ if k.startswith('SRCREV'):
+ data_copy.setVar(k, data.getVar(k))
- ru['datetime'] = datetime.now()
+ data_copy_list.append(data_copy)
- return ru
+ from concurrent.futures import ProcessPoolExecutor
+ with ProcessPoolExecutor(max_workers=utils.cpu_count()) as executor:
+ pkgs_list = executor.map(_get_recipe_upgrade_status, data_copy_list)
+
+ return pkgs_list
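A hedged usage sketch for the new helper; it assumes it runs inside an initialised build environment (Tinfoil needs one) and simply prints recipes with a pending upgrade:

    from oe.recipeutils import get_recipe_upgrade_status

    for pn, status, cur, next_ver, maintainer, rev, reason in get_recipe_upgrade_status():
        if status == "UPDATE":
            print("%s: %s -> %s (maintainer: %s)" % (pn, cur, next_ver, maintainer))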
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
index a348b975c2..c62fa5f54a 100644
--- a/meta/lib/oe/rootfs.py
+++ b/meta/lib/oe/rootfs.py
@@ -1,3 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.package_manager import *
@@ -15,12 +18,13 @@ class Rootfs(object, metaclass=ABCMeta):
This is an abstract class. Do not instantiate this directly.
"""
- def __init__(self, d, progress_reporter=None):
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
self.d = d
self.pm = None
- self.image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
- self.deploydir = self.d.getVar('IMGDEPLOYDIR', True)
+ self.image_rootfs = self.d.getVar('IMAGE_ROOTFS')
+ self.deploydir = self.d.getVar('IMGDEPLOYDIR')
self.progress_reporter = progress_reporter
+ self.logcatcher = logcatcher
self.install_order = Manifest.INSTALL_ORDER
@@ -53,6 +57,8 @@ class Rootfs(object, metaclass=ABCMeta):
messages = []
with open(log_path, 'r') as log:
for line in log:
+ if self.logcatcher and self.logcatcher.contains(line.rstrip()):
+ continue
for ee in excludes:
m = ee.search(line)
if m:
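The new logcatcher parameter is duck-typed: anything exposing a contains(line) predicate works. A minimal sketch of such an object (hypothetical class, not part of this patch):

    class KnownNoiseCatcher:
        """Holds log lines that _log_check() should skip over."""
        def __init__(self, lines=None):
            self._lines = set(lines or [])

        def add(self, line):
            self._lines.add(line)

        def contains(self, line):
            return line in self._lines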
@@ -69,7 +75,7 @@ class Rootfs(object, metaclass=ABCMeta):
else:
msg = '%d %s messages' % (len(messages), type)
msg = '[log_check] %s: found %s in the logfile:\n%s' % \
- (self.d.getVar('PN', True), msg, ''.join(messages))
+ (self.d.getVar('PN'), msg, ''.join(messages))
if type == 'error':
bb.fatal(msg)
else:
@@ -84,11 +90,10 @@ class Rootfs(object, metaclass=ABCMeta):
def _insert_feed_uris(self):
if bb.utils.contains("IMAGE_FEATURES", "package-management",
True, False, self.d):
- self.pm.insert_feeds_uris()
+ self.pm.insert_feeds_uris(self.d.getVar('PACKAGE_FEED_URIS') or "",
+ self.d.getVar('PACKAGE_FEED_BASE_PATHS') or "",
+ self.d.getVar('PACKAGE_FEED_ARCHS'))
- @abstractmethod
- def _handle_intercept_failure(self, failed_script):
- pass
"""
    The _cleanup() method should be used to clean up stuff that we don't really
@@ -100,7 +105,7 @@ class Rootfs(object, metaclass=ABCMeta):
pass
def _setup_dbg_rootfs(self, dirs):
- gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS', True) or '0'
+ gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
if gen_debugfs != '1':
return
@@ -142,6 +147,20 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note(" Install complementary '*-dbg' packages...")
self.pm.install_complementary('*-dbg')
+ if self.d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ bb.note(" Install complementary '*-src' packages...")
+ self.pm.install_complementary('*-src')
+
+ """
+ Install additional debug packages. This makes it possible to install extra
+ packages that are not automatically installed as complementary packages of
+ the standard ones, e.g. the debug packages of static libraries.
+ """
+ extra_debug_pkgs = self.d.getVar('IMAGE_INSTALL_DEBUGFS')
+ if extra_debug_pkgs:
+ bb.note(" Install extra debug packages...")
+ self.pm.install(extra_debug_pkgs.split(), True)
+
bb.note(" Rename debug rootfs...")
try:
shutil.rmtree(self.image_rootfs + '-dbg')
@@ -153,7 +172,7 @@ class Rootfs(object, metaclass=ABCMeta):
os.rename(self.image_rootfs + '-orig', self.image_rootfs)
def _exec_shell_cmd(self, cmd):
- fakerootcmd = self.d.getVar('FAKEROOT', True)
+ fakerootcmd = self.d.getVar('FAKEROOT')
if fakerootcmd is not None:
exec_cmd = [fakerootcmd, cmd]
else:
@@ -168,28 +187,14 @@ class Rootfs(object, metaclass=ABCMeta):
def create(self):
bb.note("###### Generate rootfs #######")
- pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND", True)
- post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND", True)
- rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND', True)
-
- postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR", True)
- if not postinst_intercepts_dir:
- postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
- intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
- "intercept_scripts")
-
- bb.utils.remove(intercepts_dir, True)
+ pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND")
+ post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
+ rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
bb.utils.mkdirhier(self.image_rootfs)
bb.utils.mkdirhier(self.deploydir)
- shutil.copytree(postinst_intercepts_dir, intercepts_dir)
-
- shutil.copy(self.d.expand("${COREBASE}/meta/files/deploydir_readme.txt"),
- self.deploydir +
- "/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt")
-
execute_pre_post_process(self.d, pre_process_cmds)
if self.progress_reporter:
@@ -198,14 +203,14 @@ class Rootfs(object, metaclass=ABCMeta):
# call the package manager dependent create method
self._create()
- sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir', True)
+ sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir')
bb.utils.mkdirhier(sysconfdir)
with open(sysconfdir + "/version", "w+") as ver:
- ver.write(self.d.getVar('BUILDNAME', True) + "\n")
+ ver.write(self.d.getVar('BUILDNAME') + "\n")
execute_pre_post_process(self.d, rootfs_post_install_cmds)
- self._run_intercepts()
+ self.pm.run_intercepts()
execute_pre_post_process(self.d, post_process_cmds)
@@ -220,7 +225,7 @@ class Rootfs(object, metaclass=ABCMeta):
"offline and rootfs is read-only: %s" %
delayed_postinsts)
- if self.d.getVar('USE_DEVFS', True) != "1":
+ if self.d.getVar('USE_DEVFS') != "1":
self._create_devfs()
self._uninstall_unneeded()
@@ -232,7 +237,7 @@ class Rootfs(object, metaclass=ABCMeta):
self._run_ldconfig()
- if self.d.getVar('USE_DEPMOD', True) != "0":
+ if self.d.getVar('USE_DEPMOD') != "0":
self._generate_kernel_module_deps()
self._cleanup()
@@ -248,21 +253,33 @@ class Rootfs(object, metaclass=ABCMeta):
if delayed_postinsts is None:
if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
self._exec_shell_cmd(["update-rc.d", "-f", "-r",
- self.d.getVar('IMAGE_ROOTFS', True),
+ self.d.getVar('IMAGE_ROOTFS'),
"run-postinsts", "remove"])
image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
True, False, self.d)
- image_rorfs_force = self.d.getVar('FORCE_RO_REMOVE', True)
+ image_rorfs_force = self.d.getVar('FORCE_RO_REMOVE')
if image_rorfs or image_rorfs_force == "1":
# Remove components that we don't need if it's a read-only rootfs
- unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED", True).split()
+ unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED").split()
pkgs_installed = image_list_installed_packages(self.d)
- pkgs_to_remove = [pkg for pkg in pkgs_installed if pkg in unneeded_pkgs]
-
+ # Make sure update-alternatives is removed last. This is
+ # because its database has to be available while other
+ # packages are being uninstalled, so that their alternative
+ # symlinks can be removed or otherwise managed correctly.
+ provider = self.d.getVar("VIRTUAL-RUNTIME_update-alternatives")
+ pkgs_to_remove = sorted([pkg for pkg in pkgs_installed if pkg in unneeded_pkgs], key=lambda x: x == provider)
+
+ # The update-alternatives provider is removed in its own remove()
+ # call because not all package managers guarantee that packages
+ # are removed in the order they are given in the list (which is
+ # passed to the command line). The sorting done earlier is
+ # what makes this 2-stage removal possible.
+ if len(pkgs_to_remove) > 1:
+ self.pm.remove(pkgs_to_remove[:-1], False)
if len(pkgs_to_remove) > 0:
- self.pm.remove(pkgs_to_remove, False)
+ self.pm.remove([pkgs_to_remove[-1]], False)
if delayed_postinsts:
self._save_postinsts()
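The key function relies on False sorting before True, which pins the update-alternatives provider to the end of the list; a standalone trace with made-up package names:

    provider = "update-alternatives-opkg"  # hypothetical VIRTUAL-RUNTIME value
    pkgs_installed = ["update-alternatives-opkg", "shadow", "run-postinsts"]
    unneeded_pkgs = ["update-alternatives-opkg", "shadow", "run-postinsts"]
    pkgs_to_remove = sorted([p for p in pkgs_installed if p in unneeded_pkgs],
                            key=lambda x: x == provider)
    print(pkgs_to_remove)  # ['shadow', 'run-postinsts', 'update-alternatives-opkg']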
@@ -270,7 +287,7 @@ class Rootfs(object, metaclass=ABCMeta):
bb.warn("There are post install scripts "
"in a read-only rootfs")
- post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND", True)
+ post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND")
execute_pre_post_process(self.d, post_uninstall_cmds)
runtime_pkgmanage = bb.utils.contains("IMAGE_FEATURES", "package-management",
@@ -279,45 +296,8 @@ class Rootfs(object, metaclass=ABCMeta):
# Remove the package manager data files
self.pm.remove_packaging_data()
- def _run_intercepts(self):
- intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
- "intercept_scripts")
-
- bb.note("Running intercept scripts:")
- os.environ['D'] = self.image_rootfs
- os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE', True)
- for script in os.listdir(intercepts_dir):
- script_full = os.path.join(intercepts_dir, script)
-
- if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
- continue
-
- bb.note("> Executing %s intercept ..." % script)
-
- try:
- subprocess.check_call(script_full)
- except subprocess.CalledProcessError as e:
- bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details!" %
- (script, e.returncode))
-
- with open(script_full) as intercept:
- registered_pkgs = None
- for line in intercept.read().split("\n"):
- m = re.match("^##PKGS:(.*)", line)
- if m is not None:
- registered_pkgs = m.group(1).strip()
- break
-
- if registered_pkgs is not None:
- bb.warn("The postinstalls for the following packages "
- "will be postponed for first boot: %s" %
- registered_pkgs)
-
- # call the backend dependent handler
- self._handle_intercept_failure(registered_pkgs)
-
def _run_ldconfig(self):
- if self.d.getVar('LDCONFIGDEPEND', True):
+ if self.d.getVar('LDCONFIGDEPEND'):
bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v")
self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
'new', '-v'])
@@ -337,7 +317,7 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note("No Kernel Modules found, not running depmod")
return
- kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR', True), "kernel-depmod",
+ kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR'), "kernel-depmod",
'kernel-abiversion')
if not os.path.exists(kernel_abi_ver_file):
bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
@@ -359,15 +339,15 @@ class Rootfs(object, metaclass=ABCMeta):
"""
def _create_devfs(self):
devtable_list = []
- devtable = self.d.getVar('IMAGE_DEVICE_TABLE', True)
+ devtable = self.d.getVar('IMAGE_DEVICE_TABLE')
if devtable is not None:
devtable_list.append(devtable)
else:
- devtables = self.d.getVar('IMAGE_DEVICE_TABLES', True)
+ devtables = self.d.getVar('IMAGE_DEVICE_TABLES')
if devtables is None:
devtables = 'files/device_table-minimal.txt'
for devtable in devtables.split():
- devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH', True), devtable))
+ devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH'), devtable))
for devtable in devtable_list:
self._exec_shell_cmd(["makedevs", "-r",
@@ -375,24 +355,24 @@ class Rootfs(object, metaclass=ABCMeta):
class RpmRootfs(Rootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None):
- super(RpmRootfs, self).__init__(d, progress_reporter)
- self.log_check_regex = '(unpacking of archive failed|Cannot find package'\
- '|exit 1|ERROR: |Error: |Error |ERROR '\
- '|Failed |Failed: |Failed$|Failed\(\d+\):)'
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(RpmRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\
+ r'|exit 1|ERROR: |Error: |Error |ERROR '\
+ r'|Failed |Failed: |Failed$|Failed\(\d+\):)'
self.manifest = RpmManifest(d, manifest_dir)
self.pm = RpmPM(d,
- d.getVar('IMAGE_ROOTFS', True),
- self.d.getVar('TARGET_VENDOR', True)
+ d.getVar('IMAGE_ROOTFS'),
+ self.d.getVar('TARGET_VENDOR')
)
- self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN', True)
+ self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
if self.inc_rpm_image_gen != "1":
bb.utils.remove(self.image_rootfs, True)
else:
self.pm.recovery_packaging_data()
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
self.pm.create_configs()
@@ -424,10 +404,12 @@ class RpmRootfs(Rootfs):
bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
self.pm.remove(pkg_to_remove)
+ self.pm.autoremove()
+
def _create(self):
pkgs_to_install = self.manifest.parse_initial_manifest()
- rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS', True)
- rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS', True)
+ rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
+ rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')
# update PM index files
self.pm.write_index()
@@ -437,8 +419,6 @@ class RpmRootfs(Rootfs):
if self.progress_reporter:
self.progress_reporter.next_stage()
- self.pm.dump_all_available_pkgs()
-
if self.inc_rpm_image_gen == "1":
self._create_incremental(pkgs_to_install)
@@ -473,17 +453,13 @@ class RpmRootfs(Rootfs):
if self.progress_reporter:
self.progress_reporter.next_stage()
- self._setup_dbg_rootfs(['/etc/rpm', '/var/lib/rpm', '/var/lib/smart'])
+ self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
execute_pre_post_process(self.d, rpm_post_process_cmds)
- self._log_check()
-
if self.inc_rpm_image_gen == "1":
self.pm.backup_packaging_data()
- self.pm.rpm_setup_smart_target_config()
-
if self.progress_reporter:
self.progress_reporter.next_stage()
@@ -512,35 +488,21 @@ class RpmRootfs(Rootfs):
self._log_check_warn()
self._log_check_error()
- def _handle_intercept_failure(self, registered_pkgs):
- rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
- bb.utils.mkdirhier(rpm_postinsts_dir)
-
- # Save the package postinstalls in /etc/rpm-postinsts
- for pkg in registered_pkgs.split():
- self.pm.save_rpmpostinst(pkg)
-
def _cleanup(self):
- # during the execution of postprocess commands, rpm is called several
- # times to get the files installed, dependencies, etc. This creates the
- # __db.00* (Berkeley DB files that hold locks, rpm specific environment
- # settings, etc.), that should not get into the final rootfs
- self.pm.unlock_rpm_db()
- if os.path.isdir(self.pm.install_dir_path + "/tmp") and not os.listdir(self.pm.install_dir_path + "/tmp"):
- bb.utils.remove(self.pm.install_dir_path + "/tmp", True)
- if os.path.isdir(self.pm.install_dir_path) and not os.listdir(self.pm.install_dir_path):
- bb.utils.remove(self.pm.install_dir_path, True)
+ if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d):
+ self.pm._invoke_dnf(["clean", "all"])
+
class DpkgOpkgRootfs(Rootfs):
- def __init__(self, d, progress_reporter=None):
- super(DpkgOpkgRootfs, self).__init__(d, progress_reporter)
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
+ super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
def _get_pkgs_postinsts(self, status_file):
def _get_pkg_depends_list(pkg_depends):
pkg_depends_list = []
# filter version requirements like libc (>= 1.1)
for dep in pkg_depends.split(', '):
- m_dep = re.match("^(.*) \(.*\)$", dep)
+ m_dep = re.match(r"^(.*) \(.*\)$", dep)
if m_dep:
dep = m_dep.group(1)
pkg_depends_list.append(dep)
@@ -556,21 +518,33 @@ class DpkgOpkgRootfs(Rootfs):
data = status.read()
status.close()
for line in data.split('\n'):
- m_pkg = re.match("^Package: (.*)", line)
- m_status = re.match("^Status:.*unpacked", line)
- m_depends = re.match("^Depends: (.*)", line)
+ m_pkg = re.match(r"^Package: (.*)", line)
+ m_status = re.match(r"^Status:.*unpacked", line)
+ m_depends = re.match(r"^Depends: (.*)", line)
+ # Only one of m_pkg, m_status or m_depends is not None at a time
+ # If m_pkg is not None, we have started a new package
if m_pkg is not None:
- if pkg_name and pkg_status_match:
- pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
-
+ # Get the package name
pkg_name = m_pkg.group(1)
+ # Make sure we reset the other variables
pkg_status_match = False
pkg_depends = ""
elif m_status is not None:
+ # New status matched
pkg_status_match = True
elif m_depends is not None:
+ # New depends matched
pkg_depends = m_depends.group(1)
+ else:
+ pass
+
+ # Now check whether we can process the package depends and postinst
+ if "" != pkg_name and pkg_status_match:
+ pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
+ else:
+ # Not enough information
+ pass
# remove package dependencies not in postinsts
pkg_names = list(pkgs.keys())
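How the three regexes carve up one status-file stanza, including the version-requirement filter from _get_pkg_depends_list(); the stanza content is fabricated:

    import re

    lines = ["Package: libfoo",
             "Status: install ok unpacked",
             "Depends: libc6 (>= 2.28), libbar"]
    pkg_name, pkg_depends, pkg_status_match = "", "", False
    for line in lines:
        m_pkg = re.match(r"^Package: (.*)", line)
        m_status = re.match(r"^Status:.*unpacked", line)
        m_depends = re.match(r"^Depends: (.*)", line)
        if m_pkg:
            pkg_name = m_pkg.group(1)
        elif m_status:
            pkg_status_match = True
        elif m_depends:
            pkg_depends = m_depends.group(1)
    deps = []
    for dep in pkg_depends.split(', '):
        m_dep = re.match(r"^(.*) \(.*\)$", dep)  # strip '(>= 1.1)' style constraints
        deps.append(m_dep.group(1) if m_dep else dep)
    print(pkg_name, pkg_status_match, deps)  # libfoo True ['libc6', 'libbar']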
@@ -600,7 +574,7 @@ class DpkgOpkgRootfs(Rootfs):
pkg_list = []
pkgs = None
- if not self.d.getVar('PACKAGE_INSTALL', True).strip():
+ if not self.d.getVar('PACKAGE_INSTALL').strip():
bb.note("Building empty image")
else:
pkgs = self._get_pkgs_postinsts(status_file)
@@ -616,6 +590,9 @@ class DpkgOpkgRootfs(Rootfs):
return pkg_list
def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management",
+ True, False, self.d):
+ return
num = 0
for p in self._get_delayed_postinsts():
bb.utils.mkdirhier(dst_postinst_dir)
@@ -627,8 +604,8 @@ class DpkgOpkgRootfs(Rootfs):
num += 1
class DpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None):
- super(DpkgRootfs, self).__init__(d, progress_reporter)
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(DpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
self.log_check_regex = '^E:'
self.log_check_expected_regexes = \
[
@@ -636,17 +613,17 @@ class DpkgRootfs(DpkgOpkgRootfs):
]
bb.utils.remove(self.image_rootfs, True)
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
self.manifest = DpkgManifest(d, manifest_dir)
- self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS', True),
- d.getVar('PACKAGE_ARCHS', True),
- d.getVar('DPKG_ARCH', True))
+ self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
+ d.getVar('PACKAGE_ARCHS'),
+ d.getVar('DPKG_ARCH'))
def _create(self):
pkgs_to_install = self.manifest.parse_initial_manifest()
- deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS', True)
- deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS', True)
+ deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
+ deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')
alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
bb.utils.mkdirhier(alt_dir)
@@ -670,6 +647,7 @@ class DpkgRootfs(DpkgOpkgRootfs):
if pkg_type in pkgs_to_install:
self.pm.install(pkgs_to_install[pkg_type],
[False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+ self.pm.fix_broken_dependencies()
if self.progress_reporter:
# Don't support attemptonly, so skip that
@@ -707,9 +685,6 @@ class DpkgRootfs(DpkgOpkgRootfs):
src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
- def _handle_intercept_failure(self, registered_pkgs):
- self.pm.mark_packages("unpacked", registered_pkgs.split())
-
def _log_check(self):
self._log_check_warn()
self._log_check_error()
@@ -719,15 +694,15 @@ class DpkgRootfs(DpkgOpkgRootfs):
class OpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None):
- super(OpkgRootfs, self).__init__(d, progress_reporter)
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(OpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
self.log_check_regex = '(exit 1|Collected errors)'
self.manifest = OpkgManifest(d, manifest_dir)
- self.opkg_conf = self.d.getVar("IPKGCONF_TARGET", True)
- self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)
+ self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
+ self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")
- self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN', True) or ""
+ self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
if self._remove_old_rootfs():
bb.utils.remove(self.image_rootfs, True)
self.pm = OpkgPM(d,
@@ -741,7 +716,7 @@ class OpkgRootfs(DpkgOpkgRootfs):
self.pkg_archs)
self.pm.recover_packaging_data()
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
def _prelink_file(self, root_dir, filename):
bb.note('prelink %s in %s' % (filename, root_dir))
@@ -776,15 +751,16 @@ class OpkgRootfs(DpkgOpkgRootfs):
if filecmp.cmp(f1, f2):
return True
- if self.image_rootfs not in f1:
- self._prelink_file(f1.replace(key, ''), f1)
+ if bb.data.inherits_class('image-prelink', self.d):
+ if self.image_rootfs not in f1:
+ self._prelink_file(f1.replace(key, ''), f1)
- if self.image_rootfs not in f2:
- self._prelink_file(f2.replace(key, ''), f2)
+ if self.image_rootfs not in f2:
+ self._prelink_file(f2.replace(key, ''), f2)
- # Both of them are prelinked
- if filecmp.cmp(f1, f2):
- return True
+ # Both of them are prelinked
+ if filecmp.cmp(f1, f2):
+ return True
# Not equal
return False
@@ -796,11 +772,11 @@ class OpkgRootfs(DpkgOpkgRootfs):
"""
def _multilib_sanity_test(self, dirs):
- allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP", True)
+ allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP")
if allow_replace is None:
allow_replace = ""
- allow_rep = re.compile(re.sub("\|$", "", allow_replace))
+ allow_rep = re.compile(re.sub(r"\|$", r"", allow_replace))
error_prompt = "Multilib check error:"
files = {}
@@ -828,12 +804,12 @@ class OpkgRootfs(DpkgOpkgRootfs):
files[key] = item
def _multilib_test_install(self, pkgs):
- ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS", True)
+ ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
bb.utils.mkdirhier(ml_temp)
dirs = [self.image_rootfs]
- for variant in self.d.getVar("MULTILIB_VARIANTS", True).split():
+ for variant in self.d.getVar("MULTILIB_VARIANTS").split():
ml_target_rootfs = os.path.join(ml_temp, variant)
bb.utils.remove(ml_target_rootfs, True)
@@ -841,7 +817,7 @@ class OpkgRootfs(DpkgOpkgRootfs):
ml_opkg_conf = os.path.join(ml_temp,
variant + "-" + os.path.basename(self.opkg_conf))
- ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)
+ ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False)
ml_pm.update()
ml_pm.install(pkgs)
@@ -893,9 +869,9 @@ class OpkgRootfs(DpkgOpkgRootfs):
old_vars_list = open(vars_list_file, 'r+').read()
new_vars_list = '%s:%s:%s\n' % \
- ((self.d.getVar('BAD_RECOMMENDATIONS', True) or '').strip(),
- (self.d.getVar('NO_RECOMMENDATIONS', True) or '').strip(),
- (self.d.getVar('PACKAGE_EXCLUDE', True) or '').strip())
+ ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
+ (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
+ (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
open(vars_list_file, 'w+').write(new_vars_list)
if old_vars_list != new_vars_list:
@@ -905,12 +881,11 @@ class OpkgRootfs(DpkgOpkgRootfs):
def _create(self):
pkgs_to_install = self.manifest.parse_initial_manifest()
- opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS', True)
- opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS', True)
+ opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
+ opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')
- # update PM index files, unless users provide their own feeds
- if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
- self.pm.write_index()
+ # update PM index files
+ self.pm.write_index()
execute_pre_post_process(self.d, opkg_pre_process_cmds)
@@ -921,8 +896,6 @@ class OpkgRootfs(DpkgOpkgRootfs):
self.pm.update()
- self.pm.handle_bad_recommendations()
-
if self.progress_reporter:
self.progress_reporter.next_stage()
@@ -951,7 +924,9 @@ class OpkgRootfs(DpkgOpkgRootfs):
if self.progress_reporter:
self.progress_reporter.next_stage()
- self._setup_dbg_rootfs(['/etc', '/var/lib/opkg', '/usr/lib/ssl'])
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ opkg_dir = os.path.join(opkg_lib_dir, 'opkg')
+ self._setup_dbg_rootfs([opkg_dir])
execute_pre_post_process(self.d, opkg_post_process_cmds)
@@ -967,7 +942,7 @@ class OpkgRootfs(DpkgOpkgRootfs):
def _get_delayed_postinsts(self):
status_file = os.path.join(self.image_rootfs,
- self.d.getVar('OPKGLIBDIR', True).strip('/'),
+ self.d.getVar('OPKGLIBDIR').strip('/'),
"opkg", "status")
return self._get_delayed_postinsts_common(status_file)
@@ -976,9 +951,6 @@ class OpkgRootfs(DpkgOpkgRootfs):
src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
- def _handle_intercept_failure(self, registered_pkgs):
- self.pm.mark_packages("unpacked", registered_pkgs.split())
-
def _log_check(self):
self._log_check_warn()
self._log_check_error()
@@ -992,20 +964,20 @@ def get_class_for_type(imgtype):
"deb": DpkgRootfs}[imgtype]
def variable_depends(d, manifest_dir=None):
- img_type = d.getVar('IMAGE_PKGTYPE', True)
+ img_type = d.getVar('IMAGE_PKGTYPE')
cls = get_class_for_type(img_type)
return cls._depends_list()
-def create_rootfs(d, manifest_dir=None, progress_reporter=None):
+def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None):
env_bkp = os.environ.copy()
- img_type = d.getVar('IMAGE_PKGTYPE', True)
+ img_type = d.getVar('IMAGE_PKGTYPE')
if img_type == "rpm":
- RpmRootfs(d, manifest_dir, progress_reporter).create()
+ RpmRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
elif img_type == "ipk":
- OpkgRootfs(d, manifest_dir, progress_reporter).create()
+ OpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
elif img_type == "deb":
- DpkgRootfs(d, manifest_dir, progress_reporter).create()
+ DpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
os.environ.clear()
os.environ.update(env_bkp)
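All three backends now accept the extra argument, so callers can thread a catcher through unchanged; a hedged call-site sketch (d is assumed to be a populated datastore):

    from oe.rootfs import create_rootfs

    # logcatcher may stay None; when given, it only needs a contains(line) method
    create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None)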
@@ -1013,13 +985,13 @@ def create_rootfs(d, manifest_dir=None, progress_reporter=None):
def image_list_installed_packages(d, rootfs_dir=None):
if not rootfs_dir:
- rootfs_dir = d.getVar('IMAGE_ROOTFS', True)
+ rootfs_dir = d.getVar('IMAGE_ROOTFS')
- img_type = d.getVar('IMAGE_PKGTYPE', True)
+ img_type = d.getVar('IMAGE_PKGTYPE')
if img_type == "rpm":
return RpmPkgsList(d, rootfs_dir).list_pkgs()
elif img_type == "ipk":
- return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET", True)).list_pkgs()
+ return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET")).list_pkgs()
elif img_type == "deb":
return DpkgPkgsList(d, rootfs_dir).list_pkgs()
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py
index c74525f929..d02a274812 100644
--- a/meta/lib/oe/sdk.py
+++ b/meta/lib/oe/sdk.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.manifest import *
@@ -7,20 +11,19 @@ import shutil
import glob
import traceback
-
class Sdk(object, metaclass=ABCMeta):
def __init__(self, d, manifest_dir):
self.d = d
- self.sdk_output = self.d.getVar('SDK_OUTPUT', True)
- self.sdk_native_path = self.d.getVar('SDKPATHNATIVE', True).strip('/')
- self.target_path = self.d.getVar('SDKTARGETSYSROOT', True).strip('/')
- self.sysconfdir = self.d.getVar('sysconfdir', True).strip('/')
+ self.sdk_output = self.d.getVar('SDK_OUTPUT')
+ self.sdk_native_path = self.d.getVar('SDKPATHNATIVE').strip('/')
+ self.target_path = self.d.getVar('SDKTARGETSYSROOT').strip('/')
+ self.sysconfdir = self.d.getVar('sysconfdir').strip('/')
self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
self.sdk_host_sysroot = self.sdk_output
if manifest_dir is None:
- self.manifest_dir = self.d.getVar("SDK_DIR", True)
+ self.manifest_dir = self.d.getVar("SDK_DIR")
else:
self.manifest_dir = manifest_dir
@@ -40,12 +43,12 @@ class Sdk(object, metaclass=ABCMeta):
# Don't ship any libGL in the SDK
self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('libdir_nativesdk', True).strip('/'),
+ self.d.getVar('libdir_nativesdk').strip('/'),
"libGL*"))
# Fix or remove broken .la files
self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('libdir_nativesdk', True).strip('/'),
+ self.d.getVar('libdir_nativesdk').strip('/'),
"*.la"))
# Link the ld.so.cache file into the hosts filesystem
@@ -54,7 +57,7 @@ class Sdk(object, metaclass=ABCMeta):
self.mkdirhier(os.path.dirname(link_name))
os.symlink("/etc/ld.so.cache", link_name)
- execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND', True))
+ execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND'))
def movefile(self, sourcefile, destdir):
try:
@@ -84,8 +87,31 @@ class Sdk(object, metaclass=ABCMeta):
bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
bb.warn("cannot remove SDK dir: %s" % path)
+ def install_locales(self, pm):
+ linguas = self.d.getVar("SDKIMAGE_LINGUAS")
+ if linguas:
+ import fnmatch
+ # Install the binary locales
+ if linguas == "all":
+ pm.install_glob("nativesdk-glibc-binary-localedata-*.utf-8", sdk=True)
+ else:
+ pm.install(["nativesdk-glibc-binary-localedata-%s.utf-8" % \
+ lang for lang in linguas.split()])
+ # Generate a locale archive of them
+ target_arch = self.d.getVar('SDK_ARCH')
+ rootfs = oe.path.join(self.sdk_host_sysroot, self.sdk_native_path)
+ localedir = oe.path.join(rootfs, self.d.getVar("libdir_nativesdk"), "locale")
+ generate_locale_archive(self.d, rootfs, target_arch, localedir)
+ # And now delete the binary locales
+ pkgs = fnmatch.filter(pm.list_installed(), "nativesdk-glibc-binary-localedata-*.utf-8")
+ pm.remove(pkgs)
+ else:
+ # No linguas so do nothing
+ pass
+
+
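What the fnmatch pattern in install_locales() selects for later removal; the installed-package list is fabricated:

    import fnmatch

    installed = ["nativesdk-glibc-binary-localedata-en-gb.utf-8",
                 "nativesdk-glibc-binary-localedata-de-de.utf-8",
                 "nativesdk-glibc",
                 "nativesdk-cmake"]
    print(fnmatch.filter(installed, "nativesdk-glibc-binary-localedata-*.utf-8"))
    # ['nativesdk-glibc-binary-localedata-en-gb.utf-8',
    #  'nativesdk-glibc-binary-localedata-de-de.utf-8']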
class RpmSdk(Sdk):
- def __init__(self, d, manifest_dir=None):
+ def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"):
super(RpmSdk, self).__init__(d, manifest_dir)
self.target_manifest = RpmManifest(d, self.manifest_dir,
@@ -93,36 +119,24 @@ class RpmSdk(Sdk):
self.host_manifest = RpmManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_HOST)
- target_providename = ['/bin/sh',
- '/bin/bash',
- '/usr/bin/env',
- '/usr/bin/perl',
- 'pkgconfig'
- ]
+ rpm_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ rpm_repo_workdir = "oe-sdk-ext-repo"
self.target_pm = RpmPM(d,
self.sdk_target_sysroot,
- self.d.getVar('TARGET_VENDOR', True),
+ self.d.getVar('TARGET_VENDOR'),
'target',
- target_providename
+ rpm_repo_workdir=rpm_repo_workdir
)
- sdk_providename = ['/bin/sh',
- '/bin/bash',
- '/usr/bin/env',
- '/usr/bin/perl',
- 'pkgconfig',
- 'libGL.so()(64bit)',
- 'libGL.so'
- ]
-
self.host_pm = RpmPM(d,
self.sdk_host_sysroot,
- self.d.getVar('SDK_VENDOR', True),
+ self.d.getVar('SDK_VENDOR'),
'host',
- sdk_providename,
"SDK_PACKAGE_ARCHS",
- "SDK_OS"
+ "SDK_OS",
+ rpm_repo_workdir=rpm_repo_workdir
)
def _populate_sysroot(self, pm, manifest):
@@ -130,7 +144,6 @@ class RpmSdk(Sdk):
pm.create_configs()
pm.write_index()
- pm.dump_all_available_pkgs()
pm.update()
pkgs = []
@@ -146,20 +159,27 @@ class RpmSdk(Sdk):
pm.install(pkgs_attempt, True)
def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
bb.note("Installing TARGET packages")
self._populate_sysroot(self.target_pm, self.target_manifest)
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+ self.target_pm.run_intercepts(populate_sdk='target')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
self.target_pm.remove_packaging_data()
bb.note("Installing NATIVESDK packages")
self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts(populate_sdk='host')
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
self.host_pm.remove_packaging_data()
@@ -167,7 +187,7 @@ class RpmSdk(Sdk):
# Move host RPM library data
native_rpm_state_dir = os.path.join(self.sdk_output,
self.sdk_native_path,
- self.d.getVar('localstatedir_nativesdk', True).strip('/'),
+ self.d.getVar('localstatedir_nativesdk').strip('/'),
"lib",
"rpm"
)
@@ -188,7 +208,9 @@ class RpmSdk(Sdk):
True).strip('/'),
)
self.mkdirhier(native_sysconf_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "etc", "*")):
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
+ self.movefile(f, native_sysconf_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
self.movefile(f, native_sysconf_dir)
self.remove(os.path.join(self.sdk_output, "etc"), True)
@@ -197,24 +219,30 @@ class OpkgSdk(Sdk):
def __init__(self, d, manifest_dir=None):
super(OpkgSdk, self).__init__(d, manifest_dir)
- self.target_conf = self.d.getVar("IPKGCONF_TARGET", True)
- self.host_conf = self.d.getVar("IPKGCONF_SDK", True)
+ self.target_conf = self.d.getVar("IPKGCONF_TARGET")
+ self.host_conf = self.d.getVar("IPKGCONF_SDK")
self.target_manifest = OpkgManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_TARGET)
self.host_manifest = OpkgManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_HOST)
+ ipk_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ ipk_repo_workdir = "oe-sdk-ext-repo"
+
self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
- self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
+ self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"),
+ ipk_repo_workdir=ipk_repo_workdir)
self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
- self.d.getVar("SDK_PACKAGE_ARCHS", True))
+ self.d.getVar("SDK_PACKAGE_ARCHS"),
+ ipk_repo_workdir=ipk_repo_workdir)
def _populate_sysroot(self, pm, manifest):
pkgs_to_install = manifest.parse_initial_manifest()
- if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
+ if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
pm.write_index()
pm.update()
@@ -225,20 +253,27 @@ class OpkgSdk(Sdk):
[False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
bb.note("Installing TARGET packages")
self._populate_sysroot(self.target_pm, self.target_manifest)
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+ self.target_pm.run_intercepts(populate_sdk='target')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
self.target_pm.remove_packaging_data()
bb.note("Installing NATIVESDK packages")
self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts(populate_sdk='host')
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
self.host_pm.remove_packaging_data()
@@ -257,7 +292,7 @@ class OpkgSdk(Sdk):
os.path.basename(self.host_conf)), 0o644)
native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('localstatedir_nativesdk', True).strip('/'),
+ self.d.getVar('localstatedir_nativesdk').strip('/'),
"lib", "opkg")
self.mkdirhier(native_opkg_state_dir)
for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
@@ -270,26 +305,32 @@ class DpkgSdk(Sdk):
def __init__(self, d, manifest_dir=None):
super(DpkgSdk, self).__init__(d, manifest_dir)
- self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt")
- self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt-sdk")
+ self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
+ self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")
self.target_manifest = DpkgManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_TARGET)
self.host_manifest = DpkgManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_HOST)
+ deb_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ deb_repo_workdir = "oe-sdk-ext-repo"
+
self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
- self.d.getVar("PACKAGE_ARCHS", True),
- self.d.getVar("DPKG_ARCH", True),
- self.target_conf_dir)
+ self.d.getVar("PACKAGE_ARCHS"),
+ self.d.getVar("DPKG_ARCH"),
+ self.target_conf_dir,
+ deb_repo_workdir=deb_repo_workdir)
self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
- self.d.getVar("SDK_PACKAGE_ARCHS", True),
- self.d.getVar("DEB_SDK_ARCH", True),
- self.host_conf_dir)
+ self.d.getVar("SDK_PACKAGE_ARCHS"),
+ self.d.getVar("DEB_SDK_ARCH"),
+ self.host_conf_dir,
+ deb_repo_workdir=deb_repo_workdir)
def _copy_apt_dir_to(self, dst_dir):
- staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE", True)
+ staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")
self.remove(dst_dir, True)
@@ -307,12 +348,16 @@ class DpkgSdk(Sdk):
[False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
bb.note("Installing TARGET packages")
self._populate_sysroot(self.target_pm, self.target_manifest)
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ self.target_pm.run_intercepts(populate_sdk='target')
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
@@ -321,8 +366,11 @@ class DpkgSdk(Sdk):
bb.note("Installing NATIVESDK packages")
self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+ self.host_pm.run_intercepts(populate_sdk='host')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
"etc", "apt"))
@@ -341,26 +389,26 @@ class DpkgSdk(Sdk):
def sdk_list_installed_packages(d, target, rootfs_dir=None):
if rootfs_dir is None:
- sdk_output = d.getVar('SDK_OUTPUT', True)
- target_path = d.getVar('SDKTARGETSYSROOT', True).strip('/')
+ sdk_output = d.getVar('SDK_OUTPUT')
+ target_path = d.getVar('SDKTARGETSYSROOT').strip('/')
rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
- img_type = d.getVar('IMAGE_PKGTYPE', True)
+ img_type = d.getVar('IMAGE_PKGTYPE')
if img_type == "rpm":
arch_var = ["SDK_PACKAGE_ARCHS", None][target is True]
os_var = ["SDK_OS", None][target is True]
- return RpmPkgsList(d, rootfs_dir, arch_var, os_var).list_pkgs()
+ return RpmPkgsList(d, rootfs_dir).list_pkgs()
elif img_type == "ipk":
conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True]
- return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var, True)).list_pkgs()
+ return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var)).list_pkgs()
elif img_type == "deb":
return DpkgPkgsList(d, rootfs_dir).list_pkgs()
def populate_sdk(d, manifest_dir=None):
env_bkp = os.environ.copy()
- img_type = d.getVar('IMAGE_PKGTYPE', True)
+ img_type = d.getVar('IMAGE_PKGTYPE')
if img_type == "rpm":
RpmSdk(d, manifest_dir).populate()
elif img_type == "ipk":
@@ -371,5 +419,24 @@ def populate_sdk(d, manifest_dir=None):
os.environ.clear()
os.environ.update(env_bkp)
+def get_extra_sdkinfo(sstate_dir):
+ """
+ This function is used to generate the target and host manifest files of the eSDK packages.
+ """
+ import math
+
+ extra_info = {}
+ extra_info['tasksizes'] = {}
+ extra_info['filesizes'] = {}
+ for root, _, files in os.walk(sstate_dir):
+ for fn in files:
+ if fn.endswith('.tgz'):
+ fsize = int(math.ceil(float(os.path.getsize(os.path.join(root, fn))) / 1024))
+ task = fn.rsplit(':',1)[1].split('_',1)[1].split(',')[0]
+ origtotal = extra_info['tasksizes'].get(task, 0)
+ extra_info['tasksizes'][task] = origtotal + fsize
+ extra_info['filesizes'][fn] = fsize
+ return extra_info
+
if __name__ == "__main__":
pass
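The task name is carved out of the sstate archive filename purely by string surgery; a trace with a fabricated filename (the exact sstate naming scheme is an assumption here):

    fn = "sstate:zlib:core2-64-poky-linux:1.2.11:r0:core2-64:3:0123abcd_populate_sysroot.tgz"
    task = fn.rsplit(':', 1)[1].split('_', 1)[1].split(',')[0]
    print(task)  # populate_sysroot.tgz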
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
index 8224e3a12e..7cecb59d8e 100644
--- a/meta/lib/oe/sstatesig.py
+++ b/meta/lib/oe/sstatesig.py
@@ -1,4 +1,8 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import bb.siggen
+import oe
def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
# Return True if we should keep the dependency, False to drop it
@@ -20,19 +24,22 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
def isImage(fn):
return "/image.bbclass" in " ".join(dataCache.inherits[fn])
- # Always include our own inter-task dependencies
+ # (Almost) always include our own inter-task dependencies.
+ # The exception is the special do_kernel_configme->do_unpack_and_patch
+ # dependency from archiver.bbclass.
if recipename == depname:
+ if task == "do_kernel_configme" and dep.endswith(".do_unpack_and_patch"):
+ return False
return True
- # Quilt (patch application) changing isn't likely to affect anything
- excludelist = ['quilt-native', 'subversion-native', 'git-native']
- if depname in excludelist and recipename != depname:
- return False
-
# Exclude well defined recipe->dependency
if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
return False
+ # Check for special wildcard
+ if "*->%s" % depname in siggen.saferecipedeps and recipename != depname:
+ return False
+
# Don't change native/cross/nativesdk recipe dependencies any further
if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
return True
@@ -41,7 +48,7 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
    # allarch packagegroups are assumed to have well behaved names which don't change between architectures/tunes
if isPackageGroup(fn) and isAllArch(fn) and not isNative(depname):
- return False
+ return False
# Exclude well defined machine specific configurations which don't change ABI
if depname in siggen.abisaferecipes and not isImage(fn):
@@ -52,7 +59,7 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
# is machine specific.
# Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
    # and we recommend a kernel-module, we exclude the dependency.
- depfn = dep.rsplit(".", 1)[0]
+ depfn = dep.rsplit(":", 1)[0]
if dataCache and isKernel(depfn) and not isKernel(fn):
for pkg in dataCache.runrecs[fn]:
if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1:
@@ -63,10 +70,10 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
def sstate_lockedsigs(d):
sigs = {}
- types = (d.getVar("SIGGEN_LOCKEDSIGS_TYPES", True) or "").split()
+ types = (d.getVar("SIGGEN_LOCKEDSIGS_TYPES") or "").split()
for t in types:
siggen_lockedsigs_var = "SIGGEN_LOCKEDSIGS_%s" % t
- lockedsigs = (d.getVar(siggen_lockedsigs_var, True) or "").split()
+ lockedsigs = (d.getVar(siggen_lockedsigs_var) or "").split()
for ls in lockedsigs:
pn, task, h = ls.split(":", 2)
if pn not in sigs:
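Each locked-sig entry is a 'pn:task:hash' triple; split(':', 2) caps the split at two so the hash itself can never be cut up. With a made-up entry:

    ls = "zlib:do_compile:0123456789abcdef"
    pn, task, h = ls.split(":", 2)
    print(pn, task, h)  # zlib do_compile 0123456789abcdef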
@@ -77,24 +84,23 @@ def sstate_lockedsigs(d):
class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
name = "OEBasic"
def init_rundepcheck(self, data):
- self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
- self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+ self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
+ self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
pass
def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
-class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
- name = "OEBasicHash"
+class SignatureGeneratorOEBasicHashMixIn(object):
def init_rundepcheck(self, data):
- self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
- self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+ self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
+ self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
self.lockedsigs = sstate_lockedsigs(data)
self.lockedhashes = {}
self.lockedpnmap = {}
self.lockedhashfn = {}
- self.machine = data.getVar("MACHINE", True)
+ self.machine = data.getVar("MACHINE")
self.mismatch_msgs = []
- self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES", True) or
+ self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
"").split()
self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
pass
@@ -122,12 +128,11 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
def get_taskdata(self):
- data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata()
- return (data, self.lockedpnmap, self.lockedhashfn)
+ return (self.lockedpnmap, self.lockedhashfn, self.lockedhashes) + super().get_taskdata()
def set_taskdata(self, data):
- coredata, self.lockedpnmap, self.lockedhashfn = data
- super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata)
+ self.lockedpnmap, self.lockedhashfn, self.lockedhashes = data[:3]
+ super().set_taskdata(data[3:])
def dump_sigs(self, dataCache, options):
sigfile = os.getcwd() + "/locked-sigs.inc"
@@ -135,8 +140,16 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
self.dump_lockedsigs(sigfile)
return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
- def get_taskhash(self, fn, task, deps, dataCache):
- h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(fn, task, deps, dataCache)
+ def get_taskhash(self, tid, deps, dataCache):
+ if tid in self.lockedhashes:
+ if self.lockedhashes[tid]:
+ return self.lockedhashes[tid]
+ else:
+ return super().get_taskhash(tid, deps, dataCache)
+
+ h = super().get_taskhash(tid, deps, dataCache)
+
+ (mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
recipename = dataCache.pkg_fn[fn]
self.lockedpnmap[fn] = recipename
@@ -147,90 +160,105 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
unlocked = True
else:
def recipename_from_dep(dep):
- # The dep entry will look something like
- # /path/path/recipename.bb.task, virtual:native:/p/foo.bb.task,
- # ...
- fn = dep.rsplit('.', 1)[0]
+ fn = bb.runqueue.fn_from_tid(dep)
return dataCache.pkg_fn[fn]
# If any unlocked recipe is in the direct dependencies then the
# current recipe should be unlocked as well.
- depnames = [ recipename_from_dep(x) for x in deps ]
+ depnames = [ recipename_from_dep(x) for x in deps if mc == bb.runqueue.mc_from_tid(x)]
if any(x in y for y in depnames for x in self.unlockedrecipes):
self.unlockedrecipes[recipename] = ''
unlocked = True
if not unlocked and recipename in self.lockedsigs:
if task in self.lockedsigs[recipename]:
- k = fn + "." + task
h_locked = self.lockedsigs[recipename][task][0]
var = self.lockedsigs[recipename][task][1]
- self.lockedhashes[k] = h_locked
- self.taskhash[k] = h_locked
+ self.lockedhashes[tid] = h_locked
+ unihash = super().get_unihash(tid)
+ self.taskhash[tid] = h_locked
#bb.warn("Using %s %s %s" % (recipename, task, h))
- if h != h_locked:
+ if h != h_locked and h_locked != unihash:
self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
% (recipename, task, h, h_locked, var))
return h_locked
+
+ self.lockedhashes[tid] = False
#bb.warn("%s %s %s" % (recipename, task, h))
return h
+ def get_unihash(self, tid):
+ if tid in self.lockedhashes and self.lockedhashes[tid]:
+ return self.lockedhashes[tid]
+ return super().get_unihash(tid)
+
def dump_sigtask(self, fn, task, stampbase, runtime):
- k = fn + "." + task
- if k in self.lockedhashes:
+ tid = fn + ":" + task
+ if tid in self.lockedhashes and self.lockedhashes[tid]:
return
super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)
def dump_lockedsigs(self, sigfile, taskfilter=None):
types = {}
- for k in self.runtaskdeps:
+ for tid in self.runtaskdeps:
if taskfilter:
- if not k in taskfilter:
+ if not tid in taskfilter:
continue
- fn = k.rsplit(".",1)[0]
+ fn = bb.runqueue.fn_from_tid(tid)
t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
t = 't-' + t.replace('_', '-')
if t not in types:
types[t] = []
- types[t].append(k)
+ types[t].append(tid)
with open(sigfile, "w") as f:
- for t in types:
+ l = sorted(types)
+ for t in l:
f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
types[t].sort()
- sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".",1)[0]])
- for k in sortedk:
- fn = k.rsplit(".",1)[0]
- task = k.rsplit(".",1)[1]
- if k not in self.taskhash:
+ sortedtid = sorted(types[t], key=lambda tid: self.lockedpnmap[bb.runqueue.fn_from_tid(tid)])
+ for tid in sortedtid:
+ (_, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
+ if tid not in self.taskhash:
continue
- f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n")
+ f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.get_unihash(tid) + " \\\n")
f.write(' "\n')
- f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(list(types.keys()))))
+ f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(l)))
- def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d):
+ def dump_siglist(self, sigfile):
+ with open(sigfile, "w") as f:
+ tasks = []
+ for taskitem in self.taskhash:
+ (fn, task) = taskitem.rsplit(":", 1)
+ pn = self.lockedpnmap[fn]
+ tasks.append((pn, task, fn, self.taskhash[taskitem]))
+ for (pn, task, fn, taskhash) in sorted(tasks):
+ f.write('%s:%s %s %s\n' % (pn, task, fn, taskhash))
+
+ def checkhashes(self, sq_data, missed, found, d):
warn_msgs = []
error_msgs = []
sstate_missing_msgs = []
- for task in range(len(sq_fn)):
- if task not in ret:
+ for tid in sq_data['hash']:
+ if tid not in found:
for pn in self.lockedsigs:
- if sq_hash[task] in iter(self.lockedsigs[pn].values()):
- if sq_task[task] == 'do_shared_workdir':
+ taskname = bb.runqueue.taskname_from_tid(tid)
+ if sq_data['hash'][tid] in iter(self.lockedsigs[pn].values()):
+ if taskname == 'do_shared_workdir':
continue
sstate_missing_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
- % (pn, sq_task[task], sq_hash[task]))
+ % (pn, taskname, sq_data['hash'][tid]))
- checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK", True)
+ checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK")
if checklevel == 'warn':
warn_msgs += self.mismatch_msgs
elif checklevel == 'error':
error_msgs += self.mismatch_msgs
- checklevel = d.getVar("SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK", True)
+ checklevel = d.getVar("SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK")
if checklevel == 'warn':
warn_msgs += sstate_missing_msgs
elif checklevel == 'error':
@@ -241,10 +269,25 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
if error_msgs:
bb.fatal("\n".join(error_msgs))
+class SignatureGeneratorOEBasicHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
+ name = "OEBasicHash"
+
+class SignatureGeneratorOEEquivHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorUniHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
+ name = "OEEquivHash"
+
+ def init_rundepcheck(self, data):
+ super().init_rundepcheck(data)
+ self.server = data.getVar('BB_HASHSERVE')
+ if not self.server:
+ bb.fatal("OEEquivHash requires BB_HASHSERVE to be set")
+ self.method = data.getVar('SSTATE_HASHEQUIV_METHOD')
+ if not self.method:
+ bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_METHOD to be set")
# Insert these classes into siggen's namespace so it can see and select them
bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
+bb.siggen.SignatureGeneratorOEEquivHash = SignatureGeneratorOEEquivHash
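Selecting the new generator is a configuration choice; a minimal local.conf sketch, with the server setting shown as an illustrative value:

    BB_SIGNATURE_HANDLER = "OEEquivHash"
    BB_HASHSERVE = "auto"                # illustrative; any reachable hash server works
    SSTATE_HASHEQUIV_METHOD = "oe.sstatesig.OEOuthashBasic"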
def find_siginfo(pn, taskname, taskhashlist, d):
@@ -253,20 +296,24 @@ def find_siginfo(pn, taskname, taskhashlist, d):
import fnmatch
import glob
- if taskhashlist:
- hashfiles = {}
-
if not taskname:
# We have to derive pn and taskname
key = pn
- splitit = key.split('.bb.')
+ splitit = key.split('.bb:')
taskname = splitit[1]
pn = os.path.basename(splitit[0]).split('_')[0]
if key.startswith('virtual:native:'):
pn = pn + '-native'
+ hashfiles = {}
filedates = {}
+ def get_hashval(siginfo):
+ if siginfo.endswith('.siginfo'):
+ return siginfo.rpartition(':')[2].partition('_')[0]
+ else:
+ return siginfo.rpartition('.')[2]
+
# First search in stamps dir
localdata = d.createCopy()
localdata.setVar('MULTIMACH_TARGET_SYS', '*')
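get_hashval() pulls the hash out of either naming scheme; traced with fabricated stamp and sstate names:

    def get_hashval(siginfo):
        if siginfo.endswith('.siginfo'):
            return siginfo.rpartition(':')[2].partition('_')[0]
        else:
            return siginfo.rpartition('.')[2]

    print(get_hashval("sstate:zlib:core2-64:1.2.11:r0:core2-64:3:0123abcd_compile.tgz.siginfo"))  # 0123abcd
    print(get_hashval("zlib.do_compile.sigdata.0123abcd"))  # 0123abcd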
@@ -274,7 +321,7 @@ def find_siginfo(pn, taskname, taskhashlist, d):
localdata.setVar('PV', '*')
localdata.setVar('PR', '*')
localdata.setVar('EXTENDPE', '')
- stamp = localdata.getVar('STAMP', True)
+ stamp = localdata.getVar('STAMP')
if pn.startswith("gcc-source"):
# gcc-source shared workdir is a special case :(
stamp = localdata.expand("${STAMPS_DIR}/work-shared/gcc-${PV}-${PR}")
@@ -296,10 +343,12 @@ def find_siginfo(pn, taskname, taskhashlist, d):
filedates[fullpath] = os.stat(fullpath).st_mtime
except OSError:
continue
+ hashval = get_hashval(fullpath)
+ hashfiles[hashval] = fullpath
if not taskhashlist or (len(filedates) < 2 and not foundall):
# That didn't work, look in sstate-cache
- hashes = taskhashlist or ['*']
+ hashes = taskhashlist or ['?' * 64]
localdata = bb.data.createCopy(d)
for hashval in hashes:
localdata.setVar('PACKAGE_ARCH', '*')
@@ -309,30 +358,25 @@ def find_siginfo(pn, taskname, taskhashlist, d):
localdata.setVar('PV', '*')
localdata.setVar('PR', '*')
localdata.setVar('BB_TASKHASH', hashval)
- swspec = localdata.getVar('SSTATE_SWSPEC', True)
+ swspec = localdata.getVar('SSTATE_SWSPEC')
if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic', 'do_preconfigure'] and swspec:
localdata.setVar('SSTATE_PKGSPEC', '${SSTATE_SWSPEC}')
elif pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
sstatename = taskname[3:]
- filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG', True), sstatename)
+ filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG'), sstatename)
- if hashval != '*':
- sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR', True), hashval[:2])
- else:
- sstatedir = d.getVar('SSTATE_DIR', True)
-
- for root, dirs, files in os.walk(sstatedir):
- for fn in files:
- fullpath = os.path.join(root, fn)
- if fnmatch.fnmatch(fullpath, filespec):
- if taskhashlist:
- hashfiles[hashval] = fullpath
- else:
- try:
- filedates[fullpath] = os.stat(fullpath).st_mtime
- except:
- continue
+ matchedfiles = glob.glob(filespec)
+ for fullpath in matchedfiles:
+ actual_hashval = get_hashval(fullpath)
+ if actual_hashval in hashfiles:
+ continue
+            hashfiles[actual_hashval] = fullpath
+ if not taskhashlist:
+ try:
+ filedates[fullpath] = os.stat(fullpath).st_mtime
+                except OSError:
+ continue
if taskhashlist:
return hashfiles
@@ -348,7 +392,176 @@ def sstate_get_manifest_filename(task, d):
Also returns the datastore that can be used to query related variables.
"""
d2 = d.createCopy()
- extrainf = d.getVarFlag("do_" + task, 'stamp-extra-info', True)
+ extrainf = d.getVarFlag("do_" + task, 'stamp-extra-info')
if extrainf:
d2.setVar("SSTATE_MANMACH", extrainf)
return (d2.expand("${SSTATE_MANFILEPREFIX}.%s" % task), d2)
+
+def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
+ d2 = d
+ variant = ''
+ curr_variant = ''
+ if d.getVar("BBEXTENDCURR") == "multilib":
+ curr_variant = d.getVar("BBEXTENDVARIANT")
+ if "virtclass-multilib" not in d.getVar("OVERRIDES"):
+ curr_variant = "invalid"
+ if taskdata2.startswith("virtual:multilib"):
+ variant = taskdata2.split(":")[2]
+ if curr_variant != variant:
+ if variant not in multilibcache:
+ multilibcache[variant] = oe.utils.get_multilib_datastore(variant, d)
+ d2 = multilibcache[variant]
+
+ if taskdata.endswith("-native"):
+ pkgarchs = ["${BUILD_ARCH}"]
+ elif taskdata.startswith("nativesdk-"):
+ pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
+ elif "-cross-canadian" in taskdata:
+ pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
+ elif "-cross-" in taskdata:
+ pkgarchs = ["${BUILD_ARCH}_${TARGET_ARCH}"]
+ elif "-crosssdk" in taskdata:
+ pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
+ else:
+ pkgarchs = ['${MACHINE_ARCH}']
+ pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
+ pkgarchs.append('allarch')
+ pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')
+
+ for pkgarch in pkgarchs:
+ manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
+ if os.path.exists(manifest):
+ return manifest, d2
+ bb.warn("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant))
+ return None, d2
+
+def OEOuthashBasic(path, sigfile, task, d):
+ """
+ Basic output hash function
+
+ Calculates the output hash of a task by hashing all output file metadata,
+ and file contents.
+ """
+ import hashlib
+ import stat
+ import pwd
+ import grp
+
+ def update_hash(s):
+ s = s.encode('utf-8')
+ h.update(s)
+ if sigfile:
+ sigfile.write(s)
+
+ h = hashlib.sha256()
+ prev_dir = os.getcwd()
+ include_owners = os.environ.get('PSEUDO_DISABLED') == '0'
+
+ try:
+ os.chdir(path)
+
+ update_hash("OEOuthashBasic\n")
+
+ # It is only currently useful to get equivalent hashes for things that
+ # can be restored from sstate. Since the sstate object is named using
+ # SSTATE_PKGSPEC and the task name, those should be included in the
+ # output hash calculation.
+ update_hash("SSTATE_PKGSPEC=%s\n" % d.getVar('SSTATE_PKGSPEC'))
+ update_hash("task=%s\n" % task)
+
+ for root, dirs, files in os.walk('.', topdown=True):
+ # Sort directories to ensure consistent ordering when recursing
+ dirs.sort()
+ files.sort()
+
+ def process(path):
+ s = os.lstat(path)
+
+ if stat.S_ISDIR(s.st_mode):
+ update_hash('d')
+ elif stat.S_ISCHR(s.st_mode):
+ update_hash('c')
+ elif stat.S_ISBLK(s.st_mode):
+ update_hash('b')
+ elif stat.S_ISSOCK(s.st_mode):
+ update_hash('s')
+ elif stat.S_ISLNK(s.st_mode):
+ update_hash('l')
+ elif stat.S_ISFIFO(s.st_mode):
+ update_hash('p')
+ else:
+ update_hash('-')
+
+ def add_perm(mask, on, off='-'):
+ if mask & s.st_mode:
+ update_hash(on)
+ else:
+ update_hash(off)
+
+ add_perm(stat.S_IRUSR, 'r')
+ add_perm(stat.S_IWUSR, 'w')
+ if stat.S_ISUID & s.st_mode:
+ add_perm(stat.S_IXUSR, 's', 'S')
+ else:
+ add_perm(stat.S_IXUSR, 'x')
+
+ add_perm(stat.S_IRGRP, 'r')
+ add_perm(stat.S_IWGRP, 'w')
+ if stat.S_ISGID & s.st_mode:
+ add_perm(stat.S_IXGRP, 's', 'S')
+ else:
+ add_perm(stat.S_IXGRP, 'x')
+
+ add_perm(stat.S_IROTH, 'r')
+ add_perm(stat.S_IWOTH, 'w')
+ if stat.S_ISVTX & s.st_mode:
+ update_hash('t')
+ else:
+ add_perm(stat.S_IXOTH, 'x')
+
+ if include_owners:
+ update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
+ update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
+
+ update_hash(" ")
+ if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
+ update_hash("%9s" % ("%d.%d" % (os.major(s.st_rdev), os.minor(s.st_rdev))))
+ else:
+ update_hash(" " * 9)
+
+ update_hash(" ")
+ if stat.S_ISREG(s.st_mode):
+ update_hash("%10d" % s.st_size)
+ else:
+ update_hash(" " * 10)
+
+ update_hash(" ")
+ fh = hashlib.sha256()
+ if stat.S_ISREG(s.st_mode):
+                        # Hash file contents. Use a distinct name so the
+                        # datastore 'd' isn't shadowed inside this function.
+                        with open(path, 'rb') as fobj:
+                            for chunk in iter(lambda: fobj.read(4096), b""):
+                                fh.update(chunk)
+ update_hash(fh.hexdigest())
+ else:
+ update_hash(" " * len(fh.hexdigest()))
+
+ update_hash(" %s" % path)
+
+ if stat.S_ISLNK(s.st_mode):
+ update_hash(" -> %s" % os.readlink(path))
+
+ update_hash("\n")
+
+ # Process this directory and all its child files
+ process(root)
+ for f in files:
+ if f == 'fixmepath':
+ continue
+ process(os.path.join(root, f))
+ finally:
+ os.chdir(prev_dir)
+
+ return h.hexdigest()
+
+
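
For each filesystem entry, OEOuthashBasic hashes an ls -l style record: a type
character, permission bits, optionally owner and group names, device numbers
or file size, a SHA-256 of regular file contents, the path, and any symlink
target. A short usage sketch, assuming 'd' is a datastore with SSTATE_PKGSPEC
set and 'image/' is a populated output tree:

    import oe.sstatesig

    # The sigfile, when given, receives the exact byte stream that is hashed.
    with open('do_install.outhash.txt', 'wb') as sigfile:
        outhash = oe.sstatesig.OEOuthashBasic('image/', sigfile, 'do_install', d)
    print(outhash)  # sha256 hex digest over output metadata and contents
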
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
index 3901ad3f26..a1daa2bed6 100644
--- a/meta/lib/oe/terminal.py
+++ b/meta/lib/oe/terminal.py
@@ -1,3 +1,6 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import logging
import oe.classutils
import shlex
@@ -11,7 +14,8 @@ class UnsupportedTerminal(Exception):
pass
class NoSupportedTerminals(Exception):
- pass
+ def __init__(self, terms):
+ self.terms = terms
class Registry(oe.classutils.ClassRegistry):
@@ -38,7 +42,7 @@ class Terminal(Popen, metaclass=Registry):
raise
def format_command(self, sh_cmd, title):
- fmt = {'title': title or 'Terminal', 'command': sh_cmd}
+ fmt = {'title': title or 'Terminal', 'command': sh_cmd, 'cwd': os.getcwd() }
if isinstance(self.command, str):
return shlex.split(self.command.format(**fmt))
else:
@@ -51,7 +55,7 @@ class XTerminal(Terminal):
raise UnsupportedTerminal(self.name)
class Gnome(XTerminal):
- command = 'gnome-terminal -t "{title}" -x {command}'
+ command = 'gnome-terminal -t "{title}" -- {command}'
priority = 2
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -61,31 +65,10 @@ class Gnome(XTerminal):
# Once fixed on the gnome-terminal project, this should be removed.
if os.getenv('LC_ALL'): os.putenv('LC_ALL','')
- # We need to know when the command completes but gnome-terminal gives us no way
- # to do this. We therefore write the pid to a file using a "phonehome" wrapper
- # script, then monitor the pid until it exits. Thanks gnome!
- import tempfile
- pidfile = tempfile.NamedTemporaryFile(delete = False).name
- try:
- sh_cmd = "oe-gnome-terminal-phonehome " + pidfile + " " + sh_cmd
- XTerminal.__init__(self, sh_cmd, title, env, d)
- while os.stat(pidfile).st_size <= 0:
- continue
- with open(pidfile, "r") as f:
- pid = int(f.readline())
- finally:
- os.unlink(pidfile)
-
- import time
- while True:
- try:
- os.kill(pid, 0)
- time.sleep(0.1)
- except OSError:
- return
+ XTerminal.__init__(self, sh_cmd, title, env, d)
class Mate(XTerminal):
- command = 'mate-terminal -t "{title}" -x {command}'
+ command = 'mate-terminal --disable-factory -t "{title}" -x {command}'
priority = 2
class Xfce(XTerminal):
@@ -97,7 +80,7 @@ class Terminology(XTerminal):
priority = 2
class Konsole(XTerminal):
- command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
+ command = 'konsole --separate --workdir . -p tabtitle="{title}" -e {command}'
priority = 2
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -106,6 +89,9 @@ class Konsole(XTerminal):
if vernum and LooseVersion(vernum) < '2.0.0':
# Konsole from KDE 3.x
self.command = 'konsole -T "{title}" -e {command}'
+ elif vernum and LooseVersion(vernum) < '16.08.1':
+            # Konsole pre 16.08.1 still has --nofork
+ self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
XTerminal.__init__(self, sh_cmd, title, env, d)
class XTerm(XTerminal):
@@ -129,12 +115,12 @@ class Screen(Terminal):
bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
0.5, 10), d)
else:
- logger.warn(msg)
+ logger.warning(msg)
class TmuxRunning(Terminal):
"""Open a new pane in the current running tmux window"""
name = 'tmux-running'
- command = 'tmux split-window "{command}"'
+ command = 'tmux split-window -c "{cwd}" "{command}"'
priority = 2.75
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -152,7 +138,7 @@ class TmuxRunning(Terminal):
class TmuxNewWindow(Terminal):
"""Open a new window in the current running tmux session"""
name = 'tmux-new-window'
- command = 'tmux new-window -n "{title}" "{command}"'
+ command = 'tmux new-window -c "{cwd}" -n "{title}" "{command}"'
priority = 2.70
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -166,7 +152,7 @@ class TmuxNewWindow(Terminal):
class Tmux(Terminal):
"""Start a new tmux session and window"""
- command = 'tmux new -d -s devshell -n devshell "{command}"'
+ command = 'tmux new -c "{cwd}" -d -s devshell -n devshell "{command}"'
priority = 0.75
def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -177,7 +163,7 @@ class Tmux(Terminal):
# devshells, if it's already there, add a new window to it.
window_name = 'devshell-%i' % os.getpid()
- self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name)
+ self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'.format(window_name)
Terminal.__init__(self, sh_cmd, title, env, d)
attach_cmd = 'tmux att -t {0}'.format(window_name)
@@ -185,19 +171,19 @@ class Tmux(Terminal):
if d:
bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
else:
- logger.warn(msg)
+ logger.warning(msg)
class Custom(Terminal):
command = 'false' # This is a placeholder
priority = 3
def __init__(self, sh_cmd, title=None, env=None, d=None):
- self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD', True)
+ self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD')
if self.command:
if not '{command}' in self.command:
self.command += ' {command}'
Terminal.__init__(self, sh_cmd, title, env, d)
- logger.warn('Custom terminal was started.')
+ logger.warning('Custom terminal was started.')
else:
logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
@@ -206,6 +192,14 @@ class Custom(Terminal):
def prioritized():
return Registry.prioritized()
+def get_cmd_list():
+ terms = Registry.prioritized()
+ cmds = []
+ for term in terms:
+ if term.command:
+ cmds.append(term.command)
+ return cmds
+
def spawn_preferred(sh_cmd, title=None, env=None, d=None):
"""Spawn the first supported terminal, by priority"""
for terminal in prioritized():
@@ -215,7 +209,7 @@ def spawn_preferred(sh_cmd, title=None, env=None, d=None):
except UnsupportedTerminal:
continue
else:
- raise NoSupportedTerminals()
+ raise NoSupportedTerminals(get_cmd_list())
def spawn(name, sh_cmd, title=None, env=None, d=None):
"""Spawn the specified terminal, by name"""
@@ -225,10 +219,36 @@ def spawn(name, sh_cmd, title=None, env=None, d=None):
except KeyError:
raise UnsupportedTerminal(name)
- pipe = terminal(sh_cmd, title, env, d)
- output = pipe.communicate()[0]
- if pipe.returncode != 0:
- raise ExecutionError(sh_cmd, pipe.returncode, output)
+ # We need to know when the command completes but some terminals (at least
+    # gnome and tmux) give us no way to do this. We therefore write the pid
+ # to a file using a "phonehome" wrapper script, then monitor the pid
+ # until it exits.
+ import tempfile
+ import time
+ pidfile = tempfile.NamedTemporaryFile(delete = False).name
+ try:
+ sh_cmd = bb.utils.which(os.getenv('PATH'), "oe-gnome-terminal-phonehome") + " " + pidfile + " " + sh_cmd
+ pipe = terminal(sh_cmd, title, env, d)
+ output = pipe.communicate()[0]
+ if output:
+ output = output.decode("utf-8")
+ if pipe.returncode != 0:
+ raise ExecutionError(sh_cmd, pipe.returncode, output)
+
+ while os.stat(pidfile).st_size <= 0:
+            time.sleep(0.01)
+ with open(pidfile, "r") as f:
+ pid = int(f.readline())
+ finally:
+ os.unlink(pidfile)
+
+ while True:
+ try:
+ os.kill(pid, 0)
+ time.sleep(0.1)
+ except OSError:
+ return
def check_tmux_pane_size(tmux):
import subprocess as sub
@@ -275,8 +295,12 @@ def check_terminal_version(terminalName):
vernum = ver.split(' ')[-1]
if ver.startswith('GNOME Terminal'):
vernum = ver.split(' ')[-1]
+ if ver.startswith('MATE Terminal'):
+ vernum = ver.split(' ')[-1]
if ver.startswith('tmux'):
vernum = ver.split()[-1]
+ if ver.startswith('tmux next-'):
+ vernum = ver.split()[-1][5:]
return vernum
def distro_name():
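
The pid handshake that spawn() now performs for every terminal is
self-contained enough to show on its own; a stripped-down sketch of the same
technique (names are illustrative):

    import os
    import time

    def wait_for_wrapped_command(pidfile):
        # Busy-wait until the phonehome wrapper has written its pid...
        while os.stat(pidfile).st_size <= 0:
            time.sleep(0.01)
        with open(pidfile, "r") as f:
            pid = int(f.readline())
        # ...then poll with signal 0 until the process goes away.
        while True:
            try:
                os.kill(pid, 0)
                time.sleep(0.1)
            except OSError:
                return
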
diff --git a/meta/lib/oe/tests/__init__.py b/meta/lib/oe/tests/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/meta/lib/oe/tests/__init__.py
+++ /dev/null
diff --git a/meta/lib/oe/tests/test_license.py b/meta/lib/oe/tests/test_license.py
deleted file mode 100644
index c388886184..0000000000
--- a/meta/lib/oe/tests/test_license.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import unittest
-import oe.license
-
-class SeenVisitor(oe.license.LicenseVisitor):
- def __init__(self):
- self.seen = []
- oe.license.LicenseVisitor.__init__(self)
-
- def visit_Str(self, node):
- self.seen.append(node.s)
-
-class TestSingleLicense(unittest.TestCase):
- licenses = [
- "GPLv2",
- "LGPL-2.0",
- "Artistic",
- "MIT",
- "GPLv3+",
- "FOO_BAR",
- ]
- invalid_licenses = ["GPL/BSD"]
-
- @staticmethod
- def parse(licensestr):
- visitor = SeenVisitor()
- visitor.visit_string(licensestr)
- return visitor.seen
-
- def test_single_licenses(self):
- for license in self.licenses:
- licenses = self.parse(license)
- self.assertListEqual(licenses, [license])
-
- def test_invalid_licenses(self):
- for license in self.invalid_licenses:
- with self.assertRaises(oe.license.InvalidLicense) as cm:
- self.parse(license)
- self.assertEqual(cm.exception.license, license)
-
-class TestSimpleCombinations(unittest.TestCase):
- tests = {
- "FOO&BAR": ["FOO", "BAR"],
- "BAZ & MOO": ["BAZ", "MOO"],
- "ALPHA|BETA": ["ALPHA"],
- "BAZ&MOO|FOO": ["FOO"],
- "FOO&BAR|BAZ": ["FOO", "BAR"],
- }
- preferred = ["ALPHA", "FOO", "BAR"]
-
- def test_tests(self):
- def choose(a, b):
- if all(lic in self.preferred for lic in b):
- return b
- else:
- return a
-
- for license, expected in self.tests.items():
- licenses = oe.license.flattened_licenses(license, choose)
- self.assertListEqual(licenses, expected)
-
-class TestComplexCombinations(TestSimpleCombinations):
- tests = {
- "FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"],
- "(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"],
- "((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"],
- "(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
- }
- preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
diff --git a/meta/lib/oe/tests/test_path.py b/meta/lib/oe/tests/test_path.py
deleted file mode 100644
index 44d068143e..0000000000
--- a/meta/lib/oe/tests/test_path.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import unittest
-import oe, oe.path
-import tempfile
-import os
-import errno
-import shutil
-
-class TestRealPath(unittest.TestCase):
- DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ]
- FILES = [ "etc/passwd", "b/file" ]
- LINKS = [
- ( "bin", "/usr/bin", "/usr/bin" ),
- ( "binX", "usr/binX", "/usr/binX" ),
- ( "c", "broken", "/broken" ),
- ( "etc/passwd-1", "passwd", "/etc/passwd" ),
- ( "etc/passwd-2", "passwd-1", "/etc/passwd" ),
- ( "etc/passwd-3", "/etc/passwd-1", "/etc/passwd" ),
- ( "etc/shadow-1", "/etc/shadow", "/etc/shadow" ),
- ( "etc/shadow-2", "/etc/shadow-1", "/etc/shadow" ),
- ( "prog-A", "bin/prog-A", "/usr/bin/prog-A" ),
- ( "prog-B", "/bin/prog-B", "/usr/bin/prog-B" ),
- ( "usr/bin/prog-C", "../../sbin/prog-C", "/sbin/prog-C" ),
- ( "usr/bin/prog-D", "/sbin/prog-D", "/sbin/prog-D" ),
- ( "usr/binX/prog-E", "../sbin/prog-E", None ),
- ( "usr/bin/prog-F", "../../../sbin/prog-F", "/sbin/prog-F" ),
- ( "loop", "a/loop", None ),
- ( "a/loop", "../loop", None ),
- ( "b/test", "file/foo", "/b/file/foo" ),
- ]
-
- LINKS_PHYS = [
- ( "./", "/", "" ),
- ( "binX/prog-E", "/usr/sbin/prog-E", "/sbin/prog-E" ),
- ]
-
- EXCEPTIONS = [
- ( "loop", errno.ELOOP ),
- ( "b/test", errno.ENOENT ),
- ]
-
- def __del__(self):
- try:
- #os.system("tree -F %s" % self.tmpdir)
- shutil.rmtree(self.tmpdir)
- except:
- pass
-
- def setUp(self):
- self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path")
- self.root = os.path.join(self.tmpdir, "R")
-
- os.mkdir(os.path.join(self.tmpdir, "_real"))
- os.symlink("_real", self.root)
-
- for d in self.DIRS:
- os.mkdir(os.path.join(self.root, d))
- for f in self.FILES:
- open(os.path.join(self.root, f), "w")
- for l in self.LINKS:
- os.symlink(l[1], os.path.join(self.root, l[0]))
-
- def __realpath(self, file, use_physdir, assume_dir = True):
- return oe.path.realpath(os.path.join(self.root, file), self.root,
- use_physdir, assume_dir = assume_dir)
-
- def test_norm(self):
- for l in self.LINKS:
- if l[2] == None:
- continue
-
- target_p = self.__realpath(l[0], True)
- target_l = self.__realpath(l[0], False)
-
- if l[2] != False:
- self.assertEqual(target_p, target_l)
- self.assertEqual(l[2], target_p[len(self.root):])
-
- def test_phys(self):
- for l in self.LINKS_PHYS:
- target_p = self.__realpath(l[0], True)
- target_l = self.__realpath(l[0], False)
-
- self.assertEqual(l[1], target_p[len(self.root):])
- self.assertEqual(l[2], target_l[len(self.root):])
-
- def test_loop(self):
- for e in self.EXCEPTIONS:
- self.assertRaisesRegex(OSError, r'\[Errno %u\]' % e[1],
- self.__realpath, e[0], False, False)
diff --git a/meta/lib/oe/tests/test_types.py b/meta/lib/oe/tests/test_types.py
deleted file mode 100644
index 367cc30e45..0000000000
--- a/meta/lib/oe/tests/test_types.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import unittest
-from oe.maketype import create, factory
-
-class TestTypes(unittest.TestCase):
- def assertIsInstance(self, obj, cls):
- return self.assertTrue(isinstance(obj, cls))
-
- def assertIsNot(self, obj, other):
- return self.assertFalse(obj is other)
-
- def assertFactoryCreated(self, value, type, **flags):
- cls = factory(type)
- self.assertIsNot(cls, None)
- self.assertIsInstance(create(value, type, **flags), cls)
-
-class TestBooleanType(TestTypes):
- def test_invalid(self):
- self.assertRaises(ValueError, create, '', 'boolean')
- self.assertRaises(ValueError, create, 'foo', 'boolean')
- self.assertRaises(TypeError, create, object(), 'boolean')
-
- def test_true(self):
- self.assertTrue(create('y', 'boolean'))
- self.assertTrue(create('yes', 'boolean'))
- self.assertTrue(create('1', 'boolean'))
- self.assertTrue(create('t', 'boolean'))
- self.assertTrue(create('true', 'boolean'))
- self.assertTrue(create('TRUE', 'boolean'))
- self.assertTrue(create('truE', 'boolean'))
-
- def test_false(self):
- self.assertFalse(create('n', 'boolean'))
- self.assertFalse(create('no', 'boolean'))
- self.assertFalse(create('0', 'boolean'))
- self.assertFalse(create('f', 'boolean'))
- self.assertFalse(create('false', 'boolean'))
- self.assertFalse(create('FALSE', 'boolean'))
- self.assertFalse(create('faLse', 'boolean'))
-
- def test_bool_equality(self):
- self.assertEqual(create('n', 'boolean'), False)
- self.assertNotEqual(create('n', 'boolean'), True)
- self.assertEqual(create('y', 'boolean'), True)
- self.assertNotEqual(create('y', 'boolean'), False)
-
-class TestList(TestTypes):
- def assertListEqual(self, value, valid, sep=None):
- obj = create(value, 'list', separator=sep)
- self.assertEqual(obj, valid)
- if sep is not None:
- self.assertEqual(obj.separator, sep)
- self.assertEqual(str(obj), obj.separator.join(obj))
-
- def test_list_nosep(self):
- testlist = ['alpha', 'beta', 'theta']
- self.assertListEqual('alpha beta theta', testlist)
- self.assertListEqual('alpha beta\ttheta', testlist)
- self.assertListEqual('alpha', ['alpha'])
-
- def test_list_usersep(self):
- self.assertListEqual('foo:bar', ['foo', 'bar'], ':')
- self.assertListEqual('foo:bar:baz', ['foo', 'bar', 'baz'], ':')
diff --git a/meta/lib/oe/tests/test_utils.py b/meta/lib/oe/tests/test_utils.py
deleted file mode 100644
index 5d9ac52e7d..0000000000
--- a/meta/lib/oe/tests/test_utils.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import unittest
-from oe.utils import packages_filter_out_system
-
-class TestPackagesFilterOutSystem(unittest.TestCase):
- def test_filter(self):
- """
- Test that oe.utils.packages_filter_out_system works.
- """
- try:
- import bb
- except ImportError:
- self.skipTest("Cannot import bb")
-
- d = bb.data_smart.DataSmart()
- d.setVar("PN", "foo")
-
- d.setVar("PACKAGES", "foo foo-doc foo-dev")
- pkgs = packages_filter_out_system(d)
- self.assertEqual(pkgs, [])
-
- d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev")
- pkgs = packages_filter_out_system(d)
- self.assertEqual(pkgs, ["foo-data"])
-
- d.setVar("PACKAGES", "foo foo-locale-en-gb")
- pkgs = packages_filter_out_system(d)
- self.assertEqual(pkgs, [])
-
- d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb")
- pkgs = packages_filter_out_system(d)
- self.assertEqual(pkgs, ["foo-data"])
-
-
-class TestTrimVersion(unittest.TestCase):
- def test_version_exception(self):
- with self.assertRaises(TypeError):
- trim_version(None, 2)
- with self.assertRaises(TypeError):
- trim_version((1, 2, 3), 2)
-
- def test_num_exception(self):
- with self.assertRaises(ValueError):
- trim_version("1.2.3", 0)
- with self.assertRaises(ValueError):
- trim_version("1.2.3", -1)
-
- def test_valid(self):
- self.assertEqual(trim_version("1.2.3", 1), "1")
- self.assertEqual(trim_version("1.2.3", 2), "1.2")
- self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
- self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py
index 4ae58acfac..bbbabafbf6 100644
--- a/meta/lib/oe/types.py
+++ b/meta/lib/oe/types.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import errno
import re
import os
@@ -103,8 +107,13 @@ def boolean(value):
"""OpenEmbedded 'boolean' type
Valid values for true: 'yes', 'y', 'true', 't', '1'
- Valid values for false: 'no', 'n', 'false', 'f', '0'
+ Valid values for false: 'no', 'n', 'false', 'f', '0', None
"""
+ if value is None:
+ return False
+
+ if isinstance(value, bool):
+ return value
if not isinstance(value, str):
raise TypeError("boolean accepts a string, not '%s'" % type(value))
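
With the two early returns added above, boolean() now tolerates unset
variables and already-typed values; a few illustrative calls (the string cases
match the behaviour exercised by the removed oe.tests tests):

    import oe.types

    oe.types.boolean(None)    # False: unset variables no longer raise
    oe.types.boolean(True)    # True: real booleans pass straight through
    oe.types.boolean('truE')  # True: case-insensitive parsing as before
    oe.types.boolean('0')     # False
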
@@ -145,9 +154,33 @@ def path(value, relativeto='', normalize='true', mustexist='false'):
if boolean(mustexist):
try:
- open(value, 'r')
+ with open(value, 'r'):
+ pass
except IOError as exc:
if exc.errno == errno.ENOENT:
raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))
return value
+
+def is_x86(arch):
+ """
+ Check whether arch is x86 or x86_64
+ """
+ if arch.startswith('x86_') or re.match('i.*86', arch):
+ return True
+ else:
+ return False
+
+def qemu_use_kvm(kvm, target_arch):
+ """
+ Enable kvm if target_arch == build_arch or both of them are x86 archs.
+ """
+
+ use_kvm = False
+ if kvm and boolean(kvm):
+ build_arch = os.uname()[4]
+ if is_x86(build_arch) and is_x86(target_arch):
+ use_kvm = True
+ elif build_arch == target_arch:
+ use_kvm = True
+ return use_kvm
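
In effect qemu_use_kvm() enables KVM for same-architecture builds and for any
x86-on-x86 combination. For example, assuming os.uname()[4] reports 'x86_64'
on the build host:

    import oe.types

    oe.types.qemu_use_kvm('1', 'i686')     # True: both are x86 family
    oe.types.qemu_use_kvm('1', 'aarch64')  # False: architectures differ
    oe.types.qemu_use_kvm('', 'x86_64')    # False: kvm was not requested
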
diff --git a/meta/lib/oe/useradd.py b/meta/lib/oe/useradd.py
new file mode 100644
index 0000000000..8fc77568ff
--- /dev/null
+++ b/meta/lib/oe/useradd.py
@@ -0,0 +1,71 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+import argparse
+import re
+
+class myArgumentParser(argparse.ArgumentParser):
+ def _print_message(self, message, file=None):
+ bb.warn("%s - %s: %s" % (d.getVar('PN'), pkg, message))
+
+ # This should never be called...
+ def exit(self, status=0, message=None):
+ message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN'), pkg))
+        self.error(message)
+
+ def error(self, message):
+ bb.fatal(message)
+
+def split_commands(params):
+ params = re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
+ # Remove any empty items
+ return [x for x in params if x]
+
+def split_args(params):
+ params = re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
+ # Remove any empty items
+ return [x for x in params if x]
+
+def build_useradd_parser():
+ # The following comes from --help on useradd from shadow
+ parser = myArgumentParser(prog='useradd')
+ parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
+ parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account")
+ parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account")
+ parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true")
+ parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account")
+ parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account")
+ parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account")
+ parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account")
+ parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
+ parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
+ parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
+ parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_const", const=True)
+ parser.add_argument("-M", "--no-create-home", dest="create_home", help="do not create the user's home directory", action="store_const", const=False)
+ parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
+    parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique) UID", action="store_true")
+ parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
+ parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new account")
+ parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
+ parser.add_argument("-r", "--system", help="create a system account", action="store_true")
+ parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
+ parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
+ parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_const", const=True)
+ parser.add_argument("LOGIN", help="Login name of the new user")
+
+ return parser
+
+def build_groupadd_parser():
+ # The following comes from --help on groupadd from shadow
+ parser = myArgumentParser(prog='groupadd')
+ parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
+ parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group")
+ parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
+ parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
+ parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
+ parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new group")
+ parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
+ parser.add_argument("-r", "--system", help="create a system account", action="store_true")
+ parser.add_argument("GROUP", help="Group name of the new group")
+
+ return parser
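
The splitter regexes only honour separators that sit outside quotes, so quoted
GECOS fields survive intact. A short sketch of how the pieces combine; the
command line is hypothetical:

    import oe.useradd

    cmds = oe.useradd.split_commands(
        "useradd -u 1200 -c 'Test; User' testuser; groupadd -g 880 testgrp")
    # Two commands: the quoted ';' inside the comment is not a separator.
    parser = oe.useradd.build_useradd_parser()
    args = parser.parse_args(oe.useradd.split_args(cmds[0]))
    print(args.uid, args.LOGIN)  # 1200 testuser
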
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
index d6545b197d..652b2be145 100644
--- a/meta/lib/oe/utils.py
+++ b/meta/lib/oe/utils.py
@@ -1,9 +1,10 @@
-try:
- # Python 2
- import commands as cmdstatus
-except ImportError:
- # Python 3
- import subprocess as cmdstatus
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import subprocess
+import multiprocessing
+import traceback
def read_file(filename):
try:
@@ -23,27 +24,34 @@ def ifelse(condition, iftrue = True, iffalse = False):
return iffalse
def conditional(variable, checkvalue, truevalue, falsevalue, d):
- if d.getVar(variable, True) == checkvalue:
+ if d.getVar(variable) == checkvalue:
return truevalue
else:
return falsevalue
+def vartrue(var, iftrue, iffalse, d):
+ import oe.types
+ if oe.types.boolean(d.getVar(var)):
+ return iftrue
+ else:
+ return iffalse
+
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- if float(d.getVar(variable, True)) <= float(checkvalue):
+ if float(d.getVar(variable)) <= float(checkvalue):
return truevalue
else:
return falsevalue
def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- result = bb.utils.vercmp_string(d.getVar(variable,True), checkvalue)
+ result = bb.utils.vercmp_string(d.getVar(variable), checkvalue)
if result <= 0:
return truevalue
else:
return falsevalue
def both_contain(variable1, variable2, checkvalue, d):
- val1 = d.getVar(variable1, True)
- val2 = d.getVar(variable2, True)
+ val1 = d.getVar(variable1)
+ val2 = d.getVar(variable2)
val1 = set(val1.split())
val2 = set(val2.split())
if isinstance(checkvalue, str):
@@ -66,20 +74,20 @@ def set_intersect(variable1, variable2, d):
s3 = set_intersect(s1, s2)
=> s3 = "b c"
"""
- val1 = set(d.getVar(variable1, True).split())
- val2 = set(d.getVar(variable2, True).split())
+ val1 = set(d.getVar(variable1).split())
+ val2 = set(d.getVar(variable2).split())
return " ".join(val1 & val2)
def prune_suffix(var, suffixes, d):
# See if var ends with any of the suffixes listed and
# remove it if found
for suffix in suffixes:
- if var.endswith(suffix):
- var = var.replace(suffix, "")
+ if suffix and var.endswith(suffix):
+ var = var[:-len(suffix)]
- prefix = d.getVar("MLPREFIX", True)
+ prefix = d.getVar("MLPREFIX")
if prefix and var.startswith(prefix):
- var = var.replace(prefix, "")
+ var = var[len(prefix):]
return var
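
The move from str.replace() to slicing matters because replace() removes every
occurrence of the affix, not just the one at the boundary, and the new
'if suffix' guard prevents var[:-0] from emptying the string. A hypothetical
name shows the difference:

    "lib64-alib64-tools".replace("lib64-", "")  # 'atools': both hits stripped
    "lib64-alib64-tools"[len("lib64-"):]        # 'alib64-tools': prefix only
    "pkgname"[:-len("")]                        # '': why the guard is needed
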
@@ -91,16 +99,9 @@ def str_filter_out(f, str, d):
from re import match
return " ".join([x for x in str.split() if not match(f, x, 0)])
-def param_bool(cfg, field, dflt = None):
- """Lookup <field> in <cfg> map and convert it to a boolean; take
- <dflt> when this <field> does not exist"""
- value = cfg.get(field, dflt)
- strvalue = str(value).lower()
- if strvalue in ('yes', 'y', 'true', 't', '1'):
- return True
- elif strvalue in ('no', 'n', 'false', 'f', '0'):
- return False
- raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
+def build_depends_string(depends, task):
+ """Append a taskname to a string of dependencies as used by the [depends] flag"""
+ return " ".join(dep + ":" + task for dep in depends.split())
def inherits(d, *classes):
"""Return True if the metadata inherits any of the specified classes"""
@@ -115,9 +116,9 @@ def features_backfill(var,d):
# disturbing distributions that have already set DISTRO_FEATURES.
# Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should
# add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED
- features = (d.getVar(var, True) or "").split()
- backfill = (d.getVar(var+"_BACKFILL", True) or "").split()
- considered = (d.getVar(var+"_BACKFILL_CONSIDERED", True) or "").split()
+ features = (d.getVar(var) or "").split()
+ backfill = (d.getVar(var+"_BACKFILL") or "").split()
+ considered = (d.getVar(var+"_BACKFILL_CONSIDERED") or "").split()
addfeatures = []
for feature in backfill:
@@ -127,24 +128,107 @@ def features_backfill(var,d):
if addfeatures:
d.appendVar(var, " " + " ".join(addfeatures))
+def all_distro_features(d, features, truevalue="1", falsevalue=""):
+ """
+ Returns truevalue if *all* given features are set in DISTRO_FEATURES,
+ else falsevalue. The features can be given as single string or anything
+ that can be turned into a set.
+
+ This is a shorter, more flexible version of
+ bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d).
+
+ Without explicit true/false values it can be used directly where
+ Python expects a boolean:
+ if oe.utils.all_distro_features(d, "foo bar"):
+ bb.fatal("foo and bar are mutually exclusive DISTRO_FEATURES")
+
+ With just a truevalue, it can be used to include files that are meant to be
+ used only when requested via DISTRO_FEATURES:
+    require ${@ oe.utils.all_distro_features(d, "foo bar", "foo-and-bar.inc")}
+ """
+ return bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d)
+
+def any_distro_features(d, features, truevalue="1", falsevalue=""):
+ """
+ Returns truevalue if at least *one* of the given features is set in DISTRO_FEATURES,
+ else falsevalue. The features can be given as single string or anything
+ that can be turned into a set.
+
+ This is a shorter, more flexible version of
+ bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d).
+
+ Without explicit true/false values it can be used directly where
+ Python expects a boolean:
+ if not oe.utils.any_distro_features(d, "foo bar"):
+ bb.fatal("foo, bar or both must be set in DISTRO_FEATURES")
+
+ With just a truevalue, it can be used to include files that are meant to be
+ used only when requested via DISTRO_FEATURES:
+    require ${@ oe.utils.any_distro_features(d, "foo bar", "foo-or-bar.inc")}
+
+ """
+ return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
+
+def parallel_make(d):
+ """
+ Return the integer value for the number of parallel threads to use when
+ building, scraped out of PARALLEL_MAKE. If no parallelization option is
+ found, returns None
+
+ e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
+ """
+ pm = (d.getVar('PARALLEL_MAKE') or '').split()
+ # look for '-j' and throw other options (e.g. '-l') away
+ while pm:
+ opt = pm.pop(0)
+ if opt == '-j':
+ v = pm.pop(0)
+ elif opt.startswith('-j'):
+ v = opt[2:].strip()
+ else:
+ continue
+
+ return int(v)
+
+ return None
+
+def parallel_make_argument(d, fmt, limit=None):
+ """
+ Helper utility to construct a parallel make argument from the number of
+ parallel threads specified in PARALLEL_MAKE.
+
+ Returns the input format string `fmt` where a single '%d' will be expanded
+ with the number of parallel threads to use. If `limit` is specified, the
+ number of parallel threads will be no larger than it. If no parallelization
+ option is found in PARALLEL_MAKE, returns an empty string
+
+ e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return
+ "-n 10"
+ """
+ v = parallel_make(d)
+ if v:
+ if limit:
+ v = min(limit, v)
+ return fmt % v
+ return ''
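
A few illustrative results for the pair of helpers above, assuming the stated
PARALLEL_MAKE values in the datastore 'd':

    import oe.utils

    # "-j 10 -l 20" -> oe.utils.parallel_make(d) == 10  ('-l 20' is skipped)
    # "-j8"         -> oe.utils.parallel_make(d) == 8
    # ""            -> oe.utils.parallel_make(d) is None
    oe.utils.parallel_make_argument(d, '--jobs=%d', limit=4)  # '--jobs=4'
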
def packages_filter_out_system(d):
"""
Return a list of packages from PACKAGES with the "system" packages such as
    PN-dbg PN-doc PN-locale-en-gb removed.
"""
- pn = d.getVar('PN', True)
- blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev')]
+ pn = d.getVar('PN')
+ blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')]
localepkg = pn + "-locale-"
pkgs = []
- for pkg in d.getVar('PACKAGES', True).split():
+ for pkg in d.getVar('PACKAGES').split():
if pkg not in blacklist and localepkg not in pkg:
pkgs.append(pkg)
return pkgs
def getstatusoutput(cmd):
- return cmdstatus.getstatusoutput(cmd)
+ return subprocess.getstatusoutput(cmd)
def trim_version(version, num_parts=2):
@@ -175,38 +259,87 @@ def execute_pre_post_process(d, cmds):
bb.note("Executing %s ..." % cmd)
bb.build.exec_func(cmd, d)
-def multiprocess_exec(commands, function):
- import signal
- import multiprocessing
-
- if not commands:
- return []
+# For each item in items, call the function 'target' with item as the first
+# argument, extraargs as the other arguments and handle any exceptions in the
+# parent thread
+def multiprocess_launch(target, items, d, extraargs=None):
- def init_worker():
- signal.signal(signal.SIGINT, signal.SIG_IGN)
+ class ProcessLaunch(multiprocessing.Process):
+ def __init__(self, *args, **kwargs):
+ multiprocessing.Process.__init__(self, *args, **kwargs)
+ self._pconn, self._cconn = multiprocessing.Pipe()
+ self._exception = None
+ self._result = None
- nproc = min(multiprocessing.cpu_count(), len(commands))
- pool = bb.utils.multiprocessingpool(nproc, init_worker)
- imap = pool.imap(function, commands)
-
- try:
- res = list(imap)
- pool.close()
- pool.join()
- results = []
- for result in res:
- if result is not None:
- results.append(result)
- return results
-
- except KeyboardInterrupt:
- pool.terminate()
- pool.join()
- raise
+ def run(self):
+ try:
+ ret = self._target(*self._args, **self._kwargs)
+ self._cconn.send((None, ret))
+ except Exception as e:
+ tb = traceback.format_exc()
+ self._cconn.send((e, tb))
+
+ def update(self):
+ if self._pconn.poll():
+ (e, tb) = self._pconn.recv()
+ if e is not None:
+ self._exception = (e, tb)
+ else:
+ self._result = tb
+
+ @property
+ def exception(self):
+ self.update()
+ return self._exception
+
+ @property
+ def result(self):
+ self.update()
+ return self._result
+
+ max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
+ launched = []
+ errors = []
+ results = []
+ items = list(items)
+ while (items and not errors) or launched:
+ if not errors and items and len(launched) < max_process:
+ args = (items.pop(),)
+ if extraargs is not None:
+ args = args + extraargs
+ p = ProcessLaunch(target=target, args=args)
+ p.start()
+ launched.append(p)
+ for q in launched:
+ # Have to manually call update() to avoid deadlocks. The pipe can be full and
+ # transfer stalled until we try and read the results object but the subprocess won't exit
+ # as it still has data to write (https://bugs.python.org/issue8426)
+ q.update()
+ # The finished processes are joined when calling is_alive()
+ if not q.is_alive():
+ if q.exception:
+ errors.append(q.exception)
+ if q.result:
+ results.append(q.result)
+ launched.remove(q)
+ # Paranoia doesn't hurt
+ for p in launched:
+ p.join()
+ if errors:
+ msg = ""
+ for (e, tb) in errors:
+ if isinstance(e, subprocess.CalledProcessError) and e.output:
+ msg = msg + str(e) + "\n"
+ msg = msg + "Subprocess output:"
+ msg = msg + e.output.decode("utf-8", errors="ignore")
+ else:
+ msg = msg + str(e) + ": " + str(tb) + "\n"
+ bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg)
+ return results
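
multiprocess_launch() starts one process per item, throttled to
BB_NUMBER_THREADS, and surfaces child exceptions in the parent through
bb.fatal. A minimal usage sketch; the worker function is hypothetical:

    import oe.utils

    def checksum_one(path, algo):
        # Runs in a child process; the return value travels back over a pipe.
        import hashlib
        with open(path, 'rb') as f:
            return (path, getattr(hashlib, algo)(f.read()).hexdigest())

    # Each item becomes the first argument; extraargs is appended to every call.
    results = oe.utils.multiprocess_launch(
        checksum_one, ['/etc/hostname', '/etc/hosts'], d, extraargs=('sha256',))
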
def squashspaces(string):
import re
- return re.sub("\s+", " ", string).strip()
+ return re.sub(r"\s+", " ", string).strip()
def format_pkg_list(pkg_dict, ret_format=None):
output = []
@@ -228,7 +361,55 @@ def format_pkg_list(pkg_dict, ret_format=None):
for pkg in sorted(pkg_dict):
output.append(pkg)
- return '\n'.join(output)
+ output_str = '\n'.join(output)
+
+ if output_str:
+ # make sure last line is newline terminated
+ output_str += '\n'
+
+ return output_str
+
+def host_gcc_version(d, taskcontextonly=False):
+ import re, subprocess
+
+ if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
+ return
+
+ compiler = d.getVar("BUILD_CC")
+ # Get rid of ccache since it is not present when parsing.
+ if compiler.startswith('ccache '):
+ compiler = compiler[7:]
+ try:
+ env = os.environ.copy()
+ env["PATH"] = d.getVar("PATH")
+ output = subprocess.check_output("%s --version" % compiler, \
+ shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
+
+ match = re.match(r".* (\d\.\d)\.\d.*", output.split('\n')[0])
+ if not match:
+ bb.fatal("Can't get compiler version from %s --version output" % compiler)
+
+ version = match.group(1)
+ return "-%s" % version if version in ("4.8", "4.9") else ""
+
+
+def get_multilib_datastore(variant, d):
+ localdata = bb.data.createCopy(d)
+ if variant:
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", variant + "-")
+ else:
+ origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
+ if origdefault:
+ localdata.setVar("DEFAULTTUNE", origdefault)
+ overrides = localdata.getVar("OVERRIDES", False).split(":")
+ overrides = ":".join([x for x in overrides if not x.startswith("virtclass-multilib-")])
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", "")
+ return localdata
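
The returned datastore expands variables the way the requested variant would
see them; a quick sketch, assuming a configured 'lib32' multilib:

    import oe.utils

    localdata = oe.utils.get_multilib_datastore('lib32', d)
    assert localdata.getVar('MLPREFIX') == 'lib32-'
    assert 'virtclass-multilib-lib32' in localdata.getVar('OVERRIDES')
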
#
# Python 2.7 doesn't have threaded pools (just multiprocessing)
@@ -302,10 +483,10 @@ def write_ld_so_conf(d):
bb.utils.remove(ldsoconf)
bb.utils.mkdirhier(os.path.dirname(ldsoconf))
with open(ldsoconf, "w") as f:
- f.write(d.getVar("base_libdir", True) + '\n')
- f.write(d.getVar("libdir", True) + '\n')
+ f.write(d.getVar("base_libdir") + '\n')
+ f.write(d.getVar("libdir") + '\n')
-class ImageQAFailed(bb.build.FuncFailed):
+class ImageQAFailed(Exception):
def __init__(self, description, name=None, logfile=None):
self.description = description
self.name = name
@@ -317,3 +498,7 @@ class ImageQAFailed(bb.build.FuncFailed):
msg = msg + ' (%s)' % self.description
return msg
+
+def sh_quote(string):
+ import shlex
+ return shlex.quote(string)