Diffstat (limited to 'meta/lib/oe/rootfs.py')
-rw-r--r--  meta/lib/oe/rootfs.py | 900
1 file changed, 153 insertions(+), 747 deletions(-)
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
index ed0bab1fa4..f26b66ba7a 100644
--- a/meta/lib/oe/rootfs.py
+++ b/meta/lib/oe/rootfs.py
@@ -1,26 +1,30 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.package_manager import *
from oe.manifest import *
import oe.path
-import filecmp
import shutil
import os
import subprocess
import re
-
class Rootfs(object, metaclass=ABCMeta):
"""
This is an abstract class. Do not instantiate this directly.
"""
- def __init__(self, d, progress_reporter=None):
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
self.d = d
self.pm = None
- self.image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
- self.deploy_dir_image = self.d.getVar('DEPLOY_DIR_IMAGE', True)
+ self.image_rootfs = self.d.getVar('IMAGE_ROOTFS')
+ self.deploydir = self.d.getVar('IMGDEPLOYDIR')
self.progress_reporter = progress_reporter
+ self.logcatcher = logcatcher
self.install_order = Manifest.INSTALL_ORDER
@@ -47,12 +51,16 @@ class Rootfs(object, metaclass=ABCMeta):
excludes = [ 'log_check', r'^\+' ]
if hasattr(self, 'log_check_expected_regexes'):
excludes.extend(self.log_check_expected_regexes)
+ # Insert custom log_check excludes
+ excludes += [x for x in (self.d.getVar("IMAGE_LOG_CHECK_EXCLUDES") or "").split(" ") if x]
excludes = [re.compile(x) for x in excludes]
r = re.compile(match)
log_path = self.d.expand("${T}/log.do_rootfs")
messages = []
with open(log_path, 'r') as log:
for line in log:
+ if self.logcatcher and self.logcatcher.contains(line.rstrip()):
+ continue
for ee in excludes:
m = ee.search(line)
if m:
@@ -69,7 +77,7 @@ class Rootfs(object, metaclass=ABCMeta):
else:
msg = '%d %s messages' % (len(messages), type)
msg = '[log_check] %s: found %s in the logfile:\n%s' % \
- (self.d.getVar('PN', True), msg, ''.join(messages))
+ (self.d.getVar('PN'), msg, ''.join(messages))
if type == 'error':
bb.fatal(msg)
else:
@@ -81,14 +89,37 @@ class Rootfs(object, metaclass=ABCMeta):
def _log_check_error(self):
self._log_check_common('error', self.log_check_regex)
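
As a point of reference, the exclude handling above can be read in isolation: patterns from IMAGE_LOG_CHECK_EXCLUDES are appended to the built-in excludes, and any log line matching an exclude is skipped before the error/warning regex is applied. A minimal, self-contained sketch (illustrative function name; the real method also honours log_check_expected_regexes and the logcatcher):

import re

def filter_log_lines(lines, match_regex, extra_excludes=""):
    excludes = ['log_check', r'^\+'] + [x for x in extra_excludes.split() if x]
    excludes = [re.compile(x) for x in excludes]
    matcher = re.compile(match_regex)
    hits = []
    for line in lines:
        if any(e.search(line) for e in excludes):
            continue
        if matcher.search(line):
            hits.append(line)
    return hits

# filter_log_lines(["ERROR: foo failed", "log_check: ERROR ignored"], "ERROR: ")
# -> ["ERROR: foo failed"]
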
+ def _get_feed_archs(self):
+ feed_archs = self.d.getVar('PACKAGE_FEED_ARCHS') or ''
+ if not feed_archs:
+ bb.note(" Figuring PACKAGE_FEED_ARCHS from ALL_MULTILIB_PACKAGE_ARCHS")
+ manager = self.d.getVar('ROOTFS_PKGMANAGE')
+ deploy_dir = ''
+ if 'rpm' in manager:
+ deploy_dir = self.d.getVar('DEPLOY_DIR_RPM')
+ elif 'opkg' in manager:
+ deploy_dir = self.d.getVar('DEPLOY_DIR_IPK')
+ elif 'dpkg' in manager:
+ deploy_dir = self.d.getVar('DEPLOY_DIR_DEB')
+ else:
+ bb.warn('Failed to figure out deploy_dir')
+ return ''
+
+ for arch in self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').split():
+ if 'rpm' in manager:
+ arch = arch.replace("-", "_")
+ arch_path = os.path.join(deploy_dir, arch)
+ if os.path.exists(arch_path):
+ feed_archs += " %s" % arch
+ return feed_archs.strip()
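
Outside BitBake, the arch-probing logic of _get_feed_archs() boils down to checking which multilib package arches actually have a subdirectory in the package deploy directory. A rough, hypothetical helper (assumed directory layout; RPM spells arches with underscores):

import os

def guess_feed_archs(deploy_dir, all_multilib_archs, pkg_format="ipk"):
    archs = []
    for arch in all_multilib_archs.split():
        if pkg_format == "rpm":
            arch = arch.replace("-", "_")  # RPM arch names use underscores
        if os.path.isdir(os.path.join(deploy_dir, arch)):
            archs.append(arch)
    return " ".join(archs)
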
+
def _insert_feed_uris(self):
if bb.utils.contains("IMAGE_FEATURES", "package-management",
True, False, self.d):
- self.pm.insert_feeds_uris()
+ self.pm.insert_feeds_uris(self.d.getVar('PACKAGE_FEED_URIS') or "",
+ self.d.getVar('PACKAGE_FEED_BASE_PATHS') or "",
+ self._get_feed_archs())
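
The three values handed to insert_feeds_uris() are combined by each package-manager backend into its own feed configuration. Conceptually (the exact syntax written by opkg, dnf or apt differs), each feed is a URI joined with an optional base path and one feed arch:

def feed_locations(uris, base_paths, archs):
    # Conceptual cross product only; each backend writes its own config format.
    locations = []
    for uri in uris.split():
        for base in (base_paths.split() or [""]):
            for arch in archs.split():
                parts = (uri.rstrip("/"), base.strip("/"), arch)
                locations.append("/".join(p for p in parts if p))
    return locations
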
- @abstractmethod
- def _handle_intercept_failure(self, failed_script):
- pass
"""
The _cleanup() method should be used to clean-up stuff that we don't really
@@ -100,7 +131,7 @@ class Rootfs(object, metaclass=ABCMeta):
pass
def _setup_dbg_rootfs(self, dirs):
- gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS', True) or '0'
+ gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
if gen_debugfs != '1':
return
@@ -109,7 +140,7 @@ class Rootfs(object, metaclass=ABCMeta):
shutil.rmtree(self.image_rootfs + '-orig')
except:
pass
- os.rename(self.image_rootfs, self.image_rootfs + '-orig')
+ bb.utils.rename(self.image_rootfs, self.image_rootfs + '-orig')
bb.note(" Creating debug rootfs...")
bb.utils.mkdirhier(self.image_rootfs)
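
The switch from os.rename() to bb.utils.rename() here (and in the rename/restore steps further down) matters when the rootfs directories live on different filesystems. A plain-Python approximation of that behaviour, assuming the fallback is a copy-based move across devices:

import errno, os, shutil

def rename(src, dst):
    try:
        os.rename(src, dst)           # fast path, same filesystem
    except OSError as err:
        if err.errno == errno.EXDEV:  # cross-device: fall back to copying
            shutil.move(src, dst)
        else:
            raise
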
@@ -121,17 +152,16 @@ class Rootfs(object, metaclass=ABCMeta):
bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(dir))
shutil.copytree(self.image_rootfs + '-orig' + dir, self.image_rootfs + dir, symlinks=True)
- cpath = oe.cachedpath.CachedPath()
# Copy files located in /usr/lib/debug or /usr/src/debug
for dir in ["/usr/lib/debug", "/usr/src/debug"]:
src = self.image_rootfs + '-orig' + dir
- if cpath.exists(src):
+ if os.path.exists(src):
dst = self.image_rootfs + dir
bb.utils.mkdirhier(os.path.dirname(dst))
shutil.copytree(src, dst)
# Copy files with suffix '.debug' or located in '.debug' dir.
- for root, dirs, files in cpath.walk(self.image_rootfs + '-orig'):
+ for root, dirs, files in os.walk(self.image_rootfs + '-orig'):
relative_dir = root[len(self.image_rootfs + '-orig'):]
for f in files:
if f.endswith('.debug') or '/.debug' in relative_dir:
@@ -142,25 +172,33 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note(" Install complementary '*-dbg' packages...")
self.pm.install_complementary('*-dbg')
+ if self.d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ bb.note(" Install complementary '*-src' packages...")
+ self.pm.install_complementary('*-src')
+
+ """
+        Install additional debug packages: packages that are not automatically
+        installed as the complementary package of a standard one, e.g. the
+        debug package of a static library.
+ """
+ extra_debug_pkgs = self.d.getVar('IMAGE_INSTALL_DEBUGFS')
+ if extra_debug_pkgs:
+ bb.note(" Install extra debug packages...")
+ self.pm.install(extra_debug_pkgs.split(), True)
+
bb.note(" Rename debug rootfs...")
try:
shutil.rmtree(self.image_rootfs + '-dbg')
except:
pass
- os.rename(self.image_rootfs, self.image_rootfs + '-dbg')
+ bb.utils.rename(self.image_rootfs, self.image_rootfs + '-dbg')
- bb.note(" Restoreing original rootfs...")
- os.rename(self.image_rootfs + '-orig', self.image_rootfs)
+ bb.note(" Restoring original rootfs...")
+ bb.utils.rename(self.image_rootfs + '-orig', self.image_rootfs)
def _exec_shell_cmd(self, cmd):
- fakerootcmd = self.d.getVar('FAKEROOT', True)
- if fakerootcmd is not None:
- exec_cmd = [fakerootcmd, cmd]
- else:
- exec_cmd = cmd
-
try:
- subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT)
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))
@@ -168,27 +206,9 @@ class Rootfs(object, metaclass=ABCMeta):
def create(self):
bb.note("###### Generate rootfs #######")
- pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND", True)
- post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND", True)
- rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND', True)
-
- postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR", True)
- if not postinst_intercepts_dir:
- postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
- intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
- "intercept_scripts")
-
- bb.utils.remove(intercepts_dir, True)
-
- bb.utils.mkdirhier(self.image_rootfs)
-
- bb.utils.mkdirhier(self.deploy_dir_image)
-
- shutil.copytree(postinst_intercepts_dir, intercepts_dir)
-
- shutil.copy(self.d.expand("${COREBASE}/meta/files/deploydir_readme.txt"),
- self.deploy_dir_image +
- "/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt")
+ pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND")
+ post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
+ rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
execute_pre_post_process(self.d, pre_process_cmds)
@@ -198,14 +218,14 @@ class Rootfs(object, metaclass=ABCMeta):
# call the package manager dependent create method
self._create()
- sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir', True)
+ sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir')
bb.utils.mkdirhier(sysconfdir)
with open(sysconfdir + "/version", "w+") as ver:
- ver.write(self.d.getVar('BUILDNAME', True) + "\n")
+ ver.write(self.d.getVar('BUILDNAME') + "\n")
execute_pre_post_process(self.d, rootfs_post_install_cmds)
- self._run_intercepts()
+ self.pm.run_intercepts()
execute_pre_post_process(self.d, post_process_cmds)
@@ -213,6 +233,9 @@ class Rootfs(object, metaclass=ABCMeta):
self.progress_reporter.next_stage()
if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d) and \
+ not bb.utils.contains("IMAGE_FEATURES",
+ "read-only-rootfs-delayed-postinsts",
True, False, self.d):
delayed_postinsts = self._get_delayed_postinsts()
if delayed_postinsts is not None:
@@ -220,7 +243,7 @@ class Rootfs(object, metaclass=ABCMeta):
"offline and rootfs is read-only: %s" %
delayed_postinsts)
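
The feature tests above rely on bb.utils.contains(). A pure-Python stand-in for the check, taking the already-expanded variable value instead of a datastore, behaves like this:

def contains(variable_value, checkvalues, truevalue, falsevalue):
    # True branch only if every requested word is present in the variable.
    present = set((variable_value or "").split())
    return truevalue if set(checkvalues.split()).issubset(present) else falsevalue

# contains("read-only-rootfs debug-tweaks", "read-only-rootfs", True, False) -> True
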
- if self.d.getVar('USE_DEVFS', True) != "1":
+ if self.d.getVar('USE_DEVFS') != "1":
self._create_devfs()
self._uninstall_unneeded()
@@ -232,7 +255,7 @@ class Rootfs(object, metaclass=ABCMeta):
self._run_ldconfig()
- if self.d.getVar('USE_DEPMOD', True) != "0":
+ if self.d.getVar('USE_DEPMOD') != "0":
self._generate_kernel_module_deps()
self._cleanup()
@@ -243,24 +266,36 @@ class Rootfs(object, metaclass=ABCMeta):
def _uninstall_unneeded(self):
- # Remove unneeded init script symlinks
+ # Remove the run-postinsts package if no delayed postinsts are found
delayed_postinsts = self._get_delayed_postinsts()
if delayed_postinsts is None:
- if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
- self._exec_shell_cmd(["update-rc.d", "-f", "-r",
- self.d.getVar('IMAGE_ROOTFS', True),
- "run-postinsts", "remove"])
+ if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")) or os.path.exists(self.d.expand("${IMAGE_ROOTFS}${systemd_system_unitdir}/run-postinsts.service")):
+ self.pm.remove(["run-postinsts"])
image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
True, False, self.d)
- if image_rorfs:
+ image_rorfs_force = self.d.getVar('FORCE_RO_REMOVE')
+
+ if image_rorfs or image_rorfs_force == "1":
# Remove components that we don't need if it's a read-only rootfs
- unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED", True).split()
+ unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED").split()
pkgs_installed = image_list_installed_packages(self.d)
- pkgs_to_remove = [pkg for pkg in pkgs_installed if pkg in unneeded_pkgs]
-
+ # Make sure update-alternatives is removed last. This is
+            # because its database has to be available while uninstalling
+ # other packages, allowing alternative symlinks of packages
+ # to be uninstalled or to be managed correctly otherwise.
+ provider = self.d.getVar("VIRTUAL-RUNTIME_update-alternatives")
+ pkgs_to_remove = sorted([pkg for pkg in pkgs_installed if pkg in unneeded_pkgs], key=lambda x: x == provider)
+
+            # The update-alternatives provider is removed in its own remove()
+            # call because not all package managers guarantee that packages
+            # are removed in the order they are given in the list (which is
+            # passed to the command line). The sorting done earlier is
+            # used to implement the 2-stage removal.
+ if len(pkgs_to_remove) > 1:
+ self.pm.remove(pkgs_to_remove[:-1], False)
if len(pkgs_to_remove) > 0:
- self.pm.remove(pkgs_to_remove, False)
+ self.pm.remove([pkgs_to_remove[-1]], False)
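
A compact sketch of the ordering trick used above (illustrative names): sorting on a boolean key pushes the update-alternatives provider to the end of the list, and that final element is then removed in a separate call.

def removal_batches(pkgs_installed, unneeded_pkgs, alternatives_provider):
    ordered = sorted((p for p in pkgs_installed if p in unneeded_pkgs),
                     key=lambda p: p == alternatives_provider)
    return ordered[:-1], ordered[-1:]  # (everything else, provider last)

# removal_batches(["shadow", "update-alternatives-opkg", "busybox-syslog"],
#                 {"shadow", "update-alternatives-opkg"},
#                 "update-alternatives-opkg")
# -> (["shadow"], ["update-alternatives-opkg"])
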
if delayed_postinsts:
self._save_postinsts()
@@ -268,7 +303,7 @@ class Rootfs(object, metaclass=ABCMeta):
bb.warn("There are post install scripts "
"in a read-only rootfs")
- post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND", True)
+ post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND")
execute_pre_post_process(self.d, post_uninstall_cmds)
runtime_pkgmanage = bb.utils.contains("IMAGE_FEATURES", "package-management",
@@ -277,53 +312,26 @@ class Rootfs(object, metaclass=ABCMeta):
# Remove the package manager data files
self.pm.remove_packaging_data()
- def _run_intercepts(self):
- intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
- "intercept_scripts")
-
- bb.note("Running intercept scripts:")
- os.environ['D'] = self.image_rootfs
- os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE', True)
- for script in os.listdir(intercepts_dir):
- script_full = os.path.join(intercepts_dir, script)
-
- if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
- continue
-
- bb.note("> Executing %s intercept ..." % script)
-
- try:
- subprocess.check_call(script_full)
- except subprocess.CalledProcessError as e:
- bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details!" %
- (script, e.returncode))
-
- with open(script_full) as intercept:
- registered_pkgs = None
- for line in intercept.read().split("\n"):
- m = re.match("^##PKGS:(.*)", line)
- if m is not None:
- registered_pkgs = m.group(1).strip()
- break
-
- if registered_pkgs is not None:
- bb.warn("The postinstalls for the following packages "
- "will be postponed for first boot: %s" %
- registered_pkgs)
-
- # call the backend dependent handler
- self._handle_intercept_failure(registered_pkgs)
-
def _run_ldconfig(self):
- if self.d.getVar('LDCONFIGDEPEND', True):
- bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v")
+ if self.d.getVar('LDCONFIGDEPEND'):
+ bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v -X")
self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
- 'new', '-v'])
+ 'new', '-v', '-X'])
+
+ image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d)
+ ldconfig_in_features = bb.utils.contains("DISTRO_FEATURES", "ldconfig",
+ True, False, self.d)
+ if image_rorfs or not ldconfig_in_features:
+ ldconfig_cache_dir = os.path.join(self.image_rootfs, "var/cache/ldconfig")
+ if os.path.exists(ldconfig_cache_dir):
+ bb.note("Removing ldconfig auxiliary cache...")
+ shutil.rmtree(ldconfig_cache_dir)
def _check_for_kernel_modules(self, modules_dir):
for root, dirs, files in os.walk(modules_dir, topdown=True):
for name in files:
- found_ko = name.endswith(".ko")
+ found_ko = name.endswith((".ko", ".ko.gz", ".ko.xz", ".ko.zst"))
if found_ko:
return found_ko
return False
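
Because str.endswith() accepts a tuple, the single directory walk now also detects compressed modules. Standalone, the check looks like:

import os

def has_kernel_modules(modules_dir):
    for root, dirs, files in os.walk(modules_dir, topdown=True):
        if any(f.endswith((".ko", ".ko.gz", ".ko.xz", ".ko.zst")) for f in files):
            return True
    return False
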
@@ -335,17 +343,29 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note("No Kernel Modules found, not running depmod")
return
- kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR', True), "kernel-depmod",
- 'kernel-abiversion')
- if not os.path.exists(kernel_abi_ver_file):
- bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
+ pkgdatadir = self.d.getVar('PKGDATA_DIR')
- kernel_ver = open(kernel_abi_ver_file).read().strip(' \n')
- versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)
+ # PKGDATA_DIR can include multiple kernels so we run depmod for each
+ # one of them.
+ for direntry in os.listdir(pkgdatadir):
+ match = re.match('(.*)-depmod', direntry)
+ if not match:
+ continue
+ kernel_package_name = match.group(1)
- bb.utils.mkdirhier(versioned_modules_dir)
+ kernel_abi_ver_file = oe.path.join(pkgdatadir, direntry, kernel_package_name + '-abiversion')
+ if not os.path.exists(kernel_abi_ver_file):
+ bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
- self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver])
+ with open(kernel_abi_ver_file) as f:
+ kernel_ver = f.read().strip(' \n')
+
+ versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)
+
+ bb.utils.mkdirhier(versioned_modules_dir)
+
+ bb.note("Running depmodwrapper for %s ..." % versioned_modules_dir)
+ self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver, kernel_package_name])
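
A self-contained sketch of the discovery loop above: every '<kernel>-depmod' directory under PKGDATA_DIR is expected to carry a '<kernel>-abiversion' file naming the kernel version that depmod runs against.

import os
import re

def kernel_versions_for_depmod(pkgdatadir):
    versions = {}
    for entry in os.listdir(pkgdatadir):
        m = re.match(r'(.*)-depmod$', entry)
        if not m:
            continue
        abiver = os.path.join(pkgdatadir, entry, m.group(1) + '-abiversion')
        if os.path.exists(abiver):
            with open(abiver) as f:
                versions[m.group(1)] = f.read().strip(' \n')
    return versions  # e.g. {"kernel": "6.6.21"} (example values)
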
"""
Create devfs:
@@ -357,669 +377,55 @@ class Rootfs(object, metaclass=ABCMeta):
"""
def _create_devfs(self):
devtable_list = []
- devtable = self.d.getVar('IMAGE_DEVICE_TABLE', True)
+ devtable = self.d.getVar('IMAGE_DEVICE_TABLE')
if devtable is not None:
devtable_list.append(devtable)
else:
- devtables = self.d.getVar('IMAGE_DEVICE_TABLES', True)
+ devtables = self.d.getVar('IMAGE_DEVICE_TABLES')
if devtables is None:
devtables = 'files/device_table-minimal.txt'
for devtable in devtables.split():
- devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH', True), devtable))
+ devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH'), devtable))
for devtable in devtable_list:
self._exec_shell_cmd(["makedevs", "-r",
self.image_rootfs, "-D", devtable])
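
The device tables are looked up against BBPATH with bb.utils.which(); a plain-Python equivalent of that lookup (first match wins, empty string if nothing is found) is roughly:

import os

def which_in_bbpath(bbpath, relpath):
    for entry in bbpath.split(":"):
        candidate = os.path.join(entry, relpath)
        if os.path.exists(candidate):
            return candidate
    return ""
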
-class RpmRootfs(Rootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None):
- super(RpmRootfs, self).__init__(d, progress_reporter)
- self.log_check_regex = '(unpacking of archive failed|Cannot find package'\
- '|exit 1|ERROR: |Error: |Error |ERROR '\
- '|Failed |Failed: |Failed$|Failed\(\d+\):)'
- self.manifest = RpmManifest(d, manifest_dir)
-
- self.pm = RpmPM(d,
- d.getVar('IMAGE_ROOTFS', True),
- self.d.getVar('TARGET_VENDOR', True)
- )
-
- self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN', True)
- if self.inc_rpm_image_gen != "1":
- bb.utils.remove(self.image_rootfs, True)
- else:
- self.pm.recovery_packaging_data()
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
-
- self.pm.create_configs()
-
- '''
- While rpm incremental image generation is enabled, it will remove the
- unneeded pkgs by comparing the new install solution manifest and the
- old installed manifest.
- '''
- def _create_incremental(self, pkgs_initial_install):
- if self.inc_rpm_image_gen == "1":
-
- pkgs_to_install = list()
- for pkg_type in pkgs_initial_install:
- pkgs_to_install += pkgs_initial_install[pkg_type]
-
- installed_manifest = self.pm.load_old_install_solution()
- solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
-
- pkg_to_remove = list()
- for pkg in installed_manifest:
- if pkg not in solution_manifest:
- pkg_to_remove.append(pkg)
-
- self.pm.update()
-
- bb.note('incremental update -- upgrade packages in place ')
- self.pm.upgrade()
- if pkg_to_remove != []:
- bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
- self.pm.remove(pkg_to_remove)
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS', True)
- rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS', True)
-
- # update PM index files
- self.pm.write_index()
-
- execute_pre_post_process(self.d, rpm_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.dump_all_available_pkgs()
-
- if self.inc_rpm_image_gen == "1":
- self._create_incremental(pkgs_to_install)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- pkgs = []
- pkgs_attempt = []
- for pkg_type in pkgs_to_install:
- if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
- pkgs_attempt += pkgs_to_install[pkg_type]
- else:
- pkgs += pkgs_to_install[pkg_type]
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install(pkgs)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install(pkgs_attempt, True)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._setup_dbg_rootfs(['/etc/rpm', '/var/lib/rpm', '/var/lib/smart'])
-
- execute_pre_post_process(self.d, rpm_post_process_cmds)
-
- self._log_check()
-
- if self.inc_rpm_image_gen == "1":
- self.pm.backup_packaging_data()
-
- self.pm.rpm_setup_smart_target_config()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
-
- @staticmethod
- def _depends_list():
- return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
- 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
-
- def _get_delayed_postinsts(self):
- postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
- if os.path.isdir(postinst_dir):
- files = os.listdir(postinst_dir)
- for f in files:
- bb.note('Delayed package scriptlet: %s' % f)
- return files
-
- return None
-
- def _save_postinsts(self):
- # this is just a stub. For RPM, the failed postinstalls are
- # already saved in /etc/rpm-postinsts
- pass
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _handle_intercept_failure(self, registered_pkgs):
- rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
- bb.utils.mkdirhier(rpm_postinsts_dir)
-
- # Save the package postinstalls in /etc/rpm-postinsts
- for pkg in registered_pkgs.split():
- self.pm.save_rpmpostinst(pkg)
-
- def _cleanup(self):
- # during the execution of postprocess commands, rpm is called several
- # times to get the files installed, dependencies, etc. This creates the
- # __db.00* (Berkeley DB files that hold locks, rpm specific environment
- # settings, etc.), that should not get into the final rootfs
- self.pm.unlock_rpm_db()
- if os.path.isdir(self.pm.install_dir_path + "/tmp") and not os.listdir(self.pm.install_dir_path + "/tmp"):
- bb.utils.remove(self.pm.install_dir_path + "/tmp", True)
- if os.path.isdir(self.pm.install_dir_path) and not os.listdir(self.pm.install_dir_path):
- bb.utils.remove(self.pm.install_dir_path, True)
-
-class DpkgOpkgRootfs(Rootfs):
- def __init__(self, d, progress_reporter=None):
- super(DpkgOpkgRootfs, self).__init__(d, progress_reporter)
-
- def _get_pkgs_postinsts(self, status_file):
- def _get_pkg_depends_list(pkg_depends):
- pkg_depends_list = []
- # filter version requirements like libc (>= 1.1)
- for dep in pkg_depends.split(', '):
- m_dep = re.match("^(.*) \(.*\)$", dep)
- if m_dep:
- dep = m_dep.group(1)
- pkg_depends_list.append(dep)
-
- return pkg_depends_list
-
- pkgs = {}
- pkg_name = ""
- pkg_status_match = False
- pkg_depends = ""
-
- with open(status_file) as status:
- data = status.read()
- status.close()
- for line in data.split('\n'):
- m_pkg = re.match("^Package: (.*)", line)
- m_status = re.match("^Status:.*unpacked", line)
- m_depends = re.match("^Depends: (.*)", line)
-
- if m_pkg is not None:
- if pkg_name and pkg_status_match:
- pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
-
- pkg_name = m_pkg.group(1)
- pkg_status_match = False
- pkg_depends = ""
- elif m_status is not None:
- pkg_status_match = True
- elif m_depends is not None:
- pkg_depends = m_depends.group(1)
-
- # remove package dependencies not in postinsts
- pkg_names = list(pkgs.keys())
- for pkg_name in pkg_names:
- deps = pkgs[pkg_name][:]
-
- for d in deps:
- if d not in pkg_names:
- pkgs[pkg_name].remove(d)
-
- return pkgs
-
- def _get_delayed_postinsts_common(self, status_file):
- def _dep_resolve(graph, node, resolved, seen):
- seen.append(node)
-
- for edge in graph[node]:
- if edge not in resolved:
- if edge in seen:
- raise RuntimeError("Packages %s and %s have " \
- "a circular dependency in postinsts scripts." \
- % (node, edge))
- _dep_resolve(graph, edge, resolved, seen)
-
- resolved.append(node)
-
- pkg_list = []
-
- pkgs = None
- if not self.d.getVar('PACKAGE_INSTALL', True).strip():
- bb.note("Building empty image")
- else:
- pkgs = self._get_pkgs_postinsts(status_file)
- if pkgs:
- root = "__packagegroup_postinst__"
- pkgs[root] = list(pkgs.keys())
- _dep_resolve(pkgs, root, pkg_list, [])
- pkg_list.remove(root)
-
- if len(pkg_list) == 0:
- return None
-
- return pkg_list
-
- def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
- num = 0
- for p in self._get_delayed_postinsts():
- bb.utils.mkdirhier(dst_postinst_dir)
-
- if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
- shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
- os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
-
- num += 1
-
-class DpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None):
- super(DpkgRootfs, self).__init__(d, progress_reporter)
- self.log_check_regex = '^E:'
- self.log_check_expected_regexes = \
- [
- "^E: Unmet dependencies."
- ]
-
- bb.utils.remove(self.image_rootfs, True)
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
- self.manifest = DpkgManifest(d, manifest_dir)
- self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS', True),
- d.getVar('PACKAGE_ARCHS', True),
- d.getVar('DPKG_ARCH', True))
-
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS', True)
- deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS', True)
-
- alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
- bb.utils.mkdirhier(alt_dir)
-
- # update PM index files
- self.pm.write_index()
-
- execute_pre_post_process(self.d, deb_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
- # Don't support incremental, so skip that
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- self.pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- if self.progress_reporter:
- # Don't support attemptonly, so skip that
- self.progress_reporter.next_stage()
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._setup_dbg_rootfs(['/var/lib/dpkg'])
-
- self.pm.fix_broken_dependencies()
-
- self.pm.mark_packages("installed")
-
- self.pm.run_pre_post_installs()
-
- execute_pre_post_process(self.d, deb_post_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- @staticmethod
- def _depends_list():
- return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']
-
- def _get_delayed_postinsts(self):
- status_file = self.image_rootfs + "/var/lib/dpkg/status"
- return self._get_delayed_postinsts_common(status_file)
-
- def _save_postinsts(self):
- dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
- src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
- return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
-
- def _handle_intercept_failure(self, registered_pkgs):
- self.pm.mark_packages("unpacked", registered_pkgs.split())
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _cleanup(self):
- pass
-
-
-class OpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None):
- super(OpkgRootfs, self).__init__(d, progress_reporter)
- self.log_check_regex = '(exit 1|Collected errors)'
-
- self.manifest = OpkgManifest(d, manifest_dir)
- self.opkg_conf = self.d.getVar("IPKGCONF_TARGET", True)
- self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)
-
- self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN', True) or ""
- if self._remove_old_rootfs():
- bb.utils.remove(self.image_rootfs, True)
- self.pm = OpkgPM(d,
- self.image_rootfs,
- self.opkg_conf,
- self.pkg_archs)
- else:
- self.pm = OpkgPM(d,
- self.image_rootfs,
- self.opkg_conf,
- self.pkg_archs)
- self.pm.recover_packaging_data()
-
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
-
- def _prelink_file(self, root_dir, filename):
- bb.note('prelink %s in %s' % (filename, root_dir))
- prelink_cfg = oe.path.join(root_dir,
- self.d.expand('${sysconfdir}/prelink.conf'))
- if not os.path.exists(prelink_cfg):
- shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
- prelink_cfg)
-
- cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
- self._exec_shell_cmd([cmd_prelink,
- '--root',
- root_dir,
- '-amR',
- '-N',
- '-c',
- self.d.expand('${sysconfdir}/prelink.conf')])
-
- '''
- Compare two files with the same key twice to see if they are equal.
- If they are not equal, it means they are duplicated and come from
- different packages.
- 1st: Comapre them directly;
- 2nd: While incremental image creation is enabled, one of the
- files could be probaly prelinked in the previous image
- creation and the file has been changed, so we need to
- prelink the other one and compare them.
- '''
- def _file_equal(self, key, f1, f2):
-
- # Both of them are not prelinked
- if filecmp.cmp(f1, f2):
- return True
-
- if self.image_rootfs not in f1:
- self._prelink_file(f1.replace(key, ''), f1)
-
- if self.image_rootfs not in f2:
- self._prelink_file(f2.replace(key, ''), f2)
-
- # Both of them are prelinked
- if filecmp.cmp(f1, f2):
- return True
-
- # Not equal
- return False
-
- """
- This function was reused from the old implementation.
- See commit: "image.bbclass: Added variables for multilib support." by
- Lianhao Lu.
- """
- def _multilib_sanity_test(self, dirs):
-
- allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP", True)
- if allow_replace is None:
- allow_replace = ""
-
- allow_rep = re.compile(re.sub("\|$", "", allow_replace))
- error_prompt = "Multilib check error:"
-
- files = {}
- for dir in dirs:
- for root, subfolders, subfiles in os.walk(dir):
- for file in subfiles:
- item = os.path.join(root, file)
- key = str(os.path.join("/", os.path.relpath(item, dir)))
-
- valid = True
- if key in files:
- #check whether the file is allow to replace
- if allow_rep.match(key):
- valid = True
- else:
- if os.path.exists(files[key]) and \
- os.path.exists(item) and \
- not self._file_equal(key, files[key], item):
- valid = False
- bb.fatal("%s duplicate files %s %s is not the same\n" %
- (error_prompt, item, files[key]))
-
- #pass the check, add to list
- if valid:
- files[key] = item
-
- def _multilib_test_install(self, pkgs):
- ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS", True)
- bb.utils.mkdirhier(ml_temp)
-
- dirs = [self.image_rootfs]
-
- for variant in self.d.getVar("MULTILIB_VARIANTS", True).split():
- ml_target_rootfs = os.path.join(ml_temp, variant)
-
- bb.utils.remove(ml_target_rootfs, True)
-
- ml_opkg_conf = os.path.join(ml_temp,
- variant + "-" + os.path.basename(self.opkg_conf))
-
- ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)
-
- ml_pm.update()
- ml_pm.install(pkgs)
-
- dirs.append(ml_target_rootfs)
-
- self._multilib_sanity_test(dirs)
-
- '''
- While ipk incremental image generation is enabled, it will remove the
- unneeded pkgs by comparing the old full manifest in previous existing
- image and the new full manifest in the current image.
- '''
- def _remove_extra_packages(self, pkgs_initial_install):
- if self.inc_opkg_image_gen == "1":
- # Parse full manifest in previous existing image creation session
- old_full_manifest = self.manifest.parse_full_manifest()
-
- # Create full manifest for the current image session, the old one
- # will be replaced by the new one.
- self.manifest.create_full(self.pm)
-
- # Parse full manifest in current image creation session
- new_full_manifest = self.manifest.parse_full_manifest()
-
- pkg_to_remove = list()
- for pkg in old_full_manifest:
- if pkg not in new_full_manifest:
- pkg_to_remove.append(pkg)
-
- if pkg_to_remove != []:
- bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
- self.pm.remove(pkg_to_remove)
-
- '''
- Compare with previous existing image creation, if some conditions
- triggered, the previous old image should be removed.
- The conditions include any of 'PACKAGE_EXCLUDE, NO_RECOMMENDATIONS
- and BAD_RECOMMENDATIONS' has been changed.
- '''
- def _remove_old_rootfs(self):
- if self.inc_opkg_image_gen != "1":
- return True
-
- vars_list_file = self.d.expand('${T}/vars_list')
-
- old_vars_list = ""
- if os.path.exists(vars_list_file):
- old_vars_list = open(vars_list_file, 'r+').read()
-
- new_vars_list = '%s:%s:%s\n' % \
- ((self.d.getVar('BAD_RECOMMENDATIONS', True) or '').strip(),
- (self.d.getVar('NO_RECOMMENDATIONS', True) or '').strip(),
- (self.d.getVar('PACKAGE_EXCLUDE', True) or '').strip())
- open(vars_list_file, 'w+').write(new_vars_list)
-
- if old_vars_list != new_vars_list:
- return True
-
- return False
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS', True)
- opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS', True)
-
- # update PM index files, unless users provide their own feeds
- if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
- self.pm.write_index()
-
- execute_pre_post_process(self.d, opkg_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
- # Steps are a bit different in order, skip next
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- self.pm.handle_bad_recommendations()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- if self.inc_opkg_image_gen == "1":
- self._remove_extra_packages(pkgs_to_install)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- # For multilib, we perform a sanity test before final install
- # If sanity test fails, it will automatically do a bb.fatal()
- # and the installation will stop
- if pkg_type == Manifest.PKG_TYPE_MULTILIB:
- self._multilib_test_install(pkgs_to_install[pkg_type])
-
- self.pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._setup_dbg_rootfs(['/etc', '/var/lib/opkg', '/usr/lib/ssl'])
-
- execute_pre_post_process(self.d, opkg_post_process_cmds)
-
- if self.inc_opkg_image_gen == "1":
- self.pm.backup_packaging_data()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- @staticmethod
- def _depends_list():
- return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR']
-
- def _get_delayed_postinsts(self):
- status_file = os.path.join(self.image_rootfs,
- self.d.getVar('OPKGLIBDIR', True).strip('/'),
- "opkg", "status")
- return self._get_delayed_postinsts_common(status_file)
-
- def _save_postinsts(self):
- dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
- src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
- return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
-
- def _handle_intercept_failure(self, registered_pkgs):
- self.pm.mark_packages("unpacked", registered_pkgs.split())
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _cleanup(self):
- self.pm.remove_lists()
-
def get_class_for_type(imgtype):
- return {"rpm": RpmRootfs,
- "ipk": OpkgRootfs,
- "deb": DpkgRootfs}[imgtype]
+ import importlib
+ mod = importlib.import_module('oe.package_manager.' + imgtype + '.rootfs')
+ return mod.PkgRootfs
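
With the per-format classes moved into oe.package_manager.<type>.rootfs, backend selection is purely name-based. A generalised sketch of the same pattern (hypothetical helper covering both entry points used in this file):

import importlib

def load_backend(imgtype, submodule=None, attr='PkgRootfs'):
    # e.g. load_backend('ipk', 'rootfs')          -> oe.package_manager.ipk.rootfs.PkgRootfs
    #      load_backend('deb', attr='PMPkgsList') -> oe.package_manager.deb.PMPkgsList
    name = 'oe.package_manager.' + imgtype + ('.' + submodule if submodule else '')
    return getattr(importlib.import_module(name), attr)
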
def variable_depends(d, manifest_dir=None):
- img_type = d.getVar('IMAGE_PKGTYPE', True)
+ img_type = d.getVar('IMAGE_PKGTYPE')
cls = get_class_for_type(img_type)
return cls._depends_list()
-def create_rootfs(d, manifest_dir=None, progress_reporter=None):
+def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None):
env_bkp = os.environ.copy()
- img_type = d.getVar('IMAGE_PKGTYPE', True)
- if img_type == "rpm":
- RpmRootfs(d, manifest_dir, progress_reporter).create()
- elif img_type == "ipk":
- OpkgRootfs(d, manifest_dir, progress_reporter).create()
- elif img_type == "deb":
- DpkgRootfs(d, manifest_dir, progress_reporter).create()
+ img_type = d.getVar('IMAGE_PKGTYPE')
+ cls = get_class_for_type(img_type)
+ cls(d, manifest_dir, progress_reporter, logcatcher).create()
os.environ.clear()
os.environ.update(env_bkp)
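
create_rootfs() snapshots and restores os.environ around the backend run; the same guard can be written as a context manager, shown here only as an equivalent formulation:

import contextlib
import os

@contextlib.contextmanager
def preserved_environ():
    backup = os.environ.copy()
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(backup)
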
def image_list_installed_packages(d, rootfs_dir=None):
+    # There's no rootfs for baremetal images
+ if bb.data.inherits_class('baremetal-image', d):
+ return ""
+
if not rootfs_dir:
- rootfs_dir = d.getVar('IMAGE_ROOTFS', True)
-
- img_type = d.getVar('IMAGE_PKGTYPE', True)
- if img_type == "rpm":
- return RpmPkgsList(d, rootfs_dir).list_pkgs()
- elif img_type == "ipk":
- return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET", True)).list_pkgs()
- elif img_type == "deb":
- return DpkgPkgsList(d, rootfs_dir).list_pkgs()
+ rootfs_dir = d.getVar('IMAGE_ROOTFS')
+
+ img_type = d.getVar('IMAGE_PKGTYPE')
+
+ import importlib
+ cls = importlib.import_module('oe.package_manager.' + img_type)
+ return cls.PMPkgsList(d, rootfs_dir).list_pkgs()
if __name__ == "__main__":
"""