Diffstat (limited to 'meta/lib/oe/utils.py')
-rw-r--r--  meta/lib/oe/utils.py  467
1 file changed, 390 insertions(+), 77 deletions(-)
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
index cee087fdfa..84790b7dff 100644
--- a/meta/lib/oe/utils.py
+++ b/meta/lib/oe/utils.py
@@ -1,9 +1,11 @@
-try:
- # Python 2
- import commands as cmdstatus
-except ImportError:
- # Python 3
- import subprocess as cmdstatus
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import subprocess
+import multiprocessing
+import traceback
+import os
def read_file(filename):
try:
@@ -23,30 +24,37 @@ def ifelse(condition, iftrue = True, iffalse = False):
return iffalse
def conditional(variable, checkvalue, truevalue, falsevalue, d):
- if d.getVar(variable,1) == checkvalue:
+ if d.getVar(variable) == checkvalue:
return truevalue
else:
return falsevalue
+def vartrue(var, iftrue, iffalse, d):
+ import oe.types
+ if oe.types.boolean(d.getVar(var)):
+ return iftrue
+ else:
+ return iffalse
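# Usage sketch for vartrue() (illustrative; "FOO" is a hypothetical variable).
# Unlike bb.utils.contains(), which tests list membership, vartrue() treats the
# whole value as a boolean via oe.types.boolean ("1", "y", "true", ...):
#
#   EXTRA_OECONF += "${@oe.utils.vartrue('FOO', '--enable-foo', '--disable-foo', d)}"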
+
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- if float(d.getVar(variable,1)) <= float(checkvalue):
+ if float(d.getVar(variable)) <= float(checkvalue):
return truevalue
else:
return falsevalue
def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- result = bb.utils.vercmp_string(d.getVar(variable,True), checkvalue)
+ result = bb.utils.vercmp_string(d.getVar(variable), checkvalue)
if result <= 0:
return truevalue
else:
return falsevalue
def both_contain(variable1, variable2, checkvalue, d):
- val1 = d.getVar(variable1, True)
- val2 = d.getVar(variable2, True)
+ val1 = d.getVar(variable1)
+ val2 = d.getVar(variable2)
val1 = set(val1.split())
val2 = set(val2.split())
- if isinstance(checkvalue, basestring):
+ if isinstance(checkvalue, str):
checkvalue = set(checkvalue.split())
else:
checkvalue = set(checkvalue)
@@ -66,41 +74,34 @@ def set_intersect(variable1, variable2, d):
s3 = set_intersect(s1, s2)
=> s3 = "b c"
"""
- val1 = set(d.getVar(variable1, True).split())
- val2 = set(d.getVar(variable2, True).split())
+ val1 = set(d.getVar(variable1).split())
+ val2 = set(d.getVar(variable2).split())
return " ".join(val1 & val2)
def prune_suffix(var, suffixes, d):
# See if var ends with any of the suffixes listed and
# remove it if found
for suffix in suffixes:
- if var.endswith(suffix):
- var = var.replace(suffix, "")
+ if suffix and var.endswith(suffix):
+ var = var[:-len(suffix)]
- prefix = d.getVar("MLPREFIX", True)
+ prefix = d.getVar("MLPREFIX")
if prefix and var.startswith(prefix):
- var = var.replace(prefix, "")
+ var = var[len(prefix):]
return var
def str_filter(f, str, d):
from re import match
- return " ".join(filter(lambda x: match(f, x, 0), str.split()))
+ return " ".join([x for x in str.split() if match(f, x, 0)])
def str_filter_out(f, str, d):
from re import match
- return " ".join(filter(lambda x: not match(f, x, 0), str.split()))
-
-def param_bool(cfg, field, dflt = None):
- """Lookup <field> in <cfg> map and convert it to a boolean; take
- <dflt> when this <field> does not exist"""
- value = cfg.get(field, dflt)
- strvalue = str(value).lower()
- if strvalue in ('yes', 'y', 'true', 't', '1'):
- return True
- elif strvalue in ('no', 'n', 'false', 'f', '0'):
- return False
- raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
+ return " ".join([x for x in str.split() if not match(f, x, 0)])
+
+def build_depends_string(depends, task):
+ """Append a taskname to a string of dependencies as used by the [depends] flag"""
+ return " ".join(dep + ":" + task for dep in depends.split())
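# Behaviour sketch for build_depends_string() (values are illustrative):
#
#   build_depends_string("virtual/kernel glibc", "do_populate_sysroot")
#   # -> "virtual/kernel:do_populate_sysroot glibc:do_populate_sysroot"
#   # i.e. the form consumed by the [depends] varflag.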
def inherits(d, *classes):
"""Return True if the metadata inherits any of the specified classes"""
@@ -115,9 +116,9 @@ def features_backfill(var,d):
# disturbing distributions that have already set DISTRO_FEATURES.
# Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should
# add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED
- features = (d.getVar(var, True) or "").split()
- backfill = (d.getVar(var+"_BACKFILL", True) or "").split()
- considered = (d.getVar(var+"_BACKFILL_CONSIDERED", True) or "").split()
+ features = (d.getVar(var) or "").split()
+ backfill = (d.getVar(var+"_BACKFILL") or "").split()
+ considered = (d.getVar(var+"_BACKFILL_CONSIDERED") or "").split()
addfeatures = []
for feature in backfill:
@@ -127,24 +128,110 @@ def features_backfill(var,d):
if addfeatures:
d.appendVar(var, " " + " ".join(addfeatures))
+def all_distro_features(d, features, truevalue="1", falsevalue=""):
+ """
+ Returns truevalue if *all* given features are set in DISTRO_FEATURES,
+    else falsevalue. The features can be given as a single string or anything
+ that can be turned into a set.
+
+ This is a shorter, more flexible version of
+ bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d).
+
+ Without explicit true/false values it can be used directly where
+ Python expects a boolean:
+ if oe.utils.all_distro_features(d, "foo bar"):
+ bb.fatal("foo and bar are mutually exclusive DISTRO_FEATURES")
+
+ With just a truevalue, it can be used to include files that are meant to be
+ used only when requested via DISTRO_FEATURES:
+    require ${@ oe.utils.all_distro_features(d, "foo bar", "foo-and-bar.inc")}
+ """
+ return bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d)
+
+def any_distro_features(d, features, truevalue="1", falsevalue=""):
+ """
+ Returns truevalue if at least *one* of the given features is set in DISTRO_FEATURES,
+    else falsevalue. The features can be given as a single string or anything
+ that can be turned into a set.
+
+ This is a shorter, more flexible version of
+ bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d).
+
+ Without explicit true/false values it can be used directly where
+ Python expects a boolean:
+ if not oe.utils.any_distro_features(d, "foo bar"):
+ bb.fatal("foo, bar or both must be set in DISTRO_FEATURES")
+
+ With just a truevalue, it can be used to include files that are meant to be
+ used only when requested via DISTRO_FEATURES:
+    require ${@ oe.utils.any_distro_features(d, "foo bar", "foo-or-bar.inc")}
+
+    """
+ return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
+
+def parallel_make(d, makeinst=False):
+ """
+ Return the integer value for the number of parallel threads to use when
+ building, scraped out of PARALLEL_MAKE. If no parallelization option is
+    found, returns an empty string.
+
+ e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
+ """
+ if makeinst:
+ pm = (d.getVar('PARALLEL_MAKEINST') or '').split()
+ else:
+ pm = (d.getVar('PARALLEL_MAKE') or '').split()
+ # look for '-j' and throw other options (e.g. '-l') away
+ while pm:
+ opt = pm.pop(0)
+ if opt == '-j':
+ v = pm.pop(0)
+ elif opt.startswith('-j'):
+ v = opt[2:].strip()
+ else:
+ continue
+
+ return int(v)
+
+ return ''
+
+def parallel_make_argument(d, fmt, limit=None, makeinst=False):
+ """
+ Helper utility to construct a parallel make argument from the number of
+ parallel threads specified in PARALLEL_MAKE.
+
+ Returns the input format string `fmt` where a single '%d' will be expanded
+ with the number of parallel threads to use. If `limit` is specified, the
+ number of parallel threads will be no larger than it. If no parallelization
+    option is found in PARALLEL_MAKE, returns an empty string.
+
+ e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return
+ "-n 10"
+ """
+ v = parallel_make(d, makeinst)
+ if v:
+ if limit:
+ v = min(limit, v)
+ return fmt % v
+ return ''
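# Sketch of the two helpers together, assuming PARALLEL_MAKE = "-j 10" in d:
#
#   oe.utils.parallel_make(d)                             # -> 10
#   oe.utils.parallel_make_argument(d, "-n %d", limit=4)  # -> "-n 4"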
def packages_filter_out_system(d):
"""
Return a list of packages from PACKAGES with the "system" packages such as
PN-dbg PN-doc PN-locale-en-gb removed.
"""
- pn = d.getVar('PN', True)
- blacklist = map(lambda suffix: pn + suffix, ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev'))
+ pn = d.getVar('PN')
+ blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')]
localepkg = pn + "-locale-"
pkgs = []
- for pkg in d.getVar('PACKAGES', True).split():
+ for pkg in d.getVar('PACKAGES').split():
if pkg not in blacklist and localepkg not in pkg:
pkgs.append(pkg)
return pkgs
def getstatusoutput(cmd):
- return cmdstatus.getstatusoutput(cmd)
+ return subprocess.getstatusoutput(cmd)
def trim_version(version, num_parts=2):
@@ -161,9 +248,9 @@ def trim_version(version, num_parts=2):
trimmed = ".".join(parts[:num_parts])
return trimmed
-def cpu_count():
- import multiprocessing
- return multiprocessing.cpu_count()
+def cpu_count(at_least=1, at_most=64):
+ cpus = len(os.sched_getaffinity(0))
+ return max(min(cpus, at_most), at_least)
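# cpu_count() now honours the process affinity mask (taskset, cgroups) rather
# than the raw core count: with an 8-core mask and the defaults it returns
# max(min(8, 64), 1) == 8; cpu_count(at_most=4) would clamp that to 4.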
def execute_pre_post_process(d, cmds):
if cmds is None:
@@ -175,51 +262,229 @@ def execute_pre_post_process(d, cmds):
bb.note("Executing %s ..." % cmd)
bb.build.exec_func(cmd, d)
-def multiprocess_exec(commands, function):
- import signal
- import multiprocessing
+# For each item in items, call the function 'target' with item as the first
+# argument, extraargs as the other arguments and handle any exceptions in the
+# parent thread
+def multiprocess_launch(target, items, d, extraargs=None):
- if not commands:
- return []
+ class ProcessLaunch(multiprocessing.Process):
+ def __init__(self, *args, **kwargs):
+ multiprocessing.Process.__init__(self, *args, **kwargs)
+ self._pconn, self._cconn = multiprocessing.Pipe()
+ self._exception = None
+ self._result = None
- def init_worker():
- signal.signal(signal.SIGINT, signal.SIG_IGN)
+ def run(self):
+ try:
+ ret = self._target(*self._args, **self._kwargs)
+ self._cconn.send((None, ret))
+ except Exception as e:
+ tb = traceback.format_exc()
+ self._cconn.send((e, tb))
+
+ def update(self):
+ if self._pconn.poll():
+ (e, tb) = self._pconn.recv()
+ if e is not None:
+ self._exception = (e, tb)
+ else:
+ self._result = tb
+
+ @property
+ def exception(self):
+ self.update()
+ return self._exception
+
+ @property
+ def result(self):
+ self.update()
+ return self._result
+
+ max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
+ launched = []
+ errors = []
+ results = []
+ items = list(items)
+ while (items and not errors) or launched:
+ if not errors and items and len(launched) < max_process:
+ args = (items.pop(),)
+ if extraargs is not None:
+ args = args + extraargs
+ p = ProcessLaunch(target=target, args=args)
+ p.start()
+ launched.append(p)
+ for q in launched:
+ # Have to manually call update() to avoid deadlocks. The pipe can be full and
+ # transfer stalled until we try and read the results object but the subprocess won't exit
+ # as it still has data to write (https://bugs.python.org/issue8426)
+ q.update()
+ # The finished processes are joined when calling is_alive()
+ if not q.is_alive():
+ if q.exception:
+ errors.append(q.exception)
+ if q.result:
+ results.append(q.result)
+ launched.remove(q)
+ # Paranoia doesn't hurt
+ for p in launched:
+ p.join()
+ if errors:
+ msg = ""
+ for (e, tb) in errors:
+ if isinstance(e, subprocess.CalledProcessError) and e.output:
+ msg = msg + str(e) + "\n"
+ msg = msg + "Subprocess output:"
+ msg = msg + e.output.decode("utf-8", errors="ignore")
+ else:
+ msg = msg + str(e) + ": " + str(tb) + "\n"
+ bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg)
+ return results
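# Usage sketch (do_work and the items are illustrative): target runs once per
# item in a child process, extraargs must be a tuple, and any child exception
# is re-raised collectively in the parent via bb.fatal():
#
#   def do_work(item, suffix):
#       return item + suffix   # the return value travels back over the pipe
#
#   results = oe.utils.multiprocess_launch(do_work, ["a", "b"], d, extraargs=("-x",))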
+
+def squashspaces(string):
+ import re
+ return re.sub(r"\s+", " ", string).strip()
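# e.g. squashspaces("  a \t b\n\nc ") == "a b c"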
+
+def rprovides_map(pkgdata_dir, pkg_dict):
+ # Map file -> pkg provider
+ rprov_map = {}
+
+ for pkg in pkg_dict:
+ path_to_pkgfile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
+ if not os.path.isfile(path_to_pkgfile):
+ continue
+ with open(path_to_pkgfile) as f:
+ for line in f:
+ if line.startswith('RPROVIDES') or line.startswith('FILERPROVIDES'):
+ # List all components provided by pkg.
+ # Exclude version strings, i.e. those starting with (
+ provides = [x for x in line.split()[1:] if not x.startswith('(')]
+ for prov in provides:
+ if prov in rprov_map:
+ rprov_map[prov].append(pkg)
+ else:
+ rprov_map[prov] = [pkg]
+
+ return rprov_map
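# The runtime-reverse pkgdata files read above are "KEY: value" lines; a
# sketch of the per-line extraction (the sample line is illustrative):
#
#   line = "RPROVIDES: libfoo libfoo-plugin"
#   [x for x in line.split()[1:] if not x.startswith('(')]
#   # -> ['libfoo', 'libfoo-plugin']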
+
+def format_pkg_list(pkg_dict, ret_format=None, pkgdata_dir=None):
+ output = []
+
+ if ret_format == "arch":
+ for pkg in sorted(pkg_dict):
+ output.append("%s %s" % (pkg, pkg_dict[pkg]["arch"]))
+ elif ret_format == "file":
+ for pkg in sorted(pkg_dict):
+ output.append("%s %s %s" % (pkg, pkg_dict[pkg]["filename"], pkg_dict[pkg]["arch"]))
+ elif ret_format == "ver":
+ for pkg in sorted(pkg_dict):
+ output.append("%s %s %s" % (pkg, pkg_dict[pkg]["arch"], pkg_dict[pkg]["ver"]))
+ elif ret_format == "deps":
+ rprov_map = rprovides_map(pkgdata_dir, pkg_dict)
+ for pkg in sorted(pkg_dict):
+ for dep in pkg_dict[pkg]["deps"]:
+ if dep in rprov_map:
+ # There could be multiple providers within the image
+ for pkg_provider in rprov_map[dep]:
+ output.append("%s|%s * %s [RPROVIDES]" % (pkg, pkg_provider, dep))
+ else:
+ output.append("%s|%s" % (pkg, dep))
+ else:
+ for pkg in sorted(pkg_dict):
+ output.append(pkg)
+
+ output_str = '\n'.join(output)
+
+ if output_str:
+ # make sure last line is newline terminated
+ output_str += '\n'
- nproc = min(multiprocessing.cpu_count(), len(commands))
- pool = bb.utils.multiprocessingpool(nproc, init_worker)
- imap = pool.imap(function, commands)
+ return output_str
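# Sketch with a hand-built pkg_dict; the key layout is inferred from the
# lookups above and the values are illustrative:
#
#   pkg_dict = {"busybox": {"arch": "core2-64", "filename": "busybox.rpm", "ver": "1.35.0"}}
#   format_pkg_list(pkg_dict, "arch")   # -> "busybox core2-64\n"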
+
+# Helper function to get the host compiler version
+# Do not assume the compiler is gcc
+def get_host_compiler_version(d, taskcontextonly=False):
+ import re, subprocess
+
+ if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
+ return
+
+ compiler = d.getVar("BUILD_CC")
+ # Get rid of ccache since it is not present when parsing.
+ if compiler.startswith('ccache '):
+ compiler = compiler[7:]
try:
- res = list(imap)
- pool.close()
- pool.join()
- results = []
- for result in res:
- if result is not None:
- results.append(result)
- return results
-
- except KeyboardInterrupt:
- pool.terminate()
- pool.join()
- raise
+ env = os.environ.copy()
+ # datastore PATH does not contain session PATH as set by environment-setup-...
+ # this breaks the install-buildtools use-case
+ # env["PATH"] = d.getVar("PATH")
+ output = subprocess.check_output("%s --version" % compiler, \
+ shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
-def squashspaces(string):
- import re
- return re.sub("\s+", " ", string).strip()
+ match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
+ if not match:
+ bb.fatal("Can't get compiler version from %s --version output" % compiler)
+
+ version = match.group(1)
+ return compiler, version
+
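# Return-value sketch, assuming BUILD_CC resolves to the host gcc (the exact
# version is host-dependent):
#
#   compiler, version = oe.utils.get_host_compiler_version(d)
#   # e.g. ("gcc", "9.3") -- the regex keeps only major.minor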
+
+def host_gcc_version(d, taskcontextonly=False):
+ import re, subprocess
+
+ if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
+ return
+
+ compiler = d.getVar("BUILD_CC")
+ # Get rid of ccache since it is not present when parsing.
+ if compiler.startswith('ccache '):
+ compiler = compiler[7:]
+ try:
+ env = os.environ.copy()
+ env["PATH"] = d.getVar("PATH")
+ output = subprocess.check_output("%s --version" % compiler, \
+ shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
+
+ match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
+ if not match:
+ bb.fatal("Can't get compiler version from %s --version output" % compiler)
+
+ version = match.group(1)
+ return "-%s" % version if version in ("4.8", "4.9") else ""
+
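# host_gcc_version() only reports a suffix for the two releases that need
# special-casing: "4.8" -> "-4.8", "4.9" -> "-4.9", anything else -> "".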
+
+def get_multilib_datastore(variant, d):
+ localdata = bb.data.createCopy(d)
+ if variant:
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", variant + "-")
+ else:
+ origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
+ if origdefault:
+ localdata.setVar("DEFAULTTUNE", origdefault)
+ overrides = localdata.getVar("OVERRIDES", False).split(":")
+ overrides = ":".join([x for x in overrides if not x.startswith("virtclass-multilib-")])
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", "")
+ return localdata
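# Usage sketch ("lib32" is illustrative): the returned copy carries the
# multilib override and prefix while d itself is left untouched:
#
#   localdata = oe.utils.get_multilib_datastore("lib32", d)
#   localdata.getVar("MLPREFIX")   # -> "lib32-"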
#
# Python 2.7 doesn't have threaded pools (just multiprocessing)
# so implement a version here
#
-from Queue import Queue
+from queue import Queue
from threading import Thread
class ThreadedWorker(Thread):
"""Thread executing tasks from a given tasks queue"""
- def __init__(self, tasks, worker_init, worker_end):
- Thread.__init__(self)
+ def __init__(self, tasks, worker_init, worker_end, name=None):
+ Thread.__init__(self, name=name)
self.tasks = tasks
self.daemon = True
@@ -227,7 +492,7 @@ class ThreadedWorker(Thread):
self.worker_end = worker_end
def run(self):
- from Queue import Empty
+ from queue import Empty
if self.worker_init is not None:
self.worker_init(self)
@@ -242,20 +507,20 @@ class ThreadedWorker(Thread):
try:
func(self, *args, **kargs)
- except Exception, e:
- print e
+ except Exception as e:
+ # Eat all exceptions
+ bb.mainlogger.debug("Worker task raised %s" % e, exc_info=e)
finally:
self.tasks.task_done()
class ThreadedPool:
"""Pool of threads consuming tasks from a queue"""
- def __init__(self, num_workers, num_tasks, worker_init=None,
- worker_end=None):
+ def __init__(self, num_workers, num_tasks, worker_init=None, worker_end=None, name="ThreadedPool-"):
self.tasks = Queue(num_tasks)
self.workers = []
- for _ in range(num_workers):
- worker = ThreadedWorker(self.tasks, worker_init, worker_end)
+ for i in range(num_workers):
+ worker = ThreadedWorker(self.tasks, worker_init, worker_end, name=name + str(i))
self.workers.append(worker)
def start(self):
@@ -271,3 +536,51 @@ class ThreadedPool:
self.tasks.join()
for worker in self.workers:
worker.join()
+
+class ImageQAFailed(Exception):
+ def __init__(self, description, name=None, logfile=None):
+ self.description = description
+ self.name = name
+        self.logfile = logfile
+
+ def __str__(self):
+ msg = 'Function failed: %s' % self.name
+ if self.description:
+ msg = msg + ' (%s)' % self.description
+
+ return msg
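# Sketch of raising it from an image QA check (the names are illustrative):
#
#   raise oe.utils.ImageQAFailed("rootfs exceeds IMAGE_ROOTFS_MAXSIZE",
#                                name="check_rootfs_size")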
+
+def sh_quote(string):
+ import shlex
+ return shlex.quote(string)
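# e.g. sh_quote("a b") == "'a b'"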
+
+def directory_size(root, blocksize=4096):
+ """
+ Calculate the size of the directory, taking into account hard links,
+ rounding up every size to multiples of the blocksize.
+ """
+ def roundup(size):
+ """
+ Round the size up to the nearest multiple of the block size.
+ """
+ import math
+ return math.ceil(size / blocksize) * blocksize
+
+ def getsize(filename):
+ """
+ Get the size of the filename, not following symlinks, taking into
+ account hard links.
+ """
+ stat = os.lstat(filename)
+ if stat.st_ino not in inodes:
+ inodes.add(stat.st_ino)
+ return stat.st_size
+ else:
+ return 0
+
+ inodes = set()
+ total = 0
+ for root, dirs, files in os.walk(root):
+ total += sum(roundup(getsize(os.path.join(root, name))) for name in files)
+ total += roundup(getsize(root))
+ return total
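# Usage sketch (the path is illustrative, the result host-dependent):
#
#   directory_size("/tmp")
#
# Each inode is counted once, so hard links are not double-counted, and every
# size is rounded up to a whole number of 4096-byte blocks.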