author     Richard Purdie <richard.purdie@linuxfoundation.org>  2018-07-19 20:31:35 +0000
committer  Richard Purdie <richard.purdie@linuxfoundation.org>  2018-07-24 11:52:07 +0100
commit     88f0c214e593a45566df5131bda4c946f5ccc8c2 (patch)
tree       373c24f6569fdcfa60cc472ce380cf27e9f97620 /meta/lib/oe/utils.py
parent     1f279cf1ad2f0a20495780b210a987416650f40f (diff)
download   openembedded-core-contrib-88f0c214e593a45566df5131bda4c946f5ccc8c2.tar.gz
utils: Add multiprocess_launch API and testcase
The current methods of spawning processes for parallel execution have issues around collection of results or exceptions. Take the code from package_ipk/deb, make it generic, add a results collection mechanism, fix the exception handling and fold it into a standard library function. Also add a test case which exercises both the success and failure modes of operation, to stop this functionality regressing again.

In particular, compared to multiprocess_exec, this fork-off-the-parent approach means we can pass in the datastore and functions work in the same scope as the parent. This removes some of the complexities found when trying to scale multiprocess_exec to wider use.

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
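As a usage illustration (not part of the commit): a minimal sketch of calling the new API from task context. `compute_hash` is a made-up worker function, and `d` is assumed to be the BitBake datastore available in the surrounding task or recipe code.

    import oe.utils

    def compute_hash(path, algo):
        # Hypothetical worker: receives one item plus the extraargs tuple
        import hashlib
        with open(path, "rb") as f:
            return (path, hashlib.new(algo, f.read()).hexdigest())

    # Results are collected in the parent; any child exception is reported
    # via bb.error() with its traceback, followed by bb.fatal()
    results = oe.utils.multiprocess_launch(compute_hash,
                                           ["/etc/hostname", "/etc/hosts"],
                                           d, extraargs=("sha256",))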
Diffstat (limited to 'meta/lib/oe/utils.py')
-rw-r--r--  meta/lib/oe/utils.py | 71
1 file changed, 71 insertions(+), 0 deletions(-)
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
index 6aed6dc993..753b577555 100644
--- a/meta/lib/oe/utils.py
+++ b/meta/lib/oe/utils.py
@@ -1,4 +1,7 @@
 import subprocess
+import multiprocessing
+import traceback
+import os
 
 def read_file(filename):
     try:
@@ -280,6 +283,74 @@ def multiprocess_exec(commands, function):
     return results
 
+# For each item in items, call the function 'target' with item as the first
+# argument and extraargs as the remaining arguments, and handle any exceptions
+# in the parent process
+def multiprocess_launch(target, items, d, extraargs=None):
+
+    class ProcessLaunch(multiprocessing.Process):
+        def __init__(self, *args, **kwargs):
+            multiprocessing.Process.__init__(self, *args, **kwargs)
+            self._pconn, self._cconn = multiprocessing.Pipe()
+            self._exception = None
+            self._result = None
+
+        def run(self):
+            try:
+                ret = self._target(*self._args, **self._kwargs)
+                self._cconn.send((None, ret))
+            except Exception as e:
+                tb = traceback.format_exc()
+                self._cconn.send((e, tb))
+
+        def update(self):
+            if self._pconn.poll():
+                (e, val) = self._pconn.recv()
+                if e is not None:
+                    self._exception = (e, val)
+                else:
+                    self._result = val
+
+        @property
+        def exception(self):
+            self.update()
+            return self._exception
+
+        @property
+        def result(self):
+            self.update()
+            return self._result
+
+    max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
+    launched = []
+    errors = []
+    results = []
+    items = list(items)
+    while (items and not errors) or launched:
+        if not errors and items and len(launched) < max_process:
+            args = (items.pop(),)
+            if extraargs is not None:
+                args = args + extraargs
+            p = ProcessLaunch(target=target, args=args)
+            p.start()
+            launched.append(p)
+        for q in list(launched):  # iterate a copy as entries are removed
+            # The finished processes are joined when calling is_alive()
+            if not q.is_alive():
+                if q.exception:
+                    errors.append(q.exception)
+                if q.result:
+                    results.append(q.result)
+                launched.remove(q)
+    # Paranoia doesn't hurt
+    for p in launched:
+        p.join()
+    if errors:
+        for (e, tb) in errors:
+            bb.error(str(tb))
+        bb.fatal("Fatal errors occurred in subprocesses, tracebacks printed above")
+    return results
+
 def squashspaces(string):
     import re
     return re.sub(r"\s+", " ", string).strip()
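For reference, a self-contained sketch of the fork-and-pipe pattern the patch relies on, runnable outside BitBake. The `fails` worker and the direct read of the private `_pconn` attribute are purely illustrative, not part of the commit.

    import multiprocessing
    import traceback

    class ProcessLaunch(multiprocessing.Process):
        # Child ships (exception, traceback) or (None, result) over a pipe
        def __init__(self, *args, **kwargs):
            multiprocessing.Process.__init__(self, *args, **kwargs)
            self._pconn, self._cconn = multiprocessing.Pipe()

        def run(self):
            try:
                self._cconn.send((None, self._target(*self._args, **self._kwargs)))
            except Exception as e:
                self._cconn.send((e, traceback.format_exc()))

    def fails(n):
        raise ValueError("bad item %d" % n)

    if __name__ == "__main__":
        p = ProcessLaunch(target=fails, args=(3,))
        p.start()
        p.join()
        e, tb = p._pconn.recv()
        print("child raised:", repr(e))  # ValueError('bad item 3')
        print(tb)                        # full child traceback, usable in the parent

This mirrors why the commit message favours the fork-off-the-parent approach: the child inherits the parent's scope (including the datastore), and failures surface in the parent with their tracebacks instead of being lost.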