-rwxr-xr-x  bin/bitbake                          3
-rwxr-xr-x  bin/bitbake-diffsigs                12
-rw-r--r--  lib/bb/build.py                     10
-rw-r--r--  lib/bb/cache.py                     25
-rw-r--r--  lib/bb/codeparser.py               336
-rw-r--r--  lib/bb/cooker.py                     7
-rw-r--r--  lib/bb/data.py                      96
-rw-r--r--  lib/bb/data_smart.py                72
-rw-r--r--  lib/bb/event.py                      6
-rw-r--r--  lib/bb/fetch/__init__.py            49
-rw-r--r--  lib/bb/parse/__init__.py             4
-rw-r--r--  lib/bb/parse/ast.py                 18
-rw-r--r--  lib/bb/parse/parse_py/BBHandler.py   6
-rw-r--r--  lib/bb/runqueue.py                 540
-rw-r--r--  lib/bb/siggen.py                   263
-rw-r--r--  lib/bb/ui/knotty.py                 10
-rw-r--r--  lib/bb/utils.py                     51
17 files changed, 1343 insertions, 165 deletions
diff --git a/bin/bitbake b/bin/bitbake
index 63b1d1d12..e14c017c1 100755
--- a/bin/bitbake
+++ b/bin/bitbake
@@ -135,6 +135,9 @@ Default BBFILES are the .bb files in the current directory.""")
parser.add_option("-n", "--dry-run", help = "don't execute, just go through the motions",
action = "store_true", dest = "dry_run", default = False)
+ parser.add_option("-S", "--dump-signatures", help = "don't execute, just dump out the signature construction information",
+ action = "store_true", dest = "dump_signatures", default = False)
+
parser.add_option("-p", "--parse-only", help = "quit after parsing the BB files (developers only)",
action = "store_true", dest = "parse_only", default = False)
diff --git a/bin/bitbake-diffsigs b/bin/bitbake-diffsigs
new file mode 100755
index 000000000..5eb77ce59
--- /dev/null
+++ b/bin/bitbake-diffsigs
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+import os
+import sys
+import warnings
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
+
+import bb.siggen
+
+if len(sys.argv) > 2:
+ bb.siggen.compare_sigfiles(sys.argv[1], sys.argv[2])
+else:
+ bb.siggen.dump_sigfile(sys.argv[1])
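(Usage note: as the argument handling above shows, the helper has two modes. Run it with a single signature file to dump its contents, or with two files to compare them: "bitbake-diffsigs <sigfile>" versus "bitbake-diffsigs <sigfile-a> <sigfile-b>".)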
diff --git a/lib/bb/build.py b/lib/bb/build.py
index 9aa670ac5..dd47405b3 100644
--- a/lib/bb/build.py
+++ b/lib/bb/build.py
@@ -223,15 +223,11 @@ def exec_func_shell(function, d, runfile, logfile, cwd=None, fakeroot=False):
script.write('#!/bin/sh -e\n')
if logger.isEnabledFor(logging.DEBUG):
script.write("set -x\n")
- data.emit_env(script, d)
+ data.emit_func(function, script, d)
script.write("%s\n" % function)
os.fchmod(script.fileno(), 0775)
- env = {
- 'PATH': d.getVar('PATH', True),
- 'LC_ALL': 'C',
- }
if fakeroot:
cmd = ['fakeroot', runfile]
else:
@@ -241,8 +237,7 @@ def exec_func_shell(function, d, runfile, logfile, cwd=None, fakeroot=False):
logfile = LogTee(logger, logfile)
try:
- bb.process.run(cmd, env=env, cwd=cwd, shell=False, stdin=NULL,
- log=logfile)
+ bb.process.run(cmd, cwd=cwd, shell=False, stdin=NULL, log=logfile)
except bb.process.CmdError:
raise FuncFailed(function, logfile.name)
@@ -385,6 +380,7 @@ def add_tasks(tasklist, d):
getTask('rdeptask')
getTask('recrdeptask')
getTask('nostamp')
+ getTask('noexec')
task_deps['parents'][task] = []
for dep in flags['deps']:
dep = data.expand(dep, d)
diff --git a/lib/bb/cache.py b/lib/bb/cache.py
index e960d1e8a..9a2e2d529 100644
--- a/lib/bb/cache.py
+++ b/lib/bb/cache.py
@@ -43,7 +43,7 @@ except ImportError:
logger.info("Importing cPickle failed. "
"Falling back to a very slow implementation.")
-__cache_version__ = "133"
+__cache_version__ = "134"
recipe_fields = (
'pn',
@@ -70,6 +70,9 @@ recipe_fields = (
'nocache',
'variants',
'file_depends',
+ 'tasks',
+ 'basetaskhashes',
+ 'hashfilename',
)
@@ -94,6 +97,11 @@ class RecipeInfo(namedtuple('RecipeInfo', recipe_fields)):
for pkg in packages)
@classmethod
+ def taskvar(cls, var, tasks, metadata):
+ return dict((task, cls.getvar("%s_task-%s" % (var, task), metadata))
+ for task in tasks)
+
+ @classmethod
def getvar(cls, var, metadata):
return metadata.getVar(var, True) or ''
@@ -111,15 +119,23 @@ class RecipeInfo(namedtuple('RecipeInfo', recipe_fields)):
if cls.getvar('__SKIPPED', metadata):
return cls.make_optional(skipped=True)
+ tasks = metadata.getVar('__BBTASKS', False)
+
pn = cls.getvar('PN', metadata)
packages = cls.listvar('PACKAGES', metadata)
if not pn in packages:
packages.append(pn)
+
return RecipeInfo(
+ tasks = tasks,
+ basetaskhashes = cls.taskvar('BB_BASEHASH', tasks, metadata),
+ hashfilename = cls.getvar('BB_HASHFILENAME', metadata),
+
file_depends = metadata.getVar('__depends', False),
task_deps = metadata.getVar('_task_deps', False) or
{'tasks': [], 'parents': {}},
variants = cls.listvar('__VARIANTS', metadata) + [''],
+
skipped = False,
timestamp = bb.parse.cached_mtime(filename),
packages = cls.listvar('PACKAGES', metadata),
@@ -547,6 +563,9 @@ class CacheData(object):
self.task_deps = {}
self.stamp = {}
self.preferred = {}
+ self.tasks = {}
+ self.basetaskhash = {}
+ self.hashfn = {}
"""
Indirect Cache variables
@@ -603,3 +622,7 @@ class CacheData(object):
if not info.broken and not info.not_world:
self.possible_world.append(fn)
+ self.hashfn[fn] = info.hashfilename
+ for task, taskhash in info.basetaskhashes.iteritems():
+ identifier = '%s.%s' % (fn, task)
+ self.basetaskhash[identifier] = taskhash
diff --git a/lib/bb/codeparser.py b/lib/bb/codeparser.py
new file mode 100644
index 000000000..06409319e
--- /dev/null
+++ b/lib/bb/codeparser.py
@@ -0,0 +1,336 @@
+import ast
+import codegen
+import logging
+import os.path
+import bb.utils, bb.data
+from itertools import chain
+from pysh import pyshyacc, pyshlex
+
+
+logger = logging.getLogger('BitBake.CodeParser')
+PARSERCACHE_VERSION = 2
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+ logger.info('Importing cPickle failed. Falling back to a very slow implementation.')
+
+
+def check_indent(codestr):
+ """If the code is indented, add a top level piece of code to 'remove' the indentation"""
+
+ i = 0
+ while codestr[i] in ["\n", "\t", " "]:
+ i = i + 1
+
+ if i == 0:
+ return codestr
+
+ if codestr[i-1] == "\t" or codestr[i-1] == " ":
+ return "if 1:\n" + codestr
+
+ return codestr
+
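As a standalone illustration of why the "if 1:" prefix works (a minimal sketch, independent of BitBake): compile() rejects code whose first line is indented, and hoisting it under a dummy block makes it parse again.

    import ast

    snippet = "    x = 1\n    y = 2\n"
    try:
        compile(snippet, "<string>", "exec", ast.PyCF_ONLY_AST)
    except IndentationError:
        print("the indented snippet does not compile on its own")

    # The same snippet parses once wrapped under a dummy block:
    tree = compile("if 1:\n" + snippet, "<string>", "exec", ast.PyCF_ONLY_AST)
    print(type(tree).__name__)  # Module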
+pythonparsecache = {}
+shellparsecache = {}
+
+def parser_cachefile(d):
+ cachedir = (bb.data.getVar("PERSISTENT_DIR", d, True) or
+ bb.data.getVar("CACHE", d, True))
+ if cachedir in [None, '']:
+ return None
+ bb.utils.mkdirhier(cachedir)
+ cachefile = os.path.join(cachedir, "bb_codeparser.dat")
+ logger.debug(1, "Using cache in '%s' for codeparser cache", cachefile)
+ return cachefile
+
+def parser_cache_init(d):
+ global pythonparsecache
+ global shellparsecache
+
+ cachefile = parser_cachefile(d)
+ if not cachefile:
+ return
+
+ try:
+ p = pickle.Unpickler(file(cachefile, "rb"))
+ data, version = p.load()
+ except:
+ return
+
+ if version != PARSERCACHE_VERSION:
+ return
+
+ pythonparsecache = data[0]
+ shellparsecache = data[1]
+
+def parser_cache_save(d):
+ cachefile = parser_cachefile(d)
+ if not cachefile:
+ return
+
+ p = pickle.Pickler(file(cachefile, "wb"), -1)
+ p.dump([[pythonparsecache, shellparsecache], PARSERCACHE_VERSION])
+
+class PythonParser():
+ class ValueVisitor():
+ """Visitor to traverse a python abstract syntax tree and obtain
+ the variables referenced via bitbake metadata APIs, and the external
+ functions called.
+ """
+
+ getvars = ("d.getVar", "bb.data.getVar", "data.getVar")
+ expands = ("d.expand", "bb.data.expand", "data.expand")
+ execs = ("bb.build.exec_func", "bb.build.exec_task")
+
+ @classmethod
+ def _compare_name(cls, strparts, node):
+ """Given a sequence of strings representing a python name,
+ where the last component is the actual Name and the prior
+ elements are Attribute nodes, determine if the supplied node
+ matches.
+ """
+
+ if not strparts:
+ return True
+
+ current, rest = strparts[0], strparts[1:]
+ if isinstance(node, ast.Attribute):
+ if current == node.attr:
+ return cls._compare_name(rest, node.value)
+ elif isinstance(node, ast.Name):
+ if current == node.id:
+ return True
+ return False
+
+ @classmethod
+ def compare_name(cls, value, node):
+ """Convenience function for the _compare_node method, which
+ can accept a string (which is split by '.' for you), or an
+ iterable of strings, in which case it checks to see if any of
+ them match, similar to isinstance.
+ """
+
+ if isinstance(value, basestring):
+ return cls._compare_name(tuple(reversed(value.split("."))),
+ node)
+ else:
+ return any(cls.compare_name(item, node) for item in value)
+
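To see what _compare_name is matching against, here is a standalone sketch (plain ast, no BitBake imports) of the same right-to-left walk over a call's Attribute chain:

    import ast

    call = ast.parse("bb.data.getVar('FOO', d, True)").body[0].value

    def matches(parts, node):
        # parts is the dotted name reversed, e.g. ('getVar', 'data', 'bb')
        if not parts:
            return True
        current, rest = parts[0], parts[1:]
        if isinstance(node, ast.Attribute) and node.attr == current:
            return matches(rest, node.value)
        if isinstance(node, ast.Name) and node.id == current:
            return True
        return False

    print(matches(tuple(reversed("bb.data.getVar".split("."))), call.func))  # True
    print(matches(tuple(reversed("d.getVar".split("."))), call.func))        # False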
+ def __init__(self, value):
+ self.var_references = set()
+ self.var_execs = set()
+ self.direct_func_calls = set()
+ self.var_expands = set()
+ self.value = value
+
+ @classmethod
+ def warn(cls, func, arg):
+ """Warn about calls of bitbake APIs which pass a non-literal
+ argument for the variable name, as we're not able to track such
+ a reference.
+ """
+
+ try:
+ funcstr = codegen.to_source(func)
+ argstr = codegen.to_source(arg)
+ except TypeError:
+ logger.debug(2, 'Failed to convert function and argument to source form')
+ else:
+ logger.debug(1, "Warning: in call to '%s', argumen t'%s' is"
+ "not a literal", funcstr, argstr)
+
+ def visit_Call(self, node):
+ if self.compare_name(self.getvars, node.func):
+ if isinstance(node.args[0], ast.Str):
+ self.var_references.add(node.args[0].s)
+ else:
+ self.warn(node.func, node.args[0])
+ elif self.compare_name(self.expands, node.func):
+ if isinstance(node.args[0], ast.Str):
+ self.var_expands.add(node.args[0].s)
+ elif isinstance(node.args[0], ast.Call) and \
+ self.compare_name(self.getvars, node.args[0].func):
+ pass
+ else:
+ self.warn(node.func, node.args[0])
+ elif self.compare_name(self.execs, node.func):
+ if isinstance(node.args[0], ast.Str):
+ self.var_execs.add(node.args[0].s)
+ else:
+ self.warn(node.func, node.args[0])
+ elif isinstance(node.func, ast.Name):
+ self.direct_func_calls.add(node.func.id)
+ elif isinstance(node.func, ast.Attribute):
+ # We must have a qualified name. Therefore we need
+ # to walk the chain of 'Attribute' nodes to determine
+ # the qualification.
+ attr_node = node.func.value
+ identifier = node.func.attr
+ while isinstance(attr_node, ast.Attribute):
+ identifier = attr_node.attr + "." + identifier
+ attr_node = attr_node.value
+ if isinstance(attr_node, ast.Name):
+ identifier = attr_node.id + "." + identifier
+ self.direct_func_calls.add(identifier)
+
+ def __init__(self):
+ #self.funcdefs = set()
+ self.execs = set()
+ #self.external_cmds = set()
+ self.references = set()
+
+ def parse_python(self, node):
+
+ h = hash(str(node))
+
+ if h in pythonparsecache:
+ self.references = pythonparsecache[h]["refs"]
+ self.execs = pythonparsecache[h]["execs"]
+ return
+
+ code = compile(check_indent(str(node)), "<string>", "exec",
+ ast.PyCF_ONLY_AST)
+
+ visitor = self.ValueVisitor(code)
+ for n in ast.walk(code):
+ if n.__class__.__name__ == "Call":
+ visitor.visit_Call(n)
+
+ self.references.update(visitor.var_references)
+ self.references.update(visitor.var_execs)
+ self.execs = visitor.direct_func_calls
+
+ pythonparsecache[h] = {}
+ pythonparsecache[h]["refs"] = self.references
+ pythonparsecache[h]["execs"] = self.execs
+
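A usage sketch for PythonParser (assuming BitBake's lib directory is on sys.path so bb.codeparser imports; the parsed code string is illustrative):

    from bb.codeparser import PythonParser

    parser = PythonParser()
    parser.parse_python("bb.data.getVar('FOO', d, True)\n"
                        "bb.build.exec_func('do_patch', d)\n"
                        "some_helper(d)\n")
    print(parser.references)  # set(['FOO', 'do_patch']): getVar names plus exec'd functions
    print(parser.execs)       # set(['some_helper']): direct calls to other functions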
+class ShellParser():
+ def __init__(self):
+ self.funcdefs = set()
+ self.allexecs = set()
+ self.execs = set()
+
+ def parse_shell(self, value):
+ """Parse the supplied shell code in a string, returning the external
+ commands it executes.
+ """
+
+ h = hash(str(value))
+
+ if h in shellparsecache:
+ self.execs = shellparsecache[h]["execs"]
+ return self.execs
+
+ try:
+ tokens, _ = pyshyacc.parse(value, eof=True, debug=False)
+ except pyshlex.NeedMore:
+ raise ShellSyntaxError("Unexpected EOF")
+
+ for token in tokens:
+ self.process_tokens(token)
+ self.execs = set(cmd for cmd in self.allexecs if cmd not in self.funcdefs)
+
+ shellparsecache[h] = {}
+ shellparsecache[h]["execs"] = self.execs
+
+ return self.execs
+
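And the shell-side counterpart (same import assumption; note how the locally defined function is excluded from the result):

    from bb.codeparser import ShellParser

    script = """
    do_deploy() {
        install -d ${D}
    }
    do_deploy
    tar czf out.tar.gz files/
    """
    print(ShellParser().parse_shell(script))
    # set(['install', 'tar']): 'do_deploy' is in funcdefs, so it is filtered out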
+ def process_tokens(self, tokens):
+ """Process a supplied portion of the syntax tree as returned by
+ pyshyacc.parse.
+ """
+
+ def function_definition(value):
+ self.funcdefs.add(value.name)
+ return [value.body], None
+
+ def case_clause(value):
+ # Element 0 of each item in the case is the list of patterns, and
+ # Element 1 of each item in the case is the list of commands to be
+ # executed when that pattern matches.
+ words = chain(*[item[0] for item in value.items])
+ cmds = chain(*[item[1] for item in value.items])
+ return cmds, words
+
+ def if_clause(value):
+ main = chain(value.cond, value.if_cmds)
+ rest = value.else_cmds
+ if isinstance(rest, tuple) and rest[0] == "elif":
+ return chain(main, if_clause(rest[1]))
+ else:
+ return chain(main, rest)
+
+ def simple_command(value):
+ return None, chain(value.words, (assign[1] for assign in value.assigns))
+
+ token_handlers = {
+ "and_or": lambda x: ((x.left, x.right), None),
+ "async": lambda x: ([x], None),
+ "brace_group": lambda x: (x.cmds, None),
+ "for_clause": lambda x: (x.cmds, x.items),
+ "function_definition": function_definition,
+ "if_clause": lambda x: (if_clause(x), None),
+ "pipeline": lambda x: (x.commands, None),
+ "redirect_list": lambda x: ([x.cmd], None),
+ "subshell": lambda x: (x.cmds, None),
+ "while_clause": lambda x: (chain(x.condition, x.cmds), None),
+ "until_clause": lambda x: (chain(x.condition, x.cmds), None),
+ "simple_command": simple_command,
+ "case_clause": case_clause,
+ }
+
+ for token in tokens:
+ name, value = token
+ try:
+ more_tokens, words = token_handlers[name](value)
+ except KeyError:
+ raise NotImplementedError("Unsupported token type " + name)
+
+ if more_tokens:
+ self.process_tokens(more_tokens)
+
+ if words:
+ self.process_words(words)
+
+ def process_words(self, words):
+ """Process a set of 'words' in pyshyacc parlance, which includes
+ extraction of executed commands from $() blocks, as well as grabbing
+ the command name argument.
+ """
+
+ words = list(words)
+ for word in list(words):
+ wtree = pyshlex.make_wordtree(word[1])
+ for part in wtree:
+ if not isinstance(part, list):
+ continue
+
+ if part[0] in ('`', '$('):
+ command = pyshlex.wordtree_as_string(part[1:-1])
+ self.parse_shell(command)
+
+ if word[0] in ("cmd_name", "cmd_word"):
+ if word in words:
+ words.remove(word)
+
+ usetoken = False
+ for word in words:
+ if word[0] in ("cmd_name", "cmd_word") or \
+ (usetoken and word[0] == "TOKEN"):
+ if "=" in word[1]:
+ usetoken = True
+ continue
+
+ cmd = word[1]
+ if cmd.startswith("$"):
+ logger.debug(1, "Warning: execution of non-literal"
+ "command '%s'", cmd)
+ elif cmd == "eval":
+ command = " ".join(word for _, word in words[1:])
+ self.parse_shell(command)
+ else:
+ self.allexecs.add(cmd)
+ break
diff --git a/lib/bb/cooker.py b/lib/bb/cooker.py
index c8ca09980..0e341158b 100644
--- a/lib/bb/cooker.py
+++ b/lib/bb/cooker.py
@@ -485,6 +485,7 @@ class BBCooker:
sys.exit(1)
data = self.configuration.data
+ bb.parse.init_parser(data)
for f in files:
data = _parse(f, data)
@@ -522,6 +523,8 @@ class BBCooker:
bb.event.register(var, bb.data.getVar(var, self.configuration.data))
bb.fetch.fetcher_init(self.configuration.data)
+ bb.codeparser.parser_cache_init(self.configuration.data)
+ bb.parse.init_parser(data)
bb.event.fire(bb.event.ConfigParsed(), self.configuration.data)
def handleCollections( self, collections ):
@@ -879,7 +882,7 @@ class BBCooker:
def shutdown(self):
self.state = state.shutdown
-
+
def stop(self):
self.state = state.stop
@@ -956,6 +959,8 @@ class CookerParser(object):
self.pool.terminate()
self.pool.join()
+ bb.codeparser.parser_cache_save(self.cfgdata)
+
sync = threading.Thread(target=self.bb_cache.sync)
sync.start()
multiprocessing.util.Finalize(None, sync.join, exitpriority=-100)
diff --git a/lib/bb/data.py b/lib/bb/data.py
index ef60c2868..8e7031e34 100644
--- a/lib/bb/data.py
+++ b/lib/bb/data.py
@@ -46,6 +46,7 @@ sys.path.insert(0, path)
from itertools import groupby
from bb import data_smart
+from bb import codeparser
import bb
_dict_type = data_smart.DataSmart
@@ -160,10 +161,12 @@ def expandKeys(alterdata, readdata = None):
def inheritFromOS(d):
"""Inherit variables from the environment."""
+ exportlist = bb.utils.preserved_envvars_exported()
for s in os.environ.keys():
try:
setVar(s, os.environ[s], d)
- setVarFlag(s, "export", True, d)
+ if s in exportlist:
+ setVarFlag(s, "export", True, d)
except TypeError:
pass
@@ -231,10 +234,101 @@ def emit_env(o=sys.__stdout__, d = init(), all=False):
for key in keys:
emit_var(key, o, d, all and not isfunc) and o.write('\n')
+def exported_keys(d):
+ return (key for key in d.keys() if not key.startswith('__') and
+ d.getVarFlag(key, 'export') and
+ not d.getVarFlag(key, 'unexport'))
+
+def exported_vars(d):
+ for key in exported_keys(d):
+ try:
+ value = d.getVar(key, True)
+ except Exception:
+ continue
+
+ if value is not None:
+ yield key, str(value)
+
+def emit_func(func, o=sys.__stdout__, d = init()):
+ """Emits all items in the data store in a format such that it can be sourced by a shell."""
+
+ keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func"))
+ for key in keys:
+ emit_var(key, o, d, False) and o.write('\n')
+
+ emit_var(func, o, d, False) and o.write('\n')
+ newdeps = bb.codeparser.ShellParser().parse_shell(d.getVar(func, True))
+ seen = set()
+ while newdeps:
+ deps = newdeps
+ seen |= deps
+ newdeps = set()
+ for dep in deps:
+ if bb.data.getVarFlag(dep, "func", d):
+ emit_var(dep, o, d, False) and o.write('\n')
+ newdeps |= bb.codeparser.ShellParser().parse_shell(d.getVar(dep, True))
+ newdeps -= seen
+
def update_data(d):
"""Performs final steps upon the datastore, including application of overrides"""
d.finalize()
+def build_dependencies(key, keys, shelldeps, d):
+ deps = set()
+ try:
+ if d.getVarFlag(key, "func"):
+ if d.getVarFlag(key, "python"):
+ parsedvar = d.expandWithRefs(d.getVar(key, False), key)
+ parser = bb.codeparser.PythonParser()
+ parser.parse_python(parsedvar.value)
+ deps = deps | parser.references
+ else:
+ parsedvar = d.expandWithRefs(d.getVar(key, False), key)
+ parser = bb.codeparser.ShellParser()
+ parser.parse_shell(parsedvar.value)
+ deps = deps | shelldeps
+ deps = deps | parsedvar.references
+ deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
+ else:
+ parser = d.expandWithRefs(d.getVar(key, False), key)
+ deps |= parser.references
+ deps = deps | (keys & parser.execs)
+ deps |= set((d.getVarFlag(key, "vardeps") or "").split())
+ deps -= set((d.getVarFlag(key, "vardepsexclude") or "").split())
+ except:
+ bb.note("Error expanding variable %s" % key)
+ raise
+ return deps
+ #bb.note("Variable %s references %s and calls %s" % (key, str(deps), str(execs)))
+ #d.setVarFlag(key, "vardeps", deps)
+
+def generate_dependencies(d):
+
+ keys = set(key for key in d.keys() if not key.startswith("__"))
+ shelldeps = set(key for key in keys if d.getVarFlag(key, "export") and not d.getVarFlag(key, "unexport"))
+
+ deps = {}
+ taskdeps = {}
+
+ tasklist = bb.data.getVar('__BBTASKS', d) or []
+ for task in tasklist:
+ deps[task] = build_dependencies(task, keys, shelldeps, d)
+
+ newdeps = deps[task]
+ seen = set()
+ while newdeps:
+ nextdeps = newdeps
+ seen |= nextdeps
+ newdeps = set()
+ for dep in nextdeps:
+ if dep not in deps:
+ deps[dep] = build_dependencies(dep, keys, shelldeps, d)
+ newdeps |= deps[dep]
+ newdeps -= seen
+ taskdeps[task] = seen | newdeps
+ #print "For %s: %s" % (task, str(taskdeps[task]))
+ return taskdeps, deps
+
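Both emit_func above and generate_dependencies rely on the same worklist idiom to take a transitive closure over dependencies. A toy, self-contained sketch of that idiom:

    deps = {"do_build": set(["helper_a"]),
            "helper_a": set(["helper_b"]),
            "helper_b": set()}

    newdeps = deps["do_build"]
    seen = set()
    while newdeps:
        current = newdeps
        seen |= current
        newdeps = set()
        for dep in current:
            newdeps |= deps[dep]
        newdeps -= seen          # never revisit a dependency

    print(sorted(seen))          # ['helper_a', 'helper_b']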
def inherits_class(klass, d):
val = getVar('__inherit_cache', d) or []
if os.path.join('classes', '%s.bbclass' % klass) in val:
diff --git a/lib/bb/data_smart.py b/lib/bb/data_smart.py
index 9b89c5f5a..83e6f70cd 100644
--- a/lib/bb/data_smart.py
+++ b/lib/bb/data_smart.py
@@ -31,7 +31,7 @@ BitBake build tools.
import copy, re
from collections import MutableMapping
import logging
-import bb
+import bb, bb.codeparser
from bb import utils
from bb.COW import COWDictBase
@@ -43,6 +43,40 @@ __expand_var_regexp__ = re.compile(r"\${[^{}]+}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")
+class VariableParse:
+ def __init__(self, varname, d, val = None):
+ self.varname = varname
+ self.d = d
+ self.value = val
+
+ self.references = set()
+ self.execs = set()
+
+ def var_sub(self, match):
+ key = match.group()[2:-1]
+ if self.varname and key:
+ if self.varname == key:
+ raise Exception("variable %s references itself!" % self.varname)
+ var = self.d.getVar(key, 1)
+ if var is not None:
+ self.references.add(key)
+ return var
+ else:
+ return match.group()
+
+ def python_sub(self, match):
+ code = match.group()[3:-1]
+ codeobj = compile(code.strip(), self.varname or "<expansion>", "eval")
+
+ parser = bb.codeparser.PythonParser()
+ parser.parse_python(code)
+ self.references |= parser.references
+ self.execs |= parser.execs
+
+ value = utils.better_eval(codeobj, DataContext(self.d))
+ return str(value)
+
+
class DataContext(dict):
def __init__(self, metadata, **kwargs):
self.metadata = metadata
@@ -66,45 +100,37 @@ class DataSmart(MutableMapping):
self.expand_cache = {}
- def expand(self, s, varname):
- def var_sub(match):
- key = match.group()[2:-1]
- if varname and key:
- if varname == key:
- raise Exception("variable %s references itself!" % varname)
- var = self.getVar(key, 1)
- if var is not None:
- return var
- else:
- return match.group()
-
- def python_sub(match):
- code = match.group()[3:-1]
- codeobj = compile(code.strip(), varname or "<expansion>", "eval")
- value = utils.better_eval(codeobj, DataContext(self))
- return str(value)
+ def expandWithRefs(self, s, varname):
if not isinstance(s, basestring): # sanity check
- return s
+ return VariableParse(varname, self, s)
if varname and varname in self.expand_cache:
return self.expand_cache[varname]
+ varparse = VariableParse(varname, self)
+
while s.find('${') != -1:
olds = s
try:
- s = __expand_var_regexp__.sub(var_sub, s)
- s = __expand_python_regexp__.sub(python_sub, s)
+ s = __expand_var_regexp__.sub(varparse.var_sub, s)
+ s = __expand_python_regexp__.sub(varparse.python_sub, s)
if s == olds:
break
except Exception:
logger.exception("Error evaluating '%s'", s)
raise
+ varparse.value = s
+
if varname:
- self.expand_cache[varname] = s
+ self.expand_cache[varname] = varparse
+
+ return varparse
+
+ def expand(self, s, varname):
+ return self.expandWithRefs(s, varname).value
- return s
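A usage sketch of the new method (hypothetical variable values; d is a fresh datastore):

    import bb.data

    d = bb.data.init()
    d.setVar("BPN", "zlib")
    d.setVar("PV", "1.2.5")
    vp = d.expandWithRefs("${BPN}-${PV}.tar.gz", None)
    print(vp.value)       # zlib-1.2.5.tar.gz
    print(vp.references)  # set(['BPN', 'PV'])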
def finalize(self):
"""Performs final steps upon the datastore, including application of overrides"""
diff --git a/lib/bb/event.py b/lib/bb/event.py
index 387029f7a..1911782da 100644
--- a/lib/bb/event.py
+++ b/lib/bb/event.py
@@ -127,11 +127,7 @@ def fire(event, d):
def worker_fire(event, d):
data = "<event>" + pickle.dumps(event) + "</event>"
- try:
- if os.write(worker_pipe, data) != len (data):
- print("Error sending event to server (short write)")
- except OSError:
- sys.exit(1)
+ worker_pipe.write(data)
def fire_from_worker(event, d):
if not event.startswith("<event>") or not event.endswith("</event>"):
diff --git a/lib/bb/fetch/__init__.py b/lib/bb/fetch/__init__.py
index aa16fb927..91729f684 100644
--- a/lib/bb/fetch/__init__.py
+++ b/lib/bb/fetch/__init__.py
@@ -223,6 +223,42 @@ def removefile(f):
except:
pass
+def verify_checksum(u, ud, d):
+ """
+ Verify the MD5 and SHA256 checksums for the downloaded source.
+
+ return value:
+ - True: checksum matched
+ - False: checksum unmatched
+
+ If the checksum is missing from the recipe, "BB_STRICT_CHECKSUM" decides the
+ return value: if BB_STRICT_CHECKSUM = "1", treat it as unmatched; otherwise
+ treat it as matched.
+ """
+
+ if not ud.type in ["http", "https", "ftp", "ftps"]:
+ return
+
+ md5data = bb.utils.md5_file(ud.localpath)
+ sha256data = bb.utils.sha256_file(ud.localpath)
+
+ if ud.md5_expected is None or ud.sha256_expected is None:
+ logger.warn('Missing SRC_URI checksum for %s, consider adding to the recipe:\n'
+ 'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"',
+ ud.localpath, ud.md5_name, md5data,
+ ud.sha256_name, sha256data)
+ if bb.data.getVar("BB_STRICT_CHECKSUM", d, True) == "1":
+ raise FetchError("No checksum specified for %s." % u)
+ return
+
+ if (ud.md5_expected != md5data or ud.sha256_expected != sha256data):
+ logger.error('The checksums for "%s" did not match.\n'
+ ' MD5: expected "%s", got "%s"\n'
+ ' SHA256: expected "%s", got "%s"\n',
+ ud.localpath, ud.md5_expected, md5data,
+ ud.sha256_expected, sha256data)
+ raise FetchError("%s checksum mismatch." % u)
+
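For reference, bb.utils.md5_file and bb.utils.sha256_file used above amount to streaming hashlib digests; a minimal equivalent sketch (not the actual bb.utils implementation):

    import hashlib

    def checksum_file(path, algorithm):
        # Stream in chunks so large downloads are not read into memory at once.
        h = hashlib.new(algorithm)
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(8192), b""):
                h.update(chunk)
        return h.hexdigest()

    # e.g. checksum_file("foo-1.0.tar.gz", "md5"), checksum_file(..., "sha256")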
def go(d, urls = None):
"""
Fetch all urls
@@ -265,6 +301,7 @@ def go(d, urls = None):
raise FetchError("Unable to fetch URL %s from any source." % u)
ud.localpath = localpath
+
if os.path.exists(ud.md5):
# Touch the md5 file to show active use of the download
try:
@@ -273,6 +310,8 @@ def go(d, urls = None):
# Errors aren't fatal here
pass
else:
+ # Only check the checksums if we've not seen this item before
+ verify_checksum(u, ud, d)
Fetch.write_md5sum(u, ud, d)
bb.utils.unlockfile(lf)
@@ -494,6 +533,16 @@ class FetchData(object):
if not self.pswd and "pswd" in self.parm:
self.pswd = self.parm["pswd"]
self.setup = False
+
+ if "name" in self.parm:
+ self.md5_name = "%s.md5sum" % self.parm["name"]
+ self.sha256_name = "%s.sha256sum" % self.parm["name"]
+ else:
+ self.md5_name = "md5sum"
+ self.sha256_name = "sha256sum"
+ self.md5_expected = bb.data.getVarFlag("SRC_URI", self.md5_name, d)
+ self.sha256_expected = bb.data.getVarFlag("SRC_URI", self.sha256_name, d)
+
for m in methods:
if m.supports(url, self, d):
self.method = m
diff --git a/lib/bb/parse/__init__.py b/lib/bb/parse/__init__.py
index 264d00996..eee8d9cdd 100644
--- a/lib/bb/parse/__init__.py
+++ b/lib/bb/parse/__init__.py
@@ -31,6 +31,7 @@ import stat
import logging
import bb
import bb.utils
+import bb.siggen
logger = logging.getLogger("BitBake.Parsing")
@@ -84,6 +85,9 @@ def init(fn, data):
if h['supports'](fn):
return h['init'](data)
+def init_parser(d):
+ bb.parse.siggen = bb.siggen.init(d)
+
def resolve_file(fn, d):
if not os.path.isabs(fn):
bbpath = bb.data.getVar("BBPATH", d, True)
diff --git a/lib/bb/parse/ast.py b/lib/bb/parse/ast.py
index 860f98b1b..8fffe1e8f 100644
--- a/lib/bb/parse/ast.py
+++ b/lib/bb/parse/ast.py
@@ -142,8 +142,9 @@ class MethodNode(AstNode):
bb.data.setVar(self.func_name, '\n'.join(self.body), data)
class PythonMethodNode(AstNode):
- def __init__(self, filename, lineno, define, body):
+ def __init__(self, filename, lineno, function, define, body):
AstNode.__init__(self, filename, lineno)
+ self.function = function
self.define = define
self.body = body
@@ -151,9 +152,12 @@ class PythonMethodNode(AstNode):
# Note we will add root to parsedmethods after having parsed
# 'this' file. This means we will not parse methods from
# bb classes twice
+ text = '\n'.join(self.body)
if not bb.methodpool.parsed_module(self.define):
- text = '\n'.join(self.body)
bb.methodpool.insert_method(self.define, text, self.filename)
+ bb.data.setVarFlag(self.function, "func", 1, data)
+ bb.data.setVarFlag(self.function, "python", 1, data)
+ bb.data.setVar(self.function, text, data)
class MethodFlagsNode(AstNode):
def __init__(self, filename, lineno, key, m):
@@ -280,8 +284,8 @@ def handleData(statements, filename, lineno, groupd):
def handleMethod(statements, filename, lineno, func_name, body):
statements.append(MethodNode(filename, lineno, func_name, body))
-def handlePythonMethod(statements, filename, lineno, root, body):
- statements.append(PythonMethodNode(filename, lineno, root, body))
+def handlePythonMethod(statements, filename, lineno, funcname, root, body):
+ statements.append(PythonMethodNode(filename, lineno, funcname, root, body))
def handleMethodFlags(statements, filename, lineno, key, m):
statements.append(MethodFlagsNode(filename, lineno, key, m))
@@ -305,7 +309,7 @@ def handleInherit(statements, filename, lineno, m):
classes = m.group(1)
statements.append(InheritNode(filename, lineno, classes.split()))
-def finalize(fn, d):
+def finalize(fn, d, variant = None):
for lazykey in bb.data.getVar("__lazy_assigned", d) or ():
if bb.data.getVar(lazykey, d) is None:
val = bb.data.getVarFlag(lazykey, "defaultval", d)
@@ -328,6 +332,8 @@ def finalize(fn, d):
tasklist = bb.data.getVar('__BBTASKS', d) or []
bb.build.add_tasks(tasklist, d)
+ bb.parse.siggen.finalise(fn, d, variant)
+
bb.event.fire(bb.event.RecipeParsed(fn), d)
def _create_variants(datastores, names, function):
@@ -436,7 +442,7 @@ def multi_finalize(fn, d):
for variant, variant_d in datastores.iteritems():
if variant:
try:
- finalize(fn, variant_d)
+ finalize(fn, variant_d, variant)
except bb.parse.SkipPackage:
bb.data.setVar("__SKIPPED", True, variant_d)
diff --git a/lib/bb/parse/parse_py/BBHandler.py b/lib/bb/parse/parse_py/BBHandler.py
index ab807261a..4a938b911 100644
--- a/lib/bb/parse/parse_py/BBHandler.py
+++ b/lib/bb/parse/parse_py/BBHandler.py
@@ -185,7 +185,8 @@ def feeder(lineno, s, fn, root, statements):
__body__.append(s)
return
else:
- ast.handlePythonMethod(statements, fn, lineno, root, __body__)
+ ast.handlePythonMethod(statements, fn, lineno, __inpython__,
+ root, __body__)
__body__ = []
__inpython__ = False
@@ -212,7 +213,8 @@ def feeder(lineno, s, fn, root, statements):
m = __def_regexp__.match(s)
if m:
__body__.append(s)
- __inpython__ = True
+ __inpython__ = m.group(1)
+
return
m = __export_func_regexp__.match(s)
diff --git a/lib/bb/runqueue.py b/lib/bb/runqueue.py
index c4f716170..5953d1dc3 100644
--- a/lib/bb/runqueue.py
+++ b/lib/bb/runqueue.py
@@ -22,6 +22,7 @@ Handles preparation and execution of a queue of tasks
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+import copy
import os
import sys
import signal
@@ -63,12 +64,14 @@ class RunQueueStats:
# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
-runQueueRunInit = 3
-runQueueRunning = 4
-runQueueFailed = 6
-runQueueCleanUp = 7
-runQueueComplete = 8
-runQueueChildProcess = 9
+runQueueSceneInit = 3
+runQueueSceneRun = 4
+runQueueRunInit = 5
+runQueueRunning = 6
+runQueueFailed = 7
+runQueueCleanUp = 8
+runQueueComplete = 9
+runQueueChildProcess = 10
class RunQueueScheduler(object):
"""
@@ -117,13 +120,12 @@ class RunQueueSchedulerSpeed(RunQueueScheduler):
"""
The priority map is sorted by task weight.
"""
- from copy import deepcopy
self.rq = runqueue
self.rqdata = rqdata
- sortweight = sorted(deepcopy(self.rqdata.runq_weight))
- copyweight = deepcopy(self.rqdata.runq_weight)
+ sortweight = sorted(copy.deepcopy(self.rqdata.runq_weight))
+ copyweight = copy.deepcopy(self.rqdata.runq_weight)
self.prio_map = []
for weight in sortweight:
@@ -145,12 +147,11 @@ class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
def __init__(self, runqueue, rqdata):
RunQueueSchedulerSpeed.__init__(self, runqueue, rqdata)
- from copy import deepcopy
#FIXME - whilst this groups all fnids together it does not reorder the
#fnid groups optimally.
- basemap = deepcopy(self.prio_map)
+ basemap = copy.deepcopy(self.prio_map)
self.prio_map = []
while (len(basemap) > 0):
entry = basemap.pop(0)
@@ -187,6 +188,7 @@ class RunQueueData:
self.runq_task = []
self.runq_depends = []
self.runq_revdeps = []
+ self.runq_hash = []
def runq_depends_names(self, ids):
import re
@@ -283,7 +285,7 @@ class RunQueueData:
if dep in explored_deps[revdep]:
scan = True
if scan:
- find_chains(revdep, deepcopy(prev_chain))
+ find_chains(revdep, copy.deepcopy(prev_chain))
for dep in explored_deps[revdep]:
if dep not in total_deps:
total_deps.append(dep)
@@ -488,6 +490,7 @@ class RunQueueData:
self.runq_task.append(taskData.tasks_name[task])
self.runq_depends.append(set(depends))
self.runq_revdeps.append(set())
+ self.runq_hash.append("")
runq_build.append(0)
runq_recrdepends.append(recrdepends)
@@ -600,6 +603,7 @@ class RunQueueData:
del self.runq_depends[listid-delcount]
del runq_build[listid-delcount]
del self.runq_revdeps[listid-delcount]
+ del self.runq_hash[listid-delcount]
delcount = delcount + 1
maps.append(-1)
@@ -683,6 +687,40 @@ class RunQueueData:
stampfnwhitelist.append(fn)
self.stampfnwhitelist = stampfnwhitelist
+ # Iterate over the task list looking for tasks with a 'setscene' function
+ self.runq_setscene = []
+ for task in range(len(self.runq_fnid)):
+ setscene = taskData.gettask_id(self.taskData.fn_index[self.runq_fnid[task]], self.runq_task[task] + "_setscene", False)
+ if not setscene:
+ continue
+ self.runq_setscene.append(task)
+
+ # Iterate over the task list and call into the siggen code
+ dealtwith = set()
+ todeal = set(range(len(self.runq_fnid)))
+ while len(todeal) > 0:
+ for task in todeal.copy():
+ if len(self.runq_depends[task] - dealtwith) == 0:
+ dealtwith.add(task)
+ todeal.remove(task)
+ procdep = []
+ for dep in self.runq_depends[task]:
+ procdep.append(self.taskData.fn_index[self.runq_fnid[dep]] + "." + self.runq_task[dep])
+ self.runq_hash[task] = bb.parse.siggen.get_taskhash(self.taskData.fn_index[self.runq_fnid[task]], self.runq_task[task], procdep, self.dataCache)
+
+ self.hashes = {}
+ self.hash_deps = {}
+ for task in xrange(len(self.runq_fnid)):
+ identifier = '%s.%s' % (self.taskData.fn_index[self.runq_fnid[task]],
+ self.runq_task[task])
+ self.hashes[identifier] = self.runq_hash[task]
+ deps = []
+ for dep in self.runq_depends[task]:
+ depidentifier = '%s.%s' % (self.taskData.fn_index[self.runq_fnid[dep]],
+ self.runq_task[dep])
+ deps.append(depidentifier)
+ self.hash_deps[identifier] = deps
+
return len(self.runq_fnid)
def dump_data(self, taskQueue):
@@ -716,7 +754,8 @@ class RunQueue:
self.cfgData = cfgData
self.rqdata = RunQueueData(self, cooker, cfgData, dataCache, taskData, targets)
- self.stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, 1) or "perfile"
+ self.stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, True) or "perfile"
+ self.hashvalidate = bb.data.getVar("BB_HASHCHECK_FUNCTION", cfgData, True) or None
self.state = runQueuePrepare
@@ -815,6 +854,13 @@ class RunQueue:
return current
def check_stamp_task(self, task, taskname = None):
+ def get_timestamp(f):
+ try:
+ if not os.access(f, os.F_OK):
+ return None
+ return os.stat(f)[stat.ST_MTIME]
+ except:
+ return None
if self.stamppolicy == "perfile":
fulldeptree = False
@@ -827,7 +873,9 @@ class RunQueue:
fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
if taskname is None:
taskname = self.rqdata.runq_task[task]
- stampfile = "%s.%s" % (self.rqdata.dataCache.stamp[fn], taskname)
+
+ stampfile = bb.parse.siggen.stampfile(self.rqdata.dataCache.stamp[fn], taskname, self.rqdata.runq_hash[task])
+
# If the stamp is missing, it's not current
if not os.access(stampfile, os.F_OK):
logger.debug(2, "Stampfile %s not available", stampfile)
@@ -838,21 +886,27 @@ class RunQueue:
logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
return False
+ if taskname.endswith("_setscene"):
+ return True
+
iscurrent = True
- t1 = os.stat(stampfile)[stat.ST_MTIME]
+ t1 = get_timestamp(stampfile)
for dep in self.rqdata.runq_depends[task]:
if iscurrent:
fn2 = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[dep]]
taskname2 = self.rqdata.runq_task[dep]
- stampfile2 = "%s.%s" % (self.rqdata.dataCache.stamp[fn2], taskname2)
+ stampfile2 = bb.parse.siggen.stampfile(self.rqdata.dataCache.stamp[fn2], taskname2, self.rqdata.runq_hash[dep])
+ stampfile3 = bb.parse.siggen.stampfile(self.rqdata.dataCache.stamp[fn2], taskname2 + "_setscene", self.rqdata.runq_hash[dep])
+ t2 = get_timestamp(stampfile2)
+ t3 = get_timestamp(stampfile3)
+ if t3 and t3 > t2:
+ continue
if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
- try:
- t2 = os.stat(stampfile2)[stat.ST_MTIME]
- if t1 < t2:
- logger.debug(2, "Stampfile %s < %s", stampfile, stampfile2)
- iscurrent = False
- except:
- logger.debug(2, "Exception reading %s for %s", stampfile2, stampfile)
+ if not t2:
+ logger.debug(2, 'Stampfile %s does not exist', stampfile2)
+ iscurrent = False
+ if t1 < t2:
+ logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
iscurrent = False
return iscurrent
@@ -871,15 +925,24 @@ class RunQueue:
if self.rqdata.prepare() is 0:
self.state = runQueueComplete
else:
- self.state = runQueueRunInit
+ self.state = runQueueSceneInit
+
+ if self.state is runQueueSceneInit:
+ if self.cooker.configuration.dump_signatures:
+ self.dump_signatures()
+ else:
+ self.rqexe = RunQueueExecuteScenequeue(self)
+
+ if self.state is runQueueSceneRun:
+ retval = self.rqexe.execute()
if self.state is runQueueRunInit:
- logger.info("Executing runqueue")
+ logger.info("Executing runqueue tasks")
self.rqexe = RunQueueExecuteTasks(self)
self.state = runQueueRunning
if self.state is runQueueRunning:
- self.rqexe.execute()
+ retval = self.rqexe.execute()
if self.state is runQueueCleanUp:
self.rqexe.finish()
@@ -909,6 +972,20 @@ class RunQueue:
else:
self.rqexe.finish()
+ def dump_signatures(self):
+ self.state = runQueueComplete
+ done = set()
+ bb.note("Reparsing files to collect dependency data")
+ for task in range(len(self.rqdata.runq_fnid)):
+ if self.rqdata.runq_fnid[task] not in done:
+ fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
+ the_data = self.cooker.bb_cache.loadDataFull(fn, self.cooker.get_file_appends(fn), self.cooker.configuration.data)
+ done.add(self.rqdata.runq_fnid[task])
+
+ bb.parse.siggen.dump_sigs(self.rqdata.dataCache)
+
+ return
+
class RunQueueExecute:
@@ -944,8 +1021,6 @@ class RunQueueExecute:
self.task_fail(task, result[1]>>8)
else:
self.task_complete(task)
- self.stats.taskCompleted()
- bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
def finish_now(self):
if self.stats.active:
@@ -964,14 +1039,10 @@ class RunQueueExecute:
for pipe in self.build_pipes:
self.build_pipes[pipe].read()
- try:
- while self.stats.active > 0:
- bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
- if self.runqueue_process_waitpid() is None:
- return
- except:
- self.finish_now()
- raise
+ if self.stats.active > 0:
+ bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
+ self.runqueue_process_waitpid()
+ return
if len(self.failed_fnids) != 0:
self.rq.state = runQueueFailed
@@ -980,22 +1051,19 @@ class RunQueueExecute:
self.rq.state = runQueueComplete
return
- def notify_task_started(self, task):
- bb.event.fire(runQueueTaskStarted(task, self.stats, self.rq), self.cfgData)
-
- def notify_task_completed(self, task):
- bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
-
- def fork_off_task(self, fn, task, taskname):
+ def fork_off_task(self, fn, task, taskname, quieterrors=False):
sys.stdout.flush()
sys.stderr.flush()
try:
pipein, pipeout = os.pipe()
+ pipein = os.fdopen(pipein, 'rb', 4096)
+ pipeout = os.fdopen(pipeout, 'wb', 0)
pid = os.fork()
except OSError as e:
bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
if pid == 0:
- os.close(pipein)
+ pipein.close()
+
+ # Save out the PID so that the events can include it
bb.event.worker_pid = os.getpid()
@@ -1008,15 +1076,17 @@ class RunQueueExecute:
newsi = os.open(os.devnull, os.O_RDWR)
os.dup2(newsi, sys.stdin.fileno())
- self.notify_task_started(task)
-
bb.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", self, self.cooker.configuration.data)
bb.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY2", fn, self.cooker.configuration.data)
+ bb.parse.siggen.set_taskdata(self.rqdata.hashes, self.rqdata.hash_deps)
try:
the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.get_file_appends(fn), self.cooker.configuration.data)
+ the_data.setVar('BB_TASKHASH', self.rqdata.runq_hash[task])
+ os.environ.update(bb.data.exported_vars(the_data))
bb.build.exec_task(fn, taskname, the_data)
except Exception as exc:
- logger.critical(str(exc))
+ if not quieterrors:
+ logger.critical(str(exc))
os._exit(1)
os._exit(0)
return pid, pipein, pipeout
@@ -1044,6 +1114,23 @@ class RunQueueExecuteTasks(RunQueueExecute):
self.runq_buildable.append(1)
else:
self.runq_buildable.append(0)
+ if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered):
+ self.rq.scenequeue_covered.add(task)
+
+ found = True
+ while found:
+ found = False
+ for task in range(self.stats.total):
+ if task in self.rq.scenequeue_covered:
+ continue
+ if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered):
+ self.rq.scenequeue_covered.add(task)
+ found = True
+
+ logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)
+
+ for task in self.rq.scenequeue_covered:
+ self.task_skip(task)
event.fire(bb.event.StampUpdate(self.rqdata.target_pairs, self.rqdata.dataCache.stamp), self.cfgData)
@@ -1078,7 +1165,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
schedulers.add(getattr(module, name))
return schedulers
- def task_complete(self, task):
+ def task_completeoutright(self, task):
"""
Mark a task as completed
Look at the reverse dependencies and mark any task with
@@ -1100,6 +1187,11 @@ class RunQueueExecuteTasks(RunQueueExecute):
taskname = self.rqdata.runq_task[revdep]
logger.debug(1, "Marking task %s (%s, %s) as buildable", revdep, fn, taskname)
+ def task_complete(self, task):
+ self.stats.taskCompleted()
+ bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
+ self.task_completeoutright(task)
+
def task_fail(self, task, exitcode):
"""
Called when a task has failed
@@ -1115,7 +1207,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
def task_skip(self, task):
self.runq_running[task] = 1
self.runq_buildable[task] = 1
- self.task_complete(task)
+ self.task_completeoutright(task)
self.stats.taskCompleted()
self.stats.taskSkipped()
@@ -1128,56 +1220,306 @@ class RunQueueExecuteTasks(RunQueueExecute):
# nothing to do
self.rq.state = runQueueCleanUp
- while True:
- for task in iter(self.sched.next, None):
- fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
-
- taskname = self.rqdata.runq_task[task]
- if self.rq.check_stamp_task(task, taskname):
- logger.debug(2, "Stamp current task %s (%s)", task,
- self.rqdata.get_user_idstring(task))
- self.task_skip(task)
- continue
- elif self.cooker.configuration.dry_run:
- self.runq_running[task] = 1
- self.runq_buildable[task] = 1
- self.notify_task_started(task)
- self.stats.taskActive()
- self.task_complete(task)
- self.stats.taskCompleted()
- self.notify_task_completed(task)
- continue
+ task = self.sched.next()
+ if task is not None:
+ fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
- pid, pipein, pipeout = self.fork_off_task(fn, task, taskname)
+ taskname = self.rqdata.runq_task[task]
+ if self.rq.check_stamp_task(task, taskname):
+ logger.debug(2, "Stamp current task %s (%s)", task,
+ self.rqdata.get_user_idstring(task))
+ self.task_skip(task)
+ return True
+ elif self.cooker.configuration.dry_run:
+ self.runq_running[task] = 1
+ self.runq_buildable[task] = 1
+ self.stats.taskActive()
+ self.task_complete(task)
+ return True
- self.build_pids[pid] = task
- self.build_pipes[pid] = runQueuePipe(pipein, pipeout, self.cfgData)
+ taskdep = self.rqdata.dataCache.task_deps[fn]
+ if 'noexec' in taskdep and taskname in taskdep['noexec']:
+ startevent = runQueueTaskStarted(task, self.stats, self.rq,
+ noexec=True)
+ bb.event.fire(startevent, self.cfgData)
self.runq_running[task] = 1
self.stats.taskActive()
+ bb.build.make_stamp(taskname, self.rqdata.dataCache, fn)
+ self.task_complete(task)
+ return True
+ else:
+ startevent = runQueueTaskStarted(task, self.stats, self.rq)
+ bb.event.fire(startevent, self.cfgData)
- for pipe in self.build_pipes:
- self.build_pipes[pipe].read()
+ pid, pipein, pipeout = self.fork_off_task(fn, task, taskname)
- if self.stats.active > 0:
- if self.runqueue_process_waitpid() is None:
- return
- continue
+ self.build_pids[pid] = task
+ self.build_pipes[pid] = runQueuePipe(pipein, pipeout, self.cfgData)
+ self.runq_running[task] = 1
+ self.stats.taskActive()
- if len(self.failed_fnids) != 0:
- self.rq.state = runQueueFailed
- return
+ for pipe in self.build_pipes:
+ self.build_pipes[pipe].read()
+
+ if self.stats.active > 0:
+ if self.runqueue_process_waitpid() is None:
+ return 0.5
+ return True
+
+ if len(self.failed_fnids) != 0:
+ self.rq.state = runQueueFailed
+ return True
+
+ # Sanity Checks
+ for task in xrange(self.stats.total):
+ if self.runq_buildable[task] == 0:
+ logger.error("Task %s never buildable!", task)
+ if self.runq_running[task] == 0:
+ logger.error("Task %s never ran!", task)
+ if self.runq_complete[task] == 0:
+ logger.error("Task %s never completed!", task)
+ self.rq.state = runQueueComplete
+ return True
+
+class RunQueueExecuteScenequeue(RunQueueExecute):
+ def __init__(self, rq):
+ RunQueueExecute.__init__(self, rq)
- # Sanity Checks
- for task in xrange(self.stats.total):
- if self.runq_buildable[task] == 0:
- logger.error("Task %s never buildable!", task)
- if self.runq_running[task] == 0:
- logger.error("Task %s never ran!", task)
- if self.runq_complete[task] == 0:
- logger.error("Task %s never completed!", task)
- self.rq.state = runQueueComplete
+ self.scenequeue_covered = set()
+ self.scenequeue_notcovered = set()
+
+ # If we don't have any setscene functions, skip this step
+ if len(self.rqdata.runq_setscene) == 0:
+ rq.scenequeue_covered = set()
+ rq.state = runQueueRunInit
return
+ self.stats = RunQueueStats(len(self.rqdata.runq_setscene))
+
+ endpoints = {}
+ sq_revdeps = []
+ sq_revdeps_new = []
+ sq_revdeps_squash = []
+
+ # We need to construct a dependency graph for the setscene functions. Intermediate
+ # dependencies between the setscene tasks only complicate the code. This code
+ # therefore aims to collapse the huge runqueue dependency tree into a smaller one
+ # only containing the setscene functions.
+
+ for task in range(self.stats.total):
+ self.runq_running.append(0)
+ self.runq_complete.append(0)
+ self.runq_buildable.append(0)
+
+ for task in range(len(self.rqdata.runq_fnid)):
+ sq_revdeps.append(copy.copy(self.rqdata.runq_revdeps[task]))
+ sq_revdeps_new.append(set())
+ if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
+ endpoints[task] = None
+
+ for task in self.rqdata.runq_setscene:
+ for dep in self.rqdata.runq_depends[task]:
+ endpoints[dep] = task
+
+ def process_endpoints(endpoints):
+ newendpoints = {}
+ for point, task in endpoints.items():
+ tasks = set()
+ if task:
+ tasks.add(task)
+ if sq_revdeps_new[point]:
+ tasks |= sq_revdeps_new[point]
+ sq_revdeps_new[point] = set()
+ for dep in self.rqdata.runq_depends[point]:
+ if point in sq_revdeps[dep]:
+ sq_revdeps[dep].remove(point)
+ if tasks:
+ sq_revdeps_new[dep] |= tasks
+ if (len(sq_revdeps[dep]) == 0 or len(sq_revdeps_new[dep]) != 0) and dep not in self.rqdata.runq_setscene:
+ newendpoints[dep] = task
+ if len(newendpoints) != 0:
+ process_endpoints(newendpoints)
+
+ process_endpoints(endpoints)
+
+ for task in range(len(self.rqdata.runq_fnid)):
+ if task in self.rqdata.runq_setscene:
+ deps = set()
+ for dep in sq_revdeps_new[task]:
+ deps.add(self.rqdata.runq_setscene.index(dep))
+ sq_revdeps_squash.append(deps)
+ elif len(sq_revdeps_new[task]) != 0:
+ bb.msg.fatal(bb.msg.domain.RunQueue, "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
+
+ #for task in range(len(sq_revdeps_squash)):
+ # print "Task %s: %s.%s is %s " % (task, self.taskData.fn_index[self.runq_fnid[self.runq_setscene[task]]], self.runq_task[self.runq_setscene[task]] + "_setscene", sq_revdeps_squash[task])
+
+ self.sq_deps = []
+ self.sq_revdeps = sq_revdeps_squash
+ self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)
+
+ for task in range(len(self.sq_revdeps)):
+ self.sq_deps.append(set())
+ for task in range(len(self.sq_revdeps)):
+ for dep in self.sq_revdeps[task]:
+ self.sq_deps[dep].add(task)
+
+ for task in range(len(self.sq_revdeps)):
+ if len(self.sq_revdeps[task]) == 0:
+ self.runq_buildable[task] = 1
+
+ if self.rq.hashvalidate:
+ sq_hash = []
+ sq_hashfn = []
+ sq_fn = []
+ sq_taskname = []
+ sq_task = []
+ noexec = []
+ for task in range(len(self.sq_revdeps)):
+ realtask = self.rqdata.runq_setscene[task]
+ fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
+ taskname = self.rqdata.runq_task[realtask]
+ taskdep = self.rqdata.dataCache.task_deps[fn]
+ if 'noexec' in taskdep and taskname in taskdep['noexec']:
+ noexec.append(task)
+ self.task_skip(task)
+ bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn)
+ continue
+ sq_fn.append(fn)
+ sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
+ sq_hash.append(self.rqdata.runq_hash[realtask])
+ sq_taskname.append(taskname)
+ sq_task.append(task)
+ call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
+ locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.configuration.data }
+ valid = bb.utils.better_eval(call, locs)
+
+ valid_new = []
+ for v in valid:
+ valid_new.append(sq_task[v])
+
+ for task in range(len(self.sq_revdeps)):
+ if task not in valid_new and task not in noexec:
+ logger.debug(2, 'No package found, so skipping setscene task %s',
+ self.rqdata.get_user_idstring(task))
+ self.task_failoutright(task)
+
+ logger.info('Executing setscene tasks')
+
+ self.rq.state = runQueueSceneRun
+
+ def scenequeue_updatecounters(self, task):
+ for dep in self.sq_deps[task]:
+ self.sq_revdeps2[dep].remove(task)
+ if len(self.sq_revdeps2[dep]) == 0:
+ self.runq_buildable[dep] = 1
+
+ def task_completeoutright(self, task):
+ """
+ Mark a task as completed
+ Look at the reverse dependencies and mark any task with
+ completed dependencies as buildable
+ """
+
+ index = self.rqdata.runq_setscene[task]
+ logger.debug(1, 'Found task %s which could be accelerated',
+ self.rqdata.get_user_idstring(index))
+
+ self.scenequeue_covered.add(task)
+ self.scenequeue_updatecounters(task)
+
+ def task_complete(self, task):
+ self.stats.taskCompleted()
+ self.task_completeoutright(task)
+
+ def task_fail(self, task, result):
+ self.stats.taskFailed()
+ index = self.rqdata.runq_setscene[task]
+ bb.event.fire(runQueueTaskFailed(task, self.stats, result, self.rq), self.cfgData)
+ self.scenequeue_notcovered.add(task)
+ self.scenequeue_updatecounters(task)
+
+ def task_failoutright(self, task):
+ self.runq_running[task] = 1
+ self.runq_buildable[task] = 1
+ self.stats.taskCompleted()
+ self.stats.taskSkipped()
+ index = self.rqdata.runq_setscene[task]
+ self.scenequeue_notcovered.add(task)
+ self.scenequeue_updatecounters(task)
+
+ def task_skip(self, task):
+ self.runq_running[task] = 1
+ self.runq_buildable[task] = 1
+ self.task_completeoutright(task)
+ self.stats.taskCompleted()
+ self.stats.taskSkipped()
+
+ def execute(self):
+ """
+ Run the tasks in a queue prepared by prepare_runqueue
+ """
+
+ task = None
+ if self.stats.active < self.number_tasks:
+ # Find the next setscene to run
+ for nexttask in range(self.stats.total):
+ if self.runq_buildable[nexttask] == 1 and self.runq_running[nexttask] != 1:
+ task = nexttask
+ break
+ if task is not None:
+ realtask = self.rqdata.runq_setscene[task]
+ fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
+
+ taskname = self.rqdata.runq_task[realtask] + "_setscene"
+ if self.rq.check_stamp_task(realtask, self.rqdata.runq_task[realtask]):
+ logger.debug(2, 'Stamp for underlying task %s(%s) is current, so skipping setscene variant',
+ task, self.rqdata.get_user_idstring(task))
+ self.task_failoutright(task)
+ return True
+
+ if self.cooker.configuration.force:
+ for target in self.rqdata.target_pairs:
+ if target[0] == fn and target[1] == self.rqdata.runq_task[realtask]:
+ self.task_failoutright(task)
+ return True
+
+ if self.rq.check_stamp_task(realtask, taskname):
+ logger.debug(2, 'Setscene stamp current task %s(%s), so skip it and its dependencies',
+ task, self.rqdata.get_user_idstring(realtask))
+ self.task_skip(task)
+ return True
+
+ pid, pipein, pipeout = self.fork_off_task(fn, realtask, taskname)
+
+ self.build_pids[pid] = task
+ self.build_pipes[pid] = runQueuePipe(pipein, pipeout, self.cfgData)
+ self.runq_running[task] = 1
+ self.stats.taskActive()
+ if self.stats.active < self.number_tasks:
+ return True
+
+ for pipe in self.build_pipes:
+ self.build_pipes[pipe].read()
+
+ if self.stats.active > 0:
+ if self.runqueue_process_waitpid() is None:
+ return 0.5
+ return True
+
+ # Convert scenequeue_covered task numbers into full taskgraph ids
+ oldcovered = self.scenequeue_covered
+ self.rq.scenequeue_covered = set()
+ for task in oldcovered:
+ self.rq.scenequeue_covered.add(self.rqdata.runq_setscene[task])
+
+ logger.debug(1, 'We can skip tasks %s', self.rq.scenequeue_covered)
+
+ self.rq.state = runQueueRunInit
+ return True
+
+ def fork_off_task(self, fn, task, taskname):
+ return RunQueueExecute.fork_off_task(self, fn, task, taskname, quieterrors=True)
class TaskFailure(Exception):
"""
@@ -1211,9 +1553,9 @@ class runQueueTaskStarted(runQueueEvent):
"""
Event notifying a task was started
"""
- def __init__(self, task, stats, rq):
+ def __init__(self, task, stats, rq, noexec=False):
runQueueEvent.__init__(self, task, stats, rq)
- self.message = "Running task %s (%d of %d) (%s)" % (task, stats.completed + stats.active + 1, self.stats.total, self.taskstring)
+ self.noexec = noexec
class runQueueTaskFailed(runQueueEvent):
"""
@@ -1222,15 +1564,11 @@ class runQueueTaskFailed(runQueueEvent):
def __init__(self, task, stats, exitcode, rq):
runQueueEvent.__init__(self, task, stats, rq)
self.exitcode = exitcode
- self.message = "Task %s failed (%s)" % (task, self.taskstring)
class runQueueTaskCompleted(runQueueEvent):
"""
Event notifying a task completed
"""
- def __init__(self, task, stats, rq):
- runQueueEvent.__init__(self, task, stats, rq)
- self.message = "Task %s completed (%s)" % (task, self.taskstring)
def check_stamp_fn(fn, taskname, d):
rqexe = bb.data.getVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", d)
@@ -1246,17 +1584,17 @@ class runQueuePipe():
Abstraction for a pipe between a worker thread and the server
"""
def __init__(self, pipein, pipeout, d):
- self.fd = pipein
- os.close(pipeout)
- fcntl.fcntl(self.fd, fcntl.F_SETFL, fcntl.fcntl(self.fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+ self.input = pipein
+ pipeout.close()
+ fcntl.fcntl(self.input, fcntl.F_SETFL, fcntl.fcntl(self.input, fcntl.F_GETFL) | os.O_NONBLOCK)
self.queue = ""
self.d = d
def read(self):
start = len(self.queue)
try:
- self.queue = self.queue + os.read(self.fd, 1024)
- except OSError:
+ self.queue = self.queue + self.input.read(1024)
+ except (OSError, IOError):
pass
end = len(self.queue)
index = self.queue.find("</event>")
@@ -1271,4 +1609,4 @@ class runQueuePipe():
continue
if len(self.queue) > 0:
print("Warning, worker left partial message")
- os.close(self.fd)
+ self.input.close()
diff --git a/lib/bb/siggen.py b/lib/bb/siggen.py
new file mode 100644
index 000000000..94ae2b48a
--- /dev/null
+++ b/lib/bb/siggen.py
@@ -0,0 +1,263 @@
+import hashlib
+import logging
+import re
+
+logger = logging.getLogger('BitBake.SigGen')
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+ logger.info('Importing cPickle failed. Falling back to a very slow implementation.')
+
+def init(d):
+ siggens = [obj for obj in globals().itervalues()
+ if type(obj) is type and issubclass(obj, SignatureGenerator)]
+
+ desired = bb.data.getVar("BB_SIGNATURE_HANDLER", d, True) or "noop"
+ for sg in siggens:
+ if desired == sg.name:
+ return sg(d)
+ else:
+ logger.error("Invalid signature generator '%s', using default 'noop'\n"
+ "Available generators: %s",
+ ', '.join(obj.name for obj in siggens))
+ return SignatureGenerator(d)
+
+class SignatureGenerator(object):
+ """
+ """
+ name = "noop"
+
+ def __init__(self, data):
+ return
+
+ def finalise(self, fn, d, variant):
+ return
+
+ def get_taskhash(self, fn, task, deps, dataCache):
+ return 0
+
+ def set_taskdata(self, hashes, deps):
+ return
+
+ def stampfile(self, stampbase, taskname, taskhash):
+ return "%s.%s" % (stampbase, taskname)
+
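Because init() above selects a generator by its name attribute from this module's globals, plugging in a different scheme is a matter of subclassing. A hypothetical minimal subclass (the name and behaviour are illustrative, not part of this commit) that folds the task hash into the stamp file name:

    class SignatureGeneratorHashStamp(SignatureGenerator):
        name = "hashstamp"   # selected via BB_SIGNATURE_HANDLER = "hashstamp"

        def stampfile(self, stampbase, taskname, taskhash):
            # Embedding the hash means a task whose inputs changed gets a new
            # stamp name, so its old stamp no longer marks it as up to date.
            return "%s.%s.%s" % (stampbase, taskname, taskhash)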
+class SignatureGeneratorBasic(SignatureGenerator):
+    """
+    Basic signature generator which hashes a task's code together with the
+    values of the variables it depends on.
+    """
+    name = "basic"
+
+    def __init__(self, data):
+        self.basehash = {}
+        self.taskhash = {}
+        self.taskdeps = {}
+        self.runtaskdeps = {}
+        self.gendeps = {}
+        self.lookupcache = {}
+        self.basewhitelist = (data.getVar("BB_HASHBASE_WHITELIST", True) or "").split()
+        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST", True) or None
+
+        if self.taskwhitelist:
+            self.twl = re.compile(self.taskwhitelist)
+        else:
+            self.twl = None
+
+    def _build_data(self, fn, d):
+
+        taskdeps, gendeps = bb.data.generate_dependencies(d)
+
+        lookupcache = {}
+
+        for task in taskdeps:
+            data = d.getVar(task, False)
+            lookupcache[task] = data
+            for dep in sorted(taskdeps[task]):
+                if dep in self.basewhitelist:
+                    continue
+                if dep in lookupcache:
+                    var = lookupcache[dep]
+                else:
+                    var = d.getVar(dep, False)
+                    lookupcache[dep] = var
+                if var:
+                    data = data + var
+            if data is None:
+                bb.error("Task %s from %s seems to be empty?!" % (task, fn))
+                data = ''
+            self.basehash[fn + "." + task] = hashlib.md5(data).hexdigest()
+
+        self.taskdeps[fn] = taskdeps
+        self.gendeps[fn] = gendeps
+        self.lookupcache[fn] = lookupcache
+
+        return taskdeps
+
+    def finalise(self, fn, d, variant):
+
+        if variant:
+            fn = "virtual:" + variant + ":" + fn
+
+        taskdeps = self._build_data(fn, d)
+
+        # Slow but can be useful for debugging mismatched basehashes
+        #for task in self.taskdeps[fn]:
+        #    self.dump_sigtask(fn, task, d.getVar("STAMP", True), False)
+
+        for task in taskdeps:
+            d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + "." + task])
+
+    def get_taskhash(self, fn, task, deps, dataCache):
+        k = fn + "." + task
+        data = dataCache.basetaskhash[k]
+        self.runtaskdeps[k] = []
+        for dep in sorted(deps):
+            # We only manipulate the dependencies for packages not in the whitelist
+            if self.twl and not self.twl.search(dataCache.pkg_fn[fn]):
+                # then process the actual dependencies
+                dep_fn = re.search(r"(?P<fn>.*)\..*", dep).group('fn')
+                if self.twl.search(dataCache.pkg_fn[dep_fn]):
+                    continue
+            if dep not in self.taskhash:
+                bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)
+            data = data + self.taskhash[dep]
+            self.runtaskdeps[k].append(dep)
+        h = hashlib.md5(data).hexdigest()
+        self.taskhash[k] = h
+        #d.setVar("BB_TASKHASH_task-%s" % task, taskhash[task])
+        return h
+
+    def set_taskdata(self, hashes, deps):
+        self.runtaskdeps = deps
+        self.taskhash = hashes
+
+    def dump_sigtask(self, fn, task, stampbase, runtime):
+        k = fn + "." + task
+        if runtime == "customfile":
+            sigfile = stampbase
+        elif runtime:
+            sigfile = stampbase + "." + task + ".sigdata" + "." + self.taskhash[k]
+        else:
+            sigfile = stampbase + "." + task + ".sigbasedata" + "." + self.basehash[k]
+
+        bb.utils.mkdirhier(os.path.dirname(sigfile))
+
+        data = {}
+        data['basewhitelist'] = self.basewhitelist
+        data['taskwhitelist'] = self.taskwhitelist
+        data['taskdeps'] = self.taskdeps[fn][task]
+        data['basehash'] = self.basehash[k]
+        data['gendeps'] = {}
+        data['varvals'] = {}
+        data['varvals'][task] = self.lookupcache[fn][task]
+        for dep in self.taskdeps[fn][task]:
+            if dep in self.basewhitelist:
+                continue
+            data['gendeps'][dep] = self.gendeps[fn][dep]
+            data['varvals'][dep] = self.lookupcache[fn][dep]
+
+        if runtime:
+            data['runtaskdeps'] = self.runtaskdeps[k]
+            data['runtaskhashes'] = {}
+            for dep in data['runtaskdeps']:
+                data['runtaskhashes'][dep] = self.taskhash[dep]
+
+        with open(sigfile, "wb") as sf:
+            p = pickle.Pickler(sf, -1)
+            p.dump(data)
+
+    def dump_sigs(self, dataCache):
+        for fn in self.taskdeps:
+            for task in self.taskdeps[fn]:
+                k = fn + "." + task
+                if k not in self.taskhash:
+                    continue
+                if dataCache.basetaskhash[k] != self.basehash[k]:
+                    bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % k)
+                    bb.error("The mismatched hashes were %s and %s" % (dataCache.basetaskhash[k], self.basehash[k]))
+                self.dump_sigtask(fn, task, dataCache.stamp[fn], True)
+
+def dump_this_task(outfile, d):
+    fn = d.getVar("BB_FILENAME", True)
+    task = "do_" + d.getVar("BB_CURRENTTASK", True)
+    bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile")
+
+def compare_sigfiles(a, b):
+    p1 = pickle.Unpickler(open(a, "rb"))
+    a_data = p1.load()
+    p2 = pickle.Unpickler(open(b, "rb"))
+    b_data = p2.load()
+
+    def dict_diff(a, b):
+        sa = set(a.keys())
+        sb = set(b.keys())
+        common = sa & sb
+        changed = set()
+        for i in common:
+            if a[i] != b[i]:
+                changed.add(i)
+        added = sb - sa
+        removed = sa - sb
+        return changed, added, removed
+
+    if 'basewhitelist' in a_data and a_data['basewhitelist'] != b_data['basewhitelist']:
+        print "basewhitelist changed from %s to %s" % (a_data['basewhitelist'], b_data['basewhitelist'])
+
+    if 'taskwhitelist' in a_data and a_data['taskwhitelist'] != b_data['taskwhitelist']:
+        print "taskwhitelist changed from %s to %s" % (a_data['taskwhitelist'], b_data['taskwhitelist'])
+
+    if a_data['taskdeps'] != b_data['taskdeps']:
+        print "Task dependencies changed from %s to %s" % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps']))
+
+    if a_data['basehash'] != b_data['basehash']:
+        print "basehash changed from %s to %s" % (a_data['basehash'], b_data['basehash'])
+
+    changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'])
+    if changed:
+        for dep in changed:
+            print "List of dependencies for variable %s changed from %s to %s" % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep])
+    if added:
+        for dep in added:
+            print "Dependency on variable %s was added" % (dep)
+    if removed:
+        for dep in removed:
+            print "Dependency on variable %s was removed" % (dep)
+
+    changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals'])
+    if changed:
+        for dep in changed:
+            print "Variable %s value changed from %s to %s" % (dep, a_data['varvals'][dep], b_data['varvals'][dep])
+
+    if 'runtaskdeps' in a_data and 'runtaskdeps' in b_data and sorted(a_data['runtaskdeps']) != sorted(b_data['runtaskdeps']):
+        print "Tasks this task depends on changed from %s to %s" % (sorted(a_data['runtaskdeps']), sorted(b_data['runtaskdeps']))
+
+    if 'runtaskhashes' in a_data:
+        for dep in a_data['runtaskhashes']:
+            if a_data['runtaskhashes'][dep] != b_data['runtaskhashes'][dep]:
+                print "Hash for dependent task %s changed from %s to %s" % (dep, a_data['runtaskhashes'][dep], b_data['runtaskhashes'][dep])
+
+def dump_sigfile(a):
+    p1 = pickle.Unpickler(open(a, "rb"))
+    a_data = p1.load()
+
+    print "basewhitelist: %s" % (a_data['basewhitelist'])
+
+    print "taskwhitelist: %s" % (a_data['taskwhitelist'])
+
+    print "Task dependencies: %s" % (sorted(a_data['taskdeps']))
+
+    print "basehash: %s" % (a_data['basehash'])
+
+    for dep in a_data['gendeps']:
+        print "List of dependencies for variable %s is %s" % (dep, a_data['gendeps'][dep])
+
+    for dep in a_data['varvals']:
+        print "Variable %s value is %s" % (dep, a_data['varvals'][dep])
+
+    if 'runtaskdeps' in a_data:
+        print "Tasks this task depends on: %s" % (a_data['runtaskdeps'])
+
+    if 'runtaskhashes' in a_data:
+        for dep in a_data['runtaskhashes']:
+            print "Hash for dependent task %s is %s" % (dep, a_data['runtaskhashes'][dep])
diff --git a/lib/bb/ui/knotty.py b/lib/bb/ui/knotty.py
index 0735f25dc..bebfa8a94 100644
--- a/lib/bb/ui/knotty.py
+++ b/lib/bb/ui/knotty.py
@@ -195,8 +195,14 @@ def main(server, eventHandler):
                 continue
 
             if isinstance(event, bb.runqueue.runQueueTaskStarted):
-                logger.info("Running task %s of %s (ID: %s, %s)",
-                            event.stats.completed + event.stats.active + event.stats.failed + 1,
+                if event.noexec:
+                    tasktype = 'noexec task'
+                else:
+                    tasktype = 'task'
+                logger.info("Running %s %s of %s (ID: %s, %s)",
+                            tasktype,
+                            event.stats.completed + event.stats.active +
+                            event.stats.failed + 1,
                             event.stats.total, event.taskid, event.taskstring)
                 continue
diff --git a/lib/bb/utils.py b/lib/bb/utils.py
index b121b121c..459b8948d 100644
--- a/lib/bb/utils.py
+++ b/lib/bb/utils.py
@@ -473,12 +473,25 @@ def sha256_file(filename):
         s.update(line)
     return s.hexdigest()
 
-def preserved_envvars_list():
+def preserved_envvars_exported():
+    """Variables taken from the environment, placed into the metadata, and
+    exported from it"""
+    return [
+        'BB_TASKHASH',
+        'HOME',
+        'LOGNAME',
+        'PATH',
+        'PWD',
+        'SHELL',
+        'TERM',
+        'USER',
+        'USERNAME',
+    ]
+
+def preserved_envvars_exported_interactive():
+    """Variables taken from the environment, placed into the metadata, and
+    exported from it, for interactive tasks"""
     return [
-        'BBPATH',
-        'BB_PRESERVE_ENV',
-        'BB_ENV_WHITELIST',
-        'BB_ENV_EXTRAWHITE',
         'COLORTERM',
         'DBUS_SESSION_BUS_ADDRESS',
         'DESKTOP_SESSION',
@@ -488,23 +501,25 @@ def preserved_envvars_list():
         'GNOME_KEYRING_SOCKET',
         'GPG_AGENT_INFO',
         'GTK_RC_FILES',
-        'HOME',
-        'LANG',
-        'LOGNAME',
-        'PATH',
-        'PWD',
         'SESSION_MANAGER',
-        'SHELL',
         'SSH_AUTH_SOCK',
-        'TERM',
-        'USER',
-        'USERNAME',
-        '_',
         'XAUTHORITY',
         'XDG_DATA_DIRS',
         'XDG_SESSION_COOKIE',
     ]
+
+def preserved_envvars():
+    """Variables which are taken from the environment and placed in the metadata"""
+    v = [
+        'BBPATH',
+        'BB_PRESERVE_ENV',
+        'BB_ENV_WHITELIST',
+        'BB_ENV_EXTRAWHITE',
+        'LANG',
+        '_',
+    ]
+    return v + preserved_envvars_exported() + preserved_envvars_exported_interactive()
+
 def filter_environment(good_vars):
     """
     Create a pristine environment for bitbake. This will remove variables that
@@ -525,6 +540,10 @@ def filter_environment(good_vars):
     return removed_vars
 
+def create_interactive_env(d):
+    for k in preserved_envvars_exported_interactive():
+        os.environ[k] = bb.data.getVar(k, d, True) or ''
+
 def clean_environment():
     """
     Clean up any spurious environment variables. This will remove any
@@ -534,7 +553,7 @@ def clean_environment():
     if 'BB_ENV_WHITELIST' in os.environ:
         good_vars = os.environ['BB_ENV_WHITELIST'].split()
     else:
-        good_vars = preserved_envvars_list()
+        good_vars = preserved_envvars()
     if 'BB_ENV_EXTRAWHITE' in os.environ:
         good_vars.extend(os.environ['BB_ENV_EXTRAWHITE'].split())
     filter_environment(good_vars)
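The net effect of the whitelist split above: preserved_envvars() is everything BitBake keeps in the metadata, of which preserved_envvars_exported() is also exported to tasks, and preserved_envvars_exported_interactive() additionally to interactive tasks. filter_environment() then deletes every other variable from the process environment. A rough standalone sketch of that policy (simplified; the 'PATH HOME' fallback merely stands in for the preserved_envvars() default, and the function name is illustrative):

    import os

    def scrub_environment(good_vars):
        # Delete everything not explicitly whitelisted, returning the
        # removed variables so the caller can report them.
        removed = {}
        for key in list(os.environ):
            if key not in good_vars:
                removed[key] = os.environ.pop(key)
        return removed

    # BB_ENV_WHITELIST replaces the built-in list outright, while
    # BB_ENV_EXTRAWHITE only extends whichever list is in effect.
    good = (os.environ.get('BB_ENV_WHITELIST') or 'PATH HOME').split()
    good.extend(os.environ.get('BB_ENV_EXTRAWHITE', '').split())
    scrub_environment(good)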