-rw-r--r--  ChangeLog                             2
-rw-r--r--  lib/bb/__init__.py                  179
-rw-r--r--  lib/bb/build.py                     176
-rw-r--r--  lib/bb/cache.py                       1
-rw-r--r--  lib/bb/cooker.py                     12
-rw-r--r--  lib/bb/parse/parse_py/BBHandler.py   35
-rw-r--r--  lib/bb/runqueue.py                   26
-rw-r--r--  lib/bb/taskdata.py                    5
8 files changed, 66 insertions, 370 deletions
diff --git a/ChangeLog b/ChangeLog
index 5dbbd44e8..df492a07e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -108,6 +108,8 @@ Changes in Bitbake 1.9.x:
- Update shell to use cooker.buildFile
- Add StampUpdate event
- Convert -b option to use taskdata/runqueue
+ - Remove digraph and switch to new stamp checking code. exec_task no longer
+ honours dependencies
Changes in Bitbake 1.8.0:
- Release 1.7.x as a stable series
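For orientation, the dict-based bookkeeping that replaces the digraph has roughly the shape sketched below. This is a minimal sketch inferred from the add_tasks() and taskdata.py changes later in this patch; the task names are hypothetical examples, not values taken from the patch.

    # Approximate shape of the _task_deps dictionary built by add_tasks().
    # Task names are made-up examples.
    task_deps = {
        'tasks':   ['do_fetch', 'do_unpack', 'do_compile'],
        'deps':    {'do_unpack':  ['do_fetch'],          # from each task's 'deps' varflag
                    'do_compile': ['do_unpack']},
        'depends': {}, 'deptask': {}, 'rdeptask': {},
        'recrdeptask': {}, 'nostamp': {},
    }
    task_deps['parents'] = task_deps['deps']             # mirrors the end of add_tasks()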
diff --git a/lib/bb/__init__.py b/lib/bb/__init__.py
index b5f265b76..99995212c 100644
--- a/lib/bb/__init__.py
+++ b/lib/bb/__init__.py
@@ -46,7 +46,6 @@ __all__ = [
"pkgcmp",
"dep_parenreduce",
"dep_opconvert",
- "digraph",
# fetch
"decodeurl",
@@ -1129,184 +1128,6 @@ def dep_opconvert(mysplit, myuse):
mypos += 1
return newsplit
-class digraph:
- """beautiful directed graph object"""
-
- def __init__(self):
- self.dict={}
- #okeys = keys, in order they were added (to optimize firstzero() ordering)
- self.okeys=[]
- self.__callback_cache=[]
-
- def __str__(self):
- str = ""
- for key in self.okeys:
- str += "%s:\t%s\n" % (key, self.dict[key][1])
- return str
-
- def addnode(self,mykey,myparent):
- if not mykey in self.dict:
- self.okeys.append(mykey)
- if myparent==None:
- self.dict[mykey]=[0,[]]
- else:
- self.dict[mykey]=[0,[myparent]]
- self.dict[myparent][0]=self.dict[myparent][0]+1
- return
- if myparent and (not myparent in self.dict[mykey][1]):
- self.dict[mykey][1].append(myparent)
- self.dict[myparent][0]=self.dict[myparent][0]+1
-
- def delnode(self,mykey, ref = 1):
- """Delete a node
-
- If ref is 1, remove references to this node from other nodes.
- If ref is 2, remove nodes that reference this node."""
- if not mykey in self.dict:
- return
- for x in self.dict[mykey][1]:
- self.dict[x][0]=self.dict[x][0]-1
- del self.dict[mykey]
- while 1:
- try:
- self.okeys.remove(mykey)
- except ValueError:
- break
- if ref:
- __kill = []
- for k in self.okeys:
- if mykey in self.dict[k][1]:
- if ref == 1 or ref == 2:
- self.dict[k][1].remove(mykey)
- if ref == 2:
- __kill.append(k)
- for l in __kill:
- self.delnode(l, ref)
-
- def allnodes(self):
- "returns all nodes in the dictionary"
- keys = self.dict.keys()
- ret = []
- for key in keys:
- ret.append(key)
- ret.sort()
- return ret
-
- def firstzero(self):
- "returns first node with zero references, or NULL if no such node exists"
- for x in self.okeys:
- if self.dict[x][0]==0:
- return x
- return None
-
- def firstnonzero(self):
- "returns first node with nonzero references, or NULL if no such node exists"
- for x in self.okeys:
- if self.dict[x][0]!=0:
- return x
- return None
-
-
- def allzeros(self):
- "returns all nodes with zero references, or NULL if no such node exists"
- zerolist = []
- for x in self.dict.keys():
- if self.dict[x][0]==0:
- zerolist.append(x)
- return zerolist
-
- def hasallzeros(self):
- "returns 0/1, Are all nodes zeros? 1 : 0"
- zerolist = []
- for x in self.dict.keys():
- if self.dict[x][0]!=0:
- return 0
- return 1
-
- def empty(self):
- if len(self.dict)==0:
- return 1
- return 0
-
- def hasnode(self,mynode):
- return mynode in self.dict
-
- def getparents(self, item):
- if not self.hasnode(item):
- return []
- parents = self.dict[item][1]
- ret = []
- for parent in parents:
- ret.append(parent)
- ret.sort()
- return ret
-
- def getchildren(self, item):
- if not self.hasnode(item):
- return []
- children = [i for i in self.okeys if item in self.getparents(i)]
- return children
-
- def walkdown(self, item, callback, debug = None, usecache = False):
- if not self.hasnode(item):
- return 0
-
- if usecache:
- if self.__callback_cache.count(item):
- if debug:
- print "hit cache for item: %s" % item
- return 1
-
- parents = self.getparents(item)
- children = self.getchildren(item)
- for p in parents:
- if p in children:
-# print "%s is both parent and child of %s" % (p, item)
- if usecache:
- self.__callback_cache.append(p)
- ret = callback(self, p)
- if ret == 0:
- return 0
- continue
- if item == p:
- print "eek, i'm my own parent!"
- return 0
- if debug:
- print "item: %s, p: %s" % (item, p)
- ret = self.walkdown(p, callback, debug, usecache)
- if ret == 0:
- return 0
- if usecache:
- self.__callback_cache.append(item)
- return callback(self, item)
-
- def walkup(self, item, callback):
- if not self.hasnode(item):
- return 0
-
- parents = self.getparents(item)
- children = self.getchildren(item)
- for c in children:
- if c in parents:
- ret = callback(self, item)
- if ret == 0:
- return 0
- continue
- if item == c:
- print "eek, i'm my own child!"
- return 0
- ret = self.walkup(c, callback)
- if ret == 0:
- return 0
- return callback(self, item)
-
- def copy(self):
- mygraph=digraph()
- for x in self.dict.keys():
- mygraph.dict[x]=self.dict[x][:]
- mygraph.okeys=self.okeys[:]
- return mygraph
-
if __name__ == "__main__":
import doctest, bb
doctest.testmod(bb)
diff --git a/lib/bb/build.py b/lib/bb/build.py
index 74736c66d..27859beb4 100644
--- a/lib/bb/build.py
+++ b/lib/bb/build.py
@@ -259,78 +259,35 @@ def exec_task(task, d):
a function is that a task exists in the task digraph, and therefore
has dependencies amongst other tasks."""
- # check if the task is in the graph..
- task_graph = data.getVar('_task_graph', d)
- if not task_graph:
- task_graph = bb.digraph()
- data.setVar('_task_graph', task_graph, d)
- task_cache = data.getVar('_task_cache', d)
- if not task_cache:
- task_cache = []
- data.setVar('_task_cache', task_cache, d)
- if not task_graph.hasnode(task):
- raise EventException("Missing node in task graph", InvalidTask(task, d))
-
- # check whether this task needs executing..
- if stamp_is_current(task, d):
- return 1
-
- # follow digraph path up, then execute our way back down
- def execute(graph, item):
- if data.getVarFlag(item, 'task', d):
- if item in task_cache:
- return 1
-
- if task != item:
- # deeper than toplevel, exec w/ deps
- exec_task(item, d)
- return 1
-
- try:
- bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % item)
- old_overrides = data.getVar('OVERRIDES', d, 0)
- localdata = data.createCopy(d)
- data.setVar('OVERRIDES', 'task_%s:%s' % (item, old_overrides), localdata)
- data.update_data(localdata)
- event.fire(TaskStarted(item, localdata))
- exec_func(item, localdata)
- event.fire(TaskSucceeded(item, localdata))
- task_cache.append(item)
- data.setVar('_task_cache', task_cache, d)
- except FuncFailed, message:
- # Try to extract the optional logfile
- try:
- (msg, logfile) = message
- except:
- logfile = None
- msg = message
- bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % message )
- failedevent = TaskFailed(msg, logfile, item, d)
- event.fire(failedevent)
- raise EventException("Function failed in task: %s" % message, failedevent)
-
- if data.getVarFlag(task, 'dontrundeps', d):
- execute(None, task)
- else:
- task_graph.walkdown(task, execute)
+ # Check whether this is a valid task
+ if not data.getVarFlag(task, 'task', d):
+ raise EventException("No such task", InvalidTask(task, d))
+
+ try:
+ bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % task)
+ old_overrides = data.getVar('OVERRIDES', d, 0)
+ localdata = data.createCopy(d)
+ data.setVar('OVERRIDES', 'task_%s:%s' % (task, old_overrides), localdata)
+ data.update_data(localdata)
+ event.fire(TaskStarted(task, localdata))
+ exec_func(task, localdata)
+ event.fire(TaskSucceeded(task, localdata))
+ except FuncFailed, message:
+ # Try to extract the optional logfile
+ try:
+ (msg, logfile) = message
+ except:
+ logfile = None
+ msg = message
+ bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % message )
+ failedevent = TaskFailed(msg, logfile, task, d)
+ event.fire(failedevent)
+ raise EventException("Function failed in task: %s" % message, failedevent)
# make stamp, or cause event and raise exception
if not data.getVarFlag(task, 'nostamp', d) and not data.getVarFlag(task, 'selfstamp', d):
make_stamp(task, d)
-def extract_stamp_data(d, fn):
- """
- Extracts stamp data from d which is either a data dictonary (fn unset)
- or a dataCache entry (fn set).
- """
- if fn:
- return (d.task_queues[fn], d.stamp[fn], d.task_deps[fn])
- task_graph = data.getVar('_task_graph', d)
- if not task_graph:
- task_graph = bb.digraph()
- data.setVar('_task_graph', task_graph, d)
- return (task_graph, data.getVar('STAMP', d, 1), None)
-
def extract_stamp(d, fn):
"""
Extracts stamp format which is either a data dictonary (fn unset)
@@ -340,49 +297,6 @@ def extract_stamp(d, fn):
return d.stamp[fn]
return data.getVar('STAMP', d, 1)
-def stamp_is_current(task, d, file_name = None, checkdeps = 1):
- """
- Check status of a given task's stamp.
- Returns 0 if it is not current and needs updating.
- (d can be a data dict or dataCache)
- """
-
- (task_graph, stampfn, taskdep) = extract_stamp_data(d, file_name)
-
- if not stampfn:
- return 0
-
- stampfile = "%s.%s" % (stampfn, task)
- if not os.access(stampfile, os.F_OK):
- return 0
-
- if checkdeps == 0:
- return 1
-
- import stat
- tasktime = os.stat(stampfile)[stat.ST_MTIME]
-
- _deps = []
- def checkStamp(graph, task):
- # check for existance
- if file_name:
- if 'nostamp' in taskdep and task in taskdep['nostamp']:
- return 1
- else:
- if data.getVarFlag(task, 'nostamp', d):
- return 1
-
- if not stamp_is_current(task, d, file_name, 0 ):
- return 0
-
- depfile = "%s.%s" % (stampfn, task)
- deptime = os.stat(depfile)[stat.ST_MTIME]
- if deptime > tasktime:
- return 0
- return 1
-
- return task_graph.walkdown(task, checkStamp)
-
def stamp_internal(task, d, file_name):
"""
Internal stamp helper function
@@ -419,40 +333,35 @@ def del_stamp(task, d, file_name = None):
stamp_internal(task, d, file_name)
def add_tasks(tasklist, d):
- task_graph = data.getVar('_task_graph', d)
task_deps = data.getVar('_task_deps', d)
- if not task_graph:
- task_graph = bb.digraph()
if not task_deps:
task_deps = {}
+ if not 'tasks' in task_deps:
+ task_deps['tasks'] = []
for task in tasklist:
- deps = tasklist[task]
task = data.expand(task, d)
-
data.setVarFlag(task, 'task', 1, d)
- task_graph.addnode(task, None)
- for dep in deps:
- dep = data.expand(dep, d)
- if not task_graph.hasnode(dep):
- task_graph.addnode(dep, None)
- task_graph.addnode(task, dep)
+
+ if not task in task_deps['tasks']:
+ task_deps['tasks'].append(task)
flags = data.getVarFlags(task, d)
def getTask(name):
+ if not name in task_deps:
+ task_deps[name] = {}
if name in flags:
deptask = data.expand(flags[name], d)
- if not name in task_deps:
- task_deps[name] = {}
task_deps[name][task] = deptask
+ getTask('deps')
getTask('depends')
getTask('deptask')
getTask('rdeptask')
getTask('recrdeptask')
getTask('nostamp')
+ task_deps['parents'] = task_deps['deps']
# don't assume holding a reference
- data.setVar('_task_graph', task_graph, d)
data.setVar('_task_deps', task_deps, d)
def remove_task(task, kill, d):
@@ -460,22 +369,5 @@ def remove_task(task, kill, d):
If kill is 1, also remove tasks that depend on this task."""
- task_graph = data.getVar('_task_graph', d)
- if not task_graph:
- task_graph = bb.digraph()
- if not task_graph.hasnode(task):
- return
-
data.delVarFlag(task, 'task', d)
- ref = 1
- if kill == 1:
- ref = 2
- task_graph.delnode(task, ref)
- data.setVar('_task_graph', task_graph, d)
-
-def task_exists(task, d):
- task_graph = data.getVar('_task_graph', d)
- if not task_graph:
- task_graph = bb.digraph()
- data.setVar('_task_graph', task_graph, d)
- return task_graph.hasnode(task)
+
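With the digraph walk gone, exec_task() runs exactly one task and raises EventException on failure; ordering of a task's dependencies is now the runqueue's job. A minimal usage sketch, assuming a fully parsed datastore d and a hypothetical wrapper name:

    import bb
    from bb import build

    def run_one_task(d, taskname='do_compile'):
        # Runs a single task; it no longer executes the task's dependencies.
        try:
            build.exec_task(taskname, d)
        except build.EventException:
            bb.msg.error(bb.msg.domain.Build, "%s failed" % taskname)
            raise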
diff --git a/lib/bb/cache.py b/lib/bb/cache.py
index 94d545715..135d29f10 100644
--- a/lib/bb/cache.py
+++ b/lib/bb/cache.py
@@ -290,7 +290,6 @@ class Cache:
packages_dynamic = (self.getVar('PACKAGES_DYNAMIC', file_name, True) or "").split()
rprovides = (self.getVar("RPROVIDES", file_name, True) or "").split()
- cacheData.task_queues[file_name] = self.getVar("_task_graph", file_name, True)
cacheData.task_deps[file_name] = self.getVar("_task_deps", file_name, True)
# build PackageName to FileName lookup table
diff --git a/lib/bb/cooker.py b/lib/bb/cooker.py
index 92ee203d3..ab3178942 100644
--- a/lib/bb/cooker.py
+++ b/lib/bb/cooker.py
@@ -175,14 +175,12 @@ class BBCooker:
# Always reschedule
return True
- def tryBuildPackage(self, fn, item, task, the_data, build_depends):
+ def tryBuildPackage(self, fn, item, task, the_data):
"""
Build one task of a package, optionally build following task depends
"""
bb.event.fire(bb.event.PkgStarted(item, the_data))
try:
- if not build_depends:
- bb.data.setVarFlag('do_%s' % task, 'dontrundeps', 1, the_data)
if not self.configuration.dry_run:
bb.build.exec_task('do_%s' % task, the_data)
bb.event.fire(bb.event.PkgSucceeded(item, the_data))
@@ -197,7 +195,7 @@ class BBCooker:
bb.event.fire(bb.event.PkgFailed(item, the_data))
raise
- def tryBuild( self, fn, build_depends):
+ def tryBuild(self, fn):
"""
Build a provider and its dependencies.
build_depends is a list of previous build dependencies (not runtime)
@@ -208,10 +206,10 @@ class BBCooker:
item = self.status.pkg_fn[fn]
- if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data):
- return True
+ #if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data):
+ # return True
- return self.tryBuildPackage(fn, item, self.configuration.cmd, the_data, build_depends)
+ return self.tryBuildPackage(fn, item, self.configuration.cmd, the_data)
def showVersions(self):
diff --git a/lib/bb/parse/parse_py/BBHandler.py b/lib/bb/parse/parse_py/BBHandler.py
index a43eb9478..e9b950acb 100644
--- a/lib/bb/parse/parse_py/BBHandler.py
+++ b/lib/bb/parse/parse_py/BBHandler.py
@@ -176,22 +176,7 @@ def handle(fn, d, include = 0):
handler = data.getVar(var,d)
bb.event.register(var, handler)
- tasklist = {}
- for var in data.getVar('__BBTASKS', d) or []:
- if var not in tasklist:
- tasklist[var] = []
- deps = data.getVarFlag(var, 'deps', d) or []
- for p in deps:
- if p not in tasklist[var]:
- tasklist[var].append(p)
-
- postdeps = data.getVarFlag(var, 'postdeps', d) or []
- for p in postdeps:
- if p not in tasklist:
- tasklist[p] = []
- if var not in tasklist[p]:
- tasklist[p].append(var)
-
+ tasklist = data.getVar('__BBTASKS', d) or []
bb.build.add_tasks(tasklist, d)
bbpath.pop(0)
@@ -338,15 +323,23 @@ def feeder(lineno, s, fn, root, d):
data.setVarFlag(var, "task", 1, d)
bbtasks = data.getVar('__BBTASKS', d) or []
- bbtasks.append(var)
+ if not var in bbtasks:
+ bbtasks.append(var)
data.setVar('__BBTASKS', bbtasks, d)
+ existing = data.getVarFlag(var, "deps", d) or []
if after is not None:
-# set up deps for function
- data.setVarFlag(var, "deps", after.split(), d)
+ # set up deps for function
+ for entry in after.split():
+ if entry not in existing:
+ existing.append(entry)
+ data.setVarFlag(var, "deps", existing, d)
if before is not None:
-# set up things that depend on this func
- data.setVarFlag(var, "postdeps", before.split(), d)
+ # set up things that depend on this func
+ for entry in before.split():
+ existing = data.getVarFlag(entry, "deps", d) or []
+ if var not in existing:
+ data.setVarFlag(entry, "deps", [var] + existing, d)
return
m = __addhandler_regexp__.match(s)
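The net effect of the feeder() change is that both 'before' and 'after' relationships are recorded in a single 'deps' varflag, and the separate 'postdeps' flag disappears. A rough illustration, assuming a datastore d in which "addtask do_compile after do_configure before do_install" has been parsed (task names are examples only):

    from bb import data

    tasks = data.getVar('__BBTASKS', d) or []
    assert 'do_compile' in tasks
    # 'after do_configure' is recorded on do_compile itself ...
    assert 'do_configure' in (data.getVarFlag('do_compile', 'deps', d) or [])
    # ... while 'before do_install' ends up on do_install's 'deps' flag
    assert 'do_compile' in (data.getVarFlag('do_install', 'deps', d) or [])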
diff --git a/lib/bb/runqueue.py b/lib/bb/runqueue.py
index 73698283e..9e52a5367 100644
--- a/lib/bb/runqueue.py
+++ b/lib/bb/runqueue.py
@@ -804,19 +804,11 @@ class RunQueue:
event.fire(bb.event.StampUpdate(self.target_pairs, self.dataCache.stamp, self.cfgdata))
- # RP - this code allows tasks to run out of the correct order - disabled, FIXME
- # Find any tasks with current stamps and remove them from the queue
- # for task1 in range(self.stats.total):
- # task = self.prio_map[task1]
- # fn = self.taskData.fn_index[self.runq_fnid[task]]
- # taskname = self.runq_task[task]
- # if bb.build.stamp_is_current(taskname, self.dataCache, fn):
- # bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
- # self.runq_running[task] = 1
- # self.runq_buildable[task] = 1
- # self.task_complete(task)
- # self.stats.taskCompleted()
- # self.stats.taskSkipped()
+ # Find out which tasks have current stamps which we can skip when the
+ # time comes
+ self.currentstamps = self.check_stamps()
+ self.stats.taskSkipped(len(self.currentstamps))
+ self.stats.taskCompleted(len(self.currentstamps))
def task_complete(self, task):
"""
@@ -871,13 +863,13 @@ class RunQueue:
fn = self.taskData.fn_index[self.runq_fnid[task]]
taskname = self.runq_task[task]
- if bb.build.stamp_is_current(taskname, self.dataCache, fn):
+ if task in self.currentstamps:
bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
self.runq_running[task] = 1
self.runq_buildable[task] = 1
self.task_complete(task)
- self.stats.taskCompleted()
- self.stats.taskSkipped()
+ #self.stats.taskCompleted()
+ #self.stats.taskSkipped()
continue
bb.event.fire(runQueueTaskStarted(task, self.stats, self, self.cfgData))
@@ -894,7 +886,7 @@ class RunQueue:
os.dup2(newsi, sys.stdin.fileno())
self.cooker.configuration.cmd = taskname[3:]
try:
- self.cooker.tryBuild(fn, False)
+ self.cooker.tryBuild(fn)
except bb.build.EventException:
bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
sys.exit(1)
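check_stamps() itself is not part of this diff, but the idea behind it is the same mtime comparison that the removed stamp_is_current() performed per recipe, applied once across the whole run queue before execution starts. A minimal sketch of that comparison, using a hypothetical helper rather than the actual RunQueue method:

    import os, stat

    def stamp_current(stampbase, task, deps):
        # A task's stamp is current if it exists and is no older than the
        # stamps of all of its dependencies.
        stampfile = "%s.%s" % (stampbase, task)
        if not os.access(stampfile, os.F_OK):
            return False
        tasktime = os.stat(stampfile)[stat.ST_MTIME]
        for dep in deps:
            depfile = "%s.%s" % (stampbase, dep)
            if not os.access(depfile, os.F_OK):
                return False
            if os.stat(depfile)[stat.ST_MTIME] > tasktime:
                return False
        return True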
diff --git a/lib/bb/taskdata.py b/lib/bb/taskdata.py
index 3dac6c26a..4a79e7a56 100644
--- a/lib/bb/taskdata.py
+++ b/lib/bb/taskdata.py
@@ -124,7 +124,6 @@ class TaskData:
Add tasks for a given fn to the database
"""
- task_graph = dataCache.task_queues[fn]
task_deps = dataCache.task_deps[fn]
fnid = self.getfn_id(fn)
@@ -136,11 +135,11 @@ class TaskData:
if fnid in self.tasks_fnid:
return
- for task in task_graph.allnodes():
+ for task in task_deps['tasks']:
# Work out task dependencies
parentids = []
- for dep in task_graph.getparents(task):
+ for dep in task_deps['parents'][task]:
parentid = self.gettask_id(fn, dep)
parentids.append(parentid)
taskid = self.gettask_id(fn, task)