summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRichard Purdie <rpurdie@linux.intel.com>2006-08-20 21:52:23 +0000
committerRichard Purdie <rpurdie@linux.intel.com>2006-08-20 21:52:23 +0000
commita6e26ec83270de1ae9993c054be25e4ccbde1a8e (patch)
tree6693ea9b7afb26a27cd7a9abc41f27b5d06178b4
parentb1bd6a2abfa177520991263efc4a5b365a625a0c (diff)
downloadbitbake-a6e26ec83270de1ae9993c054be25e4ccbde1a8e.tar.gz
bitbake/lib/bb/taskdata.py:
bitbake/lib/bb/__init__.py: bitbake/lib/bb/shell.py: bitbake/lib/bb/runqueue.py: bitbake/lib/bb/msg.py: bitbake/MANIFEST: bitbake/bin/bitbake: Major rewrite and modularisation of the dependency code. The code in bin/bitbake is replaced by the taskdata and runqueue modules. The code is designed to support multiple threads (although the initial threading algorithm is simplistic). The new code is aimed to be maintainable and debuggable (once msg.py is finished).
-rw-r--r--MANIFEST2
-rwxr-xr-xbin/bitbake321
-rw-r--r--lib/bb/__init__.py2
-rw-r--r--lib/bb/msg.py2
-rw-r--r--lib/bb/runqueue.py423
-rw-r--r--lib/bb/shell.py2
-rw-r--r--lib/bb/taskdata.py534
7 files changed, 1030 insertions, 256 deletions
diff --git a/MANIFEST b/MANIFEST
index 5823cd070..4542acca7 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -25,7 +25,9 @@ lib/bb/parse/__init__.py
lib/bb/parse/parse_py/BBHandler.py
lib/bb/parse/parse_py/ConfHandler.py
lib/bb/parse/parse_py/__init__.py
+lib/bb/runqueue.py
lib/bb/shell.py
+lib/bb/taskdata.py
lib/bb/utils.py
doc/COPYING.GPL
doc/COPYING.MIT
diff --git a/bin/bitbake b/bin/bitbake
index 9be1be1d9..5242f1fd2 100755
--- a/bin/bitbake
+++ b/bin/bitbake
@@ -7,6 +7,7 @@
# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
# Copyright (C) 2005 Holger Hans Peter Freyther
# Copyright (C) 2005 ROAD GmbH
+# Copyright (C) 2006 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
@@ -24,7 +25,7 @@
import sys, os, getopt, glob, copy, os.path, re, time
sys.path.insert(0,os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
import bb
-from bb import utils, data, parse, event, cache, providers
+from bb import utils, data, parse, event, cache, providers, taskdata, runqueue
from sets import Set
import itertools, optparse
@@ -43,15 +44,14 @@ class BBParsingStatus:
"""
def __init__(self):
+ """
+ Direct cache variables
+ """
self.providers = {}
self.rproviders = {}
self.packages = {}
self.packages_dynamic = {}
- self.bbfile_priority = {}
- self.bbfile_config_priorities = []
- self.ignored_dependencies = None
self.possible_world = []
- self.world_target = Set()
self.pkg_pn = {}
self.pkg_fn = {}
self.pkg_pvpr = {}
@@ -67,6 +67,15 @@ class BBParsingStatus:
self.stamp = {}
self.preferred = {}
+ """
+ Indirect Cache variables
+ """
+ self.ignored_dependencies = []
+ self.world_target = Set()
+ self.bbfile_priority = {}
+ self.bbfile_config_priorities = []
+
+
def handle_bb_data(self, file_name, bb_cache, cached):
"""
We will fill the dictionaries with the stuff we
@@ -212,25 +221,25 @@ class BBCooker:
def __init__( self ):
self.build_cache_fail = []
self.build_cache = []
- self.rbuild_cache = []
- self.building_list = []
- self.build_path = []
- self.consider_msgs_cache = []
self.stats = BBStatistics()
self.status = None
self.cache = None
self.bb_cache = None
- def tryBuildPackage( self, fn, item, the_data ):
- """Build one package"""
+ def tryBuildPackage(self, fn, item, task, the_data, build_depends):
+ """
+ Build one task of a package, optionally build following task depends
+ """
bb.event.fire(bb.event.PkgStarted(item, the_data))
try:
self.stats.attempt += 1
if self.configuration.force:
- bb.data.setVarFlag('do_%s' % self.configuration.cmd, 'force', 1, the_data)
+ bb.data.setVarFlag('do_%s' % task, 'force', 1, the_data)
+ if not build_depends:
+ bb.data.setVarFlag('do_%s' % task, 'dontrundeps', 1, the_data)
if not self.configuration.dry_run:
- bb.build.exec_task('do_%s' % self.configuration.cmd, the_data)
+ bb.build.exec_task('do_%s' % task, the_data)
bb.event.fire(bb.event.PkgSucceeded(item, the_data))
self.build_cache.append(fn)
return True
@@ -248,7 +257,7 @@ class BBCooker:
self.build_cache_fail.append(fn)
raise
- def tryBuild( self, fn, virtual , buildAllDeps , build_depends = []):
+ def tryBuild( self, fn, build_depends):
"""
Build a provider and its dependencies.
build_depends is a list of previous build dependencies (not runtime)
@@ -257,79 +266,13 @@ class BBCooker:
the_data = self.bb_cache.loadDataFull(fn, self)
- # Only follow all (runtime) dependencies if doing a build
- if not buildAllDeps and self.configuration.cmd is "build":
- buildAllDeps = self.status.build_all[fn]
-
- # Error on build time dependency loops
- if build_depends and build_depends.count(fn) > 1:
- bb.msg.error(bb.msg.domain.Depends, "%s depends on itself (eventually)" % fn)
- bb.msg.error(bb.msg.domain.Depends, "upwards chain is: %s" % (" -> ".join(self.build_path)))
- return False
-
- # See if this is a runtime dependency we've already built
- # Or a build dependency being handled in a different build chain
- if fn in self.building_list:
- return self.addRunDeps(fn, virtual , buildAllDeps)
-
item = self.status.pkg_fn[fn]
- self.building_list.append(fn)
-
- pathstr = "%s (%s)" % (item, virtual)
- self.build_path.append(pathstr)
-
- depends_list = (bb.data.getVar('DEPENDS', the_data, True) or "").split()
-
- bb.msg.note(2, bb.msg.domain.Depends, "current path: %s" % (" -> ".join(self.build_path)))
- bb.msg.note(2, bb.msg.domain.Depends, "dependencies for %s are: %s" % (item, " ".join(depends_list)))
-
- try:
- failed = False
-
- depcmd = self.configuration.cmd
- bbdepcmd = bb.data.getVarFlag('do_%s' % self.configuration.cmd, 'bbdepcmd', the_data)
- if bbdepcmd is not None:
- if bbdepcmd == "":
- depcmd = None
- else:
- depcmd = bbdepcmd
-
- if depcmd:
- oldcmd = self.configuration.cmd
- self.configuration.cmd = depcmd
-
- for dependency in depends_list:
- if dependency in self.status.ignored_dependencies:
- continue
- if not depcmd:
- continue
- if self.buildProvider( dependency , buildAllDeps , build_depends ) == 0:
- bb.msg.error(bb.msg.domain.Depends, "dependency %s (for %s) not satisfied" % (dependency,item))
- failed = True
- if self.configuration.abort:
- break
-
- if depcmd:
- self.configuration.cmd = oldcmd
-
- if failed:
- self.stats.deps += 1
- return False
-
- if not self.addRunDeps(fn, virtual , buildAllDeps):
- return False
-
- if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data):
- self.build_cache.append(fn)
- return True
-
- return self.tryBuildPackage( fn, item, the_data )
-
- finally:
- self.building_list.remove(fn)
- self.build_path.remove(pathstr)
+ if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data):
+ self.build_cache.append(fn)
+ return True
+ return self.tryBuildPackage(fn, item, self.configuration.cmd, the_data, build_depends)
def showVersions( self ):
pkg_pn = self.status.pkg_pn
@@ -410,7 +353,7 @@ class BBCooker:
bb.msg.debug(1, bb.msg.domain.Provider, "providers for %s are: %s" % (item, pkg_pn.keys()))
for pn in pkg_pn.keys():
- preferred_versions[pn] = bb.providers.findBestProvider(pn, pkg_pn)[2:4]
+ preferred_versions[pn] = bb.providers.findBestProvider(pn, self.configuration.data, self.status, pkg_pn)[2:4]
eligible.append(preferred_versions[pn][1])
for p in eligible:
@@ -435,8 +378,6 @@ class BBCooker:
return eligible
-
-
# try to avoid adding the same rdepends over an over again
seen_depends = []
seen_rdepends = []
@@ -556,163 +497,34 @@ class BBCooker:
(takes item names from DEPENDS namespace)
"""
- fn = None
- discriminated = False
+ taskdata = bb.taskdata.TaskData()
- if not item in self.status.providers:
- bb.msg.error(bb.msg.domain.Depends, "Nothing provides dependency %s" % item)
- bb.event.fire(bb.event.NoProvider(item,self.configuration.data))
+ try:
+ taskdata.add_provider(self.configuration.data, self.status, item)
+ except bb.providers.NoProvider:
return 0
- all_p = self.status.providers[item]
-
- for p in all_p:
- if p in self.build_cache:
- bb.msg.debug(1, bb.msg.domain.Provider, "already built %s in this run" % p)
- return 1
+ providers = taskdata.get_provider(item)
- eligible = bb.providers.filterProviders(all_p, item, self.configuration.data, self.status, self.build_cache_fail)
-
- if not eligible:
+ if len(providers) == 0:
return 0
- prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % item, self.configuration.data, 1)
- if prefervar:
- self.status.preferred[item] = prefervar
-
- if item in self.status.preferred:
- for p in eligible:
- pn = self.status.pkg_fn[p]
- if self.status.preferred[item] == pn:
- bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy %s due to PREFERRED_PROVIDERS" % (pn, item))
- eligible.remove(p)
- eligible = [p] + eligible
- discriminated = True
- break
-
- if len(eligible) > 1 and discriminated == False:
- if item not in self.consider_msgs_cache:
- providers_list = []
- for fn in eligible:
- providers_list.append(self.status.pkg_fn[fn])
- bb.msg.note(1, bb.msg.domain.Provider, "multiple providers are available (%s);" % ", ".join(providers_list))
- bb.msg.note(1, bb.msg.domain.Provider, "consider defining PREFERRED_PROVIDER_%s" % item)
- bb.event.fire(bb.event.MultipleProviders(item,providers_list,self.configuration.data))
- self.consider_msgs_cache.append(item)
-
-
- # run through the list until we find one that we can build
- for fn in eligible:
- bb.msg.debug(2, bb.msg.domain.Provider, "selecting %s to satisfy %s" % (fn, item))
- if self.tryBuild(fn, item, buildAllDeps, build_depends + [fn]):
- return 1
-
- bb.msg.note(1, bb.msg.domain.Provider, "no buildable providers for %s" % item)
- bb.event.fire(bb.event.NoProvider(item,self.configuration.data))
- return 0
-
- def buildRProvider( self, item , buildAllDeps ):
- """
- Build something to provide a named runtime requirement
- (takes item names from RDEPENDS/PACKAGES namespace)
- """
-
- fn = None
- all_p = []
- discriminated = False
-
- if not buildAllDeps:
- return True
-
- all_p = bb.providers.getRuntimeProviders(self.status, item)
-
- if not all_p:
- bb.msg.error(bb.msg.domain.Provider, "Nothing provides runtime dependency %s" % (item))
- bb.event.fire(bb.event.NoProvider(item,self.configuration.data,runtime=True))
- return False
-
- for p in all_p:
- if p in self.rbuild_cache:
- bb.msg.debug(2, bb.msg.domain.Provider, "Already built %s providing runtime %s" % (p,item))
- return True
+ for p in providers:
if p in self.build_cache:
- bb.msg.debug(2, bb.msg.domain.Provider, "Already built %s but adding any further RDEPENDS for %s" % (p, item))
- return self.addRunDeps(p, item , buildAllDeps)
-
- eligible = bb.providers.filterProviders(all_p, item, self.configuration.data, self.status, self.build_cache_fail)
- if not eligible:
- return 0
-
- preferred = []
- for p in eligible:
- pn = self.status.pkg_fn[p]
- provides = self.status.pn_provides[pn]
- for provide in provides:
- prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, self.configuration.data, 1)
- if prefervar == pn:
- bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s due to PREFERRED_PROVIDERS" % (pn, item))
- eligible.remove(p)
- eligible = [p] + eligible
- preferred.append(p)
-
- if len(eligible) > 1 and len(preferred) == 0:
- if item not in self.consider_msgs_cache:
- providers_list = []
- for fn in eligible:
- providers_list.append(self.status.pkg_fn[fn])
- bb.msg.note(1, bb.msg.domain.Provider, "multiple providers are available (%s);" % ", ".join(providers_list))
- bb.msg.note(1, bb.msg.domain.Provider, "consider defining a PREFERRED_PROVIDER to match runtime %s" % item)
- bb.event.fire(bb.event.MultipleProviders(item,providers_list,self.configuration.data,runtime=True))
- self.consider_msgs_cache.append(item)
-
- if len(preferred) > 1:
- if item not in self.consider_msgs_cache:
- providers_list = []
- for fn in preferred:
- providers_list.append(self.status.pkg_fn[fn])
- bb.msg.note(1, bb.msg.domain.Provider, "multiple preferred providers are available (%s);" % ", ".join(providers_list))
- bb.msg.note(1, bb.msg.domain.Provider, "consider defining only one PREFERRED_PROVIDER to match runtime %s" % item)
- bb.event.fire(bb.event.MultipleProviders(item,providers_list,self.configuration.data,runtime=True))
- self.consider_msgs_cache.append(item)
-
- # run through the list until we find one that we can build
- for fn in eligible:
- bb.msg.debug(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s" % (fn, item))
- if self.tryBuild(fn, item, buildAllDeps):
- return True
-
- bb.msg.error(bb.msg.domain.Provider, "No buildable providers for runtime %s" % item)
- bb.event.fire(bb.event.NoProvider(item,self.configuration.data))
- return False
-
- def addRunDeps(self , fn, item , buildAllDeps):
- """
- Add any runtime dependencies of runtime item provided by fn
- as long as item has't previously been processed by this function.
- """
-
- if item in self.rbuild_cache:
- return True
-
- if not buildAllDeps:
- return True
+ bb.msg.debug(1, bb.msg.domain.Provider, "already built %s in this run" % p)
+ return 1
- rdepends = []
- self.rbuild_cache.append(item)
+ taskdata.add_unresolved(self.configuration.data, self.status)
- if fn in self.status.rundeps and item in self.status.rundeps[fn]:
- rdepends += self.status.rundeps[fn][item].keys()
- if fn in self.status.runrecs and item in self.status.runrecs[fn]:
- rdepends += self.status.runrecs[fn][item].keys()
+ tasks = [[item, "do_%s" % self.configuration.cmd]]
+ rq = bb.runqueue.RunQueue()
+ rq.prepare_runqueue(self.configuration.data, self.status, taskdata, tasks)
+ rq.execute_runqueue(self, self.configuration.data, self.status, taskdata, tasks)
- bb.msg.debug(2, bb.msg.domain.Provider, "Additional runtime dependencies for %s are: %s" % (item, " ".join(rdepends)))
+ #taskdata.dump_data()
+ #rq.dump_data(taskdata)
- for rdepend in rdepends:
- if rdepend in self.status.ignored_dependencies:
- continue
- if not self.buildRProvider(rdepend, buildAllDeps):
- return False
- return True
+ return 1
def buildDepgraph( self ):
all_depends = self.status.all_depends
@@ -853,9 +665,6 @@ class BBCooker:
if self.configuration.verbose:
bb.msg.set_verbose(True)
- if not self.configuration.cmd:
- self.configuration.cmd = "build"
-
if self.configuration.debug:
bb.msg.set_debug_level(self.configuration.debug)
@@ -866,6 +675,12 @@ class BBCooker:
self.parseConfigurationFile( os.path.join( "conf", "bitbake.conf" ) )
+ if not self.configuration.cmd:
+ self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data)
+
+ # For backwards compatibility - REMOVE ME
+ if not self.configuration.cmd:
+ self.configuration.cmd = "build"
#
# Special updated configuration we use for firing events
@@ -896,7 +711,7 @@ class BBCooker:
item = bb.data.getVar('PN', bbfile_data, 1)
try:
- self.tryBuildPackage( bf, item, bbfile_data )
+ self.tryBuildPackage(bf, item, self.configuration.cmd, bbfile_data, True)
except bb.build.EventException:
bb.msg.error(bb.msg.domain.Build, "Build of '%s' failed" % item )
@@ -962,24 +777,22 @@ class BBCooker:
self.generateDotGraph( pkgs_to_build, self.configuration.ignored_dot_deps )
sys.exit( 0 )
-
bb.event.fire(bb.event.BuildStarted(buildname, pkgs_to_build, self.configuration.event_data))
- failures = 0
- for k in pkgs_to_build:
- failed = False
- try:
- if self.buildProvider( k , False ) == 0:
- # already diagnosed
- failed = True
- except bb.build.EventException:
- bb.msg.error(bb.msg.domain.Build, "Build of " + k + " failed")
- failed = True
-
- if failed:
- failures += failures
- if self.configuration.abort:
- sys.exit(1)
+ taskdata = bb.taskdata.TaskData()
+
+ runlist = []
+ try:
+ for k in pkgs_to_build:
+ taskdata.add_provider(self.configuration.data, self.status, k)
+ runlist.append([k, "do_%s" % self.configuration.cmd])
+ taskdata.add_unresolved(self.configuration.data, self.status)
+ except bb.providers.NoProvider:
+ sys.exit(1)
+
+ rq = bb.runqueue.RunQueue()
+ rq.prepare_runqueue(self.configuration.data, self.status, taskdata, runlist)
+ failures = rq.execute_runqueue(self, self.configuration.data, self.status, taskdata, runlist)
bb.event.fire(bb.event.BuildCompleted(buildname, pkgs_to_build, self.configuration.event_data, failures))
@@ -1118,7 +931,7 @@ Default BBFILES are the .bb files in the current directory.""" )
action = "store_true", dest = "interactive", default = False )
parser.add_option( "-c", "--cmd", help = "Specify task to execute. Note that this only executes the specified task for the providee and the packages it depends on, i.e. 'compile' does not implicitly call stage for the dependencies (IOW: use only if you know what you are doing). Depending on the base.bbclass a listtaks tasks is defined and will show available tasks",
- action = "store", dest = "cmd", default = "build" )
+ action = "store", dest = "cmd" )
parser.add_option( "-r", "--read", help = "read the specified file before bitbake.conf",
action = "append", dest = "file", default = [] )
diff --git a/lib/bb/__init__.py b/lib/bb/__init__.py
index 6310e42bc..ad10b3bb1 100644
--- a/lib/bb/__init__.py
+++ b/lib/bb/__init__.py
@@ -63,6 +63,8 @@ __all__ = [
"manifest",
"methodpool",
"cache",
+ "runqueue",
+ "taskdata",
"providers",
]
diff --git a/lib/bb/msg.py b/lib/bb/msg.py
index d526b6e98..a30a70c24 100644
--- a/lib/bb/msg.py
+++ b/lib/bb/msg.py
@@ -30,7 +30,7 @@ debug_level = 0
verbose = False
-domain = bb.utils.Enum('Depends', 'Provider', 'Build', 'Parsing', 'Collection')
+domain = bb.utils.Enum('Depends', 'Provider', 'Build', 'Parsing', 'Collection', 'RunQueue', "TaskData")
#
# Message control functions
diff --git a/lib/bb/runqueue.py b/lib/bb/runqueue.py
new file mode 100644
index 000000000..e874b343d
--- /dev/null
+++ b/lib/bb/runqueue.py
@@ -0,0 +1,423 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'RunQueue' implementation
+
+Handles preparation and execution of a queue of tasks
+
+Copyright (C) 2006 Richard Purdie
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License version 2 as published by the Free
+Software Foundation
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+"""
+
+from bb import msg, data, fetch, event, mkdirhier, utils
+import bb, os, sys
+
+class TaskFailure(Exception):
+ """Exception raised when a task in a runqueue fails"""
+
+ def __init__(self, fnid, taskname):
+ self.args = fnid, taskname
+
+class RunQueue:
+ """
+ BitBake Run Queue implementation
+ """
+ def __init__(self):
+ self.reset_runqueue()
+
+ def reset_runqueue(self):
+ self.runq_fnid = []
+ self.runq_task = []
+ self.runq_depends = []
+ self.runq_revdeps = []
+ self.runq_weight = []
+ self.prio_map = []
+
+ def prepare_runqueue(self, cfgData, dataCache, taskData, targets):
+ """
+ Turn a set of taskData into a RunQueue and compute data needed
+ to optimise the execution order.
+ targets is list of paired values - a provider name and the task to run
+ """
+
+ runq_weight1 = []
+ runq_build = []
+ runq_done = []
+
+ bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing Runqueue")
+
+ for task in range(len(taskData.tasks_name)):
+ fnid = taskData.tasks_fnid[task]
+ fn = taskData.fn_index[fnid]
+ task_deps = dataCache.task_deps[fn]
+
+ if fnid not in taskData.failed_fnids:
+
+ depends = taskData.tasks_tdepends[task]
+
+ # Resolve Depends
+ if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']:
+ taskname = task_deps['deptask'][taskData.tasks_name[task]]
+ for depid in taskData.depids[fnid]:
+ if depid in taskData.build_targets:
+ depdata = taskData.build_targets[depid][0]
+ if depdata:
+ dep = taskData.fn_index[depdata]
+ depends.append(taskData.gettask_id(dep, taskname))
+
+ # Resolve Runtime Depends
+ if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
+ taskname = task_deps['rdeptask'][taskData.tasks_name[task]]
+ for depid in taskData.rdepids[fnid]:
+ if depid in taskData.run_targets:
+ depdata = taskData.run_targets[depid][0]
+ if depdata:
+ dep = taskData.fn_index[depdata]
+ depends.append(taskData.gettask_id(dep, taskname))
+
+ def add_recursive_run(rdepid):
+ """
+ Add runtime depends of rdepid to depends, if
+                        we've not seen it before
+ (calls itself recursively)
+ """
+ if str(rdepid) in rdep_seen:
+ return
+ rdep_seen.append(rdepid)
+ if rdepid in taskData.run_targets:
+ depdata = taskData.run_targets[rdepid][0]
+ if depdata:
+ dep = taskData.fn_index[depdata]
+ taskid = taskData.gettask_id(dep, taskname)
+ depends.append(taskid)
+ fnid = taskData.tasks_fnid[taskid]
+ for nextdepid in taskData.rdepids[fnid]:
+ if nextdepid not in rdep_seen:
+ add_recursive_run(nextdepid)
+
+ # Resolve Recursive Runtime Depends
+ if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
+ rdep_seen = []
+ taskname = task_deps['recrdeptask'][taskData.tasks_name[task]]
+ for rdepid in taskData.rdepids[fnid]:
+ add_recursive_run(rdepid)
+
+ #Prune self references
+ if task in depends:
+ newdep = []
+ bb.debug(2, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends))
+ for dep in depends:
+ if task != dep:
+ newdep.append(dep)
+ depends = newdep
+
+
+ self.runq_fnid.append(taskData.tasks_fnid[task])
+ self.runq_task.append(taskData.tasks_name[task])
+ self.runq_depends.append(depends)
+ self.runq_revdeps.append([])
+ self.runq_weight.append(0)
+
+ runq_weight1.append(0)
+ runq_build.append(0)
+ runq_done.append(0)
+
+ bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks")
+
+ def mark_active(listid, depth):
+ """
+ Mark an item as active along with its depends
+ (calls itself recursively)
+ """
+
+ if runq_build[listid] == 1:
+ return
+
+ runq_build[listid] = 1
+
+ depends = self.runq_depends[listid]
+ for depend in depends:
+ mark_active(depend, depth+1)
+
+ for target in targets:
+ targetid = taskData.getbuild_id(target[0])
+ if targetid in taskData.failed_deps:
+ continue
+
+ fnid = taskData.build_targets[targetid][0]
+ if fnid in taskData.failed_fnids:
+ continue
+
+ fnids = taskData.matches_in_list(self.runq_fnid, fnid)
+ tasks = taskData.matches_in_list(self.runq_task, target[1])
+
+ listid = taskData.both_contain(fnids, tasks)
+
+ mark_active(listid, 1)
+
+ # Prune inactive tasks
+ maps = []
+ delcount = 0
+ for listid in range(len(self.runq_fnid)):
+ if runq_build[listid-delcount] == 1:
+ maps.append(listid-delcount)
+ else:
+ del self.runq_fnid[listid-delcount]
+ del self.runq_task[listid-delcount]
+ del self.runq_depends[listid-delcount]
+ del self.runq_weight[listid-delcount]
+ del runq_weight1[listid-delcount]
+ del runq_build[listid-delcount]
+ del runq_done[listid-delcount]
+ del self.runq_revdeps[listid-delcount]
+ delcount = delcount + 1
+ maps.append(-1)
+
+ bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid)))
+
+ for listid in range(len(self.runq_fnid)):
+ newdeps = []
+ origdeps = self.runq_depends[listid]
+ for origdep in origdeps:
+ if maps[origdep] == -1:
+ bb.fatal(bb.msg.domain.RunQueue, "Invalid mapping - Should never happen!")
+ newdeps.append(maps[origdep])
+ self.runq_depends[listid] = newdeps
+
+ bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings")
+
+ for listid in range(len(self.runq_fnid)):
+ for dep in self.runq_depends[listid]:
+ if dep not in self.runq_revdeps[dep]:
+ self.runq_revdeps[dep].append(listid)
+
+ endpoints = []
+ for listid in range(len(self.runq_fnid)):
+ revdeps = self.runq_revdeps[listid]
+ if len(revdeps) == 0:
+ runq_done[listid] = 1
+ self.runq_weight[listid] = 1
+ endpoints.append(listid)
+ for dep in revdeps:
+ if dep in self.runq_depends[listid]:
+ self.dump_data(taskData)
+ bb.fatal("Task %s has circular dependency on %s" % (dep, listid))
+ runq_weight1[listid] = len(revdeps)
+
+ bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints))
+
+ while 1:
+ next_points = []
+ for listid in endpoints:
+ for revdep in self.runq_depends[listid]:
+ self.runq_weight[revdep] = self.runq_weight[revdep] + self.runq_weight[listid]
+ runq_weight1[revdep] = runq_weight1[revdep] - 1
+ if runq_weight1[revdep] == 0:
+ next_points.append(revdep)
+ runq_done[revdep] = 1
+ endpoints = next_points
+ if len(next_points) == 0:
+ break
+
+ # Sanity Checks
+ for task in range(len(self.runq_fnid)):
+ if runq_done[task] == 0:
+ bb.fatal("Task %s not processed!" % task)
+ if runq_weight1[task] != 0:
+ bb.fatal("Task %s count not zero!" % task)
+
+ # Make a weight sorted map
+ from copy import deepcopy
+
+ sortweight = deepcopy(self.runq_weight)
+ sortweight.sort()
+ copyweight = deepcopy(self.runq_weight)
+ self.prio_map = []
+
+ for weight in sortweight:
+ idx = copyweight.index(weight)
+ self.prio_map.append(idx)
+ copyweight[idx] = -1
+ self.prio_map.reverse()
+
+ def execute_runqueue(self, cooker, cfgData, dataCache, taskData, runlist):
+ """
+ Run the tasks in a queue prepared by prepare_runqueue
+ Upon failure, optionally try to recover the build using any alternate providers
+ (if the abort on failure configuration option isn't set)
+ """
+
+ failures = 0
+ while 1:
+ try:
+ self.execute_runqueue_internal(cooker, cfgData, dataCache, taskData)
+ return failures
+ except bb.runqueue.TaskFailure, (fnid, taskname):
+ taskData.fail_fnid(fnid)
+ self.reset_runqueue()
+ self.prepare_runqueue(cfgData, dataCache, taskData, runlist)
+ failures = failures + 1
+
+ def execute_runqueue_internal(self, cooker, cfgData, dataCache, taskData):
+ """
+ Run the tasks in a queue prepared by prepare_runqueue
+ """
+
+ bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue")
+
+ runq_buildable = []
+ runq_running = []
+ runq_complete = []
+ active_builds = 0
+ build_pids = {}
+
+ def get_next_task(data):
+ """
+ Return the id of the highest priority task that is buildable
+ """
+ for task1 in range(len(data.runq_fnid)):
+ task = data.prio_map[task1]
+ if runq_running[task] == 1:
+ continue
+ if runq_buildable[task] == 1:
+ return task
+ return None
+
+ def task_complete(data, task):
+ """
+ Mark a task as completed
+ Look at the reverse dependencies and mark any task with
+ completed dependencies as buildable
+ """
+ runq_complete[task] = 1
+ for revdep in data.runq_revdeps[task]:
+ if runq_running[revdep] == 1:
+ continue
+ if runq_buildable[revdep] == 1:
+ continue
+ alldeps = 1
+ for dep in data.runq_depends[revdep]:
+ if runq_complete[dep] != 1:
+ alldeps = 0
+ if alldeps == 1:
+ runq_buildable[revdep] = 1
+ fn = taskData.fn_index[self.runq_fnid[revdep]]
+ taskname = self.runq_task[revdep]
+ bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname))
+
+ # Mark initial buildable tasks
+ for task in range(len(self.runq_fnid)):
+ runq_running.append(0)
+ runq_complete.append(0)
+ if len(self.runq_depends[task]) == 0:
+ runq_buildable.append(1)
+ else:
+ runq_buildable.append(0)
+
+ def get_user_idstring(task):
+ fn = taskData.fn_index[self.runq_fnid[task]]
+ taskname = self.runq_task[task]
+ return "%s, %s" % (fn, taskname)
+
+ number_tasks = bb.data.getVar("BB_NUMBER_THREADS", cfgData)
+ if not number_tasks:
+ number_tasks = 1
+
+ try:
+ while 1:
+ task = get_next_task(self)
+ if task is not None:
+ fn = taskData.fn_index[self.runq_fnid[task]]
+ taskname = self.runq_task[task]
+
+ if bb.build.stamp_is_current_cache(dataCache, fn, taskname):
+ bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, get_user_idstring(task)))
+ runq_running[task] = 1
+ task_complete(self, task)
+ continue
+
+ bb.msg.debug(1, bb.msg.domain.RunQueue, "Running task %s (%s)" % (task, get_user_idstring(task)))
+ try:
+ pid = os.fork()
+ except OSError, e:
+ bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
+ if pid == 0:
+ cooker.configuration.cmd = taskname[3:]
+ try:
+ cooker.tryBuild(fn, False)
+ except bb.build.EventException, e:
+ bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
+ sys.exit(1)
+ except:
+ sys.exit(1)
+ sys.exit(0)
+ build_pids[pid] = task
+ runq_running[task] = 1
+ active_builds = active_builds + 1
+ if active_builds < number_tasks:
+ continue
+ if active_builds > 0:
+ result = os.waitpid(-1, 0)
+ active_builds = active_builds - 1
+ if result[1] != 0:
+ bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (build_pids[result[0]], get_user_idstring(build_pids[result[0]])))
+ raise bb.runqueue.TaskFailure(self.runq_fnid[build_pids[result[0]]], self.runq_task[build_pids[result[0]]])
+ task_complete(self, build_pids[result[0]])
+ continue
+ break
+ except SystemExit:
+ raise
+ except:
+ bb.error("Exception received")
+ if active_builds > 0:
+ while active_builds > 0:
+ bb.note("Waiting for %s active tasks to finish" % active_builds)
+ os.waitpid(-1, 0)
+ active_builds = active_builds - 1
+ if cooker.configuration.abort:
+ sys.exit(1)
+ raise
+
+ # Sanity Checks
+ for task in range(len(self.runq_fnid)):
+ if runq_buildable[task] == 0:
+ bb.msg.error(bb.msg.domain.RunQueue, "Task %s never buildable!" % task)
+ if runq_running[task] == 0:
+ bb.msg.error(bb.msg.domain.RunQueue, "Task %s never ran!" % task)
+ if runq_complete[task] == 0:
+ bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task)
+
+ return 0
+
+ def dump_data(self, taskQueue):
+ """
+ Dump some debug information on the internal data structures
+ """
+ bb.msg.debug(3, bb.msg.domain.RunQueue, "run_tasks:")
+ for task in range(len(self.runq_fnid)):
+ bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task,
+ taskQueue.fn_index[self.runq_fnid[task]],
+ self.runq_task[task],
+ self.runq_weight[task],
+ self.runq_depends[task],
+ self.runq_revdeps[task]))
+
+ bb.msg.debug(3, bb.msg.domain.RunQueue, "sorted_tasks:")
+ for task1 in range(len(self.runq_fnid)):
+ task = self.prio_map[task1]
+ bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task,
+ taskQueue.fn_index[self.runq_fnid[task]],
+ self.runq_task[task],
+ self.runq_weight[task],
+ self.runq_depends[task],
+ self.runq_revdeps[task]))
diff --git a/lib/bb/shell.py b/lib/bb/shell.py
index b962b582d..889d90fb1 100644
--- a/lib/bb/shell.py
+++ b/lib/bb/shell.py
@@ -232,7 +232,7 @@ class BitBakeShellCommands:
item = data.getVar('PN', bbfile_data, 1)
data.setVar( "_task_cache", [], bbfile_data ) # force
try:
- cooker.tryBuildPackage( os.path.abspath( bf ), item, bbfile_data )
+ cooker.tryBuildPackage( os.path.abspath( bf ), item, cmd, bbfile_data, True )
except build.EventException, e:
print "ERROR: Couldn't build '%s'" % name
global last_exception
diff --git a/lib/bb/taskdata.py b/lib/bb/taskdata.py
new file mode 100644
index 000000000..d14b0b99e
--- /dev/null
+++ b/lib/bb/taskdata.py
@@ -0,0 +1,534 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'TaskData' implementation
+
+Task data collection and handling
+
+Copyright (C) 2006 Richard Purdie
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License version 2 as published by the Free
+Software Foundation
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+"""
+
+from bb import data, fetch, event, mkdirhier, utils
+import bb, os
+
+class TaskData:
+ """
+ BitBake Task Data implementation
+ """
+ def __init__(self):
+ self.build_names_index = []
+ self.run_names_index = []
+ self.fn_index = []
+
+ self.build_targets = {}
+ self.run_targets = {}
+
+ self.tasks_fnid = []
+ self.tasks_name = []
+ self.tasks_tdepends = []
+
+ self.depids = {}
+ self.rdepids = {}
+
+ self.consider_msgs_cache = []
+
+ self.failed_deps = []
+ self.failed_rdeps = []
+ self.failed_fnids = []
+
+
+ def matches_in_list(self, data, substring):
+ """
+ Return a list of the positions of substring in list data
+ """
+ matches = []
+ start = 0
+ while 1:
+ try:
+ start = data.index(substring, start)
+ except ValueError:
+ return matches
+ matches.append(start)
+ start = start + 1
+
+ def both_contain(self, list1, list2):
+ """
+ Return the items present in both list1 and list2
+ """
+ matches = []
+ for data in list1:
+ if data in list2:
+ return data
+ return None
+
+
+ def getbuild_id(self, name):
+ """
+ Return an ID number for the build target name.
+ If it doesn't exist, create one.
+ """
+ if not name in self.build_names_index:
+ self.build_names_index.append(name)
+
+ return self.build_names_index.index(name)
+
+ def getrun_id(self, name):
+ """
+ Return an ID number for the run target name.
+ If it doesn't exist, create one.
+ """
+ if not name in self.run_names_index:
+ self.run_names_index.append(name)
+
+ return self.run_names_index.index(name)
+
+ def getfn_id(self, name):
+ """
+ Return an ID number for the filename.
+ If it doesn't exist, create one.
+ """
+ if not name in self.fn_index:
+ self.fn_index.append(name)
+
+ return self.fn_index.index(name)
+
+ def gettask_id(self, fn, task):
+ """
+ Return an ID number for the task matching fn and task.
+ If it doesn't exist, create one.
+ """
+ fnid = self.getfn_id(fn)
+
+ fnids = self.matches_in_list(self.tasks_fnid, fnid)
+ names = self.matches_in_list(self.tasks_name, task)
+
+ listid = self.both_contain(fnids, names)
+
+ if listid is not None:
+ return listid
+
+ self.tasks_name.append(task)
+ self.tasks_fnid.append(fnid)
+ self.tasks_tdepends.append([])
+
+ return len(self.tasks_name)-1
+
    def add_tasks(self, fn, dataCache):
        """
        Add tasks for a given fn to the database

        fn        -- recipe filename (interned via getfn_id)
        dataCache -- parsed-metadata cache supplying task_queues, task_deps,
                     deps, rundeps and runrecs entries for fn
        """

        # task_graph is presumably a bb digraph of intra-file task ordering
        # (it offers allnodes()/getparents()) -- TODO confirm against the
        # cache builder.
        task_graph = dataCache.task_queues[fn]
        # NOTE(review): task_deps is fetched but never used below.
        task_deps = dataCache.task_deps[fn]

        fnid = self.getfn_id(fn)

        if fnid in self.failed_fnids:
            bb.fatal("Trying to re-add a failed file? Something is broken...")

        # Check if we've already seen this fn
        if fnid in self.tasks_fnid:
            return

        # Work out task dependencies: each task's tdepends list gets the IDs
        # of its parents in the task graph.
        for task in task_graph.allnodes():
            parentids = []
            for dep in task_graph.getparents(task):
                parentid = self.gettask_id(fn, dep)
                parentids.append(parentid)
            taskid = self.gettask_id(fn, task)
            self.tasks_tdepends[taskid].extend(parentids)

        # Work out build dependencies (DEPENDS namespace, one ID per name).
        if not fnid in self.depids:
            dependids = []
            for depend in dataCache.deps[fn]:
                bb.msg.debug(2, bb.msg.domain.TaskData, "Added dependency %s for %s" % (depend, fn))
                dependids.append(self.getbuild_id(depend))
            self.depids[fnid] = dependids

        # Work out runtime dependencies: RDEPENDS and RRECOMMENDS entries
        # (keyed per package) both feed the same rdepids list.
        if not fnid in self.rdepids:
            rdependids = []
            rdepends = dataCache.rundeps[fn]
            rrecs = dataCache.runrecs[fn]
            for package in rdepends:
                for rdepend in rdepends[package]:
                    bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime dependency %s for %s" % (rdepend, fn))
                    rdependids.append(self.getrun_id(rdepend))
            for package in rrecs:
                for rdepend in rrecs[package]:
                    bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime recommendation %s for %s" % (rdepend, fn))
                    rdependids.append(self.getrun_id(rdepend))
            self.rdepids[fnid] = rdependids
+
+ def have_build_target(self, target):
+ """
+ Have we a build target matching this name?
+ """
+ targetid = self.getbuild_id(target)
+
+ if targetid in self.build_targets:
+ return True
+ return False
+
+ def have_runtime_target(self, target):
+ """
+ Have we a runtime target matching this name?
+ """
+ targetid = self.getrun_id(target)
+
+ if targetid in self.run_targets:
+ return True
+ return False
+
+ def add_build_target(self, fn, item):
+ """
+ Add a build target.
+ If already present, append the provider fn to the list
+ """
+ targetid = self.getbuild_id(item)
+ fnid = self.getfn_id(fn)
+
+ if targetid in self.build_targets:
+ if fnid in self.build_targets[targetid]:
+ return
+ self.build_targets[targetid].append(fnid)
+ return
+ self.build_targets[targetid] = [fnid]
+
+ def add_runtime_target(self, fn, item):
+ """
+ Add a runtime target.
+ If already present, append the provider fn to the list
+ """
+ targetid = self.getrun_id(item)
+ fnid = self.getfn_id(fn)
+
+ if targetid in self.run_targets:
+ if fnid in self.run_targets[targetid]:
+ return
+ self.run_targets[targetid].append(fnid)
+ return
+ self.run_targets[targetid] = [fnid]
+
+ def get_unresolved_build_targets(self, dataCache):
+ """
+ Return a list of build targets who's providers
+ are unknown.
+ """
+ unresolved = []
+ for target in self.build_names_index:
+ if target in dataCache.ignored_dependencies:
+ continue
+ if target in self.failed_deps:
+ continue
+ if not self.have_build_target(target):
+ unresolved.append(target)
+ return unresolved
+
+ def get_unresolved_run_targets(self, dataCache):
+ """
+ Return a list of runtime targets who's providers
+ are unknown.
+ """
+ unresolved = []
+ for target in self.run_names_index:
+ if target in dataCache.ignored_dependencies:
+ continue
+ if target in self.failed_rdeps:
+ continue
+ if not self.have_runtime_target(target):
+ unresolved.append(target)
+ return unresolved
+
+ def get_provider(self, item):
+ """
+ Return a list of providers of item
+ """
+ targetid = self.getbuild_id(item)
+
+ return self.build_targets[targetid]
+
+ def get_dependees(self, itemid):
+ """
+ Return a list of targets which depend on item
+ """
+ dependees = []
+ for fnid in self.depids:
+ if itemid in self.depids[fnid]:
+ dependees.append(fnid)
+ return dependees
+
+ def get_dependees_str(self, item):
+ """
+ Return a list of targets which depend on item as a user readable string
+ """
+ itemid = self.getbuild_id(item)
+ dependees = []
+ for fnid in self.depids:
+ if itemid in self.depids[fnid]:
+ dependees.append(self.fn_index[fnid])
+ return dependees
+
+ def get_rdependees(self, itemid):
+ """
+ Return a list of targets which depend on runtime item
+ """
+ dependees = []
+ for fnid in self.rdepids:
+ if itemid in self.rdepids[fnid]:
+ dependees.append(fnid)
+ return dependees
+
+ def get_rdependees_str(self, item):
+ """
+ Return a list of targets which depend on runtime item as a user readable string
+ """
+ itemid = self.getrun_id(item)
+ dependees = []
+ for fnid in self.rdepids:
+ if itemid in self.rdepids[fnid]:
+ dependees.append(self.fn_index[fnid])
+ return dependees
+
+ def add_provider(self, cfgData, dataCache, item):
+ """
+ Add the providers of item to the task data
+ """
+
+ if item in dataCache.ignored_dependencies:
+ return True
+
+ if not item in dataCache.providers:
+ bb.msg.error(bb.msg.domain.Provider, "No providers of build target %s (for %s)" % (item, self.get_dependees_str(item)))
+ bb.event.fire(bb.event.NoProvider(item, cfgData))
+ raise bb.providers.NoProvider(item)
+
+ if self.have_build_target(item):
+ return True
+
+ all_p = dataCache.providers[item]
+
+ eligible = bb.providers.filterProviders(all_p, item, cfgData, dataCache)
+
+ for p in eligible:
+ fnid = self.getfn_id(p)
+ if fnid in self.failed_fnids:
+ eligible.remove(p)
+
+ if not eligible:
+ bb.msg.error(bb.msg.domain.Provider, "No providers of build target %s after filtering (for %s)" % (item, self.get_dependees_str(item)))
+ bb.event.fire(bb.event.NoProvider(item, cfgData))
+ raise bb.providers.NoProvider(item)
+
+ prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % item, cfgData, 1)
+ if prefervar:
+ dataCache.preferred[item] = prefervar
+
+ discriminated = False
+ if item in dataCache.preferred:
+ for p in eligible:
+ pn = dataCache.pkg_fn[p]
+ if dataCache.preferred[item] == pn:
+ bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy %s due to PREFERRED_PROVIDERS" % (pn, item))
+ eligible.remove(p)
+ eligible = [p] + eligible
+ discriminated = True
+ break
+
+ if len(eligible) > 1 and discriminated == False:
+ if item not in self.consider_msgs_cache:
+ providers_list = []
+ for fn in eligible:
+ providers_list.append(dataCache.pkg_fn[fn])
+ bb.msg.note(1, bb.msg.domain.Provider, "multiple providers are available (%s);" % ", ".join(providers_list))
+ bb.msg.note(1, bb.msg.domain.Provider, "consider defining PREFERRED_PROVIDER_%s" % item)
+ bb.event.fire(bb.event.MultipleProviders(item,providers_list,cfgData))
+ self.consider_msgs_cache.append(item)
+
+ for fn in eligible:
+ bb.msg.debug(2, bb.msg.domain.Provider, "adding %s to satisfy %s" % (fn, item))
+
+ self.add_tasks(fn, dataCache)
+ self.add_build_target(fn, item)
+
+ item = dataCache.pkg_fn[fn]
+
+ return True
+
+ def add_rprovider(self, cfgData, dataCache, item):
+ """
+ Add the runtime providers of item to the task data
+ (takes item names from RDEPENDS/PACKAGES namespace)
+ """
+
+ if item in dataCache.ignored_dependencies:
+ return True
+
+ if self.have_runtime_target(item):
+ return True
+
+ all_p = bb.providers.getRuntimeProviders(dataCache, item)
+
+ if not all_p:
+ bb.msg.error(bb.msg.domain.Provider, "No providers of runtime build target %s (for %s)" % (item, self.get_rdependees_str(item)))
+ bb.event.fire(bb.event.NoProvider(item, cfgData, runtime=True))
+ raise bb.providers.NoRProvider(item)
+
+ eligible = bb.providers.filterProviders(all_p, item, cfgData, dataCache)
+
+ for p in eligible:
+ fnid = self.getfn_id(p)
+ if fnid in self.failed_fnids:
+ eligible.remove(p)
+
+ if not eligible:
+ bb.msg.error(bb.msg.domain.Provider, "No providers of runtime build target %s after filtering (for %s)" % (item, self.get_rdependees_str(item)))
+ bb.event.fire(bb.event.NoProvider(item, cfgData, runtime=True))
+ raise bb.providers.NoRProvider(item)
+
+ # Should use dataCache.preferred here?
+ preferred = []
+ for p in eligible:
+ pn = dataCache.pkg_fn[p]
+ provides = dataCache.pn_provides[pn]
+ for provide in provides:
+ prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, cfgData, 1)
+ if prefervar == pn:
+ bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s due to PREFERRED_PROVIDERS" % (pn, item))
+ eligible.remove(p)
+ eligible = [p] + eligible
+ preferred.append(p)
+
+ if len(eligible) > 1 and len(preferred) == 0:
+ if item not in self.consider_msgs_cache:
+ providers_list = []
+ for fn in eligible:
+ providers_list.append(dataCache.pkg_fn[fn])
+ bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available (%s);" % ", ".join(providers_list))
+ bb.msg.note(2, bb.msg.domain.Provider, "consider defining a PREFERRED_PROVIDER to match runtime %s" % item)
+ bb.event.fire(bb.event.MultipleProviders(item,providers_list, cfgData, runtime=True))
+ self.consider_msgs_cache.append(item)
+
+ if len(preferred) > 1:
+ if item not in self.consider_msgs_cache:
+ providers_list = []
+ for fn in preferred:
+ providers_list.append(dataCache.pkg_fn[fn])
+ bb.msg.note(2, bb.msg.domain.Provider, "multiple preferred providers are available (%s);" % ", ".join(providers_list))
+ bb.msg.note(2, bb.msg.domain.Provider, "consider defining only one PREFERRED_PROVIDER to match runtime %s" % item)
+ bb.event.fire(bb.event.MultipleProviders(item,providers_list, cfgData, runtime=True))
+ self.consider_msgs_cache.append(item)
+
+ # run through the list until we find one that we can build
+ for fn in eligible:
+ bb.msg.debug(2, bb.msg.domain.Provider, "adding %s to satisfy runtime %s" % (fn, item))
+ self.add_tasks(fn, dataCache)
+ self.add_runtime_target(fn, item)
+
+ return True
+
    def fail_fnid(self, fnid):
        """
        Mark a file as failed (unbuildable)
        Remove any references from build and runtime provider lists
        """
        # Guard against re-entry: remove_buildtarget()/remove_runtarget()
        # below recurse back into fail_fnid() for each dependee.
        if fnid in self.failed_fnids:
            return
        bb.msg.note(1, bb.msg.domain.Provider, "Removing failed file %s" % self.fn_index[fnid])
        self.failed_fnids.append(fnid)
        # Strip this file from every provider list; a target left with no
        # providers becomes unbuildable itself, cascading to its dependees.
        for target in self.build_targets:
            if fnid in self.build_targets[target]:
                self.build_targets[target].remove(fnid)
                if len(self.build_targets[target]) == 0:
                    self.remove_buildtarget(target)
        for target in self.run_targets:
            if fnid in self.run_targets[target]:
                self.run_targets[target].remove(fnid)
                if len(self.run_targets[target]) == 0:
                    self.remove_runtarget(target)
+
+ def remove_buildtarget(self, targetid):
+ """
+ Mark a build target as failed (unbuildable)
+ Trigger removal of any files that have this as a dependency
+ """
+ bb.msg.note(1, bb.msg.domain.Provider, "Removing failed build target %s" % self.build_names_index[targetid])
+ self.failed_deps.append(targetid)
+ dependees = self.get_dependees(targetid)
+ for fnid in dependees:
+ self.fail_fnid(fnid)
+
+ def remove_runtarget(self, targetid):
+ """
+ Mark a run target as failed (unbuildable)
+ Trigger removal of any files that have this as a dependency
+ """
+ bb.msg.note(1, bb.msg.domain.Provider, "Removing failed runtime build target %s" % self.run_names_index[targetid])
+ self.failed_rdeps.append(targetid)
+ dependees = self.get_rdependees(targetid)
+ for fnid in dependees:
+ self.fail_fnid(fnid)
+
+ def add_unresolved(self, cfgData, dataCache):
+ """
+ Resolve all unresolved build and runtime targets
+ """
+ bb.msg.note(1, bb.msg.domain.TaskData, "Resolving missing task queue dependencies")
+ while 1:
+ added = 0
+ for target in self.get_unresolved_build_targets(dataCache):
+ try:
+ self.add_provider(cfgData, dataCache, target)
+ added = added + 1
+ except bb.providers.NoProvider:
+ # FIXME - should look at configuration.abort here and raise if set
+ self.remove_buildtarget(self.getbuild_id(target))
+ for target in self.get_unresolved_run_targets(dataCache):
+ try:
+ self.add_rprovider(cfgData, dataCache, target)
+ added = added + 1
+ except bb.providers.NoRProvider:
+ # FIXME - should look at configuration.abort here and raise if set
+ self.remove_runtarget(self.getrun_id(target))
+ bb.msg.debug(1, bb.msg.domain.TaskData, "Resolved " + str(added) + " extra dependecies")
+ if added == 0:
+ break
+
+
+
+ def dump_data(self):
+ """
+ Dump some debug information on the internal data structures
+ """
+ bb.msg.debug(3, bb.msg.domain.TaskData, "build_names:")
+ bb.msg.debug(3, bb.msg.domain.TaskData, self.build_names_index)
+ bb.msg.debug(3, bb.msg.domain.TaskData, "run_names:")
+ bb.msg.debug(3, bb.msg.domain.TaskData, self.run_names_index)
+ bb.msg.debug(3, bb.msg.domain.TaskData, "build_targets:")
+ for target in self.build_targets.keys():
+ bb.msg.debug(3, bb.msg.domain.TaskData, " %s: %s" % (self.build_names_index[target], self.build_targets[target]))
+ bb.msg.debug(3, bb.msg.domain.TaskData, "run_targets:")
+ for target in self.run_targets.keys():
+ bb.msg.debug(3, bb.msg.domain.TaskData, " %s: %s" % (self.run_names_index[target], self.run_targets[target]))
+ bb.msg.debug(3, bb.msg.domain.TaskData, "tasks:")
+ for task in range(len(self.tasks_name)):
+ bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s - %s: %s" % (
+ task,
+ self.fn_index[self.tasks_fnid[task]],
+ self.tasks_name[task],
+ self.tasks_tdepends[task]))
+
+