Diffstat (limited to 'bitbake-dev/lib/bb')
-rw-r--r--  bitbake-dev/lib/bb/COW.py  320
-rw-r--r--  bitbake-dev/lib/bb/__init__.py  1133
-rw-r--r--  bitbake-dev/lib/bb/build.py  377
-rw-r--r--  bitbake-dev/lib/bb/cache.py  465
-rw-r--r--  bitbake-dev/lib/bb/command.py  211
-rw-r--r--  bitbake-dev/lib/bb/cooker.py  941
-rw-r--r--  bitbake-dev/lib/bb/daemonize.py  189
-rw-r--r--  bitbake-dev/lib/bb/data.py  570
-rw-r--r--  bitbake-dev/lib/bb/data_smart.py  292
-rw-r--r--  bitbake-dev/lib/bb/event.py  302
-rw-r--r--  bitbake-dev/lib/bb/fetch/__init__.py  556
-rw-r--r--  bitbake-dev/lib/bb/fetch/bzr.py  154
-rw-r--r--  bitbake-dev/lib/bb/fetch/cvs.py  178
-rw-r--r--  bitbake-dev/lib/bb/fetch/git.py  142
-rw-r--r--  bitbake-dev/lib/bb/fetch/hg.py  141
-rw-r--r--  bitbake-dev/lib/bb/fetch/local.py  72
-rw-r--r--  bitbake-dev/lib/bb/fetch/perforce.py  213
-rw-r--r--  bitbake-dev/lib/bb/fetch/ssh.py  120
-rw-r--r--  bitbake-dev/lib/bb/fetch/svk.py  109
-rw-r--r--  bitbake-dev/lib/bb/fetch/svn.py  204
-rw-r--r--  bitbake-dev/lib/bb/fetch/wget.py  105
-rw-r--r--  bitbake-dev/lib/bb/manifest.py  144
-rw-r--r--  bitbake-dev/lib/bb/methodpool.py  84
-rw-r--r--  bitbake-dev/lib/bb/msg.py  125
-rw-r--r--  bitbake-dev/lib/bb/parse/__init__.py  80
-rw-r--r--  bitbake-dev/lib/bb/parse/parse_py/BBHandler.py  416
-rw-r--r--  bitbake-dev/lib/bb/parse/parse_py/ConfHandler.py  228
-rw-r--r--  bitbake-dev/lib/bb/parse/parse_py/__init__.py  33
-rw-r--r--  bitbake-dev/lib/bb/persist_data.py  110
-rw-r--r--  bitbake-dev/lib/bb/providers.py  303
-rw-r--r--  bitbake-dev/lib/bb/runqueue.py  1157
-rw-r--r--  bitbake-dev/lib/bb/shell.py  827
-rw-r--r--  bitbake-dev/lib/bb/taskdata.py  594
-rw-r--r--  bitbake-dev/lib/bb/ui/__init__.py  18
-rw-r--r--  bitbake-dev/lib/bb/ui/depexplorer.py  271
-rw-r--r--  bitbake-dev/lib/bb/ui/knotty.py  157
-rw-r--r--  bitbake-dev/lib/bb/ui/ncurses.py  333
-rw-r--r--  bitbake-dev/lib/bb/ui/uievent.py  127
-rw-r--r--  bitbake-dev/lib/bb/ui/uihelper.py  49
-rw-r--r--  bitbake-dev/lib/bb/utils.py  270
-rw-r--r--  bitbake-dev/lib/bb/xmlrpcserver.py  157
41 files changed, 12277 insertions, 0 deletions
diff --git a/bitbake-dev/lib/bb/COW.py b/bitbake-dev/lib/bb/COW.py
new file mode 100644
index 0000000000..e5063d60a8
--- /dev/null
+++ b/bitbake-dev/lib/bb/COW.py
@@ -0,0 +1,320 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# This is a copy-on-write dictionary and set which abuses classes to try and be nice and fast.
+#
+# Copyright (C) 2006 Tim Amsell
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please note:
+# Be careful when using mutable types (i.e. dicts and lists) - operations involving these are SLOW.
+# Assign a file object to __warn__ to get warnings about slow operations.
+#
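+#
+# A minimal usage sketch (illustrative; the __main__ block at the bottom of
+# this file is the full demo). These classes are never instantiated in the
+# usual sense - COWDictBase() and .copy() both return a new subclass whose
+# attribute lookups fall through to the parent:
+#
+#   base = COWDictBase.copy()   # level 1 dictionary
+#   base['x'] = 1
+#   child = base.copy()         # level 2; reads fall through to base
+#   child['x'] = 2              # writes shadow; base['x'] is still 1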
+
+from inspect import getmro
+
+import copy
+import types, sets
+types.ImmutableTypes = tuple([
+    types.BooleanType,
+    types.ComplexType,
+    types.FloatType,
+    types.IntType,
+    types.LongType,
+    types.NoneType,
+    types.TupleType,
+    sets.ImmutableSet] +
+    list(types.StringTypes))
+
+MUTABLE = "__mutable__"
+
+class COWMeta(type):
+ pass
+
+class COWDictMeta(COWMeta):
+ __warn__ = False
+ __hasmutable__ = False
+ __marker__ = tuple()
+
+ def __str__(cls):
+        # FIXME: magic number - the 3 accounts for __count__, __module__ and __doc__ in cls.__dict__
+        return "<COWDict Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3)
+ __repr__ = __str__
+
+ def cow(cls):
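+        # Each copy is a new subclass of the current class; attribute reads
+        # fall through to the parent via normal class attribute lookup, while
+        # writes (setattr) land on the child - hence copy-on-write.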
+ class C(cls):
+ __count__ = cls.__count__ + 1
+ return C
+ copy = cow
+ __call__ = cow
+
+ def __setitem__(cls, key, value):
+ if not isinstance(value, types.ImmutableTypes):
+ if not isinstance(value, COWMeta):
+ cls.__hasmutable__ = True
+ key += MUTABLE
+ setattr(cls, key, value)
+
+ def __getmutable__(cls, key, readonly=False):
+ nkey = key + MUTABLE
+ try:
+ return cls.__dict__[nkey]
+ except KeyError:
+ pass
+
+ value = getattr(cls, nkey)
+ if readonly:
+ return value
+
+ if not cls.__warn__ is False and not isinstance(value, COWMeta):
+ print >> cls.__warn__, "Warning: Doing a copy because %s is a mutable type." % key
+ try:
+ value = value.copy()
+ except AttributeError, e:
+ value = copy.copy(value)
+ setattr(cls, nkey, value)
+ return value
+
+ __getmarker__ = []
+ def __getreadonly__(cls, key, default=__getmarker__):
+ """\
+ Get a value (even if mutable) which you promise not to change.
+ """
+ return cls.__getitem__(key, default, True)
+
+ def __getitem__(cls, key, default=__getmarker__, readonly=False):
+ try:
+ try:
+ value = getattr(cls, key)
+ except AttributeError:
+ value = cls.__getmutable__(key, readonly)
+
+ # This is for values which have been deleted
+ if value is cls.__marker__:
+ raise AttributeError("key %s does not exist." % key)
+
+ return value
+ except AttributeError, e:
+ if not default is cls.__getmarker__:
+ return default
+
+ raise KeyError(str(e))
+
+ def __delitem__(cls, key):
+ cls.__setitem__(key, cls.__marker__)
+
+ def __revertitem__(cls, key):
+ if not cls.__dict__.has_key(key):
+ key += MUTABLE
+ delattr(cls, key)
+
+ def has_key(cls, key):
+ value = cls.__getreadonly__(key, cls.__marker__)
+ if value is cls.__marker__:
+ return False
+ return True
+
+ def iter(cls, type, readonly=False):
+ for key in dir(cls):
+ if key.startswith("__"):
+ continue
+
+ if key.endswith(MUTABLE):
+ key = key[:-len(MUTABLE)]
+
+ if type == "keys":
+ yield key
+
+ try:
+ if readonly:
+ value = cls.__getreadonly__(key)
+ else:
+ value = cls[key]
+ except KeyError:
+ continue
+
+ if type == "values":
+ yield value
+ if type == "items":
+ yield (key, value)
+ raise StopIteration()
+
+ def iterkeys(cls):
+ return cls.iter("keys")
+ def itervalues(cls, readonly=False):
+ if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False:
+ print >> cls.__warn__, "Warning: If you arn't going to change any of the values call with True."
+ return cls.iter("values", readonly)
+ def iteritems(cls, readonly=False):
+ if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False:
+ print >> cls.__warn__, "Warning: If you arn't going to change any of the values call with True."
+ return cls.iter("items", readonly)
+
+class COWSetMeta(COWDictMeta):
+ def __str__(cls):
+        # FIXME: magic number - the 3 accounts for __count__, __module__ and __doc__ in cls.__dict__
+        return "<COWSet Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3)
+ __repr__ = __str__
+
+ def cow(cls):
+ class C(cls):
+ __count__ = cls.__count__ + 1
+ return C
+
+ def add(cls, value):
+ COWDictMeta.__setitem__(cls, repr(hash(value)), value)
+
+ def remove(cls, value):
+ COWDictMeta.__delitem__(cls, repr(hash(value)))
+
+ def __in__(cls, value):
+        return COWDictMeta.has_key(cls, repr(hash(value)))
+
+ def iterkeys(cls):
+ raise TypeError("sets don't have keys")
+
+ def iteritems(cls):
+ raise TypeError("sets don't have 'items'")
+
+# These are the actual classes you use!
+class COWDictBase(object):
+ __metaclass__ = COWDictMeta
+ __count__ = 0
+
+class COWSetBase(object):
+ __metaclass__ = COWSetMeta
+ __count__ = 0
+
+if __name__ == "__main__":
+ import sys
+ COWDictBase.__warn__ = sys.stderr
+ a = COWDictBase()
+ print "a", a
+
+ a['a'] = 'a'
+ a['b'] = 'b'
+ a['dict'] = {}
+
+ b = a.copy()
+ print "b", b
+ b['c'] = 'b'
+
+ print
+
+ print "a", a
+ for x in a.iteritems():
+ print x
+ print "--"
+ print "b", b
+ for x in b.iteritems():
+ print x
+ print
+
+ b['dict']['a'] = 'b'
+ b['a'] = 'c'
+
+ print "a", a
+ for x in a.iteritems():
+ print x
+ print "--"
+ print "b", b
+ for x in b.iteritems():
+ print x
+ print
+
+ try:
+ b['dict2']
+ except KeyError, e:
+ print "Okay!"
+
+ a['set'] = COWSetBase()
+ a['set'].add("o1")
+ a['set'].add("o1")
+ a['set'].add("o2")
+
+ print "a", a
+ for x in a['set'].itervalues():
+ print x
+ print "--"
+ print "b", b
+ for x in b['set'].itervalues():
+ print x
+ print
+
+ b['set'].add('o3')
+
+ print "a", a
+ for x in a['set'].itervalues():
+ print x
+ print "--"
+ print "b", b
+ for x in b['set'].itervalues():
+ print x
+ print
+
+ a['set2'] = set()
+ a['set2'].add("o1")
+ a['set2'].add("o1")
+ a['set2'].add("o2")
+
+ print "a", a
+ for x in a.iteritems():
+ print x
+ print "--"
+ print "b", b
+ for x in b.iteritems(readonly=True):
+ print x
+ print
+
+ del b['b']
+ try:
+ print b['b']
+ except KeyError:
+ print "Yay! deleted key raises error"
+
+ if b.has_key('b'):
+ print "Boo!"
+ else:
+ print "Yay - has_key with delete works!"
+
+ print "a", a
+ for x in a.iteritems():
+ print x
+ print "--"
+ print "b", b
+ for x in b.iteritems(readonly=True):
+ print x
+ print
+
+ b.__revertitem__('b')
+
+ print "a", a
+ for x in a.iteritems():
+ print x
+ print "--"
+ print "b", b
+ for x in b.iteritems(readonly=True):
+ print x
+ print
+
+ b.__revertitem__('dict')
+ print "a", a
+ for x in a.iteritems():
+ print x
+ print "--"
+ print "b", b
+ for x in b.iteritems(readonly=True):
+ print x
+ print
diff --git a/bitbake-dev/lib/bb/__init__.py b/bitbake-dev/lib/bb/__init__.py
new file mode 100644
index 0000000000..99995212c3
--- /dev/null
+++ b/bitbake-dev/lib/bb/__init__.py
@@ -0,0 +1,1133 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake Build System Python Library
+#
+# Copyright (C) 2003 Holger Schurig
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# Based on Gentoo's portage.py.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+__version__ = "1.9.0"
+
+__all__ = [
+
+ "debug",
+ "note",
+ "error",
+ "fatal",
+
+ "mkdirhier",
+ "movefile",
+
+ "tokenize",
+ "evaluate",
+ "flatten",
+ "relparse",
+ "ververify",
+ "isjustname",
+ "isspecific",
+ "pkgsplit",
+ "catpkgsplit",
+ "vercmp",
+ "pkgcmp",
+ "dep_parenreduce",
+ "dep_opconvert",
+
+# fetch
+ "decodeurl",
+ "encodeurl",
+
+# modules
+ "parse",
+ "data",
+ "command",
+ "event",
+ "build",
+ "fetch",
+ "manifest",
+ "methodpool",
+ "cache",
+ "runqueue",
+ "taskdata",
+ "providers",
+ ]
+
+whitespace = '\t\n\x0b\x0c\r '
+lowercase = 'abcdefghijklmnopqrstuvwxyz'
+
+import sys, os, types, re, string, bb
+from bb import msg
+
+#projectdir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
+projectdir = os.getcwd()
+
+if "BBDEBUG" in os.environ:
+ level = int(os.environ["BBDEBUG"])
+ if level:
+ bb.msg.set_debug_level(level)
+
+class VarExpandError(Exception):
+ pass
+
+class MalformedUrl(Exception):
+ """Exception raised when encountering an invalid url"""
+
+
+#######################################################################
+#######################################################################
+#
+# SECTION: Debug
+#
+# PURPOSE: little functions to make yourself known
+#
+#######################################################################
+#######################################################################
+
+def plain(*args):
+ bb.msg.warn(''.join(args))
+
+def debug(lvl, *args):
+ bb.msg.debug(lvl, None, ''.join(args))
+
+def note(*args):
+ bb.msg.note(1, None, ''.join(args))
+
+def warn(*args):
+ bb.msg.warn(1, None, ''.join(args))
+
+def error(*args):
+ bb.msg.error(None, ''.join(args))
+
+def fatal(*args):
+ bb.msg.fatal(None, ''.join(args))
+
+
+#######################################################################
+#######################################################################
+#
+# SECTION: File
+#
+# PURPOSE: Basic file and directory tree related functions
+#
+#######################################################################
+#######################################################################
+
+def mkdirhier(dir):
+    """Create a directory hierarchy like 'mkdir -p', but do not complain
+    if the directory already exists (unlike os.makedirs).
+    """
+
+ debug(3, "mkdirhier(%s)" % dir)
+ try:
+ os.makedirs(dir)
+ debug(2, "created " + dir)
+    except OSError, e:
+        import errno
+        if e.errno != errno.EEXIST: raise e
+
+
+#######################################################################
+
+import stat
+
+def movefile(src,dest,newmtime=None,sstat=None):
+ """Moves a file from src to dest, preserving all permissions and
+ attributes; mtime will be preserved even when moving across
+ filesystems. Returns true on success and false on failure. Move is
+ atomic.
+ """
+
+    import shutil
+    from commands import getstatusoutput # used by the special-file fallback below
+
+    #print "movefile("+src+","+dest+","+str(newmtime)+","+str(sstat)+")"
+ try:
+ if not sstat:
+ sstat=os.lstat(src)
+ except Exception, e:
+ print "movefile: Stating source file failed...", e
+ return None
+
+ destexists=1
+ try:
+ dstat=os.lstat(dest)
+ except:
+ dstat=os.lstat(os.path.dirname(dest))
+ destexists=0
+
+ if destexists:
+ if stat.S_ISLNK(dstat[stat.ST_MODE]):
+ try:
+ os.unlink(dest)
+ destexists=0
+ except Exception, e:
+ pass
+
+ if stat.S_ISLNK(sstat[stat.ST_MODE]):
+ try:
+ target=os.readlink(src)
+ if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
+ os.unlink(dest)
+ os.symlink(target,dest)
+ #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ os.unlink(src)
+ return os.lstat(dest)
+ except Exception, e:
+ print "movefile: failed to properly create symlink:", dest, "->", target, e
+ return None
+
+ renamefailed=1
+ if sstat[stat.ST_DEV]==dstat[stat.ST_DEV]:
+ try:
+ ret=os.rename(src,dest)
+ renamefailed=0
+ except Exception, e:
+ import errno
+ if e[0]!=errno.EXDEV:
+ # Some random error.
+ print "movefile: Failed to move", src, "to", dest, e
+ return None
+ # Invalid cross-device-link 'bind' mounted or actually Cross-Device
+
+ if renamefailed:
+ didcopy=0
+ if stat.S_ISREG(sstat[stat.ST_MODE]):
+ try: # For safety copy then move it over.
+ shutil.copyfile(src,dest+"#new")
+ os.rename(dest+"#new",dest)
+ didcopy=1
+ except Exception, e:
+ print 'movefile: copy', src, '->', dest, 'failed.', e
+ return None
+ else:
+ #we don't yet handle special, so we need to fall back to /bin/mv
+ a=getstatusoutput("/bin/mv -f "+"'"+src+"' '"+dest+"'")
+ if a[0]!=0:
+ print "movefile: Failed to move special file:" + src + "' to '" + dest + "'", a
+ return None # failure
+ try:
+ if didcopy:
+            os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
+ os.unlink(src)
+ except Exception, e:
+ print "movefile: Failed to chown/chmod/unlink", dest, e
+ return None
+
+ if newmtime:
+ os.utime(dest,(newmtime,newmtime))
+ else:
+ os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
+ newmtime=sstat[stat.ST_MTIME]
+ return newmtime
+
+def copyfile(src,dest,newmtime=None,sstat=None):
+ """
+ Copies a file from src to dest, preserving all permissions and
+ attributes; mtime will be preserved even when moving across
+ filesystems. Returns true on success and false on failure.
+ """
+    import os, stat, shutil
+    from commands import getstatusoutput # used by the special-file fallback below
+
+ #print "copyfile("+src+","+dest+","+str(newmtime)+","+str(sstat)+")"
+ try:
+ if not sstat:
+ sstat=os.lstat(src)
+ except Exception, e:
+ print "copyfile: Stating source file failed...", e
+ return False
+
+ destexists=1
+ try:
+ dstat=os.lstat(dest)
+ except:
+ dstat=os.lstat(os.path.dirname(dest))
+ destexists=0
+
+ if destexists:
+ if stat.S_ISLNK(dstat[stat.ST_MODE]):
+ try:
+ os.unlink(dest)
+ destexists=0
+ except Exception, e:
+ pass
+
+ if stat.S_ISLNK(sstat[stat.ST_MODE]):
+ try:
+ target=os.readlink(src)
+ if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
+ os.unlink(dest)
+ os.symlink(target,dest)
+ #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ return os.lstat(dest)
+ except Exception, e:
+ print "copyfile: failed to properly create symlink:", dest, "->", target, e
+ return False
+
+ if stat.S_ISREG(sstat[stat.ST_MODE]):
+ try: # For safety copy then move it over.
+ shutil.copyfile(src,dest+"#new")
+ os.rename(dest+"#new",dest)
+ except Exception, e:
+ print 'copyfile: copy', src, '->', dest, 'failed.', e
+ return False
+ else:
+ #we don't yet handle special, so we need to fall back to /bin/mv
+ a=getstatusoutput("/bin/cp -f "+"'"+src+"' '"+dest+"'")
+ if a[0]!=0:
+ print "copyfile: Failed to copy special file:" + src + "' to '" + dest + "'", a
+ return False # failure
+ try:
+ os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
+ except Exception, e:
+ print "copyfile: Failed to chown/chmod/unlink", dest, e
+ return False
+
+ if newmtime:
+ os.utime(dest,(newmtime,newmtime))
+ else:
+ os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
+ newmtime=sstat[stat.ST_MTIME]
+ return newmtime
+
+#######################################################################
+#######################################################################
+#
+# SECTION: Download
+#
+# PURPOSE: Download via HTTP, FTP, CVS, BITKEEPER, handling of MD5-signatures
+# and mirrors
+#
+#######################################################################
+#######################################################################
+
+def decodeurl(url):
+ """Decodes an URL into the tokens (scheme, network location, path,
+ user, password, parameters).
+
+ >>> decodeurl("http://www.google.com/index.html")
+ ('http', 'www.google.com', '/index.html', '', '', {})
+
+ CVS url with username, host and cvsroot. The cvs module to check out is in the
+ parameters:
+
+ >>> decodeurl("cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg")
+ ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', '', {'module': 'familiar/dist/ipkg'})
+
+    Ditto, but this time the username has a password part. And we also request a special tag
+ to check out.
+
+ >>> decodeurl("cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;module=familiar/dist/ipkg;tag=V0-99-81")
+ ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', 'anonymous', {'tag': 'V0-99-81', 'module': 'familiar/dist/ipkg'})
+ """
+
+ m = re.compile('(?P<type>[^:]*)://((?P<user>.+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
+ if not m:
+ raise MalformedUrl(url)
+
+ type = m.group('type')
+ location = m.group('location')
+ if not location:
+ raise MalformedUrl(url)
+ user = m.group('user')
+ parm = m.group('parm')
+
+ locidx = location.find('/')
+ if locidx != -1:
+ host = location[:locidx]
+ path = location[locidx:]
+ else:
+ host = ""
+ path = location
+ if user:
+ m = re.compile('(?P<user>[^:]+)(:?(?P<pswd>.*))').match(user)
+ if m:
+ user = m.group('user')
+ pswd = m.group('pswd')
+ else:
+ user = ''
+ pswd = ''
+
+ p = {}
+ if parm:
+ for s in parm.split(';'):
+            s1,s2 = s.split('=',1)
+ p[s1] = s2
+
+ return (type, host, path, user, pswd, p)
+
+#######################################################################
+
+def encodeurl(decoded):
+ """Encodes a URL from tokens (scheme, network location, path,
+ user, password, parameters).
+
+ >>> encodeurl(['http', 'www.google.com', '/index.html', '', '', {}])
+ 'http://www.google.com/index.html'
+
+ CVS with username, host and cvsroot. The cvs module to check out is in the
+ parameters:
+
+ >>> encodeurl(['cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', '', {'module': 'familiar/dist/ipkg'}])
+ 'cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg'
+
+    Ditto, but this time the username has a password part. And we also request a special tag
+ to check out.
+
+ >>> encodeurl(['cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', 'anonymous', {'tag': 'V0-99-81', 'module': 'familiar/dist/ipkg'}])
+ 'cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;tag=V0-99-81;module=familiar/dist/ipkg'
+ """
+
+ (type, host, path, user, pswd, p) = decoded
+
+ if not type or not path:
+ fatal("invalid or missing parameters for url encoding")
+ url = '%s://' % type
+ if user:
+ url += "%s" % user
+ if pswd:
+ url += ":%s" % pswd
+ url += "@"
+ if host:
+ url += "%s" % host
+ url += "%s" % path
+ if p:
+ for parm in p.keys():
+ url += ";%s=%s" % (parm, p[parm])
+
+ return url
+
+#######################################################################
+
+def which(path, item, direction = 0):
+ """
+    Locate a file in a PATH, searching from the front by default or from the back when direction is non-zero
+ """
+
+ paths = (path or "").split(':')
+ if direction != 0:
+ paths.reverse()
+
+ for p in (path or "").split(':'):
+ next = os.path.join(p, item)
+ if os.path.exists(next):
+ return next
+
+ return ""
+
+#######################################################################
+
+
+
+
+#######################################################################
+#######################################################################
+#
+# SECTION: Dependency
+#
+# PURPOSE: Compare build & run dependencies
+#
+#######################################################################
+#######################################################################
+
+def tokenize(mystring):
+ """Breaks a string like 'foo? (bar) oni? (blah (blah))' into (possibly embedded) lists:
+
+ >>> tokenize("x")
+ ['x']
+ >>> tokenize("x y")
+ ['x', 'y']
+ >>> tokenize("(x y)")
+ [['x', 'y']]
+ >>> tokenize("(x y) b c")
+ [['x', 'y'], 'b', 'c']
+ >>> tokenize("foo? (bar) oni? (blah (blah))")
+ ['foo?', ['bar'], 'oni?', ['blah', ['blah']]]
+ >>> tokenize("sys-apps/linux-headers nls? (sys-devel/gettext)")
+ ['sys-apps/linux-headers', 'nls?', ['sys-devel/gettext']]
+ """
+
+ newtokens = []
+ curlist = newtokens
+ prevlists = []
+ level = 0
+ accum = ""
+ for x in mystring:
+ if x=="(":
+ if accum:
+ curlist.append(accum)
+ accum=""
+ prevlists.append(curlist)
+ curlist=[]
+ level=level+1
+ elif x==")":
+ if accum:
+ curlist.append(accum)
+ accum=""
+ if level==0:
+ print "!!! tokenizer: Unmatched left parenthesis in:\n'"+mystring+"'"
+ return None
+ newlist=curlist
+ curlist=prevlists.pop()
+ curlist.append(newlist)
+ level=level-1
+ elif x in whitespace:
+ if accum:
+ curlist.append(accum)
+ accum=""
+ else:
+ accum=accum+x
+ if accum:
+ curlist.append(accum)
+ if (level!=0):
+ print "!!! tokenizer: Exiting with unterminated parenthesis in:\n'"+mystring+"'"
+ return None
+ return newtokens
+
+
+#######################################################################
+
+def evaluate(tokens,mydefines,allon=0):
+ """Removes tokens based on whether conditional definitions exist or not.
+ Recognizes !
+
+ >>> evaluate(['sys-apps/linux-headers', 'nls?', ['sys-devel/gettext']], {})
+ ['sys-apps/linux-headers']
+
+ Negate the flag:
+
+ >>> evaluate(['sys-apps/linux-headers', '!nls?', ['sys-devel/gettext']], {})
+ ['sys-apps/linux-headers', ['sys-devel/gettext']]
+
+ Define 'nls':
+
+ >>> evaluate(['sys-apps/linux-headers', 'nls?', ['sys-devel/gettext']], {"nls":1})
+ ['sys-apps/linux-headers', ['sys-devel/gettext']]
+
+ Turn allon on:
+
+ >>> evaluate(['sys-apps/linux-headers', 'nls?', ['sys-devel/gettext']], {}, True)
+ ['sys-apps/linux-headers', ['sys-devel/gettext']]
+ """
+
+ if tokens == None:
+ return None
+ mytokens = tokens + [] # this copies the list
+ pos = 0
+ while pos < len(mytokens):
+ if type(mytokens[pos]) == types.ListType:
+ evaluate(mytokens[pos], mydefines)
+ if not len(mytokens[pos]):
+ del mytokens[pos]
+ continue
+ elif mytokens[pos][-1] == "?":
+ cur = mytokens[pos][:-1]
+ del mytokens[pos]
+ if allon:
+ if cur[0] == "!":
+ del mytokens[pos]
+ else:
+ if cur[0] == "!":
+ if (cur[1:] in mydefines) and (pos < len(mytokens)):
+ del mytokens[pos]
+ continue
+ elif (cur not in mydefines) and (pos < len(mytokens)):
+ del mytokens[pos]
+ continue
+ pos = pos + 1
+ return mytokens
+
+
+#######################################################################
+
+def flatten(mytokens):
+ """Converts nested arrays into a flat arrays:
+
+ >>> flatten([1,[2,3]])
+ [1, 2, 3]
+ >>> flatten(['sys-apps/linux-headers', ['sys-devel/gettext']])
+ ['sys-apps/linux-headers', 'sys-devel/gettext']
+ """
+
+ newlist=[]
+ for x in mytokens:
+ if type(x)==types.ListType:
+ newlist.extend(flatten(x))
+ else:
+ newlist.append(x)
+ return newlist
+
+
+#######################################################################
+
+_package_weights_ = {"pre":-2,"p":0,"alpha":-4,"beta":-3,"rc":-1} # dicts are unordered
+_package_ends_ = ["pre", "p", "alpha", "beta", "rc", "cvs", "bk", "HEAD" ] # so we need ordered list
+
+def relparse(myver):
+ """Parses the last elements of a version number into a triplet, that can
+ later be compared:
+
+ >>> relparse('1.2_pre3')
+ [1.2, -2, 3.0]
+ >>> relparse('1.2b')
+ [1.2, 98, 0]
+ >>> relparse('1.2')
+ [1.2, 0, 0]
+ """
+
+ number = 0
+ p1 = 0
+ p2 = 0
+ mynewver = myver.split('_')
+    if len(mynewver)==2:
+        # a version with a suffix, e.g. "1.2_pre3"; weight the suffix via _package_weights_
+ number = float(mynewver[0])
+ match = 0
+ for x in _package_ends_:
+ elen = len(x)
+ if mynewver[1][:elen] == x:
+ match = 1
+ p1 = _package_weights_[x]
+ try:
+ p2 = float(mynewver[1][elen:])
+ except:
+ p2 = 0
+ break
+ if not match:
+ # normal number or number with letter at end
+ divider = len(myver)-1
+ if myver[divider:] not in "1234567890":
+ # letter at end
+ p1 = ord(myver[divider:])
+ number = float(myver[0:divider])
+ else:
+ number = float(myver)
+ else:
+ # normal number or number with letter at end
+ divider = len(myver)-1
+ if myver[divider:] not in "1234567890":
+ #letter at end
+ p1 = ord(myver[divider:])
+ number = float(myver[0:divider])
+ else:
+ number = float(myver)
+ return [number,p1,p2]
+
+
+#######################################################################
+
+__ververify_cache__ = {}
+
+def ververify(myorigval,silent=1):
+ """Returns 1 if given a valid version string, els 0. Valid versions are in the format
+
+ <v1>.<v2>...<vx>[a-z,_{_package_weights_}[vy]]
+
+ >>> ververify('2.4.20')
+ 1
+ >>> ververify('2.4..20') # two dots
+ 0
+ >>> ververify('2.x.20') # 'x' is not numeric
+ 0
+ >>> ververify('2.4.20a')
+ 1
+ >>> ververify('2.4.20cvs') # only one trailing letter
+ 0
+ >>> ververify('1a')
+ 1
+ >>> ververify('test_a') # no version at all
+ 0
+ >>> ververify('2.4.20_beta1')
+ 1
+ >>> ververify('2.4.20_beta')
+ 1
+ >>> ververify('2.4.20_wrongext') # _wrongext is no valid trailer
+ 0
+ """
+
+ # Lookup the cache first
+ try:
+ return __ververify_cache__[myorigval]
+ except KeyError:
+ pass
+
+ if len(myorigval) == 0:
+ if not silent:
+ error("package version is empty")
+ __ververify_cache__[myorigval] = 0
+ return 0
+ myval = myorigval.split('.')
+ if len(myval)==0:
+ if not silent:
+ error("package name has empty version string")
+ __ververify_cache__[myorigval] = 0
+ return 0
+ # all but the last version must be a numeric
+ for x in myval[:-1]:
+ if not len(x):
+ if not silent:
+ error("package version has two points in a row")
+ __ververify_cache__[myorigval] = 0
+ return 0
+ try:
+ foo = int(x)
+ except:
+ if not silent:
+ error("package version contains non-numeric '"+x+"'")
+ __ververify_cache__[myorigval] = 0
+ return 0
+ if not len(myval[-1]):
+ if not silent:
+ error("package version has trailing dot")
+ __ververify_cache__[myorigval] = 0
+ return 0
+ try:
+ foo = int(myval[-1])
+ __ververify_cache__[myorigval] = 1
+ return 1
+ except:
+ pass
+
+ # ok, our last component is not a plain number or blank, let's continue
+ if myval[-1][-1] in lowercase:
+ try:
+            # 1a, 2.0b, etc.
+            foo = int(myval[-1][:-1])
+            __ververify_cache__[myorigval] = 1
+            return 1
+ except:
+ pass
+ # ok, maybe we have a 1_alpha or 1_beta2; let's see
+ ep=string.split(myval[-1],"_")
+ if len(ep)!= 2:
+ if not silent:
+ error("package version has more than one letter at then end")
+ __ververify_cache__[myorigval] = 0
+ return 0
+ try:
+ foo = string.atoi(ep[0])
+ except:
+ # this needs to be numeric, i.e. the "1" in "1_alpha"
+ if not silent:
+ error("package version must have numeric part before the '_'")
+ __ververify_cache__[myorigval] = 0
+ return 0
+
+ for mye in _package_ends_:
+ if ep[1][0:len(mye)] == mye:
+ if len(mye) == len(ep[1]):
+ # no trailing numeric is ok
+ __ververify_cache__[myorigval] = 1
+ return 1
+ else:
+ try:
+ foo = string.atoi(ep[1][len(mye):])
+ __ververify_cache__[myorigval] = 1
+ return 1
+ except:
+ # if no _package_weights_ work, *then* we return 0
+ pass
+ if not silent:
+ error("package version extension after '_' is invalid")
+ __ververify_cache__[myorigval] = 0
+ return 0
+
+
+def isjustname(mypkg):
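+    """Checks whether mypkg is just a package name, without a version part:
+
+    >>> isjustname('glibc')
+    1
+    >>> isjustname('glibc-2.2.5')
+    0
+    """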
+ myparts = string.split(mypkg,'-')
+ for x in myparts:
+ if ververify(x):
+ return 0
+ return 1
+
+
+__isspecific_cache__ = {}
+
+def isspecific(mypkg):
+ "now supports packages with no category"
+ try:
+ return __isspecific_cache__[mypkg]
+    except KeyError:
+ pass
+
+ mysplit = string.split(mypkg,"/")
+ if not isjustname(mysplit[-1]):
+ __isspecific_cache__[mypkg] = 1
+ return 1
+ __isspecific_cache__[mypkg] = 0
+ return 0
+
+
+#######################################################################
+
+__pkgsplit_cache__={}
+
+def pkgsplit(mypkg, silent=1):
+
+ """This function can be used as a package verification function. If
+ it is a valid name, pkgsplit will return a list containing:
+ [pkgname, pkgversion(norev), pkgrev ].
+
+ >>> pkgsplit('')
+ >>> pkgsplit('x')
+ >>> pkgsplit('x-')
+ >>> pkgsplit('-1')
+ >>> pkgsplit('glibc-1.2-8.9-r7')
+ >>> pkgsplit('glibc-2.2.5-r7')
+ ['glibc', '2.2.5', 'r7']
+ >>> pkgsplit('foo-1.2-1')
+ >>> pkgsplit('Mesa-3.0')
+ ['Mesa', '3.0', 'r0']
+ """
+
+ try:
+ return __pkgsplit_cache__[mypkg]
+ except KeyError:
+ pass
+
+ myparts = string.split(mypkg,'-')
+ if len(myparts) < 2:
+ if not silent:
+ error("package name without name or version part")
+ __pkgsplit_cache__[mypkg] = None
+ return None
+ for x in myparts:
+ if len(x) == 0:
+ if not silent:
+ error("package name with empty name or version part")
+ __pkgsplit_cache__[mypkg] = None
+ return None
+ # verify rev
+ revok = 0
+ myrev = myparts[-1]
+ ververify(myrev, silent)
+ if len(myrev) and myrev[0] == "r":
+ try:
+ string.atoi(myrev[1:])
+ revok = 1
+ except:
+ pass
+ if revok:
+ if ververify(myparts[-2]):
+ if len(myparts) == 2:
+ __pkgsplit_cache__[mypkg] = None
+ return None
+ else:
+ for x in myparts[:-2]:
+ if ververify(x):
+ __pkgsplit_cache__[mypkg]=None
+ return None
+                    # names can't have version-like parts
+ myval=[string.join(myparts[:-2],"-"),myparts[-2],myparts[-1]]
+ __pkgsplit_cache__[mypkg]=myval
+ return myval
+ else:
+ __pkgsplit_cache__[mypkg] = None
+ return None
+
+ elif ververify(myparts[-1],silent):
+ if len(myparts)==1:
+ if not silent:
+ print "!!! Name error in",mypkg+": missing name part."
+ __pkgsplit_cache__[mypkg]=None
+ return None
+ else:
+ for x in myparts[:-1]:
+ if ververify(x):
+ if not silent: error("package name has multiple version parts")
+ __pkgsplit_cache__[mypkg] = None
+ return None
+ myval = [string.join(myparts[:-1],"-"), myparts[-1],"r0"]
+ __pkgsplit_cache__[mypkg] = myval
+ return myval
+ else:
+ __pkgsplit_cache__[mypkg] = None
+ return None
+
+
+#######################################################################
+
+__catpkgsplit_cache__ = {}
+
+def catpkgsplit(mydata,silent=1):
+ """returns [cat, pkgname, version, rev ]
+
+ >>> catpkgsplit('sys-libs/glibc-1.2-r7')
+ ['sys-libs', 'glibc', '1.2', 'r7']
+ >>> catpkgsplit('glibc-1.2-r7')
+ [None, 'glibc', '1.2', 'r7']
+ """
+
+ try:
+ return __catpkgsplit_cache__[mydata]
+ except KeyError:
+ pass
+
+ cat = os.path.basename(os.path.dirname(mydata))
+ mydata = os.path.join(cat, os.path.basename(mydata))
+ if mydata[-3:] == '.bb':
+ mydata = mydata[:-3]
+
+ mysplit = mydata.split("/")
+ p_split = None
+ splitlen = len(mysplit)
+ if splitlen == 1:
+ retval = [None]
+ p_split = pkgsplit(mydata,silent)
+ else:
+ retval = [mysplit[splitlen - 2]]
+ p_split = pkgsplit(mysplit[splitlen - 1],silent)
+ if not p_split:
+ __catpkgsplit_cache__[mydata] = None
+ return None
+ retval.extend(p_split)
+ __catpkgsplit_cache__[mydata] = retval
+ return retval
+
+
+#######################################################################
+
+__vercmp_cache__ = {}
+
+def vercmp(val1,val2):
+ """This takes two version strings and returns an integer to tell you whether
+ the versions are the same, val1>val2 or val2>val1.
+
+ >>> vercmp('1', '2')
+ -1.0
+ >>> vercmp('2', '1')
+ 1.0
+ >>> vercmp('1', '1.0')
+ 0
+ >>> vercmp('1', '1.1')
+ -1.0
+ >>> vercmp('1.1', '1_p2')
+ 1.0
+ """
+
+ # quick short-circuit
+ if val1 == val2:
+ return 0
+ valkey = val1+" "+val2
+
+    # cache lookup - also try the reversed pair, negating the result
+    try:
+        return __vercmp_cache__[valkey]
+    except KeyError:
+        try:
+            return - __vercmp_cache__[val2+" "+val1]
+        except KeyError:
+            pass
+
+    # consider 1_p2 vs 1.1
+    # after expansion this will become (1_p2,0) vs (1,1)
+    # then 1_p2 is compared with 1 before 0 is compared with 1
+    # to solve the bug we need to convert it to (1,0_p2)
+    # by splitting off the _prepart and adding it back after expansion
+
+ val1_prepart = val2_prepart = ''
+ if val1.count('_'):
+ val1, val1_prepart = val1.split('_', 1)
+ if val2.count('_'):
+ val2, val2_prepart = val2.split('_', 1)
+
+ # replace '-' by '.'
+ # FIXME: Is it needed? can val1/2 contain '-'?
+
+ val1 = string.split(val1,'-')
+ if len(val1) == 2:
+ val1[0] = val1[0] +"."+ val1[1]
+ val2 = string.split(val2,'-')
+ if len(val2) == 2:
+ val2[0] = val2[0] +"."+ val2[1]
+
+ val1 = string.split(val1[0],'.')
+ val2 = string.split(val2[0],'.')
+
+ # add back decimal point so that .03 does not become "3" !
+ for x in range(1,len(val1)):
+ if val1[x][0] == '0' :
+ val1[x] = '.' + val1[x]
+ for x in range(1,len(val2)):
+ if val2[x][0] == '0' :
+ val2[x] = '.' + val2[x]
+
+    # extend version numbers so both have the same number of components
+ if len(val2) < len(val1):
+ val2.extend(["0"]*(len(val1)-len(val2)))
+ elif len(val1) < len(val2):
+ val1.extend(["0"]*(len(val2)-len(val1)))
+
+ # add back _prepart tails
+ if val1_prepart:
+ val1[-1] += '_' + val1_prepart
+ if val2_prepart:
+ val2[-1] += '_' + val2_prepart
+ # The above code will extend version numbers out so they
+ # have the same number of digits.
+ for x in range(0,len(val1)):
+ cmp1 = relparse(val1[x])
+ cmp2 = relparse(val2[x])
+ for y in range(0,3):
+ myret = cmp1[y] - cmp2[y]
+ if myret != 0:
+ __vercmp_cache__[valkey] = myret
+ return myret
+ __vercmp_cache__[valkey] = 0
+ return 0
+
+
+#######################################################################
+
+def pkgcmp(pkg1,pkg2):
+ """ Compares two packages, which should have been split via
+ pkgsplit(). if the return value val is less than zero, then pkg2 is
+ newer than pkg1, zero if equal and positive if older.
+
+ >>> pkgcmp(['glibc', '2.2.5', 'r7'], ['glibc', '2.2.5', 'r7'])
+ 0
+ >>> pkgcmp(['glibc', '2.2.5', 'r4'], ['glibc', '2.2.5', 'r7'])
+ -1
+ >>> pkgcmp(['glibc', '2.2.5', 'r7'], ['glibc', '2.2.5', 'r2'])
+ 1
+ """
+
+ mycmp = vercmp(pkg1[1],pkg2[1])
+ if mycmp > 0:
+ return 1
+ if mycmp < 0:
+ return -1
+ r1=string.atoi(pkg1[2][1:])
+ r2=string.atoi(pkg2[2][1:])
+ if r1 > r2:
+ return 1
+ if r2 > r1:
+ return -1
+ return 0
+
+
+#######################################################################
+
+def dep_parenreduce(mysplit, mypos=0):
+ """Accepts a list of strings, and converts '(' and ')' surrounded items to sub-lists:
+
+ >>> dep_parenreduce([''])
+ ['']
+ >>> dep_parenreduce(['1', '2', '3'])
+ ['1', '2', '3']
+ >>> dep_parenreduce(['1', '(', '2', '3', ')', '4'])
+ ['1', ['2', '3'], '4']
+ """
+
+ while mypos < len(mysplit):
+ if mysplit[mypos] == "(":
+ firstpos = mypos
+ mypos = mypos + 1
+ while mypos < len(mysplit):
+ if mysplit[mypos] == ")":
+ mysplit[firstpos:mypos+1] = [mysplit[firstpos+1:mypos]]
+ mypos = firstpos
+ break
+ elif mysplit[mypos] == "(":
+ # recurse
+ mysplit = dep_parenreduce(mysplit,mypos)
+ mypos = mypos + 1
+ mypos = mypos + 1
+ return mysplit
+
+
+def dep_opconvert(mysplit, myuse):
+ "Does dependency operator conversion"
+
+ mypos = 0
+ newsplit = []
+ while mypos < len(mysplit):
+ if type(mysplit[mypos]) == types.ListType:
+ newsplit.append(dep_opconvert(mysplit[mypos],myuse))
+ mypos += 1
+ elif mysplit[mypos] == ")":
+ # mismatched paren, error
+ return None
+ elif mysplit[mypos]=="||":
+ if ((mypos+1)>=len(mysplit)) or (type(mysplit[mypos+1])!=types.ListType):
+ # || must be followed by paren'd list
+ return None
+ try:
+ mynew = dep_opconvert(mysplit[mypos+1],myuse)
+ except Exception, e:
+ error("unable to satisfy OR dependancy: " + string.join(mysplit," || "))
+ raise e
+ mynew[0:0] = ["||"]
+ newsplit.append(mynew)
+ mypos += 2
+ elif mysplit[mypos][-1] == "?":
+ # use clause, i.e "gnome? ( foo bar )"
+ # this is a quick and dirty hack so that repoman can enable all USE vars:
+ if (len(myuse) == 1) and (myuse[0] == "*"):
+ # enable it even if it's ! (for repoman) but kill it if it's
+ # an arch variable that isn't for this arch. XXX Sparc64?
+ if (mysplit[mypos][:-1] not in settings.usemask) or \
+ (mysplit[mypos][:-1]==settings["ARCH"]):
+ enabled=1
+ else:
+ enabled=0
+ else:
+ if mysplit[mypos][0] == "!":
+ myusevar = mysplit[mypos][1:-1]
+ enabled = not myusevar in myuse
+ #if myusevar in myuse:
+ # enabled = 0
+ #else:
+ # enabled = 1
+ else:
+ myusevar=mysplit[mypos][:-1]
+ enabled = myusevar in myuse
+ #if myusevar in myuse:
+ # enabled=1
+ #else:
+ # enabled=0
+ if (mypos +2 < len(mysplit)) and (mysplit[mypos+2] == ":"):
+ # colon mode
+ if enabled:
+ # choose the first option
+ if type(mysplit[mypos+1]) == types.ListType:
+ newsplit.append(dep_opconvert(mysplit[mypos+1],myuse))
+ else:
+ newsplit.append(mysplit[mypos+1])
+ else:
+ # choose the alternate option
+ if type(mysplit[mypos+1]) == types.ListType:
+ newsplit.append(dep_opconvert(mysplit[mypos+3],myuse))
+ else:
+ newsplit.append(mysplit[mypos+3])
+ mypos += 4
+ else:
+ # normal use mode
+ if enabled:
+ if type(mysplit[mypos+1]) == types.ListType:
+ newsplit.append(dep_opconvert(mysplit[mypos+1],myuse))
+ else:
+ newsplit.append(mysplit[mypos+1])
+ # otherwise, continue
+ mypos += 2
+ else:
+ # normal item
+ newsplit.append(mysplit[mypos])
+ mypos += 1
+ return newsplit
+
+if __name__ == "__main__":
+ import doctest, bb
+ doctest.testmod(bb)
diff --git a/bitbake-dev/lib/bb/build.py b/bitbake-dev/lib/bb/build.py
new file mode 100644
index 0000000000..ca7cfbc6bb
--- /dev/null
+++ b/bitbake-dev/lib/bb/build.py
@@ -0,0 +1,377 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake 'Build' implementation
+#
+# Core code for function execution and task handling in the
+# BitBake build tools.
+#
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# Based on Gentoo's portage.py.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+from bb import data, event, mkdirhier, utils
+import bb, os, sys
+
+# events
+class FuncFailed(Exception):
+    """
+    Executed function failed.
+    First parameter is a message.
+    Second parameter is a logfile (optional).
+    """
+
+class EventException(Exception):
+ """Exception which is associated with an Event."""
+
+ def __init__(self, msg, event):
+ self.args = msg, event
+
+class TaskBase(event.Event):
+ """Base class for task events"""
+
+ def __init__(self, t, d ):
+ self._task = t
+ self._package = bb.data.getVar("PF", d, 1)
+ event.Event.__init__(self, d)
+ self._message = "package %s: task %s: %s" % (bb.data.getVar("PF", d, 1), t, bb.event.getName(self)[4:])
+
+ def getTask(self):
+ return self._task
+
+ def setTask(self, task):
+ self._task = task
+
+ task = property(getTask, setTask, None, "task property")
+
+class TaskStarted(TaskBase):
+ """Task execution started"""
+
+class TaskSucceeded(TaskBase):
+ """Task execution completed"""
+
+class TaskFailed(TaskBase):
+ """Task execution failed"""
+ def __init__(self, msg, logfile, t, d ):
+ self.logfile = logfile
+ self.msg = msg
+ TaskBase.__init__(self, t, d)
+
+class InvalidTask(TaskBase):
+ """Invalid Task"""
+
+# functions
+
+def exec_func(func, d, dirs = None):
+ """Execute an BB 'function'"""
+
+ body = data.getVar(func, d)
+ if not body:
+ return
+
+ flags = data.getVarFlags(func, d)
+ for item in ['deps', 'check', 'interactive', 'python', 'cleandirs', 'dirs', 'lockfiles', 'fakeroot']:
+ if not item in flags:
+ flags[item] = None
+
+ ispython = flags['python']
+
+ cleandirs = (data.expand(flags['cleandirs'], d) or "").split()
+ for cdir in cleandirs:
+ os.system("rm -rf %s" % cdir)
+
+ if dirs:
+ dirs = data.expand(dirs, d)
+ else:
+ dirs = (data.expand(flags['dirs'], d) or "").split()
+ for adir in dirs:
+ mkdirhier(adir)
+
+ if len(dirs) > 0:
+ adir = dirs[-1]
+ else:
+ adir = data.getVar('B', d, 1)
+
+ # Save current directory
+ try:
+ prevdir = os.getcwd()
+ except OSError:
+ prevdir = data.getVar('TOPDIR', d, True)
+
+ # Setup logfiles
+ t = data.getVar('T', d, 1)
+ if not t:
+ bb.msg.fatal(bb.msg.domain.Build, "T not set")
+ mkdirhier(t)
+ # Gross hack, FIXME
+ import random
+ logfile = "%s/log.%s.%s.%s" % (t, func, str(os.getpid()),random.random())
+ runfile = "%s/run.%s.%s" % (t, func, str(os.getpid()))
+
+ # Change to correct directory (if specified)
+ if adir and os.access(adir, os.F_OK):
+ os.chdir(adir)
+
+ # Handle logfiles
+ si = file('/dev/null', 'r')
+ try:
+ if bb.msg.debug_level['default'] > 0 or ispython:
+ so = os.popen("tee \"%s\"" % logfile, "w")
+ else:
+ so = file(logfile, 'w')
+ except OSError, e:
+ bb.msg.error(bb.msg.domain.Build, "opening log file: %s" % e)
+ pass
+
+ se = so
+
+    # Dup the existing fds so we don't lose them
+ osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
+ oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
+ ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]
+
+ # Replace those fds with our own
+ os.dup2(si.fileno(), osi[1])
+ os.dup2(so.fileno(), oso[1])
+ os.dup2(se.fileno(), ose[1])
+
+ locks = []
+ lockfiles = (data.expand(flags['lockfiles'], d) or "").split()
+ for lock in lockfiles:
+ locks.append(bb.utils.lockfile(lock))
+
+ try:
+ # Run the function
+ if ispython:
+ exec_func_python(func, d, runfile, logfile)
+ else:
+ exec_func_shell(func, d, runfile, logfile, flags)
+
+ # Restore original directory
+ try:
+ os.chdir(prevdir)
+ except:
+ pass
+
+ finally:
+
+ # Unlock any lockfiles
+ for lock in locks:
+ bb.utils.unlockfile(lock)
+
+ # Restore the backup fds
+ os.dup2(osi[0], osi[1])
+ os.dup2(oso[0], oso[1])
+ os.dup2(ose[0], ose[1])
+
+ # Close our logs
+ si.close()
+ so.close()
+ se.close()
+
+ # Close the backup fds
+ os.close(osi[0])
+ os.close(oso[0])
+ os.close(ose[0])
+
+def exec_func_python(func, d, runfile, logfile):
+ """Execute a python BB 'function'"""
+ import re, os
+
+ bbfile = bb.data.getVar('FILE', d, 1)
+ tmp = "def " + func + "():\n%s" % data.getVar(func, d)
+ tmp += '\n' + func + '()'
+
+ f = open(runfile, "w")
+ f.write(tmp)
+ comp = utils.better_compile(tmp, func, bbfile)
+ g = {} # globals
+ g['bb'] = bb
+ g['os'] = os
+ g['d'] = d
+ utils.better_exec(comp, g, tmp, bbfile)
+
+
+def exec_func_shell(func, d, runfile, logfile, flags):
+ """Execute a shell BB 'function' Returns true if execution was successful.
+
+ For this, it creates a bash shell script in the tmp dectory, writes the local
+ data into it and finally executes. The output of the shell will end in a log file and stdout.
+
+ Note on directory behavior. The 'dirs' varflag should contain a list
+ of the directories you need created prior to execution. The last
+ item in the list is where we will chdir/cd to.
+ """
+
+ deps = flags['deps']
+ check = flags['check']
+ if check in globals():
+ if globals()[check](func, deps):
+ return
+
+ f = open(runfile, "w")
+ f.write("#!/bin/sh -e\n")
+ if bb.msg.debug_level['default'] > 0: f.write("set -x\n")
+ data.emit_env(f, d)
+
+ f.write("cd %s\n" % os.getcwd())
+ if func: f.write("%s\n" % func)
+ f.close()
+ os.chmod(runfile, 0775)
+ if not func:
+ bb.msg.error(bb.msg.domain.Build, "Function not specified")
+ raise FuncFailed("Function not specified for exec_func_shell")
+
+ # execute function
+ if flags['fakeroot']:
+ maybe_fakeroot = "PATH=\"%s\" fakeroot " % bb.data.getVar("PATH", d, 1)
+ else:
+ maybe_fakeroot = ''
+ lang_environment = "LC_ALL=C "
+ ret = os.system('%s%ssh -e %s' % (lang_environment, maybe_fakeroot, runfile))
+
+ if ret == 0:
+ return
+
+ bb.msg.error(bb.msg.domain.Build, "Function %s failed" % func)
+ raise FuncFailed("function %s failed" % func, logfile)
+
+
+def exec_task(task, d):
+ """Execute an BB 'task'
+
+ The primary difference between executing a task versus executing
+ a function is that a task exists in the task digraph, and therefore
+ has dependencies amongst other tasks."""
+
+    # Check whether this is a valid task
+ if not data.getVarFlag(task, 'task', d):
+ raise EventException("No such task", InvalidTask(task, d))
+
+ try:
+ bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % task)
+ old_overrides = data.getVar('OVERRIDES', d, 0)
+ localdata = data.createCopy(d)
+ data.setVar('OVERRIDES', 'task_%s:%s' % (task, old_overrides), localdata)
+ data.update_data(localdata)
+ event.fire(TaskStarted(task, localdata))
+ exec_func(task, localdata)
+ event.fire(TaskSucceeded(task, localdata))
+ except FuncFailed, message:
+ # Try to extract the optional logfile
+ try:
+ (msg, logfile) = message
+ except:
+ logfile = None
+ msg = message
+ bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % message )
+ failedevent = TaskFailed(msg, logfile, task, d)
+ event.fire(failedevent)
+ raise EventException("Function failed in task: %s" % message, failedevent)
+
+ # make stamp, or cause event and raise exception
+ if not data.getVarFlag(task, 'nostamp', d) and not data.getVarFlag(task, 'selfstamp', d):
+ make_stamp(task, d)
+
+def extract_stamp(d, fn):
+ """
+    Extracts stamp format which is either a data dictionary (fn unset)
+ or a dataCache entry (fn set).
+ """
+ if fn:
+ return d.stamp[fn]
+ return data.getVar('STAMP', d, 1)
+
+def stamp_internal(task, d, file_name):
+ """
+ Internal stamp helper function
+ Removes any stamp for the given task
+ Makes sure the stamp directory exists
+ Returns the stamp path+filename
+ """
+ stamp = extract_stamp(d, file_name)
+ if not stamp:
+ return
+ stamp = "%s.%s" % (stamp, task)
+ mkdirhier(os.path.dirname(stamp))
+ # Remove the file and recreate to force timestamp
+ # change on broken NFS filesystems
+ if os.access(stamp, os.F_OK):
+ os.remove(stamp)
+ return stamp
+
+def make_stamp(task, d, file_name = None):
+ """
+ Creates/updates a stamp for a given task
+ (d can be a data dict or dataCache)
+ """
+ stamp = stamp_internal(task, d, file_name)
+ if stamp:
+ f = open(stamp, "w")
+ f.close()
+
+def del_stamp(task, d, file_name = None):
+ """
+ Removes a stamp for a given task
+ (d can be a data dict or dataCache)
+ """
+ stamp_internal(task, d, file_name)
+
+def add_tasks(tasklist, d):
+ task_deps = data.getVar('_task_deps', d)
+ if not task_deps:
+ task_deps = {}
+ if not 'tasks' in task_deps:
+ task_deps['tasks'] = []
+ if not 'parents' in task_deps:
+ task_deps['parents'] = {}
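+
+    # The resulting structure is roughly (illustrative sketch with
+    # hypothetical task names):
+    #   task_deps = {
+    #       'tasks':   ['do_fetch', 'do_unpack'],
+    #       'parents': {'do_unpack': ['do_fetch']},
+    #       'deptask': {'do_configure': 'do_populate_staging'},
+    #   }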
+
+ for task in tasklist:
+ task = data.expand(task, d)
+ data.setVarFlag(task, 'task', 1, d)
+
+ if not task in task_deps['tasks']:
+ task_deps['tasks'].append(task)
+
+ flags = data.getVarFlags(task, d)
+ def getTask(name):
+ if not name in task_deps:
+ task_deps[name] = {}
+ if name in flags:
+ deptask = data.expand(flags[name], d)
+ task_deps[name][task] = deptask
+ getTask('depends')
+ getTask('deptask')
+ getTask('rdeptask')
+ getTask('recrdeptask')
+ getTask('nostamp')
+        task_deps['parents'][task] = []
+        if 'deps' in flags:
+            for dep in flags['deps']:
+                dep = data.expand(dep, d)
+                task_deps['parents'][task].append(dep)
+
+ # don't assume holding a reference
+ data.setVar('_task_deps', task_deps, d)
+
+def remove_task(task, kill, d):
+ """Remove an BB 'task'.
+
+ If kill is 1, also remove tasks that depend on this task."""
+
+ data.delVarFlag(task, 'task', d)
+
diff --git a/bitbake-dev/lib/bb/cache.py b/bitbake-dev/lib/bb/cache.py
new file mode 100644
index 0000000000..bcf393a578
--- /dev/null
+++ b/bitbake-dev/lib/bb/cache.py
@@ -0,0 +1,465 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake 'Cache' implementation
+#
+# Caching of bitbake variables before task execution
+
+# Copyright (C) 2006 Richard Purdie
+
+# but small sections based on code from bin/bitbake:
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
+# Copyright (C) 2005 Holger Hans Peter Freyther
+# Copyright (C) 2005 ROAD GmbH
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import os, re
+import bb.data
+import bb.utils
+from sets import Set
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+ bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.")
+
+__cache_version__ = "128"
+
+class Cache:
+ """
+ BitBake Cache implementation
+ """
+ def __init__(self, cooker):
+
+
+ self.cachedir = bb.data.getVar("CACHE", cooker.configuration.data, True)
+ self.clean = {}
+ self.checked = {}
+ self.depends_cache = {}
+ self.data = None
+ self.data_fn = None
+ self.cacheclean = True
+
+ if self.cachedir in [None, '']:
+ self.has_cache = False
+ bb.msg.note(1, bb.msg.domain.Cache, "Not using a cache. Set CACHE = <directory> to enable.")
+ else:
+ self.has_cache = True
+ self.cachefile = os.path.join(self.cachedir,"bb_cache.dat")
+
+ bb.msg.debug(1, bb.msg.domain.Cache, "Using cache in '%s'" % self.cachedir)
+ try:
+ os.stat( self.cachedir )
+ except OSError:
+ bb.mkdirhier( self.cachedir )
+
+ if not self.has_cache:
+ return
+
+ # If any of configuration.data's dependencies are newer than the
+ # cache there isn't even any point in loading it...
+ newest_mtime = 0
+ deps = bb.data.getVar("__depends", cooker.configuration.data, True)
+ for f,old_mtime in deps:
+ if old_mtime > newest_mtime:
+ newest_mtime = old_mtime
+
+ if bb.parse.cached_mtime_noerror(self.cachefile) >= newest_mtime:
+ try:
+ p = pickle.Unpickler(file(self.cachefile, "rb"))
+ self.depends_cache, version_data = p.load()
+ if version_data['CACHE_VER'] != __cache_version__:
+ raise ValueError, 'Cache Version Mismatch'
+ if version_data['BITBAKE_VER'] != bb.__version__:
+ raise ValueError, 'Bitbake Version Mismatch'
+ except EOFError:
+ bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...")
+ self.depends_cache = {}
+ except:
+ bb.msg.note(1, bb.msg.domain.Cache, "Invalid cache found, rebuilding...")
+ self.depends_cache = {}
+ else:
+ bb.msg.note(1, bb.msg.domain.Cache, "Out of date cache found, rebuilding...")
+
+ def getVar(self, var, fn, exp = 0):
+ """
+ Gets the value of a variable
+ (similar to getVar in the data class)
+
+ There are two scenarios:
+ 1. We have cached data - serve from depends_cache[fn]
+ 2. We're learning what data to cache - serve from data
+ backend but add a copy of the data to the cache.
+ """
+ if fn in self.clean:
+ return self.depends_cache[fn][var]
+
+ if not fn in self.depends_cache:
+ self.depends_cache[fn] = {}
+
+ if fn != self.data_fn:
+            # We're trying to access data in the cache which doesn't exist
+            # yet, and setData hasn't been called to set up the right access. Very bad.
+ bb.msg.error(bb.msg.domain.Cache, "Parsing error data_fn %s and fn %s don't match" % (self.data_fn, fn))
+
+ self.cacheclean = False
+ result = bb.data.getVar(var, self.data, exp)
+ self.depends_cache[fn][var] = result
+ return result
+
+ def setData(self, fn, data):
+ """
+ Called to prime bb_cache ready to learn which variables to cache.
+ Will be followed by calls to self.getVar which aren't cached
+ but can be fulfilled from self.data.
+ """
+ self.data_fn = fn
+ self.data = data
+
+ # Make sure __depends makes the depends_cache
+ self.getVar("__depends", fn, True)
+ self.depends_cache[fn]["CACHETIMESTAMP"] = bb.parse.cached_mtime(fn)
+
+ def loadDataFull(self, fn, cfgData):
+ """
+ Return a complete set of data for fn.
+ To do this, we need to parse the file.
+ """
+ bb.msg.debug(1, bb.msg.domain.Cache, "Parsing %s (full)" % fn)
+
+ bb_data, skipped = self.load_bbfile(fn, cfgData)
+ return bb_data
+
+ def loadData(self, fn, cfgData):
+ """
+        Load a subset of data for fn.
+        If the cached data is valid we do nothing; otherwise we parse the
+        file and set the system to record the variables accessed.
+        Returns the cache status and whether the file was skipped when parsed.
+ """
+ if fn not in self.checked:
+ self.cacheValidUpdate(fn)
+ if self.cacheValid(fn):
+ if "SKIPPED" in self.depends_cache[fn]:
+ return True, True
+ return True, False
+
+ bb.msg.debug(1, bb.msg.domain.Cache, "Parsing %s" % fn)
+
+ bb_data, skipped = self.load_bbfile(fn, cfgData)
+ self.setData(fn, bb_data)
+ return False, skipped
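+
+        # Typical call pattern from the parser (illustrative; 'bb_cache' is
+        # a hypothetical Cache instance):
+        #   cached, skipped = bb_cache.loadData(fn, cfgData)
+        #   if skipped: the recipe was skipped when parsed
+        #   elif not cached: a parse happened and variables are being recorded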
+
+ def cacheValid(self, fn):
+ """
+ Is the cache valid for fn?
+ Fast version, no timestamps checked.
+ """
+ # Is cache enabled?
+ if not self.has_cache:
+ return False
+ if fn in self.clean:
+ return True
+ return False
+
+ def cacheValidUpdate(self, fn):
+ """
+ Is the cache valid for fn?
+ Make thorough (slower) checks including timestamps.
+ """
+ # Is cache enabled?
+ if not self.has_cache:
+ return False
+
+ self.checked[fn] = ""
+
+ # Pretend we're clean so getVar works
+ self.clean[fn] = ""
+
+ # File isn't in depends_cache
+ if not fn in self.depends_cache:
+ bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s is not cached" % fn)
+ self.remove(fn)
+ return False
+
+ mtime = bb.parse.cached_mtime_noerror(fn)
+
+ # Check file still exists
+ if mtime == 0:
+ bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s not longer exists" % fn)
+ self.remove(fn)
+ return False
+
+ # Check the file's timestamp
+ if mtime != self.getVar("CACHETIMESTAMP", fn, True):
+ bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s changed" % fn)
+ self.remove(fn)
+ return False
+
+ # Check dependencies are still valid
+ depends = self.getVar("__depends", fn, True)
+ if depends:
+ for f,old_mtime in depends:
+ fmtime = bb.parse.cached_mtime_noerror(f)
+ # Check if file still exists
+ if fmtime == 0:
+ self.remove(fn)
+ return False
+
+ if (fmtime != old_mtime):
+ bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s's dependency %s changed" % (fn, f))
+ self.remove(fn)
+ return False
+
+ #bb.msg.debug(2, bb.msg.domain.Cache, "Depends Cache: %s is clean" % fn)
+ if not fn in self.clean:
+ self.clean[fn] = ""
+
+ return True
+
+ def skip(self, fn):
+ """
+ Mark a fn as skipped
+ Called from the parser
+ """
+ if not fn in self.depends_cache:
+ self.depends_cache[fn] = {}
+ self.depends_cache[fn]["SKIPPED"] = "1"
+
+ def remove(self, fn):
+ """
+ Remove a fn from the cache
+ Called from the parser in error cases
+ """
+ bb.msg.debug(1, bb.msg.domain.Cache, "Removing %s from cache" % fn)
+ if fn in self.depends_cache:
+ del self.depends_cache[fn]
+ if fn in self.clean:
+ del self.clean[fn]
+
+ def sync(self):
+ """
+ Save the cache
+ Called from the parser when complete (or exiting)
+ """
+
+ if not self.has_cache:
+ return
+
+ if self.cacheclean:
+ bb.msg.note(1, bb.msg.domain.Cache, "Cache is clean, not saving.")
+ return
+
+ version_data = {}
+ version_data['CACHE_VER'] = __cache_version__
+ version_data['BITBAKE_VER'] = bb.__version__
+
+ p = pickle.Pickler(file(self.cachefile, "wb" ), -1 )
+ p.dump([self.depends_cache, version_data])
+
+ def mtime(self, cachefile):
+ return bb.parse.cached_mtime_noerror(cachefile)
+
+ def handle_data(self, file_name, cacheData):
+ """
+ Save data we need into the cache
+ """
+
+ pn = self.getVar('PN', file_name, True)
+ pe = self.getVar('PE', file_name, True) or "0"
+ pv = self.getVar('PV', file_name, True)
+ pr = self.getVar('PR', file_name, True)
+ dp = int(self.getVar('DEFAULT_PREFERENCE', file_name, True) or "0")
+ depends = bb.utils.explode_deps(self.getVar("DEPENDS", file_name, True) or "")
+ packages = (self.getVar('PACKAGES', file_name, True) or "").split()
+ packages_dynamic = (self.getVar('PACKAGES_DYNAMIC', file_name, True) or "").split()
+ rprovides = (self.getVar("RPROVIDES", file_name, True) or "").split()
+
+ cacheData.task_deps[file_name] = self.getVar("_task_deps", file_name, True)
+
+ # build PackageName to FileName lookup table
+ if pn not in cacheData.pkg_pn:
+ cacheData.pkg_pn[pn] = []
+ cacheData.pkg_pn[pn].append(file_name)
+
+ cacheData.stamp[file_name] = self.getVar('STAMP', file_name, True)
+
+ # build FileName to PackageName lookup table
+ cacheData.pkg_fn[file_name] = pn
+ cacheData.pkg_pepvpr[file_name] = (pe,pv,pr)
+ cacheData.pkg_dp[file_name] = dp
+
+ provides = [pn]
+ for provide in (self.getVar("PROVIDES", file_name, True) or "").split():
+ if provide not in provides:
+ provides.append(provide)
+
+ # Build forward and reverse provider hashes
+ # Forward: virtual -> [filenames]
+ # Reverse: PN -> [virtuals]
+ if pn not in cacheData.pn_provides:
+ cacheData.pn_provides[pn] = []
+
+ cacheData.fn_provides[file_name] = provides
+ for provide in provides:
+ if provide not in cacheData.providers:
+ cacheData.providers[provide] = []
+ cacheData.providers[provide].append(file_name)
+ if not provide in cacheData.pn_provides[pn]:
+ cacheData.pn_provides[pn].append(provide)
+
+ cacheData.deps[file_name] = []
+ for dep in depends:
+ if not dep in cacheData.deps[file_name]:
+ cacheData.deps[file_name].append(dep)
+ if not dep in cacheData.all_depends:
+ cacheData.all_depends.append(dep)
+
+ # Build reverse hash for PACKAGES, so runtime dependencies
+        # can be resolved (RDEPENDS, RRECOMMENDS etc.)
+ for package in packages:
+ if not package in cacheData.packages:
+ cacheData.packages[package] = []
+ cacheData.packages[package].append(file_name)
+ rprovides += (self.getVar("RPROVIDES_%s" % package, file_name, 1) or "").split()
+
+ for package in packages_dynamic:
+ if not package in cacheData.packages_dynamic:
+ cacheData.packages_dynamic[package] = []
+ cacheData.packages_dynamic[package].append(file_name)
+
+ for rprovide in rprovides:
+ if not rprovide in cacheData.rproviders:
+ cacheData.rproviders[rprovide] = []
+ cacheData.rproviders[rprovide].append(file_name)
+
+        # Build hash of runtime depends and recommends
+
+ if not file_name in cacheData.rundeps:
+ cacheData.rundeps[file_name] = {}
+ if not file_name in cacheData.runrecs:
+ cacheData.runrecs[file_name] = {}
+
+ rdepends = self.getVar('RDEPENDS', file_name, True) or ""
+ rrecommends = self.getVar('RRECOMMENDS', file_name, True) or ""
+ for package in packages + [pn]:
+ if not package in cacheData.rundeps[file_name]:
+ cacheData.rundeps[file_name][package] = []
+ if not package in cacheData.runrecs[file_name]:
+ cacheData.runrecs[file_name][package] = []
+
+ cacheData.rundeps[file_name][package] = rdepends + " " + (self.getVar("RDEPENDS_%s" % package, file_name, True) or "")
+ cacheData.runrecs[file_name][package] = rrecommends + " " + (self.getVar("RRECOMMENDS_%s" % package, file_name, True) or "")
+
+ # Collect files we may need for possible world-dep
+ # calculations
+ if not self.getVar('BROKEN', file_name, True) and not self.getVar('EXCLUDE_FROM_WORLD', file_name, True):
+ cacheData.possible_world.append(file_name)
+
+
+ def load_bbfile( self, bbfile , config):
+ """
+ Load and parse one .bb build file
+ Return the data and whether parsing resulted in the file being skipped
+ """
+
+ import bb
+ from bb import utils, data, parse, debug, event, fatal
+
+ # expand tmpdir to include this topdir
+ data.setVar('TMPDIR', data.getVar('TMPDIR', config, 1) or "", config)
+ bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
+ oldpath = os.path.abspath(os.getcwd())
+ if bb.parse.cached_mtime_noerror(bbfile_loc):
+ os.chdir(bbfile_loc)
+ bb_data = data.init_db(config)
+ try:
+ bb_data = parse.handle(bbfile, bb_data) # read .bb data
+ os.chdir(oldpath)
+ return bb_data, False
+ except bb.parse.SkipPackage:
+ os.chdir(oldpath)
+ return bb_data, True
+ except:
+ os.chdir(oldpath)
+ raise
+
+def init(cooker):
+ """
+ The Objective: Cache the minimum amount of data possible yet get to the
+ stage of building packages (i.e. tryBuild) without reparsing any .bb files.
+
+ To do this, we intercept getVar calls and only cache the variables we see
+ being accessed. We rely on the cache getVar calls being made for all
+ variables bitbake might need to use to reach this stage. For each cached
+ file we need to track:
+
+ * Its mtime
+ * The mtimes of all its dependencies
+ * Whether it caused a parse.SkipPackage exception
+
+ Files causing parsing errors are evicted from the cache.
+
+ """
+ return Cache(cooker)
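+
+# Illustrative use of the cache API (sketch only; 'cooker', 'fn', 'cfgData'
+# and 'cacheData' are assumed to be supplied by the caller, as in cooker.py):
+#
+#   cache = bb.cache.init(cooker)
+#   cached, skipped = cache.loadData(fn, cfgData)   # reuse cache or reparse
+#   if not skipped:
+#       cache.handle_data(fn, cacheData)            # populate CacheData tables
+#   cache.sync()                                    # write the cache to disk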
+
+
+
+#============================================================================#
+# CacheData
+#============================================================================#
+class CacheData:
+ """
+ The data structures we compile from the cached data
+ """
+
+ def __init__(self):
+ """
+ Direct cache variables
+ (from Cache.handle_data)
+ """
+ self.providers = {}
+ self.rproviders = {}
+ self.packages = {}
+ self.packages_dynamic = {}
+ self.possible_world = []
+ self.pkg_pn = {}
+ self.pkg_fn = {}
+ self.pkg_pepvpr = {}
+ self.pkg_dp = {}
+ self.pn_provides = {}
+ self.fn_provides = {}
+ self.all_depends = []
+ self.deps = {}
+ self.rundeps = {}
+ self.runrecs = {}
+ self.task_queues = {}
+ self.task_deps = {}
+ self.stamp = {}
+ self.preferred = {}
+
+ """
+ Indirect Cache variables
+ (set elsewhere)
+ """
+ self.ignored_dependencies = []
+ self.world_target = Set()
+ self.bbfile_priority = {}
+ self.bbfile_config_priorities = []
diff --git a/bitbake-dev/lib/bb/command.py b/bitbake-dev/lib/bb/command.py
new file mode 100644
index 0000000000..8384e89e55
--- /dev/null
+++ b/bitbake-dev/lib/bb/command.py
@@ -0,0 +1,211 @@
+"""
+BitBake 'Command' module
+
+Provide an interface to interact with the bitbake server through 'commands'
+"""
+
+# Copyright (C) 2006-2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+The bitbake server takes 'commands' from its UI/commandline.
+Commands are either 'online' or 'offline' in nature.
+Offline commands return data to the client in the form of events.
+Online commands must only return data through the function return value
+and must not trigger events, directly or indirectly.
+Commands are queued in a CommandQueue
+"""
+
+import bb
+
+offline_cmds = {}
+online_cmds = {}
+
+class Command:
+ """
+ A queue of 'offline' commands for bitbake
+ """
+ def __init__(self, cooker):
+
+ self.cooker = cooker
+ self.cmds_online = CommandsOnline()
+ self.cmds_offline = CommandsOffline()
+
+ # FIXME Add lock for this
+ self.currentOfflineCommand = None
+
+ for attr in CommandsOnline.__dict__:
+ command = attr[:].lower()
+ method = getattr(CommandsOnline, attr)
+ online_cmds[command] = (method)
+
+ for attr in CommandsOffline.__dict__:
+ command = attr[:].lower()
+ method = getattr(CommandsOffline, attr)
+ offline_cmds[command] = (method)
+
+ def runCommand(self, commandline):
+ try:
+ command = commandline.pop(0)
+ if command in CommandsOnline.__dict__:
+ # Can run online commands straight away
+ return getattr(CommandsOnline, command)(self.cmds_online, self, commandline)
+ if self.currentOfflineCommand is not None:
+ return "Busy (%s in progress)" % self.currentOfflineCommand[0]
+ if command not in CommandsOffline.__dict__:
+ return "No such command"
+ self.currentOfflineCommand = (command, commandline)
+ return True
+ except:
+ import traceback
+ return traceback.format_exc()
+
+ def runOfflineCommand(self):
+ try:
+ if self.currentOfflineCommand is not None:
+ (command, options) = self.currentOfflineCommand
+ getattr(CommandsOffline, command)(self.cmds_offline, self, options)
+ except:
+ import traceback
+ self.finishOfflineCommand(traceback.format_exc())
+
+ def finishOfflineCommand(self, error = None):
+ if error:
+ bb.event.fire(bb.command.CookerCommandFailed(self.cooker.configuration.event_data, error))
+ else:
+ bb.event.fire(bb.command.CookerCommandCompleted(self.cooker.configuration.event_data))
+ self.currentOfflineCommand = None
+
+
+class CommandsOnline:
+ """
+ A class of online commands
+ These should run quickly so as not to hurt interactive performance.
+ These must not influence any running offline command.
+ """
+
+ def stateShutdown(self, command, params):
+ """
+ Trigger cooker 'shutdown' mode
+ """
+ command.cooker.cookerAction = bb.cooker.cookerShutdown
+
+ def stateStop(self, command, params):
+ """
+ Stop the cooker
+ """
+ command.cooker.cookerAction = bb.cooker.cookerStop
+
+ def getCmdLineAction(self, command, params):
+ """
+ Get any command parsed from the commandline
+ """
+ return command.cooker.commandlineAction
+
+ def readVariable(self, command, params):
+ """
+ Read the value of a variable from configuration.data
+ """
+ varname = params[0]
+ expand = True
+ if len(params) > 1:
+ expand = params[1]
+
+ return bb.data.getVar(varname, command.cooker.configuration.data, expand)
+
+class CommandsOffline:
+ """
+ A class of offline commands
+ These functions communicate via generated events.
+ Any function that requires metadata parsing should be here.
+ """
+
+ def buildFile(self, command, params):
+ """
+ Build a single specified .bb file
+ """
+ bfile = params[0]
+ task = params[1]
+
+ command.cooker.buildFile(bfile, task)
+
+ def buildTargets(self, command, params):
+ """
+ Build a set of targets
+ """
+ pkgs_to_build = params[0]
+
+ command.cooker.buildTargets(pkgs_to_build)
+
+ def generateDepTreeEvent(self, command, params):
+ """
+ Generate an event containing the dependency information
+ """
+ pkgs_to_build = params[0]
+
+ command.cooker.generateDepTreeEvent(pkgs_to_build)
+ command.finishOfflineCommand()
+
+ def generateDotGraph(self, command, params):
+ """
+ Dump dependency information to disk as .dot files
+ """
+ pkgs_to_build = params[0]
+
+ command.cooker.generateDotGraphFiles(pkgs_to_build)
+ command.finishOfflineCommand()
+
+ def showVersions(self, command, params):
+ """
+ Show the currently selected versions
+ """
+ command.cooker.showVersions()
+ command.finishOfflineCommand()
+
+ def showEnvironment(self, command, params):
+ """
+ Print the environment
+ """
+ bfile = params[0]
+ pkg = params[1]
+
+ command.cooker.showEnvironment(bfile, pkg)
+ command.finishOfflineCommand()
+
+ def parseFiles(self, command, params):
+ """
+ Parse the .bb files
+ """
+ command.cooker.updateCache()
+ command.finishOfflineCommand()
+
+#
+# Events
+#
+class CookerCommandCompleted(bb.event.Event):
+ """
+ Cooker command completed
+ """
+ def __init__(self, data):
+ bb.event.Event.__init__(self, data)
+
+
+class CookerCommandFailed(bb.event.Event):
+ """
+    Cooker command failed
+ """
+ def __init__(self, data, error):
+ bb.event.Event.__init__(self, data)
+ self.error = error
diff --git a/bitbake-dev/lib/bb/cooker.py b/bitbake-dev/lib/bb/cooker.py
new file mode 100644
index 0000000000..c92ad70a2c
--- /dev/null
+++ b/bitbake-dev/lib/bb/cooker.py
@@ -0,0 +1,941 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
+# Copyright (C) 2005 Holger Hans Peter Freyther
+# Copyright (C) 2005 ROAD GmbH
+# Copyright (C) 2006 - 2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys, os, getopt, glob, copy, os.path, re, time
+import bb
+from bb import utils, data, parse, event, cache, providers, taskdata, runqueue
+from bb import xmlrpcserver, command
+from sets import Set
+import itertools, sre_constants
+
+class MultipleMatches(Exception):
+ """
+ Exception raised when multiple file matches are found
+ """
+
+class ParsingErrorsFound(Exception):
+ """
+ Exception raised when parsing errors are found
+ """
+
+class NothingToBuild(Exception):
+ """
+ Exception raised when there is nothing to build
+ """
+
+
+# Different states cooker can be in
+cookerClean = 1
+cookerParsed = 2
+
+# Different action states the cooker can be in
+cookerRun = 1 # Cooker is running normally
+cookerShutdown = 2 # Active tasks should be brought to a controlled stop
+cookerStop = 3 # Stop, now!
+
+#============================================================================#
+# BBCooker
+#============================================================================#
+class BBCooker:
+ """
+ Manages one bitbake build run
+ """
+
+ def __init__(self, configuration):
+ self.status = None
+
+ self.cache = None
+ self.bb_cache = None
+
+ self.server = bb.xmlrpcserver.BitBakeXMLRPCServer(self)
+ #self.server.register_function(self.showEnvironment)
+
+ self.configuration = configuration
+
+ if self.configuration.verbose:
+ bb.msg.set_verbose(True)
+
+ if self.configuration.debug:
+ bb.msg.set_debug_level(self.configuration.debug)
+ else:
+ bb.msg.set_debug_level(0)
+
+ if self.configuration.debug_domains:
+ bb.msg.set_debug_domains(self.configuration.debug_domains)
+
+ self.configuration.data = bb.data.init()
+
+ for f in self.configuration.file:
+ self.parseConfigurationFile( f )
+
+ self.parseConfigurationFile( os.path.join( "conf", "bitbake.conf" ) )
+
+ if not self.configuration.cmd:
+ self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data, True) or "build"
+
+ bbpkgs = bb.data.getVar('BBPKGS', self.configuration.data, True)
+ if bbpkgs:
+ self.configuration.pkgs_to_build.extend(bbpkgs.split())
+
+ #
+ # Special updated configuration we use for firing events
+ #
+ self.configuration.event_data = bb.data.createCopy(self.configuration.data)
+ bb.data.update_data(self.configuration.event_data)
+
+ # TOSTOP must not be set or our children will hang when they output
+ fd = sys.stdout.fileno()
+ if os.isatty(fd):
+ import termios
+ tcattr = termios.tcgetattr(fd)
+ if tcattr[3] & termios.TOSTOP:
+ bb.msg.note(1, bb.msg.domain.Build, "The terminal had the TOSTOP bit set, clearing...")
+ tcattr[3] = tcattr[3] & ~termios.TOSTOP
+ termios.tcsetattr(fd, termios.TCSANOW, tcattr)
+
+ # Change nice level if we're asked to
+ nice = bb.data.getVar("BB_NICE_LEVEL", self.configuration.data, True)
+ if nice:
+ curnice = os.nice(0)
+ nice = int(nice) - curnice
+ bb.msg.note(2, bb.msg.domain.Build, "Renice to %s " % os.nice(nice))
+
+ # Parse any commandline into actions
+ if self.configuration.show_environment:
+ self.commandlineAction = None
+
+ if 'world' in self.configuration.pkgs_to_build:
+ bb.error("'world' is not a valid target for --environment.")
+ elif len(self.configuration.pkgs_to_build) > 1:
+ bb.error("Only one target can be used with the --environment option.")
+ elif self.configuration.buildfile and len(self.configuration.pkgs_to_build) > 0:
+ bb.error("No target should be used with the --environment and --buildfile options.")
+ else:
+ self.commandlineAction = ["showEnvironment", self.configuration.buildfile, self.configuration.pkgs_to_build]
+ elif self.configuration.buildfile is not None:
+ self.commandlineAction = ["buildFile", self.configuration.buildfile, self.configuration.cmd]
+ elif self.configuration.show_versions:
+ self.commandlineAction = ["showVersions"]
+ elif self.configuration.parse_only:
+ self.commandlineAction = ["parseFiles"]
+ elif self.configuration.dot_graph:
+ if self.configuration.pkgs_to_build:
+ self.commandlineAction = ["generateDotGraph", self.configuration.pkgs_to_build]
+ else:
+ self.commandlineAction = None
+ bb.error("Please specify a package name for dependency graph generation.")
+ else:
+ if self.configuration.pkgs_to_build:
+ self.commandlineAction = ["buildTargets", self.configuration.pkgs_to_build]
+ else:
+ self.commandlineAction = None
+ bb.error("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
+
+ # FIXME - implement
+ #if self.configuration.interactive:
+ # self.interactiveMode()
+
+ self.command = bb.command.Command(self)
+ self.cookerIdle = True
+ self.cookerState = cookerClean
+ self.cookerAction = cookerRun
+ self.server.register_idle_function(self.runCommands, self)
+
+
+ def runCommands(self, server, data, abort):
+ """
+ Run any queued offline command
+        This is done by the idle handler so it runs in the server's own
+        context rather than being tied to any UI.
+ """
+ if self.cookerIdle and not abort:
+ self.command.runOfflineCommand()
+
+ # Always reschedule
+ return True
+
+ def tryBuildPackage(self, fn, item, task, the_data):
+ """
+ Build one task of a package, optionally build following task depends
+ """
+ bb.event.fire(bb.event.PkgStarted(item, the_data))
+ try:
+ if not self.configuration.dry_run:
+ bb.build.exec_task('do_%s' % task, the_data)
+ bb.event.fire(bb.event.PkgSucceeded(item, the_data))
+ return True
+ except bb.build.FuncFailed:
+ bb.msg.error(bb.msg.domain.Build, "task stack execution failed")
+ bb.event.fire(bb.event.PkgFailed(item, the_data))
+ raise
+ except bb.build.EventException, e:
+ event = e.args[1]
+ bb.msg.error(bb.msg.domain.Build, "%s event exception, aborting" % bb.event.getName(event))
+ bb.event.fire(bb.event.PkgFailed(item, the_data))
+ raise
+
+ def tryBuild(self, fn):
+ """
+ Build a provider and its dependencies.
+        The full data for fn is loaded and the configured command's task
+        is executed via tryBuildPackage.
+ """
+
+ the_data = self.bb_cache.loadDataFull(fn, self.configuration.data)
+
+ item = self.status.pkg_fn[fn]
+
+ #if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data):
+ # return True
+
+ return self.tryBuildPackage(fn, item, self.configuration.cmd, the_data)
+
+ def showVersions(self):
+
+ # Need files parsed
+ self.updateCache()
+
+ pkg_pn = self.status.pkg_pn
+ preferred_versions = {}
+ latest_versions = {}
+
+ # Sort by priority
+ for pn in pkg_pn.keys():
+ (last_ver,last_file,pref_ver,pref_file) = bb.providers.findBestProvider(pn, self.configuration.data, self.status)
+ preferred_versions[pn] = (pref_ver, pref_file)
+ latest_versions[pn] = (last_ver, last_file)
+
+ pkg_list = pkg_pn.keys()
+ pkg_list.sort()
+
+ bb.msg.plain("%-35s %25s %25s" % ("Package Name", "Latest Version", "Preferred Version"))
+ bb.msg.plain("%-35s %25s %25s\n" % ("============", "==============", "================="))
+
+ for p in pkg_list:
+ pref = preferred_versions[p]
+ latest = latest_versions[p]
+
+ prefstr = pref[0][0] + ":" + pref[0][1] + '-' + pref[0][2]
+ lateststr = latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2]
+
+ if pref == latest:
+ prefstr = ""
+
+ bb.msg.plain("%-35s %25s %25s" % (p, lateststr, prefstr))
+
+ def showEnvironment(self, buildfile = None, pkgs_to_build = []):
+ """
+ Show the outer or per-package environment
+ """
+ fn = None
+ envdata = None
+
+ if buildfile:
+ self.cb = None
+ self.bb_cache = bb.cache.init(self)
+ fn = self.matchFile(buildfile)
+ elif len(pkgs_to_build) == 1:
+ self.updateCache()
+
+ localdata = data.createCopy(self.configuration.data)
+ bb.data.update_data(localdata)
+ bb.data.expandKeys(localdata)
+
+ taskdata = bb.taskdata.TaskData(self.configuration.abort)
+ taskdata.add_provider(localdata, self.status, pkgs_to_build[0])
+ taskdata.add_unresolved(localdata, self.status)
+
+ targetid = taskdata.getbuild_id(pkgs_to_build[0])
+ fnid = taskdata.build_targets[targetid][0]
+ fn = taskdata.fn_index[fnid]
+ else:
+ envdata = self.configuration.data
+
+ if fn:
+ try:
+ envdata = self.bb_cache.loadDataFull(fn, self.configuration.data)
+ except IOError, e:
+ bb.msg.error(bb.msg.domain.Parsing, "Unable to read %s: %s" % (fn, e))
+ raise
+ except Exception, e:
+ bb.msg.error(bb.msg.domain.Parsing, "%s" % e)
+ raise
+
+ class dummywrite:
+ def __init__(self):
+ self.writebuf = ""
+ def write(self, output):
+ self.writebuf = self.writebuf + output
+
+ # emit variables and shell functions
+ try:
+ data.update_data(envdata)
+ wb = dummywrite()
+ data.emit_env(wb, envdata, True)
+ bb.msg.plain(wb.writebuf)
+ except Exception, e:
+ bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e)
+        # emit the metadata which isn't valid shell
+ data.expandKeys(envdata)
+ for e in envdata.keys():
+ if data.getVarFlag( e, 'python', envdata ):
+ bb.msg.plain("\npython %s () {\n%s}\n" % (e, data.getVar(e, envdata, 1)))
+
+ def generateDepTreeData(self, pkgs_to_build):
+ """
+ Create a dependency tree of pkgs_to_build, returning the data.
+ """
+
+ # Need files parsed
+ self.updateCache()
+
+ pkgs_to_build = self.checkPackages(pkgs_to_build)
+
+ localdata = data.createCopy(self.configuration.data)
+ bb.data.update_data(localdata)
+ bb.data.expandKeys(localdata)
+ taskdata = bb.taskdata.TaskData(self.configuration.abort)
+
+ runlist = []
+ for k in pkgs_to_build:
+ taskdata.add_provider(localdata, self.status, k)
+ runlist.append([k, "do_%s" % self.configuration.cmd])
+ taskdata.add_unresolved(localdata, self.status)
+
+ rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
+ rq.prepare_runqueue()
+
+ seen_fnids = []
+ depend_tree = {}
+ depend_tree["depends"] = {}
+ depend_tree["tdepends"] = {}
+ depend_tree["pn"] = {}
+ depend_tree["rdepends-pn"] = {}
+ depend_tree["packages"] = {}
+ depend_tree["rdepends-pkg"] = {}
+ depend_tree["rrecs-pkg"] = {}
+
+ for task in range(len(rq.runq_fnid)):
+ taskname = rq.runq_task[task]
+ fnid = rq.runq_fnid[task]
+ fn = taskdata.fn_index[fnid]
+ pn = self.status.pkg_fn[fn]
+ version = "%s:%s-%s" % self.status.pkg_pepvpr[fn]
+ if pn not in depend_tree["pn"]:
+ depend_tree["pn"][pn] = {}
+ depend_tree["pn"][pn]["filename"] = fn
+ depend_tree["pn"][pn]["version"] = version
+ for dep in rq.runq_depends[task]:
+ depfn = taskdata.fn_index[rq.runq_fnid[dep]]
+ deppn = self.status.pkg_fn[depfn]
+ dotname = "%s.%s" % (pn, rq.runq_task[task])
+ if not dotname in depend_tree["tdepends"]:
+ depend_tree["tdepends"][dotname] = []
+ depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, rq.runq_task[dep]))
+ if fnid not in seen_fnids:
+ seen_fnids.append(fnid)
+ packages = []
+
+ depend_tree["depends"][pn] = []
+ for dep in taskdata.depids[fnid]:
+ depend_tree["depends"][pn].append(taskdata.build_names_index[dep])
+
+ depend_tree["rdepends-pn"][pn] = []
+ for rdep in taskdata.rdepids[fnid]:
+ depend_tree["rdepends-pn"][pn].append(taskdata.run_names_index[rdep])
+
+ rdepends = self.status.rundeps[fn]
+ for package in rdepends:
+ depend_tree["rdepends-pkg"][package] = []
+ for rdepend in rdepends[package]:
+ depend_tree["rdepends-pkg"][package].append(rdepend)
+ packages.append(package)
+
+ rrecs = self.status.runrecs[fn]
+ for package in rrecs:
+ depend_tree["rrecs-pkg"][package] = []
+ for rdepend in rrecs[package]:
+ depend_tree["rrecs-pkg"][package].append(rdepend)
+ if not package in packages:
+ packages.append(package)
+
+ for package in packages:
+ if package not in depend_tree["packages"]:
+ depend_tree["packages"][package] = {}
+ depend_tree["packages"][package]["pn"] = pn
+ depend_tree["packages"][package]["filename"] = fn
+ depend_tree["packages"][package]["version"] = version
+
+ return depend_tree
+
+
+ def generateDepTreeEvent(self, pkgs_to_build):
+ """
+ Create a task dependency graph of pkgs_to_build.
+ Generate an event with the result
+ """
+ depgraph = self.generateDepTreeData(pkgs_to_build)
+ bb.event.fire(bb.event.DepTreeGenerated(self.configuration.data, depgraph))
+
+ def generateDotGraphFiles(self, pkgs_to_build):
+ """
+ Create a task dependency graph of pkgs_to_build.
+ Save the result to a set of .dot files.
+ """
+
+ depgraph = self.generateDepTreeData(pkgs_to_build)
+
+        # Print a flattened form of the package dependency graph, where subpackages of a package are merged into the main PN
+ depends_file = file('pn-depends.dot', 'w' )
+ print >> depends_file, "digraph depends {"
+ for pn in depgraph["pn"]:
+ fn = depgraph["pn"][pn]["filename"]
+ version = depgraph["pn"][pn]["version"]
+ print >> depends_file, '"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn)
+ for pn in depgraph["depends"]:
+ for depend in depgraph["depends"][pn]:
+ print >> depends_file, '"%s" -> "%s"' % (pn, depend)
+ for pn in depgraph["rdepends-pn"]:
+ for rdepend in depgraph["rdepends-pn"][pn]:
+ print >> depends_file, '"%s" -> "%s" [style=dashed]' % (pn, rdepend)
+ print >> depends_file, "}"
+ bb.msg.plain("PN dependencies saved to 'pn-depends.dot'")
+
+ depends_file = file('package-depends.dot', 'w' )
+ print >> depends_file, "digraph depends {"
+ for package in depgraph["packages"]:
+ pn = depgraph["packages"][package]["pn"]
+ fn = depgraph["packages"][package]["filename"]
+ version = depgraph["packages"][package]["version"]
+ if package == pn:
+ print >> depends_file, '"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn)
+ else:
+ print >> depends_file, '"%s" [label="%s(%s) %s\\n%s"]' % (package, package, pn, version, fn)
+ for depend in depgraph["depends"][pn]:
+ print >> depends_file, '"%s" -> "%s"' % (package, depend)
+ for package in depgraph["rdepends-pkg"]:
+ for rdepend in depgraph["rdepends-pkg"][package]:
+ print >> depends_file, '"%s" -> "%s" [style=dashed]' % (package, rdepend)
+ for package in depgraph["rrecs-pkg"]:
+ for rdepend in depgraph["rrecs-pkg"][package]:
+ print >> depends_file, '"%s" -> "%s" [style=dashed]' % (package, rdepend)
+ print >> depends_file, "}"
+ bb.msg.plain("Package dependencies saved to 'package-depends.dot'")
+
+ tdepends_file = file('task-depends.dot', 'w' )
+ print >> tdepends_file, "digraph depends {"
+ for task in depgraph["tdepends"]:
+ (pn, taskname) = task.rsplit(".", 1)
+ fn = depgraph["pn"][pn]["filename"]
+ version = depgraph["pn"][pn]["version"]
+ print >> tdepends_file, '"%s.%s" [label="%s %s\\n%s\\n%s"]' % (pn, taskname, pn, taskname, version, fn)
+ for dep in depgraph["tdepends"][task]:
+ print >> tdepends_file, '"%s" -> "%s"' % (task, dep)
+ print >> tdepends_file, "}"
+ bb.msg.plain("Task dependencies saved to 'task-depends.dot'")
+
+ def buildDepgraph( self ):
+ all_depends = self.status.all_depends
+ pn_provides = self.status.pn_provides
+
+ localdata = data.createCopy(self.configuration.data)
+ bb.data.update_data(localdata)
+ bb.data.expandKeys(localdata)
+
+ def calc_bbfile_priority(filename):
+ for (regex, pri) in self.status.bbfile_config_priorities:
+ if regex.match(filename):
+ return pri
+ return 0
+
+ # Handle PREFERRED_PROVIDERS
+ for p in (bb.data.getVar('PREFERRED_PROVIDERS', localdata, 1) or "").split():
+ try:
+ (providee, provider) = p.split(':')
+ except:
+ bb.msg.fatal(bb.msg.domain.Provider, "Malformed option in PREFERRED_PROVIDERS variable: %s" % p)
+ continue
+ if providee in self.status.preferred and self.status.preferred[providee] != provider:
+ bb.msg.error(bb.msg.domain.Provider, "conflicting preferences for %s: both %s and %s specified" % (providee, provider, self.status.preferred[providee]))
+ self.status.preferred[providee] = provider
+
+ # Calculate priorities for each file
+ for p in self.status.pkg_fn.keys():
+ self.status.bbfile_priority[p] = calc_bbfile_priority(p)
+
+ def buildWorldTargetList(self):
+ """
+ Build package list for "bitbake world"
+ """
+ all_depends = self.status.all_depends
+ pn_provides = self.status.pn_provides
+ bb.msg.debug(1, bb.msg.domain.Parsing, "collating packages for \"world\"")
+ for f in self.status.possible_world:
+ terminal = True
+ pn = self.status.pkg_fn[f]
+
+ for p in pn_provides[pn]:
+ if p.startswith('virtual/'):
+ bb.msg.debug(2, bb.msg.domain.Parsing, "World build skipping %s due to %s provider starting with virtual/" % (f, p))
+ terminal = False
+ break
+ for pf in self.status.providers[p]:
+ if self.status.pkg_fn[pf] != pn:
+ bb.msg.debug(2, bb.msg.domain.Parsing, "World build skipping %s due to both us and %s providing %s" % (f, pf, p))
+ terminal = False
+ break
+ if terminal:
+ self.status.world_target.add(pn)
+
+ # drop reference count now
+ self.status.possible_world = None
+ self.status.all_depends = None
+
+ def interactiveMode( self ):
+ """Drop off into a shell"""
+ try:
+ from bb import shell
+ except ImportError, details:
+ bb.msg.fatal(bb.msg.domain.Parsing, "Sorry, shell not available (%s)" % details )
+ else:
+ shell.start( self )
+
+ def parseConfigurationFile( self, afile ):
+ try:
+ self.configuration.data = bb.parse.handle( afile, self.configuration.data )
+
+ # Handle any INHERITs and inherit the base class
+ inherits = ["base"] + (bb.data.getVar('INHERIT', self.configuration.data, True ) or "").split()
+ for inherit in inherits:
+ self.configuration.data = bb.parse.handle(os.path.join('classes', '%s.bbclass' % inherit), self.configuration.data, True )
+
+            # Normally we only register event handlers at the end of parsing .bb files
+ # We register any handlers we've found so far here...
+ for var in data.getVar('__BBHANDLERS', self.configuration.data) or []:
+ bb.event.register(var,bb.data.getVar(var, self.configuration.data))
+
+ bb.fetch.fetcher_init(self.configuration.data)
+
+ bb.event.fire(bb.event.ConfigParsed(self.configuration.data))
+
+        except IOError, e:
+            bb.msg.fatal(bb.msg.domain.Parsing, "Error when parsing %s: %s" % (afile, str(e)))
+ except bb.parse.ParseError, details:
+ bb.msg.fatal(bb.msg.domain.Parsing, "Unable to parse %s (%s)" % (afile, details) )
+
+ def handleCollections( self, collections ):
+ """Handle collections"""
+ if collections:
+ collection_list = collections.split()
+ for c in collection_list:
+ regex = bb.data.getVar("BBFILE_PATTERN_%s" % c, self.configuration.data, 1)
+ if regex == None:
+ bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PATTERN_%s not defined" % c)
+ continue
+ priority = bb.data.getVar("BBFILE_PRIORITY_%s" % c, self.configuration.data, 1)
+ if priority == None:
+ bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PRIORITY_%s not defined" % c)
+ continue
+ try:
+ cre = re.compile(regex)
+ except re.error:
+ bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PATTERN_%s \"%s\" is not a valid regular expression" % (c, regex))
+ continue
+ try:
+ pri = int(priority)
+ self.status.bbfile_config_priorities.append((cre, pri))
+ except ValueError:
+ bb.msg.error(bb.msg.domain.Parsing, "invalid value for BBFILE_PRIORITY_%s: \"%s\"" % (c, priority))
+
+ def buildSetVars(self):
+ """
+ Setup any variables needed before starting a build
+ """
+ if not bb.data.getVar("BUILDNAME", self.configuration.data):
+ bb.data.setVar("BUILDNAME", os.popen('date +%Y%m%d%H%M').readline().strip(), self.configuration.data)
+ bb.data.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime()), self.configuration.data)
+
+ def matchFiles(self, buildfile):
+ """
+ Find the .bb files which match the expression in 'buildfile'.
+ """
+
+ bf = os.path.abspath(buildfile)
+ try:
+ os.stat(bf)
+ return [bf]
+ except OSError:
+ (filelist, masked) = self.collect_bbfiles()
+ regexp = re.compile(buildfile)
+ matches = []
+ for f in filelist:
+ if regexp.search(f) and os.path.isfile(f):
+ bf = f
+ matches.append(f)
+ return matches
+
+ def matchFile(self, buildfile):
+ """
+ Find the .bb file which matches the expression in 'buildfile'.
+        Raise an error if multiple files match.
+ """
+ matches = self.matchFiles(buildfile)
+ if len(matches) != 1:
+ bb.msg.error(bb.msg.domain.Parsing, "Unable to match %s (%s matches found):" % (buildfile, len(matches)))
+ for f in matches:
+ bb.msg.error(bb.msg.domain.Parsing, " %s" % f)
+ raise MultipleMatches
+ return matches[0]
+
+ def buildFile(self, buildfile, task):
+ """
+ Build the file matching regexp buildfile
+ """
+
+ fn = self.matchFile(buildfile)
+ self.buildSetVars()
+
+ # Load data into the cache for fn
+ self.bb_cache = bb.cache.init(self)
+ self.bb_cache.loadData(fn, self.configuration.data)
+
+ # Parse the loaded cache data
+ self.status = bb.cache.CacheData()
+ self.bb_cache.handle_data(fn, self.status)
+
+ # Tweak some variables
+ item = self.bb_cache.getVar('PN', fn, True)
+ self.status.ignored_dependencies = Set()
+ self.status.bbfile_priority[fn] = 1
+
+ # Remove external dependencies
+ self.status.task_deps[fn]['depends'] = {}
+ self.status.deps[fn] = []
+ self.status.rundeps[fn] = []
+ self.status.runrecs[fn] = []
+
+ # Remove stamp for target if force mode active
+ if self.configuration.force:
+ bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (task, fn))
+ bb.build.del_stamp('do_%s' % task, self.status, fn)
+
+ # Setup taskdata structure
+ taskdata = bb.taskdata.TaskData(self.configuration.abort)
+ taskdata.add_provider(self.configuration.data, self.status, item)
+
+ buildname = bb.data.getVar("BUILDNAME", self.configuration.data)
+ bb.event.fire(bb.event.BuildStarted(buildname, [item], self.configuration.event_data))
+
+ # Execute the runqueue
+ runlist = [[item, "do_%s" % self.configuration.cmd]]
+
+ rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
+
+ def buildFileIdle(server, rq, abort):
+
+ if abort or self.cookerAction == cookerStop:
+ rq.finish_runqueue(True)
+ elif self.cookerAction == cookerShutdown:
+ rq.finish_runqueue(False)
+ failures = 0
+ try:
+ retval = rq.execute_runqueue()
+ except runqueue.TaskFailure, fnids:
+ for fnid in fnids:
+ bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
+ failures = failures + 1
+ retval = False
+ if not retval:
+ self.cookerIdle = True
+ self.command.finishOfflineCommand()
+                bb.event.fire(bb.event.BuildCompleted(buildname, [item], self.configuration.event_data, failures))
+ return retval
+
+ self.cookerIdle = False
+ self.server.register_idle_function(buildFileIdle, rq)
+
+ def buildTargets(self, targets):
+ """
+ Attempt to build the targets specified
+ """
+
+ # Need files parsed
+ self.updateCache()
+
+ targets = self.checkPackages(targets)
+
+ def buildTargetsIdle(server, rq, abort):
+
+ if abort or self.cookerAction == cookerStop:
+ rq.finish_runqueue(True)
+ elif self.cookerAction == cookerShutdown:
+ rq.finish_runqueue(False)
+ failures = 0
+ try:
+ retval = rq.execute_runqueue()
+ except runqueue.TaskFailure, fnids:
+ for fnid in fnids:
+ bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
+ failures = failures + 1
+ retval = False
+ if not retval:
+ self.cookerIdle = True
+ self.command.finishOfflineCommand()
+ bb.event.fire(bb.event.BuildCompleted(buildname, targets, self.configuration.event_data, failures))
+ return retval
+
+ self.buildSetVars()
+
+ buildname = bb.data.getVar("BUILDNAME", self.configuration.data)
+ bb.event.fire(bb.event.BuildStarted(buildname, targets, self.configuration.event_data))
+
+ localdata = data.createCopy(self.configuration.data)
+ bb.data.update_data(localdata)
+ bb.data.expandKeys(localdata)
+
+ taskdata = bb.taskdata.TaskData(self.configuration.abort)
+
+ runlist = []
+ for k in targets:
+ taskdata.add_provider(localdata, self.status, k)
+ runlist.append([k, "do_%s" % self.configuration.cmd])
+ taskdata.add_unresolved(localdata, self.status)
+
+ rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
+
+ self.cookerIdle = False
+ self.server.register_idle_function(buildTargetsIdle, rq)
+
+ def updateCache(self):
+
+ if self.cookerState == cookerParsed:
+ return
+
+ # Import Psyco if available and not disabled
+ import platform
+ if platform.machine() in ['i386', 'i486', 'i586', 'i686']:
+ if not self.configuration.disable_psyco:
+ try:
+ import psyco
+ except ImportError:
+ bb.msg.note(1, bb.msg.domain.Collection, "Psyco JIT Compiler (http://psyco.sf.net) not available. Install it to increase performance.")
+ else:
+ psyco.bind( self.parse_bbfiles )
+ else:
+ bb.msg.note(1, bb.msg.domain.Collection, "You have disabled Psyco. This decreases performance.")
+
+ self.status = bb.cache.CacheData()
+
+ ignore = bb.data.getVar("ASSUME_PROVIDED", self.configuration.data, 1) or ""
+ self.status.ignored_dependencies = Set(ignore.split())
+
+ for dep in self.configuration.extra_assume_provided:
+ self.status.ignored_dependencies.add(dep)
+
+ self.handleCollections( bb.data.getVar("BBFILE_COLLECTIONS", self.configuration.data, 1) )
+
+ bb.msg.debug(1, bb.msg.domain.Collection, "collecting .bb files")
+ (filelist, masked) = self.collect_bbfiles()
+ self.parse_bbfiles(filelist, masked)
+ bb.msg.debug(1, bb.msg.domain.Collection, "parsing complete")
+
+ self.buildDepgraph()
+
+ self.cookerState = cookerParsed
+
+ def checkPackages(self, pkgs_to_build):
+
+ if len(pkgs_to_build) == 0:
+ raise NothingToBuild
+
+ if 'world' in pkgs_to_build:
+ self.buildWorldTargetList()
+ pkgs_to_build.remove('world')
+ for t in self.status.world_target:
+ pkgs_to_build.append(t)
+
+ return pkgs_to_build
+
+ def get_bbfiles( self, path = os.getcwd() ):
+ """Get list of default .bb files by reading out the current directory"""
+ contents = os.listdir(path)
+ bbfiles = []
+ for f in contents:
+ (root, ext) = os.path.splitext(f)
+ if ext == ".bb":
+ bbfiles.append(os.path.abspath(os.path.join(os.getcwd(),f)))
+ return bbfiles
+
+ def find_bbfiles( self, path ):
+ """Find all the .bb files in a directory"""
+ from os.path import join
+
+ found = []
+ for dir, dirs, files in os.walk(path):
+ for ignored in ('SCCS', 'CVS', '.svn'):
+ if ignored in dirs:
+ dirs.remove(ignored)
+ found += [join(dir,f) for f in files if f.endswith('.bb')]
+
+ return found
+
+ def collect_bbfiles( self ):
+ """Collect all available .bb build files"""
+ parsed, cached, skipped, masked = 0, 0, 0, 0
+ self.bb_cache = bb.cache.init(self)
+
+ files = (data.getVar( "BBFILES", self.configuration.data, 1 ) or "").split()
+ data.setVar("BBFILES", " ".join(files), self.configuration.data)
+
+ if not len(files):
+ files = self.get_bbfiles()
+
+ if not len(files):
+ bb.msg.error(bb.msg.domain.Collection, "no files to build.")
+
+ newfiles = []
+ for f in files:
+ if os.path.isdir(f):
+ dirfiles = self.find_bbfiles(f)
+ if dirfiles:
+ newfiles += dirfiles
+ continue
+ newfiles += glob.glob(f) or [ f ]
+
+ bbmask = bb.data.getVar('BBMASK', self.configuration.data, 1)
+
+ if not bbmask:
+ return (newfiles, 0)
+
+ try:
+ bbmask_compiled = re.compile(bbmask)
+ except sre_constants.error:
+ bb.msg.fatal(bb.msg.domain.Collection, "BBMASK is not a valid regular expression.")
+
+ finalfiles = []
+        for f in newfiles:
+ if bbmask and bbmask_compiled.search(f):
+ bb.msg.debug(1, bb.msg.domain.Collection, "skipping masked file %s" % f)
+ masked += 1
+ continue
+ finalfiles.append(f)
+
+ return (finalfiles, masked)
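+
+        # Illustrative BBMASK value (hypothetical): any .bb path matching
+        # this regular expression is skipped, e.g.
+        #   BBMASK = "/obsolete/|/broken/"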
+
+ def parse_bbfiles(self, filelist, masked):
+ parsed, cached, skipped, error, total = 0, 0, 0, 0, len(filelist)
+        for f in filelist:
+
+ #bb.msg.debug(1, bb.msg.domain.Collection, "parsing %s" % f)
+
+ # read a file's metadata
+ try:
+ fromCache, skip = self.bb_cache.loadData(f, self.configuration.data)
+ if skip:
+ skipped += 1
+ bb.msg.debug(2, bb.msg.domain.Collection, "skipping %s" % f)
+ self.bb_cache.skip(f)
+ continue
+ elif fromCache: cached += 1
+ else: parsed += 1
+ deps = None
+
+ # Disabled by RP as was no longer functional
+ # allow metadata files to add items to BBFILES
+ #data.update_data(self.pkgdata[f])
+ #addbbfiles = self.bb_cache.getVar('BBFILES', f, False) or None
+ #if addbbfiles:
+ # for aof in addbbfiles.split():
+ # if not files.count(aof):
+ # if not os.path.isabs(aof):
+ # aof = os.path.join(os.path.dirname(f),aof)
+ # files.append(aof)
+
+ self.bb_cache.handle_data(f, self.status)
+
+ except IOError, e:
+ error += 1
+ self.bb_cache.remove(f)
+ bb.msg.error(bb.msg.domain.Collection, "opening %s: %s" % (f, e))
+ pass
+ except KeyboardInterrupt:
+ self.bb_cache.sync()
+ raise
+ except Exception, e:
+ error += 1
+ self.bb_cache.remove(f)
+ bb.msg.error(bb.msg.domain.Collection, "%s while parsing %s" % (e, f))
+ except:
+ self.bb_cache.remove(f)
+ raise
+ finally:
+ bb.event.fire(bb.event.ParseProgress(self.configuration.event_data, cached, parsed, skipped, masked, error, total))
+
+ self.bb_cache.sync()
+ if error > 0:
+ raise ParsingErrorsFound
+
+ def serve(self):
+
+ if self.configuration.profile:
+ try:
+ import cProfile as profile
+ except:
+ import profile
+
+ profile.runctx("self.server.serve_forever()", globals(), locals(), "profile.log")
+
+ # Redirect stdout to capture profile information
+ pout = open('profile.log.processed', 'w')
+ so = sys.stdout.fileno()
+ os.dup2(pout.fileno(), so)
+
+ import pstats
+ p = pstats.Stats('profile.log')
+ p.sort_stats('time')
+ p.print_stats()
+ p.print_callers()
+ p.sort_stats('cumulative')
+ p.print_stats()
+
+ os.dup2(so, pout.fileno())
+ pout.flush()
+ pout.close()
+ else:
+ self.server.serve_forever()
+
+ bb.event.fire(CookerExit(self.configuration.event_data))
+
+class CookerExit(bb.event.Event):
+ """
+ Notify clients of the Cooker shutdown
+ """
+
+ def __init__(self, d):
+ bb.event.Event.__init__(self, d)
+
diff --git a/bitbake-dev/lib/bb/daemonize.py b/bitbake-dev/lib/bb/daemonize.py
new file mode 100644
index 0000000000..6023c9ccd2
--- /dev/null
+++ b/bitbake-dev/lib/bb/daemonize.py
@@ -0,0 +1,189 @@
+"""
+Python Daemonizing helper
+
+Configurable daemon behaviors:
+
+    1.) The current working directory is set to the "/" directory.
+    2.) The current file creation mode mask is set to 0.
+    3.) All open file descriptors (up to MAXFD, default 1024) are closed.
+    4.) The standard I/O streams are redirected to "/dev/null".
+
+A failed call to fork() now raises an exception.
+
+References:
+ 1) Advanced Programming in the Unix Environment: W. Richard Stevens
+ 2) Unix Programming Frequently Asked Questions:
+ http://www.erlenstar.demon.co.uk/unix/faq_toc.html
+
+Modified by Richard Purdie for bitbake: allows a function to be
+daemonized while returning control to the caller.
+"""
+
+__author__ = "Chad J. Schroeder"
+__copyright__ = "Copyright (C) 2005 Chad J. Schroeder"
+__version__ = "0.2"
+
+# Standard Python modules.
+import os # Miscellaneous OS interfaces.
+import sys # System-specific parameters and functions.
+
+# Default daemon parameters.
+# File mode creation mask of the daemon.
+UMASK = 0
+
+# Default maximum for the number of available file descriptors.
+MAXFD = 1024
+
+# The standard I/O file descriptors are redirected to /dev/null by default.
+if (hasattr(os, "devnull")):
+ REDIRECT_TO = os.devnull
+else:
+ REDIRECT_TO = "/dev/null"
+
+def createDaemon(function, logfile):
+ """
+ Detach a process from the controlling terminal and run it in the
+ background as a daemon, returning control to the caller.
+ """
+
+ try:
+ # Fork a child process so the parent can exit. This returns control to
+ # the command-line or shell. It also guarantees that the child will not
+ # be a process group leader, since the child receives a new process ID
+ # and inherits the parent's process group ID. This step is required
+        # to ensure that the next call to os.setsid is successful.
+ pid = os.fork()
+ except OSError, e:
+ raise Exception, "%s [%d]" % (e.strerror, e.errno)
+
+ if (pid == 0): # The first child.
+ # To become the session leader of this new session and the process group
+ # leader of the new process group, we call os.setsid(). The process is
+ # also guaranteed not to have a controlling terminal.
+ os.setsid()
+
+ # Is ignoring SIGHUP necessary?
+ #
+ # It's often suggested that the SIGHUP signal should be ignored before
+ # the second fork to avoid premature termination of the process. The
+ # reason is that when the first child terminates, all processes, e.g.
+ # the second child, in the orphaned group will be sent a SIGHUP.
+ #
+ # "However, as part of the session management system, there are exactly
+ # two cases where SIGHUP is sent on the death of a process:
+ #
+ # 1) When the process that dies is the session leader of a session that
+ # is attached to a terminal device, SIGHUP is sent to all processes
+ # in the foreground process group of that terminal device.
+ # 2) When the death of a process causes a process group to become
+ # orphaned, and one or more processes in the orphaned group are
+ # stopped, then SIGHUP and SIGCONT are sent to all members of the
+ # orphaned group." [2]
+ #
+ # The first case can be ignored since the child is guaranteed not to have
+ # a controlling terminal. The second case isn't so easy to dismiss.
+ # The process group is orphaned when the first child terminates and
+ # POSIX.1 requires that every STOPPED process in an orphaned process
+ # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
+ # second child is not STOPPED though, we can safely forego ignoring the
+ # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
+ #
+ # import signal # Set handlers for asynchronous events.
+ # signal.signal(signal.SIGHUP, signal.SIG_IGN)
+
+ try:
+ # Fork a second child and exit immediately to prevent zombies. This
+ # causes the second child process to be orphaned, making the init
+ # process responsible for its cleanup. And, since the first child is
+ # a session leader without a controlling terminal, it's possible for
+ # it to acquire one by opening a terminal in the future (System V-
+ # based systems). This second fork guarantees that the child is no
+ # longer a session leader, preventing the daemon from ever acquiring
+ # a controlling terminal.
+ pid = os.fork() # Fork a second child.
+ except OSError, e:
+ raise Exception, "%s [%d]" % (e.strerror, e.errno)
+
+ if (pid == 0): # The second child.
+ # We probably don't want the file mode creation mask inherited from
+ # the parent, so we give the child complete control over permissions.
+ os.umask(UMASK)
+ else:
+ # Parent (the first child) of the second child.
+ os._exit(0)
+ else:
+ # exit() or _exit()?
+ # _exit is like exit(), but it doesn't call any functions registered
+ # with atexit (and on_exit) or any registered signal handlers. It also
+ # closes any open file descriptors. Using exit() may cause all stdio
+ # streams to be flushed twice and any temporary files may be unexpectedly
+ # removed. It's therefore recommended that child branches of a fork()
+ # and the parent branch(es) of a daemon use _exit().
+ return
+
+ # Close all open file descriptors. This prevents the child from keeping
+ # open any file descriptors inherited from the parent. There is a variety
+ # of methods to accomplish this task. Three are listed below.
+ #
+ # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
+    # number of open file descriptors to close. If it doesn't exist, use
+ # the default value (configurable).
+ #
+ # try:
+ # maxfd = os.sysconf("SC_OPEN_MAX")
+ # except (AttributeError, ValueError):
+ # maxfd = MAXFD
+ #
+ # OR
+ #
+ # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
+ # maxfd = os.sysconf("SC_OPEN_MAX")
+ # else:
+ # maxfd = MAXFD
+ #
+ # OR
+ #
+ # Use the getrlimit method to retrieve the maximum file descriptor number
+    # that can be opened by this process. If there is no limit on the
+ # resource, use the default value.
+ #
+ import resource # Resource usage information.
+ maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
+ if (maxfd == resource.RLIM_INFINITY):
+ maxfd = MAXFD
+
+ # Iterate through and close all file descriptors.
+# for fd in range(0, maxfd):
+# try:
+# os.close(fd)
+# except OSError: # ERROR, fd wasn't open to begin with (ignored)
+# pass
+
+ # Redirect the standard I/O file descriptors to the specified file. Since
+ # the daemon has no controlling terminal, most daemons redirect stdin,
+ # stdout, and stderr to /dev/null. This is done to prevent side-effects
+ # from reads and writes to the standard I/O file descriptors.
+
+ # This call to open is guaranteed to return the lowest file descriptor,
+ # which will be 0 (stdin), since it was closed above.
+# os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
+
+ # Duplicate standard input to standard output and standard error.
+# os.dup2(0, 1) # standard output (1)
+# os.dup2(0, 2) # standard error (2)
+
+
+ si = file('/dev/null', 'r')
+ so = file(logfile, 'w')
+ se = so
+
+
+ # Replace those fds with our own
+ os.dup2(si.fileno(), sys.stdin.fileno())
+ os.dup2(so.fileno(), sys.stdout.fileno())
+ os.dup2(se.fileno(), sys.stderr.fileno())
+
+ function()
+
+ os._exit(0)
+
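+# Illustrative use (sketch; 'run_server' and the log file name are
+# hypothetical):
+#
+#   def run_server():
+#       pass    # long-running work, now detached from the terminal
+#
+#   createDaemon(run_server, "bitbake-server.log")
+#   # The parent returns here; the daemonized child runs run_server()
+#   # and exits via os._exit(0) when the function finishes.
+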
diff --git a/bitbake-dev/lib/bb/data.py b/bitbake-dev/lib/bb/data.py
new file mode 100644
index 0000000000..54b2615afb
--- /dev/null
+++ b/bitbake-dev/lib/bb/data.py
@@ -0,0 +1,570 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Data' implementations
+
+Functions for interacting with the data structure used by the
+BitBake build tools.
+
+The expandData and update_data functions are the most expensive
+operations. At night the cookie monster came by and
+suggested 'give me cookies on setting the variables and
+things will work out'. Taking this suggestion into account and
+applying the skills from the (not yet passed) 'Entwurf und
+Analyse von Algorithmen' lecture, the cookie
+monster seems to be right. We will track setVar more carefully
+to have faster update_data and expandKeys operations.
+
+This is a trade-off between speed and memory again but
+the speed is more critical here.
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2005 Holger Hans Peter Freyther
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import sys, os, re, time, types
+if sys.argv[0][-5:] == "pydoc":
+ path = os.path.dirname(os.path.dirname(sys.argv[1]))
+else:
+ path = os.path.dirname(os.path.dirname(sys.argv[0]))
+sys.path.insert(0,path)
+
+from bb import data_smart
+import bb
+
+_dict_type = data_smart.DataSmart
+
+def init():
+ return _dict_type()
+
+def init_db(parent = None):
+ if parent:
+ return parent.createCopy()
+ else:
+ return _dict_type()
+
+def createCopy(source):
+ """Link the source set to the destination
+ If one does not find the value in the destination set,
+ search will go on to the source set to get the value.
+ Value from source are copy-on-write. i.e. any try to
+ modify one of them will end up putting the modified value
+ in the destination set.
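+
+    Example (sketch of the copy-on-write behaviour described above):
+    >>> a = init()
+    >>> setVar('TEST', 'foo', a)
+    >>> b = createCopy(a)
+    >>> print getVar('TEST', b)
+    foo
+    >>> setVar('TEST', 'bar', b)
+    >>> print getVar('TEST', a)
+    foo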
+ """
+ return source.createCopy()
+
+def initVar(var, d):
+ """Non-destructive var init for data structure"""
+ d.initVar(var)
+
+
+def setVar(var, value, d):
+ """Set a variable to a given value
+
+ Example:
+ >>> d = init()
+ >>> setVar('TEST', 'testcontents', d)
+ >>> print getVar('TEST', d)
+ testcontents
+ """
+ d.setVar(var,value)
+
+
+def getVar(var, d, exp = 0):
+ """Gets the value of a variable
+
+ Example:
+ >>> d = init()
+ >>> setVar('TEST', 'testcontents', d)
+ >>> print getVar('TEST', d)
+ testcontents
+ """
+ return d.getVar(var,exp)
+
+
+def renameVar(key, newkey, d):
+ """Renames a variable from key to newkey
+
+ Example:
+ >>> d = init()
+ >>> setVar('TEST', 'testcontents', d)
+ >>> renameVar('TEST', 'TEST2', d)
+ >>> print getVar('TEST2', d)
+ testcontents
+ """
+ d.renameVar(key, newkey)
+
+def delVar(var, d):
+ """Removes a variable from the data set
+
+ Example:
+ >>> d = init()
+ >>> setVar('TEST', 'testcontents', d)
+ >>> print getVar('TEST', d)
+ testcontents
+ >>> delVar('TEST', d)
+ >>> print getVar('TEST', d)
+ None
+ """
+ d.delVar(var)
+
+def setVarFlag(var, flag, flagvalue, d):
+ """Set a flag for a given variable to a given value
+
+ Example:
+ >>> d = init()
+ >>> setVarFlag('TEST', 'python', 1, d)
+ >>> print getVarFlag('TEST', 'python', d)
+ 1
+ """
+ d.setVarFlag(var,flag,flagvalue)
+
+def getVarFlag(var, flag, d):
+ """Gets given flag from given var
+
+ Example:
+ >>> d = init()
+ >>> setVarFlag('TEST', 'python', 1, d)
+ >>> print getVarFlag('TEST', 'python', d)
+ 1
+ """
+ return d.getVarFlag(var,flag)
+
+def delVarFlag(var, flag, d):
+ """Removes a given flag from the variable's flags
+
+ Example:
+ >>> d = init()
+ >>> setVarFlag('TEST', 'testflag', 1, d)
+ >>> print getVarFlag('TEST', 'testflag', d)
+ 1
+ >>> delVarFlag('TEST', 'testflag', d)
+ >>> print getVarFlag('TEST', 'testflag', d)
+ None
+
+ """
+ d.delVarFlag(var,flag)
+
+def setVarFlags(var, flags, d):
+ """Set the flags for a given variable
+
+ Note:
+ setVarFlags will not clear previous
+ flags. Think of this method as
+ addVarFlags
+
+ Example:
+ >>> d = init()
+ >>> myflags = {}
+ >>> myflags['test'] = 'blah'
+ >>> setVarFlags('TEST', myflags, d)
+ >>> print getVarFlag('TEST', 'test', d)
+ blah
+ """
+ d.setVarFlags(var,flags)
+
+def getVarFlags(var, d):
+ """Gets a variable's flags
+
+ Example:
+ >>> d = init()
+ >>> setVarFlag('TEST', 'test', 'blah', d)
+ >>> print getVarFlags('TEST', d)['test']
+ blah
+ """
+ return d.getVarFlags(var)
+
+def delVarFlags(var, d):
+ """Removes a variable's flags
+
+ Example:
+ >>> data = init()
+ >>> setVarFlag('TEST', 'testflag', 1, data)
+ >>> print getVarFlag('TEST', 'testflag', data)
+ 1
+ >>> delVarFlags('TEST', data)
+ >>> print getVarFlags('TEST', data)
+ None
+
+ """
+ d.delVarFlags(var)
+
+def keys(d):
+ """Return a list of keys in d
+
+ Example:
+ >>> d = init()
+ >>> setVar('TEST', 1, d)
+ >>> setVar('MOO' , 2, d)
+ >>> setVarFlag('TEST', 'test', 1, d)
+ >>> keys(d)
+ ['TEST', 'MOO']
+ """
+ return d.keys()
+
+def getData(d):
+ """Returns the data object used"""
+ return d
+
+def setData(newData, d):
+ """Sets the data object to the supplied value"""
+ d = newData
+
+
+##
+## Cookie Monster's query functions
+##
+def _get_override_vars(d, override):
+ """
+ Internal!!!
+
+ Get the Names of Variables that have a specific
+    override. This function returns an iterable
+ Set or an empty list
+ """
+ return []
+
+def _get_var_flags_triple(d):
+ """
+ Internal!!!
+
+ """
+ return []
+
+__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
+__expand_python_regexp__ = re.compile(r"\${@.+?}")
+
+def expand(s, d, varname = None):
+ """Variable expansion using the data store.
+
+ Example:
+ Standard expansion:
+ >>> d = init()
+ >>> setVar('A', 'sshd', d)
+ >>> print expand('/usr/bin/${A}', d)
+ /usr/bin/sshd
+
+ Python expansion:
+ >>> d = init()
+ >>> print expand('result: ${@37 * 72}', d)
+ result: 2664
+
+ Shell expansion:
+ >>> d = init()
+ >>> print expand('${TARGET_MOO}', d)
+ ${TARGET_MOO}
+ >>> setVar('TARGET_MOO', 'yupp', d)
+ >>> print expand('${TARGET_MOO}',d)
+ yupp
+ >>> setVar('SRC_URI', 'http://somebug.${TARGET_MOO}', d)
+ >>> delVar('TARGET_MOO', d)
+ >>> print expand('${SRC_URI}', d)
+ http://somebug.${TARGET_MOO}
+ """
+ return d.expand(s, varname)
+
+def expandKeys(alterdata, readdata = None):
+ if readdata == None:
+ readdata = alterdata
+
+ todolist = {}
+ for key in keys(alterdata):
+ if not '${' in key:
+ continue
+
+ ekey = expand(key, readdata)
+ if key == ekey:
+ continue
+ todolist[key] = ekey
+
+ # These two for loops are split for performance to maximise the
+ # usefulness of the expand cache
+
+ for key in todolist:
+ ekey = todolist[key]
+ renameVar(key, ekey, alterdata)
+
+def expandData(alterdata, readdata = None):
+ """For each variable in alterdata, expand it, and update the var contents.
+ Replacements use data from readdata.
+
+ Example:
+ >>> a=init()
+ >>> b=init()
+ >>> setVar("dlmsg", "dl_dir is ${DL_DIR}", a)
+ >>> setVar("DL_DIR", "/path/to/whatever", b)
+ >>> expandData(a, b)
+ >>> print getVar("dlmsg", a)
+ dl_dir is /path/to/whatever
+ """
+    if readdata is None:
+ readdata = alterdata
+
+ for key in keys(alterdata):
+ val = getVar(key, alterdata)
+ if type(val) is not types.StringType:
+ continue
+ expanded = expand(val, readdata)
+# print "key is %s, val is %s, expanded is %s" % (key, val, expanded)
+ if val != expanded:
+ setVar(key, expanded, alterdata)
+
+import os
+
+def inheritFromOS(d):
+ """Inherit variables from the environment."""
+    # fakeroot needs to be able to set these
+ non_inherit_vars = [ "LD_LIBRARY_PATH", "LD_PRELOAD" ]
+ for s in os.environ.keys():
+ if not s in non_inherit_vars:
+ try:
+ setVar(s, os.environ[s], d)
+ setVarFlag(s, 'matchesenv', '1', d)
+ except TypeError:
+ pass
+
+import sys
+
+def emit_var(var, o=sys.__stdout__, d = init(), all=False):
+ """Emit a variable to be sourced by a shell."""
+ if getVarFlag(var, "python", d):
+ return 0
+
+ export = getVarFlag(var, "export", d)
+ unexport = getVarFlag(var, "unexport", d)
+ func = getVarFlag(var, "func", d)
+ if not all and not export and not unexport and not func:
+ return 0
+
+ try:
+ if all:
+ oval = getVar(var, d, 0)
+ val = getVar(var, d, 1)
+ except KeyboardInterrupt:
+ raise
+ except:
+ excname = str(sys.exc_info()[0])
+ if excname == "bb.build.FuncFailed":
+ raise
+ o.write('# expansion of %s threw %s\n' % (var, excname))
+ return 0
+
+ if all:
+ o.write('# %s=%s\n' % (var, oval))
+
+ if type(val) is not types.StringType:
+ return 0
+
+ if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all:
+ return 0
+
+ varExpanded = expand(var, d)
+
+ if unexport:
+ o.write('unset %s\n' % varExpanded)
+ return 1
+
+ if getVarFlag(var, 'matchesenv', d):
+ return 0
+
+    val = val.rstrip()
+ if not val:
+ return 0
+
+ if func:
+ # NOTE: should probably check for unbalanced {} within the var
+ o.write("%s() {\n%s\n}\n" % (varExpanded, val))
+ return 1
+
+ if export:
+ o.write('export ')
+
+ # if we're going to output this within doublequotes,
+ # to a shell, we need to escape the quotes in the var
+ alter = re.sub('"', '\\"', val.strip())
+ o.write('%s="%s"\n' % (varExpanded, alter))
+ return 1
+
+
+def emit_env(o=sys.__stdout__, d = init(), all=False):
+ """Emits all items in the data store in a format such that it can be sourced by a shell."""
+
+ env = keys(d)
+
+ for e in env:
+ if getVarFlag(e, "func", d):
+ continue
+ emit_var(e, o, d, all) and o.write('\n')
+
+ for e in env:
+ if not getVarFlag(e, "func", d):
+ continue
+ emit_var(e, o, d) and o.write('\n')
+
+def update_data(d):
+ """Modifies the environment vars according to local overrides and commands.
+ Examples:
+ Appending to a variable:
+ >>> d = init()
+ >>> setVar('TEST', 'this is a', d)
+ >>> setVar('TEST_append', ' test', d)
+ >>> setVar('TEST_append', ' of the emergency broadcast system.', d)
+ >>> update_data(d)
+ >>> print getVar('TEST', d)
+ this is a test of the emergency broadcast system.
+
+ Prepending to a variable:
+ >>> setVar('TEST', 'virtual/libc', d)
+ >>> setVar('TEST_prepend', 'virtual/tmake ', d)
+ >>> setVar('TEST_prepend', 'virtual/patcher ', d)
+ >>> update_data(d)
+ >>> print getVar('TEST', d)
+ virtual/patcher virtual/tmake virtual/libc
+
+ Overrides:
+ >>> setVar('TEST_arm', 'target', d)
+ >>> setVar('TEST_ramses', 'machine', d)
+ >>> setVar('TEST_local', 'local', d)
+ >>> setVar('OVERRIDES', 'arm', d)
+
+ >>> setVar('TEST', 'original', d)
+ >>> update_data(d)
+ >>> print getVar('TEST', d)
+ target
+
+ >>> setVar('OVERRIDES', 'arm:ramses:local', d)
+ >>> setVar('TEST', 'original', d)
+ >>> update_data(d)
+ >>> print getVar('TEST', d)
+ local
+
+ CopyMonster:
+ >>> e = d.createCopy()
+ >>> setVar('TEST_foo', 'foo', e)
+ >>> update_data(e)
+ >>> print getVar('TEST', e)
+ local
+
+ >>> setVar('OVERRIDES', 'arm:ramses:local:foo', e)
+ >>> update_data(e)
+ >>> print getVar('TEST', e)
+ foo
+
+ >>> f = d.createCopy()
+ >>> setVar('TEST_moo', 'something', f)
+ >>> setVar('OVERRIDES', 'moo:arm:ramses:local:foo', e)
+ >>> update_data(e)
+ >>> print getVar('TEST', e)
+ foo
+
+
+ >>> h = init()
+ >>> setVar('SRC_URI', 'file://append.foo;patch=1 ', h)
+ >>> g = h.createCopy()
+ >>> setVar('SRC_URI_append_arm', 'file://other.foo;patch=1', g)
+ >>> setVar('OVERRIDES', 'arm:moo', g)
+ >>> update_data(g)
+ >>> print getVar('SRC_URI', g)
+ file://append.foo;patch=1 file://other.foo;patch=1
+
+ """
+ bb.msg.debug(2, bb.msg.domain.Data, "update_data()")
+
+ # now ask the cookie monster for help
+ #print "Cookie Monster"
+ #print "Append/Prepend %s" % d._special_values
+ #print "Overrides %s" % d._seen_overrides
+
+ overrides = (getVar('OVERRIDES', d, 1) or "").split(':') or []
+
+    #
+    # We used to iterate over each variable, apply the override and
+    # then expand the line. That depended on the keys being visited
+    # in a particular order, which we can no longer rely on, so the
+    # code below applies all overrides first; this time the behaviour
+    # is covered by test cases.
+    #
+
+ #
+ # First we apply all overrides
+ # Then we will handle _append and _prepend
+ #
+
+ for o in overrides:
+ # calculate '_'+override
+ l = len(o)+1
+
+ # see if one should even try
+ if not d._seen_overrides.has_key(o):
+ continue
+
+ vars = d._seen_overrides[o]
+ for var in vars:
+ name = var[:-l]
+ try:
+ d[name] = d[var]
+ except:
+ bb.msg.note(1, bb.msg.domain.Data, "Untracked delVar")
+
+ # now on to the appends and prepends
+ if d._special_values.has_key('_append'):
+ appends = d._special_values['_append'] or []
+ for append in appends:
+ for (a, o) in getVarFlag(append, '_append', d) or []:
+ # maybe the OVERRIDE was not yet added so keep the append
+ if (o and o in overrides) or not o:
+ delVarFlag(append, '_append', d)
+ if o and not o in overrides:
+ continue
+
+ sval = getVar(append,d) or ""
+ sval+=a
+ setVar(append, sval, d)
+
+
+ if d._special_values.has_key('_prepend'):
+ prepends = d._special_values['_prepend'] or []
+
+ for prepend in prepends:
+ for (a, o) in getVarFlag(prepend, '_prepend', d) or []:
+ # maybe the OVERRIDE was not yet added so keep the prepend
+ if (o and o in overrides) or not o:
+ delVarFlag(prepend, '_prepend', d)
+ if o and not o in overrides:
+ continue
+
+ sval = a + (getVar(prepend,d) or "")
+ setVar(prepend, sval, d)
+
+
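+# Illustrative use (sketch; 'autotools' is a made-up class name):
+#   inherits_class('autotools', d) is True once
+#   'classes/autotools.bbclass' appears in __inherit_cache.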
+def inherits_class(klass, d):
+ val = getVar('__inherit_cache', d) or []
+ if os.path.join('classes', '%s.bbclass' % klass) in val:
+ return True
+ return False
+
+def _test():
+ """Start a doctest run on this module"""
+ import doctest
+ from bb import data
+ doctest.testmod(data)
+
+if __name__ == "__main__":
+ _test()
diff --git a/bitbake-dev/lib/bb/data_smart.py b/bitbake-dev/lib/bb/data_smart.py
new file mode 100644
index 0000000000..b3a51b0edf
--- /dev/null
+++ b/bitbake-dev/lib/bb/data_smart.py
@@ -0,0 +1,292 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake Smart Dictionary Implementation
+
+Functions for interacting with the data structure used by the
+BitBake build tools.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2004, 2005 Seb Frankengul
+# Copyright (C) 2005, 2006 Holger Hans Peter Freyther
+# Copyright (C) 2005 Uli Luckas
+# Copyright (C) 2005 ROAD GmbH
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import copy, os, re, sys, time, types
+import bb
+from bb import utils, methodpool
+from COW import COWDictBase
+from sets import Set
+from new import classobj
+
+
+__setvar_keyword__ = ["_append","_prepend"]
+__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend)(_(?P<add>.*))?')
+__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
+__expand_python_regexp__ = re.compile(r"\${@.+?}")
+
+
+class DataSmart:
+ def __init__(self, special = COWDictBase.copy(), seen = COWDictBase.copy() ):
+ self.dict = {}
+
+ # cookie monster tribute
+ self._special_values = special
+ self._seen_overrides = seen
+
+ self.expand_cache = {}
+
+ def expand(self,s, varname):
+ def var_sub(match):
+ key = match.group()[2:-1]
+ if varname and key:
+ if varname == key:
+ raise Exception("variable %s references itself!" % varname)
+ var = self.getVar(key, 1)
+ if var is not None:
+ return var
+ else:
+ return match.group()
+
+ def python_sub(match):
+ import bb
+ code = match.group()[3:-1]
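+            # make the data store visible to the eval'd code as 'd';
+            # this relies on CPython keeping extra keys in the frame's
+            # cached locals() dict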
+ locals()['d'] = self
+ s = eval(code)
+ if type(s) == types.IntType: s = str(s)
+ return s
+
+ if type(s) is not types.StringType: # sanity check
+ return s
+
+ if varname and varname in self.expand_cache:
+ return self.expand_cache[varname]
+
+ while s.find('${') != -1:
+ olds = s
+ try:
+ s = __expand_var_regexp__.sub(var_sub, s)
+ s = __expand_python_regexp__.sub(python_sub, s)
+ if s == olds: break
+ if type(s) is not types.StringType: # sanity check
+ bb.msg.error(bb.msg.domain.Data, 'expansion of %s returned non-string %s' % (olds, s))
+ except KeyboardInterrupt:
+ raise
+ except:
+ bb.msg.note(1, bb.msg.domain.Data, "%s:%s while evaluating:\n%s" % (sys.exc_info()[0], sys.exc_info()[1], s))
+ raise
+
+ if varname:
+ self.expand_cache[varname] = s
+
+ return s
+
+ def initVar(self, var):
+ self.expand_cache = {}
+ if not var in self.dict:
+ self.dict[var] = {}
+
+ def _findVar(self,var):
+ _dest = self.dict
+
+ while (_dest and var not in _dest):
+ if not "_data" in _dest:
+ _dest = None
+ break
+ _dest = _dest["_data"]
+
+ if _dest and var in _dest:
+ return _dest[var]
+ return None
+
+ def _makeShadowCopy(self, var):
+ if var in self.dict:
+ return
+
+ local_var = self._findVar(var)
+
+ if local_var:
+ self.dict[var] = copy.copy(local_var)
+ else:
+ self.initVar(var)
+
+ def setVar(self,var,value):
+ self.expand_cache = {}
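+        # e.g. (sketch; names made up) setting 'SRC_URI_append_arm' to
+        # 'file://x.patch' records the pair ['file://x.patch', 'arm']
+        # under SRC_URI's '_append' flag instead of setting a variable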
+ match = __setvar_regexp__.match(var)
+ if match and match.group("keyword") in __setvar_keyword__:
+ base = match.group('base')
+ keyword = match.group("keyword")
+ override = match.group('add')
+ l = self.getVarFlag(base, keyword) or []
+ l.append([value, override])
+ self.setVarFlag(base, keyword, l)
+
+ # todo make sure keyword is not __doc__ or __module__
+ # pay the cookie monster
+ try:
+ self._special_values[keyword].add( base )
+ except:
+ self._special_values[keyword] = Set()
+ self._special_values[keyword].add( base )
+
+ return
+
+ if not var in self.dict:
+ self._makeShadowCopy(var)
+ if self.getVarFlag(var, 'matchesenv'):
+ self.delVarFlag(var, 'matchesenv')
+ self.setVarFlag(var, 'export', 1)
+
+ # more cookies for the cookie monster
+ if '_' in var:
+ override = var[var.rfind('_')+1:]
+ if not self._seen_overrides.has_key(override):
+ self._seen_overrides[override] = Set()
+ self._seen_overrides[override].add( var )
+
+ # setting var
+ self.dict[var]["content"] = value
+
+ def getVar(self,var,exp):
+ value = self.getVarFlag(var,"content")
+
+ if exp and value:
+ return self.expand(value,var)
+ return value
+
+ def renameVar(self, key, newkey):
+ """
+ Rename the variable key to newkey
+ """
+ val = self.getVar(key, 0)
+ if val is None:
+ return
+
+ self.setVar(newkey, val)
+
+ for i in ('_append', '_prepend'):
+ dest = self.getVarFlag(newkey, i) or []
+ src = self.getVarFlag(key, i) or []
+ dest.extend(src)
+ self.setVarFlag(newkey, i, dest)
+
+ if self._special_values.has_key(i) and key in self._special_values[i]:
+ self._special_values[i].remove(key)
+ self._special_values[i].add(newkey)
+
+ self.delVar(key)
+
+ def delVar(self,var):
+ self.expand_cache = {}
+ self.dict[var] = {}
+
+ def setVarFlag(self,var,flag,flagvalue):
+ if not var in self.dict:
+ self._makeShadowCopy(var)
+ self.dict[var][flag] = flagvalue
+
+ def getVarFlag(self,var,flag):
+ local_var = self._findVar(var)
+ if local_var:
+ if flag in local_var:
+ return copy.copy(local_var[flag])
+ return None
+
+ def delVarFlag(self,var,flag):
+ local_var = self._findVar(var)
+ if not local_var:
+ return
+ if not var in self.dict:
+ self._makeShadowCopy(var)
+
+ if var in self.dict and flag in self.dict[var]:
+ del self.dict[var][flag]
+
+ def setVarFlags(self,var,flags):
+ if not var in self.dict:
+ self._makeShadowCopy(var)
+
+ for i in flags.keys():
+ if i == "content":
+ continue
+ self.dict[var][i] = flags[i]
+
+ def getVarFlags(self,var):
+ local_var = self._findVar(var)
+ flags = {}
+
+ if local_var:
+ for i in local_var.keys():
+ if i == "content":
+ continue
+ flags[i] = local_var[i]
+
+ if len(flags) == 0:
+ return None
+ return flags
+
+
+ def delVarFlags(self,var):
+ if not var in self.dict:
+ self._makeShadowCopy(var)
+
+ if var in self.dict:
+ content = None
+
+ # try to save the content
+ if "content" in self.dict[var]:
+ content = self.dict[var]["content"]
+ self.dict[var] = {}
+ self.dict[var]["content"] = content
+ else:
+ del self.dict[var]
+
+
+ def createCopy(self):
+ """
+ Create a copy of self by setting _data to self
+ """
+ # we really want this to be a DataSmart...
+ data = DataSmart(seen=self._seen_overrides.copy(), special=self._special_values.copy())
+ data.dict["_data"] = self.dict
+
+ return data
+
+ # Dictionary Methods
+ def keys(self):
+ def _keys(d, mykey):
+ if "_data" in d:
+ _keys(d["_data"],mykey)
+
+ for key in d.keys():
+ if key != "_data":
+ mykey[key] = None
+ keytab = {}
+ _keys(self.dict,keytab)
+ return keytab.keys()
+
+ def __getitem__(self,item):
+ #print "Warning deprecated"
+ return self.getVar(item, False)
+
+ def __setitem__(self,var,data):
+ #print "Warning deprecated"
+ self.setVar(var,data)
+
+
diff --git a/bitbake-dev/lib/bb/event.py b/bitbake-dev/lib/bb/event.py
new file mode 100644
index 0000000000..c13a0127a5
--- /dev/null
+++ b/bitbake-dev/lib/bb/event.py
@@ -0,0 +1,302 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Event' implementation
+
+Classes and functions for manipulating 'events' in the
+BitBake build tools.
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os, re
+import bb.utils
+
+class Event:
+ """Base class for events"""
+ type = "Event"
+
+ def __init__(self, d):
+ self._data = d
+
+ def getData(self):
+ return self._data
+
+ def setData(self, data):
+ self._data = data
+
+ data = property(getData, setData, None, "data property")
+
+NotHandled = 0
+Handled = 1
+
+Registered = 10
+AlreadyRegistered = 14
+
+# Internal
+_handlers = {}
+_ui_handlers = {}
+_ui_handler_seq = 0
+
+def fire(event):
+ """Fire off an Event"""
+
+ for handler in _handlers:
+ h = _handlers[handler]
+ if type(h).__name__ == "code":
+ exec(h)
+ tmpHandler(event)
+ else:
+ h(event)
+
+ # Remove the event data elements for UI handlers - too much data otherwise
+ # They can request data if they need it
+ event.data = None
+ event._data = None
+
+ errors = []
+ for h in _ui_handlers:
+ #print "Sending event %s" % event
+ classid = "%s.%s" % (event.__class__.__module__, event.__class__.__name__)
+ try:
+ _ui_handlers[h].event.send((classid, event))
+ except:
+ errors.append(h)
+ for h in errors:
+ del _ui_handlers[h]
+
+def register(name, handler):
+ """Register an Event handler"""
+
+ # already registered
+ if name in _handlers:
+ return AlreadyRegistered
+
+ if handler is not None:
+ # handle string containing python code
+ if type(handler).__name__ == "str":
+ tmp = "def tmpHandler(e):\n%s" % handler
+ comp = bb.utils.better_compile(tmp, "tmpHandler(e)", "bb.event._registerCode")
+ _handlers[name] = comp
+ else:
+ _handlers[name] = handler
+
+ return Registered
+
+def remove(name, handler):
+ """Remove an Event handler"""
+ _handlers.pop(name)
+
+def register_UIHhandler(handler):
+ bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
+ _ui_handlers[_ui_handler_seq] = handler
+ return _ui_handler_seq
+
+def unregister_UIHhandler(handlerNum):
+ if handlerNum in _ui_handlers:
+ del _ui_handlers[handlerNum]
+ return
+
+def getName(e):
+ """Returns the name of a class or class instance"""
+ if getattr(e, "__name__", None) == None:
+ return e.__class__.__name__
+ else:
+ return e.__name__
+
+class ConfigParsed(Event):
+ """Configuration Parsing Complete"""
+
+class StampUpdate(Event):
+ """Trigger for any adjustment of the stamp files to happen"""
+
+ def __init__(self, targets, stampfns, d):
+ self._targets = targets
+ self._stampfns = stampfns
+ Event.__init__(self, d)
+
+ def getStampPrefix(self):
+ return self._stampfns
+
+ def getTargets(self):
+ return self._targets
+
+ stampPrefix = property(getStampPrefix)
+ targets = property(getTargets)
+
+class PkgBase(Event):
+ """Base class for package events"""
+
+ def __init__(self, t, d):
+ self._pkg = t
+ Event.__init__(self, d)
+ self._message = "package %s: %s" % (bb.data.getVar("P", d, 1), getName(self)[3:])
+
+ def getPkg(self):
+ return self._pkg
+
+ def setPkg(self, pkg):
+ self._pkg = pkg
+
+ pkg = property(getPkg, setPkg, None, "pkg property")
+
+
+class BuildBase(Event):
+ """Base class for bbmake run events"""
+
+ def __init__(self, n, p, c, failures = 0):
+ self._name = n
+ self._pkgs = p
+ Event.__init__(self, c)
+ self._failures = failures
+
+ def getPkgs(self):
+ return self._pkgs
+
+ def setPkgs(self, pkgs):
+ self._pkgs = pkgs
+
+ def getName(self):
+ return self._name
+
+ def setName(self, name):
+ self._name = name
+
+ def getCfg(self):
+ return self.data
+
+ def setCfg(self, cfg):
+ self.data = cfg
+
+ def getFailures(self):
+ """
+ Return the number of failed packages
+ """
+ return self._failures
+
+ pkgs = property(getPkgs, setPkgs, None, "pkgs property")
+ name = property(getName, setName, None, "name property")
+ cfg = property(getCfg, setCfg, None, "cfg property")
+
+
+class DepBase(PkgBase):
+ """Base class for dependency events"""
+
+ def __init__(self, t, data, d):
+ self._dep = d
+ PkgBase.__init__(self, t, data)
+
+ def getDep(self):
+ return self._dep
+
+ def setDep(self, dep):
+ self._dep = dep
+
+ dep = property(getDep, setDep, None, "dep property")
+
+
+class PkgStarted(PkgBase):
+ """Package build started"""
+
+
+class PkgFailed(PkgBase):
+ """Package build failed"""
+
+
+class PkgSucceeded(PkgBase):
+ """Package build completed"""
+
+
+class BuildStarted(BuildBase):
+ """bbmake build run started"""
+
+
+class BuildCompleted(BuildBase):
+ """bbmake build run completed"""
+
+
+class UnsatisfiedDep(DepBase):
+ """Unsatisfied Dependency"""
+
+
+class RecursiveDep(DepBase):
+ """Recursive Dependency"""
+
+class NoProvider(Event):
+ """No Provider for an Event"""
+
+ def __init__(self, item, data, runtime=False):
+ Event.__init__(self, data)
+ self._item = item
+ self._runtime = runtime
+
+ def getItem(self):
+ return self._item
+
+ def isRuntime(self):
+ return self._runtime
+
+class MultipleProviders(Event):
+ """Multiple Providers"""
+
+ def __init__(self, item, candidates, data, runtime = False):
+ Event.__init__(self, data)
+ self._item = item
+ self._candidates = candidates
+ self._is_runtime = runtime
+
+ def isRuntime(self):
+ """
+ Is this a runtime issue?
+ """
+ return self._is_runtime
+
+ def getItem(self):
+ """
+        The name of the item to be built
+ """
+ return self._item
+
+ def getCandidates(self):
+ """
+ Get the possible Candidates for a PROVIDER.
+ """
+ return self._candidates
+
+class ParseProgress(Event):
+ """
+ Parsing Progress Event
+ """
+
+ def __init__(self, d, cached, parsed, skipped, masked, errors, total):
+ Event.__init__(self, d)
+ self.cached = cached
+ self.parsed = parsed
+ self.skipped = skipped
+ self.masked = masked
+ self.errors = errors
+ self.sofar = cached + parsed + skipped
+ self.total = total
+
+class DepTreeGenerated(Event):
+ """
+ Event when a dependency tree has been generated
+ """
+
+ def __init__(self, d, depgraph):
+ Event.__init__(self, d)
+ self._depgraph = depgraph
+
diff --git a/bitbake-dev/lib/bb/fetch/__init__.py b/bitbake-dev/lib/bb/fetch/__init__.py
new file mode 100644
index 0000000000..c3bea447c1
--- /dev/null
+++ b/bitbake-dev/lib/bb/fetch/__init__.py
@@ -0,0 +1,556 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementations
+
+Classes for obtaining upstream sources for the
+BitBake build tools.
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os, re, fcntl
+import bb
+from bb import data
+from bb import persist_data
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+class FetchError(Exception):
+ """Exception raised when a download fails"""
+
+class NoMethodError(Exception):
+ """Exception raised when there is no method to obtain a supplied url or set of urls"""
+
+class MissingParameterError(Exception):
+ """Exception raised when a fetch method is missing a critical parameter in the url"""
+
+class ParameterError(Exception):
+ """Exception raised when a url cannot be proccessed due to invalid parameters."""
+
+class MD5SumError(Exception):
+ """Exception raised when a MD5SUM of a file does not match the expected one"""
+
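+# Illustrative use of uri_replace (a sketch; host and paths are made up):
+#   uri_replace("http://example.com/pkg.tar.gz",
+#               "http://example.com/.*", "file:///mirror/", d)
+# rewrites matching components of the URI onto the mirror prefix.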
+def uri_replace(uri, uri_find, uri_replace, d):
+# bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: operating on %s" % uri)
+    if not uri or not uri_find or not uri_replace:
+        bb.msg.debug(1, bb.msg.domain.Fetcher, "uri_replace: passed an undefined value, not replacing")
+        return uri
+ uri_decoded = list(bb.decodeurl(uri))
+ uri_find_decoded = list(bb.decodeurl(uri_find))
+ uri_replace_decoded = list(bb.decodeurl(uri_replace))
+ result_decoded = ['','','','','',{}]
+ for i in uri_find_decoded:
+ loc = uri_find_decoded.index(i)
+ result_decoded[loc] = uri_decoded[loc]
+ import types
+ if type(i) == types.StringType:
+ import re
+ if (re.match(i, uri_decoded[loc])):
+ result_decoded[loc] = re.sub(i, uri_replace_decoded[loc], uri_decoded[loc])
+ if uri_find_decoded.index(i) == 2:
+ if d:
+ localfn = bb.fetch.localpath(uri, d)
+ if localfn:
+ result_decoded[loc] = os.path.dirname(result_decoded[loc]) + "/" + os.path.basename(bb.fetch.localpath(uri, d))
+# bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: matching %s against %s and replacing with %s" % (i, uri_decoded[loc], uri_replace_decoded[loc]))
+ else:
+# bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: no match")
+ return uri
+# else:
+# for j in i.keys():
+# FIXME: apply replacements against options
+ return bb.encodeurl(result_decoded)
+
+methods = []
+urldata_cache = {}
+
+def fetcher_init(d):
+ """
+    Called to initialize the fetchers once the configuration data is known
+ Calls before this must not hit the cache.
+ """
+ pd = persist_data.PersistData(d)
+    # When to drop SCM head revisions is controlled by user policy
+ srcrev_policy = bb.data.getVar('BB_SRCREV_POLICY', d, 1) or "clear"
+ if srcrev_policy == "cache":
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Keeping SRCREV cache due to cache policy of: %s" % srcrev_policy)
+ elif srcrev_policy == "clear":
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Clearing SRCREV cache due to cache policy of: %s" % srcrev_policy)
+ pd.delDomain("BB_URI_HEADREVS")
+ else:
+ bb.msg.fatal(bb.msg.domain.Fetcher, "Invalid SRCREV cache policy of: %s" % srcrev_policy)
+ # Make sure our domains exist
+ pd.addDomain("BB_URI_HEADREVS")
+ pd.addDomain("BB_URI_LOCALCOUNT")
+
+# Function call order is usually:
+# 1. init
+# 2. go
+# 3. localpaths
+# localpath can be called at any time
+
+def init(urls, d, setup = True):
+ urldata = {}
+ fn = bb.data.getVar('FILE', d, 1)
+ if fn in urldata_cache:
+ urldata = urldata_cache[fn]
+
+ for url in urls:
+ if url not in urldata:
+ urldata[url] = FetchData(url, d)
+
+ if setup:
+ for url in urldata:
+ if not urldata[url].setup:
+ urldata[url].setup_localpath(d)
+
+ urldata_cache[fn] = urldata
+ return urldata
+
+def go(d):
+ """
+ Fetch all urls
+ init must have previously been called
+ """
+ urldata = init([], d, True)
+
+ for u in urldata:
+ ud = urldata[u]
+ m = ud.method
+ if ud.localfile:
+ if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
+ # File already present along with md5 stamp file
+ # Touch md5 file to show activity
+ try:
+ os.utime(ud.md5, None)
+ except:
+ # Errors aren't fatal here
+ pass
+ continue
+ lf = bb.utils.lockfile(ud.lockfile)
+ if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
+ # If someone else fetched this before we got the lock,
+ # notice and don't try again
+ try:
+ os.utime(ud.md5, None)
+ except:
+ # Errors aren't fatal here
+ pass
+ bb.utils.unlockfile(lf)
+ continue
+ m.go(u, ud, d)
+ if ud.localfile:
+ if not m.forcefetch(u, ud, d):
+ Fetch.write_md5sum(u, ud, d)
+ bb.utils.unlockfile(lf)
+
+
+def checkstatus(d):
+ """
+ Check all urls exist upstream
+ init must have previously been called
+ """
+ urldata = init([], d, True)
+
+ for u in urldata:
+ ud = urldata[u]
+ m = ud.method
+ bb.msg.note(1, bb.msg.domain.Fetcher, "Testing URL %s" % u)
+ ret = m.checkstatus(u, ud, d)
+ if not ret:
+ bb.msg.fatal(bb.msg.domain.Fetcher, "URL %s doesn't work" % u)
+
+def localpaths(d):
+ """
+ Return a list of the local filenames, assuming successful fetch
+ """
+ local = []
+ urldata = init([], d, True)
+
+ for u in urldata:
+ ud = urldata[u]
+ local.append(ud.localpath)
+
+ return local
+
+srcrev_internal_call = False
+
+def get_srcrev(d):
+ """
+ Return the version string for the current package
+ (usually to be used as PV)
+    Most packages have only one SCM, so we just pass on the call.
+ In the multi SCM case, we build a value based on SRCREV_FORMAT which must
+ have been set.
+ """
+
+ #
+    # Ugly code alert. localpath in the fetchers will try to evaluate SRCREV,
+    # which could translate into a call back into here. If it does, we need
+    # to catch this and give localpath a way to know that get_srcrev is
+    # active, instead of handing it some number; hence the
+    # srcrev_internal_call tracking and the magic "SRCREVINACTION" return
+    # value.
+ #
+ # Neater solutions welcome!
+ #
+ if bb.fetch.srcrev_internal_call:
+ return "SRCREVINACTION"
+
+ scms = []
+
+ # Only call setup_localpath on URIs which suppports_srcrev()
+ urldata = init(bb.data.getVar('SRC_URI', d, 1).split(), d, False)
+ for u in urldata:
+ ud = urldata[u]
+ if ud.method.suppports_srcrev():
+ if not ud.setup:
+ ud.setup_localpath(d)
+ scms.append(u)
+
+ if len(scms) == 0:
+ bb.msg.error(bb.msg.domain.Fetcher, "SRCREV was used yet no valid SCM was found in SRC_URI")
+ raise ParameterError
+
+ if len(scms) == 1:
+ return urldata[scms[0]].method.sortable_revision(scms[0], urldata[scms[0]], d)
+
+ #
+    # Multiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
+ #
+ format = bb.data.getVar('SRCREV_FORMAT', d, 1)
+ if not format:
+ bb.msg.error(bb.msg.domain.Fetcher, "The SRCREV_FORMAT variable must be set when multiple SCMs are used.")
+ raise ParameterError
+
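+    # e.g. (sketch; 'glibc' and 'gcc' are made-up SCM names) with
+    # SRCREV_FORMAT = "glibc_gcc", each name is replaced by its
+    # sortable revision, yielding something like "3+<rev>_7+<rev>"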
+ for scm in scms:
+ if 'name' in urldata[scm].parm:
+ name = urldata[scm].parm["name"]
+ rev = urldata[scm].method.sortable_revision(scm, urldata[scm], d)
+ format = format.replace(name, rev)
+
+ return format
+
+def localpath(url, d, cache = True):
+ """
+ Called from the parser with cache=False since the cache isn't ready
+    at this point. Also called from classes in OE, e.g. patch.bbclass
+ """
+ ud = init([url], d)
+ if ud[url].method:
+ return ud[url].localpath
+ return url
+
+def runfetchcmd(cmd, d, quiet = False):
+ """
+ Run cmd returning the command output
+ Raise an error if interrupted or cmd fails
+ Optionally echo command output to stdout
+ """
+
+ # Need to export PATH as binary could be in metadata paths
+ # rather than host provided
+ # Also include some other variables.
+    # FIXME: Should really include all exported variables?
+ exportvars = ['PATH', 'GIT_PROXY_HOST', 'GIT_PROXY_PORT', 'GIT_PROXY_COMMAND']
+
+ for var in exportvars:
+ val = data.getVar(var, d, True)
+ if val:
+ cmd = 'export ' + var + '=%s; %s' % (val, cmd)
+
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cmd)
+
+ # redirect stderr to stdout
+ stdout_handle = os.popen(cmd + " 2>&1", "r")
+ output = ""
+
+ while 1:
+ line = stdout_handle.readline()
+ if not line:
+ break
+ if not quiet:
+ print line,
+ output += line
+
+ status = stdout_handle.close() or 0
+    # wait()-style status: the high byte is the exit status, the low byte the signal
+    exitstatus = status >> 8
+    signal = status & 0xff
+
+    if signal:
+        raise FetchError("Fetch command %s failed with signal %s, output:\n%s" % (cmd, signal, output))
+    elif exitstatus != 0:
+        raise FetchError("Fetch command %s failed with exit code %s, output:\n%s" % (cmd, exitstatus, output))
+
+ return output
+
+class FetchData(object):
+ """
+ A class which represents the fetcher state for a given URI.
+ """
+ def __init__(self, url, d):
+ self.localfile = ""
+ (self.type, self.host, self.path, self.user, self.pswd, self.parm) = bb.decodeurl(data.expand(url, d))
+ self.date = Fetch.getSRCDate(self, d)
+ self.url = url
+ self.setup = False
+ for m in methods:
+ if m.supports(url, self, d):
+ self.method = m
+ return
+ raise NoMethodError("Missing implementation for url %s" % url)
+
+ def setup_localpath(self, d):
+ self.setup = True
+ if "localpath" in self.parm:
+ # if user sets localpath for file, use it instead.
+ self.localpath = self.parm["localpath"]
+ else:
+ bb.fetch.srcrev_internal_call = True
+ self.localpath = self.method.localpath(self.url, self, d)
+ bb.fetch.srcrev_internal_call = False
+ # We have to clear data's internal caches since the cached value of SRCREV is now wrong.
+ # Horrible...
+ bb.data.delVar("ISHOULDNEVEREXIST", d)
+ self.md5 = self.localpath + '.md5'
+ self.lockfile = self.localpath + '.lock'
+
+
+class Fetch(object):
+ """Base class for 'fetch'ing data"""
+
+    def __init__(self, urls = None):
+        self.urls = urls or []
+
+ def supports(self, url, urldata, d):
+ """
+ Check to see if this fetch class supports a given url.
+ """
+ return 0
+
+ def localpath(self, url, urldata, d):
+ """
+ Return the local filename of a given url assuming a successful fetch.
+        Can also set up variables in urldata for use in go (saving code duplication
+ and duplicate code execution)
+ """
+ return url
+
+ def setUrls(self, urls):
+ self.__urls = urls
+
+ def getUrls(self):
+ return self.__urls
+
+ urls = property(getUrls, setUrls, None, "Urls property")
+
+ def forcefetch(self, url, urldata, d):
+ """
+ Force a fetch, even if localpath exists?
+ """
+ return False
+
+ def suppports_srcrev(self):
+ """
+ The fetcher supports auto source revisions (SRCREV)
+ """
+ return False
+
+ def go(self, url, urldata, d):
+ """
+ Fetch urls
+ Assumes localpath was called first
+ """
+ raise NoMethodError("Missing implementation for url")
+
+ def checkstatus(self, url, urldata, d):
+ """
+ Check the status of a URL
+ Assumes localpath was called first
+ """
+ bb.msg.note(1, bb.msg.domain.Fetcher, "URL %s could not be checked for status since no method exists." % url)
+ return True
+
+ def getSRCDate(urldata, d):
+ """
+ Return the SRC Date for the component
+
+        d is the data store
+ """
+ if "srcdate" in urldata.parm:
+ return urldata.parm['srcdate']
+
+ pn = data.getVar("PN", d, 1)
+
+ if pn:
+ return data.getVar("SRCDATE_%s" % pn, d, 1) or data.getVar("CVSDATE_%s" % pn, d, 1) or data.getVar("SRCDATE", d, 1) or data.getVar("CVSDATE", d, 1) or data.getVar("DATE", d, 1)
+
+ return data.getVar("SRCDATE", d, 1) or data.getVar("CVSDATE", d, 1) or data.getVar("DATE", d, 1)
+ getSRCDate = staticmethod(getSRCDate)
+
+ def srcrev_internal_helper(ud, d):
+ """
+ Return:
+ a) a source revision if specified
+ b) True if auto srcrev is in action
+ c) False otherwise
+ """
+
+ if 'rev' in ud.parm:
+ return ud.parm['rev']
+
+ if 'tag' in ud.parm:
+ return ud.parm['tag']
+
+ rev = None
+ if 'name' in ud.parm:
+ pn = data.getVar("PN", d, 1)
+ rev = data.getVar("SRCREV_pn-" + pn + "_" + ud.parm['name'], d, 1)
+ if not rev:
+ rev = data.getVar("SRCREV", d, 1)
+ if not rev:
+ return False
+ if rev is "SRCREVINACTION":
+ return True
+ return rev
+
+ srcrev_internal_helper = staticmethod(srcrev_internal_helper)
+
+ def try_mirror(d, tarfn):
+ """
+ Try to use a mirrored version of the sources. We do this
+ to avoid massive loads on foreign cvs and svn servers.
+ This method will be used by the different fetcher
+ implementations.
+
+ d Is a bb.data instance
+ tarfn is the name of the tarball
+ """
+ tarpath = os.path.join(data.getVar("DL_DIR", d, 1), tarfn)
+ if os.access(tarpath, os.R_OK):
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists, skipping checkout." % tarfn)
+ return True
+
+ pn = data.getVar('PN', d, True)
+ src_tarball_stash = None
+ if pn:
+ src_tarball_stash = (data.getVar('SRC_TARBALL_STASH_%s' % pn, d, True) or data.getVar('CVS_TARBALL_STASH_%s' % pn, d, True) or data.getVar('SRC_TARBALL_STASH', d, True) or data.getVar('CVS_TARBALL_STASH', d, True) or "").split()
+
+ for stash in src_tarball_stash:
+ fetchcmd = data.getVar("FETCHCOMMAND_mirror", d, True) or data.getVar("FETCHCOMMAND_wget", d, True)
+ uri = stash + tarfn
+ bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri)
+ fetchcmd = fetchcmd.replace("${URI}", uri)
+ ret = os.system(fetchcmd)
+ if ret == 0:
+ bb.msg.note(1, bb.msg.domain.Fetcher, "Fetched %s from tarball stash, skipping checkout" % tarfn)
+ return True
+ return False
+ try_mirror = staticmethod(try_mirror)
+
+ def verify_md5sum(ud, got_sum):
+ """
+ Verify the md5sum we wanted with the one we got
+ """
+ wanted_sum = None
+ if 'md5sum' in ud.parm:
+ wanted_sum = ud.parm['md5sum']
+ if not wanted_sum:
+ return True
+
+ return wanted_sum == got_sum
+ verify_md5sum = staticmethod(verify_md5sum)
+
+ def write_md5sum(url, ud, d):
+ md5data = bb.utils.md5_file(ud.localpath)
+ # verify the md5sum
+ if not Fetch.verify_md5sum(ud, md5data):
+ raise MD5SumError(url)
+
+ md5out = file(ud.md5, 'w')
+ md5out.write(md5data)
+ md5out.close()
+ write_md5sum = staticmethod(write_md5sum)
+
+ def latest_revision(self, url, ud, d):
+ """
+ Look in the cache for the latest revision, if not present ask the SCM.
+ """
+ if not hasattr(self, "_latest_revision"):
+ raise ParameterError
+
+ pd = persist_data.PersistData(d)
+ key = self._revision_key(url, ud, d)
+ rev = pd.getValue("BB_URI_HEADREVS", key)
+        if rev is not None:
+ return str(rev)
+
+ rev = self._latest_revision(url, ud, d)
+ pd.setValue("BB_URI_HEADREVS", key, rev)
+ return rev
+
+    def sortable_revision(self, url, ud, d):
+        """
+        Return a sortable revision string of the form "<count>+<rev>",
+        where <count> is bumped each time the latest upstream revision
+        changes (e.g. "3+<rev>").
+        """
+ if hasattr(self, "_sortable_revision"):
+ return self._sortable_revision(url, ud, d)
+
+ pd = persist_data.PersistData(d)
+ key = self._revision_key(url, ud, d)
+ latest_rev = self._build_revision(url, ud, d)
+ last_rev = pd.getValue("BB_URI_LOCALCOUNT", key + "_rev")
+ count = pd.getValue("BB_URI_LOCALCOUNT", key + "_count")
+
+ if last_rev == latest_rev:
+ return str(count + "+" + latest_rev)
+
+ if count is None:
+ count = "0"
+ else:
+ count = str(int(count) + 1)
+
+ pd.setValue("BB_URI_LOCALCOUNT", key + "_rev", latest_rev)
+ pd.setValue("BB_URI_LOCALCOUNT", key + "_count", count)
+
+ return str(count + "+" + latest_rev)
+
+
+import cvs
+import git
+import local
+import svn
+import wget
+import svk
+import ssh
+import perforce
+import bzr
+import hg
+
+methods.append(local.Local())
+methods.append(wget.Wget())
+methods.append(svn.Svn())
+methods.append(git.Git())
+methods.append(cvs.Cvs())
+methods.append(svk.Svk())
+methods.append(ssh.SSH())
+methods.append(perforce.Perforce())
+methods.append(bzr.Bzr())
+methods.append(hg.Hg())
diff --git a/bitbake-dev/lib/bb/fetch/bzr.py b/bitbake-dev/lib/bb/fetch/bzr.py
new file mode 100644
index 0000000000..b23e9eef86
--- /dev/null
+++ b/bitbake-dev/lib/bb/fetch/bzr.py
@@ -0,0 +1,154 @@
+"""
+BitBake 'Fetch' implementation for bzr.
+
+"""
+
+# Copyright (C) 2007 Ross Burton
+# Copyright (C) 2007 Richard Purdie
+#
+# Classes for obtaining upstream sources for the
+# BitBake build tools.
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import sys
+import bb
+from bb import data
+from bb.fetch import Fetch
+from bb.fetch import FetchError
+from bb.fetch import MissingParameterError
+from bb.fetch import runfetchcmd
+
+class Bzr(Fetch):
+ def supports(self, url, ud, d):
+ return ud.type in ['bzr']
+
+    def localpath(self, url, ud, d):
+
+ # Create paths to bzr checkouts
+ relpath = ud.path
+ if relpath.startswith('/'):
+ # Remove leading slash as os.path.join can't cope
+ relpath = relpath[1:]
+ ud.pkgdir = os.path.join(data.expand('${BZRDIR}', d), ud.host, relpath)
+
+ revision = Fetch.srcrev_internal_helper(ud, d)
+ if revision is True:
+ ud.revision = self.latest_revision(url, ud, d)
+ elif revision:
+ ud.revision = revision
+
+ if not ud.revision:
+ ud.revision = self.latest_revision(url, ud, d)
+
+ ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d)
+
+ return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
+
+ def _buildbzrcommand(self, ud, d, command):
+ """
+        Build up a bzr commandline based on ud
+ command is "fetch", "update", "revno"
+ """
+
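+        # e.g. (sketch) command="fetch" with revision 123 produces:
+        #   bzr co -r 123 http://host/path
+        # assuming FETCHCMD_bzr is simply "bzr"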
+ basecmd = data.expand('${FETCHCMD_bzr}', d)
+
+ proto = "http"
+ if "proto" in ud.parm:
+ proto = ud.parm["proto"]
+
+ bzrroot = ud.host + ud.path
+
+ options = []
+
+ if command is "revno":
+ bzrcmd = "%s revno %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
+ else:
+ if ud.revision:
+ options.append("-r %s" % ud.revision)
+
+ if command is "fetch":
+ bzrcmd = "%s co %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
+ elif command is "update":
+ bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options))
+ else:
+ raise FetchError("Invalid bzr command %s" % command)
+
+ return bzrcmd
+
+ def go(self, loc, ud, d):
+ """Fetch url"""
+
+ # try to use the tarball stash
+ if Fetch.try_mirror(d, ud.localfile):
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping bzr checkout." % ud.localpath)
+ return
+
+ if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK):
+ bzrcmd = self._buildbzrcommand(ud, d, "update")
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "BZR Update %s" % loc)
+ os.chdir(os.path.join (ud.pkgdir, os.path.basename(ud.path)))
+ runfetchcmd(bzrcmd, d)
+ else:
+ os.system("rm -rf %s" % os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)))
+ bzrcmd = self._buildbzrcommand(ud, d, "fetch")
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "BZR Checkout %s" % loc)
+ bb.mkdirhier(ud.pkgdir)
+ os.chdir(ud.pkgdir)
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % bzrcmd)
+ runfetchcmd(bzrcmd, d)
+
+ os.chdir(ud.pkgdir)
+ # tar them up to a defined filename
+ try:
+ runfetchcmd("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.pkgdir)), d)
+ except:
+ t, v, tb = sys.exc_info()
+ try:
+ os.unlink(ud.localpath)
+ except OSError:
+ pass
+ raise t, v, tb
+
+ def suppports_srcrev(self):
+ return True
+
+ def _revision_key(self, url, ud, d):
+ """
+ Return a unique key for the url
+ """
+ return "bzr:" + ud.pkgdir
+
+ def _latest_revision(self, url, ud, d):
+ """
+ Return the latest upstream revision number
+ """
+ bb.msg.debug(2, bb.msg.domain.Fetcher, "BZR fetcher hitting network for %s" % url)
+
+ output = runfetchcmd(self._buildbzrcommand(ud, d, "revno"), d, True)
+
+ return output.strip()
+
+ def _sortable_revision(self, url, ud, d):
+ """
+ Return a sortable revision number which in our case is the revision number
+ """
+
+ return self._build_revision(url, ud, d)
+
+ def _build_revision(self, url, ud, d):
+ return ud.revision
+
diff --git a/bitbake-dev/lib/bb/fetch/cvs.py b/bitbake-dev/lib/bb/fetch/cvs.py
new file mode 100644
index 0000000000..c4ccf4303f
--- /dev/null
+++ b/bitbake-dev/lib/bb/fetch/cvs.py
@@ -0,0 +1,178 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementations
+
+Classes for obtaining upstream sources for the
+BitBake build tools.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#Based on functions from the base bb module, Copyright 2003 Holger Schurig
+#
+
+import os, re
+import bb
+from bb import data
+from bb.fetch import Fetch
+from bb.fetch import FetchError
+from bb.fetch import MissingParameterError
+
+class Cvs(Fetch):
+ """
+ Class to fetch a module or modules from cvs repositories
+ """
+ def supports(self, url, ud, d):
+ """
+ Check to see if a given url can be fetched with cvs.
+ """
+ return ud.type in ['cvs', 'pserver']
+
+ def localpath(self, url, ud, d):
+ if not "module" in ud.parm:
+ raise MissingParameterError("cvs method needs a 'module' parameter")
+ ud.module = ud.parm["module"]
+
+ ud.tag = ""
+ if 'tag' in ud.parm:
+ ud.tag = ud.parm['tag']
+
+ # Override the default date in certain cases
+ if 'date' in ud.parm:
+ ud.date = ud.parm['date']
+ elif ud.tag:
+ ud.date = ""
+
+ norecurse = ''
+ if 'norecurse' in ud.parm:
+ norecurse = '_norecurse'
+
+ fullpath = ''
+ if 'fullpath' in ud.parm:
+ fullpath = '_fullpath'
+
+ ud.localfile = data.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath), d)
+
+ return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
+
+ def forcefetch(self, url, ud, d):
+ if (ud.date == "now"):
+ return True
+ return False
+
+ def go(self, loc, ud, d):
+
+ # try to use the tarball stash
+ if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping cvs checkout." % ud.localpath)
+ return
+
+ method = "pserver"
+ if "method" in ud.parm:
+ method = ud.parm["method"]
+
+ localdir = ud.module
+ if "localdir" in ud.parm:
+ localdir = ud.parm["localdir"]
+
+ cvs_port = ""
+ if "port" in ud.parm:
+ cvs_port = ud.parm["port"]
+
+ cvs_rsh = None
+ if method == "ext":
+ if "rsh" in ud.parm:
+ cvs_rsh = ud.parm["rsh"]
+
+ if method == "dir":
+ cvsroot = ud.path
+ else:
+ cvsroot = ":" + method
+ cvsproxyhost = data.getVar('CVS_PROXY_HOST', d, True)
+ if cvsproxyhost:
+ cvsroot += ";proxy=" + cvsproxyhost
+ cvsproxyport = data.getVar('CVS_PROXY_PORT', d, True)
+ if cvsproxyport:
+ cvsroot += ";proxyport=" + cvsproxyport
+ cvsroot += ":" + ud.user
+ if ud.pswd:
+ cvsroot += ":" + ud.pswd
+ cvsroot += "@" + ud.host + ":" + cvs_port + ud.path
+
+ options = []
+ if 'norecurse' in ud.parm:
+ options.append("-l")
+ if ud.date:
+ options.append("-D \"%s UTC\"" % ud.date)
+ if ud.tag:
+ options.append("-r %s" % ud.tag)
+
+ localdata = data.createCopy(d)
+ data.setVar('OVERRIDES', "cvs:%s" % data.getVar('OVERRIDES', localdata), localdata)
+ data.update_data(localdata)
+
+ data.setVar('CVSROOT', cvsroot, localdata)
+ data.setVar('CVSCOOPTS', " ".join(options), localdata)
+ data.setVar('CVSMODULE', ud.module, localdata)
+ cvscmd = data.getVar('FETCHCOMMAND', localdata, 1)
+ cvsupdatecmd = data.getVar('UPDATECOMMAND', localdata, 1)
+
+ if cvs_rsh:
+ cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)
+ cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)
+
+ # create module directory
+ bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory")
+ pkg = data.expand('${PN}', d)
+ pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg)
+ moddir = os.path.join(pkgdir,localdir)
+ if os.access(os.path.join(moddir,'CVS'), os.R_OK):
+ bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
+ # update sources there
+ os.chdir(moddir)
+ myret = os.system(cvsupdatecmd)
+ else:
+ bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
+ # check out sources there
+ bb.mkdirhier(pkgdir)
+ os.chdir(pkgdir)
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cvscmd)
+ myret = os.system(cvscmd)
+
+ if myret != 0 or not os.access(moddir, os.R_OK):
+ try:
+ os.rmdir(moddir)
+ except OSError:
+ pass
+ raise FetchError(ud.module)
+
+ # tar them up to a defined filename
+ if 'fullpath' in ud.parm:
+ os.chdir(pkgdir)
+ myret = os.system("tar -czf %s %s" % (ud.localpath, localdir))
+ else:
+ os.chdir(moddir)
+ os.chdir('..')
+ myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(moddir)))
+
+ if myret != 0:
+ try:
+ os.unlink(ud.localpath)
+ except OSError:
+ pass
+ raise FetchError(ud.module)
diff --git a/bitbake-dev/lib/bb/fetch/git.py b/bitbake-dev/lib/bb/fetch/git.py
new file mode 100644
index 0000000000..f4ae724f87
--- /dev/null
+++ b/bitbake-dev/lib/bb/fetch/git.py
@@ -0,0 +1,142 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' git implementation
+
+"""
+
+#Copyright (C) 2005 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os, re
+import bb
+from bb import data
+from bb.fetch import Fetch
+from bb.fetch import FetchError
+from bb.fetch import runfetchcmd
+
+def prunedir(topdir):
+ # Delete everything reachable from the directory named in 'topdir'.
+ # CAUTION: This is dangerous!
+ for root, dirs, files in os.walk(topdir, topdown=False):
+ for name in files:
+ os.remove(os.path.join(root, name))
+ for name in dirs:
+ os.rmdir(os.path.join(root, name))
+
+class Git(Fetch):
+ """Class to fetch a module or modules from git repositories"""
+ def supports(self, url, ud, d):
+ """
+ Check to see if a given url can be fetched with git.
+ """
+ return ud.type in ['git']
+
+ def localpath(self, url, ud, d):
+
+ ud.proto = "rsync"
+ if 'protocol' in ud.parm:
+ ud.proto = ud.parm['protocol']
+
+ ud.branch = ud.parm.get("branch", "master")
+
+ tag = Fetch.srcrev_internal_helper(ud, d)
+ if tag is True:
+ ud.tag = self.latest_revision(url, ud, d)
+ elif tag:
+ ud.tag = tag
+
+ if not ud.tag:
+ ud.tag = self.latest_revision(url, ud, d)
+
+ if ud.tag == "master":
+ ud.tag = self.latest_revision(url, ud, d)
+
+ ud.localfile = data.expand('git_%s%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.tag), d)
+
+ return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
+
+ def go(self, loc, ud, d):
+ """Fetch url"""
+
+ if Fetch.try_mirror(d, ud.localfile):
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists (or was stashed). Skipping git checkout." % ud.localpath)
+ return
+
+ gitsrcname = '%s%s' % (ud.host, ud.path.replace('/', '.'))
+
+ repofilename = 'git_%s.tar.gz' % (gitsrcname)
+ repofile = os.path.join(data.getVar("DL_DIR", d, 1), repofilename)
+ repodir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)
+
+        coname = ud.tag
+ codir = os.path.join(repodir, coname)
+
+ if not os.path.exists(repodir):
+ if Fetch.try_mirror(d, repofilename):
+ bb.mkdirhier(repodir)
+ os.chdir(repodir)
+ runfetchcmd("tar -xzf %s" % (repofile), d)
+ else:
+ runfetchcmd("git clone -n %s://%s%s %s" % (ud.proto, ud.host, ud.path, repodir), d)
+
+ os.chdir(repodir)
+ # Remove all but the .git directory
+ runfetchcmd("rm * -Rf", d)
+ runfetchcmd("git fetch %s://%s%s %s" % (ud.proto, ud.host, ud.path, ud.branch), d)
+ runfetchcmd("git fetch --tags %s://%s%s" % (ud.proto, ud.host, ud.path), d)
+ runfetchcmd("git prune-packed", d)
+ runfetchcmd("git pack-redundant --all | xargs -r rm", d)
+
+ os.chdir(repodir)
+ mirror_tarballs = data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True)
+ if mirror_tarballs != "0":
+ bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git repository")
+ runfetchcmd("tar -czf %s %s" % (repofile, os.path.join(".", ".git", "*") ), d)
+
+ if os.path.exists(codir):
+ prunedir(codir)
+
+ bb.mkdirhier(codir)
+ os.chdir(repodir)
+ runfetchcmd("git read-tree %s" % (ud.tag), d)
+ runfetchcmd("git checkout-index -q -f --prefix=%s -a" % (os.path.join(codir, "git", "")), d)
+
+ os.chdir(codir)
+ bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git checkout")
+ runfetchcmd("tar -czf %s %s" % (ud.localpath, os.path.join(".", "*") ), d)
+
+ os.chdir(repodir)
+ prunedir(codir)
+
+ def suppports_srcrev(self):
+ return True
+
+ def _revision_key(self, url, ud, d):
+ """
+ Return a unique key for the url
+ """
+ return "git:" + ud.host + ud.path.replace('/', '.')
+
+ def _latest_revision(self, url, ud, d):
+ """
+ Compute the HEAD revision for the url
+ """
+ output = runfetchcmd("git ls-remote %s://%s%s %s" % (ud.proto, ud.host, ud.path, ud.branch), d, True)
+ return output.split()[0]
+
+ def _build_revision(self, url, ud, d):
+ return ud.tag
+
diff --git a/bitbake-dev/lib/bb/fetch/hg.py b/bitbake-dev/lib/bb/fetch/hg.py
new file mode 100644
index 0000000000..ee3bd2f7fe
--- /dev/null
+++ b/bitbake-dev/lib/bb/fetch/hg.py
@@ -0,0 +1,141 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementation for mercurial DRCS (hg).
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2004 Marcin Juszkiewicz
+# Copyright (C) 2007 Robert Schuster
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os, re
+import sys
+import bb
+from bb import data
+from bb.fetch import Fetch
+from bb.fetch import FetchError
+from bb.fetch import MissingParameterError
+from bb.fetch import runfetchcmd
+
+class Hg(Fetch):
+ """Class to fetch a from mercurial repositories"""
+ def supports(self, url, ud, d):
+ """
+ Check to see if a given url can be fetched with mercurial.
+ """
+ return ud.type in ['hg']
+
+ def localpath(self, url, ud, d):
+ if not "module" in ud.parm:
+ raise MissingParameterError("hg method needs a 'module' parameter")
+
+ ud.module = ud.parm["module"]
+
+ # Create paths to mercurial checkouts
+ relpath = ud.path
+ if relpath.startswith('/'):
+ # Remove leading slash as os.path.join can't cope
+ relpath = relpath[1:]
+ ud.pkgdir = os.path.join(data.expand('${HGDIR}', d), ud.host, relpath)
+ ud.moddir = os.path.join(ud.pkgdir, ud.module)
+
+ if 'rev' in ud.parm:
+ ud.revision = ud.parm['rev']
+
+ ud.localfile = data.expand('%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d)
+
+ return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
+
+ def _buildhgcommand(self, ud, d, command):
+ """
+ Build up an hg commandline based on ud
+ command is "fetch", "update", "info"
+ """
+
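+        # e.g. (sketch) command="fetch" with rev 42 and module "m" produces:
+        #   hg clone -r 42 http://host/path/m m
+        # assuming FETCHCMD_hg is simply "hg"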
+ basecmd = data.expand('${FETCHCMD_hg}', d)
+
+ proto = "http"
+ if "proto" in ud.parm:
+ proto = ud.parm["proto"]
+
+ host = ud.host
+ if proto == "file":
+ host = "/"
+ ud.host = "localhost"
+
+ hgroot = host + ud.path
+
+ if command is "info":
+ return "%s identify -i %s://%s/%s" % (basecmd, proto, hgroot, ud.module)
+
+ options = [];
+ if ud.revision:
+ options.append("-r %s" % ud.revision)
+
+ if command is "fetch":
+ cmd = "%s clone %s %s://%s/%s %s" % (basecmd, " ".join(options), proto, hgroot, ud.module, ud.module)
+ elif command is "pull":
+ cmd = "%s pull %s" % (basecmd, " ".join(options))
+ elif command is "update":
+ cmd = "%s update -C %s" % (basecmd, " ".join(options))
+ else:
+ raise FetchError("Invalid hg command %s" % command)
+
+ return cmd
+
+ def go(self, loc, ud, d):
+ """Fetch url"""
+
+ # try to use the tarball stash
+ if Fetch.try_mirror(d, ud.localfile):
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping hg checkout." % ud.localpath)
+ return
+
+ bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'")
+
+ if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK):
+ updatecmd = self._buildhgcommand(ud, d, "pull")
+ bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
+ # update sources there
+ os.chdir(ud.moddir)
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd)
+ runfetchcmd(updatecmd, d)
+
+ updatecmd = self._buildhgcommand(ud, d, "update")
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd)
+ runfetchcmd(updatecmd, d)
+ else:
+ fetchcmd = self._buildhgcommand(ud, d, "fetch")
+ bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
+ # check out sources there
+ bb.mkdirhier(ud.pkgdir)
+ os.chdir(ud.pkgdir)
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % fetchcmd)
+ runfetchcmd(fetchcmd, d)
+
+ os.chdir(ud.pkgdir)
+ try:
+ runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d)
+ except:
+ t, v, tb = sys.exc_info()
+ try:
+ os.unlink(ud.localpath)
+ except OSError:
+ pass
+ raise t, v, tb
diff --git a/bitbake-dev/lib/bb/fetch/local.py b/bitbake-dev/lib/bb/fetch/local.py
new file mode 100644
index 0000000000..54d598ae89
--- /dev/null
+++ b/bitbake-dev/lib/bb/fetch/local.py
@@ -0,0 +1,72 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementations
+
+Classes for obtaining upstream sources for the
+BitBake build tools.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os, re
+import bb
+from bb import data
+from bb.fetch import Fetch
+
+class Local(Fetch):
+ def supports(self, url, urldata, d):
+ """
+ Check to see if a given url can be fetched as a local file.
+ """
+ return urldata.type in ['file','patch']
+
+ def localpath(self, url, urldata, d):
+ """
+ Return the local filename of a given url assuming a successful fetch.
+ """
+ path = url.split("://")[1]
+ path = path.split(";")[0]
+ newpath = path
+ if path[0] != "/":
+ filespath = data.getVar('FILESPATH', d, 1)
+ if filespath:
+ newpath = bb.which(filespath, path)
+ if not newpath:
+ filesdir = data.getVar('FILESDIR', d, 1)
+ if filesdir:
+ newpath = os.path.join(filesdir, path)
+ # We don't set localfile as for this fetcher the file is already local!
+ return newpath
+
+ def go(self, url, urldata, d):
+ """Fetch urls (no-op for Local method)"""
+ # no need to fetch local files, we'll deal with them in place.
+ return 1
+
+ def checkstatus(self, url, urldata, d):
+ """
+ Check the status of the url
+ """
+ if urldata.localpath.find("*") != -1:
+ bb.msg.note(1, bb.msg.domain.Fetcher, "URL %s looks like a glob and was therefore not checked." % url)
+ return True
+ if os.path.exists(urldata.localpath):
+ return True
+ return False
diff --git a/bitbake-dev/lib/bb/fetch/perforce.py b/bitbake-dev/lib/bb/fetch/perforce.py
new file mode 100644
index 0000000000..b594d2bde2
--- /dev/null
+++ b/bitbake-dev/lib/bb/fetch/perforce.py
@@ -0,0 +1,213 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementations
+
+Classes for obtaining upstream sources for the
+BitBake build tools.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os, re
+import bb
+from bb import data
+from bb.fetch import Fetch
+from bb.fetch import FetchError
+from bb.fetch import MissingParameterError
+
+class Perforce(Fetch):
+ def supports(self, url, ud, d):
+ return ud.type in ['p4']
+
+ def doparse(url,d):
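+ # Expected url layout, with hypothetical values:
+ # p4://user:passwd:host:1666@depot/path/...;label=foo
+ # Without the leading user block, host and port are read from P4PORT.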
+ parm = {}
+ path = url.split("://")[1]
+ delim = path.find("@");
+ if delim != -1:
+ (user,pswd,host,port) = path.split('@')[0].split(":")
+ path = path.split('@')[1]
+ else:
+ (host,port) = data.getVar('P4PORT', d).split(':')
+ user = ""
+ pswd = ""
+
+ if path.find(";") != -1:
+ keys=[]
+ values=[]
+ plist = path.split(';')
+ for item in plist:
+ if item.count('='):
+ (key,value) = item.split('=')
+ keys.append(key)
+ values.append(value)
+
+ parm = dict(zip(keys,values))
+ path = "//" + path.split(';')[0]
+ host += ":%s" % (port)
+ parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm)
+
+ return host,path,user,pswd,parm
+ doparse = staticmethod(doparse)
+
+ def getcset(d, depot,host,user,pswd,parm):
+ if "cset" in parm:
+ return parm["cset"];
+ if user:
+ data.setVar('P4USER', user, d)
+ if pswd:
+ data.setVar('P4PASSWD', pswd, d)
+ if host:
+ data.setVar('P4PORT', host, d)
+
+ p4date = data.getVar("P4DATE", d, 1)
+ if "revision" in parm:
+ depot += "#%s" % (parm["revision"])
+ elif "label" in parm:
+ depot += "@%s" % (parm["label"])
+ elif p4date:
+ depot += "@%s" % (p4date)
+
+ p4cmd = data.getVar('FETCHCOMMAND_p4', d, 1)
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s changes -m 1 %s" % (p4cmd, depot))
+ p4file = os.popen("%s changes -m 1 %s" % (p4cmd,depot))
+ cset = p4file.readline().strip()
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "READ %s" % (cset))
+ if not cset:
+ return -1
+
+ return cset.split(' ')[1]
+ getcset = staticmethod(getcset)
+
+ def localpath(self, url, ud, d):
+
+ (host,path,user,pswd,parm) = Perforce.doparse(url,d)
+
+ # If a label is specified, we use that as our filename
+
+ if "label" in parm:
+ ud.localfile = "%s.tar.gz" % (parm["label"])
+ return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile)
+
+ base = path
+ which = path.find('/...')
+ if which != -1:
+ base = path[:which]
+
+ if base[0] == "/":
+ base = base[1:]
+
+ cset = Perforce.getcset(d, path, host, user, pswd, parm)
+
+ ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host,base.replace('/', '.'), cset), d)
+
+ return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile)
+
+ def go(self, loc, ud, d):
+ """
+ Fetch urls
+ """
+
+ # try to use the tarball stash
+ if Fetch.try_mirror(d, ud.localfile):
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping perforce checkout." % ud.localpath)
+ return
+
+ (host,depot,user,pswd,parm) = Perforce.doparse(loc, d)
+
+ if depot.find('/...') != -1:
+ path = depot[:depot.find('/...')]
+ else:
+ path = depot
+
+ if "module" in parm:
+ module = parm["module"]
+ else:
+ module = os.path.basename(path)
+
+ localdata = data.createCopy(d)
+ data.setVar('OVERRIDES', "p4:%s" % data.getVar('OVERRIDES', localdata), localdata)
+ data.update_data(localdata)
+
+ # Get the p4 command
+ if user:
+ data.setVar('P4USER', user, localdata)
+
+ if pswd:
+ data.setVar('P4PASSWD', pswd, localdata)
+
+ if host:
+ data.setVar('P4PORT', host, localdata)
+
+ p4cmd = data.getVar('FETCHCOMMAND', localdata, 1)
+
+ # create temp directory
+ bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
+ bb.mkdirhier(data.expand('${WORKDIR}', localdata))
+ data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata)
+ tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
+ tmpfile = tmppipe.readline().strip()
+ if not tmpfile:
+ bb.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
+ raise FetchError(module)
+
+ if "label" in parm:
+ depot = "%s@%s" % (depot,parm["label"])
+ else:
+ cset = Perforce.getcset(d, depot, host, user, pswd, parm)
+ depot = "%s@%s" % (depot,cset)
+
+ os.chdir(tmpfile)
+ bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
+ bb.msg.note(1, bb.msg.domain.Fetcher, "%s files %s" % (p4cmd, depot))
+ p4file = os.popen("%s files %s" % (p4cmd, depot))
+
+ if not p4file:
+ bb.error("Fetch: unable to get the P4 files from %s" % (depot))
+ raise FetchError(module)
+
+ count = 0
+
+ for file in p4file:
+ list = file.split()
+
+ if list[2] == "delete":
+ continue
+
+ dest = list[0][len(path)+1:]
+ where = dest.find("#")
+
+ os.system("%s print -o %s/%s %s" % (p4cmd, module,dest[:where],list[0]))
+ count = count + 1
+
+ if count == 0:
+ bb.error("Fetch: No files gathered from the P4 fetch")
+ raise FetchError(module)
+
+ myret = os.system("tar -czf %s %s" % (ud.localpath, module))
+ if myret != 0:
+ try:
+ os.unlink(ud.localpath)
+ except OSError:
+ pass
+ raise FetchError(module)
+ # cleanup
+ os.system('rm -rf %s' % tmpfile)
+
+
diff --git a/bitbake-dev/lib/bb/fetch/ssh.py b/bitbake-dev/lib/bb/fetch/ssh.py
new file mode 100644
index 0000000000..81a9892dcc
--- /dev/null
+++ b/bitbake-dev/lib/bb/fetch/ssh.py
@@ -0,0 +1,120 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+'''
+BitBake 'Fetch' implementations
+
+This implementation is for Secure Shell (SSH), and attempts to comply with the
+IETF secsh internet draft:
+ http://tools.ietf.org/wg/secsh/draft-ietf-secsh-scp-sftp-ssh-uri/
+
+ Currently does not support the sftp parameters, as this uses scp.
+ It also does not support the 'fingerprint' connection parameter.
+
+'''
+
+# Copyright (C) 2006 OpenedHand Ltd.
+#
+#
+# Based in part on svk.py:
+# Copyright (C) 2006 Holger Hans Peter Freyther
+# Based on svn.py:
+# Copyright (C) 2003, 2004 Chris Larson
+# Based on functions from the base bb module:
+# Copyright 2003 Holger Schurig
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re, os
+import bb
+from bb import data
+from bb.fetch import Fetch
+from bb.fetch import FetchError
+from bb.fetch import MissingParameterError
+
+
+__pattern__ = re.compile(r'''
+ \s* # Skip leading whitespace
+ ssh:// # scheme
+ ( # Optional username/password block
+ (?P<user>\S+) # username
+ (:(?P<pass>\S+))? # colon followed by the password (optional)
+ )?
+ (?P<cparam>(;[^;]+)*)? # connection parameters block (optional)
+ @
+ (?P<host>\S+?) # non-greedy match of the host
+ (:(?P<port>[0-9]+))? # colon followed by the port (optional)
+ /
+ (?P<path>[^;]+) # path on the remote system, may be absolute or relative,
+ # and may include the use of '~' to reference the remote home
+ # directory
+ (?P<sparam>(;[^;]+)*)? # parameters block (optional)
+ $
+''', re.VERBOSE)
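+# Illustrative urls accepted by the pattern above (hypothetical hosts):
+# ssh://user@host.example.com/absolute/path
+# ssh://user:secret@host.example.com:2222/~/relative/path;someparam=1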
+
+class SSH(Fetch):
+ '''Class to fetch a module or modules via Secure Shell'''
+
+ def supports(self, url, urldata, d):
+ return __pattern__.match(url) is not None
+
+ def localpath(self, url, urldata, d):
+ m = __pattern__.match(url)
+ path = m.group('path')
+ host = m.group('host')
+ lpath = os.path.join(data.getVar('DL_DIR', d, True), host, os.path.basename(path))
+ return lpath
+
+ def go(self, url, urldata, d):
+ dldir = data.getVar('DL_DIR', d, 1)
+
+ m = __pattern__.match(url)
+ path = m.group('path')
+ host = m.group('host')
+ port = m.group('port')
+ user = m.group('user')
+ password = m.group('pass')
+
+ ldir = os.path.join(dldir, host)
+ lpath = os.path.join(ldir, os.path.basename(path))
+
+ if not os.path.exists(ldir):
+ os.makedirs(ldir)
+
+ if port:
+ port = '-P %s' % port
+ else:
+ port = ''
+
+ if user:
+ fr = user
+ if password:
+ fr += ':%s' % password
+ fr += '@%s' % host
+ else:
+ fr = host
+ fr += ':%s' % path
+
+
+ import commands
+ cmd = 'scp -B -r %s %s %s/' % (
+ port,
+ commands.mkarg(fr),
+ commands.mkarg(ldir)
+ )
+
+ (exitstatus, output) = commands.getstatusoutput(cmd)
+ if exitstatus != 0:
+ print output
+ raise FetchError('Unable to fetch %s' % url)
diff --git a/bitbake-dev/lib/bb/fetch/svk.py b/bitbake-dev/lib/bb/fetch/svk.py
new file mode 100644
index 0000000000..d863ccb6e0
--- /dev/null
+++ b/bitbake-dev/lib/bb/fetch/svk.py
@@ -0,0 +1,109 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementations
+
+This implementation is for svk. It is based on the svn implementation
+
+"""
+
+# Copyright (C) 2006 Holger Hans Peter Freyther
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os, re
+import bb
+from bb import data
+from bb.fetch import Fetch
+from bb.fetch import FetchError
+from bb.fetch import MissingParameterError
+
+class Svk(Fetch):
+ """Class to fetch a module or modules from svk repositories"""
+ def supports(self, url, ud, d):
+ """
+ Check to see if a given url can be fetched with svk.
+ """
+ return ud.type in ['svk']
+
+ def localpath(self, url, ud, d):
+ if not "module" in ud.parm:
+ raise MissingParameterError("svk method needs a 'module' parameter")
+ else:
+ ud.module = ud.parm["module"]
+
+ ud.revision = ""
+ if 'rev' in ud.parm:
+ ud.revision = ud.parm['rev']
+
+ ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d)
+
+ return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
+
+ def forcefetch(self, url, ud, d):
+ if (ud.date == "now"):
+ return True
+ return False
+
+ def go(self, loc, ud, d):
+ """Fetch urls"""
+
+ if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
+ return
+
+ svkroot = ud.host + ud.path
+
+ svkcmd = "svk co -r {%s} %s/%s" % (date, svkroot, ud.module)
+
+ if ud.revision:
+ svkcmd = "svk co -r %s/%s" % (ud.revision, svkroot, ud.module)
+
+ # create temp directory
+ localdata = data.createCopy(d)
+ data.update_data(localdata)
+ bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
+ bb.mkdirhier(data.expand('${WORKDIR}', localdata))
+ data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
+ tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
+ tmpfile = tmppipe.readline().strip()
+ if not tmpfile:
+ bb.msg.error(bb.msg.domain.Fetcher, "Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
+ raise FetchError(ud.module)
+
+ # check out sources there
+ os.chdir(tmpfile)
+ bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svkcmd)
+ myret = os.system(svkcmd)
+ if myret != 0:
+ try:
+ os.rmdir(tmpfile)
+ except OSError:
+ pass
+ raise FetchError(ud.module)
+
+ os.chdir(os.path.join(tmpfile, os.path.dirname(ud.module)))
+ # tar them up to a defined filename
+ myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.module)))
+ if myret != 0:
+ try:
+ os.unlink(ud.localpath)
+ except OSError:
+ pass
+ raise FetchError(ud.module)
+ # cleanup
+ os.system('rm -rf %s' % tmpfile)
diff --git a/bitbake-dev/lib/bb/fetch/svn.py b/bitbake-dev/lib/bb/fetch/svn.py
new file mode 100644
index 0000000000..5e5b31b3ad
--- /dev/null
+++ b/bitbake-dev/lib/bb/fetch/svn.py
@@ -0,0 +1,204 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementation for svn.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2004 Marcin Juszkiewicz
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os, re
+import sys
+import bb
+from bb import data
+from bb.fetch import Fetch
+from bb.fetch import FetchError
+from bb.fetch import MissingParameterError
+from bb.fetch import runfetchcmd
+
+class Svn(Fetch):
+ """Class to fetch a module or modules from svn repositories"""
+ def supports(self, url, ud, d):
+ """
+ Check to see if a given url can be fetched with svn.
+ """
+ return ud.type in ['svn']
+
+ def localpath(self, url, ud, d):
+ if not "module" in ud.parm:
+ raise MissingParameterError("svn method needs a 'module' parameter")
+
+ ud.module = ud.parm["module"]
+
+ # Create paths to svn checkouts
+ relpath = ud.path
+ if relpath.startswith('/'):
+ # Remove leading slash as os.path.join can't cope
+ relpath = relpath[1:]
+ ud.pkgdir = os.path.join(data.expand('${SVNDIR}', d), ud.host, relpath)
+ ud.moddir = os.path.join(ud.pkgdir, ud.module)
+
+ if 'rev' in ud.parm:
+ ud.date = ""
+ ud.revision = ud.parm['rev']
+ elif 'date' in ud.parm:
+ ud.date = ud.parm['date']
+ ud.revision = ""
+ else:
+ #
+ # ***Nasty hack***
+ # If DATE in unexpanded PV, use ud.date (which is set from SRCDATE)
+ # Should warn people to switch to SRCREV here
+ #
+ pv = data.getVar("PV", d, 0)
+ if "DATE" in pv:
+ ud.revision = ""
+ else:
+ rev = Fetch.srcrev_internal_helper(ud, d)
+ if rev is True:
+ ud.revision = self.latest_revision(url, ud, d)
+ ud.date = ""
+ elif rev:
+ ud.revision = rev
+ ud.date = ""
+ else:
+ ud.revision = ""
+
+ ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d)
+
+ return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
+
+ def _buildsvncommand(self, ud, d, command):
+ """
+ Build up an svn commandline based on ud
+ command is "fetch", "update", "info"
+ """
+
+ basecmd = data.expand('${FETCHCMD_svn}', d)
+
+ proto = "svn"
+ if "proto" in ud.parm:
+ proto = ud.parm["proto"]
+
+ svn_rsh = None
+ if proto == "svn+ssh" and "rsh" in ud.parm:
+ svn_rsh = ud.parm["rsh"]
+
+ svnroot = ud.host + ud.path
+
+ # either use the revision or SRCDATE in braces
+ options = []
+
+ if ud.user:
+ options.append("--username %s" % ud.user)
+
+ if ud.pswd:
+ options.append("--password %s" % ud.pswd)
+
+ if command is "info":
+ svncmd = "%s info %s %s://%s/%s/" % (basecmd, " ".join(options), proto, svnroot, ud.module)
+ else:
+ if ud.revision:
+ options.append("-r %s" % ud.revision)
+ elif ud.date:
+ options.append("-r {%s}" % ud.date)
+
+ if command is "fetch":
+ svncmd = "%s co %s %s://%s/%s %s" % (basecmd, " ".join(options), proto, svnroot, ud.module, ud.module)
+ elif command is "update":
+ svncmd = "%s update %s" % (basecmd, " ".join(options))
+ else:
+ raise FetchError("Invalid svn command %s" % command)
+
+ if svn_rsh:
+ svncmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svncmd)
+
+ return svncmd
+
+ def go(self, loc, ud, d):
+ """Fetch url"""
+
+ # try to use the tarball stash
+ if Fetch.try_mirror(d, ud.localfile):
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping svn checkout." % ud.localpath)
+ return
+
+ bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'")
+
+ if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
+ svnupdatecmd = self._buildsvncommand(ud, d, "update")
+ bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
+ # update sources there
+ os.chdir(ud.moddir)
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svnupdatecmd)
+ runfetchcmd(svnupdatecmd, d)
+ else:
+ svnfetchcmd = self._buildsvncommand(ud, d, "fetch")
+ bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
+ # check out sources there
+ bb.mkdirhier(ud.pkgdir)
+ os.chdir(ud.pkgdir)
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svnfetchcmd)
+ runfetchcmd(svnfetchcmd, d)
+
+ os.chdir(ud.pkgdir)
+ # tar them up to a defined filename
+ try:
+ runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d)
+ except:
+ t, v, tb = sys.exc_info()
+ try:
+ os.unlink(ud.localpath)
+ except OSError:
+ pass
+ raise t, v, tb
+
+ def supports_srcrev(self):
+ return True
+
+ def _revision_key(self, url, ud, d):
+ """
+ Return a unique key for the url
+ """
+ return "svn:" + ud.moddir
+
+ def _latest_revision(self, url, ud, d):
+ """
+ Return the latest upstream revision number
+ """
+ bb.msg.debug(2, bb.msg.domain.Fetcher, "SVN fetcher hitting network for %s" % url)
+
+ output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "info"), d, True)
+
+ revision = None
+ for line in output.splitlines():
+ if "Last Changed Rev" in line:
+ revision = line.split(":")[1].strip()
+
+ return revision
+
+ def _sortable_revision(self, url, ud, d):
+ """
+ Return a sortable revision number which in our case is the revision number
+ """
+
+ return self._build_revision(url, ud, d)
+
+ def _build_revision(self, url, ud, d):
+ return ud.revision
diff --git a/bitbake-dev/lib/bb/fetch/wget.py b/bitbake-dev/lib/bb/fetch/wget.py
new file mode 100644
index 0000000000..739d5a1bc6
--- /dev/null
+++ b/bitbake-dev/lib/bb/fetch/wget.py
@@ -0,0 +1,105 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementations
+
+Classes for obtaining upstream sources for the
+BitBake build tools.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os, re
+import bb
+from bb import data
+from bb.fetch import Fetch
+from bb.fetch import FetchError
+from bb.fetch import uri_replace
+
+class Wget(Fetch):
+ """Class to fetch urls via 'wget'"""
+ def supports(self, url, ud, d):
+ """
+ Check to see if a given url can be fetched with wget.
+ """
+ return ud.type in ['http','https','ftp']
+
+ def localpath(self, url, ud, d):
+
+ url = bb.encodeurl([ud.type, ud.host, ud.path, ud.user, ud.pswd, {}])
+ ud.basename = os.path.basename(ud.path)
+ ud.localfile = data.expand(os.path.basename(url), d)
+
+ return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
+
+ def go(self, uri, ud, d, checkonly = False):
+ """Fetch urls"""
+
+ def fetch_uri(uri, ud, d):
+ if checkonly:
+ fetchcmd = data.getVar("CHECKCOMMAND", d, 1)
+ elif os.path.exists(ud.localpath):
+ # file exists, but we didn't complete it - try again
+ fetchcmd = data.getVar("RESUMECOMMAND", d, 1)
+ else:
+ fetchcmd = data.getVar("FETCHCOMMAND", d, 1)
+
+ bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri)
+ fetchcmd = fetchcmd.replace("${URI}", uri)
+ fetchcmd = fetchcmd.replace("${FILE}", ud.basename)
+ bb.msg.debug(2, bb.msg.domain.Fetcher, "executing " + fetchcmd)
+ ret = os.system(fetchcmd)
+ if ret != 0:
+ return False
+
+ # Sanity check since wget can pretend it succeeded when it didn't
+ # Also, this used to happen if sourceforge sent us to the mirror page
+ if not os.path.exists(ud.localpath):
+ bb.msg.debug(2, bb.msg.domain.Fetcher, "The fetch command for %s returned success but %s doesn't exist?..." % (uri, ud.localpath))
+ return False
+
+ return True
+
+ localdata = data.createCopy(d)
+ data.setVar('OVERRIDES', "wget:" + data.getVar('OVERRIDES', localdata), localdata)
+ data.update_data(localdata)
+
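+ # PREMIRRORS/MIRRORS hold newline-separated "find replace" pairs which
+ # uri_replace applies to the uri; a hypothetical entry:
+ # http://.*/.* file:///local/mirror/
+ # Premirrors are tried before the original uri, mirrors after it fails.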
+ premirrors = [ i.split() for i in (data.getVar('PREMIRRORS', localdata, 1) or "").split('\n') if i ]
+ for (find, replace) in premirrors:
+ newuri = uri_replace(uri, find, replace, d)
+ if newuri != uri:
+ if fetch_uri(newuri, ud, localdata):
+ return True
+
+ if fetch_uri(uri, ud, localdata):
+ return True
+
+ # try mirrors
+ mirrors = [ i.split() for i in (data.getVar('MIRRORS', localdata, 1) or "").split('\n') if i ]
+ for (find, replace) in mirrors:
+ newuri = uri_replace(uri, find, replace, d)
+ if newuri != uri:
+ if fetch_uri(newuri, ud, localdata):
+ return True
+
+ raise FetchError(uri)
+
+
+ def checkstatus(self, uri, ud, d):
+ return self.go(uri, ud, d, True)
diff --git a/bitbake-dev/lib/bb/manifest.py b/bitbake-dev/lib/bb/manifest.py
new file mode 100644
index 0000000000..4e4b7d98ec
--- /dev/null
+++ b/bitbake-dev/lib/bb/manifest.py
@@ -0,0 +1,144 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os, sys
+import bb, bb.data
+
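+# Each manifest line holds whitespace-separated fields in the order given
+# by fieldmap below, with '-' standing for an unset field. A hypothetical
+# line: mypkg src/foo ${bindir}/foo f 0755 - - - - - - -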
+def getfields(line):
+ fields = {}
+ fieldmap = ( "pkg", "src", "dest", "type", "mode", "uid", "gid", "major", "minor", "start", "inc", "count" )
+ for f in xrange(len(fieldmap)):
+ fields[fieldmap[f]] = None
+
+ if not line:
+ return None
+
+ splitline = line.split()
+ if not len(splitline):
+ return None
+
+ try:
+ for f in xrange(len(fieldmap)):
+ if splitline[f] == '-':
+ continue
+ fields[fieldmap[f]] = splitline[f]
+ except IndexError:
+ pass
+ return fields
+
+def parse (mfile, d):
+ manifest = []
+ while 1:
+ line = mfile.readline()
+ if not line:
+ break
+ if line.startswith("#"):
+ continue
+ fields = getfields(line)
+ if not fields:
+ continue
+ manifest.append(fields)
+ return manifest
+
+def emit (func, manifest, d):
+#str = "%s () {\n" % func
+ str = ""
+ for line in manifest:
+ emittedline = emit_line(func, line, d)
+ if not emittedline:
+ continue
+ str += emittedline + "\n"
+# str += "}\n"
+ return str
+
+def mangle (func, line, d):
+ import copy
+ newline = copy.copy(line)
+ src = bb.data.expand(newline["src"], d)
+
+ if src:
+ if not os.path.isabs(src):
+ src = "${WORKDIR}/" + src
+
+ dest = newline["dest"]
+ if not dest:
+ return
+
+ if dest.startswith("/"):
+ dest = dest[1:]
+
+ if func is "do_install":
+ dest = "${D}/" + dest
+
+ elif func is "do_populate":
+ dest = "${WORKDIR}/install/" + newline["pkg"] + "/" + dest
+
+ elif func is "do_stage":
+ varmap = {}
+ varmap["${bindir}"] = "${STAGING_DIR}/${HOST_SYS}/bin"
+ varmap["${libdir}"] = "${STAGING_DIR}/${HOST_SYS}/lib"
+ varmap["${includedir}"] = "${STAGING_DIR}/${HOST_SYS}/include"
+ varmap["${datadir}"] = "${STAGING_DATADIR}"
+
+ matched = 0
+ for key in varmap.keys():
+ if dest.startswith(key):
+ dest = varmap[key] + "/" + dest[len(key):]
+ matched = 1
+ if not matched:
+ newline = None
+ return
+ else:
+ newline = None
+ return
+
+ newline["src"] = src
+ newline["dest"] = dest
+ return newline
+
+def emit_line (func, line, d):
+ import copy
+ newline = copy.deepcopy(line)
+ newline = mangle(func, newline, d)
+ if not newline:
+ return None
+
+ str = ""
+ type = newline["type"]
+ mode = newline["mode"]
+ src = newline["src"]
+ dest = newline["dest"]
+ if type is "d":
+ str = "install -d "
+ if mode:
+ str += "-m %s " % mode
+ str += dest
+ elif type is "f":
+ if not src:
+ return None
+ if dest.endswith("/"):
+ str = "install -d "
+ str += dest + "\n"
+ str += "install "
+ else:
+ str = "install -D "
+ if mode:
+ str += "-m %s " % mode
+ str += src + " " + dest
+ del newline
+ return str
diff --git a/bitbake-dev/lib/bb/methodpool.py b/bitbake-dev/lib/bb/methodpool.py
new file mode 100644
index 0000000000..f43c4a0580
--- /dev/null
+++ b/bitbake-dev/lib/bb/methodpool.py
@@ -0,0 +1,84 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+#
+# Copyright (C) 2006 Holger Hans Peter Freyther
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+"""
+ What is a method pool?
+
+ BitBake has a global method scope where .bb, .inc and .bbclass
+ files can install methods. These methods are parsed from strings.
+ To avoid recompiling and executing these string we introduce
+ a method pool to do this task.
+
+ This pool will be used to compile and execute the functions. It
+ will be smart enough to
+"""
+
+from bb.utils import better_compile, better_exec
+from bb import error
+
+# A dict of modules we have handled
+# it is the number of .bbclasses + x in size
+_parsed_methods = { }
+_parsed_fns = { }
+
+def insert_method(modulename, code, fn):
+ """
+ Add the code of a module. The methods are simply
+ added; no checking is done.
+ """
+ comp = better_compile(code, "<bb>", fn )
+ better_exec(comp, __builtins__, code, fn)
+
+ # now some instrumentation
+ code = comp.co_names
+ for name in code:
+ if name in ['None', 'False']:
+ continue
+ elif name in _parsed_fns and not _parsed_fns[name] == modulename:
+ error( "Error Method already seen: %s in' %s' now in '%s'" % (name, _parsed_fns[name], modulename))
+ else:
+ _parsed_fns[name] = modulename
+
+def check_insert_method(modulename, code, fn):
+ """
+ Add the code if it wasn't added before. The module
+ name is used as the key for that.
+
+ Variables:
+ @modulename a short name e.g. base.bbclass
+ @code The actual python code
+ @fn The filename from the outer file
+ """
+ if not modulename in _parsed_methods:
+ _parsed_methods[modulename] = 1
+ return insert_method(modulename, code, fn)
+
+def parsed_module(modulename):
+ """
+ Inform me file xyz was parsed
+ """
+ return modulename in _parsed_methods
+
+
+def get_parsed_dict():
+ """
+ shortcut
+ """
+ return _parsed_methods
diff --git a/bitbake-dev/lib/bb/msg.py b/bitbake-dev/lib/bb/msg.py
new file mode 100644
index 0000000000..7aa0a27d25
--- /dev/null
+++ b/bitbake-dev/lib/bb/msg.py
@@ -0,0 +1,125 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'msg' implementation
+
+Message handling infrastructure for bitbake
+
+"""
+
+# Copyright (C) 2006 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys, os, re, bb
+from bb import utils, event
+
+debug_level = {}
+
+verbose = False
+
+domain = bb.utils.Enum(
+ 'Build',
+ 'Cache',
+ 'Collection',
+ 'Data',
+ 'Depends',
+ 'Fetcher',
+ 'Parsing',
+ 'PersistData',
+ 'Provider',
+ 'RunQueue',
+ 'TaskData',
+ 'Util')
+
+
+class MsgBase(bb.event.Event):
+ """Base class for messages"""
+
+ def __init__(self, msg, d ):
+ self._message = msg
+ event.Event.__init__(self, d)
+
+class MsgDebug(MsgBase):
+ """Debug Message"""
+
+class MsgNote(MsgBase):
+ """Note Message"""
+
+class MsgWarn(MsgBase):
+ """Warning Message"""
+
+class MsgError(MsgBase):
+ """Error Message"""
+
+class MsgFatal(MsgBase):
+ """Fatal Message"""
+
+class MsgPlain(MsgBase):
+ """General output"""
+
+#
+# Message control functions
+#
+
+def set_debug_level(level):
+ bb.msg.debug_level = {}
+ for domain in bb.msg.domain:
+ bb.msg.debug_level[domain] = level
+ bb.msg.debug_level['default'] = level
+
+def set_verbose(level):
+ bb.msg.verbose = level
+
+def set_debug_domains(domains):
+ for domain in domains:
+ found = False
+ for ddomain in bb.msg.domain:
+ if domain == str(ddomain):
+ bb.msg.debug_level[ddomain] = bb.msg.debug_level[ddomain] + 1
+ found = True
+ if not found:
+ bb.msg.warn(None, "Logging domain %s is not valid, ignoring" % domain)
+
+#
+# Message handling functions
+#
+
+def debug(level, domain, msg, fn = None):
+ if not domain:
+ domain = 'default'
+ if debug_level[domain] >= level:
+ bb.event.fire(MsgDebug(msg, None))
+
+def note(level, domain, msg, fn = None):
+ if not domain:
+ domain = 'default'
+ if level == 1 or verbose or debug_level[domain] >= 1:
+ bb.event.fire(MsgNote(msg, None))
+
+def warn(domain, msg, fn = None):
+ bb.event.fire(MsgWarn(msg, None))
+
+def error(domain, msg, fn = None):
+ bb.event.fire(MsgError(msg, None))
+ print 'ERROR: ' + msg
+
+def fatal(domain, msg, fn = None):
+ bb.event.fire(MsgFatal(msg, None))
+ print 'FATAL: ' + msg
+ sys.exit(1)
+
+def plain(msg, fn = None):
+ bb.event.fire(MsgPlain(msg, None))
+
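+# Illustrative usage, matching the calls made throughout bitbake:
+# bb.msg.set_debug_level(2)
+# bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cmd)
+# bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)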
diff --git a/bitbake-dev/lib/bb/parse/__init__.py b/bitbake-dev/lib/bb/parse/__init__.py
new file mode 100644
index 0000000000..3c9ba8e6da
--- /dev/null
+++ b/bitbake-dev/lib/bb/parse/__init__.py
@@ -0,0 +1,80 @@
+"""
+BitBake Parsers
+
+File parsers for the BitBake build tools.
+
+"""
+
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+__all__ = [ 'ParseError', 'SkipPackage', 'cached_mtime', 'mark_dependency',
+ 'supports', 'handle', 'init' ]
+handlers = []
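+# Each handler is a dict with 'supports', 'handle' and 'init' callables;
+# the parser modules (ConfHandler, BBHandler) append themselves to this
+# list when imported below.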
+
+import bb, os
+
+class ParseError(Exception):
+ """Exception raised when parsing fails"""
+
+class SkipPackage(Exception):
+ """Exception raised to skip this package"""
+
+__mtime_cache = {}
+def cached_mtime(f):
+ if not __mtime_cache.has_key(f):
+ __mtime_cache[f] = os.stat(f)[8]
+ return __mtime_cache[f]
+
+def cached_mtime_noerror(f):
+ if not __mtime_cache.has_key(f):
+ try:
+ __mtime_cache[f] = os.stat(f)[8]
+ except OSError:
+ return 0
+ return __mtime_cache[f]
+
+def mark_dependency(d, f):
+ if f.startswith('./'):
+ f = "%s/%s" % (os.getcwd(), f[2:])
+ deps = bb.data.getVar('__depends', d) or []
+ deps.append( (f, cached_mtime(f)) )
+ bb.data.setVar('__depends', deps, d)
+
+def supports(fn, data):
+ """Returns true if we have a handler for this file, false otherwise"""
+ for h in handlers:
+ if h['supports'](fn, data):
+ return 1
+ return 0
+
+def handle(fn, data, include = 0):
+ """Call the handler that is appropriate for this file"""
+ for h in handlers:
+ if h['supports'](fn, data):
+ return h['handle'](fn, data, include)
+ raise ParseError("%s is not a BitBake file" % fn)
+
+def init(fn, data):
+ for h in handlers:
+ if h['supports'](fn, data):
+ return h['init'](data)
+
+
+from parse_py import __version__, ConfHandler, BBHandler
diff --git a/bitbake-dev/lib/bb/parse/parse_py/BBHandler.py b/bitbake-dev/lib/bb/parse/parse_py/BBHandler.py
new file mode 100644
index 0000000000..e9b950acbd
--- /dev/null
+++ b/bitbake-dev/lib/bb/parse/parse_py/BBHandler.py
@@ -0,0 +1,416 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+ class for handling .bb files
+
+ Reads a .bb file and obtains its metadata
+
+"""
+
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re, bb, os, sys, time
+import bb.fetch, bb.build, bb.utils
+from bb import data, fetch, methodpool
+
+from ConfHandler import include, localpath, obtain, init
+from bb.parse import ParseError
+
+__func_start_regexp__ = re.compile( r"(((?P<py>python)|(?P<fr>fakeroot))\s*)*(?P<func>[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" )
+__inherit_regexp__ = re.compile( r"inherit\s+(.+)" )
+__export_func_regexp__ = re.compile( r"EXPORT_FUNCTIONS\s+(.+)" )
+__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
+__addhandler_regexp__ = re.compile( r"addhandler\s+(.+)" )
+__def_regexp__ = re.compile( r"def\s+(\w+).*:" )
+__python_func_regexp__ = re.compile( r"(\s+.*)|(^$)" )
+__word__ = re.compile(r"\S+")
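+# Illustrative lines matched by the regexps above (hypothetical names):
+# python do_mytask () { -> __func_start_regexp__
+# inherit autotools pkgconfig -> __inherit_regexp__
+# EXPORT_FUNCTIONS do_compile -> __export_func_regexp__
+# addtask mytask after do_patch before do_build -> __addtask_regexp__
+# addhandler my_event_handler -> __addhandler_regexp__
+# def mypyfunc(d): -> __def_regexp__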
+
+__infunc__ = ""
+__inpython__ = False
+__body__ = []
+__classname__ = ""
+classes = [ None, ]
+
+# We need to indicate EOF to the feeder. This code is so messy that
+# factoring it out to a close_parse_file method is out of the question.
+# We will use the IN_PYTHON_EOF as an indicator to just close the method
+#
+# The two parts using it are tightly integrated anyway
+IN_PYTHON_EOF = -9999999999999
+
+__parsed_methods__ = methodpool.get_parsed_dict()
+
+def supports(fn, d):
+ localfn = localpath(fn, d)
+ return localfn[-3:] == ".bb" or localfn[-8:] == ".bbclass" or localfn[-4:] == ".inc"
+
+def inherit(files, d):
+ __inherit_cache = data.getVar('__inherit_cache', d) or []
+ fn = ""
+ lineno = 0
+ files = data.expand(files, d)
+ for file in files:
+ if file[0] != "/" and file[-8:] != ".bbclass":
+ file = os.path.join('classes', '%s.bbclass' % file)
+
+ if not file in __inherit_cache:
+ bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s:%d: inheriting %s" % (fn, lineno, file))
+ __inherit_cache.append( file )
+ data.setVar('__inherit_cache', __inherit_cache, d)
+ include(fn, file, d, "inherit")
+ __inherit_cache = data.getVar('__inherit_cache', d) or []
+
+def handle(fn, d, include = 0):
+ global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__
+ __body__ = []
+ __infunc__ = ""
+ __classname__ = ""
+ __residue__ = []
+
+ if include == 0:
+ bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data)")
+ else:
+ bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data, include)")
+
+ (root, ext) = os.path.splitext(os.path.basename(fn))
+ base_name = "%s%s" % (root,ext)
+ init(d)
+
+ if ext == ".bbclass":
+ __classname__ = root
+ classes.append(__classname__)
+ __inherit_cache = data.getVar('__inherit_cache', d) or []
+ if not fn in __inherit_cache:
+ __inherit_cache.append(fn)
+ data.setVar('__inherit_cache', __inherit_cache, d)
+
+ if include != 0:
+ oldfile = data.getVar('FILE', d)
+ else:
+ oldfile = None
+
+ fn = obtain(fn, d)
+ bbpath = (data.getVar('BBPATH', d, 1) or '').split(':')
+ if not os.path.isabs(fn):
+ f = None
+ for p in bbpath:
+ j = os.path.join(p, fn)
+ if os.access(j, os.R_OK):
+ abs_fn = j
+ f = open(j, 'r')
+ break
+ if f is None:
+ raise IOError("file %s not found" % fn)
+ else:
+ f = open(fn,'r')
+ abs_fn = fn
+
+ if ext != ".bbclass":
+ dname = os.path.dirname(abs_fn)
+ if dname not in bbpath:
+ bbpath.insert(0, dname)
+ data.setVar('BBPATH', ":".join(bbpath), d)
+
+ if include:
+ bb.parse.mark_dependency(d, abs_fn)
+
+ if ext != ".bbclass":
+ data.setVar('FILE', fn, d)
+
+ lineno = 0
+ while 1:
+ lineno = lineno + 1
+ s = f.readline()
+ if not s: break
+ s = s.rstrip()
+ feeder(lineno, s, fn, base_name, d)
+ if __inpython__:
+ # add a blank line to close out any python definition
+ feeder(IN_PYTHON_EOF, "", fn, base_name, d)
+ if ext == ".bbclass":
+ classes.remove(__classname__)
+ else:
+ if include == 0:
+ data.expandKeys(d)
+ data.update_data(d)
+ anonqueue = data.getVar("__anonqueue", d, 1) or []
+ body = [x['content'] for x in anonqueue]
+ flag = { 'python' : 1, 'func' : 1 }
+ data.setVar("__anonfunc", "\n".join(body), d)
+ data.setVarFlags("__anonfunc", flag, d)
+ from bb import build
+ try:
+ t = data.getVar('T', d)
+ data.setVar('T', '${TMPDIR}/anonfunc/', d)
+ build.exec_func("__anonfunc", d)
+ data.delVar('T', d)
+ if t:
+ data.setVar('T', t, d)
+ except Exception, e:
+ bb.msg.debug(1, bb.msg.domain.Parsing, "Exception when executing anonymous function: %s" % e)
+ raise
+ data.delVar("__anonqueue", d)
+ data.delVar("__anonfunc", d)
+ set_additional_vars(fn, d, include)
+ data.update_data(d)
+
+ all_handlers = {}
+ for var in data.getVar('__BBHANDLERS', d) or []:
+ # try to add the handler
+ handler = data.getVar(var,d)
+ bb.event.register(var, handler)
+
+ tasklist = data.getVar('__BBTASKS', d) or []
+ bb.build.add_tasks(tasklist, d)
+
+ bbpath.pop(0)
+ if oldfile:
+ bb.data.setVar("FILE", oldfile, d)
+
+ # we have parsed the bb class now
+ if ext == ".bbclass" or ext == ".inc":
+ __parsed_methods__[base_name] = 1
+
+ return d
+
+def feeder(lineno, s, fn, root, d):
+ global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__,__infunc__, __body__, classes, bb, __residue__
+ if __infunc__:
+ if s == '}':
+ __body__.append('')
+ data.setVar(__infunc__, '\n'.join(__body__), d)
+ data.setVarFlag(__infunc__, "func", 1, d)
+ if __infunc__ == "__anonymous":
+ anonqueue = bb.data.getVar("__anonqueue", d) or []
+ anonitem = {}
+ anonitem["content"] = bb.data.getVar("__anonymous", d)
+ anonitem["flags"] = bb.data.getVarFlags("__anonymous", d)
+ anonqueue.append(anonitem)
+ bb.data.setVar("__anonqueue", anonqueue, d)
+ bb.data.delVarFlags("__anonymous", d)
+ bb.data.delVar("__anonymous", d)
+ __infunc__ = ""
+ __body__ = []
+ else:
+ __body__.append(s)
+ return
+
+ if __inpython__:
+ m = __python_func_regexp__.match(s)
+ if m and lineno != IN_PYTHON_EOF:
+ __body__.append(s)
+ return
+ else:
+ # Note we will add root to parsedmethods after having parsed
+ # 'this' file. This means we will not parse methods from
+ # bb classes twice
+ if not root in __parsed_methods__:
+ text = '\n'.join(__body__)
+ methodpool.insert_method( root, text, fn )
+ funcs = data.getVar('__functions__', d) or {}
+ if not funcs.has_key( root ):
+ funcs[root] = text
+ else:
+ funcs[root] = "%s\n%s" % (funcs[root], text)
+
+ data.setVar('__functions__', funcs, d)
+ __body__ = []
+ __inpython__ = False
+
+ if lineno == IN_PYTHON_EOF:
+ return
+
+# fall through
+
+ if s == '' or s[0] == '#': return # skip comments and empty lines
+
+ if s[-1] == '\\':
+ __residue__.append(s[:-1])
+ return
+
+ s = "".join(__residue__) + s
+ __residue__ = []
+
+ m = __func_start_regexp__.match(s)
+ if m:
+ __infunc__ = m.group("func") or "__anonymous"
+ key = __infunc__
+ if data.getVar(key, d):
+# clean up old version of this piece of metadata, as its
+# flags could cause problems
+ data.setVarFlag(key, 'python', None, d)
+ data.setVarFlag(key, 'fakeroot', None, d)
+ if m.group("py") is not None:
+ data.setVarFlag(key, "python", "1", d)
+ else:
+ data.delVarFlag(key, "python", d)
+ if m.group("fr") is not None:
+ data.setVarFlag(key, "fakeroot", "1", d)
+ else:
+ data.delVarFlag(key, "fakeroot", d)
+ return
+
+ m = __def_regexp__.match(s)
+ if m:
+ __body__.append(s)
+ __inpython__ = True
+ return
+
+ m = __export_func_regexp__.match(s)
+ if m:
+ fns = m.group(1)
+ n = __word__.findall(fns)
+ for f in n:
+ allvars = []
+ allvars.append(f)
+ allvars.append(classes[-1] + "_" + f)
+
+ vars = [[ allvars[0], allvars[1] ]]
+ if len(classes) > 1 and classes[-2] is not None:
+ allvars.append(classes[-2] + "_" + f)
+ vars = []
+ vars.append([allvars[2], allvars[1]])
+ vars.append([allvars[0], allvars[2]])
+
+ for (var, calledvar) in vars:
+ if data.getVar(var, d) and not data.getVarFlag(var, 'export_func', d):
+ continue
+
+ if data.getVar(var, d):
+ data.setVarFlag(var, 'python', None, d)
+ data.setVarFlag(var, 'func', None, d)
+
+ for flag in [ "func", "python" ]:
+ if data.getVarFlag(calledvar, flag, d):
+ data.setVarFlag(var, flag, data.getVarFlag(calledvar, flag, d), d)
+ for flag in [ "dirs" ]:
+ if data.getVarFlag(var, flag, d):
+ data.setVarFlag(calledvar, flag, data.getVarFlag(var, flag, d), d)
+
+ if data.getVarFlag(calledvar, "python", d):
+ data.setVar(var, "\tbb.build.exec_func('" + calledvar + "', d)\n", d)
+ else:
+ data.setVar(var, "\t" + calledvar + "\n", d)
+ data.setVarFlag(var, 'export_func', '1', d)
+
+ return
+
+ m = __addtask_regexp__.match(s)
+ if m:
+ func = m.group("func")
+ before = m.group("before")
+ after = m.group("after")
+ if func is None:
+ return
+ var = "do_" + func
+
+ data.setVarFlag(var, "task", 1, d)
+
+ bbtasks = data.getVar('__BBTASKS', d) or []
+ if not var in bbtasks:
+ bbtasks.append(var)
+ data.setVar('__BBTASKS', bbtasks, d)
+
+ existing = data.getVarFlag(var, "deps", d) or []
+ if after is not None:
+ # set up deps for function
+ for entry in after.split():
+ if entry not in existing:
+ existing.append(entry)
+ data.setVarFlag(var, "deps", existing, d)
+ if before is not None:
+ # set up things that depend on this func
+ for entry in before.split():
+ existing = data.getVarFlag(entry, "deps", d) or []
+ if var not in existing:
+ data.setVarFlag(entry, "deps", [var] + existing, d)
+ return
+
+ m = __addhandler_regexp__.match(s)
+ if m:
+ fns = m.group(1)
+ hs = __word__.findall(fns)
+ bbhands = data.getVar('__BBHANDLERS', d) or []
+ for h in hs:
+ bbhands.append(h)
+ data.setVarFlag(h, "handler", 1, d)
+ data.setVar('__BBHANDLERS', bbhands, d)
+ return
+
+ m = __inherit_regexp__.match(s)
+ if m:
+
+ files = m.group(1)
+ n = __word__.findall(files)
+ inherit(n, d)
+ return
+
+ from bb.parse import ConfHandler
+ return ConfHandler.feeder(lineno, s, fn, d)
+
+__pkgsplit_cache__={}
+def vars_from_file(mypkg, d):
+ if not mypkg:
+ return (None, None, None)
+ if mypkg in __pkgsplit_cache__:
+ return __pkgsplit_cache__[mypkg]
+
+ myfile = os.path.splitext(os.path.basename(mypkg))
+ parts = myfile[0].split('_')
+ __pkgsplit_cache__[mypkg] = parts
+ if len(parts) > 3:
+ raise ParseError("Unable to generate default variables from the filename: %s (too many underscores)" % mypkg)
+ exp = 3 - len(parts)
+ tmplist = []
+ while exp != 0:
+ exp -= 1
+ tmplist.append(None)
+ parts.extend(tmplist)
+ return parts
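+# e.g. a hypothetical "foo_1.0_r0.bb" yields ['foo', '1.0', 'r0'], while
+# "foo.bb" is padded out to ['foo', None, None].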
+
+def set_additional_vars(file, d, include):
+ """Deduce rest of variables, e.g. ${A} out of ${SRC_URI}"""
+
+ return
+ # Nothing seems to use this variable
+ #bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s: set_additional_vars" % file)
+
+ #src_uri = data.getVar('SRC_URI', d, 1)
+ #if not src_uri:
+ # return
+
+ #a = (data.getVar('A', d, 1) or '').split()
+
+ #from bb import fetch
+ #try:
+ # ud = fetch.init(src_uri.split(), d)
+ # a += fetch.localpaths(d, ud)
+ #except fetch.NoMethodError:
+ # pass
+ #except bb.MalformedUrl,e:
+ # raise ParseError("Unable to generate local paths for SRC_URI due to malformed uri: %s" % e)
+ #del fetch
+
+ #data.setVar('A', " ".join(a), d)
+
+
+# Add us to the handlers list
+from bb.parse import handlers
+handlers.append({'supports': supports, 'handle': handle, 'init': init})
+del handlers
diff --git a/bitbake-dev/lib/bb/parse/parse_py/ConfHandler.py b/bitbake-dev/lib/bb/parse/parse_py/ConfHandler.py
new file mode 100644
index 0000000000..e6488bbe11
--- /dev/null
+++ b/bitbake-dev/lib/bb/parse/parse_py/ConfHandler.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+ class for handling configuration data files
+
+ Reads a .conf file and obtains its metadata
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re, bb.data, os, sys
+from bb.parse import ParseError
+
+#__config_regexp__ = re.compile( r"(?P<exp>export\s*)?(?P<var>[a-zA-Z0-9\-_+.${}]+)\s*(?P<colon>:)?(?P<ques>\?)?=\s*(?P<apo>['\"]?)(?P<value>.*)(?P=apo)$")
+__config_regexp__ = re.compile( r"(?P<exp>export\s*)?(?P<var>[a-zA-Z0-9\-_+.${}/]+)(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?\s*((?P<colon>:=)|(?P<ques>\?=)|(?P<append>\+=)|(?P<prepend>=\+)|(?P<predot>=\.)|(?P<postdot>\.=)|=)\s*(?P<apo>['\"]?)(?P<value>.*)(?P=apo)$")
+__include_regexp__ = re.compile( r"include\s+(.+)" )
+__require_regexp__ = re.compile( r"require\s+(.+)" )
+__export_regexp__ = re.compile( r"export\s+(.+)" )
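+# Assignment forms accepted by __config_regexp__, for illustration:
+# export FOO = "x", FOO ?= "default", FOO := "${BAR}", FOO += "a",
+# FOO =+ "b", FOO .= "c", FOO =. "d" and FOO[flag] = "value"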
+
+def init(data):
+ if not bb.data.getVar('TOPDIR', data):
+ bb.data.setVar('TOPDIR', os.getcwd(), data)
+ if not bb.data.getVar('BBPATH', data):
+ bb.data.setVar('BBPATH', os.path.join(sys.prefix, 'share', 'bitbake'), data)
+
+def supports(fn, d):
+ return localpath(fn, d)[-5:] == ".conf"
+
+def localpath(fn, d):
+ if os.path.exists(fn):
+ return fn
+
+ if "://" not in fn:
+ return fn
+
+ localfn = None
+ try:
+ localfn = bb.fetch.localpath(fn, d, False)
+ except bb.MalformedUrl:
+ pass
+
+ if not localfn:
+ return fn
+ return localfn
+
+def obtain(fn, data):
+ import sys, bb
+ fn = bb.data.expand(fn, data)
+ localfn = bb.data.expand(localpath(fn, data), data)
+
+ if localfn != fn:
+ dldir = bb.data.getVar('DL_DIR', data, 1)
+ if not dldir:
+ bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: DL_DIR not defined")
+ return localfn
+ bb.mkdirhier(dldir)
+ try:
+ bb.fetch.init([fn], data)
+ except bb.fetch.NoMethodError:
+ (type, value, traceback) = sys.exc_info()
+ bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: no method: %s" % value)
+ return localfn
+
+ try:
+ bb.fetch.go(data)
+ except bb.fetch.MissingParameterError:
+ (type, value, traceback) = sys.exc_info()
+ bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: missing parameters: %s" % value)
+ return localfn
+ except bb.fetch.FetchError:
+ (type, value, traceback) = sys.exc_info()
+ bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: failed: %s" % value)
+ return localfn
+ return localfn
+
+
+def include(oldfn, fn, data, error_out):
+ """
+ error_out: If truthy, raise a ParseError when the file to be included
+ cannot be found; the value is interpolated into the error message.
+ """
+ if oldfn == fn: # prevent infinite recursion
+ return None
+
+ import bb
+ fn = bb.data.expand(fn, data)
+ oldfn = bb.data.expand(oldfn, data)
+
+ from bb.parse import handle
+ try:
+ ret = handle(fn, data, True)
+ except IOError:
+ if error_out:
+ raise ParseError("Could not %(error_out)s file %(fn)s" % vars() )
+ bb.msg.debug(2, bb.msg.domain.Parsing, "CONF file '%s' not found" % fn)
+
+def handle(fn, data, include = 0):
+ if include:
+ inc_string = "including"
+ else:
+ inc_string = "reading"
+ init(data)
+
+ if include == 0:
+ bb.data.inheritFromOS(data)
+ oldfile = None
+ else:
+ oldfile = bb.data.getVar('FILE', data)
+
+ fn = obtain(fn, data)
+ if not os.path.isabs(fn):
+ f = None
+ bbpath = bb.data.getVar("BBPATH", data, 1) or []
+ for p in bbpath.split(":"):
+ currname = os.path.join(p, fn)
+ if os.access(currname, os.R_OK):
+ f = open(currname, 'r')
+ abs_fn = currname
+ bb.msg.debug(2, bb.msg.domain.Parsing, "CONF %s %s" % (inc_string, currname))
+ break
+ if f is None:
+ raise IOError("file '%s' not found" % fn)
+ else:
+ f = open(fn,'r')
+ bb.msg.debug(1, bb.msg.domain.Parsing, "CONF %s %s" % (inc_string,fn))
+ abs_fn = fn
+
+ if include:
+ bb.parse.mark_dependency(data, abs_fn)
+
+ lineno = 0
+ bb.data.setVar('FILE', fn, data)
+ while 1:
+ lineno = lineno + 1
+ s = f.readline()
+ if not s: break
+ w = s.strip()
+ if not w: continue # skip empty lines
+ s = s.rstrip()
+ if s[0] == '#': continue # skip comments
+ while s[-1] == '\\':
+ s2 = f.readline()[:-1].strip()
+ lineno = lineno + 1
+ s = s[:-1] + s2
+ feeder(lineno, s, fn, data)
+
+ if oldfile:
+ bb.data.setVar('FILE', oldfile, data)
+ return data
+
+def feeder(lineno, s, fn, data):
+ def getFunc(groupd, key, data):
+ if 'flag' in groupd and groupd['flag'] != None:
+ return bb.data.getVarFlag(key, groupd['flag'], data)
+ else:
+ return bb.data.getVar(key, data)
+
+ m = __config_regexp__.match(s)
+ if m:
+ groupd = m.groupdict()
+ key = groupd["var"]
+ if "exp" in groupd and groupd["exp"] != None:
+ bb.data.setVarFlag(key, "export", 1, data)
+ if "ques" in groupd and groupd["ques"] != None:
+ val = getFunc(groupd, key, data)
+ if val == None:
+ val = groupd["value"]
+ elif "colon" in groupd and groupd["colon"] != None:
+ e = data.createCopy()
+ bb.data.update_data(e)
+ val = bb.data.expand(groupd["value"], e)
+ elif "append" in groupd and groupd["append"] != None:
+ val = "%s %s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
+ elif "prepend" in groupd and groupd["prepend"] != None:
+ val = "%s %s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
+ elif "postdot" in groupd and groupd["postdot"] != None:
+ val = "%s%s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
+ elif "predot" in groupd and groupd["predot"] != None:
+ val = "%s%s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
+ else:
+ val = groupd["value"]
+ if 'flag' in groupd and groupd['flag'] != None:
+ bb.msg.debug(3, bb.msg.domain.Parsing, "setVarFlag(%s, %s, %s, data)" % (key, groupd['flag'], val))
+ bb.data.setVarFlag(key, groupd['flag'], val, data)
+ else:
+ bb.data.setVar(key, val, data)
+ return
+
+ m = __include_regexp__.match(s)
+ if m:
+ s = bb.data.expand(m.group(1), data)
+ bb.msg.debug(3, bb.msg.domain.Parsing, "CONF %s:%d: including %s" % (fn, lineno, s))
+ include(fn, s, data, False)
+ return
+
+ m = __require_regexp__.match(s)
+ if m:
+ s = bb.data.expand(m.group(1), data)
+ include(fn, s, data, "include required")
+ return
+
+ m = __export_regexp__.match(s)
+ if m:
+ bb.data.setVarFlag(m.group(1), "export", 1, data)
+ return
+
+ raise ParseError("%s:%d: unparsed line: '%s'" % (fn, lineno, s));
+
+# Add us to the handlers list
+from bb.parse import handlers
+handlers.append({'supports': supports, 'handle': handle, 'init': init})
+del handlers
diff --git a/bitbake-dev/lib/bb/parse/parse_py/__init__.py b/bitbake-dev/lib/bb/parse/parse_py/__init__.py
new file mode 100644
index 0000000000..9e0e00adda
--- /dev/null
+++ b/bitbake-dev/lib/bb/parse/parse_py/__init__.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake Parsers
+
+File parsers for the BitBake build tools.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+__version__ = '1.0'
+
+__all__ = [ 'ConfHandler', 'BBHandler']
+
+import ConfHandler
+import BBHandler
diff --git a/bitbake-dev/lib/bb/persist_data.py b/bitbake-dev/lib/bb/persist_data.py
new file mode 100644
index 0000000000..79e7448bee
--- /dev/null
+++ b/bitbake-dev/lib/bb/persist_data.py
@@ -0,0 +1,110 @@
+# BitBake Persistent Data Store
+#
+# Copyright (C) 2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import bb, os
+
+try:
+ import sqlite3
+except ImportError:
+ try:
+ from pysqlite2 import dbapi2 as sqlite3
+ except ImportError:
+        bb.msg.fatal(bb.msg.domain.PersistData, "Importing sqlite3 and pysqlite2 both failed. Please install one of them (Python 2.5 provides sqlite3; otherwise a package such as 'python-pysqlite2' is needed).")
+
+sqlversion = sqlite3.sqlite_version_info
+if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
+ bb.msg.fatal(bb.msg.domain.PersistData, "sqlite3 version 3.3.0 or later is required.")
+
+class PersistData:
+ """
+ BitBake Persistent Data Store
+
+    Used to store data in a central location so that other threads/tasks can
+    access it at some future date.
+
+ The "domain" is used as a key to isolate each data pool and in this
+ implementation corresponds to an SQL table. The SQL table consists of a
+ simple key and value pair.
+
+ Why sqlite? It handles all the locking issues for us.
+ """
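+    # Illustrative usage sketch (domain and key names here are hypothetical):
+    #   pd = PersistData(d)
+    #   pd.addDomain("MY_DOMAIN")
+    #   pd.setValue("MY_DOMAIN", "somekey", "somevalue")
+    #   pd.getValue("MY_DOMAIN", "somekey")   # -> "somevalue"
+    #   pd.delValue("MY_DOMAIN", "somekey")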
+ def __init__(self, d):
+ self.cachedir = bb.data.getVar("PERSISTENT_DIR", d, True) or bb.data.getVar("CACHE", d, True)
+ if self.cachedir in [None, '']:
+ bb.msg.fatal(bb.msg.domain.PersistData, "Please set the 'PERSISTENT_DIR' or 'CACHE' variable.")
+ try:
+ os.stat(self.cachedir)
+ except OSError:
+ bb.mkdirhier(self.cachedir)
+
+ self.cachefile = os.path.join(self.cachedir,"bb_persist_data.sqlite3")
+ bb.msg.debug(1, bb.msg.domain.PersistData, "Using '%s' as the persistent data cache" % self.cachefile)
+
+ self.connection = sqlite3.connect(self.cachefile, timeout=5, isolation_level=None)
+
+ def addDomain(self, domain):
+ """
+        Creates the domain (an SQL table) if it doesn't exist.
+        Should be called before the domain is used.
+ """
+ self.connection.execute("CREATE TABLE IF NOT EXISTS %s(key TEXT, value TEXT);" % domain)
+
+ def delDomain(self, domain):
+ """
+ Removes a domain and all the data it contains
+ """
+ self.connection.execute("DROP TABLE IF EXISTS %s;" % domain)
+
+ def getValue(self, domain, key):
+ """
+ Return the value of a key for a domain
+ """
+ data = self.connection.execute("SELECT * from %s where key=?;" % domain, [key])
+ for row in data:
+ return row[1]
+
+ def setValue(self, domain, key, value):
+ """
+ Sets the value of a key for a domain
+ """
+ data = self.connection.execute("SELECT * from %s where key=?;" % domain, [key])
+ rows = 0
+ for row in data:
+ rows = rows + 1
+ if rows:
+ self._execute("UPDATE %s SET value=? WHERE key=?;" % domain, [value, key])
+ else:
+ self._execute("INSERT into %s(key, value) values (?, ?);" % domain, [key, value])
+
+ def delValue(self, domain, key):
+ """
+ Deletes a key/value pair
+ """
+ self._execute("DELETE from %s where key=?;" % domain, [key])
+
+ def _execute(self, *query):
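+        # sqlite raises OperationalError("database is locked") while another
+        # process holds the database lock; busy-wait and retry until the
+        # lock is released, re-raising any other operational error.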
+ while True:
+ try:
+ self.connection.execute(*query)
+ return
+ except sqlite3.OperationalError, e:
+ if 'database is locked' in str(e):
+ continue
+ raise
+
+
+
diff --git a/bitbake-dev/lib/bb/providers.py b/bitbake-dev/lib/bb/providers.py
new file mode 100644
index 0000000000..0ad5876ef0
--- /dev/null
+++ b/bitbake-dev/lib/bb/providers.py
@@ -0,0 +1,303 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
+# Copyright (C) 2005 Holger Hans Peter Freyther
+# Copyright (C) 2005 ROAD GmbH
+# Copyright (C) 2006 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os, re
+from bb import data, utils
+import bb
+
+class NoProvider(Exception):
+ """Exception raised when no provider of a build dependency can be found"""
+
+class NoRProvider(Exception):
+ """Exception raised when no provider of a runtime dependency can be found"""
+
+
+def sortPriorities(pn, dataCache, pkg_pn = None):
+ """
+ Reorder pkg_pn by file priority and default preference
+ """
+
+ if not pkg_pn:
+ pkg_pn = dataCache.pkg_pn
+
+ files = pkg_pn[pn]
+ priorities = {}
+ for f in files:
+ priority = dataCache.bbfile_priority[f]
+ preference = dataCache.pkg_dp[f]
+ if priority not in priorities:
+ priorities[priority] = {}
+ if preference not in priorities[priority]:
+ priorities[priority][preference] = []
+ priorities[priority][preference].append(f)
+ pri_list = priorities.keys()
+ pri_list.sort(lambda a, b: a - b)
+ tmp_pn = []
+ for pri in pri_list:
+ pref_list = priorities[pri].keys()
+ pref_list.sort(lambda a, b: b - a)
+ tmp_pref = []
+ for pref in pref_list:
+ tmp_pref.extend(priorities[pri][pref])
+ tmp_pn = [tmp_pref] + tmp_pn
+
+ return tmp_pn
+
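+# For example (illustrative): two files with bbfile priorities {a.bb: 10,
+# b.bb: 5} and equal preference sort to [[a.bb], [b.bb]] - the highest
+# priority group comes first, and within a priority group entries with a
+# higher DEFAULT_PREFERENCE come first.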
+
+def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
+ """
+ Find the first provider in pkg_pn with a PREFERRED_VERSION set.
+ """
+
+ preferred_file = None
+ preferred_ver = None
+
+ localdata = data.createCopy(cfgData)
+ bb.data.setVar('OVERRIDES', "pn-%s:%s:%s" % (pn, pn, data.getVar('OVERRIDES', localdata)), localdata)
+ bb.data.update_data(localdata)
+
+ preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, localdata, True)
+ if preferred_v:
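+        # The regexp below splits an optional "epoch:" prefix, the version
+        # itself, and an optional "_revision" suffix out of the
+        # PREFERRED_VERSION value (illustrative reading of the groups).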
+ m = re.match('(\d+:)*(.*)(_.*)*', preferred_v)
+ if m:
+ if m.group(1):
+ preferred_e = int(m.group(1)[:-1])
+ else:
+ preferred_e = None
+ preferred_v = m.group(2)
+ if m.group(3):
+ preferred_r = m.group(3)[1:]
+ else:
+ preferred_r = None
+ else:
+ preferred_e = None
+ preferred_r = None
+
+ for file_set in pkg_pn:
+ for f in file_set:
+ pe,pv,pr = dataCache.pkg_pepvpr[f]
+ if preferred_v == pv and (preferred_r == pr or preferred_r == None) and (preferred_e == pe or preferred_e == None):
+ preferred_file = f
+ preferred_ver = (pe, pv, pr)
+ break
+ if preferred_file:
+                break
+ if preferred_r:
+ pv_str = '%s-%s' % (preferred_v, preferred_r)
+ else:
+ pv_str = preferred_v
+    if preferred_e is not None:
+ pv_str = '%s:%s' % (preferred_e, pv_str)
+ itemstr = ""
+ if item:
+ itemstr = " (for item %s)" % item
+ if preferred_file is None:
+ bb.msg.note(1, bb.msg.domain.Provider, "preferred version %s of %s not available%s" % (pv_str, pn, itemstr))
+ else:
+ bb.msg.debug(1, bb.msg.domain.Provider, "selecting %s as PREFERRED_VERSION %s of package %s%s" % (preferred_file, pv_str, pn, itemstr))
+
+ return (preferred_ver, preferred_file)
+
+
+def findLatestProvider(pn, cfgData, dataCache, file_set):
+ """
+ Return the highest version of the providers in file_set.
+ Take default preferences into account.
+ """
+ latest = None
+ latest_p = 0
+ latest_f = None
+ for file_name in file_set:
+ pe,pv,pr = dataCache.pkg_pepvpr[file_name]
+ dp = dataCache.pkg_dp[file_name]
+
+ if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pe, pv, pr)) < 0)) or (dp > latest_p):
+ latest = (pe, pv, pr)
+ latest_f = file_name
+ latest_p = dp
+
+ return (latest, latest_f)
+
+
+def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
+ """
+ If there is a PREFERRED_VERSION, find the highest-priority bbfile
+ providing that version. If not, find the latest version provided by
+    a bbfile in the highest-priority set.
+ """
+
+ sortpkg_pn = sortPriorities(pn, dataCache, pkg_pn)
+ # Find the highest priority provider with a PREFERRED_VERSION set
+ (preferred_ver, preferred_file) = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn, item)
+ # Find the latest version of the highest priority provider
+ (latest, latest_f) = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[0])
+
+ if preferred_file is None:
+ preferred_file = latest_f
+ preferred_ver = latest
+
+ return (latest, latest_f, preferred_ver, preferred_file)
+
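+# For example (illustrative): with providers at versions 1.0 and 2.0 and
+# PREFERRED_VERSION set to 1.0, findBestProvider returns the 2.0 provider in
+# the first (latest) pair and the 1.0 provider in the second (preferred) pair.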
+
+def _filterProviders(providers, item, cfgData, dataCache):
+ """
+ Take a list of providers and filter/reorder according to the
+ environment variables and previous build results
+ """
+ eligible = []
+ preferred_versions = {}
+ sortpkg_pn = {}
+
+ # The order of providers depends on the order of the files on the disk
+ # up to here. Sort pkg_pn to make dependency issues reproducible rather
+ # than effectively random.
+ providers.sort()
+
+ # Collate providers by PN
+ pkg_pn = {}
+ for p in providers:
+ pn = dataCache.pkg_fn[p]
+ if pn not in pkg_pn:
+ pkg_pn[pn] = []
+ pkg_pn[pn].append(p)
+
+ bb.msg.debug(1, bb.msg.domain.Provider, "providers for %s are: %s" % (item, pkg_pn.keys()))
+
+ # First add PREFERRED_VERSIONS
+ for pn in pkg_pn.keys():
+ sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
+ preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
+ if preferred_versions[pn][1]:
+ eligible.append(preferred_versions[pn][1])
+
+    # Now add latest versions
+ for pn in pkg_pn.keys():
+ if pn in preferred_versions and preferred_versions[pn][1]:
+ continue
+ preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
+ eligible.append(preferred_versions[pn][1])
+
+ if len(eligible) == 0:
+ bb.msg.error(bb.msg.domain.Provider, "no eligible providers for %s" % item)
+        return []
+
+ # If pn == item, give it a slight default preference
+ # This means PREFERRED_PROVIDER_foobar defaults to foobar if available
+ for p in providers:
+ pn = dataCache.pkg_fn[p]
+ if pn != item:
+ continue
+ (newvers, fn) = preferred_versions[pn]
+ if not fn in eligible:
+ continue
+ eligible.remove(fn)
+ eligible = [fn] + eligible
+
+ return eligible
+
+
+def filterProviders(providers, item, cfgData, dataCache):
+ """
+ Take a list of providers and filter/reorder according to the
+ environment variables and previous build results
+ Takes a "normal" target item
+ """
+
+ eligible = _filterProviders(providers, item, cfgData, dataCache)
+
+ prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % item, cfgData, 1)
+ if prefervar:
+ dataCache.preferred[item] = prefervar
+
+ foundUnique = False
+ if item in dataCache.preferred:
+ for p in eligible:
+ pn = dataCache.pkg_fn[p]
+ if dataCache.preferred[item] == pn:
+                bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy %s due to PREFERRED_PROVIDER" % (pn, item))
+ eligible.remove(p)
+ eligible = [p] + eligible
+ foundUnique = True
+ break
+
+ bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible))
+
+ return eligible, foundUnique
+
+def filterProvidersRunTime(providers, item, cfgData, dataCache):
+ """
+ Take a list of providers and filter/reorder according to the
+ environment variables and previous build results
+ Takes a "runtime" target item
+ """
+
+ eligible = _filterProviders(providers, item, cfgData, dataCache)
+
+ # Should use dataCache.preferred here?
+ preferred = []
+ preferred_vars = []
+ for p in eligible:
+ pn = dataCache.pkg_fn[p]
+ provides = dataCache.pn_provides[pn]
+ for provide in provides:
+ prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, cfgData, 1)
+ if prefervar == pn:
+                var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar)
+ bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s due to %s" % (pn, item, var))
+ preferred_vars.append(var)
+ eligible.remove(p)
+ eligible = [p] + eligible
+ preferred.append(p)
+ break
+
+ numberPreferred = len(preferred)
+
+ if numberPreferred > 1:
+        bb.msg.error(bb.msg.domain.Provider, "Conflicting PREFERRED_PROVIDERS entries were found which resulted in an attempt to select multiple providers (%s) for runtime dependency %s\nThe entries resulting in this conflict were: %s" % (preferred, item, preferred_vars))
+
+ bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible))
+
+ return eligible, numberPreferred
+
+def getRuntimeProviders(dataCache, rdepend):
+ """
+    Return any providers of the given runtime dependency
+ """
+ rproviders = []
+
+ if rdepend in dataCache.rproviders:
+ rproviders += dataCache.rproviders[rdepend]
+
+ if rdepend in dataCache.packages:
+ rproviders += dataCache.packages[rdepend]
+
+ if rproviders:
+ return rproviders
+
+ # Only search dynamic packages if we can't find anything in other variables
+ for pattern in dataCache.packages_dynamic:
+ regexp = re.compile(pattern)
+ if regexp.match(rdepend):
+ rproviders += dataCache.packages_dynamic[pattern]
+
+ return rproviders
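+
+# Dynamic packages are matched by regular expression, so a (hypothetical)
+# PACKAGES_DYNAMIC pattern such as "locale-base-.*" lets a recipe provide
+# runtime dependencies like "locale-base-en-gb" without listing every
+# package name explicitly.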
diff --git a/bitbake-dev/lib/bb/runqueue.py b/bitbake-dev/lib/bb/runqueue.py
new file mode 100644
index 0000000000..4130b50641
--- /dev/null
+++ b/bitbake-dev/lib/bb/runqueue.py
@@ -0,0 +1,1157 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'RunQueue' implementation
+
+Handles preparation and execution of a queue of tasks
+"""
+
+# Copyright (C) 2006-2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from bb import msg, data, event, mkdirhier, utils
+from sets import Set
+import bb, os, sys
+import signal
+import stat
+
+class TaskFailure(Exception):
+ """Exception raised when a task in a runqueue fails"""
+ def __init__(self, x):
+ self.args = x
+
+
+class RunQueueStats:
+ """
+ Holds statistics on the tasks handled by the associated runQueue
+ """
+ def __init__(self, total):
+ self.completed = 0
+ self.skipped = 0
+ self.failed = 0
+ self.active = 0
+ self.total = total
+
+ def taskFailed(self):
+ self.active = self.active - 1
+ self.failed = self.failed + 1
+
+ def taskCompleted(self, number = 1):
+ self.active = self.active - number
+ self.completed = self.completed + number
+
+ def taskSkipped(self, number = 1):
+ self.active = self.active + number
+ self.skipped = self.skipped + number
+
+ def taskActive(self):
+ self.active = self.active + 1
+
+# These values indicate the next step due to be run in the
+# runQueue state machine
+runQueuePrepare = 2
+runQueueRunInit = 3
+runQueueRunning = 4
+runQueueFailed = 6
+runQueueCleanUp = 7
+runQueueComplete = 8
+runQueueChildProcess = 9
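+#
+# Typical flow (illustrative): runQueuePrepare -> runQueueRunInit ->
+# runQueueRunning -> runQueueCleanUp -> runQueueComplete; runQueueFailed
+# is entered when tasks fail and runQueueChildProcess marks the forked
+# child process that actually executes a task.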
+
+class RunQueueScheduler:
+ """
+ Control the order tasks are scheduled in.
+ """
+ def __init__(self, runqueue):
+ """
+ The default scheduler just returns the first buildable task (the
+        priority map is sorted by task number)
+ """
+ self.rq = runqueue
+ numTasks = len(self.rq.runq_fnid)
+
+ self.prio_map = []
+ self.prio_map.extend(range(numTasks))
+
+ def next(self):
+ """
+ Return the id of the first task we find that is buildable
+ """
+ for task1 in range(len(self.rq.runq_fnid)):
+ task = self.prio_map[task1]
+ if self.rq.runq_running[task] == 1:
+ continue
+ if self.rq.runq_buildable[task] == 1:
+ return task
+
+class RunQueueSchedulerSpeed(RunQueueScheduler):
+ """
+ A scheduler optimised for speed. The priority map is sorted by task weight,
+ heavier weighted tasks (tasks needed by the most other tasks) are run first.
+ """
+ def __init__(self, runqueue):
+ """
+ The priority map is sorted by task weight.
+ """
+ from copy import deepcopy
+
+ self.rq = runqueue
+
+ sortweight = deepcopy(self.rq.runq_weight)
+ sortweight.sort()
+ copyweight = deepcopy(self.rq.runq_weight)
+ self.prio_map = []
+
+ for weight in sortweight:
+ idx = copyweight.index(weight)
+ self.prio_map.append(idx)
+ copyweight[idx] = -1
+
+ self.prio_map.reverse()
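+
+        # Worked example (illustrative): weights [1, 3, 2] produce a prio_map
+        # of [1, 2, 0] - task 1 (weight 3) runs first, then task 2, then 0.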
+
+class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
+ """
+    A scheduler optimised to complete .bb files as quickly as possible. The
+    priority map is sorted by task weight, but then reordered so that once a
+    given .bb file starts to build, it is completed as quickly as possible. This works
+ well where disk space is at a premium and classes like OE's rm_work are in
+ force.
+ """
+ def __init__(self, runqueue):
+ RunQueueSchedulerSpeed.__init__(self, runqueue)
+ from copy import deepcopy
+
+ #FIXME - whilst this groups all fnids together it does not reorder the
+ #fnid groups optimally.
+
+ basemap = deepcopy(self.prio_map)
+ self.prio_map = []
+ while (len(basemap) > 0):
+ entry = basemap.pop(0)
+ self.prio_map.append(entry)
+ fnid = self.rq.runq_fnid[entry]
+ todel = []
+ for entry in basemap:
+ entry_fnid = self.rq.runq_fnid[entry]
+ if entry_fnid == fnid:
+ todel.append(basemap.index(entry))
+ self.prio_map.append(entry)
+ todel.reverse()
+ for idx in todel:
+ del basemap[idx]
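+
+        # Worked example (illustrative): with basemap [0, 2, 1] and fnids
+        # {0: A, 1: A, 2: B}, the result is [0, 1, 2] - both tasks of file
+        # A are grouped before file B's task.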
+
+class RunQueue:
+ """
+ BitBake Run Queue implementation
+ """
+ def __init__(self, cooker, cfgData, dataCache, taskData, targets):
+ self.reset_runqueue()
+ self.cooker = cooker
+ self.dataCache = dataCache
+ self.taskData = taskData
+ self.cfgData = cfgData
+ self.targets = targets
+
+ self.number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData, 1) or 1)
+ self.multi_provider_whitelist = (bb.data.getVar("MULTI_PROVIDER_WHITELIST", cfgData, 1) or "").split()
+ self.scheduler = bb.data.getVar("BB_SCHEDULER", cfgData, 1) or "speed"
+ self.stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, 1) or "perfile"
+ self.stampwhitelist = bb.data.getVar("BB_STAMP_WHITELIST", cfgData, 1) or ""
+
+ def reset_runqueue(self):
+ self.runq_fnid = []
+ self.runq_task = []
+ self.runq_depends = []
+ self.runq_revdeps = []
+
+ self.state = runQueuePrepare
+
+ def get_user_idstring(self, task):
+ fn = self.taskData.fn_index[self.runq_fnid[task]]
+ taskname = self.runq_task[task]
+ return "%s, %s" % (fn, taskname)
+
+ def get_task_id(self, fnid, taskname):
+ for listid in range(len(self.runq_fnid)):
+ if self.runq_fnid[listid] == fnid and self.runq_task[listid] == taskname:
+ return listid
+ return None
+
+ def circular_depchains_handler(self, tasks):
+ """
+ Some tasks aren't buildable, likely due to circular dependency issues.
+ Identify the circular dependencies and print them in a user readable format.
+ """
+ from copy import deepcopy
+
+ valid_chains = []
+ explored_deps = {}
+ msgs = []
+
+ def chain_reorder(chain):
+ """
+ Reorder a dependency chain so the lowest task id is first
+ """
+ lowest = 0
+ new_chain = []
+ for entry in range(len(chain)):
+ if chain[entry] < chain[lowest]:
+ lowest = entry
+ new_chain.extend(chain[lowest:])
+ new_chain.extend(chain[:lowest])
+ return new_chain
+
+ def chain_compare_equal(chain1, chain2):
+ """
+ Compare two dependency chains and see if they're the same
+ """
+ if len(chain1) != len(chain2):
+ return False
+ for index in range(len(chain1)):
+ if chain1[index] != chain2[index]:
+ return False
+ return True
+
+ def chain_array_contains(chain, chain_array):
+ """
+ Return True if chain_array contains chain
+ """
+ for ch in chain_array:
+ if chain_compare_equal(ch, chain):
+ return True
+ return False
+
+ def find_chains(taskid, prev_chain):
+ prev_chain.append(taskid)
+ total_deps = []
+ total_deps.extend(self.runq_revdeps[taskid])
+ for revdep in self.runq_revdeps[taskid]:
+ if revdep in prev_chain:
+ idx = prev_chain.index(revdep)
+ # To prevent duplicates, reorder the chain to start with the lowest taskid
+ # and search through an array of those we've already printed
+ chain = prev_chain[idx:]
+ new_chain = chain_reorder(chain)
+ if not chain_array_contains(new_chain, valid_chains):
+ valid_chains.append(new_chain)
+ msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
+ for dep in new_chain:
+ msgs.append(" Task %s (%s) (depends: %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends[dep]))
+ msgs.append("\n")
+ if len(valid_chains) > 10:
+                        msgs.append("Aborted dependency loop search after 10 matches.\n")
+ return msgs
+ continue
+ scan = False
+ if revdep not in explored_deps:
+ scan = True
+ elif revdep in explored_deps[revdep]:
+ scan = True
+ else:
+ for dep in prev_chain:
+ if dep in explored_deps[revdep]:
+ scan = True
+ if scan:
+ find_chains(revdep, deepcopy(prev_chain))
+ for dep in explored_deps[revdep]:
+ if dep not in total_deps:
+ total_deps.append(dep)
+
+ explored_deps[taskid] = total_deps
+
+ for task in tasks:
+ find_chains(task, [])
+
+ return msgs
+
+ def calculate_task_weights(self, endpoints):
+ """
+ Calculate a number representing the "weight" of each task. Heavier weighted tasks
+ have more dependencies and hence should be executed sooner for maximum speed.
+
+        This function also sanity checks the task list, finding tasks that are not
+        possible to execute due to circular dependencies.
+ """
+
+ numTasks = len(self.runq_fnid)
+ weight = []
+ deps_left = []
+ task_done = []
+
+ for listid in range(numTasks):
+ task_done.append(False)
+ weight.append(0)
+ deps_left.append(len(self.runq_revdeps[listid]))
+
+ for listid in endpoints:
+ weight[listid] = 1
+ task_done[listid] = True
+
+ while 1:
+ next_points = []
+ for listid in endpoints:
+ for revdep in self.runq_depends[listid]:
+ weight[revdep] = weight[revdep] + weight[listid]
+ deps_left[revdep] = deps_left[revdep] - 1
+ if deps_left[revdep] == 0:
+ next_points.append(revdep)
+ task_done[revdep] = True
+ endpoints = next_points
+ if len(next_points) == 0:
+ break
+
+ # Circular dependency sanity check
+ problem_tasks = []
+ for task in range(numTasks):
+ if task_done[task] is False or deps_left[task] != 0:
+ problem_tasks.append(task)
+ bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s) is not buildable\n" % (task, self.get_user_idstring(task)))
+ bb.msg.debug(2, bb.msg.domain.RunQueue, "(Complete marker was %s and the remaining dependency count was %s)\n\n" % (task_done[task], deps_left[task]))
+
+ if problem_tasks:
+ message = "Unbuildable tasks were found.\n"
+ message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
+ message = message + "Identifying dependency loops (this may take a short while)...\n"
+ bb.msg.error(bb.msg.domain.RunQueue, message)
+
+ msgs = self.circular_depchains_handler(problem_tasks)
+
+ message = "\n"
+ for msg in msgs:
+ message = message + msg
+ bb.msg.fatal(bb.msg.domain.RunQueue, message)
+
+ return weight
+
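+    # Worked example (illustrative): for a diamond where task d depends on
+    # b and c, which both depend on a, endpoint d gets weight 1, b and c
+    # each get 1, and a accumulates 2 - so a, needed by the most tasks, is
+    # scheduled earliest by the "speed" scheduler.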
+ def prepare_runqueue(self):
+ """
+ Turn a set of taskData into a RunQueue and compute data needed
+ to optimise the execution order.
+ """
+
+ depends = []
+ runq_build = []
+ recursive_tdepends = {}
+
+ taskData = self.taskData
+
+ if len(taskData.tasks_name) == 0:
+ # Nothing to do
+ return
+
+ bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue")
+
+ # Step A - Work out a list of tasks to run
+ #
+        # Taskdata gives us a list of possible providers for every target
+ # ordered by priority (build_targets, run_targets). It also gives
+ # information on each of those providers.
+ #
+ # To create the actual list of tasks to execute we fix the list of
+ # providers and then resolve the dependencies into task IDs. This
+ # process is repeated for each type of dependency (tdepends, deptask,
+        # rdeptask, recrdeptask, idepends).
+
+ for task in range(len(taskData.tasks_name)):
+ fnid = taskData.tasks_fnid[task]
+ fn = taskData.fn_index[fnid]
+ task_deps = self.dataCache.task_deps[fn]
+
+ if fnid not in taskData.failed_fnids:
+
+ # Resolve task internal dependencies
+ #
+ # e.g. addtask before X after Y
+ depends = taskData.tasks_tdepends[task]
+
+ # Resolve 'deptask' dependencies
+ #
+ # e.g. do_sometask[deptask] = "do_someothertask"
+ # (makes sure sometask runs after someothertask of all DEPENDS)
+ if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']:
+ tasknames = task_deps['deptask'][taskData.tasks_name[task]].split()
+ for depid in taskData.depids[fnid]:
+ # Won't be in build_targets if ASSUME_PROVIDED
+ if depid in taskData.build_targets:
+ depdata = taskData.build_targets[depid][0]
+ if depdata is not None:
+ dep = taskData.fn_index[depdata]
+ for taskname in tasknames:
+ depends.append(taskData.gettask_id(dep, taskname))
+
+ # Resolve 'rdeptask' dependencies
+ #
+ # e.g. do_sometask[rdeptask] = "do_someothertask"
+ # (makes sure sometask runs after someothertask of all RDEPENDS)
+ if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
+ taskname = task_deps['rdeptask'][taskData.tasks_name[task]]
+ for depid in taskData.rdepids[fnid]:
+ if depid in taskData.run_targets:
+ depdata = taskData.run_targets[depid][0]
+ if depdata is not None:
+ dep = taskData.fn_index[depdata]
+ depends.append(taskData.gettask_id(dep, taskname))
+
+ # Resolve inter-task dependencies
+ #
+ # e.g. do_sometask[depends] = "targetname:do_someothertask"
+ # (makes sure sometask runs after targetname's someothertask)
+ idepends = taskData.tasks_idepends[task]
+ for (depid, idependtask) in idepends:
+ if depid in taskData.build_targets:
+ # Won't be in build_targets if ASSUME_PROVIDED
+ depdata = taskData.build_targets[depid][0]
+ if depdata is not None:
+ dep = taskData.fn_index[depdata]
+ depends.append(taskData.gettask_id(dep, idependtask))
+
+ # Create a list of recursive dependent tasks (from tdepends) and cache
+ def get_recursive_tdepends(task):
+                if task is None:
+ return []
+ if task in recursive_tdepends:
+ return recursive_tdepends[task]
+
+ fnid = taskData.tasks_fnid[task]
+ taskids = taskData.gettask_ids(fnid)
+
+ rectdepends = taskids
+ nextdeps = taskids
+ while len(nextdeps) != 0:
+ newdeps = []
+ for nextdep in nextdeps:
+ for tdepend in taskData.tasks_tdepends[nextdep]:
+ if tdepend not in rectdepends:
+ rectdepends.append(tdepend)
+ newdeps.append(tdepend)
+ nextdeps = newdeps
+ recursive_tdepends[task] = rectdepends
+ return rectdepends
+
+ # Using the list of tdepends for this task create a list of
+ # the recursive idepends we have
+ def get_recursive_idepends(task):
+                if task is None:
+ return []
+ rectdepends = get_recursive_tdepends(task)
+
+ recidepends = []
+ for tdepend in rectdepends:
+ for idepend in taskData.tasks_idepends[tdepend]:
+ recidepends.append(idepend)
+ return recidepends
+
+ def add_recursive_build(depid, depfnid):
+ """
+ Add build depends of depid to depends
+                (if we've not seen it before)
+ (calls itself recursively)
+ """
+                if depid in dep_seen:
+ return
+ dep_seen.append(depid)
+ if depid in taskData.build_targets:
+ depdata = taskData.build_targets[depid][0]
+ if depdata is not None:
+ dep = taskData.fn_index[depdata]
+ # Need to avoid creating new tasks here
+ taskid = taskData.gettask_id(dep, taskname, False)
+ if taskid is not None:
+ depends.append(taskid)
+ fnid = taskData.tasks_fnid[taskid]
+ #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid])
+ else:
+ fnid = taskData.getfn_id(dep)
+ for nextdepid in taskData.depids[fnid]:
+ if nextdepid not in dep_seen:
+ add_recursive_build(nextdepid, fnid)
+ for nextdepid in taskData.rdepids[fnid]:
+ if nextdepid not in rdep_seen:
+ add_recursive_run(nextdepid, fnid)
+ for (idependid, idependtask) in get_recursive_idepends(taskid):
+ if idependid not in dep_seen:
+ add_recursive_build(idependid, fnid)
+
+ def add_recursive_run(rdepid, depfnid):
+ """
+ Add runtime depends of rdepid to depends
+                (if we've not seen it before)
+ (calls itself recursively)
+ """
+                if rdepid in rdep_seen:
+ return
+ rdep_seen.append(rdepid)
+ if rdepid in taskData.run_targets:
+ depdata = taskData.run_targets[rdepid][0]
+ if depdata is not None:
+ dep = taskData.fn_index[depdata]
+ # Need to avoid creating new tasks here
+ taskid = taskData.gettask_id(dep, taskname, False)
+ if taskid is not None:
+ depends.append(taskid)
+ fnid = taskData.tasks_fnid[taskid]
+ #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid])
+ else:
+ fnid = taskData.getfn_id(dep)
+ for nextdepid in taskData.depids[fnid]:
+ if nextdepid not in dep_seen:
+ add_recursive_build(nextdepid, fnid)
+ for nextdepid in taskData.rdepids[fnid]:
+ if nextdepid not in rdep_seen:
+ add_recursive_run(nextdepid, fnid)
+ for (idependid, idependtask) in get_recursive_idepends(taskid):
+ if idependid not in dep_seen:
+ add_recursive_build(idependid, fnid)
+
+ # Resolve recursive 'recrdeptask' dependencies
+ #
+ # e.g. do_sometask[recrdeptask] = "do_someothertask"
+ # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
+ if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
+ for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split():
+ dep_seen = []
+ rdep_seen = []
+ idep_seen = []
+ for depid in taskData.depids[fnid]:
+ add_recursive_build(depid, fnid)
+ for rdepid in taskData.rdepids[fnid]:
+ add_recursive_run(rdepid, fnid)
+ deptaskid = taskData.gettask_id(fn, taskname, False)
+ for (idependid, idependtask) in get_recursive_idepends(deptaskid):
+ add_recursive_build(idependid, fnid)
+
+            # Remove all self references
+ if task in depends:
+ newdep = []
+ bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends))
+ for dep in depends:
+ if task != dep:
+ newdep.append(dep)
+ depends = newdep
+
+
+ self.runq_fnid.append(taskData.tasks_fnid[task])
+ self.runq_task.append(taskData.tasks_name[task])
+ self.runq_depends.append(Set(depends))
+ self.runq_revdeps.append(Set())
+
+ runq_build.append(0)
+
+ # Step B - Mark all active tasks
+ #
+ # Start with the tasks we were asked to run and mark all dependencies
+ # as active too. If the task is to be 'forced', clear its stamp. Once
+ # all active tasks are marked, prune the ones we don't need.
+
+ bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks")
+
+ def mark_active(listid, depth):
+ """
+ Mark an item as active along with its depends
+ (calls itself recursively)
+ """
+
+ if runq_build[listid] == 1:
+ return
+
+ runq_build[listid] = 1
+
+ depends = self.runq_depends[listid]
+ for depend in depends:
+ mark_active(depend, depth+1)
+
+ self.target_pairs = []
+ for target in self.targets:
+ targetid = taskData.getbuild_id(target[0])
+
+ if targetid not in taskData.build_targets:
+ continue
+
+ if targetid in taskData.failed_deps:
+ continue
+
+ fnid = taskData.build_targets[targetid][0]
+ fn = taskData.fn_index[fnid]
+ self.target_pairs.append((fn, target[1]))
+
+ # Remove stamps for targets if force mode active
+ if self.cooker.configuration.force:
+ bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (target[1], fn))
+ bb.build.del_stamp(target[1], self.dataCache, fn)
+
+ if fnid in taskData.failed_fnids:
+ continue
+
+ if target[1] not in taskData.tasks_lookup[fnid]:
+ bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s does not exist for target %s" % (target[1], target[0]))
+
+ listid = taskData.tasks_lookup[fnid][target[1]]
+
+ mark_active(listid, 1)
+
+ # Step C - Prune all inactive tasks
+ #
+ # Once all active tasks are marked, prune the ones we don't need.
+
+ maps = []
+ delcount = 0
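+        # maps[old_task_id] records each task's index after pruning, or -1
+        # if the task was deleted, so dependencies can be remapped below.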
+ for listid in range(len(self.runq_fnid)):
+ if runq_build[listid-delcount] == 1:
+ maps.append(listid-delcount)
+ else:
+ del self.runq_fnid[listid-delcount]
+ del self.runq_task[listid-delcount]
+ del self.runq_depends[listid-delcount]
+ del runq_build[listid-delcount]
+ del self.runq_revdeps[listid-delcount]
+ delcount = delcount + 1
+ maps.append(-1)
+
+ #
+ # Step D - Sanity checks and computation
+ #
+
+ # Check to make sure we still have tasks to run
+ if len(self.runq_fnid) == 0:
+ if not taskData.abort:
+ bb.msg.fatal(bb.msg.domain.RunQueue, "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
+ else:
+ bb.msg.fatal(bb.msg.domain.RunQueue, "No active tasks and not in --continue mode?! Please report this bug.")
+
+ bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid)))
+
+ # Remap the dependencies to account for the deleted tasks
+ # Check we didn't delete a task we depend on
+ for listid in range(len(self.runq_fnid)):
+ newdeps = []
+ origdeps = self.runq_depends[listid]
+ for origdep in origdeps:
+ if maps[origdep] == -1:
+ bb.msg.fatal(bb.msg.domain.RunQueue, "Invalid mapping - Should never happen!")
+ newdeps.append(maps[origdep])
+ self.runq_depends[listid] = Set(newdeps)
+
+ bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings")
+
+ # Generate a list of reverse dependencies to ease future calculations
+ for listid in range(len(self.runq_fnid)):
+ for dep in self.runq_depends[listid]:
+ self.runq_revdeps[dep].add(listid)
+
+ # Identify tasks at the end of dependency chains
+ # Error on circular dependency loops (length two)
+ endpoints = []
+ for listid in range(len(self.runq_fnid)):
+ revdeps = self.runq_revdeps[listid]
+ if len(revdeps) == 0:
+ endpoints.append(listid)
+ for dep in revdeps:
+ if dep in self.runq_depends[listid]:
+ #self.dump_data(taskData)
+ bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid]))
+
+ bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints))
+
+ # Calculate task weights
+        # Check for higher length circular dependencies
+ self.runq_weight = self.calculate_task_weights(endpoints)
+
+ # Decide what order to execute the tasks in, pick a scheduler
+ #self.sched = RunQueueScheduler(self)
+ if self.scheduler == "completion":
+ self.sched = RunQueueSchedulerCompletion(self)
+ else:
+ self.sched = RunQueueSchedulerSpeed(self)
+
+ # Sanity Check - Check for multiple tasks building the same provider
+ prov_list = {}
+ seen_fn = []
+ for task in range(len(self.runq_fnid)):
+ fn = taskData.fn_index[self.runq_fnid[task]]
+ if fn in seen_fn:
+ continue
+ seen_fn.append(fn)
+ for prov in self.dataCache.fn_provides[fn]:
+ if prov not in prov_list:
+ prov_list[prov] = [fn]
+ elif fn not in prov_list[prov]:
+ prov_list[prov].append(fn)
+ error = False
+ for prov in prov_list:
+ if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist:
+ error = True
+                bb.msg.error(bb.msg.domain.RunQueue, "Multiple .bb files are due to be built which each provide %s (%s).\nThis usually means one of them provides something the other doesn't and should." % (prov, " ".join(prov_list[prov])))
+ #if error:
+ # bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...")
+
+
+ # Create a whitelist usable by the stamp checks
+ stampfnwhitelist = []
+ for entry in self.stampwhitelist.split():
+ entryid = self.taskData.getbuild_id(entry)
+ if entryid not in self.taskData.build_targets:
+ continue
+ fnid = self.taskData.build_targets[entryid][0]
+ fn = self.taskData.fn_index[fnid]
+ stampfnwhitelist.append(fn)
+ self.stampfnwhitelist = stampfnwhitelist
+
+ #self.dump_data(taskData)
+
+ self.state = runQueueRunInit
+
+ def check_stamps(self):
+ unchecked = {}
+ current = []
+ notcurrent = []
+ buildable = []
+
+ if self.stamppolicy == "perfile":
+ fulldeptree = False
+ else:
+ fulldeptree = True
+ stampwhitelist = []
+ if self.stamppolicy == "whitelist":
+            stampwhitelist = self.stampfnwhitelist
+
+ for task in range(len(self.runq_fnid)):
+ unchecked[task] = ""
+ if len(self.runq_depends[task]) == 0:
+ buildable.append(task)
+
+ def check_buildable(self, task, buildable):
+ for revdep in self.runq_revdeps[task]:
+ alldeps = 1
+ for dep in self.runq_depends[revdep]:
+ if dep in unchecked:
+ alldeps = 0
+ if alldeps == 1:
+ if revdep in unchecked:
+ buildable.append(revdep)
+
+ for task in range(len(self.runq_fnid)):
+ if task not in unchecked:
+ continue
+ fn = self.taskData.fn_index[self.runq_fnid[task]]
+ taskname = self.runq_task[task]
+ stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
+            # If the stamp is missing, it's not current
+ if not os.access(stampfile, os.F_OK):
+ del unchecked[task]
+ notcurrent.append(task)
+ check_buildable(self, task, buildable)
+ continue
+            # If it's a 'nostamp' task, it's not current
+ taskdep = self.dataCache.task_deps[fn]
+            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
+ del unchecked[task]
+ notcurrent.append(task)
+ check_buildable(self, task, buildable)
+ continue
+
+ while (len(buildable) > 0):
+ nextbuildable = []
+ for task in buildable:
+ if task in unchecked:
+ fn = self.taskData.fn_index[self.runq_fnid[task]]
+ taskname = self.runq_task[task]
+ stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
+ iscurrent = True
+
+ t1 = os.stat(stampfile)[stat.ST_MTIME]
+ for dep in self.runq_depends[task]:
+ if iscurrent:
+ fn2 = self.taskData.fn_index[self.runq_fnid[dep]]
+ taskname2 = self.runq_task[dep]
+ stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2)
+ if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
+ if dep in notcurrent:
+ iscurrent = False
+ else:
+ t2 = os.stat(stampfile2)[stat.ST_MTIME]
+ if t1 < t2:
+ iscurrent = False
+ del unchecked[task]
+ if iscurrent:
+ current.append(task)
+ else:
+ notcurrent.append(task)
+
+ check_buildable(self, task, nextbuildable)
+
+ buildable = nextbuildable
+
+ #for task in range(len(self.runq_fnid)):
+ # fn = self.taskData.fn_index[self.runq_fnid[task]]
+ # taskname = self.runq_task[task]
+ # print "%s %s.%s" % (task, taskname, fn)
+
+ #print "Unchecked: %s" % unchecked
+ #print "Current: %s" % current
+ #print "Not current: %s" % notcurrent
+
+ if len(unchecked) > 0:
+ bb.fatal("check_stamps fatal internal error")
+ return current
+
+ def check_stamp_task(self, task):
+
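+        # BB_STAMP_POLICY summary (illustrative): "perfile" only compares
+        # stamps between tasks of the same file; "full" compares against all
+        # dependencies; "whitelist" behaves like "full" except that files in
+        # BB_STAMP_WHITELIST are exempt from the cross-file comparison.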
+ if self.stamppolicy == "perfile":
+ fulldeptree = False
+ else:
+ fulldeptree = True
+ stampwhitelist = []
+ if self.stamppolicy == "whitelist":
+ stampwhitelist = self.stampfnwhitelist
+
+ fn = self.taskData.fn_index[self.runq_fnid[task]]
+ taskname = self.runq_task[task]
+ stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
+        # If the stamp is missing, it's not current
+ if not os.access(stampfile, os.F_OK):
+ bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s not available\n" % stampfile)
+ return False
+        # If it's a 'nostamp' task, it's not current
+ taskdep = self.dataCache.task_deps[fn]
+        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
+ bb.msg.debug(2, bb.msg.domain.RunQueue, "%s.%s is nostamp\n" % (fn, taskname))
+ return False
+
+ iscurrent = True
+ t1 = os.stat(stampfile)[stat.ST_MTIME]
+ for dep in self.runq_depends[task]:
+ if iscurrent:
+ fn2 = self.taskData.fn_index[self.runq_fnid[dep]]
+ taskname2 = self.runq_task[dep]
+ stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2)
+ if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
+ try:
+ t2 = os.stat(stampfile2)[stat.ST_MTIME]
+ if t1 < t2:
+ bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile,stampfile2))
+ iscurrent = False
+ except:
+ bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2 ,stampfile))
+ iscurrent = False
+
+ return iscurrent
+
+ def execute_runqueue(self):
+ """
+ Run the tasks in a queue prepared by prepare_runqueue
+ Upon failure, optionally try to recover the build using any alternate providers
+ (if the abort on failure configuration option isn't set)
+ """
+
+ if self.state is runQueuePrepare:
+ self.prepare_runqueue()
+
+ if self.state is runQueueRunInit:
+ bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue")
+ self.execute_runqueue_initVars()
+
+ if self.state is runQueueRunning:
+ self.execute_runqueue_internal()
+
+ if self.state is runQueueCleanUp:
+ self.finish_runqueue()
+
+ if self.state is runQueueFailed:
+ if self.taskData.abort:
+ raise bb.runqueue.TaskFailure(self.failed_fnids)
+ for fnid in self.failed_fnids:
+ self.taskData.fail_fnid(fnid)
+ self.reset_runqueue()
+
+ if self.state is runQueueComplete:
+ # All done
+ bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (self.stats.completed, self.stats.skipped, self.stats.failed))
+ return False
+
+ if self.state is runQueueChildProcess:
+ print "Child process"
+ return False
+
+ # Loop
+ return True
+
+ def execute_runqueue_initVars(self):
+
+ self.stats = RunQueueStats(len(self.runq_fnid))
+
+ self.runq_buildable = []
+ self.runq_running = []
+ self.runq_complete = []
+ self.build_pids = {}
+ self.failed_fnids = []
+
+ # Mark initial buildable tasks
+ for task in range(self.stats.total):
+ self.runq_running.append(0)
+ self.runq_complete.append(0)
+ if len(self.runq_depends[task]) == 0:
+ self.runq_buildable.append(1)
+ else:
+ self.runq_buildable.append(0)
+
+ self.state = runQueueRunning
+
+ event.fire(bb.event.StampUpdate(self.target_pairs, self.dataCache.stamp, self.cfgData))
+
+ def task_complete(self, task):
+ """
+ Mark a task as completed
+ Look at the reverse dependencies and mark any task with
+ completed dependencies as buildable
+ """
+ self.runq_complete[task] = 1
+ for revdep in self.runq_revdeps[task]:
+ if self.runq_running[revdep] == 1:
+ continue
+ if self.runq_buildable[revdep] == 1:
+ continue
+ alldeps = 1
+ for dep in self.runq_depends[revdep]:
+ if self.runq_complete[dep] != 1:
+ alldeps = 0
+ if alldeps == 1:
+ self.runq_buildable[revdep] = 1
+ fn = self.taskData.fn_index[self.runq_fnid[revdep]]
+ taskname = self.runq_task[revdep]
+ bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname))
+
+ def task_fail(self, task, exitcode):
+ """
+ Called when a task has failed
+ Updates the state engine with the failure
+ """
+ bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed with %s" % (task, self.get_user_idstring(task), exitcode))
+ self.stats.taskFailed()
+ fnid = self.runq_fnid[task]
+ self.failed_fnids.append(fnid)
+ bb.event.fire(runQueueTaskFailed(task, self.stats, self, self.cfgData))
+ if self.taskData.abort:
+            self.state = runQueueCleanUp
+
+ def execute_runqueue_internal(self):
+ """
+ Run the tasks in a queue prepared by prepare_runqueue
+ """
+
+ if self.stats.total == 0:
+ # nothing to do
+            self.state = runQueueCleanUp
+
+ while True:
+ task = None
+ if self.stats.active < self.number_tasks:
+ task = self.sched.next()
+ if task is not None:
+ fn = self.taskData.fn_index[self.runq_fnid[task]]
+
+ taskname = self.runq_task[task]
+ if self.check_stamp_task(task):
+ bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
+ self.runq_running[task] = 1
+ self.runq_buildable[task] = 1
+ self.task_complete(task)
+ self.stats.taskCompleted()
+ self.stats.taskSkipped()
+ continue
+
+ bb.event.fire(runQueueTaskStarted(task, self.stats, self, self.cfgData))
+ bb.msg.note(1, bb.msg.domain.RunQueue, "Running task %d of %d (ID: %s, %s)" % (self.stats.completed + self.stats.active + 1, self.stats.total, task, self.get_user_idstring(task)))
+ sys.stdout.flush()
+ sys.stderr.flush()
+ try:
+ pid = os.fork()
+ except OSError, e:
+ bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
+ if pid == 0:
+ self.state = runQueueChildProcess
+ # Make the child the process group leader
+ os.setpgid(0, 0)
+ newsi = os.open('/dev/null', os.O_RDWR)
+ os.dup2(newsi, sys.stdin.fileno())
+ self.cooker.configuration.cmd = taskname[3:]
+ bb.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", self, self.cooker.configuration.data)
+ try:
+ self.cooker.tryBuild(fn)
+ except bb.build.EventException:
+ bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
+ sys.exit(1)
+ except:
+ bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
+ raise
+ sys.exit(0)
+ self.build_pids[pid] = task
+ self.runq_running[task] = 1
+ self.stats.taskActive()
+ if self.stats.active < self.number_tasks:
+ continue
+ if self.stats.active > 0:
+ result = os.waitpid(-1, os.WNOHANG)
+                if result[0] == 0 and result[1] == 0:
+ return
+ task = self.build_pids[result[0]]
+ del self.build_pids[result[0]]
+ if result[1] != 0:
+ self.task_fail(task, result[1])
+ return
+ self.task_complete(task)
+ self.stats.taskCompleted()
+ bb.event.fire(runQueueTaskCompleted(task, self.stats, self, self.cfgData))
+ continue
+
+ if len(self.failed_fnids) != 0:
+ self.state = runQueueFailed
+ return
+
+ # Sanity Checks
+ for task in range(self.stats.total):
+ if self.runq_buildable[task] == 0:
+ bb.msg.error(bb.msg.domain.RunQueue, "Task %s never buildable!" % task)
+ if self.runq_running[task] == 0:
+ bb.msg.error(bb.msg.domain.RunQueue, "Task %s never ran!" % task)
+ if self.runq_complete[task] == 0:
+ bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task)
+ self.state = runQueueComplete
+ return
+
+ def finish_runqueue_now(self):
+ bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGINT to remaining %s tasks" % self.stats.active)
+ for k, v in self.build_pids.iteritems():
+ try:
+ os.kill(-k, signal.SIGINT)
+ except:
+ pass
+
+ def finish_runqueue(self, now = False):
+ self.state = runQueueCleanUp
+ if now:
+ self.finish_runqueue_now()
+ try:
+ while self.stats.active > 0:
+ bb.event.fire(runQueueExitWait(self.stats.active, self.cfgData))
+ bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % self.stats.active)
+ tasknum = 1
+ for k, v in self.build_pids.iteritems():
+ bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v), k))
+ tasknum = tasknum + 1
+ result = os.waitpid(-1, os.WNOHANG)
+                if result[0] == 0 and result[1] == 0:
+ return
+ task = self.build_pids[result[0]]
+ del self.build_pids[result[0]]
+ if result[1] != 0:
+ self.task_fail(task, result[1])
+ else:
+ self.stats.taskCompleted()
+ bb.event.fire(runQueueTaskCompleted(task, self.stats, self, self.cfgData))
+ except:
+ self.finish_runqueue_now()
+ raise
+
+ if len(self.failed_fnids) != 0:
+ self.state = runQueueFailed
+ return
+
+ self.state = runQueueComplete
+ return
+
+ def dump_data(self, taskQueue):
+ """
+ Dump some debug information on the internal data structures
+ """
+ bb.msg.debug(3, bb.msg.domain.RunQueue, "run_tasks:")
+ for task in range(len(self.runq_task)):
+ bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task,
+ taskQueue.fn_index[self.runq_fnid[task]],
+ self.runq_task[task],
+ self.runq_weight[task],
+ self.runq_depends[task],
+ self.runq_revdeps[task]))
+
+ bb.msg.debug(3, bb.msg.domain.RunQueue, "sorted_tasks:")
+ for task1 in range(len(self.runq_task)):
+ if task1 in self.prio_map:
+ task = self.prio_map[task1]
+ bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task,
+ taskQueue.fn_index[self.runq_fnid[task]],
+ self.runq_task[task],
+ self.runq_weight[task],
+ self.runq_depends[task],
+ self.runq_revdeps[task]))
+
+
+class runQueueExitWait(bb.event.Event):
+ """
+ Event when waiting for task processes to exit
+ """
+
+ def __init__(self, remain, d):
+ self.remain = remain
+ self.message = "Waiting for %s active tasks to finish" % remain
+ bb.event.Event.__init__(self, d)
+
+class runQueueEvent(bb.event.Event):
+ """
+ Base runQueue event class
+ """
+ def __init__(self, task, stats, rq, d):
+ self.taskid = task
+ self.taskstring = rq.get_user_idstring(task)
+ self.stats = stats
+ bb.event.Event.__init__(self, d)
+
+class runQueueTaskStarted(runQueueEvent):
+ """
+    Event notifying a task was started
+ """
+ def __init__(self, task, stats, rq, d):
+ runQueueEvent.__init__(self, task, stats, rq, d)
+ self.message = "Running task %s (%d of %d) (%s)" % (task, stats.completed + stats.active + 1, self.stats.total, self.taskstring)
+
+class runQueueTaskFailed(runQueueEvent):
+ """
+    Event notifying a task failed
+ """
+ def __init__(self, task, stats, rq, d):
+ runQueueEvent.__init__(self, task, stats, rq, d)
+ self.message = "Task %s failed (%s)" % (task, self.taskstring)
+
+class runQueueTaskCompleted(runQueueEvent):
+ """
+    Event notifying a task completed
+ """
+ def __init__(self, task, stats, rq, d):
+ runQueueEvent.__init__(self, task, stats, rq, d)
+ self.message = "Task %s completed (%s)" % (task, self.taskstring)
+
+def check_stamp_fn(fn, taskname, d):
+ rq = bb.data.getVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", d)
+ fnid = rq.taskData.getfn_id(fn)
+ taskid = rq.get_task_id(fnid, taskname)
+ if taskid is not None:
+ return rq.check_stamp_task(taskid)
+ return None
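+
+# check_stamp_fn is called from metadata code running inside a forked task
+# child: the active runqueue passes itself down via the
+# __RUNQUEUE_DO_NOT_USE_EXTERNALLY variable set in execute_runqueue_internal.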
diff --git a/bitbake-dev/lib/bb/shell.py b/bitbake-dev/lib/bb/shell.py
new file mode 100644
index 0000000000..34828fe425
--- /dev/null
+++ b/bitbake-dev/lib/bb/shell.py
@@ -0,0 +1,827 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+##########################################################################
+#
+# Copyright (C) 2005-2006 Michael 'Mickey' Lauer <mickey@Vanille.de>
+# Copyright (C) 2005-2006 Vanille Media
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+##########################################################################
+#
+# Thanks to:
+# * Holger Freyther <zecke@handhelds.org>
+# * Justin Patrin <papercrane@reversefold.com>
+#
+##########################################################################
+
+"""
+BitBake Shell
+
+IDEAS:
+ * list defined tasks per package
+ * list classes
+ * toggle force
+ * command to reparse just one (or more) bbfile(s)
+ * automatic check if reparsing is necessary (inotify?)
+ * frontend for bb file manipulation
+ * more shell-like features:
+ - output control, i.e. pipe output into grep, sort, etc.
+ - job control, i.e. bring running commands into background and foreground
+ * start parsing in background right after startup
+ * ncurses interface
+
+PROBLEMS:
+ * force doesn't always work
+ * readline completion for commands with more than one parameter
+
+"""
+
+##########################################################################
+# Import and setup global variables
+##########################################################################
+
+try:
+ set
+except NameError:
+ from sets import Set as set
+import sys, os, readline, socket, httplib, urllib, commands, popen2, copy, shlex, Queue, fnmatch
+from bb import data, parse, build, fatal, cache, taskdata, runqueue, providers as Providers
+
+__version__ = "0.5.3.1"
+__credits__ = """BitBake Shell Version %s (C) 2005 Michael 'Mickey' Lauer <mickey@Vanille.de>
+Type 'help' for more information, press CTRL-D to exit.""" % __version__
+
+cmds = {}
+leave_mainloop = False
+last_exception = None
+cooker = None
+parsed = False
+debug = os.environ.get( "BBSHELL_DEBUG", "" )
+
+##########################################################################
+# Class BitBakeShellCommands
+##########################################################################
+
+class BitBakeShellCommands:
+ """This class contains the valid commands for the shell"""
+
+ def __init__( self, shell ):
+ """Register all the commands"""
+ self._shell = shell
+ for attr in BitBakeShellCommands.__dict__:
+ if not attr.startswith( "_" ):
+ if attr.endswith( "_" ):
+ command = attr[:-1].lower()
+ else:
+ command = attr[:].lower()
+ method = getattr( BitBakeShellCommands, attr )
+ debugOut( "registering command '%s'" % command )
+ # scan number of arguments
+ usage = getattr( method, "usage", "" )
+ if usage != "<...>":
+ numArgs = len( usage.split() )
+ else:
+ numArgs = -1
+ shell.registerCommand( command, method, numArgs, "%s %s" % ( command, usage ), method.__doc__ )
+
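+    # Every public method of this class becomes a shell command: a trailing
+    # underscore is stripped (so exit_ registers as "exit") and the expected
+    # argument count is derived from the method's "usage" attribute.
+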
+ def _checkParsed( self ):
+ if not parsed:
+ print "SHELL: This command needs to parse bbfiles..."
+ self.parse( None )
+
+ def _findProvider( self, item ):
+ self._checkParsed()
+ # Need to use taskData for this information
+ preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 )
+ if not preferred: preferred = item
+ try:
+ lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
+ except KeyError:
+ if item in cooker.status.providers:
+ pf = cooker.status.providers[item][0]
+ else:
+ pf = None
+ return pf
+
+ def alias( self, params ):
+ """Register a new name for a command"""
+ new, old = params
+ if not old in cmds:
+ print "ERROR: Command '%s' not known" % old
+ else:
+ cmds[new] = cmds[old]
+ print "OK"
+ alias.usage = "<alias> <command>"
+
+ def buffer( self, params ):
+ """Dump specified output buffer"""
+ index = params[0]
+ print self._shell.myout.buffer( int( index ) )
+ buffer.usage = "<index>"
+
+ def buffers( self, params ):
+ """Show the available output buffers"""
+ commands = self._shell.myout.bufferedCommands()
+ if not commands:
+ print "SHELL: No buffered commands available yet. Start doing something."
+ else:
+ print "="*35, "Available Output Buffers", "="*27
+ for index, cmd in enumerate( commands ):
+ print "| %s %s" % ( str( index ).ljust( 3 ), cmd )
+ print "="*88
+
+ def build( self, params, cmd = "build" ):
+ """Build a providee"""
+ global last_exception
+ globexpr = params[0]
+ self._checkParsed()
+ names = globfilter( cooker.status.pkg_pn.keys(), globexpr )
+ if len( names ) == 0: names = [ globexpr ]
+ print "SHELL: Building %s" % ' '.join( names )
+
+ oldcmd = cooker.configuration.cmd
+ cooker.configuration.cmd = cmd
+
+ td = taskdata.TaskData(cooker.configuration.abort)
+ localdata = data.createCopy(cooker.configuration.data)
+ data.update_data(localdata)
+ data.expandKeys(localdata)
+
+ try:
+ tasks = []
+ for name in names:
+ td.add_provider(localdata, cooker.status, name)
+ providers = td.get_provider(name)
+
+ if len(providers) == 0:
+ raise Providers.NoProvider
+
+ tasks.append([name, "do_%s" % cooker.configuration.cmd])
+
+ td.add_unresolved(localdata, cooker.status)
+
+ rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks)
+ rq.prepare_runqueue()
+ rq.execute_runqueue()
+
+ except Providers.NoProvider:
+ print "ERROR: No Provider"
+ last_exception = Providers.NoProvider
+
+ except runqueue.TaskFailure, fnids:
+ for fnid in fnids:
+ print "ERROR: '%s' failed" % td.fn_index[fnid]
+ last_exception = runqueue.TaskFailure
+
+ except build.EventException, e:
+ print "ERROR: Couldn't build '%s'" % names
+ last_exception = e
+
+ cooker.configuration.cmd = oldcmd
+
+ build.usage = "<providee>"
+
+ def clean( self, params ):
+ """Clean a providee"""
+ self.build( params, "clean" )
+ clean.usage = "<providee>"
+
+ def compile( self, params ):
+ """Execute 'compile' on a providee"""
+ self.build( params, "compile" )
+ compile.usage = "<providee>"
+
+ def configure( self, params ):
+ """Execute 'configure' on a providee"""
+ self.build( params, "configure" )
+ configure.usage = "<providee>"
+
+ def edit( self, params ):
+ """Call $EDITOR on a providee"""
+ name = params[0]
+ bbfile = self._findProvider( name )
+ if bbfile is not None:
+ os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), bbfile ) )
+ else:
+ print "ERROR: Nothing provides '%s'" % name
+ edit.usage = "<providee>"
+
+ def environment( self, params ):
+ """Dump out the outer BitBake environment"""
+ cooker.showEnvironment()
+
+ def exit_( self, params ):
+ """Leave the BitBake Shell"""
+ debugOut( "setting leave_mainloop to true" )
+ global leave_mainloop
+ leave_mainloop = True
+
+ def fetch( self, params ):
+ """Fetch a providee"""
+ self.build( params, "fetch" )
+ fetch.usage = "<providee>"
+
+ def fileBuild( self, params, cmd = "build" ):
+ """Parse and build a .bb file"""
+ global last_exception
+ name = params[0]
+ bf = completeFilePath( name )
+ print "SHELL: Calling '%s' on '%s'" % ( cmd, bf )
+
+ oldcmd = cooker.configuration.cmd
+ cooker.configuration.cmd = cmd
+
+ try:
+ cooker.buildFile(bf)
+ except parse.ParseError:
+ print "ERROR: Unable to open or parse '%s'" % bf
+ except build.EventException, e:
+ print "ERROR: Couldn't build '%s'" % name
+ last_exception = e
+
+ cooker.configuration.cmd = oldcmd
+ fileBuild.usage = "<bbfile>"
+
+ def fileClean( self, params ):
+ """Clean a .bb file"""
+ self.fileBuild( params, "clean" )
+ fileClean.usage = "<bbfile>"
+
+ def fileEdit( self, params ):
+ """Call $EDITOR on a .bb file"""
+ name = params[0]
+ os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), completeFilePath( name ) ) )
+ fileEdit.usage = "<bbfile>"
+
+ def fileRebuild( self, params ):
+ """Rebuild (clean & build) a .bb file"""
+ self.fileBuild( params, "rebuild" )
+ fileRebuild.usage = "<bbfile>"
+
+ def fileReparse( self, params ):
+ """(re)Parse a bb file"""
+ bbfile = params[0]
+ print "SHELL: Parsing '%s'" % bbfile
+ parse.update_mtime( bbfile )
+ cooker.bb_cache.cacheValidUpdate(bbfile)
+ fromCache = cooker.bb_cache.loadData(bbfile, cooker.configuration.data)
+ cooker.bb_cache.sync()
+ if False: #fromCache:
+ print "SHELL: File has not been updated, not reparsing"
+ else:
+ print "SHELL: Parsed"
+ fileReparse.usage = "<bbfile>"
+
+ def abort( self, params ):
+ """Toggle abort task execution flag (see bitbake -k)"""
+ cooker.configuration.abort = not cooker.configuration.abort
+ print "SHELL: Abort Flag is now '%s'" % repr( cooker.configuration.abort )
+
+ def force( self, params ):
+ """Toggle force task execution flag (see bitbake -f)"""
+ cooker.configuration.force = not cooker.configuration.force
+ print "SHELL: Force Flag is now '%s'" % repr( cooker.configuration.force )
+
+ def help( self, params ):
+ """Show a comprehensive list of commands and their purpose"""
+ print "="*30, "Available Commands", "="*30
+ allcmds = cmds.keys()
+ allcmds.sort()
+ for cmd in allcmds:
+ function,numparams,usage,helptext = cmds[cmd]
+ print "| %s | %s" % (usage.ljust(30), helptext)
+ print "="*78
+
+ def lastError( self, params ):
+ """Show the reason or log that was produced by the last BitBake event exception"""
+ if last_exception is None:
+ print "SHELL: No Errors yet (Phew)..."
+ else:
+ reason, event = last_exception.args
+ print "SHELL: Reason for the last error: '%s'" % reason
+ if ':' in reason:
+                msg, filename = reason.split( ':', 1 )
+ filename = filename.strip()
+ print "SHELL: Dumping log file for last error:"
+ try:
+ print open( filename ).read()
+ except IOError:
+ print "ERROR: Couldn't open '%s'" % filename
+
+ def match( self, params ):
+ """Dump all files or providers matching a glob expression"""
+ what, globexpr = params
+ if what == "files":
+ self._checkParsed()
+ for key in globfilter( cooker.status.pkg_fn.keys(), globexpr ): print key
+ elif what == "providers":
+ self._checkParsed()
+ for key in globfilter( cooker.status.pkg_pn.keys(), globexpr ): print key
+ else:
+ print "Usage: match %s" % self.print_.usage
+ match.usage = "<files|providers> <glob>"
+
+ def new( self, params ):
+ """Create a new .bb file and open the editor"""
+ dirname, filename = params
+ packages = '/'.join( data.getVar( "BBFILES", cooker.configuration.data, 1 ).split('/')[:-2] )
+ fulldirname = "%s/%s" % ( packages, dirname )
+
+ if not os.path.exists( fulldirname ):
+ print "SHELL: Creating '%s'" % fulldirname
+ os.mkdir( fulldirname )
+ if os.path.exists( fulldirname ) and os.path.isdir( fulldirname ):
+ if os.path.exists( "%s/%s" % ( fulldirname, filename ) ):
+ print "SHELL: ERROR: %s/%s already exists" % ( fulldirname, filename )
+ return False
+ print "SHELL: Creating '%s/%s'" % ( fulldirname, filename )
+ newpackage = open( "%s/%s" % ( fulldirname, filename ), "w" )
+ print >>newpackage,"""DESCRIPTION = ""
+SECTION = ""
+AUTHOR = ""
+HOMEPAGE = ""
+MAINTAINER = ""
+LICENSE = "GPL"
+PR = "r0"
+
+SRC_URI = ""
+
+#inherit base
+
+#do_configure() {
+#
+#}
+
+#do_compile() {
+#
+#}
+
+#do_stage() {
+#
+#}
+
+#do_install() {
+#
+#}
+"""
+ newpackage.close()
+ os.system( "%s %s/%s" % ( os.environ.get( "EDITOR" ), fulldirname, filename ) )
+ new.usage = "<directory> <filename>"
+
+ def package( self, params ):
+ """Execute 'package' on a providee"""
+ self.build( params, "package" )
+ package.usage = "<providee>"
+
+ def pasteBin( self, params ):
+ """Send a command + output buffer to the pastebin at http://rafb.net/paste"""
+ index = params[0]
+ contents = self._shell.myout.buffer( int( index ) )
+ sendToPastebin( "output of " + params[0], contents )
+ pasteBin.usage = "<index>"
+
+ def pasteLog( self, params ):
+ """Send the last event exception error log (if there is one) to http://rafb.net/paste"""
+ if last_exception is None:
+ print "SHELL: No Errors yet (Phew)..."
+ else:
+ reason, event = last_exception.args
+ print "SHELL: Reason for the last error: '%s'" % reason
+ if ':' in reason:
+                msg, filename = reason.split( ':', 1 )
+ filename = filename.strip()
+ print "SHELL: Pasting log file to pastebin..."
+
+            contents = open( filename ).read()
+            sendToPastebin( "contents of " + filename, contents )
+
+ def patch( self, params ):
+ """Execute 'patch' command on a providee"""
+ self.build( params, "patch" )
+ patch.usage = "<providee>"
+
+ def parse( self, params ):
+ """(Re-)parse .bb files and calculate the dependency graph"""
+ cooker.status = cache.CacheData()
+ ignore = data.getVar("ASSUME_PROVIDED", cooker.configuration.data, 1) or ""
+ cooker.status.ignored_dependencies = set( ignore.split() )
+ cooker.handleCollections( data.getVar("BBFILE_COLLECTIONS", cooker.configuration.data, 1) )
+
+ (filelist, masked) = cooker.collect_bbfiles()
+ cooker.parse_bbfiles(filelist, masked, cooker.myProgressCallback)
+ cooker.buildDepgraph()
+ global parsed
+ parsed = True
+ print
+
+ def reparse( self, params ):
+ """(re)Parse a providee's bb file"""
+ bbfile = self._findProvider( params[0] )
+ if bbfile is not None:
+ print "SHELL: Found bbfile '%s' for '%s'" % ( bbfile, params[0] )
+ self.fileReparse( [ bbfile ] )
+ else:
+ print "ERROR: Nothing provides '%s'" % params[0]
+ reparse.usage = "<providee>"
+
+ def getvar( self, params ):
+ """Dump the contents of an outer BitBake environment variable"""
+ var = params[0]
+ value = data.getVar( var, cooker.configuration.data, 1 )
+ print value
+ getvar.usage = "<variable>"
+
+ def peek( self, params ):
+ """Dump contents of variable defined in providee's metadata"""
+ name, var = params
+ bbfile = self._findProvider( name )
+ if bbfile is not None:
+ the_data = cooker.bb_cache.loadDataFull(bbfile, cooker.configuration.data)
+ value = the_data.getVar( var, 1 )
+ print value
+ else:
+ print "ERROR: Nothing provides '%s'" % name
+ peek.usage = "<providee> <variable>"
+
+ def poke( self, params ):
+ """Set contents of variable defined in providee's metadata"""
+ name, var, value = params
+ bbfile = self._findProvider( name )
+ if bbfile is not None:
+ print "ERROR: Sorry, this functionality is currently broken"
+ #d = cooker.pkgdata[bbfile]
+ #data.setVar( var, value, d )
+
+ # mark the change semi persistant
+ #cooker.pkgdata.setDirty(bbfile, d)
+ #print "OK"
+ else:
+ print "ERROR: Nothing provides '%s'" % name
+ poke.usage = "<providee> <variable> <value>"
+
+ def print_( self, params ):
+ """Dump all files or providers"""
+ what = params[0]
+ if what == "files":
+ self._checkParsed()
+ for key in cooker.status.pkg_fn.keys(): print key
+ elif what == "providers":
+ self._checkParsed()
+ for key in cooker.status.providers.keys(): print key
+ else:
+ print "Usage: print %s" % self.print_.usage
+ print_.usage = "<files|providers>"
+
+ def python( self, params ):
+ """Enter the expert mode - an interactive BitBake Python Interpreter"""
+ sys.ps1 = "EXPERT BB>>> "
+ sys.ps2 = "EXPERT BB... "
+ import code
+ interpreter = code.InteractiveConsole( dict( globals() ) )
+ interpreter.interact( "SHELL: Expert Mode - BitBake Python %s\nType 'help' for more information, press CTRL-D to switch back to BBSHELL." % sys.version )
+
+ def showdata( self, params ):
+ """Execute 'showdata' on a providee"""
+ cooker.showEnvironment(None, params)
+ showdata.usage = "<providee>"
+
+ def setVar( self, params ):
+ """Set an outer BitBake environment variable"""
+ var, value = params
+ data.setVar( var, value, cooker.configuration.data )
+ print "OK"
+ setVar.usage = "<variable> <value>"
+
+ def rebuild( self, params ):
+ """Clean and rebuild a .bb file or a providee"""
+ self.build( params, "clean" )
+ self.build( params, "build" )
+ rebuild.usage = "<providee>"
+
+ def shell( self, params ):
+ """Execute a shell command and dump the output"""
+ if params != "":
+ print commands.getoutput( " ".join( params ) )
+ shell.usage = "<...>"
+
+ def stage( self, params ):
+ """Execute 'stage' on a providee"""
+ self.build( params, "stage" )
+ stage.usage = "<providee>"
+
+ def status( self, params ):
+ """<just for testing>"""
+ print "-" * 78
+ print "building list = '%s'" % cooker.building_list
+ print "build path = '%s'" % cooker.build_path
+ print "consider_msgs_cache = '%s'" % cooker.consider_msgs_cache
+ print "build stats = '%s'" % cooker.stats
+ if last_exception is not None: print "last_exception = '%s'" % repr( last_exception.args )
+ print "memory output contents = '%s'" % self._shell.myout._buffer
+
+ def test( self, params ):
+ """<just for testing>"""
+ print "testCommand called with '%s'" % params
+
+ def unpack( self, params ):
+ """Execute 'unpack' on a providee"""
+ self.build( params, "unpack" )
+ unpack.usage = "<providee>"
+
+ def which( self, params ):
+ """Computes the providers for a given providee"""
+ # Need to use taskData for this information
+ item = params[0]
+
+ self._checkParsed()
+
+ preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 )
+ if not preferred: preferred = item
+
+ try:
+ lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
+ except KeyError:
+ lv, lf, pv, pf = (None,)*4
+
+ try:
+ providers = cooker.status.providers[item]
+ except KeyError:
+ print "SHELL: ERROR: Nothing provides", preferred
+ else:
+ for provider in providers:
+ if provider == pf: provider = " (***) %s" % provider
+ else: provider = " %s" % provider
+ print provider
+ which.usage = "<providee>"
+
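+# Editor's sketch (hypothetical, not part of the original shell): a minimal
+# illustration of the registration convention used by __init__ above --
+# methods starting with "_" are private, a trailing "_" is stripped (it only
+# exists to dodge Python keywords), and the token count of the "usage"
+# attribute fixes the argument count, with "<...>" meaning "any number".
+def _registration_convention_demo():
+    class Demo:
+        def exit_( self, params ):
+            """Registers as 'exit', taking no arguments"""
+            pass
+        def build( self, params ):
+            """Registers as 'build', taking exactly one argument"""
+            pass
+        build.usage = "<providee>"
+    for attr in Demo.__dict__:
+        if attr.startswith( "_" ):
+            continue
+        if attr.endswith( "_" ):
+            command = attr[:-1].lower()
+        else:
+            command = attr.lower()
+        usage = getattr( Demo.__dict__[attr], "usage", "" )
+        if usage == "<...>":
+            numArgs = -1
+        else:
+            numArgs = len( usage.split() )
+        print "would register '%s' expecting %s argument(s)" % ( command, numArgs )
+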
+##########################################################################
+# Common helper functions
+##########################################################################
+
+def completeFilePath( bbfile ):
+ """Get the complete bbfile path"""
+ if not cooker.status: return bbfile
+ if not cooker.status.pkg_fn: return bbfile
+ for key in cooker.status.pkg_fn.keys():
+ if key.endswith( bbfile ):
+ return key
+ return bbfile
+
+def sendToPastebin( desc, content ):
+ """Send content to http://oe.pastebin.com"""
+ mydata = {}
+ mydata["lang"] = "Plain Text"
+ mydata["desc"] = desc
+ mydata["cvt_tabs"] = "No"
+ mydata["nick"] = "%s@%s" % ( os.environ.get( "USER", "unknown" ), socket.gethostname() or "unknown" )
+ mydata["text"] = content
+ params = urllib.urlencode( mydata )
+ headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"}
+
+ host = "rafb.net"
+ conn = httplib.HTTPConnection( "%s:80" % host )
+ conn.request("POST", "/paste/paste.php", params, headers )
+
+ response = conn.getresponse()
+ conn.close()
+
+ if response.status == 302:
+ location = response.getheader( "location" ) or "unknown"
+ print "SHELL: Pasted to http://%s%s" % ( host, location )
+ else:
+ print "ERROR: %s %s" % ( response.status, response.reason )
+
+def completer( text, state ):
+ """Return a possible readline completion"""
+ debugOut( "completer called with text='%s', state='%d'" % ( text, state ) )
+
+ if state == 0:
+ line = readline.get_line_buffer()
+ if " " in line:
+ line = line.split()
+ # we are in second (or more) argument
+ if line[0] in cmds and hasattr( cmds[line[0]][0], "usage" ): # known command and usage
+ u = getattr( cmds[line[0]][0], "usage" ).split()[0]
+ if u == "<variable>":
+ allmatches = cooker.configuration.data.keys()
+ elif u == "<bbfile>":
+ if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
+ else: allmatches = [ x.split("/")[-1] for x in cooker.status.pkg_fn.keys() ]
+ elif u == "<providee>":
+ if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
+ else: allmatches = cooker.status.providers.iterkeys()
+ else: allmatches = [ "(No tab completion available for this command)" ]
+ else: allmatches = [ "(No tab completion available for this command)" ]
+ else:
+ # we are in first argument
+ allmatches = cmds.iterkeys()
+
+ completer.matches = [ x for x in allmatches if x[:len(text)] == text ]
+ #print "completer.matches = '%s'" % completer.matches
+ if len( completer.matches ) > state:
+ return completer.matches[state]
+ else:
+ return None
+
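+# Editor's note: readline invokes completer() repeatedly for a single TAB
+# press, with state = 0, 1, 2, ... until it returns None. The state == 0
+# call computes the candidate list and caches it on the function object
+# (completer.matches); subsequent calls merely index into that cache.
+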
+def debugOut( text ):
+ if debug:
+ sys.stderr.write( "( %s )\n" % text )
+
+def columnize( alist, width = 80 ):
+ """
+ A word-wrap function that preserves existing line breaks
+ and most spaces in the text. Expects that existing line
+ breaks are posix newlines (\n).
+ """
+ return reduce(lambda line, word, width=width: '%s%s%s' %
+ (line,
+ ' \n'[(len(line[line.rfind('\n')+1:])
+ + len(word.split('\n',1)[0]
+ ) >= width)],
+ word),
+ alist
+ )
+
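+# Editor's sketch (hypothetical input, not part of the original shell):
+# columnize() folds a list of words into one string, emitting "\n" instead
+# of " " whenever appending the next word would overrun 'width'.
+def _columnize_demo():
+    words = [ "alpha", "beta", "gamma", "delta" ]
+    # with width = 12 this returns "alpha beta\ngamma delta"
+    return columnize( words, 12 )
+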
+def globfilter( names, pattern ):
+ return fnmatch.filter( names, pattern )
+
+##########################################################################
+# Class MemoryOutput
+##########################################################################
+
+class MemoryOutput:
+ """File-like output class buffering the output of the last 10 commands"""
+ def __init__( self, delegate ):
+ self.delegate = delegate
+ self._buffer = []
+ self.text = []
+ self._command = None
+
+ def startCommand( self, command ):
+ self._command = command
+ self.text = []
+ def endCommand( self ):
+ if self._command is not None:
+ if len( self._buffer ) == 10: del self._buffer[0]
+ self._buffer.append( ( self._command, self.text ) )
+ def removeLast( self ):
+ if self._buffer:
+ del self._buffer[ len( self._buffer ) - 1 ]
+ self.text = []
+ self._command = None
+ def lastBuffer( self ):
+ if self._buffer:
+ return self._buffer[ len( self._buffer ) -1 ][1]
+ def bufferedCommands( self ):
+ return [ cmd for cmd, output in self._buffer ]
+ def buffer( self, i ):
+ if i < len( self._buffer ):
+ return "BB>> %s\n%s" % ( self._buffer[i][0], "".join( self._buffer[i][1] ) )
+ else: return "ERROR: Invalid buffer number. Buffer needs to be in (0, %d)" % ( len( self._buffer ) - 1 )
+ def write( self, text ):
+ if self._command is not None and text != "BB>> ": self.text.append( text )
+ if self.delegate is not None: self.delegate.write( text )
+ def flush( self ):
+ return self.delegate.flush()
+ def fileno( self ):
+ return self.delegate.fileno()
+ def isatty( self ):
+ return self.delegate.isatty()
+
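+# Editor's sketch (hypothetical, not part of the original shell): how the
+# shell uses MemoryOutput -- sys.stdout is swapped for an instance around
+# each command so the last ten commands' output stays recallable via the
+# 'buffer' and 'buffers' shell commands.
+def _memory_output_demo():
+    import StringIO
+    out = MemoryOutput( StringIO.StringIO() )
+    out.startCommand( "demo" )
+    out.write( "hello\n" )
+    out.endCommand()
+    # returns "BB>> demo\nhello\n"
+    return out.buffer( 0 )
+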
+##########################################################################
+# Class BitBakeShell
+##########################################################################
+
+class BitBakeShell:
+
+ def __init__( self ):
+ """Register commands and set up readline"""
+ self.commandQ = Queue.Queue()
+ self.commands = BitBakeShellCommands( self )
+ self.myout = MemoryOutput( sys.stdout )
+ self.historyfilename = os.path.expanduser( "~/.bbsh_history" )
+ self.startupfilename = os.path.expanduser( "~/.bbsh_startup" )
+
+ readline.set_completer( completer )
+ readline.set_completer_delims( " " )
+ readline.parse_and_bind("tab: complete")
+
+ try:
+ readline.read_history_file( self.historyfilename )
+ except IOError:
+ pass # It doesn't exist yet.
+
+ print __credits__
+
+ def cleanup( self ):
+ """Write readline history and clean up resources"""
+ debugOut( "writing command history" )
+ try:
+ readline.write_history_file( self.historyfilename )
+ except:
+ print "SHELL: Unable to save command history"
+
+ def registerCommand( self, command, function, numparams = 0, usage = "", helptext = "" ):
+ """Register a command"""
+ if usage == "": usage = command
+ if helptext == "": helptext = function.__doc__ or "<not yet documented>"
+ cmds[command] = ( function, numparams, usage, helptext )
+
+ def processCommand( self, command, params ):
+ """Process a command. Check number of params and print a usage string, if appropriate"""
+ debugOut( "processing command '%s'..." % command )
+ try:
+ function, numparams, usage, helptext = cmds[command]
+ except KeyError:
+ print "SHELL: ERROR: '%s' command is not a valid command." % command
+ self.myout.removeLast()
+ else:
+            if numparams != -1 and len( params ) != numparams:
+ print "Usage: '%s'" % usage
+ return
+
+ result = function( self.commands, params )
+ debugOut( "result was '%s'" % result )
+
+ def processStartupFile( self ):
+ """Read and execute all commands found in $HOME/.bbsh_startup"""
+ if os.path.exists( self.startupfilename ):
+ startupfile = open( self.startupfilename, "r" )
+ for cmdline in startupfile:
+ debugOut( "processing startup line '%s'" % cmdline )
+ if not cmdline:
+ continue
+ if "|" in cmdline:
+ print "ERROR: '|' in startup file is not allowed. Ignoring line"
+ continue
+ self.commandQ.put( cmdline.strip() )
+
+ def main( self ):
+ """The main command loop"""
+ while not leave_mainloop:
+ try:
+ if self.commandQ.empty():
+ sys.stdout = self.myout.delegate
+ cmdline = raw_input( "BB>> " )
+ sys.stdout = self.myout
+ else:
+ cmdline = self.commandQ.get()
+ if cmdline:
+ allCommands = cmdline.split( ';' )
+ for command in allCommands:
+ pipecmd = None
+ #
+ # special case for expert mode
+ if command == 'python':
+ sys.stdout = self.myout.delegate
+ self.processCommand( command, "" )
+ sys.stdout = self.myout
+ else:
+ self.myout.startCommand( command )
+ if '|' in command: # disable output
+ command, pipecmd = command.split( '|' )
+ delegate = self.myout.delegate
+ self.myout.delegate = None
+ tokens = shlex.split( command, True )
+ self.processCommand( tokens[0], tokens[1:] or "" )
+ self.myout.endCommand()
+ if pipecmd is not None: # restore output
+ self.myout.delegate = delegate
+
+ pipe = popen2.Popen4( pipecmd )
+ pipe.tochild.write( "\n".join( self.myout.lastBuffer() ) )
+ pipe.tochild.close()
+ sys.stdout.write( pipe.fromchild.read() )
+ #
+ except EOFError:
+ print
+ return
+ except KeyboardInterrupt:
+ print
+
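+# Editor's note: in main() above, "cmd | shellcmd" works by muting
+# MemoryOutput's delegate while 'cmd' runs, then feeding the captured
+# buffer to 'shellcmd' via popen2.Popen4 and echoing what it prints.
+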
+##########################################################################
+# Start function - called from the BitBake command line utility
+##########################################################################
+
+def start( aCooker ):
+ global cooker
+ cooker = aCooker
+ bbshell = BitBakeShell()
+ bbshell.processStartupFile()
+ bbshell.main()
+ bbshell.cleanup()
+
+if __name__ == "__main__":
+ print "SHELL: Sorry, this program should only be called by BitBake."
diff --git a/bitbake-dev/lib/bb/taskdata.py b/bitbake-dev/lib/bb/taskdata.py
new file mode 100644
index 0000000000..566614ee63
--- /dev/null
+++ b/bitbake-dev/lib/bb/taskdata.py
@@ -0,0 +1,594 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'TaskData' implementation
+
+Task data collection and handling
+
+"""
+
+# Copyright (C) 2006 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from bb import data, event, mkdirhier, utils
+import bb, os
+
+class TaskData:
+ """
+ BitBake Task Data implementation
+ """
+ def __init__(self, abort = True):
+ self.build_names_index = []
+ self.run_names_index = []
+ self.fn_index = []
+
+ self.build_targets = {}
+ self.run_targets = {}
+
+ self.external_targets = []
+
+ self.tasks_fnid = []
+ self.tasks_name = []
+ self.tasks_tdepends = []
+ self.tasks_idepends = []
+ # Cache to speed up task ID lookups
+ self.tasks_lookup = {}
+
+ self.depids = {}
+ self.rdepids = {}
+
+ self.consider_msgs_cache = []
+
+ self.failed_deps = []
+ self.failed_rdeps = []
+ self.failed_fnids = []
+
+ self.abort = abort
+
+ def getbuild_id(self, name):
+ """
+ Return an ID number for the build target name.
+ If it doesn't exist, create one.
+ """
+ if not name in self.build_names_index:
+ self.build_names_index.append(name)
+ return len(self.build_names_index) - 1
+
+ return self.build_names_index.index(name)
+
+ def getrun_id(self, name):
+ """
+ Return an ID number for the run target name.
+ If it doesn't exist, create one.
+ """
+ if not name in self.run_names_index:
+ self.run_names_index.append(name)
+ return len(self.run_names_index) - 1
+
+ return self.run_names_index.index(name)
+
+ def getfn_id(self, name):
+ """
+ Return an ID number for the filename.
+ If it doesn't exist, create one.
+ """
+ if not name in self.fn_index:
+ self.fn_index.append(name)
+ return len(self.fn_index) - 1
+
+ return self.fn_index.index(name)
+
+ def gettask_ids(self, fnid):
+ """
+ Return an array of the ID numbers matching a given fnid.
+ """
+ ids = []
+ if fnid in self.tasks_lookup:
+ for task in self.tasks_lookup[fnid]:
+ ids.append(self.tasks_lookup[fnid][task])
+ return ids
+
+ def gettask_id(self, fn, task, create = True):
+ """
+ Return an ID number for the task matching fn and task.
+        If it doesn't exist and create is True (the default), create one;
+        otherwise return None.
+ """
+ fnid = self.getfn_id(fn)
+
+ if fnid in self.tasks_lookup:
+ if task in self.tasks_lookup[fnid]:
+ return self.tasks_lookup[fnid][task]
+
+ if not create:
+ return None
+
+ self.tasks_name.append(task)
+ self.tasks_fnid.append(fnid)
+ self.tasks_tdepends.append([])
+ self.tasks_idepends.append([])
+
+ listid = len(self.tasks_name) - 1
+
+ if fnid not in self.tasks_lookup:
+ self.tasks_lookup[fnid] = {}
+ self.tasks_lookup[fnid][task] = listid
+
+ return listid
+
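+    # Editor's note: TaskData stores everything as small integers indexing
+    # the *_index lists, so dependency graphs hold ints, not repeated
+    # strings. Illustrative (hypothetical) contents:
+    #   fn_index     = [ "/recipes/foo_1.0.bb" ]          -> fnid 0
+    #   tasks_name   = [ "do_fetch", "do_compile" ]       -> taskids 0, 1
+    #   tasks_lookup = { 0: { "do_fetch": 0, "do_compile": 1 } }
+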
+ def add_tasks(self, fn, dataCache):
+ """
+ Add tasks for a given fn to the database
+ """
+
+ task_deps = dataCache.task_deps[fn]
+
+ fnid = self.getfn_id(fn)
+
+ if fnid in self.failed_fnids:
+ bb.msg.fatal(bb.msg.domain.TaskData, "Trying to re-add a failed file? Something is broken...")
+
+ # Check if we've already seen this fn
+ if fnid in self.tasks_fnid:
+ return
+
+ for task in task_deps['tasks']:
+
+ # Work out task dependencies
+ parentids = []
+ for dep in task_deps['parents'][task]:
+ parentid = self.gettask_id(fn, dep)
+ parentids.append(parentid)
+ taskid = self.gettask_id(fn, task)
+ self.tasks_tdepends[taskid].extend(parentids)
+
+ # Touch all intertask dependencies
+ if 'depends' in task_deps and task in task_deps['depends']:
+ ids = []
+ for dep in task_deps['depends'][task].split():
+ if dep:
+ ids.append(((self.getbuild_id(dep.split(":")[0])), dep.split(":")[1]))
+ self.tasks_idepends[taskid].extend(ids)
+
+ # Work out build dependencies
+ if not fnid in self.depids:
+ dependids = {}
+ for depend in dataCache.deps[fn]:
+ bb.msg.debug(2, bb.msg.domain.TaskData, "Added dependency %s for %s" % (depend, fn))
+ dependids[self.getbuild_id(depend)] = None
+ self.depids[fnid] = dependids.keys()
+
+ # Work out runtime dependencies
+ if not fnid in self.rdepids:
+ rdependids = {}
+ rdepends = dataCache.rundeps[fn]
+ rrecs = dataCache.runrecs[fn]
+ for package in rdepends:
+ for rdepend in bb.utils.explode_deps(rdepends[package]):
+ bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime dependency %s for %s" % (rdepend, fn))
+ rdependids[self.getrun_id(rdepend)] = None
+ for package in rrecs:
+ for rdepend in bb.utils.explode_deps(rrecs[package]):
+ bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime recommendation %s for %s" % (rdepend, fn))
+ rdependids[self.getrun_id(rdepend)] = None
+ self.rdepids[fnid] = rdependids.keys()
+
+ for dep in self.depids[fnid]:
+ if dep in self.failed_deps:
+ self.fail_fnid(fnid)
+ return
+ for dep in self.rdepids[fnid]:
+ if dep in self.failed_rdeps:
+ self.fail_fnid(fnid)
+ return
+
+ def have_build_target(self, target):
+ """
+ Have we a build target matching this name?
+ """
+ targetid = self.getbuild_id(target)
+
+ if targetid in self.build_targets:
+ return True
+ return False
+
+ def have_runtime_target(self, target):
+ """
+ Have we a runtime target matching this name?
+ """
+ targetid = self.getrun_id(target)
+
+ if targetid in self.run_targets:
+ return True
+ return False
+
+ def add_build_target(self, fn, item):
+ """
+ Add a build target.
+ If already present, append the provider fn to the list
+ """
+ targetid = self.getbuild_id(item)
+ fnid = self.getfn_id(fn)
+
+ if targetid in self.build_targets:
+ if fnid in self.build_targets[targetid]:
+ return
+ self.build_targets[targetid].append(fnid)
+ return
+ self.build_targets[targetid] = [fnid]
+
+ def add_runtime_target(self, fn, item):
+ """
+ Add a runtime target.
+ If already present, append the provider fn to the list
+ """
+ targetid = self.getrun_id(item)
+ fnid = self.getfn_id(fn)
+
+ if targetid in self.run_targets:
+ if fnid in self.run_targets[targetid]:
+ return
+ self.run_targets[targetid].append(fnid)
+ return
+ self.run_targets[targetid] = [fnid]
+
+ def mark_external_target(self, item):
+ """
+ Mark a build target as being externally requested
+ """
+ targetid = self.getbuild_id(item)
+
+ if targetid not in self.external_targets:
+ self.external_targets.append(targetid)
+
+ def get_unresolved_build_targets(self, dataCache):
+ """
+        Return a list of build targets whose providers
+ are unknown.
+ """
+ unresolved = []
+ for target in self.build_names_index:
+ if target in dataCache.ignored_dependencies:
+ continue
+ if self.build_names_index.index(target) in self.failed_deps:
+ continue
+ if not self.have_build_target(target):
+ unresolved.append(target)
+ return unresolved
+
+ def get_unresolved_run_targets(self, dataCache):
+ """
+        Return a list of runtime targets whose providers
+ are unknown.
+ """
+ unresolved = []
+ for target in self.run_names_index:
+ if target in dataCache.ignored_dependencies:
+ continue
+ if self.run_names_index.index(target) in self.failed_rdeps:
+ continue
+ if not self.have_runtime_target(target):
+ unresolved.append(target)
+ return unresolved
+
+ def get_provider(self, item):
+ """
+ Return a list of providers of item
+ """
+ targetid = self.getbuild_id(item)
+
+ return self.build_targets[targetid]
+
+ def get_dependees(self, itemid):
+ """
+        Return a list of fnids of files which depend on the build target itemid
+ """
+ dependees = []
+ for fnid in self.depids:
+ if itemid in self.depids[fnid]:
+ dependees.append(fnid)
+ return dependees
+
+ def get_dependees_str(self, item):
+ """
+        Return a human-readable list of file names which depend on the build target item
+ """
+ itemid = self.getbuild_id(item)
+ dependees = []
+ for fnid in self.depids:
+ if itemid in self.depids[fnid]:
+ dependees.append(self.fn_index[fnid])
+ return dependees
+
+ def get_rdependees(self, itemid):
+ """
+        Return a list of fnids of files which depend on the runtime target itemid
+ """
+ dependees = []
+ for fnid in self.rdepids:
+ if itemid in self.rdepids[fnid]:
+ dependees.append(fnid)
+ return dependees
+
+ def get_rdependees_str(self, item):
+ """
+        Return a human-readable list of file names which depend on the runtime target item
+ """
+ itemid = self.getrun_id(item)
+ dependees = []
+ for fnid in self.rdepids:
+ if itemid in self.rdepids[fnid]:
+ dependees.append(self.fn_index[fnid])
+ return dependees
+
+ def add_provider(self, cfgData, dataCache, item):
+ try:
+ self.add_provider_internal(cfgData, dataCache, item)
+ except bb.providers.NoProvider:
+ if self.abort:
+ bb.msg.error(bb.msg.domain.Provider, "Nothing PROVIDES '%s' (but '%s' DEPENDS on or otherwise requires it)" % (item, self.get_dependees_str(item)))
+ raise
+ targetid = self.getbuild_id(item)
+ self.remove_buildtarget(targetid)
+
+ self.mark_external_target(item)
+
+ def add_provider_internal(self, cfgData, dataCache, item):
+ """
+ Add the providers of item to the task data
+        Mark entries that were specifically added externally, as opposed to
+        dependencies added internally during dependency resolution
+ """
+
+ if item in dataCache.ignored_dependencies:
+ return
+
+ if not item in dataCache.providers:
+ bb.msg.note(2, bb.msg.domain.Provider, "Nothing PROVIDES '%s' (but '%s' DEPENDS on or otherwise requires it)" % (item, self.get_dependees_str(item)))
+ bb.event.fire(bb.event.NoProvider(item, cfgData))
+ raise bb.providers.NoProvider(item)
+
+ if self.have_build_target(item):
+ return
+
+ all_p = dataCache.providers[item]
+
+ eligible, foundUnique = bb.providers.filterProviders(all_p, item, cfgData, dataCache)
+
+        # don't mutate the list while iterating over it; rebuild it instead
+        eligible = [p for p in eligible if self.getfn_id(p) not in self.failed_fnids]
+
+ if not eligible:
+ bb.msg.note(2, bb.msg.domain.Provider, "No buildable provider PROVIDES '%s' but '%s' DEPENDS on or otherwise requires it. Enable debugging and see earlier logs to find unbuildable providers." % (item, self.get_dependees_str(item)))
+ bb.event.fire(bb.event.NoProvider(item, cfgData))
+ raise bb.providers.NoProvider(item)
+
+ if len(eligible) > 1 and foundUnique == False:
+ if item not in self.consider_msgs_cache:
+ providers_list = []
+ for fn in eligible:
+ providers_list.append(dataCache.pkg_fn[fn])
+ bb.msg.note(1, bb.msg.domain.Provider, "multiple providers are available for %s (%s);" % (item, ", ".join(providers_list)))
+ bb.msg.note(1, bb.msg.domain.Provider, "consider defining PREFERRED_PROVIDER_%s" % item)
+ bb.event.fire(bb.event.MultipleProviders(item, providers_list, cfgData))
+ self.consider_msgs_cache.append(item)
+
+ for fn in eligible:
+ fnid = self.getfn_id(fn)
+ if fnid in self.failed_fnids:
+ continue
+ bb.msg.debug(2, bb.msg.domain.Provider, "adding %s to satisfy %s" % (fn, item))
+ self.add_build_target(fn, item)
+ self.add_tasks(fn, dataCache)
+
+
+ #item = dataCache.pkg_fn[fn]
+
+ def add_rprovider(self, cfgData, dataCache, item):
+ """
+ Add the runtime providers of item to the task data
+ (takes item names from RDEPENDS/PACKAGES namespace)
+ """
+
+ if item in dataCache.ignored_dependencies:
+ return
+
+ if self.have_runtime_target(item):
+ return
+
+ all_p = bb.providers.getRuntimeProviders(dataCache, item)
+
+ if not all_p:
+ bb.msg.error(bb.msg.domain.Provider, "'%s' RDEPENDS/RRECOMMENDS or otherwise requires the runtime entity '%s' but it wasn't found in any PACKAGE or RPROVIDES variables" % (self.get_rdependees_str(item), item))
+ bb.event.fire(bb.event.NoProvider(item, cfgData, runtime=True))
+ raise bb.providers.NoRProvider(item)
+
+ eligible, numberPreferred = bb.providers.filterProvidersRunTime(all_p, item, cfgData, dataCache)
+
+        # don't mutate the list while iterating over it; rebuild it instead
+        eligible = [p for p in eligible if self.getfn_id(p) not in self.failed_fnids]
+
+ if not eligible:
+ bb.msg.error(bb.msg.domain.Provider, "'%s' RDEPENDS/RRECOMMENDS or otherwise requires the runtime entity '%s' but it wasn't found in any PACKAGE or RPROVIDES variables of any buildable targets.\nEnable debugging and see earlier logs to find unbuildable targets." % (self.get_rdependees_str(item), item))
+ bb.event.fire(bb.event.NoProvider(item, cfgData, runtime=True))
+ raise bb.providers.NoRProvider(item)
+
+ if len(eligible) > 1 and numberPreferred == 0:
+ if item not in self.consider_msgs_cache:
+ providers_list = []
+ for fn in eligible:
+ providers_list.append(dataCache.pkg_fn[fn])
+ bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (%s);" % (item, ", ".join(providers_list)))
+ bb.msg.note(2, bb.msg.domain.Provider, "consider defining a PREFERRED_PROVIDER entry to match runtime %s" % item)
+ bb.event.fire(bb.event.MultipleProviders(item,providers_list, cfgData, runtime=True))
+ self.consider_msgs_cache.append(item)
+
+ if numberPreferred > 1:
+ if item not in self.consider_msgs_cache:
+ providers_list = []
+ for fn in eligible:
+ providers_list.append(dataCache.pkg_fn[fn])
+ bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (top %s entries preferred) (%s);" % (item, numberPreferred, ", ".join(providers_list)))
+ bb.msg.note(2, bb.msg.domain.Provider, "consider defining only one PREFERRED_PROVIDER entry to match runtime %s" % item)
+ bb.event.fire(bb.event.MultipleProviders(item,providers_list, cfgData, runtime=True))
+ self.consider_msgs_cache.append(item)
+
+ # run through the list until we find one that we can build
+ for fn in eligible:
+ fnid = self.getfn_id(fn)
+ if fnid in self.failed_fnids:
+ continue
+ bb.msg.debug(2, bb.msg.domain.Provider, "adding '%s' to satisfy runtime '%s'" % (fn, item))
+ self.add_runtime_target(fn, item)
+ self.add_tasks(fn, dataCache)
+
+ def fail_fnid(self, fnid, missing_list = []):
+ """
+ Mark a file as failed (unbuildable)
+ Remove any references from build and runtime provider lists
+
+        missing_list: a list of missing requirements for this target
+ """
+ if fnid in self.failed_fnids:
+ return
+ bb.msg.debug(1, bb.msg.domain.Provider, "File '%s' is unbuildable, removing..." % self.fn_index[fnid])
+ self.failed_fnids.append(fnid)
+ for target in self.build_targets:
+ if fnid in self.build_targets[target]:
+ self.build_targets[target].remove(fnid)
+ if len(self.build_targets[target]) == 0:
+ self.remove_buildtarget(target, missing_list)
+ for target in self.run_targets:
+ if fnid in self.run_targets[target]:
+ self.run_targets[target].remove(fnid)
+ if len(self.run_targets[target]) == 0:
+ self.remove_runtarget(target, missing_list)
+
+ def remove_buildtarget(self, targetid, missing_list = []):
+ """
+ Mark a build target as failed (unbuildable)
+ Trigger removal of any files that have this as a dependency
+ """
+        missing_list = [self.build_names_index[targetid]] + missing_list
+ bb.msg.note(2, bb.msg.domain.Provider, "Target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s" % (self.build_names_index[targetid], missing_list))
+ self.failed_deps.append(targetid)
+ dependees = self.get_dependees(targetid)
+ for fnid in dependees:
+ self.fail_fnid(fnid, missing_list)
+ for taskid in range(len(self.tasks_idepends)):
+ idepends = self.tasks_idepends[taskid]
+ for (idependid, idependtask) in idepends:
+ if idependid == targetid:
+ self.fail_fnid(self.tasks_fnid[taskid], missing_list)
+
+ if self.abort and targetid in self.external_targets:
+ bb.msg.error(bb.msg.domain.Provider, "Required build target '%s' has no buildable providers.\nMissing or unbuildable dependency chain was: %s" % (self.build_names_index[targetid], missing_list))
+ raise bb.providers.NoProvider
+
+ def remove_runtarget(self, targetid, missing_list = []):
+ """
+ Mark a run target as failed (unbuildable)
+ Trigger removal of any files that have this as a dependency
+ """
+        missing_list = [self.run_names_index[targetid]] + missing_list
+
+ bb.msg.note(1, bb.msg.domain.Provider, "Runtime target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s" % (self.run_names_index[targetid], missing_list))
+ self.failed_rdeps.append(targetid)
+ dependees = self.get_rdependees(targetid)
+ for fnid in dependees:
+ self.fail_fnid(fnid, missing_list)
+
+ def add_unresolved(self, cfgData, dataCache):
+ """
+ Resolve all unresolved build and runtime targets
+ """
+ bb.msg.note(1, bb.msg.domain.TaskData, "Resolving any missing task queue dependencies")
+ while 1:
+ added = 0
+ for target in self.get_unresolved_build_targets(dataCache):
+ try:
+ self.add_provider_internal(cfgData, dataCache, target)
+ added = added + 1
+ except bb.providers.NoProvider:
+ targetid = self.getbuild_id(target)
+ if self.abort and targetid in self.external_targets:
+ bb.msg.error(bb.msg.domain.Provider, "Nothing PROVIDES '%s' (but '%s' DEPENDS on or otherwise requires it)" % (target, self.get_dependees_str(target)))
+ raise
+ self.remove_buildtarget(targetid)
+ for target in self.get_unresolved_run_targets(dataCache):
+ try:
+ self.add_rprovider(cfgData, dataCache, target)
+ added = added + 1
+ except bb.providers.NoRProvider:
+ self.remove_runtarget(self.getrun_id(target))
+ bb.msg.debug(1, bb.msg.domain.TaskData, "Resolved " + str(added) + " extra dependecies")
+ if added == 0:
+ break
+ # self.dump_data()
+
+ def dump_data(self):
+ """
+ Dump some debug information on the internal data structures
+ """
+ bb.msg.debug(3, bb.msg.domain.TaskData, "build_names:")
+ bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.build_names_index))
+
+ bb.msg.debug(3, bb.msg.domain.TaskData, "run_names:")
+ bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.run_names_index))
+
+ bb.msg.debug(3, bb.msg.domain.TaskData, "build_targets:")
+ for buildid in range(len(self.build_names_index)):
+ target = self.build_names_index[buildid]
+ targets = "None"
+ if buildid in self.build_targets:
+ targets = self.build_targets[buildid]
+ bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s: %s" % (buildid, target, targets))
+
+ bb.msg.debug(3, bb.msg.domain.TaskData, "run_targets:")
+ for runid in range(len(self.run_names_index)):
+ target = self.run_names_index[runid]
+ targets = "None"
+ if runid in self.run_targets:
+ targets = self.run_targets[runid]
+ bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s: %s" % (runid, target, targets))
+
+ bb.msg.debug(3, bb.msg.domain.TaskData, "tasks:")
+ for task in range(len(self.tasks_name)):
+ bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s - %s: %s" % (
+ task,
+ self.fn_index[self.tasks_fnid[task]],
+ self.tasks_name[task],
+ self.tasks_tdepends[task]))
+
+ bb.msg.debug(3, bb.msg.domain.TaskData, "dependency ids (per fn):")
+ for fnid in self.depids:
+ bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.depids[fnid]))
+
+ bb.msg.debug(3, bb.msg.domain.TaskData, "runtime dependency ids (per fn):")
+ for fnid in self.rdepids:
+ bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.rdepids[fnid]))
+
+
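+# Editor's sketch (hypothetical file and target names, not part of the
+# original module): a minimal walk through the id-index scheme above.
+def _taskdata_id_demo():
+    td = TaskData()
+    fnid = td.getfn_id( "/recipes/foo_1.0.bb" )
+    taskid = td.gettask_id( "/recipes/foo_1.0.bb", "do_build" )
+    td.add_build_target( "/recipes/foo_1.0.bb", "foo" )
+    # get_provider() maps the build target back to candidate file ids
+    assert td.get_provider( "foo" ) == [ fnid ]
+    return fnid, taskid
+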
diff --git a/bitbake-dev/lib/bb/ui/__init__.py b/bitbake-dev/lib/bb/ui/__init__.py
new file mode 100644
index 0000000000..c6a377a8e6
--- /dev/null
+++ b/bitbake-dev/lib/bb/ui/__init__.py
@@ -0,0 +1,18 @@
+#
+# BitBake UI Implementation
+#
+# Copyright (C) 2006-2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
diff --git a/bitbake-dev/lib/bb/ui/depexplorer.py b/bitbake-dev/lib/bb/ui/depexplorer.py
new file mode 100644
index 0000000000..becbb5dd5d
--- /dev/null
+++ b/bitbake-dev/lib/bb/ui/depexplorer.py
@@ -0,0 +1,271 @@
+#
+# BitBake Graphical GTK based Dependency Explorer
+#
+# Copyright (C) 2007 Ross Burton
+# Copyright (C) 2007 - 2008 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import gobject
+import gtk
+import threading
+import xmlrpclib
+
+# Package Model
+COL_PKG_NAME = 0
+
+# Dependency Model
+(TYPE_DEP, TYPE_RDEP) = (0, 1)
+(COL_DEP_TYPE, COL_DEP_PARENT, COL_DEP_PACKAGE) = (0, 1, 2)
+
+class PackageDepView(gtk.TreeView):
+ def __init__(self, model, dep_type, label):
+ gtk.TreeView.__init__(self)
+ self.current = None
+ self.dep_type = dep_type
+ self.filter_model = model.filter_new()
+ self.filter_model.set_visible_func(self._filter)
+ self.set_model(self.filter_model)
+ #self.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
+ self.append_column(gtk.TreeViewColumn(label, gtk.CellRendererText(), text=COL_DEP_PACKAGE))
+
+ def _filter(self, model, iter):
+ (this_type, package) = model.get(iter, COL_DEP_TYPE, COL_DEP_PARENT)
+ if this_type != self.dep_type: return False
+ return package == self.current
+
+ def set_current_package(self, package):
+ self.current = package
+ self.filter_model.refilter()
+
+class PackageReverseDepView(gtk.TreeView):
+ def __init__(self, model, label):
+ gtk.TreeView.__init__(self)
+ self.current = None
+ self.filter_model = model.filter_new()
+ self.filter_model.set_visible_func(self._filter)
+ self.set_model(self.filter_model)
+ self.append_column(gtk.TreeViewColumn(label, gtk.CellRendererText(), text=COL_DEP_PARENT))
+
+ def _filter(self, model, iter):
+ package = model.get_value(iter, COL_DEP_PACKAGE)
+ return package == self.current
+
+ def set_current_package(self, package):
+ self.current = package
+ self.filter_model.refilter()
+
+class DepExplorer(gtk.Window):
+ def __init__(self):
+ gtk.Window.__init__(self)
+ self.set_title("Dependency Explorer")
+ self.set_default_size(500, 500)
+ self.connect("delete-event", gtk.main_quit)
+
+ # Create the data models
+ self.pkg_model = gtk.ListStore(gobject.TYPE_STRING)
+ self.depends_model = gtk.ListStore(gobject.TYPE_INT, gobject.TYPE_STRING, gobject.TYPE_STRING)
+
+ pane = gtk.HPaned()
+ pane.set_position(250)
+ self.add(pane)
+
+ # The master list of packages
+ scrolled = gtk.ScrolledWindow()
+ scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
+ scrolled.set_shadow_type(gtk.SHADOW_IN)
+ self.pkg_treeview = gtk.TreeView(self.pkg_model)
+ self.pkg_treeview.get_selection().connect("changed", self.on_cursor_changed)
+ self.pkg_treeview.append_column(gtk.TreeViewColumn("Package", gtk.CellRendererText(), text=COL_PKG_NAME))
+ pane.add1(scrolled)
+ scrolled.add(self.pkg_treeview)
+
+ box = gtk.VBox(homogeneous=True, spacing=4)
+
+ # Runtime Depends
+ scrolled = gtk.ScrolledWindow()
+ scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
+ scrolled.set_shadow_type(gtk.SHADOW_IN)
+ self.rdep_treeview = PackageDepView(self.depends_model, TYPE_RDEP, "Runtime Depends")
+ self.rdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
+ scrolled.add(self.rdep_treeview)
+ box.add(scrolled)
+
+ # Build Depends
+ scrolled = gtk.ScrolledWindow()
+ scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
+ scrolled.set_shadow_type(gtk.SHADOW_IN)
+ self.dep_treeview = PackageDepView(self.depends_model, TYPE_DEP, "Build Depends")
+ self.dep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
+ scrolled.add(self.dep_treeview)
+ box.add(scrolled)
+ pane.add2(box)
+
+ # Reverse Depends
+ scrolled = gtk.ScrolledWindow()
+ scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
+ scrolled.set_shadow_type(gtk.SHADOW_IN)
+ self.revdep_treeview = PackageReverseDepView(self.depends_model, "Reverse Depends")
+ self.revdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PARENT)
+ scrolled.add(self.revdep_treeview)
+ box.add(scrolled)
+ pane.add2(box)
+
+ self.show_all()
+
+ def on_package_activated(self, treeview, path, column, data_col):
+ model = treeview.get_model()
+ package = model.get_value(model.get_iter(path), data_col)
+
+ pkg_path = []
+ def finder(model, path, iter, needle):
+ package = model.get_value(iter, COL_PKG_NAME)
+ if package == needle:
+ pkg_path.append(path)
+ return True
+ else:
+ return False
+ self.pkg_model.foreach(finder, package)
+ if pkg_path:
+ self.pkg_treeview.get_selection().select_path(pkg_path[0])
+ self.pkg_treeview.scroll_to_cell(pkg_path[0])
+
+ def on_cursor_changed(self, selection):
+ (model, it) = selection.get_selected()
+        if it is None:
+ current_package = None
+ else:
+ current_package = model.get_value(it, COL_PKG_NAME)
+ self.rdep_treeview.set_current_package(current_package)
+ self.dep_treeview.set_current_package(current_package)
+ self.revdep_treeview.set_current_package(current_package)
+
+
+def parse(depgraph, pkg_model, depends_model):
+
+ for package in depgraph["pn"]:
+ pkg_model.set(pkg_model.append(), COL_PKG_NAME, package)
+
+ for package in depgraph["depends"]:
+ for depend in depgraph["depends"][package]:
+ depends_model.set (depends_model.append(),
+ COL_DEP_TYPE, TYPE_DEP,
+ COL_DEP_PARENT, package,
+ COL_DEP_PACKAGE, depend)
+
+ for package in depgraph["rdepends-pn"]:
+ for rdepend in depgraph["rdepends-pn"][package]:
+ depends_model.set (depends_model.append(),
+ COL_DEP_TYPE, TYPE_RDEP,
+ COL_DEP_PARENT, package,
+ COL_DEP_PACKAGE, rdepend)
+
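+# Editor's sketch (hypothetical package names): the shape of the depgraph
+# dictionary that parse() above expects from bb.event.DepTreeGenerated.
+def _parse_demo( pkg_model, depends_model ):
+    depgraph = {
+        "pn": { "foo": {}, "libbar": {} },
+        "depends": { "foo": [ "libbar" ] },
+        "rdepends-pn": { "foo": [ "libbar" ] },
+    }
+    parse( depgraph, pkg_model, depends_model )
+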
+class ProgressBar(gtk.Window):
+ def __init__(self):
+
+ gtk.Window.__init__(self)
+ self.set_title("Parsing .bb files, please wait...")
+ self.set_default_size(500, 0)
+ self.connect("delete-event", gtk.main_quit)
+
+ self.progress = gtk.ProgressBar()
+ self.add(self.progress)
+ self.show_all()
+
+class gtkthread(threading.Thread):
+ quit = threading.Event()
+ def __init__(self, shutdown):
+ threading.Thread.__init__(self)
+ self.setDaemon(True)
+ self.shutdown = shutdown
+
+ def run(self):
+ gobject.threads_init()
+ gtk.gdk.threads_init()
+ gtk.main()
+ gtkthread.quit.set()
+
+def init(server, eventHandler):
+
+ try:
+ cmdline = server.runCommand(["getCmdLineAction"])
+ if not cmdline or cmdline[0] != "generateDotGraph":
+ print "This UI is only compatible with the -g option"
+ return
+ ret = server.runCommand(["generateDepTreeEvent", cmdline[1]])
+ if ret != True:
+ print "Couldn't run command! %s" % ret
+ return
+ except xmlrpclib.Fault, x:
+ print "XMLRPC Fault getting commandline:\n %s" % x
+ return
+
+ shutdown = 0
+
+ gtkgui = gtkthread(shutdown)
+ gtkgui.start()
+
+ gtk.gdk.threads_enter()
+ pbar = ProgressBar()
+ dep = DepExplorer()
+ gtk.gdk.threads_leave()
+
+ while True:
+ try:
+ event = eventHandler.waitEvent(0.25)
+ if gtkthread.quit.isSet():
+ break
+
+ if event is None:
+ continue
+ if event[0].startswith('bb.event.ParseProgress'):
+ x = event[1]['sofar']
+ y = event[1]['total']
+                if x == y:
+                    print("\nParsing finished. %d cached, %d parsed, %d skipped, %d masked, %d errors."
+                        % ( event[1]['cached'], event[1]['parsed'], event[1]['skipped'], event[1]['masked'], event[1]['errors']))
+                gtk.gdk.threads_enter()
+                if x == y:
+                    # GTK calls must hold the GDK lock when made off the main thread
+                    pbar.hide()
+                pbar.progress.set_fraction(float(x)/float(y))
+                pbar.progress.set_text("%d/%d (%2d %%)" % (x, y, x*100/y))
+                gtk.gdk.threads_leave()
+ continue
+
+ if event[0] == "bb.event.DepTreeGenerated":
+ gtk.gdk.threads_enter()
+ parse(event[1]['_depgraph'], dep.pkg_model, dep.depends_model)
+ gtk.gdk.threads_leave()
+
+ if event[0] == 'bb.command.CookerCommandCompleted':
+ continue
+ if event[0] == 'bb.command.CookerCommandFailed':
+ print "Command execution failed: %s" % event[1]['error']
+ break
+ if event[0] == 'bb.cooker.CookerExit':
+ break
+
+ continue
+
+ except KeyboardInterrupt:
+ if shutdown == 2:
+ print "\nThird Keyboard Interrupt, exit.\n"
+ break
+ if shutdown == 1:
+ print "\nSecond Keyboard Interrupt, stopping...\n"
+ server.runCommand(["stateStop"])
+ if shutdown == 0:
+ print "\nKeyboard Interrupt, closing down...\n"
+ server.runCommand(["stateShutdown"])
+ shutdown = shutdown + 1
+ pass
+
diff --git a/bitbake-dev/lib/bb/ui/knotty.py b/bitbake-dev/lib/bb/ui/knotty.py
new file mode 100644
index 0000000000..9e89660307
--- /dev/null
+++ b/bitbake-dev/lib/bb/ui/knotty.py
@@ -0,0 +1,157 @@
+#
+# BitBake (No)TTY UI Implementation
+#
+# Handling output to TTYs or files (no TTY)
+#
+# Copyright (C) 2006-2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import bb
+from bb import cooker
+
+import sys
+import time
+import itertools
+import xmlrpclib
+
+parsespin = itertools.cycle( '|/-\\' )
+
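+# Editor's note: events arrive from the server as (event-name, data-dict)
+# pairs, e.g. ('bb.msg.MsgNote', {'_message': '...'}); init() below
+# dispatches purely on string prefixes of the event name.
+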
+def init(server, eventHandler):
+
+ # Get values of variables which control our output
+ includelogs = server.runCommand(["readVariable", "BBINCLUDELOGS"])
+ loglines = server.runCommand(["readVariable", "BBINCLUDELOGS_LINES"])
+
+ try:
+ cmdline = server.runCommand(["getCmdLineAction"])
+ #print cmdline
+ if not cmdline:
+ return 1
+ ret = server.runCommand(cmdline)
+ if ret != True:
+ print "Couldn't get default commandline! %s" % ret
+ return 1
+ except xmlrpclib.Fault, x:
+ print "XMLRPC Fault getting commandline:\n %s" % x
+ return 1
+
+ shutdown = 0
+ return_value = 0
+ while True:
+ try:
+ event = eventHandler.waitEvent(0.25)
+ if event is None:
+ continue
+ #print event
+ if event[0].startswith('bb.event.Pkg'):
+ print "NOTE: %s" % event[1]['_message']
+ continue
+ if event[0].startswith('bb.msg.MsgPlain'):
+ print event[1]['_message']
+ continue
+ if event[0].startswith('bb.msg.MsgDebug'):
+ print 'DEBUG: ' + event[1]['_message']
+ continue
+ if event[0].startswith('bb.msg.MsgNote'):
+ print 'NOTE: ' + event[1]['_message']
+ continue
+ if event[0].startswith('bb.msg.MsgWarn'):
+ print 'WARNING: ' + event[1]['_message']
+ continue
+ if event[0].startswith('bb.msg.MsgError'):
+ return_value = 1
+ print 'ERROR: ' + event[1]['_message']
+ continue
+ if event[0].startswith('bb.build.TaskFailed'):
+ return_value = 1
+ logfile = event[1]['logfile']
+ if logfile:
+ print "ERROR: Logfile of failure stored in %s." % logfile
+ if includelogs:
+ print "Log data follows:"
+ f = open(logfile, "r")
+ lines = []
+ while True:
+ l = f.readline()
+ if l == '':
+ break
+ l = l.rstrip()
+ if loglines:
+ lines.append(' | %s' % l)
+ if len(lines) > int(loglines):
+ lines.pop(0)
+ else:
+ print '| %s' % l
+ f.close()
+ if lines:
+ for line in lines:
+ print line
+ if event[0].startswith('bb.build.Task'):
+ print "NOTE: %s" % event[1]['_message']
+ continue
+ if event[0].startswith('bb.event.ParseProgress'):
+ x = event[1]['sofar']
+ y = event[1]['total']
+ if os.isatty(sys.stdout.fileno()):
+ sys.stdout.write("\rNOTE: Handling BitBake files: %s (%04d/%04d) [%2d %%]" % ( parsespin.next(), x, y, x*100/y ) )
+ sys.stdout.flush()
+ else:
+ if x == 1:
+ sys.stdout.write("Parsing .bb files, please wait...")
+ sys.stdout.flush()
+ if x == y:
+ sys.stdout.write("done.")
+ sys.stdout.flush()
+ if x == y:
+ print("\nParsing finished. %d cached, %d parsed, %d skipped, %d masked, %d errors."
+ % ( event[1]['cached'], event[1]['parsed'], event[1]['skipped'], event[1]['masked'], event[1]['errors']))
+ continue
+
+ if event[0] == 'bb.command.CookerCommandCompleted':
+ break
+ if event[0] == 'bb.command.CookerCommandFailed':
+ return_value = 1
+ print "Command execution failed: %s" % event[1]['error']
+ break
+ if event[0] == 'bb.cooker.CookerExit':
+ break
+
+ # ignore
+ if event[0].startswith('bb.event.BuildStarted'):
+ continue
+ if event[0].startswith('bb.event.BuildCompleted'):
+ continue
+ if event[0].startswith('bb.event.MultipleProviders'):
+ continue
+ if event[0].startswith('bb.runqueue.runQueue'):
+ continue
+ if event[0].startswith('bb.event.StampUpdate'):
+ continue
+ print "Unknown Event: %s" % event
+
+ except KeyboardInterrupt:
+ if shutdown == 2:
+ print "\nThird Keyboard Interrupt, exit.\n"
+ break
+ if shutdown == 1:
+ print "\nSecond Keyboard Interrupt, stopping...\n"
+ server.runCommand(["stateStop"])
+ if shutdown == 0:
+ print "\nKeyboard Interrupt, closing down...\n"
+ server.runCommand(["stateShutdown"])
+ shutdown = shutdown + 1
+ pass
+ return return_value
diff --git a/bitbake-dev/lib/bb/ui/ncurses.py b/bitbake-dev/lib/bb/ui/ncurses.py
new file mode 100644
index 0000000000..1476baa61f
--- /dev/null
+++ b/bitbake-dev/lib/bb/ui/ncurses.py
@@ -0,0 +1,333 @@
+#
+# BitBake Curses UI Implementation
+#
+# Implements an ncurses frontend for the BitBake utility.
+#
+# Copyright (C) 2006 Michael 'Mickey' Lauer
+# Copyright (C) 2006-2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ We have the following windows:
+
+    1.) Main Window: Shows what we are ultimately building and how far we are. Includes a status bar.
+    2.) Thread Activity Window: Shows one status line for every concurrent BitBake thread.
+    3.) Command Line Window: Contains an interactive command line where you can interact with BitBake.
+
+    Basic window layout is like this:
+
+ |---------------------------------------------------------|
+ | <Main Window> | <Thread Activity Window> |
+ | | 0: foo do_compile complete|
+ | Building Gtk+-2.6.10 | 1: bar do_patch complete |
+ | Status: 60% | ... |
+ | | ... |
+ | | ... |
+ |---------------------------------------------------------|
+ |<Command Line Window> |
+ |>>> which virtual/kernel |
+ |openzaurus-kernel |
+ |>>> _ |
+ |---------------------------------------------------------|
+
+"""
+
+import os, sys, curses, time, random, threading, itertools, time
+from curses.textpad import Textbox
+import bb
+from bb import ui
+from bb.ui import uihelper
+
+parsespin = itertools.cycle( '|/-\\' )
+
+X = 0
+Y = 1
+WIDTH = 2
+HEIGHT = 3
+
+MAXSTATUSLENGTH = 32
+
+class NCursesUI:
+ """
+ NCurses UI Class
+ """
+ class Window:
+ """Base Window Class"""
+ def __init__( self, x, y, width, height, fg=curses.COLOR_BLACK, bg=curses.COLOR_WHITE ):
+ self.win = curses.newwin( height, width, y, x )
+ self.dimensions = ( x, y, width, height )
+ """
+ if curses.has_colors():
+ color = 1
+ curses.init_pair( color, fg, bg )
+ self.win.bkgdset( ord(' '), curses.color_pair(color) )
+ else:
+ self.win.bkgdset( ord(' '), curses.A_BOLD )
+ """
+ self.erase()
+ self.setScrolling()
+ self.win.noutrefresh()
+
+ def erase( self ):
+ self.win.erase()
+
+ def setScrolling( self, b = True ):
+ self.win.scrollok( b )
+ self.win.idlok( b )
+
+ def setBoxed( self ):
+ self.boxed = True
+ self.win.box()
+ self.win.noutrefresh()
+
+ def setText( self, x, y, text, *args ):
+ self.win.addstr( y, x, text, *args )
+ self.win.noutrefresh()
+
+ def appendText( self, text, *args ):
+ self.win.addstr( text, *args )
+ self.win.noutrefresh()
+
+ def drawHline( self, y ):
+ self.win.hline( y, 0, curses.ACS_HLINE, self.dimensions[WIDTH] )
+ self.win.noutrefresh()
+
+ class DecoratedWindow( Window ):
+ """Base class for windows with a box and a title bar"""
+ def __init__( self, title, x, y, width, height, fg=curses.COLOR_BLACK, bg=curses.COLOR_WHITE ):
+ NCursesUI.Window.__init__( self, x+1, y+3, width-2, height-4, fg, bg )
+ self.decoration = NCursesUI.Window( x, y, width, height, fg, bg )
+ self.decoration.setBoxed()
+ self.decoration.win.hline( 2, 1, curses.ACS_HLINE, width-2 )
+ self.setTitle( title )
+
+ def setTitle( self, title ):
+ self.decoration.setText( 1, 1, title.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD )
+
+ #-------------------------------------------------------------------------#
+# class TitleWindow( Window ):
+ #-------------------------------------------------------------------------#
+# """Title Window"""
+# def __init__( self, x, y, width, height ):
+# NCursesUI.Window.__init__( self, x, y, width, height )
+# version = bb.__version__
+# title = "BitBake %s" % version
+# credit = "(C) 2003-2007 Team BitBake"
+# #self.win.hline( 2, 1, curses.ACS_HLINE, width-2 )
+# self.win.border()
+# self.setText( 1, 1, title.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD )
+# self.setText( 1, 2, credit.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD )
+
+ #-------------------------------------------------------------------------#
+ class ThreadActivityWindow( DecoratedWindow ):
+ #-------------------------------------------------------------------------#
+ """Thread Activity Window"""
+ def __init__( self, x, y, width, height ):
+ NCursesUI.DecoratedWindow.__init__( self, "Thread Activity", x, y, width, height )
+
+ def setStatus( self, thread, text ):
+ line = "%02d: %s" % ( thread, text )
+ width = self.dimensions[WIDTH]
+ if ( len(line) > width ):
+ line = line[:width-3] + "..."
+ else:
+ line = line.ljust( width )
+ self.setText( 0, thread, line )
+
+ #-------------------------------------------------------------------------#
+ class MainWindow( DecoratedWindow ):
+ #-------------------------------------------------------------------------#
+ """Main Window"""
+ def __init__( self, x, y, width, height ):
+ self.StatusPosition = width - MAXSTATUSLENGTH
+ NCursesUI.DecoratedWindow.__init__( self, None, x, y, width, height )
+ curses.nl()
+
+ def setTitle( self, title ):
+ title = "BitBake %s" % bb.__version__
+ self.decoration.setText( 2, 1, title, curses.A_BOLD )
+ self.decoration.setText( self.StatusPosition - 8, 1, "Status:", curses.A_BOLD )
+
+ def setStatus(self, status):
+ while len(status) < MAXSTATUSLENGTH:
+ status = status + " "
+ self.decoration.setText( self.StatusPosition, 1, status, curses.A_BOLD )
+
+
+ #-------------------------------------------------------------------------#
+ class ShellOutputWindow( DecoratedWindow ):
+ #-------------------------------------------------------------------------#
+ """Interactive Command Line Output"""
+ def __init__( self, x, y, width, height ):
+ NCursesUI.DecoratedWindow.__init__( self, "Command Line Window", x, y, width, height )
+
+ #-------------------------------------------------------------------------#
+ class ShellInputWindow( Window ):
+ #-------------------------------------------------------------------------#
+ """Interactive Command Line Input"""
+ def __init__( self, x, y, width, height ):
+ NCursesUI.Window.__init__( self, x, y, width, height )
+
+# self.textbox = Textbox( self.win )
+# t = threading.Thread()
+# t.run = self.textbox.edit
+# t.start()
+
+ #-------------------------------------------------------------------------#
+ def main(self, stdscr, server, eventHandler):
+ #-------------------------------------------------------------------------#
+ height, width = stdscr.getmaxyx()
+
+ # for now split it like this:
+ # MAIN_y + THREAD_y = 2/3 of the screen at the top
+ # MAIN_x = 2/3 left, THREAD_x = 1/3 right
+ # CLI_y = 1/3 of the screen at the bottom
+ # CLI_x = full width
+
+ main_left = 0
+ main_top = 0
+ main_height = ( height / 3 * 2 )
+ main_width = ( width / 3 ) * 2
+ clo_left = main_left
+ clo_top = main_top + main_height
+ clo_height = height - main_height - main_top - 1
+ clo_width = width
+ cli_left = main_left
+ cli_top = clo_top + clo_height
+ cli_height = 1
+ cli_width = width
+ thread_left = main_left + main_width
+ thread_top = main_top
+ thread_height = main_height
+ thread_width = width - main_width
+
+ #tw = self.TitleWindow( 0, 0, width, main_top )
+ mw = self.MainWindow( main_left, main_top, main_width, main_height )
+ taw = self.ThreadActivityWindow( thread_left, thread_top, thread_width, thread_height )
+ clo = self.ShellOutputWindow( clo_left, clo_top, clo_width, clo_height )
+ cli = self.ShellInputWindow( cli_left, cli_top, cli_width, cli_height )
+ cli.setText( 0, 0, "BB>" )
+
+ mw.setStatus("Idle")
+
+ helper = uihelper.BBUIHelper()
+ shutdown = 0
+
+ try:
+ cmdline = server.runCommand(["getCmdLineAction"])
+ if not cmdline:
+ return
+ ret = server.runCommand(cmdline)
+ if ret != True:
+ print "Couldn't get default commandlind! %s" % ret
+ return
+ except xmlrpclib.Fault, x:
+ print "XMLRPC Fault getting commandline:\n %s" % x
+ return
+
+ exitflag = False
+ while not exitflag:
+ try:
+ event = eventHandler.waitEvent(0.25)
+ if not event:
+ continue
+ helper.eventHandler(event)
+ #mw.appendText("%s\n" % event[0])
+ if event[0].startswith('bb.event.Pkg'):
+ mw.appendText("NOTE: %s\n" % event[1]['_message'])
+ if event[0].startswith('bb.build.Task'):
+ mw.appendText("NOTE: %s\n" % event[1]['_message'])
+ if event[0].startswith('bb.msg.MsgDebug'):
+ mw.appendText('DEBUG: ' + event[1]['_message'] + '\n')
+ if event[0].startswith('bb.msg.MsgNote'):
+ mw.appendText('NOTE: ' + event[1]['_message'] + '\n')
+ if event[0].startswith('bb.msg.MsgWarn'):
+ mw.appendText('WARNING: ' + event[1]['_message'] + '\n')
+ if event[0].startswith('bb.msg.MsgError'):
+ mw.appendText('ERROR: ' + event[1]['_message'] + '\n')
+ if event[0].startswith('bb.msg.MsgFatal'):
+ mw.appendText('FATAL: ' + event[1]['_message'] + '\n')
+ if event[0].startswith('bb.event.ParseProgress'):
+ x = event[1]['sofar']
+ y = event[1]['total']
+ if x == y:
+ mw.setStatus("Idle")
+ mw.appendText("Parsing finished. %d cached, %d parsed, %d skipped, %d masked."
+ % ( event[1]['cached'], event[1]['parsed'], event[1]['skipped'], event[1]['masked'] ))
+ else:
+ mw.setStatus("Parsing: %s (%04d/%04d) [%2d %%]" % ( parsespin.next(), x, y, x*100/y ) )
+# if event[0].startswith('bb.build.TaskFailed'):
+# if event[1]['logfile']:
+# if data.getVar("BBINCLUDELOGS", d):
+# bb.msg.error(bb.msg.domain.Build, "log data follows (%s)" % logfile)
+# number_of_lines = data.getVar("BBINCLUDELOGS_LINES", d)
+# if number_of_lines:
+# os.system('tail -n%s %s' % (number_of_lines, logfile))
+# else:
+# f = open(logfile, "r")
+# while True:
+# l = f.readline()
+# if l == '':
+# break
+# l = l.rstrip()
+# print '| %s' % l
+# f.close()
+# else:
+# bb.msg.error(bb.msg.domain.Build, "see log in %s" % logfile)
+
+ if event[0] == 'bb.command.CookerCommandCompleted':
+ exitflag = True
+ if event[0] == 'bb.command.CookerCommandFailed':
+ mw.appendText("Command execution failed: %s" % event[1]['error'])
+ time.sleep(2)
+ exitflag = True
+ if event[0] == 'bb.cooker.CookerExit':
+ exitflag = True
+
+ if helper.needUpdate:
+ activetasks, failedtasks = helper.getTasks()
+ taw.erase()
+ taw.setText(0, 0, "")
+ if activetasks:
+ taw.appendText("Active Tasks:\n")
+ for task in activetasks:
+ taw.appendText(task)
+ if failedtasks:
+ taw.appendText("Failed Tasks:\n")
+ for task in failedtasks:
+ taw.appendText(task)
+
+ curses.doupdate()
+ except KeyboardInterrupt:
+ if shutdown == 2:
+ mw.appendText("Third Keyboard Interrupt, exit.\n")
+ exitflag = True
+ if shutdown == 1:
+ mw.appendText("Second Keyboard Interrupt, stopping...\n")
+ server.runCommand(["stateStop"])
+ if shutdown == 0:
+ mw.appendText("Keyboard Interrupt, closing down...\n")
+ server.runCommand(["stateShutdown"])
+ shutdown = shutdown + 1
+ pass
+
+def init(server, eventHandler):
+ ui = NCursesUI()
+ try:
+ curses.wrapper(ui.main, server, eventHandler)
+ except:
+ import traceback
+ traceback.print_exc()
+
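The geometry above splits the terminal into a main window occupying the left
two thirds of the top two thirds of the screen, a thread-activity window in
the remaining right third, and a command-line output window across the bottom
third with a single input line beneath it. A worked example of that integer
arithmetic for a hypothetical 80x24 terminal (Python 2 division):

    height, width = 24, 80
    main_height = height / 3 * 2            # 16 rows for the top section
    main_width = (width / 3) * 2            # 52 columns for the main window
    thread_width = width - main_width       # 28 columns for thread activity
    clo_height = height - main_height - 1   # 7 rows of command-line output
    cli_height = 1                          # one row for the "BB>" prompt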
diff --git a/bitbake-dev/lib/bb/ui/uievent.py b/bitbake-dev/lib/bb/ui/uievent.py
new file mode 100644
index 0000000000..9d724d7fc5
--- /dev/null
+++ b/bitbake-dev/lib/bb/ui/uievent.py
@@ -0,0 +1,127 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
+# Copyright (C) 2006 - 2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+"""
+Use this class to fork off a thread to receive event callbacks from the bitbake
+server and queue them for the UI to process. This mechanism must be used to avoid
+client/server deadlocks.
+"""
+
+import sys, socket, threading
+from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
+
+class BBUIEventQueue:
+ def __init__(self, BBServer):
+
+ self.eventQueue = []
+ self.eventQueueLock = threading.Lock()
+ self.eventQueueNotify = threading.Event()
+
+ self.BBServer = BBServer
+
+ self.t = threading.Thread()
+ self.t.setDaemon(True)
+ self.t.run = self.startCallbackHandler
+ self.t.start()
+
+ def getEvent(self):
+
+ self.eventQueueLock.acquire()
+
+ if len(self.eventQueue) == 0:
+ self.eventQueueLock.release()
+ return None
+
+ item = self.eventQueue.pop(0)
+
+ if len(self.eventQueue) == 0:
+ self.eventQueueNotify.clear()
+
+ self.eventQueueLock.release()
+
+ return item
+
+ def waitEvent(self, delay):
+ self.eventQueueNotify.wait(delay)
+ return self.getEvent()
+
+ def queue_event(self, event):
+
+ self.eventQueueLock.acquire()
+ self.eventQueue.append(event)
+ self.eventQueueNotify.set()
+ self.eventQueueLock.release()
+
+ def startCallbackHandler(self):
+
+ server = UIXMLRPCServer()
+ self.host, self.port = server.socket.getsockname()
+
+ server.register_function( self.system_quit, "event.quit" )
+ server.register_function( self.queue_event, "event.send" )
+ server.socket.settimeout(1)
+
+ self.EventHandle = self.BBServer.registerEventHandler(self.host, self.port)
+
+ self.server = server
+ while not server.quit:
+ server.handle_request()
+ server.server_close()
+
+ def system_quit( self ):
+ """
+ Shut down the callback thread
+ """
+ try:
+ self.BBServer.unregisterEventHandler(self.EventHandle)
+ except:
+ pass
+ self.server.quit = True
+
+class UIXMLRPCServer (SimpleXMLRPCServer):
+
+ def __init__( self, interface = ("localhost", 0) ):
+ self.quit = False
+ SimpleXMLRPCServer.__init__( self,
+ interface,
+ requestHandler=SimpleXMLRPCRequestHandler,
+ logRequests=False, allow_none=True)
+
+ def get_request(self):
+ while not self.quit:
+ try:
+ sock, addr = self.socket.accept()
+ sock.settimeout(1)
+ return (sock, addr)
+ except socket.timeout:
+ pass
+ return (None,None)
+
+ def close_request(self, request):
+ if request is None:
+ return
+ SimpleXMLRPCServer.close_request(self, request)
+
+ def process_request(self, request, client_address):
+ if request is None:
+ return
+ SimpleXMLRPCServer.process_request(self, request, client_address)
+
+
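BBUIEventQueue works by running its own small XML-RPC server in a daemon
thread and registering its address with the bitbake server, which then pushes
events back via the exported event.send method. A UI drains the queue with
waitEvent, roughly as in this sketch (assuming server is a proxy to the
bitbake server, as in the UIs above, and handle is a hypothetical dispatcher):

    from bb.ui.uievent import BBUIEventQueue

    eventHandler = BBUIEventQueue(server)
    while True:
        event = eventHandler.waitEvent(0.25)   # block up to 250 ms for an event
        if event is None:
            continue                           # timed out; poll again
        handle(event)                          # hypothetical UI-specific handling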
diff --git a/bitbake-dev/lib/bb/ui/uihelper.py b/bitbake-dev/lib/bb/ui/uihelper.py
new file mode 100644
index 0000000000..246844c9d2
--- /dev/null
+++ b/bitbake-dev/lib/bb/ui/uihelper.py
@@ -0,0 +1,49 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
+# Copyright (C) 2006 - 2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+class BBUIHelper:
+ def __init__(self):
+ self.needUpdate = False
+ self.running_tasks = {}
+ self.failed_tasks = {}
+
+ def eventHandler(self, event):
+ if event[0].startswith('bb.build.TaskStarted'):
+ self.running_tasks["%s %s\n" % (event[1]['_package'], event[1]['_task'])] = ""
+ self.needUpdate = True
+ if event[0].startswith('bb.build.TaskSucceeded'):
+ del self.running_tasks["%s %s\n" % (event[1]['_package'], event[1]['_task'])]
+ self.needUpdate = True
+ if event[0].startswith('bb.build.TaskFailed'):
+ del self.running_tasks["%s %s\n" % (event[1]['_package'], event[1]['_task'])]
+ self.failed_tasks["%s %s\n" % (event[1]['_package'], event[1]['_task'])] = ""
+ self.needUpdate = True
+
+ # Add runqueue event handling
+ #if event[0].startswith('bb.runqueue.runQueueTaskCompleted'):
+ # a = 1
+ #if event[0].startswith('bb.runqueue.runQueueTaskStarted'):
+ # a = 1
+ #if event[0].startswith('bb.runqueue.runQueueTaskFailed'):
+ # a = 1
+ #if event[0].startswith('bb.runqueue.runQueueExitWait'):
+ # a = 1
+
+ def getTasks(self):
+ return (self.running_tasks, self.failed_tasks)
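BBUIHelper simply tracks bb.build.Task* events, keyed by "package task"
strings, and raises needUpdate so a UI knows a redraw is due. A small sketch
using the (name, data) event shape seen in the UIs above (the package and
task names are made up):

    from bb.ui.uihelper import BBUIHelper

    helper = BBUIHelper()
    helper.eventHandler(('bb.build.TaskStarted',
                         {'_package': 'busybox-1.2.1', '_task': 'do_compile'}))
    running, failed = helper.getTasks()
    # running now holds the key "busybox-1.2.1 do_compile\n"; helper.needUpdate is True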
diff --git a/bitbake-dev/lib/bb/utils.py b/bitbake-dev/lib/bb/utils.py
new file mode 100644
index 0000000000..17e22e389e
--- /dev/null
+++ b/bitbake-dev/lib/bb/utils.py
@@ -0,0 +1,270 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake Utility Functions
+"""
+
+# Copyright (C) 2004 Michael Lauer
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+digits = "0123456789"
+ascii_letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+import re, fcntl, os
+
+def explode_version(s):
+ r = []
+ alpha_regexp = re.compile('^([a-zA-Z]+)(.*)$')
+ numeric_regexp = re.compile('^(\d+)(.*)$')
+ while (s != ''):
+ if s[0] in digits:
+ m = numeric_regexp.match(s)
+ r.append(int(m.group(1)))
+ s = m.group(2)
+ continue
+ if s[0] in ascii_letters:
+ m = alpha_regexp.match(s)
+ r.append(m.group(1))
+ s = m.group(2)
+ continue
+ s = s[1:]
+ return r
+
+def vercmp_part(a, b):
+ va = explode_version(a)
+ vb = explode_version(b)
+ while True:
+ if va == []:
+ ca = None
+ else:
+ ca = va.pop(0)
+ if vb == []:
+ cb = None
+ else:
+ cb = vb.pop(0)
+ if ca is None and cb is None:
+ return 0
+ if ca > cb:
+ return 1
+ if ca < cb:
+ return -1
+
+def vercmp(ta, tb):
+ (ea, va, ra) = ta
+ (eb, vb, rb) = tb
+
+ r = int(ea)-int(eb)
+ if (r == 0):
+ r = vercmp_part(va, vb)
+ if (r == 0):
+ r = vercmp_part(ra, rb)
+ return r
+
+def explode_deps(s):
+ """
+ Take an RDEPENDS style string of format:
+ "DEPEND1 (optional version) DEPEND2 (optional version) ..."
+ and return a list of dependencies.
+ Version information is ignored.
+ """
+ r = []
+ l = s.split()
+ flag = False
+ for i in l:
+ if i[0] == '(':
+ flag = True
+ #j = []
+ if not flag:
+ r.append(i)
+ #else:
+ # j.append(i)
+ if flag and i.endswith(')'):
+ flag = False
+ # Ignore version
+ #r[-1] += ' ' + ' '.join(j)
+ return r
+
+
+
+def _print_trace(body, line):
+ """
+ Print the Environment of a Text Body
+ """
+ import bb
+
+ # print the environment of the method
+ bb.msg.error(bb.msg.domain.Util, "Printing the environment of the function")
+ min_line = max(1,line-4)
+ max_line = min(line+4,len(body)-1)
+ for i in range(min_line,max_line+1):
+ bb.msg.error(bb.msg.domain.Util, "\t%.4d:%s" % (i, body[i-1]) )
+
+
+def better_compile(text, file, realfile):
+ """
+ A better compile method. This method
+ will print the offending lines.
+ """
+ try:
+ return compile(text, file, "exec")
+ except Exception, e:
+ import bb,sys
+
+ # split the text into lines again
+ body = text.split('\n')
+ bb.msg.error(bb.msg.domain.Util, "Error in compiling python function in: ", realfile)
+ bb.msg.error(bb.msg.domain.Util, "The lines resulting into this error were:")
+ bb.msg.error(bb.msg.domain.Util, "\t%d:%s:'%s'" % (e.lineno, e.__class__.__name__, body[e.lineno-1]))
+
+ _print_trace(body, e.lineno)
+
+ # exit now
+ sys.exit(1)
+
+def better_exec(code, context, text, realfile):
+ """
+ Similar to better_compile, better_exec will
+ print the lines that are responsible for the
+ error.
+ """
+ import bb,sys
+ try:
+ exec code in context
+ except:
+ (t,value,tb) = sys.exc_info()
+
+ if t in [bb.parse.SkipPackage, bb.build.FuncFailed]:
+ raise
+
+ # print the Header of the Error Message
+ bb.msg.error(bb.msg.domain.Util, "Error in executing python function in: ", realfile)
+ bb.msg.error(bb.msg.domain.Util, "Exception:%s Message:%s" % (t,value) )
+
+ # let us find the line number now
+ while tb.tb_next:
+ tb = tb.tb_next
+
+ import traceback
+ line = traceback.tb_lineno(tb)
+
+ _print_trace( text.split('\n'), line )
+
+ raise
+
+def Enum(*names):
+ """
+ A simple function to provide Enum support
+ """
+
+ assert names, "Empty enums are not supported"
+
+ class EnumClass(object):
+ __slots__ = names
+ def __iter__(self): return iter(constants)
+ def __len__(self): return len(constants)
+ def __getitem__(self, i): return constants[i]
+ def __repr__(self): return 'Enum' + str(names)
+ def __str__(self): return 'enum ' + str(constants)
+
+ class EnumValue(object):
+ __slots__ = ('__value')
+ def __init__(self, value): self.__value = value
+ Value = property(lambda self: self.__value)
+ EnumType = property(lambda self: EnumType)
+ def __hash__(self): return hash(self.__value)
+ def __cmp__(self, other):
+ # C fans might want to remove the following assertion
+ # to make all enums comparable by ordinal value {;))
+ assert self.EnumType is other.EnumType, "Only values from the same enum are comparable"
+ return cmp(self.__value, other.__value)
+ def __invert__(self): return constants[maximum - self.__value]
+ def __nonzero__(self): return bool(self.__value)
+ def __repr__(self): return str(names[self.__value])
+
+ maximum = len(names) - 1
+ constants = [None] * len(names)
+ for i, each in enumerate(names):
+ val = EnumValue(i)
+ setattr(EnumClass, each, val)
+ constants[i] = val
+ constants = tuple(constants)
+ EnumType = EnumClass()
+ return EnumType
+
+def lockfile(name):
+ """
+ Use the file name as a lock file, return when the lock has been acquired.
+ Returns an open file object to pass to unlockfile().
+ """
+ while True:
+ # If we leave the lockfiles lying around there is no problem
+ # but we should clean up after ourselves. This gives potential
+ # for races though. To work around this, when we acquire the lock
+ # we check that the file we locked is still the lock file on disk
+ # by comparing inode numbers. If they don't match or the lockfile
+ # no longer exists, we start again.
+
+ # This implementation is unfair since the last person to request the
+ # lock is the most likely to win it.
+
+ lf = open(name, "a+")
+ fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
+ statinfo = os.fstat(lf.fileno())
+ if os.path.exists(lf.name):
+ statinfo2 = os.stat(lf.name)
+ if statinfo.st_ino == statinfo2.st_ino:
+ return lf
+ # File no longer exists or changed, retry
+ lf.close()
+
+def unlockfile(lf):
+ """
+ Unlock a file locked using lockfile()
+ """
+ os.unlink(lf.name)
+ fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
+ lf.close()
+
+def md5_file(filename):
+ """
+ Return the hex string representation of the MD5 checksum of filename.
+ """
+ try:
+ import hashlib
+ m = hashlib.md5()
+ except ImportError:
+ import md5
+ m = md5.new()
+
+ for line in open(filename):
+ m.update(line)
+ return m.hexdigest()
+
+def sha256_file(filename):
+ """
+ Return the hex string representation of the 256-bit SHA checksum of
+ filename. On Python 2.4 this will return None, so callers will need to
+ handle that by either skipping SHA checks, or running a standalone sha256sum
+ binary.
+ """
+ try:
+ import hashlib
+ except ImportError:
+ return None
+
+ s = hashlib.sha256()
+ for line in open(filename):
+ s.update(line)
+ return s.hexdigest()
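explode_version splits a version string into alternating numeric and
alphabetic components, discarding separators, and vercmp compares
(epoch, version, revision) tuples component by component; an exhausted side
compares as None, which sorts below any real component in Python 2. A quick
interactive sketch of the expected behaviour:

    >>> explode_version("1.0rc3")
    [1, 0, 'rc', 3]
    >>> vercmp_part("1.0rc3", "1.0")            # the extra 'rc3' components win
    1
    >>> vercmp(("0", "1.0", "r1"), ("0", "1.0", "r2"))
    -1

The lockfile/unlockfile pair is meant to be used around a critical section;
the path below is only an example:

    lock = lockfile("/tmp/bb.lock")   # blocks until the lock is acquired
    try:
        pass                          # ... critical section ...
    finally:
        unlockfile(lock)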
diff --git a/bitbake-dev/lib/bb/xmlrpcserver.py b/bitbake-dev/lib/bb/xmlrpcserver.py
new file mode 100644
index 0000000000..075eda0573
--- /dev/null
+++ b/bitbake-dev/lib/bb/xmlrpcserver.py
@@ -0,0 +1,157 @@
+#
+# BitBake XMLRPC Server
+#
+# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
+# Copyright (C) 2006 - 2008 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ This module implements an xmlrpc server for BitBake.
+
+ Use this by deriving a class from BitBakeXMLRPCServer and then adding
+ methods which you want to "export" via XMLRPC. If the methods have the
+ prefix xmlrpc_, those functions will be registered automatically;
+ if not, you need to call register_function.
+
+ Use register_idle_function() to add a function which the xmlrpc server
+ calls from within serve_forever when no requests are pending. Make sure
+ that those functions are non-blocking or else you will introduce latency
+ in the server's main loop.
+"""
+
+import bb
+import xmlrpclib
+
+DEBUG = False
+
+from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
+import os, sys, inspect, select
+
+class BitBakeServerCommands():
+ def __init__(self, server, cooker):
+ self.cooker = cooker
+ self.server = server
+
+ def registerEventHandler(self, host, port):
+ """
+ Register a remote UI Event Handler
+ """
+ s = xmlrpclib.Server("http://%s:%d" % (host, port), allow_none=True)
+ return bb.event.register_UIHhandler(s)
+
+ def unregisterEventHandler(self, handlerNum):
+ """
+ Unregister a remote UI Event Handler
+ """
+ return bb.event.unregister_UIHhandler(handlerNum)
+
+ def runCommand(self, command):
+ """
+ Run a cooker command on the server
+ """
+ return self.cooker.command.runCommand(command)
+
+ def terminateServer(self):
+ """
+ Trigger the server to quit
+ """
+ self.server.quit = True
+ print "Server (cooker) exitting"
+ return
+
+ def ping(self):
+ """
+ Dummy method which can be used to check the server is still alive
+ """
+ return True
+
+class BitBakeXMLRPCServer(SimpleXMLRPCServer):
+ # remove this when you're done with debugging
+ # allow_reuse_address = True
+
+ def __init__(self, cooker, interface = ("localhost", 0)):
+ """
+ Constructor
+ """
+ SimpleXMLRPCServer.__init__(self, interface,
+ requestHandler=SimpleXMLRPCRequestHandler,
+ logRequests=False, allow_none=True)
+ self._idlefuns = {}
+ self.host, self.port = self.socket.getsockname()
+ #self.register_introspection_functions()
+ commands = BitBakeServerCommands(self, cooker)
+ self.autoregister_all_functions(commands, "")
+
+ def autoregister_all_functions(self, context, prefix):
+ """
+ Convenience method for registering all functions in the scope
+ of this class that start with a common prefix
+ """
+ methodlist = inspect.getmembers(context, inspect.ismethod)
+ for name, method in methodlist:
+ if name.startswith(prefix):
+ self.register_function(method, name[len(prefix):])
+
+ def register_idle_function(self, function, data):
+ """Register a function to be called while the server is idle"""
+ assert callable(function)
+ self._idlefuns[function] = data
+
+ def serve_forever(self):
+ """
+ Serve requests. Overridden to honor a quit command
+ """
+ self.quit = False
+ while not self.quit:
+ self.handle_request()
+
+ # Tell idle functions we're exiting
+ for function, data in self._idlefuns.items():
+ try:
+ retval = function(self, data, True)
+ except:
+ pass
+
+ self.server_close()
+ return
+
+ def get_request(self):
+ """
+ Get next request. Behaves like the parent class unless idle functions
+ have been registered. In that case we poll with select() and run the
+ idle functions whenever the server has nothing else to do.
+ """
+ while True:
+ # wait 500 ms for an xmlrpc request
+ if DEBUG:
+ print "DEBUG: select'ing 500ms waiting for an xmlrpc request..."
+ ifds, ofds, xfds = select.select([self.socket.fileno()], [], [], 0.5)
+ if ifds:
+ return self.socket.accept()
+ # call idle functions only if we're not shutting down at the moment, to prevent recursion
+ if not self.quit:
+ if DEBUG:
+ print "DEBUG: server is idle -- calling idle functions..."
+ for function, data in self._idlefuns.items():
+ try:
+ retval = function(self, data, False)
+ if not retval:
+ del self._idlefuns[function]
+ except SystemExit:
+ raise
+ except:
+ import traceback
+ traceback.print_exc()
+ pass
+