diff options
-rw-r--r-- | TODO | 17 | ||||
-rwxr-xr-x | bin/bitbake | 12 | ||||
-rw-r--r-- | classes/base.bbclass | 12 | ||||
-rw-r--r-- | lib/bb/__init__.py | 31 | ||||
-rw-r--r-- | lib/bb/build.py | 22 | ||||
-rw-r--r-- | lib/bb/cache.py | 53 | ||||
-rw-r--r-- | lib/bb/cooker.py | 126 | ||||
-rw-r--r-- | lib/bb/data_smart.py | 10 | ||||
-rw-r--r-- | lib/bb/fetch/__init__.py | 35 | ||||
-rw-r--r-- | lib/bb/fetch/bzr.py | 13 | ||||
-rw-r--r-- | lib/bb/fetch/cvs.py | 13 | ||||
-rw-r--r-- | lib/bb/fetch/git.py | 9 | ||||
-rw-r--r-- | lib/bb/fetch/hg.py | 14 | ||||
-rw-r--r-- | lib/bb/fetch/local.py | 2 | ||||
-rw-r--r-- | lib/bb/fetch/osc.py | 11 | ||||
-rw-r--r-- | lib/bb/fetch/perforce.py | 18 | ||||
-rw-r--r-- | lib/bb/fetch/repo.py | 2 | ||||
-rw-r--r-- | lib/bb/fetch/svk.py | 10 | ||||
-rw-r--r-- | lib/bb/fetch/svn.py | 14 | ||||
-rw-r--r-- | lib/bb/fetch/wget.py | 13 | ||||
-rw-r--r-- | lib/bb/msg.py | 19 | ||||
-rw-r--r-- | lib/bb/parse/__init__.py | 8 | ||||
-rw-r--r-- | lib/bb/parse/ast.py | 12 | ||||
-rw-r--r-- | lib/bb/parse/parse_py/BBHandler.py | 9 | ||||
-rw-r--r-- | lib/bb/parse/parse_py/ConfHandler.py | 5 | ||||
-rw-r--r-- | lib/bb/persist_data.py | 8 | ||||
-rw-r--r-- | lib/bb/providers.py | 25 | ||||
-rw-r--r-- | lib/bb/runqueue.py | 104 | ||||
-rw-r--r-- | lib/bb/taskdata.py | 61 | ||||
-rw-r--r-- | lib/bb/utils.py | 27 |
30 files changed, 375 insertions, 340 deletions
@@ -15,14 +15,15 @@ exception like that, *perhaps*, to abstract away exceptions raised by event handlers, but it has no place in bb.build.exec_task -- Leverage the python logging module - -- Convert bb modules to stop using the bb.msg functions. -- Switch the messaging wrappers in the bb package to use the logging module - directly, as we'll be deprecating the bb.msg versions, but these should - stay around for the metadata to use. -- Deprecate the bb.msg functions. -- Should we use the logging module rather than print in the knotty ui? +- BUG: if you chmod 000 local.conf, it silently doesn't parse it, when it + should really fail, so the user can fix the problem. + +- Audit bb.fatal usage - these should all be able to be replaced with + exceptions +- Add deprecation warnings to the bb.msg functions +- Figure out how to handle the ncurses UI. Should some of our logging + formatting stuff be made common to all of bitbake, or perhaps all UIs via + the UIHelper? Long term, high impact: diff --git a/bin/bitbake b/bin/bitbake index 67cc9d594..b281c2a30 100755 --- a/bin/bitbake +++ b/bin/bitbake @@ -57,23 +57,23 @@ class BBConfiguration(object): self.pkgs_to_build = [] -def print_exception(exc, value, tb): - """Send exception information through bb.msg""" - bb.fatal("".join(format_exception(exc, value, tb, limit=8))) +def print_exception(*exc_info): + logger.error("Uncaught exception: ", exc_info=exc_info) + sys.exit(1) sys.excepthook = print_exception +# Display bitbake/OE warnings via the BitBake.Warnings logger, ignoring others""" +warnlog = logging.getLogger("BitBake.Warnings") _warnings_showwarning = warnings.showwarning def _showwarning(message, category, filename, lineno, file=None, line=None): - """Display python warning messages using bb.msg""" if file is not None: if _warnings_showwarning is not None: _warnings_showwarning(message, category, filename, lineno, file, line) else: s = warnings.formatwarning(message, category, filename, lineno) - s = 
s.split("\n")[0] - bb.msg.warn(None, s) + warnlog.warn(s) warnings.showwarning = _showwarning warnings.filterwarnings("ignore") diff --git a/classes/base.bbclass b/classes/base.bbclass index bd637e517..c6693ce31 100644 --- a/classes/base.bbclass +++ b/classes/base.bbclass @@ -35,18 +35,6 @@ bbfatal() { exit 1 } -bbdebug() { - test $# -ge 2 || { - echo "Usage: bbdebug level \"message\"" - exit 1 - } - - test ${@bb.msg.debug_level['default']} -ge $1 && { - shift - echo "DEBUG:" $* - } -} - addtask showdata do_showdata[nostamp] = "1" python do_showdata() { diff --git a/lib/bb/__init__.py b/lib/bb/__init__.py index 8cda4255b..11983f0e0 100644 --- a/lib/bb/__init__.py +++ b/lib/bb/__init__.py @@ -35,7 +35,25 @@ class NullHandler(logging.Handler): def emit(self, record): pass +Logger = logging.getLoggerClass() +class BBLogger(Logger): + def __init__(self, name): + if name.split(".")[0] == "BitBake": + self.debug = self.bbdebug + Logger.__init__(self, name) + + def bbdebug(self, level, msg, *args, **kwargs): + return self.log(logging.DEBUG - level - 1, msg, *args, **kwargs) + + def plain(self, msg, *args, **kwargs): + return self.log(logging.INFO + 1, msg, *args, **kwargs) + + def verbose(self, msg, *args, **kwargs): + return self.log(logging.INFO - 1, msg, *args, **kwargs) + logging.raiseExceptions = False +logging.setLoggerClass(BBLogger) + logger = logging.getLogger("BitBake") logger.addHandler(NullHandler()) logger.setLevel(logging.INFO) @@ -48,22 +66,23 @@ if "BBDEBUG" in os.environ: # Messaging convenience functions def plain(*args): - bb.msg.plain(''.join(args)) + logger.plain(''.join(args)) def debug(lvl, *args): - bb.msg.debug(lvl, None, ''.join(args)) + logger.debug(lvl, ''.join(args)) def note(*args): - bb.msg.note(1, None, ''.join(args)) + logger.info(''.join(args)) def warn(*args): - bb.msg.warn(None, ''.join(args)) + logger.warn(''.join(args)) def error(*args): - bb.msg.error(None, ''.join(args)) + logger.error(''.join(args)) def fatal(*args): - 
bb.msg.fatal(None, ''.join(args)) + logger.critical(''.join(args)) + sys.exit(1) def deprecated(func, name = None, advice = ""): diff --git a/lib/bb/build.py b/lib/bb/build.py index 88ba1d541..c04a31880 100644 --- a/lib/bb/build.py +++ b/lib/bb/build.py @@ -26,9 +26,14 @@ #Based on functions from the base bb module, Copyright 2003 Holger Schurig from bb import data, event, mkdirhier, utils -import bb, os, sys +import os +import sys +import logging +import bb import bb.utils +logger = logging.getLogger("BitBake.Build") + # When we execute a python function we'd like certain things # in all namespaces, hence we add them to __builtins__ # If we do not do this and use the exec globals, they will @@ -137,12 +142,12 @@ def exec_func(func, d, dirs = None): # Handle logfiles si = file('/dev/null', 'r') try: - if bb.msg.debug_level['default'] > 0 or ispython: + if logger.getEffectiveLevel() <= logging.DEBUG or ispython: so = os.popen("tee \"%s\"" % logfile, "w") else: so = file(logfile, 'w') - except OSError as e: - bb.msg.error(bb.msg.domain.Build, "opening log file: %s" % e) + except OSError: + logger.exception("Opening log file '%s'", logfile) pass se = so @@ -193,7 +198,7 @@ def exec_func(func, d, dirs = None): se.close() if os.path.exists(logfile) and os.path.getsize(logfile) == 0: - bb.msg.debug(2, bb.msg.domain.Build, "Zero size logfile %s, removing" % logfile) + logger.debug(2, "Zero size logfile %s, removing", logfile) os.remove(logfile) # Close the backup fds @@ -239,7 +244,8 @@ def exec_func_shell(func, d, runfile, logfile, flags): f = open(runfile, "w") f.write("#!/bin/sh -e\n") - if bb.msg.debug_level['default'] > 0: f.write("set -x\n") + if logger.getEffectiveLevel() <= logging.DEBUG: + f.write("set -x\n") data.emit_env(f, d) f.write("cd %s\n" % os.getcwd()) @@ -275,7 +281,7 @@ def exec_task(task, d): raise EventException("No such task", InvalidTask(task, d)) try: - bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % task) + logger.debug(1, "Executing 
task %s", task) old_overrides = data.getVar('OVERRIDES', d, 0) localdata = data.createCopy(d) data.setVar('OVERRIDES', 'task-%s:%s' % (task[3:], old_overrides), localdata) @@ -291,7 +297,7 @@ def exec_task(task, d): except: logfile = None msg = message - bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % message ) + logger.info("Task failed: %s", message ) failedevent = TaskFailed(msg, logfile, task, d) event.fire(failedevent, d) raise EventException("Function failed in task: %s" % message, failedevent) diff --git a/lib/bb/cache.py b/lib/bb/cache.py index 21cbad915..96a405604 100644 --- a/lib/bb/cache.py +++ b/lib/bb/cache.py @@ -29,14 +29,17 @@ import os +import logging import bb.data import bb.utils +logger = logging.getLogger("BitBake.Cache") + try: import cPickle as pickle except ImportError: import pickle - bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.") + logger.info("Importing cPickle failed. Falling back to a very slow implementation.") __cache_version__ = "131" @@ -57,13 +60,13 @@ class Cache: if self.cachedir in [None, '']: self.has_cache = False - bb.msg.note(1, bb.msg.domain.Cache, "Not using a cache. Set CACHE = <directory> to enable.") + logger.info("Not using a cache. 
Set CACHE = <directory> to enable.") return self.has_cache = True self.cachefile = os.path.join(self.cachedir, "bb_cache.dat") - bb.msg.debug(1, bb.msg.domain.Cache, "Using cache in '%s'" % self.cachedir) + logger.debug(1, "Using cache in '%s'", self.cachedir) bb.utils.mkdirhier(self.cachedir) # If any of configuration.data's dependencies are newer than the @@ -84,14 +87,14 @@ class Cache: if version_data['BITBAKE_VER'] != bb.__version__: raise ValueError('Bitbake Version Mismatch') except EOFError: - bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...") + logger.info("Truncated cache found, rebuilding...") self.depends_cache = {} except: - bb.msg.note(1, bb.msg.domain.Cache, "Invalid cache found, rebuilding...") + logger.info("Invalid cache found, rebuilding...") self.depends_cache = {} else: if os.path.isfile(self.cachefile): - bb.msg.note(1, bb.msg.domain.Cache, "Out of date cache found, rebuilding...") + logger.info("Out of date cache found, rebuilding...") def getVar(self, var, fn, exp = 0): """ @@ -111,7 +114,7 @@ class Cache: if fn != self.data_fn: # We're trying to access data in the cache which doesn't exist # yet setData hasn't been called to setup the right access. Very bad. 
- bb.msg.error(bb.msg.domain.Cache, "Parsing error data_fn %s and fn %s don't match" % (self.data_fn, fn)) + logger.error("data_fn %s and fn %s don't match", self.data_fn, fn) self.cacheclean = False result = bb.data.getVar(var, self.data, exp) @@ -152,7 +155,6 @@ class Cache: if virtualfn.startswith('virtual:'): cls = virtualfn.split(':', 2)[1] fn = virtualfn.replace('virtual:' + cls + ':', '') - #bb.msg.debug(2, bb.msg.domain.Cache, "virtualfn2realfn %s to %s %s" % (virtualfn, fn, cls)) return (fn, cls) def realfn2virtual(self, realfn, cls): @@ -160,9 +162,7 @@ class Cache: Convert a real filename + the associated subclass keyword to a virtual filename """ if cls == "": - #bb.msg.debug(2, bb.msg.domain.Cache, "realfn2virtual %s and '%s' to %s" % (realfn, cls, realfn)) return realfn - #bb.msg.debug(2, bb.msg.domain.Cache, "realfn2virtual %s and %s to %s" % (realfn, cls, "virtual:" + cls + ":" + realfn)) return "virtual:" + cls + ":" + realfn def loadDataFull(self, virtualfn, appends, cfgData): @@ -173,7 +173,7 @@ class Cache: (fn, cls) = self.virtualfn2realfn(virtualfn) - bb.msg.debug(1, bb.msg.domain.Cache, "Parsing %s (full)" % fn) + logger.debug(1, "Parsing %s (full)", fn) bb_data = self.load_bbfile(fn, appends, cfgData) return bb_data[cls] @@ -198,13 +198,13 @@ class Cache: virtualfn = self.realfn2virtual(fn, cls) if self.depends_cache[virtualfn]["__SKIPPED"]: skipped += 1 - bb.msg.debug(1, bb.msg.domain.Cache, "Skipping %s" % virtualfn) + logger.debug(1, "Skipping %s", virtualfn) continue self.handle_data(virtualfn, cacheData) virtuals += 1 return True, skipped, virtuals - bb.msg.debug(1, bb.msg.domain.Cache, "Parsing %s" % fn) + logger.debug(1, "Parsing %s", fn) bb_data = self.load_bbfile(fn, appends, cfgData) @@ -213,7 +213,7 @@ class Cache: self.setData(virtualfn, fn, bb_data[data]) if self.getVar("__SKIPPED", virtualfn): skipped += 1 - bb.msg.debug(1, bb.msg.domain.Cache, "Skipping %s" % virtualfn) + logger.debug(1, "Skipping %s", virtualfn) else: 
self.handle_data(virtualfn, cacheData) virtuals += 1 @@ -248,7 +248,7 @@ class Cache: # File isn't in depends_cache if not fn in self.depends_cache: - bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s is not cached" % fn) + logger.debug(2, "Cache: %s is not cached", fn) self.remove(fn) return False @@ -256,13 +256,13 @@ class Cache: # Check file still exists if mtime == 0: - bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s no longer exists" % fn) + logger.debug(2, "Cache: %s no longer exists", fn) self.remove(fn) return False # Check the file's timestamp if mtime != self.getVar("CACHETIMESTAMP", fn, True): - bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s changed" % fn) + logger.debug(2, "Cache: %s changed", fn) self.remove(fn) return False @@ -277,11 +277,10 @@ class Cache: return False if (fmtime != old_mtime): - bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s's dependency %s changed" % (fn, f)) + logger.debug(2, "Cache: %s's dependency %s changed", fn, f) self.remove(fn) return False - #bb.msg.debug(2, bb.msg.domain.Cache, "Depends Cache: %s is clean" % fn) if not fn in self.clean: self.clean[fn] = "" @@ -292,16 +291,16 @@ class Cache: virtualfn = self.realfn2virtual(fn, cls) self.clean[virtualfn] = "" if not virtualfn in self.depends_cache: - bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s is not cached" % virtualfn) + logger.debug(2, "Cache: %s is not cached", virtualfn) invalid = True # If any one of the varients is not present, mark cache as invalid for all if invalid: for cls in (multi or "").split(): virtualfn = self.realfn2virtual(fn, cls) - bb.msg.debug(2, bb.msg.domain.Cache, "Cache: Removing %s from cache" % virtualfn) + logger.debug(2, "Cache: Removing %s from cache", virtualfn) del self.clean[virtualfn] - bb.msg.debug(2, bb.msg.domain.Cache, "Cache: Removing %s from cache" % fn) + logger.debug(2, "Cache: removing %s from cache", fn) del self.clean[fn] return False @@ -312,7 +311,7 @@ class Cache: Remove a fn from the cache Called from the parser in 
error cases """ - bb.msg.debug(1, bb.msg.domain.Cache, "Removing %s from cache" % fn) + logger.debug(1, "Removing %s from cache", fn) if fn in self.depends_cache: del self.depends_cache[fn] if fn in self.clean: @@ -329,7 +328,7 @@ class Cache: return if self.cacheclean: - bb.msg.note(1, bb.msg.domain.Cache, "Cache is clean, not saving.") + logger.info("Cache is clean, not saving.") return version_data = {} @@ -339,10 +338,10 @@ class Cache: cache_data = copy.copy(self.depends_cache) for fn in self.depends_cache: if '__BB_DONT_CACHE' in self.depends_cache[fn] and self.depends_cache[fn]['__BB_DONT_CACHE']: - bb.msg.debug(2, bb.msg.domain.Cache, "Not caching %s, marked as not cacheable" % fn) + logger.debug(2, "Not caching %s, marked as not cacheable", fn) del cache_data[fn] elif 'PV' in self.depends_cache[fn] and 'SRCREVINACTION' in self.depends_cache[fn]['PV']: - bb.msg.error(bb.msg.domain.Cache, "Not caching %s as it had SRCREVINACTION in PV. Please report this bug" % fn) + logger.error("Not caching %s as it had SRCREVINACTION in PV. Please report this bug", fn) del cache_data[fn] p = pickle.Pickler(file(self.cachefile, "wb" ), -1 ) @@ -360,7 +359,7 @@ class Cache: pe = self.getVar('PE', file_name, True) or "0" pv = self.getVar('PV', file_name, True) if 'SRCREVINACTION' in pv: - bb.msg.note(1, bb.msg.domain.Cache, "Found SRCREVINACTION in PV (%s) or %s. Please report this bug." % (pv, file_name)) + logger.info("Found SRCREVINACTION in PV (%s) or %s. 
Please report this bug.", pv, file_name) pr = self.getVar('PR', file_name, True) dp = int(self.getVar('DEFAULT_PREFERENCE', file_name, True) or "0") depends = bb.utils.explode_deps(self.getVar("DEPENDS", file_name, True) or "") diff --git a/lib/bb/cooker.py b/lib/bb/cooker.py index ad1d6023f..722fa3101 100644 --- a/lib/bb/cooker.py +++ b/lib/bb/cooker.py @@ -24,12 +24,19 @@ from __future__ import print_function import sys, os, glob, os.path, re, time +import logging import sre_constants from cStringIO import StringIO from contextlib import closing import bb from bb import utils, data, parse, event, cache, providers, taskdata, command, runqueue +logger = logging.getLogger("BitBake") +collectlog = logging.getLogger("BitBake.Collection") +buildlog = logging.getLogger("BitBake.Build") +parselog = logging.getLogger("BitBake.Parsing") +providerlog = logging.getLogger("BitBake.Provider") + class MultipleMatches(Exception): """ Exception raised when multiple file matches are found @@ -99,7 +106,7 @@ class BBCooker: import termios tcattr = termios.tcgetattr(fd) if tcattr[3] & termios.TOSTOP: - bb.msg.note(1, bb.msg.domain.Build, "The terminal had the TOSTOP bit set, clearing...") + buildlog.info("The terminal had the TOSTOP bit set, clearing...") tcattr[3] = tcattr[3] & ~termios.TOSTOP termios.tcsetattr(fd, termios.TCSANOW, tcattr) @@ -115,7 +122,7 @@ class BBCooker: if nice: curnice = os.nice(0) nice = int(nice) - curnice - bb.msg.note(2, bb.msg.domain.Build, "Renice to %s " % os.nice(nice)) + buildlog.verbose("Renice to %s " % os.nice(nice)) def parseCommandLine(self): # Parse any commandline into actions @@ -123,11 +130,11 @@ class BBCooker: self.commandlineAction = None if 'world' in self.configuration.pkgs_to_build: - bb.msg.error(bb.msg.domain.Build, "'world' is not a valid target for --environment.") + buildlog.error("'world' is not a valid target for --environment.") elif len(self.configuration.pkgs_to_build) > 1: - bb.msg.error(bb.msg.domain.Build, "Only one target 
can be used with the --environment option.") + buildlog.error("Only one target can be used with the --environment option.") elif self.configuration.buildfile and len(self.configuration.pkgs_to_build) > 0: - bb.msg.error(bb.msg.domain.Build, "No target should be used with the --environment and --buildfile options.") + buildlog.error("No target should be used with the --environment and --buildfile options.") elif len(self.configuration.pkgs_to_build) > 0: self.commandlineAction = ["showEnvironmentTarget", self.configuration.pkgs_to_build] else: @@ -145,13 +152,13 @@ class BBCooker: self.commandlineAction = ["generateDotGraph", self.configuration.pkgs_to_build, self.configuration.cmd] else: self.commandlineAction = None - bb.msg.error(bb.msg.domain.Build, "Please specify a package name for dependency graph generation.") + buildlog.error("Please specify a package name for dependency graph generation.") else: if self.configuration.pkgs_to_build: self.commandlineAction = ["buildTargets", self.configuration.pkgs_to_build, self.configuration.cmd] else: self.commandlineAction = None - bb.msg.error(bb.msg.domain.Build, "Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.") + buildlog.error("Nothing to do. 
Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.") def runCommands(self, server, data, abort): """ @@ -177,8 +184,8 @@ class BBCooker: preferred_versions[pn] = (pref_ver, pref_file) latest_versions[pn] = (last_ver, last_file) - bb.msg.plain("%-35s %25s %25s" % ("Package Name", "Latest Version", "Preferred Version")) - bb.msg.plain("%-35s %25s %25s\n" % ("============", "==============", "=================")) + logger.plain("%-35s %25s %25s", "Package Name", "Latest Version", "Preferred Version") + logger.plain("%-35s %25s %25s\n", "============", "==============", "=================") for p in sorted(pkg_pn): pref = preferred_versions[p] @@ -190,7 +197,7 @@ class BBCooker: if pref == latest: prefstr = "" - bb.msg.plain("%-35s %25s %25s" % (p, lateststr, prefstr)) + logger.plain("%-35s %25s %25s", p, lateststr, prefstr) def compareRevisions(self): ret = bb.fetch.fetcher_compare_revisons(self.configuration.data) @@ -227,27 +234,21 @@ class BBCooker: if fn: try: envdata = self.bb_cache.loadDataFull(fn, self.get_file_appends(fn), self.configuration.data) - except IOError as e: - bb.msg.error(bb.msg.domain.Parsing, "Unable to read %s: %s" % (fn, e)) - raise - except Exception as e: - bb.msg.error(bb.msg.domain.Parsing, "%s" % e) + except Exception, e: + parselog.exception("Unable to read %s", fn) raise # emit variables and shell functions - try: - data.update_data(envdata) - with closing(StringIO()) as env: - data.emit_env(env, envdata, True) - bb.msg.plain(env.getvalue()) - except Exception as e: - bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e) + data.update_data(envdata) + with closing(StringIO()) as env: + data.emit_env(env, envdata, True) + logger.plain(env.getvalue()) # emit the metadata which isnt valid shell data.expandKeys(envdata) for e in envdata.keys(): if data.getVarFlag( e, 'python', envdata ): - bb.msg.plain("\npython %s () {\n%s}\n" % (e, data.getVar(e, envdata, 1))) + logger.plain("\npython %s () {\n%s}\n", e, 
data.getVar(e, envdata, 1)) def generateDepTreeData(self, pkgs_to_build, task): """ @@ -371,7 +372,7 @@ class BBCooker: for rdepend in depgraph["rdepends-pn"][pn]: print('"%s" -> "%s" [style=dashed]' % (pn, rdepend), file=depends_file) print("}", file=depends_file) - bb.msg.plain("PN dependencies saved to 'pn-depends.dot'") + logger.info("PN dependencies saved to 'pn-depends.dot'") depends_file = file('package-depends.dot', 'w' ) print("digraph depends {", file=depends_file) @@ -392,7 +393,7 @@ class BBCooker: for rdepend in depgraph["rrecs-pkg"][package]: print('"%s" -> "%s" [style=dashed]' % (package, rdepend), file=depends_file) print("}", file=depends_file) - bb.msg.plain("Package dependencies saved to 'package-depends.dot'") + logger.info("Package dependencies saved to 'package-depends.dot'") tdepends_file = file('task-depends.dot', 'w' ) print("digraph depends {", file=tdepends_file) @@ -404,7 +405,7 @@ class BBCooker: for dep in depgraph["tdepends"][task]: print('"%s" -> "%s"' % (task, dep), file=tdepends_file) print("}", file=tdepends_file) - bb.msg.plain("Task dependencies saved to 'task-depends.dot'") + logger.info("Task dependencies saved to 'task-depends.dot'") def buildDepgraph( self ): all_depends = self.status.all_depends @@ -428,10 +429,10 @@ class BBCooker: try: (providee, provider) = p.split(':') except: - bb.msg.fatal(bb.msg.domain.Provider, "Malformed option in PREFERRED_PROVIDERS variable: %s" % p) + providerlog.critical("Malformed option in PREFERRED_PROVIDERS variable: %s" % p) continue if providee in self.status.preferred and self.status.preferred[providee] != provider: - bb.msg.error(bb.msg.domain.Provider, "conflicting preferences for %s: both %s and %s specified" % (providee, provider, self.status.preferred[providee])) + providerlog.error("conflicting preferences for %s: both %s and %s specified", providee, provider, self.status.preferred[providee]) self.status.preferred[providee] = provider # Calculate priorities for each file @@ -440,8 
+441,7 @@ class BBCooker: for collection, pattern, regex, _ in self.status.bbfile_config_priorities: if not regex in matched: - bb.msg.warn(bb.msg.domain.Provider, "No bb files matched BBFILE_PATTERN_%s '%s'" % - (collection, pattern)) + collectlog.warn("No bb files matched BBFILE_PATTERN_%s '%s'" % (collection, pattern)) def buildWorldTargetList(self): """ @@ -449,19 +449,19 @@ class BBCooker: """ all_depends = self.status.all_depends pn_provides = self.status.pn_provides - bb.msg.debug(1, bb.msg.domain.Parsing, "collating packages for \"world\"") + parselog.debug(1, "collating packages for \"world\"") for f in self.status.possible_world: terminal = True pn = self.status.pkg_fn[f] for p in pn_provides[pn]: if p.startswith('virtual/'): - bb.msg.debug(2, bb.msg.domain.Parsing, "World build skipping %s due to %s provider starting with virtual/" % (f, p)) + parselog.debug(2, "World build skipping %s due to %s provider starting with virtual/", f, p) terminal = False break for pf in self.status.providers[p]: if self.status.pkg_fn[pf] != pn: - bb.msg.debug(2, bb.msg.domain.Parsing, "World build skipping %s due to both us and %s providing %s" % (f, pf, p)) + parselog.debug(2, "World build skipping %s due to both us and %s providing %s", f, pf, p) terminal = False break if terminal: @@ -475,8 +475,9 @@ class BBCooker: """Drop off into a shell""" try: from bb import shell - except ImportError as details: - bb.msg.fatal(bb.msg.domain.Parsing, "Sorry, shell not available (%s)" % details ) + except ImportError: + parselog.exception("Interactive mode not available") + sys.exit(1) else: shell.start( self ) @@ -497,14 +498,14 @@ class BBCooker: layerconf = self._findLayerConf() if layerconf: - bb.msg.debug(2, bb.msg.domain.Parsing, "Found bblayers.conf (%s)" % layerconf) + parselog.debug(2, "Found bblayers.conf (%s)", layerconf) data = bb.parse.handle(layerconf, data) layers = (bb.data.getVar('BBLAYERS', data, True) or "").split() data = bb.data.createCopy(data) for layer in 
layers: - bb.msg.debug(2, bb.msg.domain.Parsing, "Adding layer %s" % layer) + parselog.debug(2, "Adding layer %s", layer) bb.data.setVar('LAYERDIR', layer, data) data = bb.parse.handle(os.path.join(layer, "conf", "layer.conf"), data) @@ -524,7 +525,7 @@ class BBCooker: bb.data.delVar('LAYERDIR', data) if not data.getVar("BBPATH", True): - bb.fatal("The BBPATH variable is not set") + raise SystemExit("The BBPATH variable is not set") data = bb.parse.handle(os.path.join("conf", "bitbake.conf"), data) @@ -545,10 +546,9 @@ class BBCooker: bb.event.fire(bb.event.ConfigParsed(), self.configuration.data) - except IOError as e: - bb.msg.fatal(bb.msg.domain.Parsing, "Error when parsing %s: %s" % (files, str(e))) - except bb.parse.ParseError as details: - bb.msg.fatal(bb.msg.domain.Parsing, "Unable to parse %s (%s)" % (files, details) ) + except (IOError, bb.parse.ParseError): + parselog.exception("Error when parsing %s", files) + sys.exit(1) def handleCollections( self, collections ): """Handle collections""" @@ -557,22 +557,22 @@ class BBCooker: for c in collection_list: regex = bb.data.getVar("BBFILE_PATTERN_%s" % c, self.configuration.data, 1) if regex == None: - bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PATTERN_%s not defined" % c) + parselog.error("BBFILE_PATTERN_%s not defined" % c) continue priority = bb.data.getVar("BBFILE_PRIORITY_%s" % c, self.configuration.data, 1) if priority == None: - bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PRIORITY_%s not defined" % c) + parselog.error("BBFILE_PRIORITY_%s not defined" % c) continue try: cre = re.compile(regex) except re.error: - bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PATTERN_%s \"%s\" is not a valid regular expression" % (c, regex)) + parselog.error("BBFILE_PATTERN_%s \"%s\" is not a valid regular expression", c, regex) continue try: pri = int(priority) self.status.bbfile_config_priorities.append((c, regex, cre, pri)) except ValueError: - bb.msg.error(bb.msg.domain.Parsing, "invalid value for BBFILE_PRIORITY_%s: 
\"%s\"" % (c, priority)) + parselog.error("invalid value for BBFILE_PRIORITY_%s: \"%s\"", c, priority) def buildSetVars(self): """ @@ -608,9 +608,9 @@ class BBCooker: """ matches = self.matchFiles(buildfile) if len(matches) != 1: - bb.msg.error(bb.msg.domain.Parsing, "Unable to match %s (%s matches found):" % (buildfile, len(matches))) + parselog.error("Unable to match %s (%s matches found):" % (buildfile, len(matches))) for f in matches: - bb.msg.error(bb.msg.domain.Parsing, " %s" % f) + parselog.error(" %s" % f) raise MultipleMatches return matches[0] @@ -654,7 +654,7 @@ class BBCooker: # Remove stamp for target if force mode active if self.configuration.force: - bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (task, fn)) + logger.verbose("Remove stamp %s, %s", task, fn) bb.build.del_stamp('do_%s' % task, self.status, fn) # Setup taskdata structure @@ -680,7 +680,7 @@ class BBCooker: retval = rq.execute_runqueue() except runqueue.TaskFailure as exc: for fnid in exc.args: - bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid]) + buildlog.error("'%s' failed" % taskdata.fn_index[fnid]) failures = failures + 1 retval = False if not retval: @@ -716,7 +716,7 @@ class BBCooker: retval = rq.execute_runqueue() except runqueue.TaskFailure as exc: for fnid in exc.args: - bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid]) + buildlog.error("'%s' failed" % taskdata.fn_index[fnid]) failures = failures + 1 retval = False if not retval: @@ -762,11 +762,11 @@ class BBCooker: try: import psyco except ImportError: - bb.msg.note(1, bb.msg.domain.Collection, "Psyco JIT Compiler (http://psyco.sf.net) not available. Install it to increase performance.") + collectlog.info("Psyco JIT Compiler (http://psyco.sf.net) not available. Install it to increase performance.") else: psyco.bind( CookerParser.parse_next ) else: - bb.msg.note(1, bb.msg.domain.Collection, "You have disabled Psyco. 
This decreases performance.") + collectlog.info("You have disabled Psyco. This decreases performance.") self.status = bb.cache.CacheData() @@ -785,7 +785,7 @@ class BBCooker: self.cookerState = cookerParsing if not self.parser.parse_next(): - bb.msg.debug(1, bb.msg.domain.Collection, "parsing complete") + collectlog.debug(1, "parsing complete") self.buildDepgraph() self.cookerState = cookerParsed return None @@ -833,7 +833,7 @@ class BBCooker: parsed, cached, skipped, masked = 0, 0, 0, 0 self.bb_cache = bb.cache.init(self) - bb.msg.debug(1, bb.msg.domain.Collection, "collecting .bb files") + collectlog.debug(1, "collecting .bb files") files = (data.getVar( "BBFILES", self.configuration.data, 1 ) or "").split() data.setVar("BBFILES", " ".join(files), self.configuration.data) @@ -842,7 +842,7 @@ class BBCooker: files = self.get_bbfiles() if not len(files): - bb.msg.error(bb.msg.domain.Collection, "no recipe files to build, check your BBPATH and BBFILES?") + collectlog.error("no recipe files to build, check your BBPATH and BBFILES?") bb.event.fire(CookerExit(), self.configuration.event_data) newfiles = set() @@ -862,13 +862,14 @@ class BBCooker: try: bbmask_compiled = re.compile(bbmask) except sre_constants.error: - bb.msg.fatal(bb.msg.domain.Collection, "BBMASK is not a valid regular expression.") + collectlog.critical("BBMASK is not a valid regular expression, ignoring.") + return list(newfiles), 0 bbfiles = [] bbappend = [] for f in newfiles: if bbmask and bbmask_compiled.search(f): - bb.msg.debug(1, bb.msg.domain.Collection, "skipping masked file %s" % f) + collectlog.debug(1, "skipping masked file %s", f) masked += 1 continue if f.endswith('.bb'): @@ -876,7 +877,7 @@ class BBCooker: elif f.endswith('.bbappend'): bbappend.append(f) else: - bb.msg.note(1, bb.msg.domain.Collection, "File %s of unknown filetype in BBFILES? Ignorning..." 
% f) + collectlog.debug(1, "skipping %s: unknown file extension", f) # Build a list of .bbappend files for each .bb file self.appendlist = {} @@ -975,11 +976,6 @@ class CookerParser: self.skipped += skipped self.virtuals += virtuals - except IOError as e: - self.error += 1 - cooker.bb_cache.remove(f) - bb.msg.error(bb.msg.domain.Collection, "opening %s: %s" % (f, e)) - pass except KeyboardInterrupt: cooker.bb_cache.remove(f) cooker.bb_cache.sync() @@ -987,7 +983,7 @@ class CookerParser: except Exception as e: self.error += 1 cooker.bb_cache.remove(f) - bb.msg.error(bb.msg.domain.Collection, "%s while parsing %s" % (e, f)) + parselog.exception("Unable to open %s", f) except: cooker.bb_cache.remove(f) raise diff --git a/lib/bb/data_smart.py b/lib/bb/data_smart.py index ac41c14fd..aac13490d 100644 --- a/lib/bb/data_smart.py +++ b/lib/bb/data_smart.py @@ -30,10 +30,12 @@ BitBake build tools. import copy, re, sys from collections import MutableMapping +import logging import bb from bb import utils from bb.COW import COWDictBase +logger = logging.getLogger("BitBake.Data") __setvar_keyword__ = ["_append", "_prepend"] __setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend)(_(?P<add>.*))?') @@ -82,10 +84,8 @@ class DataSmart(MutableMapping): s = __expand_python_regexp__.sub(python_sub, s) if s == olds: break - except KeyboardInterrupt: - raise - except: - bb.msg.note(1, bb.msg.domain.Data, "%s:%s while evaluating:\n%s" % (sys.exc_info()[0], sys.exc_info()[1], s)) + except Exception: + logger.exception("Error evaluating '%s'", s) raise if varname: @@ -127,7 +127,7 @@ class DataSmart(MutableMapping): try: self[name] = self[var] except Exception: - bb.msg.note(1, bb.msg.domain.Data, "Untracked delVar") + logger.info("Untracked delVar") # now on to the appends and prepends if "_append" in self._special_values: diff --git a/lib/bb/fetch/__init__.py b/lib/bb/fetch/__init__.py index 46957670a..13a2c419e 100644 --- a/lib/bb/fetch/__init__.py +++ 
b/lib/bb/fetch/__init__.py @@ -27,10 +27,13 @@ BitBake build tools. from __future__ import absolute_import from __future__ import print_function import os, re +import logging import bb from bb import data from bb import persist_data +logger = logging.getLogger("BitBake.Fetch") + class MalformedUrl(Exception): """Exception raised when encountering an invalid url""" @@ -117,9 +120,8 @@ def encodeurl(decoded): return url def uri_replace(uri, uri_find, uri_replace, d): -# bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: operating on %s" % uri) if not uri or not uri_find or not uri_replace: - bb.msg.debug(1, bb.msg.domain.Fetcher, "uri_replace: passed an undefined value, not replacing") + logger.debug(1, "uri_replace: passed an undefined value, not replacing") uri_decoded = list(decodeurl(uri)) uri_find_decoded = list(decodeurl(uri_find)) uri_replace_decoded = list(decodeurl(uri_replace)) @@ -135,13 +137,8 @@ def uri_replace(uri, uri_find, uri_replace, d): localfn = bb.fetch.localpath(uri, d) if localfn: result_decoded[loc] = os.path.dirname(result_decoded[loc]) + "/" + os.path.basename(bb.fetch.localpath(uri, d)) -# bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: matching %s against %s and replacing with %s" % (i, uri_decoded[loc], uri_replace_decoded[loc])) else: -# bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: no match") return uri -# else: -# for j in i: -# FIXME: apply replacements against options return encodeurl(result_decoded) methods = [] @@ -158,9 +155,9 @@ def fetcher_init(d): # When to drop SCM head revisions controlled by user policy srcrev_policy = bb.data.getVar('BB_SRCREV_POLICY', d, 1) or "clear" if srcrev_policy == "cache": - bb.msg.debug(1, bb.msg.domain.Fetcher, "Keeping SRCREV cache due to cache policy of: %s" % srcrev_policy) + logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy) elif srcrev_policy == "clear": - bb.msg.debug(1, bb.msg.domain.Fetcher, "Clearing SRCREV cache due to cache policy of: %s" % 
srcrev_policy) + logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy) try: bb.fetch.saved_headrevs = pd.getKeyValues("BB_URI_HEADREVS") except: @@ -190,11 +187,11 @@ def fetcher_compare_revisons(d): changed = False for key in data: if key not in data2 or data2[key] != data[key]: - bb.msg.debug(1, bb.msg.domain.Fetcher, "%s changed" % key) + logger.debug(1, "%s changed", key) changed = True return True else: - bb.msg.debug(2, bb.msg.domain.Fetcher, "%s did not change" % key) + logger.debug(2, "%s did not change", key) return False # Function call order is usually: @@ -288,7 +285,7 @@ def checkstatus(d): for u in urldata: ud = urldata[u] m = ud.method - bb.msg.note(1, bb.msg.domain.Fetcher, "Testing URL %s" % u) + logger.info("Testing URL %s", u) # First try checking uri, u, from PREMIRRORS mirrors = [ i.split() for i in (bb.data.getVar('PREMIRRORS', d, 1) or "").split('\n') if i ] ret = try_mirrors(d, u, mirrors, True) @@ -352,7 +349,7 @@ def get_srcrev(d): scms.append(u) if len(scms) == 0: - bb.msg.error(bb.msg.domain.Fetcher, "SRCREV was used yet no valid SCM was found in SRC_URI") + logger.error("SRCREV was used yet no valid SCM was found in SRC_URI") raise ParameterError if bb.data.getVar('BB_SRCREV_POLICY', d, True) != "cache": @@ -366,7 +363,7 @@ def get_srcrev(d): # format = bb.data.getVar('SRCREV_FORMAT', d, 1) if not format: - bb.msg.error(bb.msg.domain.Fetcher, "The SRCREV_FORMAT variable must be set when multiple SCMs are used.") + logger.error("The SRCREV_FORMAT variable must be set when multiple SCMs are used.") raise ParameterError for scm in scms: @@ -405,7 +402,7 @@ def runfetchcmd(cmd, d, quiet = False): if val: cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd) - bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cmd) + logger.debug(1, "Running %s", cmd) # redirect stderr to stdout stdout_handle = os.popen(cmd + " 2>&1", "r") @@ -441,7 +438,7 @@ def try_mirrors(d, uri, mirrors, check = False): """ fpath = 
os.path.join(data.getVar("DL_DIR", d, 1), os.path.basename(uri)) if not check and os.access(fpath, os.R_OK): - bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists, skipping checkout." % fpath) + logger.debug(1, "%s already exists, skipping checkout.", fpath) return fpath ld = d.createCopy() @@ -451,7 +448,7 @@ def try_mirrors(d, uri, mirrors, check = False): try: ud = FetchData(newuri, ld) except bb.fetch.NoMethodError: - bb.msg.debug(1, bb.msg.domain.Fetcher, "No method for %s" % uri) + logger.debug(1, "No method for %s", uri) continue ud.setup_localpath(ld) @@ -467,7 +464,7 @@ def try_mirrors(d, uri, mirrors, check = False): bb.fetch.MD5SumError): import sys (type, value, traceback) = sys.exc_info() - bb.msg.debug(2, bb.msg.domain.Fetcher, "Mirror fetch failure: %s" % value) + logger.debug(2, "Mirror fetch failure: %s", value) continue return None @@ -579,7 +576,7 @@ class Fetch(object): Check the status of a URL Assumes localpath was called first """ - bb.msg.note(1, bb.msg.domain.Fetcher, "URL %s could not be checked for status since no method exists." % url) + logger.info("URL %s could not be checked for status since no method exists.", url) return True def getSRCDate(urldata, d): diff --git a/lib/bb/fetch/bzr.py b/lib/bb/fetch/bzr.py index 813d7d8c8..8b0bd9ff3 100644 --- a/lib/bb/fetch/bzr.py +++ b/lib/bb/fetch/bzr.py @@ -25,11 +25,10 @@ BitBake 'Fetch' implementation for bzr. 
import os import sys +import logging import bb from bb import data -from bb.fetch import Fetch -from bb.fetch import FetchError -from bb.fetch import runfetchcmd +from bb.fetch import Fetch, FetchError, runfetchcmd, logger class Bzr(Fetch): def supports(self, url, ud, d): @@ -93,16 +92,16 @@ class Bzr(Fetch): if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK): bzrcmd = self._buildbzrcommand(ud, d, "update") - bb.msg.debug(1, bb.msg.domain.Fetcher, "BZR Update %s" % loc) + logger.debug(1, "BZR Update %s", loc) os.chdir(os.path.join (ud.pkgdir, os.path.basename(ud.path))) runfetchcmd(bzrcmd, d) else: os.system("rm -rf %s" % os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir))) bzrcmd = self._buildbzrcommand(ud, d, "fetch") - bb.msg.debug(1, bb.msg.domain.Fetcher, "BZR Checkout %s" % loc) + logger.debug(1, "BZR Checkout %s", loc) bb.mkdirhier(ud.pkgdir) os.chdir(ud.pkgdir) - bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % bzrcmd) + logger.debug(1, "Running %s", bzrcmd) runfetchcmd(bzrcmd, d) os.chdir(ud.pkgdir) @@ -130,7 +129,7 @@ class Bzr(Fetch): """ Return the latest upstream revision number """ - bb.msg.debug(2, bb.msg.domain.Fetcher, "BZR fetcher hitting network for %s" % url) + logger.debug(2, "BZR fetcher hitting network for %s", url) output = runfetchcmd(self._buildbzrcommand(ud, d, "revno"), d, True) diff --git a/lib/bb/fetch/cvs.py b/lib/bb/fetch/cvs.py index 61976f7ef..1064b09e1 100644 --- a/lib/bb/fetch/cvs.py +++ b/lib/bb/fetch/cvs.py @@ -27,11 +27,10 @@ BitBake build tools. 
# import os +import logging import bb from bb import data -from bb.fetch import Fetch -from bb.fetch import FetchError -from bb.fetch import MissingParameterError +from bb.fetch import Fetch, FetchError, MissingParameterError, logger class Cvs(Fetch): """ @@ -136,21 +135,21 @@ class Cvs(Fetch): cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd) # create module directory - bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory") + logger.debug(2, "Fetch: checking for module directory") pkg = data.expand('${PN}', d) pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg) moddir = os.path.join(pkgdir, localdir) if os.access(os.path.join(moddir, 'CVS'), os.R_OK): - bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc) + logger.info("Update " + loc) # update sources there os.chdir(moddir) myret = os.system(cvsupdatecmd) else: - bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) + logger.info("Fetch " + loc) # check out sources there bb.mkdirhier(pkgdir) os.chdir(pkgdir) - bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cvscmd) + logger.debug(1, "Running %s", cvscmd) myret = os.system(cvscmd) if myret != 0 or not os.access(moddir, os.R_OK): diff --git a/lib/bb/fetch/git.py b/lib/bb/fetch/git.py index 8c91de9db..1b1bc9585 100644 --- a/lib/bb/fetch/git.py +++ b/lib/bb/fetch/git.py @@ -25,6 +25,7 @@ import bb from bb import data from bb.fetch import Fetch from bb.fetch import runfetchcmd +from bb.fetch import logger class Git(Fetch): """Class to fetch a module or modules from git repositories""" @@ -115,7 +116,7 @@ class Git(Fetch): os.chdir(ud.clonedir) mirror_tarballs = data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) if mirror_tarballs != "0" or 'fullclone' in ud.parm: - bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git repository") + logger.info("Creating tarball of git repository") runfetchcmd("tar -czf %s %s" % (repofile, os.path.join(".", ".git", "*") ), d) if 'fullclone' in ud.parm: @@ -147,7 +148,7 
@@ class Git(Fetch): runfetchcmd("%s checkout-index -q -f --prefix=%s -a" % (ud.basecmd, coprefix), d) os.chdir(codir) - bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git checkout") + logger.info("Creating tarball of git checkout") runfetchcmd("tar -czf %s %s" % (ud.localpath, os.path.join(".", "*") ), d) os.chdir(ud.clonedir) @@ -200,7 +201,7 @@ class Git(Fetch): print("no repo") self.go(None, ud, d) if not os.path.exists(ud.clonedir): - bb.msg.error(bb.msg.domain.Fetcher, "GIT repository for %s doesn't exist in %s, cannot get sortable buildnumber, using old value" % (url, ud.clonedir)) + logger.error("GIT repository for %s doesn't exist in %s, cannot get sortable buildnumber, using old value", url, ud.clonedir) return None @@ -212,5 +213,5 @@ class Git(Fetch): os.chdir(cwd) buildindex = "%s" % output.split()[0] - bb.msg.debug(1, bb.msg.domain.Fetcher, "GIT repository for %s in %s is returning %s revisions in rev-list before %s" % (url, ud.clonedir, buildindex, rev)) + logger.debug(1, "GIT repository for %s in %s is returning %s revisions in rev-list before %s", url, ud.clonedir, buildindex, rev) return buildindex diff --git a/lib/bb/fetch/hg.py b/lib/bb/fetch/hg.py index efb3b5c76..ab00d4303 100644 --- a/lib/bb/fetch/hg.py +++ b/lib/bb/fetch/hg.py @@ -26,12 +26,14 @@ BitBake 'Fetch' implementation for mercurial DRCS (hg). 
import os import sys +import logging import bb from bb import data from bb.fetch import Fetch from bb.fetch import FetchError from bb.fetch import MissingParameterError from bb.fetch import runfetchcmd +from bb.fetch import logger class Hg(Fetch): """Class to fetch a from mercurial repositories""" @@ -116,29 +118,29 @@ class Hg(Fetch): def go(self, loc, ud, d): """Fetch url""" - bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'") + logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'") if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK): updatecmd = self._buildhgcommand(ud, d, "pull") - bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc) + logger.info("Update " + loc) # update sources there os.chdir(ud.moddir) - bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd) + logger.debug(1, "Running %s", updatecmd) runfetchcmd(updatecmd, d) else: fetchcmd = self._buildhgcommand(ud, d, "fetch") - bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) + logger.info("Fetch " + loc) # check out sources there bb.mkdirhier(ud.pkgdir) os.chdir(ud.pkgdir) - bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % fetchcmd) + logger.debug(1, "Running %s", fetchcmd) runfetchcmd(fetchcmd, d) # Even when we clone (fetch), we still need to update as hg's clone # won't checkout the specified revision if its on a branch updatecmd = self._buildhgcommand(ud, d, "update") - bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd) + logger.debug(1, "Running %s", updatecmd) runfetchcmd(updatecmd, d) os.chdir(ud.pkgdir) diff --git a/lib/bb/fetch/local.py b/lib/bb/fetch/local.py index 882a2c460..6aa9e4576 100644 --- a/lib/bb/fetch/local.py +++ b/lib/bb/fetch/local.py @@ -66,7 +66,7 @@ class Local(Fetch): Check the status of the url """ if urldata.localpath.find("*") != -1: - bb.msg.note(1, bb.msg.domain.Fetcher, "URL %s looks like a glob and was therefore not checked." 
% url) + logger.info("URL %s looks like a glob and was therefore not checked.", url) return True if os.path.exists(urldata.localpath): return True diff --git a/lib/bb/fetch/osc.py b/lib/bb/fetch/osc.py index ed773939b..6fcb344ce 100644 --- a/lib/bb/fetch/osc.py +++ b/lib/bb/fetch/osc.py @@ -8,6 +8,7 @@ Based on the svn "Fetch" implementation. import os import sys +import logging import bb from bb import data from bb.fetch import Fetch @@ -91,22 +92,22 @@ class Osc(Fetch): Fetch url """ - bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'") + logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'") if os.access(os.path.join(data.expand('${OSCDIR}', d), ud.path, ud.module), os.R_OK): oscupdatecmd = self._buildosccommand(ud, d, "update") - bb.msg.note(1, bb.msg.domain.Fetcher, "Update "+ loc) + logger.info("Update "+ loc) # update sources there os.chdir(ud.moddir) - bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % oscupdatecmd) + logger.debug(1, "Running %s", oscupdatecmd) runfetchcmd(oscupdatecmd, d) else: oscfetchcmd = self._buildosccommand(ud, d, "fetch") - bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) + logger.info("Fetch " + loc) # check out sources there bb.mkdirhier(ud.pkgdir) os.chdir(ud.pkgdir) - bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % oscfetchcmd) + logger.debug(1, "Running %s", oscfetchcmd) runfetchcmd(oscfetchcmd, d) os.chdir(os.path.join(ud.pkgdir + ud.path)) diff --git a/lib/bb/fetch/perforce.py b/lib/bb/fetch/perforce.py index 1c74cff34..6f68d8561 100644 --- a/lib/bb/fetch/perforce.py +++ b/lib/bb/fetch/perforce.py @@ -27,10 +27,12 @@ BitBake build tools. 
from future_builtins import zip import os +import logging import bb from bb import data from bb.fetch import Fetch from bb.fetch import FetchError +from bb.fetch import logger class Perforce(Fetch): def supports(self, url, ud, d): @@ -86,10 +88,10 @@ class Perforce(Fetch): depot += "@%s" % (p4date) p4cmd = data.getVar('FETCHCOMMAND_p4', d, 1) - bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s%s changes -m 1 %s" % (p4cmd, p4opt, depot)) + logger.debug(1, "Running %s%s changes -m 1 %s", p4cmd, p4opt, depot) p4file = os.popen("%s%s changes -m 1 %s" % (p4cmd, p4opt, depot)) cset = p4file.readline().strip() - bb.msg.debug(1, bb.msg.domain.Fetcher, "READ %s" % (cset)) + logger.debug(1, "READ %s", cset) if not cset: return -1 @@ -155,13 +157,13 @@ class Perforce(Fetch): p4cmd = data.getVar('FETCHCOMMAND', localdata, 1) # create temp directory - bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory") + logger.debug(2, "Fetch: creating temporary directory") bb.mkdirhier(data.expand('${WORKDIR}', localdata)) data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata) tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false") tmpfile = tmppipe.readline().strip() if not tmpfile: - bb.msg.error(bb.msg.domain.Fetcher, "Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.") + logger.error("Fetch: unable to create temporary directory.. 
make sure 'mktemp' is in the PATH.") raise FetchError(module) if "label" in parm: @@ -171,12 +173,12 @@ class Perforce(Fetch): depot = "%s@%s" % (depot, cset) os.chdir(tmpfile) - bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) - bb.msg.note(1, bb.msg.domain.Fetcher, "%s%s files %s" % (p4cmd, p4opt, depot)) + logger.info("Fetch " + loc) + logger.info("%s%s files %s", p4cmd, p4opt, depot) p4file = os.popen("%s%s files %s" % (p4cmd, p4opt, depot)) if not p4file: - bb.msg.error(bb.msg.domain.Fetcher, "Fetch: unable to get the P4 files from %s" % (depot)) + logger.error("Fetch: unable to get the P4 files from %s", depot) raise FetchError(module) count = 0 @@ -194,7 +196,7 @@ class Perforce(Fetch): count = count + 1 if count == 0: - bb.msg.error(bb.msg.domain.Fetcher, "Fetch: No files gathered from the P4 fetch") + logger.error("Fetch: No files gathered from the P4 fetch") raise FetchError(module) myret = os.system("tar -czf %s %s" % (ud.localpath, module)) diff --git a/lib/bb/fetch/repo.py b/lib/bb/fetch/repo.py index 883310b01..479479681 100644 --- a/lib/bb/fetch/repo.py +++ b/lib/bb/fetch/repo.py @@ -72,7 +72,7 @@ class Repo(Fetch): """Fetch url""" if os.access(os.path.join(data.getVar("DL_DIR", d, True), ud.localfile), os.R_OK): - bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists (or was stashed). Skipping repo init / sync." % ud.localpath) + logger.debug(1, "%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath) return gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", ".")) diff --git a/lib/bb/fetch/svk.py b/lib/bb/fetch/svk.py index a17ac04d2..2754971eb 100644 --- a/lib/bb/fetch/svk.py +++ b/lib/bb/fetch/svk.py @@ -26,11 +26,13 @@ This implementation is for svk. 
It is based on the svn implementation # Based on functions from the base bb module, Copyright 2003 Holger Schurig import os +import logging import bb from bb import data from bb.fetch import Fetch from bb.fetch import FetchError from bb.fetch import MissingParameterError +from bb.fetch import logger class Svk(Fetch): """Class to fetch a module or modules from svk repositories""" @@ -72,19 +74,19 @@ class Svk(Fetch): # create temp directory localdata = data.createCopy(d) data.update_data(localdata) - bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory") + logger.debug(2, "Fetch: creating temporary directory") bb.mkdirhier(data.expand('${WORKDIR}', localdata)) data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata) tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false") tmpfile = tmppipe.readline().strip() if not tmpfile: - bb.msg.error(bb.msg.domain.Fetcher, "Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.") + logger.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.") raise FetchError(ud.module) # check out sources there os.chdir(tmpfile) - bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) - bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svkcmd) + logger.info("Fetch " + loc) + logger.debug(1, "Running %s", svkcmd) myret = os.system(svkcmd) if myret != 0: try: diff --git a/lib/bb/fetch/svn.py b/lib/bb/fetch/svn.py index 375e8df05..34f813225 100644 --- a/lib/bb/fetch/svn.py +++ b/lib/bb/fetch/svn.py @@ -25,12 +25,14 @@ BitBake 'Fetch' implementation for svn. 
import os import sys +import logging import bb from bb import data from bb.fetch import Fetch from bb.fetch import FetchError from bb.fetch import MissingParameterError from bb.fetch import runfetchcmd +from bb.fetch import logger class Svn(Fetch): """Class to fetch a module or modules from svn repositories""" @@ -136,22 +138,22 @@ class Svn(Fetch): def go(self, loc, ud, d): """Fetch url""" - bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'") + logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'") if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK): svnupdatecmd = self._buildsvncommand(ud, d, "update") - bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc) + logger.info("Update " + loc) # update sources there os.chdir(ud.moddir) - bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svnupdatecmd) + logger.debug(1, "Running %s", svnupdatecmd) runfetchcmd(svnupdatecmd, d) else: svnfetchcmd = self._buildsvncommand(ud, d, "fetch") - bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) + logger.info("Fetch " + loc) # check out sources there bb.mkdirhier(ud.pkgdir) os.chdir(ud.pkgdir) - bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svnfetchcmd) + logger.debug(1, "Running %s", svnfetchcmd) runfetchcmd(svnfetchcmd, d) os.chdir(ud.pkgdir) @@ -179,7 +181,7 @@ class Svn(Fetch): """ Return the latest upstream revision number """ - bb.msg.debug(2, bb.msg.domain.Fetcher, "SVN fetcher hitting network for %s" % url) + logger.debug(2, "SVN fetcher hitting network for %s", url) output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "info"), d, True) diff --git a/lib/bb/fetch/wget.py b/lib/bb/fetch/wget.py index 18503a03f..4d4bdfd49 100644 --- a/lib/bb/fetch/wget.py +++ b/lib/bb/fetch/wget.py @@ -26,13 +26,11 @@ BitBake build tools. 
# Based on functions from the base bb module, Copyright 2003 Holger Schurig import os +import logging import bb import urllib from bb import data -from bb.fetch import Fetch -from bb.fetch import FetchError -from bb.fetch import encodeurl, decodeurl -from bb.fetch import runfetchcmd +from bb.fetch import Fetch, FetchError, encodeurl, decodeurl, logger, runfetchcmd class Wget(Fetch): """Class to fetch urls via 'wget'""" @@ -69,15 +67,14 @@ class Wget(Fetch): fetchcmd = fetchcmd.replace("${URI}", uri.split(";")[0]) fetchcmd = fetchcmd.replace("${FILE}", ud.basename) - - bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri) - bb.msg.debug(2, bb.msg.domain.Fetcher, "executing " + fetchcmd) + logger.info("fetch " + uri) + logger.debug(2, "executing " + fetchcmd) runfetchcmd(fetchcmd, d) # Sanity check since wget can pretend it succeed when it didn't # Also, this used to happen if sourceforge sent us to the mirror page if not os.path.exists(ud.localpath) and not checkonly: - bb.msg.debug(2, bb.msg.domain.Fetcher, "The fetch command for %s returned success but %s doesn't exist?..." 
% (uri, ud.localpath)) + logger.debug(2, "The fetch command for %s returned success but %s doesn't exist?...", uri, ud.localpath) return False return True diff --git a/lib/bb/msg.py b/lib/bb/msg.py index 098b0dc80..d0fe1f76c 100644 --- a/lib/bb/msg.py +++ b/lib/bb/msg.py @@ -109,19 +109,24 @@ def set_debug_domains(domainargs): def debug(level, msgdomain, msg, fn = None): level = logging.DEBUG - (level - 1) if not msgdomain: - logger.log(level, msg) + logger.debug(level, msg) else: - loggers[msgdomain].log(level, msg) + loggers[msgdomain].debug(level, msg) def plain(msg, fn = None): - logger.log(logging.INFO + 1, msg) + logger.plain(msg) def note(level, msgdomain, msg, fn = None): - level = logging.INFO - (level - 1) - if not msgdomain: - logger.log(level, msg) + if level > 1: + if not msgdomain: + logger.verbose(msg) + else: + loggers[msgdomain].verbose(msg) else: - loggers[msgdomain].log(level, msg) + if not msgdomain: + logger.info(msg) + else: + loggers[msgdomain].info(msg) def warn(msgdomain, msg, fn = None): if not msgdomain: diff --git a/lib/bb/parse/__init__.py b/lib/bb/parse/__init__.py index 95f372b00..e2e7fe639 100644 --- a/lib/bb/parse/__init__.py +++ b/lib/bb/parse/__init__.py @@ -26,9 +26,13 @@ File parsers for the BitBake build tools.
handlers = [] -import bb, os +import os +import logging +import bb import bb.utils +logger = logging.getLogger("BitBake.Parsing") + class ParseError(Exception): """Exception raised when parsing fails""" @@ -87,7 +91,7 @@ def resolve_file(fn, d): raise IOError("file %s not found in %s" % (fn, bbpath)) fn = newfn - bb.msg.debug(2, bb.msg.domain.Parsing, "LOAD %s" % fn) + logger.debug(2, "LOAD %s", fn) return fn # Used by OpenEmbedded metadata diff --git a/lib/bb/parse/ast.py b/lib/bb/parse/ast.py index eb24e0ddd..8cc166b02 100644 --- a/lib/bb/parse/ast.py +++ b/lib/bb/parse/ast.py @@ -23,9 +23,13 @@ from __future__ import absolute_import from future_builtins import filter -import bb, re, string -from bb import methodpool +import re +import string +import logging +import bb import itertools +from bb import methodpool +from bb.parse import logger __word__ = re.compile(r"\S+") __parsed_methods__ = bb.methodpool.get_parsed_dict() @@ -51,7 +55,7 @@ class IncludeNode(AstNode): Include the file and evaluate the statements """ s = bb.data.expand(self.what_file, data) - bb.msg.debug(3, bb.msg.domain.Parsing, "CONF %s:%d: including %s" % (self.from_fn, self.from_lineno, s)) + logger.debug(2, "CONF %s:%s: including %s", self.from_fn, self.from_lineno, s) # TODO: Cache those includes... 
maybe not here though if self.force: @@ -359,7 +363,7 @@ def _expand_versions(versions): def multi_finalize(fn, d): appends = (d.getVar("__BBAPPEND", True) or "").split() for append in appends: - bb.msg.debug(2, bb.msg.domain.Parsing, "Appending .bbappend file " + append + " to " + fn) + logger.debug(2, "Appending .bbappend file %s to %s", append, fn) bb.parse.BBHandler.handle(append, d, True) safe_d = d diff --git a/lib/bb/parse/parse_py/BBHandler.py b/lib/bb/parse/parse_py/BBHandler.py index bb5617488..bda6acc94 100644 --- a/lib/bb/parse/parse_py/BBHandler.py +++ b/lib/bb/parse/parse_py/BBHandler.py @@ -27,11 +27,12 @@ from __future__ import absolute_import import re, bb, os +import logging import bb.fetch, bb.build, bb.utils from bb import data from . import ConfHandler -from .. import resolve_file, ast +from .. import resolve_file, ast, logger from .ConfHandler import include, init # For compatibility @@ -76,7 +77,7 @@ def inherit(files, d): file = os.path.join('classes', '%s.bbclass' % file) if not file in __inherit_cache: - bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s:%d: inheriting %s" % (fn, lineno, file)) + logger.log(logging.DEBUG -1, "BB %s:%d: inheriting %s", fn, lineno, file) __inherit_cache.append( file ) data.setVar('__inherit_cache', __inherit_cache, d) include(fn, file, d, "inherit") @@ -115,9 +116,9 @@ def handle(fn, d, include): if include == 0: - bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data)") + logger.debug(2, "BB %s: handle(data)", fn) else: - bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data, include)") + logger.debug(2, "BB %s: handle(data, include)", fn) (root, ext) = os.path.splitext(os.path.basename(fn)) base_name = "%s%s" % (root, ext) diff --git a/lib/bb/parse/parse_py/ConfHandler.py b/lib/bb/parse/parse_py/ConfHandler.py index 9128a2ef8..2abd2fdc2 100644 --- a/lib/bb/parse/parse_py/ConfHandler.py +++ b/lib/bb/parse/parse_py/ConfHandler.py @@ -25,8 +25,9 @@ # 51 Franklin Street, Fifth Floor, 
Boston, MA 02110-1301 USA. import re, bb.data, os +import logging import bb.utils -from bb.parse import ParseError, resolve_file, ast +from bb.parse import ParseError, resolve_file, ast, logger #__config_regexp__ = re.compile( r"(?P<exp>export\s*)?(?P<var>[a-zA-Z0-9\-_+.${}]+)\s*(?P<colon>:)?(?P<ques>\?)?=\s*(?P<apo>['\"]?)(?P<value>.*)(?P=apo)$") __config_regexp__ = re.compile( r"(?P<exp>export\s*)?(?P<var>[a-zA-Z0-9\-_+.${}/]+)(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?\s*((?P<colon>:=)|(?P<lazyques>\?\?=)|(?P<ques>\?=)|(?P<append>\+=)|(?P<prepend>=\+)|(?P<predot>=\.)|(?P<postdot>\.=)|=)\s*(?P<apo>['\"]?)(?P<value>.*)(?P=apo)$") @@ -68,7 +69,7 @@ def include(oldfn, fn, data, error_out): except IOError: if error_out: raise ParseError("Could not %(error_out)s file %(fn)s" % vars() ) - bb.msg.debug(2, bb.msg.domain.Parsing, "CONF file '%s' not found" % fn) + logger.debug(2, "CONF file '%s' not found", fn) def handle(fn, data, include): init(data) diff --git a/lib/bb/persist_data.py b/lib/bb/persist_data.py index df0409cd8..91c089bdf 100644 --- a/lib/bb/persist_data.py +++ b/lib/bb/persist_data.py @@ -15,9 +15,13 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-import bb, os +import os +import logging +import bb import bb.utils +logger = logging.getLogger("BitBake.PersistData") + try: import sqlite3 except ImportError: @@ -56,7 +60,7 @@ class PersistData: bb.utils.mkdirhier(self.cachedir) self.cachefile = os.path.join(self.cachedir, "bb_persist_data.sqlite3") - bb.msg.debug(1, bb.msg.domain.PersistData, "Using '%s' as the persistent data cache" % self.cachefile) + logger.debug(1, "Using '%s' as the persistent data cache", self.cachefile) connection = sqlite3.connect(self.cachefile, timeout=5, isolation_level=None) persistent_database_connection["connection"] = connection diff --git a/lib/bb/providers.py b/lib/bb/providers.py index 58326f039..d017d6006 100644 --- a/lib/bb/providers.py +++ b/lib/bb/providers.py @@ -22,9 +22,12 @@ # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import re +import logging from bb import data, utils import bb +logger = logging.getLogger("BitBake.Provider") + class NoProvider(Exception): """Exception raised when no provider of a build dependency can be found""" @@ -120,9 +123,9 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None): if item: itemstr = " (for item %s)" % item if preferred_file is None: - bb.msg.note(1, bb.msg.domain.Provider, "preferred version %s of %s not available%s" % (pv_str, pn, itemstr)) + logger.info("preferred version %s of %s not available%s", pv_str, pn, itemstr) else: - bb.msg.debug(1, bb.msg.domain.Provider, "selecting %s as PREFERRED_VERSION %s of package %s%s" % (preferred_file, pv_str, pn, itemstr)) + logger.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr) return (preferred_ver, preferred_file) @@ -189,7 +192,7 @@ def _filterProviders(providers, item, cfgData, dataCache): pkg_pn[pn] = [] pkg_pn[pn].append(p) - bb.msg.debug(1, bb.msg.domain.Provider, "providers for %s are: %s" % (item, pkg_pn.keys())) + logger.debug(1, "providers for %s are: %s", item, pkg_pn.keys()) # 
First add PREFERRED_VERSIONS for pn in pkg_pn: @@ -206,7 +209,7 @@ def _filterProviders(providers, item, cfgData, dataCache): eligible.append(preferred_versions[pn][1]) if len(eligible) == 0: - bb.msg.error(bb.msg.domain.Provider, "no eligible providers for %s" % item) + logger.error("no eligible providers for %s", item) return 0 # If pn == item, give it a slight default preference @@ -242,13 +245,13 @@ def filterProviders(providers, item, cfgData, dataCache): for p in eligible: pn = dataCache.pkg_fn[p] if dataCache.preferred[item] == pn: - bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy %s due to PREFERRED_PROVIDERS" % (pn, item)) + logger.verbose("selecting %s to satisfy %s due to PREFERRED_PROVIDERS", pn, item) eligible.remove(p) eligible = [p] + eligible foundUnique = True break - bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible)) + logger.debug(1, "sorted providers for %s are: %s", item, eligible) return eligible, foundUnique @@ -268,11 +271,11 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache): pn = dataCache.pkg_fn[p] provides = dataCache.pn_provides[pn] for provide in provides: - bb.msg.note(2, bb.msg.domain.Provider, "checking PREFERRED_PROVIDER_%s" % (provide)) + logger.verbose("checking PREFERRED_PROVIDER_%s", provide) prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, cfgData, 1) if prefervar == pn: var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar) - bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s due to %s" % (pn, item, var)) + logger.verbose("selecting %s to satisfy runtime %s due to %s", pn, item, var) preferred_vars.append(var) eligible.remove(p) eligible = [p] + eligible @@ -282,9 +285,9 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache): numberPreferred = len(preferred) if numberPreferred > 1: - bb.msg.error(bb.msg.domain.Provider, "Conflicting PREFERRED_PROVIDER entries were found which resulted in an attempt to 
select multiple providers (%s) for runtime dependecy %s\nThe entries resulting in this conflict were: %s" % (preferred, item, preferred_vars)) + logger.error("Conflicting PREFERRED_PROVIDER entries were found which resulted in an attempt to select multiple providers (%s) for runtime dependency %s\nThe entries resulting in this conflict were: %s", preferred, item, preferred_vars) - bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible)) + logger.debug(1, "sorted providers for %s are: %s", item, eligible) return eligible, numberPreferred @@ -314,7 +317,7 @@ def getRuntimeProviders(dataCache, rdepend): try: regexp = re.compile(pattern) except: - bb.msg.error(bb.msg.domain.Provider, "Error parsing re expression: %s" % pattern) + logger.error("Error parsing regular expression '%s'", pattern) raise regexp_cache[pattern] = regexp if regexp.match(rdepend): diff --git a/lib/bb/runqueue.py b/lib/bb/runqueue.py index 4010dadf7..004cdc1c0 100644 --- a/lib/bb/runqueue.py +++ b/lib/bb/runqueue.py @@ -22,14 +22,17 @@ Handles preparation and execution of a queue of tasks # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-import bb, os, sys +import os +import sys import signal import stat import fcntl import logging +import bb from bb import msg, data, event bblogger = logging.getLogger("BitBake") +logger = logging.getLogger("BitBake.RunQueue") class TaskFailure(Exception): """Exception raised when a task in a runqueue fails""" @@ -359,14 +362,14 @@ class RunQueue: for task in range(numTasks): if task_done[task] is False or deps_left[task] != 0: problem_tasks.append(task) - bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s) is not buildable\n" % (task, self.get_user_idstring(task))) - bb.msg.debug(2, bb.msg.domain.RunQueue, "(Complete marker was %s and the remaining dependency count was %s)\n\n" % (task_done[task], deps_left[task])) + logger.debug(2, "Task %s (%s) is not buildable\n", task, self.get_user_idstring(task)) + logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n\n", task_done[task], deps_left[task]) if problem_tasks: message = "Unbuildable tasks were found.\n" message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. 
Increase the debug level to see a list of unbuildable tasks.\n\n" message = message + "Identifying dependency loops (this may take a short while)...\n" - bb.msg.error(bb.msg.domain.RunQueue, message) + logger.error(message) msgs = self.circular_depchains_handler(problem_tasks) @@ -394,7 +397,7 @@ class RunQueue: # Nothing to do return - bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue") + logger.info("Preparing runqueue") # Step A - Work out a list of tasks to run # @@ -441,7 +444,7 @@ class RunQueue: fn = taskData.fn_index[fnid] task_deps = self.dataCache.task_deps[fn] - bb.msg.debug(2, bb.msg.domain.RunQueue, "Processing %s:%s" %(fn, taskData.tasks_name[task])) + logger.debug(2, "Processing %s:%s", fn, taskData.tasks_name[task]) if fnid not in taskData.failed_fnids: @@ -501,7 +504,7 @@ class RunQueue: # Rmove all self references if task in depends: newdep = [] - bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends)) + logger.debug(2, "Task %s (%s %s) contains self reference! %s", task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends) for dep in depends: if task != dep: newdep.append(dep) @@ -562,7 +565,7 @@ class RunQueue: # as active too. If the task is to be 'forced', clear its stamp. Once # all active tasks are marked, prune the ones we don't need. 
- bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks") + logger.verbose("Marking Active Tasks") def mark_active(listid, depth): """ @@ -595,7 +598,7 @@ class RunQueue: # Remove stamps for targets if force mode active if self.cooker.configuration.force: - bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (target[1], fn)) + logger.verbose("Remove stamp %s, %s", target[1], fn) bb.build.del_stamp(target[1], self.dataCache, fn) if fnid in taskData.failed_fnids: @@ -637,7 +640,7 @@ class RunQueue: else: bb.msg.fatal(bb.msg.domain.RunQueue, "No active tasks and not in --continue mode?! Please report this bug.") - bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid))) + logger.verbose("Pruned %s inactive tasks, %s left", delcount, len(self.runq_fnid)) # Remap the dependencies to account for the deleted tasks # Check we didn't delete a task we depend on @@ -650,7 +653,7 @@ class RunQueue: newdeps.append(maps[origdep]) self.runq_depends[listid] = set(newdeps) - bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings") + logger.verbose("Assign Weightings") # Generate a list of reverse dependencies to ease future calculations for listid in range(len(self.runq_fnid)): @@ -669,7 +672,7 @@ class RunQueue: #self.dump_data(taskData) bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep], taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid])) - bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints)) + logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints)) # Calculate task weights # Check of higher length circular dependencies @@ -678,7 +681,7 @@ class RunQueue: for scheduler in self.schedulers: if self.scheduler == scheduler.name: self.sched = scheduler(self) - bb.msg.debug(1, bb.msg.domain.RunQueue, "Using runqueue scheduler '%s'" % 
scheduler.name) + logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name) break else: bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" % @@ -701,9 +704,7 @@ class RunQueue: for prov in prov_list: if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist: error = True - bb.msg.error(bb.msg.domain.RunQueue, "Multiple .bb files are due to be built which each provide %s (%s).\n This usually means one provides something the other doesn't and should." % (prov, " ".join(prov_list[prov]))) - #if error: - # bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...") + logger.error("Multiple .bb files are due to be built which each provide %s (%s).\n This usually means one provides something the other doesn't and should.", prov, " ".join(prov_list[prov])) # Create a whitelist usable by the stamp checks @@ -831,12 +832,12 @@ class RunQueue: stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname) # If the stamp is missing its not current if not os.access(stampfile, os.F_OK): - bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s not available\n" % stampfile) + logger.debug(2, "Stampfile %s not available\n", stampfile) return False # If its a 'nostamp' task, it's not current taskdep = self.dataCache.task_deps[fn] if 'nostamp' in taskdep and taskname in taskdep['nostamp']: - bb.msg.debug(2, bb.msg.domain.RunQueue, "%s.%s is nostamp\n" % (fn, taskname)) + logger.debug(2, "%s.%s is nostamp\n", fn, taskname) return False iscurrent = True @@ -850,10 +851,10 @@ class RunQueue: try: t2 = os.stat(stampfile2)[stat.ST_MTIME] if t1 < t2: - bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile, stampfile2)) + logger.debug(2, "Stampfile %s < %s", stampfile, stampfile2) iscurrent = False except: - bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2, stampfile)) + logger.debug(2, "Exception reading %s for %s", stampfile2, stampfile) iscurrent = False return iscurrent 
@@ -869,7 +870,7 @@ class RunQueue: self.prepare_runqueue() if self.state is runQueueRunInit: - bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue") + logger.info("Executing runqueue") self.execute_runqueue_initVars() if self.state is runQueueRunning: @@ -887,7 +888,7 @@ class RunQueue: if self.state is runQueueComplete: # All done - bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (self.stats.completed, self.stats.skipped, self.stats.failed)) + logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.stats.completed, self.stats.skipped, self.stats.failed) return False if self.state is runQueueChildProcess: @@ -941,14 +942,14 @@ class RunQueue: self.runq_buildable[revdep] = 1 fn = self.taskData.fn_index[self.runq_fnid[revdep]] taskname = self.runq_task[revdep] - bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname)) + logger.debug(1, "Marking task %s (%s, %s) as buildable", revdep, fn, taskname) def task_fail(self, task, exitcode): """ Called when a task has failed Updates the state engine with the failure """ - bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed with %s" % (task, self.get_user_idstring(task), exitcode)) + logger.error("Task %s (%s) failed with %s", task, self.get_user_idstring(task), exitcode) self.stats.taskFailed() fnid = self.runq_fnid[task] self.failed_fnids.append(fnid) @@ -971,7 +972,7 @@ class RunQueue: taskname = self.runq_task[task] if self.check_stamp_task(task, taskname): - bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task))) + logger.debug(2, "Stamp current task %s (%s)", task, self.get_user_idstring(task)) self.runq_running[task] = 1 self.runq_buildable[task] = 1 self.task_complete(task) @@ -1001,11 +1002,11 @@ class RunQueue: # Sanity Checks for task in range(self.stats.total): if 
self.runq_buildable[task] == 0: - bb.msg.error(bb.msg.domain.RunQueue, "Task %s never buildable!" % task) + logger.error("Task %s never buildable!", task) if self.runq_running[task] == 0: - bb.msg.error(bb.msg.domain.RunQueue, "Task %s never ran!" % task) + logger.error("Task %s never ran!", task) if self.runq_complete[task] == 0: - bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task) + logger.error("Task %s never completed!", task) self.state = runQueueComplete return @@ -1030,7 +1031,7 @@ class RunQueue: def finish_runqueue_now(self): if self.stats.active: - bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGTERM to remaining %s tasks" % self.stats.active) + logger.info("Sending SIGTERM to remaining %s tasks", self.stats.active) for k, v in self.build_pids.iteritems(): try: os.kill(-k, signal.SIGTERM) @@ -1097,11 +1098,10 @@ class RunQueue: #os.dup2(newso.fileno(), sys.stderr.fileno()) bb.event.fire(runQueueTaskStarted(task, self.stats, self), self.cfgData) - bb.msg.note(1, bb.msg.domain.RunQueue, - "Running task %d of %d (ID: %s, %s)" % (self.stats.completed + self.stats.active + self.stats.failed + 1, - self.stats.total, - task, - self.get_user_idstring(task))) + logger.info("Running task %d of %d (ID: %s, %s)", self.stats.completed + self.stats.active + self.stats.failed + 1, + self.stats.total, + task, + self.get_user_idstring(task)) bb.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", self, self.cooker.configuration.data) bb.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY2", fn, self.cooker.configuration.data) @@ -1114,12 +1114,10 @@ class RunQueue: except bb.build.EventException as e: event = e.args[1] - bb.msg.error(bb.msg.domain.Build, "%s event exception, aborting" % bb.event.getName(event)) + logger.error("%s event exception, aborting" % bb.event.getName(event)) os._exit(1) except Exception: - from traceback import format_exc - bb.msg.error(bb.msg.domain.Build, "Build of %s %s failed" % (fn, taskname)) - bb.msg.error(bb.msg.domain.Build, 
format_exc()) + logger.exception("Build of %s %s failed", fn, taskname) os._exit(1) os._exit(0) return pid, pipein, pipeout @@ -1129,25 +1127,25 @@ class RunQueue: """ Dump some debug information on the internal data structures """ - bb.msg.debug(3, bb.msg.domain.RunQueue, "run_tasks:") + logger.debug(3, "run_tasks:") for task in range(len(self.runq_task)): - bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task, - taskQueue.fn_index[self.runq_fnid[task]], - self.runq_task[task], - self.runq_weight[task], - self.runq_depends[task], - self.runq_revdeps[task])) - - bb.msg.debug(3, bb.msg.domain.RunQueue, "sorted_tasks:") + logger.debug(3, " (%s)%s - %s: %s Deps %s RevDeps %s", task, + taskQueue.fn_index[self.runq_fnid[task]], + self.runq_task[task], + self.runq_weight[task], + self.runq_depends[task], + self.runq_revdeps[task]) + + logger.debug(3, "sorted_tasks:") for task1 in range(len(self.runq_task)): if task1 in self.prio_map: task = self.prio_map[task1] - bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task, - taskQueue.fn_index[self.runq_fnid[task]], - self.runq_task[task], - self.runq_weight[task], - self.runq_depends[task], - self.runq_revdeps[task])) + logger.debug(3, " (%s)%s - %s: %s Deps %s RevDeps %s", task, + taskQueue.fn_index[self.runq_fnid[task]], + self.runq_task[task], + self.runq_weight[task], + self.runq_depends[task], + self.runq_revdeps[task]) class TaskFailure(Exception): diff --git a/lib/bb/taskdata.py b/lib/bb/taskdata.py index e31f96785..7dc1c0c82 100644 --- a/lib/bb/taskdata.py +++ b/lib/bb/taskdata.py @@ -23,8 +23,11 @@ Task data collection and handling # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+import logging import bb +logger = logging.getLogger("BitBake.TaskData") + def re_match_strings(target, strings): """ Whether or not the string 'target' matches @@ -182,7 +185,7 @@ class TaskData: if not fnid in self.depids: dependids = {} for depend in dataCache.deps[fn]: - bb.msg.debug(2, bb.msg.domain.TaskData, "Added dependency %s for %s" % (depend, fn)) + logger.debug(2, "Added dependency %s for %s", depend, fn) dependids[self.getbuild_id(depend)] = None self.depids[fnid] = dependids.keys() @@ -193,11 +196,11 @@ class TaskData: rrecs = dataCache.runrecs[fn] for package in rdepends: for rdepend in bb.utils.explode_deps(rdepends[package]): - bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime dependency %s for %s" % (rdepend, fn)) + logger.debug(2, "Added runtime dependency %s for %s", rdepend, fn) rdependids[self.getrun_id(rdepend)] = None for package in rrecs: for rdepend in bb.utils.explode_deps(rrecs[package]): - bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime recommendation %s for %s" % (rdepend, fn)) + logger.debug(2, "Added runtime recommendation %s for %s", rdepend, fn) rdependids[self.getrun_id(rdepend)] = None self.rdepids[fnid] = rdependids.keys() @@ -397,7 +400,7 @@ class TaskData: fnid = self.getfn_id(fn) if fnid in self.failed_fnids: continue - bb.msg.debug(2, bb.msg.domain.Provider, "adding %s to satisfy %s" % (fn, item)) + logger.debug(2, "adding %s to satisfy %s", fn, item) self.add_build_target(fn, item) self.add_tasks(fn, dataCache) @@ -450,7 +453,7 @@ class TaskData: fnid = self.getfn_id(fn) if fnid in self.failed_fnids: continue - bb.msg.debug(2, bb.msg.domain.Provider, "adding '%s' to satisfy runtime '%s'" % (fn, item)) + logger.debug(2, "adding '%s' to satisfy runtime '%s'", fn, item) self.add_runtime_target(fn, item) self.add_tasks(fn, dataCache) @@ -463,7 +466,7 @@ class TaskData: """ if fnid in self.failed_fnids: return - bb.msg.debug(1, bb.msg.domain.Provider, "File '%s' is unbuildable, removing..." 
% self.fn_index[fnid]) + logger.debug(1, "File '%s' is unbuildable, removing...", self.fn_index[fnid]) self.failed_fnids.append(fnid) for target in self.build_targets: if fnid in self.build_targets[target]: @@ -485,7 +488,7 @@ class TaskData: missing_list = [self.build_names_index[targetid]] else: missing_list = [self.build_names_index[targetid]] + missing_list - bb.msg.note(2, bb.msg.domain.Provider, "Target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s" % (self.build_names_index[targetid], missing_list)) + logger.verbose("Target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s", self.build_names_index[targetid], missing_list) self.failed_deps.append(targetid) dependees = self.get_dependees(targetid) for fnid in dependees: @@ -498,7 +501,7 @@ class TaskData: if self.abort and targetid in self.external_targets: target = self.build_names_index[targetid] - bb.msg.error(bb.msg.domain.Provider, "Required build target '%s' has no buildable providers.\nMissing or unbuildable dependency chain was: %s" % (target, missing_list)) + logger.error("Required build target '%s' has no buildable providers.\nMissing or unbuildable dependency chain was: %s", target, missing_list) raise bb.providers.NoProvider(target) def remove_runtarget(self, targetid, missing_list = []): @@ -511,7 +514,7 @@ class TaskData: else: missing_list = [self.run_names_index[targetid]] + missing_list - bb.msg.note(1, bb.msg.domain.Provider, "Runtime target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s" % (self.run_names_index[targetid], missing_list)) + logger.info("Runtime target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s", self.run_names_index[targetid], missing_list) self.failed_rdeps.append(targetid) dependees = self.get_rdependees(targetid) for fnid in dependees: @@ -521,7 +524,7 @@ class TaskData: """ Resolve all unresolved build and runtime 
targets """ - bb.msg.note(1, bb.msg.domain.TaskData, "Resolving any missing task queue dependencies") + logger.info("Resolving any missing task queue dependencies") while True: added = 0 for target in self.get_unresolved_build_targets(dataCache): @@ -539,7 +542,7 @@ class TaskData: added = added + 1 except bb.providers.NoRProvider: self.remove_runtarget(self.getrun_id(target)) - bb.msg.debug(1, bb.msg.domain.TaskData, "Resolved " + str(added) + " extra dependencies") + logger.debug(1, "Resolved " + str(added) + " extra dependencies") if added == 0: break # self.dump_data() @@ -548,40 +551,40 @@ class TaskData: """ Dump some debug information on the internal data structures """ - bb.msg.debug(3, bb.msg.domain.TaskData, "build_names:") - bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.build_names_index)) + logger.debug(3, "build_names:") + logger.debug(3, ", ".join(self.build_names_index)) - bb.msg.debug(3, bb.msg.domain.TaskData, "run_names:") - bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.run_names_index)) + logger.debug(3, "run_names:") + logger.debug(3, ", ".join(self.run_names_index)) - bb.msg.debug(3, bb.msg.domain.TaskData, "build_targets:") + logger.debug(3, "build_targets:") for buildid in range(len(self.build_names_index)): target = self.build_names_index[buildid] targets = "None" if buildid in self.build_targets: targets = self.build_targets[buildid] - bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s: %s" % (buildid, target, targets)) + logger.debug(3, " (%s)%s: %s", buildid, target, targets) - bb.msg.debug(3, bb.msg.domain.TaskData, "run_targets:") + logger.debug(3, "run_targets:") for runid in range(len(self.run_names_index)): target = self.run_names_index[runid] targets = "None" if runid in self.run_targets: targets = self.run_targets[runid] - bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s: %s" % (runid, target, targets)) + logger.debug(3, " (%s)%s: %s", runid, target, targets) - bb.msg.debug(3, bb.msg.domain.TaskData, "tasks:") + 
logger.debug(3, "tasks:") for task in range(len(self.tasks_name)): - bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s - %s: %s" % ( - task, - self.fn_index[self.tasks_fnid[task]], - self.tasks_name[task], - self.tasks_tdepends[task])) + logger.debug(3, " (%s)%s - %s: %s", + task, + self.fn_index[self.tasks_fnid[task]], + self.tasks_name[task], + self.tasks_tdepends[task]) - bb.msg.debug(3, bb.msg.domain.TaskData, "dependency ids (per fn):") + logger.debug(3, "dependency ids (per fn):") for fnid in self.depids: - bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.depids[fnid])) + logger.debug(3, " %s %s: %s", fnid, self.fn_index[fnid], self.depids[fnid]) - bb.msg.debug(3, bb.msg.domain.TaskData, "runtime dependency ids (per fn):") + logger.debug(3, "runtime dependency ids (per fn):") for fnid in self.rdepids: - bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.rdepids[fnid])) + logger.debug(3, " %s %s: %s", fnid, self.fn_index[fnid], self.rdepids[fnid]) diff --git a/lib/bb/utils.py b/lib/bb/utils.py index 172c67c23..6185637c9 100644 --- a/lib/bb/utils.py +++ b/lib/bb/utils.py @@ -21,11 +21,14 @@ BitBake Utility Functions import re, fcntl, os, string, stat, shutil, time import sys -import bb import errno +import logging +import bb import bb.msg from commands import getstatusoutput +logger = logging.getLogger("BitBake.Util") + # Version comparison separators = ".-" @@ -292,11 +295,11 @@ def _print_trace(body, line): Print the Environment of a Text Body """ # print the environment of the method - bb.msg.error(bb.msg.domain.Util, "Printing the environment of the function") + logger.error("Printing the environment of the function") min_line = max(1, line-4) max_line = min(line + 4, len(body)-1) for i in range(min_line, max_line + 1): - bb.msg.error(bb.msg.domain.Util, "\t%.4d:%s" % (i, body[i-1]) ) + logger.error("\t%.4d:%s", i, body[i-1]) def better_compile(text, file, realfile, mode = "exec"): 
@@ -309,9 +312,9 @@ def better_compile(text, file, realfile, mode = "exec"): except Exception as e: # split the text into lines again body = text.split('\n') - bb.msg.error(bb.msg.domain.Util, "Error in compiling python function in: %s" % (realfile)) - bb.msg.error(bb.msg.domain.Util, "The lines leading to this error were:") - bb.msg.error(bb.msg.domain.Util, "\t%d:%s:'%s'" % (e.lineno, e.__class__.__name__, body[e.lineno-1])) + logger.error("Error in compiling python function in %s", realfile) + logger.error("The lines leading to this error were:") + logger.error("\t%d:%s:'%s'", e.lineno, e.__class__.__name__, body[e.lineno-1]) _print_trace(body, e.lineno) @@ -335,9 +338,7 @@ def better_exec(code, context, text, realfile = "<code>"): if t in [bb.parse.SkipPackage, bb.build.FuncFailed]: raise - # print the Header of the Error Message - bb.msg.error(bb.msg.domain.Util, "Error in executing python function in: %s" % code.co_filename) - bb.msg.error(bb.msg.domain.Util, "Exception:%s Message:%s" % (t, value)) + logger.exception("Error executing python function in '%s'", code.co_filename) # let us find the line number now while tb.tb_next: @@ -363,7 +364,7 @@ def lockfile(name): """ path = os.path.dirname(name) if not os.path.isdir(path): - bb.msg.error(bb.msg.domain.Util, "Error, lockfile path does not exist!: %s" % path) + logger.error("Lockfile path '%s' does not exist", path) sys.exit(1) while True: @@ -478,7 +479,7 @@ def filter_environment(good_vars): del os.environ[key] if len(removed_vars): - bb.msg.debug(1, bb.msg.domain.Util, "Removed the following variables from the environment: %s" % (", ".join(removed_vars))) + logger.debug(1, "Removed the following variables from the environment: %s", ", ".join(removed_vars)) return removed_vars @@ -544,10 +545,10 @@ def mkdirhier(dir): directory already exists like os.makedirs """ - bb.msg.debug(3, bb.msg.domain.Util, "mkdirhier(%s)" % dir) + logger.debug(3, "mkdirhier(%s)", dir) try: os.makedirs(dir) - bb.msg.debug(2, 
bb.msg.domain.Util, "created " + dir) + logger.debug(2, "created " + dir) except OSError as e: if e.errno != errno.EEXIST: raise e |