author    Chris Larson <chris_larson@mentor.com>    2011-02-11 09:42:38 -0700
committer Chris Larson <chris_larson@mentor.com>    2011-02-11 09:55:11 -0700
commit    34c7ff67d7f4c7dde2027e000def1a49f3286829 (patch)
tree      3537e0b1d7e65b0a9970f2962b4bca2cd96de3fb
parent    c197043717ce621c345800bde689b1231fe8b679 (diff)
download  bitbake-34c7ff67d7f4c7dde2027e000def1a49f3286829.tar.gz
persist_data: drop SQLData indirection

The common case (if not the only case) is to use only a single domain. The only reason SQLData exists is to make it easier to delete a domain, yet there is no need for us to delete a domain if SQLTable knows how to clear itself out. So, add clear() to the table and pass the domain to persist().

Signed-off-by: Chris Larson <chris_larson@mentor.com>
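For reference, the shape of the change at call sites, as a rough before/after sketch (pd and revs are the variable names used in the diff below; d is the usual metadata object):

    # before: indirect lookup through the SQLData wrapper
    pd = persist_data.persist(d)
    revs = pd['BB_URI_HEADREVS']
    del pd['BB_URI_HEADREVS']     # dropped the whole table to clear it

    # after: persist() takes the domain and returns the SQLTable directly
    revs = persist_data.persist('BB_URI_HEADREVS', d)
    revs.clear()                  # empties the table's rows instead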
-rw-r--r--  lib/bb/fetch/__init__.py  | 15
-rw-r--r--  lib/bb/fetch/git.py       |  6
-rw-r--r--  lib/bb/fetch2/__init__.py | 15
-rw-r--r--  lib/bb/persist_data.py    | 39
4 files changed, 23 insertions(+), 52 deletions(-)
diff --git a/lib/bb/fetch/__init__.py b/lib/bb/fetch/__init__.py
index 2f92d87d9..4cf78779e 100644
--- a/lib/bb/fetch/__init__.py
+++ b/lib/bb/fetch/__init__.py
@@ -153,18 +153,18 @@ def fetcher_init(d):
Called to initialize the fetchers once the configuration data is known.
Calls before this must not hit the cache.
"""
- pd = persist_data.persist(d)
# When to drop SCM head revisions controlled by user policy
srcrev_policy = bb.data.getVar('BB_SRCREV_POLICY', d, 1) or "clear"
if srcrev_policy == "cache":
logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
elif srcrev_policy == "clear":
logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
+ revs = persist_data.persist('BB_URI_HEADREVS', d)
try:
- bb.fetch.saved_headrevs = pd['BB_URI_HEADREVS'].items()
+ bb.fetch.saved_headrevs = revs.items()
except:
pass
- del pd['BB_URI_HEADREVS']
+ revs.clear()
else:
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
@@ -178,8 +178,7 @@ def fetcher_compare_revisions(d):
return true/false on whether they've changed.
"""
- pd = persist_data.persist(d)
- data = pd['BB_URI_HEADREVS'].items()
+ data = persist_data.persist('BB_URI_HEADREVS', d).items()
data2 = bb.fetch.saved_headrevs
changed = False
@@ -756,8 +755,7 @@ class Fetch(object):
if not hasattr(self, "_latest_revision"):
raise ParameterError
- pd = persist_data.persist(d)
- revs = pd['BB_URI_HEADREVS']
+ revs = persist_data.persist('BB_URI_HEADREVS', d)
key = self.generate_revision_key(url, ud, d)
rev = revs[key]
if rev != None:
@@ -773,8 +771,7 @@ class Fetch(object):
if hasattr(self, "_sortable_revision"):
return self._sortable_revision(url, ud, d)
- pd = persist_data.persist(d)
- localcounts = pd['BB_URI_LOCALCOUNT']
+ localcounts = persist_data.persist('BB_URI_LOCALCOUNT', d)
key = self.generate_revision_key(url, ud, d)
latest_rev = self._build_revision(url, ud, d)
diff --git a/lib/bb/fetch/git.py b/lib/bb/fetch/git.py
index b37a09743..7d5233911 100644
--- a/lib/bb/fetch/git.py
+++ b/lib/bb/fetch/git.py
@@ -242,8 +242,7 @@ class Git(Fetch):
"""
Look in the cache for the latest revision, if not present ask the SCM.
"""
- persisted = bb.persist_data.persist(d)
- revs = persisted['BB_URI_HEADREVS']
+ revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
key = self.generate_revision_key(url, ud, d, branch=True)
rev = revs[key]
@@ -263,8 +262,7 @@ class Git(Fetch):
"""
"""
- pd = bb.persist_data.persist(d)
- localcounts = pd['BB_URI_LOCALCOUNT']
+ localcounts = bb.persist_data.persist('BB_URI_LOCALCOUNT', d)
key = self.generate_revision_key(url, ud, d, branch=True)
oldkey = self.generate_revision_key(url, ud, d, branch=False)
diff --git a/lib/bb/fetch2/__init__.py b/lib/bb/fetch2/__init__.py
index 1ec42717f..e36835195 100644
--- a/lib/bb/fetch2/__init__.py
+++ b/lib/bb/fetch2/__init__.py
@@ -206,18 +206,18 @@ def fetcher_init(d):
Called to initialize the fetchers once the configuration data is known.
Calls before this must not hit the cache.
"""
- pd = persist_data.persist(d)
# When to drop SCM head revisions controlled by user policy
srcrev_policy = bb.data.getVar('BB_SRCREV_POLICY', d, True) or "clear"
if srcrev_policy == "cache":
logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
elif srcrev_policy == "clear":
logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
+ revs = persist_data.persist('BB_URI_HEADREVS', d)
try:
- bb.fetch2.saved_headrevs = pd['BB_URI_HEADREVS'].items()
+ bb.fetch2.saved_headrevs = revs.items()
except:
pass
- del pd['BB_URI_HEADREVS']
+ revs.clear()
else:
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
@@ -231,8 +231,7 @@ def fetcher_compare_revisions(d):
return true/false on whether they've changed.
"""
- pd = persist_data.persist(d)
- data = pd['BB_URI_HEADREVS'].items()
+ data = persist_data.persist('BB_URI_HEADREVS', d).items()
data2 = bb.fetch2.saved_headrevs
changed = False
@@ -762,8 +761,7 @@ class FetchMethod(object):
if not hasattr(self, "_latest_revision"):
raise ParameterError("The fetcher for this URL does not support _latest_revision", url)
- pd = persist_data.persist(d)
- revs = pd['BB_URI_HEADREVS']
+ revs = persist_data.persist('BB_URI_HEADREVS', d)
key = self.generate_revision_key(url, ud, d, name)
rev = revs[key]
if rev != None:
@@ -779,8 +777,7 @@ class FetchMethod(object):
if hasattr(self, "_sortable_revision"):
return self._sortable_revision(url, ud, d)
- pd = persist_data.persist(d)
- localcounts = pd['BB_URI_LOCALCOUNT']
+ localcounts = persist_data.persist('BB_URI_LOCALCOUNT', d)
key = self.generate_revision_key(url, ud, d, name)
latest_rev = self._build_revision(url, ud, d, name)
diff --git a/lib/bb/persist_data.py b/lib/bb/persist_data.py
index b3a9e5f22..b5ce371d9 100644
--- a/lib/bb/persist_data.py
+++ b/lib/bb/persist_data.py
@@ -107,33 +107,8 @@ class SQLTable(collections.MutableMapping):
def iteritems(self):
return self._execute("SELECT * FROM %s;" % self.table)
-
-class SQLData(object):
- """Object representing the persistent data"""
- def __init__(self, filename):
- bb.utils.mkdirhier(os.path.dirname(filename))
-
- self.filename = filename
- self.connection = sqlite3.connect(filename, timeout=5,
- isolation_level=None)
- self.cursor = self.connection.cursor()
- self._tables = {}
-
- def __getitem__(self, table):
- if not isinstance(table, basestring):
- raise TypeError("table argument must be a string, not '%s'" %
- type(table))
-
- if table in self._tables:
- return self._tables[table]
- else:
- tableobj = self._tables[table] = SQLTable(self.cursor, table)
- return tableobj
-
- def __delitem__(self, table):
- if table in self._tables:
- del self._tables[table]
- self.cursor.execute("DROP TABLE IF EXISTS %s;" % table)
+ def clear(self):
+ self._execute("DELETE FROM %s;" % self.table)
class PersistData(object):
@@ -183,14 +158,18 @@ class PersistData(object):
"""
del self.data[domain][key]
+def connect(database):
+ return sqlite3.connect(database, timeout=5, isolation_level=None)
-def persist(d):
- """Convenience factory for construction of SQLData based upon metadata"""
+def persist(domain, d):
+ """Convenience factory for SQLTable objects based upon metadata"""
cachedir = (bb.data.getVar("PERSISTENT_DIR", d, True) or
bb.data.getVar("CACHE", d, True))
if not cachedir:
logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
sys.exit(1)
+ bb.utils.mkdirhier(cachedir)
cachefile = os.path.join(cachedir, "bb_persist_data.sqlite3")
- return SQLData(cachefile)
+ connection = connect(cachefile)
+ return SQLTable(connection, domain)
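The SQLTable returned by the new persist() still behaves as a MutableMapping, so existing callers keep their dict-style access; only the clearing semantics change (DELETE FROM rather than DROP TABLE, so the table itself survives). A minimal usage sketch, assuming a metadata object d is in scope; key and rev are illustrative names, not taken from the diff:

    revs = persist_data.persist('BB_URI_HEADREVS', d)  # one SQLTable per domain
    revs[key] = rev      # dict-style access via collections.MutableMapping
    latest = revs[key]
    revs.clear()         # removes all rows but keeps the table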