author     Benjamin Saller <bcsaller@objectrealms.net>  2007-07-27 21:06:56 (GMT)
committer  Benjamin Saller <bcsaller@objectrealms.net>  2007-07-27 21:06:56 (GMT)
commit     21b3150356031bfed0e833d148c4f370364c3164 (patch)
tree       8029d47b603d4a5824a0685b4edcb99cfbdc72a9
parent     ad0cad894d78da610ccdb3cea6b8df6539f3a2bf (diff)
proposed fixes for:
#2057 #2445 #2459
-rw-r--r--  src/olpc/datastore/backingstore.py   67
-rw-r--r--  src/olpc/datastore/datastore.py      90
-rw-r--r--  tests/milestone_2.txt                 7
-rw-r--r--  tests/mountpoints.txt                29
-rw-r--r--  tests/properties.txt                 14
-rw-r--r--  tests/runalltests.py                  2
-rw-r--r--  tests/test_model.py                   9
7 files changed, 149 insertions, 69 deletions
diff --git a/src/olpc/datastore/backingstore.py b/src/olpc/datastore/backingstore.py
index 8903df7..f163755 100644
--- a/src/olpc/datastore/backingstore.py
+++ b/src/olpc/datastore/backingstore.py
@@ -101,7 +101,14 @@ class BackingStore(object):
@property
def title(self): return self.descriptor()['title']
-
+ # Storage Translation
+ def localizedName(self, uid=None, content=None, target=None):
+ """taking any of uid, a content object, or a direct target
+ filename (which includes all of the relative components under a
+ store). Return the localized filename that should be used _within_
+ the repository for the storage of this content object
+ """
+ pass
class FileBackingStore(BackingStore):
@@ -141,18 +148,23 @@ class FileBackingStore(BackingStore):
# a hidden file with a pickled dict will live in the base
# directory for each storage
fn = os.path.join(self.base, self.DESCRIPTOR_NAME)
- if not os.path.exists(fn):
+ desc = None
+ if os.path.exists(fn):
+ try:
+ fp = open(fn, 'r')
+ desc = pickle.load(fp)
+ fp.close()
+ except Exception:
+ desc = None
+ if not desc:
# the data isn't there, this could happen for a number of
# reasons (the store isn't writeable)
+ # or because the information in it was corrupt;
+ # in either case, just create a new one
desc = {'id' : self.uri,
'uri' : self.uri,
'title' : self.uri
}
self.create_descriptor(**desc)
- else:
- fp = open(fn, 'r')
- desc = pickle.load(fp)
- fp.close()
return desc
@@ -166,8 +178,12 @@ class FileBackingStore(BackingStore):
desc = {}
if os.path.exists(fn):
fp = open(fn, 'r')
- desc = pickle.load(fp)
- fp.close()
+ try:
+ desc = pickle.load(fp)
+ except Exception:
+ desc = {}
+ finally:
+ fp.close()
desc.update(kwargs)
@@ -233,6 +249,27 @@ class FileBackingStore(BackingStore):
## signal from datastore that we are being bound to it
self.datastore = datastore
+ def localizedName(self, uid=None, content=None, target=None):
+ """taking any of uid, a content object, or a direct target
+ filename (which includes all of the relative components under a
+ store). Return the localized filename that should be used _within_
+ the repository for the storage of this content object
+ """
+ if target: return os.path.join(self.base, target)
+ elif content:
+ # see if the content suggests a filename or extension
+ fn, ext = content.suggestName()
+ if fn: return os.path.join(self.base, fn)
+ if ext: return os.path.join(self.base, "%s.%s" %
+ (content.id, ext))
+ if not uid: uid = content.id
+
+ if uid:
+ return os.path.join(self.base, uid)
+ else:
+ raise ValueError("""Nothing submitted to generate internal
+ storage name from""")
+
def _translatePath(self, uid):
"""translate a UID to a path name"""
# paths into the datastore
@@ -243,7 +280,7 @@ class FileBackingStore(BackingStore):
path = self._translatePath(uid)
if not os.path.exists(path):
return None
-
+
if target: targetpath = target
else:
targetpath = uid.replace('/', '_').replace('.', '__')
@@ -301,8 +338,11 @@ class FileBackingStore(BackingStore):
raise ValueError("Content for %s corrupt" % uid)
return content
- def _writeContent(self, uid, filelike, replace=True):
- path = self._translatePath(uid)
+ def _writeContent(self, uid, filelike, replace=True, target=None):
+ if target: path = target
+ else:
+ path = self._translatePath(uid)
+
if replace is False and os.path.exists(path):
raise KeyError("objects with path:%s for uid:%s exists" %(
path, uid))
@@ -484,7 +524,7 @@ class InplaceFileBackingStore(FileBackingStore):
def _translatePath(self, uid):
try: content = self.indexmanager.get(uid)
except KeyError: return None
- return os.path.join(self.uri, content.get_property('filename'))
+ return os.path.join(self.uri, content.get_property('filename', uid))
## def _targetFile(self, uid, target=None, ext=None, env=None):
## # in this case the file should really be there unless it was
@@ -513,7 +553,7 @@ class InplaceFileBackingStore(FileBackingStore):
proposed_name = os.path.split(filelike.name)[1]
proposed_name = os.path.join(self.uri, proposed_name)
if not os.path.exists(proposed_name):
- self._writeContent(uid, filelike, replace=False)
+ self._writeContent(uid, filelike, replace=False, target=proposed_name)
return uid
@@ -532,6 +572,7 @@ class InplaceFileBackingStore(FileBackingStore):
c = self.indexmanager.get(uid)
path = c.get_property('filename', None)
self.indexmanager.delete(uid)
+ if path: path = os.path.join(self.uri, path)
if path and os.path.exists(path):
os.unlink(path)
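
The precedence that localizedName implements above (an explicit target wins, then a name suggested by the content object, then the bare uid) can be restated as a small standalone sketch; the content shape with .id and .suggestName() -> (filename, extension) is assumed from the calls in this diff:

import os

def localized_name(base, uid=None, content=None, target=None):
    # Mirrors FileBackingStore.localizedName: explicit target first,
    # then a filename or extension suggested by the content, then the uid.
    if target:
        return os.path.join(base, target)
    if content is not None:
        fn, ext = content.suggestName()
        if fn:
            return os.path.join(base, fn)
        if ext:
            return os.path.join(base, "%s.%s" % (content.id, ext))
        uid = uid or content.id
    if uid:
        return os.path.join(base, uid)
    raise ValueError("Nothing submitted to generate internal "
                     "storage name from")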
diff --git a/src/olpc/datastore/datastore.py b/src/olpc/datastore/datastore.py
index 111548c..34f5ba3 100644
--- a/src/olpc/datastore/datastore.py
+++ b/src/olpc/datastore/datastore.py
@@ -82,6 +82,8 @@ class DataStore(dbus.service.Object):
self.mountpoints[mp.id] = mp
if self.root is None:
self.root = mp
+
+ self.Mounted(mp.descriptor())
return mp.id
@dbus.service.method(DS_DBUS_INTERFACE,
@@ -103,42 +105,52 @@ class DataStore(dbus.service.Object):
def unmount(self, mountpoint_id):
"""Unmount a mountpoint by id"""
if mountpoint_id not in self.mountpoints: return
- self.mountpoints[mountpoint_id].stop()
+ mp = self.mountpoints[mountpoint_id]
+ mp.stop()
+ self.Unmounted(mp.descriptor())
del self.mountpoints[mountpoint_id]
- ### End Mount Points
-
- ### Buddy Management
- ## A single datastore typically refers to a single user
- ## this breaks down a little in the case of things like USB
- ## sticks and so on. We provide a facility for tracking
- ## co-authors of content
- ## there are associated changes to 'find' to resolve buddies
- def addBuddy(self, id, name, fg_color, bg_color, mountpoint=None):
- mp = None
- if mountpoint is None: mp = self.root
- else: mp = self.mountpoints.get(mountpoint)
- if mp is None: raise ValueError("Invalid mountpoint")
- mp.addBuddy(id, name, fg_color, bg_color)
-
- def getBuddy(self, bid):
- """Get a buddy by its id"""
- b = None
- for mp in self.mountpoints.itervalues():
- b = mp.getBuddy(bid)
- if b: break
- return b
+ @dbus.service.signal(DS_DBUS_INTERFACE, signature="a{sv}")
+ def Mounted(self, descriptor):
+ """Indicates that a new backingstore has been mounted by the
+ datastore. Carries the mount descriptor, in the same form as mounts()."""
+ pass
+
+ @dbus.service.signal(DS_DBUS_INTERFACE, signature="a{sv}")
+ def Unmounted(self, descriptor):
+ """indicates that a new backingstore has been mounted by the
+ datastore. Returns the mount descriptor, like mounts()"""
+ pass
- def buddies(self):
- buddies = set()
- for mp in self.mountpoints.itervalues():
- buddies = buddies.union(mp.getBuddies())
- return buddies
-
+ ### End Mount Points
- ## end buddy api
-
+ ### Backup support
+ def pause(self, mountpoints=None):
+ """pause the datastore, during this time it will not process
+ requests. this allows the underlying stores to be backup up via
+ traditional mechanisms
+ """
+ if mountpoints:
+ mps = [self.mountpoints[mp] for mp in mountpoints]
+ else:
+ mps = self.mountpoints.values()
+
+ for mp in mps:
+ mp.stop()
+
+ def unpause(self, mountpoints=None):
+ """resume the operation of a set of paused mountpoints"""
+ if mountpoints:
+ mps = [self.mountpoints[mp] for mp in mountpoints]
+ else:
+ mps = self.mountpoints.values()
+
+ for mp in mps:
+ mp.initialize_and_load()
+
+ ### End Backups
+
def connect_backingstore(self, uri, **kwargs):
"""
connect to a new backing store
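
A client-side sketch of how pause/unpause might bracket a backup of a single mountpoint; ds stands for a DataStore instance, and mount.uri is assumed to be the store's on-disk path, as FileBackingStore's descriptor suggests:

import shutil

def backup_store(ds, mountpoint_id, destination):
    # Stop the mountpoint so nothing writes to it mid-copy, copy the
    # underlying directory, then bring the store back online.
    ds.pause([mountpoint_id])
    try:
        mount = ds.mountpoints[mountpoint_id]
        shutil.copytree(mount.uri, destination)
    finally:
        ds.unpause([mountpoint_id])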
@@ -325,6 +337,8 @@ class DataStore(dbus.service.Object):
if include_files :
try: filename = r.filename
except KeyError: pass
+ # XXX: this means that find never shows the internally
+ # stored filename attribute (which is private)
props['filename'] = filename
d.append(props)
@@ -368,7 +382,9 @@ class DataStore(dbus.service.Object):
out_signature='a{sv}')
def get_properties(self, uid):
content = self.get(uid)
- return content.properties
+ props = content.properties
+ props['mountpoint'] = content.backingstore.id
+ return props
@dbus.service.method(DS_DBUS_INTERFACE,
in_signature='sa{sv}',
@@ -399,9 +415,9 @@ class DataStore(dbus.service.Object):
content = self.get(uid)
mountpoint = props.pop('mountpoint', None)
content.backingstore.update(uid, props, filelike)
- if filelike:
- self.Updated(content.id)
- logger.debug("updated %s" % content.id)
+
+ self.Updated(content.id)
+ logger.debug("updated %s" % content.id)
@dbus.service.signal(DS_DBUS_INTERFACE, signature="s")
def Updated(self, uid): pass
@@ -414,8 +430,8 @@ class DataStore(dbus.service.Object):
content = self.get(uid)
if content:
content.backingstore.delete(uid)
- self.Deleted(uid)
- logger.debug("deleted %s" % uid)
+ self.Deleted(uid)
+ logger.debug("deleted %s" % uid)
@dbus.service.signal(DS_DBUS_INTERFACE, signature="s")
def Deleted(self, uid): pass
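
The new Mounted and Unmounted signals can be consumed with an ordinary dbus-python signal receiver. A sketch in the era-appropriate Python 2 / pygtk style, assuming DS_DBUS_INTERFACE is the Sugar datastore's usual interface name, 'org.laptop.sugar.DataStore':

import dbus
import dbus.mainloop.glib
import gobject

dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()

def on_mounted(descriptor):
    print "mounted:", descriptor.get('title')

def on_unmounted(descriptor):
    print "unmounted:", descriptor.get('title')

bus.add_signal_receiver(on_mounted, signal_name='Mounted',
                        dbus_interface='org.laptop.sugar.DataStore')
bus.add_signal_receiver(on_unmounted, signal_name='Unmounted',
                        dbus_interface='org.laptop.sugar.DataStore')

gobject.MainLoop().run()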
diff --git a/tests/milestone_2.txt b/tests/milestone_2.txt
index 73fd43a..551e1e3 100644
--- a/tests/milestone_2.txt
+++ b/tests/milestone_2.txt
@@ -10,12 +10,11 @@ First clean up from any other tests.
>>> from olpc.datastore import backingstore, model
>>> ds = DataStore()
>>> ds.registerBackend(backingstore.FileBackingStore)
->>> dm = model.defaultModel.copy().addField('year', 'int').addField('month', 'string')
->>> assert ds.mount("/tmp/test_ds", {'indexmanager.model' : dm})
+>>> assert ds.mount("/tmp/test_ds")
->>> a = ds.create(dict(title="Content A", author="Bob", year="1999", month="Jan"), '')
->>> b = ds.create(dict(title="Content B", author="Alice", year="2000", month="Jan"), '')
+>>> a = ds.create({'title':"Content A", 'author':"Bob", 'year:int':"1999", 'month':"Jan"}, '')
+>>> b = ds.create({'title':"Content B", 'author':"Alice", 'year:int':"2000", 'month':"Jan"}, '')
Find should return both
>>> def find2uids(results): return [i['uid'] for i in results[0]]
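
The rewritten create() calls above drop the custom model in favor of typed property keys. A hedged illustration of the 'name:type' convention as these tests use it (the full set of recognized type names is not shown in this diff; ds is a DataStore instance as in the test):

# Plain keys use the default model's type; a ':type' suffix on the
# key declares the field type inline at creation time.
props = {
    'title' : "Content A",   # default (string) field
    'year:int' : "1999",     # declared as an int field
    'month' : "Jan",
}
uid = ds.create(props, '')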
diff --git a/tests/mountpoints.txt b/tests/mountpoints.txt
index 304d4eb..45a359a 100644
--- a/tests/mountpoints.txt
+++ b/tests/mountpoints.txt
@@ -168,5 +168,34 @@ inplace one. Lets move the object with u1 to mp3
>>> assert count == 1
>>> assert result[0]['uid'] == pen_copy
+The file was properly created in the expected place.
+
+>>> assert os.path.exists('/tmp/store3/one.txt')
+
+
+We also need to be sure that delete commands work on inplace
+mounts. We will delete the object from the datastore and then verify
+that the file is missing.
+
+>>> ds.delete(pen_copy)
+>>> ds.complete_indexing()
+
+>>> os.path.exists('/tmp/store3/one.txt')
+False
+
+
+Now for a tricky case: we corrupt the metadata on a mount and then
+verify that we can still remount the store.
+
+>>> ds.unmount(mp3)
+>>> fp = open('/tmp/store3/.olpc.store/metainfo', 'w')
+>>> fp.seek(0)
+>>> fp.write('broken')
+>>> fp.close()
+
+>>> mp3 = ds.mount("inplace:/tmp/store3", dict(title="Fake USB from broken"))
+>>> mp = ds.mountpoints[mp3]
+>>> assert mp.descriptor()['title'] == 'Fake USB from broken'
+
>>> ds.stop(); del ds
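
The remount-after-corruption case above depends on the descriptor() fallback added in backingstore.py; the same load-or-recreate pattern, restated as a standalone sketch:

import os
import pickle

def load_descriptor(fn, uri):
    # Try the pickled descriptor; on a missing or unreadable file, fall
    # back to a freshly synthesized one keyed off the store's uri.
    desc = None
    if os.path.exists(fn):
        try:
            fp = open(fn, 'r')
            desc = pickle.load(fp)
            fp.close()
        except Exception:
            desc = None
    if not desc:
        desc = {'id' : uri, 'uri' : uri, 'title' : uri}
    return desc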
diff --git a/tests/properties.txt b/tests/properties.txt
index 4a95ec4..6c3c91b 100644
--- a/tests/properties.txt
+++ b/tests/properties.txt
@@ -19,20 +19,18 @@ Set up two mount points.
Extend the model to retain a 'year' property used below.
->>> dm = model.defaultModel.copy().addField('year', "number")
-
Mount a couple of stores.
->>> mp1 = ds.mount("/tmp/store1", {'title' : "Primary Storage", 'indexmanager.model' : dm})
->>> mp2 = ds.mount("/tmp/store2", {'title' : "Secondary Storage", 'indexmanager.model' : dm})
+>>> mp1 = ds.mount("/tmp/store1", {'title' : "Primary Storage",})
+>>> mp2 = ds.mount("/tmp/store2", {'title' : "Secondary Storage"})
Create some content on each.
->>> u1 = ds.create({'title' : "Alpha doc", 'author' : "Ben", 'year:number' : 2000}, tmpData("""Document 1"""))
->>> u2 = ds.create({'title' : "Beta doc", 'author' : "Ben", 'year:number' : 2001} , tmpData("""Document 2"""))
+>>> u1 = ds.create({'title' : "Alpha doc", 'author' : "Ben", 'year:int' : 2000}, tmpData("""Document 1"""))
+>>> u2 = ds.create({'title' : "Beta doc", 'author' : "Ben", 'year:int' : 2001} , tmpData("""Document 2"""))
->>> u3 = ds.create({'title' : "Delta doc", 'author' :"HAL", 'year:number' : 2000, 'mountpoint' : mp2}, tmpData("""Document 3"""))
->>> u4 = ds.create({'title' : "Gamma doc", 'author' : "HAL", 'year:number' : 2001, 'mountpoint' : mp2}, tmpData("""Document 4"""))
+>>> u3 = ds.create({'title' : "Delta doc", 'author' :"HAL", 'year:int' : 2000, 'mountpoint' : mp2}, tmpData("""Document 3"""))
+>>> u4 = ds.create({'title' : "Gamma doc", 'author' : "HAL", 'year:int' : 2001, 'mountpoint' : mp2}, tmpData("""Document 4"""))
Now we should be able to discover things about the system properties.
>>> ds.complete_indexing()
diff --git a/tests/runalltests.py b/tests/runalltests.py
index 564cee2..8fee87e 100644
--- a/tests/runalltests.py
+++ b/tests/runalltests.py
@@ -31,7 +31,7 @@ doctests = [
]
doctest_options = doctest.ELLIPSIS
-doctest_options |= doctest.REPORT_ONLY_FIRST_FAILURE
+#doctest_options |= doctest.REPORT_ONLY_FIRST_FAILURE
def test_suite():
diff --git a/tests/test_model.py b/tests/test_model.py
index 059a128..6d171c1 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -28,18 +28,15 @@ class Test(unittest.TestCase):
ds = DataStore()
ds.registerBackend(backingstore.FileBackingStore)
- #add a custom field to the model
- dm = model.defaultModel.copy().addField('thumbnail', 'binary')
-
-
- ds.mount(DEFAULT_STORE, {'indexmanager.model' : dm})
+
+ ds.mount(DEFAULT_STORE)
n = datetime.datetime.now()
data = open('test.jpg', 'r').read()
# binary data with \0's in it can cause dbus errors here
fn = tmpData("with image\0\0 prop")
# The key types are looked up in the model now
- uid = ds.create({'title' : "Document 1", 'thumbnail' : data, 'ctime' : n.isoformat()}, fn)
+ uid = ds.create({'title' : "Document 1", 'thumbnail:binary' : data, 'ctime' : n.isoformat()}, fn)
ds.complete_indexing()
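
The \0 caveat noted in this test is a general D-Bus limitation: strings with embedded NULs are not valid D-Bus strings. When binary values must cross the bus, wrapping them as a byte array is the usual workaround; a sketch, not part of this commit:

import dbus

# Send raw bytes as a D-Bus byte array ('ay') rather than a string.
data = open('test.jpg', 'r').read()
props = {'title' : 'Document 1', 'thumbnail:binary' : dbus.ByteArray(data)}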