Diffstat (limited to 'src/jarabe/journal/model.py')
-rw-r--r--  src/jarabe/journal/model.py  674
1 file changed, 456 insertions(+), 218 deletions(-)
diff --git a/src/jarabe/journal/model.py b/src/jarabe/journal/model.py
index 5eac555..4d369e2 100644
--- a/src/jarabe/journal/model.py
+++ b/src/jarabe/journal/model.py
@@ -17,16 +17,17 @@
import cPickle
import logging
import os
-import errno
import subprocess
from datetime import datetime
+import errno
import time
import shutil
import tempfile
-from stat import S_IFLNK, S_IFMT, S_IFDIR, S_IFREG
import re
from operator import itemgetter
import simplejson
+import urllib
+from urlparse import urlsplit
import xapian
from gettext import gettext as _
@@ -36,8 +37,10 @@ import gio
import gconf
from sugar import dispatch
+from sugar import env
from sugar import mime
from sugar import util
+from sugar.logger import trace
DS_DBUS_SERVICE = 'org.laptop.sugar.DataStore'
@@ -47,7 +50,7 @@ DS_DBUS_PATH = '/org/laptop/sugar/DataStore'
# Properties the journal cares about.
PROPERTIES = ['activity', 'activity_id', 'buddies', 'bundle_id',
'creation_time', 'filesize', 'icon-color', 'keep', 'mime_type',
- 'mountpoint', 'mtime', 'progress', 'timestamp', 'title', 'uid']
+ 'mtime', 'progress', 'timestamp', 'title', 'uid']
MIN_PAGES_TO_CACHE = 3
MAX_PAGES_TO_CACHE = 5
@@ -55,11 +58,20 @@ MAX_PAGES_TO_CACHE = 5
JOURNAL_0_METADATA_DIR = '.olpc.store'
JOURNAL_METADATA_DIR = '.Sugar-Metadata'
+SUGAR_WEBDAV_NAMESPACE = 'http://people.sugarlabs.org/silbe/webdavns/sugar'
+_SUGAR_WEBDAV_PREFIX = 'webdav::%s::' % (urllib.quote(SUGAR_WEBDAV_NAMESPACE,
+ safe=''), )
+_QUERY_GIO_ATTRIBUTES = ','.join(['standard::*', 'webdav::*',
+ gio.FILE_ATTRIBUTE_ID_FILE,
+ gio.FILE_ATTRIBUTE_TIME_MODIFIED])
+
_datastore = None
created = dispatch.Signal()
updated = dispatch.Signal()
deleted = dispatch.Signal()
+_documents_path = None
+
class _Cache(object):
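The 'webdav::<quoted namespace>::<quoted property>' attribute names built
above are what _get_file_metadata() later decodes back into Sugar property
names. A minimal round-trip sketch, stdlib only; encode_property() and
decode_property() are illustrative helpers, not part of this patch:

    import urllib

    SUGAR_WEBDAV_NAMESPACE = \
        'http://people.sugarlabs.org/silbe/webdavns/sugar'
    _PREFIX = 'webdav::%s::' % (urllib.quote(SUGAR_WEBDAV_NAMESPACE,
                                             safe=''), )

    def encode_property(name):
        # Map a Sugar property name to a gio WebDAV attribute name.
        return _PREFIX + urllib.quote(name)

    def decode_property(attr_name):
        # Invert encode_property(), as the loop in _get_file_metadata()
        # does with the attribute names gio reports.
        assert attr_name.startswith(_PREFIX)
        return urllib.unquote(attr_name[len(_PREFIX):])

    assert decode_property(encode_property('icon-color')) == 'icon-color'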
@@ -228,21 +240,25 @@ class DatastoreResultSet(BaseResultSet):
byte_arrays=True)
for entry in entries:
- entry['mountpoint'] = '/'
+ entry['mount_uri'] = 'datastore:'
- return entries, total_count
+ return [('datastore:' + entry['uid'], entry)
+ for entry in entries], total_count
class InplaceResultSet(BaseResultSet):
"""Encapsulates the result of a query on a mount point
"""
- def __init__(self, query, page_size, mount_point):
+
+ _NUM_ENTRIES_PER_REQUEST = 100
+
+ def __init__(self, query, page_size, uri):
BaseResultSet.__init__(self, query, page_size)
- self._mount_point = mount_point
+ self._uri = uri
self._file_list = None
self._pending_directories = []
- self._visited_directories = []
- self._pending_files = []
+ self._visited_directories = set()
+ self._pending_entries = []
self._stopped = False
query_text = query.get('query', '')
@@ -256,7 +272,7 @@ class InplaceResultSet(BaseResultSet):
else:
self._regex = None
if query.get('timestamp', ''):
self._date_start = int(query['timestamp']['start'])
self._date_end = int(query['timestamp']['end'])
else:
@@ -269,14 +285,15 @@ class InplaceResultSet(BaseResultSet):
def setup(self):
self._file_list = []
- self._pending_directories = [self._mount_point]
- self._visited_directories = []
- self._pending_files = []
- gobject.idle_add(self._scan)
+ self._pending_directories = [gio.File(uri=self._uri)]
+ self._visited_directories = set()
+ self._pending_entries = []
+ self._schedule_scan_iteration()
def stop(self):
self._stopped = True
+ @trace()
def setup_ready(self):
if self._sort[1:] == 'filesize':
keygetter = itemgetter(3)
@@ -304,152 +321,233 @@ class InplaceResultSet(BaseResultSet):
files = self._file_list[offset:offset + limit]
entries = []
- for file_path, stat, mtime_, size_, metadata in files:
- if metadata is None:
- metadata = _get_file_metadata(file_path, stat)
- metadata['mountpoint'] = self._mount_point
- entries.append(metadata)
+ for uri, mtime_, size_, metadata in files:
+ metadata['mount_uri'] = self._uri
+ entries.append((uri, metadata))
logging.debug('InplaceResultSet.find took %f s.', time.time() - t)
return entries, total_count
+ @trace()
def _scan(self):
if self._stopped:
return False
self.progress.send(self)
- if self._pending_files:
- self._scan_a_file()
- return True
+ if self._pending_entries:
+ self._scan_an_entry()
+ return False
if self._pending_directories:
self._scan_a_directory()
- return True
+ return False
self.setup_ready()
- self._visited_directories = []
+ self._visited_directories = set()
return False
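Note the control-flow change: the old walker returned True to stay
installed as a repeating idle handler, while the rewrite returns False on
every path and re-arms itself explicitly, so a pending gio async call can
hold off the next iteration until its callback fires. A minimal sketch of
the pattern, assuming pygtk's gobject main loop; the Scanner class is
illustrative, not part of this patch:

    import gobject

    class Scanner(object):

        def __init__(self, items):
            self._pending = list(items)

        def start(self):
            gobject.idle_add(self._iterate)

        def _iterate(self):
            if not self._pending:
                return False  # finished; nothing re-arms us
            self._pending.pop(0)  # stand-in for kicking off async work
            gobject.idle_add(self._iterate)  # explicit re-arm, like
            return False                     # _schedule_scan_iteration()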
- def _scan_a_file(self):
- full_path = self._pending_files.pop(0)
+ @trace()
+ def _scan_an_entry(self):
+ directory, entry = self._pending_entries.pop(0)
+ logging.debug('Scanning entry %r / %r', directory.get_uri(),
+ entry.get_name())
+
+ if entry.get_is_symlink():
+ self._scan_a_symlink(directory, entry)
+ elif entry.get_file_type() == gio.FILE_TYPE_DIRECTORY:
+ self._scan_a_directory_entry(directory, entry)
+ elif entry.get_file_type() == gio.FILE_TYPE_REGULAR:
+ # FIXME: process file entries in bulk
+ self._scan_a_file(directory, entry)
+
+ @trace()
+ def _scan_a_file(self, directory, entry):
+ self._schedule_scan_iteration()
metadata = None
+ gfile = directory.get_child(entry.get_name())
+ logging.debug('gfile.uri=%r, self._uri=%r', gfile.get_uri(), self._uri)
+ path = urllib.unquote(urlsplit(gfile.get_uri())[2])
- try:
- stat = os.lstat(full_path)
- except OSError, e:
- if e.errno != errno.ENOENT:
- logging.exception(
- 'Error reading metadata of file %r', full_path)
- return
-
- if S_IFMT(stat.st_mode) == S_IFLNK:
- try:
- link = os.readlink(full_path)
- except OSError, e:
- logging.exception(
- 'Error reading target of link %r', full_path)
- return
-
- if not os.path.abspath(link).startswith(self._mount_point):
- return
-
- try:
- stat = os.stat(full_path)
-
- except OSError, e:
- if e.errno != errno.ENOENT:
- logging.exception(
- 'Error reading metadata of linked file %r', full_path)
- return
-
- if S_IFMT(stat.st_mode) == S_IFDIR:
- id_tuple = stat.st_ino, stat.st_dev
- if not id_tuple in self._visited_directories:
- self._visited_directories.append(id_tuple)
- self._pending_directories.append(full_path)
- return
-
- if S_IFMT(stat.st_mode) != S_IFREG:
- return
-
- if self._regex is not None and \
- not self._regex.match(full_path):
- metadata = _get_file_metadata(full_path, stat,
+ if self._regex is not None and not self._regex.match(path):
+ metadata = _get_file_metadata(gfile, entry, path,
fetch_preview=False)
if not metadata:
return
+
add_to_list = False
for f in ['fulltext', 'title',
'description', 'tags']:
- if f in metadata and \
- self._regex.match(metadata[f]):
+ if f in metadata and self._regex.match(metadata[f]):
add_to_list = True
break
if not add_to_list:
return
- if self._date_start is not None and stat.st_mtime < self._date_start:
+ mtime = entry.get_modification_time()
+ if self._date_start is not None and mtime < self._date_start:
return
- if self._date_end is not None and stat.st_mtime > self._date_end:
+ if self._date_end is not None and mtime > self._date_end:
return
if self._mime_types:
- mime_type = gio.content_type_guess(filename=full_path)
- if mime_type not in self._mime_types:
+ if entry.get_content_type() not in self._mime_types:
return
- file_info = (full_path, stat, int(stat.st_mtime), stat.st_size,
- metadata)
+ if metadata is None:
+ metadata = _get_file_metadata(gfile, entry, path,
+ fetch_preview=False)
+
+ self._add_a_file(directory, entry, gfile.get_uri(), metadata)
+
+ @trace()
+ def _add_a_file(self, directory, entry, uri, metadata):
+ mtime = entry.get_modification_time()
+ size = entry.get_size()
+ file_info = (uri, int(mtime), size, metadata)
self._file_list.append(file_info)
- return
+ @trace()
+ def _scan_a_symlink(self, directory, entry):
+ link = entry.get_symlink_target()
+ directory_uri = directory.get_uri().rstrip('/') + '/'
+ absolute_uri = urllib.basejoin(directory_uri, link)
+ logging.debug('symlink %r in %r => %r', link, directory_uri,
+ absolute_uri)
+ if not absolute_uri.startswith(self._uri):
+ self._schedule_scan_iteration()
+ return
+
+ gfile = gio.File(uri=absolute_uri)
+ gfile.query_info_async(_QUERY_GIO_ATTRIBUTES, self.__symlink_query_cb)
+
+ @trace()
+ def _scan_a_directory_entry(self, parent_directory, entry):
+ self._schedule_scan_iteration()
+ directory_id = entry.get_attribute_string(gio.FILE_ATTRIBUTE_ID_FILE)
+ if directory_id in self._visited_directories:
+ logging.debug('Skipping already visited directory %r (%r)',
+ entry.get_name(), directory_id)
+ return
+
+ logging.debug('Scheduling directory %r (%r) for scanning',
+ entry.get_name(), directory_id)
+ directory = parent_directory.get_child(entry.get_name())
+ self._visited_directories.add(directory_id)
+ self._pending_directories.append(directory)
+
+ @trace()
def _scan_a_directory(self):
- dir_path = self._pending_directories.pop(0)
+ directory = self._pending_directories.pop(0)
+ logging.debug('Scanning directory %r', directory.get_uri())
+ # TODO: pass a gio.Cancellable
+ directory.enumerate_children_async(_QUERY_GIO_ATTRIBUTES,
+ self.__enumerate_children_cb)
+
+ @trace()
+ def __symlink_query_cb(self, symlink, result):
+ try:
+ entry = symlink.query_info_finish(result)
+ except gio.Error, exception:
+ logging.error('Could not examine symlink %s: %s',
+ symlink.get_uri(), exception)
+ self._schedule_scan_iteration()
+ return
+ self._pending_entries.append((symlink.get_parent(), entry))
+ self._schedule_scan_iteration()
+
+ @trace()
+ def __enumerate_children_cb(self, directory, result):
try:
- entries = os.listdir(dir_path)
- except OSError, e:
- if e.errno != errno.EACCES:
- logging.exception('Error reading directory %r', dir_path)
+ enumerator = directory.enumerate_children_finish(result)
+ except gio.Error, exception:
+ logging.error('Could not enumerate %s: %s', directory.get_uri(),
+ exception)
+ self._schedule_scan_iteration()
return
- for entry in entries:
- if entry.startswith('.'):
- continue
- self._pending_files.append(dir_path + '/' + entry)
- return
+ enumerator.next_files_async(self._NUM_ENTRIES_PER_REQUEST,
+ self.__next_files_cb)
+ @trace()
+ def __next_files_cb(self, enumerator, result):
+ directory = enumerator.get_container()
+ try:
+ entries = enumerator.next_files_finish(result)
+ except gio.Error, exception:
+ logging.error('Error while enumerating %s: %s',
+ directory.get_uri(), exception)
+ self._schedule_scan_iteration()
+ return
-def _get_file_metadata(path, stat, fetch_preview=True):
+ logging.debug('__next_files_cb: entries=%r', entries)
+ logging.debug('__next_files_cb: names=%r',
+ [entry.get_name() for entry in entries])
+ self._pending_entries += [(directory, entry) for entry in entries
+ if not entry.get_name().startswith('.')]
+
+ if len(entries) >= self._NUM_ENTRIES_PER_REQUEST:
+ enumerator.next_files_async(self._NUM_ENTRIES_PER_REQUEST,
+ self.__next_files_cb)
+ else:
+ self._schedule_scan_iteration()
+
+ @trace()
+ def _schedule_scan_iteration(self):
+ gobject.idle_add(self._scan)
+
+
+@trace()
+def _get_file_metadata(gfile, info, path, fetch_preview=True):
"""Return the metadata from the corresponding file.
Read the metadata stored in the JSON file, or create the
metadata based on the file properties.
"""
- filename = os.path.basename(path)
- dir_path = os.path.dirname(path)
- metadata = _get_file_metadata_from_json(dir_path, filename, fetch_preview)
- if metadata:
- if 'filesize' not in metadata:
- metadata['filesize'] = stat.st_size
- return metadata
-
- return {'uid': path,
- 'title': os.path.basename(path),
- 'timestamp': stat.st_mtime,
- 'filesize': stat.st_size,
- 'mime_type': gio.content_type_guess(filename=path),
- 'activity': '',
- 'activity_id': '',
- 'icon-color': '#000000,#ffffff',
- 'description': path}
+ if gfile.is_native():
+ filename = os.path.basename(path)
+ dir_path = os.path.dirname(path)
+ metadata = _get_file_metadata_from_json(dir_path, filename,
+ fetch_preview)
+ if metadata:
+ metadata['filesize'] = info.get_size()
+ #metadata['uid'] = gfile.get_uri()
+ return metadata
+
+ metadata = {#'uid': gfile.get_uri(),
+ 'title': info.get_display_name(),
+ 'timestamp': info.get_modification_time(),
+ 'filesize': info.get_size(),
+ 'mime_type': info.get_content_type(),
+ 'activity': '',
+ 'activity_id': '',
+ 'icon-color': '#000000,#ffffff',
+ 'description': path}
+
+ for attr_name in info.list_attributes('webdav'):
+ if not attr_name.startswith(_SUGAR_WEBDAV_PREFIX):
+ continue
+ attribute_type = info.get_attribute_type(attr_name)
+ if attribute_type != gio.FILE_ATTRIBUTE_TYPE_STRING:
+ logging.debug('%r is not a string: %s', attr_name, attribute_type)
+ continue
+ property_name = urllib.unquote(attr_name[len(_SUGAR_WEBDAV_PREFIX):])
+ if property_name == 'filesize':
+ continue
+
+ metadata[property_name] = info.get_attribute_string(attr_name)
+
+ return metadata
+
+
+@trace()
def _get_file_metadata_from_json(dir_path, filename, fetch_preview):
"""Read the metadata from the json file and the preview
stored on the external device.
@@ -475,8 +573,6 @@ def _get_file_metadata_from_json(dir_path, filename, fetch_preview):
logging.error('Could not read metadata for file %r on '
'external device.', filename)
return None
- else:
- metadata['uid'] = os.path.join(dir_path, filename)
if not fetch_preview:
if 'preview' in metadata:
@@ -507,15 +603,15 @@ def _get_datastore():
def _datastore_created_cb(object_id):
- created.send(None, object_id=object_id)
+ created.send(None, object_uri='datastore:' + object_id)
def _datastore_updated_cb(object_id):
- updated.send(None, object_id=object_id)
+ updated.send(None, object_uri='datastore:' + object_id)
def _datastore_deleted_cb(object_id):
- deleted.send(None, object_id=object_id)
+ deleted.send(None, object_uri='datastore:' + object_id)
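All signals and result sets are now keyed by object URIs instead of bare
ids, with 'datastore:<uid>' for datastore entries. The functions below
rely on Python 2's urlsplit() handling that scheme; a quick sketch (the
uid value is illustrative):

    from urlparse import urlsplit

    uri = 'datastore:' + 'abc-123'
    scheme, netloc, path = urlsplit(uri)[:3]
    assert (scheme, netloc, path) == ('datastore', '', 'abc-123')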
def find(query_, page_size):
@@ -523,61 +619,87 @@ def find(query_, page_size):
"""
query = query_.copy()
- mount_points = query.pop('mountpoints', ['/'])
- if mount_points is None or len(mount_points) != 1:
- raise ValueError('Exactly one mount point must be specified')
+ mount_uri = query.pop('mount_uri', None)
+ if mount_uri != 'datastore:':
+ return InplaceResultSet(query, page_size, mount_uri)
- if mount_points[0] == '/':
- return DatastoreResultSet(query, page_size)
- else:
- return InplaceResultSet(query, page_size, mount_points[0])
+ return DatastoreResultSet(query, page_size)
-def _get_mount_point(path):
- dir_path = os.path.dirname(path)
- while dir_path:
- if os.path.ismount(dir_path):
- return dir_path
- else:
- dir_path = dir_path.rsplit(os.sep, 1)[0]
- return None
+def _get_mount_uri(gfile):
+ try:
+ mount = gfile.find_enclosing_mount()
+ except gio.Error, exception:
+ if exception.domain != gio.ERROR or \
+ exception.code != gio.ERROR_NOT_FOUND:
+ raise
+ else:
+ return mount.get_root().get_uri()
+ # find_enclosing_mount() doesn't work for local "internal" mounts.
+ # But since the XDG Documents folder is the only thing we show that
+ # could contain paths on "internal" mounts, we can just "hardcode"
+ # that directory.
+ return 'file://' + get_documents_path()
-def get(object_id):
+
+def get(object_uri):
"""Returns the metadata for an object
"""
- if os.path.exists(object_id):
- stat = os.stat(object_id)
- metadata = _get_file_metadata(object_id, stat)
- metadata['mountpoint'] = _get_mount_point(object_id)
- else:
+ logging.debug('get(%r)', object_uri)
+ scheme, netloc_, quoted_path = urlsplit(object_uri)[:3]
+ if scheme == 'datastore':
+ object_id = quoted_path
metadata = _get_datastore().get_properties(object_id, byte_arrays=True)
- metadata['mountpoint'] = '/'
+ metadata['mount_uri'] = 'datastore:'
+ else:
+ gfile = gio.File(uri=object_uri)
+ info = gfile.query_info(_QUERY_GIO_ATTRIBUTES)
+ path = urllib.unquote(quoted_path)
+ metadata = _get_file_metadata(gfile, info, path)
+ metadata['mount_uri'] = _get_mount_uri(gfile)
return metadata
-def get_file(object_id):
+@trace()
+def get_file(object_uri):
"""Returns the file for an object
"""
- if os.path.exists(object_id):
- logging.debug('get_file asked for file with path %r', object_id)
- return object_id
- else:
- logging.debug('get_file asked for entry with id %r', object_id)
+ # TODO: add async interface
+ logging.debug('get_file(%r)', object_uri)
+ scheme, netloc_, quoted_path = urlsplit(object_uri)[:3]
+ if scheme == 'file':
+ # We use local files as-is, so no pruning / book-keeping required.
+ return gio.File(uri=object_uri).get_path()
+
+ if scheme == 'datastore':
+ object_id = quoted_path
file_path = _get_datastore().get_filename(object_id)
- if file_path:
- return util.TempFilePath(file_path)
- else:
- return None
+ else:
+ input_stream = gio.File(uri=object_uri).read()
+ file_path = tempfile.mktemp(dir=env.get_profile_path('data'))
+ output_stream = gio.File(file_path).create(gio.FILE_CREATE_PRIVATE)
+ shutil.copyfileobj(input_stream, output_stream)
+ input_stream.close()
+ output_stream.close()
+ if file_path:
+ return util.TempFilePath(file_path)
+ else:
+ return None
-def get_file_size(object_id):
+
+def get_file_size(object_uri):
"""Return the file size for an object
"""
- logging.debug('get_file_size %r', object_id)
- if os.path.exists(object_id):
- return os.stat(object_id).st_size
-
+ logging.debug('get_file_size(%r)', object_uri)
+ scheme, netloc_, quoted_path = urlsplit(object_uri)[:3]
+ if scheme != 'datastore':
+ gfile = gio.File(uri=object_uri)
+ info = gfile.query_info(gio.FILE_ATTRIBUTE_STANDARD_SIZE)
+ return info.get_attribute_uint64(gio.FILE_ATTRIBUTE_STANDARD_SIZE)
+
+ object_id = quoted_path
file_path = _get_datastore().get_filename(object_id)
if file_path:
size = os.stat(file_path).st_size
@@ -594,68 +716,148 @@ def get_unique_values(key):
return _get_datastore().get_uniquevaluesfor(key, empty_dict)
-def delete(object_id):
+def delete(object_uri):
"""Removes an object from persistent storage
"""
- if not os.path.exists(object_id):
+ scheme, netloc_, quoted_path = urlsplit(object_uri)[:3]
+ if scheme == 'datastore':
+ object_id = quoted_path
_get_datastore().delete(object_id)
- else:
- os.unlink(object_id)
- dir_path = os.path.dirname(object_id)
- filename = os.path.basename(object_id)
- old_files = [os.path.join(dir_path, JOURNAL_METADATA_DIR,
- filename + '.metadata'),
- os.path.join(dir_path, JOURNAL_METADATA_DIR,
- filename + '.preview')]
- for old_file in old_files:
- if os.path.exists(old_file):
- try:
- os.unlink(old_file)
- except EnvironmentError:
- logging.error('Could not remove metadata=%s '
- 'for file=%s', old_file, filename)
- deleted.send(None, object_id=object_id)
+ return
+
+ gfile = gio.File(uri=object_uri)
+ gfile.delete()
+ if gfile.get_uri_scheme() == 'file':
+ _delete_metadata_files(gfile.get_path())
+
+ deleted.send(None, object_uri=object_uri)
+
+
+def _delete_metadata_files(path):
+ """Delete Sugar metadata files associated with the given data file"""
+ dir_path = os.path.dirname(path)
+ filename = os.path.basename(path)
+ old_files = [os.path.join(dir_path, JOURNAL_METADATA_DIR,
+ filename + '.metadata'),
+ os.path.join(dir_path, JOURNAL_METADATA_DIR,
+ filename + '.preview')]
+ for old_file in old_files:
+ if os.path.exists(old_file):
+ try:
+ os.unlink(old_file)
+ except EnvironmentError:
+ logging.error('Could not remove metadata=%s '
+ 'for file=%s', old_file, filename)
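For reference, the sidecar layout these paths point at, next to a data
file on a mounted device (paths are illustrative):

    import os

    # For /media/stick/Story.txt, the companions removed above live in
    # the hidden JOURNAL_METADATA_DIR ('.Sugar-Metadata') beside it:
    path = '/media/stick/Story.txt'
    sidecar_dir = os.path.join(os.path.dirname(path), '.Sugar-Metadata')
    print os.path.join(sidecar_dir, 'Story.txt.metadata')  # JSON properties
    print os.path.join(sidecar_dir, 'Story.txt.preview')   # preview image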
-def copy(metadata, mount_point):
+def copy(object_uri, mount_uri):
"""Copies an object to another mount point
"""
- metadata = get(metadata['uid'])
- if mount_point == '/' and metadata['icon-color'] == '#000000,#ffffff':
+ metadata = get(object_uri)
+ color = metadata.get('icon-color')
+ if mount_uri == 'datastore:' and color == '#000000,#ffffff':
client = gconf.client_get_default()
metadata['icon-color'] = client.get_string('/desktop/sugar/user/color')
- file_path = get_file(metadata['uid'])
+ file_path = get_file(object_uri)
if file_path is None:
file_path = ''
- metadata['mountpoint'] = mount_point
- del metadata['uid']
+ metadata['mount_uri'] = mount_uri
+ metadata.pop('uid', None)
- return write(metadata, file_path, transfer_ownership=False)
+ return write(metadata, mount_uri, None, file_path,
+ transfer_ownership=False)
-def write(metadata, file_path='', update_mtime=True, transfer_ownership=True):
- """Creates or updates an entry for that id
+# FIXME: pass target uri (mount_uri) as parameter
+@trace()
+def write(metadata, mount_uri, object_uri=None, file_path='',
+ update_mtime=True, transfer_ownership=True):
+ """Create or update an entry on mounted object storage
+
+ If object_uri is None, create a new entry, otherwise update an
+ existing entry.
+
+ If update_mtime is True (default), update the time of last
+ modification in metadata (in-place).
+
+ If transfer_ownership is True (default), move file_path into place.
+ If that is not possible, remove file_path after copying.
+
+ For objects on POSIX file systems, the title property may be
+ updated in metadata (in-place).
+
+ Return the URI of the written entry. Even for updates this may be
+ different from the original value.
"""
- logging.debug('model.write %r %r %r', metadata.get('uid', ''), file_path,
+ logging.debug('model.write %r %r %r', object_uri, file_path,
update_mtime)
+
+ assert mount_uri
+ if object_uri and not object_uri.startswith(mount_uri):
+ raise ValueError('Object to be updated not located on given mount.')
+
if update_mtime:
metadata['mtime'] = datetime.now().isoformat()
metadata['timestamp'] = int(time.time())
- if metadata.get('mountpoint', '/') == '/':
- if metadata.get('uid', ''):
- object_id = metadata['uid']
- _get_datastore().update(object_id, dbus.Dictionary(metadata),
- file_path, transfer_ownership)
- else:
- object_id = _get_datastore().create(dbus.Dictionary(metadata),
- file_path,
- transfer_ownership)
+ if mount_uri == 'datastore:':
+ return _write_entry_to_datastore(object_uri, metadata, file_path,
+ transfer_ownership)
+ elif mount_uri.startswith('dav'):
+ return _write_entry_on_webdav_share(mount_uri, object_uri, metadata,
+ file_path, transfer_ownership)
+ elif mount_uri.startswith('file:'):
+ return _write_entry_on_external_device(mount_uri, object_uri,
+ metadata, file_path,
+ transfer_ownership)
else:
- object_id = _write_entry_on_external_device(metadata, file_path)
+ raise NotImplementedError("Don't know how to write to"
+ " %r" % (mount_uri, ))
+
+
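Hypothetical call sites for the new URI-keyed write API (all URIs and
paths are examples, not from this patch):

    # Create a new datastore entry, keeping the source file:
    new_uri = write(metadata, 'datastore:', file_path='/tmp/page.html',
                    transfer_ownership=False)

    # Update the same entry in place:
    write(metadata, 'datastore:', object_uri=new_uri)

    # Duplicate it onto a mounted USB stick:
    copy(new_uri, 'file:///media/stick')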
+def _write_entry_to_datastore(object_uri, metadata, file_path,
+ transfer_ownership):
+ if object_uri:
+ object_id = urlsplit(object_uri)[2]
+ _get_datastore().update(object_id, dbus.Dictionary(metadata),
+ file_path, transfer_ownership)
+ return object_uri
+ else:
+ object_id = _get_datastore().create(dbus.Dictionary(metadata),
+ file_path, transfer_ownership)
+ return 'datastore:' + object_id
+
+
+def _write_entry_on_webdav_share(mount_uri, object_uri, metadata, file_path,
+ transfer_ownership):
+ title = metadata.get('title') or _('Untitled')
+ proposed_name = get_file_name(title, metadata.get('mime_type', ''))
+ output_stream, gfile = create_unique_file(mount_uri, proposed_name)
+ if file_path:
+ shutil.copyfileobj(file(file_path), output_stream)
+
+ output_stream.close()
+
+ info = gio.FileInfo()
+ attr_infos = gfile.query_settable_attributes() or []
+ settable_attrs = [attr_info.name for attr_info in attr_infos]
+ if gio.FILE_ATTRIBUTE_TIME_MODIFIED in settable_attrs:
+ info.set_modification_time(metadata.get('timestamp', time.time()))
+ if gio.FILE_ATTRIBUTE_STANDARD_CONTENT_TYPE in settable_attrs:
+ info.set_content_type(metadata.get('mime_type',
+ 'application/octet-stream'))
+
+ if 'webdav' in gfile.query_writable_namespaces():
+ for name, value in metadata.items():
+ attr_name = _SUGAR_WEBDAV_PREFIX + urllib.quote(name)
+ if isinstance(value, basestring) and '\0' in value:
+ # strings with NUL bytes are not supported by gio
+ continue
+
+ info.set_attribute_string(attr_name, str(value))
- return object_id
+ gfile.set_attributes_from_info(info)
def _rename_entry_on_external_device(file_path, destination_path,
@@ -678,9 +880,9 @@ def _rename_entry_on_external_device(file_path, destination_path,
'for file=%s', ofile, old_fname)
-def _write_entry_on_external_device(metadata, file_path):
- """Create and update an entry copied from the
- DS to an external storage device.
+def _write_entry_on_external_device(mount_uri, object_uri, metadata,
+ file_path, transfer_ownership):
+ """Create or update an entry on a mounted POSIX file system
Besides copying the associated file a file for the preview
and one for the metadata are stored in the hidden directory
@@ -691,66 +893,66 @@ def _write_entry_on_external_device(metadata, file_path):
handled failsafe.
"""
- if 'uid' in metadata and os.path.exists(metadata['uid']):
- file_path = metadata['uid']
-
if not file_path or not os.path.exists(file_path):
raise ValueError('Entries without a file cannot be copied to '
'removable devices')
if not metadata.get('title'):
metadata['title'] = _('Untitled')
- file_name = get_file_name(metadata['title'], metadata['mime_type'])
+ destination_name = get_file_name(metadata['title'], metadata['mime_type'])
- destination_path = os.path.join(metadata['mountpoint'], file_name)
+ mount_point = gio.File(mount_uri).get_path()
+ logging.debug('_write_entry_on_external_device: mount_point=%r,'
+ ' destination_name=%r', mount_point, destination_name)
+ destination_path = os.path.join(mount_point, destination_name)
if destination_path != file_path:
- file_name = get_unique_file_name(metadata['mountpoint'], file_name)
- destination_path = os.path.join(metadata['mountpoint'], file_name)
- clean_name, extension_ = os.path.splitext(file_name)
+ destination_name = get_unique_file_name(mount_point, destination_name)
+ destination_path = os.path.join(mount_point, destination_name)
+ clean_name, extension_ = os.path.splitext(destination_name)
metadata['title'] = clean_name
metadata_copy = metadata.copy()
- metadata_copy.pop('mountpoint', None)
+ metadata_copy.pop('mount_uri')
metadata_copy.pop('uid', None)
metadata_copy.pop('filesize', None)
- metadata_dir_path = os.path.join(metadata['mountpoint'],
+ metadata_dir_path = os.path.join(mount_point,
JOURNAL_METADATA_DIR)
if not os.path.exists(metadata_dir_path):
os.mkdir(metadata_dir_path)
- preview = None
- if 'preview' in metadata_copy:
- preview = metadata_copy['preview']
- preview_fname = file_name + '.preview'
- metadata_copy.pop('preview', None)
+ preview = metadata_copy.pop('preview', None)
+ if preview:
+ preview_fname = destination_name + '.preview'
try:
metadata_json = simplejson.dumps(metadata_copy)
except (UnicodeDecodeError, EnvironmentError):
logging.error('Could not convert metadata to json.')
else:
- (fh, fn) = tempfile.mkstemp(dir=metadata['mountpoint'])
+ (fh, fn) = tempfile.mkstemp(dir=mount_point)
os.write(fh, metadata_json)
os.close(fh)
- os.rename(fn, os.path.join(metadata_dir_path, file_name + '.metadata'))
+ os.rename(fn, os.path.join(metadata_dir_path,
+ destination_name + '.metadata'))
if preview:
- (fh, fn) = tempfile.mkstemp(dir=metadata['mountpoint'])
+ (fh, fn) = tempfile.mkstemp(dir=mount_point)
os.write(fh, preview)
os.close(fh)
os.rename(fn, os.path.join(metadata_dir_path, preview_fname))
- if not os.path.dirname(destination_path) == os.path.dirname(file_path):
+ if not transfer_ownership:
shutil.copy(file_path, destination_path)
- else:
+ elif gio.File(object_uri).get_path() == file_path:
_rename_entry_on_external_device(file_path, destination_path,
metadata_dir_path)
+ else:
+ shutil.move(file_path, destination_path)
- object_id = destination_path
- created.send(None, object_id=object_id)
-
- return object_id
+ object_uri = gio.File(path=destination_path).get_uri()
+ created.send(None, object_uri=object_uri)
+ return object_uri
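Condensed restatement of the final transfer branch above (sketch only;
the real code also moves the .metadata/.preview sidecars when renaming,
via _rename_entry_on_external_device()):

    import os
    import shutil

    def _transfer(file_path, destination_path, updating_in_place,
                  transfer_ownership):
        if not transfer_ownership:
            shutil.copy(file_path, destination_path)  # caller keeps source
        elif updating_in_place:
            os.rename(file_path, destination_path)    # rename on same mount
        else:
            shutil.move(file_path, destination_path)  # copy, then unlink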
def get_file_name(title, mime_type):
@@ -791,11 +993,42 @@ def get_unique_file_name(mount_point, file_name):
return file_name
-def is_editable(metadata):
- if metadata.get('mountpoint', '/') == '/':
- return True
- else:
- return os.access(metadata['mountpoint'], os.W_OK)
+@trace()
+def create_unique_file(mount_uri, file_name):
+ """Create a new file with a unique name on given mount
+
+ Create a new file on the mount identified by mount_uri. Use
+ file_name as the name on the mount if possible, otherwise use it
+ as a template for the name of the new file, inserting a decimal
+ number as necessary. Return both the gio.FileOutputStream instance
+ and the gio.File instance of the new object (in this order).
+ """
+ gdir = gio.File(mount_uri)
+ name, extension = os.path.splitext(file_name)
+ last_error = None
+ for try_nr in range(255):
+ if not try_nr:
+ try_name = file_name
+ else:
+ try_name = '%s_%d%s' % (name, try_nr, extension)
+
+ gfile = gdir.get_child(try_name)
+ try:
+ output_stream = gfile.create()
+ except gio.Error, exception:
+ last_error = exception
+ continue
+
+ return output_stream, gfile
+
+ raise last_error
+
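Hypothetical use of create_unique_file(); the URI, file name and content
are examples:

    output_stream, gfile = create_unique_file('dav://server/shared/',
                                              'Drawing.png')
    try:
        output_stream.write(open('/tmp/drawing.png', 'rb').read())
    finally:
        output_stream.close()
    # If Drawing.png already existed, gfile now points at e.g.
    # dav://server/shared/Drawing_1.png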
+@trace()
+def is_editable(object_uri):
+ scheme, netloc_, quoted_path = urlsplit(object_uri)[:3]
+ if scheme == 'file':
+ return os.access(urllib.unquote(quoted_path), os.W_OK)
+ return scheme in ['datastore', 'dav', 'davs']
def get_documents_path():
@@ -807,13 +1040,18 @@ def get_documents_path():
Returns: Path to $HOME/DOCUMENTS or None if an error occurs
"""
+ global _documents_path
+ if _documents_path is not None:
+ return _documents_path
+
try:
pipe = subprocess.Popen(['xdg-user-dir', 'DOCUMENTS'],
stdout=subprocess.PIPE)
documents_path = os.path.normpath(pipe.communicate()[0].strip())
if os.path.exists(documents_path) and \
os.environ.get('HOME') != documents_path:
- return documents_path
+ _documents_path = documents_path
+ return _documents_path
except OSError, exception:
if exception.errno != errno.ENOENT:
logging.exception('Could not run xdg-user-dir')
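get_documents_path() now memoizes a successful lookup in the module-level
_documents_path (a failed lookup is not cached and will be retried). The
underlying query can be reproduced directly; this assumes xdg-user-dirs
is installed:

    import subprocess

    # Same query get_documents_path() issues; prints e.g.
    # /home/olpc/Documents
    pipe = subprocess.Popen(['xdg-user-dir', 'DOCUMENTS'],
                            stdout=subprocess.PIPE)
    print pipe.communicate()[0].strip()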