| author | Tomeu Vizoso <tomeu@sugarlabs.org> | 2009-01-07 16:00:21 (GMT) |
| --- | --- | --- |
| committer | Tomeu Vizoso <tomeu@sugarlabs.org> | 2009-01-07 16:00:21 (GMT) |
| commit | 847d482caca11f4a2bcd982d70240360016daaf4 (patch) | |
| tree | 164776c2e1a0c13c17f2647e12eded84f67b132a /src/jarabe/journal | |
| parent | 40d859ca33f0a11a98cb0e14cd5016fc6743f964 (diff) | |
Split backends in two ResultSet subclasses
Diffstat (limited to 'src/jarabe/journal')
| Mode | File | Lines changed |
| --- | --- | --- |
| -rw-r--r-- | src/jarabe/journal/model.py | 151 |

1 files changed, 87 insertions, 64 deletions
```diff
diff --git a/src/jarabe/journal/model.py b/src/jarabe/journal/model.py
index 19fcaf1..4827321 100644
--- a/src/jarabe/journal/model.py
+++ b/src/jarabe/journal/model.py
@@ -73,7 +73,7 @@ class _Cache(object):
         else:
             return self._array[key]
 
-class ResultSet(object):
+class BaseResultSet(object):
     """Encapsulates the result of a query
 
     """
@@ -90,64 +90,16 @@ class ResultSet(object):
     def get_length(self):
         if self._total_count == -1:
             query = self._query.copy()
-            query['limit'] = ResultSet._CACHE_LIMIT
-            entries, self._total_count = self._find(query)
+            query['limit'] = BaseResultSet._CACHE_LIMIT
+            entries, self._total_count = self.find(query)
             self._cache.append_all(entries)
             self._offset = 0
         return self._total_count
 
     length = property(get_length)
 
-    def _get_all_files(self, dir_path):
-        files = []
-        for entry in os.listdir(dir_path):
-            full_path = os.path.join(dir_path, entry)
-
-            stat = os.stat(full_path)
-            if S_IFMT(stat.st_mode) == S_IFDIR:
-                files.extend(self._get_all_files(full_path))
-            elif S_IFMT(stat.st_mode) == S_IFREG:
-                files.append((full_path, stat))
-
-        return files
-
-    def _query_mount_point(self, mount_point, query):
-        t = time.time()
-
-        files = self._get_all_files(mount_point)
-        offset = int(query.get('offset', 0))
-        limit = int(query.get('limit', len(files)))
-
-        total_count = len(files)
-        files.sort(lambda a, b: int(b[1].st_mtime - a[1].st_mtime))
-        files = files[offset:offset + limit]
-
-        result = []
-        for file_path, stat in files:
-            metadata = _get_file_metadata(file_path, stat)
-            result.append(metadata)
-
-        logging.debug('_query_mount_point took %f s.' % (time.time() - t))
-
-        return result, total_count
-
-    def _find(self, query):
-        mount_points = query.get('mountpoints', ['/'])
-        if mount_points is None or len(mount_points) != 1:
-            raise ValueError('Exactly one mount point must be specified')
-
-        if mount_points[0] == '/':
-            data_store = _get_datastore()
-            entries, total_count = _get_datastore().find(query, PROPERTIES,
-                                                         byte_arrays=True)
-        else:
-            entries, total_count = self._query_mount_point(mount_points[0],
-                                                           query)
-
-        for entry in entries:
-            entry['mountpoint'] = mount_points[0]
-
-        return entries, total_count
+    def find(self, query):
+        raise NotImplementedError()
 
     def seek(self, position):
         self._position = position
@@ -155,10 +107,10 @@ class ResultSet(object):
     def read(self, max_count):
         logging.debug('ResultSet.read position: %r' % self._position)
 
-        if max_count * 5 > ResultSet._CACHE_LIMIT:
+        if max_count * 5 > BaseResultSet._CACHE_LIMIT:
             raise RuntimeError(
-                    'max_count (%i) too big for ResultSet._CACHE_LIMIT'
-                    ' (%i).' % (max_count, ResultSet._CACHE_LIMIT))
+                    'max_count (%i) too big for BaseResultSet._CACHE_LIMIT'
+                    ' (%i).' % (max_count, BaseResultSet._CACHE_LIMIT))
 
         if self._position == -1:
             self.seek(0)
@@ -178,16 +130,16 @@ class ResultSet(object):
 
         if (remaining_forward_entries <= 0 and
                 remaining_backwards_entries <= 0) or \
-                max_count > ResultSet._CACHE_LIMIT:
+                max_count > BaseResultSet._CACHE_LIMIT:
 
             # Total cache miss: remake it
             offset = max(0, self._position - max_count)
             logging.debug('remaking cache, offset: %r limit: %r' % \
                           (offset, max_count * 2))
             query = self._query.copy()
-            query['limit'] = ResultSet._CACHE_LIMIT
+            query['limit'] = BaseResultSet._CACHE_LIMIT
             query['offset'] = offset
-            entries, self._total_count = self._find(query)
+            entries, self._total_count = self.find(query)
 
             self._cache.remove_all(self._cache)
             self._cache.append_all(entries)
@@ -202,13 +154,13 @@ class ResultSet(object):
             query = self._query.copy()
             query['limit'] = max_count
             query['offset'] = last_cached_entry
-            entries, self._total_count = self._find(query)
+            entries, self._total_count = self.find(query)
 
             # update cache
             self._cache.append_all(entries)
 
             # apply the cache limit
-            objects_excess = len(self._cache) - ResultSet._CACHE_LIMIT
+            objects_excess = len(self._cache) - BaseResultSet._CACHE_LIMIT
             if objects_excess > 0:
                 self._offset += objects_excess
                 self._cache.remove_all(self._cache[:objects_excess])
@@ -224,13 +176,13 @@ class ResultSet(object):
             query = self._query.copy()
             query['limit'] = limit
             query['offset'] = self._offset
-            entries, self._total_count = self._find(query)
+            entries, self._total_count = self.find(query)
 
             # update cache
             self._cache.prepend_all(entries)
 
             # apply the cache limit
-            objects_excess = len(self._cache) - ResultSet._CACHE_LIMIT
+            objects_excess = len(self._cache) - BaseResultSet._CACHE_LIMIT
             if objects_excess > 0:
                 self._cache.remove_all(self._cache[-objects_excess:])
         else:
@@ -240,6 +192,69 @@ class ResultSet(object):
         last_pos = self._position - self._offset + max_count
         return self._cache[first_pos:last_pos]
 
+class DatastoreResultSet(BaseResultSet):
+    """Encapsulates the result of a query on the datastore
+    """
+    def __init__(self, query):
+        BaseResultSet.__init__(self, query)
+
+    def find(self, query):
+        entries, total_count = _get_datastore().find(query, PROPERTIES,
+                                                     byte_arrays=True)
+
+        for entry in entries:
+            entry['mountpoint'] = '/'
+
+        return entries, total_count
+
+class InplaceResultSet(BaseResultSet):
+    """Encapsulates the result of a query on a mount point
+    """
+    def __init__(self, query, mount_point):
+        BaseResultSet.__init__(self, query)
+        self._mount_point = mount_point
+
+    def find(self, query):
+        entries, total_count = self._query_mount_point(self._mount_point,
+                                                       query)
+
+        for entry in entries:
+            entry['mountpoint'] = self._mount_point
+
+        return entries, total_count
+
+    def _get_all_files(self, dir_path):
+        files = []
+        for entry in os.listdir(dir_path):
+            full_path = os.path.join(dir_path, entry)
+
+            stat = os.stat(full_path)
+            if S_IFMT(stat.st_mode) == S_IFDIR:
+                files.extend(self._get_all_files(full_path))
+            elif S_IFMT(stat.st_mode) == S_IFREG:
+                files.append((full_path, stat))
+
+        return files
+
+    def _query_mount_point(self, mount_point, query):
+        t = time.time()
+
+        files = self._get_all_files(mount_point)
+        offset = int(query.get('offset', 0))
+        limit = int(query.get('limit', len(files)))
+
+        total_count = len(files)
+        files.sort(lambda a, b: int(b[1].st_mtime - a[1].st_mtime))
+        files = files[offset:offset + limit]
+
+        result = []
+        for file_path, stat in files:
+            metadata = _get_file_metadata(file_path, stat)
+            result.append(metadata)
+
+        logging.debug('_query_mount_point took %f s.' % (time.time() - t))
+
+        return result, total_count
+
 def _get_file_metadata(path, stat):
     client = gconf.client_get_default()
     return {'uid': path,
@@ -278,7 +293,15 @@ def find(query):
     """
     if 'order_by' not in query:
         query['order_by'] = ['-mtime']
-    return ResultSet(query)
+
+    mount_points = query.pop('mountpoints', ['/'])
+    if mount_points is None or len(mount_points) != 1:
+        raise ValueError('Exactly one mount point must be specified')
+
+    if mount_points[0] == '/':
+        return DatastoreResultSet(query)
+    else:
+        return InplaceResultSet(query, mount_points[0])
 
 def _get_mount_point(path):
     dir_path = os.path.dirname(path)
```
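A minimal usage sketch of how a caller sees the split after this change (not part of the commit): it assumes a running Sugar shell where `jarabe.journal.model` and the datastore D-Bus service are available, and `/media/usb-stick` is a made-up example path.

```python
from jarabe.journal import model

# Mount point '/' now resolves to a DatastoreResultSet, which queries the
# datastore service lazily when the result set is first read.
datastore_results = model.find({'mountpoints': ['/']})
datastore_results.seek(0)
entries = datastore_results.read(10)  # max_count must stay below _CACHE_LIMIT / 5

# Any other mount point resolves to an InplaceResultSet, which walks the
# files under that path instead of asking the datastore.
# '/media/usb-stick' is an illustrative path, not one used by the commit.
usb_results = model.find({'mountpoints': ['/media/usb-stick']})
total = usb_results.length  # triggers InplaceResultSet.find() via get_length()
```

The caching and paging logic stays backend-agnostic in `BaseResultSet`; each subclass only supplies `find()`.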