author     Sascha Silbe <sascha@silbe.org>       2009-08-17 17:41:04 (GMT)
committer  Tomeu Vizoso <tomeu@sugarlabs.org>    2009-08-17 19:27:29 (GMT)
commit     f46f5fb5a7e88ec81a08aa1c115ef1096acf4a04
tree       9488041202ee2cb9d0740da769e841de3bc9d4cb
parent     d7ab281cabeab4fe3529ca1fa14b8a15895b9d36
IndexStore refactoring and prefix term support
-rw-r--r--  src/carquinyol/indexstore.py  |  287
1 file changed, 175 insertions(+), 112 deletions(-)
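
The patch below replaces IndexStore's ad-hoc term handling with a TermGenerator subclass that indexes every metadata property twice: once as a single full-value term (the 'F' marker plus the property's query prefix) for exact matching and enumeration, and once as word-split free-text terms under the property prefix, or 'N' for unprefixed properties. The following standalone sketch is not part of the commit; it only approximates that term scheme. The sample metadata is made up, and the 'M'/'K' prefix letters for mime_type/keep are assumed, since their definitions fall outside this diff.

# Illustration only -- approximates the terms the new TermGenerator adds,
# without requiring python-xapian. Prefixes 'Q', 'A', 'I' come from the
# diff; 'M' and 'K' are assumed here (defined outside this hunk).

_PREFIX_NONE = 'N'
_PREFIX_FULL_VALUE = 'F'
_QUERY_TERM_MAP = {
    'uid': 'Q',
    'activity': 'A',
    'activity_id': 'I',
    'mime_type': 'M',   # assumed value
    'keep': 'K',        # assumed value
}

def sketch_terms(properties):
    """Roughly the term set a stored document ends up with."""
    terms = []
    for name, value in properties.items():
        value = str(value)
        prefix = _QUERY_TERM_MAP.get(name, '')
        if prefix:
            # Unsplit full value: lets contains()/delete()/get_activities()
            # look documents up by exact property value.
            terms.append(_PREFIX_FULL_VALUE + prefix + value)
        # Free-text terms, roughly as xapian.TermGenerator.index_text()
        # would produce them: lowercased words under the prefix, or 'N'.
        for word in value.split():
            terms.append((prefix or _PREFIX_NONE) + word.lower())
    return terms

print(sketch_terms({'activity': 'org.laptop.AbiWordActivity',  # hypothetical
                    'title': 'My essay'}))

The real code additionally skips empty values and the properties listed in _PROPERTIES_NOT_TO_INDEX, as shown in _index_property() below.
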
diff --git a/src/carquinyol/indexstore.py b/src/carquinyol/indexstore.py
index 42c3132..7b620b5 100644
--- a/src/carquinyol/indexstore.py
+++ b/src/carquinyol/indexstore.py
@@ -16,10 +16,11 @@
import logging
import os
+import sys
import gobject
import xapian
-from xapian import WritableDatabase, Document, Enquire, Query, QueryParser
+from xapian import WritableDatabase, Document, Enquire, Query
from carquinyol import layoutmanager
from carquinyol.layoutmanager import MAX_QUERY_LIMIT
@@ -28,6 +29,8 @@ _VALUE_UID = 0
_VALUE_TIMESTAMP = 1
_VALUE_TITLE = 2
+_PREFIX_NONE = 'N'
+_PREFIX_FULL_VALUE = 'F'
_PREFIX_UID = 'Q'
_PREFIX_ACTIVITY = 'A'
_PREFIX_ACTIVITY_ID = 'I'
@@ -40,10 +43,164 @@ _FLUSH_THRESHOLD = 20
# Force a flush after _n_ seconds since the last change to the db
_FLUSH_TIMEOUT = 60
-_PROPERTIES_NOT_TO_INDEX = ['timestamp', 'activity_id', 'keep', 'preview']
+_PROPERTIES_NOT_TO_INDEX = ['timestamp', 'preview']
_MAX_RESULTS = int(2 ** 31 - 1)
+_QUERY_TERM_MAP = {
+ 'uid': _PREFIX_UID,
+ 'activity': _PREFIX_ACTIVITY,
+ 'activity_id': _PREFIX_ACTIVITY_ID,
+ 'mime_type': _PREFIX_MIME_TYPE,
+ 'keep': _PREFIX_KEEP,
+}
+
+_QUERY_VALUE_MAP = {
+ 'timestamp': _VALUE_TIMESTAMP,
+}
+
+
+class TermGenerator (xapian.TermGenerator):
+
+ def index_document(self, document, properties):
+ document.add_value(_VALUE_TIMESTAMP, str(properties['timestamp']))
+ document.add_value(_VALUE_TITLE, properties.get('title', '').strip())
+
+ xapian.TermGenerator.set_document(self, document)
+
+ properties = dict(properties)
+ self._index_known(document, properties)
+ self._index_unknown(document, properties)
+
+ def _index_known(self, document, properties):
+ for name, prefix in _QUERY_TERM_MAP.items():
+ if (name not in properties):
+ continue
+
+ self._index_property(document, name, properties.pop(name), prefix)
+
+ def _index_unknown(self, document, properties):
+ for name, value in properties.items():
+ self._index_property(document, name, value)
+
+ def _index_property(self, doc, name, value, prefix=''):
+ if name in _PROPERTIES_NOT_TO_INDEX or not value:
+ return
+
+ if isinstance(value, unicode):
+ value = value.encode('utf-8')
+ elif not isinstance(value, basestring):
+ value = str(value)
+
+ # We need to add the full value (i.e. not split into words) so
+ # we can enumerate unique values. It also simplifies setting up
+ # dictionary-based queries.
+ if prefix:
+ doc.add_term(_PREFIX_FULL_VALUE + prefix + value)
+
+ self.index_text(value, 1, prefix or _PREFIX_NONE)
+ self.increase_termpos()
+
+
+class QueryParser (xapian.QueryParser):
+ """QueryParser that understands dictionaries and Xapian query strings.
+
+ The dictionary contains metadata names as keys and either basic types
+ (exact match), 2-tuples (range, only valid for value-stored metadata)
+ or a list (multiple exact matches joined with OR) as values.
+ An empty dictionary matches everything. Queries from different keys
+ (i.e. different metadata names) are joined with AND.
+ """
+
+ def __init__(self):
+ xapian.QueryParser.__init__(self)
+
+ for name, prefix in _QUERY_TERM_MAP.items():
+ self.add_prefix(name, prefix)
+ self.add_prefix('', prefix)
+
+ self.add_prefix('', _PREFIX_NONE)
+
+ def _parse_query_term(self, name, prefix, value):
+ if isinstance(value, list):
+ subqueries = [self._parse_query_term(name, prefix, word)
+ for word in value]
+ return Query(Query.OP_OR, subqueries)
+
+ elif prefix:
+ return Query(_PREFIX_FULL_VALUE + prefix + str(value))
+ else:
+ return Query(_PREFIX_NONE + str(value))
+
+ def _parse_query_value_range(self, name, value, value_no):
+ if len(value) != 2:
+ raise TypeError(
+ 'Only tuples of size 2 have a defined meaning. '
+ 'Did you mean to pass a list instead?')
+
+ start, end = value
+ return Query(Query.OP_VALUE_RANGE, value_no, str(start), str(end))
+
+ def _parse_query_value(self, name, value_no, value):
+ if isinstance(value, list):
+ subqueries = [self._parse_query_value(name, value_no, word)
+ for word in value]
+ return Query(Query.OP_OR, subqueries)
+
+ elif isinstance(value, tuple):
+ return self._parse_query_value_range(name, value, value_no)
+
+ elif isinstance(value, dict):
+ # compatibility option for timestamp: {'start': 0, 'end': 1}
+ start = value.get('start', 0)
+ end = value.get('end', sys.maxint)
+ return self._parse_query_value_range(name, (start, end), value_no)
+
+ else:
+ return Query(Query.OP_VALUE_RANGE,
+ _QUERY_VALUE_MAP[name], str(value), str(value))
+
+ def _parse_query_xapian(self, query_str):
+ try:
+ return xapian.QueryParser.parse_query(
+ self, query_str,
+ QueryParser.FLAG_PHRASE |
+ QueryParser.FLAG_BOOLEAN |
+ QueryParser.FLAG_LOVEHATE |
+ QueryParser.FLAG_WILDCARD,
+ '')
+
+ except xapian.QueryParserError, exception:
+ logging.warning('Invalid query string: '+exception.get_msg())
+ return Query()
+
+ def parse_query(self, query_dict, query_string):
+ logging.debug('parse_query %r %r', query_dict, query_string)
+ queries = []
+ query_dict = dict(query_dict)
+
+ if query_string:
+ queries.append(self._parse_query_xapian(str(query_string)))
+
+ for name, value in query_dict.items():
+ if name in _QUERY_TERM_MAP:
+ queries.append(self._parse_query_term(name,
+ _QUERY_TERM_MAP[name], value))
+ elif name in _QUERY_VALUE_MAP:
+ queries.append(self._parse_query_value(name,
+ _QUERY_VALUE_MAP[name], value))
+ else:
+ logging.warning('Unknown term: %r=%r', name, value)
+
+ if not queries:
+ queries.append(Query(''))
+
+ if query_dict:
+ logging.warning('Unknown term(s): %r', query_dict)
+
+ logging.debug('queries: %r', [str(q) for q in queries])
+ return Query(Query.OP_AND, queries)
+
class IndexStore(object):
"""Index metadata and provide rich query facilities on it.
@@ -70,7 +227,8 @@ class IndexStore(object):
os.remove(os.path.join(index_path, f))
def contains(self, uid):
- postings = self._database.postlist(_PREFIX_UID + uid)
+ postings = self._database.postlist(_PREFIX_FULL_VALUE + \
+ _PREFIX_UID + uid)
try:
postlist_item = postings.next()
except StopIteration:
@@ -79,60 +237,28 @@ class IndexStore(object):
def store(self, uid, properties):
document = Document()
- document.add_term(_PREFIX_UID + uid)
- document.add_term(_PREFIX_ACTIVITY + properties.get('activity', ''))
- document.add_term(_PREFIX_MIME_TYPE + properties.get('mime_type', ''))
- document.add_term(_PREFIX_ACTIVITY_ID +
- properties.get('activity_id', ''))
- document.add_term(_PREFIX_KEEP + str(properties.get('keep', 0)))
-
document.add_value(_VALUE_UID, uid)
- document.add_value(_VALUE_TIMESTAMP, str(properties['timestamp']))
- document.add_value(_VALUE_TITLE, properties.get('title', '').strip())
-
- term_generator = xapian.TermGenerator()
-
- # TODO: we should do stemming, but in which language?
- #if language is not None:
- # term_generator.set_stemmer(_xapian.Stem(language))
-
- # TODO: we should use a stopper
- #if stop is not None:
- # stopper = _xapian.SimpleStopper()
- # for term in stop:
- # stopper.add (term)
- # term_generator.set_stopper (stopper)
-
- term_generator.set_document(document)
- term_generator.index_text_without_positions(
- self._extract_text(properties), 1, '')
+ term_generator = TermGenerator()
+ term_generator.index_document(document, properties)
if not self.contains(uid):
self._database.add_document(document)
else:
- self._database.replace_document(_PREFIX_UID + uid, document)
- self._flush()
+ self._database.replace_document(_PREFIX_FULL_VALUE + \
+ _PREFIX_UID + uid, document)
- def _extract_text(self, properties):
- text = ''
- for key, value in properties.items():
- if key not in _PROPERTIES_NOT_TO_INDEX:
- if text:
- text += ' '
- if isinstance(value, unicode):
- value = value.encode('utf-8')
- elif not isinstance(value, basestring):
- value = str(value)
- text += value
- return text
+ self._flush()
def find(self, query):
offset = query.pop('offset', 0)
limit = query.pop('limit', MAX_QUERY_LIMIT)
order_by = query.pop('order_by', [])
+ query_string = query.pop('query', None)
+ query_parser = QueryParser()
+ query_parser.set_database(self._database)
enquire = Enquire(self._database)
- enquire.set_query(self._parse_query(query))
+ enquire.set_query(query_parser.parse_query(query, query_string))
# This will assure that the results count is exact.
check_at_least = offset + limit + 1
@@ -151,7 +277,7 @@ class IndexStore(object):
elif order_by == '-title':
enquire.set_sort_by_value(_VALUE_TITLE, False)
else:
- logging.warning('Unsupported property for sorting: %s' % order_by)
+ logging.warning('Unsupported property for sorting: %s', order_by)
query_result = enquire.get_mset(offset, limit, check_at_least)
total_count = query_result.get_matches_estimated()
@@ -162,77 +288,14 @@ class IndexStore(object):
return (uids, total_count)
- def _parse_query(self, query_dict):
- logging.debug('_parse_query %r' % query_dict)
- queries = []
-
- query_str = query_dict.pop('query', None)
- if query_str is not None:
- query_parser = QueryParser()
- query_parser.set_database(self._database)
- #query_parser.set_default_op(Query.OP_AND)
-
- # TODO: we should do stemming, but in which language?
- #query_parser.set_stemmer(_xapian.Stem(lang))
- #query_parser.set_stemming_strategy(qp.STEM_SOME)
-
- query = query_parser.parse_query(
- query_str,
- QueryParser.FLAG_PHRASE |
- QueryParser.FLAG_BOOLEAN |
- QueryParser.FLAG_LOVEHATE |
- QueryParser.FLAG_WILDCARD,
- '')
-
- queries.append(query)
-
- timestamp = query_dict.pop('timestamp', None)
- if timestamp is not None:
- start = str(timestamp.pop('start', 0))
- end = str(timestamp.pop('end', _MAX_RESULTS))
- query = Query(Query.OP_VALUE_RANGE, _VALUE_TIMESTAMP, start, end)
- queries.append(query)
-
- uid = query_dict.pop('uid', None)
- if uid is not None:
- queries.append(Query(_PREFIX_UID + uid))
-
- activity = query_dict.pop('activity', None)
- if activity is not None:
- queries.append(Query(_PREFIX_ACTIVITY + activity))
-
- activity_id = query_dict.pop('activity_id', None)
- if activity_id is not None:
- query = Query(_PREFIX_ACTIVITY_ID + activity_id)
- queries.append(query)
-
- keep = query_dict.pop('keep', None)
- if keep is not None:
- query = Query(_PREFIX_KEEP + str(keep))
- queries.append(query)
-
- mime_type = query_dict.pop('mime_type', None)
- if mime_type is not None:
- mime_queries = []
- for mime_type in mime_type:
- mime_queries.append(Query(_PREFIX_MIME_TYPE + mime_type))
- queries.append(Query(Query.OP_OR, mime_queries))
-
- if not queries:
- queries.append(Query(''))
-
- if query_dict:
- logging.warning('Unknown term(s): %r' % query_dict)
-
- return Query(Query.OP_AND, queries)
-
def delete(self, uid):
- self._database.delete_document(_PREFIX_UID + uid)
+ self._database.delete_document(_PREFIX_FULL_VALUE + _PREFIX_UID + uid)
def get_activities(self):
activities = []
- for term in self._database.allterms(_PREFIX_ACTIVITY):
- activities.append(term.term[len(_PREFIX_ACTIVITY):])
+ prefix = _PREFIX_FULL_VALUE + _PREFIX_ACTIVITY
+ for term in self._database.allterms(prefix):
+ activities.append(term.term[len(prefix):])
return activities
def _flush_timeout_cb(self):
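
The QueryParser docstring added in this patch describes the dictionary-based query format that IndexStore.find() now forwards to parse_query(): basic values match exactly, lists are OR-ed, 2-tuples become value ranges, and queries for different keys are AND-ed together. A minimal sketch of the accepted shapes follows; it is not from the commit, the metadata values are made up, and index_store stands for an already constructed IndexStore.

# Sketch only -- query shapes handled by the new parse_query().
query = {
    'activity': 'org.laptop.AbiWordActivity',   # exact match on a prefixed term
    'mime_type': ['text/plain', 'text/html'],   # list -> subqueries joined with OP_OR
    'timestamp': (1230768000, 1250000000),      # 2-tuple -> OP_VALUE_RANGE
    # {'start': ..., 'end': ...} is still accepted for timestamp (compatibility)
    'query': 'essay draft*',                    # free-text string; FLAG_WILDCARD is enabled
    'offset': 0,                                # popped by find() before parsing
    'limit': 20,
}
# uids, total_count = index_store.find(query)  # index_store: hypothetical IndexStore
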