Web   ·   Wiki   ·   Activities   ·   Blog   ·   Lists   ·   Chat   ·   Meeting   ·   Bugs   ·   Git   ·   Translate   ·   Archive   ·   People   ·   Donate
summaryrefslogtreecommitdiffstats
path: root/translate-toolkit-1.3.0/translate/misc
diff options
context:
space:
mode:
Diffstat (limited to 'translate-toolkit-1.3.0/translate/misc')
-rw-r--r--translate-toolkit-1.3.0/translate/misc/README25
-rw-r--r--translate-toolkit-1.3.0/translate/misc/__init__.py23
-rw-r--r--translate-toolkit-1.3.0/translate/misc/autoencode.py59
-rw-r--r--translate-toolkit-1.3.0/translate/misc/context.py48
-rw-r--r--translate-toolkit-1.3.0/translate/misc/contextlib.py193
-rw-r--r--translate-toolkit-1.3.0/translate/misc/dictutils.py164
-rw-r--r--translate-toolkit-1.3.0/translate/misc/file_discovery.py64
-rw-r--r--translate-toolkit-1.3.0/translate/misc/hash.py30
-rw-r--r--translate-toolkit-1.3.0/translate/misc/ini.py569
-rw-r--r--translate-toolkit-1.3.0/translate/misc/multistring.py80
-rw-r--r--translate-toolkit-1.3.0/translate/misc/optrecurse.py699
-rw-r--r--translate-toolkit-1.3.0/translate/misc/ourdom.py260
-rw-r--r--translate-toolkit-1.3.0/translate/misc/progressbar.py141
-rw-r--r--translate-toolkit-1.3.0/translate/misc/quote.py429
-rw-r--r--translate-toolkit-1.3.0/translate/misc/rich.py56
-rw-r--r--translate-toolkit-1.3.0/translate/misc/selector.py533
-rw-r--r--translate-toolkit-1.3.0/translate/misc/sparse.py188
-rw-r--r--translate-toolkit-1.3.0/translate/misc/stdiotell.py57
-rw-r--r--translate-toolkit-1.3.0/translate/misc/test_autoencode.py34
-rw-r--r--translate-toolkit-1.3.0/translate/misc/test_dictutils.py22
-rw-r--r--translate-toolkit-1.3.0/translate/misc/test_multistring.py42
-rw-r--r--translate-toolkit-1.3.0/translate/misc/test_optrecurse.py20
-rw-r--r--translate-toolkit-1.3.0/translate/misc/test_quote.py77
-rwxr-xr-xtranslate-toolkit-1.3.0/translate/misc/test_zipfileext.py160
-rw-r--r--translate-toolkit-1.3.0/translate/misc/textwrap.py380
-rw-r--r--translate-toolkit-1.3.0/translate/misc/typecheck/__init__.py1559
-rw-r--r--translate-toolkit-1.3.0/translate/misc/typecheck/doctest_support.py36
-rw-r--r--translate-toolkit-1.3.0/translate/misc/typecheck/mixins.py84
-rw-r--r--translate-toolkit-1.3.0/translate/misc/typecheck/sets.py62
-rw-r--r--translate-toolkit-1.3.0/translate/misc/typecheck/typeclasses.py35
-rw-r--r--translate-toolkit-1.3.0/translate/misc/wStringIO.py156
-rw-r--r--translate-toolkit-1.3.0/translate/misc/xmlwrapper.py130
-rw-r--r--translate-toolkit-1.3.0/translate/misc/zipfileext.py116
33 files changed, 6531 insertions, 0 deletions
diff --git a/translate-toolkit-1.3.0/translate/misc/README b/translate-toolkit-1.3.0/translate/misc/README
new file mode 100644
index 0000000..7c70a9a
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/README
@@ -0,0 +1,25 @@
+This directory contains miscellaneous code for translate
+It includes code written specifically for translate
+and also other modules that are included with Python 2.3
+but have been included here for backwards compatibility with
+earlier versions of Python.
+
+translate is distributed under the GPL; some of the code in this
+directory is distributed under other open source licenses.
+Please see each relevant file for details...
+
+quote.py is a standard translate module that is used for quoting/unquoting
+and escaping/unescaping strings. It is released under the GPL
+
+The csv module is defined in csv.py and _csv.c
+Since some of this code is in C, it needs to be compiled and installed
+on your Python distribution if you are using a version of Python before
+Python 2.3. To do this, run:
+ setup.py install
+(This will require the standard C compiler for your operating system)
+
+optparse.py is the standard Python command line options parser, previously
+known as Optik. It also requires textwrap.py which is a standard Python 2.3
+module, and which is also included here.
+
+
diff --git a/translate-toolkit-1.3.0/translate/misc/__init__.py b/translate-toolkit-1.3.0/translate/misc/__init__.py
new file mode 100644
index 0000000..0b818e4
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/__init__.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2002, 2003 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""Miscellaneous modules for translate - including modules for backward compatibility with pre-2.3 versions of Python"""
+
diff --git a/translate-toolkit-1.3.0/translate/misc/autoencode.py b/translate-toolkit-1.3.0/translate/misc/autoencode.py
new file mode 100644
index 0000000..37786de
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/autoencode.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2006 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""Supports a hybrid Unicode string that knows which encoding is preferable,
+and uses this when converting to a string."""
+
class autoencode(unicode):
    """A unicode subclass that remembers which byte encoding it prefers
    and uses that encoding when converted to a byte string via str()."""

    def __new__(newtype, string=u"", encoding=None, errors=None):
        """Create the instance.

        string may be unicode (an existing autoencode's encoding is
        inherited when none is given explicitly) or a byte string that is
        decoded with encoding/errors.  A LookupError for an unknown
        encoding name is reported as ValueError.
        """
        if isinstance(string, unicode):
            if errors is None:
                newstring = unicode.__new__(newtype, string)
            else:
                newstring = unicode.__new__(newtype, string, errors=errors)
            # Inherit the remembered encoding from an existing autoencode
            # when the caller did not specify one.
            if encoding is None and isinstance(string, autoencode):
                newstring.encoding = string.encoding
            else:
                newstring.encoding = encoding
        else:
            if errors is None and encoding is None:
                newstring = unicode.__new__(newtype, string)
            elif errors is None:
                try:
                    newstring = unicode.__new__(newtype, string, encoding)
                except LookupError, e:
                    # Unknown codec name: surface it as a bad argument value.
                    raise ValueError(str(e))
            elif encoding is None:
                newstring = unicode.__new__(newtype, string, errors)
            else:
                newstring = unicode.__new__(newtype, string, encoding, errors)
            newstring.encoding = encoding
        return newstring

    def join(self, seq):
        # Keep the joined result an autoencode rather than plain unicode.
        # NOTE(review): the result's encoding ends up None, because the
        # plain unicode returned by super().join() carries no encoding.
        return autoencode(super(autoencode, self).join(seq))

    def __str__(self):
        """Encode with the remembered encoding, or fall back to the
        default unicode-to-str conversion when none was recorded."""
        if self.encoding is None:
            return super(autoencode, self).__str__()
        else:
            return self.encode(self.encoding)
+
diff --git a/translate-toolkit-1.3.0/translate/misc/context.py b/translate-toolkit-1.3.0/translate/misc/context.py
new file mode 100644
index 0000000..914d6bf
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/context.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2002-2006 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+import sys
+
def with_(mgr, body):
    """A function to mimic the with statement introduced in Python 2.5.

    Follows the expansion given in http://www.python.org/dev/peps/pep-0343/
    mgr.__enter__() is called and its result passed to body (unpacked when
    it is a tuple or list); mgr.__exit__() is guaranteed to run afterwards.
    When body raises, __exit__ may swallow the exception by returning a
    true value, in which case with_ returns None.
    """
    exit_method = mgr.__exit__  # looked up early, per the PEP
    entered = mgr.__enter__()
    clean_exit = True
    try:
        try:
            if isinstance(entered, (tuple, list)):
                return body(*entered)
            return body(entered)
        except:
            clean_exit = False
            if not exit_method(*sys.exc_info()):
                raise
            # A true result from __exit__ swallows the exception.
    finally:
        # Normal completion (and non-local exits) release the manager here.
        if clean_exit:
            exit_method(None, None, None)
diff --git a/translate-toolkit-1.3.0/translate/misc/contextlib.py b/translate-toolkit-1.3.0/translate/misc/contextlib.py
new file mode 100644
index 0000000..f889b70
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/contextlib.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2002-2006 Zuza Software Foundation
+#
+# This file is part of translate.
+# The file was copied from the Python 2.5 source.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+# NB! IMPORTANT SEMANTIC DIFFERENCE WITH THE OFFICIAL contextlib.
+# In Python 2.5+, if an exception is thrown in a 'with' statement
+# which uses a generator-based context manager (that is, a
+# context manager created by decorating a generator with
+# @contextmanager), the exception will be propagated to the
+# generator via the .throw method of the generator.
+#
+# This does not exist in Python 2.4. Thus, we just naively finish
+# off the context manager. This also means that generator-based
+# context managers can't deal with exceptions, so be warned.
+
+"""Utilities for with-statement contexts. See PEP 343."""
+
+import sys
+
+__all__ = ["contextmanager", "nested", "closing"]
+
class GeneratorContextManager(object):
    """Helper for @contextmanager decorator."""

    def __init__(self, gen):
        # gen: the generator object produced by the decorated function.
        self.gen = gen

    def __enter__(self):
        # Run the generator up to its yield; the yielded value becomes
        # the target of the with-statement.
        try:
            return self.gen.next()
        except StopIteration:
            raise RuntimeError("generator didn't yield")

    def __exit__(self, type, value, tb):
        # NOTE(review): unlike the real Python 2.5 contextlib, the
        # exception is NOT thrown into the generator (generator.throw is
        # unavailable on Python 2.4); the generator is simply resumed, so
        # it cannot intercept with-block exceptions (see module comment).
        if type is None:
            try:
                self.gen.next()
            except StopIteration:
                return
            else:
                raise RuntimeError("generator didn't stop")
        else:
            if value is None:
                # Need to force instantiation so we can reliably
                # tell if we get the same exception back
                value = type()
            try:
                try:
                    self.gen.next()
                except StopIteration:
                    import traceback
                    traceback.print_exception(type, value, tb)
                    raise value
            except StopIteration, exc:
                # Suppress the exception *unless* it's the same exception that
                # was passed to throw(). This prevents a StopIteration
                # raised inside the "with" statement from being suppressed
                return exc is not value
+
def contextmanager(func):
    """Decorator turning a generator function into a context-manager factory.

    The generator must yield exactly once: code before the yield acts as
    __enter__ (the yielded value is bound by ``as``) and code after it —
    conventionally inside a finally clause — acts as __exit__.

        @contextmanager
        def some_generator(arguments):
            setup()
            try:
                yield value
            finally:
                cleanup()

    makes ``with some_generator(arguments) as variable: body`` equivalent
    to running setup(), binding value, running body, then cleanup().
    """
    def wrapper(*args, **kwds):
        return GeneratorContextManager(func(*args, **kwds))
    # Best-effort metadata copy; some callables lack these attributes,
    # hence the blanket except.  (functools.wraps is avoided to keep this
    # module working on old Pythons.)
    try:
        wrapper.__name__ = func.__name__
        wrapper.__doc__ = func.__doc__
        wrapper.__dict__ = func.__dict__
    except:
        pass
    return wrapper
+
+
@contextmanager
def nested(*managers):
    """Support multiple context managers in a single with-statement.

    Code like this:

        with nested(A, B, C) as (X, Y, Z):
            <body>

    is equivalent to this:

        with A as X:
            with B as Y:
                with C as Z:
                    <body>

    """
    exits = []
    vars = []
    exc = (None, None, None)
    # Lambdas are an easy way to create unique objects. We don't want
    # this to be None, since our answer might actually be None
    undefined = lambda: 42
    result = undefined

    try:
        # Enter every manager in order, recording each __exit__ so they
        # can be called in reverse order below.
        for mgr in managers:
            exit = mgr.__exit__
            enter = mgr.__enter__
            vars.append(enter())
            exits.append(exit)
        result = vars
    except:
        exc = sys.exc_info()

    # If nothing has gone wrong, then result contains our return value
    # and thus it is not equal to 'undefined'. Thus, yield the value.
    if result != undefined:
        yield result

    # Unwind the successfully-entered managers, innermost first.  An
    # __exit__ returning true clears the pending exception; an __exit__
    # raising replaces it.
    while exits:
        exit = exits.pop()
        try:
            if exit(*exc):
                exc = (None, None, None)
        except:
            exc = sys.exc_info()
    if exc != (None, None, None):
        # Don't rely on sys.exc_info() still containing
        # the right information. Another exception may
        # have been raised and caught by an exit method
        raise exc[0], exc[1], exc[2]
+
class closing(object):
    """Context manager that calls close() on its argument when the block ends.

        with closing(<module>.open(<arguments>)) as f:
            <block>

    guarantees f.close() runs whether the block exits normally or with an
    exception — equivalent to wrapping the block in try/finally.
    """

    def __init__(self, thing):
        # The object to close; handed back unchanged by __enter__.
        self.thing = thing

    def __enter__(self):
        return self.thing

    def __exit__(self, *exc_info):
        # Always close; the implicit None return lets exceptions propagate.
        self.thing.close()
diff --git a/translate-toolkit-1.3.0/translate/misc/dictutils.py b/translate-toolkit-1.3.0/translate/misc/dictutils.py
new file mode 100644
index 0000000..61b074b
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/dictutils.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""Implements a case-insensitive (on keys) dictionary and order-sensitive dictionary"""
+
+# Copyright 2002, 2003 St James Software
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
def generalupper(str):
    """Return None unchanged, otherwise the uppercase form of the value.

    Uses the object's own upper() method, so it works for both str and
    unicode arguments.
    """
    return None if str is None else str.upper()
+
class cidict(dict):
    """A dict whose keys compare case-insensitively.

    Keys must be str or unicode.  Every access scans the stored keys
    linearly, so lookups are O(n) in the number of entries.  The
    spelling of the first-inserted key is preserved on overwrite.
    """

    def __init__(self, fromdict = None):
        """constructs the cidict, optionally using another dict to do so"""
        if fromdict is not None:
            self.update(fromdict)

    def __getitem__(self, key):
        if type(key) != str and type(key) != unicode:
            raise TypeError, "cidict can only have str or unicode as key (got %r)" % type(key)
        for akey in self.iterkeys():
            if akey.lower() == key.lower():
                return dict.__getitem__(self, akey)
        # NOTE(review): a missing key raises IndexError here (and in
        # __delitem__) where a normal dict raises KeyError; callers may
        # depend on this, so it is documented rather than changed.
        raise IndexError

    def __setitem__(self, key, value):
        if type(key) != str and type(key) != unicode:
            raise TypeError, "cidict can only have str or unicode as key (got %r)" % type(key)
        # Overwrite a case-insensitively equal existing key in place so
        # the originally stored key spelling is kept.
        for akey in self.iterkeys():
            if akey.lower() == key.lower():
                return dict.__setitem__(self, akey, value)
        return dict.__setitem__(self, key, value)

    def update(self, updatedict):
        """D.update(E) -> None. Update D from E: for k in E.keys(): D[k] = E[k]"""
        for key, value in updatedict.iteritems():
            self[key] = value

    def __delitem__(self, key):
        if type(key) != str and type(key) != unicode:
            raise TypeError, "cidict can only have str or unicode as key (got %r)" % type(key)
        for akey in self.iterkeys():
            if akey.lower() == key.lower():
                return dict.__delitem__(self, akey)
        raise IndexError

    def __contains__(self, key):
        if type(key) != str and type(key) != unicode:
            raise TypeError, "cidict can only have str or unicode as key (got %r)" % type(key)
        for akey in self.iterkeys():
            if akey.lower() == key.lower():
                return 1
        return 0

    def has_key(self, key):
        # Python 2 style membership test; defers to __contains__.
        return self.__contains__(key)

    def get(self, key, default=None):
        if self.has_key(key):
            return self[key]
        else:
            return default
+
class ordereddict(dict):
    """a dictionary which remembers its keys in the order in which they were given"""
    def __init__(self, *args):
        if len(args) == 0:
            super(ordereddict, self).__init__()
            # self.order mirrors the dict's keys in insertion order.
            self.order = []
        elif len(args) > 1:
            raise TypeError("ordereddict() takes at most 1 argument (%d given)" % len(args))
        else:
            initarg = args[0]
            apply(super(ordereddict, self).__init__, args)
            if hasattr(initarg, "keys"):
                # Mapping argument: adopt its key order as-is.
                self.order = initarg.keys()
            else:
                # danger: could have duplicate keys...
                self.order = []
                checkduplicates = {}
                for key, value in initarg:
                    if not key in checkduplicates:
                        self.order.append(key)
                        checkduplicates[key] = None

    def __setitem__(self, key, value):
        alreadypresent = key in self
        result = dict.__setitem__(self, key, value)
        # Only the first insertion affects ordering; overwrites keep the
        # key's original position.
        if not alreadypresent: self.order.append(key)
        return result

    def update(self, updatedict):
        """D.update(E) -> None. Update D from E: for k in E.keys(): D[k] = E[k]"""
        for key, value in updatedict.iteritems():
            self[key] = value

    def __delitem__(self, key):
        # Note: a missing key makes dict.__delitem__ raise KeyError before
        # the order list is touched.
        alreadypresent = key in self
        result = dict.__delitem__(self, key)
        if alreadypresent: del self.order[self.order.index(key)]
        return result

    def copy(self):
        """D.copy() -> a shallow copy of D"""
        thecopy = ordereddict(super(ordereddict, self).copy())
        # Preserve this dict's ordering, not the copy-constructor's.
        thecopy.order = self.order[:]
        return thecopy

    def items(self):
        """D.items() -> list of D's (key, value) pairs, as 2-tuples"""
        return [(key, self[key]) for key in self.order]

    def iteritems(self):
        """D.iteritems() -> an iterator over the (key, value) items of D"""
        for key in self.order:
            yield (key, self[key])

    def iterkeys(self):
        """D.iterkeys() -> an iterator over the keys of D"""
        for key in self.order:
            yield key

    __iter__ = iterkeys

    def itervalues(self):
        """D.itervalues() -> an iterator over the values of D"""
        for key in self.order:
            yield self[key]

    def keys(self):
        """D.keys() -> list of D's keys"""
        return self.order[:]

    def popitem(self):
        """D.popitem() -> (k, v), remove and return some (key, value) pair as a 2-tuple; but raise KeyError if D is empty"""
        # Pops the most recently ordered key (end of self.order).
        if len(self.order) == 0:
            raise KeyError("popitem(): ordered dictionary is empty")
        k = self.order.pop()
        v = self[k]
        del self[k]
        return (k,v)

    def pop(self, key):
        """remove entry from dict and internal list"""
        value = super(ordereddict, self).pop(key)
        del self.order[self.order.index(key)]
        return value
diff --git a/translate-toolkit-1.3.0/translate/misc/file_discovery.py b/translate-toolkit-1.3.0/translate/misc/file_discovery.py
new file mode 100644
index 0000000..733f091
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/file_discovery.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2008 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+__all__ = ['get_abs_data_filename']
+
+import sys
+import os
+
def get_abs_data_filename(path_parts, basedirs=[]):
    """Get the absolute path to the given file- or directory name in the current
    running application's data directory.

    @type path_parts: list
    @param path_parts: The path parts that can be joined by os.path.join().
    @param basedirs: Extra base directories searched before the defaults.
    @raise Exception: When no candidate path exists on disk.
    """
    # NOTE(review): the mutable default [] is safe here — it is only read
    # (in `basedirs + [...]` below), never mutated.

    if isinstance(path_parts, str):
        path_parts = [path_parts]

    # Candidate roots: caller-supplied dirs first, then the directory of
    # this module, then the directory of the Python executable (useful
    # for frozen installs).
    BASE_DIRS = basedirs + [
        os.path.dirname(unicode(__file__, sys.getfilesystemencoding())),
        os.path.dirname(unicode(sys.executable, sys.getfilesystemencoding()))
    ]

    # Freedesktop standard
    if 'XDG_DATA_HOME' in os.environ:
        BASE_DIRS += [os.environ['XDG_DATA_HOME']]
    if 'XDG_DATA_DIRS' in os.environ:
        BASE_DIRS += os.environ['XDG_DATA_DIRS'].split(os.path.pathsep)

    # Mac OSX app bundles
    if 'RESOURCEPATH' in os.environ:
        BASE_DIRS += os.environ['RESOURCEPATH'].split(os.path.pathsep)

    # Relative data locations tried under each base directory.
    DATA_DIRS = [
        ["..", "share"],
        ["share"]
    ]

    # Try every (base, data-dir) combination; first existing path wins.
    for basepath, data_dir in ((x, y) for x in BASE_DIRS for y in DATA_DIRS):
        dir_and_filename = data_dir + path_parts
        datafile = os.path.join(basepath or os.path.dirname(__file__), *dir_and_filename)
        if os.path.exists(datafile):
            return datafile
    raise Exception('Could not find "%s"' % (os.path.join(*path_parts)))
diff --git a/translate-toolkit-1.3.0/translate/misc/hash.py b/translate-toolkit-1.3.0/translate/misc/hash.py
new file mode 100644
index 0000000..1343d22
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/hash.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2008 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+"""This module contains some temporary glue to make us work with md5 hashes on
+old and new versions of Python. The function md5_f() wraps whatever is
+available."""
+
# Prefer hashlib (Python 2.5+); fall back to the deprecated stand-alone
# md5 module on older Pythons.  Either way md5_f() constructs a fresh
# md5 hash object.
try:
    import hashlib
    md5_f = hashlib.md5
except ImportError:
    import md5
    md5_f = md5.new
diff --git a/translate-toolkit-1.3.0/translate/misc/ini.py b/translate-toolkit-1.3.0/translate/misc/ini.py
new file mode 100644
index 0000000..b72f6ca
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/ini.py
@@ -0,0 +1,569 @@
+# Copyright (c) 2001, 2002, 2003 Python Software Foundation
+# Copyright (c) 2004 Paramjit Oberoi <param.cs.wisc.edu>
+# All Rights Reserved. See LICENSE-PSF & LICENSE for details.
+
+"""Access and/or modify INI files
+
+* Compatible with ConfigParser
+* Preserves order of sections & options
+* Preserves comments/blank lines/etc
+* More convenient access to data
+
+Example:
+
+ >>> from StringIO import StringIO
+ >>> sio = StringIO('''# configure foo-application
+ ... [foo]
+ ... bar1 = qualia
+ ... bar2 = 1977
+ ... [foo-ext]
+ ... special = 1''')
+
+ >>> cfg = INIConfig(sio)
+ >>> print cfg.foo.bar1
+ qualia
+ >>> print cfg['foo-ext'].special
+ 1
+ >>> cfg.foo.newopt = 'hi!'
+
+ >>> print cfg
+ # configure foo-application
+ [foo]
+ bar1 = qualia
+ bar2 = 1977
+ newopt = hi!
+ [foo-ext]
+ special = 1
+
+"""
+
+# An ini parser that supports ordered sections/options
+# Also supports updates, while preserving structure
+# Backward-compatible with ConfigParser
+
+import re
+from iniparse import config
+from sets import Set
+from ConfigParser import DEFAULTSECT, ParsingError, MissingSectionHeaderError
+
class LineType(object):
    """Base class for one parsed line of an INI file.

    ``line`` caches the line's original text.  The cache is invalidated
    (reset to None) the moment any other attribute is assigned, so that
    __str__ regenerates the text from the current attribute values.
    """
    line = None

    def __init__(self, line=None):
        if line is not None:
            self.line = line.strip('\n')

    def __str__(self):
        """Return the original text when unmodified, else rebuild it."""
        if self.line is None:
            return self.to_string()
        return self.line

    def __setattr__(self, name, value):
        # Any attribute update makes the cached original line stale.
        # Writes go straight through __dict__ to avoid recursing back
        # into this method.
        if hasattr(self, name):
            self.__dict__['line'] = None
        self.__dict__[name] = value

    def to_string(self):
        """Serialize from attributes; subclasses must override this."""
        raise Exception('This method must be overridden in derived classes')
+
+
class SectionLine(LineType):
    """A "[name]" section-header line, with an optional trailing comment."""

    # Matches "[name]" plus an optional ";"/"#"-introduced comment.
    regex = re.compile(r'^\['
                       r'(?P<name>[^]]+)'
                       r'\]\s*'
                       r'((?P<csep>;|#)(?P<comment>.*))?$')

    def __init__(self, name, comment=None, comment_separator=None,
                 comment_offset=-1, line=None):
        super(SectionLine, self).__init__(line)
        self.name = name
        self.comment = comment
        self.comment_separator = comment_separator
        # Column where the comment started; used to keep alignment.
        self.comment_offset = comment_offset

    def to_string(self):
        out = '[' + self.name + ']'
        if self.comment is not None:
            # try to preserve indentation of comments
            out = (out+' ').ljust(self.comment_offset)
            out = out + self.comment_separator + self.comment
        return out

    def parse(cls, line):
        """Parse a section-header line; returns None when it doesn't match."""
        m = cls.regex.match(line.rstrip())
        if m is None:
            return None
        return cls(m.group('name'), m.group('comment'),
                   m.group('csep'), m.start('csep'),
                   line)
    parse = classmethod(parse)
+
+
class OptionLine(LineType):
    """A "name = value" option line, with an optional ';'-introduced comment."""

    def __init__(self, name, value, separator=' = ', comment=None,
                 comment_separator=None, comment_offset=-1, line=None):
        super(OptionLine, self).__init__(line)
        self.name = name
        self.value = value
        # The original "name<sep>value" separator text, e.g. ' = ' or ':'.
        self.separator = separator
        self.comment = comment
        self.comment_separator = comment_separator
        self.comment_offset = comment_offset

    def to_string(self):
        out = '%s%s%s' % (self.name, self.separator, self.value)
        if self.comment is not None:
            # try to preserve indentation of comments
            out = (out+' ').ljust(self.comment_offset)
            out = out + self.comment_separator + self.comment
        return out

    # Option name may not start with ':', '=', whitespace or '['.
    regex = re.compile(r'^(?P<name>[^:=\s[][^:=\s]*)'
                       r'(?P<sep>\s*[:=]\s*)'
                       r'(?P<value>.*)$')

    def parse(cls, line):
        """Parse "name = value [;comment]"; returns None on no match."""
        m = cls.regex.match(line.rstrip())
        if m is None:
            return None

        name = m.group('name').rstrip()
        value = m.group('value')
        sep = m.group('sep')

        # comments are not detected in the regex because
        # ensuring total compatibility with ConfigParser
        # requires that:
        #     option = value ;comment // value=='value'
        #     option = value;1 ;comment // value=='value;1 ;comment'
        #
        # Doing this in a regex would be complicated. I
        # think this is a bug. The whole issue of how to
        # include ';' in the value needs to be addressed.
        # Also, '#' doesn't mark comments in options...

        coff = value.find(';')
        # Only a ';' preceded by whitespace begins a comment.
        if coff != -1 and value[coff-1].isspace():
            comment = value[coff+1:]
            csep = value[coff]
            value = value[:coff].rstrip()
            coff = m.start('value') + coff
        else:
            comment = None
            csep = None
            coff = -1

        return cls(name, value, sep, comment, csep, coff, line)
    parse = classmethod(parse)
+
+
class CommentLine(LineType):
    """A whole-line comment starting with ';', '#', or (any-case) "rem"."""

    regex = re.compile(r'^(?P<csep>[;#]|[rR][eE][mM])'
                       r'(?P<comment>.*)$')

    def __init__(self, comment='', separator='#', line=None):
        super(CommentLine, self).__init__(line)
        self.comment = comment
        self.separator = separator

    def to_string(self):
        return self.separator + self.comment

    def parse(cls, line):
        """Parse a whole-line comment; returns None when not a comment."""
        m = cls.regex.match(line.rstrip())
        if m is None:
            return None
        return cls(m.group('comment'), m.group('csep'), line)
    parse = classmethod(parse)
+
+
class EmptyLine(LineType):
    """A blank (empty or whitespace-only) line."""
    # could make this a singleton

    def to_string(self):
        return ''

    def parse(cls, line):
        """Return an EmptyLine for blank/whitespace-only input, else None."""
        if line.strip(): return None
        return cls(line)
    parse = classmethod(parse)
+
+
class ContinuationLine(LineType):
    """An indented line continuing a multi-line option value."""

    # A continuation is any line that starts with whitespace.
    regex = re.compile(r'^\s+(?P<value>.*)$')

    def __init__(self, value, value_offset=8, line=None):
        super(ContinuationLine, self).__init__(line)
        self.value = value
        # Indentation column used when regenerating the line text.
        self.value_offset = value_offset

    def to_string(self):
        return ' '*self.value_offset + self.value

    def parse(cls, line):
        """Parse an indented continuation line; returns None on no match."""
        m = cls.regex.match(line.rstrip())
        if m is None:
            return None
        return cls(m.group('value'), m.start('value'), line)
    parse = classmethod(parse)
+
+
class LineContainer(object):
    """Groups LineType objects that form one logical unit — an option line
    with its continuation lines, or a section header with its contents —
    and exposes them through the name/value properties."""

    def __init__(self, d=None):
        self.contents = []
        # Caches a value assigned via set_value(); when present it takes
        # precedence over the value reassembled from the lines.
        self.orgvalue = None
        if d:
            if isinstance(d, list): self.extend(d)
            else: self.add(d)

    def add(self, x):
        self.contents.append(x)

    def extend(self, x):
        for i in x: self.add(i)

    def get_name(self):
        # The first contained line (section or option line) carries the name.
        return self.contents[0].name

    def set_name(self, data):
        self.contents[0].name = data

    def get_value(self):
        if self.orgvalue is not None:
            return self.orgvalue
        elif len(self.contents) == 1:
            return self.contents[0].value
        else:
            # Multi-line value: join the value-bearing lines, skipping
            # comments and blanks.
            return '\n'.join([str(x.value) for x in self.contents
                              if not isinstance(x, (CommentLine, EmptyLine))])

    def set_value(self, data):
        self.orgvalue = data
        lines = str(data).split('\n')
        # Grow or shrink contents so there is one line object per value
        # line, then overwrite each line's value in place.
        linediff = len(lines) - len(self.contents)
        if linediff > 0:
            for _ in range(linediff):
                self.add(ContinuationLine(''))
        elif linediff < 0:
            self.contents = self.contents[:linediff]
        for i,v in enumerate(lines):
            self.contents[i].value = v

    name = property(get_name, set_name)
    value = property(get_value, set_value)

    def __str__(self):
        s = [str(x) for x in self.contents]
        return '\n'.join(s)

    def finditer(self, key):
        # Search newest-first so the most recent definition of a name wins.
        for x in self.contents[::-1]:
            if hasattr(x, 'name') and x.name==key:
                yield x

    def find(self, key):
        for x in self.finditer(key):
            return x
        raise KeyError(key)
+
+
+def _make_xform_property(myattrname, srcattrname=None):
+ private_attrname = myattrname + 'value'
+ private_srcname = myattrname + 'source'
+ if srcattrname is None:
+ srcattrname = myattrname
+
+ def getfn(self):
+ srcobj = getattr(self, private_srcname)
+ if srcobj is not None:
+ return getattr(srcobj, srcattrname)
+ else:
+ return getattr(self, private_attrname)
+
+ def setfn(self, value):
+ srcobj = getattr(self, private_srcname)
+ if srcobj is not None:
+ setattr(srcobj, srcattrname, value)
+ else:
+ setattr(self, private_attrname, value)
+
+ return property(getfn, setfn)
+
+
class INISection(config.ConfigNamespace):
    """One [section] of an INI file: maps option names to values while
    keeping the underlying line objects so the file can be re-serialized
    faithfully."""
    # _lines: LineContainers backing this section (several if the section
    # header appears more than once in the file).
    _lines = None
    # _options: normalized option name -> LineContainer.
    _options = None
    # _defaults: an INISection providing [DEFAULT]-style fallback values.
    _defaults = None
    _optionxformvalue = None
    _optionxformsource = None
    def __init__(self, lineobj, defaults = None,
                 optionxformvalue=None, optionxformsource=None):
        self._lines = [lineobj]
        self._defaults = defaults
        self._optionxformvalue = optionxformvalue
        self._optionxformsource = optionxformsource
        self._options = {}

    # Option-name normalization function (e.g. str.lower), optionally
    # delegated to another object via _optionxformsource.
    _optionxform = _make_xform_property('_optionxform')

    def __getitem__(self, key):
        if key == '__name__':
            return self._lines[-1].name
        if self._optionxform: key = self._optionxform(key)
        try:
            return self._options[key].value
        except KeyError:
            # Fall back to the defaults section, mirroring ConfigParser.
            if self._defaults and key in self._defaults._options:
                return self._defaults._options[key].value
            else:
                raise

    def __setitem__(self, key, value):
        if self._optionxform: xkey = self._optionxform(key)
        else: xkey = key
        if xkey not in self._options:
            # create a dummy object - value may have multiple lines
            obj = LineContainer(OptionLine(key, ''))
            self._lines[-1].add(obj)
            self._options[xkey] = obj
        # the set_value() function in LineContainer
        # automatically handles multi-line values
        self._options[xkey].value = value

    def __delitem__(self, key):
        if self._optionxform: key = self._optionxform(key)
        # Remove every line object belonging to this option from every
        # backing LineContainer, then forget the option mapping itself.
        for l in self._lines:
            remaining = []
            for o in l.contents:
                if isinstance(o, LineContainer):
                    n = o.name
                    if self._optionxform: n = self._optionxform(n)
                    if key != n: remaining.append(o)
                else:
                    remaining.append(o)
            l.contents = remaining
        del self._options[key]

    def __iter__(self):
        # Yield each (normalized) option name once, in file order, then
        # any default options not shadowed by this section.
        d = Set()
        for l in self._lines:
            for x in l.contents:
                if isinstance(x, LineContainer):
                    if self._optionxform:
                        ans = self._optionxform(x.name)
                    else:
                        ans = x.name
                    if ans not in d:
                        yield ans
                        d.add(ans)
        if self._defaults:
            for x in self._defaults:
                if x not in d:
                    yield x
                    d.add(x)

    def new_namespace(self, name):
        # INI files are flat: sections cannot contain sub-sections.
        raise Exception('No sub-sections allowed', name)
+
+
+def make_comment(line):
+    """Wrap an unparsable input line in a CommentLine, preserving its text."""
+    return CommentLine(line.rstrip())
+
+
+def readline_iterator(f):
+    """iterate over a file by only using the file object's readline method
+
+    If the file's final line ends with a newline, one extra empty string
+    is yielded before stopping, so callers can distinguish a trailing
+    newline from a plain end-of-file.
+    """
+
+    have_newline = False
+    while True:
+        line = f.readline()
+
+        if not line:
+            # EOF: emit the sentinel empty line if the last real line
+            # was newline-terminated.
+            if have_newline:
+                yield ""
+            return
+
+        if line.endswith('\n'):
+            have_newline = True
+        else:
+            have_newline = False
+
+        yield line
+
+
+class INIConfig(config.ConfigNamespace):
+    """A round-trip INI file: parses, preserves formatting, and re-emits.
+
+    Sections are INISection objects keyed by (transformed) section name;
+    str() reproduces the file including comments and blank lines.  By
+    default option names are lowercased (ConfigParser-compatible).
+    """
+    _data = None                # LineContainer holding the whole file
+    _sections = None            # maps section name -> INISection
+    _defaults = None            # INISection for the [DEFAULT] section
+    _optionxformvalue = None    # option-name transform (default str.lower)
+    _optionxformsource = None
+    _sectionxformvalue = None   # section-name transform (default: identity)
+    _sectionxformsource = None
+    _parse_exc = None           # raise on parse errors, or silently keep as comments
+    def __init__(self, fp=None, defaults = None, parse_exc=True,
+                 optionxformvalue=str.lower, optionxformsource=None,
+                 sectionxformvalue=None, sectionxformsource=None):
+        self._data = LineContainer()
+        self._parse_exc = parse_exc
+        self._optionxformvalue = optionxformvalue
+        self._optionxformsource = optionxformsource
+        self._sectionxformvalue = sectionxformvalue
+        self._sectionxformsource = sectionxformsource
+        self._sections = {}
+        if defaults is None: defaults = {}
+        # The defaults section delegates its option transform back to us.
+        self._defaults = INISection(LineContainer(), optionxformsource=self)
+        for name, value in defaults.iteritems():
+            self._defaults[name] = value
+        if fp is not None:
+            self.readfp(fp)
+
+    # Both transforms delegate to the source object's 'optionxform' when
+    # a source is configured (e.g. a wrapping compatibility class).
+    _optionxform = _make_xform_property('_optionxform', 'optionxform')
+    _sectionxform = _make_xform_property('_sectionxform', 'optionxform')
+
+    def __getitem__(self, key):
+        """Return the INISection for *key*; DEFAULTSECT maps to the defaults."""
+        if key == DEFAULTSECT:
+            return self._defaults
+        if self._sectionxform: key = self._sectionxform(key)
+        return self._sections[key]
+
+    def __setitem__(self, key, value):
+        """Top-level assignment is invalid: options live inside sections."""
+        raise Exception('Values must be inside sections', key, value)
+
+    def __delitem__(self, key):
+        """Remove section *key* and all of its lines from the file."""
+        if self._sectionxform: key = self._sectionxform(key)
+        for line in self._sections[key]._lines:
+            self._data.contents.remove(line)
+        del self._sections[key]
+
+    def __iter__(self):
+        """Yield each section name once, in file order."""
+        d = Set()
+        for x in self._data.contents:
+            if isinstance(x, LineContainer):
+                if x.name not in d:
+                    yield x.name
+                    d.add(x.name)
+
+    def new_namespace(self, name):
+        """Create (or re-open) section *name* and return its INISection."""
+        if self._data.contents:
+            # separate the new section from preceding content
+            self._data.add(EmptyLine())
+        obj = LineContainer(SectionLine(name))
+        self._data.add(obj)
+        if self._sectionxform: name = self._sectionxform(name)
+        if name in self._sections:
+            # duplicate section header: extend the existing section
+            ns = self._sections[name]
+            ns._lines.append(obj)
+        else:
+            ns = INISection(obj, defaults=self._defaults,
+                            optionxformsource=self)
+            self._sections[name] = ns
+        return ns
+
+    def __str__(self):
+        """Reproduce the INI file text, formatting preserved."""
+        return str(self._data)
+
+    # Candidate parsers tried in order by _parse(); first match wins.
+    _line_types = [EmptyLine, CommentLine,
+                   SectionLine, OptionLine,
+                   ContinuationLine]
+
+    def _parse(self, line):
+        """Return a parsed line object, or None if no line type matches."""
+        for linetype in self._line_types:
+            lineobj = linetype.parse(line)
+            if lineobj:
+                return lineobj
+        else:
+            # can't parse line (for-else: reached only when no parser matched)
+            return None
+
+    def readfp(self, fp):
+        """Parse INI data from file-like object *fp* into this config.
+
+        Unparsable lines either raise ParsingError (when parse_exc is
+        true) or are preserved as comments.  Comments and blank lines
+        are buffered in pending_lines until we know whether they belong
+        to an option, a section, or the file tail.
+        """
+        cur_section = None
+        cur_option = None
+        cur_section_name = None
+        cur_option_name = None
+        pending_lines = []
+        try:
+            fname = fp.name
+        except AttributeError:
+            fname = '<???>'
+        linecount = 0
+        exc = None
+        line = None
+
+        for line in readline_iterator(fp):
+            lineobj = self._parse(line)
+            linecount += 1
+
+            # Content before any section header is an error (or a comment).
+            if not cur_section and not isinstance(lineobj,
+                                (CommentLine, EmptyLine, SectionLine)):
+                if self._parse_exc:
+                    raise MissingSectionHeaderError(fname, linecount, line)
+                else:
+                    lineobj = make_comment(line)
+
+            if lineobj is None:
+                # Unparsable: collect into one ParsingError, keep as comment.
+                if self._parse_exc:
+                    if exc is None: exc = ParsingError(fname)
+                    exc.append(linecount, line)
+                lineobj = make_comment(line)
+
+            if isinstance(lineobj, ContinuationLine):
+                if cur_option:
+                    # Buffered comments belong to this multi-line value.
+                    cur_option.extend(pending_lines)
+                    pending_lines = []
+                    cur_option.add(lineobj)
+                else:
+                    # illegal continuation line - convert to comment
+                    if self._parse_exc:
+                        if exc is None: exc = ParsingError(fname)
+                        exc.append(linecount, line)
+                    lineobj = make_comment(line)
+
+            if isinstance(lineobj, OptionLine):
+                cur_section.extend(pending_lines)
+                pending_lines = []
+                cur_option = LineContainer(lineobj)
+                cur_section.add(cur_option)
+                if self._optionxform:
+                    cur_option_name = self._optionxform(cur_option.name)
+                else:
+                    cur_option_name = cur_option.name
+                if cur_section_name == DEFAULTSECT:
+                    optobj = self._defaults
+                else:
+                    optobj = self._sections[cur_section_name]
+                optobj._options[cur_option_name] = cur_option
+
+            if isinstance(lineobj, SectionLine):
+                # Comments before a section header stay at the outer level.
+                self._data.extend(pending_lines)
+                pending_lines = []
+                cur_section = LineContainer(lineobj)
+                self._data.add(cur_section)
+                cur_option = None
+                cur_option_name = None
+                if cur_section.name == DEFAULTSECT:
+                    self._defaults._lines.append(cur_section)
+                    cur_section_name = DEFAULTSECT
+                else:
+                    if self._sectionxform:
+                        cur_section_name = self._sectionxform(cur_section.name)
+                    else:
+                        cur_section_name = cur_section.name
+                    if not self._sections.has_key(cur_section_name):
+                        self._sections[cur_section_name] = \
+                                INISection(cur_section, defaults=self._defaults,
+                                           optionxformsource=self)
+                    else:
+                        # duplicate header re-opens the existing section
+                        self._sections[cur_section_name]._lines.append(cur_section)
+
+            if isinstance(lineobj, (CommentLine, EmptyLine)):
+                pending_lines.append(lineobj)
+
+        # Trailing comments/blank lines belong to the file itself.
+        self._data.extend(pending_lines)
+        if line and line[-1]=='\n':
+            # preserve a trailing newline in the round-tripped output
+            self._data.add(EmptyLine())
+
+        if exc:
+            raise exc
+
diff --git a/translate-toolkit-1.3.0/translate/misc/multistring.py b/translate-toolkit-1.3.0/translate/misc/multistring.py
new file mode 100644
index 0000000..4efbc3a
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/multistring.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2006 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""Supports a hybrid Unicode string that can also have a list of alternate strings in the strings attribute"""
+
+from translate.misc import autoencode
+
+class multistring(autoencode.autoencode):
+    """A string subclass that can carry alternate strings in .strings.
+
+    Constructed from a plain string, .strings holds just [self];
+    constructed from a non-empty list, the first element becomes the
+    main string value and the rest become autoencode alternates.
+    """
+    def __new__(newtype, string=u"", encoding=None, errors=None):
+        if isinstance(string, list):
+            if not string:
+                raise ValueError("multistring must contain at least one string")
+            # NOTE(review): mainstring is assigned but never used below.
+            mainstring = string[0]
+            # Recursive call with string[0] (not a list) takes the else branch.
+            newstring = multistring.__new__(newtype, string[0], encoding, errors)
+            newstring.strings = [newstring] + [autoencode.autoencode.__new__(autoencode.autoencode, altstring, encoding, errors) for altstring in string[1:]]
+        else:
+            newstring = autoencode.autoencode.__new__(newtype, string, encoding, errors)
+            newstring.strings = [newstring]
+        return newstring
+
+    def __init__(self, *args, **kwargs):
+        super(multistring, self).__init__(*args, **kwargs)
+        # Defensive: __new__ normally sets .strings, but guard anyway.
+        if not hasattr(self, "strings"):
+            self.strings = []
+
+    def __cmp__(self, otherstring):
+        """Compare main value first, then the alternate strings (Python 2 cmp)."""
+        if isinstance(otherstring, multistring):
+            parentcompare = cmp(autoencode.autoencode(self), otherstring)
+            if parentcompare:
+                return parentcompare
+            else:
+                # main values equal: fall back to comparing alternates
+                return cmp(self.strings[1:], otherstring.strings[1:])
+        elif isinstance(otherstring, autoencode.autoencode):
+            return cmp(autoencode.autoencode(self), otherstring)
+        elif isinstance(otherstring, unicode):
+            return cmp(unicode(self), otherstring)
+        elif isinstance(otherstring, str):
+            return cmp(str(self), otherstring)
+        else:
+            # incomparable types: fall back to an arbitrary but stable order
+            return cmp(type(self), type(otherstring))
+
+    def __ne__(self, otherstring):
+        return self.__cmp__(otherstring) != 0
+
+    def __eq__(self, otherstring):
+        return self.__cmp__(otherstring) == 0
+
+    def __repr__(self):
+        parts = [autoencode.autoencode.__repr__(self)] + [repr(a) for a in self.strings[1:]]
+        return "multistring([" + ",".join(parts) + "])"
+
+    def replace(self, old, new, count=None):
+        """Like unicode.replace, but applied to all alternate strings too.
+
+        Returns a new multistring; the original is left unchanged.
+        """
+        if count is None:
+            newstr = multistring(super(multistring, self).replace(old, new), self.encoding)
+        else:
+            newstr = multistring(super(multistring, self).replace(old, new, count), self.encoding)
+        for s in self.strings[1:]:
+            if count is None:
+                newstr.strings.append(s.replace(old, new))
+            else:
+                newstr.strings.append(s.replace(old, new, count))
+        return newstr
diff --git a/translate-toolkit-1.3.0/translate/misc/optrecurse.py b/translate-toolkit-1.3.0/translate/misc/optrecurse.py
new file mode 100644
index 0000000..d89c31a
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/optrecurse.py
@@ -0,0 +1,699 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2002-2006 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import sys
+import os.path
+import fnmatch
+import traceback
+import optparse
+from translate.misc import progressbar
+from translate import __version__
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+class ManPageOption(optparse.Option, object):
+    """An optparse Option that supports the extra "manpage" action."""
+    ACTIONS = optparse.Option.ACTIONS + ("manpage",)
+
+    def take_action(self, action, dest, opt, value, values, parser):
+        """take_action that can handle manpage as well as standard actions
+
+        The manpage action prints the generated manpage and exits with
+        status 0, mirroring optparse's built-in --help behaviour.
+        """
+        if action == "manpage":
+            parser.print_manpage()
+            sys.exit(0)
+        return super(ManPageOption, self).take_action(action, dest, opt, value, values, parser)
+
+class ManHelpFormatter (optparse.HelpFormatter):
+    """optparse help formatter that emits troff (man page) markup.
+
+    \\fB/\\fI/\\fR/\\fP are troff font escapes: bold, italic, roman,
+    and previous-font respectively.
+    """
+    def __init__ (self,
+                  indent_increment=0,
+                  max_help_position=0,
+                  width=80,
+                  short_first=1):
+        optparse.HelpFormatter.__init__(
+            self, indent_increment, max_help_position, width, short_first)
+
+    def format_option_strings (self, option):
+        """Return a comma-separated list of option strings & metavariables."""
+        if option.takes_value():
+            metavar = option.metavar or option.dest.upper()
+            # metavar in italics, '=' in roman for the long-option form
+            metavar = '\\fI%s\\fP'%metavar
+            short_opts = [sopt + metavar for sopt in option._short_opts]
+            long_opts = [lopt + "\\fR=\\fP" + metavar for lopt in option._long_opts]
+        else:
+            short_opts = option._short_opts
+            long_opts = option._long_opts
+
+        if self.short_first:
+            opts = short_opts + long_opts
+        else:
+            opts = long_opts + short_opts
+
+        # whole option list in bold, separated by roman commas
+        return '\\fB%s\\fP'%("\\fR, \\fP".join(opts))
+
+class RecursiveOptionParser(optparse.OptionParser, object):
+ """A specialized Option Parser for recursing through directories."""
+
+    def __init__(self, formats, usetemplates=False, allowmissingtemplate=False, description=None):
+        """Construct the specialized Option Parser.
+
+        @type formats: Dictionary
+        @param formats: See L{setformats()} for an explanation of the formats parameter.
+        @param usetemplates: whether to add a -t/--template option
+        @param allowmissingtemplate: still process input files whose template is missing
+        @param description: program description; its first paragraph is also
+            used for the manpage NAME section
+
+        """
+
+        optparse.OptionParser.__init__(self, version="%prog "+__version__.sver, description=description)
+        self.setmanpageoption()
+        self.setprogressoptions()
+        self.seterrorleveloptions()
+        self.setformats(formats, usetemplates)
+        self.setpsycooption()
+        # names of option attributes forwarded to the file processor
+        self.passthrough = []
+        self.allowmissingtemplate = allowmissingtemplate
+
+    def get_prog_name(self):
+        """returns the name this program was invoked as (basename of argv[0])"""
+        return os.path.basename(sys.argv[0])
+
+    def setmanpageoption(self):
+        """creates a manpage option that allows the optionparser to generate a manpage"""
+        # ManPageOption's "manpage" action prints the manpage and exits.
+        manpageoption = ManPageOption(None, "--manpage", dest="manpage", default=False, action="manpage",
+            help="output a manpage based on the help")
+        self.define_option(manpageoption)
+
+    def format_manpage(self):
+        """returns a formatted manpage (troff source) built from the help info"""
+        result = []
+        prog = self.get_prog_name()
+        # substitute optparse's %prog placeholder two different ways:
+        # with the executable name, and with the toolkit name for titles
+        formatprog = lambda x: x.replace("%prog", prog)
+        formatToolkit = lambda x: x.replace("%prog", "Translate Toolkit")
+        result.append('.\\" Autogenerated manpage\n')
+        result.append('.TH %s 1 "%s" "" "%s"\n' % (prog,
+                                                   formatToolkit(self.version),
+                                                   formatToolkit(self.version)))
+        result.append('.SH NAME\n')
+        # first paragraph of the description is the one-line summary
+        result.append('%s \\- %s\n' % (self.get_prog_name(), self.description.split('\n\n')[0]))
+        result.append('.SH SYNOPSIS\n')
+        result.append('.PP\n')
+        usage = "\\fB%prog "
+        usage += " ".join([self.getusageman(option) for option in self.option_list])
+        usage += "\\fP"
+        result.append('%s\n' % formatprog(usage))
+        description_lines = self.description.split('\n\n')[1:]
+        if description_lines:
+            result.append('.SH DESCRIPTION\n')
+            result.append('\n'.join(description_lines))
+        result.append('.SH OPTIONS\n')
+        # store_option_strings populates each option's formatted string
+        ManHelpFormatter().store_option_strings(self)
+        result.append('.PP\n')
+        for option in self.option_list:
+            result.append('.TP\n')
+            result.append('%s\n'%option)
+            result.append('%s\n'%option.help)
+        return "".join(result)
+
+    def print_manpage(self, file=None):
+        """outputs a manpage for the program using the help information
+
+        NOTE(review): the 'file' parameter shadows the builtin of the
+        same name; kept for API compatibility.
+        """
+        if file is None:
+            file = sys.stdout
+        file.write(self.format_manpage())
+
+    def setpsycooption(self):
+        """adds a --psyco option, but only when the psyco module is importable"""
+        try:
+            import psyco
+        except Exception:
+            # psyco not installed: silently omit the option
+            return
+        psycomodes = ["none", "full", "profile"]
+        psycooption = optparse.Option(None, "--psyco", dest="psyco", default=None,
+            choices=psycomodes, metavar="MODE",
+            help="use psyco to speed up the operation, modes: %s" % (", ".join(psycomodes)))
+        self.define_option(psycooption)
+
+    def usepsyco(self, options):
+        """activates psyco acceleration according to options.psyco"""
+        # options.psyco == None means the default, which is "full", but don't give a warning...
+        # options.psyco == "none" means don't use psyco at all...
+        if getattr(options, "psyco", "none") == "none":
+            return
+        try:
+            import psyco
+        except Exception:
+            # only warn if the user explicitly asked for psyco
+            if options.psyco is not None:
+                self.warning("psyco unavailable", options, sys.exc_info())
+            return
+        if options.psyco is None:
+            options.psyco = "full"
+        if options.psyco == "full":
+            psyco.full()
+        elif options.psyco == "profile":
+            psyco.profile()
+        # tell psyco the functions it cannot compile, to prevent warnings
+        import encodings
+        psyco.cannotcompile(encodings.search_function)
+
+    def set_usage(self, usage=None):
+        """sets the usage string - if usage not given, uses getusagestring for each option"""
+        if usage is None:
+            self.usage = "%prog " + " ".join([self.getusagestring(option) for option in self.option_list])
+        else:
+            super(RecursiveOptionParser, self).set_usage(usage)
+
+    def warning(self, msg, options=None, exc_info=None):
+        """Print a warning message incorporating 'msg' to stderr.
+
+        options.errorlevel controls how much of exc_info (a
+        sys.exc_info() triple) is appended: full traceback, exception
+        only, message only, or nothing.  Does not exit.
+        """
+        if options:
+            if options.errorlevel == "traceback":
+                errorinfo = "\n".join(traceback.format_exception(exc_info[0], exc_info[1], exc_info[2]))
+            elif options.errorlevel == "exception":
+                errorinfo = "\n".join(traceback.format_exception_only(exc_info[0], exc_info[1]))
+            elif options.errorlevel == "message":
+                errorinfo = str(exc_info[1])
+            else:
+                errorinfo = ""
+            if errorinfo:
+                msg += ": " + errorinfo
+        print >> sys.stderr, "\n%s: warning: %s" % (self.get_prog_name(), msg)
+
+    def getusagestring(self, option):
+        """returns the plain-text usage string for the given option"""
+        optionstring = "|".join(option._short_opts + option._long_opts)
+        if getattr(option, "optionalswitch", False):
+            # the switch itself may be omitted (bare argument accepted)
+            optionstring = "[%s]" % optionstring
+        if option.metavar:
+            optionstring += " " + option.metavar
+        if getattr(option, "required", False):
+            return optionstring
+        else:
+            return "[%s]" % optionstring
+
+    def getusageman(self, option):
+        """returns the usage string for the given option, in troff manpage markup"""
+        optionstring = "\\fR|\\fP".join(option._short_opts + option._long_opts)
+        if getattr(option, "optionalswitch", False):
+            optionstring = "\\fR[\\fP%s\\fR]\\fP" % optionstring
+        if option.metavar:
+            optionstring += " \\fI%s\\fP" % option.metavar
+        if getattr(option, "required", False):
+            return optionstring
+        else:
+            return "\\fR[\\fP%s\\fR]\\fP" % optionstring
+
+    def define_option(self, option):
+        """defines the given option, replacing an existing one of the same
+        short or long name if necessary..."""
+        for short_opt in option._short_opts:
+            if self.has_option(short_opt):
+                self.remove_option(short_opt)
+        for long_opt in option._long_opts:
+            if self.has_option(long_opt):
+                self.remove_option(long_opt)
+        self.add_option(option)
+
+    def setformats(self, formats, usetemplates):
+        """Sets the format options using the given format dictionary.
+
+        @type formats: Dictionary
+        @param formats: The dictionary I{keys} should be:
+            - single strings (or 1-tuples) containing an input format (if not usetemplates)
+            - tuples containing an input format and template format (if usetemplates)
+            - formats can be None to indicate what to do with standard input
+        The dictionary I{values} should be tuples of outputformat (string) and processor method.
+
+        Populates self.outputoptions (keyed by (inputformat,
+        templateformat)) and defines the -i/-x/-o (and optionally -t)
+        command-line options.
+        """
+
+        inputformats = []
+        outputformats = []
+        templateformats = []
+        self.outputoptions = {}
+        self.usetemplates = usetemplates
+        for formatgroup, outputoptions in formats.iteritems():
+            # normalize keys: a bare string or None becomes a 1-tuple
+            if isinstance(formatgroup, (str, unicode)) or formatgroup is None:
+                formatgroup = (formatgroup, )
+            if not isinstance(formatgroup, tuple):
+                raise ValueError("formatgroups must be tuples or None/str/unicode")
+            if len(formatgroup) < 1 or len(formatgroup) > 2:
+                raise ValueError("formatgroups must be tuples of length 1 or 2")
+            if len(formatgroup) == 1:
+                # no template format given
+                formatgroup += (None, )
+            inputformat, templateformat = formatgroup
+            if not isinstance(outputoptions, tuple) or len(outputoptions) != 2:
+                raise ValueError("output options must be tuples of length 2")
+            outputformat, processor = outputoptions
+            if not inputformat in inputformats:
+                inputformats.append(inputformat)
+            if not outputformat in outputformats:
+                outputformats.append(outputformat)
+            if not templateformat in templateformats:
+                templateformats.append(templateformat)
+            self.outputoptions[(inputformat, templateformat)] = (outputformat, processor)
+        self.inputformats = inputformats
+        inputformathelp = self.getformathelp(inputformats)
+        inputoption = optparse.Option("-i", "--input", dest="input", default=None, metavar="INPUT",
+                        help="read from INPUT in %s" % (inputformathelp))
+        inputoption.optionalswitch = True
+        inputoption.required = True
+        self.define_option(inputoption)
+        # common version-control directories are excluded by default
+        excludeoption = optparse.Option("-x", "--exclude", dest="exclude", action="append",
+                        type="string", default=["CVS", ".svn", "_darcs", ".git", ".hg", ".bzr"], metavar="EXCLUDE",
+                        help="exclude names matching EXCLUDE from input paths")
+        self.define_option(excludeoption)
+        outputformathelp = self.getformathelp(outputformats)
+        outputoption = optparse.Option("-o", "--output", dest="output", default=None, metavar="OUTPUT",
+                        help="write to OUTPUT in %s" % (outputformathelp))
+        outputoption.optionalswitch = True
+        outputoption.required = True
+        self.define_option(outputoption)
+        if self.usetemplates:
+            self.templateformats = templateformats
+            templateformathelp = self.getformathelp(self.templateformats)
+            templateoption = optparse.Option("-t", "--template", dest="template", default=None, metavar="TEMPLATE",
+                        help="read from TEMPLATE in %s" % (templateformathelp))
+            self.define_option(templateoption)
+
+    def setprogressoptions(self):
+        """sets the progress options (maps each choice to a progressbar class)"""
+        self.progresstypes = {
+                "none": progressbar.NoProgressBar,
+                "bar": progressbar.HashProgressBar,
+                "dots": progressbar.DotsProgressBar,
+                "names": progressbar.MessageProgressBar,
+                "verbose": progressbar.VerboseProgressBar
+                }
+        progressoption = optparse.Option(None, "--progress", dest="progress", default="bar",
+                          choices = self.progresstypes.keys(), metavar="PROGRESS",
+                          help="show progress as: %s" % (", ".join(self.progresstypes)))
+        self.define_option(progressoption)
+
+    def seterrorleveloptions(self):
+        """sets the errorlevel options (controls verbosity of warning())"""
+        self.errorleveltypes = ["none", "message", "exception", "traceback"]
+        errorleveloption = optparse.Option(None, "--errorlevel", dest="errorlevel", default="message",
+                          choices = self.errorleveltypes, metavar="ERRORLEVEL",
+                          help="show errorlevel as: %s" % (", ".join(self.errorleveltypes)))
+        self.define_option(errorleveloption)
+
+    def getformathelp(self, formats):
+        """make a nice help string for describing formats...
+
+        None entries (stdin placeholders) are dropped before building
+        the "X format"/"X, Y formats" phrase.
+        """
+        if None in formats:
+            formats = filter(lambda format: format is not None, formats)
+        if len(formats) == 0:
+            return ""
+        elif len(formats) == 1:
+            return "%s format" % (", ".join(formats))
+        else:
+            return "%s formats" % (", ".join(formats))
+
+    def isrecursive(self, fileoption, filepurpose='input'):
+        """checks if fileoption requires recursion (a directory or a file list)
+
+        filepurpose is informational only and not used in the decision.
+        """
+        if fileoption is None:
+            return False
+        elif isinstance(fileoption, list):
+            return True
+        else:
+            return os.path.isdir(fileoption)
+
+    def parse_args(self, args=None, values=None):
+        """parses the command line options, handling implicit input/output args"""
+        (options, args) = super(RecursiveOptionParser, self).parse_args(args, values)
+        # make some intelligent guesses about what reasonable people
+        # might give on the command line without -i/-o
+        if args and not options.input:
+            if len(args) > 1:
+                # several bare args: all but the last are inputs
+                options.input = args[:-1]
+                args = args[-1:]
+            else:
+                options.input = args[0]
+                args = []
+        if args and not options.output:
+            # a remaining bare arg is the output
+            options.output = args[-1]
+            args = args[:-1]
+        if args:
+            self.error("You have used an invalid combination of --input, --output and freestanding args")
+        if isinstance(options.input, list) and len(options.input) == 1:
+            options.input = options.input[0]
+        if options.input is None:
+            self.error("You need to give an inputfile or use - for stdin ; use --help for full usage instructions")
+        elif options.input == '-':
+            # '-' means stdin; None signals that downstream
+            options.input = None
+        return (options, args)
+
+    def getpassthroughoptions(self, options):
+        """get the options required to pass to the filtermethod...
+
+        Only option attributes whose names appear in self.passthrough
+        are forwarded, as a keyword dictionary.
+        """
+        passthroughoptions = {}
+        for optionname in dir(options):
+            if optionname in self.passthrough:
+                passthroughoptions[optionname] = getattr(options, optionname)
+        return passthroughoptions
+
+    def getoutputoptions(self, options, inputpath, templatepath):
+        """works out which output format and processor method to use...
+
+        Looks up (input extension, template extension) in
+        options.outputoptions, falling back through "*" wildcards and
+        None entries, most specific first.  Raises ValueError when no
+        entry matches.
+        """
+        if inputpath:
+            inputbase, inputext = self.splitinputext(inputpath)
+        else:
+            inputext = None
+        if templatepath:
+            templatebase, templateext = self.splittemplateext(templatepath)
+        else:
+            templateext = None
+        # exact match short-circuits; wildcard matches may still need
+        # the outputformat == "*" resolution below
+        if (inputext, templateext) in options.outputoptions:
+            return options.outputoptions[inputext, templateext]
+        elif (inputext, "*") in options.outputoptions:
+            outputformat, fileprocessor = options.outputoptions[inputext, "*"]
+        elif ("*", templateext) in options.outputoptions:
+            outputformat, fileprocessor = options.outputoptions["*", templateext]
+        elif ("*", "*") in options.outputoptions:
+            outputformat, fileprocessor = options.outputoptions["*", "*"]
+        elif (inputext, None) in options.outputoptions:
+            return options.outputoptions[inputext, None]
+        elif (None, templateext) in options.outputoptions:
+            return options.outputoptions[None, templateext]
+        elif ("*", None) in options.outputoptions:
+            outputformat, fileprocessor = options.outputoptions["*", None]
+        elif (None, "*") in options.outputoptions:
+            outputformat, fileprocessor = options.outputoptions[None, "*"]
+        else:
+            if self.usetemplates:
+                if inputext is None:
+                    raise ValueError("don't know what to do with input format (no file extension), no template file")
+                elif templateext is None:
+                    raise ValueError("don't know what to do with input format %s, no template file" % (os.extsep + inputext))
+                else:
+                    raise ValueError("don't know what to do with input format %s, template format %s" % (os.extsep + inputext, os.extsep + templateext))
+            else:
+                # NOTE(review): '%' binds tighter than '+', so this formats
+                # os.extsep alone and then concatenates inputext; the text
+                # comes out right only because os.extsep ends the template,
+                # and it raises TypeError when inputext is None - confirm.
+                raise ValueError("don't know what to do with input format %s" % os.extsep + inputext)
+        if outputformat == "*":
+            # wildcard output: mirror the input (or template) extension
+            if inputext:
+                outputformat = inputext
+            elif templateext:
+                outputformat = templateext
+            elif ("*", "*") in options.outputoptions:
+                outputformat = None
+            else:
+                if self.usetemplates:
+                    if templateext is None:
+                        raise ValueError("don't know what to do with input format %s, no template file" % (os.extsep + inputext))
+                    else:
+                        raise ValueError("don't know what to do with input format %s, template format %s" % (os.extsep + inputext, os.extsep + templateext))
+                else:
+                    # NOTE(review): same '%'/'+' precedence hazard as above.
+                    raise ValueError("don't know what to do with input format %s" % os.extsep + inputext)
+        return outputformat, fileprocessor
+
+    def initprogressbar(self, allfiles, options):
+        """sets up a progress bar appropriate to the options and files"""
+        if options.progress in ('bar', 'verbose'):
+            # these two bar types need to know the total file count
+            self.progressbar = self.progresstypes[options.progress](0, len(allfiles))
+            print >> sys.stderr, "processing %d files..." % len(allfiles)
+        else:
+            self.progressbar = self.progresstypes[options.progress]()
+
+    def getfullinputpath(self, options, inputpath):
+        """gets the full path to an input file (inputpath joined onto the
+        input directory, when one was given)"""
+        if options.input:
+            return os.path.join(options.input, inputpath)
+        else:
+            return inputpath
+
+    def getfulloutputpath(self, options, outputpath):
+        """gets the full path to an output file (outputpath joined onto the
+        output directory when operating recursively)"""
+        if options.recursiveoutput and options.output:
+            return os.path.join(options.output, outputpath)
+        else:
+            return outputpath
+
+    def getfulltemplatepath(self, options, templatepath):
+        """gets the full path to a template file, or None when templates
+        are recursive but unavailable for this input"""
+        if not options.recursivetemplate:
+            return templatepath
+        elif templatepath is not None and self.usetemplates and options.template:
+            return os.path.join(options.template, templatepath)
+        else:
+            return None
+
+    def run(self):
+        """parses the arguments, and runs recursiveprocess with the resulting options..."""
+        (options, args) = self.parse_args()
+        # this is so derived classes can modify the inputformats etc based on the options
+        options.inputformats = self.inputformats
+        options.outputoptions = self.outputoptions
+        self.usepsyco(options)
+        self.recursiveprocess(options)
+
+    def recursiveprocess(self, options):
+        """recurse through directories and process files
+
+        Works out the list of input files (single file, explicit list,
+        or directory walk), then runs the file processor on each,
+        reporting per-file progress and converting per-file errors into
+        warnings.
+        """
+        if self.isrecursive(options.input, 'input') and getattr(options, "allowrecursiveinput", True):
+            if not self.isrecursive(options.output, 'output'):
+                try:
+                    # NOTE(review): this warning is printed before mkdir is
+                    # even attempted, so it appears even on success - confirm
+                    # intent.
+                    self.warning("Output directory does not exist. Attempting to create")
+                    os.mkdir(options.output)
+                except IOError, e:
+                    # NOTE(review): os.mkdir raises OSError, not IOError, so
+                    # this handler likely never fires - confirm and widen.
+                    self.error(optparse.OptionValueError("Output directory does not exist, attempt to create failed"))
+            if isinstance(options.input, list):
+                inputfiles = self.recurseinputfilelist(options)
+            else:
+                inputfiles = self.recurseinputfiles(options)
+        else:
+            if options.input:
+                # single file: split into directory + name so the rest of
+                # the machinery can treat it like a one-entry recursion
+                inputfiles = [os.path.basename(options.input)]
+                options.input = os.path.dirname(options.input)
+            else:
+                inputfiles = [options.input]
+        options.recursiveoutput = self.isrecursive(options.output, 'output') and getattr(options, "allowrecursiveoutput", True)
+        options.recursivetemplate = self.usetemplates and self.isrecursive(options.template, 'template') and getattr(options, "allowrecursivetemplate", True)
+        self.initprogressbar(inputfiles, options)
+        for inputpath in inputfiles:
+            try:
+                templatepath = self.gettemplatename(options, inputpath)
+                # If we have a recursive template, but the template doesn't have this
+                # input file, let's drop it.
+                if options.recursivetemplate and templatepath is None and not self.allowmissingtemplate:
+                    # NOTE(review): templatepath is None here, so this prints
+                    # "No template at None" - the message should probably name
+                    # the expected template location instead.
+                    self.warning("No template at %s. Skipping %s." % (templatepath, inputpath))
+                    continue
+                outputformat, fileprocessor = self.getoutputoptions(options, inputpath, templatepath)
+                fullinputpath = self.getfullinputpath(options, inputpath)
+                fulltemplatepath = self.getfulltemplatepath(options, templatepath)
+                outputpath = self.getoutputname(options, inputpath, outputformat)
+                fulloutputpath = self.getfulloutputpath(options, outputpath)
+                if options.recursiveoutput and outputpath:
+                    self.checkoutputsubdir(options, os.path.dirname(outputpath))
+            except Exception, error:
+                # per-file setup failure: warn and carry on with the rest
+                if isinstance(error, KeyboardInterrupt):
+                    raise
+                self.warning("Couldn't handle input file %s" % inputpath, options, sys.exc_info())
+                continue
+            try:
+                success = self.processfile(fileprocessor, options, fullinputpath, fulloutputpath, fulltemplatepath)
+            except Exception, error:
+                if isinstance(error, KeyboardInterrupt):
+                    raise
+                self.warning("Error processing: input %s, output %s, template %s" % (fullinputpath, fulloutputpath, fulltemplatepath), options, sys.exc_info())
+                success = False
+            self.reportprogress(inputpath, success)
+        del self.progressbar
+
+    def openinputfile(self, options, fullinputpath):
+        """opens the input file (stdin when fullinputpath is None)"""
+        if fullinputpath is None:
+            return sys.stdin
+        return open(fullinputpath, 'r')
+
+    def openoutputfile(self, options, fulloutputpath):
+        """opens the output file (stdout when fulloutputpath is None)"""
+        if fulloutputpath is None:
+            return sys.stdout
+        return open(fulloutputpath, 'w')
+
+    def opentempoutputfile(self, options, fulloutputpath):
+        """opens a temporary output file (an in-memory buffer, used when
+        output would overwrite the input or template)"""
+        return StringIO()
+
+    def finalizetempoutputfile(self, options, outputfile, fulloutputpath):
+        """write the temp outputfile to its final destination"""
+        # reset() rewinds the (c)StringIO buffer to the start
+        outputfile.reset()
+        outputstring = outputfile.read()
+        outputfile = self.openoutputfile(options, fulloutputpath)
+        outputfile.write(outputstring)
+        outputfile.close()
+
+    def opentemplatefile(self, options, fulltemplatepath):
+        """opens the template file (if required); returns None and warns
+        when the path is given but missing"""
+        if fulltemplatepath is not None:
+            if os.path.isfile(fulltemplatepath):
+                return open(fulltemplatepath, 'r')
+            else:
+                self.warning("missing template file %s" % fulltemplatepath)
+        return None
+
+    def processfile(self, fileprocessor, options, fullinputpath, fulloutputpath, fulltemplatepath):
+        """process an individual file; returns the processor's success flag
+
+        When the output path equals the input or template path, writing
+        goes through an in-memory buffer first so the source is not
+        clobbered before processing finishes.
+        """
+        inputfile = self.openinputfile(options, fullinputpath)
+        if fulloutputpath and fulloutputpath in (fullinputpath, fulltemplatepath):
+            outputfile = self.opentempoutputfile(options, fulloutputpath)
+            tempoutput = True
+        else:
+            outputfile = self.openoutputfile(options, fulloutputpath)
+            tempoutput = False
+        templatefile = self.opentemplatefile(options, fulltemplatepath)
+        passthroughoptions = self.getpassthroughoptions(options)
+        if fileprocessor(inputfile, outputfile, templatefile, **passthroughoptions):
+            if tempoutput:
+                self.warning("writing to temporary output...")
+                self.finalizetempoutputfile(options, outputfile, fulloutputpath)
+            return True
+        else:
+            # remove the file if it is a file (could be stdout etc)
+            if fulloutputpath and os.path.isfile(fulloutputpath):
+                outputfile.close()
+                os.unlink(fulloutputpath)
+            return False
+
+    def reportprogress(self, filename, success):
+        """shows that we are progressing...
+
+        'success' is accepted for subclasses to use; the default bar
+        only advances and displays the filename.
+        """
+        self.progressbar.amount += 1
+        self.progressbar.show(filename)
+
+    def mkdir(self, parent, subdir):
+        """makes a subdirectory (recursively if necessary); parent must exist"""
+        if not os.path.isdir(parent):
+            raise ValueError("cannot make child directory %r if parent %r does not exist" % (subdir, parent))
+        currentpath = parent
+        subparts = subdir.split(os.sep)
+        # create each path component in turn, skipping ones that exist
+        for part in subparts:
+            currentpath = os.path.join(currentpath, part)
+            if not os.path.isdir(currentpath):
+                os.mkdir(currentpath)
+
    def checkoutputsubdir(self, options, subdir):
        """Checks if subdir under options.output needs to be created, creates if necessary."""
        fullpath = os.path.join(options.output, subdir)
        if not os.path.isdir(fullpath):
            self.mkdir(options.output, subdir)
+
    def isexcluded(self, options, inputpath):
        """Checks if this path has been excluded.

        Matching uses fnmatch against the basename only, so exclude patterns
        cannot refer to directory components.
        """
        basename = os.path.basename(inputpath)
        for excludename in options.exclude:
            if fnmatch.fnmatch(basename, excludename):
                return True
        return False
+
    def recurseinputfilelist(self, options):
        """Uses a list of files, and finds a common base directory for them.

        Side effect: options.input is replaced by the common directory, and
        the returned list contains file names relative to it.
        """
        # find a common base directory for the files to do everything relative to
        commondir = os.path.dirname(os.path.commonprefix(options.input))
        inputfiles = []
        for inputfile in options.input:
            if self.isexcluded(options, inputfile):
                continue
            if inputfile.startswith(commondir+os.sep):
                inputfiles.append(inputfile.replace(commondir+os.sep, "", 1))
            else:
                inputfiles.append(inputfile.replace(commondir, "", 1))
        options.input = commondir
        return inputfiles
+
    def recurseinputfiles(self, options):
        """Recurses through directories and returns files to be processed...

        Iterative depth-first walk rooted at options.input; returns paths
        relative to that root, filtered by isexcluded and by the recognised
        input file extensions.
        """
        dirstack = ['']
        join = os.path.join
        inputfiles = []
        while dirstack:
            top = dirstack.pop(-1)
            names = os.listdir(join(options.input, top))
            dirs = []
            for name in names:
                inputpath = join(top, name)
                if self.isexcluded(options, inputpath):
                    continue
                fullinputpath = self.getfullinputpath(options, inputpath)
                # handle directories...
                if os.path.isdir(fullinputpath):
                    dirs.append(inputpath)
                elif os.path.isfile(fullinputpath):
                    if not self.isvalidinputname(options, name):
                        # only handle names that match recognized input file extensions
                        continue
                    inputfiles.append(inputpath)
            # make sure the directories are processed next time round...
            dirs.reverse()
            dirstack.extend(dirs)
        return inputfiles
+
    def splitext(self, pathname):
        """Splits L{pathname} into name and ext, and removes the extsep.

        @param pathname: A file path
        @type pathname: string
        @return: root, ext
        @rtype: tuple
        """
        root, ext = os.path.splitext(pathname)
        # os.path.splitext keeps the leading separator (".po" etc) - drop it
        ext = ext.replace(os.extsep, "", 1)
        return (root, ext)
+
    def splitinputext(self, inputpath):
        """Splits an inputpath into name and extension."""
        return self.splitext(inputpath)

    def splittemplateext(self, templatepath):
        """Splits a templatepath into name and extension."""
        return self.splitext(templatepath)

    def templateexists(self, options, templatepath):
        """Returns whether the given template exists on disk..."""
        fulltemplatepath = self.getfulltemplatepath(options, templatepath)
        return os.path.isfile(fulltemplatepath)
+
    def gettemplatename(self, options, inputname):
        """Gets the template filename to use for the given input filename, or None.

        (The original docstring said "output filename"; this method resolves
        templates via options.outputoptions and templateexists.)
        """
        if not self.usetemplates:
            return None
        if not inputname or not options.recursivetemplate:
            return options.template
        inputbase, inputext = self.splitinputext(inputname)
        if options.template:
            # first try an exact extension match from the configured pairs
            for inputext1, templateext1 in options.outputoptions:
                if inputext == inputext1:
                    if templateext1:
                        templatepath = inputbase + os.extsep + templateext1
                        if self.templateexists(options, templatepath):
                            return templatepath
            # with wildcard input formats, also allow wildcard matches
            if "*" in options.inputformats:
                for inputext1, templateext1 in options.outputoptions:
                    if (inputext == inputext1) or (inputext1 == "*"):
                        if templateext1 == "*":
                            # "*" template extension: template has the same name as the input
                            templatepath = inputname
                            if self.templateexists(options, templatepath):
                                return templatepath
                        elif templateext1:
                            templatepath = inputbase + os.extsep + templateext1
                            if self.templateexists(options, templatepath):
                                return templatepath
        return None
+
    def getoutputname(self, options, inputname, outputformat):
        """Gets an output filename based on the input filename."""
        if not inputname or not options.recursiveoutput:
            return options.output
        inputbase, inputext = self.splitinputext(inputname)
        # replace the input extension with the requested output format
        outputname = inputbase
        if outputformat:
            outputname += os.extsep + outputformat
        return outputname

    def isvalidinputname(self, options, inputname):
        """Checks if this is a valid input filename (extension is recognised or "*" is allowed)."""
        inputbase, inputext = self.splitinputext(inputname)
        return (inputext in options.inputformats) or ("*" in options.inputformats)
+
diff --git a/translate-toolkit-1.3.0/translate/misc/ourdom.py b/translate-toolkit-1.3.0/translate/misc/ourdom.py
new file mode 100644
index 0000000..eaa137d
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/ourdom.py
@@ -0,0 +1,260 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2004-2007 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""module that provides modified DOM functionality for our needs
+
+Note that users of ourdom should ensure that no code might still use classes
+directly from minidom, like minidom.Element, minidom.Document or methods such
+as minidom.parseString, since the functionality provided here will not be in
+those objects.
+"""
+
+from xml.dom import minidom
+from xml.dom import expatbuilder
+
+# helper functions we use to do xml the way we want, used by modified classes below
+
def writexml_helper(self, writer, indent="", addindent="", newl=""):
    """A replacement for writexml that formats it like typical XML files.
    Nodes are indented but text nodes, where whitespace can be significant, are not indented."""
    # indent = current indentation
    # addindent = indentation to add to higher levels
    # newl = newline string
    writer.write(indent+"<" + self.tagName)

    attrs = self._get_attributes()
    # keys() returns a list in Python 2, so it can be sorted in place
    a_names = attrs.keys()
    a_names.sort()

    for a_name in a_names:
        writer.write(" %s=\"" % a_name)
        minidom._write_data(writer, attrs[a_name].value)
        writer.write("\"")
    if self.childNodes:
        # We need to write text nodes without newline and indentation, so
        # we handle them differently. Note that we here assume that "empty"
        # text nodes can be done away with (see the strip()). Note also that
        # nested tags in a text node (like ph tags in xliff) should also not
        # have newlines and indentation or an extra newline, since that will
        # alter the text node.
        haveText = False
        for childNode in self.childNodes:
            if childNode.nodeType == self.TEXT_NODE and childNode.data.strip():
                haveText = True
                break
        if haveText:
            # text-bearing element: write children inline with no formatting
            writer.write(">")
            for node in self.childNodes:
                node.writexml(writer,"","","")
            writer.write("</%s>%s" % (self.tagName,newl))
        else:
            # This is the normal case that we do with pretty layout
            # (text children here are whitespace-only, so they are skipped)
            writer.write(">%s"%(newl))
            for node in self.childNodes:
                if node.nodeType != self.TEXT_NODE:
                    node.writexml(writer,indent+addindent,addindent,newl)
            writer.write("%s</%s>%s" % (indent,self.tagName,newl))
    else:
        # no children at all: self-closing tag
        writer.write("/>%s"%(newl))
+
def getElementsByTagName_helper(parent, name, dummy=None):
    """A reimplementation of getElementsByTagName as an iterator.

    Note that this is not compatible with getElementsByTagName that returns a
    list; the classes below therefore expose it as yieldElementsByTagName.
    (dummy is unused and kept for signature compatibility.)
    """
    for child in parent.childNodes:
        if child.nodeType == minidom.Node.ELEMENT_NODE:
            # yield the child itself when it matches ("*" matches any tag)
            if name == "*" or child.tagName == name:
                yield child
        if child.hasChildNodes():
            # then yield every matching descendant below this child
            for descendant in child.getElementsByTagName(name):
                yield descendant
+
def searchElementsByTagName_helper(parent, name, onlysearch):
    """Limits the search to within tags occurring in onlysearch."""
    for node in parent.childNodes:
        if node.nodeType == minidom.Node.ELEMENT_NODE and \
            (name == "*" or node.tagName == name):
            yield node
        # NOTE(review): the inner loop rebinds "node", shadowing the outer
        # loop variable; harmless because nothing reads it afterwards, but
        # easy to trip over when editing.
        if node.nodeType == minidom.Node.ELEMENT_NODE and node.tagName in onlysearch:
            for node in node.searchElementsByTagName(name, onlysearch):
                yield node
+
def getFirstElementByTagName(node, name):
    """Returns the first element matching name (via yieldElementsByTagName), or None."""
    results = node.yieldElementsByTagName(name)
    try:
        # .next() is the Python 2 iterator protocol (next(results) in Python 3)
        result = results.next()
        return result
    except StopIteration:
        # generator produced nothing: no matching element
        return None
+
def getnodetext(node):
    """Returns the concatenation of the node's direct text children ("" for None)."""
    if node is None:
        return ""
    parts = []
    for child in node.childNodes:
        if child.nodeType == child.TEXT_NODE:
            parts.append(child.data)
    return "".join(parts)
+
+# various modifications to minidom classes to add functionality we like
+
class DOMImplementation(minidom.DOMImplementation):
    """DOMImplementation whose documents are our Document subclass."""
    def _create_document(self):
        return Document()

class Element(minidom.Element):
    """minidom.Element extended with generator-based search and pretty writexml."""
    def yieldElementsByTagName(self, name):
        """Iterate over matching elements (see getElementsByTagName_helper)."""
        return getElementsByTagName_helper(self, name)
    def searchElementsByTagName(self, name, onlysearch):
        """Iterate over matching elements, descending only into tags in onlysearch."""
        return searchElementsByTagName_helper(self, name, onlysearch)
    def writexml(self, writer, indent, addindent, newl):
        """Write XML with indentation, leaving text-bearing nodes unindented."""
        return writexml_helper(self, writer, indent, addindent, newl)

class Document(minidom.Document):
    """minidom.Document that creates our Element subclass."""
    implementation = DOMImplementation()
    def yieldElementsByTagName(self, name):
        return getElementsByTagName_helper(self, name)
    def searchElementsByTagName(self, name, onlysearch):
        return searchElementsByTagName_helper(self, name, onlysearch)
    def createElement(self, tagName):
        e = Element(tagName)
        e.ownerDocument = self
        return e
    def createElementNS(self, namespaceURI, qualifiedName):
        # Bug fix: _nssplit was referenced unqualified, which raised
        # NameError whenever this method was called; use minidom's
        # implementation to split "prefix:localName".
        prefix, localName = minidom._nssplit(qualifiedName)
        e = Element(qualifiedName, namespaceURI, prefix)
        e.ownerDocument = self
        return e

theDOMImplementation = DOMImplementation()
+
+# an ExpatBuilder that allows us to use the above modifications
+
class ExpatBuilderNS(expatbuilder.ExpatBuilderNS):
    """Namespace-aware ExpatBuilder that builds our Document/Element classes
    instead of minidom's."""
    def reset(self):
        """Free all data structures used during DOM construction."""
        self.document = theDOMImplementation.createDocument(
            expatbuilder.EMPTY_NAMESPACE, None, None)
        self.curNode = self.document
        self._elem_info = self.document._elem_info
        self._cdata = False
        self._initNamespaces()

    def start_element_handler(self, name, attributes):
        # all we want to do is construct our own Element instead of minidom.Element
        # unfortunately the only way to do this is to copy this whole function from expatbuilder.py
        if ' ' in name:
            uri, localname, prefix, qname = expatbuilder._parse_ns_name(self, name)
        else:
            uri = expatbuilder.EMPTY_NAMESPACE
            qname = name
            localname = None
            prefix = expatbuilder.EMPTY_PREFIX
        node = Element(qname, uri, prefix, localname)
        node.ownerDocument = self.document
        expatbuilder._append_child(self.curNode, node)
        self.curNode = node

        if self._ns_ordered_prefixes:
            for prefix, uri in self._ns_ordered_prefixes:
                if prefix:
                    a = minidom.Attr(expatbuilder._intern(self, 'xmlns:' + prefix),
                                     expatbuilder.XMLNS_NAMESPACE, prefix, "xmlns")
                else:
                    a = minidom.Attr("xmlns", expatbuilder.XMLNS_NAMESPACE,
                                     "xmlns", expatbuilder.EMPTY_PREFIX)
                # poke values in via __dict__ to bypass minidom's setters,
                # mirroring expatbuilder's own technique
                d = a.childNodes[0].__dict__
                d['data'] = d['nodeValue'] = uri
                d = a.__dict__
                d['value'] = d['nodeValue'] = uri
                d['ownerDocument'] = self.document
                expatbuilder._set_attribute_node(node, a)
            del self._ns_ordered_prefixes[:]

        if attributes:
            _attrs = node._attrs
            _attrsNS = node._attrsNS
            # expat hands attributes over as a flat [name, value, ...] list
            for i in range(0, len(attributes), 2):
                aname = attributes[i]
                value = attributes[i+1]
                if ' ' in aname:
                    uri, localname, prefix, qname = expatbuilder._parse_ns_name(self, aname)
                    a = minidom.Attr(qname, uri, localname, prefix)
                    _attrs[qname] = a
                    _attrsNS[(uri, localname)] = a
                else:
                    a = minidom.Attr(aname, expatbuilder.EMPTY_NAMESPACE,
                                     aname, expatbuilder.EMPTY_PREFIX)
                    _attrs[aname] = a
                    _attrsNS[(expatbuilder.EMPTY_NAMESPACE, aname)] = a
                d = a.childNodes[0].__dict__
                d['data'] = d['nodeValue'] = value
                d = a.__dict__
                d['ownerDocument'] = self.document
                d['value'] = d['nodeValue'] = value
                d['ownerElement'] = node

    if __debug__:
        # This only adds some asserts to the original
        # end_element_handler(), so we only define this when -O is not
        # used. If changing one, be sure to check the other to see if
        # it needs to be changed as well.
        #
        def end_element_handler(self, name):
            curNode = self.curNode
            if ' ' in name:
                uri, localname, prefix, qname = expatbuilder._parse_ns_name(self, name)
                assert (curNode.namespaceURI == uri
                        and curNode.localName == localname
                        and curNode.prefix == prefix), \
                    "element stack messed up! (namespace)"
            else:
                assert curNode.nodeName == name, \
                    "element stack messed up - bad nodeName"
                assert curNode.namespaceURI == expatbuilder.EMPTY_NAMESPACE, \
                    "element stack messed up - bad namespaceURI"
            self.curNode = curNode.parentNode
            self._finish_end_element(curNode)
+
+# parser methods that use our modified xml classes
+
def parse(file, parser=None, bufsize=None):
    """Parse a file into a DOM by filename or file object.

    parser and bufsize are accepted for API compatibility with
    minidom.parse but are not used here.
    """
    builder = ExpatBuilderNS()
    if isinstance(file, basestring):
        # a path was given (basestring is Python 2); open it ourselves
        fp = open(file, 'rb')
        try:
            result = builder.parseFile(fp)
        finally:
            fp.close()
    else:
        result = builder.parseFile(file)
    return result

def parseString(string, parser=None):
    """Parse a file into a DOM from a string (parser is unused)."""
    builder = ExpatBuilderNS()
    return builder.parseString(string)
+
diff --git a/translate-toolkit-1.3.0/translate/misc/progressbar.py b/translate-toolkit-1.3.0/translate/misc/progressbar.py
new file mode 100644
index 0000000..d34cfda
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/progressbar.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2004, 2005 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""progress bar utilities for reporting feedback on progress of application..."""
+
class DotsProgressBar:
    """An ultra-simple progress indicator that just writes a dot for each action."""
    def __init__(self):
        import sys
        self.stderr = sys.stderr
        # amount is kept for API compatibility with the other progress bars
        self.amount = 0

    def show(self, verbosemessage):
        """Show a dot for progress :-) (verbosemessage is ignored)."""
        self.stderr.write('.')
        self.stderr.flush()

    def close(self):
        self.stderr.write('\n')
        self.stderr.flush()

    def __del__(self):
        # NOTE(review): writing from __del__ can fail during interpreter
        # shutdown once sys.stderr has been torn down
        self.close()

class NoProgressBar:
    """An invisible indicator that does nothing..."""
    def __init__(self):
        # amount is kept for API compatibility with the other progress bars
        self.amount = 0

    def show(self, verbosemessage):
        """Show nothing for progress :-)"""
        pass

    def close(self):
        pass
+
class ProgressBar:
    """A plain progress bar that doesn't know very much about output..."""
    def __init__(self, minValue = 0, maxValue = 100, totalWidth=50):
        self.progBar = "[]"   # This holds the progress bar string
        self.min = minValue
        self.max = maxValue
        self.span = maxValue - minValue
        self.width = totalWidth
        self.amount = 0       # When amount == max, we are 100% done

    def __str__(self):
        """Produces the string representing the progress bar."""
        # clamp amount into [min, max] (note: this mutates self.amount)
        if self.amount < self.min: self.amount = self.min
        if self.amount > self.max: self.amount = self.max

        # Figure out the new percent done, round to an integer
        diffFromMin = float(self.amount - self.min)
        percentDone = (diffFromMin / float(self.span)) * 100.0
        percentDone = round(percentDone)
        percentDone = int(percentDone)

        # Figure out how many hash bars the percentage should be
        # (7 chars are reserved for the brackets and " 100%" suffix)
        allFull = self.width - 7
        numHashes = (percentDone / 100.0) * allFull
        numHashes = int(round(numHashes))

        # build a progress bar with hashes and spaces
        self.progBar = "[%s%s] %3d%%" % ('#'*numHashes, ' '*(allFull-numHashes), percentDone)
        return str(self.progBar)

    def show(self, verbosemessage):
        """Displays the progress bar (Python 2 print statement)."""
        print self
+
class MessageProgressBar(ProgressBar):
    """A ProgressBar that just writes out the messages without any progress display."""
    def __init__(self, *args, **kwargs):
        import sys
        self.sys = sys
        ProgressBar.__init__(self, *args, **kwargs)

    def show(self, verbosemessage):
        # only the message is written; no bar is drawn
        self.sys.stderr.write(verbosemessage + '\n')
        self.sys.stderr.flush()

class HashProgressBar(ProgressBar):
    """A ProgressBar which knows how to go back to the beginning of the line..."""
    def __init__(self, *args, **kwargs):
        import sys
        self.sys = sys
        ProgressBar.__init__(self, *args, **kwargs)

    def show(self, verbosemessage):
        # redraw in place using a carriage return (no newline)
        self.sys.stderr.write(str(self) + '\r')
        self.sys.stderr.flush()

    def close(self):
        self.sys.stderr.write('\n')
        self.sys.stderr.flush()

    def __del__(self):
        # NOTE(review): may fail at interpreter shutdown; see DotsProgressBar
        self.close()

class VerboseProgressBar(HashProgressBar):
    """A HashProgressBar that also prints each message above the bar."""
    def __init__(self, *args, **kwargs):
        # width of the previously drawn bar, used to blank it out
        self.lastwidth = 0
        HashProgressBar.__init__(self, *args, **kwargs)

    def show(self, verbosemessage):
        output = str(self)
        # blank out the previous bar, print the message, then redraw the bar
        self.sys.stderr.write('\r' + ' '*self.lastwidth)
        self.sys.stderr.write('\r' + verbosemessage + '\n')
        self.lastwidth = len(output)
        self.sys.stderr.write('\r' + output)
        self.sys.stderr.flush()
+
def test(progressbar):
    """Drives the given progressbar from min to max for a quick visual check."""
    import time
    for n in range(progressbar.min, progressbar.max+1, 5):
        progressbar.amount = n
        progressbar.show("Some message")
        time.sleep(0.2)

if __name__ == '__main__':
    # manual demo: render a hash-style bar from 0 to 100
    p = HashProgressBar(0,100,50)
    test(p)
+
diff --git a/translate-toolkit-1.3.0/translate/misc/quote.py b/translate-toolkit-1.3.0/translate/misc/quote.py
new file mode 100644
index 0000000..e9d21ce
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/quote.py
@@ -0,0 +1,429 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2002-2006 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""string processing utilities for extracting strings with various kinds of delimiters"""
+
+import logging
+import htmlentitydefs
+
def find_all(searchin, substr):
    """Returns a list of locations where substr occurs in searchin;
    locations are not allowed to overlap."""
    positions = []
    start = 0
    while True:
        start = searchin.find(substr, start)
        if start < 0:
            break
        positions.append(start)
        # skip past this occurrence so matches never overlap
        start += len(substr)
    return positions
+
def extract(source, startdelim, enddelim, escape=None, startinstring=False, allowreentry=True):
    """Extracts a doublequote-delimited string from a string, allowing for backslash-escaping
    returns tuple of (quoted string with quotes, still in string at end)"""
    # note that this returns the quote characters as well... even internally
    instring = startinstring
    enteredonce = False
    lenstart = len(startdelim)
    lenend = len(enddelim)
    startdelim_places = find_all(source, startdelim)
    if startdelim == enddelim:
        enddelim_places = startdelim_places[:]
    else:
        enddelim_places = find_all(source, enddelim)
    if escape is not None:
        lenescape = len(escape)
        escape_places = find_all(source, escape)
        last_escape_pos = -1  # NOTE(review): unused
        # filter escaped escapes
        true_escape = False
        true_escape_places = []
        for escape_pos in escape_places:
            if escape_pos - lenescape in escape_places:
                # an escape immediately preceded by another: alternate
                # between "escaped escape" and a genuine escape
                true_escape = not true_escape
            else:
                true_escape = True
            if true_escape:
                true_escape_places.append(escape_pos)
        # drop delimiters that are themselves escaped; end positions are
        # shifted past the delimiter so slices below include it
        startdelim_places = [pos for pos in startdelim_places if pos - lenescape not in true_escape_places]
        enddelim_places = [pos + lenend for pos in enddelim_places if pos - lenescape not in true_escape_places]
    else:
        enddelim_places = [pos + lenend for pos in enddelim_places]
    # get a unique sorted list of the significant places in the string
    significant_places = [0] + startdelim_places + enddelim_places + [len(source)-1]
    significant_places.sort()
    extracted = ""
    lastpos = None
    for pos in significant_places:
        if instring and pos in enddelim_places:
            # make sure that if startdelim == enddelim we don't get confused and count the same string as start and end
            if lastpos == pos - lenstart and lastpos in startdelim_places:
                continue
            extracted += source[lastpos:pos]
            instring = False
            lastpos = pos
        if (not instring) and pos in startdelim_places and not (enteredonce and not allowreentry):
            instring = True
            enteredonce = True
            lastpos = pos
    if instring:
        # string still open at the end of source: keep the tail
        extracted += source[lastpos:]
    return (extracted, instring)
+
def extractfromlines(lines, startdelim, enddelim, escape):
    """Calls extract over multiple lines, remembering whether in the string or not."""
    result = ""
    instring = 0
    for line in lines:
        (string, instring) = extract(line, startdelim, enddelim, escape, instring)
        result += string
        # stop as soon as a line ends outside the delimited string
        if not instring: break
    return result

def extractstr(source):
    """Extracts a doublequote-delimited string from a string, allowing for backslash-escaping."""
    (string, instring) = extract(source, '"', '"', '\\')
    return string

def extractcomment(lines):
    """Extracts <!-- > XML comments from lines."""
    return extractfromlines(lines, "<!--", "-->", None)
+
def extractwithoutquotes(source, startdelim, enddelim, escape=None, startinstring=False, includeescapes=True, allowreentry=True):
    """Extracts a doublequote-delimited string from a string, allowing for backslash-escaping
    includeescapes can also be a function that takes the whole escaped string and returns the replaced version"""
    instring = startinstring
    enteredonce = False
    lenstart = len(startdelim)
    lenend = len(enddelim)
    startdelim_places = find_all(source, startdelim)
    if startdelim == enddelim:
        enddelim_places = startdelim_places[:]
    else:
        enddelim_places = find_all(source, enddelim)
    if escape is not None:
        lenescape = len(escape)
        escape_places = find_all(source, escape)
        last_escape_pos = -1  # NOTE(review): unused
        # filter escaped escapes
        true_escape = False
        true_escape_places = []
        for escape_pos in escape_places:
            if escape_pos - lenescape in escape_places:
                true_escape = not true_escape
            else:
                true_escape = True
            if true_escape:
                true_escape_places.append(escape_pos)
        # drop delimiters that are themselves escaped; end positions are
        # shifted past the delimiter
        startdelim_places = [pos for pos in startdelim_places if pos - lenescape not in true_escape_places]
        enddelim_places = [pos + lenend for pos in enddelim_places if pos - lenescape not in true_escape_places]
    else:
        enddelim_places = [pos + lenend for pos in enddelim_places]
    # get a unique sorted list of the significant places in the string
    significant_places = [0] + startdelim_places + enddelim_places + [len(source)-1]
    significant_places.sort()
    extracted = ""
    lastpos = 0
    callable_includeescapes = callable(includeescapes)
    checkescapes = callable_includeescapes or not includeescapes
    for pos in significant_places:
        if instring and pos in enddelim_places and lastpos != pos - lenstart:
            # slice out the section between the delimiters (quotes excluded)
            section_start, section_end = lastpos + len(startdelim), pos - len(enddelim)
            section = source[section_start:section_end]
            if escape is not None and checkescapes:
                # rewrite each escape sequence: keep it, replace it (callable
                # includeescapes) or strip the escape character
                escape_list = [epos - section_start for epos in true_escape_places if section_start <= epos <= section_end]
                new_section = ""
                last_epos = 0
                for epos in escape_list:
                    new_section += section[last_epos:epos]
                    if callable_includeescapes:
                        replace_escape = includeescapes(section[epos:epos+lenescape+1])
                        # TODO: deprecate old method of returning boolean from includeescape, by removing this if block
                        if not isinstance(replace_escape, basestring):
                            if replace_escape:
                                replace_escape = section[epos:epos+lenescape+1]
                            else:
                                replace_escape = section[epos+lenescape:epos+lenescape+1]
                        new_section += replace_escape
                        last_epos = epos + lenescape + 1
                    else:
                        last_epos = epos + lenescape
                section = new_section + section[last_epos:]
            extracted += section
            instring = False
            lastpos = pos
        if (not instring) and pos in startdelim_places and not (enteredonce and not allowreentry):
            instring = True
            enteredonce = True
            lastpos = pos
    if instring:
        # string still open at the end of source: process the trailing section
        section_start = lastpos + len(startdelim)
        section = source[section_start:]
        if escape is not None and not includeescapes:
            escape_list = [epos - section_start for epos in true_escape_places if section_start <= epos]
            new_section = ""
            last_epos = 0
            for epos in escape_list:
                new_section += section[last_epos:epos]
                if callable_includeescapes and includeescapes(section[epos:epos+lenescape+1]):
                    last_epos = epos
                else:
                    last_epos = epos + lenescape
            section = new_section + section[last_epos:]
        extracted += section
    return (extracted, instring)
+
def escapequotes(source, escapeescapes=0):
    """Returns the same string, with double quotes escaped with backslash."""
    result = source
    if escapeescapes:
        # escape backslashes first so the quote escapes added below stay intact
        result = result.replace('\\', '\\\\')
    return result.replace('"', '\\"')
+
def escapesinglequotes(source):
    """Returns the same string, with single quotes doubled."""
    # doubling is done by rejoining on '' rather than str.replace
    return "''".join(source.split("'"))
+
def htmlentityencode(source):
    """encodes source using HTML entities e.g. © -> &copy;"""
    output = ""
    for char in source:
        charnum = ord(char)
        if charnum in htmlentitydefs.codepoint2name:
            # known codepoint: emit its named entity
            output += "&%s;" % htmlentitydefs.codepoint2name[charnum]
        else:
            output += str(char)
    return output
+
def htmlentitydecode(source):
    """decodes source using HTML entities e.g. &copy; -> ©"""
    output = u""
    inentity = False
    for char in source:
        if char == "&":
            # NOTE(review): a second "&" while already inside a candidate
            # entity silently discards the pending "&" and partial name
            inentity = True
            possibleentity = ""
            continue
        if inentity:
            if char == ";":
                if len(possibleentity) > 0 and possibleentity in htmlentitydefs.name2codepoint:
                    # recognised entity: emit the corresponding character
                    output += unichr(htmlentitydefs.name2codepoint[possibleentity])
                    inentity = False
                else:
                    # unknown entity: emit the original text verbatim
                    output += "&" + possibleentity + ";"
                    inentity = False
            elif char == " ":
                # a space cannot appear inside an entity; abandon the attempt
                output += "&" + possibleentity + char
                inentity = False
            else:
                possibleentity += char
        else:
            output += char
    return output
+
def javapropertiesencode(source):
    """encodes source in the escaped-unicode encoding used by Java .properties files"""
    output = ""
    for char in source:
        charnum = ord(char)
        if char in controlchars:
            # known control characters get a readable backslash escape
            output += controlchars[char]
        elif 0 <= charnum < 128:
            output += str(char)
        else:
            # non-ASCII characters become \uXXXX escapes
            output += "\\u%04X" % charnum
    return output

def mozillapropertiesencode(source):
    """encodes source in the escaped-unicode encoding used by Mozilla .properties files"""
    output = ""
    for char in source:
        charnum = ord(char)  # NOTE(review): unused; Mozilla keeps non-ASCII as-is
        if char in controlchars:
            output += controlchars[char]
        else:
            output += char
    return output

propertyescapes = {
    # escapes that are self-escaping
    "\\": "\\", "'": "'", '"': '"',
    # control characters that we keep
    "b": "\b", "f": "\f", "t": "\t", "n": "\n", "v": "\v", "a": "\a"
    }

controlchars = {
    # the reverse of the above...
    "\b": "\\b", "\f": "\\f", "\t": "\\t", "\n": "\\n", "\v": "\\v"
    }
+
def escapecontrols(source):
    """escape control characters in the given string"""
    # iteritems() is the Python 2 lazy variant of items()
    for key, value in controlchars.iteritems():
        source = source.replace(key, value)
    return source
+
def mozillapropertiesdecode(source):
    """decodes source from the escaped-unicode encoding used by mozilla .properties files"""
    # since the .decode("unicode-escape") routine decodes everything, and we don't want to
    # we reimplemented the algorithm from Python Objects/unicode.c in Python here
    # and modified it to retain escaped control characters
    output = u""
    s = 0
    if isinstance(source, str):
        # Python 2: promote byte strings to unicode before scanning
        source = source.decode("utf-8")
    def unichr2(i):
        """Returns a Unicode string of one character with ordinal 32 <= i, otherwise an escaped control character"""
        if 32 <= i:
            return unichr(i)
        elif unichr(i) in controlchars:
            # we just return the character, unescaped
            # if people want to escape them they can use escapecontrols
            return unichr(i)
        else:
            return "\\u%04x" % i
    while s < len(source):
        c = source[s]
        if c != '\\':
            output += c
            s += 1
            continue
        s += 1
        if s >= len(source):
            # this is an escape at the end of the line, which implies a continuation...
            # return the escape to inform the parser
            output += c
            continue
        c = source[s]
        s += 1
        # escaped newline is a line continuation: swallow it
        if c == '\n': pass
        # propertyescapes lookups
        elif c in propertyescapes: output += propertyescapes[c]
        # \uXXXX escapes
        # \UXXXX escapes
        elif c in "uU":
            digits = 4
            x = 0
            for digit in range(digits):
                x <<= 4
                if s + digit >= len(source):
                    digits = digit
                    break
                c = source[s+digit].lower()
                if c.isdigit():
                    x += ord(c) - ord('0')
                elif c in "abcdef":
                    x += ord(c) - ord('a') + 10
                else:
                    break
            # NOTE(review): when a non-hex character stops the loop early,
            # digits is only reduced in the ran-out-of-source case, so s may
            # advance past characters that were never consumed
            s += digits
            output += unichr2(x)
        elif c == "N":
            if source[s] != "{":
                logging.warn("Invalid named unicode escape: no { after \\N")
                output += "\\" + c
                continue
            s += 1
            e = source.find("}", s)
            if e == -1:
                logging.warn("Invalid named unicode escape: no } after \\N{")
                output += "\\" + c
                continue
            import unicodedata
            name = source[s:e]
            # resolve \N{NAME} via the unicodedata name database
            output += unicodedata.lookup(name)
            s = e + 1
        else:
            # unknown escape: keep it verbatim
            output += "\\" + c
    return output
+
def quotestr(source, escapeescapes=0):
    """Returns a doublequote-delimited quoted string, escaping double quotes with backslash.

    source may be a single string or a list of lines; a list produces one
    string with each line individually quoted, joined by newlines.

    Bug fix: an empty list used to raise UnboundLocalError because the
    accumulator was only assigned inside the loop; it now returns "".
    """
    if isinstance(source, list):
        return '\n'.join(['"' + escapequotes(line, escapeescapes) + '"' for line in source])
    else:
        return '"' + escapequotes(source, escapeescapes) + '"'
+
def singlequotestr(source):
    """Returns a singlequote-delimited quoted string, escaping single quotes with themselves.

    (The original docstring said "doublequote-delimited"; the delimiters
    used here are single quotes.)
    """
    return "'" + escapesinglequotes(source) + "'"

def eitherquotestr(source):
    """Returns a singlequote- or doublequote-delimited string, depending on what quotes it contains."""
    # prefer double quotes; fall back to single quotes when the text
    # already contains a double quote
    if '"' in source:
        return singlequotestr(source)
    else:
        return quotestr(source)
+
def findend(string, substring):
    """Returns the index just past the first occurrence of substring, or -1 if absent."""
    position = string.find(substring)
    if position == -1:
        return -1
    return position + len(substring)

def rstripeol(string):
    """Strips any trailing CR/LF characters from string."""
    return string.rstrip("\n\r")
+
def stripcomment(comment, startstring="<!--", endstring="-->"):
    """Returns the text between startstring and endstring, whitespace-stripped."""
    begin = comment.find(startstring)
    if begin == -1:
        begin = 0
    else:
        begin += len(startstring)
    end = comment.find(endstring, begin)
    # note: when endstring is absent, find() returns -1 and the slice drops
    # the final character -- behaviour preserved from the original
    return comment[begin:end].strip()

def unstripcomment(comment, startstring="<!-- ", endstring=" -->\n"):
    """Wraps the stripped comment text back into comment markers."""
    return "%s%s%s" % (startstring, comment.strip(), endstring)
+
def encodewithdict(unencoded, encodedict):
    """Encodes certain substrings in the string using an encode dictionary."""
    encoded = unencoded
    for original, replacement in encodedict.items():
        # only pay for replace() when the key actually occurs
        if original in encoded:
            encoded = encoded.replace(original, replacement)
    return encoded
+
def makeutf8(d):
    """convert numbers to utf8 codes in the values of a dictionary

    Mutates d in place and also returns it for convenience.
    """
    for key, value in d.items():
        if type(value) == int:
            # unichr/encode are Python 2 constructs (chr(value) in Python 3)
            d[key] = unichr(value).encode('utf8')
    return d
+
def testcase():
    """Quick manual smoke test printing extract results (Python 2 print statements)."""
    x = ' "this" " is " "a" " test!" '
    print extract(x, '"', '"', None)
    print extract(x, '"', '"', '!')
    print extractwithoutquotes(x, '"', '"', None)
    print extractwithoutquotes(x, '"', '"', '!')
    print extractwithoutquotes(x, '"', '"', '!', includeescapes=False)

if __name__ == '__main__':
    testcase()
+
diff --git a/translate-toolkit-1.3.0/translate/misc/rich.py b/translate-toolkit-1.3.0/translate/misc/rich.py
new file mode 100644
index 0000000..dc77029
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/rich.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2008 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+__all__ = ['only_strings', 'map_rich']
+
+from translate.storage.placeables import Placeable
+
def map_content(f, chunk):
    """Recursively apply f inside chunk's content if chunk is a Placeable
    that has content, then return chunk.

    NOTE: not a pure function -- the placeable's content is replaced in
    place rather than copied."""
    if isinstance(chunk, Placeable) and chunk.content is not None:
        chunk.content = map_entry(f, chunk.content)
    return chunk
+
def map_entry(f, chunk_seq):
    """Return a list with f applied to every chunk of chunk_seq,
    descending into placeable content first via map_content."""
    transformed = []
    for chunk in chunk_seq:
        transformed.append(f(map_content(f, chunk)))
    return transformed
+
def only_strings(f):
    """Decorator restricting f to non-Placeable (string) arguments;
    Placeables are passed through unchanged.  Used to wrap the function
    handed to map_rich."""
    def decorated_f(arg):
        if isinstance(arg, Placeable):
            return arg
        return f(arg)
    return decorated_f
+
def map_rich(f, rich_string):
    """Return a new list of chunk sequences, each being the result of
    map_entry(f, ...) on the corresponding entry of rich_string."""
    mapped = []
    for entry in rich_string:
        mapped.append(map_entry(f, entry))
    return mapped
diff --git a/translate-toolkit-1.3.0/translate/misc/selector.py b/translate-toolkit-1.3.0/translate/misc/selector.py
new file mode 100644
index 0000000..c85920e
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/selector.py
@@ -0,0 +1,533 @@
+# -*- coding: latin-1 -*-
+"""selector - WSGI delegation based on URL path and method.
+
+(See the docstring of selector.Selector.)
+
+Copyright (C) 2006 Luke Arno - http://lukearno.com/
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to
+the Free Software Foundation, Inc., 51 Franklin Street,
+Fifth Floor, Boston, MA 02110-1301 USA
+
+Luke Arno can be found at http://lukearno.com/
+
+"""
+
+import re
+from itertools import starmap
+from wsgiref.util import shift_path_info
+
+
+try:
+ from resolver import resolve
+except ImportError:
+ # resolver not essential for basic featurs
+ #FIXME: this library is overkill, simplify
+ pass
+
+class MappingFileError(Exception): pass
+
+
+class PathExpressionParserError(Exception): pass
+
+
def method_not_allowed(environ, start_response):
    """WSGI app producing a 405 response whose Allow header lists the
    methods previously recorded in environ['selector.methods']."""
    allow = ', '.join(environ['selector.methods'])
    start_response("405 Method Not Allowed",
                   [('Allow', allow),
                    ('Content-Type', 'text/plain')])
    return ["405 Method Not Allowed\n\n"
            "The method specified in the Request-Line is not allowed "
            "for the resource identified by the Request-URI."]
+
+
def not_found(environ, start_response):
    """WSGI app producing a plain-text 404 response."""
    headers = [('Content-Type', 'text/plain')]
    start_response("404 Not Found", headers)
    return ["404 Not Found\n\n"
            "The server has not found anything matching the Request-URI."]
+
+
class Selector(object):
    """WSGI middleware for URL paths and HTTP method based delegation.

    see http://lukearno.com/projects/selector/

    mappings are given as an iterable that returns tuples like this:

    (path_expression, http_methods_dict, optional_prefix)
    """

    status405 = staticmethod(method_not_allowed)
    status404 = staticmethod(not_found)

    def __init__(self,
                 mappings=None,
                 prefix="",
                 parser=None,
                 wrap=None,
                 mapfile=None,
                 consume_path=True):
        """Initialize selector."""
        self.mappings = []
        self.prefix = prefix
        if parser is None:
            self.parser = SimpleParser()
        else:
            self.parser = parser
        self.wrap = wrap
        if mapfile is not None:
            self.slurp_file(mapfile)
        if mappings is not None:
            self.slurp(mappings)
        self.consume_path = consume_path

    def slurp(self, mappings, prefix=None, parser=None, wrap=None):
        """Slurp in a whole list (or iterable) of mappings.

        The prefix, parser and wrap args temporarily override the
        instance attributes of the same name for the given mappings.
        """
        if prefix is not None:
            oldprefix = self.prefix
            self.prefix = prefix
        if parser is not None:
            oldparser = self.parser
            self.parser = parser
        if wrap is not None:
            oldwrap = self.wrap
            self.wrap = wrap
        # list() forces the iterator so every add() actually runs.
        list(starmap(self.add, mappings))
        if wrap is not None:
            self.wrap = oldwrap
        if parser is not None:
            self.parser = oldparser
        if prefix is not None:
            self.prefix = oldprefix

    def add(self, path, method_dict=None, prefix=None, **http_methods):
        """Add a mapping.

        HTTP methods can be specified in a dict or using kwargs,
        but kwargs will override if both are given.

        Prefix will override self.prefix for this mapping.
        """
        # Thanks to Sébastien Pierre
        # for suggesting that this accept keyword args.
        if method_dict is None:
            method_dict = {}
        if prefix is None:
            prefix = self.prefix
        method_dict = dict(method_dict)
        method_dict.update(http_methods)
        if self.wrap is not None:
            for meth, cbl in method_dict.items():
                method_dict[meth] = self.wrap(cbl)
        # BUGFIX: use the (possibly overridden) local prefix; the
        # original built the regex from self.prefix, silently ignoring
        # the prefix argument documented above.
        regex = self.parser(prefix + path)
        compiled_regex = re.compile(regex, re.DOTALL | re.MULTILINE)
        self.mappings.append((compiled_regex, method_dict))

    def __call__(self, environ, start_response):
        """Delegate request to the appropriate WSGI app."""
        app, svars, methods, matched = \
            self.select(environ['PATH_INFO'], environ['REQUEST_METHOD'])
        unnamed, named = [], {}
        for k, v in svars.items():
            # Positional captures were named __pos<N> by the parser.
            if k.startswith('__pos'):
                k = k[5:]
            named[k] = v
        environ['selector.vars'] = dict(named)
        # Copy the keys before popping so the dict is not mutated while
        # being iterated.
        for k in list(named.keys()):
            if k.isdigit():
                unnamed.append((k, named.pop(k)))
        unnamed.sort()
        unnamed = [v for k, v in unnamed]
        cur_unnamed, cur_named = environ.get('wsgiorg.routing_args', ([], {}))
        unnamed = cur_unnamed + unnamed
        named.update(cur_named)
        environ['wsgiorg.routing_args'] = unnamed, named
        environ['selector.methods'] = methods
        environ.setdefault('selector.matches', []).append(matched)
        if self.consume_path:
            # Shift the matched portion from PATH_INFO to SCRIPT_NAME.
            environ['SCRIPT_NAME'] = environ.get('SCRIPT_NAME', '') + matched
            environ['PATH_INFO'] = environ['PATH_INFO'][len(matched):]
        return app(environ, start_response)

    def select(self, path, method):
        """Figure out which app to delegate to or send 404 or 405.

        Returns (app, path_variables, allowed_methods, matched_text).
        """
        for regex, method_dict in self.mappings:
            match = regex.search(path)
            if match:
                methods = method_dict.keys()
                if method in method_dict:
                    return (method_dict[method],
                            match.groupdict(),
                            methods,
                            match.group(0))
                elif '_ANY_' in method_dict:
                    return (method_dict['_ANY_'],
                            match.groupdict(),
                            methods,
                            match.group(0))
                else:
                    return self.status405, {}, methods, ''
        return self.status404, {}, [], ''

    def slurp_file(self, the_file, prefix=None, parser=None, wrap=None):
        """Read mappings from a simple text file.

        == Format looks like this: ==

        {{{

        # Comments if first non-whitespace char on line is '#'
        # Blank lines are ignored

        /foo/{id}[/]
            GET somemodule:some_wsgi_app
            POST pak.subpak.mod:other_wsgi_app

        @prefix /myapp
        /path[/]
            GET module:app
            POST package.module:get_app('foo')
            PUT package.module:FooApp('hello', resolve('module.setting'))

        @parser :lambda x: x
        @prefix
        ^/spam/eggs[/]$
            GET mod:regex_mapped_app

        }}}

        @prefix and @parser directives take effect
        until the end of the file or until changed
        """
        if isinstance(the_file, str):
            the_file = open(the_file)
        oldprefix = self.prefix
        if prefix is not None:
            self.prefix = prefix
        oldparser = self.parser
        if parser is not None:
            self.parser = parser
        oldwrap = self.wrap
        # BUGFIX: the original guarded this on "parser is not None",
        # so a wrap override was only honoured when a parser override
        # was also given (and wrap=None could clobber self.wrap).
        if wrap is not None:
            self.wrap = wrap
        path = methods = None
        lineno = 0
        try:
            # accumulate methods (notice add in 2 places)
            for line in the_file:
                lineno += 1
                path, methods = self._parse_line(line, path, methods)
            if path and methods:
                self.add(path, methods)
        finally:
            the_file.close()
            self.wrap = oldwrap
            self.parser = oldparser
            self.prefix = oldprefix

    def _parse_line(self, line, path, methods):
        """Parse one line of a mapping file.

        This method is for the use of selector.slurp_file.
        """
        if not line.strip() or line.strip()[0] == '#':
            # Blank line or comment: nothing to do.
            pass
        elif line.strip()[0] == '@':
            # Directive line (@prefix / @parser / @wrap).
            if path and methods:
                self.add(path, methods)
            # NOTE(review): resetting path/methods to the directive line
            # looks like leftover code, but is kept: the empty methods
            # dict prevents a spurious add() later.
            path = line.strip()
            methods = {}
            parts = line.strip()[1:].split(' ', 1)
            if len(parts) == 2:
                directive, rest = parts
            else:
                directive = parts[0]
                rest = ''
            if directive == 'prefix':
                self.prefix = rest.strip()
            if directive == 'parser':
                self.parser = resolve(rest.strip())
            if directive == 'wrap':
                self.wrap = resolve(rest.strip())
        elif line and line[0] not in ' \t':
            # Unindented line: a new path expression starts here.
            if path and methods:
                self.add(path, methods)
            path = line.strip()
            methods = {}
        else:
            # Indented line: "METHOD app_spec" for the current path.
            meth, app = line.strip().split(' ', 1)
            methods[meth.strip()] = resolve(app)
        return path, methods
+
+
class SimpleParser(object):
    """Callable to turn path expressions into regexes with named groups.

    For instance "/hello/{name}" becomes r"^\/hello\/(?P<name>[^/^.]+)$"

    For /hello/{name:pattern}
    you get whatever is in self.patterns['pattern'] instead of "[^/^.]+"

    Optional portions of path expression can be expressed [like this]

    /hello/{name}[/] (can have trailing slash or not)

    Example:

    /blog/archive/{year:digits}/{month:digits}[/[{article}[/]]]

    This would catch any of these:

    /blog/archive/2005/09
    /blog/archive/2005/09/
    /blog/archive/2005/09/1
    /blog/archive/2005/09/1/

    (I am not suggesting that this example is a best practice.
    I would probably have a separate mapping for listing the month
    and retrieving an individual entry. It depends, though.)
    """

    start, end = '{}'
    ostart, oend = '[]'
    _patterns = {'word': r'\w+',
                 'alpha': r'[a-zA-Z]+',
                 'digits': r'\d+',
                 # BUGFIX: the dot must be escaped; the original
                 # r'\d*.?\d+' let '.' match any character (e.g. "3x14").
                 'number': r'\d*\.?\d+',
                 'chunk': r'[^/^.]+',
                 'segment': r'[^/]+',
                 'any': r'.+'}
    default_pattern = 'chunk'

    def __init__(self, patterns=None):
        """Initialize with character class mappings."""
        self.patterns = dict(self._patterns)
        if patterns is not None:
            self.patterns.update(patterns)

    def lookup(self, name):
        """Return the named-group regex for a {name} or {name:pattern}.

        Anonymous groups ({} or {:pattern}) are given sequential
        __pos<N> names; self._pos is reset by __call__.
        """
        if ':' in name:
            name, pattern = name.split(':')
            pattern = self.patterns[pattern]
        else:
            pattern = self.patterns[self.default_pattern]
        if name == '':
            name = '__pos%s' % self._pos
            self._pos += 1
        return '(?P<%s>%s)' % (name, pattern)

    def lastly(self, regex):
        """Process the result of __call__ right before it returns.

        Adds the ^ and the $ to the beginning and the end, respectively.
        """
        return "^%s$" % regex

    def openended(self, regex):
        """Process the result of __call__ right before it returns.

        Adds the ^ to the beginning but no $ to the end.
        Called as a special alternative to lastly.
        """
        return "^%s" % regex

    def outermost_optionals_split(self, text):
        """Split out optional portions by outermost matching delims."""
        parts = []
        buffer = ""
        starts = ends = 0
        for c in text:
            if c == self.ostart:
                if starts == 0:
                    parts.append(buffer)
                    buffer = ""
                else:
                    buffer += c
                starts += 1
            elif c == self.oend:
                ends += 1
                if starts == ends:
                    parts.append(buffer)
                    buffer = ""
                    starts = ends = 0
                else:
                    buffer += c
            else:
                buffer += c
        if not starts == ends == 0:
            raise PathExpressionParserError(
                "Mismatch of optional portion delimiters."
            )
        parts.append(buffer)
        return parts

    def parse(self, text):
        """Turn a path expression into a regex (without anchors).

        Uses list comprehensions rather than map() so the in-place
        slice assignments keep working where map() is lazy.
        """
        if self.ostart in text:
            # Odd-indexed parts are the optional [...] portions.
            parts = self.outermost_optionals_split(text)
            parts = [self.parse(part) for part in parts]
            parts[1::2] = ["(%s)?" % p for p in parts[1::2]]
        else:
            # Odd-indexed parts are the {name} group expressions.
            parts = [part.split(self.end)
                     for part in text.split(self.start)]
            parts = [y for x in parts for y in x]
            parts[::2] = [re.escape(part) for part in parts[::2]]
            parts[1::2] = [self.lookup(part) for part in parts[1::2]]
        return ''.join(parts)

    def __call__(self, url_pattern):
        """Turn a path expression into regex via parse and lastly."""
        self._pos = 0
        if url_pattern.endswith('|'):
            return self.openended(self.parse(url_pattern[:-1]))
        else:
            return self.lastly(self.parse(url_pattern))
+
+
class EnvironDispatcher(object):
    """Dispatch to the first app whose predicate accepts the environ."""

    def __init__(self, rules):
        """rules is a list of (predicate, wsgiapp) pairs."""
        self.rules = rules

    def __call__(self, environ, start_response):
        """Evaluate each predicate against environ in order and delegate
        to the app paired with the first true one.

        Falls through (returning None) when no predicate matches,
        matching the original behaviour.
        """
        for predicate, app in self.rules:
            if not predicate(environ):
                continue
            return app(environ, start_response)
+
+
class MiddlewareComposer(object):
    """Wrap an app in every middleware whose predicate accepts environ.

    Given rules [(t, a), (f, b), (t, c), (f, d), (t, e)] where t is
    always true and f always false, the composed app is a(c(e(app))).
    """

    def __init__(self, app, rules):
        """Instantiate with an app and a list of (predicate, middleware)
        rules."""
        self.app = app
        self.rules = rules

    def __call__(self, environ, start_response):
        """Apply each middleware whose predicate is true (in reverse
        rule order so the first rule ends up outermost), then call the
        composed app."""
        composed = self.app
        for predicate, middleware in reversed(self.rules):
            if predicate(environ):
                composed = middleware(composed)
        return composed(environ, start_response)
+
+
def expose(obj):
    """Mark obj as exposed for Naked-style dispatch (sets _exposed) and
    return it, so it can be used as a decorator."""
    obj._exposed = True
    return obj
+
+
class Naked(object):
    """Naked-object-style dispatch: the next PATH_INFO segment names the
    attribute (method) to call; an empty segment maps to 'index'."""

    _not_found = staticmethod(not_found)
    _expose_all = True
    _exposed = True

    def _is_exposed(self, obj):
        """Determine if obj should be exposed.

        True when everything is exposed (_expose_all) or when obj opts
        in via an _exposed attribute.
        """
        return self._expose_all or getattr(obj, '_exposed', False)

    def __call__(self, environ, start_response):
        """Dispatch to the method named by the next bit of PATH_INFO."""
        # Probe a throwaway copy first so PATH_INFO is only consumed
        # once we know we can actually dispatch.
        probe = dict(SCRIPT_NAME=environ['SCRIPT_NAME'],
                     PATH_INFO=environ['PATH_INFO'])
        name = shift_path_info(probe)
        target = getattr(self, name or 'index', None)
        if target is None or not self._is_exposed(target):
            return self._not_found(environ, start_response)
        shift_path_info(environ)
        return target(environ, start_response)
+
+
class ByMethod(object):
    """Dispatch to the method of self named by REQUEST_METHOD."""

    _method_not_allowed = staticmethod(method_not_allowed)

    def __call__(self, environ, start_response):
        """Record the public method names (for the 405 Allow header),
        then delegate based on REQUEST_METHOD."""
        environ['selector.methods'] = [name for name in dir(self)
                                       if not name.startswith('_')]
        handler = getattr(self,
                          environ['REQUEST_METHOD'],
                          self._method_not_allowed)
        return handler(environ, start_response)
+
+
def pliant(func):
    """Decorate an unbound WSGI callable so the positional and keyword
    arguments stored in environ['wsgiorg.routing_args'] are passed after
    (environ, start_response).

    @pliant
    def app(environ, start_response, arg1, arg2, foo='bar'):
        ...
    """
    def wsgi_func(environ, start_response):
        args, kwargs = environ.get('wsgiorg.routing_args', ([], {}))
        args = [environ, start_response] + list(args)
        # apply() has been deprecated since Python 2.3 (and removed in
        # Python 3); call the function directly instead.
        return func(*args, **dict(kwargs))
    return wsgi_func
+
+
def opliant(meth):
    """Like pliant, but for bound WSGI callables: self is passed first,
    then (environ, start_response), then wsgiorg.routing_args.

    class App(object):
        @opliant
        def __call__(self, environ, start_response, arg1, arg2, foo='bar'):
            ...
    """
    def wsgi_meth(self, environ, start_response):
        args, kwargs = environ.get('wsgiorg.routing_args', ([], {}))
        args = [self, environ, start_response] + list(args)
        # apply() is deprecated/removed; call the method directly.
        return meth(*args, **dict(kwargs))
    return wsgi_meth
+
diff --git a/translate-toolkit-1.3.0/translate/misc/sparse.py b/translate-toolkit-1.3.0/translate/misc/sparse.py
new file mode 100644
index 0000000..bf2ba04
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/sparse.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""simple parser / string tokenizer
+rather than returning a list of token types etc, we simple return a list of tokens...
+each tokenizing function takes a string as input and returns a list of tokens
+"""
+
+# Copyright 2002, 2003 St James Software
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
def stringeval(text):
    """Undo quote doubling and return the string represented by text.

    text must start and end with the same quote character (' or ");
    doubled occurrences of that character inside are collapsed to one.

    Raises ValueError when text is not a properly quoted string.
    """
    stringchar = text[0]
    if text[-1] != stringchar or stringchar not in ("'", '"'):
        # scratch your head
        # Call-form raise: valid in Python 2 and 3 (the original used
        # the Python-2-only statement form "raise ValueError, ...").
        raise ValueError("error parsing escaped string: %r" % text)
    return text[1:-1].replace(stringchar + stringchar, stringchar)
+
def stringquote(text):
    """Quote text with ' or ", doubling embedded quotes as necessary.

    Prefers single quotes; when the text contains a single quote it
    switches to double quotes, doubling any embedded double quotes.
    """
    if "'" not in text:
        return "'" + text + "'"
    if '"' not in text:
        return '"' + text + '"'
    return '"' + text.replace('"', '""') + '"'
+
class ParserError(ValueError):
    """ValueError that locates the offending token in the parsed source."""

    def __init__(self, parser, message, tokennum):
        """Build a message giving line/char position of token tokennum,
        and keep the parser and token number for later inspection."""
        tokenpos = parser.findtokenpos(tokennum)
        line, charpos = parser.getlinepos(tokenpos)
        detail = "%s at line %d, char %d (token %r)" % (
            message, line, charpos, parser.tokens[tokennum])
        ValueError.__init__(self, detail)
        self.parser = parser
        self.tokennum = tokennum
+
class SimpleParser:
    """A small configurable tokenizer.

    Rather than returning typed tokens it returns a flat list of token
    strings: quoted strings are kept intact, whitespace separates
    tokens, and known operator/punctuation tokens are split out.
    """

    def __init__(self, defaulttokenlist=None, whitespacechars=" \t\r\n", includewhitespacetokens=0):
        """Set up the tokenizer.

        defaulttokenlist: operator/punctuation tokens to split out (a
            default set of comparison/assignment operators and
            punctuation is used when None).
        whitespacechars: characters treated as token separators.
        includewhitespacetokens: when true, whitespace runs are kept as
            tokens instead of being discarded.
        """
        if defaulttokenlist is None:
            self.defaulttokenlist = ['<=', '>=', '==', '!=', '+=', '-=', '*=', '/=', '<>']
            self.defaulttokenlist.extend('(),[]:=+-')
        else:
            self.defaulttokenlist = defaulttokenlist
        self.whitespacechars = whitespacechars
        self.includewhitespacetokens = includewhitespacetokens
        # Order matters: strings are isolated first so the later passes
        # leave their contents untouched.
        self.standardtokenizers = [self.stringtokenize, self.removewhitespace, self.separatetokens]
        self.quotechars = ('"', "'")
        self.endquotechars = {'"': '"', "'": "'"}
        self.stringescaping = 1

    def stringtokenize(self, text):
        """Split text into tokens, keeping each quoted string whole."""
        tokens = []
        laststart = 0
        instring = 0
        endstringchar, escapechar = '', '\\'
        gotclose, gotescape = 0, 0
        for pos in range(len(text)):
            char = text[pos]
            if instring:
                if self.stringescaping and (gotescape or char == escapechar) and not gotclose:
                    gotescape = not gotescape
                elif char == endstringchar:
                    # Toggling handles doubled end quotes ('' inside '...').
                    gotclose = not gotclose
                elif gotclose:
                    # First character past the closing quote: emit the string.
                    tokens.append(text[laststart:pos])
                    instring, laststart, endstringchar = 0, pos, ''
            if not instring:
                if char in self.quotechars:
                    if pos > laststart:
                        tokens.append(text[laststart:pos])
                    instring, laststart, endstringchar, gotclose = 1, pos, self.endquotechars[char], 0
        if laststart < len(text):
            tokens.append(text[laststart:])
        return tokens

    def keeptogether(self, text):
        """checks whether a token should be kept together"""
        return self.isstringtoken(text)

    def isstringtoken(self, text):
        """checks whether a token is a string token"""
        return text[:1] in self.quotechars

    def separatetokens(self, text, tokenlist=None):
        """Split out tokens from tokenlist (default: defaulttokenlist),
        leaving string tokens untouched."""
        if self.keeptogether(text):
            return [text]
        if tokenlist is None:
            tokenlist = self.defaulttokenlist
        # loop through and put tokens into a list
        tokens = []
        pos = 0
        laststart = 0
        lentext = len(text)
        while pos < lentext:
            foundtoken = 0
            for token in tokenlist:
                lentoken = len(token)
                if text[pos:pos + lentoken] == token:
                    if laststart < pos:
                        tokens.append(text[laststart:pos])
                    tokens.append(token)
                    pos += lentoken
                    foundtoken, laststart = 1, pos
                    break
            if not foundtoken:
                pos += 1
        if laststart < lentext:
            tokens.append(text[laststart:])
        return tokens

    def removewhitespace(self, text):
        """Use whitespace to split text into tokens; the whitespace runs
        themselves are only kept when includewhitespacetokens is set."""
        if self.keeptogether(text):
            return [text]
        # loop through and put tokens into a list
        tokens = []
        pos = 0
        inwhitespace = 0
        laststart = 0
        for pos in range(len(text)):
            char = text[pos]
            if inwhitespace:
                if char not in self.whitespacechars:
                    if laststart < pos and self.includewhitespacetokens:
                        tokens.append(text[laststart:pos])
                    inwhitespace, laststart = 0, pos
            else:
                if char in self.whitespacechars:
                    if laststart < pos:
                        tokens.append(text[laststart:pos])
                    inwhitespace, laststart = 1, pos
        if laststart < len(text) and (not inwhitespace or self.includewhitespacetokens):
            tokens.append(text[laststart:])
        return tokens

    def applytokenizer(self, inputlist, tokenizer):
        """Apply tokenizer to each text in inputlist, flattening the
        results into a single list.

        BUGFIX: uses an explicit loop instead of map(joined.extend, ...)
        so the side effects also run where map() is lazy (Python 3).
        """
        joined = []
        for text in inputlist:
            joined.extend(tokenizer(text))
        return joined

    def applytokenizers(self, inputlist, tokenizers):
        """apply a set of tokenizers to a set of text, flattening each time"""
        for tokenizer in tokenizers:
            inputlist = self.applytokenizer(inputlist, tokenizer)
        return inputlist

    def tokenize(self, source, tokenizers=None):
        """Tokenize the source string (with the standard tokenizers by
        default), remembering source and tokens for error reporting."""
        self.source = source
        if tokenizers is None:
            tokenizers = self.standardtokenizers
        self.tokens = self.applytokenizers([self.source], tokenizers)
        return self.tokens

    def findtokenpos(self, tokennum):
        """Find the character position of the given token in the source,
        scanning forward so repeated tokens resolve correctly."""
        currenttokenpos = 0
        for currenttokennum in range(tokennum + 1):
            currenttokenpos = self.source.find(self.tokens[currenttokennum], currenttokenpos)
        return currenttokenpos

    def getlinepos(self, tokenpos):
        """Return the (1-based) line and character position of the given
        character offset in the source."""
        sourcecut = self.source[:tokenpos]
        line = sourcecut.count("\n") + 1
        charpos = tokenpos - sourcecut.rfind("\n")
        return line, charpos

    def raiseerror(self, message, tokennum):
        """Raise a ParserError locating tokennum in the source."""
        raise ParserError(self, message, tokennum)
+
+
diff --git a/translate-toolkit-1.3.0/translate/misc/stdiotell.py b/translate-toolkit-1.3.0/translate/misc/stdiotell.py
new file mode 100644
index 0000000..a41bda8
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/stdiotell.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2006 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""A wrapper for sys.stdout etc that provides tell() for current position"""
+
class StdIOWrapper:
    """Wrap a write-only stream (e.g. sys.stdout) so tell() reports the
    number of characters written; seeking is not supported."""

    def __init__(self, stream):
        self.stream = stream
        self.pos = 0          # characters written so far
        self.closed = 0

    def __getattr__(self, attrname, default=None):
        # Delegate everything we don't override to the wrapped stream.
        return getattr(self.stream, attrname, default)

    def close(self):
        """Close the underlying stream (idempotent)."""
        if not self.closed:
            self.closed = 1
            self.stream.close()

    def seek(self, pos, mode=0):
        # Seeking is never supported on this wrapper.
        # NOTE(review): the message mirrors the closed-file error even
        # when the stream is open -- kept for backward compatibility.
        # (Raise call form replaces the Python-2-only statement form.)
        raise ValueError("I/O operation on closed file")

    def tell(self):
        """Return the number of characters written so far."""
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return self.pos

    def write(self, s):
        """Write s to the stream and advance the tracked position."""
        if self.closed:
            raise ValueError("I/O operation on closed file")
        self.stream.write(s)
        self.pos += len(s)

    def writelines(self, lines):
        """Write all lines to the stream and advance the position by
        their combined length."""
        if self.closed:
            raise ValueError("I/O operation on closed file")
        self.stream.writelines(lines)
        self.pos += len("".join(lines))
+
diff --git a/translate-toolkit-1.3.0/translate/misc/test_autoencode.py b/translate-toolkit-1.3.0/translate/misc/test_autoencode.py
new file mode 100644
index 0000000..1a5308e
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/test_autoencode.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+from translate.misc import autoencode
+from py import test
+
class TestAutoencode:
    """Tests for autoencode.autoencode, a unicode subclass that carries
    its preferred byte encoding (Python 2 str/unicode semantics)."""
    type2test = autoencode.autoencode

    def test_default_encoding(self):
        """tests that conversion to string uses the encoding attribute"""
        s = self.type2test(u'unicode string', 'utf-8')
        assert s.encoding == 'utf-8'
        assert str(s) == 'unicode string'
        s = self.type2test(u'\u20ac')
        # The euro sign encodes to three UTF-8 bytes.
        assert str(self.type2test(u'\u20ac', 'utf-8')) == '\xe2\x82\xac'

    def test_uniqueness(self):
        """tests constructor creates unique objects"""
        s1 = unicode(u'unicode string')
        s2 = unicode(u'unicode string')
        assert s1 == s2
        # unicode() returns its argument unchanged when it is already a
        # plain unicode object, hence identity here.
        assert s1 is s2
        s1 = self.type2test(u'unicode string', 'utf-8')
        s2 = self.type2test(u'unicode string', 'ascii')
        s3 = self.type2test(u'unicode string', 'utf-8')
        assert s1 == s2 == s3
        assert s1 is not s2
        # even though all the attributes are the same, this is a mutable type
        # so the objects created must be different
        assert s1 is not s3

    def test_bad_encoding(self):
        """tests that we throw an exception if we don't know the encoding"""
        assert test.raises(ValueError, self.type2test, 'text', 'some-encoding')
diff --git a/translate-toolkit-1.3.0/translate/misc/test_dictutils.py b/translate-toolkit-1.3.0/translate/misc/test_dictutils.py
new file mode 100644
index 0000000..fa9b650
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/test_dictutils.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+from translate.misc import dictutils
+
+
def test_add():
    """Setting a key on an ordereddict must record it in .order."""
    odict = dictutils.ordereddict()
    odict[2] = 3
    assert len(odict.order) == 1
+
def test_delete():
    """Deleting a key must also remove it from .order."""
    odict = dictutils.ordereddict()
    odict[2] = 3
    del odict[2]
    assert len(odict.order) == 0
+
def test_pop():
    """pop() must return the value and drop the key from .order."""
    odict = dictutils.ordereddict()
    odict[2] = 3
    popped = odict.pop(2)
    assert popped == 3
    assert len(odict.order) == 0
diff --git a/translate-toolkit-1.3.0/translate/misc/test_multistring.py b/translate-toolkit-1.3.0/translate/misc/test_multistring.py
new file mode 100644
index 0000000..1b28b68
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/test_multistring.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+from translate.misc import multistring
+from translate.misc import test_autoencode
+
class TestMultistring(test_autoencode.TestAutoencode):
    """multistring must behave like autoencode and also expose the list
    of component strings via .strings."""
    type2test = multistring.multistring

    def test_constructor(self):
        """Both a single string and a list of strings construct; equality
        compares against the first component string."""
        cls = self.type2test
        single = cls("test")
        assert type(single) == cls
        assert single == "test"
        assert single.strings == ["test"]
        multi = cls(["test", "me"])
        assert type(multi) == cls
        assert multi == "test"
        assert multi.strings == ["test", "me"]
        assert multi != single

    def test_replace(self):
        """replace() must apply to every component string and keep the
        multistring type."""
        cls = self.type2test
        original = cls(["abcdef", "def"])

        outcome = original.replace("e", "")
        assert type(outcome) == cls
        assert outcome == cls(["abcdf", "df"])

        outcome = original.replace("e", "xx")
        assert outcome == cls(["abcdxxf", "dxxf"])

        outcome = original.replace("e", u"\xe9")
        assert outcome == cls([u"abcd\xe9f", u"d\xe9f"])

        outcome = original.replace("e", "\n")
        assert outcome == cls([u"abcd\nf", u"d\nf"])

        outcome = outcome.replace("\n", "\\n")
        assert outcome == cls([u"abcd\\nf", u"d\\nf"])

        outcome = outcome.replace("\\n", "\n")
        assert outcome == cls([u"abcd\nf", u"d\nf"])
diff --git a/translate-toolkit-1.3.0/translate/misc/test_optrecurse.py b/translate-toolkit-1.3.0/translate/misc/test_optrecurse.py
new file mode 100644
index 0000000..05b7e87
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/test_optrecurse.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+from translate.misc import optrecurse
+import os
+
+class TestRecursiveOptionParser():
+
+ def __init__(self):
+ self.parser = optrecurse.RecursiveOptionParser({"txt":("po", None)})
+
+ def test_splitext(self):
+ """test the L{optrecurse.splitext} function"""
+ name = "name"
+ extension = "ext"
+ filename = name + os.extsep + extension
+ dirname = os.path.join("some", "path", "to")
+ fullpath = os.path.join(dirname, filename)
+ root = os.path.join(dirname, name)
+ print fullpath
+ assert self.parser.splitext(fullpath) == (root, extension)
diff --git a/translate-toolkit-1.3.0/translate/misc/test_quote.py b/translate-toolkit-1.3.0/translate/misc/test_quote.py
new file mode 100644
index 0000000..de8add8
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/test_quote.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from translate.misc import quote
+
+def test_find_all():
+ """tests the find_all function"""
+ assert quote.find_all("", "a") == []
+ assert quote.find_all("a", "b") == []
+ assert quote.find_all("a", "a") == [0]
+ assert quote.find_all("aa", "a") == [0, 1]
+ assert quote.find_all("abba", "ba") == [2]
+ # check we skip the whole instance
+ assert quote.find_all("banana", "ana") == [1]
+
+def test_extract():
+ """tests the extract function"""
+ assert quote.extract("the <quoted> part", "<", ">", "\\", 0) == ("<quoted>", False)
+ assert quote.extract("the 'quoted' part", "'", "'", "\\", 0) == ("'quoted'", False)
+ assert quote.extract("the 'isn\\'t escaping fun' part", "'", "'", "\\", 0) == ("'isn\\'t escaping fun'", False)
+ assert quote.extract("the 'isn\\'t something ", "'", "'", "\\", 0) == ("'isn\\'t something ", True)
+ assert quote.extract("<quoted>\\", "<", ">", "\\", 0) == ("<quoted>", False)
+ assert quote.extract("<quoted><again>", "<", ">", "\\", 0) == ("<quoted><again>", False)
+ assert quote.extract("<quoted>\\\\<again>", "<", ">", "\\", 0) == ("<quoted><again>", False)
+ assert quote.extract("<quoted\\>", "<", ">", "\\", 0) == ("<quoted\\>", True)
+ assert quote.extract(' -->\n<!ENTITY blah "Some">', "<!--", "-->", None, 1) == (" -->", False)
+ assert quote.extract('">\n', '"', '"', None, True) == ('"', False)
+
+def test_extractwithoutquotes():
+ """tests the extractwithoutquotes function"""
+ assert quote.extractwithoutquotes("the <quoted> part", "<", ">", "\\", 0) == ("quoted", False)
+ assert quote.extractwithoutquotes("the 'quoted' part", "'", "'", "\\", 0) == ("quoted", False)
+ assert quote.extractwithoutquotes("the 'isn\\'t escaping fun' part", "'", "'", "\\", 0) == ("isn\\'t escaping fun", False)
+ assert quote.extractwithoutquotes("the 'isn\\'t something ", "'", "'", "\\", 0) == ("isn\\'t something ", True)
+ assert quote.extractwithoutquotes("<quoted>\\", "<", ">", "\\", 0) == ("quoted", False)
+ assert quote.extractwithoutquotes("<quoted>\\\\<again>", "<", ">", "\\", 0) == ("quotedagain", False)
+ assert quote.extractwithoutquotes("<quoted><again\\\\", "<", ">", "\\", 0, True) == ("quotedagain\\\\", True)
+ # don't include escapes...
+ assert quote.extractwithoutquotes("the 'isn\\'t escaping fun' part", "'", "'", "\\", 0, False) == ("isn't escaping fun", False)
+ assert quote.extractwithoutquotes("the 'isn\\'t something ", "'", "'", "\\", 0, False) == ("isn't something ", True)
+ assert quote.extractwithoutquotes("<quoted\\", "<", ">", "\\", 0, False) == ("quoted", True)
+ assert quote.extractwithoutquotes("<quoted><again\\\\", "<", ">", "\\", 0, False) == ("quotedagain\\", True)
+ # escaping of quote char
+ assert quote.extractwithoutquotes("<quoted\\>", "<", ">", "\\", 0, False) == ("quoted>", True)
+
+def isnewlineortabescape(escape):
+ if escape == "\\n" or escape == "\\t":
+ return escape
+ return escape[-1]
+
+def test_extractwithoutquotes_passfunc():
+ """tests the extractwithoutquotes function with a function for includeescapes as a parameter"""
+ assert quote.extractwithoutquotes("<test \\r \\n \\t \\\\>", "<", ">", "\\", 0, isnewlineortabescape) == ("test r \\n \\t \\", False)
+
+class TestQuote:
+
+ def test_mozilla_control_escapes(self):
+ """test that we do \uNNNN escapes for certain control characters instead of converting to UTF-8 characters"""
+ prefix, suffix = "bling", "blang"
+ for control in (u"\u0005", u"\u0006", u"\u0007", u"\u0011"):
+ string = prefix + control + suffix
+ assert quote.escapecontrols(string) == string
+
+ def test_quote_wrapping(self):
+ """test that we can wrap strings in double quotes"""
+ string = 'A string'
+ assert quote.quotestr(string) == '"A string"'
+ list = ['One', 'Two']
+ assert quote.quotestr(list) == '"One"\n"Two"'
+
+ def test_htmlencoding(self):
+ """test that we can encode and decode HTML entities"""
+ raw_encoded = [(u"€", "&euro;"), (u"©", "&copy;"), (u'"', "&quot;")]
+ for raw, encoded in raw_encoded:
+ assert quote.htmlentityencode(raw) == encoded
+ assert quote.htmlentitydecode(encoded) == raw
+
diff --git a/translate-toolkit-1.3.0/translate/misc/test_zipfileext.py b/translate-toolkit-1.3.0/translate/misc/test_zipfileext.py
new file mode 100755
index 0000000..193b650
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/test_zipfileext.py
@@ -0,0 +1,160 @@
+import zlib # implied prerequisite
+import zipfile, os, StringIO, tempfile
+try:
+ from test.test_support import TestFailed
+except ImportError:
+ class TestFailed(Exception):
+ pass
+from translate.misc import zipfileext
+
+BrokenStringIO = StringIO.StringIO
+class FixedStringIO(BrokenStringIO):
+ def truncate(self, size=None):
+ BrokenStringIO.truncate(self, size)
+ self.len = len(self.buf)
+
+StringIO.StringIO = FixedStringIO
+
+def zipTest(srcname, f, compression, srccontents):
+ zip = zipfileext.ZipFileExt(f, "w", compression) # Create the ZIP archive
+ zip.write(srcname, "another"+os.extsep+"name")
+ zip.write(srcname, srcname)
+ zip.close()
+
+ zip = zipfileext.ZipFileExt(f, "r", compression) # Read the ZIP archive
+ readData2 = zip.read(srcname)
+ readData1 = zip.read("another"+os.extsep+"name")
+ zip.close()
+
+ if readData1 != srccontents or readData2 != srccontents:
+ raise TestFailed, "Written data doesn't equal read data."
+
+def deleteTest(srcname, f, compression, srccontents):
+ zip = zipfileext.ZipFileExt(f, "w", compression) # Create the ZIP archive
+ othername = "another"+os.extsep+"name"
+ finalname = "adifferent"+os.extsep+"name"
+ leftname, deletenames = othername, [srcname, finalname]
+ zip.write(srcname, srcname)
+ zip.write(srcname, othername)
+ zip.write(srcname, finalname)
+ zip.close()
+
+ zip = zipfileext.ZipFileExt(f, "a", compression) # Modify the ZIP archive
+ for deletename in deletenames:
+ zip.delete(deletename)
+ zip.close()
+
+ zip = zipfileext.ZipFileExt(f, "r", compression) # Read the ZIP archive
+ testfailed = zip.testzip()
+ readData = zip.read(leftname)
+ zip.close()
+
+ if testfailed:
+ raise TestFailed, "zip file didn't pass test"
+ if readData != srccontents:
+ raise TestFailed, "Written data doesn't equal read data."
+
+class TestZipfile:
+
+ def setup_method(self, method):
+ print repr(method), dir(method)
+ self.srcname = self.__class__.__name__ + "_" + method.__name__ + os.extsep + "tmp"
+ self.zipname = self.__class__.__name__ + "_" + method.__name__ + os.extsep + "zip"
+ if os.path.exists(self.srcname):
+ os.remove(self.srcname)
+ if os.path.exists(self.zipname):
+ os.remove(self.zipname)
+
+ def teardown_method(self, method):
+ if os.path.exists(self.srcname): # Remove temporary files
+ os.unlink(self.srcname)
+ if os.path.exists(self.zipname):
+ os.unlink(self.zipname)
+
+ def test_consistent(self):
+ fp = open(self.srcname, "wb") # Make a source file with some lines
+ for i in range(0, 1000):
+ fp.write("Test of zipfile line %d.\n" % i)
+ fp.close()
+
+ fp = open(self.srcname, "rb")
+ writtenData = fp.read()
+ fp.close()
+
+ for file in (self.zipname, tempfile.TemporaryFile(), StringIO.StringIO()):
+ zipTest(self.srcname, file, zipfile.ZIP_STORED, writtenData)
+
+ for file in (self.zipname, tempfile.TemporaryFile(), StringIO.StringIO()):
+ zipTest(self.srcname, file, zipfile.ZIP_DEFLATED, writtenData)
+
+ def test_delete(self):
+ fp = open(self.srcname, "wb") # Make a source file with some lines
+ for i in range(0, 1000):
+ fp.write("Test of zipfile line %d.\n" % i)
+ fp.close()
+
+ fp = open(self.srcname, "rb")
+ writtenData = fp.read()
+ fp.close()
+
+ for file in (self.zipname, tempfile.TemporaryFile(), StringIO.StringIO()):
+ deleteTest(self.srcname, file, zipfile.ZIP_STORED, writtenData)
+
+ for file in (self.zipname, tempfile.TemporaryFile(), StringIO.StringIO()):
+ deleteTest(self.srcname, file, zipfile.ZIP_DEFLATED, writtenData)
+
+ def test_closes(self):
+ # This test checks that the ZipFile constructor closes the file object
+ # it opens if there's an error in the file. If it doesn't, the traceback
+ # holds a reference to the ZipFile object and, indirectly, the file object.
+ # On Windows, this causes the os.unlink() call to fail because the
+ # underlying file is still open. This is SF bug #412214.
+ #
+ fp = open(self.srcname, "w")
+ fp.write("this is not a legal zip file\n")
+ fp.close()
+ try:
+ zf = zipfileext.ZipFileExt(self.srcname)
+ except zipfile.BadZipfile:
+ os.unlink(self.srcname)
+
+ def test_403871(self):
+ # make sure we don't raise an AttributeError when a partially-constructed
+ # ZipFile instance is finalized; this tests for regression on SF tracker
+ # bug #403871.
+ try:
+ zipfileext.ZipFileExt(self.srcname)
+ except IOError:
+ # The bug we're testing for caused an AttributeError to be raised
+ # when a ZipFile instance was created for a file that did not
+ # exist; the .fp member was not initialized but was needed by the
+ # __del__() method. Since the AttributeError is in the __del__(),
+ # it is ignored, but the user should be sufficiently annoyed by
+ # the message on the output that regression will be noticed
+ # quickly.
+ pass
+ else:
+ raise TestFailed("expected creation of readable ZipFile without\n"
+ " a file to raise an IOError.")
+
+ def test_closedthrow(self):
+ # Verify that testzip() doesn't swallow inappropriate exceptions.
+ data = StringIO.StringIO()
+ zipf = zipfileext.ZipFileExt(data, mode="w")
+ zipf.writestr("foo.txt", "O, for a Muse of Fire!")
+ zipf.close()
+ zipf = zipfileext.ZipFileExt(data, mode="r")
+ zipf.close()
+ try:
+ zipf.testzip()
+ except RuntimeError:
+ # This is correct; calling .read on a closed ZipFile should throw
+ # a RuntimeError, and so should calling .testzip. An earlier
+ # version of .testzip would swallow this exception (and any other)
+ # and report that the first file in the archive was corrupt.
+ pass
+ else:
+ raise TestFailed("expected calling .testzip on a closed ZipFile"
+ " to raise a RuntimeError")
+ del data, zipf
+
diff --git a/translate-toolkit-1.3.0/translate/misc/textwrap.py b/translate-toolkit-1.3.0/translate/misc/textwrap.py
new file mode 100644
index 0000000..43e90dd
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/textwrap.py
@@ -0,0 +1,380 @@
+# -*- coding: utf-8 -*-
+"""Text wrapping and filling.
+"""
+
+# Copyright (C) 1999-2001 Gregory P. Ward.
+# Copyright (C) 2002, 2003 Python Software Foundation.
+# Written by Greg Ward <gward@python.net>
+
+__revision__ = "$Id: textwrap.py 9228 2008-12-13 04:50:49Z friedelwolff $"
+
+import string, re
+
+# Do the right thing with boolean values for all known Python versions
+# (so this module can be copied to projects that don't depend on Python
+# 2.3, e.g. Optik and Docutils).
+try:
+ True, False
+except NameError:
+ (True, False) = (1, 0)
+
+__all__ = ['TextWrapper', 'wrap', 'fill']
+
+# Hardcode the recognized whitespace characters to the US-ASCII
+# whitespace characters. The main reason for doing this is that in
+# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
+# that character winds up in string.whitespace. Respecting
+# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
+# same as any other whitespace char, which is clearly wrong (it's a
+# *non-breaking* space), 2) possibly cause problems with Unicode,
+# since 0xa0 is not in range(128).
+_whitespace = '\t\n\x0b\x0c\r '
+
+class TextWrapper:
+ """
+ Object for wrapping/filling text. The public interface consists of
+ the wrap() and fill() methods; the other methods are just there for
+ subclasses to override in order to tweak the default behaviour.
+ If you want to completely replace the main wrapping algorithm,
+ you'll probably have to override _wrap_chunks().
+
+ Several instance attributes control various aspects of wrapping:
+ width (default: 70)
+ the maximum width of wrapped lines (unless break_long_words
+ is false)
+ initial_indent (default: "")
+ string that will be prepended to the first line of wrapped
+ output. Counts towards the line's width.
+ subsequent_indent (default: "")
+ string that will be prepended to all lines save the first
+ of wrapped output; also counts towards each line's width.
+ expand_tabs (default: true)
+ Expand tabs in input text to spaces before further processing.
+ Each tab will become 1 .. 8 spaces, depending on its position in
+ its line. If false, each tab is treated as a single character.
+ drop_whitespace (default: true)
+ Drop leading and trailing whitespace from lines.
+ replace_whitespace (default: true)
+ Replace all whitespace characters in the input text by spaces
+ after tab expansion. Note that if expand_tabs is false and
+ replace_whitespace is true, every tab will be converted to a
+ single space!
+ fix_sentence_endings (default: false)
+ Ensure that sentence-ending punctuation is always followed
+ by two spaces. Off by default because the algorithm is
+ (unavoidably) imperfect.
+ break_long_words (default: true)
+ Break words longer than 'width'. If false, those words will not
+ be broken, and some lines might be longer than 'width'.
+ """
+
+ whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
+
+ unicode_whitespace_trans = {}
+ uspace = ord(u' ')
+ for x in map(ord, _whitespace):
+ unicode_whitespace_trans[x] = uspace
+
+ # This funky little regex is just the trick for splitting
+ # text up into word-wrappable chunks. E.g.
+ # "Hello there -- you goof-ball, use the -b option!"
+ # splits into
+ # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
+ # (after stripping out empty strings).
+ wordsep_re = re.compile(
+ r'(\s+|' # any whitespace
+ r'%|' # gettext handles % like whitespace
+ r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
+ r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
+
+ # XXX this is not locale- or charset-aware -- string.lowercase
+ # is US-ASCII only (and therefore English-only)
+ sentence_end_re = re.compile(r'[%s]' # lowercase letter
+ r'[\.\!\?]' # sentence-ending punct.
+ r'[\"\']?' # optional end-of-quote
+ % string.lowercase)
+
+
+ def __init__(self,
+ width=70,
+ initial_indent="",
+ subsequent_indent="",
+ expand_tabs=True,
+ drop_whitespace=True,
+ replace_whitespace=True,
+ fix_sentence_endings=False,
+ break_long_words=True):
+ self.width = width
+ self.initial_indent = initial_indent
+ self.subsequent_indent = subsequent_indent
+ self.expand_tabs = expand_tabs
+ self.drop_whitespace = drop_whitespace
+ self.replace_whitespace = replace_whitespace
+ self.fix_sentence_endings = fix_sentence_endings
+ self.break_long_words = break_long_words
+
+
+ # -- Private methods -----------------------------------------------
+ # (possibly useful for subclasses to override)
+
+ def _munge_whitespace(self, text):
+ """_munge_whitespace(text : string) -> string
+
+ Munge whitespace in text: expand tabs and convert all other
+ whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
+ becomes " foo bar baz".
+ """
+ if self.expand_tabs:
+ text = text.expandtabs()
+ if self.replace_whitespace:
+ if isinstance(text, str):
+ text = text.translate(self.whitespace_trans)
+ elif isinstance(text, unicode):
+ text = text.translate(self.unicode_whitespace_trans)
+ return text
+
+
+ def _split(self, text):
+ """_split(text : string) -> [string]
+
+ Split the text to wrap into indivisible chunks. Chunks are
+ not quite the same as words; see _wrap_chunks() for full
+ details. As an example, the text
+ Look, goof-ball -- use the -b option!
+ breaks into the following chunks:
+ 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
+ 'use', ' ', 'the', ' ', '-b', ' ', 'option!'
+ """
+ chunks = self.wordsep_re.split(text)
+ chunks = filter(None, chunks)
+ return chunks
+
+ def _fix_sentence_endings(self, chunks):
+ """_fix_sentence_endings(chunks : [string])
+
+ Correct for sentence endings buried in 'chunks'. Eg. when the
+ original text contains "... foo.\nBar ...", munge_whitespace()
+ and split() will convert that to [..., "foo.", " ", "Bar", ...]
+ which has one too few spaces; this method simply changes the one
+ space to two.
+ """
+ i = 0
+ pat = self.sentence_end_re
+ while i < len(chunks)-1:
+ if chunks[i+1] == " " and pat.search(chunks[i]):
+ chunks[i+1] = "  "
+ i += 2
+ else:
+ i += 1
+
+ def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+ """_handle_long_word(chunks : [string],
+ cur_line : [string],
+ cur_len : int, width : int)
+
+ Handle a chunk of text (most likely a word, not whitespace) that
+ is too long to fit in any line.
+ """
+ space_left = max(width - cur_len, 1)
+
+ # If we're allowed to break long words, then do so: put as much
+ # of the next chunk onto the current line as will fit.
+ if self.break_long_words:
+ cur_line.append(reversed_chunks[-1][:space_left])
+ reversed_chunks[-1] = reversed_chunks[-1][space_left:]
+
+ # Otherwise, we have to preserve the long word intact. Only add
+ # it to the current line if there's nothing already there --
+ # that minimizes how much we violate the width constraint.
+ elif not cur_line:
+ cur_line.append(reversed_chunks.pop())
+
+ # If we're not allowed to break long words, and there's already
+ # text on the current line, do nothing. Next time through the
+ # main loop of _wrap_chunks(), we'll wind up here again, but
+ # cur_len will be zero, so the next line will be entirely
+ # devoted to the long word that we can't handle right now.
+
+ def _wrap_chunks(self, chunks):
+ """_wrap_chunks(chunks : [string]) -> [string]
+
+ Wrap a sequence of text chunks and return a list of lines of
+ length 'self.width' or less. (If 'break_long_words' is false,
+ some lines may be longer than this.) Chunks correspond roughly
+ to words and the whitespace between them: each chunk is
+ indivisible (modulo 'break_long_words'), but a line break can
+ come between any two chunks. Chunks should not have internal
+ whitespace; ie. a chunk is either all whitespace or a "word".
+ Whitespace chunks will be removed from the beginning and end of
+ lines, but apart from that whitespace is preserved.
+ """
+ lines = []
+ if self.width <= 0:
+ raise ValueError("invalid width %r (must be > 0)" % self.width)
+
+ # Arrange in reverse order so items can be efficiently popped
+ # from a stack of chunks.
+ chunks.reverse()
+
+ while chunks:
+
+ # Start the list of chunks that will make up the current line.
+ # cur_len is just the length of all the chunks in cur_line.
+ cur_line = []
+ cur_len = 0
+
+ # Figure out which static string will prefix this line.
+ if lines:
+ indent = self.subsequent_indent
+ else:
+ indent = self.initial_indent
+
+ # Maximum width for this line.
+ width = self.width - len(indent)
+
+ # First chunk on line is whitespace -- drop it, unless this
+ # is the very beginning of the text (ie. no lines started yet).
+ if self.drop_whitespace and chunks[-1].strip() == '' and lines:
+ del chunks[-1]
+
+ while chunks:
+ l = len(chunks[-1])
+
+ # Can at least squeeze this chunk onto the current line.
+ if cur_len + l <= width:
+ cur_line.append(chunks.pop())
+ cur_len += l
+
+ # Nope, this line is full.
+ else:
+ break
+
+ # The current line is full, and the next chunk is too big to
+ # fit on *any* line (not just this one).
+ if chunks and len(chunks[-1]) > width:
+ self._handle_long_word(chunks, cur_line, cur_len, width)
+
+ # If the last chunk on this line is all whitespace, drop it.
+ if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
+ del cur_line[-1]
+
+ # Convert current line back to a string and store it in list
+ # of all lines (return value).
+ if cur_line:
+ lines.append(indent + ''.join(cur_line))
+
+ return lines
+
+
+ # -- Public interface ----------------------------------------------
+
+ def wrap(self, text):
+ """wrap(text : string) -> [string]
+
+ Reformat the single paragraph in 'text' so it fits in lines of
+ no more than 'self.width' columns, and return a list of wrapped
+ lines. Tabs in 'text' are expanded with string.expandtabs(),
+ and all other whitespace characters (including newline) are
+ converted to space.
+ """
+ text = self._munge_whitespace(text)
+ chunks = self._split(text)
+ if self.fix_sentence_endings:
+ self._fix_sentence_endings(chunks)
+ return self._wrap_chunks(chunks)
+
+ def fill(self, text):
+ """fill(text : string) -> string
+
+ Reformat the single paragraph in 'text' to fit in lines of no
+ more than 'self.width' columns, and return a new string
+ containing the entire wrapped paragraph.
+ """
+ return "\n".join(self.wrap(text))
+
+
+# -- Convenience interface ---------------------------------------------
+
+def wrap(text, width=70, **kwargs):
+ """Wrap a single paragraph of text, returning a list of wrapped lines.
+
+ Reformat the single paragraph in 'text' so it fits in lines of no
+ more than 'width' columns, and return a list of wrapped lines. By
+ default, tabs in 'text' are expanded with string.expandtabs(), and
+ all other whitespace characters (including newline) are converted to
+ space. See TextWrapper class for available keyword args to customize
+ wrapping behaviour.
+ """
+ w = TextWrapper(width=width, **kwargs)
+ return w.wrap(text)
+
+def fill(text, width=70, **kwargs):
+ """Fill a single paragraph of text, returning a new string.
+
+ Reformat the single paragraph in 'text' to fit in lines of no more
+ than 'width' columns, and return a new string containing the entire
+ wrapped paragraph. As with wrap(), tabs are expanded and other
+ whitespace characters converted to space. See TextWrapper class for
+ available keyword args to customize wrapping behaviour.
+ """
+ w = TextWrapper(width=width, **kwargs)
+ return w.fill(text)
+
+
+# -- Loosely related functionality -------------------------------------
+
+_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
+_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
+
+def dedent(text):
+ """Remove any common leading whitespace from every line in `text`.
+
+ This can be used to make triple-quoted strings line up with the left
+ edge of the display, while still presenting them in the source code
+ in indented form.
+
+ Note that tabs and spaces are both treated as whitespace, but they
+ are not equal: the lines " hello" and "\thello" are
+ considered to have no common leading whitespace. (This behaviour is
+ new in Python 2.5; older versions of this module incorrectly
+ expanded tabs before searching for common leading whitespace.)
+ """
+ # Look for the longest leading string of spaces and tabs common to
+ # all lines.
+ margin = None
+ text = _whitespace_only_re.sub('', text)
+ indents = _leading_whitespace_re.findall(text)
+ for indent in indents:
+ if margin is None:
+ margin = indent
+
+ # Current line more deeply indented than previous winner:
+ # no change (previous winner is still on top).
+ elif indent.startswith(margin):
+ pass
+
+ # Current line consistent with and no deeper than previous winner:
+ # it's the new winner.
+ elif margin.startswith(indent):
+ margin = indent
+
+ # Current line and previous winner have no common whitespace:
+ # there is no margin.
+ else:
+ margin = ""
+ break
+
+ # sanity check (testing/debugging only)
+ if 0 and margin:
+ for line in text.split("\n"):
+ assert not line or line.startswith(margin), \
+ "line = %r, margin = %r" % (line, margin)
+
+ if margin:
+ text = re.sub(r'(?m)^' + margin, '', text)
+ return text
+
+if __name__ == "__main__":
+ #print dedent("\tfoo\n\tbar")
+ #print dedent(" \thello there\n \t how are you?")
+ print dedent("Hello there.\n This is indented.")
diff --git a/translate-toolkit-1.3.0/translate/misc/typecheck/__init__.py b/translate-toolkit-1.3.0/translate/misc/typecheck/__init__.py
new file mode 100644
index 0000000..334ace0
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/typecheck/__init__.py
@@ -0,0 +1,1559 @@
+__all__ = ['accepts', 'returns', 'yields', 'TypeCheckError', 'Length', 'Empty'
+ ,'TypeSignatureError', 'And', 'Any', 'Class', 'Exact', 'HasAttr'
+ ,'IsAllOf', 'IsCallable', 'IsIterable', 'IsNoneOf', 'IsOneOf'
+ ,'IsOnlyOneOf', 'Not', 'Or', 'Self', 'Xor', 'YieldSeq'
+ ,'register_type', 'is_registered_type', 'unregister_type'
+ ,'Function']
+
+import inspect
+import types
+
+from types import GeneratorType, FunctionType, MethodType, ClassType, TypeType
+
+# Controls whether typechecking is on (True) or off (False)
+enable_checking = True
+
+# Pretty little wrapper function around __typecheck__
+def check_type(type, func, val):
+ type.__typecheck__(func, val)
+
+### Internal exception classes (these MUST NOT get out to the user)
+### typecheck_{args,return,yield} should catch these and convert them to
+### appropriate Type{Check,Signature}Error instances
+
+# We can't inherit from object because raise doesn't like new-style classes
+# We can't use super() because we can't inherit from object
+class _TC_Exception(Exception):
+ def error_message(self):
+ raise NotImplementedError("Incomplete _TC_Exception subclass (%s)" % str(self.__class__))
+
+ def format_bad_object(self, bad_object):
+ return ("for %s, " % str(bad_object), self)
+
+class _TC_LengthError(_TC_Exception):
+ def __init__(self, wrong, right=None):
+ _TC_Exception.__init__(self)
+
+ self.wrong = wrong
+ self.right = right
+
+ def error_message(self):
+ m = None
+ if self.right is not None:
+ m = ", expected %d" % self.right
+ return "length was %d%s" % (self.wrong, m or "")
+
+class _TC_TypeError(_TC_Exception):
+ def __init__(self, wrong, right):
+ _TC_Exception.__init__(self)
+
+ self.wrong = calculate_type(wrong)
+ self.right = right
+
+ def error_message(self):
+ return "expected %s, got %s" % (self.right, self.wrong)
+
+class _TC_NestedError(_TC_Exception):
+ def __init__(self, inner_exception):
+ self.inner = inner_exception
+
+ def error_message(self):
+ try:
+ return ", " + self.inner.error_message()
+ except:
+ print "'%s'" % self.inner.message
+ raw_input()
+ raise
+
+class _TC_IndexError(_TC_NestedError):
+ def __init__(self, index, inner_exception):
+ _TC_NestedError.__init__(self, inner_exception)
+
+ self.index = index
+
+ def error_message(self):
+ return ("at index %d" % self.index) + _TC_NestedError.error_message(self)
+
+# _TC_DictError exists as a wrapper around dict-related exceptions.
+# It provides a single place to sort the bad dictionary's keys in the error
+# message.
+class _TC_DictError(_TC_NestedError):
+ def format_bad_object(self, bad_object):
+ message = "for {%s}, " % ', '.join(["%s: %s" % (repr(k), repr(bad_object[k])) for k in sorted(bad_object.keys())])
+
+ if not isinstance(self.inner, _TC_LengthError):
+ return (message, self)
+ return (message, self.inner)
+
+ def error_message(self):
+ raise NotImplementedError("Incomplete _TC_DictError subclass: " + str(self.__class__))
+
+class _TC_KeyError(_TC_DictError):
+ def __init__(self, key, inner_exception):
+ _TC_NestedError.__init__(self, inner_exception)
+
+ self.key = key
+
+ def error_message(self):
+ return ("for key %s" % repr(self.key)) + _TC_NestedError.error_message(self)
+
+class _TC_KeyValError(_TC_KeyError):
+ def __init__(self, key, val, inner_exception):
+ _TC_KeyError.__init__(self, key, inner_exception)
+
+ self.val = val
+
+ def error_message(self):
+ return ("at key %s, value %s" % (repr(self.key), repr(self.val))) + _TC_NestedError.error_message(self)
+
+class _TC_GeneratorError(_TC_NestedError):
+ def __init__(self, yield_no, inner_exception):
+ _TC_NestedError.__init__(self, inner_exception)
+
+ self.yield_no = yield_no
+
+ def error_message(self):
+ raise RuntimeError("_TC_GeneratorError.message should never be called")
+
+ def format_bad_object(self, bad_object):
+ bad_obj, start_message = self.inner.format_bad_object(bad_object)
+ message = "At yield #%d: %s" % (self.yield_no, bad_obj)
+ return (message, start_message)
+
+### These next three exceptions exist to give HasAttr better error messages
+class _TC_AttrException(_TC_Exception):
+ def __init__(self, attr):
+ _TC_Exception.__init__(self, attr)
+
+ self.attr = attr
+
+class _TC_AttrError(_TC_AttrException, _TC_NestedError):
+ def __init__(self, attr, inner_exception):
+ _TC_AttrException.__init__(self, attr)
+ _TC_NestedError.__init__(self, inner_exception)
+
+ def error_message(self):
+ return ("as for attribute %s" % self.attr) + _TC_NestedError.error_message(self)
+
+class _TC_MissingAttrError(_TC_AttrException):
+ def error_message(self):
+ return "missing attribute %s" % self.attr
+
+# This is like _TC_LengthError for YieldSeq
+class _TC_YieldCountError(_TC_Exception):
+ def __init__(self, expected):
+ _TC_Exception.__init__(self, expected)
+
+ self.expected = expected
+
+ def format_bad_object(self, bad_object):
+ return ("", self)
+
+ def error_message(self):
+ plural = "s"
+ if self.expected == 1:
+ plural = ""
+
+ return "only expected the generator to yield %d time%s" % (self.expected, plural)
+
+# This exists to provide more detailed error messages about why a given
+# Xor() assertion failed
+class _TC_XorError(_TC_NestedError):
+ def __init__(self, matched_conds, inner_exception):
+ assert matched_conds in (0, 2)
+ assert isinstance(inner_exception, _TC_TypeError)
+
+ _TC_Exception.__init__(self, matched_conds, inner_exception)
+ _TC_NestedError.__init__(self, inner_exception)
+ self.matched_conds = matched_conds
+
+ def error_message(self):
+ if self.matched_conds == 0:
+ m = "neither assertion"
+ else:
+ m = "both assertions"
+
+ return _TC_NestedError.error_message(self) + " (matched %s)" % m
+
+class _TC_FunctionError(_TC_Exception):
+ def __init__(self, checking_func, obj):
+ self.checking_func = checking_func
+ self.rejected_obj = obj
+
+ def error_message(self):
+ return " was rejected by %s" % self.checking_func
+
+ def format_bad_object(self, bad_object):
+ return (str(bad_object), self)
+
+class _TC_ExactError(_TC_Exception):
+ def __init__(self, wrong, right):
+ self.wrong = wrong
+ self.right = right
+
+ def error_message(self):
+ return "expected %s, got %s" % (self.right, self.wrong)
+
+### The following exist to provide detailed TypeSignatureErrors
+class _TS_Exception(Exception):
+ def error_message(self):
+ raise NotImplementedError("Incomplete _TS_Exception subclass (%s)" % str(self.__class__))
+
+# This is used when there was an error related to an auto-unpacked tuple
+# in the function's signature
+class _TS_TupleError(_TS_Exception):
+ def __init__(self, parameters, types):
+ parameters = _rec_tuple(parameters)
+ types = _rec_tuple(types)
+ _TS_Exception.__init__(self, parameters, types)
+
+ self.parameters = parameters
+ self.types = types
+
+ def error_message(self):
+ return "the signature type %s does not match %s" % (str(self.types), str(self.parameters))
+
+class _TS_ExtraKeywordError(_TS_Exception):
+ def __init__(self, keyword):
+ _TS_Exception.__init__(self, keyword)
+
+ self.keyword = keyword
+
+ def error_message(self):
+ return "the keyword '%s' in the signature is not in the function" % self.keyword
+
+class _TS_ExtraPositionalError(_TS_Exception):
+ def __init__(self, type):
+ _TS_Exception.__init__(self, type)
+
+ self.type = type
+
+ def error_message(self):
+ return "an extra positional type has been supplied"
+
+class _TS_MissingTypeError(_TS_Exception):
+ def __init__(self, parameter):
+ _TS_Exception.__init__(self, parameter)
+
+ self.parameter = parameter
+
+ def error_message(self):
+ return "parameter '%s' lacks a type" % self.parameter
+
+# If the user has given a keyword parameter a type both positionally and
+# with a keyword argument, this will be raised
+class _TS_TwiceTypedError(_TS_Exception):
+ def __init__(self, parameter, kw_type, pos_type):
+ _TS_Exception.__init__(self, parameter, kw_type, pos_type)
+
+ self.parameter = parameter
+ self.kw_type = kw_type
+ self.pos_type = pos_type
+
+ def error_message(self):
+ return "parameter '%s' is provided two types (%s and %s)" % (self.parameter, str(self.kw_type), str(self.pos_type))
+
+### The following functions are the way new type handlers are registered
+### The Type function will iterate over all registered type handlers;
+### the first handler to return a non-None value is considered the winner
+#########################################################################
+
# Hook methods a type-handler class may implement
_hooks = ("__typesig__", "__startchecking__", "__stopchecking__", "__switchchecking__")

# All registered handler classes, plus a per-hook index of them
_registered_types = set()
_registered_hooks = dict([(_h, set()) for _h in _hooks])
+
def _manage_registration(add_remove, reg_type):
    """Apply the named set operation ('add' or 'remove') to the handler
    registries for reg_type."""
    if not isinstance(reg_type, (types.ClassType, types.TypeType)):
        raise ValueError("registered types must be classes or types")

    matched_any = False
    for hook_name in _hooks:
        if hasattr(reg_type, hook_name):
            getattr(_registered_hooks[hook_name], add_remove)(reg_type)
            matched_any = True

    if not matched_any:
        raise ValueError("registered types must have at least one of the following methods: " + ", ".join(_hooks))
    getattr(_registered_types, add_remove)(reg_type)
+
def register_type(reg_type):
    """Make reg_type's hook methods visible to the typechecking machinery."""
    _manage_registration('add', reg_type)

def unregister_type(reg_type):
    """Withdraw reg_type from the typechecking machinery."""
    _manage_registration('remove', reg_type)

def is_registered_type(reg_type):
    """Return True if reg_type is currently registered."""
    return reg_type in _registered_types
+
+### Factory function; this is what should be used to dispatch
+### type-checker class requests
+
def Type(obj):
    """Convert obj into a checker instance via the registered handlers;
    the first __typesig__ hook to return non-None wins."""
    # Note that registered types cannot count on being run in a certain
    # order; their __typesig__ methods must be sufficiently flexible to
    # account for this
    for handler in _registered_hooks['__typesig__']:
        checker = handler.__typesig__(obj)
        if checker is not None:
            return checker

    raise AssertionError("Object is of type '%s'; not a type" % str(type(obj)))
+
def __checking(start_stop, *args):
    # Fan the event out to every handler that implements this hook
    attr = '__%schecking__' % start_stop
    for handler in _registered_hooks[attr]:
        getattr(handler, attr)(*args)

def start_checking(function):
    """Notify handlers that checking of `function` is beginning."""
    __checking('start', function)

def stop_checking(function):
    """Notify handlers that checking of `function` is finished."""
    __checking('stop', function)
+
def switch_checking(from_func, to_func):
    """Move checking state from from_func to to_func; handlers without a
    dedicated switch hook get a stop followed by a start."""
    for handler in _registered_types:
        if hasattr(handler, '__switchchecking__'):
            handler.__switchchecking__(from_func, to_func)
        else:
            if hasattr(handler, '__stopchecking__'):
                handler.__stopchecking__(from_func)
            if hasattr(handler, '__startchecking__'):
                handler.__startchecking__(to_func)
+
+### Deduce the type of a data structure
+###
+### XXX: Find a way to allow registered utility classes
+### to hook into this
def calculate_type(obj):
    """Deduce a signature describing obj's current structure.

    Dicts become {key_type: val_type} (Or()-ed when heterogeneous),
    tuples are described element-by-element, lists as the shortest
    repeating pattern of element types; anything else maps to its class.
    """
    if isinstance(obj, types.InstanceType):
        # Old-style instance: its class is the most specific description
        return obj.__class__
    elif isinstance(obj, dict):
        if len(obj) == 0:
            # Empty dict stands for "dict with unknown contents"
            return {}

        key_types = set()
        val_types = set()

        for (k,v) in obj.items():
            key_types.add( calculate_type(k) )
            val_types.add( calculate_type(v) )

        # Collapse a single type to itself; several become an Or()
        if len(key_types) == 1:
            key_types = key_types.pop()
        else:
            key_types = Or(*key_types)

        if len(val_types) == 1:
            val_types = val_types.pop()
        else:
            val_types = Or(*val_types)

        return {key_types: val_types}
    elif isinstance(obj, tuple):
        return tuple([calculate_type(t) for t in obj])
    elif isinstance(obj, list):
        length = len(obj)
        if length == 0:
            return []
        obj = [calculate_type(o) for o in obj]

        # Candidate pattern lengths: 1, every divisor of length, length
        partitions = [1]
        partitions.extend([i for i in range(2, int(length/2)+1) if length%i==0])
        partitions.append(length)

        def evaluate(items_per):
            # Accept items_per iff the list is that prefix repeated verbatim
            parts = length / items_per

            for i in range(0, parts):
                for j in range(0, items_per):
                    if obj[items_per * i + j] != obj[j]:
                        raise StopIteration
            return obj[0:items_per]

        # The last candidate (the full length) always succeeds, so this
        # loop is guaranteed to return
        for items_per in partitions:
            try:
                return evaluate(items_per)
            except StopIteration:
                continue
    else:
        return type(obj)
+
### The following classes are the work-horses of the typechecker

# The base class for all the other utility classes
class CheckType(object):
    # Renders as Name(member, member, ...) from the subclass's _types
    def __repr__(self):
        return type(self).name + '(' + ', '.join(sorted(repr(t) for t in self._types)) + ')'

    __str__ = __repr__

    # NOTE: __eq__ and __ne__ are defined in terms of each other, so a
    # subclass only needs to override one of them; a subclass that
    # overrides neither would recurse infinitely
    def __eq__(self, other):
        return not self != other

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        raise NotImplementedError("Incomplete CheckType subclass: %s" % self.__class__)

    def __typecheck__(self, func, obj):
        # Subclasses must raise a _TC_Exception if obj fails the check
        raise NotImplementedError("Incomplete CheckType subclass: %s" % self.__class__)

    @classmethod
    def __typesig__(cls, obj):
        # Pre-built checker instances pass through Type() unchanged
        if isinstance(obj, CheckType):
            return obj
+
class Single(CheckType):
    """Checks that an object is an instance of a single class or type."""

    name = "Single"

    def __init__(self, type):
        # The parameter deliberately shadows the builtin `type`; the
        # name is kept for backward compatibility with keyword callers
        if not isinstance(type, (types.ClassType, types.TypeType)):
            # Bug fix: the original message used `type(type)`, which
            # calls the shadowed *argument* (not the builtin) and so
            # raised the wrong error; __class__ names the offender's type
            raise TypeError("Cannot type-check a %s" % type.__class__)
        else:
            self.type = type

        self._types = [self.type]

    def __typecheck__(self, func, to_check):
        if not isinstance(to_check, self.type):
            raise _TC_TypeError(to_check, self.type)

    def __eq__(self, other):
        if other.__class__ is not self.__class__:
            return False
        return self.type == other.type

    def __hash__(self):
        return hash(str(hash(self.__class__)) + str(hash(self.type)))

    # XXX Is this really a good idea?
    # Removing this only breaks 3 tests; that seems suspiciously low
    def __repr__(self):
        return repr(self.type)

    @classmethod
    def __typesig__(cls, obj):
        if isinstance(obj, (types.ClassType, types.TypeType)):
            return Single(obj)
+
### Provide a way to enforce the empty-ness of iterators
class Empty(Single):
    name = "Empty"

    def __init__(self, type):
        # Emptiness only makes sense for sized objects
        if not hasattr(type, '__len__'):
            raise TypeError("Can only assert emptyness for types with __len__ methods")

        Single.__init__(self, type)

    def __typecheck__(self, func, to_check):
        # First make sure the object is of the right type at all
        Single.__typecheck__(self, func, to_check)

        if len(to_check) > 0:
            err = _TC_LengthError(len(to_check), 0)
            # Dict failures get a more specific wrapper
            if isinstance(to_check, dict):
                raise _TC_DictError(err)
            raise err
+
class Dict(CheckType):
    """Checks dicts: every key must match `key`, every value `val`."""

    name = "Dict"

    def __init__(self, key, val):
        self.__check_key = Type(key)
        self.__check_val = Type(val)

        self.type = {key: val}
        self._types = [key, val]

    def __typecheck__(self, func, to_check):
        if not isinstance(to_check, types.DictType):
            raise _TC_TypeError(to_check, self.type)

        for (k, v) in to_check.items():
            # Check the key
            try:
                check_type(self.__check_key, func, k)
            except _TC_Exception, inner:
                raise _TC_KeyError(k, inner)

            # Check the value
            try:
                check_type(self.__check_val, func, v)
            except _TC_Exception, inner:
                raise _TC_KeyValError(k, v, inner)

    def __eq__(self, other):
        if other.__class__ is not self.__class__:
            return False
        return self.type == other.type

    def __hash__(self):
        # Derived from the class and both sub-checkers
        cls = self.__class__
        key = self.__check_key
        val = self.__check_val

        def strhash(obj):
            return str(hash(obj))

        return hash(''.join(map(strhash, [cls, key, val])))

    @classmethod
    def __typesig__(cls, obj):
        # A one-pair dict literal is the signature shorthand, e.g. {str: int}
        if isinstance(obj, dict):
            if len(obj) == 0:
                return Empty(dict)
            return Dict(obj.keys()[0], obj.values()[0])
+
### Provide typechecking for the built-in list() type
class List(CheckType):
    name = "List"

    def __init__(self, *type):
        self._types = [Type(t) for t in type]
        self.type = [t.type for t in self._types]

    def __typecheck__(self, func, to_check):
        if not isinstance(to_check, list):
            raise _TC_TypeError(to_check, self.type)
        # The checked list's length must be a multiple of the pattern's
        if len(to_check) % len(self._types):
            raise _TC_LengthError(len(to_check))

        # lists can be patterned, meaning that [int, float]
        # requires that the to-be-checked list contain an alternating
        # sequence of integers and floats. The pattern must be completed
        # (e.g, [5, 5.0, 6, 6.0] but not [5, 5.0, 6]) for the list to
        # typecheck successfully.
        #
        # A list with a single type, [int], is a sub-case of patterned
        # lists
        #
        # XXX: Investigate speed increases by special-casing single-typed
        # lists
        pat_len = len(self._types)
        type_tuples = [(i, val, self._types[i % pat_len]) for (i, val)
                in enumerate(to_check)]
        for (i, val, type) in type_tuples:
            try:
                check_type(type, func, val)
            except _TC_Exception, e:
                raise _TC_IndexError(i, e)

    def __eq__(self, other):
        if other.__class__ is not self.__class__:
            return False

        if len(self._types) != len(other._types):
            return False

        for (s, o) in zip(self._types, other._types):
            if s != o:
                return False
        return True

    def __hash__(self):
        def strhash(obj):
            return str(hash(obj))

        return hash(''.join(map(strhash, [self.__class__] + self._types)))

    @classmethod
    def __typesig__(cls, obj):
        # A list literal in a signature describes a patterned list
        if isinstance(obj, list):
            if len(obj) == 0:
                return Empty(list)
            return List(*obj)
+
### Provide typechecking for the built-in tuple() class
class Tuple(List):
    name = "Tuple"

    def __init__(self, *type):
        List.__init__(self, *type)

        # Present the pattern as a tuple rather than a list
        self.type = tuple(self.type)

    def __typecheck__(self, func, to_check):
        # Note that tuples of varying length (e.g., (int, int) and (int, int, int))
        # are separate types, not merely differences in length like lists
        if not isinstance(to_check, types.TupleType) or len(to_check) != len(self._types):
            raise _TC_TypeError(to_check, self.type)

        for (i, (val, type)) in enumerate(zip(to_check, self._types)):
            try:
                check_type(type, func, val)
            except _TC_Exception, inner:
                raise _TC_IndexError(i, inner)

    @classmethod
    def __typesig__(cls, obj):
        if isinstance(obj, tuple):
            return Tuple(*obj)
+
class TypeVariables(CheckType):
    """Type variables: a bare string in a signature (e.g. 'a') stands
    for "whatever type was first seen at this position during this call",
    and every later use of the same string must match that type."""

    # This is a stack of {typevariable -> type} mappings
    # It is intentional that it is class-wide; it maintains
    # the mappings of the outer functions if we descend into
    # nested typechecked functions
    __mapping_stack = []

    # This is the {typevariable -> type} mapping for the function
    # currently being checked
    __active_mapping = None

    # This dict maps generators to their mappings
    __gen_mappings = {}

    def __init__(self, name):
        self.type = name

    def __str__(self):
        return "TypeVariable(%s)" % self.type

    __repr__ = __str__

    def __hash__(self):
        return hash(''.join([str(o) for o in self.__class__
                                , hash(type(self.type))
                                , hash(self.type)]))

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        # Compare type as well as value because u'a' == 'a'
        return type(self.type) is type(other.type) and self.type == other.type

    def __typecheck__(self, func, to_check):
        name = self.type
        # Generators keep their own mapping; plain functions share the
        # class-wide active mapping
        if isinstance(func, GeneratorType):
            active = self.__class__.__gen_mappings[func]
        else:
            active = self.__class__.__active_mapping

        # We have to do this because u'a' == 'a'
        lookup = (name, type(name))
        if lookup in active:
            check_type(active[lookup], func, to_check)
        else:
            # This is the first time we've encountered this
            # typevariable for this function call.
            #
            # In this case, we automatically approve the object
            active[lookup] = Type(calculate_type(to_check))

    @classmethod
    def __typesig__(cls, obj):
        # Bare strings in signatures become type variables
        if isinstance(obj, basestring):
            return cls(obj)

    @classmethod
    def __startchecking__(cls, func):
        if isinstance(func, GeneratorType):
            cls.__gen_mappings.setdefault(func, {})
        elif isinstance(func, FunctionType):
            cls.__mapping_stack.append(cls.__active_mapping)
            cls.__active_mapping = {}
        else:
            raise TypeError(func)

    @classmethod
    def __switchchecking__(cls, from_func, to_func):
        if isinstance(from_func, FunctionType):
            if isinstance(to_func, GeneratorType):
                # The generator inherits the function's mapping
                cls.__gen_mappings[to_func] = cls.__active_mapping
                cls.__stopchecking__(from_func)
            elif isinstance(to_func, FunctionType):
                cls.__stopchecking__(from_func)
                cls.__startchecking__(to_func)
            else:
                raise TypeError(to_func)
        else:
            raise TypeError(from_func)

    @classmethod
    def __stopchecking__(cls, func):
        if isinstance(func, GeneratorType):
            del cls.__gen_mappings[func]
        elif isinstance(func, FunctionType):
            cls.__active_mapping = cls.__mapping_stack.pop()
        else:
            raise TypeError(func)
+
class Function(CheckType):
    """Wraps a user-supplied predicate; the checked object passes unless
    the predicate returns exactly False."""

    def __init__(self, func):
        self._func = func
        self.type = self

    @classmethod
    def __typesig__(cls, obj):
        if isinstance(obj, (FunctionType, MethodType)):
            return cls(obj)

        # Snag callable class instances (that aren't types or classes)
        if type(obj) not in (types.ClassType, type) and callable(obj):
            return cls(obj)

    def __typecheck__(self, func, to_check):
        # Only an explicit False fails; None and other falsy returns pass
        if False == self._func(to_check):
            raise _TC_FunctionError(self._func, to_check)

    def __str__(self):
        return "Function(%s)" % self._func

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self._func is other._func

    def __hash__(self):
        return hash(str(self.__class__) + str(hash(self._func)))
+
# Register some of the above types so that Type() knows about them.
# (YieldSeq registers itself separately, below.)
for c in (CheckType, List, Tuple, Dict, Single, TypeVariables, Function):
    register_type(c)
+
+### The following are utility classes intended to make writing complex
+### signatures easier.
+######################################################################
+
### Instances of Any() automatically approve of the object they're supposed
### to be checking (ie, they don't actually check it; use this with caution)
class Any(CheckType):
    name = "Any"

    def __init__(self):
        self.type = object

    def __typecheck__(self, func, to_check):
        # Anything goes
        pass

    def __str__(self):
        return "Any()"

    __repr__ = __str__

    def __eq__(self, other):
        # Every Any() compares equal to every other Any()
        return self.__class__ is other.__class__

    def __hash__(self):
        return hash(self.__class__)
+
### Base class for Or() and And()
class _Boolean(CheckType):
    def __init__(self, first_type, second_type, *types):
        self._types = set()

        for t in (first_type, second_type)+types:
            # Flatten nested combinators of the same kind, e.g.
            # Or(int, Or(str, float)) behaves as Or(int, str, float)
            if type(t) is type(self):
                self._types.update(t._types)
            else:
                self._types.add(Type(t))

        # After flattening/deduplication at least two distinct members
        # must remain, otherwise the combinator is pointless
        if len(self._types) < 2:
            raise TypeError("there must be at least 2 distinct parameters to __init__()")

        self.type = self

    def __eq__(self, other):
        if other.__class__ is not self.__class__:
            return False

        return self._types == other._types

    def __hash__(self):
        return hash(str(hash(self.__class__)) + str(hash(frozenset(self._types))))
+
class Or(_Boolean):
    """Succeeds if at least one member type accepts the object."""

    name = "Or"

    def __typecheck__(self, func, to_check):
        for candidate in self._types:
            try:
                check_type(candidate, func, to_check)
            except _TC_Exception:
                continue
            return

        raise _TC_TypeError(to_check, self)
+
class And(_Boolean):
    """Succeeds only if every member type accepts the object."""

    name = "And"

    def __typecheck__(self, func, to_check):
        for requirement in self._types:
            try:
                check_type(requirement, func, to_check)
            except _TC_Exception:
                raise _TC_TypeError(to_check, self)
+
class Not(Or):
    """Succeeds only if none of the member types accept the object."""

    name = "Not"

    # We override _Boolean's __init__ so that a single condition is
    # acceptable here
    def __init__(self, first_type, *types):
        self._types = set([Type(t) for t in (first_type,)+types])
        self.type = self

    def __typecheck__(self, func, to_check):
        # Let Or do the work, then invert its verdict
        try:
            Or.__typecheck__(self, func, to_check)
        except _TC_Exception:
            return
        raise _TC_TypeError(to_check, self)
+
class Xor(_Boolean):
    """Succeeds iff exactly one member type accepts the object."""

    name = "Xor"

    def __typecheck__(self, func, to_check):
        matches = 0

        for candidate in self._types:
            try:
                check_type(candidate, func, to_check)
            except _TC_Exception:
                continue
            matches += 1
            # Fail as soon as a second member accepts the object
            if matches == 2:
                raise _TC_XorError(2, _TC_TypeError(to_check, self))

        if matches == 0:
            raise _TC_XorError(0, _TC_TypeError(to_check, self))
+
class IsCallable(CheckType):
    """Accepts any object for which callable() is true."""

    def __init__(self):
        self.type = self

    def __str__(self):
        return "IsCallable()"

    __repr__ = __str__

    # All instances are interchangeable
    # XXX Change IsCallable to a singleton class
    def __hash__(self):
        return id(self.__class__)

    def __eq__(self, other):
        return self.__class__ is other.__class__

    def __typecheck__(self, func, to_check):
        if not callable(to_check):
            raise _TC_TypeError(to_check, 'a callable')
+
class HasAttr(CheckType):
    """Checks that an object has given attributes.

    Accepts a list of attribute names (any value allowed), a dict
    mapping names to required types, or one of each in either order.
    """

    def __init__(self, set_1, set_2=None):
        attr_sets = {list: [], dict: {}}

        # Accept (list, dict) in either order, but reject two lists or
        # two dicts
        for (arg_1, arg_2) in ((set_1, set_2), (set_2, set_1)):
            for t in (list, dict):
                if isinstance(arg_1, t):
                    attr_sets[t] = arg_1
                    if isinstance(arg_2, t):
                        raise TypeError("can only have one list and/or one dict")

        # List-only attributes may have any type
        self._attr_types = dict.fromkeys(attr_sets[list], Any())

        for (attr, typ) in attr_sets[dict].items():
            self._attr_types[attr] = Type(typ)

    def __typecheck__(self, func, to_check):
        for (attr, typ) in self._attr_types.items():
            if not hasattr(to_check, attr):
                raise _TC_MissingAttrError(attr)

            try:
                check_type(typ, func, getattr(to_check, attr))
            except _TC_Exception, e:
                raise _TC_AttrError(attr, e)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self._attr_types == other._attr_types

    def __hash__(self):
        return hash(str(hash(self.__class__)) + str(hash(str(self._attr_types))))

    def __str__(self):
        # Render untyped and typed attributes as separate groups
        any_type = []
        spec_type = {}

        any = Any()

        for (attr, typ) in self._attr_types.items():
            if typ == any:
                any_type.append(attr)
            else:
                spec_type[attr] = typ

        msg = [t for t in (any_type, spec_type) if len(t)]

        return "HasAttr(" + ', '.join(map(str, msg)) + ")"

    __repr__ = __str__
+
class IsIterable(CheckType):
    """Accepts any object exposing a callable __iter__ method."""

    def __init__(self):
        self.type = self

    def __eq__(self, other):
        return self.__class__ is other.__class__

    # All instances are interchangeable
    # XXX Change IsIterable to a singleton class
    def __hash__(self):
        return id(self.__class__)

    def __str__(self):
        return "IsIterable()"

    __repr__ = __str__

    def __typecheck__(self, func, to_check):
        has_iter = hasattr(to_check, '__iter__') and callable(to_check.__iter__)
        if not has_iter:
            raise _TC_TypeError(to_check, "an iterable")
+
class YieldSeq(CheckType):
    """Checks a generator's yields against a fixed sequence of types:
    the n-th yield must match the n-th type, and yielding more values
    than there are types raises _TC_YieldCountError."""

    # Maps each running generator to its per-YieldSeq yield indexes
    _index_map = {}

    def __init__(self, type_1, type_2, *types):
        self.type = self

        self._type = [type_1, type_2] + list(types)
        self._types = [Type(t) for t in self._type]

    def __str__(self):
        return "YieldSeq(" + ", ".join(map(str, self._type)) + ")"

    __repr__ = __str__

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self._types == other._types

    def __hash__(self):
        # Bug fix: the class previously defined __hash__ twice (an
        # earlier id()-based version and this one); the dead id()-based
        # definition was removed.  This hash is consistent with __eq__.
        return hash(str(self.__class__) + str([hash(t) for t in self._types]))

    # We have to use __{start,stop}checking__ so that the indexes get
    # reset every time we run through the typechecking sequence
    @classmethod
    def __startchecking__(cls, gen):
        if isinstance(gen, GeneratorType):
            cls._index_map[gen] = {}

    @classmethod
    def __stopchecking__(cls, gen):
        if gen in cls._index_map:
            del cls._index_map[gen]

    def __typecheck__(self, gen, to_check):
        index_map = self.__class__._index_map

        # There might be multiple YieldSeq's per signature
        if self not in index_map[gen]:
            index_map[gen][self] = -1
        index = index_map[gen]

        if index[self] >= len(self._types)-1:
            raise _TC_YieldCountError(len(self._types))

        index[self] += 1
        check_type(self._types[index[self]], gen, to_check)

register_type(YieldSeq)
+
class Exact(CheckType):
    """Accepts only objects that compare equal to a reference object."""

    def __init__(self, obj):
        self.type = self
        self._obj = obj

    def __hash__(self):
        # Unhashable reference objects fall back to a type+repr key
        try:
            obj_hash = str(hash(self._obj))
        except TypeError:
            obj_hash = str(type(self._obj)) + str(self._obj)

        return hash(str(self.__class__) + obj_hash)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self._obj == other._obj

    def __typecheck__(self, func, to_check):
        if self._obj != to_check:
            raise _TC_ExactError(to_check, self._obj)
+
class Length(CheckType):
    """Accepts only sized objects whose len() equals the given length."""

    def __init__(self, length):
        self.type = self
        self._length = int(length)

    def __hash__(self):
        return hash(str(self.__class__) + str(self._length))

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self._length == other._length

    def __typecheck__(self, func, to_check):
        try:
            actual = len(to_check)
        except TypeError:
            raise _TC_TypeError(to_check, "something with a __len__ method")

        if actual != self._length:
            raise _TC_LengthError(actual, self._length)
+
+import sys
class Class(CheckType):
    """Checks against a class that may not exist yet when the signature
    is built; the class is looked up lazily, by name, in the scope where
    Class(...) was written."""

    def __init__(self, class_name):
        self.type = self
        self.class_name = class_name
        self.class_obj = None
        # Remember the caller's frame so the lazy lookup happens in the
        # scope that mentioned the class name
        self._frame = sys._getframe(1)

    def __hash__(self):
        return hash(str(self.__class__) + self.class_name)

    def __str__(self):
        return "Class('%s')" % self.class_name

    __repr__ = __str__

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.class_name == other.class_name

    def __typecheck__(self, func, to_check):
        if self.class_obj is None:
            class_name = self.class_name
            frame = self._frame

            # Bug fix: the loop variable f_dict was never used -- the
            # body consulted frame.f_locals on both iterations, so a
            # class visible only in the frame's globals could never be
            # found and a spurious NameError was raised
            for f_dict in (frame.f_locals, frame.f_globals):
                if class_name in f_dict:
                    # Skip the name if it resolves to this checker itself
                    if self is not f_dict[class_name]:
                        self.class_obj = f_dict[class_name]
                        self._frame = None
                        break
            else:
                raise NameError("name '%s' is not defined" % class_name)

        if not isinstance(to_check, self.class_obj):
            raise _TC_TypeError(to_check, self.class_obj)
+
class Typeclass(CheckType):
    """Structural (duck-typed) check: an object passes if it offers every
    method in the interface common to the registered instance classes."""

    # Methods that never count as part of an interface
    bad_members = dict.fromkeys(['__class__', '__new__', '__init__'], True)

    def __init__(self, *types):
        if len(types) == 0:
            raise TypeError("Must supply at least one type to __init__()")

        self.type = self

        # _cache holds classes already known to satisfy the interface
        self._cache = set()
        self._interface = set()
        self._instances = set()
        for t in types:
            self.add_instance(t)

        self._calculate_interface()

    def recalculate_interface(self):
        # Drop cached approvals that were not explicit instances
        self._cache = self._instances.copy()
        self._calculate_interface()

    def instances(self):
        return list(self._instances)

    def interface(self):
        return list(self._interface)

    def has_instance(self, instance):
        return instance in self._instances

    def add_instance(self, instance):
        # Merging another Typeclass adopts all of its instances
        if isinstance(instance, self.__class__):
            for inst in instance.instances():
                self._instances.add(inst)
                self._cache.add(inst)
        elif isinstance(instance, (ClassType, TypeType)):
            self._instances.add(instance)
            self._cache.add(instance)
        else:
            raise TypeError("All instances must be classes or types")

    def intersect(self, other):
        if isinstance(other, self.__class__):
            new_instances = other.instances()
        else:
            new_instances = other

        self._instances.update(new_instances)
        self._cache.update(new_instances)
        self._calculate_interface()

    def _calculate_interface(self):
        # The interface is the intersection of each instance's callable,
        # non-excluded attributes
        bad_members = self.bad_members

        for instance in self._instances:
            inst_attrs = []

            for attr, obj in instance.__dict__.items():
                if callable(obj) and attr not in bad_members:
                    inst_attrs.append(attr)

            if len(self._interface) == 0:
                self._interface = set(inst_attrs)
            else:
                self._interface.intersection_update(inst_attrs)

    def __typecheck__(self, func, to_check):
        # Fast path: this class has already been approved
        if to_check.__class__ in self._cache:
            return

        for method in self._interface:
            if not hasattr(to_check, method):
                raise _TC_MissingAttrError(method)

            attr = getattr(to_check, method)
            if not callable(attr):
                raise _TC_AttrError(method, _TC_TypeError(attr, IsCallable()))

        # Remember the approval for next time
        self._cache.add(to_check.__class__)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self._instances == other._instances

    def __hash__(self):
        return hash(str(self.__class__) + str(hash(frozenset(self._instances))))

    def __repr__(self):
        return object.__repr__(self)

    def __str__(self):
        return 'Typeclass(' + ', '.join(map(str, self._instances)) + ')'
+
# The current implementation of Self relies on the TypeVariables machinery
_Self = TypeVariables("this is the class of the invocant")
def Self():
    """Return the shared type variable standing for the invocant's class."""
    return _Self
+
### Aliases
###########

# Friendlier spellings of the boolean combinators
IsOneOf = Or
IsAllOf = And
IsNoneOf = Not
IsOnlyOneOf = Xor
+
+### This is the public side of the module
+#########################################
+
# This is for backwards compatibility with v0.1.6 and earlier
class TypeCheckException(Exception):
    """Root of the public typecheck exception hierarchy."""
    pass
+
class TypeCheckError(TypeCheckException):
    """Public error raised when a runtime value fails its signature.

    Wraps the internal _TC_* exception chain and renders a readable
    message combining the prefix, the offending object and the inner
    error's description.
    """

    def __init__(self, prefix, bad_object, exception):
        TypeCheckException.__init__(self, prefix, bad_object, exception)

        self.prefix = prefix
        self.internal = exception
        self.bad_object = bad_object

        # format_bad_object returns (object-as-text, innermost-error)
        (bad_obj_str, start_message) = exception.format_bad_object(bad_object)
        self.__message = prefix + bad_obj_str + start_message.error_message()

    def __str__(self):
        return self.__message
+
class TypeSignatureError(Exception):
    """Public wrapper raised when a signature itself is malformed;
    wraps an internal _TS_* error."""

    def __init__(self, internal_exc):
        Exception.__init__(self, internal_exc)
        self.internal = internal_exc
        self.__message = internal_exc.error_message()

    def __str__(self):
        return self.__message
+
+### Begin helper classes/functions for typecheck_args
+#####################################################
+def _rec_tuple(obj):
+ if isinstance(obj, list):
+ return tuple(_rec_tuple(o) for o in obj)
+ return obj
+
+def _rec_tuple_str(obj):
+ if not isinstance(obj, (list, tuple)):
+ return obj
+
+ if len(obj) == 1:
+ return '(%s,)' % obj
+
+ return '(' + ', '.join(_rec_tuple_str(o) for o in obj) + ')'
+
def _gen_arg_to_param(func, (posargs, varargs, varkw, defaults)):
    """Build, via exec, a function with the same signature as func that
    maps its call arguments to a {parameter: value} dict.

    Auto-unpacked tuple parameters are keyed on their tuple-ized
    structure; *args is exposed as a list; **kwargs as-is.
    """
    sig_args = list()
    dic_args = list()

    for obj in posargs:
        if isinstance(obj, list):
            # Auto-unpacked tuple parameter
            rts = _rec_tuple_str(obj)

            sig_args.append(rts)
            dic_args.append((_rec_tuple(obj), rts))
        else:
            sig_args.append(str(obj))
            dic_args.append(('"%s"' % obj, obj))

    func_code = ''
    if varargs:
        dic_args.append(('"%s"' % varargs, varargs))
        sig_args.append('*' + varargs)
        # *args arrives as a tuple; expose it as a list
        func_code = '\n\t%s = list(%s)' % (varargs, varargs)
    if varkw:
        dic_args.append(('"%s"' % varkw, varkw))
        sig_args.append('**' + varkw)

    # NOTE(review): func_name is computed but never used below -- the
    # generated function is defined and retrieved via func.func_name.
    # Also `func_name in dic_args` tests membership in a list of tuples
    # and so can never be true; verify the original intent.
    func_name = func.func_name + '_'
    while func_name in dic_args:
        func_name += '_'

    func_def = 'def %s(' % func.func_name
    func_return = func_code \
                + '\n\treturn {' \
                + ', '.join('%s: %s' % kv for kv in dic_args) \
                + '}'

    locals = {}
    exec func_def + ','.join(sig_args) + '):' + func_return in locals
    func = locals[func.func_name]
    # Carry over the original defaults so optional args still work
    func.func_defaults = defaults
    return func
+
def _validate_tuple(ref, obj):
    """Ensure obj mirrors the nested tuple/list structure of ref,
    raising _TS_TupleError on any shape mismatch."""
    if not isinstance(ref, (list, tuple)):
        return
    if not isinstance(obj, (list, tuple)) or len(ref) != len(obj):
        raise _TS_TupleError(ref, obj)

    try:
        for ref_part, obj_part in zip(ref, obj):
            _validate_tuple(ref_part, obj_part)
    except _TS_TupleError:
        # Re-raise at this level so the outermost mismatching pair is
        # what gets reported
        raise _TS_TupleError(ref, obj)
+
def _param_to_type((params, varg_name, kwarg_name), vargs, kwargs):
    """Match the signature's positional/keyword types against the
    function's parameter list, returning {parameter: type}.

    Raises a _TS_* exception for extra, missing or doubly-given types.
    """
    vargs = list(vargs)
    kwargs = dict(kwargs)

    # Make parameter names to values
    param_value = dict()

    # There are excess positional arguments, but no *args parameter
    if len(params) < len(vargs) and varg_name is None:
        raise _TS_ExtraPositionalError(vargs[len(params)])
    # There are not enough position args and no kwargs to draw from
    if len(params) > len(vargs) and len(kwargs) == 0:
        raise _TS_MissingTypeError(params[len(vargs)])

    # No reason to do this if there aren't any vargs
    if len(vargs):
        for p, a in zip(params, vargs):
            # Make sure all auto-unpacked tuples match up
            _validate_tuple(p, a)
            param_value[_rec_tuple(p)] = a

    # No reason to do all this work if there aren't any kwargs
    if len(kwargs) > 0:
        # All params that still need values
        params = set([k for k in params if k not in param_value])
        if kwarg_name and kwarg_name not in param_value:
            params.add(kwarg_name)
        if varg_name and varg_name not in param_value:
            params.add(varg_name)

        # Lift this out of the loop
        no_double_star = kwarg_name is None

        # All parameter slots have been filled, but there are still keyword
        # args remaining with no **kwargs parameter present
        if len(params) == 0 and no_double_star:
            raise _TS_ExtraKeywordError(kwargs.keys()[0])

        # Match up remaining keyword args with open parameter slots
        for p, a in kwargs.items():
            if p in param_value:
                raise _TS_TwiceTypedError(p, a, param_value[p])
            if p not in params and no_double_star:
                raise _TS_ExtraKeywordError(p)

            # Make sure all auto-unpacked tuples match up
            _validate_tuple(p, a)

            # Bookkeeping
            params.remove(p)
            param_value[p] = a

    # Any elements left in params indicate that the parameter is missing
    # a value
    if len(params):
        raise _TS_MissingTypeError(params.pop())

    return param_value
+
def _make_fake_function(func):
    """Wrap func in a checking proxy.

    The proxy calls start_checking, then its pluggable __check_args and
    __check_result hooks (no-ops by default; typecheck_args and
    typecheck_return/yield replace them).
    """
    def fake_function(*vargs, **kwargs):
        # We call start_checking here, but __check_result
        # has to call stop_checking on its own. The reason
        # for this is so that typecheck_yield can call
        # stop_checking on the function and then start_checking
        # on the generator
        start_checking(func)

        # If either one of these operations fails, we need to call
        # stop_checking()
        try:
            fake_function.__check_args(vargs, kwargs)
            result = func(*vargs, **kwargs)
        except:
            stop_checking(func)
            raise

        return fake_function.__check_result(func, result)

    # These are the default implementations of __check_args
    # and __check_results
    def _pass_args(vargs, kwargs):
        pass
    def _pass_result(func, result):
        stop_checking(func)
        return result

    fake_function.__check_args = _pass_args
    fake_function.__check_result = _pass_result
    fake_function.__wrapped_func = func

    # Mock-up the fake function to look as much like the
    # real function as possible
    fake_function.__module__ = func.__module__
    fake_function.__name__ = func.__name__
    fake_function.__doc__ = func.__doc__

    return fake_function
+
+###################################################
+### End helper classes/functions for typecheck_args
+
def typecheck_args(*v_sig, **kw_sig):
    """Decorator factory: typecheck a function's arguments against the
    positional/keyword signature supplied here.

    Raises TypeSignatureError at decoration time for a bad signature and
    TypeCheckError at call time for a bad argument.
    """
    # typecheck_args is run to obtain the real decorator
    def decorator(func):
        # Unwrap a proxy created by an earlier typecheck_* decorator,
        # but refuse to apply typecheck_args twice
        if hasattr(func, '__wrapped_func'):
            if hasattr(func, 'type_args'):
                raise RuntimeError('Cannot use the same typecheck_* function more than once on the same function')
            wrapped_func = func.__wrapped_func
        else:
            wrapped_func = func

        param_list, varg_name, kwarg_name, defaults = inspect.getargspec(wrapped_func)
        args_to_params = _gen_arg_to_param(wrapped_func, (param_list, varg_name, kwarg_name, defaults))

        try:
            param_types = _param_to_type((param_list, varg_name, kwarg_name), v_sig, kw_sig)
        except _TS_Exception, e:
            raise TypeSignatureError(e)

        ### We need to fix-up the types of the *vargs and **kwargs parameters
        #####################################################################
        if varg_name:
            # *args is checked as a (patterned) list of the given type
            if not isinstance(param_types[varg_name], list):
                param_types[varg_name] = [param_types[varg_name]]

        if kwarg_name:
            # **kwargs is checked as a str-keyed dict of the given type
            if not isinstance(param_types[kwarg_name], dict):
                param_types[kwarg_name] = {str: param_types[kwarg_name]}

        #####################################################################
        ### /Fix-up

        # Convert the signatures to types now, rather than rebuild them in every function call
        check_param_types = dict()
        for k, v in param_types.items():
            check_param_types[k] = Type(v)

        def __check_args(__vargs, __kwargs):
            # Type-checking can be turned on and off by toggling the
            # value of the global enable_checking variable
            if enable_checking:
                arg_dict = args_to_params(*__vargs, **__kwargs)

                # Type-check the keyword arguments
                try:
                    for name, val in arg_dict.items():
                        check_type(check_param_types[name], wrapped_func, val)
                except _TC_Exception, e:
                    str_name = _rec_tuple_str(name)
                    raise TypeCheckError("Argument %s: " % str_name, val, e)

        if hasattr(func, '__check_result'):
            # This is one of our wrapper functions, probably created by
            # typecheck_yield or typecheck_return
            fake_function = func
        else:
            # We need to build a wrapper
            fake_function = _make_fake_function(func)

        # Specify how argument checking should be done
        fake_function.__check_args = __check_args

        ### Add the publically-accessible signature information
        fake_function.type_args = param_types

        return fake_function
    return decorator
+
# Refactor this out of typecheck_{return,yield}
def _decorator(signature, conflict_field, twice_field, check_result_func):
    """Shared decorator builder for typecheck_return/typecheck_yield.

    conflict_field is the attribute whose presence means the *other*
    result decorator was already applied; twice_field detects a repeat
    application of this one.
    """
    def decorator(func):
        if hasattr(func, '__check_result'):
            # This is one of our wrapper functions, probably created by
            # typecheck_args
            if hasattr(func, conflict_field):
                raise RuntimeError("Cannot use typecheck_return and typecheck_yield on the same function")
            elif hasattr(func, twice_field):
                raise RuntimeError('Cannot use the same typecheck_* function more than once on the same function')

            fake_function = func
        else:
            fake_function = _make_fake_function(func)

        setattr(fake_function, twice_field, signature)
        fake_function.__check_result = check_result_func
        return fake_function
    return decorator
+
def typecheck_return(*signature):
    """Decorator factory: typecheck a function's return value.

    Multiple signature arguments are treated as a tuple signature.
    """
    if len(signature) == 1:
        signature = signature[0]
    sig_types = Type(signature)

    def __check_return(func, return_vals):
        if enable_checking:
            try:
                check_type(sig_types, func, return_vals)
            except _TC_Exception, e:
                # Release checking state before surfacing the error
                stop_checking(func)
                raise TypeCheckError("Return value: ", return_vals, e)

        stop_checking(func)
        return return_vals
    return _decorator(signature, 'type_yield', 'type_return', __check_return)
+
class Fake_generator(object):
    """Wraps a real generator, typechecking each yielded value against
    the yield signature."""

    def __init__(self, real_gen, signature):
        # The generator should have the same yield signature
        # as the function that produced it; however, we don't
        # copy the args signature because the generator
        # doesn't take arguments
        self.type_yield = signature

        self.__yield_no = 0
        self.__real_gen = real_gen
        self.__sig_types = Type(signature)
        self.__needs_stopping = True

    def next(self):
        gen = self.__real_gen

        # Track which yield we are on for error reporting
        self.__yield_no += 1

        try:
            return_vals = gen.next()
        except StopIteration:
            # Exhausted: release the generator's checking state once
            if self.__needs_stopping:
                stop_checking(gen)
                self.__needs_stopping = False
            raise

        if enable_checking:
            try:
                check_type(self.__sig_types, gen, return_vals)
            except _TC_Exception, e:
                # Insert this error into the chain so we can know
                # which yield the error occurred at
                middle_exc = _TC_GeneratorError(self.__yield_no, e)
                raise TypeCheckError("", return_vals, middle_exc)

        # Everything checks out. Return the results
        return return_vals

    def __del__(self):
        # Release checking state even if the generator is abandoned
        # before exhaustion
        if self.__needs_stopping:
            stop_checking(self.__real_gen)
+
def typecheck_yield(*signature):
    """Decorator factory: typecheck each value yielded by a generator
    function against the given signature."""
    if len(signature) == 1:
        signature = signature[0]

    def __check_yield(func, gen):
        # If the return value isn't a generator, we blow up
        if not isinstance(gen, types.GeneratorType):
            stop_checking(func)
            raise TypeError("typecheck_yield only works for generators")

        # Inform all listening classes that they might want to preserve any information
        # from the function to the generator (*hint* TypeVariables *hint*)
        #
        # stop_checking() will not be invoked on the generator until it raises
        # StopIteration or its refcount drops to 0
        switch_checking(func, gen)

        # Otherwise, we build ourselves a fake generator
        return Fake_generator(gen, signature)
    return _decorator(signature, 'type_return', 'type_yield', __check_yield)
+
# No-op decorator used while typechecking is disabled;
# enable_typechecking() swaps in the real implementations
_null_decorator = lambda *args, **kwargs: lambda f: f
typecheck = _null_decorator
accepts = _null_decorator
returns = _null_decorator
yields = _null_decorator
+
# Aliases
def enable_typechecking():
    """Replace the no-op decorator aliases with the real typecheck_*
    implementations."""
    global typecheck
    global accepts
    global returns
    global yields

    typecheck = typecheck_args
    accepts = typecheck_args
    returns = typecheck_return
    yields = typecheck_yield
+
import os
# Opt-in: setting PYTHONTYPECHECK in the environment enables checking
# at import time
if "PYTHONTYPECHECK" in os.environ:
    enable_typechecking()
+
diff --git a/translate-toolkit-1.3.0/translate/misc/typecheck/doctest_support.py b/translate-toolkit-1.3.0/translate/misc/typecheck/doctest_support.py
new file mode 100644
index 0000000..0933dda
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/typecheck/doctest_support.py
@@ -0,0 +1,36 @@
+"""
+This module allows doctest to find typechecked functions.
+
+Currently, doctest verifies functions to make sure that their
+globals() dict is the __dict__ of their module. In the case of
+decorated functions, the globals() dict is *not* the right one.
+
+To enable support for doctest do:
+
+ import typecheck.doctest_support
+
+This import must occur before any calls to doctest methods.
+"""
+
def __DocTestFinder_from_module(self, module, object):
    """
    Return true if the given object is defined in the given
    module.
    """
    import inspect

    # Guard-clause form: each test either answers or falls through.
    if module is None:
        return True
    if inspect.isfunction(object) or inspect.isclass(object):
        return module.__name__ == object.__module__
    if inspect.getmodule(object) is not None:
        return module is inspect.getmodule(object)
    if hasattr(object, '__module__'):
        return module.__name__ == object.__module__
    if isinstance(object, property):
        return True  # [XX] no way to be sure.
    raise ValueError("object must be a class or function")
+
# Install the replacement: monkey-patch doctest's DocTestFinder so that
# decorated (typechecked) functions are still recognised as belonging to
# their module.  The double-underscore alias keeps "doctest" out of this
# module's public namespace.
import doctest as __doctest
__doctest.DocTestFinder._from_module = __DocTestFinder_from_module
diff --git a/translate-toolkit-1.3.0/translate/misc/typecheck/mixins.py b/translate-toolkit-1.3.0/translate/misc/typecheck/mixins.py
new file mode 100644
index 0000000..11ac7bb
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/typecheck/mixins.py
@@ -0,0 +1,84 @@
+from translate.misc.typecheck import _TC_NestedError, _TC_TypeError, check_type, Or
+from translate.misc.typecheck import register_type, _TC_Exception
+
class _TC_IterationError(_TC_NestedError):
    """Wraps a nested typecheck failure with the iteration index and the
    offending value encountered while walking an iterable."""

    def __init__(self, iteration, value, inner_exception):
        _TC_NestedError.__init__(self, inner_exception)
        self.iteration = iteration
        self.value = value

    def error_message(self):
        # Prefix the wrapped error's message with positional context.
        prefix = "at iteration %d (value: %s)" % (self.iteration, repr(self.value))
        return prefix + _TC_NestedError.error_message(self)
+
+### This is the shadow class behind UnorderedIteratorMixin.
+### Again, it tries to pretend it doesn't exist by mimicing
+### the class of <obj> as much as possible.
+###
+### This mixin provides typechecking for iterator classes
+### where you don't care about the order of the types (ie,
+### you simply Or() the types together, as opposed to patterned
+### lists, which would be ordered mixins)
+class _UnorderedIteratorMixin(object):
+ def __init__(self, class_name, obj):
+ vals = [o for o in obj]
+
+ self.type = self
+ self._type = Or(*vals)
+ self.__cls = obj.__class__
+ self.__vals = vals
+ # This is necessary because it's a huge pain in the ass
+ # to get the "raw" name of the class once it's created
+ self.__cls_name = class_name
+
+ def __typecheck__(self, func, to_check):
+ if not isinstance(to_check, self.__cls):
+ raise _TC_TypeError(to_check, self)
+
+ for i, item in enumerate(to_check):
+ try:
+ check_type(self._type, func, item)
+ except _TC_Exception, e:
+ raise _TC_IterationError(i, item, e)
+
+ @classmethod
+ def __typesig__(cls, obj):
+ if isinstance(obj, cls):
+ return obj
+
+ def __str__(self):
+ return "%s(%s)" % (self.__cls_name, str(self._type))
+
+ __repr__ = __str__
+
+### This is included in a class's parent-class section like so:
+### class MyClass(UnorderedIteratorMixin("MyClass")):
+### blah blah blah
+###
+### This serves as a class factory, whose produced classes
+### attempt to mask the fact they exist. Their purpose
+### is to redirect __typesig__ calls to appropriate
+### instances of _UnorderedIteratorMixin
def UnorderedIteratorMixin(class_name):
    """Class factory, used in a class's bases:

        class MyClass(UnorderedIteratorMixin("MyClass")):
            ...

    The produced classes try to mask their own existence; their sole
    purpose is to redirect __typesig__ calls to a freshly built
    _UnorderedIteratorMixin instance."""
    class UIM(object):
        @classmethod
        def __typesig__(cls, obj):
            if isinstance(obj, cls):
                return _UnorderedIteratorMixin(class_name, obj)

        def __repr__(self):
            contents = tuple(item for item in self)
            return "%s%s" % (class_name, str(contents))

    # Every produced class is registered anew.  To unregister one later,
    # keep your own reference before using it as a base:
    #
    #   my_UIM = UnorderedIteratorMixin("FooClass")
    #   class FooClass(my_UIM):
    #       ...
    #
    # (alternatively, fish it out of FooClass.__bases__ afterwards)
    register_type(UIM)
    return UIM
+
+register_type(_UnorderedIteratorMixin)
diff --git a/translate-toolkit-1.3.0/translate/misc/typecheck/sets.py b/translate-toolkit-1.3.0/translate/misc/typecheck/sets.py
new file mode 100644
index 0000000..42743bd
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/typecheck/sets.py
@@ -0,0 +1,62 @@
+from translate.misc.typecheck import CheckType, _TC_TypeError, check_type, Type
+from translate.misc.typecheck import register_type, Or, _TC_Exception, _TC_KeyError
+from translate.misc.typecheck import _TC_LengthError
+
+### Provide typechecking for the built-in set() class
+###
+### XXX: Investigate rewriting this in terms of
+### UnorderedIteratorMixin or Or()
class Set(CheckType):
    """Typechecking for the built-in set() class.

    XXX: Investigate rewriting this in terms of
    UnorderedIteratorMixin or Or()
    """

    def __init__(self, set_list):
        self.type = set(set_list)
        self._types = [Type(t) for t in self.type]

        # self._type is used to build _TC_TypeError
        if len(self._types) > 1:
            self._type = Or(*self.type)
        elif len(self._types) == 1:
            # XXX Is there an easier way to peek at a set's only member?
            member = self.type.pop()
            self._type = member
            self.type.add(member)

    def __str__(self):
        return "Set(" + str([e for e in self.type]) + ")"

    __repr__ = __str__

    def __typecheck__(self, func, to_check):
        if not isinstance(to_check, set):
            raise _TC_TypeError(to_check, self.type)

        # An empty signature set only matches an empty set.
        if len(self._types) == 0 and len(to_check) > 0:
            raise _TC_LengthError(len(to_check), 0)

        for member in to_check:
            for candidate in self._types:
                try:
                    check_type(candidate, func, member)
                except _TC_Exception:
                    continue
                # At least one declared type accepted this member.
                break
            else:
                # No declared type matched this member.
                raise _TC_KeyError(member, _TC_TypeError(member, self._type))

    def __eq__(self, other):
        return self.__class__ is other.__class__ and self.type == other.type

    def __hash__(self):
        return hash(str(hash(self.__class__)) + str(hash(frozenset(self.type))))

    @classmethod
    def __typesig__(self, obj):
        if isinstance(obj, set):
            return Set(obj)
+
+register_type(Set)
diff --git a/translate-toolkit-1.3.0/translate/misc/typecheck/typeclasses.py b/translate-toolkit-1.3.0/translate/misc/typecheck/typeclasses.py
new file mode 100644
index 0000000..fcab3f4
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/typecheck/typeclasses.py
@@ -0,0 +1,35 @@
+from translate.misc.typecheck import Typeclass
+
+### Number
+####################################################
+
# Builtin numeric types.  Python 2: long is a separate builtin.
_numbers = [int, float, complex, long, bool]
try:
    from decimal import Decimal
    _numbers.append(Decimal)
    # Decimal stays alive inside _numbers; drop the module-level name.
    del Decimal
except ImportError:
    # No decimal module on this interpreter; just leave it out.
    pass

Number = Typeclass(*_numbers)
del _numbers

### String -- subinstance of ImSequence
####################################################

# Python 2: both byte strings and unicode strings count as text.
String = Typeclass(str, unicode)

### ImSequence -- immutable sequences
####################################################

ImSequence = Typeclass(tuple, xrange, String)

### MSequence -- mutable sequences
####################################################

MSequence = Typeclass(list)

### Mapping
####################################################

Mapping = Typeclass(dict)
diff --git a/translate-toolkit-1.3.0/translate/misc/wStringIO.py b/translate-toolkit-1.3.0/translate/misc/wStringIO.py
new file mode 100644
index 0000000..637513d
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/wStringIO.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2004-2006 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""A wrapper for cStringIO that provides more of the functions of StringIO at the speed of cStringIO"""
+
+import cStringIO
+
class StringIO:
    """An in-memory file-like object: cStringIO's speed with more of the
    pure-python StringIO interface (tracked .len and .pos, iteration).

    Python 2 only (relies on cStringIO and the unicode builtin).
    """

    def __init__(self, buf=''):
        # cStringIO only holds byte strings: coerce arbitrary objects
        # via str() and encode unicode text as UTF-8.
        if not isinstance(buf, (str, unicode)):
            buf = str(buf)
        if isinstance(buf, unicode):
            buf = buf.encode('utf-8')
        self.len = len(buf)
        self.buf = cStringIO.StringIO()
        self.buf.write(buf)
        self.buf.seek(0)
        self.pos = 0
        self.closed = 0

    def _check_open(self):
        """Raise ValueError if this buffer has been closed."""
        if self.closed:
            raise ValueError("I/O operation on closed file")

    def __iter__(self):
        return self

    def next(self):
        """Return the next line (Python 2 iterator protocol); raise
        StopIteration at EOF or once the buffer is closed."""
        if self.closed:
            raise StopIteration
        r = self.readline()
        if not r:
            raise StopIteration
        return r

    def close(self):
        """Free the memory buffer.
        """
        if not self.closed:
            self.closed = 1
            del self.buf, self.pos

    def isatty(self):
        self._check_open()
        return False

    def seek(self, pos, mode=0):
        self._check_open()
        self.buf.seek(pos, mode)
        self.pos = self.buf.tell()

    def tell(self):
        self._check_open()
        return self.pos

    def read(self, n=None):
        self._check_open()
        if n is None:  # idiom fix: was "n == None"
            r = self.buf.read()
        else:
            r = self.buf.read(n)
        self.pos = self.buf.tell()
        return r

    def readline(self, length=None):
        self._check_open()
        if length is not None:
            r = self.buf.readline(length)
        else:
            r = self.buf.readline()
        self.pos = self.buf.tell()
        return r

    def readlines(self):
        self._check_open()
        lines = self.buf.readlines()
        self.pos = self.buf.tell()
        return lines

    def truncate(self, size=None):
        self._check_open()
        # Bug fix: cStringIO's truncate() takes an optional integer and
        # rejects None, so only forward size when one was given.  The
        # pure-python StringIO treats None as "truncate at the current
        # position", which the no-argument call reproduces.
        if size is None:
            self.buf.truncate()
        else:
            self.buf.truncate(size)
        self.pos = self.buf.tell()
        self.buf.seek(0, 2)
        self.len = self.buf.tell()
        self.buf.seek(self.pos)

    def write(self, s):
        self._check_open()
        origpos = self.buf.tell()
        self.buf.write(s)
        self.pos = self.buf.tell()
        # Only rescan the total length when this write grew the buffer.
        if origpos + len(s) > self.len:
            self.buf.seek(0, 2)
            self.len = self.buf.tell()
            self.buf.seek(self.pos)

    def writelines(self, lines):
        self._check_open()
        self.buf.writelines(lines)
        self.pos = self.buf.tell()
        # The writes may have extended the buffer; refresh cached length.
        self.buf.seek(0, 2)
        self.len = self.buf.tell()
        self.buf.seek(self.pos)

    def flush(self):
        self._check_open()
        self.buf.flush()

    def getvalue(self):
        self._check_open()
        return self.buf.getvalue()
+
class CatchStringOutput(StringIO, object):
    """StringIO variant that hands its final contents to a callback just
    before the stream closes."""

    def __init__(self, onclose):
        """Set up the output stream, remembering the callable to invoke
        with the buffer contents on close."""
        StringIO.__init__(self)
        self.onclose = onclose

    def close(self):
        """Deliver the accumulated value to onclose, then close the
        underlying stream."""
        self.onclose(self.getvalue())
        super(CatchStringOutput, self).close()

    def slam(self):
        """Force the stream closed if it is still open."""
        if not self.closed:
            self.close()
diff --git a/translate-toolkit-1.3.0/translate/misc/xmlwrapper.py b/translate-toolkit-1.3.0/translate/misc/xmlwrapper.py
new file mode 100644
index 0000000..96cfc95
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/xmlwrapper.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2004, 2005 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""simpler wrapper to the elementtree XML parser"""
+
+try:
+ from xml.etree import ElementTree
+except ImportError:
+ from elementtree import ElementTree
+# this is needed to prevent expat-version conflicts with wx >= 2.5.2.2
+from xml.parsers import expat
+
# Keep a reference to the stock fixtag — presumably so callers can
# restore it after installing a custom one via makefixtagproc.
# NOTE(review): ElementTree.fixtag is an internal helper and is absent
# from newer ElementTree releases — confirm before upgrading.
basicfixtag = ElementTree.fixtag
+
def makefixtagproc(namespacemap):
    """Build an alternative fixtag procedure that prefers the prefixes
    supplied in namespacemap (uri -> prefix) over generated "nsN" names."""
    def fixtag(tag, namespaces):
        """Given a decorated tag ({uri}tag), return the prefixed tag and
        a namespace declaration pair, or None if already declared."""
        if isinstance(tag, ElementTree.QName):
            tag = tag.text
        namespace_uri, tag = tag[1:].split("}", 1)
        prefix = namespaces.get(namespace_uri)
        if prefix is not None:
            # Namespace already declared earlier in this serialisation.
            return "%s:%s" % (prefix, tag), None
        # First occurrence: use the caller's preferred prefix when
        # available, otherwise invent a sequentially numbered one.
        if namespace_uri in namespacemap:
            prefix = namespacemap[namespace_uri]
        else:
            prefix = "ns%d" % len(namespaces)
        namespaces[namespace_uri] = prefix
        return "%s:%s" % (prefix, tag), ("xmlns:%s" % prefix, namespace_uri)
    return fixtag
+
def splitnamespace(fulltag):
    """Split a Clark-notation tag into (namespace, tag); the namespace
    part keeps its surrounding braces and is "" when absent."""
    if '{' not in fulltag:
        return "", fulltag
    namespace = fulltag[fulltag.find('{'):fulltag.find('}') + 1]
    return namespace, fulltag.replace(namespace, "", 1)
+
class XMLWrapper:
    """Convenience wrapper around an elementtree element: namespace
    handling, child lookup and text extraction."""

    def __init__(self, obj):
        """Wrap the given elementtree element."""
        self.obj = obj
        self.namespace, self.tag = splitnamespace(self.obj.tag)
        # Attribute map keyed by the namespace-stripped attribute names.
        self.attrib = {}
        for fullkey, value in self.obj.attrib.iteritems():
            self.attrib[splitnamespace(fullkey)[1]] = value

    def getchild(self, searchtag, tagclass=None):
        """Return the first descendant whose namespace-stripped tag equals
        searchtag, wrapped in tagclass; raise KeyError when none is found."""
        if tagclass is None:
            tagclass = XMLWrapper
        for node in self.obj.getiterator():
            # getiterator() yields the element itself too; skip it.
            if node == self.obj:
                continue
            if splitnamespace(node.tag)[1] == searchtag:
                return tagclass(node)
        raise KeyError("could not find child with tag %r" % searchtag)

    def getchildren(self, searchtag, tagclass=None, excludetags=[]):
        """Return every matching descendant wrapped in tagclass."""
        if tagclass is None:
            tagclass = XMLWrapper
        return [tagclass(node) for node in self.obj.getiterator()
                if node != self.obj and splitnamespace(node.tag)[1] == searchtag]

    def gettext(self, searchtag):
        """Return the text of the first matching descendant."""
        return self.getchild(searchtag).obj.text

    def getxml(self, encoding=None):
        """Serialise the wrapped element back to XML."""
        return ElementTree.tostring(self.obj, encoding)

    def getplaintext(self, excludetags=[]):
        """Concatenate all text content, recursing into children whose
        tags are not listed in excludetags."""
        text = ""
        if self.obj.text is not None:
            text += self.obj.text
        # NOTE(review): relies on elementtree's private _children list.
        for child in self.obj._children:
            wrapped = XMLWrapper(child)
            if wrapped.tag not in excludetags:
                text += wrapped.getplaintext(excludetags)
        if self.obj.tail is not None:
            text += self.obj.tail
        return text

    def getvalues(self, searchtag):
        """Return the text of every matching descendant."""
        return [child.obj.text for child in self.getchildren(searchtag)]

    def __repr__(self):
        """Debug representation: the tag plus the instance dict."""
        return self.tag + ':' + repr(self.__dict__)

    def getattr(self, attrname):
        """Return the value of the namespace-stripped attribute."""
        return self.attrib[attrname]

    def write(self, file, encoding="UTF-8"):
        """Write the wrapped element to file as an XML document."""
        ElementTree.ElementTree(self.obj).write(file, encoding)
+
def BuildTree(xmlstring):
    """Parse xmlstring and return the root element.

    Fixed to use ElementTree.fromstring instead of the deprecated
    XMLTreeBuilder feed/close dance: fromstring exists both in the old
    standalone elementtree package and in xml.etree, while the
    XMLTreeBuilder alias was removed from the stdlib in Python 3.9.
    """
    return ElementTree.fromstring(xmlstring)
+
def MakeElement(tag, attrib=None, **extraargs):
    """Create an ElementTree.Element with the given tag, attribute dict
    and extra keyword attributes.

    Fixed: the original used a mutable default argument (attrib={});
    a None sentinel avoids sharing one dict across all calls while
    keeping the call signature backward-compatible.
    """
    if attrib is None:
        attrib = {}
    return ElementTree.Element(tag, attrib, **extraargs)
+
diff --git a/translate-toolkit-1.3.0/translate/misc/zipfileext.py b/translate-toolkit-1.3.0/translate/misc/zipfileext.py
new file mode 100644
index 0000000..a902f66
--- /dev/null
+++ b/translate-toolkit-1.3.0/translate/misc/zipfileext.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2004, 2005 Zuza Software Foundation
+#
+# This file is part of translate.
+#
+# translate is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# translate is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with translate; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""extensions to zipfile standard module that will hopefully get included in future..."""
+
+from zipfile import ZipFile, struct, structCentralDir, stringCentralDir, structEndArchive, stringEndArchive
+
class ZipFileExt(ZipFile, object):
    """a ZipFile that can handle replacing objects

    NOTE: Python 2 code (print statements, pre-2.6 zipfile internals
    such as structCentralDir / stringCentralDir).
    """
    def delete(self, name):
        """Delete the file from the archive. If it appears multiple
        times only the first instance will be deleted."""
        for i in range (0, len(self.filelist)):
            if self.filelist[i].filename == name:
                if self.debug:
                    print "Removing", name
                deleted_offset = self.filelist[i].header_offset
                # Bytes occupied by the local header plus compressed data.
                # "file_offset" is only available in python up to 2.4
                if hasattr(self.filelist[i], "file_offset"):
                    deleted_size = (self.filelist[i].file_offset - self.filelist[i].header_offset) + self.filelist[i].compress_size
                else:
                    deleted_size = (len(self.filelist[i].FileHeader()) - self.filelist[i].header_offset) + self.filelist[i].compress_size
                # Size of this entry's central-directory record (fixed
                # part plus variable-length filename/extra fields).
                zinfo_size = struct.calcsize(structCentralDir) + len(self.filelist[i].filename) + len(self.filelist[i].extra)
                # Remove the file's data from the archive.
                current_offset = self.fp.tell()
                # go to the end of the archive to calculate the total archive_size
                self.fp.seek(0, 2)
                archive_size = self.fp.tell()
                # Shift everything after the deleted entry down over it.
                self.fp.seek(deleted_offset + deleted_size)
                buf = self.fp.read()
                self.fp.seek(deleted_offset)
                self.fp.write(buf)
                # NOTE(review): zinfo_size is subtracted too — apparently
                # anticipating the central directory (rewritten on close)
                # shrinking by one entry; confirm before relying on it.
                self.fp.truncate(archive_size - deleted_size - zinfo_size)
                # go to the end of the archive to calculate the total archive_size
                self.fp.seek(0, 2)
                if self.debug >= 2:
                    if self.fp.tell() != archive_size - deleted_size - zinfo_size:
                        print "truncation failed: %r != %r" % (self.fp.tell(), archive_size - deleted_size - zinfo_size)
                # Restore the file position, adjusted for the removed bytes.
                if current_offset > deleted_offset + deleted_size:
                    current_offset -= deleted_size
                elif current_offset > deleted_offset:
                    current_offset = deleted_offset
                self.fp.seek(current_offset, 0)
                # Remove file from central directory.
                del self.filelist[i]
                # Adjust the remaining offsets in the central directory.
                for j in range (i, len(self.filelist)):
                    if self.filelist[j].header_offset > deleted_offset:
                        self.filelist[j].header_offset -= deleted_size
                    # "file_offset" is only available in python up to 2.4
                    # NOTE(review): indexing with [i] below looks like it
                    # was meant to be [j]; harmless in practice because
                    # every ZipInfo has the same set of attributes.
                    if hasattr(self.filelist[i], "file_offset"):
                        if self.filelist[j].file_offset > deleted_offset:
                            self.filelist[j].file_offset -= deleted_size
                del self.NameToInfo[name]
                return
        if self.debug:
            print name, "not in archive"

    def close(self):
        """Close the file, and for mode "w" and "a" write the ending
        records."""
        if self.fp is None:
            # Already closed.
            return
        self.writeendrec()
        if not self._filePassed:
            # Only close file objects this ZipFile opened itself.
            self.fp.close()
        self.fp = None

    def writeendrec(self):
        """Write the ending records (without necessarily closing the file)"""
        if self.mode in ("w", "a"): # write ending records
            count = 0
            # NOTE(review): current_offset is assigned but never used.
            current_offset = self.fp.tell()
            pos1 = self.fp.tell()
            for zinfo in self.filelist: # write central directory
                count = count + 1
                dt = zinfo.date_time
                # Encode date/time in the MS-DOS format the zip spec requires.
                dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
                dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
                centdir = struct.pack(structCentralDir,
                  stringCentralDir, zinfo.create_version,
                  zinfo.create_system, zinfo.extract_version, zinfo.reserved,
                  zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
                  zinfo.CRC, zinfo.compress_size, zinfo.file_size,
                  len(zinfo.filename), len(zinfo.extra), len(zinfo.comment),
                  0, zinfo.internal_attr, zinfo.external_attr,
                  zinfo.header_offset)
                self.fp.write(centdir)
                self.fp.write(zinfo.filename)
                self.fp.write(zinfo.extra)
                self.fp.write(zinfo.comment)
            pos2 = self.fp.tell()
            # Write end-of-zip-archive record
            endrec = struct.pack(structEndArchive, stringEndArchive,
                     0, 0, count, count, pos2 - pos1, pos1, 0)
            self.fp.write(endrec)
            # Rewind to the start of the central directory so a later
            # append can overwrite it and writeendrec can run again.
            self.fp.seek(pos1)
+