Web   ·   Wiki   ·   Activities   ·   Blog   ·   Lists   ·   Chat   ·   Meeting   ·   Bugs   ·   Git   ·   Translate   ·   Archive   ·   People   ·   Donate
summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorflorent <florent.pigout@gmail.com>2011-07-10 15:15:18 (GMT)
committer florent <florent.pigout@gmail.com>2011-07-10 15:15:18 (GMT)
commit17e69fa91ee9df58ca5479125087776f89228cb0 (patch)
tree40535c4832be2ea6a249646c6baa7e898f8cc3b6
parent69637d214246b26423c92ea7ca8a3571ecc56b01 (diff)
make the flask based tools more clean and nicer -> move requirements to lib dir + limit import of code
-rw-r--r--.gitignore8
-rw-r--r--activity/activity-atoideweb.svg2
-rwxr-xr-xactivity/activity.info4
-rw-r--r--app_main.py112
-rw-r--r--atoideweb/activity.py24
-rw-r--r--atoideweb/controllers/__init__.py3
-rw-r--r--atoideweb/controllers/ajax.py31
-rw-r--r--atoideweb/controllers/base.py37
-rw-r--r--atoideweb/controllers/index.py14
-rw-r--r--atoideweb/controllers/post.py25
-rw-r--r--atoideweb/controllers/templates/_helpers.html14
-rw-r--r--atoideweb/controllers/templates/atoideweb/activity.html16
-rw-r--r--atoideweb/controllers/templates/atoideweb/story.html12
-rw-r--r--atoideweb/tools/image.py20
-rw-r--r--atoideweb/tools/keys.py686
-rw-r--r--atoideweb/tools/registry.py57
-rw-r--r--atoideweb/tools/sound.py2
-rw-r--r--atoideweb/tools/storage.py487
-rw-r--r--atoideweb/ui/toolbar.py595
-rw-r--r--babel/__init__.py39
-rw-r--r--babel/core.py784
-rw-r--r--babel/dates.py991
-rw-r--r--babel/global.datbin36319 -> 0 bytes
-rw-r--r--babel/localedata.py209
-rw-r--r--babel/localedata/en_US.datbin364 -> 0 bytes
-rw-r--r--babel/localedata/fr_FR.datbin364 -> 0 bytes
-rw-r--r--babel/messages/__init__.py16
-rw-r--r--babel/messages/catalog.py721
-rw-r--r--babel/messages/checkers.py179
-rw-r--r--babel/messages/extract.py554
-rw-r--r--babel/messages/frontend.py1194
-rw-r--r--babel/messages/jslexer.py175
-rw-r--r--babel/messages/mofile.py121
-rw-r--r--babel/messages/plurals.py256
-rw-r--r--babel/messages/pofile.py455
-rw-r--r--babel/numbers.py583
-rw-r--r--babel/support.py398
-rw-r--r--babel/util.py348
-rw-r--r--bewype/__init__.py2
-rw-r--r--bewype/flask/__init__.py4
-rw-r--r--bewype/flask/_app.py26
-rw-r--r--bewype/flask/controllers/__init__.py46
-rw-r--r--bewype/flask/controllers/index.py8
-rw-r--r--bewype/flask/controllers/themes.py24
-rw-r--r--config.ini6
-rw-r--r--flaskext/__init__.py1
-rw-r--r--flaskext/themes.py443
-rw-r--r--lib/__init__.py0
-rw-r--r--lib/euclid.py516
-rw-r--r--lib/flask/__init__.py (renamed from flask/__init__.py)0
-rw-r--r--lib/flask/app.py (renamed from flask/app.py)0
-rw-r--r--lib/flask/config.py (renamed from flask/config.py)0
-rw-r--r--lib/flask/ctx.py (renamed from flask/ctx.py)0
-rw-r--r--lib/flask/globals.py (renamed from flask/globals.py)0
-rw-r--r--lib/flask/helpers.py (renamed from flask/helpers.py)0
-rw-r--r--lib/flask/logging.py (renamed from flask/logging.py)0
-rw-r--r--lib/flask/module.py (renamed from flask/module.py)0
-rw-r--r--lib/flask/session.py (renamed from flask/session.py)0
-rw-r--r--lib/flask/signals.py (renamed from flask/signals.py)0
-rw-r--r--lib/flask/templating.py (renamed from flask/templating.py)0
-rw-r--r--lib/flask/testing.py (renamed from flask/testing.py)0
-rw-r--r--lib/flask/wrappers.py (renamed from flask/wrappers.py)0
-rw-r--r--lib/graphics.py1681
-rw-r--r--lib/jinja2/__init__.py (renamed from jinja2/__init__.py)0
-rw-r--r--lib/jinja2/_debugsupport.c (renamed from jinja2/_debugsupport.c)0
-rw-r--r--lib/jinja2/_markupsafe/__init__.py (renamed from jinja2/_markupsafe/__init__.py)0
-rw-r--r--lib/jinja2/_markupsafe/_bundle.py (renamed from jinja2/_markupsafe/_bundle.py)0
-rw-r--r--lib/jinja2/_markupsafe/_constants.py (renamed from jinja2/_markupsafe/_constants.py)0
-rw-r--r--lib/jinja2/_markupsafe/_native.py (renamed from jinja2/_markupsafe/_native.py)0
-rw-r--r--lib/jinja2/_markupsafe/tests.py (renamed from jinja2/_markupsafe/tests.py)0
-rw-r--r--lib/jinja2/_stringdefs.py (renamed from jinja2/_stringdefs.py)0
-rw-r--r--lib/jinja2/bccache.py (renamed from jinja2/bccache.py)0
-rw-r--r--lib/jinja2/compiler.py (renamed from jinja2/compiler.py)0
-rw-r--r--lib/jinja2/constants.py (renamed from jinja2/constants.py)0
-rw-r--r--lib/jinja2/debug.py (renamed from jinja2/debug.py)0
-rw-r--r--lib/jinja2/defaults.py (renamed from jinja2/defaults.py)0
-rw-r--r--lib/jinja2/environment.py (renamed from jinja2/environment.py)0
-rw-r--r--lib/jinja2/exceptions.py (renamed from jinja2/exceptions.py)0
-rw-r--r--lib/jinja2/ext.py (renamed from jinja2/ext.py)0
-rw-r--r--lib/jinja2/filters.py (renamed from jinja2/filters.py)0
-rw-r--r--lib/jinja2/lexer.py (renamed from jinja2/lexer.py)0
-rw-r--r--lib/jinja2/loaders.py (renamed from jinja2/loaders.py)0
-rw-r--r--lib/jinja2/meta.py (renamed from jinja2/meta.py)0
-rw-r--r--lib/jinja2/nodes.py (renamed from jinja2/nodes.py)0
-rw-r--r--lib/jinja2/optimizer.py (renamed from jinja2/optimizer.py)0
-rw-r--r--lib/jinja2/parser.py (renamed from jinja2/parser.py)0
-rw-r--r--lib/jinja2/runtime.py (renamed from jinja2/runtime.py)0
-rw-r--r--lib/jinja2/sandbox.py (renamed from jinja2/sandbox.py)0
-rw-r--r--lib/jinja2/tests.py (renamed from jinja2/tests.py)0
-rw-r--r--lib/jinja2/utils.py (renamed from jinja2/utils.py)0
-rw-r--r--lib/jinja2/visitor.py (renamed from jinja2/visitor.py)0
-rwxr-xr-xlib/png.py3785
-rw-r--r--lib/proximity.py88
-rw-r--r--lib/pytweener.py343
-rw-r--r--lib/server/__init__.py2
-rw-r--r--lib/server/_server.py20
-rw-r--r--lib/server/config.py (renamed from atoideweb/tools/config.py)66
-rw-r--r--lib/server/flask/__init__.py5
-rw-r--r--lib/server/flask/_app.py57
-rw-r--r--lib/werkzeug/__init__.py (renamed from werkzeug/__init__.py)0
-rw-r--r--lib/werkzeug/_internal.py (renamed from werkzeug/_internal.py)0
-rw-r--r--lib/werkzeug/contrib/__init__.py (renamed from werkzeug/contrib/__init__.py)0
-rw-r--r--lib/werkzeug/contrib/atom.py (renamed from werkzeug/contrib/atom.py)0
-rw-r--r--lib/werkzeug/contrib/cache.py (renamed from werkzeug/contrib/cache.py)0
-rw-r--r--lib/werkzeug/contrib/fixers.py (renamed from werkzeug/contrib/fixers.py)0
-rw-r--r--lib/werkzeug/contrib/iterio.py (renamed from werkzeug/contrib/iterio.py)0
-rw-r--r--lib/werkzeug/contrib/jsrouting.py (renamed from werkzeug/contrib/jsrouting.py)0
-rw-r--r--lib/werkzeug/contrib/kickstart.py (renamed from werkzeug/contrib/kickstart.py)0
-rw-r--r--lib/werkzeug/contrib/limiter.py (renamed from werkzeug/contrib/limiter.py)0
-rw-r--r--lib/werkzeug/contrib/lint.py (renamed from werkzeug/contrib/lint.py)0
-rw-r--r--lib/werkzeug/contrib/profiler.py (renamed from werkzeug/contrib/profiler.py)0
-rw-r--r--lib/werkzeug/contrib/securecookie.py (renamed from werkzeug/contrib/securecookie.py)0
-rw-r--r--lib/werkzeug/contrib/sessions.py (renamed from werkzeug/contrib/sessions.py)0
-rw-r--r--lib/werkzeug/contrib/testtools.py (renamed from werkzeug/contrib/testtools.py)0
-rw-r--r--lib/werkzeug/contrib/wrappers.py (renamed from werkzeug/contrib/wrappers.py)0
-rw-r--r--lib/werkzeug/datastructures.py (renamed from werkzeug/datastructures.py)0
-rw-r--r--lib/werkzeug/debug/__init__.py (renamed from werkzeug/debug/__init__.py)0
-rw-r--r--lib/werkzeug/debug/console.py (renamed from werkzeug/debug/console.py)0
-rw-r--r--lib/werkzeug/debug/render.py (renamed from werkzeug/debug/render.py)0
-rw-r--r--lib/werkzeug/debug/repr.py (renamed from werkzeug/debug/repr.py)0
-rw-r--r--lib/werkzeug/debug/shared/body.tmpl (renamed from werkzeug/debug/shared/body.tmpl)0
-rw-r--r--lib/werkzeug/debug/shared/codetable.tmpl (renamed from werkzeug/debug/shared/codetable.tmpl)0
-rw-r--r--lib/werkzeug/debug/shared/console.png (renamed from werkzeug/debug/shared/console.png)bin507 -> 507 bytes
-rw-r--r--lib/werkzeug/debug/shared/debugger.js (renamed from werkzeug/debug/shared/debugger.js)0
-rw-r--r--lib/werkzeug/debug/shared/jquery.js (renamed from werkzeug/debug/shared/jquery.js)0
-rw-r--r--lib/werkzeug/debug/shared/less.png (renamed from werkzeug/debug/shared/less.png)bin191 -> 191 bytes
-rw-r--r--lib/werkzeug/debug/shared/more.png (renamed from werkzeug/debug/shared/more.png)bin200 -> 200 bytes
-rw-r--r--lib/werkzeug/debug/shared/source.png (renamed from werkzeug/debug/shared/source.png)bin818 -> 818 bytes
-rw-r--r--lib/werkzeug/debug/shared/style.css (renamed from werkzeug/debug/shared/style.css)0
-rw-r--r--lib/werkzeug/debug/shared/vartable.tmpl (renamed from werkzeug/debug/shared/vartable.tmpl)0
-rw-r--r--lib/werkzeug/debug/tbtools.py (renamed from werkzeug/debug/tbtools.py)0
-rw-r--r--lib/werkzeug/debug/templates/console.html (renamed from werkzeug/debug/templates/console.html)0
-rw-r--r--lib/werkzeug/debug/templates/dump_object.html (renamed from werkzeug/debug/templates/dump_object.html)0
-rw-r--r--lib/werkzeug/debug/templates/frame.html (renamed from werkzeug/debug/templates/frame.html)0
-rw-r--r--lib/werkzeug/debug/templates/help_command.html (renamed from werkzeug/debug/templates/help_command.html)0
-rw-r--r--lib/werkzeug/debug/templates/source.html (renamed from werkzeug/debug/templates/source.html)0
-rw-r--r--lib/werkzeug/debug/templates/traceback_full.html (renamed from werkzeug/debug/templates/traceback_full.html)0
-rw-r--r--lib/werkzeug/debug/templates/traceback_plaintext.html (renamed from werkzeug/debug/templates/traceback_plaintext.html)0
-rw-r--r--lib/werkzeug/debug/templates/traceback_summary.html (renamed from werkzeug/debug/templates/traceback_summary.html)0
-rw-r--r--lib/werkzeug/debug/utils.py (renamed from werkzeug/debug/utils.py)0
-rw-r--r--lib/werkzeug/exceptions.py (renamed from werkzeug/exceptions.py)0
-rw-r--r--lib/werkzeug/formparser.py (renamed from werkzeug/formparser.py)0
-rw-r--r--lib/werkzeug/http.py (renamed from werkzeug/http.py)0
-rw-r--r--lib/werkzeug/local.py (renamed from werkzeug/local.py)0
-rw-r--r--lib/werkzeug/posixemulation.py (renamed from werkzeug/posixemulation.py)0
-rw-r--r--lib/werkzeug/routing.py (renamed from werkzeug/routing.py)0
-rw-r--r--lib/werkzeug/script.py (renamed from werkzeug/script.py)0
-rw-r--r--lib/werkzeug/security.py (renamed from werkzeug/security.py)0
-rw-r--r--lib/werkzeug/serving.py (renamed from werkzeug/serving.py)0
-rw-r--r--lib/werkzeug/templates.py (renamed from werkzeug/templates.py)0
-rw-r--r--lib/werkzeug/test.py (renamed from werkzeug/test.py)0
-rw-r--r--lib/werkzeug/testapp.py (renamed from werkzeug/testapp.py)0
-rw-r--r--lib/werkzeug/urls.py (renamed from werkzeug/urls.py)0
-rw-r--r--lib/werkzeug/useragents.py (renamed from werkzeug/useragents.py)0
-rw-r--r--lib/werkzeug/utils.py (renamed from werkzeug/utils.py)0
-rw-r--r--lib/werkzeug/wrappers.py (renamed from werkzeug/wrappers.py)0
-rw-r--r--lib/werkzeug/wsgi.py (renamed from werkzeug/wsgi.py)0
-rw-r--r--peak/__init__.py2
-rw-r--r--peak/util/__init__.py1
-rw-r--r--peak/util/imports.py410
-rw-r--r--pkg_resources.py2625
-rw-r--r--po/README2
-rw-r--r--po/messages.pot27
-rw-r--r--po/org.laptop.AToiDeWebActivity_en.po26
-rw-r--r--po/org.laptop.AToiDeWebActivity_fr.po27
-rw-r--r--run.py18
-rw-r--r--templates/_helpers.html43
-rw-r--r--templates/atoideweb/ajax.html41
-rw-r--r--templates/atoideweb/post.html24
-rw-r--r--templates/layout.html (renamed from atoideweb/controllers/templates/layout.html)1
170 files changed, 344 insertions, 19198 deletions
diff --git a/.gitignore b/.gitignore
index 028165f..f4d0d1e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,5 @@
syntax: glob
+.pydevproject
+.project
*.pyc
-*.swp
-*.seq
-*.ogg
-dist
-locale
+*.mo
diff --git a/activity/activity-atoideweb.svg b/activity/activity-atoideweb.svg
index d83ceb7..7ae847b 100644
--- a/activity/activity-atoideweb.svg
+++ b/activity/activity-atoideweb.svg
@@ -14,7 +14,7 @@
id="svg2"
sodipodi:version="0.32"
inkscape:version="0.47 r22583"
- sodipodi:docname="activity-atoidejouer.svg"
+ sodipodi:docname="activity-AToiDeWeb.svg"
width="55"
height="55"
inkscape:output_extension="org.inkscape.output.svg.inkscape">
diff --git a/activity/activity.info b/activity/activity.info
index 726ff01..3106a37 100755
--- a/activity/activity.info
+++ b/activity/activity.info
@@ -2,6 +2,6 @@
name = AToiDeWeb
activity_version = 1
service_name = org.laptop.AToiDeWebActivity
-icon = activity-atoideweb
-class = atoideweb.activity.AToiDeWebActivity
+icon = activity-AToiDeWeb
+class = AToiDeWeb.activity.AToiDeWebActivity
license = gplv3
diff --git a/app_main.py b/app_main.py
deleted file mode 100644
index de1d277..0000000
--- a/app_main.py
+++ /dev/null
@@ -1,112 +0,0 @@
-
-# python import
-import logging, multiprocessing, os
-# ..
-from gettext import gettext as _
-
-# atoidejouer import
-from atoideweb.tools import config
-# init debug
-config.Config(debug=True)
-# ...
-from atoideweb.tools import keys
-
-# bewype import
-from bewype.flask import app, controllers, run_app
-
-# get application logger
-logger = logging.getLogger('atoidejouer')
-logger.setLevel(logging.DEBUG)
-
-
-# ...
-from werkzeug import SharedDataMiddleware
-app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
- '/static': os.path.join(os.path.dirname(__file__), 'static')
-})
-
-
-class AToiDeWeb(object):
-
- def __init__(self):
- # start web server
- controllers.init_controllers(namespace='atoideweb.controllers')
- self._server = multiprocessing.Process(target=run_app)
- self._server.start()
- # sequence dict and list
- self.graphic_keys = keys.StoryKeys('graphics', self)
- self.sound_keys = keys.StoryKeys('sounds', self)
- # keep thread & screen
- self._current = None
- self._number_of_keys = 1
-
- def update_number_of_keys(self):
- # set max
- _graphic_max = self.graphic_keys.get_max_frame()
- _sound_max = self.sound_keys.get_max_frame()
- _max = _graphic_max if _graphic_max > _sound_max else _sound_max
- # update value
- self._number_of_keys = _max + 1
-
- def get_current_screen(self):
- pass
-
- def set_current_screen(self, name, screen):
- self._current = name
-
- def read_file(self, file_path):
- """Read saved data from datastore and resume activity based on a
- previous state.
- """
- # init content
- _data = None
- # read file
- _file = open(file_path, 'r')
- try:
- _data = _file.read()
- finally:
- _file.close()
- # parse json data
- self.graphic_keys.loads(_data)
- self.sound_keys.loads(_data)
- # set activity new number of keys
- self.update_number_of_keys()
- # clear
- self.graphic_keys.ask_clear()
- # refresh default
- # _current = self.get_current_screen()
- # _current.refresh()
-
- def write_file(self, file_path):
- """Dummy overiding ...
- """
- # content dict
- _data_dict = {
- 'graphic_names': self.graphic_keys._names,
- 'graphic_keys': self.graphic_keys._keys,
- 'sound_names': self.sound_keys._names,
- 'sound_keys': self.sound_keys._keys,
- }
- # prepare content
- _data = json.dumps(_data_dict)
- # save content
- _f = open(file_path, 'wb')
- try:
- _f.write(_data)
- finally:
- _f.close()
-
- def close(self):
- # stop web thread
- self._server.terminate()
- self._server.join()
-
-
-if __name__ == "__main__":
- # ...
- _web_debug = AToiDeWeb()
- # ...
- import atexit
- atexit.register(_web_debug.close)
- while True:
- continue
diff --git a/atoideweb/activity.py b/atoideweb/activity.py
index 5df308e..9f34d83 100644
--- a/atoideweb/activity.py
+++ b/atoideweb/activity.py
@@ -9,14 +9,13 @@ from sugar.activity import activity
# hulahop import
from hulahop.webview import WebView
-# atoidejouer import
-import app_main
+# atoideweb import
+import run
# ..
-from atoideweb.tools import config, keys
from atoideweb.ui import toolbar
# get application logger
-logger = logging.getLogger('atoidejouer')
+logger = logging.getLogger(run.APP_NAME)
URL_BASE = 'http://localhost:5000/'
@@ -35,11 +34,11 @@ def _toolbar_changed(toolbox, page, activity_):
return True
-class AToiDeWebActivity(activity.Activity, app_main.AToiDeWeb):
+class AToiDeWebActivity(activity.Activity, run.atoideweb):
def __init__(self, handle):
# init parents
- app_main.AToiDeWeb.__init__(self)
+ run.atoideweb.__init__(self)
activity.Activity.__init__(self, handle)
# ..
self.max_participants = 1
@@ -56,11 +55,11 @@ class AToiDeWebActivity(activity.Activity, app_main.AToiDeWeb):
# tmp var
_toolbar = None
# init toolbars
- for _n in ['story', 'graphics', 'graphics_add', 'sounds', 'sounds_add']:
+ for _n in ['eating', 'spare-time']:
# init toolbar
_t = toolbar.Toolbar(self, name=_n)
# if default toolbar .. set default screen
- if _n == 'story':
+ if _n == 'eating':
self._change_screen(_t)
_toolbar = _t
# set default tab
@@ -78,12 +77,13 @@ class AToiDeWebActivity(activity.Activity, app_main.AToiDeWeb):
self.web_view.load_uri(URL_BASE + _name)
def read_file(self, file_path):
- app_main.AToiDeWeb.read_file(self, file_path)
+ # .. should be overriden
+ pass
def write_file(self, file_path):
- app_main.AToiDeWeb.write_file(self, file_path)
+ # .. should be overriden
+ pass
def close(self, skip_save=False):
- app_main.AToiDeWeb.close(self)
+ run.atoideweb.close(self)
activity.Activity.close(self, skip_save=True)
-
diff --git a/atoideweb/controllers/__init__.py b/atoideweb/controllers/__init__.py
index 8b13789..547773f 100644
--- a/atoideweb/controllers/__init__.py
+++ b/atoideweb/controllers/__init__.py
@@ -1 +1,2 @@
-
+# atoideweb import
+from atoideweb.controllers.index import app, render
diff --git a/atoideweb/controllers/ajax.py b/atoideweb/controllers/ajax.py
new file mode 100644
index 0000000..2fb0120
--- /dev/null
+++ b/atoideweb/controllers/ajax.py
@@ -0,0 +1,31 @@
+# gettext import
+from gettext import gettext as _
+
+# server import
+from server.flask import app, logger, render, request, jsonify
+
+
+@app.route('/ajax', methods=['GET', 'POST'])
+def ajax():
+ # POST
+ if request.method == 'POST':
+ if 'ajax-sample' in request.form:
+ # prepare ajax result
+ _ajax_content = {
+ 'result': request.form['ajax-sample']
+ }
+ # render ajax
+ return jsonify(_ajax_content)
+ # ?? should not happen
+ else:
+ _result = '??'
+ # GET
+ else:
+ _result = ''
+ # prepare result
+ _content = {
+ 'title': _('atdw - Ajax sample'),
+ 'result': _result
+ }
+ # render result
+ return render('atoideweb/ajax.html', **_content)
diff --git a/atoideweb/controllers/base.py b/atoideweb/controllers/base.py
deleted file mode 100644
index dda2048..0000000
--- a/atoideweb/controllers/base.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# python import
-import logging
-# ..
-from gettext import gettext
-
-# make module
-from flask import Module
-module_base = Module(__name__)
-
-# bewype import
-from bewype.flask import app, render
-
-# get application logger
-logger = logging.getLogger('atoidejouer')
-
-def _render(template, **context):
- """Crappy hack!
- """
- context['_'] = gettext
- return render(template, **context)
-
-@module_base.route('/')
-def helo():
- return 'Helo!'
-
-
-@module_base.route('/activity')
-def activity():
- return _render('atoideweb/activity.html')
-
-
-@module_base.route('/story')
-def story():
- return _render('atoideweb/story.html')
-
-# do register
-app.register_module(module_base)
diff --git a/atoideweb/controllers/index.py b/atoideweb/controllers/index.py
new file mode 100644
index 0000000..7d0e1af
--- /dev/null
+++ b/atoideweb/controllers/index.py
@@ -0,0 +1,14 @@
+# gettext import
+from gettext import gettext as _
+
+# server import
+from server.flask import app, logger, render, request, jsonify
+
+# atoideweb import
+from atoideweb.controllers import post, ajax
+
+
+@app.route('/')
+def index():
+ logger.debug(_('Welcome!'))
+ return 'Welcome!'
diff --git a/atoideweb/controllers/post.py b/atoideweb/controllers/post.py
new file mode 100644
index 0000000..15c189c
--- /dev/null
+++ b/atoideweb/controllers/post.py
@@ -0,0 +1,25 @@
+# gettext import
+from gettext import gettext as _
+
+# server import
+from server.flask import app, logger, render, request, jsonify
+
+
+@app.route('/post', methods=['GET', 'POST'])
+def post():
+ # POST
+ if request.method == 'POST':
+ if 'post-sample' in request.form:
+ _result = request.form['post-sample']
+ else:
+ _result = '??'
+ # GET
+ else:
+ _result = ''
+ #
+ _content = {
+ 'title': _('atdw - POST sample'),
+ 'result': _result
+ }
+ # render result
+ return render('atoideweb/post.html', **_content)
diff --git a/atoideweb/controllers/templates/_helpers.html b/atoideweb/controllers/templates/_helpers.html
deleted file mode 100644
index c99703c..0000000
--- a/atoideweb/controllers/templates/_helpers.html
+++ /dev/null
@@ -1,14 +0,0 @@
-{% macro link_to(text, endpoint) -%}
- <a href="{{ url_for(endpoint, **kwargs) }}">{{ text }}</a>
-{%- endmacro %}
-
-{% macro options(_, name, option_list, current) -%}
- <div class='{{ name }}'>
- <h1>{{ _(name) }}</h1>
- <form name='{{ name }}' action='/config/{{ name }}' method="post">
- {% for opt in option_list %}
- <input type='radio' name='{{ name }}' value='{{ opt }}' {% if opt == current %}checked{% endif %} />{{ _(opt) }}<br />
- {% endfor %}
- </form>
- </div>
-{%- endmacro %}
diff --git a/atoideweb/controllers/templates/atoideweb/activity.html b/atoideweb/controllers/templates/atoideweb/activity.html
deleted file mode 100644
index 2f7ed46..0000000
--- a/atoideweb/controllers/templates/atoideweb/activity.html
+++ /dev/null
@@ -1,16 +0,0 @@
-{% extends "layout.html" %}
-{% block title %}{{ _('Activity') }}{% endblock %}
-{% block content %}
-<div class='config-options'>
- {{ options(_, 'difficutly', ['easy', 'advanced'], 'easy') }}
-<div>
-<script>
-<!--
-$(document).ready(function() {
- $(".config-options input[type='radio']").change( function() {
- $(".config-options form").submit();
- });
-});
--->
-</script>
-{% endblock %}
diff --git a/atoideweb/controllers/templates/atoideweb/story.html b/atoideweb/controllers/templates/atoideweb/story.html
deleted file mode 100644
index 5bfd600..0000000
--- a/atoideweb/controllers/templates/atoideweb/story.html
+++ /dev/null
@@ -1,12 +0,0 @@
-{% extends "layout.html" %}
-{% block title %}{{ _('Story') }}{% endblock %}
-{% block content %}
-<h1>{{ _('Story') }}</h1>
-<div id='helo'>
-</div>
-<script>
-<!--
-$('#helo').html('JQuery is working ...');
--->
-</script>
-{% endblock %}
diff --git a/atoideweb/tools/image.py b/atoideweb/tools/image.py
index 3fa93bd..c5057cb 100644
--- a/atoideweb/tools/image.py
+++ b/atoideweb/tools/image.py
@@ -2,24 +2,11 @@
# python import
import logging, os, struct, StringIO
-# atoidejouer import
-from atoideweb.tools import registry, storage
+# atoideweb import
+from atoideweb.tools import registry
# get application logger
-logger = logging.getLogger('atoidejouer')
-
-
-def get_sequence_first_graphic(type_, sequence_name):
- # ..
- _filename = storage.get_sequence_first_graphic_name(type_, sequence_name)
- if _filename is None:
- return storage.get_image_path('blank', dir_='data')
- if type_ == 'graphics':
- return storage.get_image_path(_filename)
- elif type_ == 'sounds':
- return storage.get_image_path('sound', dir_='data')
- else:
- return storage.get_image_path('blank', dir_='data')
+logger = logging.getLogger('atoideweb')
def compute_width_height(width, height, max_width, max_height, use_max=False):
@@ -61,7 +48,6 @@ def get_image_info(path):
pass
else:
return None, 0, 0
-
# read file
_f = open(path)
_data = _f.read()
diff --git a/atoideweb/tools/keys.py b/atoideweb/tools/keys.py
deleted file mode 100644
index 8a4099a..0000000
--- a/atoideweb/tools/keys.py
+++ /dev/null
@@ -1,686 +0,0 @@
-
-# python import
-import json, logging, os, random, string
-# ..
-from functools import partial
-from gettext import gettext as _
-
-# atoidejouer import
-from atoideweb.tools import storage
-
-# get application logger
-logger = logging.getLogger('atoidejouer')
-
-
-def random_str(length=12):
- """Simple method to generate unique and random string value.
- """
- return ''.join(random.choice(string.letters) for i in xrange(length))
-
-
-class StoryKeys(object):
-
- def __init__(self, type_, activity_):
- # keep type
- self._type = type_
- # ..
- self._activity = activity_
- # ..
- self.clear()
-
- def clear(self):
- # ..
- self._names = list()
- self.__codes = {
- 'graphics': dict(),
- 'sounds': dict(),
- 'story': dict(),
- }
- self.__clear = {
- 'graphics': False,
- 'sounds': False,
- 'story': False,
- }
- #
- self._keys = dict()
-
- def loads(self, data, clear=True):
- # parse json data
- _data_dict = json.loads(data)
- # ..
- if self._type == 'graphics':
- # ..
- _tmp_names = _data_dict['graphic_names']
- _tmp_keys = _data_dict['graphic_keys']
- # ..
- elif self._type == 'sounds':
- # ..
- _tmp_names = _data_dict['sound_names']
- _tmp_keys = _data_dict['sound_keys']
- # ??
- else:
- return
- # reset keys
- if clear is True:
- self.clear()
- else:
- pass
- # set names
- _exist_list = list()
- for _n in _tmp_names:
- if _n in self._names:
- _exist_list.append(_n)
- else:
- self._names.append(_n)
- # ensure keys
- for _seq_name, _seq_dict in _tmp_keys.items():
- # get seq_path for check
- _seq_path = storage.get_sequence_path(self._type, _seq_name)
- # little check
- if _seq_name in self._keys:
- continue
- # check path
- elif os.path.exists(_seq_path):
- # init seq dict copy
- _tmp_seq_dict = dict()
- # work on seq dict copy
- for _frame_name, _frame_dict in _seq_dict.items():
- # seq dict copy update
- _tmp_seq_dict[float(_frame_name)] = _frame_dict
- # update main dict
- self._keys[_seq_name] = _tmp_seq_dict
- # sequence does not exist
- else:
- if _seq_name in self._names:
- self._names.remove(_seq_name)
- else:
- pass
- # ..
- return _exist_list
-
- def add_sequence(self, sequence_name, filenames):
- # TODO ensure valid name
- if sequence_name in self._names:
- return False
- else:
- # add name to the registry
- self._names.append(sequence_name)
- # init seq dict
- _seq_dict = dict()
- # update dict
- for _i, _f in enumerate(filenames):
- _seq_dict[_f] = {
- '__align': [0, 0],
- '__duration': 0,
- '__loop': False,
- '__file_type': None,
- '__key_type': None
- }
- # udpate main dict
- self._keys[sequence_name] = {
- 0: _seq_dict
- }
- # ok
- return True
-
- def remove_sequence(self, sequence_name):
- # little check
- if sequence_name in self._names:
- self._names.remove(sequence_name)
- del self._keys[sequence_name]
- # ??
- else:
- pass
-
- def ask_clear(self):
- # ...
- self.__clear = {
- 'graphics': True,
- 'sounds': True,
- 'story': True,
- }
-
- def get_clear(self, screen):
- return self.__clear[screen]
-
- def set_clear(self, screen, flag):
- self.__clear[screen] = flag
-
- def set_layout(self, sequence_name, layout):
- # little check
- if sequence_name in self._names:
- _current = self._names.index(sequence_name)
- # little check
- if _current == layout:
- # do nothing
- pass
- else:
- # ...
- self._names.remove(sequence_name)
- # ...
- self._names.insert(layout, sequence_name)
- # nothing to do
- else:
- pass
-
- def get_layout(self, sequence_name):
- if sequence_name in self._names:
- return self._names.index(sequence_name)
- else:
- return -1
-
- def get_max_layout(self):
- return len(self._names) - 1
-
- def set_code(self, screen, sequence_name, filename, _code):
- # ..
- if sequence_name in self.__codes[screen]:
- pass
- else:
- self.__codes[screen][sequence_name] = dict()
- # ..
- self.__codes[screen][sequence_name][filename] = _code
-
- def get_code(self, screen, sequence_name, filename):
- # ..
- if sequence_name in self.__codes[screen]:
- # ..
- if filename in self.__codes[screen][sequence_name]:
- return self.__codes[screen][sequence_name][filename]
- # ??
- else:
- return None
- # ??
- else:
- return None
-
- def get_sequence_codes(self, screen, sequence_name):
- # ..
- if sequence_name in self.__codes[screen]:
- return self.__codes[screen][sequence_name]
- else:
- return dict()
-
- def get_names(self, mask=False):
- # .. add mask
- if mask is True:
- yield 'background_default'
- else:
- pass
- for _n in self._names:
- yield _n
- # .. add mask
- if mask is True:
- yield 'mask_default'
- else:
- pass
-
- def get_frame_dict(self, sequence_name, frame):
- # little check
- if sequence_name in self._names:
- # ..
- _seq_dict = self._keys[sequence_name]
- # little check
- if frame in _seq_dict:
- # get frame dict
- return _seq_dict[frame]
- else:
- return None
- else:
- return None
-
- def set_frame(self, sequence_name, previous_frame, new_frame):
- # little check
- if sequence_name in self._names:
- # ..
- _seq_dict = self._keys[sequence_name]
- # little check
- if previous_frame in _seq_dict:
- # set frame dict from previous
- _seq_dict[new_frame] = _seq_dict[previous_frame]
- # clear main dict
- del _seq_dict[previous_frame]
- else:
- pass
- else:
- pass
-
- def get_max_frame(self):
- # ...
- _tmp_set = set()
- # ...
- for _seq_dict in self._keys.values():
- # udpate set
- _tmp_set = _tmp_set.union(_seq_dict.keys())
- # order list
- _tmp_list = list(_tmp_set)
- _tmp_list.sort()
- # return max
- if len(_tmp_list) == 0:
- return 0
- else:
- return _tmp_list[-1]
-
- def set_filename_dict(self, sequence_name, frame, filename, dict_=None):
- # ensure default dict
- if dict_ is None:
- dict_ = {
- '__align': [0, 0],
- '__duration': 0,
- '__loop': False,
- '__file_type': None,
- '__key_type': None
- }
- else:
- pass
- # ensure frame dict
- _frame_dict = self.get_frame_dict(sequence_name, frame)
- if _frame_dict is None:
- self._keys[sequence_name][frame] = dict()
- else:
- pass
- # update the main dict
- self._keys[sequence_name][frame][filename] = dict_
-
- def get_filename_dict(self, sequence_name, frame, filename):
- # little check
- if sequence_name in self._names:
- # ..
- _seq_dict = self._keys[sequence_name]
- # little check
- if frame in _seq_dict:
- # get frame dict
- _frame_dict = _seq_dict[frame]
- # little check
- if filename in _frame_dict:
- # return filename dict
- return _frame_dict[filename]
- else:
- return None
- else:
- return None
- else:
- return None
-
- def remove_filename(self, sequence_name, frame, filename):
- # .. get the dict
- _f_dict = self.get_filename_dict(sequence_name, frame, filename)
- # little check
- if _f_dict is None:
- return
- # do remove
- else:
- del self._keys[sequence_name][frame][filename]
-
- def remove_filename_from_all(self, sequence_name, filename):
- # little check
- if sequence_name in self._names:
- for _frame in range(self.get_max_frame()):
- self.remove_filename(sequence_name, _frame, filename)
- # ..
- else:
- pass
-
- def set_align(self, sequence_name, frame, filename, align):
- # .. get the dict
- _f_dict = self.get_filename_dict(sequence_name, frame, filename)
- # little check
- if _f_dict is None:
- return
- else:
- self._keys[sequence_name][frame][filename]['__align'] = align
-
- def get_previous_align(self, sequence_name, frame):
- # prepare inversed range
- _range = range(frame)
- _range.reverse()
- # try one by one
- for _f in _range:
- # get frame dict
- _frame_dict = self.get_frame_dict(sequence_name, _f)
- # get frame values
- _frame_values = [] if _frame_dict is None else _frame_dict.values()
- # find first valid key
- for _f_dict in _frame_values:
- # little check
- if _f_dict['__key_type'] != 'key':
- continue
- elif '__align' in _f_dict:
- return _f_dict['__align']
- else:
- return [0, 0]
- # default
- return [0, 0]
-
- def get_align(self, sequence_name, frame, filename):
- # .. get the dict
- _f_dict = self.get_filename_dict(sequence_name, frame, filename)
- # little check
- if _f_dict is None:
- return [0, 0]
- else:
- # ensure default
- if '__align' in _f_dict:
- return _f_dict['__align']
- else:
- # set default
- _f_dict['__align'] = [0, 0]
- # return default
- return [0, 0]
-
- def get_next_align(self, sequence_name, frame, use_transition=False):
- # prepare inversed range
- _range = range(frame + 1, self.get_max_frame() + 1)
- # try one by one
- for _f in _range:
- # get frame dict
- _frame_dict = self.get_frame_dict(sequence_name, _f)
- # get frame values
- _frame_values = [] if _frame_dict is None else _frame_dict.values()
- # find first valid key
- for _f_dict in _frame_values:
- # little check
- if use_transition is False\
- and _f_dict['__key_type'] != 'key':
- continue
- elif '__align' in _f_dict\
- and _f_dict['__key_type'] in ['key', 'transition']:
- return _f_dict['__align']
- else:
- continue
- # default
- return [0, 0]
-
- def inc_position(self, sequence_name, frame, filename, move, value):
- # get align first
- _x, _y = self.get_align(sequence_name, frame, filename)
- # inc
- if move == 'x':
- _x += value
- elif move == 'y':
- _y += value
- else:
- # ??
- return
- # update
- self.set_align(sequence_name, frame, filename, (_x, _y))
- # return current pos
- return _x, _y
-
- def set_duration(self, sequence_name, frame, filename, duration):
- # .. get the dict
- _f_dict = self.get_filename_dict(sequence_name, frame, filename)
- # little check
- if _f_dict is None:
- return
- else:
- self._keys[sequence_name][frame][filename]['__duration'] = duration
-
- def get_duration(self, sequence_name, frame, filename):
- # .. get the dict
- _f_dict = self.get_filename_dict(sequence_name, frame, filename)
- # little check
- if _f_dict is None:
- return 0
- else:
- # ensure default
- if '__duration' in _f_dict:
- return _f_dict['__duration']
- else:
- # set default
- _f_dict['__duration'] = 0
- # return default
- return 0
-
- def inc_duration(self, sequence_name, frame, filename, value):
- # get duration first
- _d = self.get_duration(sequence_name, frame, filename)
- # inc
- _d += value
- # TODO max is max_ or next key
- # get max frame
- _max_frame = self._activity._number_of_keys - frame
- # ensure value
- if _d < 0:
- _d = 0
- elif _d >= _max_frame:
- _d = _max_frame - 1
- else:
- pass
- # update
- self.set_duration(sequence_name, frame, filename, _d)
- # return current pos
- return _d
-
- def set_loop(self, sequence_name, frame, filename, loop):
- # .. get the dict
- _f_dict = self.get_filename_dict(sequence_name, frame, filename)
- # little check
- if _f_dict is None:
- return
- else:
- self._keys[sequence_name][frame][filename]['__loop'] = loop
-
- def get_loop(self, sequence_name, frame, filename):
- # .. get the dict
- _f_dict = self.get_filename_dict(sequence_name, frame, filename)
- # little check
- if _f_dict is None:
- return False
- # ensure default
- else:
- if '__loop' in _f_dict:
- return _f_dict['__loop']
- else:
- # set default
- _f_dict['__loop'] = False
- # return default
- return False
-
- def set_current(self, sequence_name, frame, filename, file_type='lib',
- key_type='key'):
- """
- file_type lib or jnl
- key_type key or transition
- """
- # get the dict
- _filename_dict = self.get_filename_dict(sequence_name, frame, filename)
- # ensure filename dict
- if _filename_dict is None:
- self.set_filename_dict(sequence_name, frame, filename)
- else:
- pass
- # get the dict
- _frame_dict = self.get_frame_dict(sequence_name, frame)
- # little check
- if _frame_dict is None:
- pass
- else:
- # set current
- for _f, _d in _frame_dict.items():
- # key type or none
- _ft = key_type if _f == filename else None
- # update
- self._keys[sequence_name][frame][_f]['__file_type'] = _ft
- # key type or none
- _kt = key_type if _f == filename else None
- # update
- self._keys[sequence_name][frame][_f]['__key_type'] = _kt
-
- def get_current(self, sequence_name, frame):
- # get the dict
- _f_dict = self.get_frame_dict(sequence_name, frame)
- # little check
- if _f_dict is None:
- return None, None, None
- else:
- for _f, _d in _f_dict.items():
- if _d['__key_type'] is None:
- continue
- else:
- return _f, _d['__file_type'], _d['__key_type']
- # default
- return None, None, None
-
- def _refresh_graphic_keys(self, sequence_name):
- # get max frame
- _max_frame = self.get_max_frame()
- # little check
- if sequence_name in self._keys:
- # next dict for transition check
- _next_list = list()
- _range = list()
- _filenames = list()
- # update next dict
- for _frame, _filename_dict in self._keys[sequence_name].items():
- for _filename, _dict in _filename_dict.items():
- if _filename is None or _dict['__key_type'] != 'key':
- # invalidate previous
- self._keys[sequence_name][_frame][_filename] = {
- '__align': [0, 0],
- '__duration': 0,
- '__loop': False,
- '__file_type': None,
- '__key_type': None
- }
- else:
- # small update for transition management
- _tr_frame = (_frame + 1) if len(_range) == 0 else _frame
- # update current range
- _range.append(_tr_frame)
- _filenames.append((_filename, _dict['__file_type']))
- # ..
- if len(_range) == 2:
- _next_list.append((_range, _filenames))
- _range = list()
- _filenames = list()
- else:
- continue
- # set transition
- for _range, _filenames in _next_list:
- # get first align
- _first_filename, _first_filetype = _filenames[0]
- _first_x, _first_y = self.get_align(sequence_name, _range[0]-1,
- _first_filename)
- # get last align
- _last_filename, _last_filetype = _filenames[1]
- _last_x, _last_y = self.get_align(sequence_name, _range[1],
- _last_filename)
- # compute steps
- _inc_x = float(_last_x - _first_x)
- _inc_x /= (_range[1] - _range[0] + 1)
- _inc_y = float(_last_y - _first_y)
- _inc_y /= (_range[1] - _range[0] + 1)
- # init new position values
- _new_x = _first_x
- _new_y = _first_y
- for _i in range(*_range):
- # compute filename and file type
- if _i < ((_range[1] - _range[0]) / 2):
- _f = _first_filename
- _t = _first_filetype
- else:
- #_f = _last_filename
- #_t = _last_filetype
- _f = _first_filename
- _t = _first_filename
- # set transition
- self.set_current(sequence_name, _i, _f, file_type=_t,
- key_type='transition')
- # compute align
- _new_x += _inc_x
- _new_y += _inc_y
- # update align
- self.set_align(sequence_name, _i, _f, (_new_x, _new_y))
- else:
- pass
-
- def _refresh_sound_keys(self, sequence_name):
- # shortcut
- _max = self._activity._number_of_keys
- # little check
- if sequence_name in self._keys:
- # next dict for transition check
- _durations = list()
- _loops = list()
- # update next dict
- for _frame, _filename_dict in self._keys[sequence_name].items():
- for _filename, _dict in _filename_dict.items():
- if _filename is None or _dict['__key_type'] != 'key':
- # invalidate previous
- self._keys[sequence_name][_frame][_filename] = {
- '__align': [0, 0],
- '__duration': 0,
- '__loop': False,
- '__file_type': None,
- '__key_type': None
- }
- else:
- if _dict['__loop'] is True:
- _loops.append((_frame, _filename, _dict['__file_type']))
- elif _dict['__duration'] != None:
- _durations.append((_frame, _filename,
- _dict['__file_type'], _dict['__duration']))
- else:
- continue
- # set transition for loop keys
- for _frame, _f, _f_type in _loops:
- # little check
- _t_start = _frame + 1
- # ..
- if _t_start >= _max:
- continue
- else:
- pass
- # ..
- for _i in range(_t_start, _max+1):
- # set transition
- self.set_current(sequence_name, _i, _f,
- file_type=_f_type, key_type='transition')
- # set transition for loop keys
- for _frame, _f, _f_type, _d in _durations:
- # little check
- _t_start = _frame + 1
- _t_end = _t_start + _d
- # ..
- if _t_start >= _max:
- continue
- elif _t_end > _max:
- _t_end = _max+1
- else:
- pass
- # ..
- if _t_start > _t_end:
- continue
- else:
- pass
- # ..
- for _i in range(_t_start, _t_end):
- _f_dict = self.get_filename_dict(sequence_name, _i, _f)
- # update
- _kt = None if _f_dict is None else _f_dict['__key_type']
- # ..
- if _kt is None:
- # set transition
- self.set_current(sequence_name, _i, _f,
- file_type=_f_type, key_type='transition')
-
- def check_sequences(self):
- # sequence check
- _s_to_remove = list()
- for _s_name in self._names:
- _path = storage.get_sequence_path(self._type, _s_name)
- if os.path.exists(_path):
- continue
- else:
- _s_to_remove.append(_s_name)
- # ..
- for _s_name in _s_to_remove:
- self.remove_sequence(_s_name)
-
- def refresh(self, sequence_name):
- # ..
- if self._type == 'graphics':
- self._refresh_graphic_keys(sequence_name)
- else:
- self._refresh_sound_keys(sequence_name)
diff --git a/atoideweb/tools/registry.py b/atoideweb/tools/registry.py
index 8947d4d..a57285e 100644
--- a/atoideweb/tools/registry.py
+++ b/atoideweb/tools/registry.py
@@ -3,62 +3,7 @@
import logging
# get application logger
-logger = logging.getLogger('atoidejouer')
-
-class PixRegistry(object):
-
- class __Singleton:
- """Our singleton object.
- """
-
- def __init__(self):
- """Create the new singleton with the application config.
-
- :param config: SafeConfigParser object for all the application
- :see: `ConfigParser.SafeConfigParser`
- """
- # ensure config
- self.__dict = dict()
-
- def __key(self, path, width, height):
- return '%s|%sx%s' % (path, width, height)
-
- def get_pix(self, path, width, height):
- # get key
- _k = self.__key(path, width, height)
- # ..
- if _k in self.__dict:
- return self.__dict[_k]
- else:
- return None
-
- def set_pix(self, path, width, height, pixbuf):
- # get key
- _k = self.__key(path, width, height)
- # clear previous
- if _k in self.__dict:
- _p = self.__dict[_k]
- _p.destroy()
- else:
- pass
- # ...
- self.__dict[_k] = pixbuf
-
- # singleton instance
- instance = None
-
- def __new__(c, force=False):
- """Singleton new init.
- """
- # if doesn't already initialized
- if not PixRegistry.instance \
- or force is True:
- # create a new instance
- PixRegistry.instance = PixRegistry.__Singleton()
- else:
- pass
- # return the manager object
- return PixRegistry.instance
+logger = logging.getLogger('atoideweb')
class InfoRegistry(object):
diff --git a/atoideweb/tools/sound.py b/atoideweb/tools/sound.py
index 87db3d9..18edb8c 100644
--- a/atoideweb/tools/sound.py
+++ b/atoideweb/tools/sound.py
@@ -4,7 +4,7 @@ import gst, logging
from datetime import timedelta
# get application logger
-logger = logging.getLogger('atoidejouer')
+logger = logging.getLogger('AToiDeWeb')
class Player(object):
diff --git a/atoideweb/tools/storage.py b/atoideweb/tools/storage.py
index f3f40ca..329b1f8 100644
--- a/atoideweb/tools/storage.py
+++ b/atoideweb/tools/storage.py
@@ -7,7 +7,7 @@ from gettext import gettext as _
# png import from pypng
from lib import png
-# atoidejouer import
+# atoideweb import
from atoideweb.tools import config
# ...
@@ -23,7 +23,7 @@ else:
BUND_PATH = activity.get_bundle_path()
# get application logger
-logger = logging.getLogger('atoidejouer')
+logger = logging.getLogger('atoideweb')
ACTIVITY_NAMES = {
'paint': 'org.laptop.Oficina',
@@ -40,39 +40,6 @@ def get_config_path():
return os.path.join(BUND_PATH, 'static', 'data', 'config', 'config.ini')
-def get_sequence_items(sequence_path):
- if os.path.exists(sequence_path):
- _f = open(sequence_path)
- _rows = _f.readlines()
- _f.close()
- else:
- return []
- # ..
- _names = list()
- for _n in _rows:
- _n = _n.strip()
- if _n == '':
- continue
- else:
- _names.append(_n)
- # ..
- return _names
-
-
-def get_sequence_path(type_, sequence_name):
- return os.path.join(ROOT_PATH, 'data',
- 'sequences', type_, '%s.seq' % sequence_name)
-
-
-def get_sequence_first_graphic_name(type_, sequence_name):
- # seq file
- _f = open(get_sequence_path(type_, sequence_name))
- _names = _f.readlines()
- _f.close()
- # ..
- return None if len(_names) == 0 else _names[0].strip()
-
-
def get_sound_path(filename, dir_='sounds'):
# return path
return os.path.join(ROOT_PATH, 'data', dir_,
@@ -87,7 +54,7 @@ def get_icon_path(stock_id):
def get_image_path(filename, dir_='graphics'):
# return path
- if filename in ['background_default', 'mask_default']\
+ if filename in ['background_default']\
or dir_=='data':
return os.path.join(BUND_PATH, 'static', 'data',
'graphics', '%s.png' % filename)
@@ -96,140 +63,6 @@ def get_image_path(filename, dir_='graphics'):
'%s.png' % filename)
-def __remove_inner_true(a_list):
- _new_list = list()
- # ..
- _has_false = False
- # ..
- for _i, _v in enumerate(a_list):
- if _v is False:
- _has_false = True
- else:
- _sub = a_list[_i+1:]
- if _has_false is True\
- and _sub.count(False) != 0:
- _new_list.append(False)
- continue
- else:
- pass
- # ..
- _new_list.append(_v)
- # ..
- del a_list
- # ..
- return _new_list
-
-
-def png_from_pixbuf(filename, timestamp):
- # prepare outpath
- _out_path = get_image_path(filename)
- if os.path.exists(_out_path):
- return
- else:
- pass
- # prepare inpath
- _in_path = get_path_from_journal(timestamp, 'image/png')
- # init png reader
- _reader = png.Reader(filename=_in_path)
- # read the file
- _w, _h, _pixels, _metadata = _reader.read()
- # init working vars
- _new_pixels = list()
- _first_color = None
- _remove_row_list = [True for _dummy in range(_h)]
- _remove_col_list = [True for _dummy in range(_w)]
- # ..
- _planes = _metadata['planes']
- # update vars
- for _i, _row in enumerate(_pixels):
- # init new row
- _new_row = list()
- for _j, _col in enumerate(_row):
- # upate rgb
- if _j % _planes == 0:
- _rgb = [_col]
- continue
- else:
- _rgb.append(_col)
- # update color first and after
- if _j % _planes == (_planes - 1):
- # keep the first color
- if _first_color is None:
- _first_color = _rgb
- else:
- pass
- # make it alpha if first
- if _rgb == _first_color:
- _new_row.extend([0, 0, 0, 0])
- else:
- _remove_row_list[_i] = False
- _remove_col_list[(_j/_planes)-1] = False
- # small hack
- if _planes == 3:
- _rgb.append(255)
- else:
- pass
- _new_row.extend(_rgb)
- else:
- continue
- # add new row
- _new_pixels.append(_new_row)
- # cleaning
- del _reader
- del _pixels
- # remove inner True in cols or rows
- _remove_row_list = __remove_inner_true(_remove_row_list)
- _remove_col_list = __remove_inner_true(_remove_col_list)
- # init working vars
- _new_new_pixels = list()
- # 2cd pass
- for _i, _row in enumerate(_new_pixels):
- # transparent row
- if _remove_row_list[_i] is True:
- continue
- else:
- # init new row
- _new_new_row = list()
- # ..
- for _j, _col in enumerate(_row):
- # upate rgb
- if _j % 4 == 0:
- _rgb = [_col]
- continue
- else:
- _rgb.append(_col)
- # update color first and after
- if _j % 4 == 3:
- # transparent col
- if _remove_col_list[(_j/4)-1] is True:
- continue
- else:
- _new_new_row.extend(_rgb)
- else:
- continue
- # sorry for that!
- _new_new_pixels.append(_new_new_row)
- # cleaning
- del _new_pixels
- del _remove_row_list
- del _remove_col_list
- # update h and w
- _w = len(_new_new_pixels[0])/4
- _h = len(_new_new_pixels)
- # update alpha meta
- _metadata['alpha'] = True
- _metadata['planes'] = 4
- del _metadata['size']
- # write the new image with alpha
- _new_png = open(_out_path, 'wb')
- _writer = png.Writer(_w, _h, **_metadata)
- _writer.write(_new_png, _new_new_pixels)
- _new_png.close()
- # just in case
- del _writer
- del _new_new_pixels
-
-
def __do_query(query):
from sugar.datastore import datastore
# find in ds
@@ -298,7 +131,7 @@ def list_files_from_journal(activity_name=None, mime_type=None):
for _o in _objs:
# TODO open the files
yield _o.get_file_path()
-
+
def get_path_from_journal(timestamp, mime_type):
from sugar.datastore import datastore
@@ -315,315 +148,3 @@ def get_path_from_journal(timestamp, mime_type):
return _results[0].get_file_path()
else:
return None
-
-
-def __check_dir(dir_name, parent='data'):
- # get activity path
- if parent is None:
- _dir = os.path.join(ROOT_PATH, dir_name)
- else:
- _dir = os.path.join(ROOT_PATH, parent, dir_name)
- # ensure activity path
- if os.path.exists(_dir):
- pass
- else:
- os.mkdir(_dir)
-
-
-def __check_file(sub_path, file_name):
- # ..
- __check_dir(sub_path)
- # file path
- _path = os.path.join(ROOT_PATH, 'data', sub_path,
- file_name)
- # ensure file
- if os.path.exists(_path):
- pass
- else:
- # get bundle path
- _p = os.path.join(BUND_PATH, 'static', 'ext',
- sub_path, file_name)
- # copy
- shutil.copy(_p, _path)
-
-
-def __check_dir_files(sub_path):
- # get bundle path
- _path = os.path.join(BUND_PATH, 'static', 'ext', sub_path)
- # file by file
- for _f in os.listdir(_path):
- # full path
- _p = os.path.join(_path, _f)
- # little check
- if os.path.isdir(_p):
- pass
- else:
- __check_file(sub_path, _f)
-
-
-def init_activity_folder():
- # graphics
- __check_dir_files('graphics')
- # sounds
- __check_dir_files('sounds')
- # sequences
- __check_dir('sequences')
- __check_dir_files(os.path.join('sequences', 'graphics'))
- __check_dir_files(os.path.join('sequences', 'sounds'))
- # stories
- __check_dir_files('stories')
-
-
-def __show_in_out_result_message(label, message):
- # ..
- label.set_markup('<span size="large" style="italic">%s</span>' % message)
- label.show()
-
-
-def __merge_dir(project_name, dir_name, exist_list=None):
- # archive path
- _path_src = os.path.join(ROOT_PATH, 'tmp', project_name,
- dir_name)
- # little check
- if os.path.exists(_path_src):
- # project path
- _path_dst = os.path.join(ROOT_PATH, 'data',
- dir_name)
- # init existing list
- exist_list = list() if exist_list is None else exist_list
- for _f in os.listdir(_path_src):
- # ..
- _p_src = os.path.join(_path_src, _f)
- _p_dst = os.path.join(_path_dst, _f)
- # little check
- if os.path.isdir(_p_src):
- continue
- # do not replace
- elif os.path.exists(_p_dst):
- # update exist list
- exist_list.append(os.path.join(dir_name, _f))
- # do copy
- else:
- shutil.copy(_p_src, _path_dst)
- # OK!
- return True
- else:
- # Oops!
- return False
-
-
-def __import_keys(activity_, project_name):
- # ..
- _path_data = os.path.join(ROOT_PATH, 'tmp',
- project_name, 'story.keys')
- # init content
- _data = None
- # little check
- if os.path.exists(_path_data):
- # read file
- _file = open(_path_data, 'r')
- try:
- _data = _file.read()
- finally:
- _file.close()
- # parse json data
- _exist_graphic_keys = activity_.graphic_keys.loads(_data, clear=False)
- _exist_sound_keys = activity_.sound_keys.loads(_data, clear=False)
- # set activity new number of keys
- activity_.update_number_of_keys()
- # ..
- return {
- 'graphics': _exist_graphic_keys,
- 'sounds': _exist_sound_keys,
- }
- # ?? invalid archive
- else:
- return None
-
-
-def import_project(activity_, file_path, msg_label):
- # clean tmp dir
- __remove_dir('tmp', parent=None)
- __check_dir('tmp', parent=None)
- # ..
- _tmp_root = os.path.join(ROOT_PATH, 'tmp')
- try:
- # copy file to tmp
- _tar_path = os.path.join(_tmp_root, '__tmp.tar.bz2')
- shutil.copy(file_path, _tar_path)
- # change dir for unzipping
- os.chdir(_tmp_root)
- # extract files in tmp dir
- _tar = tarfile.open(file_path)
- _p_name = _tar.getnames()[0]
- _tar.extractall()
- _tar.close()
- except Exception, e:
- # prepare message
- _msg = _('Project import failed!')
- _msg += _('\n\n[Error] Can not read archive file!')
- # remove tmp structure
- __remove_dir('tmp', parent=None)
- # quit!
- return __show_in_out_result_message(msg_label, _msg)
- # merge dirs
- _exist_list = list()
- if __merge_dir(_p_name, 'graphics', exist_list=_exist_list)\
- and __merge_dir(_p_name, 'sounds', exist_list=_exist_list)\
- and __merge_dir(_p_name, os.path.join('sequences', 'graphics'),
- exist_list=_exist_list)\
- and __merge_dir(_p_name, os.path.join('sequences', 'sounds'),
- exist_list=_exist_list):
- # init result message
- _msg = _('Project sucessfully imported')
- else:
- # prepare message
- _msg = _('Project import failed!')
- _msg += _('\n\n[Error] Can not load files!')
- # remove tmp structure
- __remove_dir('tmp', parent=None)
- # quit!
- return __show_in_out_result_message(msg_label, _msg)
- # existing files stop
- if len(_exist_list) == 0:
- pass
- else:
- # prepare message
- _msg += _('\n\n[Warning] Following files already exist:\n')
- for _f in _exist_list:
- _msg = '%s - %s\n' % (_msg, _f)
- # merge keys
- _existing_dict = __import_keys(activity_, _p_name)
- if _existing_dict is None:
- # prepare message
- _msg = _('Project import failed!')
- _msg += _('\n\n[Error] Can not load keys!')
- # remove tmp structure
- __remove_dir('tmp', parent=None)
- # quit!
- return __show_in_out_result_message(msg_label, _msg)
- if len(_existing_dict['graphics']) == 0\
- or len(_existing_dict['sounds']) == 0:
- pass
- else:
- # prepare message
- _msg += _('\n\n[Warning] Following sequences already exist:\n')
- for _s in _existing_dict['graphics']:
- _msg = '%s - graphics.%s\n' % (_msg, _s)
- _msg = '%s\n' % _msg
- for _s in _existing_dict['sounds']:
- _msg = '%s - sounds.%s\n' % (_msg, _s)
- # remove tmp structure
- __remove_dir('tmp', parent=None)
- # show result
- __show_in_out_result_message(msg_label, _msg)
-
-
-def __remove_dir(dir_name, parent=None):
- # get activity path
- if parent is None:
- _dir = os.path.join(ROOT_PATH, dir_name)
- _next_parent = dir_name
- else:
- _dir = os.path.join(ROOT_PATH, parent, dir_name)
- _next_parent = os.path.join(parent, dir_name)
- # remove files and dir recursively
- if os.path.exists(_dir):
- for _f in os.listdir(_dir):
- _p = os.path.join(_dir, _f)
- if os.path.isdir(_p):
- __remove_dir(_f, parent=_next_parent)
- else:
- os.remove(_p)
- # and remove the dir
- if os.path.exists(_dir):
- os.removedirs(_dir)
- else:
- pass
- # nothing to do
- else:
- pass
-
-
-def __export_seq_and_res(activity_, tmp_root, type_='graphics'):
- # path updates
- _seq_src = os.path.join(ROOT_PATH, 'data', 'sequences',
- type_)
- _seq_dst = os.path.join(tmp_root, 'sequences', type_)
- # ..
- _res_root = os.path.join(ROOT_PATH, 'data', type_)
- _res_dst = os.path.join(tmp_root, type_)
- # keys factory
- _keys = activity_.graphic_keys if type_ == 'graphics'\
- else activity_.sound_keys
- # set res ext
- _ext = '.png' if type_ == 'graphics' else '.ogg'
- # copy
- for _n in _keys._names:
- if _n.strip() == '':
- continue
- else:
- _s_path = os.path.join(_seq_src, '%s.seq' % _n)
- shutil.copy(_s_path, _seq_dst)
- for _res in get_sequence_items(_s_path):
- _res_path = os.path.join(_res_root, '%s%s' % (_res, _ext))
- shutil.copy(_res_path, _res_dst)
-
-
-def export_project(activity_, msg_label, media):
- # get the toolbar
- _toolbar = activity_._toolbox.get_activity_toolbar()
- # get the projet name
- _name = _toolbar.title.get_text()
- # clean tmp dir first
- __remove_dir('tmp', parent=None)
- __check_dir('tmp', parent=None)
- # create a tmp stucture
- __check_dir(_name, parent='tmp')
- __check_dir(os.path.join(_name, 'graphics'), parent='tmp')
- __check_dir(os.path.join(_name, 'sequences'), parent='tmp')
- __check_dir(os.path.join(_name, 'sequences', 'graphics'), parent='tmp')
- __check_dir(os.path.join(_name, 'sequences', 'sounds'), parent='tmp')
- __check_dir(os.path.join(_name, 'sounds'), parent='tmp')
- # ..
- _tmp_root = os.path.join(ROOT_PATH, 'tmp')
- _out_root = os.path.join(_tmp_root, _name)
- # copy keys
- _keys_path = os.path.join(_out_root, 'story.keys')
- activity_.write_file(_keys_path)
- # copy sequences and resources
- __export_seq_and_res(activity_, _out_root, type_='graphics')
- __export_seq_and_res(activity_, _out_root, type_='sounds')
- # change dir for zipping
- os.chdir(_tmp_root)
- # zip all
- _tar_name = '%s.tar.bz2' % _name
- # ..
- _tar = tarfile.open(_tar_name, "w:bz2")
- _tar.add(_name)
- _tar.close()
- # try to copy
- try:
- if os.path.exists(os.path.join('/media', media, _tar_name)):
- # ..
- _msg = _('Project') + ' "' + _name + '" '
- _msg += _('already exported to') + ' "' + media + '" '
- else:
- # ..
- shutil.copy(os.path.join(_tmp_root, _tar_name),
- os.path.join('/media', media))
- # ..
- _msg = _('Project') + ' "' + _name + '" '
- _msg += _('sucessfully exported to') + ' "' + media + '" '
- except Exception, e:
- # ERROR
- logger.error('[storage] export_project - e: %s' % e)
- # ERROR
- # ..
- # ..
- _msg = _('Project') + ' "' + _name + '" '
- _msg += _('export to') + ' "' + media + '" ' + _('failed!')
- # remove tmp structure
- __remove_dir('tmp', parent=None)
- # tmp message
- __show_in_out_result_message(msg_label, _msg)
diff --git a/atoideweb/ui/toolbar.py b/atoideweb/ui/toolbar.py
index a2da877..0c0bc03 100644
--- a/atoideweb/ui/toolbar.py
+++ b/atoideweb/ui/toolbar.py
@@ -1,6 +1,6 @@
# python import
-import logging, os, re, shutil
+import logging
# ...
from functools import partial
from gettext import gettext as _
@@ -14,490 +14,59 @@ from sugar.activity import activity
# sugar import
from sugar.graphics.toolbutton import ToolButton
-# atoidejouer import
-from atoideweb.tools import config, storage
-# from atoidejouer.ui import screens
+# atoideweb import
+from atoideweb.tools import config
+# from atoideweb.ui import screens
# get application logger
-logger = logging.getLogger('atoidejouer')
+logger = logging.getLogger('atoideweb')
-def _clean_dir(dir_path):
- # little check first
- if os.path.exists(dir_path):
- pass
- # ???
- else:
- return
- # ..
- for _filename in os.listdir(dir_path):
- # ..
- _path = os.path.join(dir_path, _filename)
- # little check
- if os.path.isfile(_path):
- os.remove(_path)
- elif os.path.isdir(_path):
- _clean_dir(_path)
- else:
- # ERRROR
- logger.error('[toolbar] _clean_dir - path error: %s' % dir_path)
- # remove dir at the end
- os.removedirs(dir_path)
-
-
-def _cb_seq_new(widget, toolbar):
- # get screen
- _screen = toolbar.activity.get_current_screen()
- # remove items from preview boxes
- _screen.sequence_preview.clear()
- # ..
- _screen.notebook.current_sequence = None
- # clear entry
- toolbar._sequence_entry.set_text("")
-
-
-NON_ALPHA_NUM_PATTERN = re.compile('[\W_]+')
-
-
-def _cb_seq_name(entry):
- # ensure good string
- _str = NON_ALPHA_NUM_PATTERN.sub('-', entry.get_text())
- # update entry
- entry.set_text(_str)
-
-
-def _cb_seq_remove(widget, toolbar):
- # get sequence name
- _name = toolbar._sequence_entry.get_text()
- # get screen
- _screen = toolbar.activity.get_current_screen()
- # type shortcut 'graphic' or 'sound'
- _type = _screen.notebook._type
- # little check
- if _name.strip() == '':
- # do nothing
- pass
- else:
- # get sequence path
- _seq_path = storage.get_sequence_path(_type, _name)
- # remove dir
- if os.path.exists(_seq_path):
- # do clean
- os.remove(_seq_path)
- # and clear all at the end
- _screen.sequence_preview.clear()
- # clear entry
- toolbar._sequence_entry.set_text("")
- # update notebook
- _screen.notebook._get_store_sequence()
- # nothing to do
- else:
- pass
-
-
-def _cb_seq_save(widget, toolbar, remove=False):
- # get sequence name
- _name = toolbar._sequence_entry.get_text()
- # get screen
- _screen = toolbar.activity.get_current_screen()
- # get nb of graphics
- _nb_of_items = _screen.sequence_preview.number_of_items()
- # type shortcut 'graphic' or 'sound'
- _type = _screen.notebook._type
- # little check
- if _name.strip() == '':
- return
- elif _nb_of_items == 0\
- and remove is True:
- # get sequence path
- _seq_path = storage.get_sequence_path(_type, _name)
- # remove from files
- os.remove(_seq_path)
- # ..
- _keys = toolbar.activity.graphic_keys\
- if toolbar.name == 'graphics_add'\
- else toolbar.activity.sound_keys
- # remove from keys
- _keys.remove_sequence(_name)
- else:
- # get sequence path
- _seq_path = storage.get_sequence_path(_type, _name)
- # open file
- _file = open(_seq_path, 'wb')
- # update
- for _filename in _screen.sequence_preview.items:
- _filename = _filename.strip()
- if _filename == '':
- continue
- else:
- _file.write('%s\n' % _filename)
- _file.close()
- # update notebook
- _screen.notebook._get_store_sequence()
-
-
-"""
-def _show_browser(toolbar, cls):
- # next screen name
- _screen_name = '%s_add' % toolbar.name
- # do switch
- toolbar._switch(_screen_name)
- # get or create screen
- _screen = toolbar.activity.get_screen(_screen_name)\
- if toolbar.activity.has_screen(_screen_name)\
- else cls(toolbar)
- # update activity screens
- toolbar.activity.set_current_screen(_screen_name, _screen)
- # do show
- _screen._show()
- # update entry
- _seq_name = _screen.notebook.current_sequence
- if _seq_name is None:
- pass
- else:
- # ..
- toolbar._sequence_entry.set_text(_seq_name)
- # pos
- _current_pos = _screen.sequence_preview.get_current_pos()
- toolbar._frame_entry.set_text(str(_current_pos))
-"""
-
-
-def _cb_add(widget, toolbar):
- # browser screen factory
- if toolbar.name == 'graphics':
- # _show_browser(toolbar, screens.ScreenBrowserGraphics)
- pass
- # add graphic to the sequence
- elif toolbar.name in ['graphics_add', 'sounds_add']:
- # get current screen
- _screen = toolbar.activity.get_current_screen()
- # get graphic name
- _item_name = _screen.notebook.current_item
- # ..
- _sequence_name = _screen.notebook.current_sequence
- if _sequence_name is None\
- or _sequence_name.strip() == ''\
- or _item_name is None:
- pass
- else:
- # udpate sequence preview
- _screen.sequence_preview.add_item(_item_name)
- # update sequence file
- _cb_seq_save(widget, toolbar)
- # ..
- elif toolbar.name == 'sounds':
- # _show_browser(toolbar, screens.ScreenBrowserSounds)
- pass
- # ??
- else:
- # ERROR
- logger.error('[toolbar] _cb_add - unknown: %s' % toolbar.name)
-
-
-def _cb_remove(widget, toolbar):
- # browser screen factory
- if toolbar.name == 'graphics':
- pass
- # add graphic to the sequence
- elif toolbar.name in ['graphics_add', 'sounds_add']:
- # ..
- _screen = toolbar.activity.get_current_screen()
- # udpate sequence preview
- _screen.sequence_preview.remove_current()
- # seq name
- _seq_name = _screen.notebook.current_sequence
- _file_name = _screen.notebook.current_item
- # ..
- _keys = toolbar.activity.graphic_keys\
- if toolbar.name == 'graphics_add'\
- else toolbar.activity.sound_keys
- # remove from keys
- _keys.remove_filename_from_all(_seq_name, _file_name)
- # update sequence file
- _cb_seq_save(widget, toolbar, remove=True)
- # ..
- elif toolbar.name == 'sounds':
- pass
- # add graphic to the sequence
- elif toolbar.name == 'sounds_add':
- pass
- # ??
- else:
- # ERROR
- logger.error('[toolbar] _cb_remove - name: %s' % toolbar.name)
-
-
-def _cb_frame_after(widget, toolbar):
- # get previous value
- _value = int(toolbar._frame_entry.get_text())
- # inc it
- _update_frame_entry(widget, toolbar, _value + 1)
- # update sequence file
- _cb_seq_save(widget, toolbar)
-
-
-def _cb_frame_before(widget, toolbar):
- # get previous value
- _value = int(toolbar._frame_entry.get_text())
- # dec it
- _update_frame_entry(widget, toolbar, _value - 1)
- # update sequence file
- _cb_seq_save(widget, toolbar)
-
-
-def _update_frame_entry(entry, toolbar, value):
- # get screen
- _screen = toolbar.activity.get_current_screen()
- # get max value
- _max = _screen.sequence_preview.number_of_items() - 1
- # prepare value
- _new_val = None
- if value > _max:
- _new_val = _max
- elif value < 0:
- _new_val = 0
- # reset max just in case
- else:
- _new_val = value
- # update entry
- toolbar._frame_entry.set_text(str(_new_val))
- # update sequence
- _screen.sequence_preview.move_current(_new_val)
-
-
-def _cb_open(widget, toolbar):
- pass
-
-
-def _cb_back(widget, toolbar):
- # do switch
- toolbar._switch(toolbar.name.replace('_add', ''))
- # restore screen
- toolbar.activity._change_screen(toolbar)
-
-
-def _cb_import(widget, toolbar):
- # get the current sequence name
- _sequence_name = toolbar._sequence_entry.get_text()
- # get filenames
- _screen = toolbar.activity.get_current_screen()
- # shortcut
- _filenames = _screen.sequence_preview.items
- # and back
- _cb_back(widget, toolbar)
- # add sequence to the story keys
- if toolbar.story_keys.add_sequence(_sequence_name, _filenames):
- # get screen now
- _screen = toolbar.activity.get_current_screen()
- # add sequence to the timeline
- _screen.timeline.add_sequence(_sequence_name)
- # ..
- _screen.scene.refresh()
- # already added
- else:
- pass
-
-
-def _cb_play(widget, toolbar):
- # replace play button
- toolbar._replace_button('play', 'pause')
- # trigger playing
- toolbar.activity._thread.play()
-
-
-def _cb_pause(widget, toolbar):
- # replace pause button
- toolbar._replace_button('pause', 'play')
- # trigger pausing
- toolbar.activity._thread.pause()
-
-
-def _cb_stop(widget, toolbar):
- # replace pause button - if playing
- if toolbar._has_button('pause'):
- # ..
- toolbar._replace_button('pause', 'play')
- else:
- pass
- # update main thread
- toolbar.activity._thread.pause(stop=True)
- # ..
- toolbar.activity._thread.set_time()
-
-
-def _cb_view_fullscreen(widget, toolbar):
- # replace fullscreen button with return button
- toolbar._replace_button('view_fullscreen', 'view_return')
- # ask_clear
- toolbar.story_keys.ask_clear()
- # get current screen
- _screen = toolbar.activity.get_current_screen()
- # enbale fullscreen
- _screen.set_fullscreen(True)
-
-
-def _cb_view_return(widget, toolbar):
- # remove return button with fullscreen button
- toolbar._replace_button('view_return', 'view_fullscreen')
- # ask_clear
- toolbar.story_keys.ask_clear()
- # get current screen
- _screen = toolbar.activity.get_current_screen()
- # disable fullscreen
- _screen.set_fullscreen(False)
-
-
-def _cb_slider(widget, event, toolbar):
- """action = 'press' or 'release'
- """
- # ...
- if event.type == gtk.gdk.BUTTON_PRESS:
- pass
- elif event.type == gtk.gdk.BUTTON_RELEASE:
- toolbar.activity._thread.set_time(time_=widget.get_value())
- # ??
- else:
- pass
-
-
-def _cb_format_value(widget, value, toolbar):
- """Format the slider value to display
- """
- # return formated value
- return '%1d:%02d' % divmod(value, 60)
-
+def _cb_default(widget, toolbar):
+ # DEBUG
+ logger.debug('[toolbar] cb - toolbar.name: %s' % toolbar.name)
BUTTONS = {
- 'add' : ['list-add', _cb_add],
- 'back' : ['edit-undo', _cb_back],
- 'backward' : ['media-seek-backward', None],
- 'forward' : ['media-seek-forward', None],
- 'frame_after' : ['go-right', _cb_frame_after],
- 'frame_before' : ['go-left', _cb_frame_before],
- 'frame_entry' : [None, None],
- 'import' : ['insert-image', _cb_import],
- 'open' : ['media', _cb_open],
- 'pause' : ['media-playback-pause', _cb_pause],
- 'play' : ['media-playback-start', _cb_play],
- 'remove' : ['list-remove', _cb_remove],
+ 'add' : ['list-add', _cb_default],
+ 'back' : ['edit-undo', _cb_default],
+ 'backward' : ['media-seek-backward', _cb_default],
+ 'forward' : ['media-seek-forward', _cb_default],
+ 'open' : ['media', _cb_default],
+ 'pause' : ['media-playback-pause', _cb_default],
+ 'play' : ['media-playback-start', _cb_default],
+ 'remove' : ['list-remove', _cb_default],
'separator' : [None, None],
- 'seq_new' : ['document-generic', _cb_seq_new],
- 'seq_name' : [None, _cb_seq_name],
- 'seq_remove' : ['button_cancel', _cb_seq_remove],
- 'seq_save' : ['dialog-apply', _cb_seq_save],
- 'slider' : [None, _cb_slider],
- 'stop' : ['media-playback-stop', _cb_stop],
- 'view_fullscreen' : ['view-fullscreen', _cb_view_fullscreen],
- 'view_return' : ['view-return', _cb_view_return],
+ 'stop' : ['media-playback-stop', _cb_default],
}
TOOLBARS = {
- 'graphics' : [
- ['stop', 'play', 'slider', # 'backward', 'forward'
- 'separator',
- 'add'],
- []
- ],
- 'graphics_add' : [
- ['seq_new', 'seq_name', 'seq_save', 'seq_remove',
- 'separator',
- 'add', 'remove',
- 'separator',
- 'frame_before', 'frame_entry', 'frame_after'],
- ['import', 'back']
- ],
- 'sounds' : [
- ['stop', 'play', 'slider', # 'backward', 'forward'
+ 'eating' : [
+ ['stop', 'play', 'slider',
'separator',
'add'],
[]
],
- 'sounds_add' : [
- ['seq_new', 'seq_name', 'seq_save', 'seq_remove',
- 'separator',
- 'add', 'remove',
- 'separator',
- 'frame_before', 'frame_entry', 'frame_after'],
- ['import', 'back']
- ],
- 'story' : [
- ['stop', 'play', 'slider', # 'backward', 'forward'
- 'separator',
- 'view_fullscreen'],
+ 'spare-time' : [
+ ['stop', 'play',],
[]
],
}
TITLES = {
- 'graphics' : {
+ 'eating' : {
'toolbox': _('Graphic'),
'buttons': {
- # 'backward': _('Seek Backward'),
- # 'forward': _('Seek Forward'),
- 'pause': _('Pause Story'),
- 'play': _('Play Story'),
- 'slider': _('Progress Bar'),
- 'stop': _('Stop Story'),
- 'add': _('Add Graphic'),
- }
- },
- 'graphics_add' : {
- 'toolbox': None,
- 'buttons': {
- 'add': _('Add Graphic'),
- 'back': _('Back'),
- 'frame_after': _('Frame Before'),
- 'frame_before': _('Frame After'),
- 'frame_entry': None,
- 'import': _('Import Sequence'),
- 'remove': _('Remove Graphic'),
- 'seq_new': _('New Sequence'),
- 'seq_name': None,
- 'seq_remove': _('Remove Sequence'),
- 'seq_save': _('Save Sequence'),
- }
- },
- 'sounds' : {
- 'toolbox': _('Sound'),
- 'buttons': {
- # 'backward': _('Seek Backward'),
- # 'forward': _('Seek Forward'),
- 'pause': _('Pause Story'),
- 'play': _('Play Story'),
+ 'play': _('Play'),
'slider': _('Progress Bar'),
- 'stop': _('Stop Story'),
- 'add': _('Add Sound'),
+ 'stop': _('Stop'),
+ 'add': _('Add'),
}
},
- 'sounds_add' : {
+ 'spare-time' : {
'toolbox': None,
'buttons': {
- 'add': _('Add Sound'),
- 'back': _('Back'),
- 'frame_after': _('Frame Before'),
- 'frame_before': _('Frame After'),
- 'frame_entry': None,
- 'import': _('Import Sequence'),
- 'remove': _('Remove Sound'),
- 'seq_new': _('New Sound'),
- 'seq_name': None,
- 'seq_remove': _('Remove Sequence'),
- 'seq_save': _('Save Sequence'),
- }
- },
- 'story' : {
- 'toolbox': _('Story'),
- 'buttons': {
- # 'open': _('Open Story'),
- # 'backward': _('Seek Backward'),
- # 'forward': _('Seek Forward'),
- 'pause': _('Pause Story'),
- 'play': _('Play Story'),
- 'slider': _('Progress Bar'),
- 'stop': _('Stop Story'),
- 'view_fullscreen': _('Fullscreen'),
- 'view_return': _('Default Screen'),
+ 'play': _('Play'),
+ 'stop': _('Stop'),
}
},
}
@@ -512,18 +81,8 @@ class Toolbar(gtk.Toolbar):
self.set_name(name)
# keep activity
self.activity = activity
- # ..
- if self.name == 'graphics'\
- or self.name == 'story':
- self.story_keys = self.activity.graphic_keys
- else:
- self.story_keys = self.activity.sound_keys
# adjustment
self._adjustment = None
- self._number_of_keys = self.activity._number_of_keys
- # keep components
- self._sequence_entry = None
- self._frame_entry = None
# init widget dict
self._button_dict = dict()
# init buttons
@@ -577,60 +136,6 @@ class Toolbar(gtk.Toolbar):
# add button
self._add_button(_b)
- def update_slider(self, time_, pause):
- # get slider
- _s = self.get_slider()
- # slider stuff
- if _s is None:
- pass
- else:
- _s.set_value(time_)
- # update pause status
- if pause is True:
- self._replace_button('pause', 'play')
- else:
- self._replace_button('play', 'pause')
-
- def get_slider(self):
- # little check
- if 'slider' in self._button_dict:
- _children = self._button_dict['slider'].get_children()
- return None if len(_children) == 0 else _children[0]
- else:
- return None
-
- def play(self):
- # do pause
- _cb_play(None, self)
-
- def pause(self):
- # do pause
- _cb_pause(None, self)
-
- def refresh(self, value):
- # ...
- if self._number_of_keys != self.activity._number_of_keys:
- # update nb of keys
- self._number_of_keys = self.activity._number_of_keys
- # update adjustment
- if hasattr(self._adjustment, 'set_upper'):
- self._adjustment.set_upper(self._number_of_keys-1)
- else:
- self._adjustment.upper = self._number_of_keys-1
- else:
- pass
- # get slider
- _s = self.get_slider()
- # slider stuff
- if _s is None:
- # just in case
- value = 0
- else:
- # update value
- _s.set_value(int(value))
- # return _v to keep time value in thread
- return value
-
def _on_focus(self, widget, direction):
if self.name in ['graphics_add', 'sounds_add']:
self._switch(self.name.replace('_add', ''))
@@ -656,50 +161,6 @@ class Toolbar(gtk.Toolbar):
if button_id == 'separator':
_buton = gtk.SeparatorToolItem()
_buton.set_draw(True)
- # manage slider
- elif button_id == 'slider':
- #
- self._adjustment = gtk.Adjustment(0, 0, self._number_of_keys-1, 1)
- # ..
- _hscale = gtk.HScale(self._adjustment)
- _hscale.set_draw_value(True)
- _hscale.set_digits(False)
- _hscale.set_update_policy(gtk.UPDATE_CONTINUOUS)
- # manage cb
- _hscale.connect('button-release-event', _cb, self)
- _hscale.connect('format-value', _cb_format_value, self)
- # show
- _hscale.show()
- # ..
- _buton = gtk.ToolItem()
- _buton.set_expand(True)
- _buton.add(_hscale)
- elif button_id == 'seq_name':
- self._sequence_entry = gtk.Entry()
- # show
- self._sequence_entry.show()
- # ..
- self._sequence_entry.connect('changed', _cb)
- # ..
- _buton = gtk.ToolItem()
- _buton.set_expand(True)
- _buton.add(self._sequence_entry)
- elif button_id == 'frame_entry':
- # ...
- self._frame_entry = gtk.Entry()
- # ..
- self._frame_entry.set_editable(False)
- self._frame_entry.set_width_chars(2)
- self._frame_entry.set_size_request(44, -1)
- # set value
- self._frame_entry.set_text('0')
- # center text
- self._frame_entry.set_alignment(1)
- # show
- self._frame_entry.show()
- # ..
- _buton = gtk.ToolItem()
- _buton.add(self._frame_entry)
# standard button
elif button_id in BUTTONS:
# get tooltip
diff --git a/babel/__init__.py b/babel/__init__.py
deleted file mode 100644
index b0b79c6..0000000
--- a/babel/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007-2008 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Integrated collection of utilities that assist in internationalizing and
-localizing applications.
-
-This package is basically composed of two major parts:
-
- * tools to build and work with ``gettext`` message catalogs
- * a Python interface to the CLDR (Common Locale Data Repository), providing
- access to various locale display names, localized number and date
- formatting, etc.
-
-:see: http://www.gnu.org/software/gettext/
-:see: http://docs.python.org/lib/module-gettext.html
-:see: http://www.unicode.org/cldr/
-"""
-
-from babel.core import *
-
-__docformat__ = 'restructuredtext en'
-try:
- from pkg_resources import get_distribution, ResolutionError
- try:
- __version__ = get_distribution('Babel').version
- except ResolutionError:
- __version__ = None # unknown
-except ImportError:
- __version__ = None # unknown
diff --git a/babel/core.py b/babel/core.py
deleted file mode 100644
index cc677d0..0000000
--- a/babel/core.py
+++ /dev/null
@@ -1,784 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Core locale representation and locale data access."""
-
-import os
-import pickle
-
-from babel import localedata
-
-__all__ = ['UnknownLocaleError', 'Locale', 'default_locale', 'negotiate_locale',
- 'parse_locale']
-__docformat__ = 'restructuredtext en'
-
-_global_data = None
-
-def get_global(key):
- """Return the dictionary for the given key in the global data.
-
- The global data is stored in the ``babel/global.dat`` file and contains
- information independent of individual locales.
-
- >>> get_global('zone_aliases')['UTC']
- 'Etc/GMT'
- >>> get_global('zone_territories')['Europe/Berlin']
- 'DE'
-
- :param key: the data key
- :return: the dictionary found in the global data under the given key
- :rtype: `dict`
- :since: version 0.9
- """
- global _global_data
- if _global_data is None:
- dirname = os.path.join(os.path.dirname(__file__))
- filename = os.path.join(dirname, 'global.dat')
- fileobj = open(filename, 'rb')
- try:
- _global_data = pickle.load(fileobj)
- finally:
- fileobj.close()
- return _global_data.get(key, {})
-
-
-LOCALE_ALIASES = {
- 'ar': 'ar_SY', 'bg': 'bg_BG', 'bs': 'bs_BA', 'ca': 'ca_ES', 'cs': 'cs_CZ',
- 'da': 'da_DK', 'de': 'de_DE', 'el': 'el_GR', 'en': 'en_US', 'es': 'es_ES',
- 'et': 'et_EE', 'fa': 'fa_IR', 'fi': 'fi_FI', 'fr': 'fr_FR', 'gl': 'gl_ES',
- 'he': 'he_IL', 'hu': 'hu_HU', 'id': 'id_ID', 'is': 'is_IS', 'it': 'it_IT',
- 'ja': 'ja_JP', 'km': 'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV',
- 'mk': 'mk_MK', 'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL',
- 'pt': 'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI',
- 'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA'
-}
-
-
-class UnknownLocaleError(Exception):
- """Exception thrown when a locale is requested for which no locale data
- is available.
- """
-
- def __init__(self, identifier):
- """Create the exception.
-
- :param identifier: the identifier string of the unsupported locale
- """
- Exception.__init__(self, 'unknown locale %r' % identifier)
- self.identifier = identifier
-
-
-class Locale(object):
- """Representation of a specific locale.
-
- >>> locale = Locale('en', 'US')
- >>> repr(locale)
- '<Locale "en_US">'
- >>> locale.display_name
- u'English (United States)'
-
- A `Locale` object can also be instantiated from a raw locale string:
-
- >>> locale = Locale.parse('en-US', sep='-')
- >>> repr(locale)
- '<Locale "en_US">'
-
- `Locale` objects provide access to a collection of locale data, such as
- territory and language names, number and date format patterns, and more:
-
- >>> locale.number_symbols['decimal']
- u'.'
-
- If a locale is requested for which no locale data is available, an
- `UnknownLocaleError` is raised:
-
- >>> Locale.parse('en_DE')
- Traceback (most recent call last):
- ...
- UnknownLocaleError: unknown locale 'en_DE'
-
- :see: `IETF RFC 3066 <http://www.ietf.org/rfc/rfc3066.txt>`_
- """
-
- def __init__(self, language, territory=None, script=None, variant=None):
- """Initialize the locale object from the given identifier components.
-
- >>> locale = Locale('en', 'US')
- >>> locale.language
- 'en'
- >>> locale.territory
- 'US'
-
- :param language: the language code
- :param territory: the territory (country or region) code
- :param script: the script code
- :param variant: the variant code
- :raise `UnknownLocaleError`: if no locale data is available for the
- requested locale
- """
- self.language = language
- self.territory = territory
- self.script = script
- self.variant = variant
- self.__data = None
-
- identifier = str(self)
- if not localedata.exists(identifier):
- raise UnknownLocaleError(identifier)
-
- def default(cls, category=None, aliases=LOCALE_ALIASES):
- """Return the system default locale for the specified category.
-
- >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE']:
- ... os.environ[name] = ''
- >>> os.environ['LANG'] = 'fr_FR.UTF-8'
- >>> Locale.default('LC_MESSAGES')
- <Locale "fr_FR">
-
- :param category: one of the ``LC_XXX`` environment variable names
- :param aliases: a dictionary of aliases for locale identifiers
- :return: the value of the variable, or any of the fallbacks
- (``LANGUAGE``, ``LC_ALL``, ``LC_CTYPE``, and ``LANG``)
- :rtype: `Locale`
- :see: `default_locale`
- """
- return cls(default_locale(category, aliases=aliases))
- default = classmethod(default)
-
- def negotiate(cls, preferred, available, sep='_', aliases=LOCALE_ALIASES):
- """Find the best match between available and requested locale strings.
-
- >>> Locale.negotiate(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
- <Locale "de_DE">
- >>> Locale.negotiate(['de_DE', 'en_US'], ['en', 'de'])
- <Locale "de">
- >>> Locale.negotiate(['de_DE', 'de'], ['en_US'])
-
- You can specify the character used in the locale identifiers to separate
- the differnet components. This separator is applied to both lists. Also,
- case is ignored in the comparison:
-
- >>> Locale.negotiate(['de-DE', 'de'], ['en-us', 'de-de'], sep='-')
- <Locale "de_DE">
-
- :param preferred: the list of locale identifers preferred by the user
- :param available: the list of locale identifiers available
- :param aliases: a dictionary of aliases for locale identifiers
- :return: the `Locale` object for the best match, or `None` if no match
- was found
- :rtype: `Locale`
- :see: `negotiate_locale`
- """
- identifier = negotiate_locale(preferred, available, sep=sep,
- aliases=aliases)
- if identifier:
- return Locale.parse(identifier, sep=sep)
- negotiate = classmethod(negotiate)
-
- def parse(cls, identifier, sep='_'):
- """Create a `Locale` instance for the given locale identifier.
-
- >>> l = Locale.parse('de-DE', sep='-')
- >>> l.display_name
- u'Deutsch (Deutschland)'
-
- If the `identifier` parameter is not a string, but actually a `Locale`
- object, that object is returned:
-
- >>> Locale.parse(l)
- <Locale "de_DE">
-
- :param identifier: the locale identifier string
- :param sep: optional component separator
- :return: a corresponding `Locale` instance
- :rtype: `Locale`
- :raise `ValueError`: if the string does not appear to be a valid locale
- identifier
- :raise `UnknownLocaleError`: if no locale data is available for the
- requested locale
- :see: `parse_locale`
- """
- if isinstance(identifier, basestring):
- return cls(*parse_locale(identifier, sep=sep))
- return identifier
- parse = classmethod(parse)
-
- def __eq__(self, other):
- return str(self) == str(other)
-
- def __repr__(self):
- return '<Locale "%s">' % str(self)
-
- def __str__(self):
- return '_'.join(filter(None, [self.language, self.script,
- self.territory, self.variant]))
-
- def _data(self):
- if self.__data is None:
- self.__data = localedata.LocaleDataDict(localedata.load(str(self)))
- return self.__data
- _data = property(_data)
-
- def get_display_name(self, locale=None):
- """Return the display name of the locale using the given locale.
-
- The display name will include the language, territory, script, and
- variant, if those are specified.
-
- >>> Locale('zh', 'CN', script='Hans').get_display_name('en')
- u'Chinese (Simplified Han, China)'
-
- :param locale: the locale to use
- :return: the display name
- """
- if locale is None:
- locale = self
- locale = Locale.parse(locale)
- retval = locale.languages.get(self.language)
- if self.territory or self.script or self.variant:
- details = []
- if self.script:
- details.append(locale.scripts.get(self.script))
- if self.territory:
- details.append(locale.territories.get(self.territory))
- if self.variant:
- details.append(locale.variants.get(self.variant))
- details = filter(None, details)
- if details:
- retval += ' (%s)' % u', '.join(details)
- return retval
-
- display_name = property(get_display_name, doc="""\
- The localized display name of the locale.
-
- >>> Locale('en').display_name
- u'English'
- >>> Locale('en', 'US').display_name
- u'English (United States)'
- >>> Locale('sv').display_name
- u'svenska'
-
- :type: `unicode`
- """)
-
- def english_name(self):
- return self.get_display_name(Locale('en'))
- english_name = property(english_name, doc="""\
- The english display name of the locale.
-
- >>> Locale('de').english_name
- u'German'
- >>> Locale('de', 'DE').english_name
- u'German (Germany)'
-
- :type: `unicode`
- """)
-
- #{ General Locale Display Names
-
- def languages(self):
- return self._data['languages']
- languages = property(languages, doc="""\
- Mapping of language codes to translated language names.
-
- >>> Locale('de', 'DE').languages['ja']
- u'Japanisch'
-
- :type: `dict`
- :see: `ISO 639 <http://www.loc.gov/standards/iso639-2/>`_
- """)
-
- def scripts(self):
- return self._data['scripts']
- scripts = property(scripts, doc="""\
- Mapping of script codes to translated script names.
-
- >>> Locale('en', 'US').scripts['Hira']
- u'Hiragana'
-
- :type: `dict`
- :see: `ISO 15924 <http://www.evertype.com/standards/iso15924/>`_
- """)
-
- def territories(self):
- return self._data['territories']
- territories = property(territories, doc="""\
- Mapping of script codes to translated script names.
-
- >>> Locale('es', 'CO').territories['DE']
- u'Alemania'
-
- :type: `dict`
- :see: `ISO 3166 <http://www.iso.org/iso/en/prods-services/iso3166ma/>`_
- """)
-
- def variants(self):
- return self._data['variants']
- variants = property(variants, doc="""\
- Mapping of script codes to translated script names.
-
- >>> Locale('de', 'DE').variants['1901']
- u'Alte deutsche Rechtschreibung'
-
- :type: `dict`
- """)
-
- #{ Number Formatting
-
- def currencies(self):
- return self._data['currency_names']
- currencies = property(currencies, doc="""\
- Mapping of currency codes to translated currency names.
-
- >>> Locale('en').currencies['COP']
- u'Colombian Peso'
- >>> Locale('de', 'DE').currencies['COP']
- u'Kolumbianischer Peso'
-
- :type: `dict`
- """)
-
- def currency_symbols(self):
- return self._data['currency_symbols']
- currency_symbols = property(currency_symbols, doc="""\
- Mapping of currency codes to symbols.
-
- >>> Locale('en', 'US').currency_symbols['USD']
- u'$'
- >>> Locale('es', 'CO').currency_symbols['USD']
- u'US$'
-
- :type: `dict`
- """)
-
- def number_symbols(self):
- return self._data['number_symbols']
- number_symbols = property(number_symbols, doc="""\
- Symbols used in number formatting.
-
- >>> Locale('fr', 'FR').number_symbols['decimal']
- u','
-
- :type: `dict`
- """)
-
- def decimal_formats(self):
- return self._data['decimal_formats']
- decimal_formats = property(decimal_formats, doc="""\
- Locale patterns for decimal number formatting.
-
- >>> Locale('en', 'US').decimal_formats[None]
- <NumberPattern u'#,##0.###'>
-
- :type: `dict`
- """)
-
- def currency_formats(self):
- return self._data['currency_formats']
- currency_formats = property(currency_formats, doc=r"""\
- Locale patterns for currency number formatting.
-
- >>> print Locale('en', 'US').currency_formats[None]
- <NumberPattern u'\xa4#,##0.00'>
-
- :type: `dict`
- """)
-
- def percent_formats(self):
- return self._data['percent_formats']
- percent_formats = property(percent_formats, doc="""\
- Locale patterns for percent number formatting.
-
- >>> Locale('en', 'US').percent_formats[None]
- <NumberPattern u'#,##0%'>
-
- :type: `dict`
- """)
-
- def scientific_formats(self):
- return self._data['scientific_formats']
- scientific_formats = property(scientific_formats, doc="""\
- Locale patterns for scientific number formatting.
-
- >>> Locale('en', 'US').scientific_formats[None]
- <NumberPattern u'#E0'>
-
- :type: `dict`
- """)
-
- #{ Calendar Information and Date Formatting
-
- def periods(self):
- return self._data['periods']
- periods = property(periods, doc="""\
- Locale display names for day periods (AM/PM).
-
- >>> Locale('en', 'US').periods['am']
- u'AM'
-
- :type: `dict`
- """)
-
- def days(self):
- return self._data['days']
- days = property(days, doc="""\
- Locale display names for weekdays.
-
- >>> Locale('de', 'DE').days['format']['wide'][3]
- u'Donnerstag'
-
- :type: `dict`
- """)
-
- def months(self):
- return self._data['months']
- months = property(months, doc="""\
- Locale display names for months.
-
- >>> Locale('de', 'DE').months['format']['wide'][10]
- u'Oktober'
-
- :type: `dict`
- """)
-
- def quarters(self):
- return self._data['quarters']
- quarters = property(quarters, doc="""\
- Locale display names for quarters.
-
- >>> Locale('de', 'DE').quarters['format']['wide'][1]
- u'1. Quartal'
-
- :type: `dict`
- """)
-
- def eras(self):
- return self._data['eras']
- eras = property(eras, doc="""\
- Locale display names for eras.
-
- >>> Locale('en', 'US').eras['wide'][1]
- u'Anno Domini'
- >>> Locale('en', 'US').eras['abbreviated'][0]
- u'BC'
-
- :type: `dict`
- """)
-
- def time_zones(self):
- return self._data['time_zones']
- time_zones = property(time_zones, doc="""\
- Locale display names for time zones.
-
- >>> Locale('en', 'US').time_zones['Europe/London']['long']['daylight']
- u'British Summer Time'
- >>> Locale('en', 'US').time_zones['America/St_Johns']['city']
- u"St. John's"
-
- :type: `dict`
- """)
-
- def meta_zones(self):
- return self._data['meta_zones']
- meta_zones = property(meta_zones, doc="""\
- Locale display names for meta time zones.
-
- Meta time zones are basically groups of different Olson time zones that
- have the same GMT offset and daylight savings time.
-
- >>> Locale('en', 'US').meta_zones['Europe_Central']['long']['daylight']
- u'Central European Summer Time'
-
- :type: `dict`
- :since: version 0.9
- """)
-
- def zone_formats(self):
- return self._data['zone_formats']
- zone_formats = property(zone_formats, doc=r"""\
- Patterns related to the formatting of time zones.
-
- >>> Locale('en', 'US').zone_formats['fallback']
- u'%(1)s (%(0)s)'
- >>> Locale('pt', 'BR').zone_formats['region']
- u'Hor\xe1rio %s'
-
- :type: `dict`
- :since: version 0.9
- """)
-
- def first_week_day(self):
- return self._data['week_data']['first_day']
- first_week_day = property(first_week_day, doc="""\
- The first day of a week, with 0 being Monday.
-
- >>> Locale('de', 'DE').first_week_day
- 0
- >>> Locale('en', 'US').first_week_day
- 6
-
- :type: `int`
- """)
-
- def weekend_start(self):
- return self._data['week_data']['weekend_start']
- weekend_start = property(weekend_start, doc="""\
- The day the weekend starts, with 0 being Monday.
-
- >>> Locale('de', 'DE').weekend_start
- 5
-
- :type: `int`
- """)
-
- def weekend_end(self):
- return self._data['week_data']['weekend_end']
- weekend_end = property(weekend_end, doc="""\
- The day the weekend ends, with 0 being Monday.
-
- >>> Locale('de', 'DE').weekend_end
- 6
-
- :type: `int`
- """)
-
- def min_week_days(self):
- return self._data['week_data']['min_days']
- min_week_days = property(min_week_days, doc="""\
- The minimum number of days in a week so that the week is counted as the
- first week of a year or month.
-
- >>> Locale('de', 'DE').min_week_days
- 4
-
- :type: `int`
- """)
-
- def date_formats(self):
- return self._data['date_formats']
- date_formats = property(date_formats, doc="""\
- Locale patterns for date formatting.
-
- >>> Locale('en', 'US').date_formats['short']
- <DateTimePattern u'M/d/yy'>
- >>> Locale('fr', 'FR').date_formats['long']
- <DateTimePattern u'd MMMM yyyy'>
-
- :type: `dict`
- """)
-
- def time_formats(self):
- return self._data['time_formats']
- time_formats = property(time_formats, doc="""\
- Locale patterns for time formatting.
-
- >>> Locale('en', 'US').time_formats['short']
- <DateTimePattern u'h:mm a'>
- >>> Locale('fr', 'FR').time_formats['long']
- <DateTimePattern u'HH:mm:ss z'>
-
- :type: `dict`
- """)
-
- def datetime_formats(self):
- return self._data['datetime_formats']
- datetime_formats = property(datetime_formats, doc="""\
- Locale patterns for datetime formatting.
-
- >>> Locale('en').datetime_formats[None]
- u'{1} {0}'
- >>> Locale('th').datetime_formats[None]
- u'{1}, {0}'
-
- :type: `dict`
- """)
-
-
-def default_locale(category=None, aliases=LOCALE_ALIASES):
- """Returns the system default locale for a given category, based on
- environment variables.
-
- >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE']:
- ... os.environ[name] = ''
- >>> os.environ['LANG'] = 'fr_FR.UTF-8'
- >>> default_locale('LC_MESSAGES')
- 'fr_FR'
-
- The "C" or "POSIX" pseudo-locales are treated as aliases for the
- "en_US_POSIX" locale:
-
- >>> os.environ['LC_MESSAGES'] = 'POSIX'
- >>> default_locale('LC_MESSAGES')
- 'en_US_POSIX'
-
- :param category: one of the ``LC_XXX`` environment variable names
- :param aliases: a dictionary of aliases for locale identifiers
- :return: the value of the variable, or any of the fallbacks (``LANGUAGE``,
- ``LC_ALL``, ``LC_CTYPE``, and ``LANG``)
- :rtype: `str`
- """
- varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')
- for name in filter(None, varnames):
- locale = os.getenv(name)
- if locale:
- if name == 'LANGUAGE' and ':' in locale:
- # the LANGUAGE variable may contain a colon-separated list of
- # language codes; we just pick the language on the list
- locale = locale.split(':')[0]
- if locale in ('C', 'POSIX'):
- locale = 'en_US_POSIX'
- elif aliases and locale in aliases:
- locale = aliases[locale]
- return '_'.join(filter(None, parse_locale(locale)))
-
-def negotiate_locale(preferred, available, sep='_', aliases=LOCALE_ALIASES):
- """Find the best match between available and requested locale strings.
-
- >>> negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
- 'de_DE'
- >>> negotiate_locale(['de_DE', 'en_US'], ['en', 'de'])
- 'de'
-
- Case is ignored by the algorithm, the result uses the case of the preferred
- locale identifier:
-
- >>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at'])
- 'de_DE'
-
- >>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at'])
- 'de_DE'
-
- By default, some web browsers unfortunately do not include the territory
- in the locale identifier for many locales, and some don't even allow the
- user to easily add the territory. So while you may prefer using qualified
- locale identifiers in your web-application, they would not normally match
- the language-only locale sent by such browsers. To workaround that, this
- function uses a default mapping of commonly used langauge-only locale
- identifiers to identifiers including the territory:
-
- >>> negotiate_locale(['ja', 'en_US'], ['ja_JP', 'en_US'])
- 'ja_JP'
-
- Some browsers even use an incorrect or outdated language code, such as "no"
- for Norwegian, where the correct locale identifier would actually be "nb_NO"
- (Bokmål) or "nn_NO" (Nynorsk). The aliases are intended to take care of
- such cases, too:
-
- >>> negotiate_locale(['no', 'sv'], ['nb_NO', 'sv_SE'])
- 'nb_NO'
-
- You can override this default mapping by passing a different `aliases`
- dictionary to this function, or you can bypass the behavior althogher by
- setting the `aliases` parameter to `None`.
-
- :param preferred: the list of locale strings preferred by the user
- :param available: the list of locale strings available
- :param sep: character that separates the different parts of the locale
- strings
- :param aliases: a dictionary of aliases for locale identifiers
- :return: the locale identifier for the best match, or `None` if no match
- was found
- :rtype: `str`
- """
- available = [a.lower() for a in available if a]
- for locale in preferred:
- ll = locale.lower()
- if ll in available:
- return locale
- if aliases:
- alias = aliases.get(ll)
- if alias:
- alias = alias.replace('_', sep)
- if alias.lower() in available:
- return alias
- parts = locale.split(sep)
- if len(parts) > 1 and parts[0].lower() in available:
- return parts[0]
- return None
-
-def parse_locale(identifier, sep='_'):
- """Parse a locale identifier into a tuple of the form::
-
- ``(language, territory, script, variant)``
-
- >>> parse_locale('zh_CN')
- ('zh', 'CN', None, None)
- >>> parse_locale('zh_Hans_CN')
- ('zh', 'CN', 'Hans', None)
-
- The default component separator is "_", but a different separator can be
- specified using the `sep` parameter:
-
- >>> parse_locale('zh-CN', sep='-')
- ('zh', 'CN', None, None)
-
- If the identifier cannot be parsed into a locale, a `ValueError` exception
- is raised:
-
- >>> parse_locale('not_a_LOCALE_String')
- Traceback (most recent call last):
- ...
- ValueError: 'not_a_LOCALE_String' is not a valid locale identifier
-
- Encoding information and locale modifiers are removed from the identifier:
-
- >>> parse_locale('it_IT@euro')
- ('it', 'IT', None, None)
- >>> parse_locale('en_US.UTF-8')
- ('en', 'US', None, None)
- >>> parse_locale('de_DE.iso885915@euro')
- ('de', 'DE', None, None)
-
- :param identifier: the locale identifier string
- :param sep: character that separates the different components of the locale
- identifier
- :return: the ``(language, territory, script, variant)`` tuple
- :rtype: `tuple`
- :raise `ValueError`: if the string does not appear to be a valid locale
- identifier
-
- :see: `IETF RFC 4646 <http://www.ietf.org/rfc/rfc4646.txt>`_
- """
- if '.' in identifier:
- # this is probably the charset/encoding, which we don't care about
- identifier = identifier.split('.', 1)[0]
- if '@' in identifier:
- # this is a locale modifier such as @euro, which we don't care about
- # either
- identifier = identifier.split('@', 1)[0]
-
- parts = identifier.split(sep)
- lang = parts.pop(0).lower()
- if not lang.isalpha():
- raise ValueError('expected only letters, got %r' % lang)
-
- script = territory = variant = None
- if parts:
- if len(parts[0]) == 4 and parts[0].isalpha():
- script = parts.pop(0).title()
-
- if parts:
- if len(parts[0]) == 2 and parts[0].isalpha():
- territory = parts.pop(0).upper()
- elif len(parts[0]) == 3 and parts[0].isdigit():
- territory = parts.pop(0)
-
- if parts:
- if len(parts[0]) == 4 and parts[0][0].isdigit() or \
- len(parts[0]) >= 5 and parts[0][0].isalpha():
- variant = parts.pop()
-
- if parts:
- raise ValueError('%r is not a valid locale identifier' % identifier)
-
- return lang, territory, script, variant
diff --git a/babel/dates.py b/babel/dates.py
deleted file mode 100644
index 8d1b4f7..0000000
--- a/babel/dates.py
+++ /dev/null
@@ -1,991 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Locale dependent formatting and parsing of dates and times.
-
-The default locale for the functions in this module is determined by the
-following environment variables, in that order:
-
- * ``LC_TIME``,
- * ``LC_ALL``, and
- * ``LANG``
-"""
-
-from datetime import date, datetime, time, timedelta, tzinfo
-import re
-
-from babel.core import default_locale, get_global, Locale
-from babel.util import UTC
-
-__all__ = ['format_date', 'format_datetime', 'format_time',
- 'get_timezone_name', 'parse_date', 'parse_datetime', 'parse_time']
-__docformat__ = 'restructuredtext en'
-
-LC_TIME = default_locale('LC_TIME')
-
-# Aliases for use in scopes where the modules are shadowed by local variables
-date_ = date
-datetime_ = datetime
-time_ = time
-
-def get_period_names(locale=LC_TIME):
- """Return the names for day periods (AM/PM) used by the locale.
-
- >>> get_period_names(locale='en_US')['am']
- u'AM'
-
- :param locale: the `Locale` object, or a locale string
- :return: the dictionary of period names
- :rtype: `dict`
- """
- return Locale.parse(locale).periods
-
-def get_day_names(width='wide', context='format', locale=LC_TIME):
- """Return the day names used by the locale for the specified format.
-
- >>> get_day_names('wide', locale='en_US')[1]
- u'Tuesday'
- >>> get_day_names('abbreviated', locale='es')[1]
- u'mar'
- >>> get_day_names('narrow', context='stand-alone', locale='de_DE')[1]
- u'D'
-
- :param width: the width to use, one of "wide", "abbreviated", or "narrow"
- :param context: the context, either "format" or "stand-alone"
- :param locale: the `Locale` object, or a locale string
- :return: the dictionary of day names
- :rtype: `dict`
- """
- return Locale.parse(locale).days[context][width]
-
-def get_month_names(width='wide', context='format', locale=LC_TIME):
- """Return the month names used by the locale for the specified format.
-
- >>> get_month_names('wide', locale='en_US')[1]
- u'January'
- >>> get_month_names('abbreviated', locale='es')[1]
- u'ene'
- >>> get_month_names('narrow', context='stand-alone', locale='de_DE')[1]
- u'J'
-
- :param width: the width to use, one of "wide", "abbreviated", or "narrow"
- :param context: the context, either "format" or "stand-alone"
- :param locale: the `Locale` object, or a locale string
- :return: the dictionary of month names
- :rtype: `dict`
- """
- return Locale.parse(locale).months[context][width]
-
-def get_quarter_names(width='wide', context='format', locale=LC_TIME):
- """Return the quarter names used by the locale for the specified format.
-
- >>> get_quarter_names('wide', locale='en_US')[1]
- u'1st quarter'
- >>> get_quarter_names('abbreviated', locale='de_DE')[1]
- u'Q1'
-
- :param width: the width to use, one of "wide", "abbreviated", or "narrow"
- :param context: the context, either "format" or "stand-alone"
- :param locale: the `Locale` object, or a locale string
- :return: the dictionary of quarter names
- :rtype: `dict`
- """
- return Locale.parse(locale).quarters[context][width]
-
-def get_era_names(width='wide', locale=LC_TIME):
- """Return the era names used by the locale for the specified format.
-
- >>> get_era_names('wide', locale='en_US')[1]
- u'Anno Domini'
- >>> get_era_names('abbreviated', locale='de_DE')[1]
- u'n. Chr.'
-
- :param width: the width to use, either "wide", "abbreviated", or "narrow"
- :param locale: the `Locale` object, or a locale string
- :return: the dictionary of era names
- :rtype: `dict`
- """
- return Locale.parse(locale).eras[width]
-
-def get_date_format(format='medium', locale=LC_TIME):
- """Return the date formatting patterns used by the locale for the specified
- format.
-
- >>> get_date_format(locale='en_US')
- <DateTimePattern u'MMM d, yyyy'>
- >>> get_date_format('full', locale='de_DE')
- <DateTimePattern u'EEEE, d. MMMM yyyy'>
-
- :param format: the format to use, one of "full", "long", "medium", or
- "short"
- :param locale: the `Locale` object, or a locale string
- :return: the date format pattern
- :rtype: `DateTimePattern`
- """
- return Locale.parse(locale).date_formats[format]
-
-def get_datetime_format(format='medium', locale=LC_TIME):
- """Return the datetime formatting patterns used by the locale for the
- specified format.
-
- >>> get_datetime_format(locale='en_US')
- u'{1} {0}'
-
- :param format: the format to use, one of "full", "long", "medium", or
- "short"
- :param locale: the `Locale` object, or a locale string
- :return: the datetime format pattern
- :rtype: `unicode`
- """
- patterns = Locale.parse(locale).datetime_formats
- if format not in patterns:
- format = None
- return patterns[format]
-
-def get_time_format(format='medium', locale=LC_TIME):
- """Return the time formatting patterns used by the locale for the specified
- format.
-
- >>> get_time_format(locale='en_US')
- <DateTimePattern u'h:mm:ss a'>
- >>> get_time_format('full', locale='de_DE')
- <DateTimePattern u'HH:mm:ss v'>
-
- :param format: the format to use, one of "full", "long", "medium", or
- "short"
- :param locale: the `Locale` object, or a locale string
- :return: the time format pattern
- :rtype: `DateTimePattern`
- """
- return Locale.parse(locale).time_formats[format]
-
-def get_timezone_gmt(datetime=None, width='long', locale=LC_TIME):
- """Return the timezone associated with the given `datetime` object formatted
- as string indicating the offset from GMT.
-
- >>> dt = datetime(2007, 4, 1, 15, 30)
- >>> get_timezone_gmt(dt, locale='en')
- u'GMT+00:00'
-
- >>> from pytz import timezone
- >>> tz = timezone('America/Los_Angeles')
- >>> dt = datetime(2007, 4, 1, 15, 30, tzinfo=tz)
- >>> get_timezone_gmt(dt, locale='en')
- u'GMT-08:00'
- >>> get_timezone_gmt(dt, 'short', locale='en')
- u'-0800'
-
- The long format depends on the locale, for example in France the acronym
- UTC string is used instead of GMT:
-
- >>> get_timezone_gmt(dt, 'long', locale='fr_FR')
- u'UTC-08:00'
-
- :param datetime: the ``datetime`` object; if `None`, the current date and
- time in UTC is used
- :param width: either "long" or "short"
- :param locale: the `Locale` object, or a locale string
- :return: the GMT offset representation of the timezone
- :rtype: `unicode`
- :since: version 0.9
- """
- if datetime is None:
- datetime = datetime_.utcnow()
- elif isinstance(datetime, (int, long)):
- datetime = datetime_.utcfromtimestamp(datetime).time()
- if datetime.tzinfo is None:
- datetime = datetime.replace(tzinfo=UTC)
- locale = Locale.parse(locale)
-
- offset = datetime.tzinfo.utcoffset(datetime)
- seconds = offset.days * 24 * 60 * 60 + offset.seconds
- hours, seconds = divmod(seconds, 3600)
- if width == 'short':
- pattern = u'%+03d%02d'
- else:
- pattern = locale.zone_formats['gmt'] % '%+03d:%02d'
- return pattern % (hours, seconds // 60)
-
-def get_timezone_location(dt_or_tzinfo=None, locale=LC_TIME):
- """Return a representation of the given timezone using "location format".
-
- The result depends on both the local display name of the country and the
- city assocaited with the time zone:
-
- >>> from pytz import timezone
- >>> tz = timezone('America/St_Johns')
- >>> get_timezone_location(tz, locale='de_DE')
- u"Kanada (St. John's)"
- >>> tz = timezone('America/Mexico_City')
- >>> get_timezone_location(tz, locale='de_DE')
- u'Mexiko (Mexiko-Stadt)'
-
- If the timezone is associated with a country that uses only a single
- timezone, just the localized country name is returned:
-
- >>> tz = timezone('Europe/Berlin')
- >>> get_timezone_name(tz, locale='de_DE')
- u'Deutschland'
-
- :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines
- the timezone; if `None`, the current date and time in
- UTC is assumed
- :param locale: the `Locale` object, or a locale string
- :return: the localized timezone name using location format
- :rtype: `unicode`
- :since: version 0.9
- """
- if dt_or_tzinfo is None or isinstance(dt_or_tzinfo, (int, long)):
- dt = None
- tzinfo = UTC
- elif isinstance(dt_or_tzinfo, (datetime, time)):
- dt = dt_or_tzinfo
- if dt.tzinfo is not None:
- tzinfo = dt.tzinfo
- else:
- tzinfo = UTC
- else:
- dt = None
- tzinfo = dt_or_tzinfo
- locale = Locale.parse(locale)
-
- if hasattr(tzinfo, 'zone'):
- zone = tzinfo.zone
- else:
- zone = tzinfo.tzname(dt or datetime.utcnow())
-
- # Get the canonical time-zone code
- zone = get_global('zone_aliases').get(zone, zone)
-
- info = locale.time_zones.get(zone, {})
-
- # Otherwise, if there is only one timezone for the country, return the
- # localized country name
- region_format = locale.zone_formats['region']
- territory = get_global('zone_territories').get(zone)
- if territory not in locale.territories:
- territory = 'ZZ' # invalid/unknown
- territory_name = locale.territories[territory]
- if territory and len(get_global('territory_zones').get(territory, [])) == 1:
- return region_format % (territory_name)
-
- # Otherwise, include the city in the output
- fallback_format = locale.zone_formats['fallback']
- if 'city' in info:
- city_name = info['city']
- else:
- metazone = get_global('meta_zones').get(zone)
- metazone_info = locale.meta_zones.get(metazone, {})
- if 'city' in metazone_info:
- city_name = metainfo['city']
- elif '/' in zone:
- city_name = zone.split('/', 1)[1].replace('_', ' ')
- else:
- city_name = zone.replace('_', ' ')
-
- return region_format % (fallback_format % {
- '0': city_name,
- '1': territory_name
- })
-
-def get_timezone_name(dt_or_tzinfo=None, width='long', uncommon=False,
- locale=LC_TIME):
- r"""Return the localized display name for the given timezone. The timezone
- may be specified using a ``datetime`` or `tzinfo` object.
-
- >>> from pytz import timezone
- >>> dt = time(15, 30, tzinfo=timezone('America/Los_Angeles'))
- >>> get_timezone_name(dt, locale='en_US')
- u'Pacific Standard Time'
- >>> get_timezone_name(dt, width='short', locale='en_US')
- u'PST'
-
- If this function gets passed only a `tzinfo` object and no concrete
- `datetime`, the returned display name is indenpendent of daylight savings
- time. This can be used for example for selecting timezones, or to set the
- time of events that recur across DST changes:
-
- >>> tz = timezone('America/Los_Angeles')
- >>> get_timezone_name(tz, locale='en_US')
- u'Pacific Time'
- >>> get_timezone_name(tz, 'short', locale='en_US')
- u'PT'
-
- If no localized display name for the timezone is available, and the timezone
- is associated with a country that uses only a single timezone, the name of
- that country is returned, formatted according to the locale:
-
- >>> tz = timezone('Europe/Berlin')
- >>> get_timezone_name(tz, locale='de_DE')
- u'Deutschland'
- >>> get_timezone_name(tz, locale='pt_BR')
- u'Hor\xe1rio Alemanha'
-
- On the other hand, if the country uses multiple timezones, the city is also
- included in the representation:
-
- >>> tz = timezone('America/St_Johns')
- >>> get_timezone_name(tz, locale='de_DE')
- u"Kanada (St. John's)"
-
- The `uncommon` parameter can be set to `True` to enable the use of timezone
- representations that are not commonly used by the requested locale. For
- example, while in frensh the central europian timezone is usually
- abbreviated as "HEC", in Canadian French, this abbreviation is not in
- common use, so a generic name would be chosen by default:
-
- >>> tz = timezone('Europe/Paris')
- >>> get_timezone_name(tz, 'short', locale='fr_CA')
- u'France'
- >>> get_timezone_name(tz, 'short', uncommon=True, locale='fr_CA')
- u'HEC'
-
- :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines
- the timezone; if a ``tzinfo`` object is used, the
- resulting display name will be generic, i.e.
- independent of daylight savings time; if `None`, the
- current date in UTC is assumed
- :param width: either "long" or "short"
- :param uncommon: whether even uncommon timezone abbreviations should be used
- :param locale: the `Locale` object, or a locale string
- :return: the timezone display name
- :rtype: `unicode`
- :since: version 0.9
- :see: `LDML Appendix J: Time Zone Display Names
- <http://www.unicode.org/reports/tr35/#Time_Zone_Fallback>`_
- """
- if dt_or_tzinfo is None or isinstance(dt_or_tzinfo, (int, long)):
- dt = None
- tzinfo = UTC
- elif isinstance(dt_or_tzinfo, (datetime, time)):
- dt = dt_or_tzinfo
- if dt.tzinfo is not None:
- tzinfo = dt.tzinfo
- else:
- tzinfo = UTC
- else:
- dt = None
- tzinfo = dt_or_tzinfo
- locale = Locale.parse(locale)
-
- if hasattr(tzinfo, 'zone'):
- zone = tzinfo.zone
- else:
- zone = tzinfo.tzname(dt)
-
- # Get the canonical time-zone code
- zone = get_global('zone_aliases').get(zone, zone)
-
- info = locale.time_zones.get(zone, {})
- # Try explicitly translated zone names first
- if width in info:
- if dt is None:
- field = 'generic'
- else:
- dst = tzinfo.dst(dt)
- if dst is None:
- field = 'generic'
- elif dst == 0:
- field = 'standard'
- else:
- field = 'daylight'
- if field in info[width]:
- return info[width][field]
-
- metazone = get_global('meta_zones').get(zone)
- if metazone:
- metazone_info = locale.meta_zones.get(metazone, {})
- if width in metazone_info and (uncommon or metazone_info.get('common')):
- if dt is None:
- field = 'generic'
- else:
- field = tzinfo.dst(dt) and 'daylight' or 'standard'
- if field in metazone_info[width]:
- return metazone_info[width][field]
-
- # If we have a concrete datetime, we assume that the result can't be
- # independent of daylight savings time, so we return the GMT offset
- if dt is not None:
- return get_timezone_gmt(dt, width=width, locale=locale)
-
- return get_timezone_location(dt_or_tzinfo, locale=locale)
-
-def format_date(date=None, format='medium', locale=LC_TIME):
- """Return a date formatted according to the given pattern.
-
- >>> d = date(2007, 04, 01)
- >>> format_date(d, locale='en_US')
- u'Apr 1, 2007'
- >>> format_date(d, format='full', locale='de_DE')
- u'Sonntag, 1. April 2007'
-
- If you don't want to use the locale default formats, you can specify a
- custom date pattern:
-
- >>> format_date(d, "EEE, MMM d, ''yy", locale='en')
- u"Sun, Apr 1, '07"
-
- :param date: the ``date`` or ``datetime`` object; if `None`, the current
- date is used
- :param format: one of "full", "long", "medium", or "short", or a custom
- date/time pattern
- :param locale: a `Locale` object or a locale identifier
- :rtype: `unicode`
-
- :note: If the pattern contains time fields, an `AttributeError` will be
- raised when trying to apply the formatting. This is also true if
- the value of ``date`` parameter is actually a ``datetime`` object,
- as this function automatically converts that to a ``date``.
- """
- if date is None:
- date = date_.today()
- elif isinstance(date, datetime):
- date = date.date()
-
- locale = Locale.parse(locale)
- if format in ('full', 'long', 'medium', 'short'):
- format = get_date_format(format, locale=locale)
- pattern = parse_pattern(format)
- return parse_pattern(format).apply(date, locale)
-
-def format_datetime(datetime=None, format='medium', tzinfo=None,
- locale=LC_TIME):
- """Return a date formatted according to the given pattern.
-
- >>> dt = datetime(2007, 04, 01, 15, 30)
- >>> format_datetime(dt, locale='en_US')
- u'Apr 1, 2007 3:30:00 PM'
-
- For any pattern requiring the display of the time-zone, the third-party
- ``pytz`` package is needed to explicitly specify the time-zone:
-
- >>> from pytz import timezone
- >>> format_datetime(dt, 'full', tzinfo=timezone('Europe/Paris'),
- ... locale='fr_FR')
- u'dimanche 1 avril 2007 17:30:00 HEC'
- >>> format_datetime(dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz",
- ... tzinfo=timezone('US/Eastern'), locale='en')
- u'2007.04.01 AD at 11:30:00 EDT'
-
- :param datetime: the `datetime` object; if `None`, the current date and
- time is used
- :param format: one of "full", "long", "medium", or "short", or a custom
- date/time pattern
- :param tzinfo: the timezone to apply to the time for display
- :param locale: a `Locale` object or a locale identifier
- :rtype: `unicode`
- """
- if datetime is None:
- datetime = datetime_.utcnow()
- elif isinstance(datetime, (int, long)):
- datetime = datetime_.utcfromtimestamp(datetime)
- elif isinstance(datetime, time):
- datetime = datetime_.combine(date.today(), datetime)
- if datetime.tzinfo is None:
- datetime = datetime.replace(tzinfo=UTC)
- if tzinfo is not None:
- datetime = datetime.astimezone(tzinfo)
- if hasattr(tzinfo, 'normalize'): # pytz
- datetime = tzinfo.normalize(datetime)
-
- locale = Locale.parse(locale)
- if format in ('full', 'long', 'medium', 'short'):
- return get_datetime_format(format, locale=locale) \
- .replace('{0}', format_time(datetime, format, tzinfo=None,
- locale=locale)) \
- .replace('{1}', format_date(datetime, format, locale=locale))
- else:
- return parse_pattern(format).apply(datetime, locale)
-
-def format_time(time=None, format='medium', tzinfo=None, locale=LC_TIME):
- """Return a time formatted according to the given pattern.
-
- >>> t = time(15, 30)
- >>> format_time(t, locale='en_US')
- u'3:30:00 PM'
- >>> format_time(t, format='short', locale='de_DE')
- u'15:30'
-
- If you don't want to use the locale default formats, you can specify a
- custom time pattern:
-
- >>> format_time(t, "hh 'o''clock' a", locale='en')
- u"03 o'clock PM"
-
- For any pattern requiring the display of the time-zone, the third-party
- ``pytz`` package is needed to explicitly specify the time-zone:
-
- >>> from pytz import timezone
- >>> t = datetime(2007, 4, 1, 15, 30)
- >>> tzinfo = timezone('Europe/Paris')
- >>> t = tzinfo.localize(t)
- >>> format_time(t, format='full', tzinfo=tzinfo, locale='fr_FR')
- u'15:30:00 HEC'
- >>> format_time(t, "hh 'o''clock' a, zzzz", tzinfo=timezone('US/Eastern'),
- ... locale='en')
- u"09 o'clock AM, Eastern Daylight Time"
-
- As that example shows, when this function gets passed a
- ``datetime.datetime`` value, the actual time in the formatted string is
- adjusted to the timezone specified by the `tzinfo` parameter. If the
- ``datetime`` is "naive" (i.e. it has no associated timezone information),
- it is assumed to be in UTC.
-
- These timezone calculations are **not** performed if the value is of type
- ``datetime.time``, as without date information there's no way to determine
- what a given time would translate to in a different timezone without
- information about whether daylight savings time is in effect or not. This
- means that time values are left as-is, and the value of the `tzinfo`
- parameter is only used to display the timezone name if needed:
-
- >>> t = time(15, 30)
- >>> format_time(t, format='full', tzinfo=timezone('Europe/Paris'),
- ... locale='fr_FR')
- u'15:30:00 HEC'
- >>> format_time(t, format='full', tzinfo=timezone('US/Eastern'),
- ... locale='en_US')
- u'3:30:00 PM ET'
-
- :param time: the ``time`` or ``datetime`` object; if `None`, the current
- time in UTC is used
- :param format: one of "full", "long", "medium", or "short", or a custom
- date/time pattern
- :param tzinfo: the time-zone to apply to the time for display
- :param locale: a `Locale` object or a locale identifier
- :rtype: `unicode`
-
- :note: If the pattern contains date fields, an `AttributeError` will be
- raised when trying to apply the formatting. This is also true if
- the value of ``time`` parameter is actually a ``datetime`` object,
- as this function automatically converts that to a ``time``.
- """
- if time is None:
- time = datetime.utcnow()
- elif isinstance(time, (int, long)):
- time = datetime.utcfromtimestamp(time)
- if time.tzinfo is None:
- time = time.replace(tzinfo=UTC)
- if isinstance(time, datetime):
- if tzinfo is not None:
- time = time.astimezone(tzinfo)
- if hasattr(tzinfo, 'localize'): # pytz
- time = tzinfo.normalize(time)
- time = time.timetz()
- elif tzinfo is not None:
- time = time.replace(tzinfo=tzinfo)
-
- locale = Locale.parse(locale)
- if format in ('full', 'long', 'medium', 'short'):
- format = get_time_format(format, locale=locale)
- return parse_pattern(format).apply(time, locale)
-
-def parse_date(string, locale=LC_TIME):
- """Parse a date from a string.
-
- This function uses the date format for the locale as a hint to determine
- the order in which the date fields appear in the string.
-
- >>> parse_date('4/1/04', locale='en_US')
- datetime.date(2004, 4, 1)
- >>> parse_date('01.04.2004', locale='de_DE')
- datetime.date(2004, 4, 1)
-
- :param string: the string containing the date
- :param locale: a `Locale` object or a locale identifier
- :return: the parsed date
- :rtype: `date`
- """
- # TODO: try ISO format first?
- format = get_date_format(locale=locale).pattern.lower()
- year_idx = format.index('y')
- month_idx = format.index('m')
- if month_idx < 0:
- month_idx = format.index('l')
- day_idx = format.index('d')
-
- indexes = [(year_idx, 'Y'), (month_idx, 'M'), (day_idx, 'D')]
- indexes.sort()
- indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)])
-
- # FIXME: this currently only supports numbers, but should also support month
- # names, both in the requested locale, and english
-
- numbers = re.findall('(\d+)', string)
- year = numbers[indexes['Y']]
- if len(year) == 2:
- year = 2000 + int(year)
- else:
- year = int(year)
- month = int(numbers[indexes['M']])
- day = int(numbers[indexes['D']])
- if month > 12:
- month, day = day, month
- return date(year, month, day)
-
-def parse_datetime(string, locale=LC_TIME):
- """Parse a date and time from a string.
-
- This function uses the date and time formats for the locale as a hint to
- determine the order in which the time fields appear in the string.
-
- :param string: the string containing the date and time
- :param locale: a `Locale` object or a locale identifier
- :return: the parsed date/time
- :rtype: `datetime`
- """
- raise NotImplementedError
-
-def parse_time(string, locale=LC_TIME):
- """Parse a time from a string.
-
- This function uses the time format for the locale as a hint to determine
- the order in which the time fields appear in the string.
-
- >>> parse_time('15:30:00', locale='en_US')
- datetime.time(15, 30)
-
- :param string: the string containing the time
- :param locale: a `Locale` object or a locale identifier
- :return: the parsed time
- :rtype: `time`
- """
- # TODO: try ISO format first?
- format = get_time_format(locale=locale).pattern.lower()
- hour_idx = format.index('h')
- if hour_idx < 0:
- hour_idx = format.index('k')
- min_idx = format.index('m')
- sec_idx = format.index('s')
-
- indexes = [(hour_idx, 'H'), (min_idx, 'M'), (sec_idx, 'S')]
- indexes.sort()
- indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)])
-
- # FIXME: support 12 hour clock, and 0-based hour specification
- # and seconds should be optional, maybe minutes too
- # oh, and time-zones, of course
-
- numbers = re.findall('(\d+)', string)
- hour = int(numbers[indexes['H']])
- minute = int(numbers[indexes['M']])
- second = int(numbers[indexes['S']])
- return time(hour, minute, second)
-
-
-class DateTimePattern(object):
-
- def __init__(self, pattern, format):
- self.pattern = pattern
- self.format = format
-
- def __repr__(self):
- return '<%s %r>' % (type(self).__name__, self.pattern)
-
- def __unicode__(self):
- return self.pattern
-
- def __mod__(self, other):
- assert type(other) is DateTimeFormat
- return self.format % other
-
- def apply(self, datetime, locale):
- return self % DateTimeFormat(datetime, locale)
-
-
-class DateTimeFormat(object):
-
- def __init__(self, value, locale):
- assert isinstance(value, (date, datetime, time))
- if isinstance(value, (datetime, time)) and value.tzinfo is None:
- value = value.replace(tzinfo=UTC)
- self.value = value
- self.locale = Locale.parse(locale)
-
- def __getitem__(self, name):
- char = name[0]
- num = len(name)
- if char == 'G':
- return self.format_era(char, num)
- elif char in ('y', 'Y', 'u'):
- return self.format_year(char, num)
- elif char in ('Q', 'q'):
- return self.format_quarter(char, num)
- elif char in ('M', 'L'):
- return self.format_month(char, num)
- elif char in ('w', 'W'):
- return self.format_week(char, num)
- elif char == 'd':
- return self.format(self.value.day, num)
- elif char == 'D':
- return self.format_day_of_year(num)
- elif char == 'F':
- return self.format_day_of_week_in_month()
- elif char in ('E', 'e', 'c'):
- return self.format_weekday(char, num)
- elif char == 'a':
- return self.format_period(char)
- elif char == 'h':
- if self.value.hour % 12 == 0:
- return self.format(12, num)
- else:
- return self.format(self.value.hour % 12, num)
- elif char == 'H':
- return self.format(self.value.hour, num)
- elif char == 'K':
- return self.format(self.value.hour % 12, num)
- elif char == 'k':
- if self.value.hour == 0:
- return self.format(24, num)
- else:
- return self.format(self.value.hour, num)
- elif char == 'm':
- return self.format(self.value.minute, num)
- elif char == 's':
- return self.format(self.value.second, num)
- elif char == 'S':
- return self.format_frac_seconds(num)
- elif char == 'A':
- return self.format_milliseconds_in_day(num)
- elif char in ('z', 'Z', 'v', 'V'):
- return self.format_timezone(char, num)
- else:
- raise KeyError('Unsupported date/time field %r' % char)
-
- def format_era(self, char, num):
- width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[max(3, num)]
- era = int(self.value.year >= 0)
- return get_era_names(width, self.locale)[era]
-
- def format_year(self, char, num):
- value = self.value.year
- if char.isupper():
- week = self.get_week_number(self.get_day_of_year())
- if week == 0:
- value -= 1
- year = self.format(value, num)
- if num == 2:
- year = year[-2:]
- return year
-
- def format_quarter(self, char, num):
- quarter = (self.value.month - 1) // 3 + 1
- if num <= 2:
- return ('%%0%dd' % num) % quarter
- width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
- context = {'Q': 'format', 'q': 'stand-alone'}[char]
- return get_quarter_names(width, context, self.locale)[quarter]
-
- def format_month(self, char, num):
- if num <= 2:
- return ('%%0%dd' % num) % self.value.month
- width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
- context = {'M': 'format', 'L': 'stand-alone'}[char]
- return get_month_names(width, context, self.locale)[self.value.month]
-
- def format_week(self, char, num):
- if char.islower(): # week of year
- day_of_year = self.get_day_of_year()
- week = self.get_week_number(day_of_year)
- if week == 0:
- date = self.value - timedelta(days=day_of_year)
- week = self.get_week_number(self.get_day_of_year(date),
- date.weekday())
- return self.format(week, num)
- else: # week of month
- week = self.get_week_number(self.value.day)
- if week == 0:
- date = self.value - timedelta(days=self.value.day)
- week = self.get_week_number(date.day, date.weekday())
- pass
- return '%d' % week
-
- def format_weekday(self, char, num):
- if num < 3:
- if char.islower():
- value = 7 - self.locale.first_week_day + self.value.weekday()
- return self.format(value % 7 + 1, num)
- num = 3
- weekday = self.value.weekday()
- width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
- context = {3: 'format', 4: 'format', 5: 'stand-alone'}[num]
- return get_day_names(width, context, self.locale)[weekday]
-
- def format_day_of_year(self, num):
- return self.format(self.get_day_of_year(), num)
-
- def format_day_of_week_in_month(self):
- return '%d' % ((self.value.day - 1) / 7 + 1)
-
- def format_period(self, char):
- period = {0: 'am', 1: 'pm'}[int(self.value.hour >= 12)]
- return get_period_names(locale=self.locale)[period]
-
- def format_frac_seconds(self, num):
- value = str(self.value.microsecond)
- return self.format(round(float('.%s' % value), num) * 10**num, num)
-
- def format_milliseconds_in_day(self, num):
- msecs = self.value.microsecond // 1000 + self.value.second * 1000 + \
- self.value.minute * 60000 + self.value.hour * 3600000
- return self.format(msecs, num)
-
- def format_timezone(self, char, num):
- width = {3: 'short', 4: 'long'}[max(3, num)]
- if char == 'z':
- return get_timezone_name(self.value, width, locale=self.locale)
- elif char == 'Z':
- return get_timezone_gmt(self.value, width, locale=self.locale)
- elif char == 'v':
- return get_timezone_name(self.value.tzinfo, width,
- locale=self.locale)
- elif char == 'V':
- if num == 1:
- return get_timezone_name(self.value.tzinfo, width,
- uncommon=True, locale=self.locale)
- return get_timezone_location(self.value.tzinfo, locale=self.locale)
-
- def format(self, value, length):
- return ('%%0%dd' % length) % value
-
- def get_day_of_year(self, date=None):
- if date is None:
- date = self.value
- return (date - date_(date.year, 1, 1)).days + 1
-
- def get_week_number(self, day_of_period, day_of_week=None):
- """Return the number of the week of a day within a period. This may be
- the week number in a year or the week number in a month.
-
- Usually this will return a value equal to or greater than 1, but if the
- first week of the period is so short that it actually counts as the last
- week of the previous period, this function will return 0.
-
- >>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('de_DE'))
- >>> format.get_week_number(6)
- 1
-
- >>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('en_US'))
- >>> format.get_week_number(6)
- 2
-
- :param day_of_period: the number of the day in the period (usually
- either the day of month or the day of year)
- :param day_of_week: the week day; if ommitted, the week day of the
- current date is assumed
- """
- if day_of_week is None:
- day_of_week = self.value.weekday()
- first_day = (day_of_week - self.locale.first_week_day -
- day_of_period + 1) % 7
- if first_day < 0:
- first_day += 7
- week_number = (day_of_period + first_day - 1) / 7
- if 7 - first_day >= self.locale.min_week_days:
- week_number += 1
- return week_number
-
-
-PATTERN_CHARS = {
- 'G': [1, 2, 3, 4, 5], # era
- 'y': None, 'Y': None, 'u': None, # year
- 'Q': [1, 2, 3, 4], 'q': [1, 2, 3, 4], # quarter
- 'M': [1, 2, 3, 4, 5], 'L': [1, 2, 3, 4, 5], # month
- 'w': [1, 2], 'W': [1], # week
- 'd': [1, 2], 'D': [1, 2, 3], 'F': [1], 'g': None, # day
- 'E': [1, 2, 3, 4, 5], 'e': [1, 2, 3, 4, 5], 'c': [1, 3, 4, 5], # week day
- 'a': [1], # period
- 'h': [1, 2], 'H': [1, 2], 'K': [1, 2], 'k': [1, 2], # hour
- 'm': [1, 2], # minute
- 's': [1, 2], 'S': None, 'A': None, # second
- 'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4], 'v': [1, 4], 'V': [1, 4] # zone
-}
-
-def parse_pattern(pattern):
- """Parse date, time, and datetime format patterns.
-
- >>> parse_pattern("MMMMd").format
- u'%(MMMM)s%(d)s'
- >>> parse_pattern("MMM d, yyyy").format
- u'%(MMM)s %(d)s, %(yyyy)s'
-
- Pattern can contain literal strings in single quotes:
-
- >>> parse_pattern("H:mm' Uhr 'z").format
- u'%(H)s:%(mm)s Uhr %(z)s'
-
- An actual single quote can be used by using two adjacent single quote
- characters:
-
- >>> parse_pattern("hh' o''clock'").format
- u"%(hh)s o'clock"
-
- :param pattern: the formatting pattern to parse
- """
- if type(pattern) is DateTimePattern:
- return pattern
-
- result = []
- quotebuf = None
- charbuf = []
- fieldchar = ['']
- fieldnum = [0]
-
- def append_chars():
- result.append(''.join(charbuf).replace('%', '%%'))
- del charbuf[:]
-
- def append_field():
- limit = PATTERN_CHARS[fieldchar[0]]
- if limit and fieldnum[0] not in limit:
- raise ValueError('Invalid length for field: %r'
- % (fieldchar[0] * fieldnum[0]))
- result.append('%%(%s)s' % (fieldchar[0] * fieldnum[0]))
- fieldchar[0] = ''
- fieldnum[0] = 0
-
- for idx, char in enumerate(pattern.replace("''", '\0')):
- if quotebuf is None:
- if char == "'": # quote started
- if fieldchar[0]:
- append_field()
- elif charbuf:
- append_chars()
- quotebuf = []
- elif char in PATTERN_CHARS:
- if charbuf:
- append_chars()
- if char == fieldchar[0]:
- fieldnum[0] += 1
- else:
- if fieldchar[0]:
- append_field()
- fieldchar[0] = char
- fieldnum[0] = 1
- else:
- if fieldchar[0]:
- append_field()
- charbuf.append(char)
-
- elif quotebuf is not None:
- if char == "'": # end of quote
- charbuf.extend(quotebuf)
- quotebuf = None
- else: # inside quote
- quotebuf.append(char)
-
- if fieldchar[0]:
- append_field()
- elif charbuf:
- append_chars()
-
- return DateTimePattern(pattern, u''.join(result).replace('\0', "'"))
diff --git a/babel/global.dat b/babel/global.dat
deleted file mode 100644
index aadb90d..0000000
--- a/babel/global.dat
+++ /dev/null
Binary files differ
diff --git a/babel/localedata.py b/babel/localedata.py
deleted file mode 100644
index 203bec8..0000000
--- a/babel/localedata.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Low-level locale data access.
-
-:note: The `Locale` class, which uses this module under the hood, provides a
- more convenient interface for accessing the locale data.
-"""
-
-import os
-import pickle
-try:
- import threading
-except ImportError:
- import dummy_threading as threading
-from UserDict import DictMixin
-
-__all__ = ['exists', 'list', 'load']
-__docformat__ = 'restructuredtext en'
-
-_cache = {}
-_cache_lock = threading.RLock()
-_dirname = os.path.join(os.path.dirname(__file__), 'localedata')
-
-
-def exists(name):
- """Check whether locale data is available for the given locale.
-
- :param name: the locale identifier string
- :return: `True` if the locale data exists, `False` otherwise
- :rtype: `bool`
- """
- if name in _cache:
- return True
- return os.path.exists(os.path.join(_dirname, '%s.dat' % name))
-
-
-def list():
- """Return a list of all locale identifiers for which locale data is
- available.
-
- :return: a list of locale identifiers (strings)
- :rtype: `list`
- :since: version 0.8.1
- """
- return [stem for stem, extension in [
- os.path.splitext(filename) for filename in os.listdir(_dirname)
- ] if extension == '.dat' and stem != 'root']
-
-
-def load(name, merge_inherited=True):
- """Load the locale data for the given locale.
-
- The locale data is a dictionary that contains much of the data defined by
- the Common Locale Data Repository (CLDR). This data is stored as a
- collection of pickle files inside the ``babel`` package.
-
- >>> d = load('en_US')
- >>> d['languages']['sv']
- u'Swedish'
-
- Note that the results are cached, and subsequent requests for the same
- locale return the same dictionary:
-
- >>> d1 = load('en_US')
- >>> d2 = load('en_US')
- >>> d1 is d2
- True
-
- :param name: the locale identifier string (or "root")
- :param merge_inherited: whether the inherited data should be merged into
- the data of the requested locale
- :return: the locale data
- :rtype: `dict`
- :raise `IOError`: if no locale data file is found for the given locale
- identifer, or one of the locales it inherits from
- """
- _cache_lock.acquire()
- try:
- data = _cache.get(name)
- if not data:
- # Load inherited data
- if name == 'root' or not merge_inherited:
- data = {}
- else:
- parts = name.split('_')
- if len(parts) == 1:
- parent = 'root'
- else:
- parent = '_'.join(parts[:-1])
- data = load(parent).copy()
- filename = os.path.join(_dirname, '%s.dat' % name)
- fileobj = open(filename, 'rb')
- try:
- if name != 'root' and merge_inherited:
- merge(data, pickle.load(fileobj))
- else:
- data = pickle.load(fileobj)
- _cache[name] = data
- finally:
- fileobj.close()
- return data
- finally:
- _cache_lock.release()
-
-
-def merge(dict1, dict2):
- """Merge the data from `dict2` into the `dict1` dictionary, making copies
- of nested dictionaries.
-
- >>> d = {1: 'foo', 3: 'baz'}
- >>> merge(d, {1: 'Foo', 2: 'Bar'})
- >>> d
- {1: 'Foo', 2: 'Bar', 3: 'baz'}
-
- :param dict1: the dictionary to merge into
- :param dict2: the dictionary containing the data that should be merged
- """
- for key, val2 in dict2.items():
- if val2 is not None:
- val1 = dict1.get(key)
- if isinstance(val2, dict):
- if val1 is None:
- val1 = {}
- if isinstance(val1, Alias):
- val1 = (val1, val2)
- elif isinstance(val1, tuple):
- alias, others = val1
- others = others.copy()
- merge(others, val2)
- val1 = (alias, others)
- else:
- val1 = val1.copy()
- merge(val1, val2)
- else:
- val1 = val2
- dict1[key] = val1
-
-
-class Alias(object):
- """Representation of an alias in the locale data.
-
- An alias is a value that refers to some other part of the locale data,
- as specified by the `keys`.
- """
-
- def __init__(self, keys):
- self.keys = tuple(keys)
-
- def __repr__(self):
- return '<%s %r>' % (type(self).__name__, self.keys)
-
- def resolve(self, data):
- """Resolve the alias based on the given data.
-
- This is done recursively, so if one alias resolves to a second alias,
- that second alias will also be resolved.
-
- :param data: the locale data
- :type data: `dict`
- """
- base = data
- for key in self.keys:
- data = data[key]
- if isinstance(data, Alias):
- data = data.resolve(base)
- elif isinstance(data, tuple):
- alias, others = data
- data = alias.resolve(base)
- return data
-
-
-class LocaleDataDict(DictMixin, dict):
- """Dictionary wrapper that automatically resolves aliases to the actual
- values.
- """
-
- def __init__(self, data, base=None):
- dict.__init__(self, data)
- if base is None:
- base = data
- self.base = base
-
- def __getitem__(self, key):
- orig = val = dict.__getitem__(self, key)
- if isinstance(val, Alias): # resolve an alias
- val = val.resolve(self.base)
- if isinstance(val, tuple): # Merge a partial dict with an alias
- alias, others = val
- val = alias.resolve(self.base).copy()
- merge(val, others)
- if type(val) is dict: # Return a nested alias-resolving dict
- val = LocaleDataDict(val, base=self.base)
- if val is not orig:
- self[key] = val
- return val
-
- def copy(self):
- return LocaleDataDict(dict.copy(self), base=self.base)
diff --git a/babel/localedata/en_US.dat b/babel/localedata/en_US.dat
deleted file mode 100644
index 1967e9e..0000000
--- a/babel/localedata/en_US.dat
+++ /dev/null
Binary files differ
diff --git a/babel/localedata/fr_FR.dat b/babel/localedata/fr_FR.dat
deleted file mode 100644
index d4e5e45..0000000
--- a/babel/localedata/fr_FR.dat
+++ /dev/null
Binary files differ
diff --git a/babel/messages/__init__.py b/babel/messages/__init__.py
deleted file mode 100644
index 283e1f8..0000000
--- a/babel/messages/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Support for ``gettext`` message catalogs."""
-
-from babel.messages.catalog import *
diff --git a/babel/messages/catalog.py b/babel/messages/catalog.py
deleted file mode 100644
index b8a066c..0000000
--- a/babel/messages/catalog.py
+++ /dev/null
@@ -1,721 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Data structures for message catalogs."""
-
-from cgi import parse_header
-from datetime import datetime
-from difflib import get_close_matches
-from email import message_from_string
-from copy import copy
-import re
-try:
- set
-except NameError:
- from sets import Set as set
-import time
-
-from babel import __version__ as VERSION
-from babel.core import Locale
-from babel.dates import format_datetime
-from babel.messages.plurals import get_plural
-from babel.util import odict, distinct, LOCALTZ, UTC, FixedOffsetTimezone
-
-__all__ = ['Message', 'Catalog', 'TranslationError']
-__docformat__ = 'restructuredtext en'
-
-
-PYTHON_FORMAT = re.compile(r'''(?x)
- \%
- (?:\(([\w]*)\))?
- (
- [-#0\ +]?(?:\*|[\d]+)?
- (?:\.(?:\*|[\d]+))?
- [hlL]?
- )
- ([diouxXeEfFgGcrs%])
-''')
-
-
-class Message(object):
- """Representation of a single message in a catalog."""
-
- def __init__(self, id, string=u'', locations=(), flags=(), auto_comments=(),
- user_comments=(), previous_id=(), lineno=None):
- """Create the message object.
-
- :param id: the message ID, or a ``(singular, plural)`` tuple for
- pluralizable messages
- :param string: the translated message string, or a
- ``(singular, plural)`` tuple for pluralizable messages
- :param locations: a sequence of ``(filenname, lineno)`` tuples
- :param flags: a set or sequence of flags
- :param auto_comments: a sequence of automatic comments for the message
- :param user_comments: a sequence of user comments for the message
- :param previous_id: the previous message ID, or a ``(singular, plural)``
- tuple for pluralizable messages
- :param lineno: the line number on which the msgid line was found in the
- PO file, if any
- """
- self.id = id #: The message ID
- if not string and self.pluralizable:
- string = (u'', u'')
- self.string = string #: The message translation
- self.locations = list(distinct(locations))
- self.flags = set(flags)
- if id and self.python_format:
- self.flags.add('python-format')
- else:
- self.flags.discard('python-format')
- self.auto_comments = list(distinct(auto_comments))
- self.user_comments = list(distinct(user_comments))
- if isinstance(previous_id, basestring):
- self.previous_id = [previous_id]
- else:
- self.previous_id = list(previous_id)
- self.lineno = lineno
-
- def __repr__(self):
- return '<%s %r (flags: %r)>' % (type(self).__name__, self.id,
- list(self.flags))
-
- def __cmp__(self, obj):
- """Compare Messages, taking into account plural ids"""
- if isinstance(obj, Message):
- plural = self.pluralizable
- obj_plural = obj.pluralizable
- if plural and obj_plural:
- return cmp(self.id[0], obj.id[0])
- elif plural:
- return cmp(self.id[0], obj.id)
- elif obj_plural:
- return cmp(self.id, obj.id[0])
- return cmp(self.id, obj.id)
-
- def clone(self):
- return Message(*map(copy, (self.id, self.string, self.locations,
- self.flags, self.auto_comments,
- self.user_comments, self.previous_id,
- self.lineno)))
-
- def check(self, catalog=None):
- """Run various validation checks on the message. Some validations
- are only performed if the catalog is provided. This method returns
- a sequence of `TranslationError` objects.
-
- :rtype: ``iterator``
- :param catalog: A catalog instance that is passed to the checkers
- :see: `Catalog.check` for a way to perform checks for all messages
- in a catalog.
- """
- from babel.messages.checkers import checkers
- errors = []
- for checker in checkers:
- try:
- checker(catalog, self)
- except TranslationError, e:
- errors.append(e)
- return errors
-
- def fuzzy(self):
- return 'fuzzy' in self.flags
- fuzzy = property(fuzzy, doc="""\
- Whether the translation is fuzzy.
-
- >>> Message('foo').fuzzy
- False
- >>> msg = Message('foo', 'foo', flags=['fuzzy'])
- >>> msg.fuzzy
- True
- >>> msg
- <Message 'foo' (flags: ['fuzzy'])>
-
- :type: `bool`
- """)
-
- def pluralizable(self):
- return isinstance(self.id, (list, tuple))
- pluralizable = property(pluralizable, doc="""\
- Whether the message is plurizable.
-
- >>> Message('foo').pluralizable
- False
- >>> Message(('foo', 'bar')).pluralizable
- True
-
- :type: `bool`
- """)
-
- def python_format(self):
- ids = self.id
- if not isinstance(ids, (list, tuple)):
- ids = [ids]
- return bool(filter(None, [PYTHON_FORMAT.search(id) for id in ids]))
- python_format = property(python_format, doc="""\
- Whether the message contains Python-style parameters.
-
- >>> Message('foo %(name)s bar').python_format
- True
- >>> Message(('foo %(name)s', 'foo %(name)s')).python_format
- True
-
- :type: `bool`
- """)
-
-
-class TranslationError(Exception):
- """Exception thrown by translation checkers when invalid message
- translations are encountered."""
-
-
-DEFAULT_HEADER = u"""\
-# Translations template for PROJECT.
-# Copyright (C) YEAR ORGANIZATION
-# This file is distributed under the same license as the PROJECT project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
-#"""
-
-
-class Catalog(object):
- """Representation of a message catalog."""
-
- def __init__(self, locale=None, domain=None, header_comment=DEFAULT_HEADER,
- project=None, version=None, copyright_holder=None,
- msgid_bugs_address=None, creation_date=None,
- revision_date=None, last_translator=None, language_team=None,
- charset='utf-8', fuzzy=True):
- """Initialize the catalog object.
-
- :param locale: the locale identifier or `Locale` object, or `None`
- if the catalog is not bound to a locale (which basically
- means it's a template)
- :param domain: the message domain
- :param header_comment: the header comment as string, or `None` for the
- default header
- :param project: the project's name
- :param version: the project's version
- :param copyright_holder: the copyright holder of the catalog
- :param msgid_bugs_address: the email address or URL to submit bug
- reports to
- :param creation_date: the date the catalog was created
- :param revision_date: the date the catalog was revised
- :param last_translator: the name and email of the last translator
- :param language_team: the name and email of the language team
- :param charset: the encoding to use in the output
- :param fuzzy: the fuzzy bit on the catalog header
- """
- self.domain = domain #: The message domain
- if locale:
- locale = Locale.parse(locale)
- self.locale = locale #: The locale or `None`
- self._header_comment = header_comment
- self._messages = odict()
-
- self.project = project or 'PROJECT' #: The project name
- self.version = version or 'VERSION' #: The project version
- self.copyright_holder = copyright_holder or 'ORGANIZATION'
- self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'
-
- self.last_translator = last_translator or 'FULL NAME <EMAIL@ADDRESS>'
- """Name and email address of the last translator."""
- self.language_team = language_team or 'LANGUAGE <LL@li.org>'
- """Name and email address of the language team."""
-
- self.charset = charset or 'utf-8'
-
- if creation_date is None:
- creation_date = datetime.now(LOCALTZ)
- elif isinstance(creation_date, datetime) and not creation_date.tzinfo:
- creation_date = creation_date.replace(tzinfo=LOCALTZ)
- self.creation_date = creation_date #: Creation date of the template
- if revision_date is None:
- revision_date = datetime.now(LOCALTZ)
- elif isinstance(revision_date, datetime) and not revision_date.tzinfo:
- revision_date = revision_date.replace(tzinfo=LOCALTZ)
- self.revision_date = revision_date #: Last revision date of the catalog
- self.fuzzy = fuzzy #: Catalog header fuzzy bit (`True` or `False`)
-
- self.obsolete = odict() #: Dictionary of obsolete messages
- self._num_plurals = None
- self._plural_expr = None
-
- def _get_header_comment(self):
- comment = self._header_comment
- comment = comment.replace('PROJECT', self.project) \
- .replace('VERSION', self.version) \
- .replace('YEAR', self.revision_date.strftime('%Y')) \
- .replace('ORGANIZATION', self.copyright_holder)
- if self.locale:
- comment = comment.replace('Translations template', '%s translations'
- % self.locale.english_name)
- return comment
-
- def _set_header_comment(self, string):
- self._header_comment = string
-
- header_comment = property(_get_header_comment, _set_header_comment, doc="""\
- The header comment for the catalog.
-
- >>> catalog = Catalog(project='Foobar', version='1.0',
- ... copyright_holder='Foo Company')
- >>> print catalog.header_comment #doctest: +ELLIPSIS
- # Translations template for Foobar.
- # Copyright (C) ... Foo Company
- # This file is distributed under the same license as the Foobar project.
- # FIRST AUTHOR <EMAIL@ADDRESS>, ....
- #
-
- The header can also be set from a string. Any known upper-case variables
- will be replaced when the header is retrieved again:
-
- >>> catalog = Catalog(project='Foobar', version='1.0',
- ... copyright_holder='Foo Company')
- >>> catalog.header_comment = '''\\
- ... # The POT for my really cool PROJECT project.
- ... # Copyright (C) 1990-2003 ORGANIZATION
- ... # This file is distributed under the same license as the PROJECT
- ... # project.
- ... #'''
- >>> print catalog.header_comment
- # The POT for my really cool Foobar project.
- # Copyright (C) 1990-2003 Foo Company
- # This file is distributed under the same license as the Foobar
- # project.
- #
-
- :type: `unicode`
- """)
-
- def _get_mime_headers(self):
- headers = []
- headers.append(('Project-Id-Version',
- '%s %s' % (self.project, self.version)))
- headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address))
- headers.append(('POT-Creation-Date',
- format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ',
- locale='en')))
- if self.locale is None:
- headers.append(('PO-Revision-Date', 'YEAR-MO-DA HO:MI+ZONE'))
- headers.append(('Last-Translator', 'FULL NAME <EMAIL@ADDRESS>'))
- headers.append(('Language-Team', 'LANGUAGE <LL@li.org>'))
- else:
- headers.append(('PO-Revision-Date',
- format_datetime(self.revision_date,
- 'yyyy-MM-dd HH:mmZ', locale='en')))
- headers.append(('Last-Translator', self.last_translator))
- headers.append(('Language-Team',
- self.language_team.replace('LANGUAGE',
- str(self.locale))))
- headers.append(('Plural-Forms', self.plural_forms))
- headers.append(('MIME-Version', '1.0'))
- headers.append(('Content-Type',
- 'text/plain; charset=%s' % self.charset))
- headers.append(('Content-Transfer-Encoding', '8bit'))
- headers.append(('Generated-By', 'Babel %s\n' % VERSION))
- return headers
-
- def _set_mime_headers(self, headers):
- for name, value in headers:
- if name.lower() == 'content-type':
- mimetype, params = parse_header(value)
- if 'charset' in params:
- self.charset = params['charset'].lower()
- break
- for name, value in headers:
- name = name.lower().decode(self.charset)
- value = value.decode(self.charset)
- if name == 'project-id-version':
- parts = value.split(' ')
- self.project = u' '.join(parts[:-1])
- self.version = parts[-1]
- elif name == 'report-msgid-bugs-to':
- self.msgid_bugs_address = value
- elif name == 'last-translator':
- self.last_translator = value
- elif name == 'language-team':
- self.language_team = value
- elif name == 'plural-forms':
- _, params = parse_header(' ;' + value)
- self._num_plurals = int(params.get('nplurals', 2))
- self._plural_expr = params.get('plural', '(n != 1)')
- elif name == 'pot-creation-date':
- # FIXME: this should use dates.parse_datetime as soon as that
- # is ready
- value, tzoffset, _ = re.split('[+-](\d{4})$', value, 1)
- tt = time.strptime(value, '%Y-%m-%d %H:%M')
- ts = time.mktime(tt)
- tzoffset = FixedOffsetTimezone(int(tzoffset[:2]) * 60 +
- int(tzoffset[2:]))
- dt = datetime.fromtimestamp(ts)
- self.creation_date = dt.replace(tzinfo=tzoffset)
-
- mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\
- The MIME headers of the catalog, used for the special ``msgid ""`` entry.
-
- The behavior of this property changes slightly depending on whether a locale
- is set or not, the latter indicating that the catalog is actually a template
- for actual translations.
-
- Here's an example of the output for such a catalog template:
-
- >>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC)
- >>> catalog = Catalog(project='Foobar', version='1.0',
- ... creation_date=created)
- >>> for name, value in catalog.mime_headers:
- ... print '%s: %s' % (name, value)
- Project-Id-Version: Foobar 1.0
- Report-Msgid-Bugs-To: EMAIL@ADDRESS
- POT-Creation-Date: 1990-04-01 15:30+0000
- PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
- Last-Translator: FULL NAME <EMAIL@ADDRESS>
- Language-Team: LANGUAGE <LL@li.org>
- MIME-Version: 1.0
- Content-Type: text/plain; charset=utf-8
- Content-Transfer-Encoding: 8bit
- Generated-By: Babel ...
-
- And here's an example of the output when the locale is set:
-
- >>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC)
- >>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0',
- ... creation_date=created, revision_date=revised,
- ... last_translator='John Doe <jd@example.com>',
- ... language_team='de_DE <de@example.com>')
- >>> for name, value in catalog.mime_headers:
- ... print '%s: %s' % (name, value)
- Project-Id-Version: Foobar 1.0
- Report-Msgid-Bugs-To: EMAIL@ADDRESS
- POT-Creation-Date: 1990-04-01 15:30+0000
- PO-Revision-Date: 1990-08-03 12:00+0000
- Last-Translator: John Doe <jd@example.com>
- Language-Team: de_DE <de@example.com>
- Plural-Forms: nplurals=2; plural=(n != 1)
- MIME-Version: 1.0
- Content-Type: text/plain; charset=utf-8
- Content-Transfer-Encoding: 8bit
- Generated-By: Babel ...
-
- :type: `list`
- """)
-
- def num_plurals(self):
- if self._num_plurals is None:
- num = 2
- if self.locale:
- num = get_plural(self.locale)[0]
- self._num_plurals = num
- return self._num_plurals
- num_plurals = property(num_plurals, doc="""\
- The number of plurals used by the catalog or locale.
-
- >>> Catalog(locale='en').num_plurals
- 2
- >>> Catalog(locale='ga').num_plurals
- 3
-
- :type: `int`
- """)
-
- def plural_expr(self):
- if self._plural_expr is None:
- expr = '(n != 1)'
- if self.locale:
- expr = get_plural(self.locale)[1]
- self._plural_expr = expr
- return self._plural_expr
- plural_expr = property(plural_expr, doc="""\
- The plural expression used by the catalog or locale.
-
- >>> Catalog(locale='en').plural_expr
- '(n != 1)'
- >>> Catalog(locale='ga').plural_expr
- '(n==1 ? 0 : n==2 ? 1 : 2)'
-
- :type: `basestring`
- """)
-
- def plural_forms(self):
- return 'nplurals=%s; plural=%s' % (self.num_plurals, self.plural_expr)
- plural_forms = property(plural_forms, doc="""\
- Return the plural forms declaration for the locale.
-
- >>> Catalog(locale='en').plural_forms
- 'nplurals=2; plural=(n != 1)'
- >>> Catalog(locale='pt_BR').plural_forms
- 'nplurals=2; plural=(n > 1)'
-
- :type: `str`
- """)
-
- def __contains__(self, id):
- """Return whether the catalog has a message with the specified ID."""
- return self._key_for(id) in self._messages
-
- def __len__(self):
- """The number of messages in the catalog.
-
- This does not include the special ``msgid ""`` entry.
- """
- return len(self._messages)
-
- def __iter__(self):
- """Iterates through all the entries in the catalog, in the order they
- were added, yielding a `Message` object for every entry.
-
- :rtype: ``iterator``
- """
- buf = []
- for name, value in self.mime_headers:
- buf.append('%s: %s' % (name, value))
- flags = set()
- if self.fuzzy:
- flags |= set(['fuzzy'])
- yield Message(u'', '\n'.join(buf), flags=flags)
- for key in self._messages:
- yield self._messages[key]
-
- def __repr__(self):
- locale = ''
- if self.locale:
- locale = ' %s' % self.locale
- return '<%s %r%s>' % (type(self).__name__, self.domain, locale)
-
- def __delitem__(self, id):
- """Delete the message with the specified ID."""
- key = self._key_for(id)
- if key in self._messages:
- del self._messages[key]
-
- def __getitem__(self, id):
- """Return the message with the specified ID.
-
- :param id: the message ID
- :return: the message with the specified ID, or `None` if no such message
- is in the catalog
- :rtype: `Message`
- """
- return self._messages.get(self._key_for(id))
-
- def __setitem__(self, id, message):
- """Add or update the message with the specified ID.
-
- >>> catalog = Catalog()
- >>> catalog[u'foo'] = Message(u'foo')
- >>> catalog[u'foo']
- <Message u'foo' (flags: [])>
-
- If a message with that ID is already in the catalog, it is updated
- to include the locations and flags of the new message.
-
- >>> catalog = Catalog()
- >>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])
- >>> catalog[u'foo'].locations
- [('main.py', 1)]
- >>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])
- >>> catalog[u'foo'].locations
- [('main.py', 1), ('utils.py', 5)]
-
- :param id: the message ID
- :param message: the `Message` object
- """
- assert isinstance(message, Message), 'expected a Message object'
- key = self._key_for(id)
- current = self._messages.get(key)
- if current:
- if message.pluralizable and not current.pluralizable:
- # The new message adds pluralization
- current.id = message.id
- current.string = message.string
- current.locations = list(distinct(current.locations +
- message.locations))
- current.auto_comments = list(distinct(current.auto_comments +
- message.auto_comments))
- current.user_comments = list(distinct(current.user_comments +
- message.user_comments))
- current.flags |= message.flags
- message = current
- elif id == '':
- # special treatment for the header message
- headers = message_from_string(message.string.encode(self.charset))
- self.mime_headers = headers.items()
- self.header_comment = '\n'.join(['# %s' % comment for comment
- in message.user_comments])
- self.fuzzy = message.fuzzy
- else:
- if isinstance(id, (list, tuple)):
- assert isinstance(message.string, (list, tuple)), \
- 'Expected sequence but got %s' % type(message.string)
- self._messages[key] = message
-
- def add(self, id, string=None, locations=(), flags=(), auto_comments=(),
- user_comments=(), previous_id=(), lineno=None):
- """Add or update the message with the specified ID.
-
- >>> catalog = Catalog()
- >>> catalog.add(u'foo')
- >>> catalog[u'foo']
- <Message u'foo' (flags: [])>
-
- This method simply constructs a `Message` object with the given
- arguments and invokes `__setitem__` with that object.
-
- :param id: the message ID, or a ``(singular, plural)`` tuple for
- pluralizable messages
- :param string: the translated message string, or a
- ``(singular, plural)`` tuple for pluralizable messages
- :param locations: a sequence of ``(filenname, lineno)`` tuples
- :param flags: a set or sequence of flags
- :param auto_comments: a sequence of automatic comments
- :param user_comments: a sequence of user comments
- :param previous_id: the previous message ID, or a ``(singular, plural)``
- tuple for pluralizable messages
- :param lineno: the line number on which the msgid line was found in the
- PO file, if any
- """
- self[id] = Message(id, string, list(locations), flags, auto_comments,
- user_comments, previous_id, lineno=lineno)
-
- def check(self):
- """Run various validation checks on the translations in the catalog.
-
- For every message which fails validation, this method yield a
- ``(message, errors)`` tuple, where ``message`` is the `Message` object
- and ``errors`` is a sequence of `TranslationError` objects.
-
- :rtype: ``iterator``
- """
- for message in self._messages.values():
- errors = message.check(catalog=self)
- if errors:
- yield message, errors
-
- def update(self, template, no_fuzzy_matching=False):
- """Update the catalog based on the given template catalog.
-
- >>> from babel.messages import Catalog
- >>> template = Catalog()
- >>> template.add('green', locations=[('main.py', 99)])
- >>> template.add('blue', locations=[('main.py', 100)])
- >>> template.add(('salad', 'salads'), locations=[('util.py', 42)])
- >>> catalog = Catalog(locale='de_DE')
- >>> catalog.add('blue', u'blau', locations=[('main.py', 98)])
- >>> catalog.add('head', u'Kopf', locations=[('util.py', 33)])
- >>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'),
- ... locations=[('util.py', 38)])
-
- >>> catalog.update(template)
- >>> len(catalog)
- 3
-
- >>> msg1 = catalog['green']
- >>> msg1.string
- >>> msg1.locations
- [('main.py', 99)]
-
- >>> msg2 = catalog['blue']
- >>> msg2.string
- u'blau'
- >>> msg2.locations
- [('main.py', 100)]
-
- >>> msg3 = catalog['salad']
- >>> msg3.string
- (u'Salat', u'Salate')
- >>> msg3.locations
- [('util.py', 42)]
-
- Messages that are in the catalog but not in the template are removed
- from the main collection, but can still be accessed via the `obsolete`
- member:
-
- >>> 'head' in catalog
- False
- >>> catalog.obsolete.values()
- [<Message 'head' (flags: [])>]
-
- :param template: the reference catalog, usually read from a POT file
- :param no_fuzzy_matching: whether to use fuzzy matching of message IDs
- """
- messages = self._messages
- remaining = messages.copy()
- self._messages = odict()
-
- # Prepare for fuzzy matching
- fuzzy_candidates = []
- if not no_fuzzy_matching:
- fuzzy_candidates = [
- self._key_for(msgid) for msgid in messages
- if msgid and messages[msgid].string
- ]
- fuzzy_matches = set()
-
- def _merge(message, oldkey, newkey):
- message = message.clone()
- fuzzy = False
- if oldkey != newkey:
- fuzzy = True
- fuzzy_matches.add(oldkey)
- oldmsg = messages.get(oldkey)
- if isinstance(oldmsg.id, basestring):
- message.previous_id = [oldmsg.id]
- else:
- message.previous_id = list(oldmsg.id)
- else:
- oldmsg = remaining.pop(oldkey, None)
- message.string = oldmsg.string
- if isinstance(message.id, (list, tuple)):
- if not isinstance(message.string, (list, tuple)):
- fuzzy = True
- message.string = tuple(
- [message.string] + ([u''] * (len(message.id) - 1))
- )
- elif len(message.string) != self.num_plurals:
- fuzzy = True
- message.string = tuple(message.string[:len(oldmsg.string)])
- elif isinstance(message.string, (list, tuple)):
- fuzzy = True
- message.string = message.string[0]
- message.flags |= oldmsg.flags
- if fuzzy:
- message.flags |= set([u'fuzzy'])
- self[message.id] = message
-
- for message in template:
- if message.id:
- key = self._key_for(message.id)
- if key in messages:
- _merge(message, key, key)
- else:
- if no_fuzzy_matching is False:
- # do some fuzzy matching with difflib
- matches = get_close_matches(key.lower().strip(),
- fuzzy_candidates, 1)
- if matches:
- _merge(message, matches[0], key)
- continue
-
- self[message.id] = message
-
- self.obsolete = odict()
- for msgid in remaining:
- if no_fuzzy_matching or msgid not in fuzzy_matches:
- self.obsolete[msgid] = remaining[msgid]
-
- def _key_for(self, id):
- """The key for a message is just the singular ID even for pluralizable
- messages.
- """
- key = id
- if isinstance(key, (list, tuple)):
- key = id[0]
- return key
diff --git a/babel/messages/checkers.py b/babel/messages/checkers.py
deleted file mode 100644
index 128eadd..0000000
--- a/babel/messages/checkers.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Various routines that help with validation of translations.
-
-:since: version 0.9
-"""
-
-from itertools import izip
-from babel.messages.catalog import TranslationError, PYTHON_FORMAT
-
#: list of format chars that are compatible to each other
#: (e.g. ``%i`` in the msgid may legitimately become ``%d`` in the msgstr);
#: consulted by ``_validate_format`` when comparing placeholder types
_string_format_compatibilities = [
    set(['i', 'd', 'u']),
    set(['x', 'X']),
    set(['f', 'F', 'g', 'G'])
]
-
-
def num_plurals(catalog, message):
    """Verify the number of plurals in the translation."""
    if not message.pluralizable:
        # A non-pluralizable message must carry a single string, never a
        # sequence of plural forms.
        if not isinstance(message.string, basestring):
            raise TranslationError("Found plural forms for non-pluralizable "
                                   "message")
        return

    if catalog is None:
        # Without a catalog we cannot know the expected plural count;
        # skip further tests.
        return

    forms = message.string
    if not isinstance(forms, (list, tuple)):
        forms = (forms,)
    if len(forms) != catalog.num_plurals:
        raise TranslationError("Wrong number of plural forms (expected %d)" %
                               catalog.num_plurals)
-
-
def python_format(catalog, message):
    """Verify the format string placeholders in the translation."""
    if 'python-format' not in message.flags:
        return
    ids = message.id
    if not isinstance(ids, (list, tuple)):
        ids = (ids,)
    strings = message.string
    if not isinstance(strings, (list, tuple)):
        strings = (strings,)

    # Check every non-empty translated form against its msgid.
    for msgid, msgstr in izip(ids, strings):
        if msgstr:
            _validate_format(msgid, msgstr)
-
-
def _validate_format(format, alternative):
    """Test format string `alternative` against `format`. `format` can be the
    msgid of a message and `alternative` one of the `msgstr`\s. The two
    arguments are not interchangeable as `alternative` may contain less
    placeholders if `format` uses named placeholders.

    If `format` does not use string formatting a `ValueError` is raised.

    If the string formatting of `alternative` is compatible to `format` the
    function returns `None`, otherwise a `TranslationError` is raised.

    Examples for compatible format strings:

    >>> _validate_format('Hello %s!', 'Hallo %s!')
    >>> _validate_format('Hello %i!', 'Hallo %d!')

    Example for an incompatible format strings:

    >>> _validate_format('Hello %(name)s!', 'Hallo %s!')
    Traceback (most recent call last):
      ...
    TranslationError: the format strings are of different kinds

    This function is used by the `python_format` checker.

    :param format: The original format string
    :param alternative: The alternative format string that should be checked
                        against format
    :return: None on success
    :raises TranslationError: on formatting errors
    """

    def _parse(string):
        # Collect (name, typechar) pairs for every placeholder found by
        # PYTHON_FORMAT; a literal '%%' (typechar '%' with no name) is not
        # a real placeholder and is skipped.
        result = []
        for match in PYTHON_FORMAT.finditer(string):
            name, format, typechar = match.groups()
            if typechar == '%' and name is None:
                continue
            result.append((name, str(typechar)))
        return result

    def _compatible(a, b):
        # Two type chars are compatible when equal, or when both appear in
        # the same compatibility set (e.g. 'i'/'d'/'u').
        if a == b:
            return True
        for set in _string_format_compatibilities:
            if a in set and b in set:
                return True
        return False

    def _check_positional(results):
        # Return True when all placeholders are positional, False when all
        # are named; raise when the string mixes the two styles.
        positional = None
        for name, char in results:
            if positional is None:
                positional = name is None
            else:
                if (name is None) != positional:
                    raise TranslationError('format string mixes positional '
                                           'and named placeholders')
        return bool(positional)

    a, b = map(_parse, (format, alternative))

    # if a does not use string formattings, we are dealing with invalid
    # input data. This function only works if the first string provided
    # does contain string format chars
    if not a:
        raise ValueError('original string provided does not use string '
                         'formatting.')

    # now check if both strings are positional or named
    a_positional, b_positional = map(_check_positional, (a, b))
    if a_positional and not b_positional and not b:
        raise TranslationError('placeholders are incompatible')
    elif a_positional != b_positional:
        raise TranslationError('the format strings are of different kinds')

    # if we are operating on positional strings both must have the
    # same number of format chars and those must be compatible
    if a_positional:
        if len(a) != len(b):
            raise TranslationError('positional format placeholders are '
                                   'unbalanced')
        for idx, ((_, first), (_, second)) in enumerate(izip(a, b)):
            if not _compatible(first, second):
                raise TranslationError('incompatible format for placeholder '
                                       '%d: %r and %r are not compatible' %
                                       (idx + 1, first, second))

    # otherwise the second string must not have names the first one
    # doesn't have and the types of those included must be compatible
    else:
        type_map = dict(a)
        for name, typechar in b:
            if name not in type_map:
                raise TranslationError('unknown named placeholder %r' % name)
            elif not _compatible(typechar, type_map[name]):
                raise TranslationError('incompatible format for '
                                       'placeholder %r: '
                                       '%r and %r are not compatible' %
                                       (name, typechar, type_map[name]))
-
-
def _find_checkers():
    """Collect the checker callables to run against catalog messages.

    Prefers checkers registered on the ``babel.checkers`` setuptools entry
    point; falls back to the two builtin checkers when ``pkg_resources`` is
    not installed.
    """
    try:
        from pkg_resources import working_set
    except ImportError:
        # setuptools is unavailable -- only the builtin checkers can be used
        return [num_plurals, python_format]
    found = []
    for entry_point in working_set.iter_entry_points('babel.checkers'):
        found.append(entry_point.load())
    return found


# Resolved once at import time and reused by callers.
checkers = _find_checkers()
diff --git a/babel/messages/extract.py b/babel/messages/extract.py
deleted file mode 100644
index 1f3a662..0000000
--- a/babel/messages/extract.py
+++ /dev/null
@@ -1,554 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Basic infrastructure for extracting localizable messages from source files.
-
-This module defines an extensible system for collecting localizable message
-strings from a variety of sources. A native extractor for Python source files
-is builtin, extractors for other sources can be added using very simple plugins.
-
-The main entry points into the extraction functionality are the functions
-`extract_from_dir` and `extract_from_file`.
-"""
-
-import os
-try:
- set
-except NameError:
- from sets import Set as set
-import sys
-from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
-
-from babel.util import parse_encoding, pathmatch, relpath
-from textwrap import dedent
-
-__all__ = ['extract', 'extract_from_dir', 'extract_from_file']
-__docformat__ = 'restructuredtext en'
-
# setuptools entry-point group under which extractor plugins are registered
GROUP_NAME = 'babel.extractors'

#: Default mapping of translation-function names to the (1-based) positions
#: of their localizable-string arguments; ``None`` means "first argument".
DEFAULT_KEYWORDS = {
    '_': None,
    'gettext': None,
    'ngettext': (1, 2),
    'ugettext': None,
    'ungettext': (1, 2),
    'dgettext': (2,),
    'dngettext': (2, 3),
    'N_': None
}

# By default only Python source files are scanned.
DEFAULT_MAPPING = [('**.py', 'python')]

# Warning emitted when a keyword call passes an empty msgid.
empty_msgid_warning = (
'%s: warning: Empty msgid. It is reserved by GNU gettext: gettext("") '
'returns the header entry with meta information, not the empty string.')
-
-
-def _strip_comment_tags(comments, tags):
- """Helper function for `extract` that strips comment tags from strings
- in a list of comment lines. This functions operates in-place.
- """
- def _strip(line):
- for tag in tags:
- if line.startswith(tag):
- return line[len(tag):].strip()
- return line
- comments[:] = map(_strip, comments)
-
-
def extract_from_dir(dirname=os.getcwd(), method_map=DEFAULT_MAPPING,
                     options_map=None, keywords=DEFAULT_KEYWORDS,
                     comment_tags=(), callback=None, strip_comment_tags=False):
    """Extract messages from any source files found in the given directory.

    This function generates tuples of the form:

    ``(filename, lineno, message, comments)``

    Which extraction method is used per file is determined by the `method_map`
    parameter, which maps extended glob patterns to extraction method names.
    For example, the following is the default mapping:

    >>> method_map = [
    ...     ('**.py', 'python')
    ... ]

    This basically says that files with the filename extension ".py" at any
    level inside the directory should be processed by the "python" extraction
    method. Files that don't match any of the mapping patterns are ignored. See
    the documentation of the `pathmatch` function for details on the pattern
    syntax.

    The following extended mapping would also use the "genshi" extraction
    method on any file in "templates" subdirectory:

    >>> method_map = [
    ...     ('**/templates/**.*', 'genshi'),
    ...     ('**.py', 'python')
    ... ]

    The dictionary provided by the optional `options_map` parameter augments
    these mappings. It uses extended glob patterns as keys, and the values are
    dictionaries mapping options names to option values (both strings).

    The glob patterns of the `options_map` do not necessarily need to be the
    same as those used in the method mapping. For example, while all files in
    the ``templates`` folders in an application may be Genshi applications, the
    options for those files may differ based on extension:

    >>> options_map = {
    ...     '**/templates/**.txt': {
    ...         'template_class': 'genshi.template:TextTemplate',
    ...         'encoding': 'latin-1'
    ...     },
    ...     '**/templates/**.html': {
    ...         'include_attrs': ''
    ...     }
    ... }

    :param dirname: the path to the directory to extract messages from
    :param method_map: a list of ``(pattern, method)`` tuples that maps of
                       extraction method names to extended glob patterns
    :param options_map: a dictionary of additional options (optional)
    :param keywords: a dictionary mapping keywords (i.e. names of functions
                     that should be recognized as translation functions) to
                     tuples that specify which of their arguments contain
                     localizable strings
    :param comment_tags: a list of tags of translator comments to search for
                         and include in the results
    :param callback: a function that is called for every file that message are
                     extracted from, just before the extraction itself is
                     performed; the function is passed the filename, the name
                     of the extraction method and the options dictionary as
                     positional arguments, in that order
    :param strip_comment_tags: a flag that if set to `True` causes all comment
                               tags to be removed from the collected comments.
    :return: an iterator over ``(filename, lineno, funcname, message)`` tuples
    :rtype: ``iterator``
    :see: `pathmatch`
    """
    if options_map is None:
        options_map = {}

    absname = os.path.abspath(dirname)
    for root, dirnames, filenames in os.walk(absname):
        # Prune hidden ('.'-prefixed) and private ('_'-prefixed)
        # subdirectories. BUGFIX: the previous code removed entries while
        # iterating over the same list, which skips the element following
        # each removal and so left some directories unpruned. Rebuilding the
        # list via slice assignment prunes reliably and still prevents
        # os.walk from descending into the removed directories.
        dirnames[:] = [subdir for subdir in dirnames
                       if not subdir.startswith('.')
                       and not subdir.startswith('_')]
        dirnames.sort()
        filenames.sort()
        for filename in filenames:
            filename = relpath(
                os.path.join(root, filename).replace(os.sep, '/'),
                dirname
            )
            # First matching method pattern wins (hence the trailing break).
            for pattern, method in method_map:
                if pathmatch(pattern, filename):
                    filepath = os.path.join(absname, filename)
                    options = {}
                    # The last matching options pattern wins.
                    for opattern, odict in options_map.items():
                        if pathmatch(opattern, filename):
                            options = odict
                    if callback:
                        callback(filename, method, options)
                    for lineno, message, comments in \
                            extract_from_file(method, filepath,
                                              keywords=keywords,
                                              comment_tags=comment_tags,
                                              options=options,
                                              strip_comment_tags=
                                              strip_comment_tags):
                        yield filename, lineno, message, comments
                    break
-
-
def extract_from_file(method, filename, keywords=DEFAULT_KEYWORDS,
                      comment_tags=(), options=None, strip_comment_tags=False):
    """Extract messages from a single source file.

    Returns a list of ``(lineno, funcname, message)`` tuples produced by
    running `extract` over the opened file.

    :param filename: the path to the file to extract messages from
    :param method: a string specifying the extraction method (.e.g. "python")
    :param keywords: a dictionary mapping keywords (i.e. names of functions
                     that should be recognized as translation functions) to
                     tuples that specify which of their arguments contain
                     localizable strings
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param strip_comment_tags: a flag that if set to `True` causes all comment
                               tags to be removed from the collected comments.
    :param options: a dictionary of additional options (optional)
    :return: the list of extracted messages
    :rtype: `list`
    """
    # Universal-newlines mode; the file is always closed, even when the
    # extractor raises.
    source = open(filename, 'U')
    try:
        return list(extract(method, source, keywords, comment_tags, options,
                            strip_comment_tags))
    finally:
        source.close()
-
-
def extract(method, fileobj, keywords=DEFAULT_KEYWORDS, comment_tags=(),
            options=None, strip_comment_tags=False):
    """Extract messages from the given file-like object using the specified
    extraction method.

    This function returns a list of tuples of the form:

    ``(lineno, message, comments)``

    The implementation dispatches the actual extraction to plugins, based on the
    value of the ``method`` parameter.

    >>> source = '''# foo module
    ... def run(argv):
    ...     print _('Hello, world!')
    ... '''

    >>> from StringIO import StringIO
    >>> for message in extract('python', StringIO(source)):
    ...     print message
    (3, u'Hello, world!', [])

    :param method: a string specifying the extraction method (.e.g. "python");
                   if this is a simple name, the extraction function will be
                   looked up by entry point; if it is an explicit reference
                   to a function (of the form ``package.module:funcname`` or
                   ``package.module.funcname``), the corresponding function
                   will be imported and used
    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a dictionary mapping keywords (i.e. names of functions
                     that should be recognized as translation functions) to
                     tuples that specify which of their arguments contain
                     localizable strings
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional)
    :param strip_comment_tags: a flag that if set to `True` causes all comment
                               tags to be removed from the collected comments.
    :return: the list of extracted messages
    :rtype: `list`
    :raise ValueError: if the extraction method is not registered
    """
    func = None
    if ':' in method or '.' in method:
        # Explicit function reference: "package.module:funcname" or
        # "package.module.funcname" -- import it directly.
        if ':' not in method:
            lastdot = method.rfind('.')
            module, attrname = method[:lastdot], method[lastdot + 1:]
        else:
            module, attrname = method.split(':', 1)
        func = getattr(__import__(module, {}, {}, [attrname]), attrname)
    else:
        try:
            from pkg_resources import working_set
        except ImportError:
            # pkg_resources is not available, so we resort to looking up the
            # builtin extractors directly
            builtin = {'ignore': extract_nothing, 'python': extract_python}
            func = builtin.get(method)
        else:
            # Take the first matching entry point from the extractor group.
            for entry_point in working_set.iter_entry_points(GROUP_NAME,
                                                             method):
                func = entry_point.load(require=True)
                break
    if func is None:
        raise ValueError('Unknown extraction method %r' % method)

    results = func(fileobj, keywords.keys(), comment_tags,
                   options=options or {})

    for lineno, funcname, messages, comments in results:
        if funcname:
            # A spec of None means "first argument is the message".
            spec = keywords[funcname] or (1,)
        else:
            spec = (1,)
        if not isinstance(messages, (list, tuple)):
            messages = [messages]
        if not messages:
            continue

        # Validate the messages against the keyword's specification
        msgs = []
        invalid = False
        # last_index is 1 based like the keyword spec
        last_index = len(messages)
        for index in spec:
            if last_index < index:
                # Not enough arguments
                invalid = True
                break
            message = messages[index - 1]
            if message is None:
                invalid = True
                break
            msgs.append(message)
        if invalid:
            continue

        first_msg_index = spec[0] - 1
        if not messages[first_msg_index]:
            # An empty string msgid isn't valid, emit a warning
            where = '%s:%i' % (hasattr(fileobj, 'name') and \
                               fileobj.name or '(unknown)', lineno)
            print >> sys.stderr, empty_msgid_warning % where
            continue

        # Single-message calls yield the bare string, plural calls a tuple.
        messages = tuple(msgs)
        if len(messages) == 1:
            messages = messages[0]

        if strip_comment_tags:
            _strip_comment_tags(comments, comment_tags)
        yield lineno, messages, comments
-
-
def extract_nothing(fileobj, keywords, comment_tags, options):
    """No-op extractor: accepts the standard extractor arguments but always
    reports an empty list of messages.
    """
    return []
-
-
def extract_python(fileobj, keywords, comment_tags, options):
    """Extract messages from Python source code.

    :param fileobj: the seekable, file-like object the messages should be
                    extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: ``iterator``
    """
    funcname = lineno = message_lineno = None
    call_stack = -1  # paren nesting depth inside a keyword call; -1 = outside
    buf = []  # pieces of the string argument currently being collected
    messages = []  # completed arguments of the current call
    translator_comments = []  # (lineno, text) pairs preceding the call
    in_def = in_translator_comments = False
    comment_tag = None

    # PEP-263 coding declaration wins over the 'encoding' option; the
    # fallback default is Latin-1.
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')

    tokens = generate_tokens(fileobj.readline)
    for tok, value, (lineno, _), _, _ in tokens:
        if call_stack == -1 and tok == NAME and value in ('def', 'class'):
            in_def = True
        elif tok == OP and value == '(':
            if in_def:
                # Avoid false positives for declarations such as:
                # def gettext(arg='message'):
                in_def = False
                continue
            if funcname:
                message_lineno = lineno
                call_stack += 1
        elif in_def and tok == OP and value == ':':
            # End of a class definition without parens
            in_def = False
            continue
        elif call_stack == -1 and tok == COMMENT:
            # Strip the comment token from the line
            value = value.decode(encoding)[1:].strip()
            if in_translator_comments and \
                    translator_comments[-1][0] == lineno - 1:
                # We're already inside a translator comment, continue appending
                translator_comments.append((lineno, value))
                continue
            # If execution reaches this point, let's see if comment line
            # starts with one of the comment tags
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    in_translator_comments = True
                    translator_comments.append((lineno, value))
                    break
        elif funcname and call_stack == 0:
            if tok == OP and value == ')':
                # Closing paren of the keyword call: flush the pending
                # argument and yield the collected message.
                if buf:
                    messages.append(''.join(buf))
                    del buf[:]
                else:
                    messages.append(None)

                if len(messages) > 1:
                    messages = tuple(messages)
                else:
                    messages = messages[0]
                # Comments don't apply unless they immediately precede the
                # message
                if translator_comments and \
                        translator_comments[-1][0] < message_lineno - 1:
                    translator_comments = []

                yield (message_lineno, funcname, messages,
                       [comment[1] for comment in translator_comments])

                # Reset all per-call state for the next keyword call.
                funcname = lineno = message_lineno = None
                call_stack = -1
                messages = []
                translator_comments = []
                in_translator_comments = False
            elif tok == STRING:
                # Unwrap quotes in a safe manner, maintaining the string's
                # encoding
                # https://sourceforge.net/tracker/?func=detail&atid=355470&
                # aid=617979&group_id=5470
                value = eval('# coding=%s\n%s' % (encoding, value),
                             {'__builtins__':{}}, {})
                if isinstance(value, str):
                    value = value.decode(encoding)
                buf.append(value)
            elif tok == OP and value == ',':
                # Argument separator: flush the current string buffer (or
                # record a non-string argument as None).
                if buf:
                    messages.append(''.join(buf))
                    del buf[:]
                else:
                    messages.append(None)
                if translator_comments:
                    # We have translator comments, and since we're on a
                    # comma(,) user is allowed to break into a new line
                    # Let's increase the last comment's lineno in order
                    # for the comment to still be a valid one
                    old_lineno, old_comment = translator_comments.pop()
                    translator_comments.append((old_lineno+1, old_comment))
        elif call_stack > 0 and tok == OP and value == ')':
            call_stack -= 1
        elif funcname and call_stack == -1:
            funcname = None
        elif tok == NAME and value in keywords:
            funcname = value
-
-
def extract_javascript(fileobj, keywords, comment_tags, options):
    """Extract messages from JavaScript source code.

    :param fileobj: the seekable, file-like object the messages should be
                    extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: ``iterator``
    """
    from babel.messages.jslexer import tokenize, unquote_string
    funcname = message_lineno = None
    messages = []  # completed arguments of the current keyword call
    last_argument = None  # string argument currently being assembled
    translator_comments = []  # (lineno, text) pairs preceding the call
    concatenate_next = False  # a '+' was seen: next string extends the arg
    encoding = options.get('encoding', 'utf-8')
    last_token = None
    call_stack = -1  # paren nesting depth inside a keyword call; -1 = outside

    for token in tokenize(fileobj.read().decode(encoding)):
        if token.type == 'operator' and token.value == '(':
            if funcname:
                message_lineno = token.lineno
                call_stack += 1

        elif call_stack == -1 and token.type == 'linecomment':
            value = token.value[2:].strip()
            # Continue a translator comment started on the previous line.
            if translator_comments and \
                    translator_comments[-1][0] == token.lineno - 1:
                translator_comments.append((token.lineno, value))
                continue

            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    translator_comments.append((token.lineno, value.strip()))
                    break

        elif token.type == 'multilinecomment':
            # only one multi-line comment may precede a translation
            translator_comments = []
            value = token.value[2:-2].strip()
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    lines = value.splitlines()
                    if lines:
                        lines[0] = lines[0].strip()
                        lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
                        for offset, line in enumerate(lines):
                            translator_comments.append((token.lineno + offset,
                                                        line))
                    break

        elif funcname and call_stack == 0:
            if token.type == 'operator' and token.value == ')':
                # Closing paren of the keyword call: flush the pending
                # argument and yield the collected message (if any).
                if last_argument is not None:
                    messages.append(last_argument)
                if len(messages) > 1:
                    messages = tuple(messages)
                elif messages:
                    messages = messages[0]
                else:
                    messages = None

                # Comments don't apply unless they immediately precede the
                # message
                if translator_comments and \
                        translator_comments[-1][0] < message_lineno - 1:
                    translator_comments = []

                if messages is not None:
                    yield (message_lineno, funcname, messages,
                           [comment[1] for comment in translator_comments])

                # Reset all per-call state for the next keyword call.
                funcname = message_lineno = last_argument = None
                concatenate_next = False
                translator_comments = []
                messages = []
                call_stack = -1

            elif token.type == 'string':
                new_value = unquote_string(token.value)
                if concatenate_next:
                    last_argument = (last_argument or '') + new_value
                    concatenate_next = False
                else:
                    last_argument = new_value

            elif token.type == 'operator':
                if token.value == ',':
                    # Argument separator: non-string arguments become None.
                    if last_argument is not None:
                        messages.append(last_argument)
                        last_argument = None
                    else:
                        messages.append(None)
                    concatenate_next = False
                elif token.value == '+':
                    concatenate_next = True

        elif call_stack > 0 and token.type == 'operator' \
                and token.value == ')':
            call_stack -= 1

        elif funcname and call_stack == -1:
            funcname = None

        # A keyword name only starts a call when it is not itself a function
        # definition name ("function foo(").
        elif call_stack == -1 and token.type == 'name' and \
                token.value in keywords and \
                (last_token is None or last_token.type != 'name' or
                 last_token.value != 'function'):
            funcname = token.value

        last_token = token
diff --git a/babel/messages/frontend.py b/babel/messages/frontend.py
deleted file mode 100644
index c9b5a57..0000000
--- a/babel/messages/frontend.py
+++ /dev/null
@@ -1,1194 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Frontends for the message extraction functionality."""
-
-from ConfigParser import RawConfigParser
-from datetime import datetime
-from distutils import log
-from distutils.cmd import Command
-from distutils.errors import DistutilsOptionError, DistutilsSetupError
-from locale import getpreferredencoding
-import logging
-from optparse import OptionParser
-import os
-import re
-import shutil
-from StringIO import StringIO
-import sys
-import tempfile
-
-from babel import __version__ as VERSION
-from babel import Locale, localedata
-from babel.core import UnknownLocaleError
-from babel.messages.catalog import Catalog
-from babel.messages.extract import extract_from_dir, DEFAULT_KEYWORDS, \
- DEFAULT_MAPPING
-from babel.messages.mofile import write_mo
-from babel.messages.pofile import read_po, write_po
-from babel.messages.plurals import PLURALS
-from babel.util import odict, LOCALTZ
-
-__all__ = ['CommandLineInterface', 'compile_catalog', 'extract_messages',
- 'init_catalog', 'check_message_extractors', 'update_catalog']
-__docformat__ = 'restructuredtext en'
-
-
class compile_catalog(Command):
    """Catalog compilation command for use in ``setup.py`` scripts.

    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::

        from babel.messages.frontend import compile_catalog

        setup(
            ...
            cmdclass = {'compile_catalog': compile_catalog}
        )

    :since: version 0.9
    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """

    description = 'compile message catalogs to binary MO files'
    # distutils option table: (long name, short name, help text)
    user_options = [
        ('domain=', 'D',
         "domain of PO file (default 'messages')"),
        ('directory=', 'd',
         'path to base directory containing the catalogs'),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
        ('locale=', 'l',
         'locale of the catalog to compile'),
        ('use-fuzzy', 'f',
         'also include fuzzy translations'),
        ('statistics', None,
         'print statistics about translations')
    ]
    boolean_options = ['use-fuzzy', 'statistics']

    def initialize_options(self):
        # Option defaults; distutils overwrites these from the command line
        # before finalize_options() runs.
        self.domain = 'messages'
        self.directory = None
        self.input_file = None
        self.output_file = None
        self.locale = None
        self.use_fuzzy = False
        self.statistics = False

    def finalize_options(self):
        # Either an explicit input file or a base directory to scan must be
        # given; the same holds for the output side.
        if not self.input_file and not self.directory:
            raise DistutilsOptionError('you must specify either the input file '
                                       'or the base directory')
        if not self.output_file and not self.directory:
            raise DistutilsOptionError('you must specify either the input file '
                                       'or the base directory')

    def run(self):
        # Build parallel lists of (locale, po_file) inputs and mo_file
        # outputs, then compile each catalog in turn.
        po_files = []
        mo_files = []

        if not self.input_file:
            if self.locale:
                po_files.append((self.locale,
                                 os.path.join(self.directory, self.locale,
                                              'LC_MESSAGES',
                                              self.domain + '.po')))
                mo_files.append(os.path.join(self.directory, self.locale,
                                             'LC_MESSAGES',
                                             self.domain + '.mo'))
            else:
                # No locale given: compile every locale found in the
                # directory that has a catalog for this domain.
                for locale in os.listdir(self.directory):
                    po_file = os.path.join(self.directory, locale,
                                           'LC_MESSAGES', self.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
                        mo_files.append(os.path.join(self.directory, locale,
                                                     'LC_MESSAGES',
                                                     self.domain + '.mo'))
        else:
            po_files.append((self.locale, self.input_file))
            if self.output_file:
                mo_files.append(self.output_file)
            else:
                mo_files.append(os.path.join(self.directory, self.locale,
                                             'LC_MESSAGES',
                                             self.domain + '.mo'))

        if not po_files:
            raise DistutilsOptionError('no message catalogs found')

        for idx, (locale, po_file) in enumerate(po_files):
            mo_file = mo_files[idx]
            infile = open(po_file, 'r')
            try:
                catalog = read_po(infile, locale)
            finally:
                infile.close()

            if self.statistics:
                # The first catalog entry is the header; skip it when
                # counting translated messages.
                translated = 0
                for message in list(catalog)[1:]:
                    if message.string:
                        translated +=1
                percentage = 0
                if len(catalog):
                    percentage = translated * 100 // len(catalog)
                log.info('%d of %d messages (%d%%) translated in %r',
                         translated, len(catalog), percentage, po_file)

            if catalog.fuzzy and not self.use_fuzzy:
                # Fuzzy catalogs are only compiled when --use-fuzzy is given.
                log.warn('catalog %r is marked as fuzzy, skipping', po_file)
                continue

            # Report checker errors but still compile the catalog.
            for message, errors in catalog.check():
                for error in errors:
                    log.error('error: %s:%d: %s', po_file, message.lineno,
                              error)

            log.info('compiling catalog %r to %r', po_file, mo_file)

            outfile = open(mo_file, 'wb')
            try:
                write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
            finally:
                outfile.close()
-
-
-class extract_messages(Command):
- """Message extraction command for use in ``setup.py`` scripts.
-
- If correctly installed, this command is available to Setuptools-using
- setup scripts automatically. For projects using plain old ``distutils``,
- the command needs to be registered explicitly in ``setup.py``::
-
- from babel.messages.frontend import extract_messages
-
- setup(
- ...
- cmdclass = {'extract_messages': extract_messages}
- )
-
- :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
- :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
- """
-
- description = 'extract localizable strings from the project code'
- user_options = [
- ('charset=', None,
- 'charset to use in the output file'),
- ('keywords=', 'k',
- 'space-separated list of keywords to look for in addition to the '
- 'defaults'),
- ('no-default-keywords', None,
- 'do not include the default keywords'),
- ('mapping-file=', 'F',
- 'path to the mapping configuration file'),
- ('no-location', None,
- 'do not include location comments with filename and line number'),
- ('omit-header', None,
- 'do not include msgid "" entry in header'),
- ('output-file=', 'o',
- 'name of the output file'),
- ('width=', 'w',
- 'set output line width (default 76)'),
- ('no-wrap', None,
- 'do not break long message lines, longer than the output line width, '
- 'into several lines'),
- ('sort-output', None,
- 'generate sorted output (default False)'),
- ('sort-by-file', None,
- 'sort output by file location (default False)'),
- ('msgid-bugs-address=', None,
- 'set report address for msgid'),
- ('copyright-holder=', None,
- 'set copyright holder in output'),
- ('add-comments=', 'c',
- 'place comment block with TAG (or those preceding keyword lines) in '
- 'output file. Seperate multiple TAGs with commas(,)'),
- ('strip-comments', None,
- 'strip the comment TAGs from the comments.'),
- ('input-dirs=', None,
- 'directories that should be scanned for messages'),
- ]
- boolean_options = [
- 'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',
- 'sort-output', 'sort-by-file', 'strip-comments'
- ]
-
- def initialize_options(self):
- self.charset = 'utf-8'
- self.keywords = ''
- self._keywords = DEFAULT_KEYWORDS.copy()
- self.no_default_keywords = False
- self.mapping_file = None
- self.no_location = False
- self.omit_header = False
- self.output_file = None
- self.input_dirs = None
- self.width = 76
- self.no_wrap = False
- self.sort_output = False
- self.sort_by_file = False
- self.msgid_bugs_address = None
- self.copyright_holder = None
- self.add_comments = None
- self._add_comments = []
- self.strip_comments = False
-
- def finalize_options(self):
- if self.no_default_keywords and not self.keywords:
- raise DistutilsOptionError('you must specify new keywords if you '
- 'disable the default ones')
- if self.no_default_keywords:
- self._keywords = {}
- if self.keywords:
- self._keywords.update(parse_keywords(self.keywords.split()))
-
- if not self.output_file:
- raise DistutilsOptionError('no output file specified')
- if self.no_wrap and self.width:
- raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
- "exclusive")
- if self.no_wrap:
- self.width = None
- else:
- self.width = int(self.width)
-
- if self.sort_output and self.sort_by_file:
- raise DistutilsOptionError("'--sort-output' and '--sort-by-file' "
- "are mutually exclusive")
-
- if not self.input_dirs:
- self.input_dirs = dict.fromkeys([k.split('.',1)[0]
- for k in self.distribution.packages
- ]).keys()
-
- if self.add_comments:
- self._add_comments = self.add_comments.split(',')
-
- def run(self):
- mappings = self._get_mappings()
- outfile = open(self.output_file, 'w')
- try:
- catalog = Catalog(project=self.distribution.get_name(),
- version=self.distribution.get_version(),
- msgid_bugs_address=self.msgid_bugs_address,
- copyright_holder=self.copyright_holder,
- charset=self.charset)
-
- for dirname, (method_map, options_map) in mappings.items():
- def callback(filename, method, options):
- if method == 'ignore':
- return
- filepath = os.path.normpath(os.path.join(dirname, filename))
- optstr = ''
- if options:
- optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
- k, v in options.items()])
- log.info('extracting messages from %s%s', filepath, optstr)
-
- extracted = extract_from_dir(dirname, method_map, options_map,
- keywords=self._keywords,
- comment_tags=self._add_comments,
- callback=callback,
- strip_comment_tags=
- self.strip_comments)
- for filename, lineno, message, comments in extracted:
- filepath = os.path.normpath(os.path.join(dirname, filename))
- catalog.add(message, None, [(filepath, lineno)],
- auto_comments=comments)
-
- log.info('writing PO template file to %s' % self.output_file)
- write_po(outfile, catalog, width=self.width,
- no_location=self.no_location,
- omit_header=self.omit_header,
- sort_output=self.sort_output,
- sort_by_file=self.sort_by_file)
- finally:
- outfile.close()
-
- def _get_mappings(self):
- mappings = {}
-
- if self.mapping_file:
- fileobj = open(self.mapping_file, 'U')
- try:
- method_map, options_map = parse_mapping(fileobj)
- for dirname in self.input_dirs:
- mappings[dirname] = method_map, options_map
- finally:
- fileobj.close()
-
- elif getattr(self.distribution, 'message_extractors', None):
- message_extractors = self.distribution.message_extractors
- for dirname, mapping in message_extractors.items():
- if isinstance(mapping, basestring):
- method_map, options_map = parse_mapping(StringIO(mapping))
- else:
- method_map, options_map = [], {}
- for pattern, method, options in mapping:
- method_map.append((pattern, method))
- options_map[pattern] = options or {}
- mappings[dirname] = method_map, options_map
-
- else:
- for dirname in self.input_dirs:
- mappings[dirname] = DEFAULT_MAPPING, {}
-
- return mappings
-
-
-def check_message_extractors(dist, name, value):
- """Validate the ``message_extractors`` keyword argument to ``setup()``.
-
- :param dist: the distutils/setuptools ``Distribution`` object
- :param name: the name of the keyword argument (should always be
- "message_extractors")
- :param value: the value of the keyword argument
- :raise `DistutilsSetupError`: if the value is not valid
- :see: `Adding setup() arguments
- <http://peak.telecommunity.com/DevCenter/setuptools#adding-setup-arguments>`_
- """
- assert name == 'message_extractors'
- if not isinstance(value, dict):
- raise DistutilsSetupError('the value of the "message_extractors" '
- 'parameter must be a dictionary')
-
-
-class init_catalog(Command):
- """New catalog initialization command for use in ``setup.py`` scripts.
-
- If correctly installed, this command is available to Setuptools-using
- setup scripts automatically. For projects using plain old ``distutils``,
- the command needs to be registered explicitly in ``setup.py``::
-
- from babel.messages.frontend import init_catalog
-
- setup(
- ...
- cmdclass = {'init_catalog': init_catalog}
- )
-
- :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
- :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
- """
-
- description = 'create a new catalog based on a POT file'
- user_options = [
- ('domain=', 'D',
- "domain of PO file (default 'messages')"),
- ('input-file=', 'i',
- 'name of the input file'),
- ('output-dir=', 'd',
- 'path to output directory'),
- ('output-file=', 'o',
- "name of the output file (default "
- "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
- ('locale=', 'l',
- 'locale for the new localized catalog'),
- ]
-
- def initialize_options(self):
- self.output_dir = None
- self.output_file = None
- self.input_file = None
- self.locale = None
- self.domain = 'messages'
-
- def finalize_options(self):
- if not self.input_file:
- raise DistutilsOptionError('you must specify the input file')
-
- if not self.locale:
- raise DistutilsOptionError('you must provide a locale for the '
- 'new catalog')
- try:
- self._locale = Locale.parse(self.locale)
- except UnknownLocaleError, e:
- raise DistutilsOptionError(e)
-
- if not self.output_file and not self.output_dir:
- raise DistutilsOptionError('you must specify the output directory')
- if not self.output_file:
- self.output_file = os.path.join(self.output_dir, self.locale,
- 'LC_MESSAGES', self.domain + '.po')
-
- if not os.path.exists(os.path.dirname(self.output_file)):
- os.makedirs(os.path.dirname(self.output_file))
-
- def run(self):
- log.info('creating catalog %r based on %r', self.output_file,
- self.input_file)
-
- infile = open(self.input_file, 'r')
- try:
- # Although reading from the catalog template, read_po must be fed
- # the locale in order to correcly calculate plurals
- catalog = read_po(infile, locale=self.locale)
- finally:
- infile.close()
-
- catalog.locale = self._locale
- catalog.fuzzy = False
-
- outfile = open(self.output_file, 'w')
- try:
- write_po(outfile, catalog)
- finally:
- outfile.close()
-
-
-class update_catalog(Command):
- """Catalog merging command for use in ``setup.py`` scripts.
-
- If correctly installed, this command is available to Setuptools-using
- setup scripts automatically. For projects using plain old ``distutils``,
- the command needs to be registered explicitly in ``setup.py``::
-
- from babel.messages.frontend import update_catalog
-
- setup(
- ...
- cmdclass = {'update_catalog': update_catalog}
- )
-
- :since: version 0.9
- :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
- :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
- """
-
- description = 'update message catalogs from a POT file'
- user_options = [
- ('domain=', 'D',
- "domain of PO file (default 'messages')"),
- ('input-file=', 'i',
- 'name of the input file'),
- ('output-dir=', 'd',
- 'path to base directory containing the catalogs'),
- ('output-file=', 'o',
- "name of the output file (default "
- "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
- ('locale=', 'l',
- 'locale of the catalog to compile'),
- ('ignore-obsolete=', None,
- 'whether to omit obsolete messages from the output'),
- ('no-fuzzy-matching', 'N',
- 'do not use fuzzy matching'),
- ('previous', None,
- 'keep previous msgids of translated messages')
- ]
- boolean_options = ['ignore_obsolete', 'no_fuzzy_matching', 'previous']
-
- def initialize_options(self):
- self.domain = 'messages'
- self.input_file = None
- self.output_dir = None
- self.output_file = None
- self.locale = None
- self.ignore_obsolete = False
- self.no_fuzzy_matching = False
- self.previous = False
-
- def finalize_options(self):
- if not self.input_file:
- raise DistutilsOptionError('you must specify the input file')
- if not self.output_file and not self.output_dir:
- raise DistutilsOptionError('you must specify the output file or '
- 'directory')
- if self.output_file and not self.locale:
- raise DistutilsOptionError('you must specify the locale')
- if self.no_fuzzy_matching and self.previous:
- self.previous = False
-
- def run(self):
- po_files = []
- if not self.output_file:
- if self.locale:
- po_files.append((self.locale,
- os.path.join(self.output_dir, self.locale,
- 'LC_MESSAGES',
- self.domain + '.po')))
- else:
- for locale in os.listdir(self.output_dir):
- po_file = os.path.join(self.output_dir, locale,
- 'LC_MESSAGES',
- self.domain + '.po')
- if os.path.exists(po_file):
- po_files.append((locale, po_file))
- else:
- po_files.append((self.locale, self.output_file))
-
- domain = self.domain
- if not domain:
- domain = os.path.splitext(os.path.basename(self.input_file))[0]
-
- infile = open(self.input_file, 'U')
- try:
- template = read_po(infile)
- finally:
- infile.close()
-
- if not po_files:
- raise DistutilsOptionError('no message catalogs found')
-
- for locale, filename in po_files:
- log.info('updating catalog %r based on %r', filename,
- self.input_file)
- infile = open(filename, 'U')
- try:
- catalog = read_po(infile, locale=locale, domain=domain)
- finally:
- infile.close()
-
- catalog.update(template, self.no_fuzzy_matching)
-
- tmpname = os.path.join(os.path.dirname(filename),
- tempfile.gettempprefix() +
- os.path.basename(filename))
- tmpfile = open(tmpname, 'w')
- try:
- try:
- write_po(tmpfile, catalog,
- ignore_obsolete=self.ignore_obsolete,
- include_previous=self.previous)
- finally:
- tmpfile.close()
- except:
- os.remove(tmpname)
- raise
-
- try:
- os.rename(tmpname, filename)
- except OSError:
- # We're probably on Windows, which doesn't support atomic
- # renames, at least not through Python
- # If the error is in fact due to a permissions problem, that
- # same error is going to be raised from one of the following
- # operations
- os.remove(filename)
- shutil.copy(tmpname, filename)
- os.remove(tmpname)
-
-
-class CommandLineInterface(object):
- """Command-line interface.
-
- This class provides a simple command-line interface to the message
- extraction and PO file generation functionality.
- """
-
- usage = '%%prog %s [options] %s'
- version = '%%prog %s' % VERSION
- commands = {
- 'compile': 'compile message catalogs to MO files',
- 'extract': 'extract messages from source files and generate a POT file',
- 'init': 'create new message catalogs from a POT file',
- 'update': 'update existing message catalogs from a POT file'
- }
-
- def run(self, argv=sys.argv):
- """Main entry point of the command-line interface.
-
- :param argv: list of arguments passed on the command-line
- """
- self.parser = OptionParser(usage=self.usage % ('command', '[args]'),
- version=self.version)
- self.parser.disable_interspersed_args()
- self.parser.print_help = self._help
- self.parser.add_option('--list-locales', dest='list_locales',
- action='store_true',
- help="print all known locales and exit")
- self.parser.add_option('-v', '--verbose', action='store_const',
- dest='loglevel', const=logging.DEBUG,
- help='print as much as possible')
- self.parser.add_option('-q', '--quiet', action='store_const',
- dest='loglevel', const=logging.ERROR,
- help='print as little as possible')
- self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)
-
- options, args = self.parser.parse_args(argv[1:])
-
- # Configure logging
- self.log = logging.getLogger('babel')
- self.log.setLevel(options.loglevel)
- handler = logging.StreamHandler()
- handler.setLevel(options.loglevel)
- formatter = logging.Formatter('%(message)s')
- handler.setFormatter(formatter)
- self.log.addHandler(handler)
-
- if options.list_locales:
- identifiers = localedata.list()
- longest = max([len(identifier) for identifier in identifiers])
- format = u'%%-%ds %%s' % (longest + 1)
- for identifier in localedata.list():
- locale = Locale.parse(identifier)
- output = format % (identifier, locale.english_name)
- print output.encode(sys.stdout.encoding or
- getpreferredencoding() or
- 'ascii', 'replace')
- return 0
-
- if not args:
- self.parser.error('incorrect number of arguments')
-
- cmdname = args[0]
- if cmdname not in self.commands:
- self.parser.error('unknown command "%s"' % cmdname)
-
- return getattr(self, cmdname)(args[1:])
-
- def _help(self):
- print self.parser.format_help()
- print "commands:"
- longest = max([len(command) for command in self.commands])
- format = " %%-%ds %%s" % max(8, longest + 1)
- commands = self.commands.items()
- commands.sort()
- for name, description in commands:
- print format % (name, description)
-
- def compile(self, argv):
- """Subcommand for compiling a message catalog to a MO file.
-
- :param argv: the command arguments
- :since: version 0.9
- """
- parser = OptionParser(usage=self.usage % ('compile', ''),
- description=self.commands['compile'])
- parser.add_option('--domain', '-D', dest='domain',
- help="domain of MO and PO files (default '%default')")
- parser.add_option('--directory', '-d', dest='directory',
- metavar='DIR', help='base directory of catalog files')
- parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
- help='locale of the catalog')
- parser.add_option('--input-file', '-i', dest='input_file',
- metavar='FILE', help='name of the input file')
- parser.add_option('--output-file', '-o', dest='output_file',
- metavar='FILE',
- help="name of the output file (default "
- "'<output_dir>/<locale>/LC_MESSAGES/"
- "<domain>.mo')")
- parser.add_option('--use-fuzzy', '-f', dest='use_fuzzy',
- action='store_true',
- help='also include fuzzy translations (default '
- '%default)')
- parser.add_option('--statistics', dest='statistics',
- action='store_true',
- help='print statistics about translations')
-
- parser.set_defaults(domain='messages', use_fuzzy=False,
- compile_all=False, statistics=False)
- options, args = parser.parse_args(argv)
-
- po_files = []
- mo_files = []
- if not options.input_file:
- if not options.directory:
- parser.error('you must specify either the input file or the '
- 'base directory')
- if options.locale:
- po_files.append((options.locale,
- os.path.join(options.directory,
- options.locale, 'LC_MESSAGES',
- options.domain + '.po')))
- mo_files.append(os.path.join(options.directory, options.locale,
- 'LC_MESSAGES',
- options.domain + '.mo'))
- else:
- for locale in os.listdir(options.directory):
- po_file = os.path.join(options.directory, locale,
- 'LC_MESSAGES', options.domain + '.po')
- if os.path.exists(po_file):
- po_files.append((locale, po_file))
- mo_files.append(os.path.join(options.directory, locale,
- 'LC_MESSAGES',
- options.domain + '.mo'))
- else:
- po_files.append((options.locale, options.input_file))
- if options.output_file:
- mo_files.append(options.output_file)
- else:
- if not options.directory:
- parser.error('you must specify either the input file or '
- 'the base directory')
- mo_files.append(os.path.join(options.directory, options.locale,
- 'LC_MESSAGES',
- options.domain + '.mo'))
- if not po_files:
- parser.error('no message catalogs found')
-
- for idx, (locale, po_file) in enumerate(po_files):
- mo_file = mo_files[idx]
- infile = open(po_file, 'r')
- try:
- catalog = read_po(infile, locale)
- finally:
- infile.close()
-
- if options.statistics:
- translated = 0
- for message in list(catalog)[1:]:
- if message.string:
- translated +=1
- percentage = 0
- if len(catalog):
- percentage = translated * 100 // len(catalog)
- self.log.info("%d of %d messages (%d%%) translated in %r",
- translated, len(catalog), percentage, po_file)
-
- if catalog.fuzzy and not options.use_fuzzy:
- self.log.warn('catalog %r is marked as fuzzy, skipping',
- po_file)
- continue
-
- for message, errors in catalog.check():
- for error in errors:
- self.log.error('error: %s:%d: %s', po_file, message.lineno,
- error)
-
- self.log.info('compiling catalog %r to %r', po_file, mo_file)
-
- outfile = open(mo_file, 'wb')
- try:
- write_mo(outfile, catalog, use_fuzzy=options.use_fuzzy)
- finally:
- outfile.close()
-
- def extract(self, argv):
- """Subcommand for extracting messages from source files and generating
- a POT file.
-
- :param argv: the command arguments
- """
- parser = OptionParser(usage=self.usage % ('extract', 'dir1 <dir2> ...'),
- description=self.commands['extract'])
- parser.add_option('--charset', dest='charset',
- help='charset to use in the output (default '
- '"%default")')
- parser.add_option('-k', '--keyword', dest='keywords', action='append',
- help='keywords to look for in addition to the '
- 'defaults. You can specify multiple -k flags on '
- 'the command line.')
- parser.add_option('--no-default-keywords', dest='no_default_keywords',
- action='store_true',
- help="do not include the default keywords")
- parser.add_option('--mapping', '-F', dest='mapping_file',
- help='path to the extraction mapping file')
- parser.add_option('--no-location', dest='no_location',
- action='store_true',
- help='do not include location comments with filename '
- 'and line number')
- parser.add_option('--omit-header', dest='omit_header',
- action='store_true',
- help='do not include msgid "" entry in header')
- parser.add_option('-o', '--output', dest='output',
- help='path to the output POT file')
- parser.add_option('-w', '--width', dest='width', type='int',
- help="set output line width (default %default)")
- parser.add_option('--no-wrap', dest='no_wrap', action = 'store_true',
- help='do not break long message lines, longer than '
- 'the output line width, into several lines')
- parser.add_option('--sort-output', dest='sort_output',
- action='store_true',
- help='generate sorted output (default False)')
- parser.add_option('--sort-by-file', dest='sort_by_file',
- action='store_true',
- help='sort output by file location (default False)')
- parser.add_option('--msgid-bugs-address', dest='msgid_bugs_address',
- metavar='EMAIL@ADDRESS',
- help='set report address for msgid')
- parser.add_option('--copyright-holder', dest='copyright_holder',
- help='set copyright holder in output')
- parser.add_option('--add-comments', '-c', dest='comment_tags',
- metavar='TAG', action='append',
- help='place comment block with TAG (or those '
- 'preceding keyword lines) in output file. One '
- 'TAG per argument call')
- parser.add_option('--strip-comment-tags', '-s',
- dest='strip_comment_tags', action='store_true',
- help='Strip the comment tags from the comments.')
-
- parser.set_defaults(charset='utf-8', keywords=[],
- no_default_keywords=False, no_location=False,
- omit_header = False, width=76, no_wrap=False,
- sort_output=False, sort_by_file=False,
- comment_tags=[], strip_comment_tags=False)
- options, args = parser.parse_args(argv)
- if not args:
- parser.error('incorrect number of arguments')
-
- if options.output not in (None, '-'):
- outfile = open(options.output, 'w')
- else:
- outfile = sys.stdout
-
- keywords = DEFAULT_KEYWORDS.copy()
- if options.no_default_keywords:
- if not options.keywords:
- parser.error('you must specify new keywords if you disable the '
- 'default ones')
- keywords = {}
- if options.keywords:
- keywords.update(parse_keywords(options.keywords))
-
- if options.mapping_file:
- fileobj = open(options.mapping_file, 'U')
- try:
- method_map, options_map = parse_mapping(fileobj)
- finally:
- fileobj.close()
- else:
- method_map = DEFAULT_MAPPING
- options_map = {}
-
- if options.width and options.no_wrap:
- parser.error("'--no-wrap' and '--width' are mutually exclusive.")
- elif not options.width and not options.no_wrap:
- options.width = 76
- elif not options.width and options.no_wrap:
- options.width = 0
-
- if options.sort_output and options.sort_by_file:
- parser.error("'--sort-output' and '--sort-by-file' are mutually "
- "exclusive")
-
- try:
- catalog = Catalog(msgid_bugs_address=options.msgid_bugs_address,
- copyright_holder=options.copyright_holder,
- charset=options.charset)
-
- for dirname in args:
- if not os.path.isdir(dirname):
- parser.error('%r is not a directory' % dirname)
-
- def callback(filename, method, options):
- if method == 'ignore':
- return
- filepath = os.path.normpath(os.path.join(dirname, filename))
- optstr = ''
- if options:
- optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
- k, v in options.items()])
- self.log.info('extracting messages from %s%s', filepath,
- optstr)
-
- extracted = extract_from_dir(dirname, method_map, options_map,
- keywords, options.comment_tags,
- callback=callback,
- strip_comment_tags=
- options.strip_comment_tags)
- for filename, lineno, message, comments in extracted:
- filepath = os.path.normpath(os.path.join(dirname, filename))
- catalog.add(message, None, [(filepath, lineno)],
- auto_comments=comments)
-
- if options.output not in (None, '-'):
- self.log.info('writing PO template file to %s' % options.output)
- write_po(outfile, catalog, width=options.width,
- no_location=options.no_location,
- omit_header=options.omit_header,
- sort_output=options.sort_output,
- sort_by_file=options.sort_by_file)
- finally:
- if options.output:
- outfile.close()
-
- def init(self, argv):
- """Subcommand for creating new message catalogs from a template.
-
- :param argv: the command arguments
- """
- parser = OptionParser(usage=self.usage % ('init', ''),
- description=self.commands['init'])
- parser.add_option('--domain', '-D', dest='domain',
- help="domain of PO file (default '%default')")
- parser.add_option('--input-file', '-i', dest='input_file',
- metavar='FILE', help='name of the input file')
- parser.add_option('--output-dir', '-d', dest='output_dir',
- metavar='DIR', help='path to output directory')
- parser.add_option('--output-file', '-o', dest='output_file',
- metavar='FILE',
- help="name of the output file (default "
- "'<output_dir>/<locale>/LC_MESSAGES/"
- "<domain>.po')")
- parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
- help='locale for the new localized catalog')
-
- parser.set_defaults(domain='messages')
- options, args = parser.parse_args(argv)
-
- if not options.locale:
- parser.error('you must provide a locale for the new catalog')
- try:
- locale = Locale.parse(options.locale)
- except UnknownLocaleError, e:
- parser.error(e)
-
- if not options.input_file:
- parser.error('you must specify the input file')
-
- if not options.output_file and not options.output_dir:
- parser.error('you must specify the output file or directory')
-
- if not options.output_file:
- options.output_file = os.path.join(options.output_dir,
- options.locale, 'LC_MESSAGES',
- options.domain + '.po')
- if not os.path.exists(os.path.dirname(options.output_file)):
- os.makedirs(os.path.dirname(options.output_file))
-
- infile = open(options.input_file, 'r')
- try:
- # Although reading from the catalog template, read_po must be fed
- # the locale in order to correcly calculate plurals
- catalog = read_po(infile, locale=options.locale)
- finally:
- infile.close()
-
- catalog.locale = locale
- catalog.revision_date = datetime.now(LOCALTZ)
-
- self.log.info('creating catalog %r based on %r', options.output_file,
- options.input_file)
-
- outfile = open(options.output_file, 'w')
- try:
- write_po(outfile, catalog)
- finally:
- outfile.close()
-
- def update(self, argv):
- """Subcommand for updating existing message catalogs from a template.
-
- :param argv: the command arguments
- :since: version 0.9
- """
- parser = OptionParser(usage=self.usage % ('update', ''),
- description=self.commands['update'])
- parser.add_option('--domain', '-D', dest='domain',
- help="domain of PO file (default '%default')")
- parser.add_option('--input-file', '-i', dest='input_file',
- metavar='FILE', help='name of the input file')
- parser.add_option('--output-dir', '-d', dest='output_dir',
- metavar='DIR', help='path to output directory')
- parser.add_option('--output-file', '-o', dest='output_file',
- metavar='FILE',
- help="name of the output file (default "
- "'<output_dir>/<locale>/LC_MESSAGES/"
- "<domain>.po')")
- parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
- help='locale of the translations catalog')
- parser.add_option('--ignore-obsolete', dest='ignore_obsolete',
- action='store_true',
- help='do not include obsolete messages in the output '
- '(default %default)'),
- parser.add_option('--no-fuzzy-matching', '-N', dest='no_fuzzy_matching',
- action='store_true',
- help='do not use fuzzy matching (default %default)'),
- parser.add_option('--previous', dest='previous', action='store_true',
- help='keep previous msgids of translated messages '
- '(default %default)'),
-
- parser.set_defaults(domain='messages', ignore_obsolete=False,
- no_fuzzy_matching=False, previous=False)
- options, args = parser.parse_args(argv)
-
- if not options.input_file:
- parser.error('you must specify the input file')
- if not options.output_file and not options.output_dir:
- parser.error('you must specify the output file or directory')
- if options.output_file and not options.locale:
- parser.error('you must specify the loicale')
- if options.no_fuzzy_matching and options.previous:
- options.previous = False
-
- po_files = []
- if not options.output_file:
- if options.locale:
- po_files.append((options.locale,
- os.path.join(options.output_dir,
- options.locale, 'LC_MESSAGES',
- options.domain + '.po')))
- else:
- for locale in os.listdir(options.output_dir):
- po_file = os.path.join(options.output_dir, locale,
- 'LC_MESSAGES',
- options.domain + '.po')
- if os.path.exists(po_file):
- po_files.append((locale, po_file))
- else:
- po_files.append((options.locale, options.output_file))
-
- domain = options.domain
- if not domain:
- domain = os.path.splitext(os.path.basename(options.input_file))[0]
-
- infile = open(options.input_file, 'U')
- try:
- template = read_po(infile)
- finally:
- infile.close()
-
- if not po_files:
- parser.error('no message catalogs found')
-
- for locale, filename in po_files:
- self.log.info('updating catalog %r based on %r', filename,
- options.input_file)
- infile = open(filename, 'U')
- try:
- catalog = read_po(infile, locale=locale, domain=domain)
- finally:
- infile.close()
-
- catalog.update(template, options.no_fuzzy_matching)
-
- tmpname = os.path.join(os.path.dirname(filename),
- tempfile.gettempprefix() +
- os.path.basename(filename))
- tmpfile = open(tmpname, 'w')
- try:
- try:
- write_po(tmpfile, catalog,
- ignore_obsolete=options.ignore_obsolete,
- include_previous=options.previous)
- finally:
- tmpfile.close()
- except:
- os.remove(tmpname)
- raise
-
- try:
- os.rename(tmpname, filename)
- except OSError:
- # We're probably on Windows, which doesn't support atomic
- # renames, at least not through Python
- # If the error is in fact due to a permissions problem, that
- # same error is going to be raised from one of the following
- # operations
- os.remove(filename)
- shutil.copy(tmpname, filename)
- os.remove(tmpname)
-
-
-def main():
- return CommandLineInterface().run(sys.argv)
-
-def parse_mapping(fileobj, filename=None):
- """Parse an extraction method mapping from a file-like object.
-
- >>> buf = StringIO('''
- ... [extractors]
- ... custom = mypackage.module:myfunc
- ...
- ... # Python source files
- ... [python: **.py]
- ...
- ... # Genshi templates
- ... [genshi: **/templates/**.html]
- ... include_attrs =
- ... [genshi: **/templates/**.txt]
- ... template_class = genshi.template:TextTemplate
- ... encoding = latin-1
- ...
- ... # Some custom extractor
- ... [custom: **/custom/*.*]
- ... ''')
-
- >>> method_map, options_map = parse_mapping(buf)
- >>> len(method_map)
- 4
-
- >>> method_map[0]
- ('**.py', 'python')
- >>> options_map['**.py']
- {}
- >>> method_map[1]
- ('**/templates/**.html', 'genshi')
- >>> options_map['**/templates/**.html']['include_attrs']
- ''
- >>> method_map[2]
- ('**/templates/**.txt', 'genshi')
- >>> options_map['**/templates/**.txt']['template_class']
- 'genshi.template:TextTemplate'
- >>> options_map['**/templates/**.txt']['encoding']
- 'latin-1'
-
- >>> method_map[3]
- ('**/custom/*.*', 'mypackage.module:myfunc')
- >>> options_map['**/custom/*.*']
- {}
-
- :param fileobj: a readable file-like object containing the configuration
- text to parse
- :return: a `(method_map, options_map)` tuple
- :rtype: `tuple`
- :see: `extract_from_directory`
- """
- extractors = {}
- method_map = []
- options_map = {}
-
- parser = RawConfigParser()
- parser._sections = odict(parser._sections) # We need ordered sections
- parser.readfp(fileobj, filename)
- for section in parser.sections():
- if section == 'extractors':
- extractors = dict(parser.items(section))
- else:
- method, pattern = [part.strip() for part in section.split(':', 1)]
- method_map.append((pattern, method))
- options_map[pattern] = dict(parser.items(section))
-
- if extractors:
- for idx, (pattern, method) in enumerate(method_map):
- if method in extractors:
- method = extractors[method]
- method_map[idx] = (pattern, method)
-
- return (method_map, options_map)
-
-def parse_keywords(strings=[]):
- """Parse keywords specifications from the given list of strings.
-
- >>> kw = parse_keywords(['_', 'dgettext:2', 'dngettext:2,3'])
- >>> for keyword, indices in sorted(kw.items()):
- ... print (keyword, indices)
- ('_', None)
- ('dgettext', (2,))
- ('dngettext', (2, 3))
- """
- keywords = {}
- for string in strings:
- if ':' in string:
- funcname, indices = string.split(':')
- else:
- funcname, indices = string, None
- if funcname not in keywords:
- if indices:
- indices = tuple([(int(x)) for x in indices.split(',')])
- keywords[funcname] = indices
- return keywords
-
-
-if __name__ == '__main__':
- main()
diff --git a/babel/messages/jslexer.py b/babel/messages/jslexer.py
deleted file mode 100644
index d063ef0..0000000
--- a/babel/messages/jslexer.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2008 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""A simple JavaScript 1.5 lexer which is used for the JavaScript
-extractor.
-"""
-
-import re
-from operator import itemgetter
-
-
-operators = [
- '+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=',
- '+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=',
- '>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',
- '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':'
-]
-operators.sort(lambda a, b: cmp(-len(a), -len(b)))
-
-escapes = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'}
-
-rules = [
- (None, re.compile(r'\s+(?u)')),
- (None, re.compile(r'<!--.*')),
- ('linecomment', re.compile(r'//.*')),
- ('multilinecomment', re.compile(r'/\*.*?\*/(?us)')),
- ('name', re.compile(r'(\$+\w*|[^\W\d]\w*)(?u)')),
- ('number', re.compile(r'''(?x)(
- (?:0|[1-9]\d*)
- (\.\d+)?
- ([eE][-+]?\d+)? |
- (0x[a-fA-F0-9]+)
- )''')),
- ('operator', re.compile(r'(%s)' % '|'.join(map(re.escape, operators)))),
- ('string', re.compile(r'''(?xs)(
- '(?:[^'\\]*(?:\\.[^'\\]*)*)' |
- "(?:[^"\\]*(?:\\.[^"\\]*)*)"
- )'''))
-]
-
-division_re = re.compile(r'/=?')
-regex_re = re.compile(r'/(?:[^/\\]*(?:\\.[^/\\]*)*)/[a-zA-Z]*(?s)')
-line_re = re.compile(r'(\r\n|\n|\r)')
-line_join_re = re.compile(r'\\' + line_re.pattern)
-uni_escape_re = re.compile(r'[a-fA-F0-9]{1,4}')
-
-
-class Token(tuple):
- """Represents a token as returned by `tokenize`."""
- __slots__ = ()
-
- def __new__(cls, type, value, lineno):
- return tuple.__new__(cls, (type, value, lineno))
-
- type = property(itemgetter(0))
- value = property(itemgetter(1))
- lineno = property(itemgetter(2))
-
-
-def indicates_division(token):
- """A helper function that helps the tokenizer to decide if the current
- token may be followed by a division operator.
- """
- if token.type == 'operator':
- return token.value in (')', ']', '}', '++', '--')
- return token.type in ('name', 'number', 'string', 'regexp')
-
-
-def unquote_string(string):
- """Unquote a string with JavaScript rules. The string has to start with
- string delimiters (``'`` or ``"``.)
-
- :return: a string
- """
- assert string and string[0] == string[-1] and string[0] in '"\'', \
- 'string provided is not properly delimited'
- string = line_join_re.sub('\\1', string[1:-1])
- result = []
- add = result.append
- pos = 0
-
- while 1:
- # scan for the next escape
- escape_pos = string.find('\\', pos)
- if escape_pos < 0:
- break
- add(string[pos:escape_pos])
-
- # check which character is escaped
- next_char = string[escape_pos + 1]
- if next_char in escapes:
- add(escapes[next_char])
-
- # unicode escapes. trie to consume up to four characters of
- # hexadecimal characters and try to interpret them as unicode
- # character point. If there is no such character point, put
- # all the consumed characters into the string.
- elif next_char in 'uU':
- escaped = uni_escape_re.match(string, escape_pos + 2)
- if escaped is not None:
- escaped_value = escaped.group()
- if len(escaped_value) == 4:
- try:
- add(unichr(int(escaped_value, 16)))
- except ValueError:
- pass
- else:
- pos = escape_pos + 6
- continue
- add(next_char + escaped_value)
- pos = escaped.end()
- continue
- else:
- add(next_char)
-
- # bogus escape. Just remove the backslash.
- else:
- add(next_char)
- pos = escape_pos + 2
-
- if pos < len(string):
- add(string[pos:])
-
- return u''.join(result)
-
-
-def tokenize(source):
- """Tokenize a JavaScript source.
-
- :return: generator of `Token`\s
- """
- may_divide = False
- pos = 0
- lineno = 1
- end = len(source)
-
- while pos < end:
- # handle regular rules first
- for token_type, rule in rules:
- match = rule.match(source, pos)
- if match is not None:
- break
- # if we don't have a match we don't give up yet, but check for
- # division operators or regular expression literals, based on
- # the status of `may_divide` which is determined by the last
- # processed non-whitespace token using `indicates_division`.
- else:
- if may_divide:
- match = division_re.match(source, pos)
- token_type = 'operator'
- else:
- match = regex_re.match(source, pos)
- token_type = 'regexp'
- if match is None:
- # woops. invalid syntax. jump one char ahead and try again.
- pos += 1
- continue
-
- token_value = match.group()
- if token_type is not None:
- token = Token(token_type, token_value, lineno)
- may_divide = indicates_division(token)
- yield token
- lineno += len(line_re.findall(token_value))
- pos = match.end()
diff --git a/babel/messages/mofile.py b/babel/messages/mofile.py
deleted file mode 100644
index bc0f3a8..0000000
--- a/babel/messages/mofile.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Writing of files in the ``gettext`` MO (machine object) format.
-
-:since: version 0.9
-:see: `The Format of MO Files
- <http://www.gnu.org/software/gettext/manual/gettext.html#MO-Files>`_
-"""
-
-import array
-import struct
-
-__all__ = ['write_mo']
-__docformat__ = 'restructuredtext en'
-
-def write_mo(fileobj, catalog, use_fuzzy=False):
- """Write a catalog to the specified file-like object using the GNU MO file
- format.
-
- >>> from babel.messages import Catalog
- >>> from gettext import GNUTranslations
- >>> from StringIO import StringIO
-
- >>> catalog = Catalog(locale='en_US')
- >>> catalog.add('foo', 'Voh')
- >>> catalog.add((u'bar', u'baz'), (u'Bahr', u'Batz'))
- >>> catalog.add('fuz', 'Futz', flags=['fuzzy'])
- >>> catalog.add('Fizz', '')
- >>> catalog.add(('Fuzz', 'Fuzzes'), ('', ''))
- >>> buf = StringIO()
-
- >>> write_mo(buf, catalog)
- >>> buf.seek(0)
- >>> translations = GNUTranslations(fp=buf)
- >>> translations.ugettext('foo')
- u'Voh'
- >>> translations.ungettext('bar', 'baz', 1)
- u'Bahr'
- >>> translations.ungettext('bar', 'baz', 2)
- u'Batz'
- >>> translations.ugettext('fuz')
- u'fuz'
- >>> translations.ugettext('Fizz')
- u'Fizz'
- >>> translations.ugettext('Fuzz')
- u'Fuzz'
- >>> translations.ugettext('Fuzzes')
- u'Fuzzes'
-
- :param fileobj: the file-like object to write to
- :param catalog: the `Catalog` instance
- :param use_fuzzy: whether translations marked as "fuzzy" should be included
- in the output
- """
- messages = list(catalog)
- if not use_fuzzy:
- messages[1:] = [m for m in messages[1:] if not m.fuzzy]
- messages.sort()
-
- ids = strs = ''
- offsets = []
-
- for message in messages:
- # For each string, we need size and file offset. Each string is NUL
- # terminated; the NUL does not count into the size.
- if message.pluralizable:
- msgid = '\x00'.join([
- msgid.encode(catalog.charset) for msgid in message.id
- ])
- msgstrs = []
- for idx, string in enumerate(message.string):
- if not string:
- msgstrs.append(message.id[min(int(idx), 1)])
- else:
- msgstrs.append(string)
- msgstr = '\x00'.join([
- msgstr.encode(catalog.charset) for msgstr in msgstrs
- ])
- else:
- msgid = message.id.encode(catalog.charset)
- if not message.string:
- msgstr = message.id.encode(catalog.charset)
- else:
- msgstr = message.string.encode(catalog.charset)
- offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
- ids += msgid + '\x00'
- strs += msgstr + '\x00'
-
- # The header is 7 32-bit unsigned integers. We don't use hash tables, so
- # the keys start right after the index tables.
- keystart = 7 * 4 + 16 * len(messages)
- valuestart = keystart + len(ids)
-
- # The string table first has the list of keys, then the list of values.
- # Each entry has first the size of the string, then the file offset.
- koffsets = []
- voffsets = []
- for o1, l1, o2, l2 in offsets:
- koffsets += [l1, o1 + keystart]
- voffsets += [l2, o2 + valuestart]
- offsets = koffsets + voffsets
-
- fileobj.write(struct.pack('Iiiiiii',
- 0x950412deL, # magic
- 0, # version
- len(messages), # number of entries
- 7 * 4, # start of key index
- 7 * 4 + len(messages) * 8, # start of value index
- 0, 0 # size and offset of hash table
- ) + array.array("i", offsets).tostring() + ids + strs)
diff --git a/babel/messages/plurals.py b/babel/messages/plurals.py
deleted file mode 100644
index 9eb48e5..0000000
--- a/babel/messages/plurals.py
+++ /dev/null
@@ -1,256 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Plural form definitions."""
-
-
-from operator import itemgetter
-from babel.core import default_locale, Locale
-
-
-LC_CTYPE = default_locale('LC_CTYPE')
-
-
-PLURALS = {
- # Afar
- # 'aa': (),
- # Abkhazian
- # 'ab': (),
- # Avestan
- # 'ae': (),
- # Afrikaans - From Pootle's PO's
- 'af': (2, '(n != 1)'),
- # Akan
- # 'ak': (),
- # Amharic
- # 'am': (),
- # Aragonese
- # 'an': (),
- # Arabic - From Pootle's PO's
- 'ar': (6, '(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n>=3 && n<=10 ? 3 : n>=11 && n<=99 ? 4 : 5)'),
- # Assamese
- # 'as': (),
- # Avaric
- # 'av': (),
- # Aymara
- # 'ay': (),
- # Azerbaijani
- # 'az': (),
- # Bashkir
- # 'ba': (),
- # Belarusian
- # 'be': (),
- # Bulgarian - From Pootle's PO's
- 'bg': (2, '(n != 1)'),
- # Bihari
- # 'bh': (),
- # Bislama
- # 'bi': (),
- # Bambara
- # 'bm': (),
- # Bengali - From Pootle's PO's
- 'bn': (2, '(n != 1)'),
- # Tibetan - as discussed in private with Andrew West
- 'bo': (1, '0'),
- # Breton
- # 'br': (),
- # Bosnian
- # 'bs': (),
- # Catalan - From Pootle's PO's
- 'ca': (2, '(n != 1)'),
- # Chechen
- # 'ce': (),
- # Chamorro
- # 'ch': (),
- # Corsican
- # 'co': (),
- # Cree
- # 'cr': (),
- # Czech
- 'cs': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Church Slavic
- # 'cu': (),
- # Chuvash
- 'cv': (1, '0'),
- # Welsh
- 'cy': (5, '(n==1 ? 1 : n==2 ? 2 : n==3 ? 3 : n==6 ? 4 : 0)'),
- # Danish
- 'da': (2, '(n != 1)'),
- # German
- 'de': (2, '(n != 1)'),
- # Divehi
- # 'dv': (),
- # Dzongkha
- 'dz': (1, '0'),
- # Greek
- 'el': (2, '(n != 1)'),
- # English
- 'en': (2, '(n != 1)'),
- # Esperanto
- 'eo': (2, '(n != 1)'),
- # Spanish
- 'es': (2, '(n != 1)'),
- # Estonian
- 'et': (2, '(n != 1)'),
- # Basque - From Pootle's PO's
- 'eu': (2, '(n != 1)'),
- # Persian - From Pootle's PO's
- 'fa': (1, '0'),
- # Finnish
- 'fi': (2, '(n != 1)'),
- # French
- 'fr': (2, '(n > 1)'),
- # Friulian - From Pootle's PO's
- 'fur': (2, '(n > 1)'),
- # Irish
- 'ga': (3, '(n==1 ? 0 : n==2 ? 1 : 2)'),
- # Galician - From Pootle's PO's
- 'gl': (2, '(n != 1)'),
- # Hausa - From Pootle's PO's
- 'ha': (2, '(n != 1)'),
- # Hebrew
- 'he': (2, '(n != 1)'),
- # Hindi - From Pootle's PO's
- 'hi': (2, '(n != 1)'),
- # Croatian
- 'hr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Hungarian
- 'hu': (1, '0'),
- # Armenian - From Pootle's PO's
- 'hy': (1, '0'),
- # Icelandic - From Pootle's PO's
- 'is': (2, '(n != 1)'),
- # Italian
- 'it': (2, '(n != 1)'),
- # Japanese
- 'ja': (1, '0'),
- # Georgian - From Pootle's PO's
- 'ka': (1, '0'),
- # Kongo - From Pootle's PO's
- 'kg': (2, '(n != 1)'),
- # Khmer - From Pootle's PO's
- 'km': (1, '0'),
- # Korean
- 'ko': (1, '0'),
- # Kurdish - From Pootle's PO's
- 'ku': (2, '(n != 1)'),
- # Lao - Another member of the Tai language family, like Thai.
- 'lo': (1, '0'),
- # Lithuanian
- 'lt': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Latvian
- 'lv': (3, '(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'),
- # Maltese - From Pootle's PO's
- 'mt': (4, '(n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'),
- # Norwegian Bokmål
- 'nb': (2, '(n != 1)'),
- # Dutch
- 'nl': (2, '(n != 1)'),
- # Norwegian Nynorsk
- 'nn': (2, '(n != 1)'),
- # Norwegian
- 'no': (2, '(n != 1)'),
- # Punjabi - From Pootle's PO's
- 'pa': (2, '(n != 1)'),
- # Polish
- 'pl': (3, '(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Portuguese
- 'pt': (2, '(n != 1)'),
- # Brazilian
- 'pt_BR': (2, '(n > 1)'),
- # Romanian - From Pootle's PO's
- 'ro': (3, '(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2)'),
- # Russian
- 'ru': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Slovak
- 'sk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Slovenian
- 'sl': (4, '(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'),
- # Serbian - From Pootle's PO's
- 'sr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10< =4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Southern Sotho - From Pootle's PO's
- 'st': (2, '(n != 1)'),
- # Swedish
- 'sv': (2, '(n != 1)'),
- # Thai
- 'th': (1, '0'),
- # Turkish
- 'tr': (1, '0'),
- # Ukrainian
- 'uk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Venda - From Pootle's PO's
- 've': (2, '(n != 1)'),
- # Vietnamese - From Pootle's PO's
- 'vi': (1, '0'),
- # Xhosa - From Pootle's PO's
- 'xh': (2, '(n != 1)'),
- # Chinese - From Pootle's PO's
- 'zh_CN': (1, '0'),
- 'zh_HK': (1, '0'),
- 'zh_TW': (1, '0'),
-}
-
-
-DEFAULT_PLURAL = (2, '(n != 1)')
-
-
-class _PluralTuple(tuple):
- """A tuple with plural information."""
-
- __slots__ = ()
- num_plurals = property(itemgetter(0), doc="""
- The number of plurals used by the locale.""")
- plural_expr = property(itemgetter(1), doc="""
- The plural expression used by the locale.""")
- plural_forms = property(lambda x: 'npurals=%s; plural=%s' % x, doc="""
- The plural expression used by the catalog or locale.""")
-
- def __str__(self):
- return self.plural_forms
-
-
-def get_plural(locale=LC_CTYPE):
- """A tuple with the information catalogs need to perform proper
- pluralization. The first item of the tuple is the number of plural
- forms, the second the plural expression.
-
- >>> get_plural(locale='en')
- (2, '(n != 1)')
- >>> get_plural(locale='ga')
- (3, '(n==1 ? 0 : n==2 ? 1 : 2)')
-
- The object returned is a special tuple with additional members:
-
- >>> tup = get_plural("ja")
- >>> tup.num_plurals
- 1
- >>> tup.plural_expr
- '0'
- >>> tup.plural_forms
- 'npurals=1; plural=0'
-
- Converting the tuple into a string prints the plural forms for a
- gettext catalog:
-
- >>> str(tup)
- 'npurals=1; plural=0'
- """
- locale = Locale.parse(locale)
- try:
- tup = PLURALS[str(locale)]
- except KeyError:
- try:
- tup = PLURALS[locale.language]
- except KeyError:
- tup = DEFAULT_PLURAL
- return _PluralTuple(tup)
diff --git a/babel/messages/pofile.py b/babel/messages/pofile.py
deleted file mode 100644
index c92f991..0000000
--- a/babel/messages/pofile.py
+++ /dev/null
@@ -1,455 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Reading and writing of files in the ``gettext`` PO (portable object)
-format.
-
-:see: `The Format of PO Files
- <http://www.gnu.org/software/gettext/manual/gettext.html#PO-Files>`_
-"""
-
-from datetime import date, datetime
-import os
-import re
-try:
- set
-except NameError:
- from sets import Set as set
-
-from babel import __version__ as VERSION
-from babel.messages.catalog import Catalog, Message
-from babel.util import wraptext, LOCALTZ
-
-__all__ = ['read_po', 'write_po']
-__docformat__ = 'restructuredtext en'
-
-def unescape(string):
- r"""Reverse `escape` the given string.
-
- >>> print unescape('"Say:\\n \\"hello, world!\\"\\n"')
- Say:
- "hello, world!"
- <BLANKLINE>
-
- :param string: the string to unescape
- :return: the unescaped string
- :rtype: `str` or `unicode`
- """
- return string[1:-1].replace('\\\\', '\\') \
- .replace('\\t', '\t') \
- .replace('\\r', '\r') \
- .replace('\\n', '\n') \
- .replace('\\"', '\"')
-
-def denormalize(string):
- r"""Reverse the normalization done by the `normalize` function.
-
- >>> print denormalize(r'''""
- ... "Say:\n"
- ... " \"hello, world!\"\n"''')
- Say:
- "hello, world!"
- <BLANKLINE>
-
- >>> print denormalize(r'''""
- ... "Say:\n"
- ... " \"Lorem ipsum dolor sit "
- ... "amet, consectetur adipisicing"
- ... " elit, \"\n"''')
- Say:
- "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
- <BLANKLINE>
-
- :param string: the string to denormalize
- :return: the denormalized string
- :rtype: `unicode` or `str`
- """
- if string.startswith('""'):
- lines = []
- for line in string.splitlines()[1:]:
- lines.append(unescape(line))
- return ''.join(lines)
- else:
- return unescape(string)
-
-def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False):
- """Read messages from a ``gettext`` PO (portable object) file from the given
- file-like object and return a `Catalog`.
-
- >>> from StringIO import StringIO
- >>> buf = StringIO('''
- ... #: main.py:1
- ... #, fuzzy, python-format
- ... msgid "foo %(name)s"
- ... msgstr ""
- ...
- ... # A user comment
- ... #. An auto comment
- ... #: main.py:3
- ... msgid "bar"
- ... msgid_plural "baz"
- ... msgstr[0] ""
- ... msgstr[1] ""
- ... ''')
- >>> catalog = read_po(buf)
- >>> catalog.revision_date = datetime(2007, 04, 01)
-
- >>> for message in catalog:
- ... if message.id:
- ... print (message.id, message.string)
- ... print ' ', (message.locations, message.flags)
- ... print ' ', (message.user_comments, message.auto_comments)
- (u'foo %(name)s', '')
- ([(u'main.py', 1)], set([u'fuzzy', u'python-format']))
- ([], [])
- ((u'bar', u'baz'), ('', ''))
- ([(u'main.py', 3)], set([]))
- ([u'A user comment'], [u'An auto comment'])
-
- :param fileobj: the file-like object to read the PO file from
- :param locale: the locale identifier or `Locale` object, or `None`
- if the catalog is not bound to a locale (which basically
- means it's a template)
- :param domain: the message domain
- :param ignore_obsolete: whether to ignore obsolete messages in the input
- :return: an iterator over ``(message, translation, location)`` tuples
- :rtype: ``iterator``
- """
- catalog = Catalog(locale=locale, domain=domain)
-
- counter = [0]
- offset = [0]
- messages = []
- translations = []
- locations = []
- flags = []
- user_comments = []
- auto_comments = []
- obsolete = [False]
- in_msgid = [False]
- in_msgstr = [False]
-
- def _add_message():
- translations.sort()
- if len(messages) > 1:
- msgid = tuple([denormalize(m) for m in messages])
- else:
- msgid = denormalize(messages[0])
- if isinstance(msgid, (list, tuple)):
- string = []
- for idx in range(catalog.num_plurals):
- try:
- string.append(translations[idx])
- except IndexError:
- string.append((idx, ''))
- string = tuple([denormalize(t[1]) for t in string])
- else:
- string = denormalize(translations[0][1])
- message = Message(msgid, string, list(locations), set(flags),
- auto_comments, user_comments, lineno=offset[0] + 1)
- if obsolete[0]:
- if not ignore_obsolete:
- catalog.obsolete[msgid] = message
- else:
- catalog[msgid] = message
- del messages[:]; del translations[:]; del locations[:];
- del flags[:]; del auto_comments[:]; del user_comments[:]
- obsolete[0] = False
- counter[0] += 1
-
- def _process_message_line(lineno, line):
- if line.startswith('msgid_plural'):
- in_msgid[0] = True
- msg = line[12:].lstrip()
- messages.append(msg)
- elif line.startswith('msgid'):
- in_msgid[0] = True
- offset[0] = lineno
- txt = line[5:].lstrip()
- if messages:
- _add_message()
- messages.append(txt)
- elif line.startswith('msgstr'):
- in_msgid[0] = False
- in_msgstr[0] = True
- msg = line[6:].lstrip()
- if msg.startswith('['):
- idx, msg = msg[1:].split(']', 1)
- translations.append([int(idx), msg.lstrip()])
- else:
- translations.append([0, msg])
- elif line.startswith('"'):
- if in_msgid[0]:
- messages[-1] += u'\n' + line.rstrip()
- elif in_msgstr[0]:
- translations[-1][1] += u'\n' + line.rstrip()
-
- for lineno, line in enumerate(fileobj.readlines()):
- line = line.strip().decode(catalog.charset)
- if line.startswith('#'):
- in_msgid[0] = in_msgstr[0] = False
- if messages and translations:
- _add_message()
- if line[1:].startswith(':'):
- for location in line[2:].lstrip().split():
- pos = location.rfind(':')
- if pos >= 0:
- try:
- lineno = int(location[pos + 1:])
- except ValueError:
- continue
- locations.append((location[:pos], lineno))
- elif line[1:].startswith(','):
- for flag in line[2:].lstrip().split(','):
- flags.append(flag.strip())
- elif line[1:].startswith('~'):
- obsolete[0] = True
- _process_message_line(lineno, line[2:].lstrip())
- elif line[1:].startswith('.'):
- # These are called auto-comments
- comment = line[2:].strip()
- if comment: # Just check that we're not adding empty comments
- auto_comments.append(comment)
- else:
- # These are called user comments
- user_comments.append(line[1:].strip())
- else:
- _process_message_line(lineno, line)
-
- if messages:
- _add_message()
-
- # No actual messages found, but there was some info in comments, from which
- # we'll construct an empty header message
- elif not counter[0] and (flags or user_comments or auto_comments):
- messages.append(u'')
- translations.append([0, u''])
- _add_message()
-
- return catalog
-
-WORD_SEP = re.compile('('
- r'\s+|' # any whitespace
- r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
- r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)' # em-dash
-')')
-
-def escape(string):
- r"""Escape the given string so that it can be included in double-quoted
- strings in ``PO`` files.
-
- >>> escape('''Say:
- ... "hello, world!"
- ... ''')
- '"Say:\\n \\"hello, world!\\"\\n"'
-
- :param string: the string to escape
- :return: the escaped string
- :rtype: `str` or `unicode`
- """
- return '"%s"' % string.replace('\\', '\\\\') \
- .replace('\t', '\\t') \
- .replace('\r', '\\r') \
- .replace('\n', '\\n') \
- .replace('\"', '\\"')
-
-def normalize(string, prefix='', width=76):
- r"""Convert a string into a format that is appropriate for .po files.
-
- >>> print normalize('''Say:
- ... "hello, world!"
- ... ''', width=None)
- ""
- "Say:\n"
- " \"hello, world!\"\n"
-
- >>> print normalize('''Say:
- ... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
- ... ''', width=32)
- ""
- "Say:\n"
- " \"Lorem ipsum dolor sit "
- "amet, consectetur adipisicing"
- " elit, \"\n"
-
- :param string: the string to normalize
- :param prefix: a string that should be prepended to every line
- :param width: the maximum line width; use `None`, 0, or a negative number
- to completely disable line wrapping
- :return: the normalized string
- :rtype: `unicode`
- """
- if width and width > 0:
- prefixlen = len(prefix)
- lines = []
- for idx, line in enumerate(string.splitlines(True)):
- if len(escape(line)) + prefixlen > width:
- chunks = WORD_SEP.split(line)
- chunks.reverse()
- while chunks:
- buf = []
- size = 2
- while chunks:
- l = len(escape(chunks[-1])) - 2 + prefixlen
- if size + l < width:
- buf.append(chunks.pop())
- size += l
- else:
- if not buf:
- # handle long chunks by putting them on a
- # separate line
- buf.append(chunks.pop())
- break
- lines.append(u''.join(buf))
- else:
- lines.append(line)
- else:
- lines = string.splitlines(True)
-
- if len(lines) <= 1:
- return escape(string)
-
- # Remove empty trailing line
- if lines and not lines[-1]:
- del lines[-1]
- lines[-1] += '\n'
- return u'""\n' + u'\n'.join([(prefix + escape(l)) for l in lines])
-
-def write_po(fileobj, catalog, width=76, no_location=False, omit_header=False,
- sort_output=False, sort_by_file=False, ignore_obsolete=False,
- include_previous=False):
- r"""Write a ``gettext`` PO (portable object) template file for a given
- message catalog to the provided file-like object.
-
- >>> catalog = Catalog()
- >>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],
- ... flags=('fuzzy',))
- >>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])
- >>> from StringIO import StringIO
- >>> buf = StringIO()
- >>> write_po(buf, catalog, omit_header=True)
- >>> print buf.getvalue()
- #: main.py:1
- #, fuzzy, python-format
- msgid "foo %(name)s"
- msgstr ""
- <BLANKLINE>
- #: main.py:3
- msgid "bar"
- msgid_plural "baz"
- msgstr[0] ""
- msgstr[1] ""
- <BLANKLINE>
- <BLANKLINE>
-
- :param fileobj: the file-like object to write to
- :param catalog: the `Catalog` instance
- :param width: the maximum line width for the generated output; use `None`,
- 0, or a negative number to completely disable line wrapping
- :param no_location: do not emit a location comment for every message
- :param omit_header: do not include the ``msgid ""`` entry at the top of the
- output
- :param sort_output: whether to sort the messages in the output by msgid
- :param sort_by_file: whether to sort the messages in the output by their
- locations
- :param ignore_obsolete: whether to ignore obsolete messages and not include
- them in the output; by default they are included as
- comments
- :param include_previous: include the old msgid as a comment when
- updating the catalog
- """
- def _normalize(key, prefix=''):
- return normalize(key, prefix=prefix, width=width) \
- .encode(catalog.charset, 'backslashreplace')
-
- def _write(text):
- if isinstance(text, unicode):
- text = text.encode(catalog.charset)
- fileobj.write(text)
-
- def _write_comment(comment, prefix=''):
- lines = comment
- if width and width > 0:
- lines = wraptext(comment, width)
- for line in lines:
- _write('#%s %s\n' % (prefix, line.strip()))
-
- def _write_message(message, prefix=''):
- if isinstance(message.id, (list, tuple)):
- _write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix)))
- _write('%smsgid_plural %s\n' % (
- prefix, _normalize(message.id[1], prefix)
- ))
-
- for idx in range(catalog.num_plurals):
- try:
- string = message.string[idx]
- except IndexError:
- string = ''
- _write('%smsgstr[%d] %s\n' % (
- prefix, idx, _normalize(string, prefix)
- ))
- else:
- _write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix)))
- _write('%smsgstr %s\n' % (
- prefix, _normalize(message.string or '', prefix)
- ))
-
- messages = list(catalog)
- if sort_output:
- messages.sort()
- elif sort_by_file:
- messages.sort(lambda x,y: cmp(x.locations, y.locations))
-
- for message in messages:
- if not message.id: # This is the header "message"
- if omit_header:
- continue
- comment_header = catalog.header_comment
- if width and width > 0:
- lines = []
- for line in comment_header.splitlines():
- lines += wraptext(line, width=width,
- subsequent_indent='# ')
- comment_header = u'\n'.join(lines) + u'\n'
- _write(comment_header)
-
- for comment in message.user_comments:
- _write_comment(comment)
- for comment in message.auto_comments:
- _write_comment(comment, prefix='.')
-
- if not no_location:
- locs = u' '.join([u'%s:%d' % (filename.replace(os.sep, '/'), lineno)
- for filename, lineno in message.locations])
- _write_comment(locs, prefix=':')
- if message.flags:
- _write('#%s\n' % ', '.join([''] + list(message.flags)))
-
- if message.previous_id and include_previous:
- _write_comment('msgid %s' % _normalize(message.previous_id[0]),
- prefix='|')
- if len(message.previous_id) > 1:
- _write_comment('msgid_plural %s' % _normalize(
- message.previous_id[1]
- ), prefix='|')
-
- _write_message(message)
- _write('\n')
-
- if not ignore_obsolete:
- for message in catalog.obsolete.values():
- for comment in message.user_comments:
- _write_comment(comment)
- _write_message(message, prefix='#~ ')
- _write('\n')
diff --git a/babel/numbers.py b/babel/numbers.py
deleted file mode 100644
index 1a52074..0000000
--- a/babel/numbers.py
+++ /dev/null
@@ -1,583 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Locale dependent formatting and parsing of numeric data.
-
-The default locale for the functions in this module is determined by the
-following environment variables, in that order:
-
- * ``LC_NUMERIC``,
- * ``LC_ALL``, and
- * ``LANG``
-"""
-# TODO:
-# Padding and rounding increments in pattern:
-# - http://www.unicode.org/reports/tr35/ (Appendix G.6)
-import math
-import re
-try:
- from decimal import Decimal
- have_decimal = True
-except ImportError:
- have_decimal = False
-
-from babel.core import default_locale, Locale
-
-__all__ = ['format_number', 'format_decimal', 'format_currency',
- 'format_percent', 'format_scientific', 'parse_number',
- 'parse_decimal', 'NumberFormatError']
-__docformat__ = 'restructuredtext en'
-
-LC_NUMERIC = default_locale('LC_NUMERIC')
-
-def get_currency_name(currency, locale=LC_NUMERIC):
- """Return the name used by the locale for the specified currency.
-
- >>> get_currency_name('USD', 'en_US')
- u'US Dollar'
-
- :param currency: the currency code
- :param locale: the `Locale` object or locale identifier
- :return: the currency name
- :rtype: `unicode`
- :since: version 0.9.4
- """
- return Locale.parse(locale).currencies.get(currency, currency)
-
-def get_currency_symbol(currency, locale=LC_NUMERIC):
- """Return the symbol used by the locale for the specified currency.
-
- >>> get_currency_symbol('USD', 'en_US')
- u'$'
-
- :param currency: the currency code
- :param locale: the `Locale` object or locale identifier
- :return: the currency symbol
- :rtype: `unicode`
- """
- return Locale.parse(locale).currency_symbols.get(currency, currency)
-
-def get_decimal_symbol(locale=LC_NUMERIC):
- """Return the symbol used by the locale to separate decimal fractions.
-
- >>> get_decimal_symbol('en_US')
- u'.'
-
- :param locale: the `Locale` object or locale identifier
- :return: the decimal symbol
- :rtype: `unicode`
- """
- return Locale.parse(locale).number_symbols.get('decimal', u'.')
-
-def get_plus_sign_symbol(locale=LC_NUMERIC):
- """Return the plus sign symbol used by the current locale.
-
- >>> get_plus_sign_symbol('en_US')
- u'+'
-
- :param locale: the `Locale` object or locale identifier
- :return: the plus sign symbol
- :rtype: `unicode`
- """
- return Locale.parse(locale).number_symbols.get('plusSign', u'+')
-
-def get_minus_sign_symbol(locale=LC_NUMERIC):
- """Return the minus sign symbol used by the current locale.
-
- >>> get_minus_sign_symbol('en_US')
- u'-'
-
- :param locale: the `Locale` object or locale identifier
- :return: the plus sign symbol
- :rtype: `unicode`
- """
- return Locale.parse(locale).number_symbols.get('minusSign', u'-')
-
-def get_exponential_symbol(locale=LC_NUMERIC):
- """Return the symbol used by the locale to separate mantissa and exponent.
-
- >>> get_exponential_symbol('en_US')
- u'E'
-
- :param locale: the `Locale` object or locale identifier
- :return: the exponential symbol
- :rtype: `unicode`
- """
- return Locale.parse(locale).number_symbols.get('exponential', u'E')
-
-def get_group_symbol(locale=LC_NUMERIC):
- """Return the symbol used by the locale to separate groups of thousands.
-
- >>> get_group_symbol('en_US')
- u','
-
- :param locale: the `Locale` object or locale identifier
- :return: the group symbol
- :rtype: `unicode`
- """
- return Locale.parse(locale).number_symbols.get('group', u',')
-
-def format_number(number, locale=LC_NUMERIC):
- """Return the given number formatted for a specific locale.
-
- >>> format_number(1099, locale='en_US')
- u'1,099'
-
- :param number: the number to format
- :param locale: the `Locale` object or locale identifier
- :return: the formatted number
- :rtype: `unicode`
- """
- # Do we really need this one?
- return format_decimal(number, locale=locale)
-
-def format_decimal(number, format=None, locale=LC_NUMERIC):
- """Return the given decimal number formatted for a specific locale.
-
- >>> format_decimal(1.2345, locale='en_US')
- u'1.234'
- >>> format_decimal(1.2346, locale='en_US')
- u'1.235'
- >>> format_decimal(-1.2346, locale='en_US')
- u'-1.235'
- >>> format_decimal(1.2345, locale='sv_SE')
- u'1,234'
- >>> format_decimal(12345, locale='de')
- u'12.345'
-
- The appropriate thousands grouping and the decimal separator are used for
- each locale:
-
- >>> format_decimal(12345.5, locale='en_US')
- u'12,345.5'
-
- :param number: the number to format
- :param format:
- :param locale: the `Locale` object or locale identifier
- :return: the formatted decimal number
- :rtype: `unicode`
- """
- locale = Locale.parse(locale)
- if not format:
- format = locale.decimal_formats.get(format)
- pattern = parse_pattern(format)
- return pattern.apply(number, locale)
-
-def format_currency(number, currency, format=None, locale=LC_NUMERIC):
- u"""Return formatted currency value.
-
- >>> format_currency(1099.98, 'USD', locale='en_US')
- u'$1,099.98'
- >>> format_currency(1099.98, 'USD', locale='es_CO')
- u'US$\\xa01.099,98'
- >>> format_currency(1099.98, 'EUR', locale='de_DE')
- u'1.099,98\\xa0\\u20ac'
-
- The pattern can also be specified explicitly:
-
- >>> format_currency(1099.98, 'EUR', u'\xa4\xa4 #,##0.00', locale='en_US')
- u'EUR 1,099.98'
-
- :param number: the number to format
- :param currency: the currency code
- :param locale: the `Locale` object or locale identifier
- :return: the formatted currency value
- :rtype: `unicode`
- """
- locale = Locale.parse(locale)
- if not format:
- format = locale.currency_formats.get(format)
- pattern = parse_pattern(format)
- return pattern.apply(number, locale, currency=currency)
-
-def format_percent(number, format=None, locale=LC_NUMERIC):
- """Return formatted percent value for a specific locale.
-
- >>> format_percent(0.34, locale='en_US')
- u'34%'
- >>> format_percent(25.1234, locale='en_US')
- u'2,512%'
- >>> format_percent(25.1234, locale='sv_SE')
- u'2\\xa0512\\xa0%'
-
- The format pattern can also be specified explicitly:
-
- >>> format_percent(25.1234, u'#,##0\u2030', locale='en_US')
- u'25,123\u2030'
-
- :param number: the percent number to format
- :param format:
- :param locale: the `Locale` object or locale identifier
- :return: the formatted percent number
- :rtype: `unicode`
- """
- locale = Locale.parse(locale)
- if not format:
- format = locale.percent_formats.get(format)
- pattern = parse_pattern(format)
- return pattern.apply(number, locale)
-
-def format_scientific(number, format=None, locale=LC_NUMERIC):
- """Return value formatted in scientific notation for a specific locale.
-
- >>> format_scientific(10000, locale='en_US')
- u'1E4'
-
- The format pattern can also be specified explicitly:
-
- >>> format_scientific(1234567, u'##0E00', locale='en_US')
- u'1.23E06'
-
- :param number: the number to format
- :param format:
- :param locale: the `Locale` object or locale identifier
- :return: value formatted in scientific notation.
- :rtype: `unicode`
- """
- locale = Locale.parse(locale)
- if not format:
- format = locale.scientific_formats.get(format)
- pattern = parse_pattern(format)
- return pattern.apply(number, locale)
-
-
-class NumberFormatError(ValueError):
- """Exception raised when a string cannot be parsed into a number."""
-
-
-def parse_number(string, locale=LC_NUMERIC):
- """Parse localized number string into a long integer.
-
- >>> parse_number('1,099', locale='en_US')
- 1099L
- >>> parse_number('1.099', locale='de_DE')
- 1099L
-
- When the given string cannot be parsed, an exception is raised:
-
- >>> parse_number('1.099,98', locale='de')
- Traceback (most recent call last):
- ...
- NumberFormatError: '1.099,98' is not a valid number
-
- :param string: the string to parse
- :param locale: the `Locale` object or locale identifier
- :return: the parsed number
- :rtype: `long`
- :raise `NumberFormatError`: if the string can not be converted to a number
- """
- try:
- return long(string.replace(get_group_symbol(locale), ''))
- except ValueError:
- raise NumberFormatError('%r is not a valid number' % string)
-
-def parse_decimal(string, locale=LC_NUMERIC):
- """Parse localized decimal string into a float.
-
- >>> parse_decimal('1,099.98', locale='en_US')
- 1099.98
- >>> parse_decimal('1.099,98', locale='de')
- 1099.98
-
- When the given string cannot be parsed, an exception is raised:
-
- >>> parse_decimal('2,109,998', locale='de')
- Traceback (most recent call last):
- ...
- NumberFormatError: '2,109,998' is not a valid decimal number
-
- :param string: the string to parse
- :param locale: the `Locale` object or locale identifier
- :return: the parsed decimal number
- :rtype: `float`
- :raise `NumberFormatError`: if the string can not be converted to a
- decimal number
- """
- locale = Locale.parse(locale)
- try:
- return float(string.replace(get_group_symbol(locale), '')
- .replace(get_decimal_symbol(locale), '.'))
- except ValueError:
- raise NumberFormatError('%r is not a valid decimal number' % string)
-
-
-PREFIX_END = r'[^0-9@#.,]'
-NUMBER_TOKEN = r'[0-9@#.\-,E+]'
-
-PREFIX_PATTERN = r"(?P<prefix>(?:'[^']*'|%s)*)" % PREFIX_END
-NUMBER_PATTERN = r"(?P<number>%s+)" % NUMBER_TOKEN
-SUFFIX_PATTERN = r"(?P<suffix>.*)"
-
-number_re = re.compile(r"%s%s%s" % (PREFIX_PATTERN, NUMBER_PATTERN,
- SUFFIX_PATTERN))
-
-def split_number(value):
- """Convert a number into a (intasstring, fractionasstring) tuple"""
- if have_decimal and isinstance(value, Decimal):
- text = str(value)
- else:
- text = ('%.9f' % value).rstrip('0')
- if '.' in text:
- a, b = text.split('.', 1)
- if b == '0':
- b = ''
- else:
- a, b = text, ''
- return a, b
-
-def bankersround(value, ndigits=0):
- """Round a number to a given precision.
-
- Works like round() except that the round-half-even (banker's rounding)
- algorithm is used instead of round-half-up.
-
- >>> bankersround(5.5, 0)
- 6.0
- >>> bankersround(6.5, 0)
- 6.0
- >>> bankersround(-6.5, 0)
- -6.0
- >>> bankersround(1234.0, -2)
- 1200.0
- """
- sign = int(value < 0) and -1 or 1
- value = abs(value)
- a, b = split_number(value)
- digits = a + b
- add = 0
- i = len(a) + ndigits
- if i < 0 or i >= len(digits):
- pass
- elif digits[i] > '5':
- add = 1
- elif digits[i] == '5' and digits[i-1] in '13579':
- add = 1
- scale = 10**ndigits
- if have_decimal and isinstance(value, Decimal):
- return Decimal(int(value * scale + add)) / scale * sign
- else:
- return float(int(value * scale + add)) / scale * sign
-
-def parse_pattern(pattern):
- """Parse number format patterns"""
- if isinstance(pattern, NumberPattern):
- return pattern
-
- # Do we have a negative subpattern?
- if ';' in pattern:
- pattern, neg_pattern = pattern.split(';', 1)
- pos_prefix, number, pos_suffix = number_re.search(pattern).groups()
- neg_prefix, _, neg_suffix = number_re.search(neg_pattern).groups()
- else:
- pos_prefix, number, pos_suffix = number_re.search(pattern).groups()
- neg_prefix = '-' + pos_prefix
- neg_suffix = pos_suffix
- if 'E' in number:
- number, exp = number.split('E', 1)
- else:
- exp = None
- if '@' in number:
- if '.' in number and '0' in number:
- raise ValueError('Significant digit patterns can not contain '
- '"@" or "0"')
- if '.' in number:
- integer, fraction = number.rsplit('.', 1)
- else:
- integer = number
- fraction = ''
- min_frac = max_frac = 0
-
- def parse_precision(p):
- """Calculate the min and max allowed digits"""
- min = max = 0
- for c in p:
- if c in '@0':
- min += 1
- max += 1
- elif c == '#':
- max += 1
- elif c == ',':
- continue
- else:
- break
- return min, max
-
- def parse_grouping(p):
- """Parse primary and secondary digit grouping
-
- >>> parse_grouping('##')
- 1000, 1000
- >>> parse_grouping('#,###')
- 3, 3
- >>> parse_grouping('#,####,###')
- 3, 4
- """
- width = len(p)
- g1 = p.rfind(',')
- if g1 == -1:
- return 1000, 1000
- g1 = width - g1 - 1
- g2 = p[:-g1 - 1].rfind(',')
- if g2 == -1:
- return g1, g1
- g2 = width - g1 - g2 - 2
- return g1, g2
-
- int_prec = parse_precision(integer)
- frac_prec = parse_precision(fraction)
- if exp:
- frac_prec = parse_precision(integer+fraction)
- exp_plus = exp.startswith('+')
- exp = exp.lstrip('+')
- exp_prec = parse_precision(exp)
- else:
- exp_plus = None
- exp_prec = None
- grouping = parse_grouping(integer)
- return NumberPattern(pattern, (pos_prefix, neg_prefix),
- (pos_suffix, neg_suffix), grouping,
- int_prec, frac_prec,
- exp_prec, exp_plus)
-
-
-class NumberPattern(object):
-
- def __init__(self, pattern, prefix, suffix, grouping,
- int_prec, frac_prec, exp_prec, exp_plus):
- self.pattern = pattern
- self.prefix = prefix
- self.suffix = suffix
- self.grouping = grouping
- self.int_prec = int_prec
- self.frac_prec = frac_prec
- self.exp_prec = exp_prec
- self.exp_plus = exp_plus
- if '%' in ''.join(self.prefix + self.suffix):
- self.scale = 100
- elif u'‰' in ''.join(self.prefix + self.suffix):
- self.scale = 1000
- else:
- self.scale = 1
-
- def __repr__(self):
- return '<%s %r>' % (type(self).__name__, self.pattern)
-
- def apply(self, value, locale, currency=None):
- value *= self.scale
- is_negative = int(value < 0)
- if self.exp_prec: # Scientific notation
- value = abs(value)
- if value:
- exp = int(math.floor(math.log(value, 10)))
- else:
- exp = 0
- # Minimum number of integer digits
- if self.int_prec[0] == self.int_prec[1]:
- exp -= self.int_prec[0] - 1
- # Exponent grouping
- elif self.int_prec[1]:
- exp = int(exp) / self.int_prec[1] * self.int_prec[1]
- if not have_decimal or not isinstance(value, Decimal):
- value = float(value)
- if exp < 0:
- value = value * 10**(-exp)
- else:
- value = value / 10**exp
- exp_sign = ''
- if exp < 0:
- exp_sign = get_minus_sign_symbol(locale)
- elif self.exp_plus:
- exp_sign = get_plus_sign_symbol(locale)
- exp = abs(exp)
- number = u'%s%s%s%s' % \
- (self._format_sigdig(value, self.frac_prec[0],
- self.frac_prec[1]),
- get_exponential_symbol(locale), exp_sign,
- self._format_int(str(exp), self.exp_prec[0],
- self.exp_prec[1], locale))
- elif '@' in self.pattern: # Is it a significant digits pattern?
- text = self._format_sigdig(abs(value),
- self.int_prec[0],
- self.int_prec[1])
- if '.' in text:
- a, b = text.split('.')
- a = self._format_int(a, 0, 1000, locale)
- if b:
- b = get_decimal_symbol(locale) + b
- number = a + b
- else:
- number = self._format_int(text, 0, 1000, locale)
- else: # A normal number pattern
- a, b = split_number(bankersround(abs(value),
- self.frac_prec[1]))
- b = b or '0'
- a = self._format_int(a, self.int_prec[0],
- self.int_prec[1], locale)
- b = self._format_frac(b, locale)
- number = a + b
- retval = u'%s%s%s' % (self.prefix[is_negative], number,
- self.suffix[is_negative])
- if u'¤' in retval:
- retval = retval.replace(u'¤¤', currency.upper())
- retval = retval.replace(u'¤', get_currency_symbol(currency, locale))
- return retval
-
- def _format_sigdig(self, value, min, max):
- """Convert value to a string.
-
- The resulting string will contain between (min, max) number of
- significant digits.
- """
- a, b = split_number(value)
- ndecimals = len(a)
- if a == '0' and b != '':
- ndecimals = 0
- while b.startswith('0'):
- b = b[1:]
- ndecimals -= 1
- a, b = split_number(bankersround(value, max - ndecimals))
- digits = len((a + b).lstrip('0'))
- if not digits:
- digits = 1
- # Figure out if we need to add any trailing '0':s
- if len(a) >= max and a != '0':
- return a
- if digits < min:
- b += ('0' * (min - digits))
- if b:
- return '%s.%s' % (a, b)
- return a
-
- def _format_int(self, value, min, max, locale):
- width = len(value)
- if width < min:
- value = '0' * (min - width) + value
- gsize = self.grouping[0]
- ret = ''
- symbol = get_group_symbol(locale)
- while len(value) > gsize:
- ret = symbol + value[-gsize:] + ret
- value = value[:-gsize]
- gsize = self.grouping[1]
- return value + ret
-
- def _format_frac(self, value, locale):
- min, max = self.frac_prec
- if len(value) < min:
- value += ('0' * (min - len(value)))
- if max == 0 or (min == 0 and int(value) == 0):
- return ''
- width = len(value)
- while len(value) > min and value[-1] == '0':
- value = value[:-1]
- return get_decimal_symbol(locale) + value
diff --git a/babel/support.py b/babel/support.py
deleted file mode 100644
index 74f7162..0000000
--- a/babel/support.py
+++ /dev/null
@@ -1,398 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Several classes and functions that help with integrating and using Babel
-in applications.
-
-.. note: the code in this module is not used by Babel itself
-"""
-
-from datetime import date, datetime, time
-import gettext
-
-try:
- set
-except NameError:
- from sets import set
-
-from babel.core import Locale
-from babel.dates import format_date, format_datetime, format_time, LC_TIME
-from babel.numbers import format_number, format_decimal, format_currency, \
- format_percent, format_scientific, LC_NUMERIC
-from babel.util import UTC
-
-__all__ = ['Format', 'LazyProxy', 'Translations']
-__docformat__ = 'restructuredtext en'
-
-
-class Format(object):
- """Wrapper class providing the various date and number formatting functions
- bound to a specific locale and time-zone.
-
- >>> fmt = Format('en_US', UTC)
- >>> fmt.date(date(2007, 4, 1))
- u'Apr 1, 2007'
- >>> fmt.decimal(1.2345)
- u'1.234'
- """
-
- def __init__(self, locale, tzinfo=None):
- """Initialize the formatter.
-
- :param locale: the locale identifier or `Locale` instance
- :param tzinfo: the time-zone info (a `tzinfo` instance or `None`)
- """
- self.locale = Locale.parse(locale)
- self.tzinfo = tzinfo
-
- def date(self, date=None, format='medium'):
- """Return a date formatted according to the given pattern.
-
- >>> fmt = Format('en_US')
- >>> fmt.date(date(2007, 4, 1))
- u'Apr 1, 2007'
-
- :see: `babel.dates.format_date`
- """
- return format_date(date, format, locale=self.locale)
-
- def datetime(self, datetime=None, format='medium'):
- """Return a date and time formatted according to the given pattern.
-
- >>> from pytz import timezone
- >>> fmt = Format('en_US', tzinfo=timezone('US/Eastern'))
- >>> fmt.datetime(datetime(2007, 4, 1, 15, 30))
- u'Apr 1, 2007 11:30:00 AM'
-
- :see: `babel.dates.format_datetime`
- """
- return format_datetime(datetime, format, tzinfo=self.tzinfo,
- locale=self.locale)
-
- def time(self, time=None, format='medium'):
- """Return a time formatted according to the given pattern.
-
- >>> from pytz import timezone
- >>> fmt = Format('en_US', tzinfo=timezone('US/Eastern'))
- >>> fmt.time(datetime(2007, 4, 1, 15, 30))
- u'11:30:00 AM'
-
- :see: `babel.dates.format_time`
- """
- return format_time(time, format, tzinfo=self.tzinfo, locale=self.locale)
-
- def number(self, number):
- """Return an integer number formatted for the locale.
-
- >>> fmt = Format('en_US')
- >>> fmt.number(1099)
- u'1,099'
-
- :see: `babel.numbers.format_number`
- """
- return format_number(number, locale=self.locale)
-
- def decimal(self, number, format=None):
- """Return a decimal number formatted for the locale.
-
- >>> fmt = Format('en_US')
- >>> fmt.decimal(1.2345)
- u'1.234'
-
- :see: `babel.numbers.format_decimal`
- """
- return format_decimal(number, format, locale=self.locale)
-
- def currency(self, number, currency):
- """Return a number in the given currency formatted for the locale.
-
- :see: `babel.numbers.format_currency`
- """
- return format_currency(number, currency, locale=self.locale)
-
- def percent(self, number, format=None):
- """Return a number formatted as percentage for the locale.
-
- >>> fmt = Format('en_US')
- >>> fmt.percent(0.34)
- u'34%'
-
- :see: `babel.numbers.format_percent`
- """
- return format_percent(number, format, locale=self.locale)
-
- def scientific(self, number):
- """Return a number formatted using scientific notation for the locale.
-
- :see: `babel.numbers.format_scientific`
- """
- return format_scientific(number, locale=self.locale)
-
-
-class LazyProxy(object):
- """Class for proxy objects that delegate to a specified function to evaluate
- the actual object.
-
- >>> def greeting(name='world'):
- ... return 'Hello, %s!' % name
- >>> lazy_greeting = LazyProxy(greeting, name='Joe')
- >>> print lazy_greeting
- Hello, Joe!
- >>> u' ' + lazy_greeting
- u' Hello, Joe!'
- >>> u'(%s)' % lazy_greeting
- u'(Hello, Joe!)'
-
- This can be used, for example, to implement lazy translation functions that
- delay the actual translation until the string is actually used. The
- rationale for such behavior is that the locale of the user may not always
- be available. In web applications, you only know the locale when processing
- a request.
-
- The proxy implementation attempts to be as complete as possible, so that
- the lazy objects should mostly work as expected, for example for sorting:
-
- >>> greetings = [
- ... LazyProxy(greeting, 'world'),
- ... LazyProxy(greeting, 'Joe'),
- ... LazyProxy(greeting, 'universe'),
- ... ]
- >>> greetings.sort()
- >>> for greeting in greetings:
- ... print greeting
- Hello, Joe!
- Hello, universe!
- Hello, world!
- """
- __slots__ = ['_func', '_args', '_kwargs', '_value']
-
- def __init__(self, func, *args, **kwargs):
- # Avoid triggering our own __setattr__ implementation
- object.__setattr__(self, '_func', func)
- object.__setattr__(self, '_args', args)
- object.__setattr__(self, '_kwargs', kwargs)
- object.__setattr__(self, '_value', None)
-
- def value(self):
- if self._value is None:
- value = self._func(*self._args, **self._kwargs)
- object.__setattr__(self, '_value', value)
- return self._value
- value = property(value)
-
- def __contains__(self, key):
- return key in self.value
-
- def __nonzero__(self):
- return bool(self.value)
-
- def __dir__(self):
- return dir(self.value)
-
- def __iter__(self):
- return iter(self.value)
-
- def __len__(self):
- return len(self.value)
-
- def __str__(self):
- return str(self.value)
-
- def __unicode__(self):
- return unicode(self.value)
-
- def __add__(self, other):
- return self.value + other
-
- def __radd__(self, other):
- return other + self.value
-
- def __mod__(self, other):
- return self.value % other
-
- def __rmod__(self, other):
- return other % self.value
-
- def __mul__(self, other):
- return self.value * other
-
- def __rmul__(self, other):
- return other * self.value
-
- def __call__(self, *args, **kwargs):
- return self.value(*args, **kwargs)
-
- def __lt__(self, other):
- return self.value < other
-
- def __le__(self, other):
- return self.value <= other
-
- def __eq__(self, other):
- return self.value == other
-
- def __ne__(self, other):
- return self.value != other
-
- def __gt__(self, other):
- return self.value > other
-
- def __ge__(self, other):
- return self.value >= other
-
- def __delattr__(self, name):
- delattr(self.value, name)
-
- def __getattr__(self, name):
- return getattr(self.value, name)
-
- def __setattr__(self, name, value):
- setattr(self.value, name, value)
-
- def __delitem__(self, key):
- del self.value[key]
-
- def __getitem__(self, key):
- return self.value[key]
-
- def __setitem__(self, key, value):
- self.value[key] = value
-
-
-class Translations(gettext.GNUTranslations, object):
- """An extended translation catalog class."""
-
- DEFAULT_DOMAIN = 'messages'
-
- def __init__(self, fileobj=None, domain=DEFAULT_DOMAIN):
- """Initialize the translations catalog.
-
- :param fileobj: the file-like object the translation should be read
- from
- """
- gettext.GNUTranslations.__init__(self, fp=fileobj)
- self.files = filter(None, [getattr(fileobj, 'name', None)])
- self.domain = domain
- self._domains = {}
-
- def load(cls, dirname=None, locales=None, domain=DEFAULT_DOMAIN):
- """Load translations from the given directory.
-
- :param dirname: the directory containing the ``MO`` files
- :param locales: the list of locales in order of preference (items in
- this list can be either `Locale` objects or locale
- strings)
- :param domain: the message domain
- :return: the loaded catalog, or a ``NullTranslations`` instance if no
- matching translations were found
- :rtype: `Translations`
- """
- if locales is not None:
- if not isinstance(locales, (list, tuple)):
- locales = [locales]
- locales = [str(locale) for locale in locales]
- if not domain:
- domain = cls.DEFAULT_DOMAIN
- filename = gettext.find(domain, dirname, locales)
- if not filename:
- return gettext.NullTranslations()
- return cls(fileobj=open(filename, 'rb'), domain=domain)
- load = classmethod(load)
-
- def __repr__(self):
- return '<%s: "%s">' % (type(self).__name__,
- self._info.get('project-id-version'))
-
- def add(self, translations, merge=True):
- """Add the given translations to the catalog.
-
- If the domain of the translations is different than that of the
- current catalog, they are added as a catalog that is only accessible
- by the various ``d*gettext`` functions.
-
- :param translations: the `Translations` instance with the messages to
- add
- :param merge: whether translations for message domains that have
- already been added should be merged with the existing
- translations
- :return: the `Translations` instance (``self``) so that `merge` calls
- can be easily chained
- :rtype: `Translations`
- """
- domain = getattr(translations, 'domain', self.DEFAULT_DOMAIN)
- if merge and domain == self.domain:
- return self.merge(translations)
-
- existing = self._domains.get(domain)
- if merge and existing is not None:
- existing.merge(translations)
- else:
- translations.add_fallback(self)
- self._domains[domain] = translations
-
- return self
-
- def merge(self, translations):
- """Merge the given translations into the catalog.
-
- Message translations in the specified catalog override any messages
- with the same identifier in the existing catalog.
-
- :param translations: the `Translations` instance with the messages to
- merge
- :return: the `Translations` instance (``self``) so that `merge` calls
- can be easily chained
- :rtype: `Translations`
- """
- if isinstance(translations, gettext.GNUTranslations):
- self._catalog.update(translations._catalog)
- if isinstance(translations, Translations):
- self.files.extend(translations.files)
-
- return self
-
- def dgettext(self, domain, message):
- """Like ``gettext()``, but look the message up in the specified
- domain.
- """
- return self._domains.get(domain, self).gettext(message)
-
- def ldgettext(self, domain, message):
- """Like ``lgettext()``, but look the message up in the specified
- domain.
- """
- return self._domains.get(domain, self).lgettext(message)
-
- def dugettext(self, domain, message):
- """Like ``ugettext()``, but look the message up in the specified
- domain.
- """
- return self._domains.get(domain, self).ugettext(message)
-
- def dngettext(self, domain, singular, plural, num):
- """Like ``ngettext()``, but look the message up in the specified
- domain.
- """
- return self._domains.get(domain, self).ngettext(singular, plural, num)
-
- def ldngettext(self, domain, singular, plural, num):
- """Like ``lngettext()``, but look the message up in the specified
- domain.
- """
- return self._domains.get(domain, self).lngettext(singular, plural, num)
-
- def dungettext(self, domain, singular, plural, num):
- """Like ``ungettext()`` but look the message up in the specified
- domain.
- """
- return self._domains.get(domain, self).ungettext(singular, plural, num)
diff --git a/babel/util.py b/babel/util.py
deleted file mode 100644
index b59918f..0000000
--- a/babel/util.py
+++ /dev/null
@@ -1,348 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Edgewall Software
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://babel.edgewall.org/wiki/License.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://babel.edgewall.org/log/.
-
-"""Various utility classes and functions."""
-
-import codecs
-from datetime import timedelta, tzinfo
-import os
-import re
-try:
- set
-except NameError:
- from sets import Set as set
-import textwrap
-import time
-from itertools import izip, imap
-missing = object()
-
-__all__ = ['distinct', 'pathmatch', 'relpath', 'wraptext', 'odict', 'UTC',
- 'LOCALTZ']
-__docformat__ = 'restructuredtext en'
-
-
-def distinct(iterable):
- """Yield all items in an iterable collection that are distinct.
-
- Unlike when using sets for a similar effect, the original ordering of the
- items in the collection is preserved by this function.
-
- >>> print list(distinct([1, 2, 1, 3, 4, 4]))
- [1, 2, 3, 4]
- >>> print list(distinct('foobar'))
- ['f', 'o', 'b', 'a', 'r']
-
- :param iterable: the iterable collection providing the data
- :return: the distinct items in the collection
- :rtype: ``iterator``
- """
- seen = set()
- for item in iter(iterable):
- if item not in seen:
- yield item
- seen.add(item)
-
-# Regexp to match python magic encoding line
-PYTHON_MAGIC_COMMENT_re = re.compile(
- r'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)', re.VERBOSE)
-def parse_encoding(fp):
- """Deduce the encoding of a source file from magic comment.
-
- It does this in the same way as the `Python interpreter`__
-
- .. __: http://docs.python.org/ref/encodings.html
-
- The ``fp`` argument should be a seekable file object.
-
- (From Jeff Dairiki)
- """
- pos = fp.tell()
- fp.seek(0)
- try:
- line1 = fp.readline()
- has_bom = line1.startswith(codecs.BOM_UTF8)
- if has_bom:
- line1 = line1[len(codecs.BOM_UTF8):]
-
- m = PYTHON_MAGIC_COMMENT_re.match(line1)
- if not m:
- try:
- import parser
- parser.suite(line1)
- except (ImportError, SyntaxError):
- # Either it's a real syntax error, in which case the source is
- # not valid python source, or line2 is a continuation of line1,
- # in which case we don't want to scan line2 for a magic
- # comment.
- pass
- else:
- line2 = fp.readline()
- m = PYTHON_MAGIC_COMMENT_re.match(line2)
-
- if has_bom:
- if m:
- raise SyntaxError(
- "python refuses to compile code with both a UTF8 "
- "byte-order-mark and a magic encoding comment")
- return 'utf_8'
- elif m:
- return m.group(1)
- else:
- return None
- finally:
- fp.seek(pos)
-
-def pathmatch(pattern, filename):
- """Extended pathname pattern matching.
-
- This function is similar to what is provided by the ``fnmatch`` module in
- the Python standard library, but:
-
- * can match complete (relative or absolute) path names, and not just file
- names, and
- * also supports a convenience pattern ("**") to match files at any
- directory level.
-
- Examples:
-
- >>> pathmatch('**.py', 'bar.py')
- True
- >>> pathmatch('**.py', 'foo/bar/baz.py')
- True
- >>> pathmatch('**.py', 'templates/index.html')
- False
-
- >>> pathmatch('**/templates/*.html', 'templates/index.html')
- True
- >>> pathmatch('**/templates/*.html', 'templates/foo/bar.html')
- False
-
- :param pattern: the glob pattern
- :param filename: the path name of the file to match against
- :return: `True` if the path name matches the pattern, `False` otherwise
- :rtype: `bool`
- """
- symbols = {
- '?': '[^/]',
- '?/': '[^/]/',
- '*': '[^/]+',
- '*/': '[^/]+/',
- '**/': '(?:.+/)*?',
- '**': '(?:.+/)*?[^/]+',
- }
- buf = []
- for idx, part in enumerate(re.split('([?*]+/?)', pattern)):
- if idx % 2:
- buf.append(symbols[part])
- elif part:
- buf.append(re.escape(part))
- match = re.match(''.join(buf) + '$', filename.replace(os.sep, '/'))
- return match is not None
-
-
-class TextWrapper(textwrap.TextWrapper):
- wordsep_re = re.compile(
- r'(\s+|' # any whitespace
- r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))' # em-dash
- )
-
-
-def wraptext(text, width=70, initial_indent='', subsequent_indent=''):
- """Simple wrapper around the ``textwrap.wrap`` function in the standard
- library. This version does not wrap lines on hyphens in words.
-
- :param text: the text to wrap
- :param width: the maximum line width
- :param initial_indent: string that will be prepended to the first line of
- wrapped output
- :param subsequent_indent: string that will be prepended to all lines save
- the first of wrapped output
- :return: a list of lines
- :rtype: `list`
- """
- wrapper = TextWrapper(width=width, initial_indent=initial_indent,
- subsequent_indent=subsequent_indent,
- break_long_words=False)
- return wrapper.wrap(text)
-
-
-class odict(dict):
- """Ordered dict implementation.
-
- :see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
- """
- def __init__(self, data=None):
- dict.__init__(self, data or {})
- self._keys = dict.keys(self)
-
- def __delitem__(self, key):
- dict.__delitem__(self, key)
- self._keys.remove(key)
-
- def __setitem__(self, key, item):
- dict.__setitem__(self, key, item)
- if key not in self._keys:
- self._keys.append(key)
-
- def __iter__(self):
- return iter(self._keys)
- iterkeys = __iter__
-
- def clear(self):
- dict.clear(self)
- self._keys = []
-
- def copy(self):
- d = odict()
- d.update(self)
- return d
-
- def items(self):
- return zip(self._keys, self.values())
-
- def iteritems(self):
- return izip(self._keys, self.itervalues())
-
- def keys(self):
- return self._keys[:]
-
- def pop(self, key, default=missing):
- if default is missing:
- return dict.pop(self, key)
- elif key not in self:
- return default
- self._keys.remove(key)
- return dict.pop(self, key, default)
-
- def popitem(self, key):
- self._keys.remove(key)
- return dict.popitem(key)
-
- def setdefault(self, key, failobj = None):
- dict.setdefault(self, key, failobj)
- if key not in self._keys:
- self._keys.append(key)
-
- def update(self, dict):
- for (key, val) in dict.items():
- self[key] = val
-
- def values(self):
- return map(self.get, self._keys)
-
- def itervalues(self):
- return imap(self.get, self._keys)
-
-
-try:
- relpath = os.path.relpath
-except AttributeError:
- def relpath(path, start='.'):
- """Compute the relative path to one path from another.
-
- >>> relpath('foo/bar.txt', '').replace(os.sep, '/')
- 'foo/bar.txt'
- >>> relpath('foo/bar.txt', 'foo').replace(os.sep, '/')
- 'bar.txt'
- >>> relpath('foo/bar.txt', 'baz').replace(os.sep, '/')
- '../foo/bar.txt'
-
- :return: the relative path
- :rtype: `basestring`
- """
- start_list = os.path.abspath(start).split(os.sep)
- path_list = os.path.abspath(path).split(os.sep)
-
- # Work out how much of the filepath is shared by start and path.
- i = len(os.path.commonprefix([start_list, path_list]))
-
- rel_list = [os.path.pardir] * (len(start_list) - i) + path_list[i:]
- return os.path.join(*rel_list)
-
-ZERO = timedelta(0)
-
-
-class FixedOffsetTimezone(tzinfo):
- """Fixed offset in minutes east from UTC."""
-
- def __init__(self, offset, name=None):
- self._offset = timedelta(minutes=offset)
- if name is None:
- name = 'Etc/GMT+%d' % offset
- self.zone = name
-
- def __str__(self):
- return self.zone
-
- def __repr__(self):
- return '<FixedOffset "%s" %s>' % (self.zone, self._offset)
-
- def utcoffset(self, dt):
- return self._offset
-
- def tzname(self, dt):
- return self.zone
-
- def dst(self, dt):
- return ZERO
-
-
-try:
- from pytz import UTC
-except ImportError:
- UTC = FixedOffsetTimezone(0, 'UTC')
- """`tzinfo` object for UTC (Universal Time).
-
- :type: `tzinfo`
- """
-
-STDOFFSET = timedelta(seconds = -time.timezone)
-if time.daylight:
- DSTOFFSET = timedelta(seconds = -time.altzone)
-else:
- DSTOFFSET = STDOFFSET
-
-DSTDIFF = DSTOFFSET - STDOFFSET
-
-
-class LocalTimezone(tzinfo):
-
- def utcoffset(self, dt):
- if self._isdst(dt):
- return DSTOFFSET
- else:
- return STDOFFSET
-
- def dst(self, dt):
- if self._isdst(dt):
- return DSTDIFF
- else:
- return ZERO
-
- def tzname(self, dt):
- return time.tzname[self._isdst(dt)]
-
- def _isdst(self, dt):
- tt = (dt.year, dt.month, dt.day,
- dt.hour, dt.minute, dt.second,
- dt.weekday(), 0, -1)
- stamp = time.mktime(tt)
- tt = time.localtime(stamp)
- return tt.tm_isdst > 0
-
-
-LOCALTZ = LocalTimezone()
-"""`tzinfo` object for local time-zone.
-
-:type: `tzinfo`
-"""
diff --git a/bewype/__init__.py b/bewype/__init__.py
deleted file mode 100644
index e704dd7..0000000
--- a/bewype/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# (C) Copyright 2010 Bewype <http://www.bewype.org>
-
diff --git a/bewype/flask/__init__.py b/bewype/flask/__init__.py
deleted file mode 100644
index ad6235e..0000000
--- a/bewype/flask/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# (C) Copyright 2010 Bewype <http://www.bewype.org>
-
-# bewype import
-from bewype.flask._app import app, render, run_app
diff --git a/bewype/flask/_app.py b/bewype/flask/_app.py
deleted file mode 100644
index 6c329ba..0000000
--- a/bewype/flask/_app.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# (C) Copyright 2010 Bewype <http://www.bewype.org>
-
-# flask import
-from flask import Flask, session
-
-# flask themes
-from flaskext.themes import setup_themes, render_theme_template
-
-
-def render(template, **context):
- """Theme renderer shortcut.
- """
- _theme = session.get('theme', 'default')
- return render_theme_template(_theme, template, **context)
-
-
-app = Flask(__name__)
-app.secret_key = 'abcdefg'
-# set themes path - currently hard coded for `easy to use` reason
-app.config['THEME_PATHS'] = ['themes']
-# init themes
-setup_themes(app, app_identifier='default')
-
-
-def run_app():
- app.run()
diff --git a/bewype/flask/controllers/__init__.py b/bewype/flask/controllers/__init__.py
deleted file mode 100644
index dbcaa11..0000000
--- a/bewype/flask/controllers/__init__.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# (C) Copyright 2010 Bewype <http://www.bewype.org>
-
-# python import
-import logging, os
-
-# peak import
-from peak.util.imports import importString
-
-# bewype-flask import
-from bewype.flask import app
-
-# get application logger
-logger = logging.getLogger('atoidejouer')
-
-
-def init_controllers(namespace="bewype.flask.controllers"):
- """Here we init all the controllers of the bewype namespace before running
- the app.
- """
- # controllers name space
- _controllers_package = importString(namespace)
- # controllers all paths
- _controllers_paths = _controllers_package.__path__
- # ...
- for _p in _controllers_paths:
- for _f in os.listdir(_p):
- # simple check
- if '__init__.' in _f\
- or os.path.splitext(_f)[-1] != '.py':
- continue
- # do init
- else:
- _controller_name = os.path.splitext(_f)[0]
- _c = importString("%s.%s" % (namespace, _controller_name))
- # prepare module name - custom bewype
- _m_name = 'module_%s' % _controller_name
- # module check
- if hasattr(_c, _m_name):
- # get module
- _m = getattr(_c, _m_name)
- _controller_name
- # get templates search path
- _search_path = _m.jinja_loader.searchpath
- # update jinja loader templates search path
- app.jinja_loader.searchpath.extend(_search_path)
-
diff --git a/bewype/flask/controllers/index.py b/bewype/flask/controllers/index.py
deleted file mode 100644
index cd39c96..0000000
--- a/bewype/flask/controllers/index.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# (C) Copyright 2010 Bewype <http://www.bewype.org>
-
-# bewype import
-from bewype.flask import app, render
-
-@app.route('/')
-def index():
- return render('index.html', greetings='Welcome!')
diff --git a/bewype/flask/controllers/themes.py b/bewype/flask/controllers/themes.py
deleted file mode 100644
index 6782c2b..0000000
--- a/bewype/flask/controllers/themes.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# (C) Copyright 2010 Bewype <http://www.bewype.org>
-
-# flask import
-from flask import abort, url_for, redirect, session
-
-# bewype import
-from bewype.flask import app, render
-
-# flask ext
-from flaskext import themes
-
-
-@app.route('/themes/')
-def themes_list():
- _themes = themes.get_themes_list()
- return render('themes/list.html', themes=_themes)
-
-
-@app.route('/themes/<identifier>')
-def themes_save(identifier):
- if identifier not in app.theme_manager.themes:
- abort(404)
- session['theme'] = identifier
- return redirect(url_for('themes_list'))
diff --git a/config.ini b/config.ini
index cfe8eb1..df2de55 100644
--- a/config.ini
+++ b/config.ini
@@ -1,6 +1,2 @@
[activity]
-mode = easy
-
-[mode]
-easy = image position sound
-advanced = image position sound time remove
+name = atoideweb
diff --git a/flaskext/__init__.py b/flaskext/__init__.py
deleted file mode 100644
index de40ea7..0000000
--- a/flaskext/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__import__('pkg_resources').declare_namespace(__name__)
diff --git a/flaskext/themes.py b/flaskext/themes.py
deleted file mode 100644
index 65d2e28..0000000
--- a/flaskext/themes.py
+++ /dev/null
@@ -1,443 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-flaskext.themes
-===============
-This provides infrastructure for theming support in your Flask applications.
-It takes care of:
-
-- Loading themes
-- Rendering their templates
-- Serving their static media
-- Letting themes reference their templates and static media
-
-:copyright: 2010 Matthew "LeafStorm" Frazier
-:license: MIT/X11, see LICENSE for details
-"""
-from __future__ import with_statement
-import itertools
-import os
-import os.path
-import re
-from flask import (Module, send_from_directory, render_template, json,
- _request_ctx_stack, abort, url_for)
-from jinja2 import contextfunction, Undefined
-from jinja2.loaders import FileSystemLoader, BaseLoader, TemplateNotFound
-from operator import attrgetter
-from werkzeug import cached_property
-
-DOCTYPES = 'html4 html5 xhtml'.split()
-IDENTIFIER = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
-
-containable = lambda i: i if hasattr(i, '__contains__') else tuple(i)
-
-def starchain(i):
- return itertools.chain(*i)
-
-
-class Theme(object):
- """
- This contains a theme's metadata.
-
- :param path: The path to the theme directory.
- """
- def __init__(self, path):
- #: The theme's root path. All the files in the theme are under this
- #: path.
- self.path = os.path.abspath(path)
-
- with open(os.path.join(self.path, 'info.json')) as fd:
- self.info = i = json.load(fd)
-
- #: The theme's name, as given in info.json. This is the human
- #: readable name.
- self.name = i['name']
-
- #: The application identifier given in the theme's info.json. Your
- #: application will probably want to validate it.
- self.application = i['application']
-
- #: The theme's identifier. This is an actual Python identifier,
- #: and in most situations should match the name of the directory the
- #: theme is in.
- self.identifier = i['identifier']
-
- #: The human readable description. This is the default (English)
- #: version.
- self.description = i.get('description')
-
- #: This is a dictionary of localized versions of the description.
- #: The language codes are all lowercase, and the ``en`` key is
- #: preloaded with the base description.
- self.localized_desc = dict(
- (k.split('_', 1)[1].lower(), v) for k, v in i.items()
- if k.startswith('description_')
- )
- self.localized_desc.setdefault('en', self.description)
-
- #: The author's name, as given in info.json. This may or may not
- #: include their email, so it's best just to display it as-is.
- self.author = i['author']
-
- #: A short phrase describing the license, like "GPL", "BSD", "Public
- #: Domain", or "Creative Commons BY-SA 3.0".
- self.license = i.get('license')
-
- #: A URL pointing to the license text online.
- self.license_url = i.get('license_url')
-
- #: The URL to the theme's or author's Web site.
- self.website = i.get('website')
-
- #: The theme's preview image, within the static folder.
- self.preview = i.get('preview')
-
- #: The theme's doctype. This can be ``html4``, ``html5``, or ``xhtml``
- #: with html5 being the default if not specified.
- self.doctype = i.get('doctype', 'html5')
-
- #: Any additional options. These are entirely application-specific,
- #: and may determine other aspects of the application's behavior.
- self.options = i.get('options', {})
-
- @cached_property
- def static_path(self):
- """
- The absolute path to the theme's static files directory.
- """
- return os.path.join(self.path, 'static')
-
- @cached_property
- def templates_path(self):
- """
- The absolute path to the theme's templates directory.
- """
- return os.path.join(self.path, 'templates')
-
- @cached_property
- def license_text(self):
- """
- The contents of the theme's license.txt file, if it exists. This is
- used to display the full license text if necessary. (It is `None` if
- there was not a license.txt.)
- """
- lt_path = os.path.join(self.path, 'license.txt')
- if os.path.exists(lt_path):
- with open(lt_path) as fd:
- return fd.read()
- else:
- return None
-
- @cached_property
- def jinja_loader(self):
- """
- This is a Jinja2 template loader that loads templates from the theme's
- ``templates`` directory.
- """
- return FileSystemLoader(self.templates_path)
-
-
-### theme loaders
-
-def list_folders(path):
- """
- This is a helper function that only returns the directories in a given
- folder.
-
- :param path: The path to list directories in.
- """
- return (name for name in os.listdir(path)
- if os.path.isdir(os.path.join(path, name)))
-
-
-def load_themes_from(path):
- """
- This is used by the default loaders. You give it a path, and it will find
- valid themes and yield them one by one.
-
- :param path: The path to search for themes in.
- """
- for basename in (b for b in list_folders(path) if IDENTIFIER.match(b)):
- try:
- t = Theme(os.path.join(path, basename))
- except:
- pass
- else:
- if t.identifier == basename:
- yield t
-
-
-def packaged_themes_loader(app):
- """
- This theme will find themes that are shipped with the application. It will
- look in the application's root path for a ``themes`` directory - for
- example, the ``someapp`` package can ship themes in the directory
- ``someapp/themes/``.
- """
- themes_path = os.path.join(app.root_path, 'themes')
- if os.path.exists(themes_path):
- return load_themes_from(themes_path)
- else:
- return ()
-
-
-def theme_paths_loader(app):
- """
- This checks the app's `THEME_PATHS` configuration variable to find
- directories that contain themes. The theme's identifier must match the
- name of its directory.
- """
- theme_paths = app.config.get('THEME_PATHS', ())
- if isinstance(theme_paths, basestring):
- theme_paths = [p.strip() for p in theme_paths.split(';')]
- return starchain(
- load_themes_from(path) for path in theme_paths
- )
-
-
-class ThemeManager(object):
- """
- This is responsible for loading and storing all the themes for an
- application. Calling `refresh` will cause it to invoke all of the theme
- loaders.
-
- A theme loader is simply a callable that takes an app and returns an
- iterable of `Theme` instances. You can implement your own loaders if your
- app has another way to load themes.
-
- :param app: The app to bind to. (Each instance is only usable for one
- app.)
- :param app_identifier: The value that the info.json's `application` key
- is required to have. If you require a more complex
- check, you can subclass and override the
- `valid_app_id` method.
- :param loaders: An iterable of loaders to use. The defaults are
- `packaged_themes_loader` and `theme_paths_loader`, in that
- order.
- """
- def __init__(self, app, app_identifier, loaders=None):
- self.bind_app(app)
- self.app_identifier = app_identifier
-
- self._themes = None
-
- #: This is a list of the loaders that will be used to load the themes.
- self.loaders = []
- if loaders:
- self.loaders.extend(loaders)
- else:
- self.loaders.extend((packaged_themes_loader, theme_paths_loader))
-
- @property
- def themes(self):
- """
- This is a dictionary of all the themes that have been loaded. The keys
- are the identifiers and the values are `Theme` objects.
- """
- if self._themes is None:
- self.refresh()
- return self._themes
-
- def list_themes(self):
- """
- This yields all the `Theme` objects, in sorted order.
- """
- return sorted(self.themes.itervalues(), key=attrgetter('identifier'))
-
- def bind_app(self, app):
- """
- If an app wasn't bound when the manager was created, this will bind
- it. The app must be bound for the loaders to work.
-
- :param app: A `~flask.Flask` instance.
- """
- self.app = app
- app.theme_manager = self
-
- def valid_app_id(self, app_identifier):
- """
- This checks whether the application identifier given will work with
- this application. The default implementation checks whether the given
- identifier matches the one given at initialization.
-
- :param app_identifier: The application identifier to check.
- """
- return self.app_identifier == app_identifier
-
- def refresh(self):
- """
- This loads all of the themes into the `themes` dictionary. The loaders
- are invoked in the order they are given, so later themes will override
- earlier ones. Any invalid themes found (for example, if the
- application identifier is incorrect) will be skipped.
- """
- self._themes = themes = {}
- for theme in starchain(ldr(self.app) for ldr in self.loaders):
- if self.valid_app_id(theme.application):
- self.themes[theme.identifier] = theme
-
-
-def get_theme(ident):
- """
- This gets the theme with the given identifier from the current app's
- theme manager.
-
- :param ident: The theme identifier.
- """
- ctx = _request_ctx_stack.top
- return ctx.app.theme_manager.themes[ident]
-
-
-def get_themes_list():
- """
- This returns a list of all the themes in the current app's theme manager,
- sorted by identifier.
- """
- ctx = _request_ctx_stack.top
- return list(ctx.app.theme_manager.list_themes())
-
-
-### theme template loader
-
-class ThemeTemplateLoader(BaseLoader):
- """
- This is a template loader that loads templates from the current app's
- loaded themes.
- """
- def get_source(self, environment, template):
- try:
- themename, templatename = template.split('/', 1)
- ctx = _request_ctx_stack.top
- theme = ctx.app.theme_manager.themes[themename]
- except (ValueError, KeyError):
- raise TemplateNotFound(template)
- try:
- return theme.jinja_loader.get_source(environment, templatename)
- except TemplateNotFound:
- raise TemplateNotFound(template)
-
- def list_templates(self):
- res = []
- ctx = _request_ctx_stack.top
- for ident, theme in ctx.app.theme_manager.themes.iteritems():
- res.extend('%s/%s' % (ident, t)
- for t in theme.jinja_loader.list_templates())
- return res
-
-
-def template_exists(templatename):
- ctx = _request_ctx_stack.top
- return templatename in containable(ctx.app.jinja_env.list_templates())
-
-
-### theme functionality
-
-
-themes_mod = Module(__name__, name='_themes', url_prefix='/_themes')
-themes_mod.jinja_loader # prevent any of the property's methods from
- # taking effect
-themes_mod.jinja_loader = ThemeTemplateLoader()
-
-
-@themes_mod.route('/<themeid>/<path:filename>')
-def static(themeid, filename):
- try:
- ctx = _request_ctx_stack.top
- theme = ctx.app.theme_manager.themes[themeid]
- except KeyError:
- abort(404)
- return send_from_directory(theme.static_path, filename)
-
-
-def setup_themes(app, loaders=None, app_identifier=None,
- manager_cls=ThemeManager, theme_url_prefix='/_themes'):
- """
- This sets up the theme infrastructure by adding a `ThemeManager` to the
- given app and registering the module containing the views and templates
- needed.
-
- :param app: The `~flask.Flask` instance to set up themes for.
- :param loaders: An iterable of loaders to use. It defaults to
- `packaged_themes_loader` and `theme_paths_loader`.
- :param app_identifier: The application identifier to use. If not given,
- it defaults to the app's import name.
- :param manager_cls: If you need a custom manager class, you can pass it
- in here.
- :param theme_url_prefix: The prefix to use for the URLs on the themes
- module. (Defaults to ``/_themes``.)
- """
- if app_identifier is None:
- app_identifier = app.import_name
- manager = manager_cls(app, app_identifier, loaders=loaders)
- app.jinja_env.globals['theme'] = global_theme_template
- app.jinja_env.globals['theme_static'] = global_theme_static
- app.register_module(themes_mod, url_prefix=theme_url_prefix)
-
-
-def active_theme(ctx):
- if '_theme' in ctx:
- return ctx['_theme']
- elif ctx.name.startswith('_themes/'):
- return ctx.name[8:].split('/', 1)[0]
- else:
- raise RuntimeError("Could not find the active theme")
-
-
-
-@contextfunction
-def global_theme_template(ctx, templatename, fallback=True):
- theme = active_theme(ctx)
- templatepath = '_themes/%s/%s' % (theme, templatename)
- if (not fallback) or template_exists(templatepath):
- return templatepath
- else:
- return templatename
-
-
-@contextfunction
-def global_theme_static(ctx, filename, external=False):
- theme = active_theme(ctx)
- return static_file_url(theme, filename, external)
-
-
-def static_file_url(theme, filename, external=False):
- """
- This is a shortcut for getting the URL of a static file in a theme.
-
- :param theme: A `Theme` instance or identifier.
- :param filename: The name of the file.
- :param external: Whether the link should be external or not. Defaults to
- `False`.
- """
- if isinstance(theme, Theme):
- theme = theme.identifier
- return url_for('_themes.static', themeid=theme, filename=filename,
- _external=external)
-
-
-def render_theme_template(theme, template_name, _fallback=True, **context):
- """
- This renders a template from the given theme. For example::
-
- return render_theme_template(g.user.theme, 'index.html', posts=posts)
-
- If `_fallback` is True and the themplate does not exist within the theme,
- it will fall back on trying to render the template using the application's
- normal templates. (The "active theme" will still be set, though, so you
- can try to extend or include other templates from the theme.)
-
- :param theme: Either the identifier of the theme to use, or an actual
- `Theme` instance.
- :param template_name: The name of the template to render.
- :param _fallback: Whether to fall back to the default
- """
- if isinstance(theme, Theme):
- theme = theme.identifier
- context['_theme'] = theme
- try:
- return render_template('_themes/%s/%s' % (theme, template_name),
- **context)
- except TemplateNotFound:
- if _fallback:
- return render_template(template_name, **context)
- else:
- raise
diff --git a/lib/__init__.py b/lib/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/lib/__init__.py
+++ /dev/null
diff --git a/lib/euclid.py b/lib/euclid.py
deleted file mode 100644
index b3834d6..0000000
--- a/lib/euclid.py
+++ /dev/null
@@ -1,516 +0,0 @@
-#!/usr/bin/env python
-#
-# euclid graphics maths module
-#
-# Copyright (c) 2006 Alex Holkner
-# Alex.Holkner@mail.google.com
-#
-# This library is free software; you can redistribute it and/or modify it
-# under the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation; either version 2.1 of the License, or (at your
-# option) any later version.
-#
-# This library is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
-# for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with this library; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-
-'''euclid graphics maths module
-
-Documentation and tests are included in the file "euclid.txt", or online
-at http://code.google.com/p/pyeuclid
-'''
-
-__docformat__ = 'restructuredtext'
-__version__ = '$Id$'
-__revision__ = '$Revision$'
-
-import math
-import operator
-import types
-
-
-
-class Vector2(object):
- __slots__ = ['x', 'y']
-
- def __init__(self, x=0, y=0):
- self.x = x
- self.y = y
-
- def __copy__(self):
- return self.__class__(self.x, self.y)
-
- copy = __copy__
-
- def __repr__(self):
- return 'Vector2(%.2f, %.2f)' % (self.x, self.y)
-
- def __eq__(self, other):
- if not other: return False
-
- if isinstance(other, Vector2):
- return self.x == other.x and \
- self.y == other.y
- else:
- if hasattr(other, '__len__') and len(other) == 2:
- return self.x == other[0] and \
- self.y == other[1]
- else:
- return False
-
- def __neq__(self, other):
- return not self.__eq__(other)
-
- def __nonzero__(self):
- return self.x != 0 or self.y != 0
-
- def __len__(self):
- return 2
-
- def __getitem__(self, key):
- return (self.x, self.y)[key]
-
- def __setitem__(self, key, value):
- l = [self.x, self.y]
- l[key] = value
- self.x, self.y = l
-
- def __iter__(self):
- return iter((self.x, self.y))
-
- def __getattr__(self, name):
- try:
- return tuple([(self.x, self.y)['xy'.index(c)] \
- for c in name])
- except ValueError:
- raise AttributeError, name
-
- def __add__(self, other):
- return Vector2(self.x + other.x, self.y + other.y)
-
- __radd__ = __add__
-
- def __iadd__(self, other):
- self.x += other.x
- self.y += other.y
- return self
-
- def __sub__(self, other):
- return Vector2(self.x - other.x, self.y - other.y)
-
- def __rsub__(self, other):
- return Vector2(other.x - self.x, other.y - self.y)
-
- def __mul__(self, other):
- return Vector2(self.x * other, self.y * other)
-
- __rmul__ = __mul__
-
- def __imul__(self, other):
- self.x *= other
- self.y *= other
- return self
-
- def __div__(self, other):
- return Vector2(operator.div(self.x, other),
- operator.div(self.y, other))
-
-
- def __rdiv__(self, other):
- return Vector2(operator.div(other, self.x),
- operator.div(other, self.y))
-
- def __floordiv__(self, other):
- return Vector2(operator.floordiv(self.x, other),
- operator.floordiv(self.y, other))
-
-
- def __rfloordiv__(self, other):
- return Vector2(operator.floordiv(other, self.x),
- operator.floordiv(other, self.y))
-
- def __truediv__(self, other):
- return Vector2(operator.truediv(self.x, other),
- operator.truediv(self.y, other))
-
- def __rtruediv__(self, other):
- return Vector2(operator.truediv(other, self.x),
- operator.truediv(other, self.y))
-
- def __neg__(self):
- return Vector2(-self.x, -self.y)
-
- __pos__ = __copy__
-
- def __abs__(self):
- return math.sqrt(self.x * self.x + self.y * self.y)
-
- magnitude = __abs__
-
- def magnitude_squared(self):
- return self.x * self.x + self.y * self.y
-
- def normalize(self):
- d = self.magnitude()
- if d:
- self.x /= d
- self.y /= d
- return self
-
- def normalized(self):
- d = self.magnitude()
- if d:
- return Vector2(self.x / d, self.y / d)
- return self.copy()
-
- def dot(self, other):
- assert isinstance(other, Vector2)
- return self.x * other.x + \
- self.y * other.y
-
- def cross(self):
- return Vector2(self.y, -self.x)
-
- def product(self, v2):
- # product of our vector and the other vector's perpendicular
- return self.x * v2.y - self.y * v2.x
-
- def reflect(self, normal):
- # assume normal is normalized
- assert isinstance(normal, Vector2)
- d = 2 * (self.x * normal.x + self.y * normal.y)
- return Vector2(self.x - d * normal.x,
- self.y - d * normal.y)
-
- def limit(self, max_magnitude):
- if self.magnitude() > max_magnitude:
- self.normalize()
- self *= max_magnitude
-
- def heading(self):
- return math.atan2(self.y, self.x)
-
- def angle(self, other):
- """angle between this and the other vector in radians"""
- if self == -other: # same vector facing the opposite way will kill acos on float precision
- return math.pi
-
- return math.acos(self.normalized().dot(other.normalized()))
-
-
-# Geometry
-# Much maths thanks to Paul Bourke, http://astronomy.swin.edu.au/~pbourke
-# ---------------------------------------------------------------------------
-
-class Geometry(object):
- def _connect_unimplemented(self, other):
- raise AttributeError, 'Cannot connect %s to %s' % \
- (self.__class__, other.__class__)
-
- def _intersect_unimplemented(self, other):
- raise AttributeError, 'Cannot intersect %s and %s' % \
- (self.__class__, other.__class__)
-
- _intersect_point2 = _intersect_unimplemented
- _intersect_line2 = _intersect_unimplemented
- _intersect_circle = _intersect_unimplemented
- _connect_point2 = _connect_unimplemented
- _connect_line2 = _connect_unimplemented
- _connect_circle = _connect_unimplemented
-
-
- def intersect(self, other):
- raise NotImplementedError
-
- def connect(self, other):
- raise NotImplementedError
-
- def distance(self, other):
- c = self.connect(other)
- if c:
- return c.length
- return 0.0
-
-def _intersect_point2_circle(P, C):
- return (P - C.c).magnitude_squared() <= C.r * C.r
-
-def _intersect_line2_line2(A, B):
- d = B.v.y * A.v.x - B.v.x * A.v.y
- if d == 0:
- return None
-
- dy = A.p.y - B.p.y
- dx = A.p.x - B.p.x
- ua = (B.v.x * dy - B.v.y * dx) / d
- if not A._u_in(ua):
- return None
- ub = (A.v.x * dy - A.v.y * dx) / d
- if not B._u_in(ub):
- return None
-
- return Point2(A.p.x + ua * A.v.x,
- A.p.y + ua * A.v.y)
-
-def _intersect_line2_circle(L, C):
- a = L.v.magnitude_squared()
- b = 2 * (L.v.x * (L.p.x - C.c.x) + \
- L.v.y * (L.p.y - C.c.y))
- c = C.c.magnitude_squared() + \
- L.p.magnitude_squared() - \
- 2 * C.c.dot(L.p) - \
- C.r * C.r
- det = b * b - 4 * a * c
- if det < 0:
- return None
- sq = math.sqrt(det)
- u1 = (-b + sq) / (2 * a)
- u2 = (-b - sq) / (2 * a)
- if not L._u_in(u1):
- u1 = max(min(u1, 1.0), 0.0)
- if not L._u_in(u2):
- u2 = max(min(u2, 1.0), 0.0)
-
- # Tangent
- if u1 == u2:
- return Point2(L.p.x + u1 * L.v.x,
- L.p.y + u1 * L.v.y)
-
- return LineSegment2(Point2(L.p.x + u1 * L.v.x,
- L.p.y + u1 * L.v.y),
- Point2(L.p.x + u2 * L.v.x,
- L.p.y + u2 * L.v.y))
-
-def _connect_point2_line2(P, L):
- d = L.v.magnitude_squared()
- assert d != 0
- u = ((P.x - L.p.x) * L.v.x + \
- (P.y - L.p.y) * L.v.y) / d
- if not L._u_in(u):
- u = max(min(u, 1.0), 0.0)
- return LineSegment2(P,
- Point2(L.p.x + u * L.v.x,
- L.p.y + u * L.v.y))
-
-def _connect_point2_circle(P, C):
- v = P - C.c
- v.normalize()
- v *= C.r
- return LineSegment2(P, Point2(C.c.x + v.x, C.c.y + v.y))
-
-def _connect_line2_line2(A, B):
- d = B.v.y * A.v.x - B.v.x * A.v.y
- if d == 0:
- # Parallel, connect an endpoint with a line
- if isinstance(B, Ray2) or isinstance(B, LineSegment2):
- p1, p2 = _connect_point2_line2(B.p, A)
- return p2, p1
- # No endpoint (or endpoint is on A), possibly choose arbitrary point
- # on line.
- return _connect_point2_line2(A.p, B)
-
- dy = A.p.y - B.p.y
- dx = A.p.x - B.p.x
- ua = (B.v.x * dy - B.v.y * dx) / d
- if not A._u_in(ua):
- ua = max(min(ua, 1.0), 0.0)
- ub = (A.v.x * dy - A.v.y * dx) / d
- if not B._u_in(ub):
- ub = max(min(ub, 1.0), 0.0)
-
- return LineSegment2(Point2(A.p.x + ua * A.v.x, A.p.y + ua * A.v.y),
- Point2(B.p.x + ub * B.v.x, B.p.y + ub * B.v.y))
-
-def _connect_circle_line2(C, L):
- d = L.v.magnitude_squared()
- assert d != 0
- u = ((C.c.x - L.p.x) * L.v.x + (C.c.y - L.p.y) * L.v.y) / d
- if not L._u_in(u):
- u = max(min(u, 1.0), 0.0)
- point = Point2(L.p.x + u * L.v.x, L.p.y + u * L.v.y)
- v = (point - C.c)
- v.normalize()
- v *= C.r
- return LineSegment2(Point2(C.c.x + v.x, C.c.y + v.y), point)
-
-def _connect_circle_circle(A, B):
- v = B.c - A.c
- v.normalize()
- return LineSegment2(Point2(A.c.x + v.x * A.r, A.c.y + v.y * A.r),
- Point2(B.c.x - v.x * B.r, B.c.y - v.y * B.r))
-
-
-class Point2(Vector2, Geometry):
- def __repr__(self):
- return 'Point2(%.2f, %.2f)' % (self.x, self.y)
-
- def intersect(self, other):
- return other._intersect_point2(self)
-
- def _intersect_circle(self, other):
- return _intersect_point2_circle(self, other)
-
- def connect(self, other):
- return other._connect_point2(self)
-
- def _connect_point2(self, other):
- return LineSegment2(other, self)
-
- def _connect_line2(self, other):
- c = _connect_point2_line2(self, other)
- if c:
- return c._swap()
-
- def _connect_circle(self, other):
- c = _connect_point2_circle(self, other)
- if c:
- return c._swap()
-
-class Line2(Geometry):
- __slots__ = ['p', 'v']
-
- def __init__(self, *args):
- if len(args) == 3:
- assert isinstance(args[0], Point2) and \
- isinstance(args[1], Vector2) and \
- type(args[2]) == float
- self.p = args[0].copy()
- self.v = args[1] * args[2] / abs(args[1])
- elif len(args) == 2:
- if isinstance(args[0], Point2) and isinstance(args[1], Point2):
- self.p = args[0].copy()
- self.v = args[1] - args[0]
- elif isinstance(args[0], Point2) and isinstance(args[1], Vector2):
- self.p = args[0].copy()
- self.v = args[1].copy()
- else:
- raise AttributeError, '%r' % (args,)
- elif len(args) == 1:
- if isinstance(args[0], Line2):
- self.p = args[0].p.copy()
- self.v = args[0].v.copy()
- else:
- raise AttributeError, '%r' % (args,)
- else:
- raise AttributeError, '%r' % (args,)
-
- if not self.v:
- raise AttributeError, 'Line has zero-length vector'
-
- def __copy__(self):
- return self.__class__(self.p, self.v)
-
- copy = __copy__
-
- def __repr__(self):
- return 'Line2(<%.2f, %.2f> + u<%.2f, %.2f>)' % \
- (self.p.x, self.p.y, self.v.x, self.v.y)
-
- p1 = property(lambda self: self.p)
- p2 = property(lambda self: Point2(self.p.x + self.v.x,
- self.p.y + self.v.y))
-
- def _apply_transform(self, t):
- self.p = t * self.p
- self.v = t * self.v
-
- def _u_in(self, u):
- return True
-
- def intersect(self, other):
- return other._intersect_line2(self)
-
- def _intersect_line2(self, other):
- return _intersect_line2_line2(self, other)
-
- def _intersect_circle(self, other):
- return _intersect_line2_circle(self, other)
-
- def connect(self, other):
- return other._connect_line2(self)
-
- def _connect_point2(self, other):
- return _connect_point2_line2(other, self)
-
- def _connect_line2(self, other):
- return _connect_line2_line2(other, self)
-
- def _connect_circle(self, other):
- return _connect_circle_line2(other, self)
-
-class Ray2(Line2):
- def __repr__(self):
- return 'Ray2(<%.2f, %.2f> + u<%.2f, %.2f>)' % \
- (self.p.x, self.p.y, self.v.x, self.v.y)
-
- def _u_in(self, u):
- return u >= 0.0
-
-class LineSegment2(Line2):
- def __repr__(self):
- return 'LineSegment2(<%.2f, %.2f> to <%.2f, %.2f>)' % \
- (self.p.x, self.p.y, self.p.x + self.v.x, self.p.y + self.v.y)
-
- def _u_in(self, u):
- return u >= 0.0 and u <= 1.0
-
- def __abs__(self):
- return abs(self.v)
-
- def magnitude_squared(self):
- return self.v.magnitude_squared()
-
- def _swap(self):
- # used by connect methods to switch order of points
- self.p = self.p2
- self.v *= -1
- return self
-
- length = property(lambda self: abs(self.v))
-
-class Circle(Geometry):
- __slots__ = ['c', 'r']
-
- def __init__(self, center, radius):
- assert isinstance(center, Vector2) and type(radius) == float
- self.c = center.copy()
- self.r = radius
-
- def __copy__(self):
- return self.__class__(self.c, self.r)
-
- copy = __copy__
-
- def __repr__(self):
- return 'Circle(<%.2f, %.2f>, radius=%.2f)' % \
- (self.c.x, self.c.y, self.r)
-
- def _apply_transform(self, t):
- self.c = t * self.c
-
- def intersect(self, other):
- return other._intersect_circle(self)
-
- def _intersect_point2(self, other):
- return _intersect_point2_circle(other, self)
-
- def _intersect_line2(self, other):
- return _intersect_line2_circle(other, self)
-
- def connect(self, other):
- return other._connect_circle(self)
-
- def _connect_point2(self, other):
- return _connect_point2_circle(other, self)
-
- def _connect_line2(self, other):
- c = _connect_circle_line2(self, other)
- if c:
- return c._swap()
-
- def _connect_circle(self, other):
- return _connect_circle_circle(other, self)
diff --git a/flask/__init__.py b/lib/flask/__init__.py
index ee8508b..ee8508b 100644
--- a/flask/__init__.py
+++ b/lib/flask/__init__.py
diff --git a/flask/app.py b/lib/flask/app.py
index 448df8f..448df8f 100644
--- a/flask/app.py
+++ b/lib/flask/app.py
diff --git a/flask/config.py b/lib/flask/config.py
index aa65f46..aa65f46 100644
--- a/flask/config.py
+++ b/lib/flask/config.py
diff --git a/flask/ctx.py b/lib/flask/ctx.py
index 1b17086..1b17086 100644
--- a/flask/ctx.py
+++ b/lib/flask/ctx.py
diff --git a/flask/globals.py b/lib/flask/globals.py
index 8471410..8471410 100644
--- a/flask/globals.py
+++ b/lib/flask/globals.py
diff --git a/flask/helpers.py b/lib/flask/helpers.py
index 9d64c19..9d64c19 100644
--- a/flask/helpers.py
+++ b/lib/flask/helpers.py
diff --git a/flask/logging.py b/lib/flask/logging.py
index 29caadc..29caadc 100644
--- a/flask/logging.py
+++ b/lib/flask/logging.py
diff --git a/flask/module.py b/lib/flask/module.py
index 4e719a4..4e719a4 100644
--- a/flask/module.py
+++ b/lib/flask/module.py
diff --git a/flask/session.py b/lib/flask/session.py
index df2d877..df2d877 100644
--- a/flask/session.py
+++ b/lib/flask/session.py
diff --git a/flask/signals.py b/lib/flask/signals.py
index 22447c7..22447c7 100644
--- a/flask/signals.py
+++ b/lib/flask/signals.py
diff --git a/flask/templating.py b/lib/flask/templating.py
index 4db03b7..4db03b7 100644
--- a/flask/templating.py
+++ b/lib/flask/templating.py
diff --git a/flask/testing.py b/lib/flask/testing.py
index 8423733..8423733 100644
--- a/flask/testing.py
+++ b/lib/flask/testing.py
diff --git a/flask/wrappers.py b/lib/flask/wrappers.py
index 4db1e78..4db1e78 100644
--- a/flask/wrappers.py
+++ b/lib/flask/wrappers.py
diff --git a/lib/graphics.py b/lib/graphics.py
deleted file mode 100644
index 97704f7..0000000
--- a/lib/graphics.py
+++ /dev/null
@@ -1,1681 +0,0 @@
-# - coding: utf-8 -
-
-# Copyright (C) 2008-2010 Toms Bauģis <toms.baugis at gmail.com>
-# Dual licensed under the MIT or GPL Version 2 licenses.
-# See http://github.com/tbaugis/hamster_experiments/blob/master/README.textile
-
-import math
-import datetime as dt
-import gtk, gobject
-
-import pango, cairo
-import re
-
-try:
- import pytweener
-except: # we can also live without tweener. Scene.animate will not work
- pytweener = None
-
-import colorsys
-from collections import deque
-
-if cairo.version in ('1.8.2', '1.8.4'):
- # in these two cairo versions the matrix multiplication was flipped
- # http://bugs.freedesktop.org/show_bug.cgi?id=19221
- def cairo_matrix_multiply(matrix1, matrix2):
- return matrix2 * matrix1
-else:
- def cairo_matrix_multiply(matrix1, matrix2):
- return matrix1 * matrix2
-
-
-class Colors(object):
- hex_color_normal = re.compile("#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})")
- hex_color_short = re.compile("#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])")
- hex_color_long = re.compile("#([a-fA-F0-9]{4})([a-fA-F0-9]{4})([a-fA-F0-9]{4})")
-
- def parse(self, color):
- assert color is not None
-
- #parse color into rgb values
- if isinstance(color, basestring):
- match = self.hex_color_long.match(color)
- if match:
- color = [int(color, 16) / 65535.0 for color in match.groups()]
- else:
- match = self.hex_color_normal.match(color)
- if match:
- color = [int(color, 16) / 255.0 for color in match.groups()]
- else:
- match = self.hex_color_short.match(color)
- color = [int(color + color, 16) / 255.0 for color in match.groups()]
-
- elif isinstance(color, gtk.gdk.Color):
- color = [color.red / 65535.0,
- color.green / 65535.0,
- color.blue / 65535.0]
-
- else:
- # otherwise we assume we have color components in 0..255 range
- if color[0] > 1 or color[1] > 1 or color[2] > 1:
- color = [c / 255.0 for c in color]
-
- return color
-
- def rgb(self, color):
- return [c * 255 for c in self.parse(color)]
-
- def gdk(self, color):
- c = self.parse(color)
- return gtk.gdk.Color(int(c[0] * 65535.0), int(c[1] * 65535.0), int(c[2] * 65535.0))
-
- def is_light(self, color):
- # tells you if color is dark or light, so you can up or down the
- # scale for improved contrast
- return colorsys.rgb_to_hls(*self.rgb(color))[1] > 150
-
- def darker(self, color, step):
- # returns color darker by step (where step is in range 0..255)
- hls = colorsys.rgb_to_hls(*self.rgb(color))
- return colorsys.hls_to_rgb(hls[0], hls[1] - step, hls[2])
-
- def contrast(self, color, step):
- """if color is dark, will return a lighter one, otherwise darker"""
- hls = colorsys.rgb_to_hls(*self.rgb(color))
- if self.is_light(color):
- return colorsys.hls_to_rgb(hls[0], hls[1] - step, hls[2])
- else:
- return colorsys.hls_to_rgb(hls[0], hls[1] + step, hls[2])
- # returns color darker by step (where step is in range 0..255)
-
-Colors = Colors() # this is a static class, so an instance will do
-
-
-class Graphics(object):
- """If context is given upon contruction, will perform drawing
- operations on context instantly. Otherwise queues up the drawing
- instructions and performs them in passed-in order when _draw is called
- with context.
-
- Most of instructions are mapped to cairo functions by the same name.
- Where there are differences, documenation is provided.
-
- See http://cairographics.org/documentation/pycairo/2/reference/context.html
- for detailed description of the cairo drawing functions.
- """
- def __init__(self, context = None):
- self.context = context
- self.colors = Colors # pointer to the color utilities instance
- self.extents = None # bounds of the object, only if interactive
- self.paths = None # paths for mouse hit checks
- self._last_matrix = None
- self.__new_instructions = [] # instruction set until it is converted into path-based instructions
- self.__instruction_cache = []
- self.cache_surface = None
- self._cache_layout = None
-
- def clear(self):
- """clear all instructions"""
- self.__new_instructions = []
- self.__instruction_cache = []
- self.paths = []
-
- @staticmethod
- def _stroke(context): context.stroke()
- def stroke(self, color = None, alpha = 1):
- if color or alpha < 1:self.set_color(color, alpha)
- self._add_instruction(self._stroke,)
-
- @staticmethod
- def _fill(context): context.fill()
- def fill(self, color = None, alpha = 1):
- if color or alpha < 1:self.set_color(color, alpha)
- self._add_instruction(self._fill,)
-
- @staticmethod
- def _mask(context, pattern): context.mask(pattern)
- def mask(self, pattern):
- self._add_instruction(self._mask, pattern)
-
- @staticmethod
- def _stroke_preserve(context): context.stroke_preserve()
- def stroke_preserve(self, color = None, alpha = 1):
- if color or alpha < 1:self.set_color(color, alpha)
- self._add_instruction(self._stroke_preserve,)
-
- @staticmethod
- def _fill_preserve(context): context.fill_preserve()
- def fill_preserve(self, color = None, alpha = 1):
- if color or alpha < 1:self.set_color(color, alpha)
- self._add_instruction(self._fill_preserve,)
-
- @staticmethod
- def _new_path(context): context.new_path()
- def new_path(self):
- self._add_instruction(self._new_path,)
-
- @staticmethod
- def _paint(context): context.paint()
- def paint(self):
- self._add_instruction(self._paint,)
-
- @staticmethod
- def _set_font_face(context, face): context.set_font_face(face)
- def set_font_face(self, face):
- self._add_instruction(self._set_font_face, face)
-
- @staticmethod
- def _set_font_size(context, size): context.set_font_size(size)
- def set_font_size(self, size):
- self._add_instruction(self._set_font_size, size)
-
- @staticmethod
- def _set_source(context, image):
- context.set_source(image)
- def set_source(self, image, x = 0, y = 0):
- self._add_instruction(self._set_source, image)
-
- @staticmethod
- def _set_source_surface(context, surface, x, y):
- context.set_source_surface(surface, x, y)
- def set_source_surface(self, surface, x = 0, y = 0):
- self._add_instruction(self._set_source_surface, surface, x, y)
-
- @staticmethod
- def _set_source_pixbuf(context, pixbuf, x, y):
- context.set_source_pixbuf(pixbuf, x, y)
- def set_source_pixbuf(self, pixbuf, x = 0, y = 0):
- self._add_instruction(self._set_source_pixbuf, pixbuf, x, y)
-
- @staticmethod
- def _save_context(context): context.save()
- def save_context(self):
- self._add_instruction(self._save_context)
-
- @staticmethod
- def _restore_context(context): context.restore()
- def restore_context(self):
- self._add_instruction(self._restore_context)
-
-
- @staticmethod
- def _clip(context): context.clip()
- def clip(self):
- self._add_instruction(self._clip)
-
- @staticmethod
- def _translate(context, x, y): context.translate(x, y)
- def translate(self, x, y):
- self._add_instruction(self._translate, x, y)
-
- @staticmethod
- def _rotate(context, radians): context.rotate(radians)
- def rotate(self, radians):
- self._add_instruction(self._rotate, radians)
-
- @staticmethod
- def _move_to(context, x, y): context.move_to(x, y)
- def move_to(self, x, y):
- self._add_instruction(self._move_to, x, y)
-
- @staticmethod
- def _line_to(context, x, y): context.line_to(x, y)
- def line_to(self, x, y = None):
- if y is not None:
- self._add_instruction(self._line_to, x, y)
- elif isinstance(x, list) and y is None:
- for x2, y2 in x:
- self._add_instruction(self._line_to, x2, y2)
-
-
- @staticmethod
- def _rel_line_to(context, x, y): context.rel_line_to(x, y)
- def rel_line_to(self, x, y = None):
- if x is not None and y is not None:
- self._add_instruction(self._rel_line_to, x, y)
- elif isinstance(x, list) and y is None:
- for x2, y2 in x:
- self._add_instruction(self._rel_line_to, x2, y2)
-
-
- @staticmethod
- def _curve_to(context, x, y, x2, y2, x3, y3):
- context.curve_to(x, y, x2, y2, x3, y3)
- def curve_to(self, x, y, x2, y2, x3, y3):
- """draw a curve. (x2, y2) is the middle point of the curve"""
- self._add_instruction(self._curve_to, x, y, x2, y2, x3, y3)
-
- @staticmethod
- def _close_path(context): context.close_path()
- def close_path(self):
- self._add_instruction(self._close_path,)
-
- @staticmethod
- def _set_line_width(context, width):
- context.set_line_width(width)
- @staticmethod
- def _set_dash(context, dash, dash_offset = 0):
- context.set_dash(dash, dash_offset)
-
- def set_line_style(self, width = None, dash = None, dash_offset = 0):
- """change width and dash of a line"""
- if width is not None:
- self._add_instruction(self._set_line_width, width)
-
- if dash is not None:
- self._add_instruction(self._set_dash, dash, dash_offset)
-
- def _set_color(self, context, r, g, b, a):
- if a < 1:
- context.set_source_rgba(r, g, b, a)
- else:
- context.set_source_rgb(r, g, b)
-
- def set_color(self, color, alpha = 1):
- """set active color. You can use hex colors like "#aaa", or you can use
- normalized RGB tripplets (where every value is in range 0..1), or
- you can do the same thing in range 0..65535.
- also consider skipping this operation and specify the color on stroke and
- fill.
- """
- color = self.colors.parse(color) # parse whatever we have there into a normalized triplet
- if len(color) == 4 and alpha is None:
- alpha = color[3]
- r, g, b = color[:3]
- self._add_instruction(self._set_color, r, g, b, alpha)
-
- @staticmethod
- def _arc(context, x, y, radius, start_angle, end_angle):
- context.arc(x, y, radius, start_angle, end_angle)
- def arc(self, x, y, radius, start_angle, end_angle):
- """draw arc going counter-clockwise from start_angle to end_angle"""
- self._add_instruction(self._arc, x, y, radius, start_angle, end_angle)
-
- def circle(self, x, y, radius):
- """draw circle"""
- self._add_instruction(self._arc, x, y, radius, 0, math.pi * 2)
-
- def ellipse(self, x, y, width, height, edges = None):
- """draw 'perfect' ellipse, opposed to squashed circle. works also for
- equilateral polygons"""
- # the automatic edge case is somewhat arbitrary
- steps = edges or max((32, width, height)) / 2
-
- angle = 0
- step = math.pi * 2 / steps
- points = []
- while angle < math.pi * 2:
- points.append((width / 2.0 * math.cos(angle),
- height / 2.0 * math.sin(angle)))
- angle += step
-
- min_x = min((point[0] for point in points))
- min_y = min((point[1] for point in points))
-
- self.move_to(points[0][0] - min_x + x, points[0][1] - min_y + y)
- for p_x, p_y in points:
- self.line_to(p_x - min_x + x, p_y - min_y + y)
- self.line_to(points[0][0] - min_x + x, points[0][1] - min_y + y)
-
-
- @staticmethod
- def _arc_negative(context, x, y, radius, start_angle, end_angle):
- context.arc_negative(x, y, radius, start_angle, end_angle)
- def arc_negative(self, x, y, radius, start_angle, end_angle):
- """draw arc going clockwise from start_angle to end_angle"""
- self._add_instruction(self._arc_negative, x, y, radius, start_angle, end_angle)
-
- @staticmethod
- def _rounded_rectangle(context, x, y, x2, y2, corner_radius):
- half_corner = corner_radius / 2
-
- context.move_to(x + corner_radius, y)
- context.line_to(x2 - corner_radius, y)
- context.curve_to(x2 - half_corner, y, x2, y + half_corner, x2, y + corner_radius)
- context.line_to(x2, y2 - corner_radius)
- context.curve_to(x2, y2 - half_corner, x2 - half_corner, y2, x2 - corner_radius, y2)
- context.line_to(x + corner_radius, y2)
- context.curve_to(x + half_corner, y2, x, y2 - half_corner, x, y2 - corner_radius)
- context.line_to(x, y + corner_radius)
- context.curve_to(x, y + half_corner, x + half_corner, y, x + corner_radius, y)
-
- @staticmethod
- def _rectangle(context, x, y, w, h): context.rectangle(x, y, w, h)
- def rectangle(self, x, y, width, height, corner_radius = 0):
- "draw a rectangle. if corner_radius is specified, will draw rounded corners"
- if corner_radius <= 0:
- self._add_instruction(self._rectangle, x, y, width, height)
- return
-
- # make sure that w + h are larger than 2 * corner_radius
- corner_radius = min(corner_radius, min(width, height) / 2)
- x2, y2 = x + width, y + height
- self._add_instruction(self._rounded_rectangle, x, y, x2, y2, corner_radius)
-
- def fill_area(self, x, y, width, height, color, opacity = 1):
- """fill rectangular area with specified color"""
- self.rectangle(x, y, width, height)
- self.fill(color, opacity)
-
-
- def fill_stroke(self, fill = None, stroke = None, line_width = None):
- """fill and stroke the drawn area in one go"""
- if line_width: self.set_line_style(line_width)
-
- if fill and stroke:
- self.fill_preserve(fill)
- elif fill:
- self.fill(fill)
-
- if stroke:
- self.stroke(stroke)
-
-
- @staticmethod
- def _show_layout(context, layout, text, font_desc, alignment, width, wrap, ellipsize):
- layout.set_font_description(font_desc)
- layout.set_markup(text)
- layout.set_width(int(width or -1))
- layout.set_alignment(alignment)
-
- if width > 0:
- if wrap is not None:
- layout.set_wrap(wrap)
- else:
- layout.set_ellipsize(ellipsize or pango.ELLIPSIZE_END)
-
- context.show_layout(layout)
-
- def create_layout(self, size = None):
- """utility function to create layout with the default font. Size and
- alignment parameters are shortcuts to according functions of the
- pango.Layout"""
- if not self.context:
- # TODO - this is rather sloppy as far as exception goes
- # should explain better
- raise "Can not create layout without existing context!"
-
- layout = self.context.create_layout()
- font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
- if size: font_desc.set_size(size * pango.SCALE)
-
- layout.set_font_description(font_desc)
- return layout
-
-
- def show_label(self, text, size = None, color = None):
- """display text with system's default font"""
- font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
- if color: self.set_color(color)
- if size: font_desc.set_size(size * pango.SCALE)
- self.show_layout(text, font_desc)
-
-
- @staticmethod
- def _show_text(context, text): context.show_text(text)
- def show_text(self, text):
- self._add_instruction(self._show_text, text)
-
- @staticmethod
- def _text_path(context, text): context.text_path(text)
- def text_path(self, text):
- """this function is most likely to change"""
- self._add_instruction(self._text_path, text)
-
- def show_layout(self, text, font_desc, alignment = pango.ALIGN_LEFT, width = -1, wrap = None, ellipsize = None):
- """display text. font_desc is string of pango font description
- often handier than calling this function directly, is to create
- a class:Label object
- """
- layout = self._cache_layout = self._cache_layout or gtk.gdk.CairoContext(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0))).create_layout()
- self._add_instruction(self._show_layout, layout, text, font_desc, alignment, width, wrap, ellipsize)
-
- def _add_instruction(self, function, *params):
- if self.context:
- function(self.context, *params)
- else:
- self.paths = None
- self.__new_instructions.append((function, params))
-
-
- def _draw(self, context, opacity):
- """draw accumulated instructions in context"""
-
- # if we have been moved around, we should update bounds
- fresh_draw = self.__new_instructions and len(self.__new_instructions) > 0
- if fresh_draw: #new stuff!
- self.paths = []
- self.__instruction_cache = self.__new_instructions
- self.__new_instructions = []
- else:
- if not self.__instruction_cache:
- return
-
- for instruction, args in self.__instruction_cache:
- if fresh_draw and instruction in (self._new_path, self._stroke, self._fill, self._clip):
- self.paths.append(context.copy_path())
-
- if opacity < 1 and instruction == self._set_color:
- self._set_color(context, args[0], args[1], args[2], args[3] * opacity)
- elif opacity < 1 and instruction == self._paint:
- context.paint_with_alpha(opacity)
- else:
- instruction(context, *args)
-
-
-
- def _draw_as_bitmap(self, context, opacity):
- """
- instead of caching paths, this function caches the whole drawn thing
- use cache_as_bitmap on sprite to enable this mode
- """
- matrix = context.get_matrix()
- matrix_changed = matrix != self._last_matrix
- new_instructions = len(self.__new_instructions) > 0
-
- if new_instructions or matrix_changed:
- if new_instructions:
- self.__instruction_cache = list(self.__new_instructions)
- self.__new_instructions = deque()
-
- self.paths = deque()
- self.extents = None
-
- if not self.__instruction_cache:
- # no instructions - nothing to do
- return
-
- # instructions that end path
- path_end_instructions = (self._new_path, self._clip, self._stroke, self._fill, self._stroke_preserve, self._fill_preserve)
-
- # measure the path extents so we know the size of cache surface
- # also to save some time use the context to paint for the first time
- extents = gtk.gdk.Rectangle()
- for instruction, args in self.__instruction_cache:
- if instruction in path_end_instructions:
- self.paths.append(context.copy_path())
- exts = context.path_extents()
- exts = gtk.gdk.Rectangle(int(exts[0]), int(exts[1]),
- int(exts[2]-exts[0]), int(exts[3]-exts[1]))
- if extents.width and extents.height:
- extents = extents.union(exts)
- else:
- extents = exts
-
-
- if instruction in (self._set_source_pixbuf, self._set_source_surface):
- # draw a rectangle around the pathless instructions so that the extents are correct
- pixbuf = args[0]
- x = args[1] if len(args) > 1 else 0
- y = args[2] if len(args) > 2 else 0
- self._rectangle(context, x, y, pixbuf.get_width(), pixbuf.get_height())
- self._clip()
-
- if instruction == self._paint and opacity < 1:
- context.paint_with_alpha(opacity)
- elif instruction == self._set_color and opacity < 1:
- self._set_color(context, args[0], args[1], args[2], args[3] * opacity)
- else:
- instruction(context, *args)
-
-
- # avoid re-caching if we have just moved
- just_transforms = new_instructions == False and \
- matrix and self._last_matrix \
- and all([matrix[i] == self._last_matrix[i] for i in range(4)])
-
- # TODO - this does not look awfully safe
- extents.x += matrix[4]
- extents.y += matrix[5]
- self.extents = extents
-
- if not just_transforms:
- # now draw the instructions on the caching surface
- w = int(extents.width) + 1
- h = int(extents.height) + 1
- self.cache_surface = context.get_target().create_similar(cairo.CONTENT_COLOR_ALPHA, w, h)
- ctx = gtk.gdk.CairoContext(cairo.Context(self.cache_surface))
- ctx.translate(-extents.x, -extents.y)
-
- ctx.transform(matrix)
- for instruction, args in self.__instruction_cache:
- instruction(ctx, *args)
-
- self._last_matrix = matrix
- else:
- context.save()
- context.identity_matrix()
- context.translate(self.extents.x, self.extents.y)
- context.set_source_surface(self.cache_surface)
- if opacity < 1:
- context.paint_with_alpha(opacity)
- else:
- context.paint()
- context.restore()
-
-
-
-
-
class Sprite(gtk.Object):
    """The Sprite class is a basic display list building block: a display list
    node that can display graphics and can also contain children.
    Once you have created the sprite, use Scene's add_child to add it to
    scene
    """

    # gobject signals emitted on user interaction and on every render pass
    __gsignals__ = {
        "on-mouse-over": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
        "on-mouse-out": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
        "on-mouse-down": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-up": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-click": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-drag-start": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-drag": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-drag-finish": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-render": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ())
    }

    # attribute names that only affect the transformation matrix
    transformation_attrs = set(('x', 'y', 'rotation', 'scale_x', 'scale_y', 'pivot_x', 'pivot_y'))

    # attribute names that affect visibility/compositing but not the drawn paths
    visibility_attrs = set(('opacity', 'visible', 'z_order'))

    # internal cache fields; assigning these must not trigger a re-render
    cache_attrs = set(('_stroke_context', '_matrix', '_prev_parent_matrix', '_extents', '_scene'))

    # bookkeeping fields with no influence on what gets drawn
    graphics_unrelated_attrs = set(('drag_x', 'drag_y', 'sprites', 'mouse_cursor', '_sprite_dirty'))



    def __init__(self, x = 0, y = 0,
                 opacity = 1, visible = True,
                 rotation = 0, pivot_x = 0, pivot_y = 0,
                 scale_x = 1, scale_y = 1,
                 interactive = False, draggable = False,
                 z_order = 0, mouse_cursor = None,
                 cache_as_bitmap = False, snap_to_pixel = True):
        gtk.Object.__init__(self)

        self._scene = None

        #: list of children sprites. Use :func:`add_child` to add sprites
        self.sprites = []

        #: instance of :ref:`graphics` for this sprite
        self.graphics = Graphics()

        #: boolean denoting whether the sprite responds to mouse events
        self.interactive = interactive

        #: boolean marking if sprite can be automatically dragged
        self.draggable = draggable

        #: relative x coordinate of the sprites' rotation point
        self.pivot_x = pivot_x

        #: relative y coordinates of the sprites' rotation point
        self.pivot_y = pivot_y

        #: sprite opacity
        self.opacity = opacity

        #: boolean visibility flag
        self.visible = visible

        #: pointer to parent :class:`Sprite` or :class:`Scene`
        self.parent = None

        #: sprite coordinates
        self.x, self.y = x, y

        #: rotation of the sprite in radians (use :func:`math.degrees` to convert to degrees if necessary)
        self.rotation = rotation

        #: scale X
        self.scale_x = scale_x

        #: scale Y
        self.scale_y = scale_y

        #: drawing order between siblings. The one with the highest z_order will be on top.
        self.z_order = z_order

        #: mouse-over cursor of the sprite. See :meth:`Scene.mouse_cursor`
        #: for possible values
        self.mouse_cursor = mouse_cursor

        #: x position of the cursor within mouse upon drag. change this value
        #: in on-drag-start to adjust drag point
        self.drag_x = 0

        #: y position of the cursor within mouse upon drag. change this value
        #: in on-drag-start to adjust drag point
        self.drag_y = 0

        #: Whether the sprite should be cached as a bitmap. Default: False.
        #: Generally good when you have many static sprites
        self.cache_as_bitmap = cache_as_bitmap

        #: Should the sprite coordinates always be rounded to full pixel. Default: True.
        #: Mostly this is good for performance but in some cases that can lead
        #: to rounding errors in positioning.
        self.snap_to_pixel = snap_to_pixel

        # written straight into __dict__ to dodge the __setattr__ machinery below
        self.__dict__["_sprite_dirty"] = True # flag that indicates that the graphics object of the sprite should be rendered
        self.__dict__["_sprite_moved"] = True # flag that indicates that the graphics object of the sprite should be rendered

        self._matrix = None
        self._prev_parent_matrix = None

        self._extents = None
        self._prev_extents = None
        self._stroke_context = None


    def __setattr__(self, name, val):
        """Attribute hook that drives the cache-invalidation cascade.

        Depending on which attribute changed it invalidates the local
        transformation matrix, the cached extents, the children's cached
        parent matrices and/or marks the sprite dirty, then queues a redraw.
        """
        # no-op when the value did not actually change (sentinel covers
        # the "attribute not set yet" case)
        if self.__dict__.get(name, "hamster_graphics_no_value_really") == val:
            return
        self.__dict__[name] = val

        # prev parent matrix walks downwards
        if name == '_prev_parent_matrix' and self.visible:
            self._extents = None

            # downwards recursive invalidation of parent matrix
            for sprite in self.sprites:
                sprite._prev_parent_matrix = None


        if name in self.cache_attrs or name in self.graphics_unrelated_attrs:
            return


        """all the other changes influence cache vars"""

        # either transforms or path operations - extents have to be recalculated
        self._extents = None

        if name == 'visible' and self.visible == False:
            # when transforms happen while sprite is invisible
            for sprite in self.sprites:
                sprite._prev_parent_matrix = None


        # on moves invalidate our matrix, child extent cache (as that depends on our transforms)
        # as well as our parent's child extents as we moved
        # then go into children and invalidate the parent matrix down the tree
        if name in self.transformation_attrs:
            self._matrix = None
            for sprite in self.sprites:
                sprite._prev_parent_matrix = None

        # if attribute is not in transformation nor visibility, we conclude
        # that it must be causing the sprite needs re-rendering
        if name not in self.transformation_attrs and name not in self.visibility_attrs:
            self.__dict__["_sprite_dirty"] = True

        # on parent change invalidate the matrix
        if name == 'parent':
            self._prev_parent_matrix = None
            return

        if name == 'opacity' and self.__dict__.get("cache_as_bitmap") and hasattr(self, "graphics"):
            # invalidating cache for the bitmap version as that paints opacity in the image
            self.graphics._last_matrix = None

        if name == 'z_order' and self.__dict__.get('parent'):
            self.parent._sort()


        self.redraw()


    def _sort(self):
        """sort sprites by z_order"""
        self.sprites = sorted(self.sprites, key=lambda sprite:sprite.z_order)

    def add_child(self, *sprites):
        """Add child sprite. Child will be nested within parent"""
        for sprite in sprites:
            if sprite == self:
                raise Exception("trying to add sprite to itself")
            if sprite.parent:
                # keep the sprite visually in place when re-parenting
                sprite.x, sprite.y = self.from_scene_coords(*sprite.to_scene_coords())
                sprite.parent.remove_child(sprite)

            self.sprites.append(sprite)
            sprite.parent = self
        self._sort()


    def remove_child(self, *sprites):
        """Remove the given child sprite(s), resetting their scene/parent links."""
        for sprite in sprites:
            self.sprites.remove(sprite)
            sprite._scene = None
            sprite.parent = None

    def bring_to_front(self):
        """adjusts sprite's z-order so that the sprite is on top of it's
        siblings"""
        if not self.parent:
            return
        # siblings are kept sorted by z_order, so the last one is the topmost
        self.z_order = self.parent.sprites[-1].z_order + 1

    def send_to_back(self):
        """adjusts sprite's z-order so that the sprite is behind it's
        siblings"""
        if not self.parent:
            return
        self.z_order = self.parent.sprites[0].z_order - 1


    def get_extents(self):
        """measure the extents of the sprite's graphics. if context is provided
        will use that to draw the paths"""
        if self._extents:
            return self._extents


        if self._sprite_dirty:
            # redrawing merely because we need fresh extents of the sprite
            # (a zero-sized A1 surface is used purely for path measuring)
            context = gtk.gdk.CairoContext(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0)))
            context.transform(self.get_matrix())
            self.emit("on-render")
            self.__dict__["_sprite_dirty"] = False
            self.graphics._draw(context, 1)


        if not self.graphics.paths:
            # force a fresh draw so that graphics.paths gets populated
            self.graphics._draw(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0)), 1)

        if not self.graphics.paths:
            return None

        context = gtk.gdk.CairoContext(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0)))
        context.transform(self.get_matrix())

        for path in self.graphics.paths:
            context.append_path(path)
        context.identity_matrix()

        ext = context.path_extents()
        ext = gtk.gdk.Rectangle(int(ext[0]), int(ext[1]),
                                int(ext[2] - ext[0]), int(ext[3] - ext[1]))

        if not ext.width and not ext.height:
            ext = None

        # write via __dict__ so the caches don't trigger __setattr__ invalidation
        self.__dict__['_extents'] = ext
        self.__dict__['_stroke_context'] = context

        return ext


    def check_hit(self, x, y):
        """check if the given coordinates are inside the sprite's fill or stroke
        path"""

        extents = self.get_extents()

        if not extents:
            return False

        # cheap bounding-box test first, precise in_fill test second
        if extents.x <= x <= extents.x + extents.width and extents.y <= y <= extents.y + extents.height:
            return self._stroke_context is None or self._stroke_context.in_fill(x, y)
        else:
            return False

    def get_scene(self):
        """returns class:`Scene` the sprite belongs to"""
        # NOTE(review): relies on anything that is not a Sprite being the
        # Scene itself; returns None while the sprite is detached - verify
        # callers tolerate that
        if not self._scene:
            if hasattr(self, 'parent') and self.parent:
                if isinstance(self.parent, Sprite) == False:
                    scene = self.parent
                else:
                    scene = self.parent.get_scene()

                self._scene = scene

        return self._scene

    def redraw(self):
        """queue redraw of the sprite. this function is called automatically
        whenever a sprite attribute changes. sprite changes that happen
        during scene redraw are ignored in order to avoid echoes.
        Call scene.redraw() explicitly if you need to redraw in these cases.
        """
        scene = self.get_scene()
        if scene and scene._redraw_in_progress == False and self.parent:
            self.parent.redraw()

    def animate(self, duration = None, easing = None, on_complete = None, on_update = None, **kwargs):
        """Request parent Scene to interpolate attributes using the internal tweener.
        Specify sprite's attributes that need changing.
        `duration` defaults to 0.4 seconds and `easing` to cubic in-out
        (for others see pytweener.Easing class).

        Example::
            # tween some_sprite to coordinates (50,100) using default duration and easing
            self.animate(x = 50, y = 100)
        """
        scene = self.get_scene()
        if scene:
            scene.animate(self, duration, easing, on_complete, on_update, **kwargs)
        else:
            # detached from any scene: apply target values immediately
            for key, val in kwargs.items():
                setattr(self, key, val)

    def get_local_matrix(self):
        """Return a copy of this sprite's own transformation matrix
        (translation around the pivot, rotation, scale), rebuilding the
        cached matrix if it was invalidated."""
        if not self._matrix:
            self._matrix = cairo.Matrix()

            if self.snap_to_pixel:
                self._matrix.translate(int(self.x) + int(self.pivot_x), int(self.y) + int(self.pivot_y))
            else:
                self._matrix.translate(self.x + self.pivot_x, self.y + self.pivot_y)

            if self.rotation:
                self._matrix.rotate(self.rotation)


            if self.snap_to_pixel:
                self._matrix.translate(int(-self.pivot_x), int(-self.pivot_y))
            else:
                self._matrix.translate(-self.pivot_x, -self.pivot_y)


            if self.scale_x != 1 or self.scale_y != 1:
                self._matrix.scale(self.scale_x, self.scale_y)

        # multiplying by identity yields a copy, protecting the cached matrix
        return cairo.Matrix() * self._matrix


    def get_matrix(self):
        """return sprite's current transformation matrix"""
        if self.parent:
            return cairo_matrix_multiply(self.get_local_matrix(),
                                         (self._prev_parent_matrix or self.parent.get_matrix()))
        else:
            return self.get_local_matrix()


    def from_scene_coords(self, x=0, y=0):
        """Converts x, y given in the scene coordinates to sprite's local ones
        coordinates"""
        matrix = self.get_matrix()
        matrix.invert()
        return matrix.transform_point(x, y)

    def to_scene_coords(self, x=0, y=0):
        """Converts x, y from sprite's local coordinates to scene coordinates"""
        return self.get_matrix().transform_point(x, y)

    def _draw(self, context, opacity = 1, parent_matrix = None):
        """Render this sprite and, recursively, its children onto `context`.

        `opacity` is the accumulated opacity of all ancestors and is
        multiplied with this sprite's own opacity on the way down.
        """
        if self.visible is False:
            return

        if (self._sprite_dirty): # send signal to redo the drawing when sprite is dirty
            self.__dict__['_extents'] = None
            self.emit("on-render")
            self.__dict__["_sprite_dirty"] = False


        parent_matrix = parent_matrix or cairo.Matrix()

        # cache parent matrix
        self._prev_parent_matrix = parent_matrix

        matrix = self.get_local_matrix()

        context.save()
        context.transform(matrix)


        if self.cache_as_bitmap:
            self.graphics._draw_as_bitmap(context, self.opacity * opacity)
        else:
            self.graphics._draw(context, self.opacity * opacity)

        self.__dict__['_prev_extents'] = self._extents or self.get_extents()

        for sprite in self.sprites:
            sprite._draw(context, self.opacity * opacity, cairo_matrix_multiply(matrix, parent_matrix))


        context.restore()
        context.new_path() #forget about us
-
-
class BitmapSprite(Sprite):
    """Caches given image data in a surface similar to targets, which ensures
    that drawing it will be quick and low on CPU.
    Image data can be either :class:`cairo.ImageSurface` or :class:`gtk.gdk.Pixbuf`
    """
    def __init__(self, image_data = None, cache_mode = None, **kwargs):
        Sprite.__init__(self, **kwargs)

        # natural pixel size; stays None until image_data is assigned
        self.width, self.height = None, None
        self.cache_mode = cache_mode or cairo.CONTENT_COLOR_ALPHA
        #: image data
        self.image_data = image_data

        self._surface = None

        # symmetric difference adds '_surface' to the inherited cache set so
        # assigning the cached surface does not mark the sprite dirty
        self.cache_attrs = self.cache_attrs ^ set(('_surface',))

    def __setattr__(self, name, val):
        """On top of Sprite's hook: drop the cached surface and refresh
        width/height whenever the image data changes."""
        Sprite.__setattr__(self, name, val)
        if name == 'image_data':
            self.__dict__['_surface'] = None
            if self.image_data:
                # both Pixbuf and ImageSurface expose get_width/get_height
                self.__dict__['width'] = self.image_data.get_width()
                self.__dict__['height'] = self.image_data.get_height()

    def _draw(self, context, opacity = 1, parent_matrix = None):
        """Lazily blit the image onto a target-compatible surface, record the
        blit as graphics instructions, then defer to Sprite._draw."""
        if self.image_data is None or self.width is None or self.height is None:
            return

        if not self._surface:
            # caching image on surface similar to the target
            surface = context.get_target().create_similar(self.cache_mode,
                                                          self.width,
                                                          self.height)


            local_context = gtk.gdk.CairoContext(cairo.Context(surface))
            if isinstance(self.image_data, gtk.gdk.Pixbuf):
                local_context.set_source_pixbuf(self.image_data, 0, 0)
            else:
                local_context.set_source_surface(self.image_data)
            local_context.paint()

            # add instructions with the resulting surface
            self.graphics.clear()
            self.graphics.rectangle(0, 0, self.width, self.height)
            self.graphics.clip()
            self.graphics.set_source_surface(surface)
            self.graphics.paint()
            self._surface = surface


        Sprite._draw(self, context, opacity, parent_matrix)
-
-
class Image(BitmapSprite):
    """Sprite that displays an image loaded from a file path.

    Currently only PNG files are supported.
    """
    def __init__(self, path, **kwargs):
        BitmapSprite.__init__(self, **kwargs)

        #: path to the image
        self.path = path

    def __setattr__(self, name, val):
        BitmapSprite.__setattr__(self, name, val)
        # load eagerly when the path is assigned so rendering pays no penalty
        if name == 'path':
            self.image_data = cairo.ImageSurface.create_from_png(self.path)
-
-
class Icon(BitmapSprite):
    """Sprite that displays a named icon from the default GTK icon theme."""
    def __init__(self, name, size=24, **kwargs):
        BitmapSprite.__init__(self, **kwargs)
        self.theme = gtk.icon_theme_get_default()

        #: icon name from theme
        self.name = name

        #: icon size in pixels
        self.size = size

    def __setattr__(self, name, val):
        BitmapSprite.__setattr__(self, name, val)
        if name not in ('name', 'size'):
            return
        # reload only once both lookup parameters are known
        if self.__dict__.get('name') and self.__dict__.get('size'):
            self.image_data = self.theme.load_icon(self.name, self.size, 0)
        else:
            self.image_data = None
-
-
class Label(Sprite):
    """Sprite that renders pango-markup text with a given font, size,
    alignment and optional wrapping/ellipsizing."""
    __gsignals__ = {
        # emitted whenever the label text changes
        "on-change": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
    }
    def __init__(self, text = "", size = 10, color = None,
                 alignment = pango.ALIGN_LEFT,
                 max_width = None, wrap = None, ellipsize = None,
                 **kwargs):
        Sprite.__init__(self, **kwargs)
        self.width, self.height = None, None


        # throwaway 0x0 surface context/layout used purely for text measuring
        self._test_context = gtk.gdk.CairoContext(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A8, 0, 0)))
        self._test_layout = self._test_context.create_layout()


        #: pango.FontDescription, default is the system's font
        self.font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
        self.font_desc.set_size(size * pango.SCALE)

        #: color of label either as hex string or an (r,g,b) tuple
        self.color = color

        # constrained width in pango units (pixels * pango.SCALE), or None
        self._bounds_width = None

        #: wrapping method. Can be set to pango. [WRAP_WORD, WRAP_CHAR,
        #: WRAP_WORD_CHAR]
        self.wrap = wrap

        #: Ellipsize mode. Can be set to pango. [ELLIPSIZE_NONE,
        #: ELLIPSIZE_START, ELLIPSIZE_MIDDLE, ELLIPSIZE_END]
        self.ellipsize = ellipsize

        #: alignment. one of pango.[ALIGN_LEFT, ALIGN_RIGHT, ALIGN_CENTER]
        self.alignment = alignment

        #: font size
        self.size = size

        #: maximum width of the label in pixels. if specified, the label
        #: will be wrapped or ellipsized depending on the wrap and ellipsize settings
        self.max_width = max_width

        self.__surface = None

        #: label text
        self.text = text

        # memoized text -> (width, height) results for measure()
        self._measures = {}

        self.connect("on-render", self.on_render)

        # NOTE(review): "__surface" here will not match the name-mangled
        # attribute ("_Label__surface") in __setattr__ - confirm this entry
        # is intentional
        self.cache_attrs = self.cache_attrs ^ set(("_letter_sizes", "__surface", "_ascent", "_bounds_width", "_measures"))


    def __setattr__(self, name, val):
        """On top of Sprite's hook: maintain the pango bounds width and
        re-measure the label whenever a text-affecting attribute changes."""
        if self.__dict__.get(name, "hamster_graphics_no_value_really") != val:
            # ignore width assignments that merely echo the current constraint
            if name == "width" and val and self.__dict__.get('_bounds_width') and val * pango.SCALE == self.__dict__['_bounds_width']:
                return

            Sprite.__setattr__(self, name, val)


            if name == "width":
                # setting width means consumer wants to constrain the label
                if val is None or val == -1:
                    self.__dict__['_bounds_width'] = None
                else:
                    self.__dict__['_bounds_width'] = val * pango.SCALE

            if name in ("width", "text", "size", "font_desc", "wrap", "ellipsize", "max_width"):
                self._measures = {}
                # avoid chicken and egg
                if hasattr(self, "text") and hasattr(self, "size"):
                    self.__dict__['width'], self.__dict__['height'] = self.measure(self.text)

            if name == 'text':
                self.emit('on-change')


    def measure(self, text):
        """measures given text with label's font and size.
        returns width, height and ascent. Ascent's null in case if the label
        does not have font face specified (and is thusly using pango)"""

        if text in self._measures:
            return self._measures[text]

        width, height = None, None

        context = self._test_context

        layout = self._test_layout
        layout.set_font_description(self.font_desc)
        layout.set_markup(text)

        max_width = 0
        if self.max_width:
            max_width = self.max_width * pango.SCALE

        # -1 means unconstrained width for pango
        layout.set_width(int(self._bounds_width or max_width or -1))
        layout.set_ellipsize(pango.ELLIPSIZE_NONE)

        if self.wrap is not None:
            layout.set_wrap(self.wrap)
        else:
            layout.set_ellipsize(self.ellipsize or pango.ELLIPSIZE_END)

        width, height = layout.get_pixel_size()

        self._measures[text] = width, height
        return self._measures[text]


    def on_render(self, sprite):
        """Emit the graphics instructions that draw the text, then clip to
        the label's rectangle."""
        if not self.text:
            self.graphics.clear()
            return

        self.graphics.set_color(self.color)

        rect_width = self.width

        max_width = 0
        if self.max_width:
            max_width = self.max_width * pango.SCALE

        # when max width is specified and we are told to align in center
        # do that (the pango instruction takes care of aligning within
        # the lines of the text)
        if self.alignment == pango.ALIGN_CENTER:
            self.graphics.move_to(-(self.max_width - self.width)/2, 0)

        bounds_width = max_width or self._bounds_width or -1

        self.graphics.show_layout(self.text, self.font_desc,
                                  self.alignment,
                                  bounds_width,
                                  self.wrap,
                                  self.ellipsize)

        if self._bounds_width:
            rect_width = self._bounds_width / pango.SCALE

        self.graphics.rectangle(0, 0, rect_width, self.height)
        self.graphics.clip()
-
-
-
class Rectangle(Sprite):
    """A rectangle sprite, optionally with rounded corners."""
    def __init__(self, w, h, corner_radius = 0, fill = None, stroke = None, line_width = 1, **kwargs):
        Sprite.__init__(self, **kwargs)

        #: width
        self.width = w
        #: height
        self.height = h
        #: fill color
        self.fill = fill
        #: stroke color
        self.stroke = stroke
        #: stroke line width
        self.line_width = line_width
        #: corner radius. Set bigger than 0 for rounded corners
        self.corner_radius = corner_radius

        self.connect("on-render", self.on_render)

    def on_render(self, sprite):
        """Emit the drawing instructions for the rectangle."""
        graphics = self.graphics
        graphics.set_line_style(width = self.line_width)
        graphics.rectangle(0, 0, self.width, self.height, self.corner_radius)
        graphics.fill_stroke(self.fill, self.stroke, self.line_width)
-
-
class Polygon(Sprite):
    """A closed polygon sprite drawn through a list of points."""
    def __init__(self, points, fill = None, stroke = None, line_width = 1, **kwargs):
        Sprite.__init__(self, **kwargs)

        #: list of (x,y) tuples that the line should go through. Polygon
        #: will automatically close path.
        self.points = points
        #: fill color
        self.fill = fill
        #: stroke color
        self.stroke = stroke
        #: stroke line width
        self.line_width = line_width

        self.connect("on-render", self.on_render)

    def on_render(self, sprite):
        """Emit the drawing instructions for the polygon."""
        if not self.points:
            return

        start = self.points[0]
        self.graphics.move_to(*start)
        # graphics.line_to accepts the whole point list at once
        self.graphics.line_to(self.points)
        self.graphics.close_path()
        self.graphics.fill_stroke(self.fill, self.stroke, self.line_width)
-
-
class Circle(Sprite):
    """A circle sprite; when width differs from height an ellipse is drawn."""
    def __init__(self, width, height, fill = None, stroke = None, line_width = 1, **kwargs):
        Sprite.__init__(self, **kwargs)

        #: circle width
        self.width = width
        #: circle height
        self.height = height
        #: fill color
        self.fill = fill
        #: stroke color
        self.stroke = stroke
        #: stroke line width
        self.line_width = line_width

        self.connect("on-render", self.on_render)

    def on_render(self, sprite):
        """Emit the drawing instructions for the circle/ellipse."""
        if self.width != self.height:
            self.graphics.ellipse(0, 0, self.width, self.height)
        else:
            radius = self.width / 2.0
            self.graphics.circle(radius, radius, radius)

        self.graphics.fill_stroke(self.fill, self.stroke, self.line_width)
-
-
class Scene(gtk.DrawingArea):
    """ Drawing area for displaying sprites.
    Add sprites to the Scene by calling :func:`add_child`.
    Scene is descendant of `gtk.DrawingArea <http://www.pygtk.org/docs/pygtk/class-gtkdrawingarea.html>`_
    and thus inherits all it's methods and everything.
    """

    __gsignals__ = {
        # gtk event overrides
        "expose-event": "override",
        "configure_event": "override",
        # frame lifecycle signals; payload is the cairo context of the frame
        "on-enter-frame": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, )),
        "on-finish-frame": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, )),

        # sprite-targeted interaction signals: (sprite, event)
        "on-click": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
        "on-drag": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
        "on-drag-start": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
        "on-drag-finish": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),

        # scene-level mouse signals; payload is the gtk event or sprite
        "on-mouse-move": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-down": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-up": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-over": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-out": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),

        "on-scroll": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
    }
-
    def __init__(self, interactive = True, framerate = 60,
                 background_color = None, scale = False, keep_aspect = True):
        """Set up the drawing area, optionally wiring up mouse events when
        `interactive` is True."""
        gtk.DrawingArea.__init__(self)
        if interactive:
            self.set_events(gtk.gdk.POINTER_MOTION_MASK
                            | gtk.gdk.LEAVE_NOTIFY_MASK | gtk.gdk.ENTER_NOTIFY_MASK
                            | gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK
                            | gtk.gdk.SCROLL_MASK
                            | gtk.gdk.KEY_PRESS_MASK)
            self.connect("motion_notify_event", self.__on_mouse_move)
            self.connect("enter_notify_event", self.__on_mouse_enter)
            self.connect("leave_notify_event", self.__on_mouse_leave)
            self.connect("button_press_event", self.__on_button_press)
            self.connect("button_release_event", self.__on_button_release)
            self.connect("scroll-event", self.__on_scroll)

        #: list of sprites in scene. use :func:`add_child` to add sprites
        self.sprites = []

        #: framerate of animation. This will limit how often call for
        #: redraw will be performed (that is - not more often than the framerate). It will
        #: also influence the smoothness of tweeners.
        self.framerate = framerate

        #: Scene width. Will be `None` until first expose (that is until first
        #: on-enter-frame signal below).
        self.width = None

        #: Scene height. Will be `None` until first expose (that is until first
        #: on-enter-frame signal below).
        self.height = None

        #: instance of :class:`pytweener.Tweener` that is used by
        #: :func:`animate` function, but can be also accessed directly for advanced control.
        #: stays False when the optional pytweener module is not available
        self.tweener = False
        if pytweener:
            self.tweener = pytweener.Tweener(0.4, pytweener.Easing.Cubic.ease_in_out)

        #: the :class:`Colors` class used for color parsing
        #: (NOTE(review): the class itself is assigned, not an instance)
        self.colors = Colors

        #: read only info about current framerate (frames per second)
        self.fps = 0 # inner frames per second counter

        #: Last known x position of the mouse (set on expose event)
        self.mouse_x = None

        #: Last known y position of the mouse (set on expose event)
        self.mouse_y = None

        #: Background color of the scene. Use either a string with hex color or an RGB triplet.
        self.background_color = background_color

        #: Mouse cursor appearance.
        #: Replace with your own cursor or set to False to have no cursor.
        #: None will revert back the default behavior
        self.mouse_cursor = None

        # invisible 1x1 cursor used when mouse_cursor is set to False
        blank_pixmap = gtk.gdk.Pixmap(None, 1, 1, 1)
        self._blank_cursor = gtk.gdk.Cursor(blank_pixmap, blank_pixmap, gtk.gdk.Color(), gtk.gdk.Color(), 0, 0)


        #: Minimum distance in pixels for a drag to occur
        self.drag_distance = 1

        # frame timing / interaction bookkeeping
        self._last_frame_time = None
        self._mouse_sprite = None
        self._drag_sprite = None
        self._mouse_down_sprite = None
        self.__drag_started = False
        self.__drag_start_x, self.__drag_start_y = None, None

        self._mouse_in = False
        self.__last_cursor = None

        self.__drawing_queued = False
        self._redraw_in_progress = False

        #: When specified, upon window resize the content will be scaled
        #: relative to original window size. Defaults to False.
        self.scale = scale

        #: Should the stage maintain aspect ratio upon scale if
        #: :attr:`Scene.scale` is enabled. Defaults to true.
        self.keep_aspect = keep_aspect

        # reference size captured on the first configure event, used for scaling
        self._original_width, self._original_height = None, None
-
-
-
- def add_child(self, *sprites):
- """Add one or several :class:`Sprite` objects to the scene"""
- for sprite in sprites:
- if sprite == self:
- raise Exception("trying to add sprite to itself")
- if sprite.parent:
- sprite.x, sprite.y = sprite.to_scene_coords(0, 0)
- sprite.parent.remove_child(sprite)
- self.sprites.append(sprite)
- sprite.parent = self
- self._sort()
-
- def _sort(self):
- """sort sprites by z_order"""
- self.sprites = sorted(self.sprites, key=lambda sprite:sprite.z_order)
-
-
- def remove_child(self, *sprites):
- """Remove one or several :class:`Sprite` sprites from scene """
- for sprite in sprites:
- self.sprites.remove(sprite)
- sprite._scene = None
- sprite.parent = None
-
- # these two mimic sprite functions so parent check can be avoided
- def from_scene_coords(self, x, y): return x, y
- def to_scene_coords(self, x, y): return x, y
- def get_matrix(self): return cairo.Matrix()
-
- def clear(self):
- """Remove all sprites from scene"""
- self.remove_child(*self.sprites)
-
- def animate(self, sprite, duration = None, easing = None, on_complete = None, on_update = None, **kwargs):
- """Interpolate attributes of the given object using the internal tweener
- and redrawing scene after every tweener update.
- Specify the sprite and sprite's attributes that need changing.
- `duration` defaults to 0.4 seconds and `easing` to cubic in-out
- (for others see pytweener.Easing class).
-
- Redraw is requested right after creating the animation.
- Example::
-
- # tween some_sprite to coordinates (50,100) using default duration and easing
- scene.animate(some_sprite, x = 50, y = 100)
- """
- if not self.tweener: # here we complain
- raise Exception("pytweener was not found. Include it to enable animations")
-
- tween = self.tweener.add_tween(sprite,
- duration=duration,
- easing=easing,
- on_complete=on_complete,
- on_update=on_update,
- **kwargs)
- self.redraw()
- return tween
-
-
- def redraw(self):
- """Queue redraw. The redraw will be performed not more often than
- the `framerate` allows"""
- if self.__drawing_queued == False: #if we are moving, then there is a timeout somewhere already
- self.__drawing_queued = True
- self._last_frame_time = dt.datetime.now()
- gobject.timeout_add(1000 / self.framerate, self.__redraw_loop)
-
- def __redraw_loop(self):
- """loop until there is nothing more to tween"""
- self.queue_draw() # this will trigger do_expose_event when the current events have been flushed
-
- self.__drawing_queued = self.tweener and self.tweener.has_tweens()
- return self.__drawing_queued
-
-
- def do_expose_event(self, event):
- context = self.window.cairo_create()
-
- # clip to the visible part
- context.rectangle(event.area.x, event.area.y,
- event.area.width, event.area.height)
- if self.background_color:
- color = self.colors.parse(self.background_color)
- context.set_source_rgb(*color)
- context.fill_preserve()
- context.clip()
-
- if self.scale:
- aspect_x = self.width / self._original_width
- aspect_y = self.height / self._original_height
- if self.keep_aspect:
- aspect_x = aspect_y = min(aspect_x, aspect_y)
- context.scale(aspect_x, aspect_y)
-
- self.mouse_x, self.mouse_y, mods = self.get_window().get_pointer()
-
- self._redraw_in_progress = True
-
- # update tweens
- now = dt.datetime.now()
- delta = (now - (self._last_frame_time or dt.datetime.now())).microseconds / 1000000.0
- self._last_frame_time = now
- if self.tweener:
- self.tweener.update(delta)
-
- self.fps = 1 / delta
-
-
- # start drawing
- self.emit("on-enter-frame", context)
- for sprite in self.sprites:
- sprite._draw(context)
-
- self.__check_mouse(self.mouse_x, self.mouse_y)
- self.emit("on-finish-frame", context)
- self._redraw_in_progress = False
-
-
- def do_configure_event(self, event):
- if self._original_width is None:
- self._original_width = float(event.width)
- self._original_height = float(event.height)
-
- self.width, self.height = event.width, event.height
-
-
- def all_visible_sprites(self):
- """Returns flat list of the sprite tree for simplified iteration"""
- def all_recursive(sprites):
- for sprite in sprites:
- if sprite.visible:
- yield sprite
- if sprite.sprites:
- for child in all_recursive(sprite.sprites):
- yield child
-
- return all_recursive(self.sprites)
-
-
- def get_sprite_at_position(self, x, y):
- """Returns the topmost visible interactive sprite for given coordinates"""
- over = None
-
- for sprite in self.all_visible_sprites():
- if (sprite.interactive or sprite.draggable) and sprite.check_hit(x, y):
- over = sprite
-
- return over
-
-
    def __check_mouse(self, x, y):
        """Track which sprite the pointer is over, emit on-mouse-over/out
        signals on transitions and update the window cursor accordingly."""
        if x is None or self._mouse_in == False:
            return

        cursor = gtk.gdk.ARROW # default

        if self.mouse_cursor is not None:
            cursor = self.mouse_cursor

        if self._drag_sprite:
            # while dragging, the drag sprite's cursor wins; FLEUR is the fallback
            cursor = self._drag_sprite.mouse_cursor or self.mouse_cursor or gtk.gdk.FLEUR
        else:
            #check if we have a mouse over
            over = self.get_sprite_at_position(x, y)
            if self._mouse_sprite and self._mouse_sprite != over:
                self._mouse_sprite.emit("on-mouse-out")
                self.emit("on-mouse-out", self._mouse_sprite)

            if over:
                if over.mouse_cursor is not None:
                    cursor = over.mouse_cursor

                elif self.mouse_cursor is None:
                    # resort to defaults
                    if over.draggable:
                        cursor = gtk.gdk.FLEUR
                    else:
                        cursor = gtk.gdk.HAND2

                if over != self._mouse_sprite:
                    over.emit("on-mouse-over")
                    self.emit("on-mouse-over", over)

            self._mouse_sprite = over

        # False means "hide the cursor" - swap in the invisible 1x1 cursor
        if cursor == False:
            cursor = self._blank_cursor

        # only touch the window cursor when it actually changed
        if not self.__last_cursor or cursor != self.__last_cursor:
            if isinstance(cursor, gtk.gdk.Cursor):
                self.window.set_cursor(cursor)
            else:
                self.window.set_cursor(gtk.gdk.Cursor(cursor))

            self.__last_cursor = cursor
-
-
- """ mouse events """
- def __on_mouse_move(self, area, event):
- state = event.state
-
-
- if self._mouse_down_sprite and self._mouse_down_sprite.draggable \
- and gtk.gdk.BUTTON1_MASK & event.state:
- # dragging around
- if not self.__drag_started:
- drag_started = (self.__drag_start_x is not None and \
- (self.__drag_start_x - event.x) ** 2 + \
- (self.__drag_start_y - event.y) ** 2 > self.drag_distance ** 2)
-
- if drag_started:
- self._drag_sprite = self._mouse_down_sprite
-
- self._drag_sprite.drag_x, self._drag_sprite.drag_y = self._drag_sprite.x, self._drag_sprite.y
-
- self._drag_sprite.emit("on-drag-start", event)
- self.emit("on-drag-start", self._drag_sprite, event)
-
- self.__drag_started = True
-
- if self.__drag_started:
- diff_x, diff_y = event.x - self.__drag_start_x, event.y - self.__drag_start_y
- if isinstance(self._drag_sprite.parent, Sprite):
- matrix = self._drag_sprite.parent.get_matrix()
- matrix.invert()
- diff_x, diff_y = matrix.transform_distance(diff_x, diff_y)
-
- self._drag_sprite.x, self._drag_sprite.y = self._drag_sprite.drag_x + diff_x, self._drag_sprite.drag_y + diff_y
-
- self._drag_sprite.emit("on-drag", event)
- self.emit("on-drag", self._drag_sprite, event)
-
- else:
- # avoid double mouse checks - the redraw will also check for mouse!
- if not self.__drawing_queued:
- self.__check_mouse(event.x, event.y)
-
- self.emit("on-mouse-move", event)
-
- def __on_mouse_enter(self, area, event):
- self._mouse_in = True
-
- def __on_mouse_leave(self, area, event):
- self._mouse_in = False
- if self._mouse_sprite:
- self.emit("on-mouse-out", self._mouse_sprite)
- self._mouse_sprite = None
-
-
- def __on_button_press(self, area, event):
- target = self.get_sprite_at_position(event.x, event.y)
- self.__drag_start_x, self.__drag_start_y = event.x, event.y
-
- self._mouse_down_sprite = target
-
- if target:
- target.emit("on-mouse-down", event)
- self.emit("on-mouse-down", event)
-
- def __on_button_release(self, area, event):
- target = self.get_sprite_at_position(event.x, event.y)
-
- if target:
- target.emit("on-mouse-up", event)
- self.emit("on-mouse-up", event)
-
- # trying to not emit click and drag-finish at the same time
- click = not self.__drag_started or (event.x - self.__drag_start_x) ** 2 + \
- (event.y - self.__drag_start_y) ** 2 < self.drag_distance
- if (click and self.__drag_started == False) or not self._drag_sprite:
- if target:
- target.emit("on-click", event)
-
- self.emit("on-click", event, target)
-
- if self._drag_sprite:
- self._drag_sprite.emit("on-drag-finish", event)
- self.emit("on-drag-finish", self._drag_sprite, event)
-
- self._drag_sprite.drag_x, self._drag_sprite.drag_y = None, None
- self._drag_sprite = None
- self._mouse_down_sprite = None
-
- self.__drag_started = False
- self.__drag_start_x, self__drag_start_y = None, None
-
- def __on_scroll(self, area, event):
- self.emit("on-scroll", event)
diff --git a/jinja2/__init__.py b/lib/jinja2/__init__.py
index f944e11..f944e11 100644
--- a/jinja2/__init__.py
+++ b/lib/jinja2/__init__.py
diff --git a/jinja2/_debugsupport.c b/lib/jinja2/_debugsupport.c
index e756d8e..e756d8e 100644
--- a/jinja2/_debugsupport.c
+++ b/lib/jinja2/_debugsupport.c
diff --git a/jinja2/_markupsafe/__init__.py b/lib/jinja2/_markupsafe/__init__.py
index ec7bd57..ec7bd57 100644
--- a/jinja2/_markupsafe/__init__.py
+++ b/lib/jinja2/_markupsafe/__init__.py
diff --git a/jinja2/_markupsafe/_bundle.py b/lib/jinja2/_markupsafe/_bundle.py
index e694faf..e694faf 100644
--- a/jinja2/_markupsafe/_bundle.py
+++ b/lib/jinja2/_markupsafe/_bundle.py
diff --git a/jinja2/_markupsafe/_constants.py b/lib/jinja2/_markupsafe/_constants.py
index 919bf03..919bf03 100644
--- a/jinja2/_markupsafe/_constants.py
+++ b/lib/jinja2/_markupsafe/_constants.py
diff --git a/jinja2/_markupsafe/_native.py b/lib/jinja2/_markupsafe/_native.py
index 7b95828..7b95828 100644
--- a/jinja2/_markupsafe/_native.py
+++ b/lib/jinja2/_markupsafe/_native.py
diff --git a/jinja2/_markupsafe/tests.py b/lib/jinja2/_markupsafe/tests.py
index c1ce394..c1ce394 100644
--- a/jinja2/_markupsafe/tests.py
+++ b/lib/jinja2/_markupsafe/tests.py
diff --git a/jinja2/_stringdefs.py b/lib/jinja2/_stringdefs.py
index 1161b7f..1161b7f 100644
--- a/jinja2/_stringdefs.py
+++ b/lib/jinja2/_stringdefs.py
diff --git a/jinja2/bccache.py b/lib/jinja2/bccache.py
index 1e2236c..1e2236c 100644
--- a/jinja2/bccache.py
+++ b/lib/jinja2/bccache.py
diff --git a/jinja2/compiler.py b/lib/jinja2/compiler.py
index 5764159..5764159 100644
--- a/jinja2/compiler.py
+++ b/lib/jinja2/compiler.py
diff --git a/jinja2/constants.py b/lib/jinja2/constants.py
index cab203c..cab203c 100644
--- a/jinja2/constants.py
+++ b/lib/jinja2/constants.py
diff --git a/jinja2/debug.py b/lib/jinja2/debug.py
index eb15456..eb15456 100644
--- a/jinja2/debug.py
+++ b/lib/jinja2/debug.py
diff --git a/jinja2/defaults.py b/lib/jinja2/defaults.py
index d2d4544..d2d4544 100644
--- a/jinja2/defaults.py
+++ b/lib/jinja2/defaults.py
diff --git a/jinja2/environment.py b/lib/jinja2/environment.py
index ac74a5c..ac74a5c 100644
--- a/jinja2/environment.py
+++ b/lib/jinja2/environment.py
diff --git a/jinja2/exceptions.py b/lib/jinja2/exceptions.py
index 771f6a8..771f6a8 100644
--- a/jinja2/exceptions.py
+++ b/lib/jinja2/exceptions.py
diff --git a/jinja2/ext.py b/lib/jinja2/ext.py
index ceb3895..ceb3895 100644
--- a/jinja2/ext.py
+++ b/lib/jinja2/ext.py
diff --git a/jinja2/filters.py b/lib/jinja2/filters.py
index d1848e4..d1848e4 100644
--- a/jinja2/filters.py
+++ b/lib/jinja2/filters.py
diff --git a/jinja2/lexer.py b/lib/jinja2/lexer.py
index 0d3f696..0d3f696 100644
--- a/jinja2/lexer.py
+++ b/lib/jinja2/lexer.py
diff --git a/jinja2/loaders.py b/lib/jinja2/loaders.py
index bd435e8..bd435e8 100644
--- a/jinja2/loaders.py
+++ b/lib/jinja2/loaders.py
diff --git a/jinja2/meta.py b/lib/jinja2/meta.py
index 3a779a5..3a779a5 100644
--- a/jinja2/meta.py
+++ b/lib/jinja2/meta.py
diff --git a/jinja2/nodes.py b/lib/jinja2/nodes.py
index 6446c70..6446c70 100644
--- a/jinja2/nodes.py
+++ b/lib/jinja2/nodes.py
diff --git a/jinja2/optimizer.py b/lib/jinja2/optimizer.py
index 00eab11..00eab11 100644
--- a/jinja2/optimizer.py
+++ b/lib/jinja2/optimizer.py
diff --git a/jinja2/parser.py b/lib/jinja2/parser.py
index d44229a..d44229a 100644
--- a/jinja2/parser.py
+++ b/lib/jinja2/parser.py
diff --git a/jinja2/runtime.py b/lib/jinja2/runtime.py
index 6fea3aa..6fea3aa 100644
--- a/jinja2/runtime.py
+++ b/lib/jinja2/runtime.py
diff --git a/jinja2/sandbox.py b/lib/jinja2/sandbox.py
index 7497195..7497195 100644
--- a/jinja2/sandbox.py
+++ b/lib/jinja2/sandbox.py
diff --git a/jinja2/tests.py b/lib/jinja2/tests.py
index d257eca..d257eca 100644
--- a/jinja2/tests.py
+++ b/lib/jinja2/tests.py
diff --git a/jinja2/utils.py b/lib/jinja2/utils.py
index 7b77b8e..7b77b8e 100644
--- a/jinja2/utils.py
+++ b/lib/jinja2/utils.py
diff --git a/jinja2/visitor.py b/lib/jinja2/visitor.py
index 413e7c3..413e7c3 100644
--- a/jinja2/visitor.py
+++ b/lib/jinja2/visitor.py
diff --git a/lib/png.py b/lib/png.py
deleted file mode 100755
index 5519407..0000000
--- a/lib/png.py
+++ /dev/null
@@ -1,3785 +0,0 @@
-#!/usr/bin/env python
-
-# $URL: http://pypng.googlecode.com/svn/trunk/code/png.py $
-# $Rev: 228 $
-
-# png.py - PNG encoder/decoder in pure Python
-#
-# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
-# Portions Copyright (C) 2009 David Jones <drj@pobox.com>
-# And probably portions Copyright (C) 2006 Nicko van Someren <nicko@nicko.org>
-#
-# Original concept by Johann C. Rocholl.
-#
-# LICENSE (The MIT License)
-#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation files
-# (the "Software"), to deal in the Software without restriction,
-# including without limitation the rights to use, copy, modify, merge,
-# publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so,
-# subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-#
-# Changelog (recent first):
-# 2009-03-11 David: interlaced bit depth < 8 (writing).
-# 2009-03-10 David: interlaced bit depth < 8 (reading).
-# 2009-03-04 David: Flat and Boxed pixel formats.
-# 2009-02-26 David: Palette support (writing).
-# 2009-02-23 David: Bit-depths < 8; better PNM support.
-# 2006-06-17 Nicko: Reworked into a class, faster interlacing.
-# 2006-06-17 Johann: Very simple prototype PNG decoder.
-# 2006-06-17 Nicko: Test suite with various image generators.
-# 2006-06-17 Nicko: Alpha-channel, grey-scale, 16-bit/plane support.
-# 2006-06-15 Johann: Scanline iterator interface for large input files.
-# 2006-06-09 Johann: Very simple prototype PNG encoder.
-
-# Incorporated into Bangai-O Development Tools by drj on 2009-02-11 from
-# http://trac.browsershots.org/browser/trunk/pypng/lib/png.py?rev=2885
-
-# Incorporated into pypng by drj on 2009-03-12 from
-# //depot/prj/bangaio/master/code/png.py#67
-
-
-"""
-Pure Python PNG Reader/Writer
-
-This Python module implements support for PNG images (see PNG
-specification at http://www.w3.org/TR/2003/REC-PNG-20031110/ ). It reads
-and writes PNG files with all allowable bit depths (1/2/4/8/16/24/32/48/64
-bits per pixel) and colour combinations: greyscale (1/2/4/8/16 bit); RGB,
-RGBA, LA (greyscale with alpha) with 8/16 bits per channel; colour mapped
-images (1/2/4/8 bit). Adam7 interlacing is supported for reading and
-writing. A number of optional chunks can be specified (when writing)
-and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
-
-For help, type ``import png; help(png)`` in your python interpreter.
-
-A good place to start is the :class:`Reader` and :class:`Writer` classes.
-
-Requires Python 2.3. Limited support is available for Python 2.2, but
-not everything works. Best with Python 2.4 and higher. Installation is
-trivial, but see the ``README.txt`` file (with the source distribution)
-for details.
-
-This file can also be used as a command-line utility to convert
-`Netpbm <http://netpbm.sourceforge.net/>`_ PNM files to PNG, and the reverse conversion from PNG to
-PNM. The interface is similar to that of the ``pnmtopng`` program from
-Netpbm. Type ``python png.py --help`` at the shell prompt
-for usage and a list of options.
-
-A note on spelling and terminology
-----------------------------------
-
-Generally British English spelling is used in the documentation. So
-that's "greyscale" and "colour". This not only matches the author's
-native language, it's also used by the PNG specification.
-
-The major colour models supported by PNG (and hence by PyPNG) are:
-greyscale, RGB, greyscale--alpha, RGB--alpha. These are sometimes
-referred to using the abbreviations: L, RGB, LA, RGBA. In this case
-each letter abbreviates a single channel: *L* is for Luminance or Luma or
-Lightness which is the channel used in greyscale images; *R*, *G*, *B* stand
-for Red, Green, Blue, the components of a colour image; *A* stands for
-Alpha, the opacity channel (used for transparency effects, but higher
-values are more opaque, so it makes sense to call it opacity).
-
-A note on formats
------------------
-
-When getting pixel data out of this module (reading) and presenting
-data to this module (writing) there are a number of ways the data could
-be represented as a Python value. Generally this module uses one of
-three formats called "flat row flat pixel", "boxed row flat pixel", and
-"boxed row boxed pixel". Basically the concern is whether each pixel
-and each row comes in its own little tuple (box), or not.
-
-Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
-has RGB components:
-
-Boxed row flat pixel::
-
- list([R,G,B, R,G,B, R,G,B],
- [R,G,B, R,G,B, R,G,B])
-
-Each row appears as its own list, but the pixels are flattened so that
-three values for one pixel simply follow the three values for the previous
-pixel. This is the most common format used, because it provides a good
-compromise between space and convenience. PyPNG regards itself as
-at liberty to replace any sequence type with any sufficiently compatible
-other sequence type; in practice each row is an array (from the array
-module), and the outer list is sometimes an iterator rather than an
-explicit list (so that streaming is possible).
-
-Flat row flat pixel::
-
- [R,G,B, R,G,B, R,G,B,
- R,G,B, R,G,B, R,G,B]
-
-The entire image is one single giant sequence of colour values.
-Generally an array will be used (to save space), not a list.
-
-Boxed row boxed pixel::
-
- list([ (R,G,B), (R,G,B), (R,G,B) ],
- [ (R,G,B), (R,G,B), (R,G,B) ])
-
-Each row appears in its own list, but each pixel also appears in its own
-tuple. A serious memory burn in Python.
-
-In all cases the top row comes first, and for each row the pixels are
-ordered from left-to-right. Within a pixel the values appear in the
-order, R-G-B-A (or L-A for greyscale--alpha).
-
-There is a fourth format, mentioned because it is used internally,
-is close to what lies inside a PNG file itself, and has some support
-from the public API. This format is called packed. When packed,
-each row is a sequence of bytes (integers from 0 to 255), just as
-it is before PNG scanline filtering is applied. When the bit depth
-is 8 this is essentially the same as boxed row flat pixel; when the
-bit depth is less than 8, several pixels are packed into each byte;
-when the bit depth is 16 (the only value more than 8 that is supported
-by the PNG image format) each pixel value is decomposed into 2 bytes
-(and `packed` is a misnomer). This format is used by the
-:meth:`Writer.write_packed` method. It isn't usually a convenient
-format, but may be just right if the source data for the PNG image
-comes from something that uses a similar format (for example, 1-bit
-BMPs, or another PNG file).
-
-And now, my famous members
---------------------------
-"""
-
-# http://www.python.org/doc/2.2.3/whatsnew/node5.html
-from __future__ import generators
-
-__version__ = "$URL: http://pypng.googlecode.com/svn/trunk/code/png.py $ $Rev: 228 $"
-
-from array import array
-try: # See :pyver:old
- import itertools
-except:
- pass
-import math
-# http://www.python.org/doc/2.4.4/lib/module-operator.html
-import operator
-import struct
-import sys
-import zlib
-# http://www.python.org/doc/2.4.4/lib/module-warnings.html
-import warnings
-
-
-__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array']
-
-
-# The PNG signature.
-# http://www.w3.org/TR/PNG/#5PNG-file-signature
-_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)
-
-_adam7 = ((0, 0, 8, 8),
- (4, 0, 8, 8),
- (0, 4, 4, 8),
- (2, 0, 4, 4),
- (0, 2, 2, 4),
- (1, 0, 2, 2),
- (0, 1, 1, 2))
-
-def group(s, n):
- # See
- # http://www.python.org/doc/2.6/library/functions.html#zip
- return zip(*[iter(s)]*n)
-
-def isarray(x):
- """Same as ``isinstance(x, array)`` except on Python 2.2, where it
- always returns ``False``. This helps PyPNG work on Python 2.2.
- """
-
- try:
- return isinstance(x, array)
- except:
- return False
-
-try: # see :pyver:old
- array.tostring
-except:
- def tostring(row):
- l = len(row)
- return struct.pack('%dB' % l, *row)
-else:
- def tostring(row):
- """Convert row of bytes to string. Expects `row` to be an
- ``array``.
- """
- return row.tostring()
-
-# Conditionally convert to bytes. Works on Python 2 and Python 3.
-try:
- bytes('', 'ascii')
- def strtobytes(x): return bytes(x, 'iso8859-1')
- def bytestostr(x): return str(x, 'iso8859-1')
-except:
- strtobytes = str
- bytestostr = str
-
-def interleave_planes(ipixels, apixels, ipsize, apsize):
- """
- Interleave (colour) planes, e.g. RGB + A = RGBA.
-
- Return an array of pixels consisting of the `ipsize` elements of data
- from each pixel in `ipixels` followed by the `apsize` elements of data
- from each pixel in `apixels`. Conventionally `ipixels` and
- `apixels` are byte arrays so the sizes are bytes, but it actually
- works with any arrays of the same type. The returned array is the
- same type as the input arrays which should be the same type as each other.
- """
-
- itotal = len(ipixels)
- atotal = len(apixels)
- newtotal = itotal + atotal
- newpsize = ipsize + apsize
- # Set up the output buffer
- # See http://www.python.org/doc/2.4.4/lib/module-array.html#l2h-1356
- out = array(ipixels.typecode)
- # It's annoying that there is no cheap way to set the array size :-(
- out.extend(ipixels)
- out.extend(apixels)
- # Interleave in the pixel data
- for i in range(ipsize):
- out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize]
- for i in range(apsize):
- out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize]
- return out
-
-def check_palette(palette):
- """Check a palette argument (to the :class:`Writer` class) for validity.
- Returns the palette as a list if okay; raises an exception otherwise.
- """
-
- # None is the default and is allowed.
- if palette is None:
- return None
-
- p = list(palette)
- if not (0 < len(p) <= 256):
- raise ValueError("a palette must have between 1 and 256 entries")
- seen_triple = False
- for i,t in enumerate(p):
- if len(t) not in (3,4):
- raise ValueError(
- "palette entry %d: entries must be 3- or 4-tuples." % i)
- if len(t) == 3:
- seen_triple = True
- if seen_triple and len(t) == 4:
- raise ValueError(
- "palette entry %d: all 4-tuples must precede all 3-tuples" % i)
- for x in t:
- if int(x) != x or not(0 <= x <= 255):
- raise ValueError(
- "palette entry %d: values must be integer: 0 <= x <= 255" % i)
- return p
-
-class Error(Exception):
- prefix = 'Error'
- def __str__(self):
- return self.prefix + ': ' + ' '.join(self.args)
-
-class FormatError(Error):
- """Problem with input file format. In other words, PNG file does
- not conform to the specification in some way and is invalid.
- """
-
- prefix = 'FormatError'
-
-class ChunkError(FormatError):
- prefix = 'ChunkError'
-
-
-class Writer:
- """
- PNG encoder in pure Python.
- """
-
- def __init__(self, width=None, height=None,
- size=None,
- greyscale=False,
- alpha=False,
- bitdepth=8,
- palette=None,
- transparent=None,
- background=None,
- gamma=None,
- compression=None,
- interlace=False,
- bytes_per_sample=None, # deprecated
- planes=None,
- colormap=None,
- maxval=None,
- chunk_limit=2**20):
- """
- Create a PNG encoder object.
-
- Arguments:
-
- width, height
- Image size in pixels, as two separate arguments.
- size
- Image size (w,h) in pixels, as single argument.
- greyscale
- Input data is greyscale, not RGB.
- alpha
- Input data has alpha channel (RGBA or LA).
- bitdepth
- Bit depth: from 1 to 16.
- palette
- Create a palette for a colour mapped image (colour type 3).
- transparent
- Specify a transparent colour (create a ``tRNS`` chunk).
- background
- Specify a default background colour (create a ``bKGD`` chunk).
- gamma
- Specify a gamma value (create a ``gAMA`` chunk).
- compression
- zlib compression level (1-9).
- interlace
- Create an interlaced image.
- chunk_limit
- Write multiple ``IDAT`` chunks to save memory.
-
- The image size (in pixels) can be specified either by using the
- `width` and `height` arguments, or with the single `size`
- argument. If `size` is used it should be a pair (*width*,
- *height*).
-
- `greyscale` and `alpha` are booleans that specify whether
- an image is greyscale (or colour), and whether it has an
- alpha channel (or not).
-
- `bitdepth` specifies the bit depth of the source pixel values.
- Each source pixel value must be an integer between 0 and
- ``2**bitdepth-1``. For example, 8-bit images have values
- between 0 and 255. PNG only stores images with bit depths of
- 1,2,4,8, or 16. When `bitdepth` is not one of these values,
- the next highest valid bit depth is selected, and an ``sBIT``
- (significant bits) chunk is generated that specifies the original
- precision of the source image. In this case the supplied pixel
- values will be rescaled to fit the range of the selected bit depth.
-
- The details of which bit depth / colour model combinations the
- PNG file format supports directly, are somewhat arcane
- (refer to the PNG specification for full details). Briefly:
- "small" bit depths (1,2,4) are only allowed with greyscale and
- colour mapped images; colour mapped images cannot have bit depth
- 16.
-
- For colour mapped images (in other words, when the `palette`
- argument is specified) the `bitdepth` argument must match one of
- the valid PNG bit depths: 1, 2, 4, or 8. (It is valid to have a
- PNG image with a palette and an ``sBIT`` chunk, but the meaning
- is slightly different; it would be awkward to press the
- `bitdepth` argument into service for this.)
-
- The `palette` option, when specified, causes a colour mapped image
- to be created: the PNG colour type is set to 3; greyscale
- must not be set; alpha must not be set; transparent must
- not be set; the bit depth must be 1,2,4, or 8. When a colour
- mapped image is created, the pixel values are palette indexes
- and the `bitdepth` argument specifies the size of these indexes
- (not the size of the colour values in the palette).
-
- The palette argument value should be a sequence of 3- or
- 4-tuples. 3-tuples specify RGB palette entries; 4-tuples
- specify RGBA palette entries. If both 4-tuples and 3-tuples
- appear in the sequence then all the 4-tuples must come
- before all the 3-tuples. A ``PLTE`` chunk is created; if there
- are 4-tuples then a ``tRNS`` chunk is created as well. The
- ``PLTE`` chunk will contain all the RGB triples in the same
- sequence; the ``tRNS`` chunk will contain the alpha channel for
- all the 4-tuples, in the same sequence. Palette entries
- are always 8-bit.
-
- If specified, the `transparent` and `background` parameters must
- be a tuple with three integer values for red, green, blue, or
- a simple integer (or singleton tuple) for a greyscale image.
-
- If specified, the `gamma` parameter must be a positive number
- (generally, a float). A ``gAMA`` chunk will be created. Note that
- this will not change the values of the pixels as they appear in
- the PNG file, they are assumed to have already been converted
- appropriately for the gamma specified.
-
- The `compression` argument specifies the compression level
- to be used by the ``zlib`` module. Higher values are likely
- to compress better, but will be slower to compress. The
- default for this argument is ``None``; this does not mean
- no compression, rather it means that the default from the
- ``zlib`` module is used (which is generally acceptable).
-
- If `interlace` is true then an interlaced image is created
- (using PNG's so far only interace method, *Adam7*). This does not
- affect how the pixels should be presented to the encoder, rather
- it changes how they are arranged into the PNG file. On slow
- connexions interlaced images can be partially decoded by the
- browser to give a rough view of the image that is successively
- refined as more image data appears.
-
- .. note ::
-
- Enabling the `interlace` option requires the entire image
- to be processed in working memory.
-
- `chunk_limit` is used to limit the amount of memory used whilst
- compressing the image. In order to avoid using large amounts of
- memory, multiple ``IDAT`` chunks may be created.
- """
-
- # At the moment the `planes` argument is ignored;
- # its purpose is to act as a dummy so that
- # ``Writer(x, y, **info)`` works, where `info` is a dictionary
- # returned by Reader.read and friends.
- # Ditto for `colormap`.
-
- # A couple of helper functions come first. Best skipped if you
- # are reading through.
-
- def isinteger(x):
- try:
- return int(x) == x
- except:
- return False
-
- def check_color(c, which):
- """Checks that a colour argument for transparent or
- background options is the right form. Also "corrects" bare
- integers to 1-tuples.
- """
-
- if c is None:
- return c
- if greyscale:
- try:
- l = len(c)
- except TypeError:
- c = (c,)
- if len(c) != 1:
- raise ValueError("%s for greyscale must be 1-tuple" %
- which)
- if not isinteger(c[0]):
- raise ValueError(
- "%s colour for greyscale must be integer" %
- which)
- else:
- if not (len(c) == 3 and
- isinteger(c[0]) and
- isinteger(c[1]) and
- isinteger(c[2])):
- raise ValueError(
- "%s colour must be a triple of integers" %
- which)
- return c
-
- if size:
- if len(size) != 2:
- raise ValueError(
- "size argument should be a pair (width, height)")
- if width is not None and width != size[0]:
- raise ValueError(
- "size[0] (%r) and width (%r) should match when both are used."
- % (size[0], width))
- if height is not None and height != size[1]:
- raise ValueError(
- "size[1] (%r) and height (%r) should match when both are used."
- % (size[1], height))
- width,height = size
- del size
-
- if width <= 0 or height <= 0:
- raise ValueError("width and height must be greater than zero")
- if not isinteger(width) or not isinteger(height):
- raise ValueError("width and height must be integers")
- # http://www.w3.org/TR/PNG/#7Integers-and-byte-order
- if width > 2**32-1 or height > 2**32-1:
- raise ValueError("width and height cannot exceed 2**32-1")
-
- if alpha and transparent is not None:
- raise ValueError(
- "transparent colour not allowed with alpha channel")
-
- if bytes_per_sample is not None:
- warnings.warn('please use bitdepth instead of bytes_per_sample',
- DeprecationWarning)
- if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2):
- raise ValueError(
- "bytes per sample must be .125, .25, .5, 1, or 2")
- bitdepth = int(8*bytes_per_sample)
- del bytes_per_sample
- if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
- raise ValueError("bitdepth (%r) must be a postive integer <= 16" %
- bitdepth)
-
- self.rescale = None
- if palette:
- if bitdepth not in (1,2,4,8):
- raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8")
- if transparent is not None:
- raise ValueError("transparent and palette not compatible")
- if alpha:
- raise ValueError("alpha and palette not compatible")
- if greyscale:
- raise ValueError("greyscale and palette not compatible")
- else:
- # No palette, check for sBIT chunk generation.
- if alpha or not greyscale:
- if bitdepth not in (8,16):
- targetbitdepth = (8,16)[bitdepth > 8]
- self.rescale = (bitdepth, targetbitdepth)
- bitdepth = targetbitdepth
- del targetbitdepth
- else:
- assert greyscale
- assert not alpha
- if bitdepth not in (1,2,4,8,16):
- if bitdepth > 8:
- targetbitdepth = 16
- elif bitdepth == 3:
- targetbitdepth = 4
- else:
- assert bitdepth in (5,6,7)
- targetbitdepth = 8
- self.rescale = (bitdepth, targetbitdepth)
- bitdepth = targetbitdepth
- del targetbitdepth
-
- if bitdepth < 8 and (alpha or not greyscale and not palette):
- raise ValueError(
- "bitdepth < 8 only permitted with greyscale or palette")
- if bitdepth > 8 and palette:
- raise ValueError(
- "bit depth must be 8 or less for images with palette")
-
- transparent = check_color(transparent, 'transparent')
- background = check_color(background, 'background')
-
- # It's important that the true boolean values (greyscale, alpha,
- # colormap, interlace) are converted to bool because Iverson's
- # convention is relied upon later on.
- self.width = width
- self.height = height
- self.transparent = transparent
- self.background = background
- self.gamma = gamma
- self.greyscale = bool(greyscale)
- self.alpha = bool(alpha)
- self.colormap = bool(palette)
- self.bitdepth = int(bitdepth)
- self.compression = compression
- self.chunk_limit = chunk_limit
- self.interlace = bool(interlace)
- self.palette = check_palette(palette)
-
- self.color_type = 4*self.alpha + 2*(not greyscale) + 1*self.colormap
- assert self.color_type in (0,2,3,4,6)
-
- self.color_planes = (3,1)[self.greyscale or self.colormap]
- self.planes = self.color_planes + self.alpha
- # :todo: fix for bitdepth < 8
- self.psize = (self.bitdepth/8) * self.planes
-
- def make_palette(self):
- """Create the byte sequences for a ``PLTE`` and if necessary a
- ``tRNS`` chunk. Returned as a pair (*p*, *t*). *t* will be
- ``None`` if no ``tRNS`` chunk is necessary.
- """
-
- p = array('B')
- t = array('B')
-
- for x in self.palette:
- p.extend(x[0:3])
- if len(x) > 3:
- t.append(x[3])
- p = tostring(p)
- t = tostring(t)
- if t:
- return p,t
- return p,None
-
- def write(self, outfile, rows):
- """Write a PNG image to the output file. `rows` should be
- an iterable that yields each row in boxed row flat pixel format.
- The rows should be the rows of the original image, so there
- should be ``self.height`` rows of ``self.width * self.planes`` values.
- If `interlace` is specified (when creating the instance), then
- an interlaced PNG file will be written. Supply the rows in the
- normal image order; the interlacing is carried out internally.
-
- .. note ::
-
- Interlacing will require the entire image to be in working memory.
- """
-
- if self.interlace:
- fmt = 'BH'[self.bitdepth > 8]
- a = array(fmt, itertools.chain(*rows))
- return self.write_array(outfile, a)
- else:
- nrows = self.write_passes(outfile, rows)
- if nrows != self.height:
- raise ValueError(
- "rows supplied (%d) does not match height (%d)" %
- (nrows, self.height))
-
    def write_passes(self, outfile, rows, packed=False):
        """
        Write a PNG image to the output file.

        Most users are expected to find the :meth:`write` or
        :meth:`write_array` method more convenient.

        The rows should be given to this method in the order that
        they appear in the output file. For straightlaced images,
        this is the usual top to bottom ordering, but for interlaced
        images the rows should have already been interlaced before
        passing them to this function.

        `rows` should be an iterable that yields each row. When
        `packed` is ``False`` the rows should be in boxed row flat pixel
        format; when `packed` is ``True`` each row should be a packed
        sequence of bytes.

        Returns the number of rows written (callers cross-check this
        against the declared image height).
        """

        # http://www.w3.org/TR/PNG/#5PNG-file-signature
        outfile.write(_signature)

        # http://www.w3.org/TR/PNG/#11IHDR
        # Fields after bitdepth/colour type are compression method,
        # filter method (both always 0), and interlace method.
        write_chunk(outfile, 'IHDR',
                    struct.pack("!2I5B", self.width, self.height,
                                self.bitdepth, self.color_type,
                                0, 0, self.interlace))

        # See :chunk:order
        # http://www.w3.org/TR/PNG/#11gAMA
        if self.gamma is not None:
            write_chunk(outfile, 'gAMA',
                        struct.pack("!L", int(round(self.gamma*1e5))))

        # See :chunk:order
        # http://www.w3.org/TR/PNG/#11sBIT
        # sBIT records the original (pre-rescale) significant bits.
        if self.rescale:
            write_chunk(outfile, 'sBIT',
                struct.pack('%dB' % self.planes,
                            *[self.rescale[0]]*self.planes))

        # :chunk:order: Without a palette (PLTE chunk), ordering is
        # relatively relaxed.  With one, gAMA chunk must precede PLTE
        # chunk which must precede tRNS and bKGD.
        # See http://www.w3.org/TR/PNG/#5ChunkOrdering
        if self.palette:
            p,t = self.make_palette()
            write_chunk(outfile, 'PLTE', p)
            if t:
                # tRNS chunk is optional.  Only needed if palette entries
                # have alpha.
                write_chunk(outfile, 'tRNS', t)

        # http://www.w3.org/TR/PNG/#11tRNS
        # For non-palette images tRNS holds a single transparent
        # colour: one sample for greyscale, three for RGB.
        if self.transparent is not None:
            if self.greyscale:
                write_chunk(outfile, 'tRNS',
                            struct.pack("!1H", *self.transparent))
            else:
                write_chunk(outfile, 'tRNS',
                            struct.pack("!3H", *self.transparent))

        # http://www.w3.org/TR/PNG/#11bKGD
        if self.background is not None:
            if self.greyscale:
                write_chunk(outfile, 'bKGD',
                            struct.pack("!1H", *self.background))
            else:
                write_chunk(outfile, 'bKGD',
                            struct.pack("!3H", *self.background))

        # http://www.w3.org/TR/PNG/#11IDAT
        if self.compression is not None:
            compressor = zlib.compressobj(self.compression)
        else:
            compressor = zlib.compressobj()

        # Choose an extend function based on the bitdepth.  The extend
        # function packs/decomposes the pixel values into bytes and
        # stuffs them onto the data array.
        data = array('B')
        if self.bitdepth == 8 or packed:
            extend = data.extend
        elif self.bitdepth == 16:
            # Decompose into bytes
            def extend(sl):
                fmt = '!%dH' % len(sl)
                data.extend(array('B', struct.pack(fmt, *sl)))
        else:
            # Pack into bytes
            assert self.bitdepth < 8
            # samples per byte
            spb = int(8/self.bitdepth)
            def extend(sl):
                a = array('B', sl)
                # Adding padding bytes so we can group into a whole
                # number of spb-tuples.
                l = float(len(a))
                extra = math.ceil(l / float(spb))*spb - l
                a.extend([0]*int(extra))
                # Pack into bytes
                l = group(a, spb)
                l = map(lambda e: reduce(lambda x,y:
                                           (x << self.bitdepth) + y, e), l)
                data.extend(l)
        if self.rescale:
            # Wrap extend so that every sample is scaled from the
            # source bit depth up to the target bit depth first.
            oldextend = extend
            factor = \
              float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1)
            def extend(sl):
                oldextend(map(lambda x: int(round(factor*x)), sl))

        # Build the first row, testing mostly to see if we need to
        # changed the extend function to cope with NumPy integer types
        # (they cause our ordinary definition of extend to fail, so we
        # wrap it).  See
        # http://code.google.com/p/pypng/issues/detail?id=44
        enumrows = enumerate(rows)
        del rows

        # First row's filter type.
        data.append(0)
        # :todo: Certain exceptions in the call to ``.next()`` or the
        # following try would indicate no row data supplied.
        # Should catch.
        i,row = enumrows.next()
        try:
            # If this fails...
            extend(row)
        except:
            # ... try a version that converts the values to int first.
            # Not only does this work for the (slightly broken) NumPy
            # types, there are probably lots of other, unknown, "nearly"
            # int types it works for.
            def wrapmapint(f):
                return lambda sl: f(map(int, sl))
            extend = wrapmapint(extend)
            del wrapmapint
            extend(row)

        for i,row in enumrows:
            # Add "None" filter type.  Currently, it's essential that
            # this filter type be used for every scanline as we do not
            # mark the first row of a reduced pass image; that means we
            # could accidentally compute the wrong filtered scanline if
            # we used "up", "average", or "paeth" on such a line.
            data.append(0)
            extend(row)
            if len(data) > self.chunk_limit:
                # Flush a full IDAT chunk once enough bytes accumulate.
                compressed = compressor.compress(tostring(data))
                if len(compressed):
                    # print >> sys.stderr, len(data), len(compressed)
                    write_chunk(outfile, 'IDAT', compressed)
                # Because of our very witty definition of ``extend``,
                # above, we must re-use the same ``data`` object.  Hence
                # we use ``del`` to empty this one, rather than create a
                # fresh one (which would be my natural FP instinct).
                del data[:]
        if len(data):
            compressed = compressor.compress(tostring(data))
        else:
            compressed = ''
        flushed = compressor.flush()
        if len(compressed) or len(flushed):
            # print >> sys.stderr, len(data), len(compressed), len(flushed)
            write_chunk(outfile, 'IDAT', compressed + flushed)
        # http://www.w3.org/TR/PNG/#11IEND
        write_chunk(outfile, 'IEND')
        return i+1
-
- def write_array(self, outfile, pixels):
- """
- Write an array in flat row flat pixel format as a PNG file on
- the output file. See also :meth:`write` method.
- """
-
- if self.interlace:
- self.write_passes(outfile, self.array_scanlines_interlace(pixels))
- else:
- self.write_passes(outfile, self.array_scanlines(pixels))
-
- def write_packed(self, outfile, rows):
- """
- Write PNG file to `outfile`. The pixel data comes from `rows`
- which should be in boxed row packed format. Each row should be
- a sequence of packed bytes.
-
- Technically, this method does work for interlaced images but it
- is best avoided. For interlaced images, the rows should be
- presented in the order that they appear in the file.
-
- This method should not be used when the source image bit depth
- is not one naturally supported by PNG; the bit depth should be
- 1, 2, 4, 8, or 16.
- """
-
- if self.rescale:
- raise Error("write_packed method not suitable for bit depth %d" %
- self.rescale[0])
- return self.write_passes(outfile, rows, packed=True)
-
- def convert_pnm(self, infile, outfile):
- """
- Convert a PNM file containing raw pixel data into a PNG file
- with the parameters set in the writer object. Works for
- (binary) PGM, PPM, and PAM formats.
- """
-
- if self.interlace:
- pixels = array('B')
- pixels.fromfile(infile,
- (self.bitdepth/8) * self.color_planes *
- self.width * self.height)
- self.write_passes(outfile, self.array_scanlines_interlace(pixels))
- else:
- self.write_passes(outfile, self.file_scanlines(infile))
-
- def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
- """
- Convert a PPM and PGM file containing raw pixel data into a
- PNG outfile with the parameters set in the writer object.
- """
- pixels = array('B')
- pixels.fromfile(ppmfile,
- (self.bitdepth/8) * self.color_planes *
- self.width * self.height)
- apixels = array('B')
- apixels.fromfile(pgmfile,
- (self.bitdepth/8) *
- self.width * self.height)
- pixels = interleave_planes(pixels, apixels,
- (self.bitdepth/8) * self.color_planes,
- (self.bitdepth/8))
- if self.interlace:
- self.write_passes(outfile, self.array_scanlines_interlace(pixels))
- else:
- self.write_passes(outfile, self.array_scanlines(pixels))
-
- def file_scanlines(self, infile):
- """
- Generates boxed rows in flat pixel format, from the input file
- `infile`. It assumes that the input file is in a "Netpbm-like"
- binary format, and is positioned at the beginning of the first
- pixel. The number of pixels to read is taken from the image
- dimensions (`width`, `height`, `planes`) and the number of bytes
- per value is implied by the image `bitdepth`.
- """
-
- # Values per row
- vpr = self.width * self.planes
- row_bytes = vpr
- if self.bitdepth > 8:
- assert self.bitdepth == 16
- row_bytes *= 2
- fmt = '>%dH' % vpr
- def line():
- return array('H', struct.unpack(fmt, infile.read(row_bytes)))
- else:
- def line():
- scanline = array('B', infile.read(row_bytes))
- return scanline
- for y in range(self.height):
- yield line()
-
- def array_scanlines(self, pixels):
- """
- Generates boxed rows (flat pixels) from flat rows (flat pixels)
- in an array.
- """
-
- # Values per row
- vpr = self.width * self.planes
- stop = 0
- for y in range(self.height):
- start = stop
- stop = start + vpr
- yield pixels[start:stop]
-
    def array_scanlines_interlace(self, pixels):
        """
        Generator for interlaced scanlines from an array.  `pixels` is
        the full source image in flat row flat pixel format.  The
        generator yields each scanline of the reduced passes in turn, in
        boxed row flat pixel format.
        """

        # http://www.w3.org/TR/PNG/#8InterlaceMethods
        # Array type: 'B' for samples up to 8 bits, 'H' for 16-bit.
        fmt = 'BH'[self.bitdepth > 8]
        # Value per row
        vpr = self.width * self.planes
        # Each _adam7 entry describes one reduced pass as a
        # (column start, row start, column step, row step) tuple.
        for xstart, ystart, xstep, ystep in _adam7:
            # A pass is empty when the image is narrower than its
            # starting column.
            if xstart >= self.width:
                continue
            # Pixels per row (of reduced image)
            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
            # number of values in reduced image row.
            row_len = ppr*self.planes
            for y in range(ystart, self.height, ystep):
                if xstep == 1:
                    # Pass covers whole rows; slice one out directly.
                    offset = y * vpr
                    yield pixels[offset:offset+vpr]
                else:
                    row = array(fmt)
                    # There's no easier way to set the length of an array
                    row.extend(pixels[0:row_len])
                    # Gather this pass's samples plane by plane using
                    # extended slices over the flat source.
                    offset = y * vpr + xstart * self.planes
                    end_offset = (y+1) * vpr
                    skip = self.planes * xstep
                    for i in range(self.planes):
                        row[i::self.planes] = \
                            pixels[offset+i:end_offset:skip]
                    yield row
-
def write_chunk(outfile, tag, data=strtobytes('')):
    """
    Write a PNG chunk to the output file, including length and
    checksum.
    """

    # http://www.w3.org/TR/PNG/#5Chunk-layout
    tag = strtobytes(tag)
    outfile.write(struct.pack("!I", len(data)))
    outfile.write(tag)
    outfile.write(data)
    # The CRC covers the tag and the data but not the length field.
    crc = zlib.crc32(data, zlib.crc32(tag))
    # Coerce to unsigned; zlib.crc32 signedness varies by Python
    # version, see http://bugs.python.org/issue1202 .
    crc &= 2**32-1
    outfile.write(struct.pack("!I", crc))
-
def write_chunks(out, chunks):
    """Create a PNG file by writing out the chunks."""

    # The 8-byte signature comes first, then every chunk in the order
    # supplied.  Each element of `chunks` is the argument tuple for
    # write_chunk (a tag, optionally followed by data).
    out.write(_signature)
    for chunk in chunks:
        write_chunk(out, *chunk)
-
-def filter_scanline(type, line, fo, prev=None):
- """Apply a scanline filter to a scanline. `type` specifies the
- filter type (0 to 4); `line` specifies the current (unfiltered)
- scanline as a sequence of bytes; `prev` specifies the previous
- (unfiltered) scanline as a sequence of bytes. `fo` specifies the
- filter offset; normally this is size of a pixel in bytes (the number
- of bytes per sample times the number of channels), but when this is
- < 1 (for bit depths < 8) then the filter offset is 1.
- """
-
- assert 0 <= type < 5
-
- # The output array. Which, pathetically, we extend one-byte at a
- # time (fortunately this is linear).
- out = array('B', [type])
-
- def sub():
- ai = -fo
- for x in line:
- if ai >= 0:
- x = (x - line[ai]) & 0xff
- out.append(x)
- ai += 1
- def up():
- for i,x in enumerate(line):
- x = (x - prev[i]) & 0xff
- out.append(x)
- def average():
- ai = -fo
- for i,x in enumerate(line):
- if ai >= 0:
- x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff
- else:
- x = (x - (prev[i] >> 1)) & 0xff
- out.append(x)
- ai += 1
- def paeth():
- # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
- ai = -fo # also used for ci
- for i,x in enumerate(line):
- a = 0
- b = prev[i]
- c = 0
-
- if ai >= 0:
- a = line[ai]
- c = prev[ai]
- p = a + b - c
- pa = abs(p - a)
- pb = abs(p - b)
- pc = abs(p - c)
- if pa <= pb and pa <= pc: Pr = a
- elif pb <= pc: Pr = b
- else: Pr = c
-
- x = (x - Pr) & 0xff
- out.append(x)
- ai += 1
-
- if not prev:
- # We're on the first line. Some of the filters can be reduced
- # to simpler cases which makes handling the line "off the top"
- # of the image simpler. "up" becomes "none"; "paeth" becomes
- # "left" (non-trivial, but true). "average" needs to be handled
- # specially.
- if type == 2: # "up"
- return line # type = 0
- elif type == 3:
- prev = [0]*len(line)
- elif type == 4: # "paeth"
- type = 1
- if type == 0:
- out.extend(line)
- elif type == 1:
- sub()
- elif type == 2:
- up()
- elif type == 3:
- average()
- else: # type == 4
- paeth()
- return out
-
-
def from_array(a, mode=None, info={}):
    """Create a PNG :class:`Image` object from a 2- or 3-dimensional array.
    One application of this function is easy PIL-style saving:
    ``png.from_array(pixels, 'L').save('foo.png')``.

    .. note ::

      The use of the term *3-dimensional* is for marketing purposes
      only.  It doesn't actually work.  Please bear with us.  Meanwhile
      enjoy the complimentary snacks (on request) and please use a
      2-dimensional array.

    Unless they are specified using the *info* parameter, the PNG's
    height and width are taken from the array size.  For a 3 dimensional
    array the first axis is the height; the second axis is the width;
    and the third axis is the channel number.  Thus an RGB image that is
    16 pixels high and 8 wide will use an array that is 16x8x3.  For 2
    dimensional arrays the first axis is the height, but the second axis
    is ``width*channels``, so an RGB image that is 16 pixels high and 8
    wide will use a 2-dimensional array that is 16x24 (each row will be
    8*3==24 sample values).

    *mode* is a string that specifies the image colour format in a
    PIL-style mode.  It can be:

    ``'L'``
      greyscale (1 channel)
    ``'LA'``
      greyscale with alpha (2 channel)
    ``'RGB'``
      colour image (3 channel)
    ``'RGBA'``
      colour image with alpha (4 channel)

    The mode string can also specify the bit depth (overriding how this
    function normally derives the bit depth, see below).  Appending
    ``';16'`` to the mode will cause the PNG to be 16 bits per channel;
    any decimal from 1 to 16 can be used to specify the bit depth.

    When a 2-dimensional array is used *mode* determines how many
    channels the image has, and so allows the width to be derived from
    the second array dimension.

    The array is expected to be a ``numpy`` array, but it can be any
    suitable Python sequence.  For example, a list of lists can be used:
    ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.  The exact
    rules are: ``len(a)`` gives the first dimension, height;
    ``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
    third dimension, unless an exception is raised in which case a
    2-dimensional array is assumed.  It's slightly more complicated than
    that because an iterator of rows can be used, and it all still
    works.  Using an iterator allows data to be streamed efficiently.

    The bit depth of the PNG is normally taken from the array element's
    datatype (but if *mode* specifies a bitdepth then that is used
    instead).  The array element's datatype is determined in a way which
    is supposed to work both for ``numpy`` arrays and for Python
    ``array.array`` objects.  A 1 byte datatype will give a bit depth of
    8, a 2 byte datatype will give a bit depth of 16.  If the datatype
    does not have an implicit size, for example it is a plain Python
    list of lists, as above, then a default of 8 is used.

    The *info* parameter is a dictionary that can be used to specify
    metadata (in the same style as the arguments to the
    :class:``png.Writer`` class).  For this function the keys that are
    useful are:

    height
      overrides the height derived from the array dimensions and allows
      *a* to be an iterable.
    width
      overrides the width derived from the array dimensions.
    bitdepth
      overrides the bit depth derived from the element datatype (but
      must match *mode* if that also specifies a bit depth).

    Generally anything specified in the
    *info* dictionary will override any implicit choices that this
    function would otherwise make, but must match any explicit ones.
    For example, if the *info* dictionary has a ``greyscale`` key then
    this must be true when mode is ``'L'`` or ``'LA'`` and false when
    mode is ``'RGB'`` or ``'RGBA'``.
    """

    # We abuse the *info* parameter by modifying it.  Take a copy here.
    # (Also typechecks *info* to some extent).
    info = dict(info)

    # Syntax check mode string.
    bitdepth = None
    try:
        mode = mode.split(';')
        if len(mode) not in (1,2):
            raise Error()
        if mode[0] not in ('L', 'LA', 'RGB', 'RGBA'):
            raise Error()
        if len(mode) == 2:
            try:
                bitdepth = int(mode[1])
            except:
                raise Error()
    except Error:
        raise Error("mode string should be 'RGB' or 'L;16' or similar.")
    mode = mode[0]

    # Get bitdepth from *mode* if possible.
    if bitdepth:
        if info.get('bitdepth') and bitdepth != info['bitdepth']:
            raise Error("mode bitdepth (%d) should match info bitdepth (%d)." %
              (bitdepth, info['bitdepth']))
        info['bitdepth'] = bitdepth

    # Fill in and/or check entries in *info*.
    # Dimensions.
    if 'size' in info:
        # Check width, height, size all match where used.
        for dimension,axis in [('width', 0), ('height', 1)]:
            if dimension in info:
                if info[dimension] != info['size'][axis]:
                    # (Bug fix: error message previously read
                    # "shhould".)
                    raise Error(
                      "info[%r] should match info['size'][%r]." %
                      (dimension, axis))
        info['width'],info['height'] = info['size']
    if 'height' not in info:
        try:
            l = len(a)
        except:
            raise Error(
              "len(a) does not work, supply info['height'] instead.")
        info['height'] = l
    # Colour format.
    if 'greyscale' in info:
        if bool(info['greyscale']) != ('L' in mode):
            raise Error("info['greyscale'] should match mode.")
    info['greyscale'] = 'L' in mode
    if 'alpha' in info:
        if bool(info['alpha']) != ('A' in mode):
            raise Error("info['alpha'] should match mode.")
    info['alpha'] = 'A' in mode

    planes = len(mode)
    if 'planes' in info:
        if info['planes'] != planes:
            raise Error("info['planes'] should match mode.")

    # In order to work out whether we the array is 2D or 3D we need its
    # first row, which requires that we take a copy of its iterator.
    # We may also need the first row to derive width and bitdepth.
    a,t = itertools.tee(a)
    row = t.next()
    del t
    try:
        row[0][0]
        threed = True
        testelement = row[0]
    except:
        threed = False
        testelement = row
    if 'width' not in info:
        if threed:
            width = len(row)
        else:
            width = len(row) // planes
        info['width'] = width

    # Not implemented yet
    assert not threed

    if 'bitdepth' not in info:
        try:
            dtype = testelement.dtype
            # goto the "else:" clause.  Sorry.
        except:
            try:
                # Try a Python array.array.
                bitdepth = 8 * testelement.itemsize
            except:
                # We can't determine it from the array element's
                # datatype, use a default of 8.
                bitdepth = 8
        else:
            # If we got here without exception, we now assume that
            # the array is a numpy array.
            if dtype.kind == 'b':
                bitdepth = 1
            else:
                bitdepth = 8 * dtype.itemsize
        info['bitdepth'] = bitdepth

    # Sanity: everything the Writer needs must now be present.
    for thing in 'width height bitdepth greyscale alpha'.split():
        assert thing in info
    return Image(a, info)
-
-# So that refugee's from PIL feel more at home. Not documented.
-fromarray = from_array
-
class Image:
    """A PNG image.
    You can create an :class:`Image` object from an array of pixels by calling
    :meth:`png.from_array`.  It can be saved to disk with the
    :meth:`save` method."""

    def __init__(self, rows, info):
        """
        .. note ::

          The constructor is not public.  Please do not call it.
        """

        # Row iterable and Writer keyword arguments, stored as-is.
        self.rows = rows
        self.info = info

    def save(self, file):
        """Save the image to *file*.  If *file* looks like an open file
        descriptor then it is used, otherwise it is treated as a
        filename and a fresh file is opened.

        In general, you can only call this method once; after it has
        been called the first time and the PNG image has been saved, the
        source data will have been streamed, and cannot be streamed
        again.
        """

        w = Writer(**self.info)

        # Duck-type the argument: anything with a ``write`` attribute
        # is used directly; otherwise treat it as a filename and close
        # the file we opened ourselves when done.
        if hasattr(file, 'write'):
            opened = None
        else:
            file = open(file, 'wb')
            opened = file
        try:
            w.write(file, self.rows)
        finally:
            if opened is not None:
                opened.close()
-
class _readable:
    """
    A simple file-like interface for strings and arrays.
    """

    def __init__(self, buf):
        # Underlying string or array, and the current read position.
        self.buf = buf
        self.offset = 0

    def read(self, n):
        """Return the next `n` items as a byte string and advance the
        read position."""
        end = self.offset + n
        r = self.buf[self.offset:end]
        self.offset = end
        # Arrays are converted to their raw byte representation.
        if isarray(r):
            r = r.tostring()
        return r
-
-
-class Reader:
- """
- PNG decoder in pure Python.
- """
-
- def __init__(self, _guess=None, **kw):
- """
- Create a PNG decoder object.
-
- The constructor expects exactly one keyword argument. If you
- supply a positional argument instead, it will guess the input
- type. You can choose among the following keyword arguments:
-
- filename
- Name of input file (a PNG file).
- file
- A file-like object (object with a read() method).
- bytes
- ``array`` or ``string`` with PNG data.
-
- """
- if ((_guess is not None and len(kw) != 0) or
- (_guess is None and len(kw) != 1)):
- raise TypeError("Reader() takes exactly 1 argument")
-
- # Will be the first 8 bytes, later on. See validate_signature.
- self.signature = None
- self.transparent = None
- # A pair of (len,type) if a chunk has been read but its data and
- # checksum have not (in other words the file position is just
- # past the 4 bytes that specify the chunk type). See preamble
- # method for how this is used.
- self.atchunk = None
-
- if _guess is not None:
- if isarray(_guess):
- kw["bytes"] = _guess
- elif isinstance(_guess, str):
- kw["filename"] = _guess
- elif isinstance(_guess, file):
- kw["file"] = _guess
-
- if "filename" in kw:
- self.file = open(kw["filename"], "rb")
- elif "file" in kw:
- self.file = kw["file"]
- elif "bytes" in kw:
- self.file = _readable(kw["bytes"])
- else:
- raise TypeError("expecting filename, file or bytes array")
-
- def chunk(self, seek=None):
- """
- Read the next PNG chunk from the input file; returns a
- (*type*,*data*) tuple. *type* is the chunk's type as a string
- (all PNG chunk types are 4 characters long). *data* is the
- chunk's data content, as a string.
-
- If the optional `seek` argument is
- specified then it will keep reading chunks until it either runs
- out of file or finds the type specified by the argument. Note
- that in general the order of chunks in PNGs is unspecified, so
- using `seek` can cause you to miss chunks.
- """
-
- self.validate_signature()
-
- while True:
- # http://www.w3.org/TR/PNG/#5Chunk-layout
- if not self.atchunk:
- self.atchunk = self.chunklentype()
- length,type = self.atchunk
- self.atchunk = None
- data = self.file.read(length)
- if len(data) != length:
- raise ChunkError('Chunk %s too short for required %i octets.'
- % (type, length))
- checksum = self.file.read(4)
- if len(checksum) != 4:
- raise ValueError('Chunk %s too short for checksum.', tag)
- if seek and type != seek:
- continue
- verify = zlib.crc32(strtobytes(type))
- verify = zlib.crc32(data, verify)
- # Whether the output from zlib.crc32 is signed or not varies
- # according to hideous implementation details, see
- # http://bugs.python.org/issue1202 .
- # We coerce it to be positive here (in a way which works on
- # Python 2.3 and older).
- verify &= 2**32 - 1
- verify = struct.pack('!I', verify)
- if checksum != verify:
- # print repr(checksum)
- (a, ) = struct.unpack('!I', checksum)
- (b, ) = struct.unpack('!I', verify)
- raise ChunkError(
- "Checksum error in %s chunk: 0x%08X != 0x%08X." %
- (type, a, b))
- return type, data
-
- def chunks(self):
- """Return an iterator that will yield each chunk as a
- (*chunktype*, *content*) pair.
- """
-
- while True:
- t,v = self.chunk()
- yield t,v
- if t == 'IEND':
- break
-
    def undo_filter(self, filter_type, scanline, previous):
        """Undo the filter for a scanline.  `scanline` is a sequence of
        bytes that does not include the initial filter type byte.
        `previous` is decoded previous scanline (for straightlaced
        images this is the previous pixel row, but for interlaced
        images, it is the previous scanline in the reduced image, which
        in general is not the previous pixel row in the final image).
        When there is no previous scanline (the first row of a
        straightlaced image, or the first row in one of the passes in an
        interlaced image), then this argument should be ``None``.

        The scanline will have the effects of filtering removed, and the
        result will be returned as a fresh sequence of bytes.

        Raises :class:`FormatError` if `filter_type` is not a valid
        PNG filter type (0-4).
        """

        # :todo: Would it be better to update scanline in place?

        # Create the result byte array.  It seems that the best way to
        # create the array to be the right size is to copy from an
        # existing sequence.  *sigh*
        # If we fill the result with scanline, then this allows a
        # micro-optimisation in the "null" and "sub" cases.
        result = array('B', scanline)

        if filter_type == 0:
            # And here, we _rely_ on filling the result with scanline,
            # above.
            return result

        if filter_type not in (1,2,3,4):
            raise FormatError('Invalid PNG Filter Type.'
              ' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')

        # Filter unit.  The stride from one pixel to the corresponding
        # byte from the previous previous.  Normally this is the pixel
        # size in bytes, but when this is smaller than 1, the previous
        # byte is used instead.
        fu = max(1, self.psize)

        # For the first line of a pass, synthesize a dummy previous
        # line.  An alternative approach would be to observe that on the
        # first line 'up' is the same as 'null', 'paeth' is the same
        # as 'sub', with only 'average' requiring any special case.
        if not previous:
            previous = array('B', [0]*len(scanline))

        def sub():
            """Undo sub filter."""

            ai = 0
            # Loops starts at index fu.  Observe that the initial part
            # of the result is already filled in correctly with
            # scanline.
            for i in range(fu, len(result)):
                x = scanline[i]
                a = result[ai]
                result[i] = (x + a) & 0xff
                ai += 1

        def up():
            """Undo up filter."""

            for i in range(len(result)):
                x = scanline[i]
                b = previous[i]
                result[i] = (x + b) & 0xff

        def average():
            """Undo average filter."""

            # ai indexes the sample one filter unit to the left; it is
            # negative (and the left sample treated as 0) until the
            # loop passes the first pixel.
            ai = -fu
            for i in range(len(result)):
                x = scanline[i]
                if ai < 0:
                    a = 0
                else:
                    a = result[ai]
                b = previous[i]
                result[i] = (x + ((a + b) >> 1)) & 0xff
                ai += 1

        def paeth():
            """Undo Paeth filter."""

            # Also used for ci.
            ai = -fu
            for i in range(len(result)):
                x = scanline[i]
                if ai < 0:
                    a = c = 0
                else:
                    a = result[ai]
                    c = previous[ai]
                b = previous[i]
                # Paeth predictor: pick whichever of left (a), above
                # (b), upper-left (c) is closest to a + b - c.
                p = a + b - c
                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)
                if pa <= pb and pa <= pc:
                    pr = a
                elif pb <= pc:
                    pr = b
                else:
                    pr = c
                result[i] = (x + pr) & 0xff
                ai += 1

        # Call appropriate filter algorithm.  Note that 0 has already
        # been dealt with.
        (None, sub, up, average, paeth)[filter_type]()
        return result
-
    def deinterlace(self, raw):
        """
        Read raw pixel data, undo filters, deinterlace, and flatten.
        Return in flat row flat pixel format.

        `raw` is the concatenated filtered scanlines of all seven
        Adam7 reduced passes, in pass order.
        """

        # print >> sys.stderr, ("Reading interlaced, w=%s, r=%s, planes=%s," +
        #     " bpp=%s") % (self.width, self.height, self.planes, self.bps)
        # Values per row (of the target image)
        vpr = self.width * self.planes

        # Make a result array, and make it big enough.  Interleaving
        # writes to the output array randomly (well, not quite), so the
        # entire output array must be in memory.
        # 'B' for samples up to 8 bits, 'H' for 16-bit samples.
        fmt = 'BH'[self.bitdepth > 8]
        a = array(fmt, [0]*vpr*self.height)
        source_offset = 0

        for xstart, ystart, xstep, ystep in _adam7:
            # print >> sys.stderr, "Adam7: start=%s,%s step=%s,%s" % (
            #     xstart, ystart, xstep, ystep)
            # A pass is empty when the image is narrower than the
            # pass's starting column.
            if xstart >= self.width:
                continue
            # The previous (reconstructed) scanline.  None at the
            # beginning of a pass to indicate that there is no previous
            # line.
            recon = None
            # Pixels per row (reduced pass image)
            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
            # Row size in bytes for this pass.
            row_size = int(math.ceil(self.psize * ppr))
            for y in range(ystart, self.height, ystep):
                # Each scanline is preceded by its filter-type byte.
                filter_type = raw[source_offset]
                source_offset += 1
                scanline = raw[source_offset:source_offset+row_size]
                source_offset += row_size
                recon = self.undo_filter(filter_type, scanline, recon)
                # Convert so that there is one element per pixel value
                flat = self.serialtoflat(recon, ppr)
                if xstep == 1:
                    # Pass covers whole rows; copy the row in one go.
                    assert xstart == 0
                    offset = y * vpr
                    a[offset:offset+vpr] = flat
                else:
                    # Scatter this pass's pixels into place, one plane
                    # at a time, using extended slices.
                    offset = y * vpr + xstart * self.planes
                    end_offset = (y+1) * vpr
                    skip = self.planes * xstep
                    for i in range(self.planes):
                        a[offset+i:end_offset:skip] = \
                            flat[i::self.planes]
        return a
-
- def iterboxed(self, rows):
- """Iterator that yields each scanline in boxed row flat pixel
- format. `rows` should be an iterator that yields the bytes of
- each row in turn.
- """
-
- def asvalues(raw):
- """Convert a row of raw bytes into a flat row. Result may
- or may not share with argument"""
-
- if self.bitdepth == 8:
- return raw
- if self.bitdepth == 16:
- raw = tostring(raw)
- return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
- assert self.bitdepth < 8
- width = self.width
- # Samples per byte
- spb = 8//self.bitdepth
- out = array('B')
- mask = 2**self.bitdepth - 1
- shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
- for o in raw:
- out.extend(map(lambda i: mask&(o>>i), shifts))
- return out[:width]
-
- return itertools.imap(asvalues, rows)
-
- def serialtoflat(self, bytes, width=None):
- """Convert serial format (byte stream) pixel data to flat row
- flat pixel.
- """
-
- if self.bitdepth == 8:
- return bytes
- if self.bitdepth == 16:
- bytes = tostring(bytes)
- return array('H',
- struct.unpack('!%dH' % (len(bytes)//2), bytes))
- assert self.bitdepth < 8
- if width is None:
- width = self.width
- # Samples per byte
- spb = 8//self.bitdepth
- out = array('B')
- mask = 2**self.bitdepth - 1
- shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
- l = width
- for o in bytes:
- out.extend([(mask&(o>>s)) for s in shifts][:l])
- l -= spb
- if l <= 0:
- l = width
- return out
-
- def iterstraight(self, raw):
- """Iterator that undoes the effect of filtering, and yields each
- row in serialised format (as a sequence of bytes). Assumes input
- is straightlaced. `raw` should be an iterable that yields the
- raw bytes in chunks of arbitrary size."""
-
- # length of row, in bytes
- rb = self.row_bytes
- a = array('B')
- # The previous (reconstructed) scanline. None indicates first
- # line of image.
- recon = None
- for some in raw:
- a.extend(some)
- while len(a) >= rb + 1:
- filter_type = a[0]
- scanline = a[1:rb+1]
- del a[:rb+1]
- recon = self.undo_filter(filter_type, scanline, recon)
- yield recon
- if len(a) != 0:
- # :file:format We get here with a file format error: when the
- # available bytes (after decompressing) do not pack into exact
- # rows.
- raise FormatError(
- 'Wrong size for decompressed IDAT chunk.')
- assert len(a) == 0
-
- def validate_signature(self):
- """If signature (header) has not been read then read and
- validate it; otherwise do nothing.
- """
-
- if self.signature:
- return
- self.signature = self.file.read(8)
- if self.signature != _signature:
- raise FormatError("PNG file has invalid signature.")
-
- def preamble(self):
- """
- Extract the image metadata by reading the initial part of the PNG
- file up to the start of the ``IDAT`` chunk. All the chunks that
- precede the ``IDAT`` chunk are read and either processed for
- metadata or discarded.
- """
-
- self.validate_signature()
-
- while True:
- if not self.atchunk:
- self.atchunk = self.chunklentype()
- if self.atchunk is None:
- raise FormatError(
- 'This PNG file has no IDAT chunks.')
- if self.atchunk[1] == 'IDAT':
- return
- self.process_chunk()
-
- def chunklentype(self):
- """Reads just enough of the input to determine the next
- chunk's length and type, returned as a (*length*, *type*) pair
- where *type* is a string. If there are no more chunks, ``None``
- is returned.
- """
-
- x = self.file.read(8)
- if not x:
- return None
- if len(x) != 8:
- raise FormatError(
- 'End of file whilst reading chunk length and type.')
- length,type = struct.unpack('!I4s', x)
- type = bytestostr(type)
- if length > 2**31-1:
- raise FormatError('Chunk %s is too large: %d.' % (type,length))
- return length,type
-
- def process_chunk(self):
- """Process the next chunk and its data. This only processes the
- following chunk types, all others are ignored: ``IHDR``,
- ``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``.
- """
-
- type, data = self.chunk()
- if type == 'IHDR':
- # http://www.w3.org/TR/PNG/#11IHDR
- if len(data) != 13:
- raise FormatError('IHDR chunk has incorrect length.')
- (self.width, self.height, self.bitdepth, self.color_type,
- self.compression, self.filter,
- self.interlace) = struct.unpack("!2I5B", data)
-
- # Check that the header specifies only valid combinations.
- if self.bitdepth not in (1,2,4,8,16):
- raise Error("invalid bit depth %d" % self.bitdepth)
- if self.color_type not in (0,2,3,4,6):
- raise Error("invalid colour type %d" % self.color_type)
- # Check indexed (palettized) images have 8 or fewer bits
- # per pixel; check only indexed or greyscale images have
- # fewer than 8 bits per pixel.
- if ((self.color_type & 1 and self.bitdepth > 8) or
- (self.bitdepth < 8 and self.color_type not in (0,3))):
- raise FormatError("Illegal combination of bit depth (%d)"
- " and colour type (%d)."
- " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
- % (self.bitdepth, self.color_type))
- if self.compression != 0:
- raise Error("unknown compression method %d" % self.compression)
- if self.filter != 0:
- raise FormatError("Unknown filter method %d,"
- " see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
- % self.filter)
- if self.interlace not in (0,1):
- raise FormatError("Unknown interlace method %d,"
- " see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
- % self.interlace)
-
- # Derived values
- # http://www.w3.org/TR/PNG/#6Colour-values
- colormap = bool(self.color_type & 1)
- greyscale = not (self.color_type & 2)
- alpha = bool(self.color_type & 4)
- color_planes = (3,1)[greyscale or colormap]
- planes = color_planes + alpha
-
- self.colormap = colormap
- self.greyscale = greyscale
- self.alpha = alpha
- self.color_planes = color_planes
- self.planes = planes
- self.psize = float(self.bitdepth)/float(8) * planes
- if int(self.psize) == self.psize:
- self.psize = int(self.psize)
- self.row_bytes = int(math.ceil(self.width * self.psize))
- # Stores PLTE chunk if present, and is used to check
- # chunk ordering constraints.
- self.plte = None
- # Stores tRNS chunk if present, and is used to check chunk
- # ordering constraints.
- self.trns = None
- # Stores sbit chunk if present.
- self.sbit = None
- elif type == 'PLTE':
- # http://www.w3.org/TR/PNG/#11PLTE
- if self.plte:
- warnings.warn("Multiple PLTE chunks present.")
- self.plte = data
- if len(data) % 3 != 0:
- raise FormatError(
- "PLTE chunk's length should be a multiple of 3.")
- if len(data) > (2**self.bitdepth)*3:
- raise FormatError("PLTE chunk is too long.")
- if len(data) == 0:
- raise FormatError("Empty PLTE is not allowed.")
- elif type == 'bKGD':
- try:
- if self.colormap:
- if not self.plte:
- warnings.warn(
- "PLTE chunk is required before bKGD chunk.")
- self.background = struct.unpack('B', data)
- else:
- self.background = struct.unpack("!%dH" % self.color_planes,
- data)
- except struct.error:
- raise FormatError("bKGD chunk has incorrect length.")
- elif type == 'tRNS':
- # http://www.w3.org/TR/PNG/#11tRNS
- self.trns = data
- if self.colormap:
- if not self.plte:
- warnings.warn("PLTE chunk is required before tRNS chunk.")
- else:
- if len(data) > len(self.plte)/3:
- # Was warning, but promoted to Error as it
- # would otherwise cause pain later on.
- raise FormatError("tRNS chunk is too long.")
- else:
- if self.alpha:
- raise FormatError(
- "tRNS chunk is not valid with colour type %d." %
- self.color_type)
- try:
- self.transparent = \
- struct.unpack("!%dH" % self.color_planes, data)
- except struct.error:
- raise FormatError("tRNS chunk has incorrect length.")
- elif type == 'gAMA':
- try:
- self.gamma = struct.unpack("!L", data)[0] / 100000.0
- except struct.error:
- raise FormatError("gAMA chunk has incorrect length.")
- elif type == 'sBIT':
- self.sbit = data
- if (self.colormap and len(data) != 3 or
- not self.colormap and len(data) != self.planes):
- raise FormatError("sBIT chunk has incorrect length.")
-
- def read(self):
- """
- Read the PNG file and decode it. Returns (`width`, `height`,
- `pixels`, `metadata`).
-
- May use excessive memory.
-
- `pixels` are returned in boxed row flat pixel format.
- """
-
- def iteridat():
- """Iterator that yields all the ``IDAT`` chunks as strings."""
- while True:
- try:
- type, data = self.chunk()
- except ValueError, e:
- raise ChunkError(e.args[0])
- if type == 'IEND':
- # http://www.w3.org/TR/PNG/#11IEND
- break
- if type != 'IDAT':
- continue
- # type == 'IDAT'
- # http://www.w3.org/TR/PNG/#11IDAT
- if self.colormap and not self.plte:
- warnings.warn("PLTE chunk is required before IDAT chunk")
- yield data
-
- def iterdecomp(idat):
- """Iterator that yields decompressed strings. `idat` should
- be an iterator that yields the ``IDAT`` chunk data.
- """
-
- # Currently, with no max_length paramter to decompress, this
- # routine will do one yield per IDAT chunk. So not very
- # incremental.
- d = zlib.decompressobj()
- # Each IDAT chunk is passed to the decompressor, then any
- # remaining state is decompressed out.
- for data in idat:
- # :todo: add a max_length argument here to limit output
- # size.
- yield array('B', d.decompress(data))
- yield array('B', d.flush())
-
- self.preamble()
- raw = iterdecomp(iteridat())
-
- if self.interlace:
- raw = array('B', itertools.chain(*raw))
- arraycode = 'BH'[self.bitdepth>8]
- # Like :meth:`group` but producing an array.array object for
- # each row.
- pixels = itertools.imap(lambda *row: array(arraycode, row),
- *[iter(self.deinterlace(raw))]*self.width*self.planes)
- else:
- pixels = self.iterboxed(self.iterstraight(raw))
- meta = dict()
- for attr in 'greyscale alpha planes bitdepth interlace'.split():
- meta[attr] = getattr(self, attr)
- meta['size'] = (self.width, self.height)
- for attr in 'gamma transparent background'.split():
- a = getattr(self, attr, None)
- if a is not None:
- meta[attr] = a
- return self.width, self.height, pixels, meta
-
-
- def read_flat(self):
- """
- Read a PNG file and decode it into flat row flat pixel format.
- Returns (*width*, *height*, *pixels*, *metadata*).
-
- May use excessive memory.
-
- `pixels` are returned in flat row flat pixel format.
-
- See also the :meth:`read` method which returns pixels in the
- more stream-friendly boxed row flat pixel format.
- """
-
- x, y, pixel, meta = self.read()
- arraycode = 'BH'[meta['bitdepth']>8]
- pixel = array(arraycode, itertools.chain(*pixel))
- return x, y, pixel, meta
-
- def palette(self, alpha='natural'):
- """Returns a palette that is a sequence of 3-tuples or 4-tuples,
- synthesizing it from the ``PLTE`` and ``tRNS`` chunks. These
- chunks should have already been processed (for example, by
- calling the :meth:`preamble` method). All the tuples are the
- same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
- there is a ``tRNS`` chunk. Assumes that the image is colour type
- 3 and therefore a ``PLTE`` chunk is required.
-
- If the `alpha` argument is ``'force'`` then an alpha channel is
- always added, forcing the result to be a sequence of 4-tuples.
- """
-
- if not self.plte:
- raise FormatError(
- "Required PLTE chunk is missing in colour type 3 image.")
- plte = group(array('B', self.plte), 3)
- if self.trns or alpha == 'force':
- trns = array('B', self.trns or '')
- trns.extend([255]*(len(plte)-len(trns)))
- plte = map(operator.add, plte, group(trns, 1))
- return plte
-
- def asDirect(self):
- """Returns the image data as a direct representation of an
- ``x * y * planes`` array. This method is intended to remove the
- need for callers to deal with palettes and transparency
- themselves. Images with a palette (colour type 3)
- are converted to RGB or RGBA; images with transparency (a
- ``tRNS`` chunk) are converted to LA or RGBA as appropriate.
- When returned in this format the pixel values represent the
- colour value directly without needing to refer to palettes or
- transparency information.
-
- Like the :meth:`read` method this method returns a 4-tuple:
-
- (*width*, *height*, *pixels*, *meta*)
-
- This method normally returns pixel values with the bit depth
- they have in the source image, but when the source PNG has an
- ``sBIT`` chunk it is inspected and can reduce the bit depth of
- the result pixels; pixel values will be reduced according to
- the bit depth specified in the ``sBIT`` chunk (PNG nerds should
- note a single result bit depth is used for all channels; the
- maximum of the ones specified in the ``sBIT`` chunk. An RGB565
- image will be rescaled to 6-bit RGB666).
-
- The *meta* dictionary that is returned reflects the `direct`
- format and not the original source image. For example, an RGB
- source image with a ``tRNS`` chunk to represent a transparent
- colour, will have ``planes=3`` and ``alpha=False`` for the
- source image, but the *meta* dictionary returned by this method
- will have ``planes=4`` and ``alpha=True`` because an alpha
- channel is synthesized and added.
-
- *pixels* is the pixel data in boxed row flat pixel format (just
- like the :meth:`read` method).
-
- All the other aspects of the image data are not changed.
- """
-
- self.preamble()
-
- # Simple case, no conversion necessary.
- if not self.colormap and not self.trns and not self.sbit:
- return self.read()
-
- x,y,pixels,meta = self.read()
-
- if self.colormap:
- meta['colormap'] = False
- meta['alpha'] = bool(self.trns)
- meta['bitdepth'] = 8
- meta['planes'] = 3 + bool(self.trns)
- plte = self.palette()
- def iterpal(pixels):
- for row in pixels:
- row = map(plte.__getitem__, row)
- yield array('B', itertools.chain(*row))
- pixels = iterpal(pixels)
- elif self.trns:
- # It would be nice if there was some reasonable way of doing
- # this without generating a whole load of intermediate tuples.
- # But tuples does seem like the easiest way, with no other way
- # clearly much simpler or much faster. (Actually, the L to LA
- # conversion could perhaps go faster (all those 1-tuples!), but
- # I still wonder whether the code proliferation is worth it)
- it = self.transparent
- maxval = 2**meta['bitdepth']-1
- planes = meta['planes']
- meta['alpha'] = True
- meta['planes'] += 1
- typecode = 'BH'[meta['bitdepth']>8]
- def itertrns(pixels):
- for row in pixels:
- # For each row we group it into pixels, then form a
- # characterisation vector that says whether each pixel
- # is opaque or not. Then we convert True/False to
- # 0/maxval (by multiplication), and add it as the extra
- # channel.
- row = group(row, planes)
- opa = map(it.__ne__, row)
- opa = map(maxval.__mul__, opa)
- opa = zip(opa) # convert to 1-tuples
- yield array(typecode,
- itertools.chain(*map(operator.add, row, opa)))
- pixels = itertrns(pixels)
- targetbitdepth = None
- if self.sbit:
- sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
- targetbitdepth = max(sbit)
- if targetbitdepth > meta['bitdepth']:
- raise Error('sBIT chunk %r exceeds bitdepth %d' %
- (sbit,self.bitdepth))
- if min(sbit) <= 0:
- raise Error('sBIT chunk %r has a 0-entry' % sbit)
- if targetbitdepth == meta['bitdepth']:
- targetbitdepth = None
- if targetbitdepth:
- shift = meta['bitdepth'] - targetbitdepth
- meta['bitdepth'] = targetbitdepth
- def itershift(pixels):
- for row in pixels:
- yield map(shift.__rrshift__, row)
- pixels = itershift(pixels)
- return x,y,pixels,meta
-
- def asFloat(self, maxval=1.0):
- """Return image pixels as per :meth:`asDirect` method, but scale
- all pixel values to be floating point values between 0.0 and
- *maxval*.
- """
-
- x,y,pixels,info = self.asDirect()
- sourcemaxval = 2**info['bitdepth']-1
- del info['bitdepth']
- info['maxval'] = float(maxval)
- factor = float(maxval)/float(sourcemaxval)
- def iterfloat():
- for row in pixels:
- yield map(factor.__mul__, row)
- return x,y,iterfloat(),info
-
- def _as_rescale(self, get, targetbitdepth):
- """Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
-
- width,height,pixels,meta = get()
- maxval = 2**meta['bitdepth'] - 1
- targetmaxval = 2**targetbitdepth - 1
- factor = float(targetmaxval) / float(maxval)
- meta['bitdepth'] = targetbitdepth
- def iterscale():
- for row in pixels:
- yield map(lambda x: int(round(x*factor)), row)
- return width, height, iterscale(), meta
-
- def asRGB8(self):
- """Return the image data as an RGB pixels with 8-bits per
- sample. This is like the :meth:`asRGB` method except that
- this method additionally rescales the values so that they
- are all between 0 and 255 (8-bit). In the case where the
- source image has a bit depth < 8 the transformation preserves
- all the information; where the source image has bit depth
- > 8, then rescaling to 8-bit values loses precision. No
- dithering is performed. Like :meth:`asRGB`, an alpha channel
- in the source image will raise an exception.
-
- This function returns a 4-tuple:
- (*width*, *height*, *pixels*, *metadata*).
- *width*, *height*, *metadata* are as per the :meth:`read` method.
-
- *pixels* is the pixel data in boxed row flat pixel format.
- """
-
- return self._as_rescale(self.asRGB, 8)
-
- def asRGBA8(self):
- """Return the image data as RGBA pixels with 8-bits per
- sample. This method is similar to :meth:`asRGB8` and
- :meth:`asRGBA`: The result pixels have an alpha channel, *and*
- values are rescaled to the range 0 to 255. The alpha channel is
- synthesized if necessary (with a small speed penalty).
- """
-
- return self._as_rescale(self.asRGBA, 8)
-
- def asRGB(self):
- """Return image as RGB pixels. RGB colour images are passed
- through unchanged; greyscales are expanded into RGB
- triplets (there is a small speed overhead for doing this).
-
- An alpha channel in the source image will raise an
- exception.
-
- The return values are as for the :meth:`read` method
- except that the *metadata* reflect the returned pixels, not the
- source image. In particular, for this method
- ``metadata['greyscale']`` will be ``False``.
- """
-
- width,height,pixels,meta = self.asDirect()
- if meta['alpha']:
- raise Error("will not convert image with alpha channel to RGB")
- if not meta['greyscale']:
- return width,height,pixels,meta
- meta['greyscale'] = False
- typecode = 'BH'[meta['bitdepth'] > 8]
- def iterrgb():
- for row in pixels:
- a = array(typecode, [0]) * 3 * width
- for i in range(3):
- a[i::3] = row
- yield a
- return width,height,iterrgb(),meta
-
- def asRGBA(self):
- """Return image as RGBA pixels. Greyscales are expanded into
- RGB triplets; an alpha channel is synthesized if necessary.
- The return values are as for the :meth:`read` method
- except that the *metadata* reflect the returned pixels, not the
- source image. In particular, for this method
- ``metadata['greyscale']`` will be ``False``, and
- ``metadata['alpha']`` will be ``True``.
- """
-
- width,height,pixels,meta = self.asDirect()
- if meta['alpha'] and not meta['greyscale']:
- return width,height,pixels,meta
- typecode = 'BH'[meta['bitdepth'] > 8]
- maxval = 2**meta['bitdepth'] - 1
- def newarray():
- return array(typecode, [0]) * 4 * width
- if meta['alpha'] and meta['greyscale']:
- # LA to RGBA
- def convert():
- for row in pixels:
- # Create a fresh target row, then copy L channel
- # into first three target channels, and A channel
- # into fourth channel.
- a = newarray()
- for i in range(3):
- a[i::4] = row[0::2]
- a[3::4] = row[1::2]
- yield a
- elif meta['greyscale']:
- # L to RGBA
- def convert():
- for row in pixels:
- a = newarray()
- for i in range(3):
- a[i::4] = row
- a[3::4] = array(typecode, [maxval]) * width
- yield a
- else:
- assert not meta['alpha'] and not meta['greyscale']
- # RGB to RGBA
- def convert():
- for row in pixels:
- a = newarray()
- for i in range(3):
- a[i::4] = row[i::3]
- a[3::4] = array(typecode, [maxval]) * width
- yield a
- meta['alpha'] = True
- meta['greyscale'] = False
- return width,height,convert(),meta
-
-
-# === Legacy Version Support ===
-
-# :pyver:old: PyPNG works on Python versions 2.3 and 2.2, but not
-# without some awkward problems. Really PyPNG works on Python 2.4 (and
-# above); it works on Pythons 2.3 and 2.2 by virtue of fixing up
-# problems here. It's a bit ugly (which is why it's hidden down here).
-#
-# Generally the strategy is one of pretending that we're running on
-# Python 2.4 (or above), and patching up the library support on earlier
-# versions so that it looks enough like Python 2.4. When it comes to
-# Python 2.2 there is one thing we cannot patch: extended slices
-# http://www.python.org/doc/2.3/whatsnew/section-slices.html.
-# Instead we simply declare that features that are implemented using
-# extended slices will not work on Python 2.2.
-#
-# In order to work on Python 2.3 we fix up a recurring annoyance involving
-# the array type. In Python 2.3 an array cannot be initialised with an
-# array, and it cannot be extended with a list (or other sequence).
-# Both of those are repeated issues in the code. Whilst I would not
-# normally tolerate this sort of behaviour, here we "shim" a replacement
-# for array into place (and hope no-ones notices). You never read this.
-#
-# In an amusing case of warty hacks on top of warty hacks... the array
-# shimming we try and do only works on Python 2.3 and above (you can't
-# subclass array.array in Python 2.2). So to get it working on Python
-# 2.2 we go for something much simpler and (probably) way slower.
-try:
- array('B').extend([])
- array('B', array('B'))
-except:
- # Expect to get here on Python 2.3
- try:
- class _array_shim(array):
- true_array = array
- def __new__(cls, typecode, init=None):
- super_new = super(_array_shim, cls).__new__
- it = super_new(cls, typecode)
- if init is None:
- return it
- it.extend(init)
- return it
- def extend(self, extension):
- super_extend = super(_array_shim, self).extend
- if isinstance(extension, self.true_array):
- return super_extend(extension)
- if not isinstance(extension, (list, str)):
- # Convert to list. Allows iterators to work.
- extension = list(extension)
- return super_extend(self.true_array(self.typecode, extension))
- array = _array_shim
- except:
- # Expect to get here on Python 2.2
- def array(typecode, init=()):
- if type(init) == str:
- return map(ord, init)
- return list(init)
-
-# Further hacks to get it limping along on Python 2.2
-try:
- enumerate
-except:
- def enumerate(seq):
- i=0
- for x in seq:
- yield i,x
- i += 1
-
-try:
- reversed
-except:
- def reversed(l):
- l = list(l)
- l.reverse()
- for x in l:
- yield x
-
-try:
- itertools
-except:
- class _dummy_itertools:
- pass
- itertools = _dummy_itertools()
- def _itertools_imap(f, seq):
- for x in seq:
- yield f(x)
- itertools.imap = _itertools_imap
- def _itertools_chain(*iterables):
- for it in iterables:
- for element in it:
- yield element
- itertools.chain = _itertools_chain
-
-
-
-# === Internal Test Support ===
-
-# This section comprises the tests that are internally validated (as
-# opposed to tests which produce output files that are externally
-# validated). Primarily they are unittests.
-
-# Note that it is difficult to internally validate the results of
-# writing a PNG file. The only thing we can do is read it back in
-# again, which merely checks consistency, not that the PNG file we
-# produce is valid.
-
-# Run the tests from the command line:
-# python -c 'import png;png.test()'
-
-# (For an in-memory binary file IO object) We use BytesIO where
-# available, otherwise we use StringIO, but name it BytesIO.
-try:
- from io import BytesIO
-except:
- from StringIO import StringIO as BytesIO
-import tempfile
-# http://www.python.org/doc/2.4.4/lib/module-unittest.html
-import unittest
-
-
-def test():
- unittest.main(__name__)
-
-def topngbytes(name, rows, x, y, **k):
- """Convenience function for creating a PNG file "in memory" as a
- string. Creates a :class:`Writer` instance using the keyword arguments,
- then passes `rows` to its :meth:`Writer.write` method. The resulting
- PNG file is returned as a string. `name` is used to identify the file for
- debugging.
- """
-
- import os
-
- print name
- f = BytesIO()
- w = Writer(x, y, **k)
- w.write(f, rows)
- if os.environ.get('PYPNG_TEST_TMP'):
- w = open(name, 'wb')
- w.write(f.getvalue())
- w.close()
- return f.getvalue()
-
-def testWithIO(inp, out, f):
- """Calls the function `f` with ``sys.stdin`` changed to `inp`
- and ``sys.stdout`` changed to `out`. They are restored when `f`
- returns. This function returns whatever `f` returns.
- """
-
- import os
-
- try:
- oldin,sys.stdin = sys.stdin,inp
- oldout,sys.stdout = sys.stdout,out
- x = f()
- finally:
- sys.stdin = oldin
- sys.stdout = oldout
- if os.environ.get('PYPNG_TEST_TMP') and hasattr(out,'getvalue'):
- name = mycallersname()
- if name:
- w = open(name+'.png', 'wb')
- w.write(out.getvalue())
- w.close()
- return x
-
-def mycallersname():
- """Returns the name of the caller of the caller of this function
- (hence the name of the caller of the function in which
- "mycallersname()" textually appears). Returns None if this cannot
- be determined."""
-
- # http://docs.python.org/library/inspect.html#the-interpreter-stack
- import inspect
-
- frame = inspect.currentframe()
- if not frame:
- return None
- frame_,filename_,lineno_,funname,linelist_,listi_ = (
- inspect.getouterframes(frame)[2])
- return funname
-
-def seqtobytes(s):
- """Convert a sequence of integers to a *bytes* instance. Good for
- plastering over Python 2 / Python 3 cracks.
- """
-
- return strtobytes(''.join(chr(x) for x in s))
-
-class Test(unittest.TestCase):
- # This member is used by the superclass. If we don't define a new
- # class here then when we use self.assertRaises() and the PyPNG code
- # raises an assertion then we get no proper traceback. I can't work
- # out why, but defining a new class here means we get a proper
- # traceback.
- class failureException(Exception):
- pass
-
- def helperLN(self, n):
- mask = (1 << n) - 1
- # Use small chunk_limit so that multiple chunk writing is
- # tested. Making it a test for Issue 20.
- w = Writer(15, 17, greyscale=True, bitdepth=n, chunk_limit=99)
- f = BytesIO()
- w.write_array(f, array('B', map(mask.__and__, range(1, 256))))
- r = Reader(bytes=f.getvalue())
- x,y,pixels,meta = r.read()
- self.assertEqual(x, 15)
- self.assertEqual(y, 17)
- self.assertEqual(list(itertools.chain(*pixels)),
- map(mask.__and__, range(1,256)))
- def testL8(self):
- return self.helperLN(8)
- def testL4(self):
- return self.helperLN(4)
- def testL2(self):
- "Also tests asRGB8."
- w = Writer(1, 4, greyscale=True, bitdepth=2)
- f = BytesIO()
- w.write_array(f, array('B', range(4)))
- r = Reader(bytes=f.getvalue())
- x,y,pixels,meta = r.asRGB8()
- self.assertEqual(x, 1)
- self.assertEqual(y, 4)
- for i,row in enumerate(pixels):
- self.assertEqual(len(row), 3)
- self.assertEqual(list(row), [0x55*i]*3)
- def testP2(self):
- "2-bit palette."
- a = (255,255,255)
- b = (200,120,120)
- c = (50,99,50)
- w = Writer(1, 4, bitdepth=2, palette=[a,b,c])
- f = BytesIO()
- w.write_array(f, array('B', (0,1,1,2)))
- r = Reader(bytes=f.getvalue())
- x,y,pixels,meta = r.asRGB8()
- self.assertEqual(x, 1)
- self.assertEqual(y, 4)
- self.assertEqual(list(pixels), map(list, [a, b, b, c]))
- def testPtrns(self):
- "Test colour type 3 and tRNS chunk (and 4-bit palette)."
- a = (50,99,50,50)
- b = (200,120,120,80)
- c = (255,255,255)
- d = (200,120,120)
- e = (50,99,50)
- w = Writer(3, 3, bitdepth=4, palette=[a,b,c,d,e])
- f = BytesIO()
- w.write_array(f, array('B', (4, 3, 2, 3, 2, 0, 2, 0, 1)))
- r = Reader(bytes=f.getvalue())
- x,y,pixels,meta = r.asRGBA8()
- self.assertEqual(x, 3)
- self.assertEqual(y, 3)
- c = c+(255,)
- d = d+(255,)
- e = e+(255,)
- boxed = [(e,d,c),(d,c,a),(c,a,b)]
- flat = map(lambda row: itertools.chain(*row), boxed)
- self.assertEqual(map(list, pixels), map(list, flat))
- def testRGBtoRGBA(self):
- "asRGBA8() on colour type 2 source."""
- # Test for Issue 26
- r = Reader(bytes=_pngsuite['basn2c08'])
- x,y,pixels,meta = r.asRGBA8()
- # Test the pixels at row 9 columns 0 and 1.
- row9 = list(pixels)[9]
- self.assertEqual(row9[0:8],
- [0xff, 0xdf, 0xff, 0xff, 0xff, 0xde, 0xff, 0xff])
- def testLtoRGBA(self):
- "asRGBA() on grey source."""
- # Test for Issue 60
- r = Reader(bytes=_pngsuite['basi0g08'])
- x,y,pixels,meta = r.asRGBA()
- row9 = list(list(pixels)[9])
- self.assertEqual(row9[0:8],
- [222, 222, 222, 255, 221, 221, 221, 255])
- def testCtrns(self):
- "Test colour type 2 and tRNS chunk."
- # Test for Issue 25
- r = Reader(bytes=_pngsuite['tbrn2c08'])
- x,y,pixels,meta = r.asRGBA8()
- # I just happen to know that the first pixel is transparent.
- # In particular it should be #7f7f7f00
- row0 = list(pixels)[0]
- self.assertEqual(tuple(row0[0:4]), (0x7f, 0x7f, 0x7f, 0x00))
- def testAdam7read(self):
- """Adam7 interlace reading.
- Specifically, test that for images in the PngSuite that
- have both an interlaced and straightlaced pair that both
- images from the pair produce the same array of pixels."""
- for candidate in _pngsuite:
- if not candidate.startswith('basn'):
- continue
- candi = candidate.replace('n', 'i')
- if candi not in _pngsuite:
- continue
- print 'adam7 read', candidate
- straight = Reader(bytes=_pngsuite[candidate])
- adam7 = Reader(bytes=_pngsuite[candi])
- # Just compare the pixels. Ignore x,y (because they're
- # likely to be correct?); metadata is ignored because the
- # "interlace" member differs. Lame.
- straight = straight.read()[2]
- adam7 = adam7.read()[2]
- self.assertEqual(map(list, straight), map(list, adam7))
- def testAdam7write(self):
- """Adam7 interlace writing.
- For each test image in the PngSuite, write an interlaced
- and a straightlaced version. Decode both, and compare results.
- """
- # Not such a great test, because the only way we can check what
- # we have written is to read it back again.
-
- for name,bytes in _pngsuite.items():
- # Only certain colour types supported for this test.
- if name[3:5] not in ['n0', 'n2', 'n4', 'n6']:
- continue
- it = Reader(bytes=bytes)
- x,y,pixels,meta = it.read()
- pngi = topngbytes('adam7wn'+name+'.png', pixels,
- x=x, y=y, bitdepth=it.bitdepth,
- greyscale=it.greyscale, alpha=it.alpha,
- transparent=it.transparent,
- interlace=False)
- x,y,ps,meta = Reader(bytes=pngi).read()
- it = Reader(bytes=bytes)
- x,y,pixels,meta = it.read()
- pngs = topngbytes('adam7wi'+name+'.png', pixels,
- x=x, y=y, bitdepth=it.bitdepth,
- greyscale=it.greyscale, alpha=it.alpha,
- transparent=it.transparent,
- interlace=True)
- x,y,pi,meta = Reader(bytes=pngs).read()
- self.assertEqual(map(list, ps), map(list, pi))
- def testPGMin(self):
- """Test that the command line tool can read PGM files."""
- def do():
- return _main(['testPGMin'])
- s = BytesIO()
- s.write(strtobytes('P5 2 2 3\n'))
- s.write(strtobytes('\x00\x01\x02\x03'))
- s.flush()
- s.seek(0)
- o = BytesIO()
- testWithIO(s, o, do)
- r = Reader(bytes=o.getvalue())
- x,y,pixels,meta = r.read()
- self.assertTrue(r.greyscale)
- self.assertEqual(r.bitdepth, 2)
- def testPAMin(self):
- """Test that the command line tool can read PAM file."""
- def do():
- return _main(['testPAMin'])
- s = BytesIO()
- s.write(strtobytes('P7\nWIDTH 3\nHEIGHT 1\nDEPTH 4\nMAXVAL 255\n'
- 'TUPLTYPE RGB_ALPHA\nENDHDR\n'))
- # The pixels in flat row flat pixel format
- flat = [255,0,0,255, 0,255,0,120, 0,0,255,30]
- asbytes = seqtobytes(flat)
- s.write(asbytes)
- s.flush()
- s.seek(0)
- o = BytesIO()
- testWithIO(s, o, do)
- r = Reader(bytes=o.getvalue())
- x,y,pixels,meta = r.read()
- self.assertTrue(r.alpha)
- self.assertTrue(not r.greyscale)
- self.assertEqual(list(itertools.chain(*pixels)), flat)
- def testLA4(self):
- """Create an LA image with bitdepth 4."""
- bytes = topngbytes('la4.png', [[5, 12]], 1, 1,
- greyscale=True, alpha=True, bitdepth=4)
- sbit = Reader(bytes=bytes).chunk('sBIT')[1]
- self.assertEqual(sbit, strtobytes('\x04\x04'))
- def testPNMsbit(self):
- """Test that PNM files can generates sBIT chunk."""
- def do():
- return _main(['testPNMsbit'])
- s = BytesIO()
- s.write(strtobytes('P6 8 1 1\n'))
- for pixel in range(8):
- s.write(struct.pack('<I', (0x4081*pixel)&0x10101)[:3])
- s.flush()
- s.seek(0)
- o = BytesIO()
- testWithIO(s, o, do)
- r = Reader(bytes=o.getvalue())
- sbit = r.chunk('sBIT')[1]
- self.assertEqual(sbit, strtobytes('\x01\x01\x01'))
- def testLtrns0(self):
- """Create greyscale image with tRNS chunk."""
- return self.helperLtrns(0)
- def testLtrns1(self):
- """Using 1-tuple for transparent arg."""
- return self.helperLtrns((0,))
- def helperLtrns(self, transparent):
- """Helper used by :meth:`testLtrns*`."""
- pixels = zip([0x00, 0x38, 0x4c, 0x54, 0x5c, 0x40, 0x38, 0x00])
- o = BytesIO()
- w = Writer(8, 8, greyscale=True, bitdepth=1, transparent=transparent)
- w.write_packed(o, pixels)
- r = Reader(bytes=o.getvalue())
- x,y,pixels,meta = r.asDirect()
- self.assertTrue(meta['alpha'])
- self.assertTrue(meta['greyscale'])
- self.assertEqual(meta['bitdepth'], 1)
- def testWinfo(self):
- """Test the dictionary returned by a `read` method can be used
- as args for :meth:`Writer`.
- """
- r = Reader(bytes=_pngsuite['basn2c16'])
- info = r.read()[3]
- w = Writer(**info)
- def testPackedIter(self):
- """Test iterator for row when using write_packed.
-
- Indicative for Issue 47.
- """
- w = Writer(16, 2, greyscale=True, alpha=False, bitdepth=1)
- o = BytesIO()
- w.write_packed(o, [itertools.chain([0x0a], [0xaa]),
- itertools.chain([0x0f], [0xff])])
- r = Reader(bytes=o.getvalue())
- x,y,pixels,info = r.asDirect()
- pixels = list(pixels)
- self.assertEqual(len(pixels), 2)
- self.assertEqual(len(pixels[0]), 16)
- def testInterlacedArray(self):
- """Test that reading an interlaced PNG yields each row as an
- array."""
- r = Reader(bytes=_pngsuite['basi0g08'])
- list(r.read()[2])[0].tostring
- def testTrnsArray(self):
- """Test that reading a type 2 PNG with tRNS chunk yields each
- row as an array (using asDirect)."""
- r = Reader(bytes=_pngsuite['tbrn2c08'])
- list(r.asDirect()[2])[0].tostring
-
- # Invalid file format tests. These construct various badly
- # formatted PNG files, then feed them into a Reader. When
- # everything is working properly, we should get FormatError
- # exceptions raised.
- def testEmpty(self):
- """Test empty file."""
-
- r = Reader(bytes='')
- self.assertRaises(FormatError, r.asDirect)
- def testSigOnly(self):
- """Test file containing just signature bytes."""
-
- r = Reader(bytes=_signature)
- self.assertRaises(FormatError, r.asDirect)
- def testExtraPixels(self):
- """Test file that contains too many pixels."""
-
- def eachchunk(chunk):
- if chunk[0] != 'IDAT':
- return chunk
- data = zlib.decompress(chunk[1])
- data += strtobytes('\x00garbage')
- data = zlib.compress(data)
- chunk = (chunk[0], data)
- return chunk
- self.assertRaises(FormatError, self.helperFormat, eachchunk)
- def testNotEnoughPixels(self):
- def eachchunk(chunk):
- if chunk[0] != 'IDAT':
- return chunk
- # Remove last byte.
- data = zlib.decompress(chunk[1])
- data = data[:-1]
- data = zlib.compress(data)
- return (chunk[0], data)
- self.assertRaises(FormatError, self.helperFormat, eachchunk)
- def helperFormat(self, f):
- r = Reader(bytes=_pngsuite['basn0g01'])
- o = BytesIO()
- def newchunks():
- for chunk in r.chunks():
- yield f(chunk)
- write_chunks(o, newchunks())
- r = Reader(bytes=o.getvalue())
- return list(r.asDirect()[2])
- def testBadFilter(self):
- def eachchunk(chunk):
- if chunk[0] != 'IDAT':
- return chunk
- data = zlib.decompress(chunk[1])
- # Corrupt the first filter byte
- data = strtobytes('\x99') + data[1:]
- data = zlib.compress(data)
- return (chunk[0], data)
- self.assertRaises(FormatError, self.helperFormat, eachchunk)
- def testFlat(self):
- """Test read_flat."""
- import hashlib
-
- r = Reader(bytes=_pngsuite['basn0g02'])
- x,y,pixel,meta = r.read_flat()
- d = hashlib.md5(seqtobytes(pixel)).digest()
- self.assertEqual(_enhex(d), '255cd971ab8cd9e7275ff906e5041aa0')
- def testfromarray(self):
- img = from_array([[0, 0x33, 0x66], [0xff, 0xcc, 0x99]], 'L')
- img.save('testfromarray.png')
- def testfromarrayL16(self):
- img = from_array(group(range(2**16), 256), 'L;16')
- img.save('testL16.png')
- def testfromarrayRGB(self):
- img = from_array([[0,0,0, 0,0,1, 0,1,0, 0,1,1],
- [1,0,0, 1,0,1, 1,1,0, 1,1,1]], 'RGB;1')
- o = BytesIO()
- img.save(o)
- def testfromarrayIter(self):
- import itertools
-
- i = itertools.islice(itertools.count(10), 20)
- i = itertools.imap(lambda x: [x, x, x], i)
- img = from_array(i, 'RGB;5', dict(height=20))
- f = open('testiter.png', 'wb')
- img.save(f)
- f.close()
-
- # numpy dependent tests. These are skipped (with a message to
- # sys.stderr) if numpy cannot be imported.
- def testNumpyuint16(self):
- """numpy uint16."""
-
- try:
- import numpy
- except ImportError:
- print >>sys.stderr, "skipping numpy test"
- return
-
- rows = [map(numpy.uint16, range(0,0x10000,0x5555))]
- b = topngbytes('numpyuint16.png', rows, 4, 1,
- greyscale=True, alpha=False, bitdepth=16)
- def testNumpyuint8(self):
- """numpy uint8."""
-
- try:
- import numpy
- except ImportError:
- print >>sys.stderr, "skipping numpy test"
- return
-
- rows = [map(numpy.uint8, range(0,0x100,0x55))]
- b = topngbytes('numpyuint8.png', rows, 4, 1,
- greyscale=True, alpha=False, bitdepth=8)
- def testNumpybool(self):
- """numpy bool."""
-
- try:
- import numpy
- except ImportError:
- print >>sys.stderr, "skipping numpy test"
- return
-
- rows = [map(numpy.bool, [0,1])]
- b = topngbytes('numpybool.png', rows, 2, 1,
- greyscale=True, alpha=False, bitdepth=1)
- def testNumpyarray(self):
- """numpy array."""
- try:
- import numpy
- except ImportError:
- print >>sys.stderr, "skipping numpy test"
- return
-
- pixels = numpy.array([[0,0x5555],[0x5555,0xaaaa]], numpy.uint16)
- img = from_array(pixels, 'L')
- img.save('testnumpyL16.png')
-
-# === Command Line Support ===
-
-def _dehex(s):
- """Liberally convert from hex string to binary string."""
- import re
- import binascii
-
- # Remove all non-hexadecimal digits
- s = re.sub(r'[^a-fA-F\d]', '', s)
- # binscii.unhexlify works in Python 2 and Python 3 (unlike
- # thing.decode('hex')).
- return binascii.unhexlify(strtobytes(s))
-def _enhex(s):
- """Convert from binary string (bytes) to hex string (str)."""
-
- import binascii
-
- return bytestostr(binascii.hexlify(s))
-
-# Copies of PngSuite test files taken
-# from http://www.schaik.com/pngsuite/pngsuite_bas_png.html
-# on 2009-02-19 by drj and converted to hex.
-# Some of these are not actually in PngSuite (but maybe they should
-# be?), they use the same naming scheme, but start with a capital
-# letter.
-_pngsuite = {
- 'basi0g01': _dehex("""
-89504e470d0a1a0a0000000d49484452000000200000002001000000012c0677
-cf0000000467414d41000186a031e8965f0000009049444154789c2d8d310ec2
-300c45dfc682c415187a00a42e197ab81e83b127e00c5639001363a580d8582c
-65c910357c4b78b0bfbfdf4f70168c19e7acb970a3f2d1ded9695ce5bf5963df
-d92aaf4c9fd927ea449e6487df5b9c36e799b91bdf082b4d4bd4014fe4014b01
-ab7a17aee694d28d328a2d63837a70451e1648702d9a9ff4a11d2f7a51aa21e5
-a18c7ffd0094e3511d661822f20000000049454e44ae426082
-"""),
- 'basi0g02': _dehex("""
-89504e470d0a1a0a0000000d49484452000000200000002002000000016ba60d
-1f0000000467414d41000186a031e8965f0000005149444154789c635062e860
-00e17286bb609c93c370ec189494960631366e4467b3ae675dcf10f521ea0303
-90c1ca006444e11643482064114a4852c710baea3f18c31918020c30410403a6
-0ac1a09239009c52804d85b6d97d0000000049454e44ae426082
-"""),
- 'basi0g04': _dehex("""
-89504e470d0a1a0a0000000d4948445200000020000000200400000001e4e6f8
-bf0000000467414d41000186a031e8965f000000ae49444154789c658e5111c2
-301044171c141c141c041c843a287510ea20d441c041c141c141c04191102454
-03994998cecd7edcecedbb9bdbc3b2c2b6457545fbc4bac1be437347f7c66a77
-3c23d60db15e88f5c5627338a5416c2e691a9b475a89cd27eda12895ae8dfdab
-43d61e590764f5c83a226b40d669bec307f93247701687723abf31ff83a2284b
-a5b4ae6b63ac6520ad730ca4ed7b06d20e030369bd6720ed383290360406d24e
-13811f2781eba9d34d07160000000049454e44ae426082
-"""),
- 'basi0g08': _dehex("""
-89504e470d0a1a0a0000000d4948445200000020000000200800000001211615
-be0000000467414d41000186a031e8965f000000b549444154789cb5905d0ac2
-3010849dbac81c42c47bf843cf253e8878b0aa17110f214bdca6be240f5d21a5
-94ced3e49bcd322c1624115515154998aa424822a82a5624a1aa8a8b24c58f99
-999908130989a04a00d76c2c09e76cf21adcb209393a6553577da17140a2c59e
-70ecbfa388dff1f03b82fb82bd07f05f7cb13f80bb07ad2fd60c011c3c588eef
-f1f4e03bbec7ce832dca927aea005e431b625796345307b019c845e6bfc3bb98
-769d84f9efb02ea6c00f9bb9ff45e81f9f280000000049454e44ae426082
-"""),
- 'basi0g16': _dehex("""
-89504e470d0a1a0a0000000d49484452000000200000002010000000017186c9
-fd0000000467414d41000186a031e8965f000000e249444154789cb5913b0ec2
-301044c7490aa8f85d81c3e4301c8f53a4ca0da8902c8144b3920b4043111282
-23bc4956681a6bf5fc3c5a3ba0448912d91a4de2c38dd8e380231eede4c4f7a1
-4677700bec7bd9b1d344689315a3418d1a6efbe5b8305ba01f8ff4808c063e26
-c60d5c81edcf6c58c535e252839e93801b15c0a70d810ae0d306b205dc32b187
-272b64057e4720ff0502154034831520154034c3df81400510cdf0015c86e5cc
-5c79c639fddba9dcb5456b51d7980eb52d8e7d7fa620a75120d6064641a05120
-b606771a05626b401a05f1f589827cf0fe44c1f0bae0055698ee8914fffffe00
-00000049454e44ae426082
-"""),
- 'basi2c08': _dehex("""
-89504e470d0a1a0a0000000d49484452000000200000002008020000018b1fdd
-350000000467414d41000186a031e8965f000000f249444154789cd59341aa04
-210c44abc07b78133d59d37333bd89d76868b566d10cf4675af8596431a11662
-7c5688919280e312257dd6a0a4cf1a01008ee312a5f3c69c37e6fcc3f47e6776
-a07f8bdaf5b40feed2d33e025e2ff4fe2d4a63e1a16d91180b736d8bc45854c5
-6d951863f4a7e0b66dcf09a900f3ffa2948d4091e53ca86c048a64390f662b50
-4a999660ced906182b9a01a8be00a56404a6ede182b1223b4025e32c4de34304
-63457680c93aada6c99b73865aab2fc094920d901a203f5ddfe1970d28456783
-26cffbafeffcd30654f46d119be4793f827387fc0d189d5bc4d69a3c23d45a7f
-db803146578337df4d0a3121fc3d330000000049454e44ae426082
-"""),
- 'basi2c16': _dehex("""
-89504e470d0a1a0a0000000d4948445200000020000000201002000001db8f01
-760000000467414d41000186a031e8965f0000020a49444154789cd5962173e3
-3010853fcf1838cc61a1818185a53e56787fa13fa130852e3b5878b4b0b03081
-b97f7030070b53e6b057a0a8912bbb9163b9f109ececbc59bd7dcf2b45492409
-d66f00eb1dd83cb5497d65456aeb8e1040913b3b2c04504c936dd5a9c7e2c6eb
-b1b8f17a58e8d043da56f06f0f9f62e5217b6ba3a1b76f6c9e99e8696a2a72e2
-c4fb1e4d452e92ec9652b807486d12b6669be00db38d9114b0c1961e375461a5
-5f76682a85c367ad6f682ff53a9c2a353191764b78bb07d8ddc3c97c1950f391
-6745c7b9852c73c2f212605a466a502705c8338069c8b9e84efab941eb393a97
-d4c9fd63148314209f1c1d3434e847ead6380de291d6f26a25c1ebb5047f5f24
-d85c49f0f22cc1d34282c72709cab90477bf25b89d49f0f351822297e0ea9704
-f34c82bc94002448ede51866e5656aef5d7c6a385cb4d80e6a538ceba04e6df2
-480e9aa84ddedb413bb5c97b3838456df2d4fec2c7a706983e7474d085fae820
-a841776a83073838973ac0413fea2f1dc4a06e71108fda73109bdae48954ad60
-bf867aac3ce44c7c1589a711cf8a81df9b219679d96d1cec3d8bbbeaa2012626
-df8c7802eda201b2d2e0239b409868171fc104ba8b76f10b4da09f6817ffc609
-c413ede267fd1fbab46880c90f80eccf0013185eb48b47ba03df2bdaadef3181
-cb8976f18e13188768170f98c0f844bb78cb04c62ddac59d09fc3fa25dfc1da4
-14deb3df1344f70000000049454e44ae426082
-"""),
- 'basi3p08': _dehex("""
-89504e470d0a1a0a0000000d494844520000002000000020080300000133a3ba
-500000000467414d41000186a031e8965f00000300504c5445224400f5ffed77
-ff77cbffff110a003a77002222ffff11ff110000222200ffac5566ff66ff6666
-ff01ff221200dcffffccff994444ff005555220000cbcbff44440055ff55cbcb
-00331a00ffecdcedffffe4ffcbffdcdc44ff446666ff330000442200ededff66
-6600ffa444ffffaaeded0000cbcbfefffffdfffeffff0133ff33552a000101ff
-8888ff00aaaa010100440000888800ffe4cbba5b0022ff22663200ffff99aaaa
-ff550000aaaa00cb630011ff11d4ffaa773a00ff4444dc6b0066000001ff0188
-4200ecffdc6bdc00ffdcba00333300ed00ed7300ffff88994a0011ffff770000
-ff8301ffbabafe7b00fffeff00cb00ff999922ffff880000ffff77008888ffdc
-ff1a33000000aa33ffff009900990000000001326600ffbaff44ffffffaaff00
-770000fefeaa00004a9900ffff66ff22220000998bff1155ffffff0101ff88ff
-005500001111fffffefffdfea4ff4466ffffff66ff003300ffff55ff77770000
-88ff44ff00110077ffff006666ffffed000100fff5ed1111ffffff44ff22ffff
-eded11110088ffff00007793ff2200dcdc3333fffe00febabaff99ffff333300
-63cb00baba00acff55ffffdcffff337bfe00ed00ed5555ffaaffffdcdcff5555
-00000066dcdc00dc00dc83ff017777fffefeffffffcbff5555777700fefe00cb
-00cb0000fe010200010000122200ffff220044449bff33ffd4aa0000559999ff
-999900ba00ba2a5500ffcbcbb4ff66ff9b33ffffbaaa00aa42880053aa00ffaa
-aa0000ed00babaffff1100fe00000044009999990099ffcc99ba000088008800
-dc00ff93220000dcfefffeaa5300770077020100cb0000000033ffedff00ba00
-ff3333edffedffc488bcff7700aa00660066002222dc0000ffcbffdcffdcff8b
-110000cb00010155005500880000002201ffffcbffcbed0000ff88884400445b
-ba00ffbc77ff99ff006600baffba00777773ed00fe00003300330000baff77ff
-004400aaffaafffefe000011220022c4ff8800eded99ff99ff55ff002200ffb4
-661100110a1100ff1111dcffbabaffff88ff88010001ff33ffb98ed362000002
-a249444154789c65d0695c0b001806f03711a9904a94d24dac63292949e5a810
-d244588a14ca5161d1a1323973252242d62157d12ae498c8124d25ca3a11398a
-16e55a3cdffab0ffe7f77d7fcff3528645349b584c3187824d9d19d4ec2e3523
-9eb0ae975cf8de02f2486d502191841b42967a1ad49e5ddc4265f69a899e26b5
-e9e468181baae3a71a41b95669da8df2ea3594c1b31046d7b17bfb86592e4cbe
-d89b23e8db0af6304d756e60a8f4ad378bdc2552ae5948df1d35b52143141533
-33bbbbababebeb3b3bc9c9c9c6c6c0c0d7b7b535323225a5aa8a02024a4bedec
-0a0a2a2bcdcd7d7cf2f3a9a9c9cdcdd8b8adcdd5b5ababa828298982824a4ab2
-b21212acadbdbc1414e2e24859b9a72730302f4f49292c4c57373c9c0a0b7372
-8c8c1c1c3a3a92936d6dfdfd293e3e26262a4a4eaea2424b4b5fbfbc9c323278
-3c0b0ba1303abaae8ecdeeed950d6669a9a7a7a141d4de9e9d5d5cdcd2229b94
-c572716132f97cb1d8db9bc3110864a39795d9db6b6a26267a7a9a98d4d6a6a7
-cb76090ef6f030354d4d75766e686030545464cb393a1a1ac6c68686eae8f8f9
-a9aa4644c8b66d6e1689dcdd2512a994cb35330b0991ad9f9b6b659596a6addd
-d8282fafae5e5323fb8f41d01f76c22fd8061be01bfc041a0323e1002c81cd30
-0b9ec027a0c930014ec035580fc3e112bc069a0b53e11c0c8095f00176c163a0
-e5301baec06a580677600ddc05ba0f13e120bc81a770133ec355a017300d4ec2
-0c7800bbe1219c02fa08f3e13c1c85dbb00a2ec05ea0dff00a6ec15a98027360
-070c047a06d7e1085c84f1b014f6c03fa0b33018b6c0211801ebe018fc00da0a
-6f61113c877eb01d4ec317a085700f26c130f80efbe132bc039a0733e106fc81
-f7f017f6c10aa0d1300a0ec374780943e1382c06fa0a9b60238c83473016cec0
-02f80f73fefe1072afc1e50000000049454e44ae426082
-"""),
- 'basi6a08': _dehex("""
-89504e470d0a1a0a0000000d4948445200000020000000200806000001047d4a
-620000000467414d41000186a031e8965f0000012049444154789cc595414ec3
-3010459fa541b8bbb26641b8069b861e8b4d12c1c112c1452a710a2a65d840d5
-949041fc481ec98ae27c7f3f8d27e3e4648047600fec0d1f390fbbe2633a31e2
-9389e4e4ea7bfdbf3d9a6b800ab89f1bd6b553cfcbb0679e960563d72e0a9293
-b7337b9f988cc67f5f0e186d20e808042f1c97054e1309da40d02d7e27f92e03
-6cbfc64df0fc3117a6210a1b6ad1a00df21c1abcf2a01944c7101b0cb568a001
-909c9cf9e399cf3d8d9d4660a875405d9a60d000b05e2de55e25780b7a5268e0
-622118e2399aab063a815808462f1ab86890fc2e03e48bb109ded7d26ce4bf59
-0db91bac0050747fec5015ce80da0e5700281be533f0ce6d5900b59bcb00ea6d
-200314cf801faab200ea752803a8d7a90c503a039f824a53f4694e7342000000
-0049454e44ae426082
-"""),
- 'basn0g01': _dehex("""
-89504e470d0a1a0a0000000d49484452000000200000002001000000005b0147
-590000000467414d41000186a031e8965f0000005b49444154789c2dccb10903
-300c05d1ebd204b24a200b7a346f90153c82c18d0a61450751f1e08a2faaead2
-a4846ccea9255306e753345712e211b221bf4b263d1b427325255e8bdab29e6f
-6aca30692e9d29616ee96f3065f0bf1f1087492fd02f14c90000000049454e44
-ae426082
-"""),
- 'basn0g02': _dehex("""
-89504e470d0a1a0a0000000d49484452000000200000002002000000001ca13d
-890000000467414d41000186a031e8965f0000001f49444154789c6360085df5
-1f8cf1308850c20053868f0133091f6390b90700bd497f818b0989a900000000
-49454e44ae426082
-"""),
- # A version of basn0g04 dithered down to 3 bits.
- 'Basn0g03': _dehex("""
-89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
-2900000001734249540371d88211000000fd49444154789c6d90d18906210c84
-c356f22356b2889588604301b112112b11d94a96bb495cf7fe87f32d996f2689
-44741cc658e39c0b118f883e1f63cc89dafbc04c0f619d7d898396c54b875517
-83f3a2e7ac09a2074430e7f497f00f1138a5444f82839c5206b1f51053cca968
-63258821e7f2b5438aac16fbecc052b646e709de45cf18996b29648508728612
-952ca606a73566d44612b876845e9a347084ea4868d2907ff06be4436c4b41a3
-a3e1774285614c5affb40dbd931a526619d9fa18e4c2be420858de1df0e69893
-a0e3e5523461be448561001042b7d4a15309ce2c57aef2ba89d1c13794a109d7
-b5880aa27744fc5c4aecb5e7bcef5fe528ec6293a930690000000049454e44ae
-426082
-"""),
- 'basn0g04': _dehex("""
-89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
-290000000467414d41000186a031e8965f0000004849444154789c6360601014
-545232367671090d4d4b2b2f6720430095dbd1418e002a77e64c720450b9ab56
-912380caddbd9b1c0154ee9933e408a072efde25470095fbee1d1902001f14ee
-01eaff41fa0000000049454e44ae426082
-"""),
- 'basn0g08': _dehex("""
-89504e470d0a1a0a0000000d4948445200000020000000200800000000561125
-280000000467414d41000186a031e8965f0000004149444154789c6364602400
-1408c8b30c05058c0f0829f8f71f3f6079301c1430ca11906764a2795c0c0605
-8c8ff0cafeffcff887e67131181430cae0956564040050e5fe7135e2d8590000
-000049454e44ae426082
-"""),
- 'basn0g16': _dehex("""
-89504e470d0a1a0a0000000d49484452000000200000002010000000000681f9
-6b0000000467414d41000186a031e8965f0000005e49444154789cd5d2310ac0
-300c4351395bef7fc6dca093c0287b32d52a04a3d98f3f3880a7b857131363a0
-3a82601d089900dd82f640ca04e816dc06422640b7a03d903201ba05b7819009
-d02d680fa44c603f6f07ec4ff41938cf7f0016d84bd85fae2b9fd70000000049
-454e44ae426082
-"""),
- 'basn2c08': _dehex("""
-89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
-a30000000467414d41000186a031e8965f0000004849444154789cedd5c10900
-300c024085ec91fdb772133b442bf4a1f8cee12bb40d043b800a14f81ca0ede4
-7d4c784081020f4a871fc284071428f0a0743823a94081bb7077a3c00182b1f9
-5e0f40cf4b0000000049454e44ae426082
-"""),
- 'basn2c16': _dehex("""
-89504e470d0a1a0a0000000d4948445200000020000000201002000000ac8831
-e00000000467414d41000186a031e8965f000000e549444154789cd596c10a83
-301044a7e0417fcb7eb7fdadf6961e06039286266693cc7a188645e43dd6a08f
-1042003e2fe09aef6472737e183d27335fcee2f35a77b702ebce742870a23397
-f3edf2705dd10160f3b2815fe8ecf2027974a6b0c03f74a6e4192843e75c6c03
-35e8ec3202f5e84c0181bbe8cca967a00d9df3491bb040671f2e6087ce1c2860
-8d1e05f8c7ee0f1d00b667e70df44467ef26d01fbd9bc028f42860f71d188bce
-fb8d3630039dbd59601e7ab3c06cf428507f0634d039afdc80123a7bb1801e7a
-b1802a7a14c89f016d74ce331bf080ce9e08f8414f04bca133bfe642fe5e07bb
-c4ec0000000049454e44ae426082
-"""),
- 'basn6a08': _dehex("""
-89504e470d0a1a0a0000000d4948445200000020000000200806000000737a7a
-f40000000467414d41000186a031e8965f0000006f49444154789cedd6310a80
-300c46e12764684fa1f73f55048f21c4ddc545781d52e85028fc1f4d28d98a01
-305e7b7e9cffba33831d75054703ca06a8f90d58a0074e351e227d805c8254e3
-1bb0420f5cdc2e0079208892ffe2a00136a07b4007943c1004d900195036407f
-011bf00052201a9c160fb84c0000000049454e44ae426082
-"""),
- 'cs3n3p08': _dehex("""
-89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
-c60000000467414d41000186a031e8965f0000000373424954030303a392a042
-00000054504c544592ff0000ff9200ffff00ff0000dbff00ff6dffb600006dff
-b6ff00ff9200dbff000049ffff2400ff000024ff0049ff0000ffdb00ff4900ff
-b6ffff0000ff2400b6ffffdb000092ffff6d000024ffff49006dff00df702b17
-0000004b49444154789c85cac70182000000b1b3625754b0edbfa72324ef7486
-184ed0177a437b680bcdd0031c0ed00ea21f74852ed00a1c9ed0086da0057487
-6ed0121cd6d004bda0013a421ff803224033e177f4ae260000000049454e44ae
-426082
-"""),
- 's09n3p02': _dehex("""
-89504e470d0a1a0a0000000d49484452000000090000000902030000009dffee
-830000000467414d41000186a031e8965f000000037342495404040477f8b5a3
-0000000c504c544500ff000077ffff00ffff7700ff5600640000001f49444154
-789c63600002fbff0c0c56ab19182ca381581a4283f82071200000696505c36a
-437f230000000049454e44ae426082
-"""),
- 'tbgn3p08': _dehex("""
-89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
-c60000000467414d41000186a031e8965f00000207504c54457f7f7fafafafab
-abab110000222200737300999999510d00444400959500959595e6e600919191
-8d8d8d620d00898989666600b7b700911600000000730d007373736f6f6faaaa
-006b6b6b676767c41a00cccc0000f30000ef00d51e0055555567670000dd0051
-515100d1004d4d4de61e0038380000b700160d0d00ab00560d00090900009500
-009100008d003333332f2f2f2f2b2f2b2b000077007c7c001a05002b27000073
-002b2b2b006f00bb1600272727780d002323230055004d4d00cc1e00004d00cc
-1a000d00003c09006f6f00002f003811271111110d0d0d55554d090909001100
-4d0900050505000d00e2e200000900000500626200a6a6a6a2a2a29e9e9e8484
-00fb00fbd5d500801100800d00ea00ea555500a6a600e600e6f7f700e200e233
-0500888888d900d9848484c01a007777003c3c05c8c8008080804409007c7c7c
-bb00bbaa00aaa600a61e09056262629e009e9a009af322005e5e5e05050000ee
-005a5a5adddd00a616008d008d00e20016050027270088110078780000c40078
-00787300736f006f44444400aa00c81e004040406600663c3c3c090000550055
-1a1a00343434d91e000084004d004d007c004500453c3c00ea1e00222222113c
-113300331e1e1efb22001a1a1a004400afaf00270027003c001616161e001e0d
-160d2f2f00808000001e00d1d1001100110d000db7b7b7090009050005b3b3b3
-6d34c4230000000174524e530040e6d86600000001624b474402660b7c640000
-01f249444154789c6360c0048c8c58049100575f215ee92e6161ef109cd2a15e
-4b9645ce5d2c8f433aa4c24f3cbd4c98833b2314ab74a186f094b9c2c27571d2
-6a2a58e4253c5cda8559057a392363854db4d9d0641973660b0b0bb76bb16656
-06970997256877a07a95c75a1804b2fbcd128c80b482a0b0300f8a824276a9a8
-ec6e61612b3e57ee06fbf0009619d5fac846ac5c60ed20e754921625a2daadc6
-1967e29e97d2239c8aec7e61fdeca9cecebef54eb36c848517164514af16169e
-866444b2b0b7b55534c815cc2ec22d89cd1353800a8473100a4485852d924a6a
-412adc74e7ad1016ceed043267238c901716f633a812022998a4072267c4af02
-92127005c0f811b62830054935ce017b38bf0948cc5c09955f030a24617d9d46
-63371fd940b0827931cbfdf4956076ac018b592f72d45594a9b1f307f3261b1a
-084bc2ad50018b1900719ba6ba4ca325d0427d3f6161449486f981144cf3100e
-2a5f2a1ce8683e4ddf1b64275240c8438d98af0c729bbe07982b8a1c94201dc2
-b3174c9820bcc06201585ad81b25b64a2146384e3798290c05ad280a18c0a62e
-e898260c07fca80a24c076cc864b777131a00190cdfa3069035eccbc038c30e1
-3e88b46d16b6acc5380d6ac202511c392f4b789aa7b0b08718765990111606c2
-9e854c38e5191878fbe471e749b0112bb18902008dc473b2b2e8e72700000000
-49454e44ae426082
-"""),
- 'Tp2n3p08': _dehex("""
-89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
-c60000000467414d41000186a031e8965f00000300504c544502ffff80ff05ff
-7f0703ff7f0180ff04ff00ffff06ff000880ff05ff7f07ffff06ff000804ff00
-0180ff02ffff03ff7f02ffff80ff0503ff7f0180ffff0008ff7f0704ff00ffff
-06ff000802ffffff7f0704ff0003ff7fffff0680ff050180ff04ff000180ffff
-0008ffff0603ff7f80ff05ff7f0702ffffff000880ff05ffff0603ff7f02ffff
-ff7f070180ff04ff00ffff06ff000880ff050180ffff7f0702ffff04ff0003ff
-7fff7f0704ff0003ff7f0180ffffff06ff000880ff0502ffffffff0603ff7fff
-7f0702ffff04ff000180ff80ff05ff0008ff7f07ffff0680ff0504ff00ff0008
-0180ff03ff7f02ffff02ffffffff0604ff0003ff7f0180ffff000880ff05ff7f
-0780ff05ff00080180ff02ffffff7f0703ff7fffff0604ff00ff7f07ff0008ff
-ff0680ff0504ff0002ffff0180ff03ff7fff0008ffff0680ff0504ff000180ff
-02ffff03ff7fff7f070180ff02ffff04ff00ffff06ff0008ff7f0780ff0503ff
-7fffff06ff0008ff7f0780ff0502ffff03ff7f0180ff04ff0002ffffff7f07ff
-ff0604ff0003ff7fff00080180ff80ff05ffff0603ff7f0180ffff000804ff00
-80ff0502ffffff7f0780ff05ffff0604ff000180ffff000802ffffff7f0703ff
-7fff0008ff7f070180ff03ff7f02ffff80ff05ffff0604ff00ff0008ffff0602
-ffff0180ff04ff0003ff7f80ff05ff7f070180ff04ff00ff7f0780ff0502ffff
-ff000803ff7fffff0602ffffff7f07ffff0680ff05ff000804ff0003ff7f0180
-ff02ffff0180ffff7f0703ff7fff000804ff0080ff05ffff0602ffff04ff00ff
-ff0603ff7fff7f070180ff80ff05ff000803ff7f0180ffff7f0702ffffff0008
-04ff00ffff0680ff0503ff7f0180ff04ff0080ff05ffff06ff000802ffffff7f
-0780ff05ff0008ff7f070180ff03ff7f04ff0002ffffffff0604ff00ff7f07ff
-000880ff05ffff060180ff02ffff03ff7f80ff05ffff0602ffff0180ff03ff7f
-04ff00ff7f07ff00080180ffff000880ff0502ffff04ff00ff7f0703ff7fffff
-06ff0008ffff0604ff00ff7f0780ff0502ffff03ff7f0180ffdeb83387000000
-f874524e53000000000000000008080808080808081010101010101010181818
-1818181818202020202020202029292929292929293131313131313131393939
-393939393941414141414141414a4a4a4a4a4a4a4a52525252525252525a5a5a
-5a5a5a5a5a62626262626262626a6a6a6a6a6a6a6a73737373737373737b7b7b
-7b7b7b7b7b83838383838383838b8b8b8b8b8b8b8b94949494949494949c9c9c
-9c9c9c9c9ca4a4a4a4a4a4a4a4acacacacacacacacb4b4b4b4b4b4b4b4bdbdbd
-bdbdbdbdbdc5c5c5c5c5c5c5c5cdcdcdcdcdcdcdcdd5d5d5d5d5d5d5d5dedede
-dededededee6e6e6e6e6e6e6e6eeeeeeeeeeeeeeeef6f6f6f6f6f6f6f6b98ac5
-ca0000012c49444154789c6360e7169150d230b475f7098d4ccc28a96ced9e32
-63c1da2d7b8e9fb97af3d1fb8f3f18e8a0808953544a4dd7c4c2c9233c2621bf
-b4aab17fdacce5ab36ee3a72eafaad87efbefea68702362e7159652d031b07cf
-c0b8a4cce28aa68e89f316aedfb4ffd0b92bf79fbcfcfe931e0a183904e55435
-8decdcbcc22292b3caaadb7b27cc5db67af3be63e72fdf78fce2d31f7a2860e5
-119356d037b374f10e8a4fc92eaa6fee99347fc9caad7b0f9ebd74f7c1db2fbf
-e8a180995f484645dbdccad12f38363dafbcb6a573faeca5ebb6ed3e7ce2c29d
-e76fbefda38702063e0149751d537b67ff80e8d4dcc29a86bea97316add9b0e3
-c0e96bf79ebdfafc971e0a587885e515f58cad5d7d43a2d2720aeadaba26cf5a
-bc62fbcea3272fde7efafac37f3a28000087c0fe101bc2f85f0000000049454e
-44ae426082
-"""),
- 'tbbn1g04': _dehex("""
-89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
-290000000467414d41000186a031e8965f0000000274524e530007e8f7589b00
-000002624b47440000aa8d23320000013e49444154789c55d1cd4b024118c7f1
-efbe6419045b6a48a72d352808b435284f9187ae9b098627a1573a19945beba5
-e8129e8222af11d81e3a4545742de8ef6af6d5762e0fbf0fc33c33f36085cb76
-bc4204778771b867260683ee57e13f0c922df5c719c2b3b6c6c25b2382cea4b9
-9f7d4f244370746ac71f4ca88e0f173a6496749af47de8e44ba8f3bf9bdfa98a
-0faf857a7dd95c7dc8d7c67c782c99727997f41eb2e3c1e554152465bb00fe8e
-b692d190b718d159f4c0a45c4435915a243c58a7a4312a7a57913f05747594c6
-46169866c57101e4d4ce4d511423119c419183a3530cc63db88559ae28e7342a
-1e9c8122b71139b8872d6e913153224bc1f35b60e4445bd4004e20ed6682c759
-1d9873b3da0fbf50137dc5c9bde84fdb2ec8bde1189e0448b63584735993c209
-7a601bd2710caceba6158797285b7f2084a2f82c57c01a0000000049454e44ae
-426082
-"""),
- 'tbrn2c08': _dehex("""
-89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
-a30000000467414d41000186a031e8965f0000000674524e53007f007f007f8a
-33334f00000006624b474400ff0000000033277cf3000004d649444154789cad
-965f68537714c73fd912d640235e692f34d0406fa0c1663481045ab060065514
-56660a295831607df0a1488715167060840a1614e6431e9cb34fd2c00a762c85
-f6a10f816650c13b0cf40612e1822ddc4863bd628a8924d23d6464f9d3665dd9
-f7e977ce3dbff3cd3939bfdfef6bb87dfb364782dbed065ebe7cd93acc78b4ec
-a228debd7bb7bfbfbfbbbbfb7f261045311a8d261209405194274f9ea4d3e916
-f15f1c3eb5dd6e4fa5fecce526239184a2b0b8486f6f617171b1f5ae4311381c
-8e57af5e5dbd7a351088150a78bd389d44222c2f93cdfe66b7db8f4ee07038b6
-b6b6bebf766d7e7e7e60a06432313b4ba984c3c1c4049a46b95c5a58583822c1
-dbb76f27272733d1b9df853c3030c0f232562b9108cf9eb1b888d7cbf030abab
-31abd5fa1f08dc6ef7e7cf9f1f3f7e1c8944745d4f1400c62c001313acad21cb
-b8dd2c2c603271eb1640341aad4c6d331aa7e8c48913a150a861307ecc11e964
-74899919bc5e14e56fffc404f1388502f178dceff7ef4bf0a5cfe7abb533998c
-e5f9ea2f1dd88c180d64cb94412df3dd57e83a6b3b3c7a84c98420100c72fd3a
-636348bae726379fe69e8e8d8dbd79f3a6558b0607079796965256479b918085
-7b02db12712b6181950233023f3f647494ee6e2e5ea45864cce5b8a7fe3acffc
-3aebb22c2bd5d20e22d0757d7b7bbbbdbd3d94a313bed1b0aa3cd069838b163a
-8d4c59585f677292d0b84d9a995bd337def3fe6bbe5e6001989b9b6bfe27ea08
-36373781542ab56573248b4c5bc843ac4048c7ab21aa24ca00534c25482828a3
-8c9ee67475bbaaaab22cb722c8e57240a150301a8d219de94e44534d7d90e885
-87acb0e2c4f9800731629b6c5ee14a35a6b9887d2a0032994cb9cf15dbe59650
-ff7b46a04c9a749e7cc5112214266cc65c31354d5b5d5d3d90209bcd5616a552
-a95c2e87f2a659bd9ee01c2cd73964e438f129a6aa9e582c363838b80f81d7eb
-5555b56a2a8ad2d9d7affd0409f8015c208013fea00177b873831b0282c964f2
-783c1e8fa7582cee5f81a669b5e6eeeeaee58e8559b0c233d8843c7c0b963a82
-34e94b5cb2396d7d7d7db22c8ba258fb0afd43f0e2c58b919191ba9de9b4d425
-118329b0c3323c8709d02041b52b4ea7f39de75d2a934a2693c0a953a76a93d4
-5d157ebf7f6565a5542a553df97c5e10045dd731c130b86113cc300cbd489224
-08422a952a140a95788fc763b1d41558d7a2d7af5f5fb870a1d6a3aaaacd6603
-18802da84c59015bd2e6897b745d9765b99a1df0f97c0daf74e36deaf7fbcd66
-73ad2797cb89a2c839880188a2e8743a8bc5a22ccbba5e376466b3b9bdbdbd21
-6123413a9d0e0402b51e4dd3bababa788eb022b85caeb6b6364551b6b7b76942
-43f7f727007a7a7a04a1ee8065b3595fde2768423299ac1ec6669c3973e65004
-c0f8f878ad69341a33994ced2969c0d0d0502412f9f8f163f3a7fd654b474787
-288ad53e74757535df6215b85cae60302849d2410aecc037f9f2e5cbd5b5c160
-680eb0dbede170381c0e7ff8f0a185be3b906068684892a4ca7a6f6faff69328
-8ad3d3d3f7efdfdfdbdbfb57e96868a14d0d0643381c96242997cbe5f3794010
-84603078fcf8f1d6496bd14a3aba5c2ea7d369341a5555b5582c8140e0fcf9f3
-1b1b1b87cf4eeb0a8063c78e45a3d19e9e1ebfdfdf5a831e844655d18093274f
-9e3d7bf6d3a74f3b3b3b47c80efc05ff7af28fefb70d9b0000000049454e44ae
-426082
-"""),
- 'basn6a16': _dehex("""
-89504e470d0a1a0a0000000d494844520000002000000020100600000023eaa6
-b70000000467414d41000186a031e8965f00000d2249444154789cdd995f6c1c
-d775c67ff38fb34b724d2ee55a8e4b04a0ac87049100cab4dbd8c6528902cb4d
-10881620592e52d4325ac0905bc98a94025e71fd622cb5065ac98a0c283050c0
-728a00b6e542a1d126885cd3298928891d9a0444037e904434951d4b90b84b2f
-c9dde1fcebc33977a95555348f411e16dfce9d3b77ee77eebde77ce78c95a669
-0ad07c17009a13edd898b87dfb1fcb7d2b4d1bff217f33df80deb1e6267df0ff
-c1e6e6dfafdf1f5a7fd30f9aef66b6d546dd355bf02c40662e3307f9725a96c6
-744c3031f83782f171c148dbc3bf1774f5dad1e79d6f095a3f54d4fbec5234ef
-d9a2f8d73afe4f14f57ef4f42def7b44f19060f06b45bddf1c5534d77fd922be
-2973a15a82e648661c6e3240aa3612ead952b604bde57458894f29deaf133bac
-13d2766f5227a4a3b8cf08da7adfd6fbd6bd8a4fe9dbb43d35e3dfa3f844fbf8
-9119bf4f7144094fb56333abf8a86063ca106f94b3a3b512343765e60082097f
-1bb86ba72439a653519b09f5cee1ce61c897d37eedf5553580ae60f4af8af33a
-b14fd400b6a0f34535c0434afc0b3a9f07147527a5fa7ca218ff56c74d74dc3f
-155cfd3325fc278acf2ae1cb4a539f5f9937c457263b0bd51234c732a300cdd1
-cc1840f0aaff54db0e4874ed5a9b5d6d27d4bb36746d80de72baa877ff4b275a
-d7895ed1897ea4139b5143fcbb1a62560da1ed9662aaed895ec78a91c18795b8
-5e07ab4af8ba128e95e682e0728bf8f2e5ae815a091a53d902ac1920d8e05f06
-589de8d8d66680789f4e454fb9d9ec66cd857af796ee2d902fa73fd5bba775a2
-153580ae44705ed0d37647d15697cb8f14bfa3e3e8fdf8031d47af571503357c
-f30d25acedcbbf135c9a35c49766ba07ab255859e8ec03684e66860182dff8f7
-0304bff6ff1c20fc81b7afdd00a71475539a536e36bb5973a19e3b923b02bde5
-e4efd4003ac170eb2d13fe274157afedbd82d6fb3a9a1e85e4551d47cf7078f8
-9671fe4289ebf5f2bf08d63f37c4eb4773c55a0996efeefa0ca011671d8060ca
-2f0004c7fcc300e166ef0240f825efe3361f106d57d423d0723f7acacd66376b
-2ed47b7a7a7a205f4ef4ac4691e0aad9aa0d41cf13741c3580a506487574ddca
-61a8c403c1863ebfbcac3475168b2de28b8b3d77544bb05ce92a02aceced3c0d
-d0cc65ea371b201cf1c601c24dde1c4078cedbdeb60322f50126a019bf6edc9b
-39e566b39b3517eaf97c3e0fbde5e4491d45bd74537145d155b476aa0176e868
-c6abebf30dbd5e525c54ac8e18e2d56abeb756827a3d970358a97416019a6f64
-f60004fdfe1580d5c98e618070cc1b05887eee7e0d209a70db7d8063029889b4
-c620ead78d7b33a7dc6c76b3e6427ddddbebde867c393aa7845e5403e8ca794a
-d0d6fb897af5f03525fe5782f5e7046bdaef468bf88d1debc6ab25583cd17310
-6079b9ab0ba059c914018245bf076075b5a303200c3c1f209a733701444fbbaf
-00c4134ebb016c5d0b23614c243701cdf875e3decce9349bddacb9505fbf7dfd
-76e82d87736a00f5d2b5ffd4b7dce2719a4d25ae717ee153c1abef18e257cfad
-7fa45682da48ef38c052b53b0fd06864b300c151ff08c0ea431de701a287dd5f
-004497dc7b01a253ee3e80b8c7f91c20f967fb6fdb7c80ada7d8683723614c24
-3701cdf875e3decc29379bddacb950ef3fd47f08f2e5a61ea4aa2a3eb757cd55
-13345efcfa59c12b2f19e2578ef77fb75a82854ffbee01a83f977b11a031931d
-040802df07082b5e11207cc17b1e209a770700e2df0a83e409fb7580f827c230
-99b06fd901fb058d6835dacd481813c94d40337eddb83773cacd66376b2ed437
-bebcf165e82d2f4e4beb7f3fa6e652c2d7ee10bc78c010bfb87fe3c95a09ae9f
-bd732740bd2fb700d0f865f64180e059ff044018ca0ca28a5b04883f701e0088
-bfec7c0c909cb71f0448c6ec518074b375012079d9dedf66004bcfbc51eb2dd1
-aadacd481813c94d40337eddb83773cacd66376b2ed487868686205fbe7c49ef
-5605a73f34c4a7a787eeab96e0da81bb4e022c15ba27019a5b339300e16bf286
-a8eae601e25866907cdf3e0890acb36f00245fb57f05904e59c300e92561946e
-b2e600d209ab7d07f04d458dfb46ad1bd16ab49b913026929b8066fcba716fe6
-949bcd6ed65ca8ef7e7cf7e3d05b7e7c8f217ee6cdddbb6a25a856f37980e0c7
-fe4e80a82623c48193014846ec7180f4acf518409aca0cd28a5504e03b32c374
-de1a00608a0240faaa327a4b19fe946fb6f90054dbb5f2333d022db56eb4966a
-3723614c243701cdf8f556bea8a7dc6c76b3e66bd46584ddbbcebc0990cf4b0f
-ff4070520c282338a7e26700ec725202b01e4bcf0258963c6f1d4d8f0030cb20
-805549c520930c03584fa522b676f11600ffc03fde3e1b3489a9c9054c9aa23b
-c08856a3dd8c843191dc0434e3d78d7b33a75c36fb993761f7ae5a69f72ef97f
-e6ad336fed7e1c60e8bee96980bbdebbb60da07b7069062033d9dc0ae03d296f
-70ab511ec071640676252902d833c916007b3e1900b0a6d2028035968e025861
-ea01581369fb11488c34d18cbc95989afccca42baad65ba2d5683723614c24d7
-8066fcbab8b7e96918baaf5aaa56219f975fb50a43f7c9bde90fa73f1c1a02d8
-78f2e27e803b77ca08b90519315b6fe400fc1392097a9eccc0ad444500e70199
-a1331f0f00d8934901c07e5d526ceb87c2d07e2579badd005a2b31a5089391b7
-1253358049535a6add8856dd0146c298482e01ede27ed878b256ba7600ee3a09
-c18fc1df09fe01084ec25defc1b56db0f1a4f4bd78e0e2818d2f0334e7330300
-7df7c888b917e50dd9c1c60c80efcb0cbc63e1f700bce7c31700dccbd1060027
-8add9b0de06c8e2f00d84962b7d7030e2a61538331b98051f92631bd253f336a
-dd8856a3dd44c25c390efddfad96ae9f853b77c25201ba27c533b8bdf28b6ad0
-3d084b33d2e7fa59099e9901b8f2d29597fa0f01848f78e70082117f1ca07b76
-6910209b9519f895a008d031bbba05c09d8f06005c5b18b8fba25300cea6780e
-c03e911c6ccf06d507b48a4fa606634a114609de929f9934c5a87511ad57cfc1
-fa476aa5854fa1ef1e3910b905686e85cc24c40138198915f133d2d6dc2a7dea
-7df2ccc2a752faf2cec1d577aebeb37e3b4034eeee0008dff3be0e6b923773b4
-7904c0ef9119767cb4fa1500ef1361e08e452500f71561e84cc4ed3e20fab6a2
-c905f40cb76a3026bf3319b91ac2e46792a6dcd801ebc6aba5da08f48ecb81c8
-bd088d5f42f6417191de93908c803d0e76199292b485af41b60e8d9c3c537f0e
-8211f0c7211a077707dc18b931b2ee6d80a4d7ae024491ebc24d4a708ff70680
-7f25e807e8785f1878e322d6ddaf453f0770ff2dfa769b01423dbbad72a391b6
-5a7c3235985629423372494cab55c8f7d64a8b27a0e7202c55a13b0f8d19c80e
-4ae9ca3f015115dc3ca467c17a4c7ee95970ab10e5a54ff0ac3cd39881ee5958
-1a84f03df0be0e492fd855a8d6aa35d10b4962dbb0a604a3d3ee5e80a8eee600
-a24977f8660378bf0bbf00e01d0a8fb7f980f04b8aa6ce6aca8d5a7533c52753
-839152c4e222f4dc512dd5eb90cbc981e8ea12cf90cd8a8bf47d89159e2741d3
-7124f65b96fcd254dae258fa84a13c13043246a32129574787e49eae2b49b86d
-c3e2e78b9ff7f4002415bb08907c66df0d103b4e0c104db90500ff70700c203a
-ee1e82dba4c3e16e256c0acca6ceaae9afd1f612d7eb472157ac95962bd05594
-7dd1598466053245088e827f44628657942a825b84e4fb601f84b4025611aca3
-901e01bb024911dc0a4445f08e41f83df02b10142173149ab71baf027611ea95
-7a257704201d14cd9af4d90b00f194530088cb4e09c0df1c5c0088f7393f6833
-c0aa3ac156655de3bca9b34ab9716906ba07aba5e5bba1eb3358d90b9da7c533
-64f6888bf47b60f521e8380fe10be03d2feac17900927560df40f4e48f805960
-50328d648bf4893f9067c217a0631656b7c898c122847bc07b03a2d3e0ee85e4
-33b0ef867450c4fad2ecd26cf7168074c0ba0c904cdac300c9cfec4701924df6
-1cdca61e10685c6f7d52d0caba1498972f43d740adb4b2009d7d7220b20e3473
-90a943d00ffe959bb6eac3e0fe42ea49ee00c45f06e76329b1dabf127d690d80
-5581b408f63c2403e0cc433c00ee658836803b0fd100747c04ab5f917704fd10
-d5c1cd41ec801343d207f602a403605d86e5f9e5f9ae0d00e994556833806685
-c931fb709b0f08b4e869bea5c827859549e82c544b8d29c816a0390999613920
-7e610d5727a16318c2003c1fa24be0de2b32caf92224e7c17e5004b6350c4c01
-05601218066b0ad28224e149019c086257ca315102de2712903bde97b8144d82
-3b2c6ac52d403c054e019249b087f53d0558995a99ea946c70cc927458b3c1ff
-550f30050df988d4284376b4566a8e416654cc921985e037e0df0fc131f00f4b
-acf0c6211c036f14a239703741740adc7da227edd7e56b833d0ae92549b4d357
-25dfb49ed2ff63908e6adf27d6d0dda7638d4154d2778daca17f58e61297c129
-41f233b01f5dc3740cac51688c35c6b22580f48224fee9b83502569a66b629f1
-09f3713473413e2666e7fe6f6c6efefdfafda1f56f6e06f93496d9d67cb7366a
-9964b6f92e64b689196ec6c604646fd3fe4771ff1bf03f65d8ecc3addbb5f300
-00000049454e44ae426082
-"""),
-}
-
-def test_suite(options, args):
- """
- Create a PNG test image and write the file to stdout.
- """
-
- # Below is a big stack of test image generators.
- # They're all really tiny, so PEP 8 rules are suspended.
-
- def test_gradient_horizontal_lr(x, y): return x
- def test_gradient_horizontal_rl(x, y): return 1-x
- def test_gradient_vertical_tb(x, y): return y
- def test_gradient_vertical_bt(x, y): return 1-y
- def test_radial_tl(x, y): return max(1-math.sqrt(x*x+y*y), 0.0)
- def test_radial_center(x, y): return test_radial_tl(x-0.5, y-0.5)
- def test_radial_tr(x, y): return test_radial_tl(1-x, y)
- def test_radial_bl(x, y): return test_radial_tl(x, 1-y)
- def test_radial_br(x, y): return test_radial_tl(1-x, 1-y)
- def test_stripe(x, n): return float(int(x*n) & 1)
- def test_stripe_h_2(x, y): return test_stripe(x, 2)
- def test_stripe_h_4(x, y): return test_stripe(x, 4)
- def test_stripe_h_10(x, y): return test_stripe(x, 10)
- def test_stripe_v_2(x, y): return test_stripe(y, 2)
- def test_stripe_v_4(x, y): return test_stripe(y, 4)
- def test_stripe_v_10(x, y): return test_stripe(y, 10)
- def test_stripe_lr_10(x, y): return test_stripe(x+y, 10)
- def test_stripe_rl_10(x, y): return test_stripe(1+x-y, 10)
- def test_checker(x, y, n): return float((int(x*n) & 1) ^ (int(y*n) & 1))
- def test_checker_8(x, y): return test_checker(x, y, 8)
- def test_checker_15(x, y): return test_checker(x, y, 15)
- def test_zero(x, y): return 0
- def test_one(x, y): return 1
-
- test_patterns = {
- 'GLR': test_gradient_horizontal_lr,
- 'GRL': test_gradient_horizontal_rl,
- 'GTB': test_gradient_vertical_tb,
- 'GBT': test_gradient_vertical_bt,
- 'RTL': test_radial_tl,
- 'RTR': test_radial_tr,
- 'RBL': test_radial_bl,
- 'RBR': test_radial_br,
- 'RCTR': test_radial_center,
- 'HS2': test_stripe_h_2,
- 'HS4': test_stripe_h_4,
- 'HS10': test_stripe_h_10,
- 'VS2': test_stripe_v_2,
- 'VS4': test_stripe_v_4,
- 'VS10': test_stripe_v_10,
- 'LRS': test_stripe_lr_10,
- 'RLS': test_stripe_rl_10,
- 'CK8': test_checker_8,
- 'CK15': test_checker_15,
- 'ZERO': test_zero,
- 'ONE': test_one,
- }
-
- def test_pattern(width, height, bitdepth, pattern):
- """Create a single plane (monochrome) test pattern. Returns a
- flat row flat pixel array.
- """
-
- maxval = 2**bitdepth-1
- if maxval > 255:
- a = array('H')
- else:
- a = array('B')
- fw = float(width)
- fh = float(height)
- pfun = test_patterns[pattern]
- for y in range(height):
- fy = float(y)/fh
- for x in range(width):
- a.append(int(round(pfun(float(x)/fw, fy) * maxval)))
- return a
-
- def test_rgba(size=256, bitdepth=8,
- red="GTB", green="GLR", blue="RTL", alpha=None):
- """
- Create a test image. Each channel is generated from the
- specified pattern; any channel apart from red can be set to
- None, which will cause it not to be in the image. It
- is possible to create all PNG channel types (L, RGB, LA, RGBA),
- as well as non PNG channel types (RGA, and so on).
- """
-
- i = test_pattern(size, size, bitdepth, red)
- psize = 1
- for channel in (green, blue, alpha):
- if channel:
- c = test_pattern(size, size, bitdepth, channel)
- i = interleave_planes(i, c, psize, 1)
- psize += 1
- return i
-
- def pngsuite_image(name):
- """
- Create a test image by reading an internal copy of the files
- from the PngSuite. Returned in flat row flat pixel format.
- """
-
- if name not in _pngsuite:
- raise NotImplementedError("cannot find PngSuite file %s (use -L for a list)" % name)
- r = Reader(bytes=_pngsuite[name])
- w,h,pixels,meta = r.asDirect()
- assert w == h
- # LAn for n < 8 is a special case for which we need to rescale
- # the data.
- if meta['greyscale'] and meta['alpha'] and meta['bitdepth'] < 8:
- factor = 255 // (2**meta['bitdepth']-1)
- def rescale(data):
- for row in data:
- yield map(factor.__mul__, row)
- pixels = rescale(pixels)
- meta['bitdepth'] = 8
- arraycode = 'BH'[meta['bitdepth']>8]
- return w, array(arraycode, itertools.chain(*pixels)), meta
-
- # The body of test_suite()
- size = 256
- if options.test_size:
- size = options.test_size
- options.bitdepth = options.test_depth
- options.greyscale=bool(options.test_black)
-
- kwargs = {}
- if options.test_red:
- kwargs["red"] = options.test_red
- if options.test_green:
- kwargs["green"] = options.test_green
- if options.test_blue:
- kwargs["blue"] = options.test_blue
- if options.test_alpha:
- kwargs["alpha"] = options.test_alpha
- if options.greyscale:
- if options.test_red or options.test_green or options.test_blue:
- raise ValueError("cannot specify colours (R, G, B) when greyscale image (black channel, K) is specified")
- kwargs["red"] = options.test_black
- kwargs["green"] = None
- kwargs["blue"] = None
- options.alpha = bool(options.test_alpha)
- if not args:
- pixels = test_rgba(size, options.bitdepth, **kwargs)
- else:
- size,pixels,meta = pngsuite_image(args[0])
- for k in ['bitdepth', 'alpha', 'greyscale']:
- setattr(options, k, meta[k])
-
- writer = Writer(size, size,
- bitdepth=options.bitdepth,
- transparent=options.transparent,
- background=options.background,
- gamma=options.gamma,
- greyscale=options.greyscale,
- alpha=options.alpha,
- compression=options.compression,
- interlace=options.interlace)
- writer.write_array(sys.stdout, pixels)
-
-def read_pam_header(infile):
- """
- Read (the rest of a) PAM header. `infile` should be positioned
- immediately after the initial 'P7' line (at the beginning of the
- second line). Returns are as for `read_pnm_header`.
- """
-
- # Unlike PBM, PGM, and PPM, we can read the header a line at a time.
- header = dict()
- while True:
- l = infile.readline().strip()
- if l == strtobytes('ENDHDR'):
- break
- if not l:
- raise EOFError('PAM ended prematurely')
- if l[0] == strtobytes('#'):
- continue
- l = l.split(None, 1)
- if l[0] not in header:
- header[l[0]] = l[1]
- else:
- header[l[0]] += strtobytes(' ') + l[1]
-
- required = ['WIDTH', 'HEIGHT', 'DEPTH', 'MAXVAL']
- required = [strtobytes(x) for x in required]
- WIDTH,HEIGHT,DEPTH,MAXVAL = required
- present = [x for x in required if x in header]
- if len(present) != len(required):
- raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL')
- width = int(header[WIDTH])
- height = int(header[HEIGHT])
- depth = int(header[DEPTH])
- maxval = int(header[MAXVAL])
- if (width <= 0 or
- height <= 0 or
- depth <= 0 or
- maxval <= 0):
- raise Error(
- 'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers')
- return 'P7', width, height, depth, maxval
-
-def read_pnm_header(infile, supported=('P5','P6')):
- """
- Read a PNM header, returning (format,width,height,depth,maxval).
- `width` and `height` are in pixels. `depth` is the number of
- channels in the image; for PBM and PGM it is synthesized as 1, for
- PPM as 3; for PAM images it is read from the header. `maxval` is
- synthesized (as 1) for PBM images.
- """
-
- # Generally, see http://netpbm.sourceforge.net/doc/ppm.html
- # and http://netpbm.sourceforge.net/doc/pam.html
-
- supported = [strtobytes(x) for x in supported]
-
- # Technically 'P7' must be followed by a newline, so by using
- # rstrip() we are being liberal in what we accept. I think this
- # is acceptable.
- type = infile.read(3).rstrip()
- if type not in supported:
- raise NotImplementedError('file format %s not supported' % type)
- if type == strtobytes('P7'):
- # PAM header parsing is completely different.
- return read_pam_header(infile)
- # Expected number of tokens in header (3 for P4, 4 for P6)
- expected = 4
- pbm = ('P1', 'P4')
- if type in pbm:
- expected = 3
- header = [type]
-
- # We have to read the rest of the header byte by byte because the
- # final whitespace character (immediately following the MAXVAL in
- # the case of P6) may not be a newline. Of course all PNM files in
- # the wild use a newline at this point, so it's tempting to use
- # readline; but it would be wrong.
- def getc():
- c = infile.read(1)
- if not c:
- raise Error('premature EOF reading PNM header')
- return c
-
- c = getc()
- while True:
- # Skip whitespace that precedes a token.
- while c.isspace():
- c = getc()
- # Skip comments.
- while c == '#':
- while c not in '\n\r':
- c = getc()
- if not c.isdigit():
- raise Error('unexpected character %s found in header' % c)
- # According to the specification it is legal to have comments
- # that appear in the middle of a token.
- # This is bonkers; I've never seen it; and it's a bit awkward to
- # code good lexers in Python (no goto). So we break on such
- # cases.
- token = strtobytes('')
- while c.isdigit():
- token += c
- c = getc()
- # Slight hack. All "tokens" are decimal integers, so convert
- # them here.
- header.append(int(token))
- if len(header) == expected:
- break
- # Skip comments (again)
- while c == '#':
- while c not in '\n\r':
- c = getc()
- if not c.isspace():
- raise Error('expected header to end with whitespace, not %s' % c)
-
- if type in pbm:
- # synthesize a MAXVAL
- header.append(1)
- depth = (1,3)[type == strtobytes('P6')]
- return header[0], header[1], header[2], depth, header[3]
-
-def write_pnm(file, width, height, pixels, meta):
- """Write a Netpbm PNM/PAM file."""
-
- bitdepth = meta['bitdepth']
- maxval = 2**bitdepth - 1
- # Rudely, the number of image planes can be used to determine
- # whether we are L (PGM), LA (PAM), RGB (PPM), or RGBA (PAM).
- planes = meta['planes']
- # Can be an assert as long as we assume that pixels and meta came
- # from a PNG file.
- assert planes in (1,2,3,4)
- if planes in (1,3):
- if 1 == planes:
- # PGM
- # Could generate PBM if maxval is 1, but we don't (for one
- # thing, we'd have to convert the data, not just blat it
- # out).
- fmt = 'P5'
- else:
- # PPM
- fmt = 'P6'
- file.write('%s %d %d %d\n' % (fmt, width, height, maxval))
- if planes in (2,4):
- # PAM
- # See http://netpbm.sourceforge.net/doc/pam.html
- if 2 == planes:
- tupltype = 'GRAYSCALE_ALPHA'
- else:
- tupltype = 'RGB_ALPHA'
- file.write('P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\n'
- 'TUPLTYPE %s\nENDHDR\n' %
- (width, height, planes, maxval, tupltype))
- # Values per row
- vpr = planes * width
- # struct format
- fmt = '>%d' % vpr
- if maxval > 0xff:
- fmt = fmt + 'H'
- else:
- fmt = fmt + 'B'
- for row in pixels:
- file.write(struct.pack(fmt, *row))
- file.flush()
-
-def color_triple(color):
- """
- Convert a command line colour value to a RGB triple of integers.
- FIXME: Somewhere we need support for greyscale backgrounds etc.
- """
- if color.startswith('#') and len(color) == 4:
- return (int(color[1], 16),
- int(color[2], 16),
- int(color[3], 16))
- if color.startswith('#') and len(color) == 7:
- return (int(color[1:3], 16),
- int(color[3:5], 16),
- int(color[5:7], 16))
- elif color.startswith('#') and len(color) == 13:
- return (int(color[1:5], 16),
- int(color[5:9], 16),
- int(color[9:13], 16))
-
-
-def _main(argv):
- """
- Run the PNG encoder with options from the command line.
- """
-
- # Parse command line arguments
- from optparse import OptionParser
- import re
- version = '%prog ' + re.sub(r'( ?\$|URL: |Rev:)', '', __version__)
- parser = OptionParser(version=version)
- parser.set_usage("%prog [options] [imagefile]")
- parser.add_option('-r', '--read-png', default=False,
- action='store_true',
- help='Read PNG, write PNM')
- parser.add_option("-i", "--interlace",
- default=False, action="store_true",
- help="create an interlaced PNG file (Adam7)")
- parser.add_option("-t", "--transparent",
- action="store", type="string", metavar="color",
- help="mark the specified colour (#RRGGBB) as transparent")
- parser.add_option("-b", "--background",
- action="store", type="string", metavar="color",
- help="save the specified background colour")
- parser.add_option("-a", "--alpha",
- action="store", type="string", metavar="pgmfile",
- help="alpha channel transparency (RGBA)")
- parser.add_option("-g", "--gamma",
- action="store", type="float", metavar="value",
- help="save the specified gamma value")
- parser.add_option("-c", "--compression",
- action="store", type="int", metavar="level",
- help="zlib compression level (0-9)")
- parser.add_option("-T", "--test",
- default=False, action="store_true",
- help="create a test image (a named PngSuite image if an argument is supplied)")
- parser.add_option('-L', '--list',
- default=False, action='store_true',
- help="print list of named test images")
- parser.add_option("-R", "--test-red",
- action="store", type="string", metavar="pattern",
- help="test pattern for the red image layer")
- parser.add_option("-G", "--test-green",
- action="store", type="string", metavar="pattern",
- help="test pattern for the green image layer")
- parser.add_option("-B", "--test-blue",
- action="store", type="string", metavar="pattern",
- help="test pattern for the blue image layer")
- parser.add_option("-A", "--test-alpha",
- action="store", type="string", metavar="pattern",
- help="test pattern for the alpha image layer")
- parser.add_option("-K", "--test-black",
- action="store", type="string", metavar="pattern",
- help="test pattern for greyscale image")
- parser.add_option("-d", "--test-depth",
- default=8, action="store", type="int",
- metavar='NBITS',
- help="create test PNGs that are NBITS bits per channel")
- parser.add_option("-S", "--test-size",
- action="store", type="int", metavar="size",
- help="width and height of the test image")
- (options, args) = parser.parse_args(args=argv[1:])
-
- # Convert options
- if options.transparent is not None:
- options.transparent = color_triple(options.transparent)
- if options.background is not None:
- options.background = color_triple(options.background)
-
- if options.list:
- names = list(_pngsuite)
- names.sort()
- for name in names:
- print name
- return
-
- # Run regression tests
- if options.test:
- return test_suite(options, args)
-
- # Prepare input and output files
- if len(args) == 0:
- infilename = '-'
- infile = sys.stdin
- elif len(args) == 1:
- infilename = args[0]
- infile = open(infilename, 'rb')
- else:
- parser.error("more than one input file")
- outfile = sys.stdout
-
- if options.read_png:
- # Encode PNG to PPM
- png = Reader(file=infile)
- width,height,pixels,meta = png.asDirect()
- write_pnm(outfile, width, height, pixels, meta)
- else:
- # Encode PNM to PNG
- format, width, height, depth, maxval = \
- read_pnm_header(infile, ('P5','P6','P7'))
- # When it comes to the variety of input formats, we do something
- # rather rude. Observe that L, LA, RGB, RGBA are the 4 colour
- # types supported by PNG and that they correspond to 1, 2, 3, 4
- # channels respectively. So we use the number of channels in
- # the source image to determine which one we have. We do not
- # care about TUPLTYPE.
- greyscale = depth <= 2
- pamalpha = depth in (2,4)
- supported = map(lambda x: 2**x-1, range(1,17))
- try:
- mi = supported.index(maxval)
- except ValueError:
- raise NotImplementedError(
- 'your maxval (%s) not in supported list %s' %
- (maxval, str(supported)))
- bitdepth = mi+1
- writer = Writer(width, height,
- greyscale=greyscale,
- bitdepth=bitdepth,
- interlace=options.interlace,
- transparent=options.transparent,
- background=options.background,
- alpha=bool(pamalpha or options.alpha),
- gamma=options.gamma,
- compression=options.compression)
- if options.alpha:
- pgmfile = open(options.alpha, 'rb')
- format, awidth, aheight, adepth, amaxval = \
- read_pnm_header(pgmfile, 'P5')
- if amaxval != '255':
- raise NotImplementedError(
- 'maxval %s not supported for alpha channel' % amaxval)
- if (awidth, aheight) != (width, height):
- raise ValueError("alpha channel image size mismatch"
- " (%s has %sx%s but %s has %sx%s)"
- % (infilename, width, height,
- options.alpha, awidth, aheight))
- writer.convert_ppm_and_pgm(infile, pgmfile, outfile)
- else:
- writer.convert_pnm(infile, outfile)
-
-
-if __name__ == '__main__':
- try:
- _main(sys.argv)
- except Error, e:
- print >>sys.stderr, e
diff --git a/lib/proximity.py b/lib/proximity.py
deleted file mode 100644
index fc4d6a1..0000000
--- a/lib/proximity.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/env python
-# - coding: utf-8 -
-# Copyright (C) 2010 Toms Bauģis <toms.baugis at gmail.com>
-
-"""
- Proximity calculations
-"""
-
-from bisect import bisect
-
-class ProximityStore(object):
- def __init__(self):
- self.positions = {}
- self.reverse_positions = {}
-
- def update_position(position):
- """Update position of the element"""
- pass
-
- def find_neighbours(location, radius):
- pass
-
-
-# A AbstractProximityDatabase-style wrapper for the LQ bin lattice system
-class LQProximityStore(ProximityStore):
- __slots__ = ['point1', 'point2', 'stride', 'grid_x', 'grid_y']
- def __init__(self, point1, point2, stride):
- ProximityStore.__init__(self)
- self.point1, self.point2, self.stride = point1, point2, stride
-
- # create the bin grid where we will be throwing in our friends
- self.grid_x = range(point1.x, point2.x, stride)
- self.grid_y = range(point1.y, point2.y, stride)
-
- self.velocity_weight = 10
-
-
- def update_position(self, boid):
- bin = (bisect(self.grid_x, boid.location.x), bisect(self.grid_y, boid.location.y))
- old_bin = self.reverse_positions.setdefault(boid, [])
-
- #if bin has changed, move
- if old_bin != bin:
- if old_bin:
- self.positions[old_bin].remove(boid)
-
- self.positions.setdefault(bin, [])
- self.positions[bin].append(boid)
- self.reverse_positions[boid] = bin
-
-
- def find_bins(self, boid, radius):
- # TODO, would be neat to operate with vectors here
- # create a bounding box and return all bins within it
- velocity_weight = self.velocity_weight
- min_x = bisect(self.grid_x, min(boid.location.x - radius,
- boid.location.x + boid.velocity.x * velocity_weight - radius))
- min_y = bisect(self.grid_y, min(boid.location.y - radius,
- boid.location.y + boid.velocity.y * velocity_weight - radius))
- max_x = bisect(self.grid_x, max(boid.location.x + radius,
- boid.location.x + boid.velocity.x * velocity_weight + radius))
- max_y = bisect(self.grid_y, max(boid.location.y + radius,
- boid.location.y + boid.velocity.y * velocity_weight + radius))
-
- bins = []
- for x in range(min_x, max_x + 1):
- for y in range(min_y, max_y + 1):
- bins.append(self.positions.setdefault((x,y), []))
- return bins
-
-
- def find_neighbours(self, boid, radius):
- bins = self.find_bins(boid, radius)
-
- neighbours = []
-
- for bin in bins:
- for boid2 in bin:
- if boid is boid2:
- continue
-
- dx = boid.location.x - boid2.location.x
- dy = boid.location.y - boid2.location.y
- d = dx * dx + dy * dy
- if d < radius * radius:
- neighbours.append((boid2, d))
-
- return neighbours
diff --git a/lib/pytweener.py b/lib/pytweener.py
deleted file mode 100644
index f5cacd7..0000000
--- a/lib/pytweener.py
+++ /dev/null
@@ -1,343 +0,0 @@
-# pyTweener
-#
-# Tweening functions for python
-#
-# Heavily based on caurina Tweener: http://code.google.com/p/tweener/
-#
-# Released under M.I.T License - see above url
-# Python version by Ben Harling 2009
-# All kinds of slashing and dashing by Toms Baugis 2010
-import math
-import collections
-import datetime as dt
-import time
-import re
-
-class Tweener(object):
- def __init__(self, default_duration = None, tween = None):
- """Tweener
- This class manages all active tweens, and provides a factory for
- creating and spawning tween motions."""
- self.current_tweens = collections.defaultdict(set)
- self.default_easing = tween or Easing.Cubic.ease_in_out
- self.default_duration = default_duration or 1.0
-
- def has_tweens(self):
- return len(self.current_tweens) > 0
-
-
- def add_tween(self, obj, duration = None, easing = None, on_complete = None, on_update = None, **kwargs):
- """
- Add tween for the object to go from current values to set ones.
- Example: add_tween(sprite, x = 500, y = 200, duration = 0.4)
- This will move the sprite to coordinates (500, 200) in 0.4 seconds.
- For parameter "easing" you can use one of the pytweener.Easing
- functions, or specify your own.
- The tweener can handle numbers, dates and color strings in hex ("#ffffff").
- This function performs overwrite style conflict solving - in case
- if a previous tween operates on same attributes, the attributes in
- question are removed from that tween.
- """
- if duration is None:
- duration = self.default_duration
-
- easing = easing or self.default_easing
-
- tw = Tween(obj, duration, easing, on_complete, on_update, **kwargs )
-
- if obj in self.current_tweens:
- for current_tween in tuple(self.current_tweens[obj]):
- prev_keys = set((key for (key, tweenable) in current_tween.tweenables))
- dif = prev_keys & set(kwargs.keys())
-
- for key, tweenable in tuple(current_tween.tweenables):
- if key in dif:
- current_tween.tweenables.remove((key, tweenable))
-
- if not current_tween.tweenables:
- current_tween.finish()
- self.current_tweens[obj].remove(current_tween)
-
-
- self.current_tweens[obj].add(tw)
- return tw
-
-
- def get_tweens(self, obj):
- """Get a list of all tweens acting on the specified object
- Useful for manipulating tweens on the fly"""
- return self.current_tweens.get(obj, None)
-
- def kill_tweens(self, obj = None):
- """Stop tweening an object, without completing the motion or firing the
- on_complete"""
- if obj:
- try:
- del self.current_tweens[obj]
- except:
- pass
- else:
- self.current_tweens = collections.defaultdict(set)
-
- def remove_tween(self, tween):
- """"remove given tween without completing the motion or firing the on_complete"""
- if tween.target in self.current_tweens and tween in self.current_tweens[tween.target]:
- self.current_tweens[tween.target].remove(tween)
- if not self.current_tweens[tween.target]:
- del self.current_tweens[tween.target]
-
- def finish(self):
- """jump the the last frame of all tweens"""
- for obj in self.current_tweens:
- for tween in self.current_tweens[obj]:
- tween.finish()
- self.current_tweens = {}
-
- def update(self, delta_seconds):
- """update tweeners. delta_seconds is time in seconds since last frame"""
-
- for obj in tuple(self.current_tweens):
- for tween in tuple(self.current_tweens[obj]):
- done = tween._update(delta_seconds)
- if done:
- self.current_tweens[obj].remove(tween)
- if tween.on_complete: tween.on_complete(tween.target)
-
- if not self.current_tweens[obj]:
- del self.current_tweens[obj]
-
- return self.current_tweens
-
-
-class Tween(object):
- __slots__ = ('tweenables', 'target', 'delta', 'duration', 'ease', 'delta',
- 'on_complete', 'on_update', 'complete')
-
- def __init__(self, obj, duration, easing, on_complete, on_update, **kwargs):
- """Tween object use Tweener.add_tween( ... ) to create"""
- self.duration = duration
- self.target = obj
- self.ease = easing
-
- # list of (property, start_value, delta)
- self.tweenables = set()
- for key, value in kwargs.items():
- self.tweenables.add((key, Tweenable(self.target.__dict__[key], value)))
-
- self.delta = 0
- self.on_complete = on_complete
- self.on_update = on_update
- self.complete = False
-
- def finish(self):
- self._update(self.duration)
-
- def _update(self, ptime):
- """Update tween with the time since the last frame"""
- self.delta = self.delta + ptime
- if self.delta > self.duration:
- self.delta = self.duration
-
- if self.delta == self.duration:
- for key, tweenable in self.tweenables:
- self.target.__setattr__(key, tweenable.target_value)
- else:
- fraction = self.ease(self.delta / self.duration)
-
- for key, tweenable in self.tweenables:
- self.target.__setattr__(key, tweenable.update(fraction))
-
- if self.delta == self.duration or len(self.tweenables) == 0:
- self.complete = True
-
- if self.on_update:
- self.on_update(self.target)
-
- return self.complete
-
-
-
-
-class Tweenable(object):
- """a single attribute that has to be tweened from start to target"""
- __slots__ = ('start_value', 'change', 'decode_func', 'target_value', 'update')
-
- hex_color_normal = re.compile("#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})")
- hex_color_short = re.compile("#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])")
-
-
- def __init__(self, start_value, target_value):
- self.decode_func = lambda x: x
- self.target_value = target_value
-
- def float_update(fraction):
- return self.start_value + self.change * fraction
-
- def date_update(fraction):
- return dt.date.fromtimestamp(self.start_value + self.change * fraction)
-
- def datetime_update(fraction):
- return dt.datetime.fromtimestamp(self.start_value + self.change * fraction)
-
- def color_update(fraction):
- val = [max(min(self.start_value[i] + self.change[i] * fraction, 255), 0) for i in range(3)]
- return "#%02x%02x%02x" % (val[0], val[1], val[2])
-
-
- if isinstance(start_value, int) or isinstance(start_value, float):
- self.start_value = start_value
- self.change = target_value - start_value
- self.update = float_update
- else:
- if isinstance(start_value, dt.datetime) or isinstance(start_value, dt.date):
- if isinstance(start_value, dt.datetime):
- self.update = datetime_update
- else:
- self.update = date_update
-
- self.decode_func = lambda x: time.mktime(x.timetuple())
- self.start_value = self.decode_func(start_value)
- self.change = self.decode_func(target_value) - self.start_value
-
- elif isinstance(start_value, basestring) \
- and (self.hex_color_normal.match(start_value) or self.hex_color_short.match(start_value)):
- self.update = color_update
- if self.hex_color_normal.match(start_value):
- self.decode_func = lambda val: [int(match, 16)
- for match in self.hex_color_normal.match(val).groups()]
-
- elif self.hex_color_short.match(start_value):
- self.decode_func = lambda val: [int(match + match, 16)
- for match in self.hex_color_short.match(val).groups()]
-
- if self.hex_color_normal.match(target_value):
- target_value = [int(match, 16)
- for match in self.hex_color_normal.match(target_value).groups()]
- else:
- target_value = [int(match + match, 16)
- for match in self.hex_color_short.match(target_value).groups()]
-
- self.start_value = self.decode_func(start_value)
- self.change = [target - start for start, target in zip(self.start_value, target_value)]
-
-
-
-"""Robert Penner's classes stripped from the repetetive c,b,d mish-mash
-(discovery of Patryk Zawadzki). This way we do the math once and apply to
-all the tweenables instead of repeating it for each attribute
-"""
-
-def inverse(method):
- def real_inverse(t, *args, **kwargs):
- t = 1 - t
- return 1 - method(t, *args, **kwargs)
- return real_inverse
-
-def symmetric(ease_in, ease_out):
- def real_symmetric(t, *args, **kwargs):
- if t < 0.5:
- return ease_in(t * 2, *args, **kwargs) / 2
-
- return ease_out((t - 0.5) * 2, *args, **kwargs) / 2 + 0.5
- return real_symmetric
-
-class Symmetric(object):
- def __init__(self, ease_in = None, ease_out = None):
- self.ease_in = ease_in or inverse(ease_out)
- self.ease_out = ease_out or inverse(ease_in)
- self.ease_in_out = symmetric(self.ease_in, self.ease_out)
-
-
-class Easing(object):
- """Class containing easing classes to use together with the tweener.
- All of the classes have :func:`ease_in`, :func:`ease_out` and
- :func:`ease_in_out` functions."""
-
- Linear = Symmetric(lambda t: t, lambda t: t)
- Quad = Symmetric(lambda t: t*t)
- Cubic = Symmetric(lambda t: t*t*t)
- Quart = Symmetric(lambda t: t*t*t*t)
- Quint = Symmetric(lambda t: t*t*t*t*t)
- Strong = Quint #oh i wonder why but the ported code is the same as in Quint
-
- Circ = Symmetric(lambda t: 1 - math.sqrt(1 - t * t))
- Sine = Symmetric(lambda t: 1 - math.cos(t * (math.pi / 2)))
-
-
- def _back_in(t, s=1.70158):
- return t * t * ((s + 1) * t - s)
- Back = Symmetric(_back_in)
-
-
- def _bounce_out(t):
- if t < 1 / 2.75:
- return 7.5625 * t * t
- elif t < 2 / 2.75:
- t = t - 1.5 / 2.75
- return 7.5625 * t * t + 0.75
- elif t < 2.5 / 2.75:
- t = t - 2.25 / 2.75
- return 7.5625 * t * t + .9375
- else:
- t = t - 2.625 / 2.75
- return 7.5625 * t * t + 0.984375
- Bounce = Symmetric(ease_out = _bounce_out)
-
-
- def _elastic_in(t, springiness = 0, wave_length = 0):
- if t in(0, 1):
- return t
-
- wave_length = wave_length or (1 - t) * 0.3
-
- if springiness <= 1:
- springiness = t
- s = wave_length / 4
- else:
- s = wave_length / (2 * math.pi) * math.asin(t / springiness)
-
- t = t - 1
- return -(springiness * math.pow(2, 10 * t) * math.sin((t * t - s) * (2 * math.pi) / wave_length))
- Elastic = Symmetric(_elastic_in)
-
-
- def _expo_in(t):
- if t in (0, 1): return t
- return math.pow(2, 10 * t) * 0.001
- Expo = Symmetric(_expo_in)
-
-
-
-class _Dummy(object):
- def __init__(self, a, b, c):
- self.a = a
- self.b = b
- self.c = c
-
-if __name__ == "__main__":
- import datetime as dt
-
- tweener = Tweener()
- objects = []
-
- for i in range(10000):
- objects.append(_Dummy(dt.datetime.now(), i-100, i-100))
-
-
- total = dt.datetime.now()
-
- t = dt.datetime.now()
- print "Adding 10000 objects..."
- for i, o in enumerate(objects):
- tweener.add_tween(o, a = dt.datetime.now() - dt.timedelta(days=3),
- b = i,
- c = i,
- duration = 1.0,
- easing=Easing.Circ.ease_in_out)
- print dt.datetime.now() - t
-
- t = dt.datetime.now()
- print "Updating 10 times......"
- for i in range(11): #update 1000 times
- tweener.update(0.1)
- print dt.datetime.now() - t
diff --git a/lib/server/__init__.py b/lib/server/__init__.py
new file mode 100644
index 0000000..a779594
--- /dev/null
+++ b/lib/server/__init__.py
@@ -0,0 +1,2 @@
+# server import
+from server._server import Server
diff --git a/lib/server/_server.py b/lib/server/_server.py
new file mode 100644
index 0000000..eb7f94c
--- /dev/null
+++ b/lib/server/_server.py
@@ -0,0 +1,20 @@
+# python import
+import atexit, multiprocessing
+
+# server import
+from server import config
+from server.flask import app, run_app
+
+class Server(object):
+
+ def __init__(self):
+ # start the server
+ self._server = multiprocessing.Process(target=run_app)
+ self._server.start()
+ # .. required to close properly
+ atexit.register(self.close)
+
+ def close(self):
+ # stop web thread
+ self._server.terminate()
+ self._server.join()
diff --git a/atoideweb/tools/config.py b/lib/server/config.py
index 00b4c5d..9180e54 100644
--- a/atoideweb/tools/config.py
+++ b/lib/server/config.py
@@ -4,7 +4,7 @@ import logging
from ConfigParser import SafeConfigParser
# get application logger
-logger = logging.getLogger('atoidejouer')
+logger = logging.getLogger('atoideweb')
class Config(object):
@@ -13,14 +13,12 @@ class Config(object):
"""Our singleton object.
"""
- def __init__(self, config=None, debug=False):
+ def __init__(self, config=None):
"""Create the new singleton with the application config.
:param config: SafeConfigParser object for all the application
:see: `ConfigParser.SafeConfigParser`
"""
- # dummy flag
- self.debug = debug
# ensure config
if config is None:
# ...
@@ -31,62 +29,6 @@ class Config(object):
else:
self.__config = config
- def get_mode(self):
- # get mode value
- _mode = self.get('activity>mode')
- # ensure mode
- if _mode in ['easy', 'advanced']:
- pass
- else:
- # is easy by default
- _mode = 'easy'
- # ensure config for further use
- self.set('activity>mode', _mode)
- # return it
- return _mode
-
- def get_rate(self):
- # get rate value
- _rate = self.get('activity>rate')
- # ensure rate
- if _rate in ['normal', 'smooth', 'faster']:
- pass
- else:
- # is easy by default
- _rate = 'normal'
- # ensure config for further use
- self.set('activity>rate', _rate)
- # return it
- return _rate
-
- def get_rate_value(self):
- # ..
- _rate = self.get_rate()
- # simple rate factory
- if _rate == 'faster':
- return 0.2
- elif _rate == 'smooth':
- return 0.5
- else:
- return 1.0
-
- def get_dnd(self):
- # get dnd value
- _dnd = self.get('activity>dnd')
- # ensure dnd
- if _dnd in ['yes', 'no']:
- pass
- else:
- # is no by default
- _dnd = 'no'
- # ensure config for further use
- self.set('activity>dnd', _dnd)
- # return it
- return _dnd
-
- def use_dnd(self):
- return self.get_dnd() == 'yes'
-
def set(self, path, value, type_=str):
# set value
self.set_value(*path.split(">"), value=value, type_=type_)
@@ -157,13 +99,13 @@ class Config(object):
# singleton instance
instance = None
- def __new__(c, config=None, force=False, debug=False):
+ def __new__(c, config=None, force=False):
"""Singleton new init.
"""
# if doesn't already initialized
if not Config.instance \
or force is True:
# create a new instance
- Config.instance = Config.__Singleton(config=config, debug=debug)
+ Config.instance = Config.__Singleton(config=config)
# return the manager object
return Config.instance
diff --git a/lib/server/flask/__init__.py b/lib/server/flask/__init__.py
new file mode 100644
index 0000000..7d75791
--- /dev/null
+++ b/lib/server/flask/__init__.py
@@ -0,0 +1,5 @@
+# flask import
+from flask import request, jsonify
+
+# server import
+from server.flask._app import logger, app, render, run_app
diff --git a/lib/server/flask/_app.py b/lib/server/flask/_app.py
new file mode 100644
index 0000000..cb2d4c3
--- /dev/null
+++ b/lib/server/flask/_app.py
@@ -0,0 +1,57 @@
+# python import
+import os, logging
+
+# common gettext import
+from gettext import gettext
+
+# server import
+from server import config
+
+# get APP_NAME or default name
+APP_NAME = config.Config().get('activity>name')
+APP_NAME = 'my_activity' if APP_NAME is None else APP_NAME
+
+# get app config values
+_debug = config.Config().get('server>debug')
+_key = config.Config().get('server>secret_key')
+
+# our activity root directory relative to the current one
+PROJET_ROOT = os.path.join(os.path.dirname(__file__), '..', '..', '..')
+
+# flask import
+import flask
+
+# init app
+app = flask.Flask(__name__)
+app.debug = True if _debug is None else _debug
+app.secret_key = 'NO_KEY_OOPS' if _key is None else _key
+# override jinja template path
+app.jinja_loader.searchpath = [os.path.join(PROJET_ROOT, 'templates')]
+
+# init static folder path
+from werkzeug import SharedDataMiddleware
+app.wsgi_app = SharedDataMiddleware(app.wsgi_app,
+ {'/static': os.path.join(PROJET_ROOT, 'static')})
+
+# set basic logging out of sugar
+try:
+ import sugar
+ # get sugar logger
+ logger = logging.getLogger(APP_NAME)
+ logger.setLevel(logging.DEBUG)
+except Exception:
+ # .. or flask logger
+ logger = app.logger
+
+
+def run_app():
+ """run method to trigger at from python class.
+ """
+ app.run()
+
+
+def render(template, **context):
+ """Crappy hack for gettext issue in templates!
+ """
+ context['_'] = gettext
+ return flask.render_template(template, **context)
diff --git a/werkzeug/__init__.py b/lib/werkzeug/__init__.py
index 3bde347..3bde347 100644
--- a/werkzeug/__init__.py
+++ b/lib/werkzeug/__init__.py
diff --git a/werkzeug/_internal.py b/lib/werkzeug/_internal.py
index 7cbf2d2..7cbf2d2 100644
--- a/werkzeug/_internal.py
+++ b/lib/werkzeug/_internal.py
diff --git a/werkzeug/contrib/__init__.py b/lib/werkzeug/contrib/__init__.py
index 318e4cb..318e4cb 100644
--- a/werkzeug/contrib/__init__.py
+++ b/lib/werkzeug/contrib/__init__.py
diff --git a/werkzeug/contrib/atom.py b/lib/werkzeug/contrib/atom.py
index 8a68857..8a68857 100644
--- a/werkzeug/contrib/atom.py
+++ b/lib/werkzeug/contrib/atom.py
diff --git a/werkzeug/contrib/cache.py b/lib/werkzeug/contrib/cache.py
index a99d947..a99d947 100644
--- a/werkzeug/contrib/cache.py
+++ b/lib/werkzeug/contrib/cache.py
diff --git a/werkzeug/contrib/fixers.py b/lib/werkzeug/contrib/fixers.py
index 6ff7b20..6ff7b20 100644
--- a/werkzeug/contrib/fixers.py
+++ b/lib/werkzeug/contrib/fixers.py
diff --git a/werkzeug/contrib/iterio.py b/lib/werkzeug/contrib/iterio.py
index 02167de..02167de 100644
--- a/werkzeug/contrib/iterio.py
+++ b/lib/werkzeug/contrib/iterio.py
diff --git a/werkzeug/contrib/jsrouting.py b/lib/werkzeug/contrib/jsrouting.py
index f1da16e..f1da16e 100644
--- a/werkzeug/contrib/jsrouting.py
+++ b/lib/werkzeug/contrib/jsrouting.py
diff --git a/werkzeug/contrib/kickstart.py b/lib/werkzeug/contrib/kickstart.py
index 173569f..173569f 100644
--- a/werkzeug/contrib/kickstart.py
+++ b/lib/werkzeug/contrib/kickstart.py
diff --git a/werkzeug/contrib/limiter.py b/lib/werkzeug/contrib/limiter.py
index 8a70326..8a70326 100644
--- a/werkzeug/contrib/limiter.py
+++ b/lib/werkzeug/contrib/limiter.py
diff --git a/werkzeug/contrib/lint.py b/lib/werkzeug/contrib/lint.py
index 55413ab..55413ab 100644
--- a/werkzeug/contrib/lint.py
+++ b/lib/werkzeug/contrib/lint.py
diff --git a/werkzeug/contrib/profiler.py b/lib/werkzeug/contrib/profiler.py
index cea15f8..cea15f8 100644
--- a/werkzeug/contrib/profiler.py
+++ b/lib/werkzeug/contrib/profiler.py
diff --git a/werkzeug/contrib/securecookie.py b/lib/werkzeug/contrib/securecookie.py
index 33a99f6..33a99f6 100644
--- a/werkzeug/contrib/securecookie.py
+++ b/lib/werkzeug/contrib/securecookie.py
diff --git a/werkzeug/contrib/sessions.py b/lib/werkzeug/contrib/sessions.py
index 437311a..437311a 100644
--- a/werkzeug/contrib/sessions.py
+++ b/lib/werkzeug/contrib/sessions.py
diff --git a/werkzeug/contrib/testtools.py b/lib/werkzeug/contrib/testtools.py
index faef150..faef150 100644
--- a/werkzeug/contrib/testtools.py
+++ b/lib/werkzeug/contrib/testtools.py
diff --git a/werkzeug/contrib/wrappers.py b/lib/werkzeug/contrib/wrappers.py
index 8c90f66..8c90f66 100644
--- a/werkzeug/contrib/wrappers.py
+++ b/lib/werkzeug/contrib/wrappers.py
diff --git a/werkzeug/datastructures.py b/lib/werkzeug/datastructures.py
index e8d9830..e8d9830 100644
--- a/werkzeug/datastructures.py
+++ b/lib/werkzeug/datastructures.py
diff --git a/werkzeug/debug/__init__.py b/lib/werkzeug/debug/__init__.py
index be0a55f..be0a55f 100644
--- a/werkzeug/debug/__init__.py
+++ b/lib/werkzeug/debug/__init__.py
diff --git a/werkzeug/debug/console.py b/lib/werkzeug/debug/console.py
index 320b1fd..320b1fd 100644
--- a/werkzeug/debug/console.py
+++ b/lib/werkzeug/debug/console.py
diff --git a/werkzeug/debug/render.py b/lib/werkzeug/debug/render.py
index 7f0692a..7f0692a 100644
--- a/werkzeug/debug/render.py
+++ b/lib/werkzeug/debug/render.py
diff --git a/werkzeug/debug/repr.py b/lib/werkzeug/debug/repr.py
index 47abf43..47abf43 100644
--- a/werkzeug/debug/repr.py
+++ b/lib/werkzeug/debug/repr.py
diff --git a/werkzeug/debug/shared/body.tmpl b/lib/werkzeug/debug/shared/body.tmpl
index 71cee20..71cee20 100644
--- a/werkzeug/debug/shared/body.tmpl
+++ b/lib/werkzeug/debug/shared/body.tmpl
diff --git a/werkzeug/debug/shared/codetable.tmpl b/lib/werkzeug/debug/shared/codetable.tmpl
index 48665ee..48665ee 100644
--- a/werkzeug/debug/shared/codetable.tmpl
+++ b/lib/werkzeug/debug/shared/codetable.tmpl
diff --git a/werkzeug/debug/shared/console.png b/lib/werkzeug/debug/shared/console.png
index c28dd63..c28dd63 100644
--- a/werkzeug/debug/shared/console.png
+++ b/lib/werkzeug/debug/shared/console.png
Binary files differ
diff --git a/werkzeug/debug/shared/debugger.js b/lib/werkzeug/debug/shared/debugger.js
index 5ebbb76..5ebbb76 100644
--- a/werkzeug/debug/shared/debugger.js
+++ b/lib/werkzeug/debug/shared/debugger.js
diff --git a/werkzeug/debug/shared/jquery.js b/lib/werkzeug/debug/shared/jquery.js
index b1ae21d..b1ae21d 100644
--- a/werkzeug/debug/shared/jquery.js
+++ b/lib/werkzeug/debug/shared/jquery.js
diff --git a/werkzeug/debug/shared/less.png b/lib/werkzeug/debug/shared/less.png
index 5efefd6..5efefd6 100644
--- a/werkzeug/debug/shared/less.png
+++ b/lib/werkzeug/debug/shared/less.png
Binary files differ
diff --git a/werkzeug/debug/shared/more.png b/lib/werkzeug/debug/shared/more.png
index 804fa22..804fa22 100644
--- a/werkzeug/debug/shared/more.png
+++ b/lib/werkzeug/debug/shared/more.png
Binary files differ
diff --git a/werkzeug/debug/shared/source.png b/lib/werkzeug/debug/shared/source.png
index f7ea904..f7ea904 100644
--- a/werkzeug/debug/shared/source.png
+++ b/lib/werkzeug/debug/shared/source.png
Binary files differ
diff --git a/werkzeug/debug/shared/style.css b/lib/werkzeug/debug/shared/style.css
index 6e64eb2..6e64eb2 100644
--- a/werkzeug/debug/shared/style.css
+++ b/lib/werkzeug/debug/shared/style.css
diff --git a/werkzeug/debug/shared/vartable.tmpl b/lib/werkzeug/debug/shared/vartable.tmpl
index e653a99..e653a99 100644
--- a/werkzeug/debug/shared/vartable.tmpl
+++ b/lib/werkzeug/debug/shared/vartable.tmpl
diff --git a/werkzeug/debug/tbtools.py b/lib/werkzeug/debug/tbtools.py
index 253d23d..253d23d 100644
--- a/werkzeug/debug/tbtools.py
+++ b/lib/werkzeug/debug/tbtools.py
diff --git a/werkzeug/debug/templates/console.html b/lib/werkzeug/debug/templates/console.html
index ada2730..ada2730 100644
--- a/werkzeug/debug/templates/console.html
+++ b/lib/werkzeug/debug/templates/console.html
diff --git a/werkzeug/debug/templates/dump_object.html b/lib/werkzeug/debug/templates/dump_object.html
index 59d80bf..59d80bf 100644
--- a/werkzeug/debug/templates/dump_object.html
+++ b/lib/werkzeug/debug/templates/dump_object.html
diff --git a/werkzeug/debug/templates/frame.html b/lib/werkzeug/debug/templates/frame.html
index 5378d1c..5378d1c 100644
--- a/werkzeug/debug/templates/frame.html
+++ b/lib/werkzeug/debug/templates/frame.html
diff --git a/werkzeug/debug/templates/help_command.html b/lib/werkzeug/debug/templates/help_command.html
index 3ce949b..3ce949b 100644
--- a/werkzeug/debug/templates/help_command.html
+++ b/lib/werkzeug/debug/templates/help_command.html
diff --git a/werkzeug/debug/templates/source.html b/lib/werkzeug/debug/templates/source.html
index 529dfbc..529dfbc 100644
--- a/werkzeug/debug/templates/source.html
+++ b/lib/werkzeug/debug/templates/source.html
diff --git a/werkzeug/debug/templates/traceback_full.html b/lib/werkzeug/debug/templates/traceback_full.html
index edfc64b..edfc64b 100644
--- a/werkzeug/debug/templates/traceback_full.html
+++ b/lib/werkzeug/debug/templates/traceback_full.html
diff --git a/werkzeug/debug/templates/traceback_plaintext.html b/lib/werkzeug/debug/templates/traceback_plaintext.html
index 91901d2..91901d2 100644
--- a/werkzeug/debug/templates/traceback_plaintext.html
+++ b/lib/werkzeug/debug/templates/traceback_plaintext.html
diff --git a/werkzeug/debug/templates/traceback_summary.html b/lib/werkzeug/debug/templates/traceback_summary.html
index 5cafe0a..5cafe0a 100644
--- a/werkzeug/debug/templates/traceback_summary.html
+++ b/lib/werkzeug/debug/templates/traceback_summary.html
diff --git a/werkzeug/debug/utils.py b/lib/werkzeug/debug/utils.py
index 6e479e4..6e479e4 100644
--- a/werkzeug/debug/utils.py
+++ b/lib/werkzeug/debug/utils.py
diff --git a/werkzeug/exceptions.py b/lib/werkzeug/exceptions.py
index cce1230..cce1230 100644
--- a/werkzeug/exceptions.py
+++ b/lib/werkzeug/exceptions.py
diff --git a/werkzeug/formparser.py b/lib/werkzeug/formparser.py
index 54e952f..54e952f 100644
--- a/werkzeug/formparser.py
+++ b/lib/werkzeug/formparser.py
diff --git a/werkzeug/http.py b/lib/werkzeug/http.py
index fcd224f..fcd224f 100644
--- a/werkzeug/http.py
+++ b/lib/werkzeug/http.py
diff --git a/werkzeug/local.py b/lib/werkzeug/local.py
index 595c87d..595c87d 100644
--- a/werkzeug/local.py
+++ b/lib/werkzeug/local.py
diff --git a/werkzeug/posixemulation.py b/lib/werkzeug/posixemulation.py
index 42171b8..42171b8 100644
--- a/werkzeug/posixemulation.py
+++ b/lib/werkzeug/posixemulation.py
diff --git a/werkzeug/routing.py b/lib/werkzeug/routing.py
index 374409e..374409e 100644
--- a/werkzeug/routing.py
+++ b/lib/werkzeug/routing.py
diff --git a/werkzeug/script.py b/lib/werkzeug/script.py
index efebba0..efebba0 100644
--- a/werkzeug/script.py
+++ b/lib/werkzeug/script.py
diff --git a/werkzeug/security.py b/lib/werkzeug/security.py
index be42cec..be42cec 100644
--- a/werkzeug/security.py
+++ b/lib/werkzeug/security.py
diff --git a/werkzeug/serving.py b/lib/werkzeug/serving.py
index 6523049..6523049 100644
--- a/werkzeug/serving.py
+++ b/lib/werkzeug/serving.py
diff --git a/werkzeug/templates.py b/lib/werkzeug/templates.py
index 5f82ba8..5f82ba8 100644
--- a/werkzeug/templates.py
+++ b/lib/werkzeug/templates.py
diff --git a/werkzeug/test.py b/lib/werkzeug/test.py
index 16a3927..16a3927 100644
--- a/werkzeug/test.py
+++ b/lib/werkzeug/test.py
diff --git a/werkzeug/testapp.py b/lib/werkzeug/testapp.py
index 9997be7..9997be7 100644
--- a/werkzeug/testapp.py
+++ b/lib/werkzeug/testapp.py
diff --git a/werkzeug/urls.py b/lib/werkzeug/urls.py
index 8e8446e..8e8446e 100644
--- a/werkzeug/urls.py
+++ b/lib/werkzeug/urls.py
diff --git a/werkzeug/useragents.py b/lib/werkzeug/useragents.py
index c9e4c86..c9e4c86 100644
--- a/werkzeug/useragents.py
+++ b/lib/werkzeug/useragents.py
diff --git a/werkzeug/utils.py b/lib/werkzeug/utils.py
index 072de7e..072de7e 100644
--- a/werkzeug/utils.py
+++ b/lib/werkzeug/utils.py
diff --git a/werkzeug/wrappers.py b/lib/werkzeug/wrappers.py
index d9b0089..d9b0089 100644
--- a/werkzeug/wrappers.py
+++ b/lib/werkzeug/wrappers.py
diff --git a/werkzeug/wsgi.py b/lib/werkzeug/wsgi.py
index 3a8aa76..3a8aa76 100644
--- a/werkzeug/wsgi.py
+++ b/lib/werkzeug/wsgi.py
diff --git a/peak/__init__.py b/peak/__init__.py
deleted file mode 100644
index 8d17c21..0000000
--- a/peak/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-__import__('pkg_resources').declare_namespace(__name__)
-
diff --git a/peak/util/__init__.py b/peak/util/__init__.py
deleted file mode 100644
index de40ea7..0000000
--- a/peak/util/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__import__('pkg_resources').declare_namespace(__name__)
diff --git a/peak/util/imports.py b/peak/util/imports.py
deleted file mode 100644
index 36ab04d..0000000
--- a/peak/util/imports.py
+++ /dev/null
@@ -1,410 +0,0 @@
-"""Tools for doing dynamic imports"""
-
-__all__ = [
- 'importString', 'importObject', 'importSequence', 'importSuite',
- 'lazyModule', 'joinPath', 'whenImported', 'getModuleHooks',
-]
-
-import __main__, sys
-
-from types import StringTypes, ModuleType
-from sys import modules
-from imp import acquire_lock, release_lock
-
-defaultGlobalDict = __main__.__dict__
-
-try:
- from peak.util.EigenData import AlreadyRead
-except ImportError:
- class AlreadyRead(Exception):pass
-
-
-def importSuite(specs, globalDict=defaultGlobalDict):
- """Create a test suite from import specs"""
-
- from unittest import TestSuite
-
- return TestSuite(
- [t() for t in importSequence(specs,globalDict)]
- )
-
-
-
-
-
-
-
-
-
-
-
-
-def joinPath(modname, relativePath):
- """Adjust a module name by a '/'-separated, relative or absolute path"""
-
- module = modname.split('.')
- for p in relativePath.split('/'):
-
- if p=='..':
- module.pop()
- elif not p:
- module = []
- elif p!='.':
- module.append(p)
-
- return '.'.join(module)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def importString(name, globalDict=defaultGlobalDict):
- """Import an item specified by a string
-
- Example Usage::
-
- attribute1 = importString('some.module:attribute1')
- attribute2 = importString('other.module:nested.attribute2')
-
- 'importString' imports an object from a module, according to an
- import specification string: a dot-delimited path to an object
- in the Python package namespace. For example, the string
- '"some.module.attribute"' is equivalent to the result of
- 'from some.module import attribute'.
-
- For readability of import strings, it's sometimes helpful to use a ':' to
- separate a module name from items it contains. It's optional, though,
- as 'importString' will convert the ':' to a '.' internally anyway."""
-
- if ':' in name:
- name = name.replace(':','.')
-
- parts = filter(None,name.split('.'))
- item = __import__(parts.pop(0), globalDict, globalDict, ['__name__'])
-
- # Fast path for the common case, where everything is imported already
- for attr in parts:
- try:
- item = getattr(item, attr)
- except AttributeError:
- break # either there's an error, or something needs importing
- else:
- return item
-
- # We couldn't get there with just getattrs from the base import. So now
- # we loop *backwards* trying to import longer names, then shorter, until
- # we find the longest possible name that can be handled with __import__,
- # then loop forward again with getattr. This lets us give more meaningful
- # error messages than if we only went forwards.
- attrs = []
- exc = None
-
- try:
- while True:
- try:
- # Exit as soon as we find a prefix of the original `name`
- # that's an importable *module* or package
- item = __import__(name, globalDict, globalDict, ['__name__'])
- break
- except ImportError:
- if not exc:
- # Save the first ImportError, as it's usually the most
- # informative, especially w/Python < 2.4
- exc = sys.exc_info()
-
- if '.' not in name:
- # We've backed up all the way to the beginning, so reraise
- # the first ImportError we got
- raise exc[0],exc[1],exc[2]
-
- # Otherwise back up one position and try again
- parts = name.split('.')
- attrs.append(parts[-1])
- name = '.'.join(parts[:-1])
- finally:
- exc = None
-
- # Okay, the module object is now in 'item', so we can just loop forward
- # to retrieving the desired attribute.
- #
- while attrs:
- attr = attrs.pop()
- try:
- item = getattr(item,attr)
- except AttributeError:
- raise ImportError("%r has no %r attribute" % (item,attr))
-
- return item
-
-
-
-
-
-def lazyModule(modname, relativePath=None):
-
- """Return module 'modname', but with its contents loaded "on demand"
-
- This function returns 'sys.modules[modname]', if present. Otherwise
- it creates a 'LazyModule' object for the specified module, caches it
- in 'sys.modules', and returns it.
-
- 'LazyModule' is a subclass of the standard Python module type, that
- remains empty until an attempt is made to access one of its
- attributes. At that moment, the module is loaded into memory, and
- any hooks that were defined via 'whenImported()' are invoked.
-
- Note that calling 'lazyModule' with the name of a non-existent or
- unimportable module will delay the 'ImportError' until the moment
- access is attempted. The 'ImportError' will occur every time an
- attribute access is attempted, until the problem is corrected.
-
- This function also takes an optional second parameter, 'relativePath',
- which will be interpreted as a '/'-separated path string relative to
- 'modname'. If a 'relativePath' is supplied, the module found by
- traversing the path will be loaded instead of 'modname'. In the path,
- '.' refers to the current module, and '..' to the current module's
- parent. For example::
-
- fooBaz = lazyModule('foo.bar','../baz')
-
- will return the module 'foo.baz'. The main use of the 'relativePath'
- feature is to allow relative imports in modules that are intended for
- use with module inheritance. Where an absolute import would be carried
- over as-is into the inheriting module, an import relative to '__name__'
- will be relative to the inheriting module, e.g.::
-
- something = lazyModule(__name__,'../path/to/something')
-
- The above code will have different results in each module that inherits
- it.
-
- (Note: 'relativePath' can also be an absolute path (starting with '/');
- this is mainly useful for module '__bases__' lists.)"""
-
- def _loadModule(module):
- oldGA = LazyModule.__getattribute__
- oldSA = LazyModule.__setattr__
-
- modGA = ModuleType.__getattribute__
- modSA = ModuleType.__setattr__
-
- LazyModule.__getattribute__ = modGA
- LazyModule.__setattr__ = modSA
-
- acquire_lock()
- try:
- try:
- # don't reload if already loaded!
- if module.__dict__.keys()==['__name__']:
- # Get Python to do the real import!
- reload(module)
- try:
- for hook in getModuleHooks(module.__name__):
- hook(module)
- finally:
- # Ensure hooks are not called again, even if they fail
- postLoadHooks[module.__name__] = None
- except:
- # Reset our state so that we can retry later
- if '__file__' not in module.__dict__:
- LazyModule.__getattribute__ = oldGA.im_func
- LazyModule.__setattr__ = oldSA.im_func
- raise
-
- try:
- # Convert to a real module (if under 2.2)
- module.__class__ = ModuleType
- except TypeError:
- pass # 2.3 will fail, but no big deal
-
- finally:
- release_lock()
-
-
-
- class LazyModule(ModuleType):
- __slots__ = ()
- def __init__(self, name):
- ModuleType.__setattr__(self,'__name__',name)
- #super(LazyModule,self).__init__(name)
-
- def __getattribute__(self,attr):
- _loadModule(self)
- return ModuleType.__getattribute__(self,attr)
-
- def __setattr__(self,attr,value):
- _loadModule(self)
- return ModuleType.__setattr__(self,attr,value)
-
- if relativePath:
- modname = joinPath(modname, relativePath)
-
- acquire_lock()
- try:
- if modname not in modules:
- getModuleHooks(modname) # force an empty hook list into existence
- modules[modname] = LazyModule(modname)
- if '.' in modname:
- # ensure parent module/package is in sys.modules
- # and parent.modname=module, as soon as the parent is imported
- splitpos = modname.rindex('.')
- whenImported(
- modname[:splitpos],
- lambda m: setattr(m,modname[splitpos+1:],modules[modname])
- )
- return modules[modname]
- finally:
- release_lock()
-
-
-postLoadHooks = {}
-
-
-
-
-
-def getModuleHooks(moduleName):
-
- """Get list of hooks for 'moduleName'; error if module already loaded"""
-
- acquire_lock()
- try:
- hooks = postLoadHooks.setdefault(moduleName,[])
- if hooks is None:
- raise AlreadyRead("Module already imported", moduleName)
- return hooks
- finally:
- release_lock()
-
-
-def _setModuleHook(moduleName, hook):
- acquire_lock()
- try:
- if moduleName in modules and postLoadHooks.get(moduleName) is None:
- # Module is already imported/loaded, just call the hook
- module = modules[moduleName]
- hook(module)
- return module
-
- getModuleHooks(moduleName).append(hook)
- return lazyModule(moduleName)
- finally:
- release_lock()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def whenImported(moduleName, hook=None):
-
- """Call 'hook(module)' when module named 'moduleName' is first used
-
- 'hook' must accept one argument: the module object named by 'moduleName',
- which must be a fully qualified (i.e. absolute) module name. The hook
- should not raise any exceptions, or it may prevent later hooks from
- running.
-
- If the module has already been imported normally, 'hook(module)' is
- called immediately, and the module object is returned from this function.
- If the module has not been imported, or has only been imported lazily,
- then the hook is called when the module is first used, and a lazy import
- of the module is returned from this function. If the module was imported
- lazily and used before calling this function, the hook is called
- immediately, and the loaded module is returned from this function.
-
- Note that using this function implies a possible lazy import of the
- specified module, and lazy importing means that any 'ImportError' will be
- deferred until the module is used.
- """
- if hook is None:
- def decorate(func):
- whenImported(moduleName, func)
- return func
- return decorate
-
- if '.' in moduleName:
- # If parent is not yet imported, delay hook installation until the
- # parent is imported.
- splitpos = moduleName.rindex('.')
- whenImported(
- moduleName[:splitpos], lambda m: _setModuleHook(moduleName,hook)
- )
- else:
- return _setModuleHook(moduleName,hook)
-
-
-
-
-
-def importObject(spec, globalDict=defaultGlobalDict):
-
- """Convert a possible string specifier to an object
-
- If 'spec' is a string or unicode object, import it using 'importString()',
- otherwise return it as-is.
- """
-
- if isinstance(spec,StringTypes):
- return importString(spec, globalDict)
-
- return spec
-
-
-def importSequence(specs, globalDict=defaultGlobalDict):
-
- """Convert a string or list specifier to a list of objects.
-
- If 'specs' is a string or unicode object, treat it as a
- comma-separated list of import specifications, and return a
- list of the imported objects.
-
- If the result is not a string but is iterable, return a list
- with any string/unicode items replaced with their corresponding
- imports.
- """
-
- if isinstance(specs,StringTypes):
- return [importString(x.strip(),globalDict) for x in specs.split(',')]
- else:
- return [importObject(s,globalDict) for s in specs]
-
-
-
-
-
-
-
-
-
-
diff --git a/pkg_resources.py b/pkg_resources.py
deleted file mode 100644
index 79db00b..0000000
--- a/pkg_resources.py
+++ /dev/null
@@ -1,2625 +0,0 @@
-"""Package resource API
---------------------
-
-A resource is a logical file contained within a package, or a logical
-subdirectory thereof. The package resource API expects resource names
-to have their path parts separated with ``/``, *not* whatever the local
-path separator is. Do not use os.path operations to manipulate resource
-names being passed into the API.
-
-The package resource API is designed to work with normal filesystem packages,
-.egg files, and unpacked .egg files. It can also work in a limited way with
-.zip files and with custom PEP 302 loaders that support the ``get_data()``
-method.
-"""
-
-import sys, os, zipimport, time, re, imp
-
-try:
- frozenset
-except NameError:
- from sets import ImmutableSet as frozenset
-
-# capture these to bypass sandboxing
-from os import utime, rename, unlink, mkdir
-from os import open as os_open
-from os.path import isdir, split
-
-
-def _bypass_ensure_directory(name, mode=0777):
- # Sandbox-bypassing version of ensure_directory()
- dirname, filename = split(name)
- if dirname and filename and not isdir(dirname):
- _bypass_ensure_directory(dirname)
- mkdir(dirname, mode)
-
-
-
-
-
-
-
-_state_vars = {}
-
-def _declare_state(vartype, **kw):
- g = globals()
- for name, val in kw.iteritems():
- g[name] = val
- _state_vars[name] = vartype
-
-def __getstate__():
- state = {}
- g = globals()
- for k, v in _state_vars.iteritems():
- state[k] = g['_sget_'+v](g[k])
- return state
-
-def __setstate__(state):
- g = globals()
- for k, v in state.iteritems():
- g['_sset_'+_state_vars[k]](k, g[k], v)
- return state
-
-def _sget_dict(val):
- return val.copy()
-
-def _sset_dict(key, ob, state):
- ob.clear()
- ob.update(state)
-
-def _sget_object(val):
- return val.__getstate__()
-
-def _sset_object(key, ob, state):
- ob.__setstate__(state)
-
-_sget_none = _sset_none = lambda *args: None
-
-
-
-
-
-
-def get_supported_platform():
- """Return this platform's maximum compatible version.
-
- distutils.util.get_platform() normally reports the minimum version
- of Mac OS X that would be required to *use* extensions produced by
- distutils. But what we want when checking compatibility is to know the
- version of Mac OS X that we are *running*. To allow usage of packages that
- explicitly require a newer version of Mac OS X, we must also know the
- current version of the OS.
-
- If this condition occurs for any other platform with a version in its
- platform strings, this function should be extended accordingly.
- """
- plat = get_build_platform(); m = macosVersionString.match(plat)
- if m is not None and sys.platform == "darwin":
- try:
- plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
- except ValueError:
- pass # not Mac OS X
- return plat
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-__all__ = [
- # Basic resource access and distribution/entry point discovery
- 'require', 'run_script', 'get_provider', 'get_distribution',
- 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
- 'resource_string', 'resource_stream', 'resource_filename',
- 'resource_listdir', 'resource_exists', 'resource_isdir',
-
- # Environmental control
- 'declare_namespace', 'working_set', 'add_activation_listener',
- 'find_distributions', 'set_extraction_path', 'cleanup_resources',
- 'get_default_cache',
-
- # Primary implementation classes
- 'Environment', 'WorkingSet', 'ResourceManager',
- 'Distribution', 'Requirement', 'EntryPoint',
-
- # Exceptions
- 'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
- 'ExtractionError',
-
- # Parsing functions and string utilities
- 'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
- 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
- 'safe_extra', 'to_filename',
-
- # filesystem utilities
- 'ensure_directory', 'normalize_path',
-
- # Distribution "precedence" constants
- 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
-
- # "Provider" interfaces, implementations, and registration/lookup APIs
- 'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
- 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
- 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
- 'register_finder', 'register_namespace_handler', 'register_loader_type',
- 'fixup_namespace_packages', 'get_importer',
-
- # Deprecated/backward compatibility only
- 'run_main', 'AvailableDistributions',
-]
-class ResolutionError(Exception):
- """Abstract base for dependency resolution errors"""
- def __repr__(self): return self.__class__.__name__+repr(self.args)
-
-class VersionConflict(ResolutionError):
- """An already-installed version conflicts with the requested version"""
-
-class DistributionNotFound(ResolutionError):
- """A requested distribution was not found"""
-
-class UnknownExtra(ResolutionError):
- """Distribution doesn't have an "extra feature" of the given name"""
-_provider_factories = {}
-PY_MAJOR = sys.version[:3]
-EGG_DIST = 3
-BINARY_DIST = 2
-SOURCE_DIST = 1
-CHECKOUT_DIST = 0
-DEVELOP_DIST = -1
-
-def register_loader_type(loader_type, provider_factory):
- """Register `provider_factory` to make providers for `loader_type`
-
- `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
- and `provider_factory` is a function that, passed a *module* object,
- returns an ``IResourceProvider`` for that module.
- """
- _provider_factories[loader_type] = provider_factory
-
-def get_provider(moduleOrReq):
- """Return an IResourceProvider for the named module or requirement"""
- if isinstance(moduleOrReq,Requirement):
- return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
- try:
- module = sys.modules[moduleOrReq]
- except KeyError:
- __import__(moduleOrReq)
- module = sys.modules[moduleOrReq]
- loader = getattr(module, '__loader__', None)
- return _find_adapter(_provider_factories, loader)(module)
-
-def _macosx_vers(_cache=[]):
- if not _cache:
- from platform import mac_ver
- _cache.append(mac_ver()[0].split('.'))
- return _cache[0]
-
-def _macosx_arch(machine):
- return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
-
-def get_build_platform():
- """Return this platform's string for platform-specific distributions
-
- XXX Currently this is the same as ``distutils.util.get_platform()``, but it
- needs some hacks for Linux and Mac OS X.
- """
- from distutils.util import get_platform
- plat = get_platform()
- if sys.platform == "darwin" and not plat.startswith('macosx-'):
- try:
- version = _macosx_vers()
- machine = os.uname()[4].replace(" ", "_")
- return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
- _macosx_arch(machine))
- except ValueError:
- # if someone is running a non-Mac darwin system, this will fall
- # through to the default implementation
- pass
- return plat
-
-macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
-darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
-get_platform = get_build_platform # XXX backward compat
-
-
-
-
-
-
-
-
-
-def compatible_platforms(provided,required):
- """Can code for the `provided` platform run on the `required` platform?
-
- Returns true if either platform is ``None``, or the platforms are equal.
-
- XXX Needs compatibility checks for Linux and other unixy OSes.
- """
- if provided is None or required is None or provided==required:
- return True # easy case
-
- # Mac OS X special cases
- reqMac = macosVersionString.match(required)
- if reqMac:
- provMac = macosVersionString.match(provided)
-
- # is this a Mac package?
- if not provMac:
- # this is backwards compatibility for packages built before
- # setuptools 0.6. All packages built after this point will
- # use the new macosx designation.
- provDarwin = darwinVersionString.match(provided)
- if provDarwin:
- dversion = int(provDarwin.group(1))
- macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
- if dversion == 7 and macosversion >= "10.3" or \
- dversion == 8 and macosversion >= "10.4":
-
- #import warnings
- #warnings.warn("Mac eggs should be rebuilt to "
- # "use the macosx designation instead of darwin.",
- # category=DeprecationWarning)
- return True
- return False # egg isn't macosx or legacy darwin
-
- # are they the same major version and machine type?
- if provMac.group(1) != reqMac.group(1) or \
- provMac.group(3) != reqMac.group(3):
- return False
-
-
-
- # is the required OS major update >= the provided one?
- if int(provMac.group(2)) > int(reqMac.group(2)):
- return False
-
- return True
-
- # XXX Linux and other platforms' special cases should go here
- return False
-
-
-def run_script(dist_spec, script_name):
- """Locate distribution `dist_spec` and run its `script_name` script"""
- ns = sys._getframe(1).f_globals
- name = ns['__name__']
- ns.clear()
- ns['__name__'] = name
- require(dist_spec)[0].run_script(script_name, ns)
-
-run_main = run_script # backward compatibility
-
-def get_distribution(dist):
- """Return a current distribution object for a Requirement or string"""
- if isinstance(dist,basestring): dist = Requirement.parse(dist)
- if isinstance(dist,Requirement): dist = get_provider(dist)
- if not isinstance(dist,Distribution):
- raise TypeError("Expected string, Requirement, or Distribution", dist)
- return dist
-
-def load_entry_point(dist, group, name):
- """Return `name` entry point of `group` for `dist` or raise ImportError"""
- return get_distribution(dist).load_entry_point(group, name)
-
-def get_entry_map(dist, group=None):
- """Return the entry point map for `group`, or the full entry map"""
- return get_distribution(dist).get_entry_map(group)
-
-def get_entry_info(dist, group, name):
- """Return the EntryPoint object for `group`+`name`, or ``None``"""
- return get_distribution(dist).get_entry_info(group, name)
-
-
-class IMetadataProvider:
-
- def has_metadata(name):
- """Does the package's distribution contain the named metadata?"""
-
- def get_metadata(name):
- """The named metadata resource as a string"""
-
- def get_metadata_lines(name):
- """Yield named metadata resource as list of non-blank non-comment lines
-
- Leading and trailing whitespace is stripped from each line, and lines
- with ``#`` as the first non-blank character are omitted."""
-
- def metadata_isdir(name):
- """Is the named metadata a directory? (like ``os.path.isdir()``)"""
-
- def metadata_listdir(name):
- """List of metadata names in the directory (like ``os.listdir()``)"""
-
- def run_script(script_name, namespace):
- """Execute the named script in the supplied namespace dictionary"""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-class IResourceProvider(IMetadataProvider):
- """An object that provides access to package resources"""
-
- def get_resource_filename(manager, resource_name):
- """Return a true filesystem path for `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def get_resource_stream(manager, resource_name):
- """Return a readable file-like object for `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def get_resource_string(manager, resource_name):
- """Return a string containing the contents of `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def has_resource(resource_name):
- """Does the package contain the named resource?"""
-
- def resource_isdir(resource_name):
- """Is the named resource a directory? (like ``os.path.isdir()``)"""
-
- def resource_listdir(resource_name):
- """List of resource names in the directory (like ``os.listdir()``)"""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-class WorkingSet(object):
- """A collection of active distributions on sys.path (or a similar list)"""
-
- def __init__(self, entries=None):
- """Create working set from list of path entries (default=sys.path)"""
- self.entries = []
- self.entry_keys = {}
- self.by_key = {}
- self.callbacks = []
-
- if entries is None:
- entries = sys.path
-
- for entry in entries:
- self.add_entry(entry)
-
-
- def add_entry(self, entry):
- """Add a path item to ``.entries``, finding any distributions on it
-
- ``find_distributions(entry, True)`` is used to find distributions
- corresponding to the path entry, and they are added. `entry` is
- always appended to ``.entries``, even if it is already present.
- (This is because ``sys.path`` can contain the same value more than
- once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
- equal ``sys.path``.)
- """
- self.entry_keys.setdefault(entry, [])
- self.entries.append(entry)
- for dist in find_distributions(entry, True):
- self.add(dist, entry, False)
-
-
- def __contains__(self,dist):
- """True if `dist` is the active distribution for its project"""
- return self.by_key.get(dist.key) == dist
-
-
-
-
-
- def find(self, req):
- """Find a distribution matching requirement `req`
-
- If there is an active distribution for the requested project, this
- returns it as long as it meets the version requirement specified by
- `req`. But, if there is an active distribution for the project and it
- does *not* meet the `req` requirement, ``VersionConflict`` is raised.
- If there is no active distribution for the requested project, ``None``
- is returned.
- """
- dist = self.by_key.get(req.key)
- if dist is not None and dist not in req:
- raise VersionConflict(dist,req) # XXX add more info
- else:
- return dist
-
- def iter_entry_points(self, group, name=None):
- """Yield entry point objects from `group` matching `name`
-
- If `name` is None, yields all entry points in `group` from all
- distributions in the working set, otherwise only ones matching
- both `group` and `name` are yielded (in distribution order).
- """
- for dist in self:
- entries = dist.get_entry_map(group)
- if name is None:
- for ep in entries.values():
- yield ep
- elif name in entries:
- yield entries[name]
-
- def run_script(self, requires, script_name):
- """Locate distribution for `requires` and run `script_name` script"""
- ns = sys._getframe(1).f_globals
- name = ns['__name__']
- ns.clear()
- ns['__name__'] = name
- self.require(requires)[0].run_script(script_name, ns)
-
-
-
- def __iter__(self):
- """Yield distributions for non-duplicate projects in the working set
-
- The yield order is the order in which the items' path entries were
- added to the working set.
- """
- seen = {}
- for item in self.entries:
- for key in self.entry_keys[item]:
- if key not in seen:
- seen[key]=1
- yield self.by_key[key]
-
- def add(self, dist, entry=None, insert=True):
- """Add `dist` to working set, associated with `entry`
-
- If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
- On exit from this routine, `entry` is added to the end of the working
- set's ``.entries`` (if it wasn't already present).
-
- `dist` is only added to the working set if it's for a project that
- doesn't already have a distribution in the set. If it's added, any
- callbacks registered with the ``subscribe()`` method will be called.
- """
- if insert:
- dist.insert_on(self.entries, entry)
-
- if entry is None:
- entry = dist.location
- keys = self.entry_keys.setdefault(entry,[])
- keys2 = self.entry_keys.setdefault(dist.location,[])
- if dist.key in self.by_key:
- return # ignore hidden distros
-
- self.by_key[dist.key] = dist
- if dist.key not in keys:
- keys.append(dist.key)
- if dist.key not in keys2:
- keys2.append(dist.key)
- self._added_new(dist)
-
- def resolve(self, requirements, env=None, installer=None):
- """List all distributions needed to (recursively) meet `requirements`
-
- `requirements` must be a sequence of ``Requirement`` objects. `env`,
- if supplied, should be an ``Environment`` instance. If
- not supplied, it defaults to all distributions available within any
- entry or distribution in the working set. `installer`, if supplied,
- will be invoked with each requirement that cannot be met by an
- already-installed distribution; it should return a ``Distribution`` or
- ``None``.
- """
-
- requirements = list(requirements)[::-1] # set up the stack
- processed = {} # set of processed requirements
- best = {} # key -> dist
- to_activate = []
-
- while requirements:
- req = requirements.pop(0) # process dependencies breadth-first
- if req in processed:
- # Ignore cyclic or redundant dependencies
- continue
- dist = best.get(req.key)
- if dist is None:
- # Find the best distribution and add it to the map
- dist = self.by_key.get(req.key)
- if dist is None:
- if env is None:
- env = Environment(self.entries)
- dist = best[req.key] = env.best_match(req, self, installer)
- if dist is None:
- raise DistributionNotFound(req) # XXX put more info here
- to_activate.append(dist)
- if dist not in req:
- # Oops, the "best" so far conflicts with a dependency
- raise VersionConflict(dist,req) # XXX put more info here
- requirements.extend(dist.requires(req.extras)[::-1])
- processed[req] = True
-
- return to_activate # return list of distros to activate
-
- def find_plugins(self,
- plugin_env, full_env=None, installer=None, fallback=True
- ):
- """Find all activatable distributions in `plugin_env`
-
- Example usage::
-
- distributions, errors = working_set.find_plugins(
- Environment(plugin_dirlist)
- )
- map(working_set.add, distributions) # add plugins+libs to sys.path
- print "Couldn't load", errors # display errors
-
- The `plugin_env` should be an ``Environment`` instance that contains
- only distributions that are in the project's "plugin directory" or
- directories. The `full_env`, if supplied, should be an ``Environment``
- contains all currently-available distributions. If `full_env` is not
- supplied, one is created automatically from the ``WorkingSet`` this
- method is called on, which will typically mean that every directory on
- ``sys.path`` will be scanned for distributions.
-
- `installer` is a standard installer callback as used by the
- ``resolve()`` method. The `fallback` flag indicates whether we should
- attempt to resolve older versions of a plugin if the newest version
- cannot be resolved.
-
- This method returns a 2-tuple: (`distributions`, `error_info`), where
- `distributions` is a list of the distributions found in `plugin_env`
- that were loadable, along with any other distributions that are needed
- to resolve their dependencies. `error_info` is a dictionary mapping
- unloadable plugin distributions to an exception instance describing the
- error that occurred. Usually this will be a ``DistributionNotFound`` or
- ``VersionConflict`` instance.
- """
-
- plugin_projects = list(plugin_env)
- plugin_projects.sort() # scan project names in alphabetic order
-
- error_info = {}
- distributions = {}
-
- if full_env is None:
- env = Environment(self.entries)
- env += plugin_env
- else:
- env = full_env + plugin_env
-
- shadow_set = self.__class__([])
- map(shadow_set.add, self) # put all our entries in shadow_set
-
- for project_name in plugin_projects:
-
- for dist in plugin_env[project_name]:
-
- req = [dist.as_requirement()]
-
- try:
- resolvees = shadow_set.resolve(req, env, installer)
-
- except ResolutionError,v:
- error_info[dist] = v # save error info
- if fallback:
- continue # try the next older version of project
- else:
- break # give up on this project, keep going
-
- else:
- map(shadow_set.add, resolvees)
- distributions.update(dict.fromkeys(resolvees))
-
- # success, no need to try any more versions of this project
- break
-
- distributions = list(distributions)
- distributions.sort()
-
- return distributions, error_info
-
-
-
-
-
- def require(self, *requirements):
- """Ensure that distributions matching `requirements` are activated
-
- `requirements` must be a string or a (possibly-nested) sequence
- thereof, specifying the distributions and versions required. The
- return value is a sequence of the distributions that needed to be
- activated to fulfill the requirements; all relevant distributions are
- included, even if they were already activated in this working set.
- """
- needed = self.resolve(parse_requirements(requirements))
-
- for dist in needed:
- self.add(dist)
-
- return needed
-
- def subscribe(self, callback):
- """Invoke `callback` for all distributions (including existing ones)"""
- if callback in self.callbacks:
- return
- self.callbacks.append(callback)
- for dist in self:
- callback(dist)
-
- def _added_new(self, dist):
- for callback in self.callbacks:
- callback(dist)
-
- def __getstate__(self):
- return (
- self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
- self.callbacks[:]
- )
-
- def __setstate__(self, (entries, keys, by_key, callbacks)):
- self.entries = entries[:]
- self.entry_keys = keys.copy()
- self.by_key = by_key.copy()
- self.callbacks = callbacks[:]
-
-
-class Environment(object):
- """Searchable snapshot of distributions on a search path"""
-
- def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
- """Snapshot distributions available on a search path
-
- Any distributions found on `search_path` are added to the environment.
- `search_path` should be a sequence of ``sys.path`` items. If not
- supplied, ``sys.path`` is used.
-
- `platform` is an optional string specifying the name of the platform
- that platform-specific distributions must be compatible with. If
- unspecified, it defaults to the current platform. `python` is an
- optional string naming the desired version of Python (e.g. ``'2.4'``);
- it defaults to the current version.
-
- You may explicitly set `platform` (and/or `python`) to ``None`` if you
- wish to map *all* distributions, not just those compatible with the
- running platform or Python version.
- """
- self._distmap = {}
- self._cache = {}
- self.platform = platform
- self.python = python
- self.scan(search_path)
-
- def can_add(self, dist):
- """Is distribution `dist` acceptable for this environment?
-
- The distribution must match the platform and python version
- requirements specified when this environment was created, or False
- is returned.
- """
- return (self.python is None or dist.py_version is None
- or dist.py_version==self.python) \
- and compatible_platforms(dist.platform,self.platform)
-
- def remove(self, dist):
- """Remove `dist` from the environment"""
- self._distmap[dist.key].remove(dist)
-
- def scan(self, search_path=None):
- """Scan `search_path` for distributions usable in this environment
-
- Any distributions found are added to the environment.
- `search_path` should be a sequence of ``sys.path`` items. If not
- supplied, ``sys.path`` is used. Only distributions conforming to
- the platform/python version defined at initialization are added.
- """
- if search_path is None:
- search_path = sys.path
-
- for item in search_path:
- for dist in find_distributions(item):
- self.add(dist)
-
- def __getitem__(self,project_name):
- """Return a newest-to-oldest list of distributions for `project_name`
- """
- try:
- return self._cache[project_name]
- except KeyError:
- project_name = project_name.lower()
- if project_name not in self._distmap:
- return []
-
- if project_name not in self._cache:
- dists = self._cache[project_name] = self._distmap[project_name]
- _sort_dists(dists)
-
- return self._cache[project_name]
-
- def add(self,dist):
- """Add `dist` if we ``can_add()`` it and it isn't already added"""
- if self.can_add(dist) and dist.has_version():
- dists = self._distmap.setdefault(dist.key,[])
- if dist not in dists:
- dists.append(dist)
- if dist.key in self._cache:
- _sort_dists(self._cache[dist.key])
-
-
- def best_match(self, req, working_set, installer=None):
- """Find distribution best matching `req` and usable on `working_set`
-
- This calls the ``find(req)`` method of the `working_set` to see if a
- suitable distribution is already active. (This may raise
- ``VersionConflict`` if an unsuitable version of the project is already
- active in the specified `working_set`.) If a suitable distribution
- isn't active, this method returns the newest distribution in the
- environment that meets the ``Requirement`` in `req`. If no suitable
- distribution is found, and `installer` is supplied, then the result of
- calling the environment's ``obtain(req, installer)`` method will be
- returned.
- """
- dist = working_set.find(req)
- if dist is not None:
- return dist
- for dist in self[req.key]:
- if dist in req:
- return dist
- return self.obtain(req, installer) # try and download/install
-
- def obtain(self, requirement, installer=None):
- """Obtain a distribution matching `requirement` (e.g. via download)
-
- Obtain a distro that matches requirement (e.g. via download). In the
- base ``Environment`` class, this routine just returns
- ``installer(requirement)``, unless `installer` is None, in which case
- None is returned instead. This method is a hook that allows subclasses
- to attempt other ways of obtaining a distribution before falling back
- to the `installer` argument."""
- if installer is not None:
- return installer(requirement)
-
- def __iter__(self):
- """Yield the unique project names of the available distributions"""
- for key in self._distmap.keys():
- if self[key]: yield key
-
-
-
-
- def __iadd__(self, other):
- """In-place addition of a distribution or environment"""
- if isinstance(other,Distribution):
- self.add(other)
- elif isinstance(other,Environment):
- for project in other:
- for dist in other[project]:
- self.add(dist)
- else:
- raise TypeError("Can't add %r to environment" % (other,))
- return self
-
- def __add__(self, other):
- """Add an environment or distribution to an environment"""
- new = self.__class__([], platform=None, python=None)
- for env in self, other:
- new += env
- return new
-
-
-AvailableDistributions = Environment # XXX backward compatibility
-
-
-class ExtractionError(RuntimeError):
- """An error occurred extracting a resource
-
- The following attributes are available from instances of this exception:
-
- manager
- The resource manager that raised this exception
-
- cache_path
- The base directory for resource extraction
-
- original_error
- The exception instance that caused extraction to fail
- """
-
-
-
-
-class ResourceManager:
- """Manage resource extraction and packages"""
- extraction_path = None
-
- def __init__(self):
- self.cached_files = {}
-
- def resource_exists(self, package_or_requirement, resource_name):
- """Does the named resource exist?"""
- return get_provider(package_or_requirement).has_resource(resource_name)
-
- def resource_isdir(self, package_or_requirement, resource_name):
- """Is the named resource an existing directory?"""
- return get_provider(package_or_requirement).resource_isdir(
- resource_name
- )
-
- def resource_filename(self, package_or_requirement, resource_name):
- """Return a true filesystem path for specified resource"""
- return get_provider(package_or_requirement).get_resource_filename(
- self, resource_name
- )
-
- def resource_stream(self, package_or_requirement, resource_name):
- """Return a readable file-like object for specified resource"""
- return get_provider(package_or_requirement).get_resource_stream(
- self, resource_name
- )
-
- def resource_string(self, package_or_requirement, resource_name):
- """Return specified resource as a string"""
- return get_provider(package_or_requirement).get_resource_string(
- self, resource_name
- )
-
- def resource_listdir(self, package_or_requirement, resource_name):
- """List the contents of the named resource directory"""
- return get_provider(package_or_requirement).resource_listdir(
- resource_name
- )
-
- def extraction_error(self):
- """Give an error message for problems extracting file(s)"""
-
- old_exc = sys.exc_info()[1]
- cache_path = self.extraction_path or get_default_cache()
-
- err = ExtractionError("""Can't extract file(s) to egg cache
-
-The following error occurred while trying to extract file(s) to the Python egg
-cache:
-
- %s
-
-The Python egg cache directory is currently set to:
-
- %s
-
-Perhaps your account does not have write access to this directory? You can
-change the cache directory by setting the PYTHON_EGG_CACHE environment
-variable to point to an accessible directory.
-""" % (old_exc, cache_path)
- )
- err.manager = self
- err.cache_path = cache_path
- err.original_error = old_exc
- raise err
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- def get_cache_path(self, archive_name, names=()):
- """Return absolute location in cache for `archive_name` and `names`
-
- The parent directory of the resulting path will be created if it does
- not already exist. `archive_name` should be the base filename of the
- enclosing egg (which may not be the name of the enclosing zipfile!),
- including its ".egg" extension. `names`, if provided, should be a
- sequence of path name parts "under" the egg's extraction location.
-
- This method should only be called by resource providers that need to
- obtain an extraction location, and only for names they intend to
- extract, as it tracks the generated names for possible cleanup later.
- """
- extract_path = self.extraction_path or get_default_cache()
- target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
- try:
- _bypass_ensure_directory(target_path)
- except:
- self.extraction_error()
-
- self.cached_files[target_path] = 1
- return target_path
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- def postprocess(self, tempname, filename):
- """Perform any platform-specific postprocessing of `tempname`
-
- This is where Mac header rewrites should be done; other platforms don't
- have anything special they should do.
-
- Resource providers should call this method ONLY after successfully
- extracting a compressed resource. They must NOT call it on resources
- that are already in the filesystem.
-
- `tempname` is the current (temporary) name of the file, and `filename`
- is the name it will be renamed to by the caller after this routine
- returns.
- """
-
- if os.name == 'posix':
- # Make the resource executable
- mode = ((os.stat(tempname).st_mode) | 0555) & 07777
- os.chmod(tempname, mode)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- def set_extraction_path(self, path):
- """Set the base path where resources will be extracted to, if needed.
-
- If you do not call this routine before any extractions take place, the
- path defaults to the return value of ``get_default_cache()``. (Which
- is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
- platform-specific fallbacks. See that routine's documentation for more
- details.)
-
- Resources are extracted to subdirectories of this path based upon
- information given by the ``IResourceProvider``. You may set this to a
- temporary directory, but then you must call ``cleanup_resources()`` to
- delete the extracted files when done. There is no guarantee that
- ``cleanup_resources()`` will be able to remove all extracted files.
-
- (Note: you may not change the extraction path for a given resource
- manager once resources have been extracted, unless you first call
- ``cleanup_resources()``.)
- """
- if self.cached_files:
- raise ValueError(
- "Can't change extraction path, files already extracted"
- )
-
- self.extraction_path = path
-
- def cleanup_resources(self, force=False):
- """
- Delete all extracted resource files and directories, returning a list
- of the file and directory names that could not be successfully removed.
- This function does not have any concurrency protection, so it should
- generally only be called when the extraction path is a temporary
- directory exclusive to a single process. This method is not
- automatically called; you must call it explicitly or register it as an
- ``atexit`` function if you wish to ensure cleanup of a temporary
- directory used for extractions.
- """
- # XXX
-
-
-
-def get_default_cache():
- """Determine the default cache location
-
- This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
- Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
- "Application Data" directory. On all other systems, it's "~/.python-eggs".
- """
- try:
- return os.environ['PYTHON_EGG_CACHE']
- except KeyError:
- pass
-
- if os.name!='nt':
- return os.path.expanduser('~/.python-eggs')
-
- app_data = 'Application Data' # XXX this may be locale-specific!
- app_homes = [
- (('APPDATA',), None), # best option, should be locale-safe
- (('USERPROFILE',), app_data),
- (('HOMEDRIVE','HOMEPATH'), app_data),
- (('HOMEPATH',), app_data),
- (('HOME',), None),
- (('WINDIR',), app_data), # 95/98/ME
- ]
-
- for keys, subdir in app_homes:
- dirname = ''
- for key in keys:
- if key in os.environ:
- dirname = os.path.join(dirname, os.environ[key])
- else:
- break
- else:
- if subdir:
- dirname = os.path.join(dirname,subdir)
- return os.path.join(dirname, 'Python-Eggs')
- else:
- raise RuntimeError(
- "Please set the PYTHON_EGG_CACHE enviroment variable"
- )
-
-def safe_name(name):
- """Convert an arbitrary string to a standard distribution name
-
- Any runs of non-alphanumeric/. characters are replaced with a single '-'.
- """
- return re.sub('[^A-Za-z0-9.]+', '-', name)
-
-
-def safe_version(version):
- """Convert an arbitrary string to a standard version string
-
- Spaces become dots, and all other non-alphanumeric characters become
- dashes, with runs of multiple dashes condensed to a single dash.
- """
- version = version.replace(' ','.')
- return re.sub('[^A-Za-z0-9.]+', '-', version)
-
-
-def safe_extra(extra):
- """Convert an arbitrary string to a standard 'extra' name
-
- Any runs of non-alphanumeric characters are replaced with a single '_',
- and the result is always lowercased.
- """
- return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
-
-
-def to_filename(name):
- """Convert a project or version name to its filename-escaped form
-
- Any '-' characters are currently replaced with '_'.
- """
- return name.replace('-','_')
-
-
-
-
-
-
-
-
-class NullProvider:
- """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
-
- egg_name = None
- egg_info = None
- loader = None
-
- def __init__(self, module):
- self.loader = getattr(module, '__loader__', None)
- self.module_path = os.path.dirname(getattr(module, '__file__', ''))
-
- def get_resource_filename(self, manager, resource_name):
- return self._fn(self.module_path, resource_name)
-
- def get_resource_stream(self, manager, resource_name):
- return StringIO(self.get_resource_string(manager, resource_name))
-
- def get_resource_string(self, manager, resource_name):
- return self._get(self._fn(self.module_path, resource_name))
-
- def has_resource(self, resource_name):
- return self._has(self._fn(self.module_path, resource_name))
-
- def has_metadata(self, name):
- return self.egg_info and self._has(self._fn(self.egg_info,name))
-
- def get_metadata(self, name):
- if not self.egg_info:
- return ""
- return self._get(self._fn(self.egg_info,name))
-
- def get_metadata_lines(self, name):
- return yield_lines(self.get_metadata(name))
-
- def resource_isdir(self,resource_name):
- return self._isdir(self._fn(self.module_path, resource_name))
-
- def metadata_isdir(self,name):
- return self.egg_info and self._isdir(self._fn(self.egg_info,name))
-
-
- def resource_listdir(self,resource_name):
- return self._listdir(self._fn(self.module_path,resource_name))
-
- def metadata_listdir(self,name):
- if self.egg_info:
- return self._listdir(self._fn(self.egg_info,name))
- return []
-
- def run_script(self,script_name,namespace):
- script = 'scripts/'+script_name
- if not self.has_metadata(script):
- raise ResolutionError("No script named %r" % script_name)
- script_text = self.get_metadata(script).replace('\r\n','\n')
- script_text = script_text.replace('\r','\n')
- script_filename = self._fn(self.egg_info,script)
- namespace['__file__'] = script_filename
- if os.path.exists(script_filename):
- execfile(script_filename, namespace, namespace)
- else:
- from linecache import cache
- cache[script_filename] = (
- len(script_text), 0, script_text.split('\n'), script_filename
- )
- script_code = compile(script_text,script_filename,'exec')
- exec script_code in namespace, namespace
-
- def _has(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _isdir(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _listdir(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _fn(self, base, resource_name):
- if resource_name:
- return os.path.join(base, *resource_name.split('/'))
- return base
-
- def _get(self, path):
- if hasattr(self.loader, 'get_data'):
- return self.loader.get_data(path)
- raise NotImplementedError(
- "Can't perform this operation for loaders without 'get_data()'"
- )
-
-register_loader_type(object, NullProvider)
-
-
-class EggProvider(NullProvider):
- """Provider based on a virtual filesystem"""
-
- def __init__(self,module):
- NullProvider.__init__(self,module)
- self._setup_prefix()
-
- def _setup_prefix(self):
- # we assume here that our metadata may be nested inside a "basket"
- # of multiple eggs; that's why we use module_path instead of .archive
- path = self.module_path
- old = None
- while path!=old:
- if path.lower().endswith('.egg'):
- self.egg_name = os.path.basename(path)
- self.egg_info = os.path.join(path, 'EGG-INFO')
- self.egg_root = path
- break
- old = path
- path, base = os.path.split(path)
-
-
-
-
-
-
-class DefaultProvider(EggProvider):
- """Provides access to package resources in the filesystem"""
-
- def _has(self, path):
- return os.path.exists(path)
-
- def _isdir(self,path):
- return os.path.isdir(path)
-
- def _listdir(self,path):
- return os.listdir(path)
-
- def get_resource_stream(self, manager, resource_name):
- return open(self._fn(self.module_path, resource_name), 'rb')
-
- def _get(self, path):
- stream = open(path, 'rb')
- try:
- return stream.read()
- finally:
- stream.close()
-
-register_loader_type(type(None), DefaultProvider)
-
-
-class EmptyProvider(NullProvider):
- """Provider that returns nothing for all requests"""
-
- _isdir = _has = lambda self,path: False
- _get = lambda self,path: ''
- _listdir = lambda self,path: []
- module_path = None
-
- def __init__(self):
- pass
-
-empty_provider = EmptyProvider()
-
-
-
-
-class ZipProvider(EggProvider):
- """Resource support for zips and eggs"""
-
- eagers = None
-
- def __init__(self, module):
- EggProvider.__init__(self,module)
- self.zipinfo = zipimport._zip_directory_cache[self.loader.archive]
- self.zip_pre = self.loader.archive+os.sep
-
- def _zipinfo_name(self, fspath):
- # Convert a virtual filename (full path to file) into a zipfile subpath
- # usable with the zipimport directory cache for our target archive
- if fspath.startswith(self.zip_pre):
- return fspath[len(self.zip_pre):]
- raise AssertionError(
- "%s is not a subpath of %s" % (fspath,self.zip_pre)
- )
-
- def _parts(self,zip_path):
- # Convert a zipfile subpath into an egg-relative path part list
- fspath = self.zip_pre+zip_path # pseudo-fs path
- if fspath.startswith(self.egg_root+os.sep):
- return fspath[len(self.egg_root)+1:].split(os.sep)
- raise AssertionError(
- "%s is not a subpath of %s" % (fspath,self.egg_root)
- )
-
- def get_resource_filename(self, manager, resource_name):
- if not self.egg_name:
- raise NotImplementedError(
- "resource_filename() only supported for .egg, not .zip"
- )
- # no need to lock for extraction, since we use temp names
- zip_path = self._resource_to_zip(resource_name)
- eagers = self._get_eager_resources()
- if '/'.join(self._parts(zip_path)) in eagers:
- for name in eagers:
- self._extract_resource(manager, self._eager_to_zip(name))
- return self._extract_resource(manager, zip_path)
-
- def _extract_resource(self, manager, zip_path):
-
- if zip_path in self._index():
- for name in self._index()[zip_path]:
- last = self._extract_resource(
- manager, os.path.join(zip_path, name)
- )
- return os.path.dirname(last) # return the extracted directory name
-
- zip_stat = self.zipinfo[zip_path]
- t,d,size = zip_stat[5], zip_stat[6], zip_stat[3]
- date_time = (
- (d>>9)+1980, (d>>5)&0xF, d&0x1F, # ymd
- (t&0xFFFF)>>11, (t>>5)&0x3F, (t&0x1F) * 2, 0, 0, -1 # hms, etc.
- )
- timestamp = time.mktime(date_time)
-
- try:
- real_path = manager.get_cache_path(
- self.egg_name, self._parts(zip_path)
- )
-
- if os.path.isfile(real_path):
- stat = os.stat(real_path)
- if stat.st_size==size and stat.st_mtime==timestamp:
- # size and stamp match, don't bother extracting
- return real_path
-
- outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
- os.write(outf, self.loader.get_data(zip_path))
- os.close(outf)
- utime(tmpnam, (timestamp,timestamp))
- manager.postprocess(tmpnam, real_path)
-
- try:
- rename(tmpnam, real_path)
-
- except os.error:
- if os.path.isfile(real_path):
- stat = os.stat(real_path)
-
- if stat.st_size==size and stat.st_mtime==timestamp:
- # size and stamp match, somebody did it just ahead of
- # us, so we're done
- return real_path
- elif os.name=='nt': # Windows, del old file and retry
- unlink(real_path)
- rename(tmpnam, real_path)
- return real_path
- raise
-
- except os.error:
- manager.extraction_error() # report a user-friendly error
-
- return real_path
-
- def _get_eager_resources(self):
- if self.eagers is None:
- eagers = []
- for name in ('native_libs.txt', 'eager_resources.txt'):
- if self.has_metadata(name):
- eagers.extend(self.get_metadata_lines(name))
- self.eagers = eagers
- return self.eagers
-
- def _index(self):
- try:
- return self._dirindex
- except AttributeError:
- ind = {}
- for path in self.zipinfo:
- parts = path.split(os.sep)
- while parts:
- parent = os.sep.join(parts[:-1])
- if parent in ind:
- ind[parent].append(parts[-1])
- break
- else:
- ind[parent] = [parts.pop()]
- self._dirindex = ind
- return ind
-
- def _has(self, fspath):
- zip_path = self._zipinfo_name(fspath)
- return zip_path in self.zipinfo or zip_path in self._index()
-
- def _isdir(self,fspath):
- return self._zipinfo_name(fspath) in self._index()
-
- def _listdir(self,fspath):
- return list(self._index().get(self._zipinfo_name(fspath), ()))
-
- def _eager_to_zip(self,resource_name):
- return self._zipinfo_name(self._fn(self.egg_root,resource_name))
-
- def _resource_to_zip(self,resource_name):
- return self._zipinfo_name(self._fn(self.module_path,resource_name))
-
-register_loader_type(zipimport.zipimporter, ZipProvider)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-class FileMetadata(EmptyProvider):
- """Metadata handler for standalone PKG-INFO files
-
- Usage::
-
- metadata = FileMetadata("/path/to/PKG-INFO")
-
- This provider rejects all data and metadata requests except for PKG-INFO,
- which is treated as existing, and will be the contents of the file at
- the provided location.
- """
-
- def __init__(self,path):
- self.path = path
-
- def has_metadata(self,name):
- return name=='PKG-INFO'
-
- def get_metadata(self,name):
- if name=='PKG-INFO':
- return open(self.path,'rU').read()
- raise KeyError("No metadata except PKG-INFO is available")
-
- def get_metadata_lines(self,name):
- return yield_lines(self.get_metadata(name))
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-class PathMetadata(DefaultProvider):
- """Metadata provider for egg directories
-
- Usage::
-
- # Development eggs:
-
- egg_info = "/path/to/PackageName.egg-info"
- base_dir = os.path.dirname(egg_info)
- metadata = PathMetadata(base_dir, egg_info)
- dist_name = os.path.splitext(os.path.basename(egg_info))[0]
- dist = Distribution(basedir,project_name=dist_name,metadata=metadata)
-
- # Unpacked egg directories:
-
- egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
- metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
- dist = Distribution.from_filename(egg_path, metadata=metadata)
- """
-
- def __init__(self, path, egg_info):
- self.module_path = path
- self.egg_info = egg_info
-
-
-class EggMetadata(ZipProvider):
- """Metadata provider for .egg files"""
-
- def __init__(self, importer):
- """Create a metadata provider from a zipimporter"""
-
- self.zipinfo = zipimport._zip_directory_cache[importer.archive]
- self.zip_pre = importer.archive+os.sep
- self.loader = importer
- if importer.prefix:
- self.module_path = os.path.join(importer.archive, importer.prefix)
- else:
- self.module_path = importer.archive
- self._setup_prefix()
-
-
-class ImpWrapper:
- """PEP 302 Importer that wraps Python's "normal" import algorithm"""
-
- def __init__(self, path=None):
- self.path = path
-
- def find_module(self, fullname, path=None):
- subname = fullname.split(".")[-1]
- if subname != fullname and self.path is None:
- return None
- if self.path is None:
- path = None
- else:
- path = [self.path]
- try:
- file, filename, etc = imp.find_module(subname, path)
- except ImportError:
- return None
- return ImpLoader(file, filename, etc)
-
-
-class ImpLoader:
- """PEP 302 Loader that wraps Python's "normal" import algorithm"""
-
- def __init__(self, file, filename, etc):
- self.file = file
- self.filename = filename
- self.etc = etc
-
- def load_module(self, fullname):
- try:
- mod = imp.load_module(fullname, self.file, self.filename, self.etc)
- finally:
- if self.file: self.file.close()
- # Note: we don't set __loader__ because we want the module to look
- # normal; i.e. this is just a wrapper for standard import machinery
- return mod
-
-
-
-
-def get_importer(path_item):
- """Retrieve a PEP 302 "importer" for the given path item
-
- If there is no importer, this returns a wrapper around the builtin import
- machinery. The returned importer is only cached if it was created by a
- path hook.
- """
- try:
- importer = sys.path_importer_cache[path_item]
- except KeyError:
- for hook in sys.path_hooks:
- try:
- importer = hook(path_item)
- except ImportError:
- pass
- else:
- break
- else:
- importer = None
-
- sys.path_importer_cache.setdefault(path_item,importer)
- if importer is None:
- try:
- importer = ImpWrapper(path_item)
- except ImportError:
- pass
- return importer
-
-try:
- from pkgutil import get_importer, ImpImporter
-except ImportError:
- pass # Python 2.3 or 2.4, use our own implementation
-else:
- ImpWrapper = ImpImporter # Python 2.5, use pkgutil's implementation
- del ImpLoader, ImpImporter
-
-
-
-
-
-
-_declare_state('dict', _distribution_finders = {})
-
-def register_finder(importer_type, distribution_finder):
- """Register `distribution_finder` to find distributions in sys.path items
-
- `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
- handler), and `distribution_finder` is a callable that, passed a path
- item and the importer instance, yields ``Distribution`` instances found on
- that path item. See ``pkg_resources.find_on_path`` for an example."""
- _distribution_finders[importer_type] = distribution_finder
-
-
-def find_distributions(path_item, only=False):
- """Yield distributions accessible via `path_item`"""
- importer = get_importer(path_item)
- finder = _find_adapter(_distribution_finders, importer)
- return finder(importer, path_item, only)
-
-def find_in_zip(importer, path_item, only=False):
- metadata = EggMetadata(importer)
- if metadata.has_metadata('PKG-INFO'):
- yield Distribution.from_filename(path_item, metadata=metadata)
- if only:
- return # don't yield nested distros
- for subitem in metadata.resource_listdir('/'):
- if subitem.endswith('.egg'):
- subpath = os.path.join(path_item, subitem)
- for dist in find_in_zip(zipimport.zipimporter(subpath), subpath):
- yield dist
-
-register_finder(zipimport.zipimporter, find_in_zip)
-
-def StringIO(*args, **kw):
- """Thunk to load the real StringIO on demand"""
- global StringIO
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
- return StringIO(*args,**kw)
-
-def find_nothing(importer, path_item, only=False):
- return ()
-register_finder(object,find_nothing)
-
-def find_on_path(importer, path_item, only=False):
- """Yield distributions accessible on a sys.path directory"""
- path_item = _normalize_cached(path_item)
-
- if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
- if path_item.lower().endswith('.egg'):
- # unpacked egg
- yield Distribution.from_filename(
- path_item, metadata=PathMetadata(
- path_item, os.path.join(path_item,'EGG-INFO')
- )
- )
- else:
- # scan for .egg and .egg-info in directory
- for entry in os.listdir(path_item):
- lower = entry.lower()
- if lower.endswith('.egg-info'):
- fullpath = os.path.join(path_item, entry)
- if os.path.isdir(fullpath):
- # egg-info directory, allow getting metadata
- metadata = PathMetadata(path_item, fullpath)
- else:
- metadata = FileMetadata(fullpath)
- yield Distribution.from_location(
- path_item,entry,metadata,precedence=DEVELOP_DIST
- )
- elif not only and lower.endswith('.egg'):
- for dist in find_distributions(os.path.join(path_item, entry)):
- yield dist
- elif not only and lower.endswith('.egg-link'):
- for line in file(os.path.join(path_item, entry)):
- if not line.strip(): continue
- for item in find_distributions(os.path.join(path_item,line.rstrip())):
- yield item
- break
-register_finder(ImpWrapper,find_on_path)
-
-_declare_state('dict', _namespace_handlers = {})
-_declare_state('dict', _namespace_packages = {})
-
-def register_namespace_handler(importer_type, namespace_handler):
- """Register `namespace_handler` to declare namespace packages
-
- `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
- handler), and `namespace_handler` is a callable like this::
-
- def namespace_handler(importer,path_entry,moduleName,module):
- # return a path_entry to use for child packages
-
- Namespace handlers are only called if the importer object has already
- agreed that it can handle the relevant path item, and they should only
- return a subpath if the module __path__ does not already contain an
- equivalent subpath. For an example namespace handler, see
- ``pkg_resources.file_ns_handler``.
- """
- _namespace_handlers[importer_type] = namespace_handler
-
-def _handle_ns(packageName, path_item):
- """Ensure that named package includes a subpath of path_item (if needed)"""
- importer = get_importer(path_item)
- if importer is None:
- return None
- loader = importer.find_module(packageName)
- if loader is None:
- return None
- module = sys.modules.get(packageName)
- if module is None:
- module = sys.modules[packageName] = imp.new_module(packageName)
- module.__path__ = []; _set_parent_ns(packageName)
- elif not hasattr(module,'__path__'):
- raise TypeError("Not a package:", packageName)
- handler = _find_adapter(_namespace_handlers, importer)
- subpath = handler(importer,path_item,packageName,module)
- if subpath is not None:
- path = module.__path__; path.append(subpath)
- loader.load_module(packageName); module.__path__ = path
- return subpath
-
-def declare_namespace(packageName):
- """Declare that package 'packageName' is a namespace package"""
-
- imp.acquire_lock()
- try:
- if packageName in _namespace_packages:
- return
-
- path, parent = sys.path, None
- if '.' in packageName:
- parent = '.'.join(packageName.split('.')[:-1])
- declare_namespace(parent)
- __import__(parent)
- try:
- path = sys.modules[parent].__path__
- except AttributeError:
- raise TypeError("Not a package:", parent)
-
- # Track what packages are namespaces, so when new path items are added,
- # they can be updated
- _namespace_packages.setdefault(parent,[]).append(packageName)
- _namespace_packages.setdefault(packageName,[])
-
- for path_item in path:
- # Ensure all the parent's path items are reflected in the child,
- # if they apply
- _handle_ns(packageName, path_item)
-
- finally:
- imp.release_lock()
-
-def fixup_namespace_packages(path_item, parent=None):
- """Ensure that previously-declared namespace packages include path_item"""
- imp.acquire_lock()
- try:
- for package in _namespace_packages.get(parent,()):
- subpath = _handle_ns(package, path_item)
- if subpath: fixup_namespace_packages(subpath,package)
- finally:
- imp.release_lock()
-
-def file_ns_handler(importer, path_item, packageName, module):
- """Compute an ns-package subpath for a filesystem or zipfile importer"""
-
- subpath = os.path.join(path_item, packageName.split('.')[-1])
- normalized = _normalize_cached(subpath)
- for item in module.__path__:
- if _normalize_cached(item)==normalized:
- break
- else:
- # Only return the path if it's not already there
- return subpath
-
-register_namespace_handler(ImpWrapper,file_ns_handler)
-register_namespace_handler(zipimport.zipimporter,file_ns_handler)
-
-
-def null_ns_handler(importer, path_item, packageName, module):
- return None
-
-register_namespace_handler(object,null_ns_handler)
-
-
-def normalize_path(filename):
- """Normalize a file/dir name for comparison purposes"""
- return os.path.normcase(os.path.realpath(filename))
-
-def _normalize_cached(filename,_cache={}):
- try:
- return _cache[filename]
- except KeyError:
- _cache[filename] = result = normalize_path(filename)
- return result
-
-def _set_parent_ns(packageName):
- parts = packageName.split('.')
- name = parts.pop()
- if parts:
- parent = '.'.join(parts)
- setattr(sys.modules[parent], name, sys.modules[packageName])
-
-
-def yield_lines(strs):
- """Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
- if isinstance(strs,basestring):
- for s in strs.splitlines():
- s = s.strip()
- if s and not s.startswith('#'): # skip blank lines/comments
- yield s
- else:
- for ss in strs:
- for s in yield_lines(ss):
- yield s
-
-LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
-CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
-DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
-VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
-COMMA = re.compile(r"\s*,").match # comma between items
-OBRACKET = re.compile(r"\s*\[").match
-CBRACKET = re.compile(r"\s*\]").match
-MODULE = re.compile(r"\w+(\.\w+)*$").match
-EGG_NAME = re.compile(
- r"(?P<name>[^-]+)"
- r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
- re.VERBOSE | re.IGNORECASE
-).match
-
-component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
-replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
-
-def _parse_version_parts(s):
- for part in component_re.split(s):
- part = replace(part,part)
- if not part or part=='.':
- continue
- if part[:1] in '0123456789':
- yield part.zfill(8) # pad for numeric comparison
- else:
- yield '*'+part
-
- yield '*final' # ensure that alpha/beta/candidate are before final
-
-def parse_version(s):
- """Convert a version string to a chronologically-sortable key
-
- This is a rough cross between distutils' StrictVersion and LooseVersion;
- if you give it versions that would work with StrictVersion, then it behaves
- the same; otherwise it acts like a slightly-smarter LooseVersion. It is
- *possible* to create pathological version coding schemes that will fool
- this parser, but they should be very rare in practice.
-
- The returned value will be a tuple of strings. Numeric portions of the
- version are padded to 8 digits so they will compare numerically, but
- without relying on how numbers compare relative to strings. Dots are
- dropped, but dashes are retained. Trailing zeros between alpha segments
- or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
- "2.4". Alphanumeric parts are lower-cased.
-
- The algorithm assumes that strings like "-" and any alpha string that
- alphabetically follows "final" represents a "patch level". So, "2.4-1"
- is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
- considered newer than "2.4-1", which in turn is newer than "2.4".
-
- Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
- come before "final" alphabetically) are assumed to be pre-release versions,
- so that the version "2.4" is considered newer than "2.4a1".
-
- Finally, to handle miscellaneous cases, the strings "pre", "preview", and
- "rc" are treated as if they were "c", i.e. as though they were release
- candidates, and therefore are not as new as a version string that does not
- contain them, and "dev" is replaced with an '@' so that it sorts lower than
- than any other pre-release tag.
- """
- parts = []
- for part in _parse_version_parts(s.lower()):
- if part.startswith('*'):
- if part<'*final': # remove '-' before a prerelease tag
- while parts and parts[-1]=='*final-': parts.pop()
- # remove trailing zeros from each series of numeric parts
- while parts and parts[-1]=='00000000':
- parts.pop()
- parts.append(part)
- return tuple(parts)
-
-class EntryPoint(object):
- """Object representing an advertised importable object"""
-
- def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
- if not MODULE(module_name):
- raise ValueError("Invalid module name", module_name)
- self.name = name
- self.module_name = module_name
- self.attrs = tuple(attrs)
- self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
- self.dist = dist
-
- def __str__(self):
- s = "%s = %s" % (self.name, self.module_name)
- if self.attrs:
- s += ':' + '.'.join(self.attrs)
- if self.extras:
- s += ' [%s]' % ','.join(self.extras)
- return s
-
- def __repr__(self):
- return "EntryPoint.parse(%r)" % str(self)
-
- def load(self, require=True, env=None, installer=None):
- if require: self.require(env, installer)
- entry = __import__(self.module_name, globals(),globals(), ['__name__'])
- for attr in self.attrs:
- try:
- entry = getattr(entry,attr)
- except AttributeError:
- raise ImportError("%r has no %r attribute" % (entry,attr))
- return entry
-
- def require(self, env=None, installer=None):
- if self.extras and not self.dist:
- raise UnknownExtra("Can't require() without a distribution", self)
- map(working_set.add,
- working_set.resolve(self.dist.requires(self.extras),env,installer))
-
-
-
- #@classmethod
- def parse(cls, src, dist=None):
- """Parse a single entry point from string `src`
-
- Entry point syntax follows the form::
-
- name = some.module:some.attr [extra1,extra2]
-
- The entry name and module name are required, but the ``:attrs`` and
- ``[extras]`` parts are optional
- """
- try:
- attrs = extras = ()
- name,value = src.split('=',1)
- if '[' in value:
- value,extras = value.split('[',1)
- req = Requirement.parse("x["+extras)
- if req.specs: raise ValueError
- extras = req.extras
- if ':' in value:
- value,attrs = value.split(':',1)
- if not MODULE(attrs.rstrip()):
- raise ValueError
- attrs = attrs.rstrip().split('.')
- except ValueError:
- raise ValueError(
- "EntryPoint must be in 'name=module:attrs [extras]' format",
- src
- )
- else:
- return cls(name.strip(), value.strip(), attrs, extras, dist)
-
- parse = classmethod(parse)
-
-
-
-
-
-
-
-
- #@classmethod
- def parse_group(cls, group, lines, dist=None):
- """Parse an entry point group"""
- if not MODULE(group):
- raise ValueError("Invalid group name", group)
- this = {}
- for line in yield_lines(lines):
- ep = cls.parse(line, dist)
- if ep.name in this:
- raise ValueError("Duplicate entry point", group, ep.name)
- this[ep.name]=ep
- return this
-
- parse_group = classmethod(parse_group)
-
- #@classmethod
- def parse_map(cls, data, dist=None):
- """Parse a map of entry point groups"""
- if isinstance(data,dict):
- data = data.items()
- else:
- data = split_sections(data)
- maps = {}
- for group, lines in data:
- if group is None:
- if not lines:
- continue
- raise ValueError("Entry points must be listed in groups")
- group = group.strip()
- if group in maps:
- raise ValueError("Duplicate group name", group)
- maps[group] = cls.parse_group(group, lines, dist)
- return maps
-
- parse_map = classmethod(parse_map)
-
-
-
-
-
-
-class Distribution(object):
- """Wrap an actual or potential sys.path entry w/metadata"""
- def __init__(self,
- location=None, metadata=None, project_name=None, version=None,
- py_version=PY_MAJOR, platform=None, precedence = EGG_DIST
- ):
- self.project_name = safe_name(project_name or 'Unknown')
- if version is not None:
- self._version = safe_version(version)
- self.py_version = py_version
- self.platform = platform
- self.location = location
- self.precedence = precedence
- self._provider = metadata or empty_provider
-
- #@classmethod
- def from_location(cls,location,basename,metadata=None,**kw):
- project_name, version, py_version, platform = [None]*4
- basename, ext = os.path.splitext(basename)
- if ext.lower() in (".egg",".egg-info"):
- match = EGG_NAME(basename)
- if match:
- project_name, version, py_version, platform = match.group(
- 'name','ver','pyver','plat'
- )
- return cls(
- location, metadata, project_name=project_name, version=version,
- py_version=py_version, platform=platform, **kw
- )
- from_location = classmethod(from_location)
-
- hashcmp = property(
- lambda self: (
- getattr(self,'parsed_version',()), self.precedence, self.key,
- -len(self.location or ''), self.location, self.py_version,
- self.platform
- )
- )
- def __cmp__(self, other): return cmp(self.hashcmp, other)
- def __hash__(self): return hash(self.hashcmp)
-
- # These properties have to be lazy so that we don't have to load any
- # metadata until/unless it's actually needed. (i.e., some distributions
- # may not know their name or version without loading PKG-INFO)
-
- #@property
- def key(self):
- try:
- return self._key
- except AttributeError:
- self._key = key = self.project_name.lower()
- return key
- key = property(key)
-
- #@property
- def parsed_version(self):
- try:
- return self._parsed_version
- except AttributeError:
- self._parsed_version = pv = parse_version(self.version)
- return pv
-
- parsed_version = property(parsed_version)
-
- #@property
- def version(self):
- try:
- return self._version
- except AttributeError:
- for line in self._get_metadata('PKG-INFO'):
- if line.lower().startswith('version:'):
- self._version = safe_version(line.split(':',1)[1].strip())
- return self._version
- else:
- raise ValueError(
- "Missing 'Version:' header and/or PKG-INFO file", self
- )
- version = property(version)
-
-
-
-
- #@property
- def _dep_map(self):
- try:
- return self.__dep_map
- except AttributeError:
- dm = self.__dep_map = {None: []}
- for name in 'requires.txt', 'depends.txt':
- for extra,reqs in split_sections(self._get_metadata(name)):
- if extra: extra = safe_extra(extra)
- dm.setdefault(extra,[]).extend(parse_requirements(reqs))
- return dm
- _dep_map = property(_dep_map)
-
- def requires(self,extras=()):
- """List of Requirements needed for this distro if `extras` are used"""
- dm = self._dep_map
- deps = []
- deps.extend(dm.get(None,()))
- for ext in extras:
- try:
- deps.extend(dm[safe_extra(ext)])
- except KeyError:
- raise UnknownExtra(
- "%s has no such extra feature %r" % (self, ext)
- )
- return deps
-
- def _get_metadata(self,name):
- if self.has_metadata(name):
- for line in self.get_metadata_lines(name):
- yield line
-
- def activate(self,path=None):
- """Ensure distribution is importable on `path` (default=sys.path)"""
- if path is None: path = sys.path
- self.insert_on(path)
- if path is sys.path:
- fixup_namespace_packages(self.location)
- map(declare_namespace, self._get_metadata('namespace_packages.txt'))
-
-
- def egg_name(self):
- """Return what this distribution's standard .egg filename should be"""
- filename = "%s-%s-py%s" % (
- to_filename(self.project_name), to_filename(self.version),
- self.py_version or PY_MAJOR
- )
-
- if self.platform:
- filename += '-'+self.platform
- return filename
-
- def __repr__(self):
- if self.location:
- return "%s (%s)" % (self,self.location)
- else:
- return str(self)
-
- def __str__(self):
- try: version = getattr(self,'version',None)
- except ValueError: version = None
- version = version or "[unknown version]"
- return "%s %s" % (self.project_name,version)
-
- def __getattr__(self,attr):
- """Delegate all unrecognized public attributes to .metadata provider"""
- if attr.startswith('_'):
- raise AttributeError,attr
- return getattr(self._provider, attr)
-
- #@classmethod
- def from_filename(cls,filename,metadata=None, **kw):
- return cls.from_location(
- _normalize_cached(filename), os.path.basename(filename), metadata,
- **kw
- )
- from_filename = classmethod(from_filename)
-
- def as_requirement(self):
- """Return a ``Requirement`` that matches this distribution exactly"""
- return Requirement.parse('%s==%s' % (self.project_name, self.version))
-
- def load_entry_point(self, group, name):
- """Return the `name` entry point of `group` or raise ImportError"""
- ep = self.get_entry_info(group,name)
- if ep is None:
- raise ImportError("Entry point %r not found" % ((group,name),))
- return ep.load()
-
- def get_entry_map(self, group=None):
- """Return the entry point map for `group`, or the full entry map"""
- try:
- ep_map = self._ep_map
- except AttributeError:
- ep_map = self._ep_map = EntryPoint.parse_map(
- self._get_metadata('entry_points.txt'), self
- )
- if group is not None:
- return ep_map.get(group,{})
- return ep_map
-
- def get_entry_info(self, group, name):
- """Return the EntryPoint object for `group`+`name`, or ``None``"""
- return self.get_entry_map(group).get(name)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- def insert_on(self, path, loc = None):
- """Insert self.location in path before its nearest parent directory"""
-
- loc = loc or self.location
- if not loc:
- return
-
- nloc = _normalize_cached(loc)
- bdir = os.path.dirname(nloc)
- npath= [(p and _normalize_cached(p) or p) for p in path]
-
- bp = None
- for p, item in enumerate(npath):
- if item==nloc:
- break
- elif item==bdir and self.precedence==EGG_DIST:
- # if it's an .egg, give it precedence over its directory
- if path is sys.path:
- self.check_version_conflict()
- path.insert(p, loc)
- npath.insert(p, nloc)
- break
- else:
- if path is sys.path:
- self.check_version_conflict()
- path.append(loc)
- return
-
- # p is the spot where we found or inserted loc; now remove duplicates
- while 1:
- try:
- np = npath.index(nloc, p+1)
- except ValueError:
- break
- else:
- del npath[np], path[np]
- p = np # ha!
-
- return
-
-
- def check_version_conflict(self):
- if self.key=='setuptools':
- return # ignore the inevitable setuptools self-conflicts :(
-
- nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
- loc = normalize_path(self.location)
- for modname in self._get_metadata('top_level.txt'):
- if (modname not in sys.modules or modname in nsp
- or modname in _namespace_packages
- ):
- continue
-
- fn = getattr(sys.modules[modname], '__file__', None)
- if fn and (normalize_path(fn).startswith(loc) or fn.startswith(loc)):
- continue
- issue_warning(
- "Module %s was already imported from %s, but %s is being added"
- " to sys.path" % (modname, fn, self.location),
- )
-
- def has_version(self):
- try:
- self.version
- except ValueError:
- issue_warning("Unbuilt egg for "+repr(self))
- return False
- return True
-
- def clone(self,**kw):
- """Copy this distribution, substituting in any changed keyword args"""
- for attr in (
- 'project_name', 'version', 'py_version', 'platform', 'location',
- 'precedence'
- ):
- kw.setdefault(attr, getattr(self,attr,None))
- kw.setdefault('metadata', self._provider)
- return self.__class__(**kw)
-
-
-
-
- #@property
- def extras(self):
- return [dep for dep in self._dep_map if dep]
- extras = property(extras)
-
-
-def issue_warning(*args,**kw):
- level = 1
- g = globals()
- try:
- # find the first stack frame that is *not* code in
- # the pkg_resources module, to use for the warning
- while sys._getframe(level).f_globals is g:
- level += 1
- except ValueError:
- pass
- from warnings import warn
- warn(stacklevel = level+1, *args, **kw)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def parse_requirements(strs):
- """Yield ``Requirement`` objects for each specification in `strs`
-
- `strs` must be an instance of ``basestring``, or a (possibly-nested)
- iterable thereof.
- """
- # create a steppable iterator, so we can handle \-continuations
- lines = iter(yield_lines(strs))
-
- def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
-
- items = []
-
- while not TERMINATOR(line,p):
- if CONTINUE(line,p):
- try:
- line = lines.next(); p = 0
- except StopIteration:
- raise ValueError(
- "\\ must not appear on the last nonblank line"
- )
-
- match = ITEM(line,p)
- if not match:
- raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
-
- items.append(match.group(*groups))
- p = match.end()
-
- match = COMMA(line,p)
- if match:
- p = match.end() # skip the comma
- elif not TERMINATOR(line,p):
- raise ValueError(
- "Expected ',' or end-of-list in",line,"at",line[p:]
- )
-
- match = TERMINATOR(line,p)
- if match: p = match.end() # skip the terminator, if any
- return line, p, items
-
- for line in lines:
- match = DISTRO(line)
- if not match:
- raise ValueError("Missing distribution spec", line)
- project_name = match.group(1)
- p = match.end()
- extras = []
-
- match = OBRACKET(line,p)
- if match:
- p = match.end()
- line, p, extras = scan_list(
- DISTRO, CBRACKET, line, p, (1,), "'extra' name"
- )
-
- line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
- specs = [(op,safe_version(val)) for op,val in specs]
- yield Requirement(project_name, specs, extras)
-
-
-def _sort_dists(dists):
- tmp = [(dist.hashcmp,dist) for dist in dists]
- tmp.sort()
- dists[::-1] = [d for hc,d in tmp]
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-class Requirement:
- def __init__(self, project_name, specs, extras):
- """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
- self.unsafe_name, project_name = project_name, safe_name(project_name)
- self.project_name, self.key = project_name, project_name.lower()
- index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
- index.sort()
- self.specs = [(op,ver) for parsed,trans,op,ver in index]
- self.index, self.extras = index, tuple(map(safe_extra,extras))
- self.hashCmp = (
- self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
- frozenset(self.extras)
- )
- self.__hash = hash(self.hashCmp)
-
- def __str__(self):
- specs = ','.join([''.join(s) for s in self.specs])
- extras = ','.join(self.extras)
- if extras: extras = '[%s]' % extras
- return '%s%s%s' % (self.project_name, extras, specs)
-
- def __eq__(self,other):
- return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
-
- def __contains__(self,item):
- if isinstance(item,Distribution):
- if item.key != self.key: return False
- if self.index: item = item.parsed_version # only get if we need it
- elif isinstance(item,basestring):
- item = parse_version(item)
- last = None
- for parsed,trans,op,ver in self.index:
- action = trans[cmp(item,parsed)]
- if action=='F': return False
- elif action=='T': return True
- elif action=='+': last = True
- elif action=='-' or last is None: last = False
- if last is None: last = True # no rules encountered
- return last
-
-
- def __hash__(self):
- return self.__hash
-
- def __repr__(self): return "Requirement.parse(%r)" % str(self)
-
- #@staticmethod
- def parse(s):
- reqs = list(parse_requirements(s))
- if reqs:
- if len(reqs)==1:
- return reqs[0]
- raise ValueError("Expected only one requirement", s)
- raise ValueError("No requirements found", s)
-
- parse = staticmethod(parse)
-
-state_machine = {
- # =><
- '<' : '--T',
- '<=': 'T-T',
- '>' : 'F+F',
- '>=': 'T+F',
- '==': 'T..',
- '!=': 'F++',
-}
-
-
-def _get_mro(cls):
- """Get an mro for a type or classic class"""
- if not isinstance(cls,type):
- class cls(cls,object): pass
- return cls.__mro__[1:]
- return cls.__mro__
-
-def _find_adapter(registry, ob):
- """Return an adapter factory for `ob` from `registry`"""
- for t in _get_mro(getattr(ob, '__class__', type(ob))):
- if t in registry:
- return registry[t]
-
-
-def ensure_directory(path):
- """Ensure that the parent directory of `path` exists"""
- dirname = os.path.dirname(path)
- if not os.path.isdir(dirname):
- os.makedirs(dirname)
-
-def split_sections(s):
- """Split a string or iterable thereof into (section,content) pairs
-
- Each ``section`` is a stripped version of the section header ("[section]")
- and each ``content`` is a list of stripped lines excluding blank lines and
- comment-only lines. If there are any such lines before the first section
- header, they're returned in a first ``section`` of ``None``.
- """
- section = None
- content = []
- for line in yield_lines(s):
- if line.startswith("["):
- if line.endswith("]"):
- if section or content:
- yield section, content
- section = line[1:-1].strip()
- content = []
- else:
- raise ValueError("Invalid section heading", line)
- else:
- content.append(line)
-
- # wrap up last segment
- yield section, content
-
-def _mkstemp(*args,**kw):
- from tempfile import mkstemp
- old_open = os.open
- try:
- os.open = os_open # temporarily bypass sandboxing
- return mkstemp(*args,**kw)
- finally:
- os.open = old_open # and then put it back
-
-
-# Set up global resource manager (deliberately not state-saved)
-_manager = ResourceManager()
-def _initialize(g):
- for name in dir(_manager):
- if not name.startswith('_'):
- g[name] = getattr(_manager, name)
-_initialize(globals())
-
-# Prepare the master working set and make the ``require()`` API available
-_declare_state('object', working_set = WorkingSet())
-try:
- # Does the main program list any requirements?
- from __main__ import __requires__
-except ImportError:
- pass # No: just use the default working set based on sys.path
-else:
- # Yes: ensure the requirements are met, by prefixing sys.path if necessary
- try:
- working_set.require(__requires__)
- except VersionConflict: # try it without defaults already on sys.path
- working_set = WorkingSet([]) # by starting with an empty path
- for dist in working_set.resolve(
- parse_requirements(__requires__), Environment()
- ):
- working_set.add(dist)
- for entry in sys.path: # add any missing entries from sys.path
- if entry not in working_set.entries:
- working_set.add_entry(entry)
- sys.path[:] = working_set.entries # then copy back to sys.path
-
-require = working_set.require
-iter_entry_points = working_set.iter_entry_points
-add_activation_listener = working_set.subscribe
-run_script = working_set.run_script
-run_main = run_script # backward compatibility
-# Activate all distributions already on sys.path, and ensure that
-# all distributions added to the working set in the future (e.g. by
-# calling ``require()``) will get activated as well.
-add_activation_listener(lambda dist: dist.activate())
-working_set.entries=[]; map(working_set.add_entry,sys.path) # match order
-
diff --git a/po/README b/po/README
index 46db666..252b980 100644
--- a/po/README
+++ b/po/README
@@ -1,5 +1,5 @@
# create pot file
python mki18n.py -p --domain=org.laptop.AToiDeWebActivity
-
# gen mo files
python mki18n.py -m -e --domain=org.laptop.AToiDeWebActivity --moTarget=../locale
+
diff --git a/po/messages.pot b/po/messages.pot
index 70ea2cf..e69de29 100644
--- a/po/messages.pot
+++ b/po/messages.pot
@@ -1,27 +0,0 @@
-# SOME DESCRIPTIVE TITLE.
-# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
-# This file is distributed under the same license as the PACKAGE package.
-# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: PACKAGE VERSION\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2011-04-29 12:42+0200\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-
-#:
-msgid "Activity"
-msgstr ""
-
-#:
-msgid "Story"
-msgstr ""
-
-
diff --git a/po/org.laptop.AToiDeWebActivity_en.po b/po/org.laptop.AToiDeWebActivity_en.po
index c9e5173..e69de29 100644
--- a/po/org.laptop.AToiDeWebActivity_en.po
+++ b/po/org.laptop.AToiDeWebActivity_en.po
@@ -1,26 +0,0 @@
-# SOME DESCRIPTIVE TITLE.
-# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
-# This file is distributed under the same license as the PACKAGE package.
-# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: PACKAGE VERSION\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2011-04-29 12:42+0200\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-
-#:
-msgid "Activity"
-msgstr ""
-
-#:
-msgid "Story"
-msgstr ""
-
diff --git a/po/org.laptop.AToiDeWebActivity_fr.po b/po/org.laptop.AToiDeWebActivity_fr.po
index 2748fc6..e69de29 100644
--- a/po/org.laptop.AToiDeWebActivity_fr.po
+++ b/po/org.laptop.AToiDeWebActivity_fr.po
@@ -1,27 +0,0 @@
-# SOME DESCRIPTIVE TITLE.
-# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
-# This file is distributed under the same license as the PACKAGE package.
-# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: PACKAGE VERSION\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2011-04-29 12:42+0200\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-
-#:
-msgid "Activity"
-msgstr "Activité"
-
-#:
-msgid "Story"
-msgstr "Histoire"
-
-
diff --git a/run.py b/run.py
new file mode 100644
index 0000000..d6d37bd
--- /dev/null
+++ b/run.py
@@ -0,0 +1,18 @@
+# python import
+import sys
+# add lib path to current python path
+sys.path.append('lib')
+
+# import server first
+from server import Server
+
+# atoideweb import
+from atoideweb.controllers import app
+
+# simple server entry point
+if __name__ == "__main__":
+ # init the server
+ Server()
+ # while ....
+ while True:
+ continue
diff --git a/templates/_helpers.html b/templates/_helpers.html
new file mode 100644
index 0000000..18d7fbb
--- /dev/null
+++ b/templates/_helpers.html
@@ -0,0 +1,43 @@
+{% macro link_to(text, endpoint) -%}
+ <a href="{{ url_for(endpoint, **kwargs) }}">{{ text }}</a>
+{%- endmacro %}
+
+{% macro options(_, name, option_list, current, action) -%}
+ <div id='{{ name }}'>
+ <h3>{{ _(name) }}</h3>
+ <form name='{{ name }}' action='{{ action }}' method="post">
+ {% for opt in option_list %}
+ <input type='radio' name='{{ name }}' value='{{ opt }}' {% if opt == current %}checked{% endif %} />{{ _(opt) }}<br />
+ {% endfor %}
+ <input type="submit" value="submit" ></input>
+ </form>
+ </div>
+{%- endmacro %}
+
+{% macro options_ajax(_, name, option_list, current, action) -%}
+ <div id='{{ name }}'>
+ <h3>{{ _(name) }}</h3>
+ <form name='{{ name }}'>
+ {% for opt in option_list %}
+ <input type='radio' name='{{ name }}' value='{{ opt }}' {% if opt == current %}checked{% endif %} />{{ _(opt) }}<br />
+ {% endfor %}
+ </form>
+ <script>
+ <!--
+ $(document).ready(function() {
+ $("#{{ name }} input[type='radio']").change( function() {
+ $.ajax({
+ url: '{{ action }}',
+ type: 'POST',
+ data: ({ '{{ name }}' : $(this).val()}),
+ success: function(data) {
+ var _el = $('#{{ name }}-result');
+ _el.html(data.result);
+ }
+ });
+ });
+ });
+ -->
+ </script>
+ </div>
+{%- endmacro %}
diff --git a/templates/atoideweb/ajax.html b/templates/atoideweb/ajax.html
new file mode 100644
index 0000000..8eaaca6
--- /dev/null
+++ b/templates/atoideweb/ajax.html
@@ -0,0 +1,41 @@
+{% extends "layout.html" %}
+
+{% block content %}
+<h1>{{ title }}</h1>
+
+<div id='ajax-sample' class='form'>
+ <h3>{{ _('ajax-sample') }}</h3>
+ <form method="post" action="/ajax" name="ajax-sample">
+ <input type="radio" value="option 1" name="ajax-sample">option 1<br>
+ <input type="radio" value="option 2" name="ajax-sample">option 2<br>
+ </form>
+<div>
+
+<script>
+<!--
+$(document).ready(function() {
+ $("#ajax-sample input[type='radio']").change( function() {
+ $.ajax({
+ url: '{{ action }}',
+ type: 'POST',
+ data: ({ 'ajax-sample' : $(this).val()}),
+ success: function(data) {
+ var _el = $('#ajax-sample-result');
+ _el.html(data.result);
+ }
+ });
+ });
+});
+-->
+</script>
+
+<!-- HELPER WAY
+{ options_ajax(_, 'ajax-sample', ['option 1', 'option 2'], '', '/ajax') }
+-->
+
+<div id='ajax-result' class='result'>
+ <h3>{{ _('ajax-result') }}</h3>
+ => <span id="ajax-sample-result"></span>
+</div>
+
+{% endblock %}
diff --git a/templates/atoideweb/post.html b/templates/atoideweb/post.html
new file mode 100644
index 0000000..79bb3e2
--- /dev/null
+++ b/templates/atoideweb/post.html
@@ -0,0 +1,24 @@
+{% extends "layout.html" %}
+
+{% block content %}
+<h1>{{ title }}</h1>
+
+<div class='post-sample' class='form'>
+ <h3>{{ _('post-sample') }}</h3>
+ <form method="post" action="/post" name="post-sample">
+ <input type="radio" value="option-1" name="post-sample">option 1<br>
+ <input type="radio" value="option-2" name="post-sample">option 2<br>
+ <input type="submit" value="submit" ></input>
+ </form>
+<div>
+
+<!-- HELPER WAY
+{ options(_, 'post-sample', ['option 1', 'option 2'], result, '/post') }
+-->
+
+<div id='post-result' class='result'>
+ <h3>{{ _('post-result') }}</h3>
+ => {{ result }}
+</div>
+
+{% endblock %}
diff --git a/atoideweb/controllers/templates/layout.html b/templates/layout.html
index ba91e6e..a00af4d 100644
--- a/atoideweb/controllers/templates/layout.html
+++ b/templates/layout.html
@@ -1,6 +1,7 @@
<!doctype html>
{% from "_helpers.html" import link_to %}
{% from "_helpers.html" import options %}
+{% from "_helpers.html" import options_ajax %}
<head>
<title>{{ title }}</title>
<script src='/static/js/jquery.js' }}" type="text/javascript"></script>