Diffstat (limited to 'websdk/werkzeug/contrib')
-rw-r--r--  websdk/werkzeug/contrib/__init__.py      |  16 +
-rw-r--r--  websdk/werkzeug/contrib/atom.py          | 343 +
-rw-r--r--  websdk/werkzeug/contrib/cache.py         | 635 +
-rw-r--r--  websdk/werkzeug/contrib/fixers.py        | 224 +
-rw-r--r--  websdk/werkzeug/contrib/iterio.py        | 277 +
-rw-r--r--  websdk/werkzeug/contrib/jsrouting.py     | 259 +
-rw-r--r--  websdk/werkzeug/contrib/kickstart.py     | 288 +
-rw-r--r--  websdk/werkzeug/contrib/limiter.py       |  36 +
-rw-r--r--  websdk/werkzeug/contrib/lint.py          | 333 +
-rw-r--r--  websdk/werkzeug/contrib/profiler.py      | 118 +
-rw-r--r--  websdk/werkzeug/contrib/securecookie.py  | 332 +
-rw-r--r--  websdk/werkzeug/contrib/sessions.py      | 344 +
-rw-r--r--  websdk/werkzeug/contrib/testtools.py     |  71 +
-rw-r--r--  websdk/werkzeug/contrib/wrappers.py      | 275 +
14 files changed, 3551 insertions(+), 0 deletions(-)
diff --git a/websdk/werkzeug/contrib/__init__.py b/websdk/werkzeug/contrib/__init__.py
new file mode 100644
index 0000000..ffc48c9
--- /dev/null
+++ b/websdk/werkzeug/contrib/__init__.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+"""
+ werkzeug.contrib
+ ~~~~~~~~~~~~~~~~
+
+ Contains user-submitted code that other users may find useful, but which
+ is not part of the Werkzeug core. Anyone can write code for inclusion in
+ the `contrib` package. All modules in this package are distributed as an
+ add-on library and thus are not part of Werkzeug itself.
+
+ This file itself is mostly for informational purposes and to tell the
+ Python interpreter that `contrib` is a package.
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
diff --git a/websdk/werkzeug/contrib/atom.py b/websdk/werkzeug/contrib/atom.py
new file mode 100644
index 0000000..7aaa2fb
--- /dev/null
+++ b/websdk/werkzeug/contrib/atom.py
@@ -0,0 +1,343 @@
+# -*- coding: utf-8 -*-
+"""
+ werkzeug.contrib.atom
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ This module provides a class called :class:`AtomFeed` which can be
+ used to generate feeds in the Atom syndication format (see :rfc:`4287`).
+
+ Example::
+
+ def atom_feed(request):
+ feed = AtomFeed("My Blog", feed_url=request.url,
+ url=request.host_url,
+ subtitle="My example blog for a feed test.")
+ for post in Post.query.limit(10).all():
+ feed.add(post.title, post.body, content_type='html',
+ author=post.author, url=post.url, id=post.uid,
+ updated=post.last_update, published=post.pub_date)
+ return feed.get_response()
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+from datetime import datetime
+from werkzeug.utils import escape
+from werkzeug.wrappers import BaseResponse
+
+
+XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
+
+
+def _make_text_block(name, content, content_type=None):
+ """Helper function for the builder that creates an XML text block."""
+ if content_type == 'xhtml':
+ return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
+ (name, XHTML_NAMESPACE, content, name)
+ if not content_type:
+ return u'<%s>%s</%s>\n' % (name, escape(content), name)
+ return u'<%s type="%s">%s</%s>\n' % (name, content_type,
+ escape(content), name)
+
+
+def format_iso8601(obj):
+ """Format a datetime object for iso8601"""
+ return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
+
+
+class AtomFeed(object):
+ """A helper class that creates Atom feeds.
+
+ :param title: the title of the feed. Required.
+ :param title_type: the type attribute for the title element. One of
+ ``'html'``, ``'text'`` or ``'xhtml'``.
+ :param url: the url for the feed (not the url *of* the feed)
+    :param id: a globally unique id for the feed. Must be a URI. If
+               not present the `feed_url` is used, but one of the two
+               is required.
+    :param updated: the time the feed was last modified. Must be a
+                    :class:`datetime.datetime` object. If not present
+                    the latest entry's `updated` is used.
+ :param feed_url: the URL to the feed. Should be the URL that was
+ requested.
+    :param author: the author of the feed. Must be either a string (the
+                   name) or a dict with name (required) and uri or
+                   email (both optional). Can also be a list of
+                   (possibly mixed) strings and dicts if there are
+                   multiple authors. Required if not every entry has an
+                   author element.
+ :param icon: an icon for the feed.
+ :param logo: a logo for the feed.
+ :param rights: copyright information for the feed.
+ :param rights_type: the type attribute for the rights element. One of
+ ``'html'``, ``'text'`` or ``'xhtml'``. Default is
+ ``'text'``.
+ :param subtitle: a short description of the feed.
+    :param subtitle_type: the type attribute for the subtitle element.
+                          One of ``'text'``, ``'html'`` or ``'xhtml'``.
+                          Default is ``'text'``.
+ :param links: additional links. Must be a list of dictionaries with
+ href (required) and rel, type, hreflang, title, length
+ (all optional)
+ :param generator: the software that generated this feed. This must be
+ a tuple in the form ``(name, url, version)``. If
+ you don't want to specify one of them, set the item
+ to `None`.
+ :param entries: a list with the entries for the feed. Entries can also
+ be added later with :meth:`add`.
+
+ For more information on the elements see
+ http://www.atomenabled.org/developers/syndication/
+
+    Wherever a list is expected, any iterable can be used.
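+
+    A small sketch of the accepted ``author`` forms (the names and
+    addresses here are made up for illustration)::
+
+        feed = AtomFeed(u'Example Feed',
+                        feed_url=u'http://example.com/feed.atom',
+                        author=[u'Jane Doe',               # plain name
+                                {'name': u'John Doe',      # dict form
+                                 'email': u'john@example.com'}])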
+ """
+
+ default_generator = ('Werkzeug', None, None)
+
+ def __init__(self, title=None, entries=None, **kwargs):
+ self.title = title
+ self.title_type = kwargs.get('title_type', 'text')
+ self.url = kwargs.get('url')
+ self.feed_url = kwargs.get('feed_url', self.url)
+ self.id = kwargs.get('id', self.feed_url)
+ self.updated = kwargs.get('updated')
+ self.author = kwargs.get('author', ())
+ self.icon = kwargs.get('icon')
+ self.logo = kwargs.get('logo')
+ self.rights = kwargs.get('rights')
+ self.rights_type = kwargs.get('rights_type')
+ self.subtitle = kwargs.get('subtitle')
+ self.subtitle_type = kwargs.get('subtitle_type', 'text')
+ self.generator = kwargs.get('generator')
+ if self.generator is None:
+ self.generator = self.default_generator
+ self.links = kwargs.get('links', [])
+ self.entries = entries and list(entries) or []
+
+ if not hasattr(self.author, '__iter__') \
+ or isinstance(self.author, (basestring, dict)):
+ self.author = [self.author]
+ for i, author in enumerate(self.author):
+ if not isinstance(author, dict):
+ self.author[i] = {'name': author}
+
+ if not self.title:
+ raise ValueError('title is required')
+ if not self.id:
+ raise ValueError('id is required')
+ for author in self.author:
+ if 'name' not in author:
+ raise TypeError('author must contain at least a name')
+
+ def add(self, *args, **kwargs):
+ """Add a new entry to the feed. This function can either be called
+ with a :class:`FeedEntry` or some keyword and positional arguments
+ that are forwarded to the :class:`FeedEntry` constructor.
+ """
+ if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
+ self.entries.append(args[0])
+ else:
+ kwargs['feed_url'] = self.feed_url
+ self.entries.append(FeedEntry(*args, **kwargs))
+
+ def __repr__(self):
+ return '<%s %r (%d entries)>' % (
+ self.__class__.__name__,
+ self.title,
+ len(self.entries)
+ )
+
+ def generate(self):
+ """Return a generator that yields pieces of XML."""
+ # atom demands either an author element in every entry or a global one
+ if not self.author:
+            if any(not e.author for e in self.entries):
+ self.author = ({'name': 'Unknown author'},)
+
+ if not self.updated:
+ dates = sorted([entry.updated for entry in self.entries])
+ self.updated = dates and dates[-1] or datetime.utcnow()
+
+ yield u'<?xml version="1.0" encoding="utf-8"?>\n'
+ yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
+ yield ' ' + _make_text_block('title', self.title, self.title_type)
+ yield u' <id>%s</id>\n' % escape(self.id)
+ yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
+ if self.url:
+ yield u' <link href="%s" />\n' % escape(self.url, True)
+ if self.feed_url:
+ yield u' <link href="%s" rel="self" />\n' % \
+ escape(self.feed_url, True)
+ for link in self.links:
+ yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
+ (k, escape(link[k], True)) for k in link)
+ for author in self.author:
+ yield u' <author>\n'
+ yield u' <name>%s</name>\n' % escape(author['name'])
+ if 'uri' in author:
+ yield u' <uri>%s</uri>\n' % escape(author['uri'])
+ if 'email' in author:
+ yield ' <email>%s</email>\n' % escape(author['email'])
+ yield ' </author>\n'
+ if self.subtitle:
+ yield ' ' + _make_text_block('subtitle', self.subtitle,
+ self.subtitle_type)
+ if self.icon:
+ yield u' <icon>%s</icon>\n' % escape(self.icon)
+ if self.logo:
+ yield u' <logo>%s</logo>\n' % escape(self.logo)
+ if self.rights:
+ yield ' ' + _make_text_block('rights', self.rights,
+ self.rights_type)
+ generator_name, generator_url, generator_version = self.generator
+ if generator_name or generator_url or generator_version:
+ tmp = [u' <generator']
+ if generator_url:
+ tmp.append(u' uri="%s"' % escape(generator_url, True))
+ if generator_version:
+ tmp.append(u' version="%s"' % escape(generator_version, True))
+ tmp.append(u'>%s</generator>\n' % escape(generator_name))
+ yield u''.join(tmp)
+ for entry in self.entries:
+ for line in entry.generate():
+ yield u' ' + line
+ yield u'</feed>\n'
+
+ def to_string(self):
+ """Convert the feed into a string."""
+ return u''.join(self.generate())
+
+ def get_response(self):
+ """Return a response object for the feed."""
+ return BaseResponse(self.to_string(), mimetype='application/atom+xml')
+
+ def __call__(self, environ, start_response):
+ """Use the class as WSGI response object."""
+ return self.get_response()(environ, start_response)
+
+ def __unicode__(self):
+ return self.to_string()
+
+ def __str__(self):
+ return self.to_string().encode('utf-8')
+
+
+class FeedEntry(object):
+ """Represents a single entry in a feed.
+
+ :param title: the title of the entry. Required.
+ :param title_type: the type attribute for the title element. One of
+ ``'html'``, ``'text'`` or ``'xhtml'``.
+ :param content: the content of the entry.
+ :param content_type: the type attribute for the content element. One
+ of ``'html'``, ``'text'`` or ``'xhtml'``.
+ :param summary: a summary of the entry's content.
+ :param summary_type: the type attribute for the summary element. One
+ of ``'html'``, ``'text'`` or ``'xhtml'``.
+ :param url: the url for the entry.
+    :param id: a globally unique id for the entry. Must be a URI. If
+               not present the URL is used, but one of the two is
+               required.
+    :param updated: the time the entry was last modified. Must be a
+                    :class:`datetime.datetime` object. Required.
+    :param author: the author of the entry. Must be either a string (the
+                   name) or a dict with name (required) and uri or
+                   email (both optional). Can also be a list of
+                   (possibly mixed) strings and dicts if there are
+                   multiple authors. Required if the feed itself does not
+                   have an author element.
+ :param published: the time the entry was initially published. Must
+ be a :class:`datetime.datetime` object.
+ :param rights: copyright information for the entry.
+ :param rights_type: the type attribute for the rights element. One of
+ ``'html'``, ``'text'`` or ``'xhtml'``. Default is
+ ``'text'``.
+ :param links: additional links. Must be a list of dictionaries with
+ href (required) and rel, type, hreflang, title, length
+ (all optional)
+ :param xml_base: The xml base (url) for this feed item. If not provided
+ it will default to the item url.
+
+ For more information on the elements see
+ http://www.atomenabled.org/developers/syndication/
+
+    Wherever a list is expected, any iterable can be used.
+ """
+
+ def __init__(self, title=None, content=None, feed_url=None, **kwargs):
+ self.title = title
+ self.title_type = kwargs.get('title_type', 'text')
+ self.content = content
+ self.content_type = kwargs.get('content_type', 'html')
+ self.url = kwargs.get('url')
+ self.id = kwargs.get('id', self.url)
+ self.updated = kwargs.get('updated')
+ self.summary = kwargs.get('summary')
+ self.summary_type = kwargs.get('summary_type', 'html')
+ self.author = kwargs.get('author')
+ self.published = kwargs.get('published')
+ self.rights = kwargs.get('rights')
+ self.links = kwargs.get('links', [])
+ self.xml_base = kwargs.get('xml_base', feed_url)
+
+ if not hasattr(self.author, '__iter__') \
+ or isinstance(self.author, (basestring, dict)):
+ self.author = [self.author]
+ for i, author in enumerate(self.author):
+ if not isinstance(author, dict):
+ self.author[i] = {'name': author}
+
+ if not self.title:
+ raise ValueError('title is required')
+ if not self.id:
+ raise ValueError('id is required')
+ if not self.updated:
+ raise ValueError('updated is required')
+
+ def __repr__(self):
+ return '<%s %r>' % (
+ self.__class__.__name__,
+ self.title
+ )
+
+ def generate(self):
+ """Yields pieces of ATOM XML."""
+ base = ''
+ if self.xml_base:
+ base = ' xml:base="%s"' % escape(self.xml_base, True)
+ yield u'<entry%s>\n' % base
+ yield u' ' + _make_text_block('title', self.title, self.title_type)
+ yield u' <id>%s</id>\n' % escape(self.id)
+ yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
+ if self.published:
+ yield u' <published>%s</published>\n' % \
+ format_iso8601(self.published)
+ if self.url:
+            yield u'  <link href="%s" />\n' % escape(self.url, True)
+ for author in self.author:
+ yield u' <author>\n'
+ yield u' <name>%s</name>\n' % escape(author['name'])
+ if 'uri' in author:
+ yield u' <uri>%s</uri>\n' % escape(author['uri'])
+ if 'email' in author:
+ yield u' <email>%s</email>\n' % escape(author['email'])
+ yield u' </author>\n'
+ for link in self.links:
+ yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
+ (k, escape(link[k], True)) for k in link)
+ if self.summary:
+ yield u' ' + _make_text_block('summary', self.summary,
+ self.summary_type)
+ if self.content:
+ yield u' ' + _make_text_block('content', self.content,
+ self.content_type)
+ yield u'</entry>\n'
+
+ def to_string(self):
+ """Convert the feed item into a unicode object."""
+ return u''.join(self.generate())
+
+ def __unicode__(self):
+ return self.to_string()
+
+ def __str__(self):
+ return self.to_string().encode('utf-8')
diff --git a/websdk/werkzeug/contrib/cache.py b/websdk/werkzeug/contrib/cache.py
new file mode 100644
index 0000000..0526c48
--- /dev/null
+++ b/websdk/werkzeug/contrib/cache.py
@@ -0,0 +1,635 @@
+# -*- coding: utf-8 -*-
+"""
+ werkzeug.contrib.cache
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+    The main problem with dynamic Web sites is, well, they're dynamic. Each
+    time a user requests a page, the webserver executes a lot of code, queries
+    the database and renders templates until the visitor gets the page they
+    see.
+
+    This is a lot more expensive than just loading a file from the file system
+    and sending it to the visitor.
+
+    For most Web applications this overhead isn't a big deal, but once it
+    becomes one you will be glad to have a cache system in place.
+
+ How Caching Works
+ =================
+
+ Caching is pretty simple. Basically you have a cache object lurking around
+ somewhere that is connected to a remote cache or the file system or
+    something else. When the request comes in, you check if the current page
+    is in the cache and, if so, you return it from the cache. Otherwise you
+    generate the page and put it into the cache (or a fragment of the page;
+    you don't have to cache the full thing).
+
+ Here is a simple example of how to cache a sidebar for a template::
+
+ def get_sidebar(user):
+ identifier = 'sidebar_for/user%d' % user.id
+ value = cache.get(identifier)
+ if value is not None:
+ return value
+ value = generate_sidebar_for(user=user)
+ cache.set(identifier, value, timeout=60 * 5)
+ return value
+
+ Creating a Cache Object
+ =======================
+
+ To create a cache object you just import the cache system of your choice
+ from the cache module and instantiate it. Then you can start working
+ with that object:
+
+ >>> from werkzeug.contrib.cache import SimpleCache
+ >>> c = SimpleCache()
+ >>> c.set("foo", "value")
+ >>> c.get("foo")
+ 'value'
+ >>> c.get("missing") is None
+ True
+
+    Please keep in mind that you have to create the cache object and keep it
+    somewhere you can access it (either as a module global you can import or
+    as an attribute of your WSGI application).
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+import os
+import re
+import tempfile
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import new as md5
+from itertools import izip
+from time import time
+from cPickle import loads, dumps, load, dump, HIGHEST_PROTOCOL
+from werkzeug.posixemulation import rename
+
+def _items(mappingorseq):
+ """Wrapper for efficient iteration over mappings represented by dicts
+ or sequences::
+
+ >>> for k, v in _items((i, i*i) for i in xrange(5)):
+ ... assert k*k == v
+
+ >>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
+ ... assert k*k == v
+
+ """
+ return mappingorseq.iteritems() if hasattr(mappingorseq, 'iteritems') \
+ else mappingorseq
+
+class BaseCache(object):
+ """Baseclass for the cache systems. All the cache systems implement this
+ API or a superset of it.
+
+ :param default_timeout: the default timeout that is used if no timeout is
+ specified on :meth:`set`.
+ """
+
+ def __init__(self, default_timeout=300):
+ self.default_timeout = default_timeout
+
+ def get(self, key):
+ """Looks up key in the cache and returns the value for it.
+ If the key does not exist `None` is returned instead.
+
+ :param key: the key to be looked up.
+ """
+ return None
+
+ def delete(self, key):
+ """Deletes `key` from the cache. If it does not exist in the cache
+ nothing happens.
+
+ :param key: the key to delete.
+ """
+ pass
+
+ def get_many(self, *keys):
+ """Returns a list of values for the given keys.
+        For each key an item in the list is created. Example::
+
+ foo, bar = cache.get_many("foo", "bar")
+
+ If a key can't be looked up `None` is returned for that key
+ instead.
+
+ :param keys: The function accepts multiple keys as positional
+ arguments.
+ """
+ return map(self.get, keys)
+
+ def get_dict(self, *keys):
+ """Works like :meth:`get_many` but returns a dict::
+
+ d = cache.get_dict("foo", "bar")
+ foo = d["foo"]
+ bar = d["bar"]
+
+ :param keys: The function accepts multiple keys as positional
+ arguments.
+ """
+ return dict(izip(keys, self.get_many(*keys)))
+
+ def set(self, key, value, timeout=None):
+ """Adds a new key/value to the cache (overwrites value, if key already
+ exists in the cache).
+
+ :param key: the key to set
+ :param value: the value for the key
+ :param timeout: the cache timeout for the key (if not specified,
+ it uses the default timeout).
+ """
+ pass
+
+ def add(self, key, value, timeout=None):
+ """Works like :meth:`set` but does not overwrite the values of already
+ existing keys.
+
+ :param key: the key to set
+ :param value: the value for the key
+ :param timeout: the cache timeout for the key or the default
+ timeout if not specified.
+ """
+ pass
+
+ def set_many(self, mapping, timeout=None):
+ """Sets multiple keys and values from a mapping.
+
+ :param mapping: a mapping with the keys/values to set.
+ :param timeout: the cache timeout for the key (if not specified,
+ it uses the default timeout).
+ """
+ for key, value in _items(mapping):
+ self.set(key, value, timeout)
+
+ def delete_many(self, *keys):
+ """Deletes multiple keys at once.
+
+ :param keys: The function accepts multiple keys as positional
+ arguments.
+ """
+ for key in keys:
+ self.delete(key)
+
+ def clear(self):
+ """Clears the cache. Keep in mind that not all caches support
+ completely clearing the cache.
+ """
+ pass
+
+ def inc(self, key, delta=1):
+ """Increments the value of a key by `delta`. If the key does
+ not yet exist it is initialized with `delta`.
+
+ For supporting caches this is an atomic operation.
+
+ :param key: the key to increment.
+ :param delta: the delta to add.
+ """
+ self.set(key, (self.get(key) or 0) + delta)
+
+ def dec(self, key, delta=1):
+ """Decrements the value of a key by `delta`. If the key does
+ not yet exist it is initialized with `-delta`.
+
+ For supporting caches this is an atomic operation.
+
+        :param key: the key to decrement.
+ :param delta: the delta to subtract.
+ """
+ self.set(key, (self.get(key) or 0) - delta)
+
+
+class NullCache(BaseCache):
+ """A cache that doesn't cache. This can be useful for unit testing.
+
+ :param default_timeout: a dummy parameter that is ignored but exists
+ for API compatibility with other caches.
+ """
+
+
+class SimpleCache(BaseCache):
+ """Simple memory cache for single process environments. This class exists
+ mainly for the development server and is not 100% thread safe. It tries
+ to use as many atomic operations as possible and no locks for simplicity
+ but it could happen under heavy load that keys are added multiple times.
+
+ :param threshold: the maximum number of items the cache stores before
+ it starts deleting some.
+ :param default_timeout: the default timeout that is used if no timeout is
+ specified on :meth:`~BaseCache.set`.
+ """
+
+ def __init__(self, threshold=500, default_timeout=300):
+ BaseCache.__init__(self, default_timeout)
+ self._cache = {}
+ self.clear = self._cache.clear
+ self._threshold = threshold
+
+ def _prune(self):
+ if len(self._cache) > self._threshold:
+ now = time()
+ for idx, (key, (expires, _)) in enumerate(self._cache.items()):
+ if expires <= now or idx % 3 == 0:
+ self._cache.pop(key, None)
+
+    def get(self, key):
+        now = time()
+        expires, value = self._cache.get(key, (0, None))
+        if expires > now:
+            return loads(value)
+
+ def set(self, key, value, timeout=None):
+ if timeout is None:
+ timeout = self.default_timeout
+ self._prune()
+ self._cache[key] = (time() + timeout, dumps(value, HIGHEST_PROTOCOL))
+
+ def add(self, key, value, timeout=None):
+ if timeout is None:
+ timeout = self.default_timeout
+ if len(self._cache) > self._threshold:
+ self._prune()
+ item = (time() + timeout, dumps(value, HIGHEST_PROTOCOL))
+ self._cache.setdefault(key, item)
+
+ def delete(self, key):
+ self._cache.pop(key, None)
+
+
+_test_memcached_key = re.compile(r'[^\x00-\x21\xff]{1,250}$').match
+
+class MemcachedCache(BaseCache):
+ """A cache that uses memcached as backend.
+
+ The first argument can either be an object that resembles the API of a
+ :class:`memcache.Client` or a tuple/list of server addresses. In the
+ event that a tuple/list is passed, Werkzeug tries to import the best
+ available memcache library.
+
+ Implementation notes: This cache backend works around some limitations in
+ memcached to simplify the interface. For example unicode keys are encoded
+ to utf-8 on the fly. Methods such as :meth:`~BaseCache.get_dict` return
+    the keys in the same format as passed. Furthermore, all get methods
+    silently ignore key errors so that untrusted user data, which is often
+    passed to them in web applications, does not cause problems.
+
+ :param servers: a list or tuple of server addresses or alternatively
+ a :class:`memcache.Client` or a compatible client.
+ :param default_timeout: the default timeout that is used if no timeout is
+ specified on :meth:`~BaseCache.set`.
+ :param key_prefix: a prefix that is added before all keys. This makes it
+ possible to use the same memcached server for different
+ applications. Keep in mind that
+ :meth:`~BaseCache.clear` will also clear keys with a
+ different prefix.
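+
+    A minimal usage sketch; the server address and the prefix are
+    assumptions, not defaults::
+
+        from werkzeug.contrib.cache import MemcachedCache
+        cache = MemcachedCache(['127.0.0.1:11211'], key_prefix='myapp-')
+        cache.set('answer', 42, timeout=60)
+        cache.get('answer')  # -> 42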
+ """
+
+ def __init__(self, servers, default_timeout=300, key_prefix=None):
+ BaseCache.__init__(self, default_timeout)
+ if isinstance(servers, (list, tuple)):
+ self._client = self.import_preferred_memcache_lib(servers)
+ else:
+ # NOTE: servers is actually an already initialized memcache
+ # client.
+ self._client = servers
+
+ self.key_prefix = key_prefix
+
+
+ def get(self, key):
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
+ if self.key_prefix:
+ key = self.key_prefix + key
+        # memcached doesn't support keys longer than 250 characters. Lookups
+        # with overlong keys often occur when user-submitted data is used as
+        # the key, so we fail silently on get.
+ if _test_memcached_key(key):
+ return self._client.get(key)
+
+ def get_dict(self, *keys):
+ key_mapping = {}
+ have_encoded_keys = False
+ for key in keys:
+ if isinstance(key, unicode):
+ encoded_key = key.encode('utf-8')
+ have_encoded_keys = True
+ else:
+ encoded_key = key
+ if self.key_prefix:
+ encoded_key = self.key_prefix + encoded_key
+ if _test_memcached_key(key):
+ key_mapping[encoded_key] = key
+ d = rv = self._client.get_multi(key_mapping.keys())
+ if have_encoded_keys or self.key_prefix:
+ rv = {}
+ for key, value in d.iteritems():
+ rv[key_mapping[key]] = value
+ if len(rv) < len(keys):
+ for key in keys:
+ if key not in rv:
+ rv[key] = None
+ return rv
+
+ def add(self, key, value, timeout=None):
+ if timeout is None:
+ timeout = self.default_timeout
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
+ if self.key_prefix:
+ key = self.key_prefix + key
+ self._client.add(key, value, timeout)
+
+ def set(self, key, value, timeout=None):
+ if timeout is None:
+ timeout = self.default_timeout
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
+ if self.key_prefix:
+ key = self.key_prefix + key
+ self._client.set(key, value, timeout)
+
+ def get_many(self, *keys):
+ d = self.get_dict(*keys)
+ return [d[key] for key in keys]
+
+ def set_many(self, mapping, timeout=None):
+ if timeout is None:
+ timeout = self.default_timeout
+ new_mapping = {}
+ for key, value in _items(mapping):
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
+ if self.key_prefix:
+ key = self.key_prefix + key
+ new_mapping[key] = value
+ self._client.set_multi(new_mapping, timeout)
+
+ def delete(self, key):
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
+ if self.key_prefix:
+ key = self.key_prefix + key
+ if _test_memcached_key(key):
+ self._client.delete(key)
+
+ def delete_many(self, *keys):
+ new_keys = []
+ for key in keys:
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
+ if self.key_prefix:
+ key = self.key_prefix + key
+ if _test_memcached_key(key):
+ new_keys.append(key)
+ self._client.delete_multi(new_keys)
+
+ def clear(self):
+ self._client.flush_all()
+
+ def inc(self, key, delta=1):
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
+ if self.key_prefix:
+ key = self.key_prefix + key
+ self._client.incr(key, delta)
+
+ def dec(self, key, delta=1):
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
+ if self.key_prefix:
+ key = self.key_prefix + key
+ self._client.decr(key, delta)
+
+
+ def import_preferred_memcache_lib(self, servers):
+ """ Returns an initialized memcache client """
+ try:
+ import pylibmc
+ return pylibmc.Client(servers)
+ except ImportError:
+ pass
+
+ try:
+ from google.appengine.api import memcache
+ return memcache.Client()
+ except ImportError:
+ pass
+
+ try:
+ import memcache
+ return memcache.Client(servers)
+ except ImportError:
+ pass
+
+ # If you're seeing this, either you need to install a memcache client
+ # or you need to monkey patch this method to support your
+ # environment.
+ raise RuntimeError('no memcache module found')
+
+
+# backwards compatibility
+GAEMemcachedCache = MemcachedCache
+
+
+class RedisCache(BaseCache):
+ """Uses the Redis key-value store as a cache backend.
+
+    The first argument can be either a string denoting the address of the
+    Redis server or an object resembling an instance of a redis.Redis class.
+
+    Note: the Python Redis API already takes care of encoding unicode
+    strings on the fly.
+
+ .. versionadded:: 0.7
+
+ .. versionadded:: 0.8
+ `key_prefix` was added.
+
+    :param host: address of the Redis server or an object whose API is
+                 compatible with the official Python Redis client (redis-py).
+    :param port: port number on which the Redis server listens for
+                 connections
+ :param default_timeout: the default timeout that is used if no timeout is
+ specified on :meth:`~BaseCache.set`.
+ :param key_prefix: A prefix that should be added to all keys.
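+
+    A minimal usage sketch (the prefix is an arbitrary example; keep in
+    mind that this backend stores values as strings)::
+
+        from werkzeug.contrib.cache import RedisCache
+        cache = RedisCache(host='localhost', port=6379, key_prefix='myapp-')
+        cache.set('answer', '42', timeout=60)
+        cache.get('answer')  # -> '42'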
+ """
+ def __init__(self, host='localhost', port=6379, default_timeout=300,
+ key_prefix=None):
+ BaseCache.__init__(self, default_timeout)
+ if isinstance(host, basestring):
+ try:
+ import redis
+ except ImportError:
+ raise RuntimeError('no redis module found')
+ self._client = redis.Redis(host=host, port=port)
+ else:
+ self._client = host
+ self.key_prefix = key_prefix or ''
+
+ def get(self, key):
+ return self._client.get(self.key_prefix + key)
+
+ def get_many(self, *keys):
+ if self.key_prefix:
+ keys = [self.key_prefix + key for key in keys]
+ return self._client.mget(keys)
+
+ def set(self, key, value, timeout=None):
+ if timeout is None:
+ timeout = self.default_timeout
+ self._client.setex(self.key_prefix + key, value, timeout)
+
+ def add(self, key, value, timeout=None):
+ if timeout is None:
+ timeout = self.default_timeout
+ added = self._client.setnx(self.key_prefix + key, value)
+ if added:
+ self._client.expire(self.key_prefix + key, timeout)
+
+ def set_many(self, mapping, timeout=None):
+ if timeout is None:
+ timeout = self.default_timeout
+ pipe = self._client.pipeline()
+ for key, value in _items(mapping):
+ pipe.setex(self.key_prefix + key, value, timeout)
+ pipe.execute()
+
+ def delete(self, key):
+ self._client.delete(self.key_prefix + key)
+
+ def delete_many(self, *keys):
+ if not keys:
+ return
+ if self.key_prefix:
+ keys = [self.key_prefix + key for key in keys]
+ self._client.delete(*keys)
+
+ def clear(self):
+ if self.key_prefix:
+ keys = self._client.keys(self.key_prefix + '*')
+ if keys:
+ self._client.delete(*keys)
+ else:
+ self._client.flushdb()
+
+ def inc(self, key, delta=1):
+ return self._client.incr(self.key_prefix + key, delta)
+
+ def dec(self, key, delta=1):
+ return self._client.decr(self.key_prefix + key, delta)
+
+
+class FileSystemCache(BaseCache):
+ """A cache that stores the items on the file system. This cache depends
+ on being the only user of the `cache_dir`. Make absolutely sure that
+ nobody but this cache stores files there or otherwise the cache will
+ randomly delete files therein.
+
+ :param cache_dir: the directory where cache files are stored.
+ :param threshold: the maximum number of items the cache stores before
+ it starts deleting some.
+ :param default_timeout: the default timeout that is used if no timeout is
+ specified on :meth:`~BaseCache.set`.
+ :param mode: the file mode wanted for the cache files, default 0600
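+
+    A minimal usage sketch (the directory is an arbitrary example and
+    will be created if it does not exist)::
+
+        from werkzeug.contrib.cache import FileSystemCache
+        cache = FileSystemCache('/tmp/myapp-cache', threshold=500)
+        cache.set('answer', 42, timeout=300)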
+ """
+
+ #: used for temporary files by the FileSystemCache
+ _fs_transaction_suffix = '.__wz_cache'
+
+ def __init__(self, cache_dir, threshold=500, default_timeout=300, mode=0600):
+ BaseCache.__init__(self, default_timeout)
+ self._path = cache_dir
+ self._threshold = threshold
+ self._mode = mode
+ if not os.path.exists(self._path):
+ os.makedirs(self._path)
+
+ def _list_dir(self):
+ """return a list of (fully qualified) cache filenames
+ """
+ return [os.path.join(self._path, fn) for fn in os.listdir(self._path)
+ if not fn.endswith(self._fs_transaction_suffix)]
+
+ def _prune(self):
+ entries = self._list_dir()
+ if len(entries) > self._threshold:
+ now = time()
+ for idx, fname in enumerate(entries):
+ remove = False
+ f = None
+ try:
+ try:
+ f = open(fname, 'rb')
+ expires = load(f)
+ remove = expires <= now or idx % 3 == 0
+ finally:
+ if f is not None:
+ f.close()
+ except Exception:
+ pass
+ if remove:
+ try:
+ os.remove(fname)
+ except (IOError, OSError):
+ pass
+
+ def clear(self):
+ for fname in self._list_dir():
+ try:
+ os.remove(fname)
+ except (IOError, OSError):
+ pass
+
+ def _get_filename(self, key):
+ hash = md5(key).hexdigest()
+ return os.path.join(self._path, hash)
+
+ def get(self, key):
+ filename = self._get_filename(key)
+ try:
+ f = open(filename, 'rb')
+ try:
+ if load(f) >= time():
+ return load(f)
+ finally:
+ f.close()
+ os.remove(filename)
+ except Exception:
+ return None
+
+ def add(self, key, value, timeout=None):
+ filename = self._get_filename(key)
+ if not os.path.exists(filename):
+ self.set(key, value, timeout)
+
+ def set(self, key, value, timeout=None):
+ if timeout is None:
+ timeout = self.default_timeout
+ filename = self._get_filename(key)
+ self._prune()
+ try:
+ fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
+ dir=self._path)
+ f = os.fdopen(fd, 'wb')
+ try:
+ dump(int(time() + timeout), f, 1)
+ dump(value, f, HIGHEST_PROTOCOL)
+ finally:
+ f.close()
+ rename(tmp, filename)
+ os.chmod(filename, self._mode)
+ except (IOError, OSError):
+ pass
+
+ def delete(self, key):
+ try:
+ os.remove(self._get_filename(key))
+ except (IOError, OSError):
+ pass
+
diff --git a/websdk/werkzeug/contrib/fixers.py b/websdk/werkzeug/contrib/fixers.py
new file mode 100644
index 0000000..6286e6c
--- /dev/null
+++ b/websdk/werkzeug/contrib/fixers.py
@@ -0,0 +1,224 @@
+# -*- coding: utf-8 -*-
+"""
+ werkzeug.contrib.fixers
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ .. versionadded:: 0.5
+
+    This module includes various helpers that fix bugs in web servers. They
+    may be necessary for some versions of a buggy web server but not others.
+    We try to stay updated on the status of the bugs as well as possible, but
+    you have to verify yourself whether they fix the problem you encounter.
+
+ If you notice bugs in webservers not fixed in this module consider
+ contributing a patch.
+
+ :copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+from urllib import unquote
+from werkzeug.http import parse_options_header, parse_cache_control_header, \
+ parse_set_header
+from werkzeug.useragents import UserAgent
+from werkzeug.datastructures import Headers, ResponseCacheControl
+
+
+class LighttpdCGIRootFix(object):
+ """Wrap the application in this middleware if you are using lighttpd
+ with FastCGI or CGI and the application is mounted on the URL root.
+
+ :param app: the WSGI application
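+
+    Usage sketch; ``make_app`` stands in for whatever factory creates
+    your WSGI application::
+
+        app = LighttpdCGIRootFix(make_app())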
+ """
+
+ def __init__(self, app):
+ self.app = app
+
+ def __call__(self, environ, start_response):
+ # only set PATH_INFO for older versions of Lighty or if no
+ # server software is provided. That's because the test was
+ # added in newer Werkzeug versions and we don't want to break
+ # people's code if they are using this fixer in a test that
+ # does not set the SERVER_SOFTWARE key.
+ if 'SERVER_SOFTWARE' not in environ or \
+ environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28':
+ environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \
+ environ.get('PATH_INFO', '')
+ environ['SCRIPT_NAME'] = ''
+ return self.app(environ, start_response)
+
+
+class PathInfoFromRequestUriFix(object):
+ """On windows environment variables are limited to the system charset
+ which makes it impossible to store the `PATH_INFO` variable in the
+ environment without loss of information on some systems.
+
+ This is for example a problem for CGI scripts on a Windows Apache.
+
+ This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
+ `REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the
+ fix can only be applied if the webserver supports either of these
+ variables.
+
+ :param app: the WSGI application
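+
+    Applying the fix is a one-liner (``app`` being your WSGI
+    application)::
+
+        app = PathInfoFromRequestUriFix(app)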
+ """
+
+ def __init__(self, app):
+ self.app = app
+
+ def __call__(self, environ, start_response):
+ for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':
+ if key not in environ:
+ continue
+ request_uri = unquote(environ[key])
+ script_name = unquote(environ.get('SCRIPT_NAME', ''))
+ if request_uri.startswith(script_name):
+ environ['PATH_INFO'] = request_uri[len(script_name):] \
+ .split('?', 1)[0]
+ break
+ return self.app(environ, start_response)
+
+
+class ProxyFix(object):
+ """This middleware can be applied to add HTTP proxy support to an
+ application that was not designed with HTTP proxies in mind. It
+    sets `REMOTE_ADDR` and `HTTP_HOST` from the `X-Forwarded-*` headers.
+
+ Do not use this middleware in non-proxy setups for security reasons.
+
+ The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in
+ the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and
+ `werkzeug.proxy_fix.orig_http_host`.
+
+ :param app: the WSGI application
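+
+    A minimal sketch of typical use behind a single trusted proxy::
+
+        from werkzeug.contrib.fixers import ProxyFix
+        app = ProxyFix(app)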
+ """
+
+ def __init__(self, app):
+ self.app = app
+
+ def get_remote_addr(self, forwarded_for):
+ """Selects the new remote addr from the given list of ips in
+ X-Forwarded-For. By default the first one is picked.
+
+ .. versionadded:: 0.8
+ """
+ if forwarded_for:
+ return forwarded_for[0]
+
+ def __call__(self, environ, start_response):
+ getter = environ.get
+ forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '')
+ forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',')
+ forwarded_host = getter('HTTP_X_FORWARDED_HOST', '')
+ environ.update({
+ 'werkzeug.proxy_fix.orig_wsgi_url_scheme': getter('wsgi.url_scheme'),
+ 'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'),
+ 'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST')
+ })
+ forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x]
+ remote_addr = self.get_remote_addr(forwarded_for)
+ if remote_addr is not None:
+ environ['REMOTE_ADDR'] = remote_addr
+ if forwarded_host:
+ environ['HTTP_HOST'] = forwarded_host
+ if forwarded_proto:
+ environ['wsgi.url_scheme'] = forwarded_proto
+ return self.app(environ, start_response)
+
+
+class HeaderRewriterFix(object):
+ """This middleware can remove response headers and add others. This
+ is for example useful to remove the `Date` header from responses if you
+ are using a server that adds that header, no matter if it's present or
+ not or to add `X-Powered-By` headers::
+
+ app = HeaderRewriterFix(app, remove_headers=['Date'],
+ add_headers=[('X-Powered-By', 'WSGI')])
+
+ :param app: the WSGI application
+ :param remove_headers: a sequence of header keys that should be
+ removed.
+ :param add_headers: a sequence of ``(key, value)`` tuples that should
+ be added.
+ """
+
+ def __init__(self, app, remove_headers=None, add_headers=None):
+ self.app = app
+ self.remove_headers = set(x.lower() for x in (remove_headers or ()))
+ self.add_headers = list(add_headers or ())
+
+ def __call__(self, environ, start_response):
+ def rewriting_start_response(status, headers, exc_info=None):
+ new_headers = []
+ for key, value in headers:
+ if key.lower() not in self.remove_headers:
+ new_headers.append((key, value))
+ new_headers += self.add_headers
+ return start_response(status, new_headers, exc_info)
+ return self.app(environ, rewriting_start_response)
+
+
+class InternetExplorerFix(object):
+ """This middleware fixes a couple of bugs with Microsoft Internet
+ Explorer. Currently the following fixes are applied:
+
+    - removing `Vary` headers for unsupported mimetypes, which
+      cause trouble with caching. Can be disabled by passing
+      ``fix_vary=False`` to the constructor.
+      See: http://support.microsoft.com/kb/824847/en-us
+
+    - removing offending headers to work around caching bugs in
+      Internet Explorer if `Content-Disposition` is set. Can be
+      disabled by passing ``fix_attach=False`` to the constructor.
+
+ If it does not detect affected Internet Explorer versions it won't touch
+ the request / response.
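+
+    Usage sketch with both fixes enabled (which is the default)::
+
+        app = InternetExplorerFix(app, fix_vary=True, fix_attach=True)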
+ """
+
+ # This code was inspired by Django fixers for the same bugs. The
+ # fix_vary and fix_attach fixers were originally implemented in Django
+    # by Michael Axiak and are available as part of the Django project:
+ # http://code.djangoproject.com/ticket/4148
+
+ def __init__(self, app, fix_vary=True, fix_attach=True):
+ self.app = app
+ self.fix_vary = fix_vary
+ self.fix_attach = fix_attach
+
+ def fix_headers(self, environ, headers, status=None):
+ if self.fix_vary:
+ header = headers.get('content-type', '')
+ mimetype, options = parse_options_header(header)
+ if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
+ headers.pop('vary', None)
+
+ if self.fix_attach and 'content-disposition' in headers:
+ pragma = parse_set_header(headers.get('pragma', ''))
+ pragma.discard('no-cache')
+ header = pragma.to_header()
+ if not header:
+ headers.pop('pragma', '')
+ else:
+ headers['Pragma'] = header
+ header = headers.get('cache-control', '')
+ if header:
+ cc = parse_cache_control_header(header,
+ cls=ResponseCacheControl)
+ cc.no_cache = None
+ cc.no_store = False
+ header = cc.to_header()
+ if not header:
+ headers.pop('cache-control', '')
+ else:
+ headers['Cache-Control'] = header
+
+ def run_fixed(self, environ, start_response):
+ def fixing_start_response(status, headers, exc_info=None):
+ self.fix_headers(environ, Headers.linked(headers), status)
+ return start_response(status, headers, exc_info)
+ return self.app(environ, fixing_start_response)
+
+ def __call__(self, environ, start_response):
+ ua = UserAgent(environ)
+ if ua.browser != 'msie':
+ return self.app(environ, start_response)
+ return self.run_fixed(environ, start_response)
diff --git a/websdk/werkzeug/contrib/iterio.py b/websdk/werkzeug/contrib/iterio.py
new file mode 100644
index 0000000..0718659
--- /dev/null
+++ b/websdk/werkzeug/contrib/iterio.py
@@ -0,0 +1,277 @@
+# -*- coding: utf-8 -*-
+r"""
+ werkzeug.contrib.iterio
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ This module implements a :class:`IterIO` that converts an iterator into
+ a stream object and the other way round. Converting streams into
+ iterators requires the `greenlet`_ module.
+
+ To convert an iterator into a stream all you have to do is to pass it
+ directly to the :class:`IterIO` constructor. In this example we pass it
+ a newly created generator::
+
+ def foo():
+ yield "something\n"
+ yield "otherthings"
+ stream = IterIO(foo())
+ print stream.read() # read the whole iterator
+
+    The other way round works a bit differently because we have to ensure that
+    the code execution doesn't take place yet. An :class:`IterIO` call with a
+    callable as first argument does two things. The function itself is passed
+    an :class:`IterIO` stream it can feed. The object returned by the
+    :class:`IterIO` constructor, on the other hand, is not a stream object but
+    an iterator::
+
+ def foo(stream):
+ stream.write("some")
+ stream.write("thing")
+ stream.flush()
+ stream.write("otherthing")
+ iterator = IterIO(foo)
+ print iterator.next() # prints something
+ print iterator.next() # prints otherthing
+ iterator.next() # raises StopIteration
+
+ .. _greenlet: http://codespeak.net/py/dist/greenlet.html
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+try:
+ import greenlet
+except ImportError:
+ greenlet = None
+
+
+class IterIO(object):
+ """Instances of this object implement an interface compatible with the
+ standard Python :class:`file` object. Streams are either read-only or
+ write-only depending on how the object is created.
+ """
+
+ def __new__(cls, obj):
+ try:
+ iterator = iter(obj)
+ except TypeError:
+ return IterI(obj)
+ return IterO(iterator)
+
+ def __iter__(self):
+ return self
+
+ def tell(self):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ return self.pos
+
+ def isatty(self):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ return False
+
+ def seek(self, pos, mode=0):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ raise IOError(9, 'Bad file descriptor')
+
+ def truncate(self, size=None):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ raise IOError(9, 'Bad file descriptor')
+
+ def write(self, s):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ raise IOError(9, 'Bad file descriptor')
+
+ def writelines(self, list):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ raise IOError(9, 'Bad file descriptor')
+
+ def read(self, n=-1):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ raise IOError(9, 'Bad file descriptor')
+
+ def readlines(self, sizehint=0):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ raise IOError(9, 'Bad file descriptor')
+
+ def readline(self, length=None):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ raise IOError(9, 'Bad file descriptor')
+
+ def flush(self):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ raise IOError(9, 'Bad file descriptor')
+
+ def next(self):
+ if self.closed:
+ raise StopIteration()
+ line = self.readline()
+ if not line:
+ raise StopIteration()
+ return line
+
+
+class IterI(IterIO):
+ """Convert an stream into an iterator."""
+
+ def __new__(cls, func):
+ if greenlet is None:
+ raise RuntimeError('IterI requires greenlet support')
+ stream = object.__new__(cls)
+ stream._parent = greenlet.getcurrent()
+ stream._buffer = []
+ stream.closed = False
+ stream.pos = 0
+
+ def run():
+ func(stream)
+ stream.flush()
+
+ g = greenlet.greenlet(run, stream._parent)
+ while 1:
+ rv = g.switch()
+ if not rv:
+ return
+ yield rv[0]
+
+ def close(self):
+ if not self.closed:
+ self.closed = True
+
+ def write(self, s):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ self.pos += len(s)
+ self._buffer.append(s)
+
+ def writelines(self, list):
+ self.write(''.join(list))
+
+ def flush(self):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ data = ''.join(self._buffer)
+ self._buffer = []
+ self._parent.switch((data,))
+
+
+class IterO(IterIO):
+ """Iter output. Wrap an iterator and give it a stream like interface."""
+
+ def __new__(cls, gen):
+ self = object.__new__(cls)
+ self._gen = gen
+ self._buf = ''
+ self.closed = False
+ self.pos = 0
+ return self
+
+ def __iter__(self):
+ return self
+
+ def close(self):
+ if not self.closed:
+ self.closed = True
+ if hasattr(self._gen, 'close'):
+ self._gen.close()
+
+ def seek(self, pos, mode=0):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ if mode == 1:
+ pos += self.pos
+ elif mode == 2:
+ self.read()
+ self.pos = min(self.pos, self.pos + pos)
+ return
+ elif mode != 0:
+ raise IOError('Invalid argument')
+ buf = []
+ try:
+ tmp_end_pos = len(self._buf)
+ while pos > tmp_end_pos:
+ item = self._gen.next()
+ tmp_end_pos += len(item)
+ buf.append(item)
+ except StopIteration:
+ pass
+ if buf:
+ self._buf += ''.join(buf)
+ self.pos = max(0, pos)
+
+ def read(self, n=-1):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ if n < 0:
+ self._buf += ''.join(self._gen)
+ result = self._buf[self.pos:]
+ self.pos += len(result)
+ return result
+ new_pos = self.pos + n
+ buf = []
+ try:
+ tmp_end_pos = len(self._buf)
+ while new_pos > tmp_end_pos:
+ item = self._gen.next()
+ tmp_end_pos += len(item)
+ buf.append(item)
+ except StopIteration:
+ pass
+ if buf:
+ self._buf += ''.join(buf)
+ new_pos = max(0, new_pos)
+ try:
+ return self._buf[self.pos:new_pos]
+ finally:
+ self.pos = min(new_pos, len(self._buf))
+
+ def readline(self, length=None):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+ nl_pos = self._buf.find('\n', self.pos)
+ buf = []
+ try:
+ pos = self.pos
+ while nl_pos < 0:
+ item = self._gen.next()
+ local_pos = item.find('\n')
+ buf.append(item)
+ if local_pos >= 0:
+ nl_pos = pos + local_pos
+ break
+ pos += len(item)
+ except StopIteration:
+ pass
+ if buf:
+ self._buf += ''.join(buf)
+ if nl_pos < 0:
+ new_pos = len(self._buf)
+ else:
+ new_pos = nl_pos + 1
+ if length is not None and self.pos + length < new_pos:
+ new_pos = self.pos + length
+ try:
+ return self._buf[self.pos:new_pos]
+ finally:
+ self.pos = min(new_pos, len(self._buf))
+
+ def readlines(self, sizehint=0):
+ total = 0
+ lines = []
+ line = self.readline()
+ while line:
+ lines.append(line)
+ total += len(line)
+ if 0 < sizehint <= total:
+ break
+ line = self.readline()
+ return lines
diff --git a/websdk/werkzeug/contrib/jsrouting.py b/websdk/werkzeug/contrib/jsrouting.py
new file mode 100644
index 0000000..9b7d0c0
--- /dev/null
+++ b/websdk/werkzeug/contrib/jsrouting.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+"""
+ werkzeug.contrib.jsrouting
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Addon module that allows you to create a JavaScript URL-building
+    function from the rules of a werkzeug routing map.
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+try:
+ from simplejson import dumps
+except ImportError:
+ try:
+ from json import dumps
+ except ImportError:
+ def dumps(*args):
+ raise RuntimeError('simplejson required for jsrouting')
+
+from inspect import getmro
+from werkzeug.routing import NumberConverter
+
+
+def render_template(name_parts, rules, converters):
+ result = u''
+ if name_parts:
+ for idx in xrange(0, len(name_parts) - 1):
+ name = u'.'.join(name_parts[:idx + 1])
+ result += u"if (typeof %s === 'undefined') %s = {}\n" % (name, name)
+ result += '%s = ' % '.'.join(name_parts)
+ result += """(function (server_name, script_name, subdomain, url_scheme) {
+    var converters = [%(converters)s];
+    var rules = %(rules)s;
+ function in_array(array, value) {
+ if (array.indexOf != undefined) {
+ return array.indexOf(value) != -1;
+ }
+ for (var i = 0; i < array.length; i++) {
+ if (array[i] == value) {
+ return true;
+ }
+ }
+ return false;
+ }
+ function array_diff(array1, array2) {
+ array1 = array1.slice();
+ for (var i = array1.length-1; i >= 0; i--) {
+ if (in_array(array2, array1[i])) {
+ array1.splice(i, 1);
+ }
+ }
+ return array1;
+ }
+ function split_obj(obj) {
+ var names = [];
+ var values = [];
+ for (var name in obj) {
+ if (typeof(obj[name]) != 'function') {
+ names.push(name);
+ values.push(obj[name]);
+ }
+ }
+ return {names: names, values: values, original: obj};
+ }
+ function suitable(rule, args) {
+ var default_args = split_obj(rule.defaults || {});
+ var diff_arg_names = array_diff(rule.arguments, default_args.names);
+
+ for (var i = 0; i < diff_arg_names.length; i++) {
+ if (!in_array(args.names, diff_arg_names[i])) {
+ return false;
+ }
+ }
+
+ if (array_diff(rule.arguments, args.names).length == 0) {
+ if (rule.defaults == null) {
+ return true;
+ }
+ for (var i = 0; i < default_args.names.length; i++) {
+ var key = default_args.names[i];
+ var value = default_args.values[i];
+ if (value != args.original[key]) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+ function build(rule, args) {
+ var tmp = [];
+ var processed = rule.arguments.slice();
+ for (var i = 0; i < rule.trace.length; i++) {
+ var part = rule.trace[i];
+ if (part.is_dynamic) {
+ var converter = converters[rule.converters[part.data]];
+ var data = converter(args.original[part.data]);
+ if (data == null) {
+ return null;
+ }
+ tmp.push(data);
+                processed.push(part.data);
+ } else {
+ tmp.push(part.data);
+ }
+ }
+ tmp = tmp.join('');
+ var pipe = tmp.indexOf('|');
+ var subdomain = tmp.substring(0, pipe);
+ var url = tmp.substring(pipe+1);
+
+ var unprocessed = array_diff(args.names, processed);
+ var first_query_var = true;
+ for (var i = 0; i < unprocessed.length; i++) {
+ if (first_query_var) {
+ url += '?';
+ } else {
+ url += '&';
+ }
+ first_query_var = false;
+ url += encodeURIComponent(unprocessed[i]);
+ url += '=';
+ url += encodeURIComponent(args.original[unprocessed[i]]);
+ }
+ return {subdomain: subdomain, path: url};
+ }
+ function lstrip(s, c) {
+ while (s && s.substring(0, 1) == c) {
+ s = s.substring(1);
+ }
+ return s;
+ }
+ function rstrip(s, c) {
+ while (s && s.substring(s.length-1, s.length) == c) {
+ s = s.substring(0, s.length-1);
+ }
+ return s;
+ }
+ return function(endpoint, args, force_external) {
+ args = split_obj(args);
+ var rv = null;
+ for (var i = 0; i < rules.length; i++) {
+ var rule = rules[i];
+ if (rule.endpoint != endpoint) continue;
+ if (suitable(rule, args)) {
+ rv = build(rule, args);
+ if (rv != null) {
+ break;
+ }
+ }
+ }
+ if (rv == null) {
+ return null;
+ }
+ if (!force_external && rv.subdomain == subdomain) {
+ return rstrip(script_name, '/') + '/' + lstrip(rv.path, '/');
+ } else {
+ return url_scheme + '://'
+ + (rv.subdomain ? rv.subdomain + '.' : '')
+ + server_name + rstrip(script_name, '/')
+ + '/' + lstrip(rv.path, '/');
+ }
+ };
+})""" % {'converters': u', '.join(converters)}
+ return result
+
+
+def generate_map(map, name='url_map'):
+ """
+ Generates a JavaScript function containing the rules defined in
+ this map, to be used with a MapAdapter's generate_javascript
+ method. If you don't pass a name the returned JavaScript code is
+ an expression that returns a function. Otherwise it's a standalone
+ script that assigns the function with that name. Dotted names are
+    resolved (so you can use a name like 'obj.url_for').
+
+ In order to use JavaScript generation, simplejson must be installed.
+
+ Note that using this feature will expose the rules
+ defined in your map to users. If your rules contain sensitive
+ information, don't use JavaScript generation!
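+
+    A rough usage sketch; the map and the endpoints are made up::
+
+        from werkzeug.routing import Map, Rule
+        url_map = Map([Rule('/', endpoint='index'),
+                       Rule('/page/<int:page_id>', endpoint='page')])
+        js_code = generate_map(url_map, name='url_map')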
+ """
+ map.update()
+ rules = []
+ converters = []
+ for rule in map.iter_rules():
+ trace = [{
+ 'is_dynamic': is_dynamic,
+ 'data': data
+ } for is_dynamic, data in rule._trace]
+ rule_converters = {}
+ for key, converter in rule._converters.iteritems():
+ js_func = js_to_url_function(converter)
+ try:
+ index = converters.index(js_func)
+ except ValueError:
+ converters.append(js_func)
+ index = len(converters) - 1
+ rule_converters[key] = index
+ rules.append({
+ u'endpoint': rule.endpoint,
+ u'arguments': list(rule.arguments),
+ u'converters': rule_converters,
+ u'trace': trace,
+ u'defaults': rule.defaults
+ })
+
+ return render_template(name_parts=name and name.split('.') or [],
+ rules=dumps(rules),
+ converters=converters)
+
+
+def generate_adapter(adapter, name='url_for', map_name='url_map'):
+ """Generates the url building function for a map."""
+ values = {
+ u'server_name': dumps(adapter.server_name),
+ u'script_name': dumps(adapter.script_name),
+ u'subdomain': dumps(adapter.subdomain),
+ u'url_scheme': dumps(adapter.url_scheme),
+ u'name': name,
+ u'map_name': map_name
+ }
+ return u'''\
+var %(name)s = %(map_name)s(
+ %(server_name)s,
+ %(script_name)s,
+ %(subdomain)s,
+ %(url_scheme)s
+);''' % values
+
+
+def js_to_url_function(converter):
+ """Get the JavaScript converter function from a rule."""
+ if hasattr(converter, 'js_to_url_function'):
+ data = converter.js_to_url_function()
+ else:
+ for cls in getmro(type(converter)):
+ if cls in js_to_url_functions:
+ data = js_to_url_functions[cls](converter)
+ break
+ else:
+ return 'encodeURIComponent'
+ return '(function(value) { %s })' % data
+
+
+def NumberConverter_js_to_url(conv):
+ if conv.fixed_digits:
+ return u'''\
+var result = value.toString();
+while (result.length < %s)
+ result = '0' + result;
+return result;''' % conv.fixed_digits
+ return u'return value.toString();'
+
+
+js_to_url_functions = {
+ NumberConverter: NumberConverter_js_to_url
+}
diff --git a/websdk/werkzeug/contrib/kickstart.py b/websdk/werkzeug/contrib/kickstart.py
new file mode 100644
index 0000000..43c0e7c
--- /dev/null
+++ b/websdk/werkzeug/contrib/kickstart.py
@@ -0,0 +1,288 @@
+# -*- coding: utf-8 -*-
+"""
+ werkzeug.contrib.kickstart
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This module provides some simple shortcuts to make using Werkzeug simpler
+ for small scripts.
+
+ These improvements include predefined `Request` and `Response` objects as
+ well as a predefined `Application` object which can be customized in child
+    classes, of course. The `Request` and `Response` objects handle URL
+ generation as well as sessions via `werkzeug.contrib.sessions` and are
+ purely optional.
+
+ There is also some integration of template engines. The template loaders
+ are, of course, not neccessary to use the template engines in Werkzeug,
+ but they provide a common interface. Currently supported template engines
+ include Werkzeug's minitmpl and Genshi_. Support for other engines can be
+ added in a trivial way. These loaders provide a template interface
+ similar to the one used by Django_.
+
+ .. _Genshi: http://genshi.edgewall.org/
+ .. _Django: http://www.djangoproject.com/
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+from os import path
+from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase
+from werkzeug.templates import Template
+from werkzeug.exceptions import HTTPException
+from werkzeug.routing import RequestRedirect
+
+__all__ = ['Request', 'Response', 'TemplateNotFound', 'TemplateLoader',
+ 'GenshiTemplateLoader', 'Application']
+
+from warnings import warn
+warn(DeprecationWarning('werkzeug.contrib.kickstart is deprecated and '
+ 'will be removed in Werkzeug 1.0'))
+
+
+class Request(RequestBase):
+ """A handy subclass of the base request that adds a URL builder.
+    When supplied with a session store, it is also able to handle sessions.
+ """
+
+ def __init__(self, environ, url_map,
+ session_store=None, cookie_name=None):
+ # call the parent for initialization
+ RequestBase.__init__(self, environ)
+ # create an adapter
+ self.url_adapter = url_map.bind_to_environ(environ)
+ # create all stuff for sessions
+ self.session_store = session_store
+ self.cookie_name = cookie_name
+
+ if session_store is not None and cookie_name is not None:
+ if cookie_name in self.cookies:
+ # get the session out of the storage
+ self.session = session_store.get(self.cookies[cookie_name])
+ else:
+ # create a new session
+ self.session = session_store.new()
+
+ def url_for(self, callback, **values):
+ return self.url_adapter.build(callback, values)
+
+
+class Response(ResponseBase):
+ """
+ A subclass of base response which sets the default mimetype to text/html.
+    If the `Request` that came in is using Werkzeug sessions, this class
+ takes care of saving that session.
+ """
+ default_mimetype = 'text/html'
+
+ def __call__(self, environ, start_response):
+ # get the request object
+ request = environ['werkzeug.request']
+
+ if request.session_store is not None:
+            # save the session if necessary
+ request.session_store.save_if_modified(request.session)
+
+ # set the cookie for the browser if it is not there:
+ if request.cookie_name not in request.cookies:
+ self.set_cookie(request.cookie_name, request.session.sid)
+
+ # go on with normal response business
+ return ResponseBase.__call__(self, environ, start_response)
+
+
+class Processor(object):
+ """A request and response processor - it is what Django calls a
+    middleware, but Werkzeug also includes straightforward support for real
+ WSGI middlewares, so another name was chosen.
+
+ The code of this processor is derived from the example in the Werkzeug
+ trac, called `Request and Response Processor
+ <http://dev.pocoo.org/projects/werkzeug/wiki/RequestResponseProcessor>`_
+ """
+
+ def process_request(self, request):
+ return request
+
+ def process_response(self, request, response):
+ return response
+
+ def process_view(self, request, view_func, view_args, view_kwargs):
+ """process_view() is called just before the Application calls the
+ function specified by view_func.
+
+ If this returns None, the Application processes the next Processor,
+ and if it returns something else (like a Response instance), that
+ will be returned without any further processing.
+ """
+ return None
+
+ def process_exception(self, request, exception):
+ return None
+
+
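+# A minimal sketch, not part of the original module: a concrete processor
+# that logs every exception raised by a view. The logger name is
+# hypothetical; returning None keeps the default re-raise behaviour.
+class LoggingProcessor(Processor):
+
+    def process_exception(self, request, exception):
+        # called from inside the except block, so exc_info is available
+        import logging
+        logging.getLogger('kickstart').exception('view raised an error')
+        return None
+
+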
+class Application(object):
+ """A generic WSGI application which can be used to start with Werkzeug in
+ an easy, straightforward way.
+ """
+
+ def __init__(self, name, url_map, session=False, processors=None):
+ # save the name and the URL-map, as it'll be needed later on
+ self.name = name
+ self.url_map = url_map
+ # save the list of processors if supplied
+ self.processors = processors or []
+        # remember the session store if one was supplied
+ if session:
+ self.store = session
+ else:
+ self.store = None
+
+ def __call__(self, environ, start_response):
+ # create a request - with or without session support
+ if self.store is not None:
+ request = Request(environ, self.url_map,
+ session_store=self.store, cookie_name='%s_sid' % self.name)
+ else:
+ request = Request(environ, self.url_map)
+
+ # apply the request processors
+ for processor in self.processors:
+ request = processor.process_request(request)
+
+ try:
+ # find the callback to which the URL is mapped
+ callback, args = request.url_adapter.match(request.path)
+ except (HTTPException, RequestRedirect), e:
+ response = e
+ else:
+ # check all view processors
+ for processor in self.processors:
+ action = processor.process_view(request, callback, (), args)
+ if action is not None:
+ # it is overriding the default behaviour, this is
+ # short-circuiting the processing, so it returns here
+ return action(environ, start_response)
+
+ try:
+ response = callback(request, **args)
+ except Exception, exception:
+ # the callback raised some exception, need to process that
+ for processor in reversed(self.processors):
+ # filter it through the exception processor
+ action = processor.process_exception(request, exception)
+ if action is not None:
+ # the exception processor returned some action
+ return action(environ, start_response)
+                # still not handled by an exception processor, so re-raise
+ raise
+
+ # apply the response processors
+ for processor in reversed(self.processors):
+ response = processor.process_response(request, response)
+
+ # return the completely processed response
+ return response(environ, start_response)
+
+
+ def config_session(self, store, expiration='session'):
+ """
+        Configures the session store. You can also disable sessions by
+        setting the store to None.
+ """
+ self.store = store
+ # expiration=session is the default anyway
+ # TODO: add settings to define the expiration date, the domain, the
+        # path and maybe the secure parameter.
+
+
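+# A minimal wiring sketch, not part of the original module. Because
+# Application calls the matched endpoint directly, the rule endpoints must
+# be the view callables themselves; the names used here are hypothetical.
+from werkzeug.routing import Map, Rule
+
+def index(request):
+    return Response('Hello World!')
+
+example_app = Application('example', Map([Rule('/', endpoint=index)]),
+                          processors=[LoggingProcessor()])
+
+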
+class TemplateNotFound(IOError, LookupError):
+ """
+ A template was not found by the template loader.
+ """
+
+ def __init__(self, name):
+ IOError.__init__(self, name)
+ self.name = name
+
+
+class TemplateLoader(object):
+ """
+ A simple loader interface for the werkzeug minitmpl
+ template language.
+ """
+
+ def __init__(self, search_path, encoding='utf-8'):
+ self.search_path = path.abspath(search_path)
+ self.encoding = encoding
+
+ def get_template(self, name):
+ """Get a template from a given name."""
+ filename = path.join(self.search_path, *[p for p in name.split('/')
+ if p and p[0] != '.'])
+ if not path.exists(filename):
+ raise TemplateNotFound(name)
+ return Template.from_file(filename, self.encoding)
+
+ def render_to_response(self, *args, **kwargs):
+ """Load and render a template into a response object."""
+ return Response(self.render_to_string(*args, **kwargs))
+
+ def render_to_string(self, *args, **kwargs):
+ """Load and render a template into a unicode string."""
+ try:
+ template_name, args = args[0], args[1:]
+ except IndexError:
+ raise TypeError('name of template required')
+ return self.get_template(template_name).render(*args, **kwargs)
+
+
+class GenshiTemplateLoader(TemplateLoader):
+ """A unified interface for loading Genshi templates. Actually a quite thin
+ wrapper for Genshi's TemplateLoader.
+
+ It sets some defaults that differ from the Genshi loader, most notably
+    auto_reload is active. All important options can be passed through to
+ Genshi.
+ The default output type is 'html', but can be adjusted easily by changing
+ the `output_type` attribute.
+ """
+ def __init__(self, search_path, encoding='utf-8', **kwargs):
+ TemplateLoader.__init__(self, search_path, encoding)
+ # import Genshi here, because we don't want a general Genshi
+ # dependency, only a local one
+ from genshi.template import TemplateLoader as GenshiLoader
+ from genshi.template.loader import TemplateNotFound
+
+ self.not_found_exception = TemplateNotFound
+ # set auto_reload to True per default
+ reload_template = kwargs.pop('auto_reload', True)
+        # get rid of default_encoding as this template loader overwrites it
+ # with the value of encoding
+ kwargs.pop('default_encoding', None)
+
+ # now, all arguments are clean, pass them on
+ self.loader = GenshiLoader(search_path, default_encoding=encoding,
+ auto_reload=reload_template, **kwargs)
+
+ # the default output is HTML but can be overridden easily
+ self.output_type = 'html'
+ self.encoding = encoding
+
+ def get_template(self, template_name):
+ """Get the template which is at the given name"""
+ try:
+ return self.loader.load(template_name, encoding=self.encoding)
+ except self.not_found_exception, e:
+ # catch the exception raised by Genshi, convert it into a werkzeug
+ # exception (for the sake of consistency)
+ raise TemplateNotFound(template_name)
+
+ def render_to_string(self, template_name, context=None):
+ """Load and render a template into an unicode string"""
+ # create an empty context if no context was specified
+ context = context or {}
+ tmpl = self.get_template(template_name)
+ # render the template into a unicode string (None means unicode)
+        rendered = tmpl.generate(**context)
+        return rendered.render(self.output_type, encoding=None)
diff --git a/websdk/werkzeug/contrib/limiter.py b/websdk/werkzeug/contrib/limiter.py
new file mode 100644
index 0000000..d0dbcc5
--- /dev/null
+++ b/websdk/werkzeug/contrib/limiter.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+"""
+ werkzeug.contrib.limiter
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    A middleware that limits incoming data. This works around problems with
+    Trac_ or Django_ because those read the incoming data directly into memory.
+
+ .. _Trac: http://trac.edgewall.org/
+ .. _Django: http://www.djangoproject.com/
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+from warnings import warn
+
+from werkzeug.wsgi import LimitedStream
+
+
+class StreamLimitMiddleware(object):
+ """Limits the input stream to a given number of bytes. This is useful if
+    you have a WSGI application that reads form data into memory (Django for
+ example) and you don't want users to harm the server by uploading tons of
+ data.
+
+    The default maximum size is 10 MB.
+ """
+
+ def __init__(self, app, maximum_size=1024 * 1024 * 10):
+ self.app = app
+ self.maximum_size = maximum_size
+
+ def __call__(self, environ, start_response):
+ limit = min(self.maximum_size, int(environ.get('CONTENT_LENGTH') or 0))
+ environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], limit)
+ return self.app(environ, start_response)
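+
+
+# A minimal usage sketch, not part of the original module: cap request
+# bodies at 4 MB for a small WSGI app that echoes the (limited) input.
+def _example_app(environ, start_response):
+    # reads at most 4 MB thanks to the LimitedStream put in place above
+    body = environ['wsgi.input'].read()
+    start_response('200 OK', [('Content-Type', 'text/plain')])
+    return [body]
+
+limited_app = StreamLimitMiddleware(_example_app,
+                                    maximum_size=1024 * 1024 * 4)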
diff --git a/websdk/werkzeug/contrib/lint.py b/websdk/werkzeug/contrib/lint.py
new file mode 100644
index 0000000..c7adff9
--- /dev/null
+++ b/websdk/werkzeug/contrib/lint.py
@@ -0,0 +1,333 @@
+# -*- coding: utf-8 -*-
+"""
+ werkzeug.contrib.lint
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ .. versionadded:: 0.5
+
+ This module provides a middleware that performs sanity checks of the WSGI
+ application. It checks that :pep:`333` is properly implemented and warns
+ on some common HTTP errors such as non-empty responses for 304 status
+ codes.
+
+ This module provides a middleware, the :class:`LintMiddleware`. Wrap your
+ application with it and it will warn about common problems with WSGI and
+ HTTP while your application is running.
+
+ It's strongly recommended to use it during development.
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+from urlparse import urlparse
+from warnings import warn
+
+from werkzeug.datastructures import Headers
+from werkzeug.http import is_entity_header
+from werkzeug.wsgi import FileWrapper
+
+
+class WSGIWarning(Warning):
+ """Warning class for WSGI warnings."""
+
+
+class HTTPWarning(Warning):
+ """Warning class for HTTP warnings."""
+
+
+def check_string(context, obj, stacklevel=3):
+ if type(obj) is not str:
+ warn(WSGIWarning('%s requires bytestrings, got %s' %
+ (context, obj.__class__.__name__)))
+
+
+class InputStream(object):
+
+ def __init__(self, stream):
+ self._stream = stream
+
+ def read(self, *args):
+ if len(args) == 0:
+ warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
+ 'input stream, thus making calls to '
+ 'wsgi.input.read() unsafe. Conforming servers '
+ 'may never return from this call.'),
+ stacklevel=2)
+ elif len(args) != 1:
+ warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
+ stacklevel=2)
+ return self._stream.read(*args)
+
+ def readline(self, *args):
+ if len(args) == 0:
+ warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
+ ' are unsafe. Use wsgi.input.read() instead.'),
+ stacklevel=2)
+ elif len(args) == 1:
+ warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
+ 'WSGI does not support this, although it\'s available '
+ 'on all major servers.'),
+ stacklevel=2)
+ else:
+ raise TypeError('too many arguments passed to wsgi.input.readline()')
+ return self._stream.readline(*args)
+
+ def __iter__(self):
+ try:
+ return iter(self._stream)
+ except TypeError:
+ warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
+ return iter(())
+
+ def close(self):
+ warn(WSGIWarning('application closed the input stream!'),
+ stacklevel=2)
+ self._stream.close()
+
+
+class ErrorStream(object):
+
+ def __init__(self, stream):
+ self._stream = stream
+
+ def write(self, s):
+ check_string('wsgi.error.write()', s)
+ self._stream.write(s)
+
+ def flush(self):
+ self._stream.flush()
+
+ def writelines(self, seq):
+ for line in seq:
+            self.write(line)
+
+ def close(self):
+ warn(WSGIWarning('application closed the error stream!'),
+ stacklevel=2)
+ self._stream.close()
+
+
+class GuardedWrite(object):
+
+ def __init__(self, write, chunks):
+ self._write = write
+ self._chunks = chunks
+
+ def __call__(self, s):
+ check_string('write()', s)
+        self._write(s)
+ self._chunks.append(len(s))
+
+
+class GuardedIterator(object):
+
+ def __init__(self, iterator, headers_set, chunks):
+ self._iterator = iterator
+ self._next = iter(iterator).next
+ self.closed = False
+ self.headers_set = headers_set
+ self.chunks = chunks
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ if self.closed:
+ warn(WSGIWarning('iterated over closed app_iter'),
+ stacklevel=2)
+ rv = self._next()
+ if not self.headers_set:
+ warn(WSGIWarning('Application returned before it '
+ 'started the response'), stacklevel=2)
+ check_string('application iterator items', rv)
+ self.chunks.append(len(rv))
+ return rv
+
+ def close(self):
+ self.closed = True
+ if hasattr(self._iterator, 'close'):
+ self._iterator.close()
+
+ if self.headers_set:
+ status_code, headers = self.headers_set
+ bytes_sent = sum(self.chunks)
+ content_length = headers.get('content-length', type=int)
+
+ if status_code == 304:
+ for key, value in headers:
+ key = key.lower()
+ if key not in ('expires', 'content-location') and \
+ is_entity_header(key):
+ warn(HTTPWarning('entity header %r found in 304 '
+ 'response' % key))
+ if bytes_sent:
+ warn(HTTPWarning('304 responses must not have a body'))
+ elif 100 <= status_code < 200 or status_code == 204:
+ if content_length != 0:
+                    warn(HTTPWarning('%r responses must have an empty '
+                                     'content length' % status_code))
+ if bytes_sent:
+ warn(HTTPWarning('%r responses must not have a body' %
+ status_code))
+ elif content_length is not None and content_length != bytes_sent:
+ warn(WSGIWarning('Content-Length and the number of bytes '
+ 'sent to the client do not match.'))
+
+ def __del__(self):
+ if not self.closed:
+ try:
+ warn(WSGIWarning('Iterator was garbage collected before '
+ 'it was closed.'))
+ except Exception:
+ pass
+
+
+class LintMiddleware(object):
+ """This middleware wraps an application and warns on common errors.
+    Among other things it currently checks for the following problems:
+
+ - invalid status codes
+ - non-bytestrings sent to the WSGI server
+ - strings returned from the WSGI application
+ - non-empty conditional responses
+ - unquoted etags
+ - relative URLs in the Location header
+ - unsafe calls to wsgi.input
+ - unclosed iterators
+
+ Detected errors are emitted using the standard Python :mod:`warnings`
+ system and usually end up on :data:`stderr`.
+
+ ::
+
+ from werkzeug.contrib.lint import LintMiddleware
+ app = LintMiddleware(app)
+
+ :param app: the application to wrap
+ """
+
+ def __init__(self, app):
+ self.app = app
+
+ def check_environ(self, environ):
+ if type(environ) is not dict:
+ warn(WSGIWarning('WSGI environment is not a standard python dict.'),
+ stacklevel=4)
+ for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
+ 'wsgi.version', 'wsgi.input', 'wsgi.errors',
+ 'wsgi.multithread', 'wsgi.multiprocess',
+ 'wsgi.run_once'):
+ if key not in environ:
+ warn(WSGIWarning('required environment key %r not found'
+ % key), stacklevel=3)
+ if environ['wsgi.version'] != (1, 0):
+ warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
+ stacklevel=3)
+
+ script_name = environ.get('SCRIPT_NAME', '')
+ if script_name and script_name[:1] != '/':
+ warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
+ % script_name), stacklevel=3)
+ path_info = environ.get('PATH_INFO', '')
+ if path_info[:1] != '/':
+ warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
+ % path_info), stacklevel=3)
+
+ def check_start_response(self, status, headers, exc_info):
+ check_string('status', status)
+ status_code = status.split(None, 1)[0]
+ if len(status_code) != 3 or not status_code.isdigit():
+ warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
+ if len(status) < 4 or status[3] != ' ':
+            warn(WSGIWarning('Invalid value for status %r. Valid status '
+                             'strings are three digits, a space and a '
+                             'status explanation' % status), stacklevel=3)
+ status_code = int(status_code)
+ if status_code < 100:
+ warn(WSGIWarning('status code < 100 detected'), stacklevel=3)
+
+ if type(headers) is not list:
+ warn(WSGIWarning('header list is not a list'), stacklevel=3)
+ for item in headers:
+ if type(item) is not tuple or len(item) != 2:
+                warn(WSGIWarning('headers must be a list of 2-item tuples'),
+ stacklevel=3)
+ name, value = item
+ if type(name) is not str or type(value) is not str:
+ warn(WSGIWarning('header items must be strings'),
+ stacklevel=3)
+ if name.lower() == 'status':
+ warn(WSGIWarning('The status header is not supported due to '
+ 'conflicts with the CGI spec.'),
+ stacklevel=3)
+
+ if exc_info is not None and not isinstance(exc_info, tuple):
+ warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)
+
+ headers = Headers(headers)
+ self.check_headers(headers)
+
+ return status_code, headers
+
+ def check_headers(self, headers):
+ etag = headers.get('etag')
+ if etag is not None:
+ if etag.startswith('w/'):
+ etag = etag[2:]
+ if not (etag[:1] == etag[-1:] == '"'):
+ warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
+
+ location = headers.get('location')
+ if location is not None:
+ if not urlparse(location).netloc:
+ warn(HTTPWarning('absolute URLs required for location header'),
+ stacklevel=4)
+
+ def check_iterator(self, app_iter):
+ if isinstance(app_iter, basestring):
+            warn(WSGIWarning('application returned a string. The response '
+                             'will be sent to the client character by '
+                             'character, which kills performance. Return '
+                             'a list or iterable instead.'), stacklevel=3)
+
+ def __call__(self, *args, **kwargs):
+ if len(args) != 2:
+ warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
+ if kwargs:
+ warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
+ stacklevel=2)
+ environ, start_response = args
+
+ self.check_environ(environ)
+ environ['wsgi.input'] = InputStream(environ['wsgi.input'])
+ environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])
+
+ # hook our own file wrapper in so that applications will always
+ # iterate to the end and we can check the content length
+ environ['wsgi.file_wrapper'] = FileWrapper
+
+ headers_set = []
+ chunks = []
+
+ def checking_start_response(*args, **kwargs):
+ if len(args) not in (2, 3):
+                warn(WSGIWarning('Invalid number of arguments: %s, expected '
+                                 '2 or 3' % len(args)), stacklevel=2)
+ if kwargs:
+ warn(WSGIWarning('no keyword arguments allowed.'))
+
+ status, headers = args[:2]
+ if len(args) == 3:
+ exc_info = args[2]
+ else:
+ exc_info = None
+
+ headers_set[:] = self.check_start_response(status, headers,
+ exc_info)
+ return GuardedWrite(start_response(status, headers, exc_info),
+ chunks)
+
+ app_iter = self.app(environ, checking_start_response)
+ self.check_iterator(app_iter)
+ return GuardedIterator(app_iter, headers_set, chunks)
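+
+
+# A minimal sketch, not part of the original module: during tests the
+# emitted warnings can be promoted to hard errors via the standard
+# warnings filters.
+def make_warnings_fatal():
+    """Turn WSGI and HTTP lint warnings into exceptions."""
+    from warnings import simplefilter
+    simplefilter('error', WSGIWarning)
+    simplefilter('error', HTTPWarning)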
diff --git a/websdk/werkzeug/contrib/profiler.py b/websdk/werkzeug/contrib/profiler.py
new file mode 100644
index 0000000..58d1465
--- /dev/null
+++ b/websdk/werkzeug/contrib/profiler.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+"""
+ werkzeug.contrib.profiler
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This module provides a simple WSGI profiler middleware for finding
+    bottlenecks in web applications. It uses the :mod:`profile` or
+    :mod:`cProfile` module to do the profiling and writes the stats to the
+    stream provided (defaults to stdout).
+
+ Example usage::
+
+ from werkzeug.contrib.profiler import ProfilerMiddleware
+ app = ProfilerMiddleware(app)
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+import sys
+try:
+ try:
+ from cProfile import Profile
+ except ImportError:
+ from profile import Profile
+ from pstats import Stats
+ available = True
+except ImportError:
+ available = False
+
+
+class MergeStream(object):
+ """An object that redirects `write` calls to multiple streams.
+ Use this to log to both `sys.stdout` and a file::
+
+ f = open('profiler.log', 'w')
+ stream = MergeStream(sys.stdout, f)
+ profiler = ProfilerMiddleware(app, stream)
+ """
+
+ def __init__(self, *streams):
+ if not streams:
+ raise TypeError('at least one stream must be given')
+ self.streams = streams
+
+ def write(self, data):
+ for stream in self.streams:
+ stream.write(data)
+
+
+class ProfilerMiddleware(object):
+ """Simple profiler middleware. Wraps a WSGI application and profiles
+ a request. This intentionally buffers the response so that timings are
+ more exact.
+
+ For the exact meaning of `sort_by` and `restrictions` consult the
+ :mod:`profile` documentation.
+
+ :param app: the WSGI application to profile.
+    :param stream: the stream for the profiled stats. defaults to stdout.
+ :param sort_by: a tuple of columns to sort the result by.
+    :param restrictions: a tuple of profiling restrictions.
+ """
+
+ def __init__(self, app, stream=None,
+ sort_by=('time', 'calls'), restrictions=()):
+ if not available:
+ raise RuntimeError('the profiler is not available because '
+                               'profile or pstats is not installed.')
+ self._app = app
+ self._stream = stream or sys.stdout
+ self._sort_by = sort_by
+ self._restrictions = restrictions
+
+ def __call__(self, environ, start_response):
+ response_body = []
+
+ def catching_start_response(status, headers, exc_info=None):
+ start_response(status, headers, exc_info)
+ return response_body.append
+
+ def runapp():
+ appiter = self._app(environ, catching_start_response)
+ response_body.extend(appiter)
+ if hasattr(appiter, 'close'):
+ appiter.close()
+
+ p = Profile()
+ p.runcall(runapp)
+ body = ''.join(response_body)
+ stats = Stats(p, stream=self._stream)
+ stats.sort_stats(*self._sort_by)
+
+ self._stream.write('-' * 80)
+ self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
+ stats.print_stats(*self._restrictions)
+ self._stream.write('-' * 80 + '\n\n')
+
+ return [body]
+
+
+def make_action(app_factory, hostname='localhost', port=5000,
+ threaded=False, processes=1, stream=None,
+ sort_by=('time', 'calls'), restrictions=()):
+ """Return a new callback for :mod:`werkzeug.script` that starts a local
+ server with the profiler enabled.
+
+ ::
+
+ from werkzeug.contrib import profiler
+ action_profile = profiler.make_action(make_app)
+ """
+ def action(hostname=('h', hostname), port=('p', port),
+ threaded=threaded, processes=processes):
+ """Start a new development server."""
+ from werkzeug.serving import run_simple
+ app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
+ run_simple(hostname, port, app, False, None, threaded, processes)
+ return action
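+
+
+# A minimal wiring sketch, not part of the original module: profile a tiny
+# WSGI app and print only the ten most expensive functions, sorted by
+# cumulative time (both are valid :mod:`pstats` options).
+def _example_app(environ, start_response):
+    start_response('200 OK', [('Content-Type', 'text/plain')])
+    return ['profiled']
+
+profiled_app = ProfilerMiddleware(_example_app,
+                                  sort_by=('cumulative', 'calls'),
+                                  restrictions=(10,))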
diff --git a/websdk/werkzeug/contrib/securecookie.py b/websdk/werkzeug/contrib/securecookie.py
new file mode 100644
index 0000000..9e6feeb
--- /dev/null
+++ b/websdk/werkzeug/contrib/securecookie.py
@@ -0,0 +1,332 @@
+# -*- coding: utf-8 -*-
+r"""
+ werkzeug.contrib.securecookie
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This module implements a cookie that is not alterable from the client
+ because it adds a checksum the server checks for. You can use it as
+ session replacement if all you have is a user id or something to mark
+ a logged in user.
+
+ Keep in mind that the data is still readable from the client as a
+ normal cookie is. However you don't have to store and flush the
+ sessions you have at the server.
+
+ Example usage:
+
+ >>> from werkzeug.contrib.securecookie import SecureCookie
+ >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
+
+ Dumping into a string so that one can store it in a cookie:
+
+ >>> value = x.serialize()
+
+ Loading from that string again:
+
+ >>> x = SecureCookie.unserialize(value, "deadbeef")
+ >>> x["baz"]
+ (1, 2, 3)
+
+ If someone modifies the cookie and the checksum is wrong the unserialize
+ method will fail silently and return a new empty `SecureCookie` object.
+
+ Keep in mind that the values will be visible in the cookie so do not
+ store data in a cookie you don't want the user to see.
+
+ Application Integration
+ =======================
+
+ If you are using the werkzeug request objects you could integrate the
+ secure cookie into your application like this::
+
+ from werkzeug.utils import cached_property
+ from werkzeug.wrappers import BaseRequest
+ from werkzeug.contrib.securecookie import SecureCookie
+
+ # don't use this key but a different one; you could just use
+ # os.urandom(20) to get something random
+ SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
+
+ class Request(BaseRequest):
+
+ @cached_property
+ def client_session(self):
+ data = self.cookies.get('session_data')
+ if not data:
+ return SecureCookie(secret_key=SECRET_KEY)
+ return SecureCookie.unserialize(data, SECRET_KEY)
+
+ def application(environ, start_response):
+ request = Request(environ, start_response)
+
+ # get a response object here
+ response = ...
+
+ if request.client_session.should_save:
+ session_data = request.client_session.serialize()
+ response.set_cookie('session_data', session_data,
+ httponly=True)
+ return response(environ, start_response)
+
+ A less verbose integration can be achieved by using shorthand methods::
+
+ class Request(BaseRequest):
+
+ @cached_property
+ def client_session(self):
+ return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
+
+ def application(environ, start_response):
+ request = Request(environ, start_response)
+
+ # get a response object here
+ response = ...
+
+ request.client_session.save_cookie(response)
+ return response(environ, start_response)
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+import sys
+import cPickle as pickle
+from hmac import new as hmac
+from time import time
+from werkzeug.urls import url_quote_plus, url_unquote_plus
+from werkzeug._internal import _date_to_unix
+from werkzeug.contrib.sessions import ModificationTrackingDict
+from werkzeug.security import safe_str_cmp
+
+
+# rather ugly way to import the correct hash method. Because
+# hmac either accepts modules with a new method (sha, md5 etc.)
+# or a hashlib factory function we have to figure out what to
+# pass to it. If we have 2.5 or higher (so not 2.4 with a
+# custom hashlib) we import from hashlib and fail if it does
+# not exist (have seen that in old OS X versions).
+# in all other cases the now deprecated sha module is used.
+_default_hash = None
+if sys.version_info >= (2, 5):
+ try:
+ from hashlib import sha1 as _default_hash
+ except ImportError:
+ pass
+if _default_hash is None:
+ import sha as _default_hash
+
+
+class UnquoteError(Exception):
+ """Internal exception used to signal failures on quoting."""
+
+
+class SecureCookie(ModificationTrackingDict):
+ """Represents a secure cookie. You can subclass this class and provide
+    an alternative mac method. The important thing is that the mac method
+    is a function with an interface similar to hashlib's. Required
+    methods are update() and digest().
+
+ Example usage:
+
+ >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
+ >>> x["foo"]
+ 42
+ >>> x["baz"]
+ (1, 2, 3)
+ >>> x["blafasel"] = 23
+ >>> x.should_save
+ True
+
+ :param data: the initial data. Either a dict, list of tuples or `None`.
+    :param secret_key: the secret key. If not specified or set to `None`,
+                       it has to be set before :meth:`serialize` is called.
+ :param new: The initial value of the `new` flag.
+ """
+
+ #: The hash method to use. This has to be a module with a new function
+ #: or a function that creates a hashlib object. Such as `hashlib.md5`
+ #: Subclasses can override this attribute. The default hash is sha1.
+ #: Make sure to wrap this in staticmethod() if you store an arbitrary
+ #: function there such as hashlib.sha1 which might be implemented
+ #: as a function.
+ hash_method = staticmethod(_default_hash)
+
+ #: the module used for serialization. Unless overriden by subclasses
+ #: the standard pickle module is used.
+ serialization_method = pickle
+
+ #: if the contents should be base64 quoted. This can be disabled if the
+ #: serialization process returns cookie safe strings only.
+ quote_base64 = True
+
+ def __init__(self, data=None, secret_key=None, new=True):
+ ModificationTrackingDict.__init__(self, data or ())
+ # explicitly convert it into a bytestring because python 2.6
+ # no longer performs an implicit string conversion on hmac
+ if secret_key is not None:
+ secret_key = str(secret_key)
+ self.secret_key = secret_key
+ self.new = new
+
+ def __repr__(self):
+ return '<%s %s%s>' % (
+ self.__class__.__name__,
+ dict.__repr__(self),
+ self.should_save and '*' or ''
+ )
+
+ @property
+ def should_save(self):
+ """True if the session should be saved. By default this is only true
+ for :attr:`modified` cookies, not :attr:`new`.
+ """
+ return self.modified
+
+ @classmethod
+ def quote(cls, value):
+ """Quote the value for the cookie. This can be any object supported
+ by :attr:`serialization_method`.
+
+ :param value: the value to quote.
+ """
+ if cls.serialization_method is not None:
+ value = cls.serialization_method.dumps(value)
+ if cls.quote_base64:
+ value = ''.join(value.encode('base64').splitlines()).strip()
+ return value
+
+ @classmethod
+ def unquote(cls, value):
+ """Unquote the value for the cookie. If unquoting does not work a
+ :exc:`UnquoteError` is raised.
+
+ :param value: the value to unquote.
+ """
+ try:
+ if cls.quote_base64:
+ value = value.decode('base64')
+ if cls.serialization_method is not None:
+ value = cls.serialization_method.loads(value)
+ return value
+ except Exception:
+            # unfortunately pickle and other serialization modules can
+            # cause pretty much every error here. if we get one we
+            # catch it and convert it into an UnquoteError
+ raise UnquoteError()
+
+ def serialize(self, expires=None):
+ """Serialize the secure cookie into a string.
+
+ If expires is provided, the session will be automatically invalidated
+        after expiration when you unserialize it. This provides better
+ protection against session cookie theft.
+
+ :param expires: an optional expiration date for the cookie (a
+ :class:`datetime.datetime` object)
+ """
+ if self.secret_key is None:
+ raise RuntimeError('no secret key defined')
+ if expires:
+ self['_expires'] = _date_to_unix(expires)
+ result = []
+ mac = hmac(self.secret_key, None, self.hash_method)
+ for key, value in sorted(self.items()):
+ result.append('%s=%s' % (
+ url_quote_plus(key),
+ self.quote(value)
+ ))
+ mac.update('|' + result[-1])
+ return '%s?%s' % (
+ mac.digest().encode('base64').strip(),
+ '&'.join(result)
+ )
+
+ @classmethod
+ def unserialize(cls, string, secret_key):
+ """Load the secure cookie from a serialized string.
+
+ :param string: the cookie value to unserialize.
+ :param secret_key: the secret key used to serialize the cookie.
+ :return: a new :class:`SecureCookie`.
+ """
+ if isinstance(string, unicode):
+ string = string.encode('utf-8', 'replace')
+ try:
+ base64_hash, data = string.split('?', 1)
+ except (ValueError, IndexError):
+ items = ()
+ else:
+ items = {}
+ mac = hmac(secret_key, None, cls.hash_method)
+ for item in data.split('&'):
+ mac.update('|' + item)
+                if '=' not in item:
+ items = None
+ break
+ key, value = item.split('=', 1)
+ # try to make the key a string
+ key = url_unquote_plus(key)
+ try:
+ key = str(key)
+ except UnicodeError:
+ pass
+ items[key] = value
+
+        # if there was no parsing error and the mac checks out below,
+        # we can securely unpickle our cookie.
+ try:
+ client_hash = base64_hash.decode('base64')
+ except Exception:
+ items = client_hash = None
+ if items is not None and safe_str_cmp(client_hash, mac.digest()):
+ try:
+ for key, value in items.iteritems():
+ items[key] = cls.unquote(value)
+ except UnquoteError:
+ items = ()
+ else:
+ if '_expires' in items:
+ if time() > items['_expires']:
+ items = ()
+ else:
+ del items['_expires']
+ else:
+ items = ()
+ return cls(items, secret_key, False)
+
+ @classmethod
+ def load_cookie(cls, request, key='session', secret_key=None):
+ """Loads a :class:`SecureCookie` from a cookie in request. If the
+        cookie is not set, a new :class:`SecureCookie` instance is
+ returned.
+
+ :param request: a request object that has a `cookies` attribute
+ which is a dict of all cookie values.
+ :param key: the name of the cookie.
+ :param secret_key: the secret key used to unquote the cookie.
+ Always provide the value even though it has
+ no default!
+ """
+ data = request.cookies.get(key)
+ if not data:
+ return cls(secret_key=secret_key)
+ return cls.unserialize(data, secret_key)
+
+ def save_cookie(self, response, key='session', expires=None,
+ session_expires=None, max_age=None, path='/', domain=None,
+ secure=None, httponly=False, force=False):
+ """Saves the SecureCookie in a cookie on response object. All
+ parameters that are not described here are forwarded directly
+ to :meth:`~BaseResponse.set_cookie`.
+
+ :param response: a response object that has a
+ :meth:`~BaseResponse.set_cookie` method.
+ :param key: the name of the cookie.
+ :param session_expires: the expiration date of the secure cookie
+ stored information. If this is not provided
+ the cookie `expires` date is used instead.
+ """
+ if force or self.should_save:
+ data = self.serialize(session_expires or expires)
+ response.set_cookie(key, data, expires=expires, max_age=max_age,
+ path=path, domain=domain, secure=secure,
+ httponly=httponly)
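+
+
+# A minimal sketch, not part of the original module: because
+# ``serialization_method`` only needs an object providing dumps() and
+# loads(), the standard :mod:`json` module (Python 2.6+) can replace
+# pickle when the cookie stores simple types only (tuples come back as
+# lists, dates are not supported).
+import json
+
+class JSONSecureCookie(SecureCookie):
+    serialization_method = json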
diff --git a/websdk/werkzeug/contrib/sessions.py b/websdk/werkzeug/contrib/sessions.py
new file mode 100644
index 0000000..b81351a
--- /dev/null
+++ b/websdk/werkzeug/contrib/sessions.py
@@ -0,0 +1,344 @@
+# -*- coding: utf-8 -*-
+r"""
+ werkzeug.contrib.sessions
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This module contains some helper classes that help one to add session
+ support to a python WSGI application. For full client-side session
+ storage see :mod:`~werkzeug.contrib.securecookie` which implements a
+ secure, client-side session storage.
+
+
+ Application Integration
+ =======================
+
+ ::
+
+ from werkzeug.contrib.sessions import SessionMiddleware, \
+ FilesystemSessionStore
+
+ app = SessionMiddleware(app, FilesystemSessionStore())
+
+ The current session will then appear in the WSGI environment as
+    `werkzeug.session`. It's recommended to use the stores directly in the
+    application instead of the middleware, though for very simple scripts
+    the middleware can be sufficient.
+
+ This module does not implement methods or ways to check if a session is
+    expired. That is storage specific and should be done by a cronjob: for
+    example, to prune unused filesystem sessions one could check the modified
+    time of the files. If sessions are stored in a database the new()
+    method should add an expiration timestamp for the session.
+
+ For better flexibility it's recommended to not use the middleware but the
+ store and session object directly in the application dispatching::
+
+ session_store = FilesystemSessionStore()
+
+ def application(environ, start_response):
+ request = Request(environ)
+ sid = request.cookies.get('cookie_name')
+ if sid is None:
+ request.session = session_store.new()
+ else:
+ request.session = session_store.get(sid)
+ response = get_the_response_object(request)
+ if request.session.should_save:
+ session_store.save(request.session)
+ response.set_cookie('cookie_name', request.session.sid)
+ return response(environ, start_response)
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+import re
+import os
+import sys
+import tempfile
+from os import path
+from time import time
+from random import random
+try:
+ from hashlib import sha1
+except ImportError:
+ from sha import new as sha1
+from cPickle import dump, load, HIGHEST_PROTOCOL
+
+from werkzeug.datastructures import CallbackDict
+from werkzeug.utils import dump_cookie, parse_cookie
+from werkzeug.wsgi import ClosingIterator
+from werkzeug.posixemulation import rename
+
+
+_sha1_re = re.compile(r'^[a-f0-9]{40}$')
+
+
+def _urandom():
+ if hasattr(os, 'urandom'):
+ return os.urandom(30)
+ return random()
+
+
+def generate_key(salt=None):
+ return sha1('%s%s%s' % (salt, time(), _urandom())).hexdigest()
+
+
+class ModificationTrackingDict(CallbackDict):
+ __slots__ = ('modified',)
+
+ def __init__(self, *args, **kwargs):
+ def on_update(self):
+ self.modified = True
+ self.modified = False
+ CallbackDict.__init__(self, on_update=on_update)
+ dict.update(self, *args, **kwargs)
+
+ def copy(self):
+ """Create a flat copy of the dict."""
+ missing = object()
+ result = object.__new__(self.__class__)
+ for name in self.__slots__:
+ val = getattr(self, name, missing)
+ if val is not missing:
+ setattr(result, name, val)
+ return result
+
+ def __copy__(self):
+ return self.copy()
+
+
+class Session(ModificationTrackingDict):
+ """Subclass of a dict that keeps track of direct object changes. Changes
+ in mutable structures are not tracked, for those you have to set
+ `modified` to `True` by hand.
+ """
+ __slots__ = ModificationTrackingDict.__slots__ + ('sid', 'new')
+
+ def __init__(self, data, sid, new=False):
+ ModificationTrackingDict.__init__(self, data)
+ self.sid = sid
+ self.new = new
+
+ def __repr__(self):
+ return '<%s %s%s>' % (
+ self.__class__.__name__,
+ dict.__repr__(self),
+ self.should_save and '*' or ''
+ )
+
+ @property
+ def should_save(self):
+ """True if the session should be saved.
+
+ .. versionchanged:: 0.6
+ By default the session is now only saved if the session is
+ modified, not if it is new like it was before.
+ """
+ return self.modified
+
+
+class SessionStore(object):
+ """Baseclass for all session stores. The Werkzeug contrib module does not
+ implement any useful stores besides the filesystem store, application
+ developers are encouraged to create their own stores.
+
+ :param session_class: The session class to use. Defaults to
+ :class:`Session`.
+ """
+
+ def __init__(self, session_class=None):
+ if session_class is None:
+ session_class = Session
+ self.session_class = session_class
+
+ def is_valid_key(self, key):
+ """Check if a key has the correct format."""
+ return _sha1_re.match(key) is not None
+
+ def generate_key(self, salt=None):
+ """Simple function that generates a new session key."""
+ return generate_key(salt)
+
+ def new(self):
+ """Generate a new session."""
+ return self.session_class({}, self.generate_key(), True)
+
+ def save(self, session):
+ """Save a session."""
+
+ def save_if_modified(self, session):
+ """Save if a session class wants an update."""
+ if session.should_save:
+ self.save(session)
+
+ def delete(self, session):
+ """Delete a session."""
+
+ def get(self, sid):
+ """Get a session for this sid or a new session object. This method
+ has to check if the session key is valid and create a new session if
+ that wasn't the case.
+ """
+ return self.session_class({}, sid, True)
+
+
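+# A minimal sketch, not part of the original module: the kind of custom
+# store the docstring above encourages. It keeps sessions in a plain
+# dict, so it only works within a single process.
+class MemorySessionStore(SessionStore):
+
+    def __init__(self, session_class=None):
+        SessionStore.__init__(self, session_class)
+        self.sessions = {}
+
+    def save(self, session):
+        self.sessions[session.sid] = dict(session)
+
+    def delete(self, session):
+        self.sessions.pop(session.sid, None)
+
+    def get(self, sid):
+        if not self.is_valid_key(sid) or sid not in self.sessions:
+            return self.new()
+        return self.session_class(self.sessions[sid], sid, False)
+
+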
+#: used for temporary files by the filesystem session store
+_fs_transaction_suffix = '.__wz_sess'
+
+
+class FilesystemSessionStore(SessionStore):
+ """Simple example session store that saves sessions on the filesystem.
+ This store works best on POSIX systems and Windows Vista / Windows
+ Server 2008 and newer.
+
+ .. versionchanged:: 0.6
+ `renew_missing` was added. Previously this was considered `True`,
+ now the default changed to `False` and it can be explicitly
+ deactivated.
+
+ :param path: the path to the folder used for storing the sessions.
+ If not provided the default temporary directory is used.
+ :param filename_template: a string template used to give the session
+ a filename. ``%s`` is replaced with the
+ session id.
+ :param session_class: The session class to use. Defaults to
+ :class:`Session`.
+ :param renew_missing: set to `True` if you want the store to
+ give the user a new sid if the session was
+ not yet saved.
+ """
+
+ def __init__(self, path=None, filename_template='werkzeug_%s.sess',
+ session_class=None, renew_missing=False, mode=0644):
+ SessionStore.__init__(self, session_class)
+ if path is None:
+ path = tempfile.gettempdir()
+ self.path = path
+ if isinstance(filename_template, unicode):
+ filename_template = filename_template.encode(
+ sys.getfilesystemencoding() or 'utf-8')
+ assert not filename_template.endswith(_fs_transaction_suffix), \
+ 'filename templates may not end with %s' % _fs_transaction_suffix
+ self.filename_template = filename_template
+ self.renew_missing = renew_missing
+ self.mode = mode
+
+ def get_session_filename(self, sid):
+ # out of the box, this should be a strict ASCII subset but
+ # you might reconfigure the session object to have a more
+ # arbitrary string.
+ if isinstance(sid, unicode):
+ sid = sid.encode(sys.getfilesystemencoding() or 'utf-8')
+ return path.join(self.path, self.filename_template % sid)
+
+ def save(self, session):
+ fn = self.get_session_filename(session.sid)
+ fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix,
+ dir=self.path)
+ f = os.fdopen(fd, 'wb')
+ try:
+ dump(dict(session), f, HIGHEST_PROTOCOL)
+ finally:
+ f.close()
+ try:
+ rename(tmp, fn)
+ os.chmod(fn, self.mode)
+ except (IOError, OSError):
+ pass
+
+ def delete(self, session):
+ fn = self.get_session_filename(session.sid)
+ try:
+ os.unlink(fn)
+ except OSError:
+ pass
+
+ def get(self, sid):
+ if not self.is_valid_key(sid):
+ return self.new()
+ try:
+ f = open(self.get_session_filename(sid), 'rb')
+ except IOError:
+ if self.renew_missing:
+ return self.new()
+ data = {}
+ else:
+ try:
+ try:
+ data = load(f)
+ except Exception:
+ data = {}
+ finally:
+ f.close()
+ return self.session_class(data, sid, False)
+
+ def list(self):
+ """Lists all sessions in the store.
+
+ .. versionadded:: 0.6
+ """
+ before, after = self.filename_template.split('%s', 1)
+ filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
+ re.escape(after)))
+ result = []
+ for filename in os.listdir(self.path):
+ #: this is a session that is still being saved.
+ if filename.endswith(_fs_transaction_suffix):
+ continue
+ match = filename_re.match(filename)
+ if match is not None:
+ result.append(match.group(1))
+ return result
+
+
+class SessionMiddleware(object):
+ """A simple middleware that puts the session object of a store provided
+ into the WSGI environ. It automatically sets cookies and restores
+ sessions.
+
+    However a middleware is not the preferred solution because it won't be
+    as fast as sessions managed by the application itself, and it puts a key
+    into the WSGI environment that is only relevant for the application,
+    which is against the concept of WSGI.
+
+ The cookie parameters are the same as for the :func:`~dump_cookie`
+ function just prefixed with ``cookie_``. Additionally `max_age` is
+ called `cookie_age` and not `cookie_max_age` because of backwards
+ compatibility.
+ """
+
+ def __init__(self, app, store, cookie_name='session_id',
+ cookie_age=None, cookie_expires=None, cookie_path='/',
+ cookie_domain=None, cookie_secure=None,
+ cookie_httponly=False, environ_key='werkzeug.session'):
+ self.app = app
+ self.store = store
+ self.cookie_name = cookie_name
+ self.cookie_age = cookie_age
+ self.cookie_expires = cookie_expires
+ self.cookie_path = cookie_path
+ self.cookie_domain = cookie_domain
+ self.cookie_secure = cookie_secure
+ self.cookie_httponly = cookie_httponly
+ self.environ_key = environ_key
+
+ def __call__(self, environ, start_response):
+ cookie = parse_cookie(environ.get('HTTP_COOKIE', ''))
+ sid = cookie.get(self.cookie_name, None)
+ if sid is None:
+ session = self.store.new()
+ else:
+ session = self.store.get(sid)
+ environ[self.environ_key] = session
+
+ def injecting_start_response(status, headers, exc_info=None):
+ if session.should_save:
+ self.store.save(session)
+ headers.append(('Set-Cookie', dump_cookie(self.cookie_name,
+ session.sid, self.cookie_age,
+ self.cookie_expires, self.cookie_path,
+ self.cookie_domain, self.cookie_secure,
+ self.cookie_httponly)))
+ return start_response(status, headers, exc_info)
+ return ClosingIterator(self.app(environ, injecting_start_response),
+ lambda: self.store.save_if_modified(session))
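+
+
+# A minimal wiring sketch, not part of the original module: the session is
+# read from ``environ['werkzeug.session']`` as described above; cookie
+# parameters are passed with the ``cookie_`` prefix.
+def _example_app(environ, start_response):
+    session = environ['werkzeug.session']
+    session['counter'] = session.get('counter', 0) + 1
+    start_response('200 OK', [('Content-Type', 'text/plain')])
+    return ['visit number %d' % session['counter']]
+
+session_app = SessionMiddleware(_example_app, FilesystemSessionStore(),
+                                cookie_httponly=True)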
diff --git a/websdk/werkzeug/contrib/testtools.py b/websdk/werkzeug/contrib/testtools.py
new file mode 100644
index 0000000..9bbf76a
--- /dev/null
+++ b/websdk/werkzeug/contrib/testtools.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+"""
+ werkzeug.contrib.testtools
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This module implements extended wrappers for simplified testing.
+
+ `TestResponse`
+ A response wrapper which adds various cached attributes for
+ simplified assertions on various content types.
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+from werkzeug.utils import cached_property, import_string
+from werkzeug.wrappers import Response
+
+from warnings import warn
+warn(DeprecationWarning('werkzeug.contrib.testtools is deprecated and '
+ 'will be removed with Werkzeug 1.0'))
+
+
+class ContentAccessors(object):
+ """
+ A mixin class for response objects that provides a couple of useful
+ accessors for unittesting.
+ """
+
+ def xml(self):
+ """Get an etree if possible."""
+ if 'xml' not in self.mimetype:
+ raise AttributeError(
+                'Not an XML response (Content-Type: %s)'
+ % self.mimetype)
+ for module in ['xml.etree.ElementTree', 'ElementTree',
+ 'elementtree.ElementTree']:
+ etree = import_string(module, silent=True)
+ if etree is not None:
+                return etree.XML(self.data)
+ raise RuntimeError('You must have ElementTree installed '
+ 'to use TestResponse.xml')
+ xml = cached_property(xml)
+
+ def lxml(self):
+ """Get an lxml etree if possible."""
+ if ('html' not in self.mimetype and 'xml' not in self.mimetype):
+ raise AttributeError('Not an HTML/XML response')
+ from lxml import etree
+ try:
+ from lxml.html import fromstring
+ except ImportError:
+ fromstring = etree.HTML
+        if self.mimetype == 'text/html':
+ return fromstring(self.data)
+ return etree.XML(self.data)
+ lxml = cached_property(lxml)
+
+ def json(self):
+ """Get the result of simplejson.loads if possible."""
+ if 'json' not in self.mimetype:
+ raise AttributeError('Not a JSON response')
+ try:
+ from simplejson import loads
+ except ImportError:
+ from json import loads
+ return loads(self.data)
+ json = cached_property(json)
+
+
+class TestResponse(Response, ContentAccessors):
+ """Pass this to `werkzeug.test.Client` for easier unittesting."""
diff --git a/websdk/werkzeug/contrib/wrappers.py b/websdk/werkzeug/contrib/wrappers.py
new file mode 100644
index 0000000..bd6a2d4
--- /dev/null
+++ b/websdk/werkzeug/contrib/wrappers.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+"""
+ werkzeug.contrib.wrappers
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Extra wrappers or mixins contributed by the community. These wrappers can
+ be mixed in into request objects to add extra functionality.
+
+ Example::
+
+ from werkzeug.wrappers import Request as RequestBase
+ from werkzeug.contrib.wrappers import JSONRequestMixin
+
+ class Request(RequestBase, JSONRequestMixin):
+ pass
+
+ Afterwards this request object provides the extra functionality of the
+ :class:`JSONRequestMixin`.
+
+ :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+import codecs
+from werkzeug.exceptions import BadRequest
+from werkzeug.utils import cached_property
+from werkzeug.http import dump_options_header, parse_options_header
+from werkzeug._internal import _decode_unicode
+try:
+ from simplejson import loads
+except ImportError:
+ from json import loads
+
+
+def is_known_charset(charset):
+ """Checks if the given charset is known to Python."""
+ try:
+ codecs.lookup(charset)
+ except LookupError:
+ return False
+ return True
+
+
+class JSONRequestMixin(object):
+ """Add json method to a request object. This will parse the input data
+ through simplejson if possible.
+
+ :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
+ is not json or if the data itself cannot be parsed as json.
+ """
+
+ @cached_property
+ def json(self):
+ """Get the result of simplejson.loads if possible."""
+ if 'json' not in self.environ.get('CONTENT_TYPE', ''):
+ raise BadRequest('Not a JSON request')
+ try:
+ return loads(self.data)
+ except Exception:
+ raise BadRequest('Unable to read JSON request')
+
+
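+# A minimal usage sketch, not part of the original module, following the
+# mixin pattern shown in the module docstring. EnvironBuilder comes from
+# werkzeug.test and fakes a POST request carrying a JSON body.
+def _json_mixin_example():
+    from werkzeug.test import EnvironBuilder
+    from werkzeug.wrappers import Request
+
+    class JSONRequest(JSONRequestMixin, Request):
+        pass
+
+    builder = EnvironBuilder(method='POST', data='{"answer": 42}',
+                             content_type='application/json')
+    request = JSONRequest(builder.get_environ())
+    assert request.json['answer'] == 42
+
+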
+class ProtobufRequestMixin(object):
+ """Add protobuf parsing method to a request object. This will parse the
+ input data through `protobuf`_ if possible.
+
+ :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
+    is not protobuf or if the data itself cannot be parsed properly.
+
+ .. _protobuf: http://code.google.com/p/protobuf/
+ """
+
+ #: by default the :class:`ProtobufRequestMixin` will raise a
+ #: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
+ #: initialized. You can bypass that check by setting this
+ #: attribute to `False`.
+ protobuf_check_initialization = True
+
+ def parse_protobuf(self, proto_type):
+ """Parse the data into an instance of proto_type."""
+ if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''):
+ raise BadRequest('Not a Protobuf request')
+
+ obj = proto_type()
+ try:
+ obj.ParseFromString(self.data)
+ except Exception:
+ raise BadRequest("Unable to parse Protobuf request")
+
+ # Fail if not all required fields are set
+ if self.protobuf_check_initialization and not obj.IsInitialized():
+ raise BadRequest("Partial Protobuf request")
+
+ return obj
+
+
+class RoutingArgsRequestMixin(object):
+ """This request mixin adds support for the wsgiorg routing args
+ `specification`_.
+
+ .. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args
+ """
+
+ def _get_routing_args(self):
+        return self.environ.get('wsgiorg.routing_args', ((), {}))[0]
+
+ def _set_routing_args(self, value):
+ if self.shallow:
+ raise RuntimeError('A shallow request tried to modify the WSGI '
+ 'environment. If you really want to do that, '
+ 'set `shallow` to False.')
+ self.environ['wsgiorg.routing_args'] = (value, self.routing_vars)
+
+ routing_args = property(_get_routing_args, _set_routing_args, doc='''
+ The positional URL arguments as `tuple`.''')
+ del _get_routing_args, _set_routing_args
+
+ def _get_routing_vars(self):
+ rv = self.environ.get('wsgiorg.routing_args')
+ if rv is not None:
+ return rv[1]
+ rv = {}
+ if not self.shallow:
+ self.routing_vars = rv
+ return rv
+
+ def _set_routing_vars(self, value):
+ if self.shallow:
+ raise RuntimeError('A shallow request tried to modify the WSGI '
+ 'environment. If you really want to do that, '
+ 'set `shallow` to False.')
+ self.environ['wsgiorg.routing_args'] = (self.routing_args, value)
+
+ routing_vars = property(_get_routing_vars, _set_routing_vars, doc='''
+ The keyword URL arguments as `dict`.''')
+ del _get_routing_vars, _set_routing_vars
+
+
+class ReverseSlashBehaviorRequestMixin(object):
+ """This mixin reverses the trailing slash behavior of :attr:`script_root`
+ and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin`
+ directly on the paths.
+
+    Because it changes the behavior of :class:`Request` this class has to be
+ mixed in *before* the actual request class::
+
+ class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
+ pass
+
+ This example shows the differences (for an application mounted on
+ `/application` and the request going to `/application/foo/bar`):
+
+ +---------------+-------------------+---------------------+
+ | | normal behavior | reverse behavior |
+ +===============+===================+=====================+
+ | `script_root` | ``/application`` | ``/application/`` |
+ +---------------+-------------------+---------------------+
+ | `path` | ``/foo/bar`` | ``foo/bar`` |
+ +---------------+-------------------+---------------------+
+ """
+
+ @cached_property
+ def path(self):
+ """Requested path as unicode. This works a bit like the regular path
+ info in the WSGI environment but will not include a leading slash.
+ """
+ path = (self.environ.get('PATH_INFO') or '').lstrip('/')
+ return _decode_unicode(path, self.charset, self.encoding_errors)
+
+ @cached_property
+ def script_root(self):
+ """The root path of the script includling a trailing slash."""
+ path = (self.environ.get('SCRIPT_NAME') or '').rstrip('/') + '/'
+ return _decode_unicode(path, self.charset, self.encoding_errors)
+
+
+class DynamicCharsetRequestMixin(object):
+ """"If this mixin is mixed into a request class it will provide
+ a dynamic `charset` attribute. This means that if the charset is
+ transmitted in the content type headers it's used from there.
+
+    Because it changes the behavior of :class:`Request` this class has
+ to be mixed in *before* the actual request class::
+
+ class MyRequest(DynamicCharsetRequestMixin, Request):
+ pass
+
+ By default the request object assumes that the URL charset is the
+ same as the data charset. If the charset varies on each request
+ based on the transmitted data it's not a good idea to let the URLs
+    change based on that. Most browsers assume either utf-8 or latin1
+    for the URLs if they have trouble figuring it out. It's strongly
+ recommended to set the URL charset to utf-8::
+
+ class MyRequest(DynamicCharsetRequestMixin, Request):
+ url_charset = 'utf-8'
+
+ .. versionadded:: 0.6
+ """
+
+ #: the default charset that is assumed if the content type header
+ #: is missing or does not contain a charset parameter. The default
+ #: is latin1 which is what HTTP specifies as default charset.
+ #: You may however want to set this to utf-8 to better support
+ #: browsers that do not transmit a charset for incoming data.
+ default_charset = 'latin1'
+
+ def unknown_charset(self, charset):
+ """Called if a charset was provided but is not supported by
+        the Python codecs module. By default latin1 is then assumed
+        so that no information is lost; you may override this method
+        to change the behavior.
+
+ :param charset: the charset that was not found.
+ :return: the replacement charset.
+ """
+ return 'latin1'
+
+ @cached_property
+ def charset(self):
+ """The charset from the content type."""
+ header = self.environ.get('CONTENT_TYPE')
+ if header:
+ ct, options = parse_options_header(header)
+ charset = options.get('charset')
+ if charset:
+ if is_known_charset(charset):
+ return charset
+ return self.unknown_charset(charset)
+ return self.default_charset
+
+
+class DynamicCharsetResponseMixin(object):
+ """If this mixin is mixed into a response class it will provide
+    a dynamic `charset` attribute. This means that the charset is
+    looked up in and stored into the `Content-Type` header and updates
+    itself automatically. This also means a small performance hit but
+ can be useful if you're working with different charsets on
+ responses.
+
+    Because the charset attribute is now a property rather than a plain
+    class-level attribute, the default value is stored in `default_charset`.
+
+ Because it changes the behavior or :class:`Response` this class has
+ to be mixed in *before* the actual response class::
+
+ class MyResponse(DynamicCharsetResponseMixin, Response):
+ pass
+
+ .. versionadded:: 0.6
+ """
+
+ #: the default charset.
+ default_charset = 'utf-8'
+
+ def _get_charset(self):
+ header = self.headers.get('content-type')
+ if header:
+ charset = parse_options_header(header)[1].get('charset')
+ if charset:
+ return charset
+ return self.default_charset
+
+ def _set_charset(self, charset):
+ header = self.headers.get('content-type')
+ ct, options = parse_options_header(header)
+ if not ct:
+ raise TypeError('Cannot set charset if Content-Type '
+ 'header is missing.')
+ options['charset'] = charset
+ self.headers['Content-Type'] = dump_options_header(ct, options)
+
+ charset = property(_get_charset, _set_charset, doc="""
+ The charset for the response. It's stored inside the
+ Content-Type header as a parameter.""")
+ del _get_charset, _set_charset
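+
+
+# A minimal usage sketch, not part of the original module: the charset
+# property rewrites the parameter inside the Content-Type header in place.
+def _dynamic_charset_example():
+    from werkzeug.wrappers import Response
+
+    class DynamicResponse(DynamicCharsetResponseMixin, Response):
+        pass
+
+    response = DynamicResponse('hello', mimetype='text/plain')
+    response.charset = 'iso-8859-15'
+    assert 'charset=iso-8859-15' in response.headers['Content-Type']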