Diffstat (limited to 'tutorius')
21 files changed, 3533 insertions, 225 deletions
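Most of the churn below comes from vendoring the httplib2 client (pulled from python-rest-client's SVN trunk, complete with its .svn metadata) under tutorius/apilib; the one change to existing tutorius code shown in this diff is TProbe.py passing the activity into action.do(). As a rough sketch of how the vendored client is driven — the tutorius.apilib import path is inferred from the new file layout, and the URL, credentials, and cache directory are placeholders, not anything this commit ships:

    # Python 2, matching the vendored source
    from tutorius.apilib import httplib2   # assumed path; upstream code is just `import httplib2`

    h = httplib2.Http(".cache")                 # a string cache argument becomes a FileCache directory
    h.add_credentials("user", "secret")         # tried automatically on a 401 challenge
    resp, content = h.request("http://example.org/resource", "GET")
    print resp.status, resp.fromcache           # Response is a dict subclass with status/cache flags

request() returns a (Response, body) tuple, following redirects up to DEFAULT_MAX_REDIRECTS and caching GET responses per their Cache-Control headers, as implemented in the __init__.py added below.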
diff --git a/tutorius/TProbe.py b/tutorius/TProbe.py index 076476c..0c79690 100644 --- a/tutorius/TProbe.py +++ b/tutorius/TProbe.py @@ -130,8 +130,8 @@ class TProbe(dbus.service.Object): if action._props: action._props.update(loaded_action._props) - action.do() - + action.do(activity=self._activity) + return address @dbus.service.method("org.tutorius.ProbeInterface", diff --git a/tutorius/apilib/__init__.py b/tutorius/apilib/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/tutorius/apilib/__init__.py diff --git a/tutorius/apilib/httplib2/.svn/all-wcprops b/tutorius/apilib/httplib2/.svn/all-wcprops new file mode 100644 index 0000000..601feb8 --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/all-wcprops @@ -0,0 +1,29 @@ +K 25 +svn:wc:ra_dav:version-url +V 30 +/svn/!svn/ver/2/trunk/httplib2 +END +iri2uri.pyc +K 25 +svn:wc:ra_dav:version-url +V 42 +/svn/!svn/ver/2/trunk/httplib2/iri2uri.pyc +END +__init__.py +K 25 +svn:wc:ra_dav:version-url +V 42 +/svn/!svn/ver/2/trunk/httplib2/__init__.py +END +__init__.pyc +K 25 +svn:wc:ra_dav:version-url +V 43 +/svn/!svn/ver/2/trunk/httplib2/__init__.pyc +END +iri2uri.py +K 25 +svn:wc:ra_dav:version-url +V 41 +/svn/!svn/ver/2/trunk/httplib2/iri2uri.py +END diff --git a/tutorius/apilib/httplib2/.svn/entries b/tutorius/apilib/httplib2/.svn/entries new file mode 100644 index 0000000..1a3c5d2 --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/entries @@ -0,0 +1,66 @@ +8 + +dir +2 +https://python-rest-client.googlecode.com/svn/trunk/httplib2 +https://python-rest-client.googlecode.com/svn + + + +2008-05-14T17:00:19.245332Z +2 +bosteen + + +svn:special svn:externals svn:needs-lock + +iri2uri.pyc +file + + + + +2008-05-14T14:48:03.000000Z +6f9a0833a6dc59c42b7aec0dfdf39dd0 +2008-05-14T17:00:19.245332Z +2 +bosteen +has-props + +__init__.py +file + + + + +2007-10-23T15:25:46.000000Z +00c607566b698248d5a5c40508843cd7 +2008-05-14T17:00:19.245332Z +2 +bosteen + +__init__.pyc +file + + + + +2008-05-14T14:48:03.000000Z +bdf8607edad61c67d890de558db8006c +2008-05-14T17:00:19.245332Z +2 +bosteen +has-props + +iri2uri.py +file + + + + +2007-09-04T04:02:06.000000Z +c0f9c5cb229a22e21575322b4ba77741 +2008-05-14T17:00:19.245332Z +2 +bosteen + diff --git a/tutorius/apilib/httplib2/.svn/format b/tutorius/apilib/httplib2/.svn/format new file mode 100644 index 0000000..45a4fb7 --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/format @@ -0,0 +1 @@ +8 diff --git a/tutorius/apilib/httplib2/.svn/prop-base/__init__.pyc.svn-base b/tutorius/apilib/httplib2/.svn/prop-base/__init__.pyc.svn-base new file mode 100644 index 0000000..5e9587e --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/prop-base/__init__.pyc.svn-base @@ -0,0 +1,5 @@ +K 13 +svn:mime-type +V 24 +application/octet-stream +END diff --git a/tutorius/apilib/httplib2/.svn/prop-base/iri2uri.pyc.svn-base b/tutorius/apilib/httplib2/.svn/prop-base/iri2uri.pyc.svn-base new file mode 100644 index 0000000..5e9587e --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/prop-base/iri2uri.pyc.svn-base @@ -0,0 +1,5 @@ +K 13 +svn:mime-type +V 24 +application/octet-stream +END diff --git a/tutorius/apilib/httplib2/.svn/text-base/__init__.py.svn-base b/tutorius/apilib/httplib2/.svn/text-base/__init__.py.svn-base new file mode 100644 index 0000000..982bf8a --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/text-base/__init__.py.svn-base @@ -0,0 +1,1123 @@ +from __future__ import generators +""" +httplib2 + +A caching http interface that supports ETags and gzip +to conserve bandwidth. 
+ +Requires Python 2.3 or later + +Changelog: +2007-08-18, Rick: Modified so it's able to use a socks proxy if needed. + +""" + +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)", + "James Antill", + "Xavier Verges Farrero", + "Jonathan Feinberg", + "Blair Zajac", + "Sam Ruby", + "Louis Nyffenegger"] +__license__ = "MIT" +__version__ = "$Rev: 259 $" + +import re +import sys +import md5 +import email +import email.Utils +import email.Message +import StringIO +import gzip +import zlib +import httplib +import urlparse +import base64 +import os +import copy +import calendar +import time +import random +import sha +import hmac +from gettext import gettext as _ +import socket + +try: + import socks +except ImportError: + socks = None + +if sys.version_info >= (2,3): + from iri2uri import iri2uri +else: + def iri2uri(uri): + return uri + +__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error', + 'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent', + 'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError', + 'debuglevel'] + + +# The httplib debug level, set to a non-zero value to get debug output +debuglevel = 0 + +# Python 2.3 support +if sys.version_info < (2,4): + def sorted(seq): + seq.sort() + return seq + +# Python 2.3 support +def HTTPResponse__getheaders(self): + """Return list of (header, value) tuples.""" + if self.msg is None: + raise httplib.ResponseNotReady() + return self.msg.items() + +if not hasattr(httplib.HTTPResponse, 'getheaders'): + httplib.HTTPResponse.getheaders = HTTPResponse__getheaders + +# All exceptions raised here derive from HttpLib2Error +class HttpLib2Error(Exception): pass + +# Some exceptions can be caught and optionally +# be turned back into responses. +class HttpLib2ErrorWithResponse(HttpLib2Error): + def __init__(self, desc, response, content): + self.response = response + self.content = content + HttpLib2Error.__init__(self, desc) + +class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass +class RedirectLimit(HttpLib2ErrorWithResponse): pass +class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass +class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass +class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass + +class RelativeURIError(HttpLib2Error): pass +class ServerNotFoundError(HttpLib2Error): pass + +# Open Items: +# ----------- +# Proxy support + +# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) + +# Pluggable cache storage (supports storing the cache in +# flat files by default. We need a plug-in architecture +# that can support Berkeley DB and Squid) + +# == Known Issues == +# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. +# Does not handle Cache-Control: max-stale +# Does not use Age: headers when calculating cache freshness. + + +# The number of redirections to follow before giving up. +# Note that only GET redirects are automatically followed. +# Will also honor 301 requests by saving that info and never +# requesting that URI again. 
+DEFAULT_MAX_REDIRECTS = 5 + +# Which headers are hop-by-hop headers by default +HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade'] + +def _get_end2end_headers(response): + hopbyhop = list(HOP_BY_HOP) + hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')]) + return [header for header in response.keys() if header not in hopbyhop] + +URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") + +def parse_uri(uri): + """Parses a URI using the regex given in Appendix B of RFC 3986. + + (scheme, authority, path, query, fragment) = parse_uri(uri) + """ + groups = URI.match(uri).groups() + return (groups[1], groups[3], groups[4], groups[6], groups[8]) + +def urlnorm(uri): + (scheme, authority, path, query, fragment) = parse_uri(uri) + if not scheme or not authority: + raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) + authority = authority.lower() + scheme = scheme.lower() + if not path: + path = "/" + # Could do syntax based normalization of the URI before + # computing the digest. See Section 6.2.2 of Std 66. + request_uri = query and "?".join([path, query]) or path + scheme = scheme.lower() + defrag_uri = scheme + "://" + authority + request_uri + return scheme, authority, request_uri, defrag_uri + + +# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/) +re_url_scheme = re.compile(r'^\w+://') +re_slash = re.compile(r'[?/:|]+') + +def safename(filename): + """Return a filename suitable for the cache. + + Strips dangerous and common characters to create a filename we + can use to store the cache in. + """ + + try: + if re_url_scheme.match(filename): + if isinstance(filename,str): + filename = filename.decode('utf-8') + filename = filename.encode('idna') + else: + filename = filename.encode('idna') + except UnicodeError: + pass + if isinstance(filename,unicode): + filename=filename.encode('utf-8') + filemd5 = md5.new(filename).hexdigest() + filename = re_url_scheme.sub("", filename) + filename = re_slash.sub(",", filename) + + # limit length of filename + if len(filename)>200: + filename=filename[:200] + return ",".join((filename, filemd5)) + +NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+') +def _normalize_headers(headers): + return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()]) + +def _parse_cache_control(headers): + retval = {} + if headers.has_key('cache-control'): + parts = headers['cache-control'].split(',') + parts_with_args = [tuple([x.strip() for x in part.split("=")]) for part in parts if -1 != part.find("=")] + parts_wo_args = [(name.strip(), 1) for name in parts if -1 == name.find("=")] + retval = dict(parts_with_args + parts_wo_args) + return retval + +# Whether to use a strict mode to parse WWW-Authenticate headers +# Might lead to bad results in case of ill-formed header value, +# so disabled by default, falling back to relaxed parsing. +# Set to true to turn on, usefull for testing servers. +USE_WWW_AUTH_STRICT_PARSING = 0 + +# In regex below: +# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP +# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" 
matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space +# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both: +# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"? +WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$") +WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$") +UNQUOTE_PAIRS = re.compile(r'\\(.)') +def _parse_www_authenticate(headers, headername='www-authenticate'): + """Returns a dictionary of dictionaries, one dict + per auth_scheme.""" + retval = {} + if headers.has_key(headername): + authenticate = headers[headername].strip() + www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED + while authenticate: + # Break off the scheme at the beginning of the line + if headername == 'authentication-info': + (auth_scheme, the_rest) = ('digest', authenticate) + else: + (auth_scheme, the_rest) = authenticate.split(" ", 1) + # Now loop over all the key value pairs that come after the scheme, + # being careful not to roll into the next scheme + match = www_auth.search(the_rest) + auth_params = {} + while match: + if match and len(match.groups()) == 3: + (key, value, the_rest) = match.groups() + auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')]) + match = www_auth.search(the_rest) + retval[auth_scheme.lower()] = auth_params + authenticate = the_rest.strip() + return retval + + +def _entry_disposition(response_headers, request_headers): + """Determine freshness from the Date, Expires and Cache-Control headers. + + We don't handle the following: + + 1. Cache-Control: max-stale + 2. Age: headers are not used in the calculations. + + Not that this algorithm is simpler than you might think + because we are operating as a private (non-shared) cache. + This lets us ignore 's-maxage'. We can also ignore + 'proxy-invalidate' since we aren't a proxy. + We will never return a stale document as + fresh as a design decision, and thus the non-implementation + of 'max-stale'. This also lets us safely ignore 'must-revalidate' + since we operate as if every server has sent 'must-revalidate'. + Since we are private we get to ignore both 'public' and + 'private' parameters. We also ignore 'no-transform' since + we don't do any transformations. + The 'no-store' parameter is handled at a higher level. 
+ So the only Cache-Control parameters we look at are: + + no-cache + only-if-cached + max-age + min-fresh + """ + + retval = "STALE" + cc = _parse_cache_control(request_headers) + cc_response = _parse_cache_control(response_headers) + + if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1: + retval = "TRANSPARENT" + if 'cache-control' not in request_headers: + request_headers['cache-control'] = 'no-cache' + elif cc.has_key('no-cache'): + retval = "TRANSPARENT" + elif cc_response.has_key('no-cache'): + retval = "STALE" + elif cc.has_key('only-if-cached'): + retval = "FRESH" + elif response_headers.has_key('date'): + date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date'])) + now = time.time() + current_age = max(0, now - date) + if cc_response.has_key('max-age'): + try: + freshness_lifetime = int(cc_response['max-age']) + except ValueError: + freshness_lifetime = 0 + elif response_headers.has_key('expires'): + expires = email.Utils.parsedate_tz(response_headers['expires']) + if None == expires: + freshness_lifetime = 0 + else: + freshness_lifetime = max(0, calendar.timegm(expires) - date) + else: + freshness_lifetime = 0 + if cc.has_key('max-age'): + try: + freshness_lifetime = int(cc['max-age']) + except ValueError: + freshness_lifetime = 0 + if cc.has_key('min-fresh'): + try: + min_fresh = int(cc['min-fresh']) + except ValueError: + min_fresh = 0 + current_age += min_fresh + if freshness_lifetime > current_age: + retval = "FRESH" + return retval + +def _decompressContent(response, new_content): + content = new_content + try: + encoding = response.get('content-encoding', None) + if encoding in ['gzip', 'deflate']: + if encoding == 'gzip': + content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read() + if encoding == 'deflate': + content = zlib.decompress(content) + response['content-length'] = str(len(content)) + del response['content-encoding'] + except IOError: + content = "" + raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content) + return content + +def _updateCache(request_headers, response_headers, content, cache, cachekey): + if cachekey: + cc = _parse_cache_control(request_headers) + cc_response = _parse_cache_control(response_headers) + if cc.has_key('no-store') or cc_response.has_key('no-store'): + cache.delete(cachekey) + else: + info = email.Message.Message() + for key, value in response_headers.iteritems(): + if key not in ['status','content-encoding','transfer-encoding']: + info[key] = value + + status = response_headers.status + if status == 304: + status = 200 + + status_header = 'status: %d\r\n' % response_headers.status + + header_str = info.as_string() + + header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str) + text = "".join([status_header, header_str, content]) + + cache.set(cachekey, text) + +def _cnonce(): + dig = md5.new("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest() + return dig[:16] + +def _wsse_username_token(cnonce, iso_now, password): + return base64.encodestring(sha.new("%s%s%s" % (cnonce, iso_now, password)).digest()).strip() + + +# For credentials we need two things, first +# a pool of credential to try (not necesarily tied to BAsic, Digest, etc.) 
+# Then we also need a list of URIs that have already demanded authentication +# That list is tricky since sub-URIs can take the same auth, or the +# auth scheme may change as you descend the tree. +# So we also need each Auth instance to be able to tell us +# how close to the 'top' it is. + +class Authentication(object): + def __init__(self, credentials, host, request_uri, headers, response, content, http): + (scheme, authority, path, query, fragment) = parse_uri(request_uri) + self.path = path + self.host = host + self.credentials = credentials + self.http = http + + def depth(self, request_uri): + (scheme, authority, path, query, fragment) = parse_uri(request_uri) + return request_uri[len(self.path):].count("/") + + def inscope(self, host, request_uri): + # XXX Should we normalize the request_uri? + (scheme, authority, path, query, fragment) = parse_uri(request_uri) + return (host == self.host) and path.startswith(self.path) + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header. Over-rise this in sub-classes.""" + pass + + def response(self, response, content): + """Gives us a chance to update with new nonces + or such returned from the last authorized response. + Over-rise this in sub-classes if necessary. + + Return TRUE is the request is to be retried, for + example Digest may return stale=true. + """ + return False + + + +class BasicAuthentication(Authentication): + def __init__(self, credentials, host, request_uri, headers, response, content, http): + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers['authorization'] = 'Basic ' + base64.encodestring("%s:%s" % self.credentials).strip() + + +class DigestAuthentication(Authentication): + """Only do qop='auth' and MD5, since that + is all Apache currently implements""" + def __init__(self, credentials, host, request_uri, headers, response, content, http): + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + challenge = _parse_www_authenticate(response, 'www-authenticate') + self.challenge = challenge['digest'] + qop = self.challenge.get('qop') + self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None + if self.challenge['qop'] is None: + raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop)) + self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5') + if self.challenge['algorithm'] != 'MD5': + raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." 
% self.challenge['algorithm'])) + self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]]) + self.challenge['nc'] = 1 + + def request(self, method, request_uri, headers, content, cnonce = None): + """Modify the request headers""" + H = lambda x: md5.new(x).hexdigest() + KD = lambda s, d: H("%s:%s" % (s, d)) + A2 = "".join([method, ":", request_uri]) + self.challenge['cnonce'] = cnonce or _cnonce() + request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'], + '%08x' % self.challenge['nc'], + self.challenge['cnonce'], + self.challenge['qop'], H(A2) + )) + headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % ( + self.credentials[0], + self.challenge['realm'], + self.challenge['nonce'], + request_uri, + self.challenge['algorithm'], + request_digest, + self.challenge['qop'], + self.challenge['nc'], + self.challenge['cnonce'], + ) + self.challenge['nc'] += 1 + + def response(self, response, content): + if not response.has_key('authentication-info'): + challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {}) + if 'true' == challenge.get('stale'): + self.challenge['nonce'] = challenge['nonce'] + self.challenge['nc'] = 1 + return True + else: + updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {}) + + if updated_challenge.has_key('nextnonce'): + self.challenge['nonce'] = updated_challenge['nextnonce'] + self.challenge['nc'] = 1 + return False + + +class HmacDigestAuthentication(Authentication): + """Adapted from Robert Sayre's code and DigestAuthentication above.""" + __author__ = "Thomas Broyer (t.broyer@ltgt.net)" + + def __init__(self, credentials, host, request_uri, headers, response, content, http): + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + challenge = _parse_www_authenticate(response, 'www-authenticate') + self.challenge = challenge['hmacdigest'] + # TODO: self.challenge['domain'] + self.challenge['reason'] = self.challenge.get('reason', 'unauthorized') + if self.challenge['reason'] not in ['unauthorized', 'integrity']: + self.challenge['reason'] = 'unauthorized' + self.challenge['salt'] = self.challenge.get('salt', '') + if not self.challenge.get('snonce'): + raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty.")) + self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1') + if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']: + raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm'])) + self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1') + if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']: + raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." 
% self.challenge['pw-algorithm'])) + if self.challenge['algorithm'] == 'HMAC-MD5': + self.hashmod = md5 + else: + self.hashmod = sha + if self.challenge['pw-algorithm'] == 'MD5': + self.pwhashmod = md5 + else: + self.pwhashmod = sha + self.key = "".join([self.credentials[0], ":", + self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(), + ":", self.challenge['realm'] + ]) + self.key = self.pwhashmod.new(self.key).hexdigest().lower() + + def request(self, method, request_uri, headers, content): + """Modify the request headers""" + keys = _get_end2end_headers(headers) + keylist = "".join(["%s " % k for k in keys]) + headers_val = "".join([headers[k] for k in keys]) + created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime()) + cnonce = _cnonce() + request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val) + request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower() + headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % ( + self.credentials[0], + self.challenge['realm'], + self.challenge['snonce'], + cnonce, + request_uri, + created, + request_digest, + keylist, + ) + + def response(self, response, content): + challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {}) + if challenge.get('reason') in ['integrity', 'stale']: + return True + return False + + +class WsseAuthentication(Authentication): + """This is thinly tested and should not be relied upon. + At this time there isn't any third party server to test against. + Blogger and TypePad implemented this algorithm at one point + but Blogger has since switched to Basic over HTTPS and + TypePad has implemented it wrong, by never issuing a 401 + challenge but instead requiring your client to telepathically know that + their endpoint is expecting WSSE profile="UsernameToken".""" + def __init__(self, credentials, host, request_uri, headers, response, content, http): + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers['Authorization'] = 'WSSE profile="UsernameToken"' + iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + cnonce = _cnonce() + password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1]) + headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % ( + self.credentials[0], + password_digest, + cnonce, + iso_now) + +class GoogleLoginAuthentication(Authentication): + def __init__(self, credentials, host, request_uri, headers, response, content, http): + from urllib import urlencode + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + challenge = _parse_www_authenticate(response, 'www-authenticate') + service = challenge['googlelogin'].get('service', 'xapi') + # Bloggger actually returns the service in the challenge + # For the rest we guess based on the URI + if service == 'xapi' and request_uri.find("calendar") > 0: + service = "cl" + # No point in guessing Base or Spreadsheet + #elif request_uri.find("spreadsheets") > 0: + # service = "wise" + + auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent']) + resp, content = 
self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'}) + lines = content.split('\n') + d = dict([tuple(line.split("=", 1)) for line in lines if line]) + if resp.status == 403: + self.Auth = "" + else: + self.Auth = d['Auth'] + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers['authorization'] = 'GoogleLogin Auth=' + self.Auth + + +AUTH_SCHEME_CLASSES = { + "basic": BasicAuthentication, + "wsse": WsseAuthentication, + "digest": DigestAuthentication, + "hmacdigest": HmacDigestAuthentication, + "googlelogin": GoogleLoginAuthentication +} + +AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] + +def _md5(s): + return + +class FileCache(object): + """Uses a local directory as a store for cached files. + Not really safe to use if multiple threads or processes are going to + be running on the same cache. + """ + def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior + self.cache = cache + self.safe = safe + if not os.path.exists(cache): + os.makedirs(self.cache) + + def get(self, key): + retval = None + cacheFullPath = os.path.join(self.cache, self.safe(key)) + try: + f = file(cacheFullPath, "r") + retval = f.read() + f.close() + except IOError: + pass + return retval + + def set(self, key, value): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + f = file(cacheFullPath, "w") + f.write(value) + f.close() + + def delete(self, key): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + if os.path.exists(cacheFullPath): + os.remove(cacheFullPath) + +class Credentials(object): + def __init__(self): + self.credentials = [] + + def add(self, name, password, domain=""): + self.credentials.append((domain.lower(), name, password)) + + def clear(self): + self.credentials = [] + + def iter(self, domain): + for (cdomain, name, password) in self.credentials: + if cdomain == "" or domain == cdomain: + yield (name, password) + +class KeyCerts(Credentials): + """Identical to Credentials except that + name/password are mapped to key/cert.""" + pass + + +class ProxyInfo(object): + """Collect information required to use a proxy.""" + def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None): + """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX + constants. For example: + +p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000) + """ + self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass + + def astuple(self): + return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, + self.proxy_user, self.proxy_pass) + + def isgood(self): + return socks and (self.proxy_host != None) and (self.proxy_port != None) + + +class HTTPConnectionWithTimeout(httplib.HTTPConnection): + """HTTPConnection subclass that supports timeouts""" + + def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None): + httplib.HTTPConnection.__init__(self, host, port, strict) + self.timeout = timeout + self.proxy_info = proxy_info + + def connect(self): + """Connect to the host and port specified in __init__.""" + # Mostly verbatim from httplib.py. 
+ msg = "getaddrinfo returns an empty list" + for res in socket.getaddrinfo(self.host, self.port, 0, + socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + if self.proxy_info and self.proxy_info.isgood(): + self.sock = socks.socksocket(af, socktype, proto) + self.sock.setproxy(*self.proxy_info.astuple()) + else: + self.sock = socket.socket(af, socktype, proto) + # Different from httplib: support timeouts. + if self.timeout is not None: + self.sock.settimeout(self.timeout) + # End of difference from httplib. + if self.debuglevel > 0: + print "connect: (%s, %s)" % (self.host, self.port) + self.sock.connect(sa) + except socket.error, msg: + if self.debuglevel > 0: + print 'connect fail:', (self.host, self.port) + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket.error, msg + +class HTTPSConnectionWithTimeout(httplib.HTTPSConnection): + "This class allows communication via SSL." + + def __init__(self, host, port=None, key_file=None, cert_file=None, + strict=None, timeout=None, proxy_info=None): + self.timeout = timeout + self.proxy_info = proxy_info + httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file, + cert_file=cert_file, strict=strict) + + def connect(self): + "Connect to a host on a given (SSL) port." + + if self.proxy_info and self.proxy_info.isgood(): + self.sock.setproxy(*self.proxy_info.astuple()) + sock.setproxy(*self.proxy_info.astuple()) + else: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if self.timeout is not None: + sock.settimeout(self.timeout) + sock.connect((self.host, self.port)) + ssl = socket.ssl(sock, self.key_file, self.cert_file) + self.sock = httplib.FakeSocket(sock, ssl) + + + +class Http(object): + """An HTTP client that handles: +- all methods +- caching +- ETags +- compression, +- HTTPS +- Basic +- Digest +- WSSE + +and more. + """ + def __init__(self, cache=None, timeout=None, proxy_info=None): + """The value of proxy_info is a ProxyInfo instance. + +If 'cache' is a string then it is used as a directory name +for a disk cache. Otherwise it must be an object that supports +the same interface as FileCache.""" + self.proxy_info = proxy_info + # Map domain name to an httplib connection + self.connections = {} + # The location of the cache, for now a directory + # where cached responses are held. + if cache and isinstance(cache, str): + self.cache = FileCache(cache) + else: + self.cache = cache + + # Name/password + self.credentials = Credentials() + + # Key/cert + self.certificates = KeyCerts() + + # authorization objects + self.authorizations = [] + + # If set to False then no redirects are followed, even safe ones. + self.follow_redirects = True + + # If 'follow_redirects' is True, and this is set to True then + # all redirecs are followed, including unsafe ones. + self.follow_all_redirects = False + + self.ignore_etag = False + + self.force_exception_to_status_code = False + + self.timeout = timeout + + def _auth_from_challenge(self, host, request_uri, headers, response, content): + """A generator that creates Authorization objects + that can be applied to requests. 
+ """ + challenges = _parse_www_authenticate(response, 'www-authenticate') + for cred in self.credentials.iter(host): + for scheme in AUTH_SCHEME_ORDER: + if challenges.has_key(scheme): + yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self) + + def add_credentials(self, name, password, domain=""): + """Add a name and password that will be used + any time a request requires authentication.""" + self.credentials.add(name, password, domain) + + def add_certificate(self, key, cert, domain): + """Add a key and cert that will be used + any time a request requires authentication.""" + self.certificates.add(key, cert, domain) + + def clear_credentials(self): + """Remove all the names and passwords + that are used for authentication""" + self.credentials.clear() + self.authorizations = [] + + def _conn_request(self, conn, request_uri, method, body, headers): + for i in range(2): + try: + conn.request(method, request_uri, body, headers) + response = conn.getresponse() + except socket.gaierror: + conn.close() + raise ServerNotFoundError("Unable to find the server at %s" % conn.host) + except httplib.HTTPException, e: + if i == 0: + conn.close() + conn.connect() + continue + else: + raise + else: + content = response.read() + response = Response(response) + if method != "HEAD": + content = _decompressContent(response, content) + + break; + return (response, content) + + + def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey): + """Do the actual request using the connection object + and also follow one level of redirects if necessary""" + + auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)] + auth = auths and sorted(auths)[0][1] or None + if auth: + auth.request(method, request_uri, headers, body) + + (response, content) = self._conn_request(conn, request_uri, method, body, headers) + + if auth: + if auth.response(response, body): + auth.request(method, request_uri, headers, body) + (response, content) = self._conn_request(conn, request_uri, method, body, headers ) + response._stale_digest = 1 + + if response.status == 401: + for authorization in self._auth_from_challenge(host, request_uri, headers, response, content): + authorization.request(method, request_uri, headers, body) + (response, content) = self._conn_request(conn, request_uri, method, body, headers, ) + if response.status != 401: + self.authorizations.append(authorization) + authorization.response(response, body) + break + + if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303): + if self.follow_redirects and response.status in [300, 301, 302, 303, 307]: + # Pick out the location header and basically start from the beginning + # remembering first to strip the ETag header and decrement our 'depth' + if redirections: + if not response.has_key('location') and response.status != 300: + raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content) + # Fix-up relative redirects (which violate an RFC 2616 MUST) + if response.has_key('location'): + location = response['location'] + (scheme, authority, path, query, fragment) = parse_uri(location) + if authority == None: + response['location'] = urlparse.urljoin(absolute_uri, location) + if response.status == 301 and method in ["GET", "HEAD"]: + response['-x-permanent-redirect-url'] = response['location'] + if not response.has_key('content-location'): + 
response['content-location'] = absolute_uri + _updateCache(headers, response, content, self.cache, cachekey) + if headers.has_key('if-none-match'): + del headers['if-none-match'] + if headers.has_key('if-modified-since'): + del headers['if-modified-since'] + if response.has_key('location'): + location = response['location'] + old_response = copy.deepcopy(response) + if not old_response.has_key('content-location'): + old_response['content-location'] = absolute_uri + redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method + (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1) + response.previous = old_response + else: + raise RedirectLimit( _("Redirected more times than rediection_limit allows."), response, content) + elif response.status in [200, 203] and method == "GET": + # Don't cache 206's since we aren't going to handle byte range requests + if not response.has_key('content-location'): + response['content-location'] = absolute_uri + _updateCache(headers, response, content, self.cache, cachekey) + + return (response, content) + + +# Need to catch and rebrand some exceptions +# Then need to optionally turn all exceptions into status codes +# including all socket.* and httplib.* exceptions. + + + def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None): + """ Performs a single HTTP request. +The 'uri' is the URI of the HTTP resource and can begin +with either 'http' or 'https'. The value of 'uri' must be an absolute URI. + +The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc. +There is no restriction on the methods allowed. + +The 'body' is the entity body to be sent with the request. It is a string +object. + +Any extra headers that are to be sent with the request should be provided in the +'headers' dictionary. + +The maximum number of redirect to follow before raising an +exception is 'redirections. The default is 5. + +The return value is a tuple of (response, content), the first +being and instance of the 'Response' class, the second being +a string that contains the response entity body. 
+ """ + try: + if headers is None: + headers = {} + else: + headers = _normalize_headers(headers) + + if not headers.has_key('user-agent'): + headers['user-agent'] = "Python-httplib2/%s" % __version__ + + uri = iri2uri(uri) + + (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) + + conn_key = scheme+":"+authority + if conn_key in self.connections: + conn = self.connections[conn_key] + else: + if not connection_type: + connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout + certs = list(self.certificates.iter(authority)) + if scheme == 'https' and certs: + conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0], + cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info) + else: + conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info) + conn.set_debuglevel(debuglevel) + + if method in ["GET", "HEAD"] and 'range' not in headers: + headers['accept-encoding'] = 'compress, gzip' + + info = email.Message.Message() + cached_value = None + if self.cache: + cachekey = defrag_uri + cached_value = self.cache.get(cachekey) + if cached_value: + info = email.message_from_string(cached_value) + try: + content = cached_value.split('\r\n\r\n', 1)[1] + except IndexError: + self.cache.delete(cachekey) + cachekey = None + cached_value = None + else: + cachekey = None + + if method in ["PUT"] and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers: + # http://www.w3.org/1999/04/Editing/ + headers['if-match'] = info['etag'] + + if method not in ["GET", "HEAD"] and self.cache and cachekey: + # RFC 2616 Section 13.10 + self.cache.delete(cachekey) + + if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers: + if info.has_key('-x-permanent-redirect-url'): + # Should cached permanent redirects be counted in our redirection count? For now, yes. + (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1) + response.previous = Response(info) + response.previous.fromcache = True + else: + # Determine our course of action: + # Is the cached entry fresh or stale? + # Has the client requested a non-cached response? + # + # There seems to be three possible answers: + # 1. [FRESH] Return the cache entry w/o doing a GET + # 2. [STALE] Do the GET (but add in cache validators if available) + # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request + entry_disposition = _entry_disposition(info, headers) + + if entry_disposition == "FRESH": + if not cached_value: + info['status'] = '504' + content = "" + response = Response(info) + if cached_value: + response.fromcache = True + return (response, content) + + if entry_disposition == "STALE": + if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers: + headers['if-none-match'] = info['etag'] + if info.has_key('last-modified') and not 'last-modified' in headers: + headers['if-modified-since'] = info['last-modified'] + elif entry_disposition == "TRANSPARENT": + pass + + (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) + + if response.status == 304 and method == "GET": + # Rewrite the cache entry with the new end-to-end headers + # Take all headers that are in response + # and overwrite their values in info. 
+ # unless they are hop-by-hop, or are listed in the connection header. + + for key in _get_end2end_headers(response): + info[key] = response[key] + merged_response = Response(info) + if hasattr(response, "_stale_digest"): + merged_response._stale_digest = response._stale_digest + _updateCache(headers, merged_response, content, self.cache, cachekey) + response = merged_response + response.status = 200 + response.fromcache = True + + elif response.status == 200: + content = new_content + else: + self.cache.delete(cachekey) + content = new_content + else: + (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) + except Exception, e: + if self.force_exception_to_status_code: + if isinstance(e, HttpLib2ErrorWithResponse): + response = e.response + content = e.content + response.status = 500 + response.reason = str(e) + elif isinstance(e, socket.timeout): + content = "Request Timeout" + response = Response( { + "content-type": "text/plain", + "status": "408", + "content-length": len(content) + }) + response.reason = "Request Timeout" + else: + content = str(e) + response = Response( { + "content-type": "text/plain", + "status": "400", + "content-length": len(content) + }) + response.reason = "Bad Request" + else: + raise + + + return (response, content) + + + +class Response(dict): + """An object more like email.Message than httplib.HTTPResponse.""" + + """Is this response from our local cache""" + fromcache = False + + """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """ + version = 11 + + "Status code returned by server. " + status = 200 + + """Reason phrase returned by server.""" + reason = "Ok" + + previous = None + + def __init__(self, info): + # info is either an email.Message or + # an httplib.HTTPResponse object. + if isinstance(info, httplib.HTTPResponse): + for key, value in info.getheaders(): + self[key] = value + self.status = info.status + self['status'] = str(self.status) + self.reason = info.reason + self.version = info.version + elif isinstance(info, email.Message.Message): + for key, value in info.items(): + self[key] = value + self.status = int(self['status']) + else: + for key, value in info.iteritems(): + self[key] = value + self.status = int(self.get('status', self.status)) + + + def __getattr__(self, name): + if name == 'dict': + return self + else: + raise AttributeError, name diff --git a/tutorius/apilib/httplib2/.svn/text-base/__init__.pyc.svn-base b/tutorius/apilib/httplib2/.svn/text-base/__init__.pyc.svn-base Binary files differnew file mode 100644 index 0000000..f092204 --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/text-base/__init__.pyc.svn-base diff --git a/tutorius/apilib/httplib2/.svn/text-base/iri2uri.py.svn-base b/tutorius/apilib/httplib2/.svn/text-base/iri2uri.py.svn-base new file mode 100644 index 0000000..70667ed --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/text-base/iri2uri.py.svn-base @@ -0,0 +1,110 @@ +""" +iri2uri + +Converts an IRI to a URI. 
+ +""" +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = [] +__version__ = "1.0.0" +__license__ = "MIT" +__history__ = """ +""" + +import urlparse + + +# Convert an IRI to a URI following the rules in RFC 3987 +# +# The characters we need to enocde and escape are defined in the spec: +# +# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD +# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF +# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD +# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD +# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD +# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD +# / %xD0000-DFFFD / %xE1000-EFFFD + +escape_range = [ + (0xA0, 0xD7FF ), + (0xE000, 0xF8FF ), + (0xF900, 0xFDCF ), + (0xFDF0, 0xFFEF), + (0x10000, 0x1FFFD ), + (0x20000, 0x2FFFD ), + (0x30000, 0x3FFFD), + (0x40000, 0x4FFFD ), + (0x50000, 0x5FFFD ), + (0x60000, 0x6FFFD), + (0x70000, 0x7FFFD ), + (0x80000, 0x8FFFD ), + (0x90000, 0x9FFFD), + (0xA0000, 0xAFFFD ), + (0xB0000, 0xBFFFD ), + (0xC0000, 0xCFFFD), + (0xD0000, 0xDFFFD ), + (0xE1000, 0xEFFFD), + (0xF0000, 0xFFFFD ), + (0x100000, 0x10FFFD) +] + +def encode(c): + retval = c + i = ord(c) + for low, high in escape_range: + if i < low: + break + if i >= low and i <= high: + retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')]) + break + return retval + + +def iri2uri(uri): + """Convert an IRI to a URI. Note that IRIs must be + passed in a unicode strings. That is, do not utf-8 encode + the IRI before passing it into the function.""" + if isinstance(uri ,unicode): + (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri) + authority = authority.encode('idna') + # For each character in 'ucschar' or 'iprivate' + # 1. encode as utf-8 + # 2. 
then %-encode each octet of that utf-8 + uri = urlparse.urlunsplit((scheme, authority, path, query, fragment)) + uri = "".join([encode(c) for c in uri]) + return uri + +if __name__ == "__main__": + import unittest + + class Test(unittest.TestCase): + + def test_uris(self): + """Test that URIs are invariant under the transformation.""" + invariant = [ + u"ftp://ftp.is.co.za/rfc/rfc1808.txt", + u"http://www.ietf.org/rfc/rfc2396.txt", + u"ldap://[2001:db8::7]/c=GB?objectClass?one", + u"mailto:John.Doe@example.com", + u"news:comp.infosystems.www.servers.unix", + u"tel:+1-816-555-1212", + u"telnet://192.0.2.16:80/", + u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ] + for uri in invariant: + self.assertEqual(uri, iri2uri(uri)) + + def test_iri(self): + """ Test that the right type of escaping is done for each part of the URI.""" + self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}")) + self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}")) + self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}")) + self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}")) + self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")) + self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))) + self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8'))) + + unittest.main() + + diff --git a/tutorius/apilib/httplib2/.svn/text-base/iri2uri.pyc.svn-base b/tutorius/apilib/httplib2/.svn/text-base/iri2uri.pyc.svn-base Binary files differnew file mode 100644 index 0000000..e16a3db --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/text-base/iri2uri.pyc.svn-base diff --git a/tutorius/apilib/httplib2/__init__.py b/tutorius/apilib/httplib2/__init__.py new file mode 100644 index 0000000..982bf8a --- /dev/null +++ b/tutorius/apilib/httplib2/__init__.py @@ -0,0 +1,1123 @@ +from __future__ import generators +""" +httplib2 + +A caching http interface that supports ETags and gzip +to conserve bandwidth. + +Requires Python 2.3 or later + +Changelog: +2007-08-18, Rick: Modified so it's able to use a socks proxy if needed. 
+ +""" + +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)", + "James Antill", + "Xavier Verges Farrero", + "Jonathan Feinberg", + "Blair Zajac", + "Sam Ruby", + "Louis Nyffenegger"] +__license__ = "MIT" +__version__ = "$Rev: 259 $" + +import re +import sys +import md5 +import email +import email.Utils +import email.Message +import StringIO +import gzip +import zlib +import httplib +import urlparse +import base64 +import os +import copy +import calendar +import time +import random +import sha +import hmac +from gettext import gettext as _ +import socket + +try: + import socks +except ImportError: + socks = None + +if sys.version_info >= (2,3): + from iri2uri import iri2uri +else: + def iri2uri(uri): + return uri + +__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error', + 'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent', + 'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError', + 'debuglevel'] + + +# The httplib debug level, set to a non-zero value to get debug output +debuglevel = 0 + +# Python 2.3 support +if sys.version_info < (2,4): + def sorted(seq): + seq.sort() + return seq + +# Python 2.3 support +def HTTPResponse__getheaders(self): + """Return list of (header, value) tuples.""" + if self.msg is None: + raise httplib.ResponseNotReady() + return self.msg.items() + +if not hasattr(httplib.HTTPResponse, 'getheaders'): + httplib.HTTPResponse.getheaders = HTTPResponse__getheaders + +# All exceptions raised here derive from HttpLib2Error +class HttpLib2Error(Exception): pass + +# Some exceptions can be caught and optionally +# be turned back into responses. +class HttpLib2ErrorWithResponse(HttpLib2Error): + def __init__(self, desc, response, content): + self.response = response + self.content = content + HttpLib2Error.__init__(self, desc) + +class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass +class RedirectLimit(HttpLib2ErrorWithResponse): pass +class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass +class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass +class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass + +class RelativeURIError(HttpLib2Error): pass +class ServerNotFoundError(HttpLib2Error): pass + +# Open Items: +# ----------- +# Proxy support + +# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) + +# Pluggable cache storage (supports storing the cache in +# flat files by default. We need a plug-in architecture +# that can support Berkeley DB and Squid) + +# == Known Issues == +# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. +# Does not handle Cache-Control: max-stale +# Does not use Age: headers when calculating cache freshness. + + +# The number of redirections to follow before giving up. +# Note that only GET redirects are automatically followed. +# Will also honor 301 requests by saving that info and never +# requesting that URI again. 
+DEFAULT_MAX_REDIRECTS = 5 + +# Which headers are hop-by-hop headers by default +HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade'] + +def _get_end2end_headers(response): + hopbyhop = list(HOP_BY_HOP) + hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')]) + return [header for header in response.keys() if header not in hopbyhop] + +URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") + +def parse_uri(uri): + """Parses a URI using the regex given in Appendix B of RFC 3986. + + (scheme, authority, path, query, fragment) = parse_uri(uri) + """ + groups = URI.match(uri).groups() + return (groups[1], groups[3], groups[4], groups[6], groups[8]) + +def urlnorm(uri): + (scheme, authority, path, query, fragment) = parse_uri(uri) + if not scheme or not authority: + raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) + authority = authority.lower() + scheme = scheme.lower() + if not path: + path = "/" + # Could do syntax based normalization of the URI before + # computing the digest. See Section 6.2.2 of Std 66. + request_uri = query and "?".join([path, query]) or path + scheme = scheme.lower() + defrag_uri = scheme + "://" + authority + request_uri + return scheme, authority, request_uri, defrag_uri + + +# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/) +re_url_scheme = re.compile(r'^\w+://') +re_slash = re.compile(r'[?/:|]+') + +def safename(filename): + """Return a filename suitable for the cache. + + Strips dangerous and common characters to create a filename we + can use to store the cache in. + """ + + try: + if re_url_scheme.match(filename): + if isinstance(filename,str): + filename = filename.decode('utf-8') + filename = filename.encode('idna') + else: + filename = filename.encode('idna') + except UnicodeError: + pass + if isinstance(filename,unicode): + filename=filename.encode('utf-8') + filemd5 = md5.new(filename).hexdigest() + filename = re_url_scheme.sub("", filename) + filename = re_slash.sub(",", filename) + + # limit length of filename + if len(filename)>200: + filename=filename[:200] + return ",".join((filename, filemd5)) + +NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+') +def _normalize_headers(headers): + return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()]) + +def _parse_cache_control(headers): + retval = {} + if headers.has_key('cache-control'): + parts = headers['cache-control'].split(',') + parts_with_args = [tuple([x.strip() for x in part.split("=")]) for part in parts if -1 != part.find("=")] + parts_wo_args = [(name.strip(), 1) for name in parts if -1 == name.find("=")] + retval = dict(parts_with_args + parts_wo_args) + return retval + +# Whether to use a strict mode to parse WWW-Authenticate headers +# Might lead to bad results in case of ill-formed header value, +# so disabled by default, falling back to relaxed parsing. +# Set to true to turn on, usefull for testing servers. +USE_WWW_AUTH_STRICT_PARSING = 0 + +# In regex below: +# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP +# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" 
matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space +# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both: +# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"? +WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$") +WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$") +UNQUOTE_PAIRS = re.compile(r'\\(.)') +def _parse_www_authenticate(headers, headername='www-authenticate'): + """Returns a dictionary of dictionaries, one dict + per auth_scheme.""" + retval = {} + if headers.has_key(headername): + authenticate = headers[headername].strip() + www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED + while authenticate: + # Break off the scheme at the beginning of the line + if headername == 'authentication-info': + (auth_scheme, the_rest) = ('digest', authenticate) + else: + (auth_scheme, the_rest) = authenticate.split(" ", 1) + # Now loop over all the key value pairs that come after the scheme, + # being careful not to roll into the next scheme + match = www_auth.search(the_rest) + auth_params = {} + while match: + if match and len(match.groups()) == 3: + (key, value, the_rest) = match.groups() + auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')]) + match = www_auth.search(the_rest) + retval[auth_scheme.lower()] = auth_params + authenticate = the_rest.strip() + return retval + + +def _entry_disposition(response_headers, request_headers): + """Determine freshness from the Date, Expires and Cache-Control headers. + + We don't handle the following: + + 1. Cache-Control: max-stale + 2. Age: headers are not used in the calculations. + + Not that this algorithm is simpler than you might think + because we are operating as a private (non-shared) cache. + This lets us ignore 's-maxage'. We can also ignore + 'proxy-invalidate' since we aren't a proxy. + We will never return a stale document as + fresh as a design decision, and thus the non-implementation + of 'max-stale'. This also lets us safely ignore 'must-revalidate' + since we operate as if every server has sent 'must-revalidate'. + Since we are private we get to ignore both 'public' and + 'private' parameters. We also ignore 'no-transform' since + we don't do any transformations. + The 'no-store' parameter is handled at a higher level. 
+def _entry_disposition(response_headers, request_headers):
+    """Determine freshness from the Date, Expires and Cache-Control headers.
+
+    We don't handle the following:
+
+    1. Cache-Control: max-stale
+    2. Age: headers are not used in the calculations.
+
+    Note that this algorithm is simpler than you might think
+    because we are operating as a private (non-shared) cache.
+    This lets us ignore 's-maxage'. We can also ignore
+    'proxy-invalidate' since we aren't a proxy.
+    As a design decision we will never return a stale document as
+    fresh, hence the non-implementation of 'max-stale'. This also
+    lets us safely ignore 'must-revalidate', since we operate as if
+    every server had sent 'must-revalidate'.
+    Since we are private we get to ignore both 'public' and
+    'private' parameters. We also ignore 'no-transform' since
+    we don't do any transformations.
+    The 'no-store' parameter is handled at a higher level.
+    So the only Cache-Control parameters we look at are:
+
+    no-cache
+    only-if-cached
+    max-age
+    min-fresh
+    """
+
+    retval = "STALE"
+    cc = _parse_cache_control(request_headers)
+    cc_response = _parse_cache_control(response_headers)
+
+    if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
+        retval = "TRANSPARENT"
+        if 'cache-control' not in request_headers:
+            request_headers['cache-control'] = 'no-cache'
+    elif cc.has_key('no-cache'):
+        retval = "TRANSPARENT"
+    elif cc_response.has_key('no-cache'):
+        retval = "STALE"
+    elif cc.has_key('only-if-cached'):
+        retval = "FRESH"
+    elif response_headers.has_key('date'):
+        date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
+        now = time.time()
+        current_age = max(0, now - date)
+        if cc_response.has_key('max-age'):
+            try:
+                freshness_lifetime = int(cc_response['max-age'])
+            except ValueError:
+                freshness_lifetime = 0
+        elif response_headers.has_key('expires'):
+            expires = email.Utils.parsedate_tz(response_headers['expires'])
+            if None == expires:
+                freshness_lifetime = 0
+            else:
+                freshness_lifetime = max(0, calendar.timegm(expires) - date)
+        else:
+            freshness_lifetime = 0
+        if cc.has_key('max-age'):
+            try:
+                freshness_lifetime = int(cc['max-age'])
+            except ValueError:
+                freshness_lifetime = 0
+        if cc.has_key('min-fresh'):
+            try:
+                min_fresh = int(cc['min-fresh'])
+            except ValueError:
+                min_fresh = 0
+            current_age += min_fresh
+        if freshness_lifetime > current_age:
+            retval = "FRESH"
+    return retval
+
+def _decompressContent(response, new_content):
+    content = new_content
+    try:
+        encoding = response.get('content-encoding', None)
+        if encoding in ['gzip', 'deflate']:
+            if encoding == 'gzip':
+                content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
+            if encoding == 'deflate':
+                content = zlib.decompress(content)
+            response['content-length'] = str(len(content))
+            del response['content-encoding']
+    except IOError:
+        content = ""
+        raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
+    return content
+
+def _updateCache(request_headers, response_headers, content, cache, cachekey):
+    if cachekey:
+        cc = _parse_cache_control(request_headers)
+        cc_response = _parse_cache_control(response_headers)
+        if cc.has_key('no-store') or cc_response.has_key('no-store'):
+            cache.delete(cachekey)
+        else:
+            info = email.Message.Message()
+            for key, value in response_headers.iteritems():
+                if key not in ['status', 'content-encoding', 'transfer-encoding']:
+                    info[key] = value
+
+            # Cache a 304 as if it were the original 200 response
+            status = response_headers.status
+            if status == 304:
+                status = 200
+
+            status_header = 'status: %d\r\n' % status
+
+            header_str = info.as_string()
+
+            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
+            text = "".join([status_header, header_str, content])
+
+            cache.set(cachekey, text)
+
+def _cnonce():
+    dig = md5.new("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
+    return dig[:16]
+
+def _wsse_username_token(cnonce, iso_now, password):
+    return base64.encodestring(sha.new("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
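+
+# Editor's note: a minimal illustrative sketch, not part of the upstream
+# httplib2 code; it relies on the module's email/time imports used above.
+# It shows how _entry_disposition() classifies a cached entry; the header
+# values are invented for the example.
+def _demo_freshness():
+    response_headers = {'date': email.Utils.formatdate(time.time()),
+                        'cache-control': 'max-age=300'}
+    # A response dated "now" with max-age=300 has a current age of ~0s and
+    # a freshness lifetime of 300s, so this returns "FRESH".
+    return _entry_disposition(response_headers, {})
+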
+
+# For credentials we need two things: first, a pool of credentials to
+# try (not necessarily tied to Basic, Digest, etc.)
+# Then we also need a list of URIs that have already demanded authentication
+# That list is tricky since sub-URIs can take the same auth, or the
+# auth scheme may change as you descend the tree.
+# So we also need each Auth instance to be able to tell us
+# how close to the 'top' it is.
+
+class Authentication(object):
+    def __init__(self, credentials, host, request_uri, headers, response, content, http):
+        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
+        self.path = path
+        self.host = host
+        self.credentials = credentials
+        self.http = http
+
+    def depth(self, request_uri):
+        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
+        return request_uri[len(self.path):].count("/")
+
+    def inscope(self, host, request_uri):
+        # XXX Should we normalize the request_uri?
+        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
+        return (host == self.host) and path.startswith(self.path)
+
+    def request(self, method, request_uri, headers, content):
+        """Modify the request headers to add the appropriate
+        Authorization header. Override this in subclasses."""
+        pass
+
+    def response(self, response, content):
+        """Gives us a chance to update with new nonces
+        or such returned from the last authorized response.
+        Override this in subclasses if necessary.
+
+        Return TRUE if the request is to be retried, for
+        example Digest may return stale=true.
+        """
+        return False
+
+
+class BasicAuthentication(Authentication):
+    def __init__(self, credentials, host, request_uri, headers, response, content, http):
+        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
+
+    def request(self, method, request_uri, headers, content):
+        """Modify the request headers to add the appropriate
+        Authorization header."""
+        headers['authorization'] = 'Basic ' + base64.encodestring("%s:%s" % self.credentials).strip()
+
+
+class DigestAuthentication(Authentication):
+    """Only do qop='auth' and MD5, since that
+    is all Apache currently implements"""
+    def __init__(self, credentials, host, request_uri, headers, response, content, http):
+        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
+        challenge = _parse_www_authenticate(response, 'www-authenticate')
+        self.challenge = challenge['digest']
+        qop = self.challenge.get('qop', 'auth')
+        self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
+        if self.challenge['qop'] is None:
+            raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
+        self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5')
+        if self.challenge['algorithm'] != 'MD5':
+            raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s."
% self.challenge['algorithm'])) + self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]]) + self.challenge['nc'] = 1 + + def request(self, method, request_uri, headers, content, cnonce = None): + """Modify the request headers""" + H = lambda x: md5.new(x).hexdigest() + KD = lambda s, d: H("%s:%s" % (s, d)) + A2 = "".join([method, ":", request_uri]) + self.challenge['cnonce'] = cnonce or _cnonce() + request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'], + '%08x' % self.challenge['nc'], + self.challenge['cnonce'], + self.challenge['qop'], H(A2) + )) + headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % ( + self.credentials[0], + self.challenge['realm'], + self.challenge['nonce'], + request_uri, + self.challenge['algorithm'], + request_digest, + self.challenge['qop'], + self.challenge['nc'], + self.challenge['cnonce'], + ) + self.challenge['nc'] += 1 + + def response(self, response, content): + if not response.has_key('authentication-info'): + challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {}) + if 'true' == challenge.get('stale'): + self.challenge['nonce'] = challenge['nonce'] + self.challenge['nc'] = 1 + return True + else: + updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {}) + + if updated_challenge.has_key('nextnonce'): + self.challenge['nonce'] = updated_challenge['nextnonce'] + self.challenge['nc'] = 1 + return False + + +class HmacDigestAuthentication(Authentication): + """Adapted from Robert Sayre's code and DigestAuthentication above.""" + __author__ = "Thomas Broyer (t.broyer@ltgt.net)" + + def __init__(self, credentials, host, request_uri, headers, response, content, http): + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + challenge = _parse_www_authenticate(response, 'www-authenticate') + self.challenge = challenge['hmacdigest'] + # TODO: self.challenge['domain'] + self.challenge['reason'] = self.challenge.get('reason', 'unauthorized') + if self.challenge['reason'] not in ['unauthorized', 'integrity']: + self.challenge['reason'] = 'unauthorized' + self.challenge['salt'] = self.challenge.get('salt', '') + if not self.challenge.get('snonce'): + raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty.")) + self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1') + if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']: + raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm'])) + self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1') + if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']: + raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." 
% self.challenge['pw-algorithm'])) + if self.challenge['algorithm'] == 'HMAC-MD5': + self.hashmod = md5 + else: + self.hashmod = sha + if self.challenge['pw-algorithm'] == 'MD5': + self.pwhashmod = md5 + else: + self.pwhashmod = sha + self.key = "".join([self.credentials[0], ":", + self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(), + ":", self.challenge['realm'] + ]) + self.key = self.pwhashmod.new(self.key).hexdigest().lower() + + def request(self, method, request_uri, headers, content): + """Modify the request headers""" + keys = _get_end2end_headers(headers) + keylist = "".join(["%s " % k for k in keys]) + headers_val = "".join([headers[k] for k in keys]) + created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime()) + cnonce = _cnonce() + request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val) + request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower() + headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % ( + self.credentials[0], + self.challenge['realm'], + self.challenge['snonce'], + cnonce, + request_uri, + created, + request_digest, + keylist, + ) + + def response(self, response, content): + challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {}) + if challenge.get('reason') in ['integrity', 'stale']: + return True + return False + + +class WsseAuthentication(Authentication): + """This is thinly tested and should not be relied upon. + At this time there isn't any third party server to test against. + Blogger and TypePad implemented this algorithm at one point + but Blogger has since switched to Basic over HTTPS and + TypePad has implemented it wrong, by never issuing a 401 + challenge but instead requiring your client to telepathically know that + their endpoint is expecting WSSE profile="UsernameToken".""" + def __init__(self, credentials, host, request_uri, headers, response, content, http): + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers['Authorization'] = 'WSSE profile="UsernameToken"' + iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + cnonce = _cnonce() + password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1]) + headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % ( + self.credentials[0], + password_digest, + cnonce, + iso_now) + +class GoogleLoginAuthentication(Authentication): + def __init__(self, credentials, host, request_uri, headers, response, content, http): + from urllib import urlencode + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + challenge = _parse_www_authenticate(response, 'www-authenticate') + service = challenge['googlelogin'].get('service', 'xapi') + # Bloggger actually returns the service in the challenge + # For the rest we guess based on the URI + if service == 'xapi' and request_uri.find("calendar") > 0: + service = "cl" + # No point in guessing Base or Spreadsheet + #elif request_uri.find("spreadsheets") > 0: + # service = "wise" + + auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent']) + resp, content = 
self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'}) + lines = content.split('\n') + d = dict([tuple(line.split("=", 1)) for line in lines if line]) + if resp.status == 403: + self.Auth = "" + else: + self.Auth = d['Auth'] + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers['authorization'] = 'GoogleLogin Auth=' + self.Auth + + +AUTH_SCHEME_CLASSES = { + "basic": BasicAuthentication, + "wsse": WsseAuthentication, + "digest": DigestAuthentication, + "hmacdigest": HmacDigestAuthentication, + "googlelogin": GoogleLoginAuthentication +} + +AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] + +def _md5(s): + return + +class FileCache(object): + """Uses a local directory as a store for cached files. + Not really safe to use if multiple threads or processes are going to + be running on the same cache. + """ + def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior + self.cache = cache + self.safe = safe + if not os.path.exists(cache): + os.makedirs(self.cache) + + def get(self, key): + retval = None + cacheFullPath = os.path.join(self.cache, self.safe(key)) + try: + f = file(cacheFullPath, "r") + retval = f.read() + f.close() + except IOError: + pass + return retval + + def set(self, key, value): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + f = file(cacheFullPath, "w") + f.write(value) + f.close() + + def delete(self, key): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + if os.path.exists(cacheFullPath): + os.remove(cacheFullPath) + +class Credentials(object): + def __init__(self): + self.credentials = [] + + def add(self, name, password, domain=""): + self.credentials.append((domain.lower(), name, password)) + + def clear(self): + self.credentials = [] + + def iter(self, domain): + for (cdomain, name, password) in self.credentials: + if cdomain == "" or domain == cdomain: + yield (name, password) + +class KeyCerts(Credentials): + """Identical to Credentials except that + name/password are mapped to key/cert.""" + pass + + +class ProxyInfo(object): + """Collect information required to use a proxy.""" + def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None): + """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX + constants. For example: + +p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000) + """ + self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass + + def astuple(self): + return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, + self.proxy_user, self.proxy_pass) + + def isgood(self): + return socks and (self.proxy_host != None) and (self.proxy_port != None) + + +class HTTPConnectionWithTimeout(httplib.HTTPConnection): + """HTTPConnection subclass that supports timeouts""" + + def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None): + httplib.HTTPConnection.__init__(self, host, port, strict) + self.timeout = timeout + self.proxy_info = proxy_info + + def connect(self): + """Connect to the host and port specified in __init__.""" + # Mostly verbatim from httplib.py. 
+ msg = "getaddrinfo returns an empty list" + for res in socket.getaddrinfo(self.host, self.port, 0, + socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + if self.proxy_info and self.proxy_info.isgood(): + self.sock = socks.socksocket(af, socktype, proto) + self.sock.setproxy(*self.proxy_info.astuple()) + else: + self.sock = socket.socket(af, socktype, proto) + # Different from httplib: support timeouts. + if self.timeout is not None: + self.sock.settimeout(self.timeout) + # End of difference from httplib. + if self.debuglevel > 0: + print "connect: (%s, %s)" % (self.host, self.port) + self.sock.connect(sa) + except socket.error, msg: + if self.debuglevel > 0: + print 'connect fail:', (self.host, self.port) + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket.error, msg + +class HTTPSConnectionWithTimeout(httplib.HTTPSConnection): + "This class allows communication via SSL." + + def __init__(self, host, port=None, key_file=None, cert_file=None, + strict=None, timeout=None, proxy_info=None): + self.timeout = timeout + self.proxy_info = proxy_info + httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file, + cert_file=cert_file, strict=strict) + + def connect(self): + "Connect to a host on a given (SSL) port." + + if self.proxy_info and self.proxy_info.isgood(): + self.sock.setproxy(*self.proxy_info.astuple()) + sock.setproxy(*self.proxy_info.astuple()) + else: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if self.timeout is not None: + sock.settimeout(self.timeout) + sock.connect((self.host, self.port)) + ssl = socket.ssl(sock, self.key_file, self.cert_file) + self.sock = httplib.FakeSocket(sock, ssl) + + + +class Http(object): + """An HTTP client that handles: +- all methods +- caching +- ETags +- compression, +- HTTPS +- Basic +- Digest +- WSSE + +and more. + """ + def __init__(self, cache=None, timeout=None, proxy_info=None): + """The value of proxy_info is a ProxyInfo instance. + +If 'cache' is a string then it is used as a directory name +for a disk cache. Otherwise it must be an object that supports +the same interface as FileCache.""" + self.proxy_info = proxy_info + # Map domain name to an httplib connection + self.connections = {} + # The location of the cache, for now a directory + # where cached responses are held. + if cache and isinstance(cache, str): + self.cache = FileCache(cache) + else: + self.cache = cache + + # Name/password + self.credentials = Credentials() + + # Key/cert + self.certificates = KeyCerts() + + # authorization objects + self.authorizations = [] + + # If set to False then no redirects are followed, even safe ones. + self.follow_redirects = True + + # If 'follow_redirects' is True, and this is set to True then + # all redirecs are followed, including unsafe ones. + self.follow_all_redirects = False + + self.ignore_etag = False + + self.force_exception_to_status_code = False + + self.timeout = timeout + + def _auth_from_challenge(self, host, request_uri, headers, response, content): + """A generator that creates Authorization objects + that can be applied to requests. 
+ """ + challenges = _parse_www_authenticate(response, 'www-authenticate') + for cred in self.credentials.iter(host): + for scheme in AUTH_SCHEME_ORDER: + if challenges.has_key(scheme): + yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self) + + def add_credentials(self, name, password, domain=""): + """Add a name and password that will be used + any time a request requires authentication.""" + self.credentials.add(name, password, domain) + + def add_certificate(self, key, cert, domain): + """Add a key and cert that will be used + any time a request requires authentication.""" + self.certificates.add(key, cert, domain) + + def clear_credentials(self): + """Remove all the names and passwords + that are used for authentication""" + self.credentials.clear() + self.authorizations = [] + + def _conn_request(self, conn, request_uri, method, body, headers): + for i in range(2): + try: + conn.request(method, request_uri, body, headers) + response = conn.getresponse() + except socket.gaierror: + conn.close() + raise ServerNotFoundError("Unable to find the server at %s" % conn.host) + except httplib.HTTPException, e: + if i == 0: + conn.close() + conn.connect() + continue + else: + raise + else: + content = response.read() + response = Response(response) + if method != "HEAD": + content = _decompressContent(response, content) + + break; + return (response, content) + + + def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey): + """Do the actual request using the connection object + and also follow one level of redirects if necessary""" + + auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)] + auth = auths and sorted(auths)[0][1] or None + if auth: + auth.request(method, request_uri, headers, body) + + (response, content) = self._conn_request(conn, request_uri, method, body, headers) + + if auth: + if auth.response(response, body): + auth.request(method, request_uri, headers, body) + (response, content) = self._conn_request(conn, request_uri, method, body, headers ) + response._stale_digest = 1 + + if response.status == 401: + for authorization in self._auth_from_challenge(host, request_uri, headers, response, content): + authorization.request(method, request_uri, headers, body) + (response, content) = self._conn_request(conn, request_uri, method, body, headers, ) + if response.status != 401: + self.authorizations.append(authorization) + authorization.response(response, body) + break + + if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303): + if self.follow_redirects and response.status in [300, 301, 302, 303, 307]: + # Pick out the location header and basically start from the beginning + # remembering first to strip the ETag header and decrement our 'depth' + if redirections: + if not response.has_key('location') and response.status != 300: + raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content) + # Fix-up relative redirects (which violate an RFC 2616 MUST) + if response.has_key('location'): + location = response['location'] + (scheme, authority, path, query, fragment) = parse_uri(location) + if authority == None: + response['location'] = urlparse.urljoin(absolute_uri, location) + if response.status == 301 and method in ["GET", "HEAD"]: + response['-x-permanent-redirect-url'] = response['location'] + if not response.has_key('content-location'): + 
+    def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
+        """Do the actual request using the connection object
+        and also follow one level of redirects if necessary"""
+
+        auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
+        auth = auths and sorted(auths)[0][1] or None
+        if auth:
+            auth.request(method, request_uri, headers, body)
+
+        (response, content) = self._conn_request(conn, request_uri, method, body, headers)
+
+        if auth:
+            if auth.response(response, body):
+                auth.request(method, request_uri, headers, body)
+                (response, content) = self._conn_request(conn, request_uri, method, body, headers)
+                response._stale_digest = 1
+
+        if response.status == 401:
+            for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
+                authorization.request(method, request_uri, headers, body)
+                (response, content) = self._conn_request(conn, request_uri, method, body, headers)
+                if response.status != 401:
+                    self.authorizations.append(authorization)
+                    authorization.response(response, body)
+                    break
+
+        if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
+            if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
+                # Pick out the location header and basically start from the beginning
+                # remembering first to strip the ETag header and decrement our 'depth'
+                if redirections:
+                    if not response.has_key('location') and response.status != 300:
+                        raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
+                    # Fix-up relative redirects (which violate an RFC 2616 MUST)
+                    if response.has_key('location'):
+                        location = response['location']
+                        (scheme, authority, path, query, fragment) = parse_uri(location)
+                        if authority == None:
+                            response['location'] = urlparse.urljoin(absolute_uri, location)
+                    if response.status == 301 and method in ["GET", "HEAD"]:
+                        response['-x-permanent-redirect-url'] = response['location']
+                        if not response.has_key('content-location'):
+                            response['content-location'] = absolute_uri
+                        _updateCache(headers, response, content, self.cache, cachekey)
+                    if headers.has_key('if-none-match'):
+                        del headers['if-none-match']
+                    if headers.has_key('if-modified-since'):
+                        del headers['if-modified-since']
+                    if response.has_key('location'):
+                        location = response['location']
+                        old_response = copy.deepcopy(response)
+                        if not old_response.has_key('content-location'):
+                            old_response['content-location'] = absolute_uri
+                        redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method
+                        (response, content) = self.request(location, redirect_method, body=body, headers=headers, redirections=redirections - 1)
+                        response.previous = old_response
+                else:
+                    raise RedirectLimit( _("Redirected more times than redirection_limit allows."), response, content)
+        elif response.status in [200, 203] and method == "GET":
+            # Don't cache 206's since we aren't going to handle byte range requests
+            if not response.has_key('content-location'):
+                response['content-location'] = absolute_uri
+            _updateCache(headers, response, content, self.cache, cachekey)
+
+        return (response, content)
+
+
+# Need to catch and rebrand some exceptions
+# Then need to optionally turn all exceptions into status codes
+# including all socket.* and httplib.* exceptions.
+
+
+    def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
+        """ Performs a single HTTP request.
+The 'uri' is the URI of the HTTP resource and can begin
+with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
+
+The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
+There is no restriction on the methods allowed.
+
+The 'body' is the entity body to be sent with the request. It is a string
+object.
+
+Any extra headers that are to be sent with the request should be provided in the
+'headers' dictionary.
+
+The maximum number of redirects to follow before raising an
+exception is 'redirections'. The default is 5.
+
+The return value is a tuple of (response, content), the first
+being an instance of the 'Response' class, the second being
+a string that contains the response entity body.
+ """ + try: + if headers is None: + headers = {} + else: + headers = _normalize_headers(headers) + + if not headers.has_key('user-agent'): + headers['user-agent'] = "Python-httplib2/%s" % __version__ + + uri = iri2uri(uri) + + (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) + + conn_key = scheme+":"+authority + if conn_key in self.connections: + conn = self.connections[conn_key] + else: + if not connection_type: + connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout + certs = list(self.certificates.iter(authority)) + if scheme == 'https' and certs: + conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0], + cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info) + else: + conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info) + conn.set_debuglevel(debuglevel) + + if method in ["GET", "HEAD"] and 'range' not in headers: + headers['accept-encoding'] = 'compress, gzip' + + info = email.Message.Message() + cached_value = None + if self.cache: + cachekey = defrag_uri + cached_value = self.cache.get(cachekey) + if cached_value: + info = email.message_from_string(cached_value) + try: + content = cached_value.split('\r\n\r\n', 1)[1] + except IndexError: + self.cache.delete(cachekey) + cachekey = None + cached_value = None + else: + cachekey = None + + if method in ["PUT"] and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers: + # http://www.w3.org/1999/04/Editing/ + headers['if-match'] = info['etag'] + + if method not in ["GET", "HEAD"] and self.cache and cachekey: + # RFC 2616 Section 13.10 + self.cache.delete(cachekey) + + if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers: + if info.has_key('-x-permanent-redirect-url'): + # Should cached permanent redirects be counted in our redirection count? For now, yes. + (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1) + response.previous = Response(info) + response.previous.fromcache = True + else: + # Determine our course of action: + # Is the cached entry fresh or stale? + # Has the client requested a non-cached response? + # + # There seems to be three possible answers: + # 1. [FRESH] Return the cache entry w/o doing a GET + # 2. [STALE] Do the GET (but add in cache validators if available) + # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request + entry_disposition = _entry_disposition(info, headers) + + if entry_disposition == "FRESH": + if not cached_value: + info['status'] = '504' + content = "" + response = Response(info) + if cached_value: + response.fromcache = True + return (response, content) + + if entry_disposition == "STALE": + if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers: + headers['if-none-match'] = info['etag'] + if info.has_key('last-modified') and not 'last-modified' in headers: + headers['if-modified-since'] = info['last-modified'] + elif entry_disposition == "TRANSPARENT": + pass + + (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) + + if response.status == 304 and method == "GET": + # Rewrite the cache entry with the new end-to-end headers + # Take all headers that are in response + # and overwrite their values in info. 
+ # unless they are hop-by-hop, or are listed in the connection header. + + for key in _get_end2end_headers(response): + info[key] = response[key] + merged_response = Response(info) + if hasattr(response, "_stale_digest"): + merged_response._stale_digest = response._stale_digest + _updateCache(headers, merged_response, content, self.cache, cachekey) + response = merged_response + response.status = 200 + response.fromcache = True + + elif response.status == 200: + content = new_content + else: + self.cache.delete(cachekey) + content = new_content + else: + (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) + except Exception, e: + if self.force_exception_to_status_code: + if isinstance(e, HttpLib2ErrorWithResponse): + response = e.response + content = e.content + response.status = 500 + response.reason = str(e) + elif isinstance(e, socket.timeout): + content = "Request Timeout" + response = Response( { + "content-type": "text/plain", + "status": "408", + "content-length": len(content) + }) + response.reason = "Request Timeout" + else: + content = str(e) + response = Response( { + "content-type": "text/plain", + "status": "400", + "content-length": len(content) + }) + response.reason = "Bad Request" + else: + raise + + + return (response, content) + + + +class Response(dict): + """An object more like email.Message than httplib.HTTPResponse.""" + + """Is this response from our local cache""" + fromcache = False + + """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """ + version = 11 + + "Status code returned by server. " + status = 200 + + """Reason phrase returned by server.""" + reason = "Ok" + + previous = None + + def __init__(self, info): + # info is either an email.Message or + # an httplib.HTTPResponse object. + if isinstance(info, httplib.HTTPResponse): + for key, value in info.getheaders(): + self[key] = value + self.status = info.status + self['status'] = str(self.status) + self.reason = info.reason + self.version = info.version + elif isinstance(info, email.Message.Message): + for key, value in info.items(): + self[key] = value + self.status = int(self['status']) + else: + for key, value in info.iteritems(): + self[key] = value + self.status = int(self.get('status', self.status)) + + + def __getattr__(self, name): + if name == 'dict': + return self + else: + raise AttributeError, name diff --git a/tutorius/apilib/httplib2/iri2uri.py b/tutorius/apilib/httplib2/iri2uri.py new file mode 100644 index 0000000..70667ed --- /dev/null +++ b/tutorius/apilib/httplib2/iri2uri.py @@ -0,0 +1,110 @@ +""" +iri2uri + +Converts an IRI to a URI. 
+ +""" +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = [] +__version__ = "1.0.0" +__license__ = "MIT" +__history__ = """ +""" + +import urlparse + + +# Convert an IRI to a URI following the rules in RFC 3987 +# +# The characters we need to enocde and escape are defined in the spec: +# +# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD +# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF +# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD +# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD +# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD +# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD +# / %xD0000-DFFFD / %xE1000-EFFFD + +escape_range = [ + (0xA0, 0xD7FF ), + (0xE000, 0xF8FF ), + (0xF900, 0xFDCF ), + (0xFDF0, 0xFFEF), + (0x10000, 0x1FFFD ), + (0x20000, 0x2FFFD ), + (0x30000, 0x3FFFD), + (0x40000, 0x4FFFD ), + (0x50000, 0x5FFFD ), + (0x60000, 0x6FFFD), + (0x70000, 0x7FFFD ), + (0x80000, 0x8FFFD ), + (0x90000, 0x9FFFD), + (0xA0000, 0xAFFFD ), + (0xB0000, 0xBFFFD ), + (0xC0000, 0xCFFFD), + (0xD0000, 0xDFFFD ), + (0xE1000, 0xEFFFD), + (0xF0000, 0xFFFFD ), + (0x100000, 0x10FFFD) +] + +def encode(c): + retval = c + i = ord(c) + for low, high in escape_range: + if i < low: + break + if i >= low and i <= high: + retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')]) + break + return retval + + +def iri2uri(uri): + """Convert an IRI to a URI. Note that IRIs must be + passed in a unicode strings. That is, do not utf-8 encode + the IRI before passing it into the function.""" + if isinstance(uri ,unicode): + (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri) + authority = authority.encode('idna') + # For each character in 'ucschar' or 'iprivate' + # 1. encode as utf-8 + # 2. 
then %-encode each octet of that utf-8 + uri = urlparse.urlunsplit((scheme, authority, path, query, fragment)) + uri = "".join([encode(c) for c in uri]) + return uri + +if __name__ == "__main__": + import unittest + + class Test(unittest.TestCase): + + def test_uris(self): + """Test that URIs are invariant under the transformation.""" + invariant = [ + u"ftp://ftp.is.co.za/rfc/rfc1808.txt", + u"http://www.ietf.org/rfc/rfc2396.txt", + u"ldap://[2001:db8::7]/c=GB?objectClass?one", + u"mailto:John.Doe@example.com", + u"news:comp.infosystems.www.servers.unix", + u"tel:+1-816-555-1212", + u"telnet://192.0.2.16:80/", + u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ] + for uri in invariant: + self.assertEqual(uri, iri2uri(uri)) + + def test_iri(self): + """ Test that the right type of escaping is done for each part of the URI.""" + self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}")) + self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}")) + self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}")) + self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}")) + self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")) + self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))) + self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8'))) + + unittest.main() + + diff --git a/tutorius/apilib/mimeTypes.py b/tutorius/apilib/mimeTypes.py new file mode 100644 index 0000000..ff8f641 --- /dev/null +++ b/tutorius/apilib/mimeTypes.py @@ -0,0 +1,57 @@ +""" + Copyright (C) 2008 Benjamin O'Steen + + This file is part of python-fedoracommons. + + python-fedoracommons is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + python-fedoracommons is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with python-fedoracommons. If not, see <http://www.gnu.org/licenses/>. 
+""" + +__license__ = 'GPL http://www.gnu.org/licenses/gpl.txt' +__author__ = "Benjamin O'Steen <bosteen@gmail.com>" +__version__ = '0.1' + +class mimeTypes(object): + def getDictionary(self): + mimetype_to_extension = {} + extension_to_mimetype = {} + mimetype_to_extension['text/plain'] = 'txt' + mimetype_to_extension['text/xml'] = 'xml' + mimetype_to_extension['text/css'] = 'css' + mimetype_to_extension['text/javascript'] = 'js' + mimetype_to_extension['text/rtf'] = 'rtf' + mimetype_to_extension['text/calendar'] = 'ics' + mimetype_to_extension['application/msword'] = 'doc' + mimetype_to_extension['application/msexcel'] = 'xls' + mimetype_to_extension['application/x-msword'] = 'doc' + mimetype_to_extension['application/vnd.ms-excel'] = 'xls' + mimetype_to_extension['application/vnd.ms-powerpoint'] = 'ppt' + mimetype_to_extension['application/pdf'] = 'pdf' + mimetype_to_extension['text/comma-separated-values'] = 'csv' + + + mimetype_to_extension['image/jpeg'] = 'jpg' + mimetype_to_extension['image/gif'] = 'gif' + mimetype_to_extension['image/jpg'] = 'jpg' + mimetype_to_extension['image/tiff'] = 'tiff' + mimetype_to_extension['image/png'] = 'png' + + # And hacky reverse lookups + for mimetype in mimetype_to_extension: + extension_to_mimetype[mimetype_to_extension[mimetype]] = mimetype + + mimetype_extension_mapping = {} + mimetype_extension_mapping.update(mimetype_to_extension) + mimetype_extension_mapping.update(extension_to_mimetype) + + return mimetype_extension_mapping diff --git a/tutorius/apilib/restful_lib.py b/tutorius/apilib/restful_lib.py new file mode 100644 index 0000000..e1ee0af --- /dev/null +++ b/tutorius/apilib/restful_lib.py @@ -0,0 +1,129 @@ +""" + Copyright (C) 2008 Benjamin O'Steen + + This file is part of python-fedoracommons. + + python-fedoracommons is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + python-fedoracommons is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with python-fedoracommons. If not, see <http://www.gnu.org/licenses/>. 
+""" + +__license__ = 'GPL http://www.gnu.org/licenses/gpl.txt' +__author__ = "Benjamin O'Steen <bosteen@gmail.com>" +__version__ = '0.1' + +import httplib2 +import urlparse +import urllib +import base64 +from base64 import encodestring + +from mimeTypes import * + +import mimetypes + +from cStringIO import StringIO + +class Connection: + def __init__(self, base_url, username=None, password=None): + self.base_url = base_url + self.username = username + m = mimeTypes() + self.mimetypes = m.getDictionary() + + self.url = urlparse.urlparse(base_url) + + (scheme, netloc, path, query, fragment) = urlparse.urlsplit(base_url) + + self.scheme = scheme + self.host = netloc + self.path = path + + # Create Http class with support for Digest HTTP Authentication, if necessary + self.h = httplib2.Http(".cache") + self.h.follow_all_redirects = True + if username and password: + self.h.add_credentials(username, password) + + def request_get(self, resource, args = None, headers={}): + return self.request(resource, "get", args, headers=headers) + + def request_delete(self, resource, args = None, headers={}): + return self.request(resource, "delete", args, headers=headers) + + def request_head(self, resource, args = None, headers={}): + return self.request(resource, "head", args, headers=headers) + + def request_post(self, resource, args = None, body = None, filename=None, headers={}): + return self.request(resource, "post", args , body = body, filename=filename, headers=headers) + + def request_put(self, resource, args = None, body = None, filename=None, headers={}): + return self.request(resource, "put", args , body = body, filename=filename, headers=headers) + + def get_content_type(self, filename): + extension = filename.split('.')[-1] + guessed_mimetype = self.mimetypes.get(extension, mimetypes.guess_type(filename)[0]) + return guessed_mimetype or 'application/octet-stream' + + def request(self, resource, method = "get", args = None, body = None, filename=None, headers={}): + params = None + path = resource + headers['User-Agent'] = 'Basic Agent' + + BOUNDARY = u'00hoYUXOnLD5RQ8SKGYVgLLt64jejnMwtO7q8XE1' + CRLF = u'\r\n' + + if filename and body: + #fn = open(filename ,'r') + #chunks = fn.read() + #fn.close() + + # Attempt to find the Mimetype + content_type = self.get_content_type(filename) + headers['Content-Type']='multipart/form-data; boundary='+BOUNDARY + encode_string = StringIO() + encode_string.write(CRLF) + encode_string.write(u'--' + BOUNDARY + CRLF) + encode_string.write(u'Content-Disposition: form-data; name="file"; filename="%s"' % filename) + encode_string.write(CRLF) + encode_string.write(u'Content-Type: %s' % content_type + CRLF) + encode_string.write(CRLF) + encode_string.write(body) + encode_string.write(CRLF) + encode_string.write(u'--' + BOUNDARY + u'--' + CRLF) + + body = encode_string.getvalue() + headers['Content-Length'] = str(len(body)) + elif body: + if not headers.get('Content-Type', None): + headers['Content-Type']='text/xml' + headers['Content-Length'] = str(len(body)) + else: + headers['Content-Type']='text/xml' + + if args: + path += u"?" 
+ urllib.urlencode(args) + + request_path = [] + if self.path != "/": + if self.path.endswith('/'): + request_path.append(self.path[:-1]) + else: + request_path.append(self.path) + if path.startswith('/'): + request_path.append(path[1:]) + else: + request_path.append(path) + + resp, content = self.h.request(u"%s://%s%s" % (self.scheme, self.host, u'/'.join(request_path)), method.upper(), body=body, headers=headers ) + + return {u'headers':resp, u'body':content.decode('UTF-8')} diff --git a/tutorius/engine.py b/tutorius/engine.py index e77a018..b0a49a8 100644 --- a/tutorius/engine.py +++ b/tutorius/engine.py @@ -4,17 +4,100 @@ from jarabe.model import shell from sugar.bundle.activitybundle import ActivityBundle from .vault import Vault +from .TProbe import ProbeManager +from .dbustools import save_args + + +class TutorialRunner(object): + """ + Driver for the execution of one tutorial + """ + def __init__(self, tutorial, probeManager): + """Constructor + @param tutorial Tutorial to execute + @param probeManager probeManager to use + """ + self._tutorial = tutorial + self._pM = probeManager + + #State + self._state = None + self._sEvents = set() #Subscribed Events + + #Cached objects + self._actions = {} + + #Temp FIX until event/actions have an activity id + self._activity_id = None + + #Temp FIX until event, actions have an activity id + def setCurrentActivity(self): + self._pM.currentActivity = self._activity_id + + def start(self): + self.setCurrentActivity() #Temp Hack until activity in events/actions + self.setState(self._tutorial.INIT) + + def stop(self): + self.setCurrentActivity() #Temp Hack until activity in events/actions + self.setState(self._tutorial.END) + self._teardownState() + self._state = None + + def _handleEvent(self, next_state, event): + #FIXME sanity check + self.setState(next_state) + + def _teardownState(self): + if self._state is None: + #No state, no teardown + return + + #Clear the current actions + for action in self._actions.values(): + self._pM.uninstall(action) + self._actions = {} + + #Clear the EventFilters + for event in self._sEvents: + self._pM.unsubscribe(event) + self._sEvents.clear() + + def _setupState(self): + if self._state is None: + raise RuntimeError("Attempting to setupState without a state") + + self._actions = self._tutorial.get_action_dict(self._state) + transitions = self._tutorial.get_transition_dict(self._state) + for (event, next_state) in transitions.values(): + self._sEvents.add(self._pM.subscribe(event, save_args(self._handleEvent, next_state))) + for action in self._actions.values(): + self._pM.install(action) + + def setState(self, state_name): + self.setCurrentActivity() #Temp Hack until activity in events/actions + if state_name == self._state: + #Nothing to do + return + + self._teardownState() + self._state = state_name + self._setupState() + class Engine: """ Driver for the execution of tutorials """ - def __init__(self): - # FIXME Probe management should be in the probe manager + def __init__(self, probeManager=None): + """Constructor + @param probeManager (optional) ProbeManager instance to use + """ dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) #FIXME shell.get_model() will only be useful in the shell process self._shell = shell.get_model() + self._probeManager = probeManager or ProbeManager() self._tutorial = None def launch(self, tutorialID): @@ -22,25 +105,33 @@ class Engine: @param tutorialID unique tutorial identifier used to retrieve it from the disk """ if self._tutorial: - self._tutorial.detach() - 
self._tutorial = None + self.stop() + + self._tutorial = TutorialRunner(Vault.loadTutorial(tutorialID), self._probeManager) #Get the active activity from the shell activity = self._shell.get_active_activity() - self._tutorial = Vault.loadTutorial(tutorialID) - #TProbes automatically use the bundle id, available from the ActivityBundle bundle = ActivityBundle(activity.get_bundle_path()) - self._tutorial.attach(bundle.get_bundle_id()) - def stop(self): + self._tutorial._activity_id = bundle.get_bundle_id() #HACK until we have activity id's in action/events + + self._tutorial.start() + + def stop(self, tutorialID=None): """ Stop the current tutorial """ - self._tutorial.detach() + if tutorialID is None: + logging.warning( + "stop() without a tutorialID will become deprecated") + self._tutorial.stop() self._tutorial = None - def pause(self): + def pause(self, tutorialID=None): """ Interrupt the current tutorial and save its state in the journal """ + if tutorialID is None: + logging.warning( \ + "pause() without a tutorialID will become deprecated") raise NotImplementedError("Unable to store tutorial state") diff --git a/tutorius/gtkutils.py b/tutorius/gtkutils.py index 1a9cb0f..c96a73f 100644 --- a/tutorius/gtkutils.py +++ b/tutorius/gtkutils.py @@ -33,7 +33,7 @@ def raddr_lookup(widget): return ".".join(name) -def find_widget(base, target_fqdn): +def find_widget(base, target_fqdn, ignore_errors=True): """Find a widget by digging into a parent widget's children tree @param base the parent widget @param target_fqdn fqdn-style target object name @@ -57,7 +57,9 @@ def find_widget(base, target_fqdn): try: obj = get_children(obj)[int(path.pop(0))] except: - break + if ignore_errors: + break + return None return obj diff --git a/tutorius/service.py b/tutorius/service.py index 8694cb5..11a94a5 100644 --- a/tutorius/service.py +++ b/tutorius/service.py @@ -38,7 +38,7 @@ class Service(dbus.service.Object): @param tutorialID unique tutorial identifier used to retrieve it from the disk """ if self._engine == None: - self._engine = Engine() + self._engine = Engine(self._probeMgr) self._engine.launch(tutorialID) @dbus.service.method(_DBUS_SERVICE_IFACE, diff --git a/tutorius/store.py b/tutorius/store.py index 9c8bdff..cf20dd0 100644 --- a/tutorius/store.py +++ b/tutorius/store.py @@ -15,6 +15,10 @@ # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA import urllib +import urllib2 +from xml.dom import minidom +from apilib.restful_lib import Connection +from array import array class StoreProxy(object): """ @@ -23,78 +27,232 @@ class StoreProxy(object): shop to implement all the requests that could be made to the Store. """ + def __init__(self, base_url): + + # Base Urls for the api + self.base_url = base_url + self.remora_api = "api/1.4" + self.tutorius_api = "TutoriusApi" + self.bandwagon_api = "api/1.4/sharing" + + self.api_auth_key = None + + # Prepares the connection with the api + self.conn = Connection(self.base_url) + + # Setup the helper + self.helper = StoreProxyHelper() + def get_categories(self): """ Returns all the categories registered in the store. Categories are used to classify tutorials according to a theme. (e.g. Mathematics, History, etc...) - + @return The list of category names stored on the server. 
""" - raise NotImplementedError("get_categories() not implemented") - def get_tutorials(self, keywords=None, category=None, startIndex=0, numResults=10, sortBy='name'): + request_url = "/%s/categories" % (self.tutorius_api) + + response = self.conn.request_get(request_url) + + if self.helper.iserror(response): + return None + + xml_response = minidom.parseString(response['body']) + + xml_categories = xml_response.getElementsByTagName('category') + + categories = list() + + # Loop through the categories and create the list to be returned + for xml_category in xml_categories: + category = {} + + category['id'] = xml_category.getElementsByTagName('id')[0].firstChild.nodeValue + category['name'] = xml_category.getElementsByTagName('name')[0].firstChild.nodeValue + + categories.append(category) + + return categories + + def search(self, keywords, category='all', page=1, numResults=10, sortBy='name'): """ - Returns the list of tutorials that correspond to the given search criteria. + Returns a list of tutorials that correspond to the given search criteria. + + @param keywords The keywords to search for + @param page The page in the result set from which to return results. This is + used to allow applications to fetch results one set at a time. + @param numResults The max number of results that can be returned in a page + @param sortBy The field on which to sort the results + @return A list of tutorial meta-data that corresponds to the query + """ + request_url = "/%s/search/%s/%s/%d/%d/%s" % (self.tutorius_api, keywords, category, page, numResults, sortBy) + + response = self.conn.request_get(request_url) + + if (self.helper.iserror(response)): + return None + + xml_response = minidom.parseString(response['body']) - @param keywords The list of keywords that should be matched inside the tutorial title - or description. If None, the search will not filter the results - according to the keywords. + xml_tutorials = xml_response.getElementsByTagName('tutorial') + + tutorials = list() + + for xml_tutorial in xml_tutorials: + tutorial = self.helper.parse_tutorial(xml_tutorial) + tutorials.append(tutorial) + + return tutorials + + def get_tutorials(self, category='all', page=1, numResults=10, sortBy='name'): + """ + Returns the list of tutorials that correspond to the given search criteria. + @param category The category in which to restrict the search. - @param startIndex The index in the result set from which to return results. This is + @param page The page in the result set from which to return results. This is used to allow applications to fetch results one set at a time. 
- @param numResults The max number of results that can be returned + @param numResults The max number of results that can be returned in a page @param sortBy The field on which to sort the results @return A list of tutorial meta-data that corresponds to the query """ - raise NotImplementedError("get_tutorials() not implemented") + + request_url = "/%s/tutorials/%s/%d/%d/%s" % (self.tutorius_api, category, page, numResults, sortBy) + + response = self.conn.request_get(request_url) + + if (self.helper.iserror(response)): + return None + + xml_response = minidom.parseString(response['body']) + + xml_tutorials = xml_response.getElementsByTagName('tutorial') + + tutorials = list() - def get_tutorial_collection(self, collection_name): + for xml_tutorial in xml_tutorials: + tutorial = self.helper.parse_tutorial(xml_tutorial) + tutorials.append(tutorial) + + return tutorials + + def list(self, type='recommended', numResults=3): """ - Returns a list of tutorials corresponding to the given collection name. - Collections can be groups like '5 most downloaded' or 'Top 10 ratings'. + Returns a list of tutorials corresponding to the type specified. + Type examples: 'Most downloaded', 'recommended', etc. - @param collection_name The name of the collection from which we want the - meta-data - @return A list of tutorial meta-data corresponding to the given group + @param type The type of list (Most downloaded, recommended, etc.) + @return A list of tutorials """ - raise NotImplementedError("get_tutorial_collection() not implemented... yet!") + request_url = "/%s/list/%s/tutorial/%s" % (self.remora_api, type, numResults) + + response = self.conn.request_get(request_url) + + if (self.helper.iserror(response)): + return None + + xml_response = minidom.parseString(response['body']) + + xml_tutorials = xml_response.getElementsByTagName('addon') + + tutorials = list() + + for xml_tutorial in xml_tutorials: + tutorial = self.helper.parse_tutorial(xml_tutorial) + tutorials.append(tutorial) + + return tutorials + def get_latest_version(self, tutorial_id_list): """ Returns the latest version number on the server, for each tutorial ID in the list. - + @param tutorial_id_list The list of tutorial IDs from which we want to known the latest version number. - @return A dictionary having the tutorial ID as the key and the version + @return A dictionary having the tutorial ID as the key and the version as the value. """ - raise NotImplementedError("get_latest_version() not implemented") - + + versions = {} + + for tutorial_id in tutorial_id_list: + + request_url = "/%s/addon/%s/" % (self.remora_api, tutorial_id) + + response = self.conn.request_get(request_url) + + if (self.helper.iserror(response)): + return None + + xml = minidom.parseString(response['body']) + + versionnode = xml.getElementsByTagName("version")[0] + + version = versionnode.firstChild.nodeValue + + versions[tutorial_id] = version + + return versions + def download_tutorial(self, tutorial_id, version=None): """ - Fetches the tutorial file from the server and returns the - + Fetches the tutorial file from the server and returns the + @param tutorial_id The tutorial that we want to get @param version The version number that we want to download. If None, the latest version will be downloaded. @return The downloaded file itself (an in-memory representation of the file, not a path to it on the disk) - + TODO : We should decide if we're saving to disk or in mem. 
""" - raise NotImplementedError("downloadTutorial() not implemented") + request_url = "/%s/addon/%s/" % (self.remora_api, tutorial_id) + + response = self.conn.request_get(request_url) + if (self.helper.iserror(response)): + return None + + xml = minidom.parseString(response['body']) + + installnode = xml.getElementsByTagName("install")[0] + installurl = installnode.firstChild.nodeValue + + fp = urllib.urlopen(installurl) + + return fp + def login(self, username, password): """ Logs in the user on the store and saves the login status in the proxy state. After a successful logon, the operation requiring a login will be successful. - + + @param username + @param password @return True if the login was successful, False otherwise """ - raise NotImplementedError("login() not implemented yet") + request_url = "/%s/auth/" % (self.tutorius_api) + + params = {'username': username, 'password': password} + + response = self.conn.request_post(request_url, params) + if (self.helper.iserror(response)): + return False + + xml_response = minidom.parseString(response['body']) + + keynode = xml_response.getElementsByTagName("token")[0] + + key = keynode.getAttribute('value') + + self.api_auth_key = key + + return True + def close_session(self): """ Ends the user's session on the server and changes the state of the proxy @@ -102,72 +260,218 @@ class StoreProxy(object): @return True if the user was disconnected, False otherwise """ - raise NotImplementedError("close_session() not implemented yet") + request_url = "/%s/auth/%s" % (self.tutorius_api, self.api_auth_key) + + headers = { 'X-API-Auth' : self.api_auth_key } + + response = self.conn.request_delete(request_url, None, headers) + if (self.helper.iserror(response)): + return False + + self.api_auth_key = None + + return True + def get_session_id(self): """ Gives the current session ID cached in the Store Proxy, or returns None is the user is not logged yet. - + @return The current session's ID, or None if the user is not logged """ - raise NotImplementedError("get_session_id() not implemented yet") - + return self.api_auth_key + def rate(self, value, tutorial_store_id): """ Sends a rating for the given tutorial. - + This function requires the user to be logged in. - - @param value The value of the rating. It must be an integer with a value + + @param value The value of the rating. It must be an integer with a value from 1 to 5. @param tutorial_store_id The ID of the tutorial that was rated @return True if the rating was sent to the Store, False otherwise. """ - raise NotImplementedError("rate() not implemented") + request_url = "/%s/review/%s" % (self.tutorius_api, tutorial_store_id) + + params = {'title': 'from api', 'body': 'from api', 'rating': value} + headers = { 'X-API-Auth' : self.api_auth_key } + + response = self.conn.request_post(request_url, params, None, None, headers) - def publish(self, tutorial): + if self.helper.iserror(response): + return False + + return True + + def publish(self, tutorial, tutorial_info=None, tutorial_store_id = None): """ Sends a tutorial to the store. - + This function requires the user to be logged in. - @param tutorial The tutorial file to be sent. Note that this is the + @param tutorial The tutorial file to be sent. Note that this is the content itself and not the path to the file. + @param tutorial_info An array containing the tutorial information @return True if the tutorial was sent correctly, False otherwise. 
""" - raise NotImplementedError("publish() not implemented") + + # This is in the case we have to re-publish a tutorial + if tutorial_store_id is not None: + request_url = "/%s/publish/%s" % (self.tutorius_api, tutorial_store_id) + headers = { 'X-API-Auth' : self.api_auth_key } + + response = self.conn.request_post(request_url, None, None, None, headers) + if self.helper.iserror(response): + return -1 + + return tutorial_store_id + + # Otherwise, we want to publish a new tutorial + if tutorial_info == None: + return -1 + + request_url = "/%s/publish/" % (self.tutorius_api) + + headers = { 'X-API-Auth' : self.api_auth_key } + + response = self.conn.request_post(request_url, tutorial_info, tutorial, tutorial_info['filename'], headers) + + if self.helper.iserror(response): + return -1 + + xml_response = minidom.parseString(response['body']) + + id_node = xml_response.getElementsByTagName("id")[0] + + id = id_node.getAttribute('value') + + return id def unpublish(self, tutorial_store_id): """ Removes a tutorial from the server. The user in the current session - needs to be the creator for it to be unpublished. This will remove + needs to be the creator for it to be unpublished. This will remove the file from the server and from all its collections and categories. - + This function requires the user to be logged in. - + @param tutorial_store_id The ID of the tutorial to be removed @return True if the tutorial was properly removed from the server """ - raise NotImplementedError("unpublish() not implemented") + request_url = "/%s/publish/%s" % (self.tutorius_api, tutorial_store_id) - def update_published_tutorial(self, tutorial_id, tutorial): + headers = { 'X-API-Auth' : self.api_auth_key } + response = self.conn.request_delete(request_url, None, headers) + + if self.helper.iserror(response): + return False + + return True + + def update_published_tutorial(self, tutorial_id, tutorial, tutorial_info): """ Sends the new content for the tutorial with the given ID. - + This function requires the user to be logged in. - + @param tutorial_id The ID of the tutorial to be updated @param tutorial The bundled tutorial file content (not a path!) @return True if the tutorial was sent and updated, False otherwise """ - raise NotImplementedError("update_published_tutorial() not implemented yet") + request_url = "/%s/update/%s" % (self.tutorius_api, tutorial_id) + + headers = { 'X-API-Auth' : self.api_auth_key } + response = self.conn.request_post(request_url, tutorial_info, tutorial, tutorial_info['filename'], headers) + + if self.helper.iserror(response): + return False + + return True + + def register_new_user(self, user_info): """ - Creates a new user from the given user information. + Creates a new user from the given user information. @param user_info A structure containing all the data required to do a login. 
    def register_new_user(self, user_info):
         """
         Creates a new user from the given user information.
 
         @param user_info A structure containing all the data required to do a
         login.
         @return True if the new account was created, False otherwise
         """
-        raise NotImplementedError("register_new_user() not implemented")
+        request_url = "/%s/registerNewUser" % (self.tutorius_api)
+
+        params = {'nickname': user_info['nickname'], 'password': user_info['password'], 'email': user_info['email']}
+
+        response = self.conn.request_post(request_url, params)
+
+        if self.helper.iserror(response):
+            return False
+
+        return True
+
+
+class StoreProxyHelper(object):
+    """
+    Implements helper methods for the Store, more specifically
+    methods to handle xml responses and errors.
+    """
+
+    def iserror(self, response):
+        """
+        Checks if the response received from the server is an error.
+
+        @param response The XML response from the server
+        @return True if the response is an error
+        """
+
+        # First look for HTTP errors
+        http_status = response['headers']['status']
+
+        if http_status in ['400', '401', '403', '500']:
+            return True
+
+        # Now check if the response is valid XML
+        try:
+            minidom.parseString(response['body'])
+        except Exception, e:
+            return True
+
+        # The response is valid XML; parse it and look for
+        # an error in xml format
+        xml_response = minidom.parseString(response['body'])
+
+        errors = xml_response.getElementsByTagName('error')
+
+        if (len(errors) > 0):
+            return True
+
+        return False
+
+    def parse_tutorial(self, xml_tutorial):
+        """
+        Parses a tutorial's XML metadata and returns a dictionary
+        containing the metadata.
+
+        @param xml_tutorial The tutorial metadata in XML format
+        @return A dictionary containing the metadata
+        """
+        tutorial = {}
+
+        params = [
+            'name',
+            'summary',
+            'version',
+            'description',
+            'author',
+            'rating'
+        ]
+
+        for param in params:
+            xml_node = xml_tutorial.getElementsByTagName(param)[0].firstChild
+
+            if xml_node != None:
+                tutorial[param] = xml_node.nodeValue
+            else:
+                tutorial[param] = ''
+
+        return tutorial
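The helper's error test treats three situations as failures: an HTTP error status, a body that is not well-formed XML, and a well-formed body carrying an <error> element. A minimal self-check of that behavior, assuming the helper lives in tutorius.store:

    from tutorius.store import StoreProxyHelper   # assumed module path

    helper = StoreProxyHelper()
    ok = {'headers': {'status': '200'}, 'body': '<tutorials/>'}
    bad = {'headers': {'status': '401'}, 'body': '<error/>'}
    assert not helper.iserror(ok)    # 200 + parseable XML + no <error> element
    assert helper.iserror(bad)       # 401 alone is enough to fail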
diff --git a/tutorius/tutorial.py b/tutorius/tutorial.py
index 9831a7b..b45363f 100644
--- a/tutorius/tutorial.py
+++ b/tutorius/tutorial.py
@@ -67,7 +67,8 @@ class Tutorial(object):
             self.add_transition(Tutorial.INIT, \
                 (AutomaticTransitionEvent(), Tutorial.END))
         else:
-            raise NotImplementedError("Tutorial: Initilization from a dictionary is not supported yet")
+            self._state_dict = state_dict
+
         # Minimally check for the presence of an INIT and an END
@@ -528,15 +529,20 @@ class Tutorial(object):
 
     def _generate_unique_state_name(self):
         name = "State" + str(self._state_name_nb)
-        self._state_name_nb += 1
+        while name in self._state_dict:
+            self._state_name_nb += 1
+            name = "State" + str(self._state_name_nb)
         return name
+
     # Python Magic Methods
     def __str__(self):
         """
         Return a string representation of the tutorial
         """
         return str(self._state_dict)
 
+    def __eq__(self, other):
+        return isinstance(other, type(self)) and self.get_state_dict() == other.get_state_dict()
 
 class State(object):
     """
@@ -548,16 +554,20 @@ class State(object):
     inputs, the validation should be done by the containing class.
     """
 
-    def __init__(self, name, action_list=(), transition_list=()):
+    def __init__(self, name, actions={}, transitions={}):
         """
         Initializes the content of the state, such as loading the
         actions that are required and building the correct transitions.
 
-        @param action_list The list of actions to execute when entering this
+        @param actions list or dict of actions to perform when entering the
         state
-        @param transition_list A list of tuples of the form
+        @param transitions list or dict of tuples of the form
         (event, next_state_name), that explain the outgoing links for
         this state
+
+        For actions and transitions, dictionaries allow specifying the name.
+        If lists are given, their contents will be added with add_action or
+        add_transition.
         """
         object.__init__(self)
@@ -567,13 +577,19 @@ class State(object):
         self.action_name_nb = 0
         self.transition_name_nb = 0
 
-        self._actions = {}
-        for action in action_list:
-            self.add_action(action)
-
-        self._transitions = {}
-        for transition in transition_list:
-            self.add_transition(transition)
+        if type(actions) is dict:
+            self._actions = dict(actions)
+        else:
+            self._actions = {}
+            for action in actions:
+                self.add_action(action)
+
+        if type(transitions) is dict:
+            self._transitions = dict(transitions)
+        else:
+            self._transitions = {}
+            for transition in transitions:
+                self.add_transition(transition)
 
     # Action manipulations
@@ -741,7 +757,9 @@ class State(object):
         # to make it easier to debug and know what we are
         # manipulating
         name = self.name + _NAME_SEPARATOR + "action" + str(self.action_name_nb)
-        self.action_name_nb += 1
+        while name in self._actions:
+            self.action_name_nb += 1
+            name = self.name + _NAME_SEPARATOR + "action" + str(self.action_name_nb)
         return name
 
     def _generate_unique_transition_name(self, transition):
@@ -757,7 +775,9 @@ class State(object):
         # generate a name to make it easier to debug and know
         # what we are manipulating
         name = self.name + _NAME_SEPARATOR + "transition" + str(self.transition_name_nb)
-        self.transition_name_nb += 1
+        while name in self._transitions:
+            self.transition_name_nb += 1
+            name = self.name + _NAME_SEPARATOR + "transition" + str(self.transition_name_nb)
         return name
 
     def __eq__(self, otherState):
@@ -775,12 +795,15 @@ class State(object):
         @param otherState The state that will be compared to this one
         @return True if the states are the same, False otherwise
         """
-        raise NotImplementedError
+        return isinstance(otherState, type(self)) and \
+               self.get_action_dict() == otherState.get_action_dict() and \
+               self.get_transition_dict() == otherState.get_transition_dict()
 
 #TODO: Define the automatic transition in the same way as
 #      other events
 class AutomaticTransitionEvent(TPropContainer):
-    pass
+    def __repr__(self):
+        return str(self.__class__.__name__)
 
 ################## Error Handling and Exceptions ##############################
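The reworked State constructor accepts either form: dictionaries keep caller-chosen names, while lists fall back to the generated name counters, which now skip names already taken. A small sketch of the two spellings; BubbleMessage is a hypothetical addon name here, and only the State import is confirmed by this diff:

    from tutorius.tutorial import State
    from tutorius import addon                          # factory used elsewhere in vault.py

    msg = addon.create('BubbleMessage', message='Hi')   # hypothetical addon name
    s1 = State('intro', actions={'action0': msg})       # dict: caller names the action
    s2 = State('intro', actions=[msg])                  # list: a name is generated
    # State.__eq__ compares the action and transition dicts, so s1 == s2 holds
    # only when the generated name happens to equal the explicit one.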
diff --git a/tutorius/vault.py b/tutorius/vault.py
index b455a52..7ec0a23 100644
--- a/tutorius/vault.py
+++ b/tutorius/vault.py
@@ -31,7 +31,7 @@ import zipfile
 from ConfigParser import SafeConfigParser
 
 from . import addon
-from .core import Tutorial, State, FiniteStateMachine
+from .tutorial import Tutorial, State, AutomaticTransitionEvent
 
 logger = logging.getLogger("tutorius")
 
@@ -58,10 +58,23 @@ INI_XML_FSM_PROPERTY = "fsm_filename"
 INI_VERSION_PROPERTY = 'version'
 INI_FILENAME = "meta.ini"
 TUTORIAL_FILENAME = "tutorial.xml"
+RESOURCES_FOLDER = 'resources'
+
+######################################################################
+# XML Tag names and attributes
+######################################################################
+ELEM_FSM = "FSM"
+ELEM_STATES = "States"
+ELEM_STATE = "State"
+ELEM_ACTIONS = "Actions"
+ELEM_TRANS = "Transitions"
+ELEM_AUTOTRANS = "AutomaticTransition"
 NODE_COMPONENT = "Component"
 NODE_SUBCOMPONENT = "property"
 NODE_SUBCOMPONENTLIST = "listproperty"
-NEXT_STATE_ATTR = "next_state"
+NAME_ATTR = "__name__"
+NEXT_STATE_ATTR = "__next_state__"
+START_STATE_ATTR = "__start_state__"
 
 class Vault(object):
@@ -73,7 +86,7 @@ class Vault(object):
         given activity.
 
         @param activity_name the name of the activity associated with this tutorial. None means ALL activities
-        @param activity_vers the version number of the activity to find tutorail for. 0 means find for ANY version. If activity_name is None, version number is not used
+        @param activity_vers the version number of the activity to find tutorials for. 0 means find for ANY version. If activity_name is None, version number is not used
         @returns a map of tutorial {names : GUID}.
         """
         # check both under the activity data and user installed folders
@@ -237,12 +250,14 @@ class Vault(object):
 
         # Return tutorial list
         return tutorial_list
+
     @staticmethod
     def loadTutorial(Guid):
         """
         Creates an executable version of a tutorial from its saved
         representation.
 
-        @returns an executable representation of a tutorial
+        @param Guid Unique identifier of the tutorial
+        @returns Tutorial object
         """
         bundle = TutorialBundler(Guid)
@@ -253,15 +268,20 @@ class Vault(object):
         serializer = XMLSerializer()
 
         name = config.get(INI_METADATA_SECTION, INI_NAME_PROPERTY)
-        fsm = serializer.load_fsm(Guid, bundle_path)
-
-        tuto = Tutorial(name, fsm)
-        return tuto
+        # Open the XML file
+        tutorial_file = os.path.join(bundle_path, TUTORIAL_FILENAME)
+        with open(tutorial_file, 'r') as tfile:
+            tutorial = serializer.load_tutorial(tfile)
+
+        return tutorial
 
     @staticmethod
     def saveTutorial(tutorial, metadata_dict):
         """
         Creates a persistent version of a tutorial in the Vault.
 
+        @param tutorial Tutorial
+        @param metadata_dict dictionary of metadata for the Tutorial
         @returns True if the tutorial was saved correctly
         """
 
@@ -275,7 +295,9 @@ class Vault(object):
         # Serialize the tutorial and write it to disk
         xml_ser = XMLSerializer()
         os.makedirs(tutorial_path)
-        xml_ser.save_fsm(tutorial.state_machine, TUTORIAL_FILENAME, tutorial_path)
+
+        with open(os.path.join(tutorial_path, TUTORIAL_FILENAME), 'w') as fsmfile:
+            xml_ser.save_tutorial(tutorial, fsmfile)
 
         # Create the metadata file
         ini_file_path = os.path.join(tutorial_path, "meta.ini")
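The loadTutorial/saveTutorial pair now round-trips through the new serializer instead of the old FSM loader. A sketch of that flow; the metadata dictionary's exact required shape is defined elsewhere in vault.py, so the 'name' key below is an assumption based on the INI_* properties:

    from tutorius.vault import Vault
    from tutorius.tutorial import Tutorial

    tuto = Tutorial('Getting started')       # default INIT -> END skeleton
    meta = {'name': 'Getting started'}       # hypothetical minimal metadata
    Vault.saveTutorial(tuto, meta)
    # Later, with the GUID the Vault assigned at save time:
    # loaded = Vault.loadTutorial(guid)
    # assert loaded == tuto                  # Tutorial.__eq__ compares state dicts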
@@ -303,7 +325,7 @@ class Vault(object):
 
     @staticmethod
-    def deleteTutorial(Tutorial):
+    def deleteTutorial(Guid):
         """
         Removes the tutorial from the Vault. It will unpublish the tutorial
         if need be, and it will also wipe it from the persistent storage.
@@ -321,13 +343,82 @@ class Vault(object):
 
         return False
 
+    @staticmethod
+    def add_resource(tutorial_guid, file_path):
+        """
+        Adds the given resource file to the vault and returns a unique name
+        for this resource, composed from the original name of the file and a
+        suffix to make it unique (e.g. name_1.jpg).
+
+        @param tutorial_guid The guid of the tutorial
+        @param file_path the file path of the resource to add
+        @returns the resource_id of the resource
+        """
+        assert os.path.isfile(file_path)
+        # Get the tutorial path
+        bundler = TutorialBundler(tutorial_guid)
+        tutorial_path = bundler.get_tutorial_path(tutorial_guid)
+        # Get the file name
+        file_name = os.path.basename(file_path)
+        base_name, extension = os.path.splitext(file_name)
+        # Append a unique suffix to the file name
+        file_name_appended = base_name + '_' + str(uuid.uuid1()) + extension
+        # Check if the resource file already exists
+        new_file_path = os.path.join(tutorial_path, RESOURCES_FOLDER, file_name_appended)
+        if os.path.isfile(new_file_path) == False:
+            # Copy the resource file into the vault
+            if os.path.isdir(os.path.join(tutorial_path, RESOURCES_FOLDER)) == False:
+                os.makedirs(os.path.join(tutorial_path, RESOURCES_FOLDER))
+            shutil.copyfile(file_path, new_file_path)
+
+        return file_name_appended
+
+    @staticmethod
+    def delete_resource(tutorial_guid, resource_id):
+        """
+        Deletes the resource from the resources of the tutorial.
+
+        @param tutorial_guid the guid of the tutorial
+        @param resource_id the resource id of the resource to delete
+        """
+        # Get the tutorial path
+        bundler = TutorialBundler(tutorial_guid)
+        tutorial_path = bundler.get_tutorial_path(tutorial_guid)
+        # Check if the resource file exists
+        file_path = os.path.join(tutorial_path, RESOURCES_FOLDER, resource_id)
+        if os.path.isfile(file_path):
+            # Delete the resource
+            os.remove(file_path)
+        else:
+            logging.info('File not found, no delete took place')
+
+    @staticmethod
+    def get_resource_path(tutorial_guid, resource_id):
+        """
+        Returns the absolute file path to the resource_id.
+
+        @param tutorial_guid the guid of the tutorial
+        @param resource_id the resource id of the resource to find the path for
+        @returns the absolute path of the resource file, or None if it is missing
+        """
+        # Get the tutorial path
+        bundler = TutorialBundler(tutorial_guid)
+        tutorial_path = bundler.get_tutorial_path(tutorial_guid)
+        # Check if the resource file exists
+        file_path = os.path.join(tutorial_path, RESOURCES_FOLDER, resource_id)
+        if os.path.isfile(file_path):
+            return file_path
+        else:
+            return None
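Resource files live in a per-tutorial resources/ folder and are referred to by the generated resource_id, never by their original path. A short usage sketch with a placeholder GUID and file:

    # Resource handling sketch; 'guid' stands for a previously saved tutorial.
    res_id = Vault.add_resource(guid, '/tmp/screenshot.jpg')
    print Vault.get_resource_path(guid, res_id)   # absolute path inside the bundle
    Vault.delete_resource(guid, res_id)
    assert Vault.get_resource_path(guid, res_id) is None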
class Serializer(object):
     """
     Interface that provides serializing and deserializing of the FSM
     used in the tutorials to/from disk. Must be inherited.
     """
 
-    def save_fsm(self,fsm):
+    def save_tutorial(self,fsm):
         """
         Save fsm to disk. If a GUID parameter is provided, the existing GUID is
         located in the .ini files in the store root and bundle root and
@@ -337,7 +428,7 @@ class Serializer(object):
         """
         raise NotImplementedError()
 
-    def load_fsm(self):
+    def load_tutorial(self):
         """
         Load fsm from disk.
         """
@@ -348,21 +439,26 @@ class XMLSerializer(Serializer):
     Class that provides serializing and deserializing of the FSM
     used in the tutorials to/from a .xml file. Inherits from Serializer.
     """
 
-    def _create_state_dict_node(self, state_dict, doc):
+    @classmethod
+    def _create_state_dict_node(cls, state_dict, doc):
         """
         Create and return a xml Node from a State dictionary.
 
+        @param state_dict dictionary of State objects
+        @param doc The XML document root (used to create nodes only)
+        @return xml Element containing the states
         """
-        statesList = doc.createElement("States")
+        statesList = doc.createElement(ELEM_STATES)
         for state_name, state in state_dict.items():
-            stateNode = doc.createElement("State")
+            stateNode = doc.createElement(ELEM_STATE)
             statesList.appendChild(stateNode)
             stateNode.setAttribute("Name", state_name)
-            actionsList = stateNode.appendChild(self._create_action_list_node(state.get_action_list(), doc))
-            eventfiltersList = stateNode.appendChild(self._create_event_filters_node(state.get_event_filter_list(), doc))
+            actionsList = stateNode.appendChild(cls._create_action_list_node(state.get_action_dict(), doc))
+            transitionsList = stateNode.appendChild(cls._create_transitions_node(state.get_transition_dict(), doc))
         return statesList
 
-    def _create_addon_component_node(self, parent_attr_name, comp, doc):
+    @classmethod
+    def _create_addon_component_node(cls, parent_attr_name, comp, doc):
         """
         Takes a component that is embedded in another component (e.g. the content
         of a OnceWrapper) and encapsulates it in a node with the property name.
@@ -389,13 +485,14 @@ class XMLSerializer(Serializer):
 
         subCompNode = doc.createElement(NODE_SUBCOMPONENT)
         subCompNode.setAttribute("name", parent_attr_name)
 
-        subNode = self._create_component_node(comp, doc)
+        subNode = cls._create_component_node(comp, doc)
 
         subCompNode.appendChild(subNode)
 
         return subCompNode
 
-    def _create_addonlist_component_node(self, parent_attr_name, comp_list, doc):
+    @classmethod
+    def _create_addonlist_component_node(cls, parent_attr_name, comp_list, doc):
         """
         Takes a list of components that are embedded in another component (e.g. the
         content of a ChainAction) and encapsulates them in a node with the property
@@ -422,12 +519,13 @@ class XMLSerializer(Serializer):
         subCompListNode.setAttribute("name", parent_attr_name)
 
         for comp in comp_list:
-            compNode = self._create_component_node(comp, doc)
+            compNode = cls._create_component_node(comp, doc)
             subCompListNode.appendChild(compNode)
 
         return subCompListNode
 
-    def _create_component_node(self, comp, doc):
+    @classmethod
+    def _create_component_node(cls, comp, doc):
         """
         Takes a single component (action or eventfilter) and transforms it
         into a xml node.
@@ -446,68 +544,86 @@ class XMLSerializer(Serializer):
         for propname in comp.get_properties():
             propval = getattr(comp, propname)
             if getattr(type(comp), propname).type == "addonlist":
-                compNode.appendChild(self._create_addonlist_component_node(propname, propval, doc))
+                compNode.appendChild(cls._create_addonlist_component_node(propname, propval, doc))
             elif getattr(type(comp), propname).type == "addon":
-                compNode.appendChild(self._create_addon_component_node(propname, propval, doc))
+                compNode.appendChild(cls._create_addon_component_node(propname, propval, doc))
             else:
                 # repr instead of str, as we want to be able to eval() it into a
                 # valid object.
                 compNode.setAttribute(propname, repr(propval))
 
         return compNode
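The repr() on the last branch is the heart of the flat-property encoding: a property is stored as its Python literal and later recovered with eval() in _load_xml_component. A two-line illustration of the invariant the serializer relies on:

    # Round-trip invariant for flat properties (see _load_xml_component below).
    prop = {'x': 10, 'msg': 'hello'}
    assert eval(repr(prop)) == prop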
-    def _create_action_list_node(self, action_list, doc):
+    @classmethod
+    def _create_action_list_node(cls, action_dict, doc):
         """
         Create and return a xml Node from a Action list.
 
-        @param action_list A list of actions
+        @param action_dict Dictionary of actions with names as keys
         @param doc The XML document root (used to create new nodes only)
         @return A XML Node object with the Actions tag name and a series of
         Action children
         """
-        actionsList = doc.createElement("Actions")
-        for action in action_list:
+        actionsList = doc.createElement(ELEM_ACTIONS)
+        for name, action in action_dict.items():
             # Create the action node
-            actionNode = self._create_component_node(action, doc)
+            actionNode = cls._create_component_node(action, doc)
+            actionNode.setAttribute(NAME_ATTR, name)
             # Append it to the list
             actionsList.appendChild(actionNode)
 
         return actionsList
 
-    def _create_event_filters_node(self, event_filters, doc):
-        """
-        Create and return a xml Node from an event filters.
-        """
-        eventFiltersList = doc.createElement("EventFiltersList")
-        for event, state in event_filters:
-            eventFilterNode = self._create_component_node(event, doc)
-            eventFilterNode.setAttribute(NEXT_STATE_ATTR, str(state))
+    @classmethod
+    def _create_transitions_node(cls, transition_dict, doc):
+        """
+        Create and return a xml Node from a transition dictionary.
+
+        @param transition_dict dictionary of (event, next_state) transitions
+        @param doc The XML document root (used to create nodes only)
+        @return xml Element containing the transitions
+        """
+        eventFiltersList = doc.createElement(ELEM_TRANS)
+        for transition_name, (event, end_state) in transition_dict.items():
+            # XXX The addon is not in the cache and cannot be loaded, so we
+            #     store it differently for now
+            if type(event) == AutomaticTransitionEvent:
+                eventFilterNode = doc.createElement(ELEM_AUTOTRANS)
+            else:
+                eventFilterNode = cls._create_component_node(event, doc)
+
+            eventFilterNode.setAttribute(NEXT_STATE_ATTR, unicode(end_state))
+            eventFilterNode.setAttribute(NAME_ATTR, transition_name)
             eventFiltersList.appendChild(eventFilterNode)
 
         return eventFiltersList
 
-    def save_fsm(self, fsm, xml_filename, path):
+    @classmethod
+    def save_tutorial(cls, fsm, file_obj):
         """
-        Save fsm to disk, in the xml file specified by "xml_filename", in the
-        "path" folder. If the specified file doesn't exist, it will be created.
+        Save fsm to file.
+
+        @param fsm Tutorial to save
+        @param file_obj file-like object in which the serialized fsm is saved
+
+        Side effects:
+            A serialized version of the Tutorial is written to file_obj.
+            The file is not closed automatically.
         """
-        self.doc = doc = xml.dom.minidom.Document()
-        fsm_element = doc.createElement("FSM")
+        doc = xml.dom.minidom.Document()
+        fsm_element = doc.createElement(ELEM_FSM)
         doc.appendChild(fsm_element)
+
         fsm_element.setAttribute("Name", fsm.name)
-        fsm_element.setAttribute("StartStateName", fsm.start_state_name)
-        statesDict = fsm_element.appendChild(self._create_state_dict_node(fsm._states, doc))
-
-        fsm_actions_node = self._create_action_list_node(fsm.actions, doc)
-        fsm_actions_node.tagName = "FSMActions"
-        actionsList = fsm_element.appendChild(fsm_actions_node)
-
-        file_object = open(os.path.join(path, xml_filename), "w")
-        file_object.write(doc.toprettyxml())
-        file_object.close()
+        states = cls._create_state_dict_node(fsm.get_state_dict(), doc)
+        fsm_element.appendChild(states)
+
+        file_obj.write(doc.toprettyxml())
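Because save_tutorial is now a classmethod writing to any file-like object, it can be exercised without touching disk. A sketch that serializes the default tutorial skeleton into a buffer, assuming Tutorial's no-dict constructor builds the INIT to END skeleton as the tutorial.py hunk above suggests; the commented block shows the rough output shape, attributes abridged:

    # Serialize to an in-memory buffer (Python 2 StringIO, as in this codebase).
    from StringIO import StringIO
    from tutorius.vault import XMLSerializer
    from tutorius.tutorial import Tutorial

    buf = StringIO()
    XMLSerializer.save_tutorial(Tutorial('Demo'), buf)
    print buf.getvalue()
    # Roughly:
    # <FSM Name="Demo">
    #   <States>
    #     <State Name="INIT">
    #       <Actions/>
    #       <Transitions>
    #         <AutomaticTransition __name__="..." __next_state__="END"/>
    #       </Transitions>
    #     </State>
    #     ...
    #   </States>
    # </FSM>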
""" - self.doc = doc = xml.dom.minidom.Document() - fsm_element = doc.createElement("FSM") + doc = xml.dom.minidom.Document() + fsm_element = doc.createElement(ELEM_FSM) doc.appendChild(fsm_element) + fsm_element.setAttribute("Name", fsm.name) - fsm_element.setAttribute("StartStateName", fsm.start_state_name) - statesDict = fsm_element.appendChild(self._create_state_dict_node(fsm._states, doc)) - - fsm_actions_node = self._create_action_list_node(fsm.actions, doc) - fsm_actions_node.tagName = "FSMActions" - actionsList = fsm_element.appendChild(fsm_actions_node) - - file_object = open(os.path.join(path, xml_filename), "w") - file_object.write(doc.toprettyxml()) - file_object.close() - def _get_direct_descendants_by_tag_name(self, node, name): + states = cls._create_state_dict_node(fsm.get_state_dict(), doc) + fsm_element.appendChild(states) + + file_obj.write(doc.toprettyxml()) + + @classmethod + def _get_direct_descendants_by_tag_name(cls, node, name): """ Searches in the list of direct descendants of a node to find all the node that have the given name. @@ -528,40 +644,63 @@ class XMLSerializer(Serializer): return_list.append(childNode) return return_list - -## def _load_xml_properties(self, properties_elem): -## """ -## Changes a list of properties into fully instanciated properties. -## -## @param properties_elem An XML element reprensenting a list of -## properties -## """ -## return [] - - def _load_xml_event_filters(self, filters_elem): + @classmethod + def _load_xml_transitions(cls, filters_elem): """ Loads up a list of Event Filters. @param filters_elem An XML Element representing a list of event filters + @return dict of (event, next_state) transitions, keyed by name """ - transition_list = [] - event_filter_element_list = self._get_direct_descendants_by_tag_name(filters_elem, NODE_COMPONENT) - new_event_filter = None + transition_dict = {} + + #Retrieve normal transitions + transition_element_list = cls._get_direct_descendants_by_tag_name(filters_elem, NODE_COMPONENT) + new_transition = None - for event_filter in event_filter_element_list: - next_state = event_filter.getAttribute(NEXT_STATE_ATTR) + for transition in transition_element_list: + #start_state = transition.getAttribute(START_STATE_ATTR) + next_state = transition.getAttribute(NEXT_STATE_ATTR) + transition_name = transition.getAttribute(NAME_ATTR) + try: + #The attributes must be removed so that they are not + # viewed as a property in load_xml_component + # transition.removeAttribute(START_STATE_ATTR) + transition.removeAttribute(NEXT_STATE_ATTR) + transition.removeAttribute(NAME_ATTR) + except NotFoundErr: + continue + + new_transition = cls._load_xml_component(transition) + + if new_transition is not None: + transition_dict[transition_name] = (new_transition, next_state) + + #Retrieve automatic transitions + # XXX This is done differently as the AutomaticTransitionEvent + # cannot be loaded dynamically (yet?) 
-    def _load_xml_event_filters(self, filters_elem):
+    @classmethod
+    def _load_xml_transitions(cls, filters_elem):
         """
         Loads up a list of Event Filters.
 
         @param filters_elem An XML Element representing a list of event
         filters
+        @return dict of (event, next_state) transitions, keyed by name
         """
-        transition_list = []
-        event_filter_element_list = self._get_direct_descendants_by_tag_name(filters_elem, NODE_COMPONENT)
-        new_event_filter = None
+        transition_dict = {}
+
+        # Retrieve normal transitions
+        transition_element_list = cls._get_direct_descendants_by_tag_name(filters_elem, NODE_COMPONENT)
+        new_transition = None
 
-        for event_filter in event_filter_element_list:
-            next_state = event_filter.getAttribute(NEXT_STATE_ATTR)
+        for transition in transition_element_list:
+            next_state = transition.getAttribute(NEXT_STATE_ATTR)
+            transition_name = transition.getAttribute(NAME_ATTR)
             try:
-                event_filter.removeAttribute(NEXT_STATE_ATTR)
+                # The attributes must be removed so that they are not
+                # viewed as a property in _load_xml_component
+                transition.removeAttribute(NEXT_STATE_ATTR)
+                transition.removeAttribute(NAME_ATTR)
             except NotFoundErr:
-                next_state = None
-            new_event_filter = self._load_xml_component(event_filter)
+                continue
+
+            new_transition = cls._load_xml_component(transition)
 
-            if new_event_filter is not None:
-                transition_list.append((new_event_filter, next_state))
+            if new_transition is not None:
+                transition_dict[transition_name] = (new_transition, next_state)
 
-        return transition_list
+        # Retrieve automatic transitions
+        # XXX This is done differently as the AutomaticTransitionEvent
+        #     cannot be loaded dynamically (yet?)
+        transition_element_list = cls._get_direct_descendants_by_tag_name(filters_elem, ELEM_AUTOTRANS)
+
+        for transition in transition_element_list:
+            next_state = transition.getAttribute(NEXT_STATE_ATTR)
+            transition_name = transition.getAttribute(NAME_ATTR)
+            try:
+                # Same attribute stripping as for normal transitions
+                transition.removeAttribute(NEXT_STATE_ATTR)
+                transition.removeAttribute(NAME_ATTR)
+            except NotFoundErr:
+                continue
+
+            transition_dict[transition_name] = (AutomaticTransitionEvent(), next_state)
+
+        return transition_dict
 
-    def _load_xml_subcomponents(self, node, properties):
+    @classmethod
+    def _load_xml_subcomponents(cls, node, properties):
         """
         Loads all the subcomponent nodes below the given node and inserts them with
         the right property name inside the properties dictionary.
@@ -571,15 +710,16 @@ class XMLSerializer(Serializer):
         and the instantiated components will be stored
         @returns Nothing. The properties dict will contain the property->comp mapping.
         """
-        subCompList = self._get_direct_descendants_by_tag_name(node, NODE_SUBCOMPONENT)
+        subCompList = cls._get_direct_descendants_by_tag_name(node, NODE_SUBCOMPONENT)
 
         for subComp in subCompList:
             property_name = subComp.getAttribute("name")
-            internal_comp_node = self._get_direct_descendants_by_tag_name(subComp, NODE_COMPONENT)[0]
-            internal_comp = self._load_xml_component(internal_comp_node)
+            internal_comp_node = cls._get_direct_descendants_by_tag_name(subComp, NODE_COMPONENT)[0]
+            internal_comp = cls._load_xml_component(internal_comp_node)
             properties[str(property_name)] = internal_comp
 
-    def _load_xml_subcomponent_lists(self, node, properties):
+    @classmethod
+    def _load_xml_subcomponent_lists(cls, node, properties):
         """
         Loads all the subcomponent lists below the given node and stores them
         under the correct property name for that node.
@@ -588,16 +728,17 @@ class XMLSerializer(Serializer):
         @param properties The dictionary that will contain the mapping of prop->subCompList
         @returns Nothing. The values are returned inside the properties dict.
         """
-        listOf_subCompListNode = self._get_direct_descendants_by_tag_name(node, NODE_SUBCOMPONENTLIST)
+        listOf_subCompListNode = cls._get_direct_descendants_by_tag_name(node, NODE_SUBCOMPONENTLIST)
         for subCompListNode in listOf_subCompListNode:
             property_name = subCompListNode.getAttribute("name")
             subCompList = []
-            for subCompNode in self._get_direct_descendants_by_tag_name(subCompListNode, NODE_COMPONENT):
-                subComp = self._load_xml_component(subCompNode)
+            for subCompNode in cls._get_direct_descendants_by_tag_name(subCompListNode, NODE_COMPONENT):
+                subComp = cls._load_xml_component(subCompNode)
                 subCompList.append(subComp)
             properties[str(property_name)] = subCompList
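One wrinkle the loaders keep working around: a component's properties are recovered from whatever XML attributes remain on the node, so bookkeeping attributes like __name__ and __next_state__ must be stripped before _load_xml_component runs, or they would be eval()'d as properties. A standalone illustration of that failure mode:

    import xml.dom.minidom

    node = xml.dom.minidom.parseString(
        '<Component Class="Demo" message="\'hi\'" __next_state__="END"/>'
    ).documentElement
    node.removeAttribute('__next_state__')          # as the loader does
    props = dict((k, eval(v)) for k, v in node.attributes.items()
                 if k != 'Class')
    print props                                     # {u'message': 'hi'}
    # Without removeAttribute, eval('END') would raise a NameError.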
-    def _load_xml_component(self, node):
+    @classmethod
+    def _load_xml_component(cls, node):
         """
         Loads a single addon component instance from an Xml node.
@@ -616,8 +757,8 @@ class XMLSerializer(Serializer):
             properties[str(prop)] = eval(node.getAttribute(prop))
 
         # Read the complex attributes
-        self._load_xml_subcomponents(node, properties)
-        self._load_xml_subcomponent_lists(node, properties)
+        cls._load_xml_subcomponents(node, properties)
+        cls._load_xml_subcomponent_lists(node, properties)
 
         new_action = addon.create(class_name, **properties)
 
@@ -625,99 +766,88 @@ class XMLSerializer(Serializer):
             return None
 
         return new_action
 
-    def _load_xml_actions(self, actions_elem):
+    @classmethod
+    def _load_xml_actions(cls, actions_elem):
         """
-        Transforms an Actions element into a list of instanciated Action.
+        Transforms an Actions element into a dict of instantiated Actions.
 
         @param actions_elem An XML Element representing a list of Actions
+        @return dictionary of actions keyed by name
         """
-        reformed_actions_list = []
-        actions_element_list = self._get_direct_descendants_by_tag_name(actions_elem, NODE_COMPONENT)
+        action_dict = {}
+        actions_element_list = cls._get_direct_descendants_by_tag_name(actions_elem, NODE_COMPONENT)
 
         for action in actions_element_list:
-            new_action = self._load_xml_component(action)
+            action_name = action.getAttribute(NAME_ATTR)
+            try:
+                # The name attribute must be removed so that it is not
+                # viewed as a property in _load_xml_component
+                action.removeAttribute(NAME_ATTR)
+            except NotFoundErr:
+                continue
+
+            new_action = cls._load_xml_component(action)
 
-            reformed_actions_list.append(new_action)
+            action_dict[action_name] = new_action
 
-        return reformed_actions_list
+        return action_dict
 
-    def _load_xml_states(self, states_elem):
+    @classmethod
+    def _load_xml_states(cls, states_elem):
         """
         Takes in a States element and fleshes out a complete list of State
         objects.
 
         @param states_elem An XML Element that represents a list of States
+        @return dictionary of States
         """
-        reformed_state_list = []
+        state_dict = {}
         # item(0) because there is always only one <States> tag in the xml file
         # so states_elem should always contain only one element
-        states_element_list = states_elem.item(0).getElementsByTagName("State")
+        states_element_list = states_elem.item(0).getElementsByTagName(ELEM_STATE)
 
         for state in states_element_list:
             stateName = state.getAttribute("Name")
             # Using item 0 in the list because there is always only one
             # Actions and Transitions element per State node.
-            actions_list = self._load_xml_actions(state.getElementsByTagName("Actions")[0])
-            event_filters_list = self._load_xml_event_filters(state.getElementsByTagName("EventFiltersList")[0])
-            reformed_state_list.append(State(stateName, actions_list, event_filters_list))
+            actions_list = cls._load_xml_actions(state.getElementsByTagName(ELEM_ACTIONS)[0])
+            transitions_list = cls._load_xml_transitions(state.getElementsByTagName(ELEM_TRANS)[0])
+
+            state_dict[stateName] = State(stateName, actions_list, transitions_list)
 
-        return reformed_state_list
+        return state_dict
 
-    def load_xml_fsm(self, fsm_elem):
+    @classmethod
+    def load_xml_tutorial(cls, fsm_elem):
         """
         Takes in an XML element representing an FSM and returns the fully
         crafted FSM.
 
         @param fsm_elem The XML element that describes a FSM
+        @return Tutorial loaded from xml element
         """
         # Load the FSM's name
         fsm_name = fsm_elem.getAttribute("Name")
-        fsm_start_state_name = None
-        try:
-            fsm_start_state_name = fsm_elem.getAttribute("StartStateName")
-        except:
-            pass
-
-        fsm = FiniteStateMachine(fsm_name, start_state_name=fsm_start_state_name)
-
         # Load the states
-        states = self._load_xml_states(fsm_elem.getElementsByTagName("States"))
-        for state in states:
-            fsm.add_state(state)
-
-        # Load the actions on this FSM
-        actions = self._load_xml_actions(fsm_elem.getElementsByTagName("FSMActions")[0])
-        for action in actions:
-            fsm.add_action(action)
-
-        # Load the event filters
-        events = self._load_xml_event_filters(fsm_elem.getElementsByTagName("EventFiltersList")[0])
-        for event, next_state in events:
-            fsm.add_event_filter(event, next_state)
-
-        return fsm
+        states_dict = cls._load_xml_states(fsm_elem.getElementsByTagName(ELEM_STATES))
+
+        fsm = Tutorial(fsm_name, states_dict)
+
+        return fsm
 
-    def load_fsm(self, guid, path=None):
+    @classmethod
+    def load_tutorial(cls, tutorial_file):
         """
-        Load fsm from xml file whose .ini file guid match argument guid.
+        Load fsm from xml file.
+
+        @param tutorial_file file-like object to read the fsm from
+        @return Tutorial object that was loaded from the file
         """
-        # Fetch the directory (if any)
-        bundler = TutorialBundler(guid)
-        tutorial_dir = bundler.get_tutorial_path(guid)
-
-        # Open the XML file
-        tutorial_file = os.path.join(tutorial_dir, TUTORIAL_FILENAME)
         xml_dom = xml.dom.minidom.parse(tutorial_file)
 
-        fsm_elem = xml_dom.getElementsByTagName("FSM")[0]
+        fsm_elem = xml_dom.getElementsByTagName(ELEM_FSM)[0]
 
-        return self.load_xml_fsm(fsm_elem)
+        return cls.load_xml_tutorial(fsm_elem)
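With both halves now classmethods operating on file objects, a full in-memory round trip is only a few lines, and doubles as a cheap regression check for the new Tutorial.__eq__. A sketch, again assuming the default Tutorial constructor builds the INIT to END skeleton:

    from StringIO import StringIO
    from tutorius.vault import XMLSerializer
    from tutorius.tutorial import Tutorial

    original = Tutorial('Demo')
    buf = StringIO()
    XMLSerializer.save_tutorial(original, buf)
    buf.seek(0)                                 # rewind before parsing
    loaded = XMLSerializer.load_tutorial(buf)
    assert loaded == original                   # state dicts compare equal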
class TutorialBundler(object):
     """
     This class provides the various data handling methods usable by the tutorial
@@ -850,11 +980,11 @@ class TutorialBundler(object):
             path = os.path.join(self.Path, "meta.ini")
             config.read(path)
             xml_filename = config.get(INI_METADATA_SECTION, INI_XML_FSM_PROPERTY)
-            serializer.save_fsm(fsm, xml_filename, self.Path)
+            serializer.save_tutorial(fsm, xml_filename, self.Path)
 
     @staticmethod
     def add_resources(typename, file):
         """
-        Add ressources to metadata.
+        Add resources to metadata.
         """
         raise NotImplementedError("add_resources not implemented")