From 897113f89fc2fef9cf3ca1da834f104c861382c6 Mon Sep 17 00:00:00 2001 From: Benoit Tremblay Date: Thu, 29 Oct 2009 14:15:22 +0000 Subject: Changed to the store and added the api library --- (limited to 'tutorius') diff --git a/tutorius/apilib/__init__.py b/tutorius/apilib/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/tutorius/apilib/__init__.py diff --git a/tutorius/apilib/__init__.pyc b/tutorius/apilib/__init__.pyc new file mode 100644 index 0000000..5ccf259 --- /dev/null +++ b/tutorius/apilib/__init__.pyc Binary files differ diff --git a/tutorius/apilib/httplib2/.svn/all-wcprops b/tutorius/apilib/httplib2/.svn/all-wcprops new file mode 100644 index 0000000..601feb8 --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/all-wcprops @@ -0,0 +1,29 @@ +K 25 +svn:wc:ra_dav:version-url +V 30 +/svn/!svn/ver/2/trunk/httplib2 +END +iri2uri.pyc +K 25 +svn:wc:ra_dav:version-url +V 42 +/svn/!svn/ver/2/trunk/httplib2/iri2uri.pyc +END +__init__.py +K 25 +svn:wc:ra_dav:version-url +V 42 +/svn/!svn/ver/2/trunk/httplib2/__init__.py +END +__init__.pyc +K 25 +svn:wc:ra_dav:version-url +V 43 +/svn/!svn/ver/2/trunk/httplib2/__init__.pyc +END +iri2uri.py +K 25 +svn:wc:ra_dav:version-url +V 41 +/svn/!svn/ver/2/trunk/httplib2/iri2uri.py +END diff --git a/tutorius/apilib/httplib2/.svn/entries b/tutorius/apilib/httplib2/.svn/entries new file mode 100644 index 0000000..1a3c5d2 --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/entries @@ -0,0 +1,66 @@ +8 + +dir +2 +https://python-rest-client.googlecode.com/svn/trunk/httplib2 +https://python-rest-client.googlecode.com/svn + + + +2008-05-14T17:00:19.245332Z +2 +bosteen + + +svn:special svn:externals svn:needs-lock + +iri2uri.pyc +file + + + + +2008-05-14T14:48:03.000000Z +6f9a0833a6dc59c42b7aec0dfdf39dd0 +2008-05-14T17:00:19.245332Z +2 +bosteen +has-props + +__init__.py +file + + + + +2007-10-23T15:25:46.000000Z +00c607566b698248d5a5c40508843cd7 +2008-05-14T17:00:19.245332Z +2 +bosteen + +__init__.pyc +file + + + + +2008-05-14T14:48:03.000000Z +bdf8607edad61c67d890de558db8006c +2008-05-14T17:00:19.245332Z +2 +bosteen +has-props + +iri2uri.py +file + + + + +2007-09-04T04:02:06.000000Z +c0f9c5cb229a22e21575322b4ba77741 +2008-05-14T17:00:19.245332Z +2 +bosteen + diff --git a/tutorius/apilib/httplib2/.svn/format b/tutorius/apilib/httplib2/.svn/format new file mode 100644 index 0000000..45a4fb7 --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/format @@ -0,0 +1 @@ +8 diff --git a/tutorius/apilib/httplib2/.svn/prop-base/__init__.pyc.svn-base b/tutorius/apilib/httplib2/.svn/prop-base/__init__.pyc.svn-base new file mode 100644 index 0000000..5e9587e --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/prop-base/__init__.pyc.svn-base @@ -0,0 +1,5 @@ +K 13 +svn:mime-type +V 24 +application/octet-stream +END diff --git a/tutorius/apilib/httplib2/.svn/prop-base/iri2uri.pyc.svn-base b/tutorius/apilib/httplib2/.svn/prop-base/iri2uri.pyc.svn-base new file mode 100644 index 0000000..5e9587e --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/prop-base/iri2uri.pyc.svn-base @@ -0,0 +1,5 @@ +K 13 +svn:mime-type +V 24 +application/octet-stream +END diff --git a/tutorius/apilib/httplib2/.svn/text-base/__init__.py.svn-base b/tutorius/apilib/httplib2/.svn/text-base/__init__.py.svn-base new file mode 100644 index 0000000..982bf8a --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/text-base/__init__.py.svn-base @@ -0,0 +1,1123 @@ +from __future__ import generators +""" +httplib2 + +A caching http interface that supports ETags and gzip +to conserve bandwidth. 
+ +Requires Python 2.3 or later + +Changelog: +2007-08-18, Rick: Modified so it's able to use a socks proxy if needed. + +""" + +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)", + "James Antill", + "Xavier Verges Farrero", + "Jonathan Feinberg", + "Blair Zajac", + "Sam Ruby", + "Louis Nyffenegger"] +__license__ = "MIT" +__version__ = "$Rev: 259 $" + +import re +import sys +import md5 +import email +import email.Utils +import email.Message +import StringIO +import gzip +import zlib +import httplib +import urlparse +import base64 +import os +import copy +import calendar +import time +import random +import sha +import hmac +from gettext import gettext as _ +import socket + +try: + import socks +except ImportError: + socks = None + +if sys.version_info >= (2,3): + from iri2uri import iri2uri +else: + def iri2uri(uri): + return uri + +__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error', + 'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent', + 'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError', + 'debuglevel'] + + +# The httplib debug level, set to a non-zero value to get debug output +debuglevel = 0 + +# Python 2.3 support +if sys.version_info < (2,4): + def sorted(seq): + seq.sort() + return seq + +# Python 2.3 support +def HTTPResponse__getheaders(self): + """Return list of (header, value) tuples.""" + if self.msg is None: + raise httplib.ResponseNotReady() + return self.msg.items() + +if not hasattr(httplib.HTTPResponse, 'getheaders'): + httplib.HTTPResponse.getheaders = HTTPResponse__getheaders + +# All exceptions raised here derive from HttpLib2Error +class HttpLib2Error(Exception): pass + +# Some exceptions can be caught and optionally +# be turned back into responses. +class HttpLib2ErrorWithResponse(HttpLib2Error): + def __init__(self, desc, response, content): + self.response = response + self.content = content + HttpLib2Error.__init__(self, desc) + +class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass +class RedirectLimit(HttpLib2ErrorWithResponse): pass +class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass +class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass +class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass + +class RelativeURIError(HttpLib2Error): pass +class ServerNotFoundError(HttpLib2Error): pass + +# Open Items: +# ----------- +# Proxy support + +# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) + +# Pluggable cache storage (supports storing the cache in +# flat files by default. We need a plug-in architecture +# that can support Berkeley DB and Squid) + +# == Known Issues == +# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. +# Does not handle Cache-Control: max-stale +# Does not use Age: headers when calculating cache freshness. + + +# The number of redirections to follow before giving up. +# Note that only GET redirects are automatically followed. +# Will also honor 301 requests by saving that info and never +# requesting that URI again. 
+DEFAULT_MAX_REDIRECTS = 5 + +# Which headers are hop-by-hop headers by default +HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade'] + +def _get_end2end_headers(response): + hopbyhop = list(HOP_BY_HOP) + hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')]) + return [header for header in response.keys() if header not in hopbyhop] + +URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") + +def parse_uri(uri): + """Parses a URI using the regex given in Appendix B of RFC 3986. + + (scheme, authority, path, query, fragment) = parse_uri(uri) + """ + groups = URI.match(uri).groups() + return (groups[1], groups[3], groups[4], groups[6], groups[8]) + +def urlnorm(uri): + (scheme, authority, path, query, fragment) = parse_uri(uri) + if not scheme or not authority: + raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) + authority = authority.lower() + scheme = scheme.lower() + if not path: + path = "/" + # Could do syntax based normalization of the URI before + # computing the digest. See Section 6.2.2 of Std 66. + request_uri = query and "?".join([path, query]) or path + scheme = scheme.lower() + defrag_uri = scheme + "://" + authority + request_uri + return scheme, authority, request_uri, defrag_uri + + +# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/) +re_url_scheme = re.compile(r'^\w+://') +re_slash = re.compile(r'[?/:|]+') + +def safename(filename): + """Return a filename suitable for the cache. + + Strips dangerous and common characters to create a filename we + can use to store the cache in. + """ + + try: + if re_url_scheme.match(filename): + if isinstance(filename,str): + filename = filename.decode('utf-8') + filename = filename.encode('idna') + else: + filename = filename.encode('idna') + except UnicodeError: + pass + if isinstance(filename,unicode): + filename=filename.encode('utf-8') + filemd5 = md5.new(filename).hexdigest() + filename = re_url_scheme.sub("", filename) + filename = re_slash.sub(",", filename) + + # limit length of filename + if len(filename)>200: + filename=filename[:200] + return ",".join((filename, filemd5)) + +NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+') +def _normalize_headers(headers): + return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()]) + +def _parse_cache_control(headers): + retval = {} + if headers.has_key('cache-control'): + parts = headers['cache-control'].split(',') + parts_with_args = [tuple([x.strip() for x in part.split("=")]) for part in parts if -1 != part.find("=")] + parts_wo_args = [(name.strip(), 1) for name in parts if -1 == name.find("=")] + retval = dict(parts_with_args + parts_wo_args) + return retval + +# Whether to use a strict mode to parse WWW-Authenticate headers +# Might lead to bad results in case of ill-formed header value, +# so disabled by default, falling back to relaxed parsing. +# Set to true to turn on, usefull for testing servers. +USE_WWW_AUTH_STRICT_PARSING = 0 + +# In regex below: +# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP +# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" 
matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space +# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both: +# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?@,;:\\\"/[\]?={} \t]+(?!\"))\"? +WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$") +WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(? current_age: + retval = "FRESH" + return retval + +def _decompressContent(response, new_content): + content = new_content + try: + encoding = response.get('content-encoding', None) + if encoding in ['gzip', 'deflate']: + if encoding == 'gzip': + content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read() + if encoding == 'deflate': + content = zlib.decompress(content) + response['content-length'] = str(len(content)) + del response['content-encoding'] + except IOError: + content = "" + raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content) + return content + +def _updateCache(request_headers, response_headers, content, cache, cachekey): + if cachekey: + cc = _parse_cache_control(request_headers) + cc_response = _parse_cache_control(response_headers) + if cc.has_key('no-store') or cc_response.has_key('no-store'): + cache.delete(cachekey) + else: + info = email.Message.Message() + for key, value in response_headers.iteritems(): + if key not in ['status','content-encoding','transfer-encoding']: + info[key] = value + + status = response_headers.status + if status == 304: + status = 200 + + status_header = 'status: %d\r\n' % response_headers.status + + header_str = info.as_string() + + header_str = re.sub("\r(?!\n)|(? 0: + service = "cl" + # No point in guessing Base or Spreadsheet + #elif request_uri.find("spreadsheets") > 0: + # service = "wise" + + auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent']) + resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'}) + lines = content.split('\n') + d = dict([tuple(line.split("=", 1)) for line in lines if line]) + if resp.status == 403: + self.Auth = "" + else: + self.Auth = d['Auth'] + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers['authorization'] = 'GoogleLogin Auth=' + self.Auth + + +AUTH_SCHEME_CLASSES = { + "basic": BasicAuthentication, + "wsse": WsseAuthentication, + "digest": DigestAuthentication, + "hmacdigest": HmacDigestAuthentication, + "googlelogin": GoogleLoginAuthentication +} + +AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] + +def _md5(s): + return + +class FileCache(object): + """Uses a local directory as a store for cached files. + Not really safe to use if multiple threads or processes are going to + be running on the same cache. 
+ """ + def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior + self.cache = cache + self.safe = safe + if not os.path.exists(cache): + os.makedirs(self.cache) + + def get(self, key): + retval = None + cacheFullPath = os.path.join(self.cache, self.safe(key)) + try: + f = file(cacheFullPath, "r") + retval = f.read() + f.close() + except IOError: + pass + return retval + + def set(self, key, value): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + f = file(cacheFullPath, "w") + f.write(value) + f.close() + + def delete(self, key): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + if os.path.exists(cacheFullPath): + os.remove(cacheFullPath) + +class Credentials(object): + def __init__(self): + self.credentials = [] + + def add(self, name, password, domain=""): + self.credentials.append((domain.lower(), name, password)) + + def clear(self): + self.credentials = [] + + def iter(self, domain): + for (cdomain, name, password) in self.credentials: + if cdomain == "" or domain == cdomain: + yield (name, password) + +class KeyCerts(Credentials): + """Identical to Credentials except that + name/password are mapped to key/cert.""" + pass + + +class ProxyInfo(object): + """Collect information required to use a proxy.""" + def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None): + """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX + constants. For example: + +p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000) + """ + self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass + + def astuple(self): + return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, + self.proxy_user, self.proxy_pass) + + def isgood(self): + return socks and (self.proxy_host != None) and (self.proxy_port != None) + + +class HTTPConnectionWithTimeout(httplib.HTTPConnection): + """HTTPConnection subclass that supports timeouts""" + + def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None): + httplib.HTTPConnection.__init__(self, host, port, strict) + self.timeout = timeout + self.proxy_info = proxy_info + + def connect(self): + """Connect to the host and port specified in __init__.""" + # Mostly verbatim from httplib.py. + msg = "getaddrinfo returns an empty list" + for res in socket.getaddrinfo(self.host, self.port, 0, + socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + if self.proxy_info and self.proxy_info.isgood(): + self.sock = socks.socksocket(af, socktype, proto) + self.sock.setproxy(*self.proxy_info.astuple()) + else: + self.sock = socket.socket(af, socktype, proto) + # Different from httplib: support timeouts. + if self.timeout is not None: + self.sock.settimeout(self.timeout) + # End of difference from httplib. + if self.debuglevel > 0: + print "connect: (%s, %s)" % (self.host, self.port) + self.sock.connect(sa) + except socket.error, msg: + if self.debuglevel > 0: + print 'connect fail:', (self.host, self.port) + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket.error, msg + +class HTTPSConnectionWithTimeout(httplib.HTTPSConnection): + "This class allows communication via SSL." 
+ + def __init__(self, host, port=None, key_file=None, cert_file=None, + strict=None, timeout=None, proxy_info=None): + self.timeout = timeout + self.proxy_info = proxy_info + httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file, + cert_file=cert_file, strict=strict) + + def connect(self): + "Connect to a host on a given (SSL) port." + + if self.proxy_info and self.proxy_info.isgood(): + self.sock.setproxy(*self.proxy_info.astuple()) + sock.setproxy(*self.proxy_info.astuple()) + else: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if self.timeout is not None: + sock.settimeout(self.timeout) + sock.connect((self.host, self.port)) + ssl = socket.ssl(sock, self.key_file, self.cert_file) + self.sock = httplib.FakeSocket(sock, ssl) + + + +class Http(object): + """An HTTP client that handles: +- all methods +- caching +- ETags +- compression, +- HTTPS +- Basic +- Digest +- WSSE + +and more. + """ + def __init__(self, cache=None, timeout=None, proxy_info=None): + """The value of proxy_info is a ProxyInfo instance. + +If 'cache' is a string then it is used as a directory name +for a disk cache. Otherwise it must be an object that supports +the same interface as FileCache.""" + self.proxy_info = proxy_info + # Map domain name to an httplib connection + self.connections = {} + # The location of the cache, for now a directory + # where cached responses are held. + if cache and isinstance(cache, str): + self.cache = FileCache(cache) + else: + self.cache = cache + + # Name/password + self.credentials = Credentials() + + # Key/cert + self.certificates = KeyCerts() + + # authorization objects + self.authorizations = [] + + # If set to False then no redirects are followed, even safe ones. + self.follow_redirects = True + + # If 'follow_redirects' is True, and this is set to True then + # all redirecs are followed, including unsafe ones. + self.follow_all_redirects = False + + self.ignore_etag = False + + self.force_exception_to_status_code = False + + self.timeout = timeout + + def _auth_from_challenge(self, host, request_uri, headers, response, content): + """A generator that creates Authorization objects + that can be applied to requests. 
+ """ + challenges = _parse_www_authenticate(response, 'www-authenticate') + for cred in self.credentials.iter(host): + for scheme in AUTH_SCHEME_ORDER: + if challenges.has_key(scheme): + yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self) + + def add_credentials(self, name, password, domain=""): + """Add a name and password that will be used + any time a request requires authentication.""" + self.credentials.add(name, password, domain) + + def add_certificate(self, key, cert, domain): + """Add a key and cert that will be used + any time a request requires authentication.""" + self.certificates.add(key, cert, domain) + + def clear_credentials(self): + """Remove all the names and passwords + that are used for authentication""" + self.credentials.clear() + self.authorizations = [] + + def _conn_request(self, conn, request_uri, method, body, headers): + for i in range(2): + try: + conn.request(method, request_uri, body, headers) + response = conn.getresponse() + except socket.gaierror: + conn.close() + raise ServerNotFoundError("Unable to find the server at %s" % conn.host) + except httplib.HTTPException, e: + if i == 0: + conn.close() + conn.connect() + continue + else: + raise + else: + content = response.read() + response = Response(response) + if method != "HEAD": + content = _decompressContent(response, content) + + break; + return (response, content) + + + def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey): + """Do the actual request using the connection object + and also follow one level of redirects if necessary""" + + auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)] + auth = auths and sorted(auths)[0][1] or None + if auth: + auth.request(method, request_uri, headers, body) + + (response, content) = self._conn_request(conn, request_uri, method, body, headers) + + if auth: + if auth.response(response, body): + auth.request(method, request_uri, headers, body) + (response, content) = self._conn_request(conn, request_uri, method, body, headers ) + response._stale_digest = 1 + + if response.status == 401: + for authorization in self._auth_from_challenge(host, request_uri, headers, response, content): + authorization.request(method, request_uri, headers, body) + (response, content) = self._conn_request(conn, request_uri, method, body, headers, ) + if response.status != 401: + self.authorizations.append(authorization) + authorization.response(response, body) + break + + if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303): + if self.follow_redirects and response.status in [300, 301, 302, 303, 307]: + # Pick out the location header and basically start from the beginning + # remembering first to strip the ETag header and decrement our 'depth' + if redirections: + if not response.has_key('location') and response.status != 300: + raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content) + # Fix-up relative redirects (which violate an RFC 2616 MUST) + if response.has_key('location'): + location = response['location'] + (scheme, authority, path, query, fragment) = parse_uri(location) + if authority == None: + response['location'] = urlparse.urljoin(absolute_uri, location) + if response.status == 301 and method in ["GET", "HEAD"]: + response['-x-permanent-redirect-url'] = response['location'] + if not response.has_key('content-location'): + 
response['content-location'] = absolute_uri + _updateCache(headers, response, content, self.cache, cachekey) + if headers.has_key('if-none-match'): + del headers['if-none-match'] + if headers.has_key('if-modified-since'): + del headers['if-modified-since'] + if response.has_key('location'): + location = response['location'] + old_response = copy.deepcopy(response) + if not old_response.has_key('content-location'): + old_response['content-location'] = absolute_uri + redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method + (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1) + response.previous = old_response + else: + raise RedirectLimit( _("Redirected more times than rediection_limit allows."), response, content) + elif response.status in [200, 203] and method == "GET": + # Don't cache 206's since we aren't going to handle byte range requests + if not response.has_key('content-location'): + response['content-location'] = absolute_uri + _updateCache(headers, response, content, self.cache, cachekey) + + return (response, content) + + +# Need to catch and rebrand some exceptions +# Then need to optionally turn all exceptions into status codes +# including all socket.* and httplib.* exceptions. + + + def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None): + """ Performs a single HTTP request. +The 'uri' is the URI of the HTTP resource and can begin +with either 'http' or 'https'. The value of 'uri' must be an absolute URI. + +The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc. +There is no restriction on the methods allowed. + +The 'body' is the entity body to be sent with the request. It is a string +object. + +Any extra headers that are to be sent with the request should be provided in the +'headers' dictionary. + +The maximum number of redirect to follow before raising an +exception is 'redirections. The default is 5. + +The return value is a tuple of (response, content), the first +being and instance of the 'Response' class, the second being +a string that contains the response entity body. 
+ """ + try: + if headers is None: + headers = {} + else: + headers = _normalize_headers(headers) + + if not headers.has_key('user-agent'): + headers['user-agent'] = "Python-httplib2/%s" % __version__ + + uri = iri2uri(uri) + + (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) + + conn_key = scheme+":"+authority + if conn_key in self.connections: + conn = self.connections[conn_key] + else: + if not connection_type: + connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout + certs = list(self.certificates.iter(authority)) + if scheme == 'https' and certs: + conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0], + cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info) + else: + conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info) + conn.set_debuglevel(debuglevel) + + if method in ["GET", "HEAD"] and 'range' not in headers: + headers['accept-encoding'] = 'compress, gzip' + + info = email.Message.Message() + cached_value = None + if self.cache: + cachekey = defrag_uri + cached_value = self.cache.get(cachekey) + if cached_value: + info = email.message_from_string(cached_value) + try: + content = cached_value.split('\r\n\r\n', 1)[1] + except IndexError: + self.cache.delete(cachekey) + cachekey = None + cached_value = None + else: + cachekey = None + + if method in ["PUT"] and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers: + # http://www.w3.org/1999/04/Editing/ + headers['if-match'] = info['etag'] + + if method not in ["GET", "HEAD"] and self.cache and cachekey: + # RFC 2616 Section 13.10 + self.cache.delete(cachekey) + + if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers: + if info.has_key('-x-permanent-redirect-url'): + # Should cached permanent redirects be counted in our redirection count? For now, yes. + (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1) + response.previous = Response(info) + response.previous.fromcache = True + else: + # Determine our course of action: + # Is the cached entry fresh or stale? + # Has the client requested a non-cached response? + # + # There seems to be three possible answers: + # 1. [FRESH] Return the cache entry w/o doing a GET + # 2. [STALE] Do the GET (but add in cache validators if available) + # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request + entry_disposition = _entry_disposition(info, headers) + + if entry_disposition == "FRESH": + if not cached_value: + info['status'] = '504' + content = "" + response = Response(info) + if cached_value: + response.fromcache = True + return (response, content) + + if entry_disposition == "STALE": + if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers: + headers['if-none-match'] = info['etag'] + if info.has_key('last-modified') and not 'last-modified' in headers: + headers['if-modified-since'] = info['last-modified'] + elif entry_disposition == "TRANSPARENT": + pass + + (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) + + if response.status == 304 and method == "GET": + # Rewrite the cache entry with the new end-to-end headers + # Take all headers that are in response + # and overwrite their values in info. 
+ # unless they are hop-by-hop, or are listed in the connection header. + + for key in _get_end2end_headers(response): + info[key] = response[key] + merged_response = Response(info) + if hasattr(response, "_stale_digest"): + merged_response._stale_digest = response._stale_digest + _updateCache(headers, merged_response, content, self.cache, cachekey) + response = merged_response + response.status = 200 + response.fromcache = True + + elif response.status == 200: + content = new_content + else: + self.cache.delete(cachekey) + content = new_content + else: + (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) + except Exception, e: + if self.force_exception_to_status_code: + if isinstance(e, HttpLib2ErrorWithResponse): + response = e.response + content = e.content + response.status = 500 + response.reason = str(e) + elif isinstance(e, socket.timeout): + content = "Request Timeout" + response = Response( { + "content-type": "text/plain", + "status": "408", + "content-length": len(content) + }) + response.reason = "Request Timeout" + else: + content = str(e) + response = Response( { + "content-type": "text/plain", + "status": "400", + "content-length": len(content) + }) + response.reason = "Bad Request" + else: + raise + + + return (response, content) + + + +class Response(dict): + """An object more like email.Message than httplib.HTTPResponse.""" + + """Is this response from our local cache""" + fromcache = False + + """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """ + version = 11 + + "Status code returned by server. " + status = 200 + + """Reason phrase returned by server.""" + reason = "Ok" + + previous = None + + def __init__(self, info): + # info is either an email.Message or + # an httplib.HTTPResponse object. + if isinstance(info, httplib.HTTPResponse): + for key, value in info.getheaders(): + self[key] = value + self.status = info.status + self['status'] = str(self.status) + self.reason = info.reason + self.version = info.version + elif isinstance(info, email.Message.Message): + for key, value in info.items(): + self[key] = value + self.status = int(self['status']) + else: + for key, value in info.iteritems(): + self[key] = value + self.status = int(self.get('status', self.status)) + + + def __getattr__(self, name): + if name == 'dict': + return self + else: + raise AttributeError, name diff --git a/tutorius/apilib/httplib2/.svn/text-base/__init__.pyc.svn-base b/tutorius/apilib/httplib2/.svn/text-base/__init__.pyc.svn-base new file mode 100644 index 0000000..f092204 --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/text-base/__init__.pyc.svn-base Binary files differ diff --git a/tutorius/apilib/httplib2/.svn/text-base/iri2uri.py.svn-base b/tutorius/apilib/httplib2/.svn/text-base/iri2uri.py.svn-base new file mode 100644 index 0000000..70667ed --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/text-base/iri2uri.py.svn-base @@ -0,0 +1,110 @@ +""" +iri2uri + +Converts an IRI to a URI. 
+ +""" +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = [] +__version__ = "1.0.0" +__license__ = "MIT" +__history__ = """ +""" + +import urlparse + + +# Convert an IRI to a URI following the rules in RFC 3987 +# +# The characters we need to enocde and escape are defined in the spec: +# +# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD +# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF +# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD +# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD +# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD +# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD +# / %xD0000-DFFFD / %xE1000-EFFFD + +escape_range = [ + (0xA0, 0xD7FF ), + (0xE000, 0xF8FF ), + (0xF900, 0xFDCF ), + (0xFDF0, 0xFFEF), + (0x10000, 0x1FFFD ), + (0x20000, 0x2FFFD ), + (0x30000, 0x3FFFD), + (0x40000, 0x4FFFD ), + (0x50000, 0x5FFFD ), + (0x60000, 0x6FFFD), + (0x70000, 0x7FFFD ), + (0x80000, 0x8FFFD ), + (0x90000, 0x9FFFD), + (0xA0000, 0xAFFFD ), + (0xB0000, 0xBFFFD ), + (0xC0000, 0xCFFFD), + (0xD0000, 0xDFFFD ), + (0xE1000, 0xEFFFD), + (0xF0000, 0xFFFFD ), + (0x100000, 0x10FFFD) +] + +def encode(c): + retval = c + i = ord(c) + for low, high in escape_range: + if i < low: + break + if i >= low and i <= high: + retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')]) + break + return retval + + +def iri2uri(uri): + """Convert an IRI to a URI. Note that IRIs must be + passed in a unicode strings. That is, do not utf-8 encode + the IRI before passing it into the function.""" + if isinstance(uri ,unicode): + (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri) + authority = authority.encode('idna') + # For each character in 'ucschar' or 'iprivate' + # 1. encode as utf-8 + # 2. 
then %-encode each octet of that utf-8 + uri = urlparse.urlunsplit((scheme, authority, path, query, fragment)) + uri = "".join([encode(c) for c in uri]) + return uri + +if __name__ == "__main__": + import unittest + + class Test(unittest.TestCase): + + def test_uris(self): + """Test that URIs are invariant under the transformation.""" + invariant = [ + u"ftp://ftp.is.co.za/rfc/rfc1808.txt", + u"http://www.ietf.org/rfc/rfc2396.txt", + u"ldap://[2001:db8::7]/c=GB?objectClass?one", + u"mailto:John.Doe@example.com", + u"news:comp.infosystems.www.servers.unix", + u"tel:+1-816-555-1212", + u"telnet://192.0.2.16:80/", + u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ] + for uri in invariant: + self.assertEqual(uri, iri2uri(uri)) + + def test_iri(self): + """ Test that the right type of escaping is done for each part of the URI.""" + self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}")) + self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}")) + self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}")) + self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}")) + self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")) + self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))) + self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8'))) + + unittest.main() + + diff --git a/tutorius/apilib/httplib2/.svn/text-base/iri2uri.pyc.svn-base b/tutorius/apilib/httplib2/.svn/text-base/iri2uri.pyc.svn-base new file mode 100644 index 0000000..e16a3db --- /dev/null +++ b/tutorius/apilib/httplib2/.svn/text-base/iri2uri.pyc.svn-base Binary files differ diff --git a/tutorius/apilib/httplib2/__init__.py b/tutorius/apilib/httplib2/__init__.py new file mode 100644 index 0000000..982bf8a --- /dev/null +++ b/tutorius/apilib/httplib2/__init__.py @@ -0,0 +1,1123 @@ +from __future__ import generators +""" +httplib2 + +A caching http interface that supports ETags and gzip +to conserve bandwidth. + +Requires Python 2.3 or later + +Changelog: +2007-08-18, Rick: Modified so it's able to use a socks proxy if needed. 
+ +""" + +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)", + "James Antill", + "Xavier Verges Farrero", + "Jonathan Feinberg", + "Blair Zajac", + "Sam Ruby", + "Louis Nyffenegger"] +__license__ = "MIT" +__version__ = "$Rev: 259 $" + +import re +import sys +import md5 +import email +import email.Utils +import email.Message +import StringIO +import gzip +import zlib +import httplib +import urlparse +import base64 +import os +import copy +import calendar +import time +import random +import sha +import hmac +from gettext import gettext as _ +import socket + +try: + import socks +except ImportError: + socks = None + +if sys.version_info >= (2,3): + from iri2uri import iri2uri +else: + def iri2uri(uri): + return uri + +__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error', + 'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent', + 'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError', + 'debuglevel'] + + +# The httplib debug level, set to a non-zero value to get debug output +debuglevel = 0 + +# Python 2.3 support +if sys.version_info < (2,4): + def sorted(seq): + seq.sort() + return seq + +# Python 2.3 support +def HTTPResponse__getheaders(self): + """Return list of (header, value) tuples.""" + if self.msg is None: + raise httplib.ResponseNotReady() + return self.msg.items() + +if not hasattr(httplib.HTTPResponse, 'getheaders'): + httplib.HTTPResponse.getheaders = HTTPResponse__getheaders + +# All exceptions raised here derive from HttpLib2Error +class HttpLib2Error(Exception): pass + +# Some exceptions can be caught and optionally +# be turned back into responses. +class HttpLib2ErrorWithResponse(HttpLib2Error): + def __init__(self, desc, response, content): + self.response = response + self.content = content + HttpLib2Error.__init__(self, desc) + +class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass +class RedirectLimit(HttpLib2ErrorWithResponse): pass +class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass +class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass +class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass + +class RelativeURIError(HttpLib2Error): pass +class ServerNotFoundError(HttpLib2Error): pass + +# Open Items: +# ----------- +# Proxy support + +# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) + +# Pluggable cache storage (supports storing the cache in +# flat files by default. We need a plug-in architecture +# that can support Berkeley DB and Squid) + +# == Known Issues == +# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. +# Does not handle Cache-Control: max-stale +# Does not use Age: headers when calculating cache freshness. + + +# The number of redirections to follow before giving up. +# Note that only GET redirects are automatically followed. +# Will also honor 301 requests by saving that info and never +# requesting that URI again. 
+DEFAULT_MAX_REDIRECTS = 5 + +# Which headers are hop-by-hop headers by default +HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade'] + +def _get_end2end_headers(response): + hopbyhop = list(HOP_BY_HOP) + hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')]) + return [header for header in response.keys() if header not in hopbyhop] + +URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") + +def parse_uri(uri): + """Parses a URI using the regex given in Appendix B of RFC 3986. + + (scheme, authority, path, query, fragment) = parse_uri(uri) + """ + groups = URI.match(uri).groups() + return (groups[1], groups[3], groups[4], groups[6], groups[8]) + +def urlnorm(uri): + (scheme, authority, path, query, fragment) = parse_uri(uri) + if not scheme or not authority: + raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) + authority = authority.lower() + scheme = scheme.lower() + if not path: + path = "/" + # Could do syntax based normalization of the URI before + # computing the digest. See Section 6.2.2 of Std 66. + request_uri = query and "?".join([path, query]) or path + scheme = scheme.lower() + defrag_uri = scheme + "://" + authority + request_uri + return scheme, authority, request_uri, defrag_uri + + +# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/) +re_url_scheme = re.compile(r'^\w+://') +re_slash = re.compile(r'[?/:|]+') + +def safename(filename): + """Return a filename suitable for the cache. + + Strips dangerous and common characters to create a filename we + can use to store the cache in. + """ + + try: + if re_url_scheme.match(filename): + if isinstance(filename,str): + filename = filename.decode('utf-8') + filename = filename.encode('idna') + else: + filename = filename.encode('idna') + except UnicodeError: + pass + if isinstance(filename,unicode): + filename=filename.encode('utf-8') + filemd5 = md5.new(filename).hexdigest() + filename = re_url_scheme.sub("", filename) + filename = re_slash.sub(",", filename) + + # limit length of filename + if len(filename)>200: + filename=filename[:200] + return ",".join((filename, filemd5)) + +NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+') +def _normalize_headers(headers): + return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()]) + +def _parse_cache_control(headers): + retval = {} + if headers.has_key('cache-control'): + parts = headers['cache-control'].split(',') + parts_with_args = [tuple([x.strip() for x in part.split("=")]) for part in parts if -1 != part.find("=")] + parts_wo_args = [(name.strip(), 1) for name in parts if -1 == name.find("=")] + retval = dict(parts_with_args + parts_wo_args) + return retval + +# Whether to use a strict mode to parse WWW-Authenticate headers +# Might lead to bad results in case of ill-formed header value, +# so disabled by default, falling back to relaxed parsing. +# Set to true to turn on, usefull for testing servers. +USE_WWW_AUTH_STRICT_PARSING = 0 + +# In regex below: +# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP +# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" 
matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space +# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both: +# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?@,;:\\\"/[\]?={} \t]+(?!\"))\"? +WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$") +WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(? current_age: + retval = "FRESH" + return retval + +def _decompressContent(response, new_content): + content = new_content + try: + encoding = response.get('content-encoding', None) + if encoding in ['gzip', 'deflate']: + if encoding == 'gzip': + content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read() + if encoding == 'deflate': + content = zlib.decompress(content) + response['content-length'] = str(len(content)) + del response['content-encoding'] + except IOError: + content = "" + raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content) + return content + +def _updateCache(request_headers, response_headers, content, cache, cachekey): + if cachekey: + cc = _parse_cache_control(request_headers) + cc_response = _parse_cache_control(response_headers) + if cc.has_key('no-store') or cc_response.has_key('no-store'): + cache.delete(cachekey) + else: + info = email.Message.Message() + for key, value in response_headers.iteritems(): + if key not in ['status','content-encoding','transfer-encoding']: + info[key] = value + + status = response_headers.status + if status == 304: + status = 200 + + status_header = 'status: %d\r\n' % response_headers.status + + header_str = info.as_string() + + header_str = re.sub("\r(?!\n)|(? 0: + service = "cl" + # No point in guessing Base or Spreadsheet + #elif request_uri.find("spreadsheets") > 0: + # service = "wise" + + auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent']) + resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'}) + lines = content.split('\n') + d = dict([tuple(line.split("=", 1)) for line in lines if line]) + if resp.status == 403: + self.Auth = "" + else: + self.Auth = d['Auth'] + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers['authorization'] = 'GoogleLogin Auth=' + self.Auth + + +AUTH_SCHEME_CLASSES = { + "basic": BasicAuthentication, + "wsse": WsseAuthentication, + "digest": DigestAuthentication, + "hmacdigest": HmacDigestAuthentication, + "googlelogin": GoogleLoginAuthentication +} + +AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] + +def _md5(s): + return + +class FileCache(object): + """Uses a local directory as a store for cached files. + Not really safe to use if multiple threads or processes are going to + be running on the same cache. 
+ """ + def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior + self.cache = cache + self.safe = safe + if not os.path.exists(cache): + os.makedirs(self.cache) + + def get(self, key): + retval = None + cacheFullPath = os.path.join(self.cache, self.safe(key)) + try: + f = file(cacheFullPath, "r") + retval = f.read() + f.close() + except IOError: + pass + return retval + + def set(self, key, value): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + f = file(cacheFullPath, "w") + f.write(value) + f.close() + + def delete(self, key): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + if os.path.exists(cacheFullPath): + os.remove(cacheFullPath) + +class Credentials(object): + def __init__(self): + self.credentials = [] + + def add(self, name, password, domain=""): + self.credentials.append((domain.lower(), name, password)) + + def clear(self): + self.credentials = [] + + def iter(self, domain): + for (cdomain, name, password) in self.credentials: + if cdomain == "" or domain == cdomain: + yield (name, password) + +class KeyCerts(Credentials): + """Identical to Credentials except that + name/password are mapped to key/cert.""" + pass + + +class ProxyInfo(object): + """Collect information required to use a proxy.""" + def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None): + """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX + constants. For example: + +p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000) + """ + self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass + + def astuple(self): + return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, + self.proxy_user, self.proxy_pass) + + def isgood(self): + return socks and (self.proxy_host != None) and (self.proxy_port != None) + + +class HTTPConnectionWithTimeout(httplib.HTTPConnection): + """HTTPConnection subclass that supports timeouts""" + + def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None): + httplib.HTTPConnection.__init__(self, host, port, strict) + self.timeout = timeout + self.proxy_info = proxy_info + + def connect(self): + """Connect to the host and port specified in __init__.""" + # Mostly verbatim from httplib.py. + msg = "getaddrinfo returns an empty list" + for res in socket.getaddrinfo(self.host, self.port, 0, + socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + if self.proxy_info and self.proxy_info.isgood(): + self.sock = socks.socksocket(af, socktype, proto) + self.sock.setproxy(*self.proxy_info.astuple()) + else: + self.sock = socket.socket(af, socktype, proto) + # Different from httplib: support timeouts. + if self.timeout is not None: + self.sock.settimeout(self.timeout) + # End of difference from httplib. + if self.debuglevel > 0: + print "connect: (%s, %s)" % (self.host, self.port) + self.sock.connect(sa) + except socket.error, msg: + if self.debuglevel > 0: + print 'connect fail:', (self.host, self.port) + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket.error, msg + +class HTTPSConnectionWithTimeout(httplib.HTTPSConnection): + "This class allows communication via SSL." 
+ + def __init__(self, host, port=None, key_file=None, cert_file=None, + strict=None, timeout=None, proxy_info=None): + self.timeout = timeout + self.proxy_info = proxy_info + httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file, + cert_file=cert_file, strict=strict) + + def connect(self): + "Connect to a host on a given (SSL) port." + + if self.proxy_info and self.proxy_info.isgood(): + self.sock.setproxy(*self.proxy_info.astuple()) + sock.setproxy(*self.proxy_info.astuple()) + else: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if self.timeout is not None: + sock.settimeout(self.timeout) + sock.connect((self.host, self.port)) + ssl = socket.ssl(sock, self.key_file, self.cert_file) + self.sock = httplib.FakeSocket(sock, ssl) + + + +class Http(object): + """An HTTP client that handles: +- all methods +- caching +- ETags +- compression, +- HTTPS +- Basic +- Digest +- WSSE + +and more. + """ + def __init__(self, cache=None, timeout=None, proxy_info=None): + """The value of proxy_info is a ProxyInfo instance. + +If 'cache' is a string then it is used as a directory name +for a disk cache. Otherwise it must be an object that supports +the same interface as FileCache.""" + self.proxy_info = proxy_info + # Map domain name to an httplib connection + self.connections = {} + # The location of the cache, for now a directory + # where cached responses are held. + if cache and isinstance(cache, str): + self.cache = FileCache(cache) + else: + self.cache = cache + + # Name/password + self.credentials = Credentials() + + # Key/cert + self.certificates = KeyCerts() + + # authorization objects + self.authorizations = [] + + # If set to False then no redirects are followed, even safe ones. + self.follow_redirects = True + + # If 'follow_redirects' is True, and this is set to True then + # all redirecs are followed, including unsafe ones. + self.follow_all_redirects = False + + self.ignore_etag = False + + self.force_exception_to_status_code = False + + self.timeout = timeout + + def _auth_from_challenge(self, host, request_uri, headers, response, content): + """A generator that creates Authorization objects + that can be applied to requests. 
+ """ + challenges = _parse_www_authenticate(response, 'www-authenticate') + for cred in self.credentials.iter(host): + for scheme in AUTH_SCHEME_ORDER: + if challenges.has_key(scheme): + yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self) + + def add_credentials(self, name, password, domain=""): + """Add a name and password that will be used + any time a request requires authentication.""" + self.credentials.add(name, password, domain) + + def add_certificate(self, key, cert, domain): + """Add a key and cert that will be used + any time a request requires authentication.""" + self.certificates.add(key, cert, domain) + + def clear_credentials(self): + """Remove all the names and passwords + that are used for authentication""" + self.credentials.clear() + self.authorizations = [] + + def _conn_request(self, conn, request_uri, method, body, headers): + for i in range(2): + try: + conn.request(method, request_uri, body, headers) + response = conn.getresponse() + except socket.gaierror: + conn.close() + raise ServerNotFoundError("Unable to find the server at %s" % conn.host) + except httplib.HTTPException, e: + if i == 0: + conn.close() + conn.connect() + continue + else: + raise + else: + content = response.read() + response = Response(response) + if method != "HEAD": + content = _decompressContent(response, content) + + break; + return (response, content) + + + def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey): + """Do the actual request using the connection object + and also follow one level of redirects if necessary""" + + auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)] + auth = auths and sorted(auths)[0][1] or None + if auth: + auth.request(method, request_uri, headers, body) + + (response, content) = self._conn_request(conn, request_uri, method, body, headers) + + if auth: + if auth.response(response, body): + auth.request(method, request_uri, headers, body) + (response, content) = self._conn_request(conn, request_uri, method, body, headers ) + response._stale_digest = 1 + + if response.status == 401: + for authorization in self._auth_from_challenge(host, request_uri, headers, response, content): + authorization.request(method, request_uri, headers, body) + (response, content) = self._conn_request(conn, request_uri, method, body, headers, ) + if response.status != 401: + self.authorizations.append(authorization) + authorization.response(response, body) + break + + if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303): + if self.follow_redirects and response.status in [300, 301, 302, 303, 307]: + # Pick out the location header and basically start from the beginning + # remembering first to strip the ETag header and decrement our 'depth' + if redirections: + if not response.has_key('location') and response.status != 300: + raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content) + # Fix-up relative redirects (which violate an RFC 2616 MUST) + if response.has_key('location'): + location = response['location'] + (scheme, authority, path, query, fragment) = parse_uri(location) + if authority == None: + response['location'] = urlparse.urljoin(absolute_uri, location) + if response.status == 301 and method in ["GET", "HEAD"]: + response['-x-permanent-redirect-url'] = response['location'] + if not response.has_key('content-location'): + 
response['content-location'] = absolute_uri + _updateCache(headers, response, content, self.cache, cachekey) + if headers.has_key('if-none-match'): + del headers['if-none-match'] + if headers.has_key('if-modified-since'): + del headers['if-modified-since'] + if response.has_key('location'): + location = response['location'] + old_response = copy.deepcopy(response) + if not old_response.has_key('content-location'): + old_response['content-location'] = absolute_uri + redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method + (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1) + response.previous = old_response + else: + raise RedirectLimit( _("Redirected more times than rediection_limit allows."), response, content) + elif response.status in [200, 203] and method == "GET": + # Don't cache 206's since we aren't going to handle byte range requests + if not response.has_key('content-location'): + response['content-location'] = absolute_uri + _updateCache(headers, response, content, self.cache, cachekey) + + return (response, content) + + +# Need to catch and rebrand some exceptions +# Then need to optionally turn all exceptions into status codes +# including all socket.* and httplib.* exceptions. + + + def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None): + """ Performs a single HTTP request. +The 'uri' is the URI of the HTTP resource and can begin +with either 'http' or 'https'. The value of 'uri' must be an absolute URI. + +The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc. +There is no restriction on the methods allowed. + +The 'body' is the entity body to be sent with the request. It is a string +object. + +Any extra headers that are to be sent with the request should be provided in the +'headers' dictionary. + +The maximum number of redirect to follow before raising an +exception is 'redirections. The default is 5. + +The return value is a tuple of (response, content), the first +being and instance of the 'Response' class, the second being +a string that contains the response entity body. 
+ """ + try: + if headers is None: + headers = {} + else: + headers = _normalize_headers(headers) + + if not headers.has_key('user-agent'): + headers['user-agent'] = "Python-httplib2/%s" % __version__ + + uri = iri2uri(uri) + + (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) + + conn_key = scheme+":"+authority + if conn_key in self.connections: + conn = self.connections[conn_key] + else: + if not connection_type: + connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout + certs = list(self.certificates.iter(authority)) + if scheme == 'https' and certs: + conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0], + cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info) + else: + conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info) + conn.set_debuglevel(debuglevel) + + if method in ["GET", "HEAD"] and 'range' not in headers: + headers['accept-encoding'] = 'compress, gzip' + + info = email.Message.Message() + cached_value = None + if self.cache: + cachekey = defrag_uri + cached_value = self.cache.get(cachekey) + if cached_value: + info = email.message_from_string(cached_value) + try: + content = cached_value.split('\r\n\r\n', 1)[1] + except IndexError: + self.cache.delete(cachekey) + cachekey = None + cached_value = None + else: + cachekey = None + + if method in ["PUT"] and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers: + # http://www.w3.org/1999/04/Editing/ + headers['if-match'] = info['etag'] + + if method not in ["GET", "HEAD"] and self.cache and cachekey: + # RFC 2616 Section 13.10 + self.cache.delete(cachekey) + + if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers: + if info.has_key('-x-permanent-redirect-url'): + # Should cached permanent redirects be counted in our redirection count? For now, yes. + (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1) + response.previous = Response(info) + response.previous.fromcache = True + else: + # Determine our course of action: + # Is the cached entry fresh or stale? + # Has the client requested a non-cached response? + # + # There seems to be three possible answers: + # 1. [FRESH] Return the cache entry w/o doing a GET + # 2. [STALE] Do the GET (but add in cache validators if available) + # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request + entry_disposition = _entry_disposition(info, headers) + + if entry_disposition == "FRESH": + if not cached_value: + info['status'] = '504' + content = "" + response = Response(info) + if cached_value: + response.fromcache = True + return (response, content) + + if entry_disposition == "STALE": + if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers: + headers['if-none-match'] = info['etag'] + if info.has_key('last-modified') and not 'last-modified' in headers: + headers['if-modified-since'] = info['last-modified'] + elif entry_disposition == "TRANSPARENT": + pass + + (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) + + if response.status == 304 and method == "GET": + # Rewrite the cache entry with the new end-to-end headers + # Take all headers that are in response + # and overwrite their values in info. 
+ # unless they are hop-by-hop, or are listed in the connection header. + + for key in _get_end2end_headers(response): + info[key] = response[key] + merged_response = Response(info) + if hasattr(response, "_stale_digest"): + merged_response._stale_digest = response._stale_digest + _updateCache(headers, merged_response, content, self.cache, cachekey) + response = merged_response + response.status = 200 + response.fromcache = True + + elif response.status == 200: + content = new_content + else: + self.cache.delete(cachekey) + content = new_content + else: + (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) + except Exception, e: + if self.force_exception_to_status_code: + if isinstance(e, HttpLib2ErrorWithResponse): + response = e.response + content = e.content + response.status = 500 + response.reason = str(e) + elif isinstance(e, socket.timeout): + content = "Request Timeout" + response = Response( { + "content-type": "text/plain", + "status": "408", + "content-length": len(content) + }) + response.reason = "Request Timeout" + else: + content = str(e) + response = Response( { + "content-type": "text/plain", + "status": "400", + "content-length": len(content) + }) + response.reason = "Bad Request" + else: + raise + + + return (response, content) + + + +class Response(dict): + """An object more like email.Message than httplib.HTTPResponse.""" + + """Is this response from our local cache""" + fromcache = False + + """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """ + version = 11 + + "Status code returned by server. " + status = 200 + + """Reason phrase returned by server.""" + reason = "Ok" + + previous = None + + def __init__(self, info): + # info is either an email.Message or + # an httplib.HTTPResponse object. + if isinstance(info, httplib.HTTPResponse): + for key, value in info.getheaders(): + self[key] = value + self.status = info.status + self['status'] = str(self.status) + self.reason = info.reason + self.version = info.version + elif isinstance(info, email.Message.Message): + for key, value in info.items(): + self[key] = value + self.status = int(self['status']) + else: + for key, value in info.iteritems(): + self[key] = value + self.status = int(self.get('status', self.status)) + + + def __getattr__(self, name): + if name == 'dict': + return self + else: + raise AttributeError, name diff --git a/tutorius/apilib/httplib2/__init__.pyc b/tutorius/apilib/httplib2/__init__.pyc new file mode 100644 index 0000000..f4d8036 --- /dev/null +++ b/tutorius/apilib/httplib2/__init__.pyc Binary files differ diff --git a/tutorius/apilib/httplib2/iri2uri.py b/tutorius/apilib/httplib2/iri2uri.py new file mode 100644 index 0000000..70667ed --- /dev/null +++ b/tutorius/apilib/httplib2/iri2uri.py @@ -0,0 +1,110 @@ +""" +iri2uri + +Converts an IRI to a URI. 
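+
+A short usage sketch (the comet-domain IRI is borrowed from the test cases
+at the bottom of this file):
+
+    uri = iri2uri(u"http://\N{COMET}.com/\N{COMET}")
+    # uri is now "http://xn--o3h.com/%E2%98%84"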
+
+"""
+__author__ = "Joe Gregorio (joe@bitworking.org)"
+__copyright__ = "Copyright 2006, Joe Gregorio"
+__contributors__ = []
+__version__ = "1.0.0"
+__license__ = "MIT"
+__history__ = """
+"""
+
+import urlparse
+
+
+# Convert an IRI to a URI following the rules in RFC 3987
+#
+# The characters we need to encode and escape are defined in the spec:
+#
+# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
+# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
+#        / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
+#        / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
+#        / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
+#        / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
+#        / %xD0000-DFFFD / %xE1000-EFFFD
+
+escape_range = [
+    (0xA0, 0xD7FF),
+    (0xE000, 0xF8FF),
+    (0xF900, 0xFDCF),
+    (0xFDF0, 0xFFEF),
+    (0x10000, 0x1FFFD),
+    (0x20000, 0x2FFFD),
+    (0x30000, 0x3FFFD),
+    (0x40000, 0x4FFFD),
+    (0x50000, 0x5FFFD),
+    (0x60000, 0x6FFFD),
+    (0x70000, 0x7FFFD),
+    (0x80000, 0x8FFFD),
+    (0x90000, 0x9FFFD),
+    (0xA0000, 0xAFFFD),
+    (0xB0000, 0xBFFFD),
+    (0xC0000, 0xCFFFD),
+    (0xD0000, 0xDFFFD),
+    (0xE1000, 0xEFFFD),
+    (0xF0000, 0xFFFFD),
+    (0x100000, 0x10FFFD)
+]
+
+def encode(c):
+    retval = c
+    i = ord(c)
+    for low, high in escape_range:
+        if i < low:
+            break
+        if i >= low and i <= high:
+            retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')])
+            break
+    return retval
+
+
+def iri2uri(uri):
+    """Convert an IRI to a URI. Note that IRIs must be
+    passed in as unicode strings. That is, do not utf-8 encode
+    the IRI before passing it into the function."""
+    if isinstance(uri, unicode):
+        (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
+        authority = authority.encode('idna')
+        # For each character in 'ucschar' or 'iprivate'
+        #  1. encode as utf-8
+        #  2. then %-encode each octet of that utf-8
+        uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
+        uri = "".join([encode(c) for c in uri])
+    return uri
+
+if __name__ == "__main__":
+    import unittest
+
+    class Test(unittest.TestCase):
+
+        def test_uris(self):
+            """Test that URIs are invariant under the transformation."""
+            invariant = [
+                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
+                u"http://www.ietf.org/rfc/rfc2396.txt",
+                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
+                u"mailto:John.Doe@example.com",
+                u"news:comp.infosystems.www.servers.unix",
+                u"tel:+1-816-555-1212",
+                u"telnet://192.0.2.16:80/",
+                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
+            for uri in invariant:
+                self.assertEqual(uri, iri2uri(uri))
+
+        def test_iri(self):
+            """Test that the right type of escaping is done for each part of the URI."""
+            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
+            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
+            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
+            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
+            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
+            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
+            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
+
+    unittest.main()
+
+
diff --git a/tutorius/apilib/httplib2/iri2uri.pyc b/tutorius/apilib/httplib2/iri2uri.pyc
new file mode 100644
index 0000000..670ac45
--- /dev/null
+++ b/tutorius/apilib/httplib2/iri2uri.pyc
Binary files differ
diff --git a/tutorius/apilib/mimeTypes.py b/tutorius/apilib/mimeTypes.py
new file mode 100644
index 0000000..ff8f641
--- /dev/null
+++ b/tutorius/apilib/mimeTypes.py
@@ -0,0 +1,57 @@
+"""
+    Copyright (C) 2008 Benjamin O'Steen
+
+    This file is part of python-fedoracommons.
+
+    python-fedoracommons is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    python-fedoracommons is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with python-fedoracommons. If not, see <http://www.gnu.org/licenses/>.
+""" + +__license__ = 'GPL http://www.gnu.org/licenses/gpl.txt' +__author__ = "Benjamin O'Steen " +__version__ = '0.1' + +class mimeTypes(object): + def getDictionary(self): + mimetype_to_extension = {} + extension_to_mimetype = {} + mimetype_to_extension['text/plain'] = 'txt' + mimetype_to_extension['text/xml'] = 'xml' + mimetype_to_extension['text/css'] = 'css' + mimetype_to_extension['text/javascript'] = 'js' + mimetype_to_extension['text/rtf'] = 'rtf' + mimetype_to_extension['text/calendar'] = 'ics' + mimetype_to_extension['application/msword'] = 'doc' + mimetype_to_extension['application/msexcel'] = 'xls' + mimetype_to_extension['application/x-msword'] = 'doc' + mimetype_to_extension['application/vnd.ms-excel'] = 'xls' + mimetype_to_extension['application/vnd.ms-powerpoint'] = 'ppt' + mimetype_to_extension['application/pdf'] = 'pdf' + mimetype_to_extension['text/comma-separated-values'] = 'csv' + + + mimetype_to_extension['image/jpeg'] = 'jpg' + mimetype_to_extension['image/gif'] = 'gif' + mimetype_to_extension['image/jpg'] = 'jpg' + mimetype_to_extension['image/tiff'] = 'tiff' + mimetype_to_extension['image/png'] = 'png' + + # And hacky reverse lookups + for mimetype in mimetype_to_extension: + extension_to_mimetype[mimetype_to_extension[mimetype]] = mimetype + + mimetype_extension_mapping = {} + mimetype_extension_mapping.update(mimetype_to_extension) + mimetype_extension_mapping.update(extension_to_mimetype) + + return mimetype_extension_mapping diff --git a/tutorius/apilib/mimeTypes.pyc b/tutorius/apilib/mimeTypes.pyc new file mode 100644 index 0000000..e9bd261 --- /dev/null +++ b/tutorius/apilib/mimeTypes.pyc Binary files differ diff --git a/tutorius/apilib/restful_lib.py b/tutorius/apilib/restful_lib.py new file mode 100644 index 0000000..e1ee0af --- /dev/null +++ b/tutorius/apilib/restful_lib.py @@ -0,0 +1,129 @@ +""" + Copyright (C) 2008 Benjamin O'Steen + + This file is part of python-fedoracommons. + + python-fedoracommons is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + python-fedoracommons is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with python-fedoracommons. If not, see . 
+""" + +__license__ = 'GPL http://www.gnu.org/licenses/gpl.txt' +__author__ = "Benjamin O'Steen " +__version__ = '0.1' + +import httplib2 +import urlparse +import urllib +import base64 +from base64 import encodestring + +from mimeTypes import * + +import mimetypes + +from cStringIO import StringIO + +class Connection: + def __init__(self, base_url, username=None, password=None): + self.base_url = base_url + self.username = username + m = mimeTypes() + self.mimetypes = m.getDictionary() + + self.url = urlparse.urlparse(base_url) + + (scheme, netloc, path, query, fragment) = urlparse.urlsplit(base_url) + + self.scheme = scheme + self.host = netloc + self.path = path + + # Create Http class with support for Digest HTTP Authentication, if necessary + self.h = httplib2.Http(".cache") + self.h.follow_all_redirects = True + if username and password: + self.h.add_credentials(username, password) + + def request_get(self, resource, args = None, headers={}): + return self.request(resource, "get", args, headers=headers) + + def request_delete(self, resource, args = None, headers={}): + return self.request(resource, "delete", args, headers=headers) + + def request_head(self, resource, args = None, headers={}): + return self.request(resource, "head", args, headers=headers) + + def request_post(self, resource, args = None, body = None, filename=None, headers={}): + return self.request(resource, "post", args , body = body, filename=filename, headers=headers) + + def request_put(self, resource, args = None, body = None, filename=None, headers={}): + return self.request(resource, "put", args , body = body, filename=filename, headers=headers) + + def get_content_type(self, filename): + extension = filename.split('.')[-1] + guessed_mimetype = self.mimetypes.get(extension, mimetypes.guess_type(filename)[0]) + return guessed_mimetype or 'application/octet-stream' + + def request(self, resource, method = "get", args = None, body = None, filename=None, headers={}): + params = None + path = resource + headers['User-Agent'] = 'Basic Agent' + + BOUNDARY = u'00hoYUXOnLD5RQ8SKGYVgLLt64jejnMwtO7q8XE1' + CRLF = u'\r\n' + + if filename and body: + #fn = open(filename ,'r') + #chunks = fn.read() + #fn.close() + + # Attempt to find the Mimetype + content_type = self.get_content_type(filename) + headers['Content-Type']='multipart/form-data; boundary='+BOUNDARY + encode_string = StringIO() + encode_string.write(CRLF) + encode_string.write(u'--' + BOUNDARY + CRLF) + encode_string.write(u'Content-Disposition: form-data; name="file"; filename="%s"' % filename) + encode_string.write(CRLF) + encode_string.write(u'Content-Type: %s' % content_type + CRLF) + encode_string.write(CRLF) + encode_string.write(body) + encode_string.write(CRLF) + encode_string.write(u'--' + BOUNDARY + u'--' + CRLF) + + body = encode_string.getvalue() + headers['Content-Length'] = str(len(body)) + elif body: + if not headers.get('Content-Type', None): + headers['Content-Type']='text/xml' + headers['Content-Length'] = str(len(body)) + else: + headers['Content-Type']='text/xml' + + if args: + path += u"?" 
+ urllib.urlencode(args) + + request_path = [] + if self.path != "/": + if self.path.endswith('/'): + request_path.append(self.path[:-1]) + else: + request_path.append(self.path) + if path.startswith('/'): + request_path.append(path[1:]) + else: + request_path.append(path) + + resp, content = self.h.request(u"%s://%s%s" % (self.scheme, self.host, u'/'.join(request_path)), method.upper(), body=body, headers=headers ) + + return {u'headers':resp, u'body':content.decode('UTF-8')} diff --git a/tutorius/apilib/restful_lib.pyc b/tutorius/apilib/restful_lib.pyc new file mode 100644 index 0000000..9cf5277 --- /dev/null +++ b/tutorius/apilib/restful_lib.pyc Binary files differ diff --git a/tutorius/store.py b/tutorius/store.py index 9c8bdff..225cdd4 100644 --- a/tutorius/store.py +++ b/tutorius/store.py @@ -15,6 +15,9 @@ # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA import urllib +import urllib2 +from xml.dom import minidom +from apilib.restful_lib import Connection class StoreProxy(object): """ @@ -23,78 +26,215 @@ class StoreProxy(object): shop to implement all the requests that could be made to the Store. """ + def __init__(self): + + # Base Urls for the api + self.base_url = "http://tutorius.dev/en-US/tutorius" + self.remora_api = "api/1.4" + self.tutorius_api = "TutoriusApi" + self.bandwagon_api = "api/1.4/sharing" + + self.api_auth_key = "" + + # Prepares the connection with the api + self.conn = Connection(self.base_url) + + # Setup the helper + self.helper = StoreProxyHelper() + def get_categories(self): """ Returns all the categories registered in the store. Categories are used to classify tutorials according to a theme. (e.g. Mathematics, History, etc...) - + @return The list of category names stored on the server. """ - raise NotImplementedError("get_categories() not implemented") + categories = {} + + request_url = "/%s/categories" % (self.tutorius_api) + + response = self.conn.request_get(request_url) + + if self.helper.iserror(response): + return categories + + xml_response = minidom.parseString(response['body']) + + xml_categories = xml_response.getElementsByTagName('category') + + categories = list() + + # Loop through the categories and create the list to be returned + for xml_category in xml_categories: + category = {} + + category['id'] = xml_category.getElementsByTagName('id')[0].firstChild.nodeValue + category['name'] = xml_category.getElementsByTagName('name')[0].firstChild.nodeValue - def get_tutorials(self, keywords=None, category=None, startIndex=0, numResults=10, sortBy='name'): + categories.append(category) + + return categories + + + def get_tutorials(self, category='all', page=1, numResults=10, sortBy='name'): """ Returns the list of tutorials that correspond to the given search criteria. - + @param keywords The list of keywords that should be matched inside the tutorial title or description. If None, the search will not filter the results according to the keywords. @param category The category in which to restrict the search. - @param startIndex The index in the result set from which to return results. This is + @param page The page in the result set from which to return results. This is used to allow applications to fetch results one set at a time. 
-        @param numResults The max number of results that can be returned
+        @param numResults The max number of results that can be returned in a page
         @param sortBy The field on which to sort the results
         @return A list of tutorial meta-data that corresponds to the query
         """
-        raise NotImplementedError("get_tutorials() not implemented")
+
+        request_url = "/%s/tutorials/%s/%d/%d/%s" % (self.tutorius_api, category, page, numResults, sortBy)
+
+        response = self.conn.request_get(request_url)
+
+        if (self.helper.iserror(response)):
+            return False
+
+        xml_response = minidom.parseString(response['body'])
+
+        xml_tutorials = xml_response.getElementsByTagName('tutorial')
+
+        tutorials = list()
+
+        for xml_tutorial in xml_tutorials:
+            tutorial = self.helper.parse_tutorial(xml_tutorial)
+            tutorials.append(tutorial)
+
+        return tutorials
+
+    def list(self, type='recommended', numResults=3):
+        """
+        Returns a list of tutorials corresponding to the type specified.
+        Type examples: 'Most downloaded', 'recommended', etc.
+
+        @param type The type of list (Most downloaded, recommended, etc.)
+        @return A list of tutorials
+        """
+        request_url = "/%s/list/%s/tutorial/%s" % (self.remora_api, type, numResults)
+
+        response = self.conn.request_get(request_url)
+
+        if (self.helper.iserror(response)):
+            return False
+
+        xml_response = minidom.parseString(response['body'])
+        xml_tutorials = xml_response.getElementsByTagName('addon')
+
+        tutorials = list()
+
+        for xml_tutorial in xml_tutorials:
+            tutorial = self.helper.parse_tutorial(xml_tutorial)
+            tutorials.append(tutorial)
+
+        return tutorials
+
+
     def get_tutorial_collection(self, collection_name):
         """
         Returns a list of tutorials corresponding to the given collection
         name. Collections can be groups like '5 most downloaded' or
         'Top 10 ratings'.
-
+
         @param collection_name The name of the collection from which we want
         the meta-data
         @return A list of tutorial meta-data corresponding to the given group
         """
         raise NotImplementedError("get_tutorial_collection() not implemented... yet!")
-
+
     def get_latest_version(self, tutorial_id_list):
         """
         Returns the latest version number on the server, for each tutorial ID
         in the list.
-
+
         @param tutorial_id_list The list of tutorial IDs from which we want to
         know the latest version number.
-        @return A dictionary having the tutorial ID as the key and the version
+        @return A dictionary having the tutorial ID as the key and the version
         as the value.
         """
-        raise NotImplementedError("get_latest_version() not implemented")
-
+
+        versions = {}
+
+        for tutorial_id in tutorial_id_list:
+
+            request_url = "/%s/addon/%s/" % (self.remora_api, tutorial_id)
+
+            response = self.conn.request_get(request_url)
+
+            xml = minidom.parseString(response['body'])
+
+            versionnode = xml.getElementsByTagName("version")[0]
+
+            version = versionnode.firstChild.nodeValue
+
+            versions[tutorial_id] = version
+
+        return versions
+
     def download_tutorial(self, tutorial_id, version=None):
         """
-        Fetches the tutorial file from the server and returns the
-
+        Fetches the tutorial file from the server and returns its content.
+
         @param tutorial_id The tutorial that we want to get
         @param version The version number that we want to download. If None,
         the latest version will be downloaded.
         @return The downloaded file itself (an in-memory representation of the
         file, not a path to it on the disk)
-
+
+        TODO: We should decide whether we're saving to disk or in memory.
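+
+        A minimal calling sketch (the store ID '1234' is a placeholder;
+        the returned object is the file-like handle described above):
+
+            proxy = StoreProxy()
+            fp = proxy.download_tutorial('1234')
+            if fp:
+                bundle = fp.read()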
""" - raise NotImplementedError("downloadTutorial() not implemented") + request_url = "/%s/addon/%s/" % (self.remora_api, tutorial_id) + + response = self.conn.request_get(request_url) + if (self.helper.iserror(response)): + return False + + xml = minidom.parseString(response['body']) + + installnode = xml.getElementsByTagName("install")[0] + installurl = installnode.firstChild.nodeValue + + fp = urllib.urlopen(installurl) + + return fp + def login(self, username, password): """ Logs in the user on the store and saves the login status in the proxy state. After a successful logon, the operation requiring a login will be successful. - + + @param username + @param password @return True if the login was successful, False otherwise """ - raise NotImplementedError("login() not implemented yet") + request_url = "/%s/auth/" % (self.tutorius_api) + + params = {'username': username, 'password': password} + + response = self.conn.request_post(request_url, params) + if (self.helper.iserror(response)): + return False + + xml_response = minidom.parseString(response['body']) + + keynode = xml_response.getElementsByTagName("token")[0] + + key = keynode.getAttribute('value') + + self.api_auth_key = key + + return True + def close_session(self): """ Ends the user's session on the server and changes the state of the proxy @@ -102,72 +242,203 @@ class StoreProxy(object): @return True if the user was disconnected, False otherwise """ - raise NotImplementedError("close_session() not implemented yet") + request_url = "/%s/auth/%s" % (self.tutorius_api, self.api_auth_key) + + headers = { 'X-API-Auth' : self.api_auth_key } + + response = self.conn.request_delete(request_url, None, headers) + if (self.helper.iserror(response)): + return False + + self.api_auth_key = "" + + return True + def get_session_id(self): """ Gives the current session ID cached in the Store Proxy, or returns None is the user is not logged yet. - + @return The current session's ID, or None if the user is not logged """ - raise NotImplementedError("get_session_id() not implemented yet") - + return self.api_auth_key + def rate(self, value, tutorial_store_id): """ Sends a rating for the given tutorial. - + This function requires the user to be logged in. - - @param value The value of the rating. It must be an integer with a value + + @param value The value of the rating. It must be an integer with a value from 1 to 5. @param tutorial_store_id The ID of the tutorial that was rated @return True if the rating was sent to the Store, False otherwise. """ - raise NotImplementedError("rate() not implemented") + request_url = "/%s/review/%s" % (self.tutorius_api, tutorial_store_id) + + params = {'title': 'from api', 'body': 'from api', 'rating': value} + headers = { 'X-API-Auth' : self.api_auth_key } + + response = self.conn.request_post(request_url, params, None, None, headers) - def publish(self, tutorial): + if self.helper.iserror(response): + return False + + return True + + def publish(self, tutorial, tutorial_info=None, tutorial_store_id = None): """ Sends a tutorial to the store. - + This function requires the user to be logged in. - @param tutorial The tutorial file to be sent. Note that this is the + @param tutorial The tutorial file to be sent. Note that this is the content itself and not the path to the file. + @param tutorial_info An array containing the tutorial information @return True if the tutorial was sent correctly, False otherwise. 
""" - raise NotImplementedError("publish() not implemented") + + # This is in the case we have to re-publish a tutorial + if tutorial_store_id != None: + request_url = "/%s/publish/%s" % (self.tutorius_api, tutorial_store_id) + + response = self.conn.request_post(request_url) + + if self.helper.iserror(response): + return False + + return True + + # Otherwise, we want to publish a new tutorial + if tutorial_info == None: + return False + + request_url = "/%s/publish/" % (self.tutorius_api) + + headers = { 'X-API-Auth' : self.api_auth_key } + + response = self.conn.request_post(request_url, tutorial_info, tutorial, tutorial_info['filename'], headers) + + if self.helper.iserror(response): + return False + return True + + def unpublish(self, tutorial_store_id): """ Removes a tutorial from the server. The user in the current session - needs to be the creator for it to be unpublished. This will remove + needs to be the creator for it to be unpublished. This will remove the file from the server and from all its collections and categories. - + This function requires the user to be logged in. - + @param tutorial_store_id The ID of the tutorial to be removed @return True if the tutorial was properly removed from the server """ - raise NotImplementedError("unpublish() not implemented") + request_url = "/%s/publish/%s" % (self.tutorius_api, tutorial_store_id) - def update_published_tutorial(self, tutorial_id, tutorial): + headers = { 'X-API-Auth' : self.api_auth_key } + response = self.conn.request_delete(request_url, None, headers) + + if self.helper.iserror(response): + return False + + return True + + def update_published_tutorial(self, tutorial_id, tutorial, tutorial_info): """ Sends the new content for the tutorial with the given ID. - + This function requires the user to be logged in. - + @param tutorial_id The ID of the tutorial to be updated @param tutorial The bundled tutorial file content (not a path!) @return True if the tutorial was sent and updated, False otherwise """ - raise NotImplementedError("update_published_tutorial() not implemented yet") + request_url = "/%s/update/%s" % (self.tutorius_api, tutorial_id) + + headers = { 'X-API-Auth' : self.api_auth_key } + response = self.conn.request_post(request_url, tutorial_info, tutorial, tutorial_info['filename'], headers) + + if self.helper.iserror(response): + return False + + return True + + def register_new_user(self, user_info): """ - Creates a new user from the given user information. + Creates a new user from the given user information. @param user_info A structure containing all the data required to do a login. 
@return True if the new account was created, False otherwise
         """
-        raise NotImplementedError("register_new_user() not implemented")
+        request_url = "/%s/registerNewUser" % (self.tutorius_api)
+
+        params = {'nickname': user_info['nickname'], 'password': user_info['password'], 'email': user_info['email']}
+
+        response = self.conn.request_post(request_url, params)
+
+        if self.helper.iserror(response):
+            return False
+
+        return True
+
+
+class StoreProxyHelper(object):
+    """
+    Implements helper methods for the Store, more specifically
+    methods to handle XML responses and errors.
+    """
+
+    def iserror(self, response):
+        """
+        Checks if the response received from the server is an error.
+
+        @param response The XML response from the server
+        @return True if the response is an error
+        """
+
+        # First look for HTTP errors
+        http_status = response['headers']['status']
+
+        if http_status in ['400', '401', '403', '500' ]:
+            return True
+
+        # Now check if the response is valid XML
+        try:
+            minidom.parseString(response['body'])
+        except Exception:
+            return True
+
+        # The response is valid XML; parse it and look for
+        # an error in XML format
+        xml_response = minidom.parseString(response['body'])
+
+        errors = xml_response.getElementsByTagName('error')
+
+        if (len(errors) > 0):
+            return True
+
+        return False
+
+    def parse_tutorial(self, xml_tutorial):
+        """
+        Parses a tutorial's XML metadata and returns a dictionary
+        containing the metadata.
+
+        @param xml_tutorial The tutorial metadata in XML format
+        @return A dictionary containing the metadata
+        """
+        tutorial = {}
+
+        tutorial['name'] = xml_tutorial.getElementsByTagName('name')[0].firstChild.nodeValue
+        tutorial['summary'] = xml_tutorial.getElementsByTagName('summary')[0].firstChild.nodeValue
+        tutorial['version'] = xml_tutorial.getElementsByTagName('version')[0].firstChild.nodeValue
+        tutorial['description'] = ""
+        tutorial['author'] = ""
+        tutorial['rating'] = ""
+
+        return tutorial
--
cgit v0.9.1