author    Gonzalo Odiard <godiard@gmail.com>  2013-04-23 21:10:56 (GMT)
committer Gonzalo Odiard <godiard@gmail.com>  2013-04-23 21:17:53 (GMT)
commit    6fb4593079e98f0cf8e29d97a82dfef7be118a10 (patch)
tree      5ace8942885f3f5e781526ae2fdbdd04e83f8124
parent    72b24125d67ce26d2cd0887b434107c2ecd73ce1 (diff)
Upload & download journal objects packaged

To upload or download a journal object, it is zipped together with a file containing the preview and another with the metadata in JSON format. The new utils methods are used to package/unpackage on download and upload. This solves a few issues:

* metadata & preview are transferred.
* the code is simpler because only one file is transferred.
* the downloadmanager is always used, because WebKit does not know how to handle the MIME type used (fixes an error with images captured by WebKit).

Signed-off-by: Gonzalo Odiard <gonzalo@laptop.org>
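For illustration, a minimal sketch (not part of the commit) of the container format the new helpers produce: a zip archive whose entries are the raw object under 'data', the JSON metadata under 'metadata' and, when present, the preview image under 'preview'. The function name and paths below are hypothetical.

    import json
    from zipfile import ZipFile

    def write_journal_package(package_path, data_path, metadata,
                              preview_path=None):
        # 'metadata' is a plain dict, stored as JSON in the 'metadata' entry
        with ZipFile(package_path, 'w') as package:
            package.write(data_path, 'data')
            package.writestr('metadata', json.dumps(metadata))
            if preview_path is not None:
                package.write(preview_path, 'preview')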
-rw-r--r--  activity.py          44
-rw-r--r--  downloadmanager.py   45
-rw-r--r--  filepicker.py        22
-rw-r--r--  server.py           150
-rw-r--r--  utils.py            134
-rw-r--r--  web/index.html        2
6 files changed, 131 insertions, 266 deletions
diff --git a/activity.py b/activity.py
index 3044c6a..16af039 100644
--- a/activity.py
+++ b/activity.py
@@ -25,7 +25,6 @@ from gi.repository import WebKit
import telepathy
import dbus
import os.path
-import base64
import json
from sugar3.activity import activity
@@ -41,6 +40,7 @@ from sugar3.graphics.objectchooser import ObjectChooser
import downloadmanager
from filepicker import FilePicker
import server
+import utils
JOURNAL_STREAM_SERVICE = 'journal-activity-http'
@@ -276,10 +276,7 @@ class JournalShare(activity.Activity):
picker.destroy()
if chosen:
logging.error('CHOSEN %s', chosen)
- tmp_dir = os.path.dirname(chosen)
- preview_file = os.path.join(tmp_dir, 'preview')
- metadata_file = os.path.join(tmp_dir, 'metadata')
- request.select_files([chosen, preview_file, metadata_file])
+ request.select_files([chosen])
elif hasattr(request, 'cancel'):
# WebKit2 only
request.cancel()
@@ -391,14 +388,14 @@ class JournalManager(GObject.GObject):
logging.error('INFO %s', info)
return json.dumps(info)
- def create_object(self, file_path, metadata_content, preview_content):
+ def create_object(self, file_path, metadata, preview_content):
new_dsobject = datastore.create()
#Set the file_path in the datastore.
new_dsobject.set_file_path(file_path)
- if metadata_content is not None:
- metadata = json.loads(metadata_content)
- for key in metadata.keys():
- new_dsobject.metadata[key] = metadata[key]
+
+ for key in metadata.keys():
+ new_dsobject.metadata[key] = metadata[key]
+
if preview_content is not None and preview_content != '':
new_dsobject.metadata['preview'] = \
dbus.ByteArray(preview_content)
@@ -438,35 +435,10 @@ class JournalManager(GObject.GObject):
comment = json.loads(dsobj.metadata['comments'])
except:
comment = []
- if 'preview' in dsobj.metadata:
- # TODO: copied from expandedentry.py
- # is needed because record is saving the preview encoded
- if dsobj.metadata['preview'][1:4] == 'PNG':
- preview = dsobj.metadata['preview']
- else:
- # TODO: We are close to be able to drop this.
- preview = base64.b64decode(
- dsobj.metadata['preview'])
- preview_path = self._instance_path + 'preview_id_' + \
- object_id
- preview_file = open(preview_path, 'w')
- preview_file.write(preview)
- preview_file.close()
- if 'mime_type' in dsobj.metadata:
- mime_type_path = self._instance_path + 'mime_type_id_' + \
- object_id
- mime_type_file = open(mime_type_path, 'w')
- mime_type_file.write(dsobj.metadata['title'])
- mime_type_file.close()
-
else:
logging.debug('dsobj has no metadata')
- # create a link to be read from the web server
- file_path = self._instance_path + 'id_' + object_id
- if os.path.isfile(file_path):
- os.remove(file_path)
- os.link(dsobj.file_path, file_path)
+ utils.package_ds_object(dsobj, self._instance_path)
results.append({'title': title, 'desc': desc, 'comment': comment,
'id': object_id})
diff --git a/downloadmanager.py b/downloadmanager.py
index 36bcd43..7cc9418 100644
--- a/downloadmanager.py
+++ b/downloadmanager.py
@@ -31,6 +31,8 @@ from sugar3.graphics.alert import Alert, TimeoutAlert
from sugar3.graphics.icon import Icon
from sugar3.activity import activity
+import utils
+
DS_DBUS_SERVICE = 'org.laptop.sugar.DataStore'
DS_DBUS_INTERFACE = 'org.laptop.sugar.DataStore'
DS_DBUS_PATH = '/org/laptop/sugar/DataStore'
@@ -164,22 +166,33 @@ class Download(object):
self._stop_alert.connect('response', self.__stop_response_cb)
self._stop_alert.show()
- self.dl_jobject.metadata['title'] = \
- self._download.get_suggested_filename()
- self.dl_jobject.metadata['description'] = _('From: %s') \
- % self._source
- self.dl_jobject.metadata['progress'] = '100'
- self.dl_jobject.file_path = self._dest_path
-
- # sniff for a mime type, no way to get headers from WebKit
- sniffed_mime_type = mime.get_for_file(self._dest_path)
- self.dl_jobject.metadata['mime_type'] = sniffed_mime_type
-
- datastore.write(self.dl_jobject,
- transfer_ownership=True,
- reply_handler=self.__internal_save_cb,
- error_handler=self.__internal_error_cb,
- timeout=360)
+ if self._dest_path.endswith('.journal'):
+
+ utils.unpackage_ds_object(self._dest_path, self.dl_jobject)
+
+ datastore.write(self.dl_jobject,
+ transfer_ownership=True,
+ reply_handler=self.__internal_save_cb,
+ error_handler=self.__internal_error_cb,
+ timeout=360)
+
+ else:
+ self.dl_jobject.metadata['title'] = \
+ self._download.get_suggested_filename()
+ self.dl_jobject.metadata['description'] = _('From: %s') \
+ % self._source
+ self.dl_jobject.metadata['progress'] = '100'
+ self.dl_jobject.file_path = self._dest_path
+
+ # sniff for a mime type, no way to get headers from WebKit
+ sniffed_mime_type = mime.get_for_file(self._dest_path)
+ self.dl_jobject.metadata['mime_type'] = sniffed_mime_type
+
+ datastore.write(self.dl_jobject,
+ transfer_ownership=True,
+ reply_handler=self.__internal_save_cb,
+ error_handler=self.__internal_error_cb,
+ timeout=360)
elif state == WebKit.DownloadStatus.CANCELLED:
self.cleanup()
diff --git a/filepicker.py b/filepicker.py
index 8921f03..4ea9354 100644
--- a/filepicker.py
+++ b/filepicker.py
@@ -18,7 +18,7 @@ import logging
import os
import tempfile
import shutil
-import json
+import utils
from gi.repository import Gtk
@@ -55,24 +55,8 @@ class FilePicker(ObjectChooser):
if jobject and jobject.file_path:
tmp_dir = tempfile.mkdtemp(prefix='', \
dir=os.path.join(get_activity_root(), 'tmp'))
- _file = os.path.join(tmp_dir, _basename_strip(jobject))
-
- os.rename(jobject.file_path, _file)
-
- # create a file with the preview...
- _preview_file = open(os.path.join(tmp_dir, 'preview'), 'w')
- _preview_file.write(jobject.metadata['preview'])
- _preview_file.close()
-
- # and another with the metadata
- _metadata_file = open(os.path.join(tmp_dir, 'metadata'),
- 'w')
- metadata = {}
- for key in jobject.metadata.keys():
- if key not in ('object_id', 'preview', 'progress'):
- metadata[key] = jobject.metadata[key]
- _metadata_file.write(json.dumps(metadata))
- _metadata_file.close()
+
+ _file = utils.package_ds_object(jobject, tmp_dir)
global _temp_dirs_to_clean
_temp_dirs_to_clean.append(tmp_dir)
diff --git a/server.py b/server.py
index fae8e6e..187ee76 100644
--- a/server.py
+++ b/server.py
@@ -15,7 +15,6 @@
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
-import sys
import logging
import cgi
@@ -26,107 +25,7 @@ import select
from gi.repository import GLib
-
-from warnings import filterwarnings, catch_warnings
-with catch_warnings():
- if sys.py3kwarning:
- filterwarnings("ignore", ".*mimetools has been removed",
- DeprecationWarning)
- import mimetools
-
-# Maximum input we will accept when REQUEST_METHOD is POST
-# 0 ==> unlimited input
-maxlen = 0
-
-
-def parse_multipart(fp, pdict):
- """Parse multipart input.
- Copied from cgi.py , but modified to get the filename
- Arguments:
- fp : input file
- pdict: dictionary containing other parameters of content-type header
- filenamedict: dictionary containing filenames if available
- """
- boundary = ""
- if 'boundary' in pdict:
- boundary = pdict['boundary']
- if not cgi.valid_boundary(boundary):
- raise ValueError('Invalid boundary in multipart form: %r' % boundary)
-
- nextpart = "--" + boundary
- lastpart = "--" + boundary + "--"
- partdict = {}
- filenamesdict = {}
- terminator = ""
-
- while terminator != lastpart:
- bytes = -1
- data = None
- if terminator:
- # At start of next part. Read headers first.
- headers = mimetools.Message(fp)
- clength = headers.getheader('content-length')
- if clength:
- try:
- bytes = int(clength)
- except ValueError:
- pass
- if bytes > 0:
- if maxlen and bytes > maxlen:
- raise ValueError('Maximum content length exceeded')
- data = fp.read(bytes)
- else:
- data = ""
- # Read lines until end of part.
- lines = []
- while 1:
- line = fp.readline()
- if not line:
- terminator = lastpart # End outer loop
- break
- if line[:2] == "--":
- terminator = line.strip()
- if terminator in (nextpart, lastpart):
- break
- lines.append(line)
- # Done with part.
- if data is None:
- continue
- if bytes < 0:
- if lines:
- # Strip final line terminator
- line = lines[-1]
- if line[-2:] == "\r\n":
- line = line[:-2]
- elif line[-1:] == "\n":
- line = line[:-1]
- lines[-1] = line
- data = "".join(lines)
- line = headers['content-disposition']
- if not line:
- continue
- key, params = cgi.parse_header(line)
- if key != 'form-data':
- continue
- if 'name' in params:
- name = params['name']
- else:
- continue
-
- if name in partdict:
- partdict[name].append(data)
- else:
- partdict[name] = [data]
-
- if 'filename' in params:
- filename = params['filename']
-
- if name in filenamesdict:
- filenamesdict[name].append(filename)
- else:
- filenamesdict[name] = [filename]
-
- return partdict, filenamesdict
+import utils
class JournalHTTPRequestHandler(SimpleHTTPRequestHandler):
@@ -152,30 +51,25 @@ class JournalHTTPRequestHandler(SimpleHTTPRequestHandler):
if not ctype:
return None
ctype, pdict = cgi.parse_header(ctype)
- file_fields, filenames = parse_multipart(self.rfile, pdict)
+ query = cgi.parse_multipart(self.rfile, pdict)
+
+ file_content = query.get('journal_item')[0]
+ # save to the journal
+ zipped_file_path = os.path.join(self.activity_root,
+ 'instance', 'received.journal')
+ f = open(zipped_file_path, 'wb')
+ try:
+ f.write(file_content)
+ finally:
+ f.close()
+
+ metadata, preview_data, file_path = \
+ utils.unpackage_ds_object(zipped_file_path, None)
- i = 0
- preview_content = None
- metadata_content = None
- for file_name in filenames['journal_item']:
- if file_name == 'preview':
- preview_content = file_fields['journal_item'][i]
- elif file_name == 'metadata':
- metadata_content = file_fields['journal_item'][i]
- else:
- file_content = file_fields['journal_item'][i]
- # save to the journal
- file_path = os.path.join(self.activity_root,
- 'instance', file_name)
- f = open(file_path, 'w')
- try:
- f.write(file_content)
- finally:
- f.close()
- i = i + 1
+ logging.error('METADATA %s', metadata)
GLib.idle_add(self.jm.create_object, file_path,
- metadata_content, preview_content)
+ metadata, preview_data)
#redirect to index.html page
self.send_response(301)
@@ -207,14 +101,8 @@ class JournalHTTPRequestHandler(SimpleHTTPRequestHandler):
file_path = self.activity_root + path
mime_type = 'text/html'
- # if is reading a file, try to read the mime_type
- if file_path.find('/id_') > -1:
- mime_tipe_path = file_path.replace('/id_',
- '/mime_type_id_')
- if os.path.isfile(mime_tipe_path):
- f = open(mime_tipe_path)
- mime_type = f.read()
- f.close()
+ if file_path.endswith('.journal'):
+ mime_type = 'application/journal'
self.send_header_response(mime_type)
if os.path.isfile(file_path):
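In short, the handler now keys the response MIME type off the '.journal' suffix; WebKit has no renderer for 'application/journal', so every shared entry is handed to downloadmanager. A standalone sketch of that decision, matching the hunk above:

    def mime_type_for(file_path):
        # assumption: only packaged journal entries carry the .journal suffix
        if file_path.endswith('.journal'):
            return 'application/journal'
        return 'text/html'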
diff --git a/utils.py b/utils.py
index 5626ac7..5d4239d 100644
--- a/utils.py
+++ b/utils.py
@@ -16,67 +16,75 @@
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
-
-from sugar import profile
-from sugar.datastore import datastore
-
-tfile = open('templates', 'r')
-templates = tfile.read()
-tfile.close()
-
-webdir = os.path.join(os.path.dirname(__file__), 'web')
-
-INDEX = open(os.path.join(webdir, 'index.html'), 'w')
-ICONS_DIR = os.path.join(webdir, 'images')
-FILES_DIR = os.path.join(webdir, 'files')
-
-
-def fill_out_template(template, content):
- template = templates.split('#!%s\n' % template)[1].split('\n!#')[0]
- for x in list(content.keys()):
- template = template.replace('{%s}' % x, content[x])
-
- return template
-
-
-def find_icon(mime_type):
- generic_name = mime_type.split('/')[0]
- if generic_name + '.png' in os.listdir(ICONS_DIR):
- return '%s.png' % generic_name
-
+import base64
+import json
+import dbus
+from zipfile import ZipFile
+
+
+def package_ds_object(dsobj, destination_path):
+ """
+ Creates a zipped file with the file associated to a journal object,
+ the preview and the metadata
+ """
+ object_id = dsobj.object_id
+ preview_path = None
+
+ if 'preview' in dsobj.metadata:
+ # TODO: copied from expandedentry.py
+ # is needed because record is saving the preview encoded
+ if dsobj.metadata['preview'][1:4] == 'PNG':
+ preview = dsobj.metadata['preview']
+ else:
+ # TODO: We are close to be able to drop this.
+ preview = base64.b64decode(dsobj.metadata['preview'])
+
+ preview_path = os.path.join(destination_path,
+ 'preview_id_' + object_id)
+ preview_file = open(preview_path, 'w')
+ preview_file.write(preview)
+ preview_file.close()
+
+ # create file with the metadata
+ metadata_path = os.path.join(destination_path,
+ 'metadata_id_' + object_id)
+ metadata_file = open(metadata_path, 'w')
+ metadata = {}
+ for key in dsobj.metadata.keys():
+ if key not in ('object_id', 'preview', 'progress'):
+ metadata[key] = dsobj.metadata[key]
+ metadata_file.write(json.dumps(metadata))
+ metadata_file.close()
+
+ # create a zip file including metadata and preview
+ # to be read from the web server
+ file_path = os.path.join(destination_path, 'id_' + object_id + '.journal')
+
+ with ZipFile(file_path, 'w') as myzip:
+ if preview_path is not None:
+ myzip.write(preview_path, 'preview')
+ myzip.write(metadata_path, 'metadata')
+ myzip.write(dsobj.file_path, 'data')
+ return file_path
+
+
+def unpackage_ds_object(origin_path, dsobj=None):
+ """
+ Receive a path of a zipped file, unzip it, and save the data,
+ preview and metadata on a journal object
+ """
+ tmp_path = os.path.dirname(origin_path)
+ with ZipFile(origin_path) as zipped:
+ metadata = json.loads(zipped.read('metadata'))
+ preview_data = zipped.read('preview')
+ zipped.extract('data', tmp_path)
+
+ if dsobj is not None:
+ for key in metadata.keys():
+ dsobj.metadata[key] = metadata[key]
+
+ dsobj.metadata['preview'] = dbus.ByteArray(preview_data)
+
+ dsobj.file_path = os.path.join(tmp_path, 'data')
else:
- return 'unknown.png'
-
-
-def link_file(file_path):
- link_path = os.path.join(FILES_DIR, os.path.split(file_path)[-1])
- os.link(file_path, link_path)
- return os.path.split(link_path)[-1]
-
-
-def build_journal_page():
- for f in os.listdir(FILES_DIR):
- os.remove(os.path.join(FILES_DIR, f))
-
- objects_starred, no = datastore.find({'keep': '1'})
- objects = []
-
- for dsobj in objects_starred:
- title = dsobj.metadata['title']
- icon = find_icon(dsobj.metadata['mime_type'])
- file_link = link_file(dsobj.file_path)
- objects.append({'file': file_link, 'name': title, 'icon': icon})
-
- objects_html = ''
- for o in objects:
- objects_html += '%s' % fill_out_template('object', o)
-
- index_html = fill_out_template('index', {'nick': profile.get_nick_name(),
- 'objects': objects_html})
-
- INDEX.write(index_html)
- INDEX.flush()
-
-
-if __name__ == "__main__":
- build_journal_page()
+ return metadata, preview_data, os.path.join(tmp_path, 'data')
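Taken together, the two helpers form a round trip. A hypothetical usage sketch, assuming it runs inside the activity where sugar3 and the datastore are available ('some_object_id' and 'instance_dir' are placeholders):

    from sugar3.datastore import datastore
    import utils

    # sharing side: package an entry so the web server can serve it
    dsobj = datastore.get(some_object_id)  # hypothetical object id
    journal_path = utils.package_ds_object(dsobj, instance_dir)

    # download side (downloadmanager.py): fill a fresh object in place
    new_dsobject = datastore.create()
    utils.unpackage_ds_object(journal_path, new_dsobject)
    datastore.write(new_dsobject)

    # upload side (server.py): no datastore object yet, so the helper
    # returns the pieces that JournalManager.create_object() expects
    metadata, preview_data, file_path = utils.unpackage_ds_object(journal_path)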
diff --git a/web/index.html b/web/index.html
index b2cef32..0df51fe 100644
--- a/web/index.html
+++ b/web/index.html
@@ -29,7 +29,7 @@
"<tr><td class='title'>" + selected[i].title + "</td></tr>"+
"<tr><td>" + selected[i].desc + "</td></tr>"+
(!local ? "<tr><td>"+
- "<a class='download_link' href='/datastore/id_" + selected[i].id +"'>"+
+ "<a class='download_link' href='/datastore/id_" + selected[i].id +".journal'>"+
"Download</a></td></tr>" : "") +
"</table>"+
"</td>" +