#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Aneesh Dogra <lionaneesh@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Journal Export
from sugar3.datastore import datastore
from sugar3.activity import activity
import dataretriever
import platform
import StringIO
import os
from server import WPWikiDB
from server import HTMLOutputBuffer
from mwlib import parser, scanner, expander, rendermath, htmlwriter
# Platform tag such as "linux32"/"linux64", used to select the matching
# native dataretriever backend for this machine.
system_id = "%s%s" % (platform.system().lower(),
                      platform.architecture()[0][0:2])
# Per-instance scratch directory of the running Sugar activity; temporary
# export files are written here before being handed to the datastore.
instance_dir = os.path.join(activity.get_activity_root(), 'instance')
class JournalExport:
    """Export local-wiki articles into the Sugar Journal as HTML entries."""

    def __init__(self, confvars):
        # confvars: configuration dict; the keys read here are 'path',
        # 'lang', 'templateprefix' and 'templateblacklist' — presumably
        # there are more keys used by callers; verify against call site.
        self.wikidb = WPWikiDB(confvars['path'], confvars['lang'],
                               confvars['templateprefix'],
                               confvars['templateblacklist'])
        # Separate retriever built directly on the article database path.
        self.dataretriever = dataretriever.DataRetriever(system_id,
                                                         confvars['path'])
        self.confvars = confvars
def export_article(self, article):
article_list = self.search(article) # get all articles
if len(article_list) == 0: # no such article
return
title = article_list[0]
article_text = self.wikidb.getRawArticle(title).encode('utf8')
article_text = self.wikidb.expandArticle(article_text, title)
tokens = scanner.tokenize(article_text, title)
wiki_parsed = parser.Parser(tokens, title).parse()
wiki_parsed.caption = title
out = StringIO.StringIO()
w = htmlwriter.HTMLWriter(out)
w.write(wiki_parsed)
htmloutput = out.getvalue()
filename = os.path.join(instance_dir, title + '.html')
fp = open(filename, 'w')
fp.write(htmloutput)
fp.close()
journal_entry = datastore.create()
journal_entry.metadata['title'] = title
journal_entry.metadata['title_set_by_user'] = '1'
journal_entry.metadata['mime_type'] = 'text/html'
journal_entry.file_path = filename
datastore.write(journal_entry)
os.remove(filename)
    def search(self, article_title):
        """Return the list of article titles matching *article_title*.

        NOTE(review): this delegates to ``self.wikidb.dataretriever``,
        while ``__init__`` also constructs a separate
        ``self.dataretriever`` — confirm which retriever is intended;
        they may be configured differently.
        """
        return self.wikidb.dataretriever.search(article_title)
def getRawArticle(self, title):
# Retrieve article text, recursively following #redirects.
if title == '':
return ''
article_text = \
self.dataretriever.get_text_article(title).decode('utf-8')
# Stripping leading & trailing whitespace fixes template expansion.
article_text = article_text.lstrip()
article_text = article_text.rstrip()
|