author    Aleksey Lim <alsroot@member.fsf.org>  2009-03-02 02:39:25 (GMT)
committer Aleksey Lim <alsroot@member.fsf.org>  2009-03-02 06:32:19 (GMT)
commit    b413f2a3e40a2d0873f7b8304c3f7e5466f6c048 (patch)
tree      30b0d8cbab0ff304a54b06ca66db217ca00f972c
parent    fd1c197ee8e860c743556ecf15fe930bb5283464 (diff)
Store original href for images
-rw-r--r--  GUI_Components/Compound_Widgets/Gallery_View.py  |   7
-rw-r--r--  Processing/Article/Article_Data.py               |   5
-rw-r--r--  Processing/Article/Sentence.py                   |   3
-rw-r--r--  Processing/Article_Builder.py                    | 426
-rw-r--r--  book.py                                          |  39
-rw-r--r--  net.py                                           |   1
6 files changed, 250 insertions, 231 deletions
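
The thread running through all six files: every image now carries its original (remote) href alongside its local path. Picture_Data grows a third field, the DITA <image> tags gain an orig_href attribute, and Article_Builder is flattened from a class into module-level functions. A minimal sketch of the new Picture_Data call, with placeholder values and assuming the class is importable from its defining module (real callers take these values from the gallery's image_list or from the parsed DITA):

    from Processing.Article.Article_Data import Picture_Data

    # Third positional argument is the new one: the image's original href.
    # Both values below are placeholders, not paths from this commit.
    picture = Picture_Data(source_article_id=1,
                           text='/path/to/data/book/giraffe/image.png',
                           orig='/images/giraffe.png')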
diff --git a/GUI_Components/Compound_Widgets/Gallery_View.py b/GUI_Components/Compound_Widgets/Gallery_View.py
index 5b75dcb..44fb98e 100644
--- a/GUI_Components/Compound_Widgets/Gallery_View.py
+++ b/GUI_Components/Compound_Widgets/Gallery_View.py
@@ -148,7 +148,9 @@ class Gallery_View( gtk.HBox ):
def drag_data_get_event(self, widget, context, selection_data, info, timestamp, data):
logger.debug("getting data")
atom = gtk.gdk.atom_intern("section")
- imagedata = Picture_Data(self.source_article_id, self.image_list[self.current_index][0])
+ imagedata = Picture_Data(self.source_article_id,
+ self.image_list[self.current_index][0],
+ self.image_list[self.current_index][2])
captiondata = Sentence_Data(0, self.source_article_id, 0, 0, 0, self.image_list[self.current_index][1])
paragraph1data = Paragraph_Data(0, self.source_article_id, 0, 0, [imagedata])
paragraph2data = Paragraph_Data(0, self.source_article_id, 0, 0, [captiondata])
@@ -165,7 +167,8 @@ def _validate_image_list(root, image_list):
for i in xrange(len(image_list)):
if not os.access(image_list[i][0], os.F_OK):
if os.access(os.path.join(root, image_list[i][0]), os.F_OK):
- image_list[i] = (os.path.join(root, image_list[i][0]), image_list[i][1])
+ image_list[i] = (os.path.join(root, image_list[i][0]),
+ image_list[i][1], image_list[i][2])
else:
image = None
#removing during for loop was unreliable
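
Gallery_View's image_list entries are now three-element tuples rather than pairs, and _validate_image_list keeps the third element when it rebases a relative path onto root. A sketch of the shape the rest of the class now expects (the unpacked names are illustrative, not from the source):

    # (local_path, caption, orig_href) per entry; only the first two existed before
    local_path, caption, orig_href = self.image_list[self.current_index]
    imagedata = Picture_Data(self.source_article_id, local_path, orig_href)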
diff --git a/Processing/Article/Article_Data.py b/Processing/Article/Article_Data.py
index 2be0a75..042d9d5 100644
--- a/Processing/Article/Article_Data.py
+++ b/Processing/Article/Article_Data.py
@@ -27,11 +27,12 @@ class Sentence_Data:
class Picture_Data:
- def __init__(self, source_article_id = -1, text = None):
+ def __init__(self, source_article_id = -1, text = None, orig=None):
self.source_article_id = source_article_id
self.id = 0
self.text = text
self.type = "picture"
+ self.orig = orig
class Paragraph_Data:
@@ -75,4 +76,4 @@ class Article_Data:
def get_image_list(self):
return self.image_list
-
\ No newline at end of file
+
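
Because orig defaults to None, existing two-argument Picture_Data callers keep working; only call sites that know the original href need to change. For example (a sketch, not code from the repository):

    old_style = Picture_Data(1, 'giraffe/image.png')           # orig stays None
    new_style = Picture_Data(1, 'giraffe/image.png',
                             orig='/images/giraffe.png')        # original href preserved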
diff --git a/Processing/Article/Sentence.py b/Processing/Article/Sentence.py
index 42feb2a..09c31f4 100644
--- a/Processing/Article/Sentence.py
+++ b/Processing/Article/Sentence.py
@@ -153,6 +153,7 @@ class Picture( RawSentence ):
formatting = []
self.text = picture_data.text
+ self.orig = picture_data.orig
rightmark = buf.create_mark(None, insertioniter, True)
leftmark = buf.create_mark(None, insertioniter, False)
@@ -172,7 +173,7 @@ class Picture( RawSentence ):
self.type = "picture"
def getData(self):
- return Picture_Data(self.source_article_id, self.text)
+ return Picture_Data(self.source_article_id, self.text, self.orig)
def checkIntegrity(self, nextiter):
sentences = []
diff --git a/Processing/Article_Builder.py b/Processing/Article_Builder.py
index bc59d14..0e98c6f 100644
--- a/Processing/Article_Builder.py
+++ b/Processing/Article_Builder.py
@@ -8,230 +8,230 @@ import logging
logger = logging.getLogger('infoslicer')
-class Article_Builder:
- """
- Created by Christopher Leonard.
-
- ID descriptions:
- 0 - picture
- 1 - heading
- > 1 - anything
-
- This class converts between DITA and article_data representation of articles. Badly in need of refactoring!
- """
+"""
+Created by Christopher Leonard.
- def __init__(self, workingDir):
- self.workingDir = workingDir
+ID descriptions:
+0 - picture
+1 - heading
+> 1 - anything
- def get_article_from_dita(self, dita):
- """
- This method takes an article in DITA format as input, parses the DITA, and outputs the corresponding article_data object
- """
- workingDir = self.workingDir
- self.sentences = []
- has_shortdesc = False
- input = BeautifulStoneSoup(dita)
- article_id = input.resourceid['id']
- current_section_id = ""
- current_p_id = ""
+This class converts between DITA and article_data representation of articles. Badly in need of refactoring!
+"""
+def get_article_from_dita(image_path, dita):
+ """
+ This method takes an article in DITA format as input, parses the DITA, and outputs the corresponding article_data object
+ """
+ has_shortdesc = False
+ input = BeautifulStoneSoup(dita)
+ article_id = input.resourceid['id']
+ current_section_id = ""
+ current_p_id = ""
+ sentence_data_list = []
+ paragraph_data_list = []
+ section_data_list = []
+ if input.find("shortdesc") != None:
+ paragraph_data=[]
+ for ph in input.shortdesc.findAll("ph"):
+ id = ph['id']
+ source_sentence_id = id
+ source_paragraph_id = "shortdesc"
+ source_section_id = "shortdesc"
+ source_article_id = article_id
+ text = ph.renderContents().replace("\n", "").replace("&amp;#160;", "").strip() + " "
+ if text[0:5] == "Satur":
+ logger.debug(unicode(text))
+ sentence_data = Sentence_Data(id, source_article_id, source_section_id, source_paragraph_id, source_sentence_id, text)
+ sentence_data_list.append(sentence_data)
+ paragraph_data.append(Paragraph_Data("shortdesc", article_id, "shortdesc", "shortdesc", sentence_data_list))
+ section_data = Section_Data("shortdesc", article_id, "shortdesc", paragraph_data)
+ section_data_list.append(section_data)
sentence_data_list = []
- paragraph_data_list = []
- section_data_list = []
- if input.find("shortdesc") != None:
- paragraph_data=[]
- for ph in input.shortdesc.findAll("ph"):
- id = ph['id']
- source_sentence_id = id
- source_paragraph_id = "shortdesc"
- source_section_id = "shortdesc"
- source_article_id = article_id
- text = ph.renderContents().replace("\n", "").replace("&amp;#160;", "").strip() + " "
- if text[0:5] == "Satur":
- logger.debug(unicode(text))
- sentence_data = Sentence_Data(id, source_article_id, source_section_id, source_paragraph_id, source_sentence_id, text)
- sentence_data_list.append(sentence_data)
- paragraph_data.append(Paragraph_Data("shortdesc", article_id, "shortdesc", "shortdesc", sentence_data_list))
- section_data = Section_Data("shortdesc", article_id, "shortdesc", paragraph_data)
- section_data_list.append(section_data)
- sentence_data_list = []
- input.shortdesc.extract()
- has_shortdesc = True
- taglist = input.findAll(re.compile("refbody|section|p|ph|image"))
- for i in xrange(len(taglist)):
- tag = taglist[len(taglist) - i - 1]
- if tag.name == "ph":
+ input.shortdesc.extract()
+ has_shortdesc = True
+ taglist = input.findAll(re.compile("refbody|section|p|ph|image"))
+ for i in xrange(len(taglist)):
+ tag = taglist[len(taglist) - i - 1]
+ if tag.name == "ph":
+ id = tag['id']
+ source_sentence_id = id
+ source_paragraph_id = current_p_id
+ source_section_id = current_section_id
+ source_article_id = article_id
+ text = tag.renderContents().replace("\n", "").replace("&amp;#160;", "").strip() + " "
+ sentence_data = Sentence_Data(id, source_article_id, source_section_id, source_paragraph_id, source_sentence_id, text)
+ sentence_data_list.insert(0, sentence_data)
+ elif tag.name == "p":
+ if not tag.has_key("id"):
+ id = -1
+ else:
id = tag['id']
- source_sentence_id = id
- source_paragraph_id = current_p_id
- source_section_id = current_section_id
- source_article_id = article_id
- text = tag.renderContents().replace("\n", "").replace("&amp;#160;", "").strip() + " "
- sentence_data = Sentence_Data(id, source_article_id, source_section_id, source_paragraph_id, source_sentence_id, text)
- sentence_data_list.insert(0, sentence_data)
- elif tag.name == "p":
- if not tag.has_key("id"):
- id = -1
- else:
- id = tag['id']
- source_paragraph_id = id
- source_section_id = current_section_id
- source_article_id = article_id
- paragraph_data = Paragraph_Data(id, source_article_id, source_section_id, source_paragraph_id, sentence_data_list)
- paragraph_data_list.insert(0, paragraph_data)
- sentence_data_list = []
- current_p_id = id
- elif tag.name == "refbody" :
- if tag.findParent("reference").has_key("id"):
- id = "r" + tag.findParent("reference")['id']
- else:
- id = "r90000"
- source_section_id = id
- source_article_id = article_id
- section_data = Section_Data(id, source_article_id, source_section_id, paragraph_data_list)
- if has_shortdesc:
- section_data_list.insert(1,section_data)
- else:
- section_data_list.insert(0,section_data)
- if tag.findChild("title", recursive=False) != None:
- heading = tag.findChild('title').renderContents().replace("\n", "").replace("&amp;#160;", "").strip()
- sen = Sentence_Data(1, source_article_id, source_section_id, 1, 1, heading)
- par = Paragraph_Data(1, source_article_id, source_section_id, 1, [sen])
- headingdata = Section_Data(1, source_article_id, source_section_id, [par])
-
- if has_shortdesc:
- section_data_list.insert(1,headingdata)
- else:
- section_data_list.insert(0,headingdata)
- paragraph_data_list = []
- current_section_id = tag.name[0] + id
+ source_paragraph_id = id
+ source_section_id = current_section_id
+ source_article_id = article_id
+ paragraph_data = Paragraph_Data(id, source_article_id, source_section_id, source_paragraph_id, sentence_data_list)
+ paragraph_data_list.insert(0, paragraph_data)
+ sentence_data_list = []
+ current_p_id = id
+ elif tag.name == "refbody" :
+ if tag.findParent("reference").has_key("id"):
+ id = "r" + tag.findParent("reference")['id']
+ else:
+ id = "r90000"
+ source_section_id = id
+ source_article_id = article_id
+ section_data = Section_Data(id, source_article_id, source_section_id, paragraph_data_list)
+ if has_shortdesc:
+ section_data_list.insert(1,section_data)
+ else:
+ section_data_list.insert(0,section_data)
+ if tag.findChild("title", recursive=False) != None:
+ heading = tag.findChild('title').renderContents().replace("\n", "").replace("&amp;#160;", "").strip()
+ sen = Sentence_Data(1, source_article_id, source_section_id, 1, 1, heading)
+ par = Paragraph_Data(1, source_article_id, source_section_id, 1, [sen])
+ headingdata = Section_Data(1, source_article_id, source_section_id, [par])
- elif tag.name == "section":
- id = "s" + tag['id']
- source_section_id = id
- source_article_id = article_id
-
- section_data = Section_Data(id, source_article_id, source_section_id, paragraph_data_list)
if has_shortdesc:
- section_data_list.insert(1,section_data)
+ section_data_list.insert(1,headingdata)
else:
- section_data_list.insert(0,section_data)
- if tag.findChild("title", recursive=False) != None:
- heading = tag.findChild('title').renderContents().replace("\n", "").replace("&amp;#160;", "").strip()
- sen = Sentence_Data(1, source_article_id, source_section_id, 1, 1, heading)
- par = Paragraph_Data(1, source_article_id, source_section_id, 1, [sen])
- headingdata = Section_Data(1, source_article_id, source_section_id, [par])
-
- if has_shortdesc:
- section_data_list.insert(1,headingdata)
- else:
- section_data_list.insert(0,headingdata)
- paragraph_data_list = []
- current_section_id = id
-
- elif tag.name == "image":
-
- if tag.parent.name == "p":
- source_article_id = article_id
- text = tag['href'].replace("..", workingDir)
- picture_data = Picture_Data(source_article_id, text)
- sentence_data_list.insert(0, picture_data)
+ section_data_list.insert(0,headingdata)
+ paragraph_data_list = []
+ current_section_id = tag.name[0] + id
+
+ elif tag.name == "section":
+ id = "s" + tag['id']
+ source_section_id = id
+ source_article_id = article_id
+
+ section_data = Section_Data(id, source_article_id, source_section_id, paragraph_data_list)
+ if has_shortdesc:
+ section_data_list.insert(1,section_data)
+ else:
+ section_data_list.insert(0,section_data)
+ if tag.findChild("title", recursive=False) != None:
+ heading = tag.findChild('title').renderContents().replace("\n", "").replace("&amp;#160;", "").strip()
+ sen = Sentence_Data(1, source_article_id, source_section_id, 1, 1, heading)
+ par = Paragraph_Data(1, source_article_id, source_section_id, 1, [sen])
+ headingdata = Section_Data(1, source_article_id, source_section_id, [par])
- article_title = input.find("title").renderContents().replace("\n", "").strip()
-
- image_list = []
- imglist_tag = input.find(True, attrs={"id" : "imagelist"})
- if imglist_tag != None:
- for img in imglist_tag.findAll("image"):
- caption = img.findChild("alt")
- if caption != None:
- caption = caption.renderContents().replace("\n", "").strip()
+ if has_shortdesc:
+ section_data_list.insert(1,headingdata)
else:
- caption = ""
- image_list.append((img['href'], caption))
-
- data = Article_Data(article_id, article_id, article_title, "theme", section_data_list, image_list)
-
- return data
+ section_data_list.insert(0,headingdata)
+ paragraph_data_list = []
+ current_section_id = id
+
+ elif tag.name == "image":
+
+ if tag.parent.name == "p":
+ source_article_id = article_id
+ text = image_path + '/' + tag['href']
+ picture_data = Picture_Data(source_article_id, text,
+ tag['orig_href'])
+ sentence_data_list.insert(0, picture_data)
+
+ article_title = input.find("title").renderContents().replace("\n", "").strip()
-
- def get_dita_from_article(self, article):
- """
- This method takes as input an instance of the Article class.
- It calls the getData method of the article class to get the article_data representation of the article.
- It then constructs the corresponding DITA representation of the article.
- """
- workingDir = self.workingDir
- article_data = article.getData()
- output = BeautifulStoneSoup("<?xml version='1.0' encoding='utf-8'?><!DOCTYPE reference PUBLIC \"-//IBM//DTD DITA IBM Reference//EN\" \"ibm-reference.dtd\"><reference><title>%s</title><prolog></prolog></reference>" % article_data.article_title)
- current_ref = output.reference
- current_title = None
- for section in article_data.sections_data:
- #headings check
- if len(section.paragraphs_data) == 1 and len(section.paragraphs_data[0].sentences_data) == 1 and section.paragraphs_data[0].sentences_data[0].id == 1:
- paragraph = section.paragraphs_data[0]
- current_title = paragraph.sentences_data[0].text
- elif str(section.id).startswith("r"):
- reference_tag = self.tag_generator(output, "reference", attrs=[("id", section.id.replace("r", ""))])
- if current_title != None:
- reference_tag.append(self.tag_generator(output, "title", contents=current_title))
- current_title = None
- reference_tag.append(self.tag_generator(output, "refbody"))
- for paragraph in section.paragraphs_data:
- if paragraph.id == "shortdesc":
- paragraph_tag = self.tag_generator(output, "shortdesc")
- else:
- paragraph_tag = self.tag_generator(output, "p", attrs=[("id", str(paragraph.id))])
- for sentence in paragraph.sentences_data:
- ph_tag = self.tag_generator(output, "ph", attrs=[("id", str(sentence.id))], contents = sentence.text)
- paragraph_tag.append(ph_tag)
- reference_tag.refbody.append(paragraph_tag)
- output.reference.append(reference_tag)
- current_ref = reference_tag.refbody
+ image_list = []
+ imglist_tag = input.find(True, attrs={"id" : "imagelist"})
+ if imglist_tag != None:
+ for img in imglist_tag.findAll("image"):
+ caption = img.findChild("alt")
+ if caption != None:
+ caption = caption.renderContents().replace("\n", "").strip()
else:
- if section.id == "shortdesc":
- section_tag = self.tag_generator(output, "section", attrs=[("id", "shortdesc")])
- else:
- section_tag = self.tag_generator(output, "section", attrs=[("id", str(section.id).replace("s", ""))])
- if current_title != None:
- section_tag.append(self.tag_generator(output, "title", contents=current_title))
- current_title = None
- for paragraph in section.paragraphs_data:
- paragraph_tag = self.tag_generator(output, "p", attrs=[("id", str(paragraph.id))])
- for sentence in paragraph.sentences_data:
- if sentence.type == "sentence":
- ph_tag = self.tag_generator(output, "ph", attrs=[("id", str(sentence.id))], contents = sentence.text)
- paragraph_tag.append(ph_tag)
- elif sentence.type == "picture":
- # switch image to relative path
- image_tag = self.tag_generator(output, "image", attrs=[("href", sentence.text.replace(workingDir, ".."))])
- paragraph_tag.append(image_tag)
- else:
- logger.ebiug(sentence.type)
-
- section_tag.append(paragraph_tag)
- current_ref.append(section_tag)
+ caption = ""
+ image_list.append((img['href'], caption, img['orig_href']))
+
+ data = Article_Data(article_id, article_id, article_title, "theme", section_data_list, image_list)
+
+ return data
+
+
+def get_dita_from_article(image_path, article):
+ """
+ This method takes as input an instance of the Article class.
+ It calls the getData method of the article class to get the article_data representation of the article.
+ It then constructs the corresponding DITA representation of the article.
+ """
+ image_sources = {}
+ article_data = article.getData()
+ output = BeautifulStoneSoup("<?xml version='1.0' encoding='utf-8'?><!DOCTYPE reference PUBLIC \"-//IBM//DTD DITA IBM Reference//EN\" \"ibm-reference.dtd\"><reference><title>%s</title><prolog></prolog></reference>" % article_data.article_title)
+ current_ref = output.reference
+ current_title = None
+
+ for section in article_data.sections_data:
+ #headings check
+ if len(section.paragraphs_data) == 1 and len(section.paragraphs_data[0].sentences_data) == 1 and section.paragraphs_data[0].sentences_data[0].id == 1:
+ paragraph = section.paragraphs_data[0]
+ current_title = paragraph.sentences_data[0].text
+ elif str(section.id).startswith("r"):
+ reference_tag = _tag_generator(output, "reference", attrs=[("id", section.id.replace("r", ""))])
if current_title != None:
- current_ref.append('<section id="56756757"><p id="6875534"><ph id="65657657">%s</ph></p></section>' % current_title)
+ reference_tag.append(_tag_generator(output, "title", contents=current_title))
current_title = None
- if article_data.image_list != []:
- for unnecessary_tag in output.findAll(True, attrs={"id" : "imagelist"}):
- unnecessary_tag.extract()
- image_list = self.tag_generator(output, "reference", [("id", "imagelist")])
- output.reference.append(image_list)
- image_list_body = self.tag_generator(output, "refbody")
- image_list.append(image_list_body)
- for image in article_data.image_list:
- image_tag = self.tag_generator(output, "image", [("href", image[0])], "<alt>" + image[-1] + "</alt>")
- image_list_body.append(image_tag)
- dita = output.prettify()
- return dita
-
- def tag_generator(self, soup, name, attrs=[], contents=None):
- if attrs != []:
- new_tag = Tag(soup, name, attrs)
+ reference_tag.append(_tag_generator(output, "refbody"))
+ for paragraph in section.paragraphs_data:
+ if paragraph.id == "shortdesc":
+ paragraph_tag = _tag_generator(output, "shortdesc")
+ else:
+ paragraph_tag = _tag_generator(output, "p", attrs=[("id", str(paragraph.id))])
+ for sentence in paragraph.sentences_data:
+ ph_tag = _tag_generator(output, "ph", attrs=[("id", str(sentence.id))], contents = sentence.text)
+ paragraph_tag.append(ph_tag)
+ reference_tag.refbody.append(paragraph_tag)
+ output.reference.append(reference_tag)
+ current_ref = reference_tag.refbody
else:
- new_tag = Tag(soup, name)
- if contents != None:
- new_tag.insert(0, contents)
- return new_tag
-
+ if section.id == "shortdesc":
+ section_tag = _tag_generator(output, "section", attrs=[("id", "shortdesc")])
+ else:
+ section_tag = _tag_generator(output, "section", attrs=[("id", str(section.id).replace("s", ""))])
+ if current_title != None:
+ section_tag.append(_tag_generator(output, "title", contents=current_title))
+ current_title = None
+ for paragraph in section.paragraphs_data:
+ paragraph_tag = _tag_generator(output, "p", attrs=[("id", str(paragraph.id))])
+ for sentence in paragraph.sentences_data:
+ if sentence.type == "sentence":
+ ph_tag = _tag_generator(output, "ph", attrs=[("id", str(sentence.id))], contents = sentence.text)
+ paragraph_tag.append(ph_tag)
+ elif sentence.type == "picture":
+ # switch image to relative path
+ text = sentence.text.replace(image_path, '') \
+ .lstrip('/')
+ image_sources[text.split('/')[0]] = None
+ image_tag = _tag_generator(output,
+ "image", attrs=[("href", text),
+ ('orig_href', sentence.orig)])
+ paragraph_tag.append(image_tag)
+ else:
+ logger.ebiug(sentence.type)
+
+ section_tag.append(paragraph_tag)
+ current_ref.append(section_tag)
+ if current_title != None:
+ current_ref.append('<section id="56756757"><p id="6875534"><ph id="65657657">%s</ph></p></section>' % current_title)
+ current_title = None
+ if article_data.image_list != []:
+ for unnecessary_tag in output.findAll(True, attrs={"id" : "imagelist"}):
+ unnecessary_tag.extract()
+ image_list = _tag_generator(output, "reference", [("id", "imagelist")])
+ output.reference.append(image_list)
+ image_list_body = _tag_generator(output, "refbody")
+ image_list.append(image_list_body)
+ for image in article_data.image_list:
+ image_tag = _tag_generator(output, "image", [("href", image[0]), ("orig_href", image[2])], "<alt>" + image[-1] + "</alt>")
+ image_list_body.append(image_tag)
+ dita = output.prettify()
+
+ return (dita, image_sources)
+
+def _tag_generator(soup, name, attrs=[], contents=None):
+ if attrs != []:
+ new_tag = Tag(soup, name, attrs)
+ else:
+ new_tag = Tag(soup, name)
+ if contents != None:
+ new_tag.insert(0, contents)
+ return new_tag
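
Article_Builder is no longer instantiated with a workingDir: get_article_from_dita and get_dita_from_article are plain module functions that take the image directory explicitly, and get_dita_from_article now returns a pair, the DITA string plus an image_sources dict whose keys are the leading directory of each embedded image href (values are left as None for the caller to fill in). A usage sketch, with image_root standing in for whatever directory the caller uses (book.py passes its module-level image_root):

    from Processing import Article_Builder

    data = Article_Builder.get_article_from_dita(image_root, dita_text)
    dita, image_sources = Article_Builder.get_dita_from_article(image_root, article)
    # image_sources: {'<first path component of each image href>': None, ...}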
diff --git a/book.py b/book.py
index c582dc6..e3ad208 100644
--- a/book.py
+++ b/book.py
@@ -27,13 +27,15 @@ from sugar.activity.activity import get_bundle_path, get_activity_root
import net
from Processing.Article.Article import Article
-from Processing.Article_Builder import Article_Builder
+from Processing import Article_Builder
logger = logging.getLogger('infoslicer')
wiki = None
custom = None
+image_root = os.path.join(get_activity_root(), 'data', 'book')
+
class Book(gobject.GObject):
__gsignals__ = {
'article-selected' : (SIGNAL_RUN_FIRST, None, [TYPE_PYOBJECT]),
@@ -58,7 +60,8 @@ class Book(gobject.GObject):
if entry:
content = self._load(entry['uid'])
if content:
- data = Article_Builder(self.root).get_article_from_dita(content)
+ data = Article_Builder.get_article_from_dita(image_root,
+ content)
self._article = Article(data)
else:
self._article = Article()
@@ -75,14 +78,8 @@ class Book(gobject.GObject):
# save current article
def sync_article(self):
- if not self._article:
- return
-
- self.find_by_uuid(self._article.uid)['title'] = \
- self._article.article_title
- contents = Article_Builder(self.root).get_dita_from_article(
- self._article)
- self._save(self._article.uid, contents)
+ # stub
+ pass
def create(self, title, content):
uid = str(uuid.uuid1())
@@ -154,7 +151,9 @@ class Book(gobject.GObject):
if not self.uid:
self.uid = str(uuid.uuid1())
self.revision = 1
- os.makedirs(self.root, 0775)
+
+ if not os.path.exists(self.root):
+ os.makedirs(self.root, 0775)
for i in preinstalled:
filepath = os.path.join(get_bundle_path(), 'examples', i[1])
@@ -213,8 +212,7 @@ class WikiBook(Book):
(_('Giraffe (from en.wikipedia.org)'), "giraffe-wikipedia.dita"),
(_('Zebra (from en.wikipedia.org)'), "zebra-wikipedia.dita") ]
- root = os.path.join(get_activity_root(), 'data', 'book')
- Book.__init__(self, PREINSTALLED, root)
+ Book.__init__(self, PREINSTALLED, image_root)
class CustomBook(Book):
def __init__(self, filepath=None):
@@ -247,3 +245,18 @@ class CustomBook(Book):
for i in files:
zip.write(os.path.join(root, i), os.path.join(relpath, i))
zip.close()
+
+ def sync_article(self):
+ if not self._article:
+ return
+
+ self.find_by_uuid(self._article.uid)['title'] = \
+ self._article.article_title
+
+ contents, image_sources = Article_Builder.get_dita_from_article(
+ image_root, self._article)
+
+ #for i in image_sources.keys():
+ # image_sources[i] = wiki[i].
+
+ self._save(self._article.uid, contents)
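
sync_article becomes a stub on the base Book and is reimplemented only on CustomBook, which appears to be the one book that actually re-serialises its article to DITA; the image_sources dict returned by get_dita_from_article is captured but not yet consumed (the wiki lookup remains commented out). A sketch of the call shape, assuming a Sugar environment where CustomBook can be constructed:

    book = CustomBook()
    # ... the user edits book._article in the UI ...
    book.sync_article()  # writes DITA built by Article_Builder.get_dita_from_article(image_root, ...)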
diff --git a/net.py b/net.py
index 8f43009..1d6728b 100644
--- a/net.py
+++ b/net.py
@@ -95,6 +95,7 @@ def image_handler(root, uid, document):
#change to relative paths:
if not fail:
image['href'] = os.path.join(dir_path.replace(os.path.join(root, ""), "", 1), image_title)
+ image['orig_href'] = path
else:
image.extract()
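
image_handler in net.py already rewrote each <image> href to a path relative to the book root; it now also records the handler's path value, which looks to be the image's original location, in orig_href. That attribute is what get_article_from_dita later copies into Picture_Data.orig. Illustratively (attribute values are placeholders; the assignment style matches the BeautifulSoup tag access used in net.py):

    # After image_handler, each kept <image> tag carries both attributes:
    image['href'] = 'giraffe/Giraffe.jpg'       # local, relative to the book root
    image['orig_href'] = '/images/Giraffe.jpg'  # where the image was originally referenced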