author     Aleksey Lim <alsroot@member.fsf.org>  2009-02-01 18:47:11 (GMT)
committer  Aleksey Lim <alsroot@member.fsf.org>  2009-02-02 19:30:15 (GMT)
commit     7a91e6c9cc7f37bab43572c43dd15cd3548fc4d2 (patch)
tree       b7f5bdef588184b7c25a58e936bba27f15293292 /Speak.activity
parent     d30dd27d2214652ff4c1f9c28bb92dc7640c60d5 (diff)
Move face related code to separate file
Diffstat (limited to 'Speak.activity')
-rwxr-xr-x  Speak.activity/activity.py  141
-rw-r--r--  Speak.activity/audio.py       2
-rw-r--r--  Speak.activity/face.py      167
3 files changed, 206 insertions(+), 104 deletions(-)
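
The commit replaces the eye/mouth/voice plumbing in SpeakActivity with a single face.View widget. As a quick orientation before the diff, here is a minimal, hypothetical sketch of how the new API introduced by face.py is driven from a PyGTK container. It uses only calls visible in the diff below and omits the Sugar activity setup; the mouth and eye class names (mouth.Mouth, eye.Eye) are assumptions for illustration, since the diff only passes them around as combo box values.

    import gtk
    import face
    import mouth   # provides a mouth widget class (class name assumed here)
    import eye     # provides an eye widget class (class name assumed here)

    view = face.View()        # owns voice, pitch/rate, AudioGrab, eyes and mouth
    view.show()

    box = gtk.VBox(homogeneous=False)
    box.pack_start(view)      # activity.py packs it where eyebox/mouthbox used to go

    # gaze and speech are now delegated to the view
    view.look_at(120, 40)     # eyes track a screen position
    view.look_ahead()         # eyes look straight ahead
    view.say("Hello")         # synthesizes via speechd if available, else espeak

    # pitch/rate live on the view in the 0..PITCH_MAX / 0..RATE_MAX range;
    # say() rescales them to whatever range the active backend expects
    view.pitch = face.PITCH_MAX / 2
    view.rate = face.RATE_MAX / 2

    # swap in different eye/mouth widget classes (the toolbar combo values)
    view.implant_eyes(eye.Eye, 2)       # eye class and number of eyes
    view.implant_mouth(mouth.Mouth)     # mouth class gets the shared AudioGrab

    # pause/resume audio when the activity is deactivated/activated
    view.quiet()
    view.verbose()
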
diff --git a/Speak.activity/activity.py b/Speak.activity/activity.py
index 7eaf401..94b94e6 100755
--- a/Speak.activity/activity.py
+++ b/Speak.activity/activity.py
@@ -50,13 +50,13 @@ import pygst
pygst.require("0.10")
import gst
-import audio
import eye
import glasses
import mouth
import fft_mouth
import waveform_mouth
import voice
+import face
class SpeakActivity(activity.Activity):
def __init__(self, handle):
@@ -64,28 +64,11 @@ class SpeakActivity(activity.Activity):
activity.Activity.__init__(self, handle)
bounds = self.get_allocation()
- self.synth = None
- # try:
- # self.synth = speechd.client.SSIPClient("Speak.activity")
- # try:
- # # Try some speechd v0.6.6 features
- # print "Output modules:", self.synth.list_output_modules()
- # print "Voices:", self.synth.list_synthesis_voices()
- # except:
- # pass
- # except:
- # self.synth = None
- # print "Falling back to espeak command line tool."
-
# pick a voice that espeak supports
self.voices = voice.allVoices()
- #print self.voices
- #self.voice = random.choice(self.voices.values())
- self.voice = voice.defaultVoice()
# make an audio device for playing back and rendering audio
self.connect( "notify::active", self._activeCb )
- self.audio = audio.AudioGrab(datastore, self._jobject)
# make a box to type into
self.entrycombo = gtk.combo_box_entry_new_text()
@@ -97,18 +80,12 @@ class SpeakActivity(activity.Activity):
self.input_font = pango.FontDescription(str='sans bold 24')
self.entry.modify_font(self.input_font)
- # make an empty box for some eyes
- self.eyes = None
- self.eyebox = gtk.HBox()
-
- # make an empty box to put the mouth in
- self.mouth = None
- self.mouthbox = gtk.HBox()
-
+ self.face = face.View()
+ self.face.show()
+
# layout the screen
box = gtk.VBox(homogeneous=False)
- box.pack_start(self.eyebox, expand=False)
- box.pack_start(self.mouthbox)
+ box.pack_start(self.face)
box.pack_start(self.entrycombo, expand=False)
self.set_canvas(box)
@@ -167,7 +144,7 @@ class SpeakActivity(activity.Activity):
# say hello to the user
presenceService = presenceservice.get_instance()
xoOwner = presenceService.get_owner()
- self.say(_("Hello %s. Type something.") % xoOwner.props.nick)
+ self.face.say(_("Hello %s. Type something.") % xoOwner.props.nick)
# XXX do it after(possible) read_file() invoking
# have to rely on calling read_file() from map_cb in sugar-toolkit
@@ -182,14 +159,12 @@ class SpeakActivity(activity.Activity):
self.numeyesadj.connect("value_changed", self.eyes_changed_cb, False)
self.eye_shape_combo.connect('changed', self.eyes_changed_cb, False)
self.eyes_changed_cb(None, True)
-
- # start with the eyes straight ahead
- map(lambda e: e.look_ahead(), self.eyes)
+ self.face.look_ahead()
def write_file(self, file_path):
f = open(file_path, "w")
f.write("speak file format v1\n")
- f.write("voice=%s\n" % quote(self.voice.friendlyname))
+ f.write("voice=%s\n" % quote(self.face.voice.friendlyname))
f.write("text=%s\n" % quote(self.entry.props.text))
history = map(lambda i: i[0], self.entrycombo.get_model())
f.write("history=[%s]\n" % ",".join(map(quote, history)))
@@ -266,7 +241,7 @@ class SpeakActivity(activity.Activity):
pos = layout.get_cursor_pos(index)
x = pos[0][0] / pango.SCALE - entry.props.scroll_offset
y = entry.get_allocation().y
- map(lambda e, x=x, y=y: e.look_at(x,y), self.eyes)
+ self.face.look_at(x, y)
def get_mouse(self):
display = gtk.gdk.display_get_default()
@@ -276,7 +251,7 @@ class SpeakActivity(activity.Activity):
def _mouse_moved_cb(self, widget, event):
# make the eyes track the motion of the mouse cursor
x,y = self.get_mouse()
- map(lambda e, x=x, y=y: e.look_at(x,y), self.eyes)
+ self.face.look_at(x, y)
def _mouse_clicked_cb(self, widget, event):
pass
@@ -295,17 +270,14 @@ class SpeakActivity(activity.Activity):
voicenames.sort()
for name in voicenames:
self.voice_combo.append_item(self.voices[name], name)
- self.voice_combo.set_active(voicenames.index(self.voice.friendlyname))
+ self.voice_combo.set_active(voicenames.index(
+ self.face.voice.friendlyname))
combotool = ToolComboBox(self.voice_combo)
voicebar.insert(combotool, -1)
combotool.show()
- if self.synth is not None:
- # speechd uses -100 to 100
- self.pitchadj = gtk.Adjustment(0, -100, 100, 1, 10, 0)
- else:
- # espeak uses 0 to 99
- self.pitchadj = gtk.Adjustment(50, 0, 99, 1, 10, 0)
+ self.pitchadj = gtk.Adjustment(self.face.pitch, 0, face.PITCH_MAX, 1,
+ face.PITCH_MAX/10, 0)
pitchbar = gtk.HScale(self.pitchadj)
pitchbar.set_draw_value(False)
#pitchbar.set_inverted(True)
@@ -317,12 +289,8 @@ class SpeakActivity(activity.Activity):
voicebar.insert(pitchtool, -1)
pitchbar.show()
- if self.synth is not None:
- # speechd uses -100 to 100
- self.rateadj = gtk.Adjustment(0, -100, 100, 1, 10, 0)
- else:
- # espeak uses 80 to 370
- self.rateadj = gtk.Adjustment(100, 80, 370, 1, 10, 0)
+ self.rateadj = gtk.Adjustment(self.face.rate, 0, face.RATE_MAX, 1,
+ face.RATE_MAX/10, 0)
ratebar = gtk.HScale(self.rateadj)
ratebar.set_draw_value(False)
#ratebar.set_inverted(True)
@@ -337,14 +305,16 @@ class SpeakActivity(activity.Activity):
return voicebar
def voice_changed_cb(self, combo):
- self.voice = combo.props.value
- self.say(self.voice.friendlyname)
+ self.face.voice = combo.props.value
+ self.face.say(self.face.voice.friendlyname)
def pitch_adjusted_cb(self, get, data=None):
- self.say(_("pitch adjusted"))
+ self.face.pitch = get.value
+ self.face.say(_("pitch adjusted"))
def rate_adjusted_cb(self, get, data=None):
- self.say(_("rate adjusted"))
+ self.face.rate = get.value
+ self.face.say(_("rate adjusted"))
def make_face_bar(self):
facebar = gtk.Toolbar()
@@ -383,39 +353,22 @@ class SpeakActivity(activity.Activity):
return facebar
def mouth_changed_cb(self, combo, quiet):
- mouth_class = combo.props.value
- if self.mouth:
- self.mouthbox.remove(self.mouth)
- self.mouth = mouth_class(self.audio)
- self.mouthbox.add(self.mouth)
- self.mouth.show()
- # enable mouse move events so we can track the eyes while the mouse is over the mouth
- self.mouth.add_events(gtk.gdk.POINTER_MOTION_MASK)
- # this SegFaults: self.say(combo.get_active_text())
+ self.face.implant_mouth(combo.props.value)
+
+ # this SegFaults: self.face.say(combo.get_active_text())
if not quiet:
- self.say(_("mouth changed"))
+ self.face.say(_("mouth changed"))
def eyes_changed_cb(self, ignored, quiet):
if self.numeyesadj is None:
return
-
- eye_class = self.eye_shape_combo.props.value
- if self.eyes:
- for eye in self.eyes:
- self.eyebox.remove(eye)
-
- self.eyes = []
- numberOfEyes = int(self.numeyesadj.value)
- for i in range(numberOfEyes):
- eye = eye_class()
- self.eyes.append(eye)
- self.eyebox.pack_start(eye)
- eye.set_size_request(300,300)
- eye.show()
-
- # this SegFaults: self.say(self.eye_shape_combo.get_active_text())
+
+ self.face.implant_eyes(self.eye_shape_combo.props.value,
+ self.numeyesadj.value)
+
+ # this SegFaults: self.face.say(self.eye_shape_combo.get_active_text())
if not quiet:
- self.say(_("eyes changed"))
+ self.face.say(_("eyes changed"))
def _combo_changed_cb(self, combo):
# when a new item is chosen, make sure the text is selected
@@ -446,11 +399,10 @@ class SpeakActivity(activity.Activity):
# the user pressed Return, say the text and clear it out
text = entry.props.text
if text:
- # look ahead
- map(lambda e: e.look_ahead(), self.eyes)
+ self.face.look_ahead()
# speak the text
- self.say(text)
+ self.face.say(text)
# add this text to our history unless it is the same as the last item
history = self.entrycombo.get_model()
@@ -467,32 +419,15 @@ class SpeakActivity(activity.Activity):
def _synth_cb(self, callback_type, index_mark=None):
print "synth callback:", callback_type, index_mark
- def say(self, something):
- if self.audio is None:
- return
-
- print self.voice.name, ":", something
-
- if self.synth is not None:
- self.synth.set_rate(int(self.rateadj.value))
- self.synth.set_pitch(int(self.pitchadj.value))
- self.synth.set_language(self.voice.language)
- self.synth.speak(something) #, callback=self._synth_cb)
- else:
- # ideally we would stream the audio instead of writing to disk each time...
- wavpath = "/tmp/speak.wav"
- subprocess.call(["espeak", "-w", wavpath, "-p", str(self.pitchadj.value), "-s", str(self.rateadj.value), "-v", self.voice.name, something], stdout=subprocess.PIPE)
- self.audio.playfile(wavpath)
-
def _activeCb( self, widget, pspec ):
# only generate sound when this activity is active
if not self.props.active:
- self.audio.stop_sound_device()
+ self.face.quiet()
else:
- self.audio.restart_sound_device()
+ self.face.verbose()
- def on_quit(self, data=None):
- self.audio.on_quit()
+ #def on_quit(self, data=None):
+ # self.audio.on_quit()
# activate gtk threads when this module loads
gtk.gdk.threads_init()
diff --git a/Speak.activity/audio.py b/Speak.activity/audio.py
index 9b37edc..1176fb5 100644
--- a/Speak.activity/audio.py
+++ b/Speak.activity/audio.py
@@ -39,7 +39,7 @@ class AudioGrab(gobject.GObject):
'new-buffer': (gobject.SIGNAL_RUN_FIRST, None, [gobject.TYPE_PYOBJECT])
}
- def __init__(self, datastore, _jobject):
+ def __init__(self):
gobject.GObject.__init__(self)
self.pipeline = None
diff --git a/Speak.activity/face.py b/Speak.activity/face.py
new file mode 100644
index 0000000..bf4a02e
--- /dev/null
+++ b/Speak.activity/face.py
@@ -0,0 +1,167 @@
+# Speak.activity
+# A simple front end to the espeak text-to-speech engine on the XO laptop
+# http://wiki.laptop.org/go/Speak
+#
+# Copyright (C) 2008 Joshua Minor
+# This file is part of Speak.activity
+#
+# Parts of Speak.activity are based on code from Measure.activity
+# Copyright (C) 2007 Arjun Sarwal - arjun@laptop.org
+#
+# Speak.activity is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Speak.activity is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Speak.activity. If not, see <http://www.gnu.org/licenses/>.
+
+
+import sys
+import os
+from urllib import (quote, unquote)
+import subprocess
+import random
+from sugar.activity import activity
+from sugar.datastore import datastore
+from sugar.presence import presenceservice
+import logging
+import gtk
+import gobject
+import pango
+from gettext import gettext as _
+
+# try:
+# sys.path.append('/usr/lib/python2.4/site-packages') # for speechd
+# import speechd.client
+# except:
+# print "Speech-dispatcher not found."
+
+from sugar.graphics.toolbutton import ToolButton
+from sugar.graphics.toolcombobox import ToolComboBox
+from sugar.graphics.combobox import ComboBox
+
+import pygst
+pygst.require("0.10")
+import gst
+
+import audio
+import eye
+import glasses
+import mouth
+import voice
+import fft_mouth
+import waveform_mouth
+
+PITCH_MAX = 100
+RATE_MAX = 100
+
+class View(gtk.VBox):
+ def __init__(self):
+ gtk.VBox.__init__(self, homogeneous=False)
+
+ #self.voice = random.choice(self.voices.values())
+ self.voice = voice.defaultVoice()
+ self.pitch = PITCH_MAX/2
+ self.rate = RATE_MAX/2
+
+ self._audio = audio.AudioGrab()
+ self._synth = None
+ # try:
+ # self._synth = speechd.client.SSIPClient("Speak.activity")
+ # try:
+ # # Try some speechd v0.6.6 features
+ # print "Output modules:", self._synth.list_output_modules()
+ # print "Voices:", self._synth.list_synthesis_voices()
+ # except:
+ # pass
+ # except:
+ # self._synth = None
+ # print "Falling back to espeak command line tool."
+
+ # make an empty box for some eyes
+ self._eyes = None
+ self._eyebox = gtk.HBox()
+ self._eyebox.show()
+
+ # make an empty box to put the mouth in
+ self._mouth = None
+ self._mouthbox = gtk.HBox()
+ self._mouthbox.show()
+
+ # layout the screen
+ self.pack_start(self._eyebox, expand=False)
+ self.pack_start(self._mouthbox)
+
+ def look_ahead(self):
+ map(lambda e: e.look_ahead(), self._eyes)
+
+ def look_at(self, x, y):
+ map(lambda e, x=x, y=y: e.look_at(x,y), self._eyes)
+
+ def implant_eyes(self, klass, number):
+ if self._eyes:
+ for eye in self._eyes:
+ self._eyebox.remove(eye)
+
+ self._eyes = []
+
+ for i in range(int(number)):
+ eye = klass()
+ self._eyes.append(eye)
+ self._eyebox.pack_start(eye)
+ eye.set_size_request(300,300)
+ eye.show()
+
+ def implant_mouth(self, klass):
+ if self._mouth:
+ self._mouthbox.remove(self._mouth)
+
+ self._mouth = klass(self._audio)
+ self._mouth.show()
+ self._mouthbox.add(self._mouth)
+
+ # enable mouse move events so we can track the eyes while the mouse is over the mouth
+ #self._mouth.add_events(gtk.gdk.POINTER_MOTION_MASK)
+
+ def say(self, something):
+ if self._audio is None:
+ return
+
+ logging.debug('%s: %s' % (self.voice.name, something))
+ pitch = int(self.pitch)
+ rate = int(self.rate)
+
+ if self._synth is not None:
+ # speechd uses -100 to 100
+ pitch = pitch*2 - 100
+ # speechd uses -100 to 100
+ rate = rate*2 - 100
+
+ self._synth.set_rate(rate)
+ self._synth.set_pitch(pitch)
+ self._synth.set_language(self.voice.language)
+ self._synth.speak(something) #, callback=self._synth_cb)
+ else:
+ # espeak uses 0 to 99
+ pitch = pitch
+ # espeak uses 80 to 370
+ rate = 80 + (370-80) * rate / 100
+
+ logging.error(">> %d:%d" %(pitch, rate))
+
+ # ideally we would stream the audio instead of writing to disk each time...
+ wavpath = "/tmp/speak.wav"
+ subprocess.call(["espeak", "-w", wavpath, "-p", str(pitch), "-s", str(rate), "-v", self.voice.name, something], stdout=subprocess.PIPE)
+ self._audio.playfile(wavpath)
+
+ def quiet(self):
+ self._audio.stop_sound_device()
+
+ def verbose(self):
+ self._audio.restart_sound_device()