Web   ·   Wiki   ·   Activities   ·   Blog   ·   Lists   ·   Chat   ·   Meeting   ·   Bugs   ·   Git   ·   Translate   ·   Archive   ·   People   ·   Donate
summaryrefslogtreecommitdiffstats
path: root/Speak.activity
diff options
context:
space:
mode:
author	Aleksey Lim <alsroot@member.fsf.org>	2009-02-21 12:49:31 (GMT)
committer	Aleksey Lim <alsroot@member.fsf.org>	2009-02-21 12:49:31 (GMT)
commit	f0b74984b013ddf730b7b947318d7dd5826346e4 (patch)
tree	f4ee02bbbf62d4183d84237f808a213f3ef7344b /Speak.activity
parent	5d52643ce4d2341200e390b910f89d45ca98e822 (diff)
Use espeak command when gst-plugins-espeak is not installed (Joshua Minor)
Diffstat (limited to 'Speak.activity')
-rw-r--r--	Speak.activity/audio.py	49
-rw-r--r--	Speak.activity/chat.py	5
-rw-r--r--	Speak.activity/face.py	4
-rw-r--r--	Speak.activity/voice.py	30
4 files changed, 75 insertions, 13 deletions
diff --git a/Speak.activity/audio.py b/Speak.activity/audio.py
index b667b4b..b7febdf 100644
--- a/Speak.activity/audio.py
+++ b/Speak.activity/audio.py
@@ -23,6 +23,7 @@
# This code is a stripped down version of the audio grabber from Measure
+import subprocess
import pygst
pygst.require("0.10")
import gst
@@ -45,7 +46,17 @@ class AudioGrab(gobject.GObject):
gobject.GObject.__init__(self)
self.pipeline = None
- def playfile(self, status, text):
+ def speak(self, status, text):
+ try:
+ self._speak(status, text)
+ except:
+ # espeak uses 80 to 370
+ rate = 80 + (370-80) * int(status.rate) / 100
+ wavpath = "/tmp/speak.wav"
+ subprocess.call(["espeak", "-w", wavpath, "-p", str(status.pitch), "-s", str(rate), "-v", status.voice.name, text], stdout=subprocess.PIPE)
+ self._playfile(wavpath)
+
+ def _speak(self, status, text):
pitch = int(status.pitch)
rate = int(status.rate)
# espeak uses 80 to 370
@@ -90,6 +101,42 @@ class AudioGrab(gobject.GObject):
# how do we detect when the sample has finished playing?
# we should stop the sound device and stop emitting buffers
# to save on CPU and battery usage when there is no audio playing
+
+
+ def _playfile(self, filename):
+ self.stop_sound_device()
+ self._quiet = False
+
+ # build a pipeline that reads the given file
+ # and sends it to both the real audio output
+ # and a fake one that we use to draw from
+ p = 'filesrc name=file-source ! decodebin ! tee name=tee tee.! audioconvert ! alsasink tee.! queue ! audioconvert name=conv'
+ self.pipeline = gst.parse_launch(p)
+
+ # make a fakesink to capture audio
+ fakesink = gst.element_factory_make("fakesink", "fakesink")
+ fakesink.connect("handoff",self.on_buffer)
+ fakesink.set_property("signal-handoffs",True)
+ self.pipeline.add(fakesink)
+
+ bus = self.pipeline.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message', self._gstmessage_cb)
+
+ # attach it to the pipeline
+ conv = self.pipeline.get_by_name("conv")
+ gst.element_link_many(conv, fakesink)
+
+ # set the source file
+ self.pipeline.get_by_name("file-source").set_property('location', filename)
+
+ # play
+ self.restart_sound_device()
+
+ # how do we detect when the sample has finished playing?
+ # we should stop the sound device and stop emitting buffers
+ # to save on CPU and battery usage when there is no audio playing
+
def _gstmessage_cb(self, bus, message):
type = message.type
diff --git a/Speak.activity/chat.py b/Speak.activity/chat.py
index fca8ef8..0b132f1 100644
--- a/Speak.activity/chat.py
+++ b/Speak.activity/chat.py
@@ -160,8 +160,9 @@ class View(hippo.Canvas):
lang_box.props.text = status.voice.friendlyname
if text:
self._chat.add_text(buddy, text)
- if not self.quiet and self.props.window \
- and self.props.window.is_visible():
+ if not self.quiet:
+ # and self.props.window \
+ # and self.props.window.is_visible():
face.say(text)
def farewell(self, buddy):
diff --git a/Speak.activity/face.py b/Speak.activity/face.py
index 0763142..2947ff1 100644
--- a/Speak.activity/face.py
+++ b/Speak.activity/face.py
@@ -47,7 +47,7 @@ class Status:
def __init__(self):
self.voice = voice.defaultVoice()
self.pitch = PITCH_MAX/2
- self.rate = RATE_MAX/2
+ self.rate = RATE_MAX/3
self.eyes = [eye.Eye] * 2
self.mouth = mouth.Mouth
@@ -163,7 +163,7 @@ class View(gtk.EventBox):
#self._mouth.add_events(gtk.gdk.POINTER_MOTION_MASK)
def say(self, something):
- self._audio.playfile(self.status, something)
+ self._audio.speak(self.status, something)
def shut_up(self):
self._audio.stop_sound_device()
diff --git a/Speak.activity/voice.py b/Speak.activity/voice.py
index 9727fb0..256bbf0 100644
--- a/Speak.activity/voice.py
+++ b/Speak.activity/voice.py
@@ -22,6 +22,7 @@
# along with Speak.activity. If not, see <http://www.gnu.org/licenses/>.
+import subprocess
import pygst
pygst.require("0.10")
import gst
@@ -84,16 +85,29 @@ class Voice:
def allVoices():
if len(_allVoices) == 0:
- for i in gst.element_factory_make('espeak').props.voices:
- name, language = i.split(':')
- if name in ('en-rhotic','english_rp','english_wmids'):
- # these voices don't produce sound
- continue
- voice = Voice(language, name)
- _allVoices[voice.friendlyname] = voice
-
+ try:
+ for i in gst.element_factory_make('espeak').props.voices:
+ name, language = i.split(':')
+ if name in ('en-rhotic','english_rp','english_wmids'):
+ # these voices don't produce sound
+ continue
+ voice = Voice(language, name)
+ _allVoices[voice.friendlyname] = voice
+ except:
+ result = subprocess.Popen(["espeak", "--voices"], stdout=subprocess.PIPE).communicate()[0]
+ for line in result.split('\n'):
+ m = re.match(r'\s*\d+\s+([\w-]+)\s+([MF])\s+([\w_-]+)\s+(.+)', line)
+ if m:
+ language, gender, name, stuff = m.groups()
+ if stuff.startswith('mb/') or name in ('en-rhotic','english_rp','english_wmids'):
+ # these voices don't produce sound
+ continue
+ voice = Voice(language, name)
+ _allVoices[voice.friendlyname] = voice
return _allVoices
+
+
def defaultVoice():
"""Try to figure out the default voice, from the current locale ($LANG).
Fall back to espeak's voice called Default."""