Web   ·   Wiki   ·   Activities   ·   Blog   ·   Lists   ·   Chat   ·   Meeting   ·   Bugs   ·   Git   ·   Translate   ·   Archive   ·   People   ·   Donate
summaryrefslogtreecommitdiffstats
path: root/speech_gst.py
diff options
context:
space:
mode:
authorGonzalo Odiard <godiard@gmail.com>2012-05-02 18:34:29 (GMT)
committer Gonzalo Odiard <godiard@gmail.com>2012-05-02 20:03:48 (GMT)
commitfa52f2684479170e3c8c988ca82f6d4f70ec8c77 (patch)
treee30a6191a1359ccd8874aa524074a636967ea71d /speech_gst.py
parent628b626b39d7388be2d2c552df18f2b429ad9c02 (diff)
Add text to speech functionality to Write - SL #3266
As discussed with the Learning Team, Write needs immediate access to Text to Speech; the global tts feature is too indirect. Signed-off-by: Gonzalo Odiard <gonzalo@laptop.org>
Diffstat (limited to 'speech_gst.py')
-rw-r--r--speech_gst.py110
1 file changed, 110 insertions, 0 deletions
diff --git a/speech_gst.py b/speech_gst.py
new file mode 100644
index 0000000..b827ad9
--- /dev/null
+++ b/speech_gst.py
@@ -0,0 +1,110 @@
+# Copyright (C) 2009 Aleksey S. Lim
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+import gst
+import logging
+
+import speech
+
# NOTE(review): the logger is named 'read-etexts-activity' although this
# file ships with the Write activity — presumably copied from Read Etexts;
# consider renaming to match this activity.
_logger = logging.getLogger('read-etexts-activity')
+
+
def get_all_voices():
    """Return a dict mapping espeak voice identifiers to voice names.

    Keys are the language code, suffixed with '_<dialect>' when the
    voice declares a dialect other than 'none'.
    """
    espeak = gst.element_factory_make('espeak')
    voices_by_id = {}
    for name, language, dialect in espeak.props.voices:
        key = language if dialect == 'none' else language + '_' + dialect
        voices_by_id[key] = name
    return voices_by_id
+
+
def _message_cb(bus, message, pipe):
    """Handle GStreamer bus messages for a speech pipeline.

    Stops the pipeline when the stream ends or errors out, and forwards
    espeak word-mark element messages to the highlight callback.
    """
    if message.type == gst.MESSAGE_EOS:
        pipe.set_state(gst.STATE_NULL)
        # Notify the client that speaking finished, if it registered
        # an end-of-text callback.
        # Fixed: compare to None with 'is not' rather than '!=' (PEP 8).
        if speech.end_text_cb is not None:
            speech.end_text_cb()
    if message.type == gst.MESSAGE_ERROR:
        pipe.set_state(gst.STATE_NULL)
        # Only the main 'play' pipeline drives UI state; reset it on error.
        if pipe is play_speaker[1]:
            speech.reset_cb()
    elif message.type == gst.MESSAGE_ELEMENT and \
            message.structure.get_name() == 'espeak-mark':
        # espeak emits a mark per spoken word; the payload is used as an
        # integer offset to highlight the word being spoken.
        mark = message.structure['mark']
        speech.highlight_cb(int(mark))
+
+
def _create_pipe():
    """Build an espeak -> autoaudiosink GStreamer pipeline.

    Returns a (source, pipeline) tuple: the espeak element, for setting
    text/voice properties, and the pipeline, for state control.
    """
    pipeline = gst.Pipeline('pipeline')

    espeak_src = gst.element_factory_make('espeak', 'source')
    audio_sink = gst.element_factory_make('autoaudiosink', 'sink')
    pipeline.add(espeak_src)
    pipeline.add(audio_sink)
    espeak_src.link(audio_sink)

    # Route bus messages (EOS, errors, espeak word marks) to _message_cb.
    message_bus = pipeline.get_bus()
    message_bus.add_signal_watch()
    message_bus.connect('message', _message_cb, pipeline)

    return (espeak_src, pipeline)
+
+
def _speech(speaker, words):
    """Configure a (source, pipeline) pair and speak *words*."""
    source, pipeline = speaker
    # Apply the globally-selected speech parameters before playing.
    source.props.pitch = speech.pitch
    source.props.rate = speech.rate
    source.props.voice = speech.voice[1]
    source.props.text = words
    # Drop to NULL first so any previous utterance is fully discarded,
    # then start playback from the beginning of the new text.
    pipeline.set_state(gst.STATE_NULL)
    pipeline.set_state(gst.STATE_PLAYING)
+
+
# Two independent (source, pipeline) pairs: 'info' for short spoken
# notifications and 'play' for reading the document text aloud.
info_speaker = _create_pipe()
play_speaker = _create_pipe()
# Presumably track=2 makes espeak emit 'espeak-mark' element messages
# (consumed by _message_cb for word highlighting) — confirm against the
# gst-plugins-espeak documentation.
play_speaker[0].props.track = 2
+
+
def voices():
    """Return the raw espeak voice list from the info pipeline's source."""
    source, _pipeline = info_speaker
    return source.props.voices
+
+
def say(words):
    """Speak *words* on the notification ('info') pipeline."""
    _speech(info_speaker, words)
+
+
def play(words):
    """Speak *words* on the main ('play') pipeline."""
    _speech(play_speaker, words)
+
+
def pause():
    """Pause playback of the main speech pipeline."""
    _source, pipeline = play_speaker
    pipeline.set_state(gst.STATE_PAUSED)
+
+
def continue_play():
    """Resume playback of the main speech pipeline after pause()."""
    _source, pipeline = play_speaker
    pipeline.set_state(gst.STATE_PLAYING)
+
+
def is_stopped():
    """Return True if the main speech pipeline is in the NULL state.

    get_state() returns a tuple mixing a return code with the current
    and pending states; scan it for a gst.State equal to STATE_NULL,
    exactly as the original explicit loop did.
    """
    return any(isinstance(item, gst.State) and item == gst.STATE_NULL
               for item in play_speaker[1].get_state())
+
+
def stop():
    """Stop the main speech pipeline immediately."""
    _source, pipeline = play_speaker
    pipeline.set_state(gst.STATE_NULL)