Web   ·   Wiki   ·   Activities   ·   Blog   ·   Lists   ·   Chat   ·   Meeting   ·   Bugs   ·   Git   ·   Translate   ·   Archive   ·   People   ·   Donate
summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
authorGonzalo Odiard <godiard@gmail.com>2012-01-31 19:08:15 (GMT)
committer Simon Schampijer <simon@schampijer.de>2012-02-02 11:49:53 (GMT)
commit450569da09b4e81cbe4f11f63fee47a024b14fad (patch)
tree59c116411a73ca4514b7ae0f7ce0d1d6298bc6d0 /src
parent448122189f9a009f8e4b88ca2f71f39fe1ac13f3 (diff)
Implementation of Text to Speech Feature [1]
The patch adds a device icon to the frame with controls to play/pause/stop any text that is selected by the user in the Sugar UI. The palette does also have options to adjust the pitch and rate of the spoken text. These settings are stored in the GConf backend, the two keys are added to the Sugar schema. As backend gstreamer-plugins-espeak is used [2], it has been in use in many activities already and is packaged for Fedora, it will be added as a dependency to Sugar. Signed-off-by: Gonzalo Odiard <gonzalo@laptop.org> Acked-by: Simon Schampijer <simon@laptop.org> [1] http://wiki.sugarlabs.org/go/Features/GlobalTextToSpeech [2] http://wiki.sugarlabs.org/go/Activity_Team/gst-plugins-espeak
Diffstat (limited to 'src')
-rw-r--r--src/jarabe/model/Makefile.am1
-rw-r--r--src/jarabe/model/speech.py230
-rw-r--r--src/jarabe/view/keyhandler.py29
3 files changed, 232 insertions, 28 deletions
diff --git a/src/jarabe/model/Makefile.am b/src/jarabe/model/Makefile.am
index 92e8712..2fc6b1c 100644
--- a/src/jarabe/model/Makefile.am
+++ b/src/jarabe/model/Makefile.am
@@ -16,4 +16,5 @@ sugar_PYTHON = \
screen.py \
session.py \
sound.py \
+ speech.py \
telepathyclient.py
diff --git a/src/jarabe/model/speech.py b/src/jarabe/model/speech.py
new file mode 100644
index 0000000..ffc108c
--- /dev/null
+++ b/src/jarabe/model/speech.py
@@ -0,0 +1,230 @@
+# Copyright (C) 2011 One Laptop Per Child
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+import logging
+
+import gconf
+import gst
+import gtk
+import gobject
+
+
+DEFAULT_PITCH = 0
+
+
+DEFAULT_RATE = 0
+
+_speech_manager = None
+
+
+class SpeechManager(gobject.GObject):
+
+ __gtype_name__ = 'SpeechManager'
+
+ __gsignals__ = {
+ 'play': (gobject.SIGNAL_RUN_FIRST, None, []),
+ 'pause': (gobject.SIGNAL_RUN_FIRST, None, []),
+ 'stop': (gobject.SIGNAL_RUN_FIRST, None, [])
+ }
+
+ MIN_PITCH = -100
+ MAX_PITCH = 100
+
+ MIN_RATE = -100
+ MAX_RATE = 100
+
+ def __init__(self, **kwargs):
+ gobject.GObject.__init__(self, **kwargs)
+ self._player = _GstSpeechPlayer()
+ self._player.connect('play', self._update_state, 'play')
+ self._player.connect('stop', self._update_state, 'stop')
+ self._player.connect('pause', self._update_state, 'pause')
+ self._voice_name = self._player.get_default_voice()
+ self._pitch = DEFAULT_PITCH
+ self._rate = DEFAULT_RATE
+ self._is_playing = False
+ self._is_paused = False
+ self.restore()
+
+ def _update_state(self, player, signal):
+ self._is_playing = (signal == 'play')
+ self._is_paused = (signal == 'pause')
+ self.emit(signal)
+
+ def get_is_playing(self):
+ return self._is_playing
+
+ is_playing = gobject.property(type=bool, getter=get_is_playing,
+ setter=None, default=False)
+
+ def get_is_paused(self):
+ return self._is_paused
+
+ is_paused = gobject.property(type=bool, getter=get_is_paused,
+ setter=None, default=False)
+
+ def get_pitch(self):
+ return self._pitch
+
+ def get_rate(self):
+ return self._rate
+
+ def set_pitch(self, pitch):
+ self._pitch = pitch
+ self.save()
+
+ def set_rate(self, rate):
+ self._rate = rate
+ self.save()
+
+ def say_text(self, text):
+ if text:
+ self._player.speak(self._pitch, self._rate, self._voice_name, text)
+
+ def say_selected_text(self):
+ clipboard = gtk.clipboard_get(selection='PRIMARY')
+ clipboard.request_text(self.__primary_selection_cb)
+
+ def pause(self):
+ self._player.pause_sound_device()
+
+ def restart(self):
+ self._player.restart_sound_device()
+
+ def stop(self):
+ self._player.stop_sound_device()
+
+ def __primary_selection_cb(self, clipboard, text, user_data):
+ self.say_text(text)
+
+ def save(self):
+ client = gconf.client_get_default()
+ client.set_int('/desktop/sugar/speech/pitch', self._pitch)
+ client.set_int('/desktop/sugar/speech/rate', self._rate)
+ logging.debug('saving speech configuration pitch %s rate %s',
+ self._pitch, self._rate)
+
+ def restore(self):
+ client = gconf.client_get_default()
+ self._pitch = client.get_int('/desktop/sugar/speech/pitch')
+ self._rate = client.get_int('/desktop/sugar/speech/rate')
+ logging.debug('loading speech configuration pitch %s rate %s',
+ self._pitch, self._rate)
+
+
+class _GstSpeechPlayer(gobject.GObject):
+
+ __gsignals__ = {
+ 'play': (gobject.SIGNAL_RUN_FIRST, None, []),
+ 'pause': (gobject.SIGNAL_RUN_FIRST, None, []),
+ 'stop': (gobject.SIGNAL_RUN_FIRST, None, [])
+ }
+
+ def __init__(self):
+ gobject.GObject.__init__(self)
+ self._pipeline = None
+
+ def restart_sound_device(self):
+ if self._pipeline is None:
+ logging.debug('Trying to restart not initialized sound device')
+ return
+
+ self._pipeline.set_state(gst.STATE_PLAYING)
+ self.emit('play')
+
+ def pause_sound_device(self):
+ if self._pipeline is None:
+ return
+
+ self._pipeline.set_state(gst.STATE_PAUSED)
+ self.emit('pause')
+
+ def stop_sound_device(self):
+ if self._pipeline is None:
+ return
+
+ self._pipeline.set_state(gst.STATE_NULL)
+ self.emit('stop')
+
+ def make_pipeline(self, command):
+ if self._pipeline is not None:
+ self.stop_sound_device()
+ del self._pipeline
+
+ self._pipeline = gst.parse_launch(command)
+
+ bus = self._pipeline.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message::element', self.__pipe_message_cb)
+
+ def __pipe_message_cb(self, bus, message):
+ if message.structure.get_name() == 'espeak-mark' and \
+ message.structure['mark'] == 'end':
+ self.emit('stop')
+
+ def speak(self, pitch, rate, voice_name, text):
+ # TODO workaround for http://bugs.sugarlabs.org/ticket/1801
+ if not [i for i in text if i.isalnum()]:
+ return
+ text = text + '<mark name="end"></mark>'
+
+ self.make_pipeline('espeak name=espeak ! autoaudiosink')
+ src = self._pipeline.get_by_name('espeak')
+
+ src.props.text = text
+ src.props.pitch = pitch
+ src.props.rate = rate
+ src.props.voice = voice_name
+ src.props.track = 2 # track for marks
+
+ self.restart_sound_device()
+
+ def get_all_voices(self):
+ all_voices = {}
+ for voice in gst.element_factory_make('espeak').props.voices:
+ name, language, dialect = voice
+ if dialect != 'none':
+ all_voices[language + '_' + dialect] = name
+ else:
+ all_voices[language] = name
+ return all_voices
+
+ def get_default_voice(self):
+ """Try to figure out the default voice, from the current locale ($LANG)
+ Fall back to espeak's voice called Default."""
+ voices = self.get_all_voices()
+
+ locale = os.environ.get('LANG', '')
+ language_location = locale.split('.', 1)[0].lower()
+ language = language_location.split('_')[0]
+ # if the language is es but not es_es default to es_la (latin voice)
+ if language == 'es' and language_location != 'es_es':
+ language_location = 'es_la'
+
+ best = voices.get(language_location) or voices.get(language) \
+ or 'default'
+ logging.debug('Best voice for LANG %s seems to be %s',
+ locale, best)
+ return best
+
+
+def get_speech_manager():
+ global _speech_manager
+
+ if _speech_manager is None:
+ _speech_manager = SpeechManager()
+ return _speech_manager
diff --git a/src/jarabe/view/keyhandler.py b/src/jarabe/view/keyhandler.py
index d79bfe6..a71f260 100644
--- a/src/jarabe/view/keyhandler.py
+++ b/src/jarabe/view/keyhandler.py
@@ -60,13 +60,9 @@ _actions_table = {
'<alt><shift>f': 'frame',
'<alt><shift>q': 'quit_emulator',
'XF86Search': 'open_search',
- '<alt><shift>o': 'open_search',
- '<alt><shift>s': 'say_text',
+ '<alt><shift>o': 'open_search'
}
-SPEECH_DBUS_SERVICE = 'org.laptop.Speech'
-SPEECH_DBUS_PATH = '/org/laptop/Speech'
-SPEECH_DBUS_INTERFACE = 'org.laptop.Speech'
_instance = None
@@ -77,7 +73,6 @@ class KeyHandler(object):
self._key_pressed = None
self._keycode_pressed = 0
self._keystate_pressed = 0
- self._speech_proxy = None
self._key_grabber = KeyGrabber()
self._key_grabber.connect('key-pressed',
@@ -114,28 +109,6 @@ class KeyHandler(object):
sound.set_volume(volume)
sound.set_muted(volume == 0)
- def _get_speech_proxy(self):
- if self._speech_proxy is None:
- bus = dbus.SessionBus()
- speech_obj = bus.get_object(SPEECH_DBUS_SERVICE, SPEECH_DBUS_PATH,
- follow_name_owner_changes=True)
- self._speech_proxy = dbus.Interface(speech_obj,
- SPEECH_DBUS_INTERFACE)
- return self._speech_proxy
-
- def _on_speech_err(self, ex):
- logging.error('An error occurred with the ESpeak service: %r', ex)
-
- def _primary_selection_cb(self, clipboard, text, user_data):
- logging.debug('KeyHandler._primary_selection_cb: %r', text)
- if text:
- self._get_speech_proxy().SayText(text, reply_handler=lambda: None,
- error_handler=self._on_speech_err)
-
- def handle_say_text(self, event_time):
- clipboard = gtk.clipboard_get(selection='PRIMARY')
- clipboard.request_text(self._primary_selection_cb)
-
def handle_previous_window(self, event_time):
self._tabbing_handler.previous_activity(event_time)