From f6eaafd6f873187fcf5ff41478f15aea6281775e Mon Sep 17 00:00:00 2001
From: flavio
Date: Tue, 31 Jul 2012 01:15:42 +0000
Subject: Returning to Gstreamer 1.0 - Redefining Pipeline Elements

---
diff --git a/espeak.py b/espeak.py
index 7a2b888..8743dc4 100644
--- a/espeak.py
+++ b/espeak.py
@@ -14,7 +14,10 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 
-import gst
+import gi
+gi.require_version('Gst', '1.0')
+
+from gi.repository import Gst
 from gi.repository import GObject
 
 import subprocess
@@ -23,91 +26,85 @@ logger = logging.getLogger('speak')
 
 supported = True
 
+GObject.threads_init()
+Gst.init(None)
 
 class BaseAudioGrab(GObject.GObject):
     __gsignals__ = {
-        'new-buffer': (GObject.SIGNAL_RUN_FIRST, None, [GObject.TYPE_PYOBJECT])
-    }
+        'new-buffer': (GObject.SIGNAL_RUN_FIRST,
+                       None, [GObject.TYPE_PYOBJECT])}
 
     def __init__(self):
         GObject.GObject.__init__(self)
         self.pipeline = None
-        self.quiet = True
-
+        self.handle1 = None
+        self.handle2 = None
+
     def restart_sound_device(self):
-        self.quiet = False
-
-        self.pipeline.set_state(gst.STATE_NULL)
-        self.pipeline.set_state(gst.STATE_PLAYING)
+        self.pipeline.set_state(Gst.State.NULL)
+        self.pipeline.set_state(Gst.State.PLAYING)
 
     def stop_sound_device(self):
         if self.pipeline is None:
             return
-
-        self.pipeline.set_state(gst.STATE_NULL)
-        # Shut theirs mouths down
+        self.pipeline.set_state(Gst.State.NULL)
         self._new_buffer('')
-        self.quiet = True
-
-    def make_pipeline(self, cmd):
+    def make_pipeline(self, wavpath):
         if self.pipeline is not None:
             self.stop_sound_device()
             del self.pipeline
 
-        # build a pipeline that reads the given file
-        # and sends it to both the real audio output
-        # and a fake one that we use to draw from
-        self.pipeline = gst.parse_launch(
-                cmd + ' ' \
-                '! decodebin ' \
-                '! tee name=tee ' \
-                'tee.! audioconvert ' \
-                '! alsasink ' \
-                'tee.! queue ' \
-                '! audioconvert ! fakesink name=sink')
-
+        self.pipeline = Gst.Pipeline()
+        self.player = Gst.ElementFactory.make("playbin", "espeak")
+        self.pipeline.add(self.player)
+        self.player.set_property("uri", Gst.filename_to_uri(wavpath))
+        self.pipeline.set_state(Gst.State.PLAYING)
+
         def on_buffer(element, buffer, pad):
-            # we got a new buffer of data, ask for another
-            GObject.timeout_add(100, self._new_buffer, str(buffer))
+            if self.handle1:
+                GObject.source_remove(self.handle1)
+            self.handle1 = GObject.timeout_add(100,
+                                               self._new_buffer, str(buffer))
             return True
-
+
         sink = self.pipeline.get_by_name('sink')
-        sink.props.signal_handoffs = True
-        sink.connect('handoff', on_buffer)
-
+
        def gstmessage_cb(bus, message):
             self._was_message = True
-
-            if message.type == gst.MESSAGE_WARNING:
+
+            if message.type == Gst.MessageType.WARNING:
                 def check_after_warnings():
                     if not self._was_message:
                         self.stop_sound_device()
                     return True
-
+
                 logger.debug(message.type)
                 self._was_message = False
-                GObject.timeout_add(500, self._new_buffer, str(buffer))
-
-            elif message.type in (gst.MESSAGE_EOS, gst.MESSAGE_ERROR):
+                if self.handle2:
+                    GObject.source_remove(self.handle2)
+                self.handle2 = GObject.timeout_add(500,
+                                                   self._new_buffer, str(buffer))
+
+            elif message.type in (Gst.MessageType.EOS, Gst.MessageType.ERROR):
                 logger.debug(message.type)
                 self.stop_sound_device()
-
+
             self._was_message = False
 
         bus = self.pipeline.get_bus()
         bus.add_signal_watch()
         bus.connect('message', gstmessage_cb)
-
+
     def _new_buffer(self, buf):
-        if not self.quiet:
-            # pass captured audio to anyone who is interested
-            self.emit("new-buffer", buf)
+        self.emit("new-buffer", buf)
         return False
 
 
 # load proper espeak plugin
 try:
-    import gst
-    gst.element_factory_make('espeak')
+    import gi
+    gi.require_version('Gst', '1.0')
+    from gi.repository import Gst
+    Gst.ElementFactory.make('espeak', 'espeak')
     from espeak_gst import AudioGrabGst as AudioGrab
     from espeak_gst import *
     logger.info('use gst-plugins-espeak')
diff --git a/espeak_cmd.py b/espeak_cmd.py
index f074207..e119662 100644
--- a/espeak_cmd.py
+++ b/espeak_cmd.py
@@ -27,9 +27,8 @@ RATE_MAX = 99
 
 
 class AudioGrabCmd(espeak.BaseAudioGrab):
+
     def speak(self, status, text):
-        self.make_pipeline('filesrc name=file-source')
-
         # 175 is default value, min is 80
         rate = 60 + int(((175 - 80) * 2) * status.rate / RATE_MAX)
         wavpath = "/tmp/speak.wav"
@@ -37,12 +36,11 @@ class AudioGrabCmd(espeak.BaseAudioGrab):
         subprocess.call(["espeak", "-w", wavpath, "-p", str(status.pitch),
                          "-s", str(rate), "-v", status.voice.name, text],
                         stdout=subprocess.PIPE)
-
+
         self.stop_sound_device()
-
-        # set the source file
-        self.pipeline.get_by_name("file-source").props.location = wavpath
-
+
+        self.make_pipeline(wavpath)
+
         # play
         self.restart_sound_device()
diff --git a/espeak_gst.py b/espeak_gst.py
index 4da4f9d..9f159c5 100644
--- a/espeak_gst.py
+++ b/espeak_gst.py
@@ -17,7 +17,9 @@
 import logging
 logger = logging.getLogger('speak')
 
-import gst
+import gi
+gi.require_version('Gst', '1.0')
+from gi.repository import Gst
 import espeak
 
 PITCH_MAX = 200
@@ -26,32 +28,31 @@ RATE_MAX = 200
 
 
 class AudioGrabGst(espeak.BaseAudioGrab):
     def speak(self, status, text):
-        # XXX workaround for http://bugs.sugarlabs.org/ticket/1801
         if not [i for i in unicode(text, 'utf-8', errors='ignore') \
                 if i.isalnum()]:
             return
-
+
         self.make_pipeline('espeak name=espeak ! wavenc')
         src = self.pipeline.get_by_name('espeak')
-
+
         pitch = int(status.pitch) - 120
         rate = int(status.rate) - 120
-
+
         logger.debug('pitch=%d rate=%d voice=%s text=%s' %
                 (pitch, rate, status.voice.name, text))
-
+
         src.props.text = text
         src.props.pitch = pitch
         src.props.rate = rate
         src.props.voice = status.voice.name
-
+
         self.restart_sound_device()
 
 
 def voices():
     out = []
-    for i in gst.element_factory_make('espeak').props.voices:
+    for i in Gst.ElementFactory.make('espeak', None).props.voices:
         name, language, dialect = i
         #if name in ('en-rhotic','english_rp','english_wmids'):
             # these voices don't produce sound
--
cgit v0.9.1
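
Note (not part of the patch): below is a minimal standalone sketch of the playbin-based playback this change moves to, handy for testing the GStreamer 1.0 port outside the activity. It assumes GStreamer 1.0 and PyGObject are installed, uses /tmp/speak.wav only as an example path, and its bus handling loosely mirrors gstmessage_cb above rather than reproducing the activity's exact code.

# Sketch only: play a WAV file with playbin on GStreamer 1.0, similar to
# what make_pipeline() now does. The path and element name are examples.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)

player = Gst.ElementFactory.make('playbin', 'espeak')
player.set_property('uri', Gst.filename_to_uri('/tmp/speak.wav'))
player.set_state(Gst.State.PLAYING)

loop = GLib.MainLoop()


def on_message(bus, message):
    # Stop playback on end-of-stream or error, roughly what gstmessage_cb does.
    if message.type in (Gst.MessageType.EOS, Gst.MessageType.ERROR):
        player.set_state(Gst.State.NULL)
        loop.quit()

bus = player.get_bus()
bus.add_signal_watch()
bus.connect('message', on_message)
loop.run()

playbin bundles uridecodebin and an audio sink, so the explicit decodebin/tee/alsasink graph from the old parse_launch string is no longer needed; the trade-off is that there is no fakesink branch left to feed the new-buffer signal.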