author     Aleksey Lim <alsroot@member.fsf.org>   2009-07-14 03:05:09 (GMT)
committer  Aleksey Lim <alsroot@member.fsf.org>   2009-07-14 03:05:09 (GMT)
commit     1eeffe9e24f4f8be117f7775eae0f9388b08dc2f (patch)
tree       44574cf5d2f7308d8b0e606e4c07bd2811a048b9 /Speak.activity
parent     27509f29c0123a7ce137233f9b21b1a860c7937c (diff)
Simplify pipeline by using identity plugin; handle warnings in gst bus
Diffstat (limited to 'Speak.activity')
-rw-r--r--  Speak.activity/espeak.py      44
-rw-r--r--  Speak.activity/espeak_cmd.py  11
-rw-r--r--  Speak.activity/espeak_gst.py  14
-rw-r--r--  Speak.activity/mouth.py       13
4 files changed, 43 insertions(+), 39 deletions(-)
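
This patch drops the per-backend tee/fakesink plumbing: each backend now supplies only its source elements, and BaseAudioGrab.make_pipeline appends a common tail of roughly

    <backend source> ! decodebin ! queue ! identity name=valve ! autoaudiosink

so the mouth animation taps the audio through the identity element's handoff signal, and a GStreamer warning on the bus now arms a watchdog that stops the pipeline if no further messages arrive (short notes follow the individual file diffs below).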
diff --git a/Speak.activity/espeak.py b/Speak.activity/espeak.py
index 4076e3b..36614f1 100644
--- a/Speak.activity/espeak.py
+++ b/Speak.activity/espeak.py
@@ -21,7 +21,7 @@ logger = logging.getLogger('speak')
supported = True
-class AudioGrab(gobject.GObject):
+class BaseAudioGrab(gobject.GObject):
__gsignals__ = {
'new-buffer': (gobject.SIGNAL_RUN_FIRST, None, [gobject.TYPE_PYOBJECT])
}
@@ -48,31 +48,47 @@ class AudioGrab(gobject.GObject):
self.quiet = True
def make_pipeline(self, cmd):
+ if self.pipeline is not None:
+ self.stop_sound_device()
+ del self.pipeline
+
# build a pipeline that reads the given file
# and sends it to both the real audio output
# and a fake one that we use to draw from
- self.pipeline = gst.parse_launch(cmd)
-
- def on_buffer(element, buffer, pad):
+ self.pipeline = gst.parse_launch(
+ cmd + ' ' \
+ '! decodebin ' \
+ '! queue ' \
+ '! identity name=valve ' \
+ '! autoaudiosink')
+
+ def on_buffer(element, buffer):
# we got a new buffer of data, ask for another
gobject.timeout_add(100, self._new_buffer, str(buffer))
return True
- # make a fakesink to capture audio
- fakesink = gst.element_factory_make("fakesink", "fakesink")
- fakesink.connect("handoff",on_buffer)
- fakesink.set_property("signal-handoffs",True)
- self.pipeline.add(fakesink)
-
- # attach it to the pipeline
- conv = self.pipeline.get_by_name("conv")
- gst.element_link_many(conv, fakesink)
+ valve = self.pipeline.get_by_name('valve')
+ valve.props.signal_handoffs = True
+ valve.connect('handoff', on_buffer)
def gstmessage_cb(bus, message):
- if message.type in (gst.MESSAGE_EOS, gst.MESSAGE_ERROR):
+ self._was_message = True
+
+ if message.type == gst.MESSAGE_WARNING:
+ def check_after_warnings():
+ if not self._was_message:
+ self.stop_sound_device()
+ return True
+
+ logger.debug(message.type)
+ self._was_message = False
+                gobject.timeout_add(500, check_after_warnings)
+
+ elif message.type in (gst.MESSAGE_EOS, gst.MESSAGE_ERROR):
logger.debug(message.type)
self.stop_sound_device()
+ self._was_message = False
bus = self.pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', gstmessage_cb)
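
For readers new to the identity trick, here is a minimal standalone sketch of the same pattern, assuming the GStreamer 0.10 Python bindings (pygst) that the activity uses; audiotestsrc stands in for the real espeak/filesrc sources, and on_handoff, check_after_warning and state are illustrative names, not activity code:

import pygst
pygst.require('0.10')
import gst
import gobject

# tap the stream through an identity element's handoff signal,
# as make_pipeline() does above with the element named 'valve'
pipeline = gst.parse_launch(
        'audiotestsrc num-buffers=100 '
        '! queue '
        '! identity name=valve '
        '! autoaudiosink')

def on_handoff(element, buf):
    # one callback per buffer flowing through the identity element
    print('%d bytes of audio passed the valve' % len(str(buf)))

valve = pipeline.get_by_name('valve')
valve.props.signal_handoffs = True
valve.connect('handoff', on_handoff)

loop = gobject.MainLoop()
state = {'got_message': False}

def check_after_warning():
    # the watchdog idea from the hunk above: if the bus went quiet
    # after a warning, assume the pipeline stalled and shut it down
    if not state['got_message']:
        pipeline.set_state(gst.STATE_NULL)
        loop.quit()
    return False  # one-shot here; the patch keeps re-checking by returning True

def on_message(bus, message):
    state['got_message'] = True
    if message.type == gst.MESSAGE_WARNING:
        state['got_message'] = False
        gobject.timeout_add(500, check_after_warning)
    elif message.type in (gst.MESSAGE_EOS, gst.MESSAGE_ERROR):
        pipeline.set_state(gst.STATE_NULL)
        loop.quit()

bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', on_message)

pipeline.set_state(gst.STATE_PLAYING)
loop.run()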
diff --git a/Speak.activity/espeak_cmd.py b/Speak.activity/espeak_cmd.py
index f391e5b..682bfcd 100644
--- a/Speak.activity/espeak_cmd.py
+++ b/Speak.activity/espeak_cmd.py
@@ -25,16 +25,9 @@ RATE_MAX = 99
PITCH_DEFAULT = PITCH_MAX/2
RATE_DEFAULT = RATE_MAX/3
-class AudioGrabCmd(espeak.AudioGrab):
+class AudioGrabCmd(espeak.BaseAudioGrab):
def speak(self, status, text):
- self.make_pipeline(
- 'filesrc name=file-source ' \
- '! decodebin ' \
- '! tee name=tee ' \
- 'tee.! audioconvert' \
- '! alsasink ' \
- 'tee.! queue ' \
- '! audioconvert name=conv')
+ self.make_pipeline('filesrc name=file-source')
# espeak uses 80 to 370
rate = 80 + (370-80) * int(status.rate) / 100
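
Worked example for the mapping above: with RATE_MAX = 99, RATE_DEFAULT is 99/3 = 33 under Python 2 integer division, so the default rate becomes 80 + (370 - 80) * 33 / 100 = 80 + 95 = 175.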
diff --git a/Speak.activity/espeak_gst.py b/Speak.activity/espeak_gst.py
index f12453d..85cfa26 100644
--- a/Speak.activity/espeak_gst.py
+++ b/Speak.activity/espeak_gst.py
@@ -23,19 +23,9 @@ RATE_MAX = 200
PITCH_DEFAULT = PITCH_MAX/2
RATE_DEFAULT = RATE_MAX/2
-class AudioGrabGst(espeak.AudioGrab):
+class AudioGrabGst(espeak.BaseAudioGrab):
def speak(self, status, text):
- self.stop_sound_device()
-
- self.make_pipeline(
- 'espeak name=espeak ' \
- '! wavenc ! decodebin ' \
- '! tee name=tee ' \
- 'tee.! audioconvert ' \
- '! alsasink ' \
- 'tee.! queue ' \
- '! audioconvert name=conv')
-
+ self.make_pipeline('espeak name=espeak ! wavenc')
src = self.pipeline.get_by_name('espeak')
pitch = int(status.pitch) - 100
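
Combined with the tail that BaseAudioGrab.make_pipeline now appends (see the espeak.py hunk above), the two backends end up with these full pipelines:

    filesrc name=file-source ! decodebin ! queue ! identity name=valve ! autoaudiosink   (espeak_cmd)
    espeak name=espeak ! wavenc ! decodebin ! queue ! identity name=valve ! autoaudiosink   (espeak_gst)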
diff --git a/Speak.activity/mouth.py b/Speak.activity/mouth.py
index 8b72f4b..b01155a 100644
--- a/Speak.activity/mouth.py
+++ b/Speak.activity/mouth.py
@@ -42,10 +42,15 @@ class Mouth(gtk.DrawingArea):
audioSource.connect("new-buffer", self._new_buffer)
def _new_buffer(self, obj, buf):
- self.newest_buffer = list(unpack( str(int(len(buf))/2)+'h' , buf))
- self.main_buffers += self.newest_buffer
- if(len(self.main_buffers)>self.buffer_size):
- del self.main_buffers[0:(len(self.main_buffers)-self.buffer_size)]
+ if len(buf) < 28:
+ self.newest_buffer = []
+ else:
+ self.newest_buffer = list(unpack( str(int(len(buf))/2)+'h' , buf))
+ self.main_buffers += self.newest_buffer
+ if(len(self.main_buffers)>self.buffer_size):
+ del self.main_buffers[0:(len(self.main_buffers)- \
+ self.buffer_size)]
+
self.queue_draw()
return True
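
The new guard above skips buffers shorter than 28 bytes before unpacking them into signed 16-bit samples for drawing. A self-contained sketch of that conversion (samples_from_buffer is an illustrative name, not part of the activity; the buffer is assumed to arrive as a byte string of native-endian 16-bit samples, as str(buffer) delivers it from the handoff callback):

from struct import unpack

def samples_from_buffer(buf):
    # each sample is 2 bytes ('h' = signed short), so N bytes hold N/2 samples
    if len(buf) < 28:
        # tiny buffers are ignored, as in the patch above
        return []
    return list(unpack(str(len(buf) / 2) + 'h', buf))

# 32 bytes -> 16 samples; on a little-endian machine this prints
# alternating 0 and 32767
print(samples_from_buffer('\x00\x00\xff\x7f' * 8))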