author     flavio <fdanesse@gmail.com>    2012-07-31 14:53:37 (GMT)
committer  flavio <fdanesse@gmail.com>    2012-07-31 14:53:37 (GMT)
commit     37cc8ed1be82569982626f757be6eaaca83c8cf6 (patch)
tree       9c59d7479d4a84912c184eba21c96501bba89205
parent     bdee0fb30a0e4d9da08fc7481e20070a3ce7ac0a (diff)
Modularization and fixes in gstreamer
-rw-r--r--  espeak.py      55
-rw-r--r--  espeak_cmd.py  64
-rw-r--r--  espeak_gst.py  62
-rw-r--r--  eye.py          6
-rw-r--r--  face.py         3
-rw-r--r--  mouth.py       20
6 files changed, 55 insertions, 155 deletions
diff --git a/espeak.py b/espeak.py
index 8743dc4..524dee9 100644
--- a/espeak.py
+++ b/espeak.py
@@ -22,9 +22,12 @@ from gi.repository import GObject
import subprocess
import logging
+import re
+
logger = logging.getLogger('speak')
-supported = True
+PITCH_MAX = 200
+RATE_MAX = 200
GObject.threads_init()
Gst.init(None)
@@ -40,6 +43,22 @@ class BaseAudioGrab(GObject.GObject):
self.handle1 = None
self.handle2 = None
+ def speak(self, status, text):
+ # 175 is default value, min is 80
+ rate = 60 + int(((175 - 80) * 2) * status.rate / RATE_MAX)
+ wavpath = "/tmp/speak.wav"
+
+ subprocess.call(["espeak", "-w", wavpath, "-p", str(status.pitch),
+ "-s", str(rate), "-v", status.voice.name, text],
+ stdout=subprocess.PIPE)
+
+ self.stop_sound_device()
+
+ self.make_pipeline(wavpath)
+
+ # play
+ self.restart_sound_device()
+
def restart_sound_device(self):
self.pipeline.set_state(Gst.State.NULL)
self.pipeline.set_state(Gst.State.PLAYING)
@@ -98,21 +117,21 @@ class BaseAudioGrab(GObject.GObject):
def _new_buffer(self, buf):
self.emit("new-buffer", buf)
return False
+
+def voices():
+ out = []
+ result = subprocess.Popen(["espeak", "--voices"],
+ stdout=subprocess.PIPE).communicate()[0]
+
+ for line in result.split('\n'):
+ m = re.match(r'\s*\d+\s+([\w-]+)\s+([MF])\s+([\w_-]+)\s+(.+)', line)
+ if not m:
+ continue
+ language, gender, name, stuff = m.groups()
+ if stuff.startswith('mb/'): # or \
+ #name in ('en-rhotic','english_rp','english_wmids'):
+ # these voices don't produce sound
+ continue
+ out.append((language, name))
-# load proper espeak plugin
-try:
- import gi
- gi.require_version('Gst', '1.0')
- from gi.repository import Gst
- Gst.element_factory_make('espeak', 'espeak')
- from espeak_gst import AudioGrabGst as AudioGrab
- from espeak_gst import *
- logger.info('use gst-plugins-espeak')
-except Exception, e:
- logger.info('disable gst-plugins-espeak: %s' % e)
- if subprocess.call('which espeak', shell=True) == 0:
- from espeak_cmd import AudioGrabCmd as AudioGrab
- from espeak_cmd import *
- else:
- logger.info('disable espeak_cmd')
- supported = False
+ return out
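After this change espeak.py carries both the wav-file speak() path and the voices() listing that previously lived in espeak_cmd.py, and face.py below constructs BaseAudioGrab directly. A minimal usage sketch follows, assuming a status-like object that exposes .pitch, .rate and .voice.name the way speak() reads them; SimpleVoice and SimpleStatus are hypothetical helpers, not part of the activity.

    import espeak

    class SimpleVoice:
        # hypothetical stand-in for the activity's voice object
        def __init__(self, name):
            self.name = name

    class SimpleStatus:
        # hypothetical stand-in for the activity's status object
        def __init__(self, voice, pitch=100, rate=100):
            self.voice = voice
            self.pitch = pitch   # 0..PITCH_MAX (200)
            self.rate = rate     # 0..RATE_MAX (200)

    language, name = espeak.voices()[0]   # first voice espeak reports
    audio = espeak.BaseAudioGrab()
    audio.speak(SimpleStatus(SimpleVoice(name)), 'hello world')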
diff --git a/espeak_cmd.py b/espeak_cmd.py
deleted file mode 100644
index e119662..0000000
--- a/espeak_cmd.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright (C) 2009, Aleksey Lim
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-import re
-import subprocess
-
-import logging
-logger = logging.getLogger('speak')
-
-import espeak
-
-PITCH_MAX = 99
-RATE_MAX = 99
-
-
-class AudioGrabCmd(espeak.BaseAudioGrab):
-
- def speak(self, status, text):
- # 175 is default value, min is 80
- rate = 60 + int(((175 - 80) * 2) * status.rate / RATE_MAX)
- wavpath = "/tmp/speak.wav"
-
- subprocess.call(["espeak", "-w", wavpath, "-p", str(status.pitch),
- "-s", str(rate), "-v", status.voice.name, text],
- stdout=subprocess.PIPE)
-
- self.stop_sound_device()
-
- self.make_pipeline(wavpath)
-
- # play
- self.restart_sound_device()
-
-
-def voices():
- out = []
- result = subprocess.Popen(["espeak", "--voices"], stdout=subprocess.PIPE) \
- .communicate()[0]
-
- for line in result.split('\n'):
- m = re.match(r'\s*\d+\s+([\w-]+)\s+([MF])\s+([\w_-]+)\s+(.+)', line)
- if not m:
- continue
- language, gender, name, stuff = m.groups()
- if stuff.startswith('mb/'): # or \
- #name in ('en-rhotic','english_rp','english_wmids'):
- # these voices don't produce sound
- continue
- out.append((language, name))
-
- return out
diff --git a/espeak_gst.py b/espeak_gst.py
deleted file mode 100644
index 9f159c5..0000000
--- a/espeak_gst.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (C) 2009, Aleksey Lim
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-import logging
-logger = logging.getLogger('speak')
-
-import gi
-gi.require_version('Gst', '1.0')
-from gi.repository import Gst
-import espeak
-
-PITCH_MAX = 200
-RATE_MAX = 200
-
-
-class AudioGrabGst(espeak.BaseAudioGrab):
- def speak(self, status, text):
- if not [i for i in unicode(text, 'utf-8', errors='ignore') \
- if i.isalnum()]:
- return
-
- self.make_pipeline('espeak name=espeak ! wavenc')
- src = self.pipeline.get_by_name('espeak')
-
- pitch = int(status.pitch) - 120
- rate = int(status.rate) - 120
-
- logger.debug('pitch=%d rate=%d voice=%s text=%s' % (pitch, rate,
- status.voice.name, text))
-
- src.props.text = text
- src.props.pitch = pitch
- src.props.rate = rate
- src.props.voice = status.voice.name
-
- self.restart_sound_device()
-
-
-def voices():
- out = []
-
- for i in Gst.element_factory_make('espeak').props.voices:
- name, language, dialect = i
- #if name in ('en-rhotic','english_rp','english_wmids'):
- # these voices don't produce sound
- # continue
- out.append((language, name))
-
- return out
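The deleted espeak_gst.py synthesized speech inside the GStreamer pipeline through the espeak element from gst-plugins-espeak, while the retained path shells out to the espeak binary and plays a temporary wav file. A small probe like the following (a sketch, not part of this commit) shows how the presence of that element could still be detected:

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst

    Gst.init(None)
    # Gst.ElementFactory.find() returns None when gst-plugins-espeak is missing.
    if Gst.ElementFactory.find('espeak') is None:
        print('gst-plugins-espeak not found; only the command-line backend would work')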
diff --git a/eye.py b/eye.py
index 7374f92..66405f6 100644
--- a/eye.py
+++ b/eye.py
@@ -139,10 +139,10 @@ class Glasses(Eye):
Eye.__init__(self, fill_color)
self.show_all()
- self.connect('draw', self.do_draw)
+ self.connect('draw', self.draw_glass)
- def do_draw(self, widget, context):
- rect = self.get_allocation()
+ def draw_glass(self, widget, context):
+ rect = widget.get_allocation()
eyeSize = min(rect.width, rect.height)
outlineWidth = eyeSize / 20.0
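The eye.py hunk above replaces the do_draw() override with a handler connected to the 'draw' signal; in PyGObject/GTK 3 such handlers receive (widget, cairo context) and return False to let default drawing continue. A generic sketch of the pattern (the Panel class and its handler name are illustrative, not taken from this activity):

    import gi
    gi.require_version('Gtk', '3.0')
    from gi.repository import Gtk

    class Panel(Gtk.DrawingArea):
        def __init__(self):
            Gtk.DrawingArea.__init__(self)
            # signal handlers get (widget, context); do_draw() would get only the context
            self.connect('draw', self.draw_panel)

        def draw_panel(self, widget, context):
            rect = widget.get_allocation()
            context.set_source_rgb(1, 1, 1)
            context.rectangle(0, 0, rect.width, rect.height)
            context.fill()
            return False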
diff --git a/face.py b/face.py
index 6e208a5..37a6f97 100644
--- a/face.py
+++ b/face.py
@@ -30,6 +30,7 @@ from gi.repository import Gdk
import sugar3.graphics.style as style
import espeak
+from espeak import BaseAudioGrab
import eye
from eye import Eye
from eye import Glasses
@@ -104,7 +105,7 @@ class View(Gtk.EventBox):
self.fill_color = fill_color
self.modify_bg(0, self.fill_color.get_gdk_color())
- self._audio = espeak.AudioGrab()
+ self._audio = BaseAudioGrab()
self._eyes = []
self._eyebox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
diff --git a/mouth.py b/mouth.py
index 136ec7b..47a021b 100644
--- a/mouth.py
+++ b/mouth.py
@@ -48,6 +48,7 @@ class Mouth(Gtk.DrawingArea):
self.fill_color = fill_color
self.show_all()
+ self.connect('draw', self.draw_mouth)
audioSource.connect("new-buffer", self._new_buffer)
@@ -72,8 +73,8 @@ class Mouth(Gtk.DrawingArea):
self.volume = numpy.core.max(self.main_buffers) # -\
# numpy.core.min(self.main_buffers)
- def do_draw(self, context):
- rect = self.get_allocation()
+ def draw_mouth(self, widget, context):
+ rect = widget.get_allocation()
self.processBuffer()
@@ -114,8 +115,10 @@ class WaveformMouth(Mouth):
self.y_mag_bias_multiplier = 1
self.y_mag = 0.7
+
+ self.show_all()
- def do_draw(self, context):
+ def draw_wave(self, context):
rect = self.get_allocation()
self.param1 = rect.height / 65536.0
self.param2 = rect.height / 2.0
@@ -167,8 +170,11 @@ class FFTMouth(Mouth):
self.scaleX = "10"
self.scaleY = "10"
+
+ self.show_all()
+ self.connect('draw', self.draw_fftmouth)
- def processBuffer(self, rect):
+ def newprocessBuffer(self, rect):
self.param1 = rect.height / 65536.0
self.param2 = rect.height / 2.0
@@ -205,10 +211,10 @@ class FFTMouth(Mouth):
self.peaks = val
- def do_draw(self, context):
- rect = self.get_allocation()
+ def draw_fftmouth(self, widget, context):
+ rect = widget.get_allocation()
- self.processBuffer(rect)
+ self.newprocessBuffer(rect)
# background
context.set_source_rgba(*self.fill_color.get_rgba())
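The mouth widgets now use the same signal-based drawing and keep listening to the "new-buffer" signal emitted by the audio grabber. A rough wiring sketch follows, assuming a Mouth(audioSource, fill_color) constructor as the hunks above suggest and a sugar3 style Color for the fill; both are assumptions, not confirmed by this diff.

    import gi
    gi.require_version('Gtk', '3.0')
    from gi.repository import Gtk
    from sugar3.graphics.style import Color   # assumed fill-colour type, as used in face.py

    import espeak
    import mouth

    audio = espeak.BaseAudioGrab()
    lips = mouth.Mouth(audio, Color('#FFFFFF'))   # assumed argument order

    window = Gtk.Window()
    window.add(lips)
    window.show_all()
    Gtk.main()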