Web   ·   Wiki   ·   Activities   ·   Blog   ·   Lists   ·   Chat   ·   Meeting   ·   Bugs   ·   Git   ·   Translate   ·   Archive   ·   People   ·   Donate
summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
author Raul Gutierrez Segales <rgs@itevenworks.net> 2012-10-21 23:24:41 (GMT)
committer Raul Gutierrez Segales <rgs@itevenworks.net> 2012-10-22 07:39:17 (GMT)
commit 20d973f926f6c73d09cb064ccbb3d1e0dbd43a73 (patch)
tree 9ea96fbeb4e777c86b135ab285327f68545802fa
parent 7051918f8d4edfb5b46969667cc7134ce2db65c6 (diff)
Move from gst (static binding) to Gst (dynamic binding)
We still need to fix a few things here and there.
-rw-r--r--  TurtleArt/tagplay.py                   87
-rw-r--r--  plugins/audio_sensors/audiograb.py     48
-rw-r--r--  plugins/camera_sensor/camera_sensor.py  4
-rw-r--r--  plugins/camera_sensor/tacamera.py      11
-rw-r--r--  pysamples/grecord.py                   54
5 files changed, 100 insertions, 104 deletions
diff --git a/TurtleArt/tagplay.py b/TurtleArt/tagplay.py
index 627b2c7..bc38b6e 100644
--- a/TurtleArt/tagplay.py
+++ b/TurtleArt/tagplay.py
@@ -30,15 +30,15 @@ import gi
gi.require_version('Gtk', '3.0')
from gi.repository import GObject
+from gi.repository import Gst, GstVideo
+
GObject.threads_init()
+Gst.init(None)
-import pygst
-import gst
-import gst.interfaces
+from gi.repository import GdkX11
from gi.repository import Gtk
from gi.repository import Gdk
-
def play_audio_from_file(lc, file_path):
""" Called from Show block of audio media """
if lc.gplay is not None and lc.gplay.player is not None:
@@ -117,7 +117,6 @@ class Gplay():
self.bin = Gtk.Window()
self.videowidget = VideoWidget()
- print self.videowidget
self.bin.add(self.videowidget)
self.bin.set_type_hint(Gdk.WindowTypeHint.NORMAL)
self.bin.set_decorated(False)
@@ -137,20 +136,11 @@ class Gplay():
def _player_error_cb(self, widget, message, detail):
self.player.stop()
- self.player.set_uri(None)
logging.debug('Error: %s - %s' % (message, detail))
- def _player_stream_info_cb(self, widget, stream_info):
- if not len(stream_info) or self.got_stream_info:
- return
-
+ def _player_stream_info_cb(self, widget, playbin):
GST_STREAM_TYPE_VIDEO = 2
-
- only_audio = True
- for item in stream_info:
- if item.props.type == GST_STREAM_TYPE_VIDEO:
- only_audio = False
- self.only_audio = only_audio
+ self.only_audio = playbin.props.n_video == 0
self.got_stream_info = True
def start(self, file_path=None):
@@ -173,9 +163,9 @@ class Gplay():
self.player.set_uri(self.playlist[0])
self.currentplaying = 0
self.play_toggled()
- self.show_all()
- except:
- logging.error('Error playing %s' % (self.playlist[0]))
+ self.bin.show_all()
+ except Exception, e:
+ logging.error('Error playing %s: %s' % (self.playlist[0], e))
return False
def play_toggled(self):
@@ -200,7 +190,7 @@ class GstPlayer(GObject.GObject):
self.playing = False
self.error = False
- self.player = gst.element_factory_make('playbin2', 'player')
+ self.player = Gst.ElementFactory.make('playbin', 'player')
videowidget.realize()
self.videowidget = videowidget
@@ -217,42 +207,45 @@ class GstPlayer(GObject.GObject):
self.player.set_property('uri', uri)
def on_sync_message(self, bus, message):
- if message.structure is None:
+ structure = message.get_structure()
+ if structure is None:
return
- if message.structure.get_name() == 'prepare-xwindow-id':
- self.videowidget.set_sink(message.src, self.videowidget_xid)
- message.src.set_property('force-aspect-ratio', True)
+
+ if structure.get_name() == 'prepare-window-handle':
+ print "I am bernie"
+ message.src.set_window_handle(self.videowidget_xid)
+ message.src.force_aspect_ratio = True
def on_message(self, bus, message):
t = message.type
- if t == gst.MESSAGE_ERROR:
+ if t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
logging.debug('Error: %s - %s' % (err, debug))
self.error = True
self.emit('eos')
self.playing = False
self.emit('error', str(err), str(debug))
- elif t == gst.MESSAGE_EOS:
+ elif t == Gst.MessageType.EOS:
self.emit('eos')
self.playing = False
- elif t == gst.MESSAGE_STATE_CHANGED:
+ elif t == Gst.MessageType.STATE_CHANGED:
old, new, pen = message.parse_state_changed()
- if old == gst.STATE_READY and new == gst.STATE_PAUSED:
- self.emit('stream-info',
- self.player.props.stream_info_value_array)
+ if old == Gst.State.READY and new == Gst.State.PAUSED:
+ self.emit('stream-info', self.player)
+
# else:
# logging.debug(message.type)
def _init_video_sink(self):
- self.bin = gst.Bin()
- videoscale = gst.element_factory_make('videoscale')
+ self.bin = Gst.Bin()
+ videoscale = Gst.ElementFactory.make('videoscale', None)
self.bin.add(videoscale)
- pad = videoscale.get_pad('sink')
- ghostpad = gst.GhostPad('sink', pad)
+ pad = videoscale.get_static_pad('sink')
+ ghostpad = Gst.GhostPad.new('sink', pad)
self.bin.add_pad(ghostpad)
videoscale.set_property('method', 0)
- caps_string = 'video/x-raw-yuv, '
+ caps_string = 'video/x-raw, '
r = self.videowidget.get_allocation()
if r.width > 500 and r.height > 500:
# Sigh... xvimagesink on the XOs will scale the video to fit
@@ -263,31 +256,33 @@ class GstPlayer(GObject.GObject):
caps_string += 'width=%d, height=%d' % (w, h)
else:
caps_string += 'width=480, height=360'
- caps = gst.Caps(caps_string)
- self.filter = gst.element_factory_make('capsfilter', 'filter')
+ caps = Gst.Caps.from_string(caps_string)
+ self.filter = Gst.ElementFactory.make('capsfilter', 'filter')
self.bin.add(self.filter)
self.filter.set_property('caps', caps)
- conv = gst.element_factory_make('ffmpegcolorspace', 'conv')
+ conv = Gst.ElementFactory.make('videoconvert', 'conv')
self.bin.add(conv)
- videosink = gst.element_factory_make('autovideosink')
+ videosink = Gst.ElementFactory.make('autovideosink', None)
self.bin.add(videosink)
- gst.element_link_many(videoscale, self.filter, conv, videosink)
+ videoscale.link(self.filter)
+ self.filter.link(conv)
+ conv.link(videosink)
self.player.set_property('video-sink', self.bin)
def pause(self):
- self.player.set_state(gst.STATE_PAUSED)
+ self.player.set_state(Gst.State.PAUSED)
self.playing = False
logging.debug('pausing player')
def play(self):
- self.player.set_state(gst.STATE_PLAYING)
+ self.player.set_state(Gst.State.PLAYING)
self.playing = True
self.error = False
logging.debug('playing player')
def stop(self):
- self.player.set_state(gst.STATE_NULL)
+ self.player.set_state(Gst.State.NULL)
self.playing = False
logging.debug('stopped player')
# return False
@@ -301,19 +296,19 @@ class GstPlayer(GObject.GObject):
class VideoWidget(Gtk.DrawingArea):
def __init__(self):
- GObject.GObject.__init__(self)
- print self
- help (self)
+ Gtk.DrawingArea.__init__(self)
self.set_events(Gdk.EventMask.EXPOSURE_MASK)
self.imagesink = None
self.set_double_buffered(False)
self.set_app_paintable(True)
+
def do_expose_event(self, event):
if self.imagesink:
self.imagesink.expose()
return False
else:
return True
+
def set_sink(self, sink, xid):
self.imagesink = sink
self.imagesink.set_xwindow_id(xid)
diff --git a/plugins/audio_sensors/audiograb.py b/plugins/audio_sensors/audiograb.py
index 6072c61..21c92e9 100644
--- a/plugins/audio_sensors/audiograb.py
+++ b/plugins/audio_sensors/audiograb.py
@@ -16,9 +16,6 @@
# along with this library; if not, write to the Free Software
# Foundation, 51 Franklin Street, Suite 500 Boston, MA 02110-1335 USA
-import pygst
-import gst
-import gst.interfaces
from numpy import fromstring
import subprocess
import traceback
@@ -27,6 +24,7 @@ from threading import Timer
from TurtleArt.taconstants import XO1
from TurtleArt.tautils import debug_output
+from gi.repository import Gst
# Initial device settings
RATE = 48000
@@ -105,31 +103,31 @@ class AudioGrab():
self.pads = []
self.queue = []
self.fakesink = []
- self.pipeline = gst.Pipeline('pipeline')
- self.alsasrc = gst.element_factory_make('alsasrc', 'alsa-source')
+ self.pipeline = Gst.Pipeline('pipeline')
+ self.alsasrc = Gst.ElementFactory.make('alsasrc', 'alsa-source')
self.pipeline.add(self.alsasrc)
- self.caps1 = gst.element_factory_make('capsfilter', 'caps1')
+ self.caps1 = Gst.ElementFactory.make('capsfilter', 'caps1')
self.pipeline.add(self.caps1)
caps_str = 'audio/x-raw-int,rate=%d,channels=%d,depth=16' % (
RATE, self.channels)
- self.caps1.set_property('caps', gst.caps_from_string(caps_str))
+ self.caps1.set_property('caps', Gst.caps_from_string(caps_str))
if self.channels == 1:
- self.fakesink.append(gst.element_factory_make('fakesink', 'fsink'))
+ self.fakesink.append(Gst.ElementFactory.make('fakesink', 'fsink'))
self.pipeline.add(self.fakesink[0])
self.fakesink[0].connect('handoff', self.on_buffer, 0)
self.fakesink[0].set_property('signal-handoffs', True)
- gst.element_link_many(self.alsasrc, self.caps1, self.fakesink[0])
+ Gst.element_link_many(self.alsasrc, self.caps1, self.fakesink[0])
else:
if not hasattr(self, 'splitter'):
- self.splitter = gst.element_factory_make('deinterleave')
+ self.splitter = Gst.ElementFactory.make('deinterleave')
self.pipeline.add(self.splitter)
self.splitter.set_properties('keep-positions=true', 'name=d')
self.splitter.connect('pad-added', self._splitter_pad_added)
- gst.element_link_many(self.alsasrc, self.caps1, self.splitter)
+ Gst.element_link_many(self.alsasrc, self.caps1, self.splitter)
for i in range(self.channels):
- self.queue.append(gst.element_factory_make('queue'))
+ self.queue.append(Gst.ElementFactory.make('queue'))
self.pipeline.add(self.queue[i])
- self.fakesink.append(gst.element_factory_make('fakesink'))
+ self.fakesink.append(Gst.ElementFactory.make('fakesink'))
self.pipeline.add(self.fakesink[i])
self.fakesink[i].connect('handoff', self.on_buffer, i)
self.fakesink[i].set_property('signal-handoffs', True)
@@ -141,9 +139,9 @@ class AudioGrab():
self.capture_interval_sample = False
def _query_mixer(self):
- self._mixer = gst.element_factory_make('alsamixer')
- rc = self._mixer.set_state(gst.STATE_PAUSED)
- assert rc == gst.STATE_CHANGE_SUCCESS
+ self._mixer = Gst.ElementFactory.make('alsamixer')
+ rc = self._mixer.set_state(Gst.State.PAUSED)
+ assert rc == Gst.State.CHANGE_SUCCESS
# Query the available controls
tracks_list = self._mixer.list_tracks()
@@ -186,9 +184,9 @@ class AudioGrab():
'''
self.pads.append(pad)
if self._pad_count < self.channels:
- pad.link(self.queue[self._pad_count].get_pad('sink'))
- self.queue[self._pad_count].get_pad('src').link(
- self.fakesink[self._pad_count].get_pad('sink'))
+ pad.link(self.queue[self._pad_count].get_static_pad('sink'))
+ self.queue[self._pad_count].get_static_pad('src').link(
+ self.fakesink[self._pad_count].get_static_pad('sink'))
self._pad_count += 1
else:
debug_output('ignoring channels > %d' % (self.channels),
@@ -218,13 +216,13 @@ class AudioGrab():
def start_sound_device(self):
'''Start or Restart grabbing data from the audio capture'''
- gst.event_new_flush_start()
- self.pipeline.set_state(gst.STATE_PLAYING)
+ Gst.event_new_flush_start()
+ self.pipeline.set_state(Gst.State.PLAYING)
def stop_sound_device(self):
'''Stop grabbing data from capture device'''
- gst.event_new_flush_stop()
- self.pipeline.set_state(gst.STATE_NULL)
+ Gst.event_new_flush_stop()
+ self.pipeline.set_state(Gst.State.NULL)
def sample_now(self):
''' Log the current sample now. This method is called from the
@@ -245,7 +243,7 @@ class AudioGrab():
self.pause_grabbing()
caps_str = 'audio/x-raw-int,rate=%d,channels=%d,depth=16' % (
sr, self.channels)
- self.caps1.set_property('caps', gst.caps_from_string(caps_str))
+ self.caps1.set_property('caps', Gst.caps_from_string(caps_str))
self.resume_grabbing()
def get_sampling_rate(self):
@@ -342,7 +340,7 @@ class AudioGrab():
'''Get mute status of a control'''
if not control:
return default
- return bool(control.flags & gst.interfaces.MIXER_TRACK_MUTE)
+ return bool(control.flags & Gst.interfaces.MIXER_TRACK_MUTE)
def _set_mute(self, control, name, value):
'''Mute a control'''
diff --git a/plugins/camera_sensor/camera_sensor.py b/plugins/camera_sensor/camera_sensor.py
index f694767..2f01045 100644
--- a/plugins/camera_sensor/camera_sensor.py
+++ b/plugins/camera_sensor/camera_sensor.py
@@ -15,7 +15,6 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-import gst
from fcntl import ioctl
import os
from time import time
@@ -28,6 +27,7 @@ from plugins.camera_sensor.v4l2 import v4l2_control, V4L2_CID_AUTOGAIN, \
from plugins.plugin import Plugin
+from gi.repository import Gst
from TurtleArt.tapalette import make_palette
from TurtleArt.talogo import media_blocks_dictionary, primitive_dictionary
from TurtleArt.tautils import get_path, debug_output
@@ -44,7 +44,7 @@ class Camera_sensor(Plugin):
self._ag_control = None
self.camera = None
- v4l2src = gst.element_factory_make('v4l2src')
+ v4l2src = Gst.ElementFactory.make('v4l2src')
if v4l2src.props.device_name is not None:
self._status = True
diff --git a/plugins/camera_sensor/tacamera.py b/plugins/camera_sensor/tacamera.py
index 40bd53d..ec43641 100644
--- a/plugins/camera_sensor/tacamera.py
+++ b/plugins/camera_sensor/tacamera.py
@@ -20,8 +20,9 @@
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
-import gst, time
+import time
from gi.repository import GObject
+from gi.repository import Gst
from TurtleArt.tautils import debug_output
@@ -34,7 +35,7 @@ class Camera():
def __init__(self):
''' Prepare camera pipeline to pixbuf and signal watch '''
- self.pipe = gst.parse_launch('!'.join(GST_PIPE))
+ self.pipe = Gst.parse_launch('!'.join(GST_PIPE))
self.bus = self.pipe.get_bus()
self.bus.add_signal_watch()
self.bus.connect('message', self._on_message)
@@ -51,11 +52,11 @@ class Camera():
''' Start grabbing '''
self.pixbuf = None
self.image_ready = False
- self.pipe.set_state(gst.STATE_PLAYING)
+ self.pipe.set_state(Gst.State.PLAYING)
while not self.image_ready:
- self.bus.poll(gst.MESSAGE_ANY, -1)
+ self.bus.poll(Gst.MessageType.ANY, -1)
# self.stop_camera_input()
def stop_camera_input(self):
''' Stop grabbing '''
- self.pipe.set_state(gst.STATE_NULL)
+ self.pipe.set_state(Gst.State.NULL)
diff --git a/pysamples/grecord.py b/pysamples/grecord.py
index 3d7efdb..8e5fffc 100644
--- a/pysamples/grecord.py
+++ b/pysamples/grecord.py
@@ -20,6 +20,7 @@ def myblock(tw, arg):
from gi.repository import GObject
GObject.threads_init()
+ Gst.init(None)
from TurtleArt.tautils import get_path
from TurtleArt.tagplay import play_audio_from_file
@@ -45,7 +46,7 @@ def myblock(tw, arg):
self._audio_transcode_handler = None
self._transcode_id = None
- self._pipeline = gst.Pipeline("Record")
+ self._pipeline = Gst.Pipeline("Record")
self._create_audiobin()
self._pipeline.add(self._audiobin)
@@ -55,42 +56,43 @@ def myblock(tw, arg):
def _create_audiobin(self):
''' Assemble all the pieces we need. '''
- src = gst.element_factory_make("alsasrc", "absrc")
+ src = Gst.ElementFactory.make("alsasrc", "absrc")
# attempt to use direct access to the 0,0 device, solving some A/V
# sync issues
src.set_property("device", "plughw:0,0")
- hwdev_available = src.set_state(gst.STATE_PAUSED) != \
- gst.STATE_CHANGE_FAILURE
- src.set_state(gst.STATE_NULL)
+ hwdev_available = src.set_state(Gst.State.PAUSED) != \
+ Gst.State.CHANGE_FAILURE
+ src.set_state(Gst.State.NULL)
if not hwdev_available:
src.set_property("device", "default")
- srccaps = gst.Caps("audio/x-raw-int,rate=16000,channels=1,depth=16")
+ caps_str = "audio/x-raw-int,rate=16000,channels=1,depth=16"
+ srccaps = Gst.Caps.from_string(caps_str)
# guarantee perfect stream, important for A/V sync
- rate = gst.element_factory_make("audiorate")
+ rate = Gst.ElementFactory.make("audiorate")
# without a buffer here, gstreamer struggles at the start of the
# recording and then the A/V sync is bad for the whole video
# (possibly a gstreamer/ALSA bug -- even if it gets caught up, it
# should be able to resync without problem)
- queue = gst.element_factory_make("queue", "audioqueue")
+ queue = Gst.ElementFactory.make("queue", "audioqueue")
queue.set_property("leaky", True) # prefer fresh data
queue.set_property("max-size-time", 5000000000) # 5 seconds
queue.set_property("max-size-buffers", 500)
queue.connect("overrun", self._log_queue_overrun)
- enc = gst.element_factory_make("wavenc", "abenc")
+ enc = Gst.ElementFactory.make("wavenc", "abenc")
- sink = gst.element_factory_make("filesink", "absink")
+ sink = Gst.ElementFactory.make("filesink", "absink")
sink.set_property("location", self.capture_file)
- self._audiobin = gst.Bin("audiobin")
+ self._audiobin = Gst.Bin("audiobin")
self._audiobin.add(src, rate, queue, enc, sink)
src.link(rate, srccaps)
- gst.element_link_many(rate, queue, enc, sink)
+ Gst.element_link_many(rate, queue, enc, sink)
def _log_queue_overrun(self, queue):
''' We use a buffer, which may overflow. '''
@@ -108,15 +110,15 @@ def myblock(tw, arg):
def start_recording_audio(self):
''' Start the stream in order to start recording. '''
- if self._get_state() == gst.STATE_PLAYING:
+ if self._get_state() == Gst.State.PLAYING:
return
- self._pipeline.set_state(gst.STATE_PLAYING)
+ self._pipeline.set_state(Gst.State.PLAYING)
self._recording = True
def stop_recording_audio(self):
''' Stop recording and then convert the results into a
.ogg file using a new stream. '''
- self._pipeline.set_state(gst.STATE_NULL)
+ self._pipeline.set_state(Gst.State.NULL)
self._recording = False
if not os.path.exists(self.capture_file) or \
@@ -128,7 +130,7 @@ def myblock(tw, arg):
os.remove(self.save_file)
line = 'filesrc location=' + self.capture_file + ' name=audioFilesrc ! wavparse name=audioWavparse ! audioconvert name=audioAudioconvert ! vorbisenc name=audioVorbisenc ! oggmux name=audioOggmux ! filesink name=audioFilesink'
- audioline = gst.parse_launch(line)
+ audioline = Gst.parse_launch(line)
vorbis_enc = audioline.get_by_name('audioVorbisenc')
@@ -141,12 +143,12 @@ def myblock(tw, arg):
'message', self._onMuxedAudioMessageCb, audioline)
self._transcode_id = GObject.timeout_add(
200, self._transcodeUpdateCb, audioline)
- audioline.set_state(gst.STATE_PLAYING)
+ audioline.set_state(Gst.State.PLAYING)
def _transcodeUpdateCb(self, pipe):
''' Where are we in the transcoding process? '''
position, duration = self._query_position(pipe)
- if position != gst.CLOCK_TIME_NONE:
+ if position != Gst.CLOCK_TIME_NONE:
value = position * 100.0 / duration
value = value/100.0
return True
@@ -154,27 +156,27 @@ def myblock(tw, arg):
def _query_position(self, pipe):
''' Where are we in the stream? '''
try:
- position, format = pipe.query_position(gst.FORMAT_TIME)
+ position, format = pipe.query_position(Gst.FORMAT_TIME)
except:
- position = gst.CLOCK_TIME_NONE
+ position = Gst.CLOCK_TIME_NONE
try:
- duration, format = pipe.query_duration(gst.FORMAT_TIME)
+ duration, format = pipe.query_duration(Gst.FORMAT_TIME)
except:
- duration = gst.CLOCK_TIME_NONE
+ duration = Gst.CLOCK_TIME_NONE
return (position, duration)
def _onMuxedAudioMessageCb(self, bus, message, pipe):
''' Clean up at end of stream.'''
- if message.type != gst.MESSAGE_EOS:
+ if message.type != Gst.MessageType.EOS:
return True
GObject.source_remove(self._audio_transcode_handler)
self._audio_transcode_handler = None
GObject.source_remove(self._transcode_id)
self._transcode_id = None
- pipe.set_state(gst.STATE_NULL)
+ pipe.set_state(Gst.State.NULL)
pipe.get_bus().remove_signal_watch()
pipe.get_bus().disable_sync_message_emission()
@@ -184,12 +186,12 @@ def myblock(tw, arg):
def _bus_message_handler(self, bus, message):
''' Handle any messages associated with the stream. '''
t = message.type
- if t == gst.MESSAGE_EOS:
+ if t == Gst.MessageType.EOS:
if self._eos_cb:
cb = self._eos_cb
self._eos_cb = None
cb()
- elif t == gst.MESSAGE_ERROR:
+ elif t == Gst.MessageType.ERROR:
# TODO: if we come out of suspend/resume with errors, then
# get us back up and running... TODO: handle "No space
# left on the resource.gstfilesink.c" err, debug =