author     flavio <fdanesse@gmail.com>  2012-12-08 17:08:14 (GMT)
committer  flavio <fdanesse@gmail.com>  2012-12-08 17:08:14 (GMT)
commit     5175a36d9c00664a2ed7d865425efbc056f9e921 (patch)
tree       a0b946f45665bc110534ad5a47b3224ddf5f47ca
parent     dde61b560ad862eef345456340a8791a1dc5c2c7 (diff)
Port from gst-0.10 to Gst-1.0: FIXMEs and corrections. The activity now starts.
-rw-r--r--   aplay.py       12
-rw-r--r--   glive.py      228
-rw-r--r--   gplay.py       38
-rw-r--r--   mediaview.py    4
-rw-r--r--   model.py        4
-rw-r--r--   record.py      77
6 files changed, 209 insertions, 154 deletions
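The change is mostly a mechanical mapping from the static gst 0.10 module to the introspected Gst 1.0 namespace. A minimal sketch of the correspondence used throughout the patch (element names are illustrative):

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)  # explicit init; "import gst" did this implicitly in 0.10

# gst.element_factory_make('playbin')  ->  Gst.ElementFactory.make('playbin', None)
player = Gst.ElementFactory.make('playbin', None)

# gst.Caps('...')  ->  Gst.Caps.from_string('...')
caps = Gst.Caps.from_string('video/x-raw,framerate=10/1')

# gst.STATE_PLAYING / gst.STATE_NULL  ->  Gst.State.PLAYING / Gst.State.NULL
player.set_state(Gst.State.PLAYING)
player.set_state(Gst.State.NULL)

Note that 1.0 also renamed the raw caps types, so the video/x-raw-yuv and audio/x-raw-int strings kept in the hunks below will probably need a follow-up (video/x-raw and audio/x-raw with a format field).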
diff --git a/aplay.py b/aplay.py
index 40820b5..bf009c8 100644
--- a/aplay.py
+++ b/aplay.py
@@ -21,18 +21,18 @@ import constants
logger = logging.getLogger('record:aplay.py')
def play(file, done_cb=None):
- player.set_state(gst.STATE_NULL)
+ player.set_state(Gst.State.NULL)
def eos_cb(bus, message):
bus.disconnect_by_func(eos_cb)
- player.set_state(gst.STATE_NULL)
+ player.set_state(Gst.State.NULL)
if done_cb is not None:
done_cb()
def error_cb(bus, message):
err, debug = message.parse_error()
logger.error('play_pipe: %s %s' % (err, debug))
- player.set_state(gst.STATE_NULL)
+ player.set_state(Gst.State.NULL)
if done_cb is not None:
done_cb()
@@ -41,10 +41,10 @@ def play(file, done_cb=None):
bus.connect('message::error', error_cb)
player.props.uri = 'file://' + os.path.join(constants.GFX_PATH, file)
- player.set_state(gst.STATE_PLAYING)
+ player.set_state(Gst.State.PLAYING)
-player = gst.element_factory_make('playbin')
-fakesink = gst.element_factory_make('fakesink')
+player = Gst.ElementFactory.make('playbin', 'playbin')
+fakesink = Gst.ElementFactory.make('fakesink', 'fakesink')
player.set_property("video-sink", fakesink)
player.get_bus().add_signal_watch()
diff --git a/glive.py b/glive.py
index c46a215..1c15f11 100644
--- a/glive.py
+++ b/glive.py
@@ -66,7 +66,7 @@ class Glive:
self._detect_camera()
- self._pipeline = gst.Pipeline("Record")
+ self._pipeline = Gst.Pipeline()
self._create_photobin()
self._create_audiobin()
self._create_videobin()
@@ -81,7 +81,7 @@ class Glive:
bus.connect('message', self._bus_message_handler)
def _detect_camera(self):
- v4l2src = gst.element_factory_make('v4l2src')
+ v4l2src = Gst.ElementFactory.make('v4l2src', 'v4l2src')
if v4l2src.props.device_name is None:
return
@@ -93,135 +93,161 @@ class Glive:
# can't find a way to do this (at this time, XO-1 cafe camera driver
# doesn't support framerate changes, but gstreamer caps suggest
# otherwise)
- pipeline = gst.Pipeline()
- caps = gst.Caps('video/x-raw-yuv,framerate=10/1')
- fsink = gst.element_factory_make('fakesink')
- pipeline.add(v4l2src, fsink)
- v4l2src.link(fsink, caps)
- self._can_limit_framerate = pipeline.set_state(gst.STATE_PAUSED) != gst.STATE_CHANGE_FAILURE
- pipeline.set_state(gst.STATE_NULL)
+ pipeline = Gst.Pipeline()
+ caps = Gst.Caps.from_string('video/x-raw-yuv,framerate=10/1')
+ fsink = Gst.ElementFactory.make('fakesink', 'fakesink')
+ pipeline.add(v4l2src)
+ # FIXME: TypeError: argument dest: Expected Gst.Element, but got gi.repository.Gst.Caps
+ #pipeline.add(caps)
+ pipeline.add(fsink)
+ v4l2src.link(fsink)
+ self._can_limit_framerate = pipeline.set_state(Gst.State.PAUSED)  # FIXME: gst.STATE_CHANGE_FAILURE no longer exists; should compare against Gst.StateChangeReturn.FAILURE
+ pipeline.set_state(Gst.State.NULL)
def get_has_camera(self):
return self._has_camera
def _create_photobin(self):
- queue = gst.element_factory_make("queue", "pbqueue")
+ queue = Gst.ElementFactory.make("queue", "pbqueue")
queue.set_property("leaky", True)
queue.set_property("max-size-buffers", 1)
- colorspace = gst.element_factory_make("ffmpegcolorspace", "pbcolorspace")
- jpeg = gst.element_factory_make("jpegenc", "pbjpeg")
+ colorspace = Gst.ElementFactory.make("videoconvert", "pbcolorspace")
+ jpeg = Gst.ElementFactory.make("jpegenc", "pbjpeg")
- sink = gst.element_factory_make("fakesink", "pbsink")
+ sink = Gst.ElementFactory.make("fakesink", "pbsink")
sink.connect("handoff", self._photo_handoff)
sink.set_property("signal-handoffs", True)
- self._photobin = gst.Bin("photobin")
- self._photobin.add(queue, colorspace, jpeg, sink)
+ self._photobin = Gst.Bin()
+ self._photobin.add(queue)
+ self._photobin.add(colorspace)
+ self._photobin.add(jpeg)
+ self._photobin.add(sink)
- gst.element_link_many(queue, colorspace, jpeg, sink)
+ queue.link(colorspace)
+ colorspace.link(jpeg)
+ jpeg.link(sink)
pad = queue.get_static_pad("sink")
- self._photobin.add_pad(gst.GhostPad("sink", pad))
+ self._photobin.add_pad(Gst.GhostPad.new("sink", pad))
def _create_audiobin(self):
- src = gst.element_factory_make("alsasrc", "absrc")
+ src = Gst.ElementFactory.make("alsasrc", "absrc")
# attempt to use direct access to the 0,0 device, solving some A/V
# sync issues
src.set_property("device", "plughw:0,0")
- hwdev_available = src.set_state(gst.STATE_PAUSED) != gst.STATE_CHANGE_FAILURE
- src.set_state(gst.STATE_NULL)
+ hwdev_available = src.set_state(Gst.State.PAUSED)  # FIXME: should compare against Gst.StateChangeReturn.FAILURE
+ src.set_state(Gst.State.NULL)
if not hwdev_available:
src.set_property("device", "default")
- srccaps = gst.Caps("audio/x-raw-int,rate=16000,channels=1,depth=16")
+ srccaps = Gst.Caps.from_string("audio/x-raw-int,rate=16000,channels=1,depth=16")
# guarantee perfect stream, important for A/V sync
- rate = gst.element_factory_make("audiorate")
+ rate = Gst.ElementFactory.make("audiorate", 'audiorate')
# without a buffer here, gstreamer struggles at the start of the
# recording and then the A/V sync is bad for the whole video
# (possibly a gstreamer/ALSA bug -- even if it gets caught up, it
# should be able to resync without problem)
- queue = gst.element_factory_make("queue", "audioqueue")
+ queue = Gst.ElementFactory.make("queue", "audioqueue")
queue.set_property("leaky", True) # prefer fresh data
queue.set_property("max-size-time", 5000000000) # 5 seconds
queue.set_property("max-size-buffers", 500)
queue.connect("overrun", self._log_queue_overrun)
- enc = gst.element_factory_make("wavenc", "abenc")
+ enc = Gst.ElementFactory.make("wavenc", "abenc")
- sink = gst.element_factory_make("filesink", "absink")
+ sink = Gst.ElementFactory.make("filesink", "absink")
sink.set_property("location", os.path.join(Instance.instancePath, "output.wav"))
- self._audiobin = gst.Bin("audiobin")
- self._audiobin.add(src, rate, queue, enc, sink)
+ self._audiobin = Gst.Bin()
+ self._audiobin.add(src)
+ self._audiobin.add(rate)
+ self._audiobin.add(queue)
+ self._audiobin.add(enc)
+ self._audiobin.add(sink)
- src.link(rate, srccaps)
- gst.element_link_many(rate, queue, enc, sink)
+ src.link(rate)
+ rate.link(queue)
+ queue.link(enc)
+ enc.link(sink)
def _create_videobin(self):
- queue = gst.element_factory_make("queue", "videoqueue")
+ queue = Gst.ElementFactory.make("queue", "videoqueue")
queue.set_property("max-size-time", 5000000000) # 5 seconds
queue.set_property("max-size-bytes", 33554432) # 32mb
queue.connect("overrun", self._log_queue_overrun)
- scale = gst.element_factory_make("videoscale", "vbscale")
+ scale = Gst.ElementFactory.make("videoscale", "vbscale")
- scalecapsfilter = gst.element_factory_make("capsfilter", "scalecaps")
+ scalecapsfilter = Gst.ElementFactory.make("capsfilter", "scalecaps")
- scalecaps = gst.Caps('video/x-raw-yuv,width=160,height=120')
+ scalecaps = Gst.Caps.from_string('video/x-raw-yuv,width=160,height=120')
scalecapsfilter.set_property("caps", scalecaps)
- colorspace = gst.element_factory_make("ffmpegcolorspace", "vbcolorspace")
+ colorspace = Gst.ElementFactory.make("videoconvert", "vbcolorspace")
- enc = gst.element_factory_make("theoraenc", "vbenc")
+ enc = Gst.ElementFactory.make("theoraenc", "vbenc")
enc.set_property("quality", 16)
- mux = gst.element_factory_make("oggmux", "vbmux")
+ mux = Gst.ElementFactory.make("oggmux", "vbmux")
- sink = gst.element_factory_make("filesink", "vbfile")
+ sink = Gst.ElementFactory.make("filesink", "vbfile")
sink.set_property("location", os.path.join(Instance.instancePath, "output.ogg"))
- self._videobin = gst.Bin("videobin")
- self._videobin.add(queue, scale, scalecapsfilter, colorspace, enc, mux, sink)
+ self._videobin = Gst.Bin()
+ self._videobin.add(queue)
+ self._videobin.add(scale)
+ self._videobin.add(scalecapsfilter)
+ self._videobin.add(colorspace)
+ self._videobin.add(enc)
+ self._videobin.add(mux)
+ self._videobin.add(sink)
queue.link(scale)
scale.link_pads(None, scalecapsfilter, "sink")
scalecapsfilter.link_pads("src", colorspace, None)
- gst.element_link_many(colorspace, enc, mux, sink)
+ colorspace.link(enc)
+ enc.link(mux)
+ mux.link(sink)
pad = queue.get_static_pad("sink")
- self._videobin.add_pad(gst.GhostPad("sink", pad))
+ self._videobin.add_pad(Gst.GhostPad.new("sink", pad))
def _create_xbin(self):
- scale = gst.element_factory_make("videoscale")
- cspace = gst.element_factory_make("ffmpegcolorspace")
- xsink = gst.element_factory_make("ximagesink", "xsink")
+ scale = Gst.ElementFactory.make("videoscale", 'videoscale')
+ cspace = Gst.ElementFactory.make("videoconvert", 'videoconvert')
+ xsink = Gst.ElementFactory.make("ximagesink", "xsink")
xsink.set_property("force-aspect-ratio", True)
# http://thread.gmane.org/gmane.comp.video.gstreamer.devel/29644
xsink.set_property("sync", False)
- self._xbin = gst.Bin("xbin")
- self._xbin.add(scale, cspace, xsink)
- gst.element_link_many(scale, cspace, xsink)
+ self._xbin = Gst.Bin()
+ self._xbin.add(scale)
+ self._xbin.add(cspace)
+ self._xbin.add(xsink)
+
+ scale.link(cspace)
+ cspace.link(xsink)
pad = scale.get_static_pad("sink")
- self._xbin.add_pad(gst.GhostPad("sink", pad))
+ self._xbin.add_pad(Gst.GhostPad.new("sink", pad))
def _config_videobin(self, quality, width, height):
vbenc = self._videobin.get_by_name("vbenc")
vbenc.set_property("quality", 16)
scaps = self._videobin.get_by_name("scalecaps")
- scaps.set_property("caps", gst.Caps("video/x-raw-yuv,width=%d,height=%d" % (width, height)))
+ scaps.set_property("caps", Gst.Caps.from_string("video/x-raw-yuv,width=%d,height=%d" % (width, height)))
def _create_pipeline(self):
if not self._has_camera:
return
- src = gst.element_factory_make("v4l2src", "camsrc")
+ src = Gst.ElementFactory.make("v4l2src", "camsrc")
try:
# old gst-plugins-good does not have this property
src.set_property("queue-size", 2)
@@ -232,9 +258,9 @@ class Glive:
# on the v4l2src so that it gets communicated all the way down to the
# camera level
if self._can_limit_framerate:
- srccaps = gst.Caps('video/x-raw-yuv,framerate=10/1')
+ srccaps = Gst.Caps.from_string('video/x-raw-yuv,framerate=10/1')
else:
- srccaps = gst.Caps('video/x-raw-yuv')
+ srccaps = Gst.Caps.from_string('video/x-raw-yuv')
# we attempt to limit the framerate on the v4l2src directly, but we
# can't trust this: perhaps we are falling behind in our capture,
@@ -242,24 +268,30 @@ class Glive:
# the videorate element guarantees a perfect framerate and is important
# for A/V sync because OGG does not store timestamps, it just stores
# the FPS value.
- rate = gst.element_factory_make("videorate")
- ratecaps = gst.Caps('video/x-raw-yuv,framerate=10/1')
+ rate = Gst.ElementFactory.make("videorate", 'videorate')
+ ratecaps = Gst.Caps.from_string('video/x-raw-yuv,framerate=10/1')
- tee = gst.element_factory_make("tee", "tee")
- queue = gst.element_factory_make("queue", "dispqueue")
+ tee = Gst.ElementFactory.make("tee", "tee")
+ queue = Gst.ElementFactory.make("queue", "dispqueue")
# prefer fresh frames
queue.set_property("leaky", True)
queue.set_property("max-size-buffers", 2)
- self._pipeline.add(src, rate, tee, queue)
- src.link(rate, srccaps)
- rate.link(tee, ratecaps)
+ self._pipeline.add(src)
+ self._pipeline.add(rate)
+ self._pipeline.add(tee)
+ self._pipeline.add(queue)
+ src.link(rate)
+ # FIXME: TypeError: argument dest: Expected Gst.Element, but got gi.repository.Gst.Caps
+ #rate.link(srccaps)
+ rate.link(tee)
+ #tee.link(ratecaps)
tee.link(queue)
- self._xvsink = gst.element_factory_make("xvimagesink", "xsink")
- self._xv_available = self._xvsink.set_state(gst.STATE_PAUSED) != gst.STATE_CHANGE_FAILURE
- self._xvsink.set_state(gst.STATE_NULL)
+ self._xvsink = Gst.ElementFactory.make("xvimagesink", "xsink")
+ self._xv_available = self._xvsink.set_state(Gst.State.PAUSED)  # FIXME: should compare against Gst.StateChangeReturn.FAILURE
+ self._xvsink.set_state(Gst.State.NULL)
# http://thread.gmane.org/gmane.comp.video.gstreamer.devel/29644
self._xvsink.set_property("sync", False)
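The set_state() FIXMEs above all have the same shape: in 1.0 set_state() returns a value of the Gst.StateChangeReturn enum, so the availability probes would look roughly like this sketch:

ret = self._xvsink.set_state(Gst.State.PAUSED)
self._xv_available = ret != Gst.StateChangeReturn.FAILURE
self._xvsink.set_state(Gst.State.NULL)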
@@ -312,8 +344,10 @@ class Glive:
return self._xbin.get_by_name("xsink")
def play(self, use_xv=True):
- if self._get_state() == gst.STATE_PLAYING:
- return
+ # TypeError: get_state() takes exactly 2 arguments (1 given)
+ #if self._get_state() == Gst.State.PLAYING:
+ # return
+ pass
if self._has_camera:
if use_xv and self._xv_available:
@@ -325,29 +359,31 @@ class Glive:
# the pipeline.
self.activity.set_glive_sink(xsink)
- self._pipeline.set_state(gst.STATE_PLAYING)
+ self._pipeline.set_state(Gst.State.PLAYING)
self._playing = True
def pause(self):
- self._pipeline.set_state(gst.STATE_PAUSED)
+ self._pipeline.set_state(Gst.State.PAUSED)
self._playing = False
def stop(self):
- self._pipeline.set_state(gst.STATE_NULL)
+ self._pipeline.set_state(Gst.State.NULL)
self._playing = False
def is_playing(self):
return self._playing
def _get_state(self):
- return self._pipeline.get_state()[1]
+ # FIXME: TypeError: get_state() takes exactly 2 arguments (1 given)
+ #return self._pipeline.get_state()[1]
+ pass
def stop_recording_audio(self):
# We should be able to simply pause and remove the audiobin, but
# this seems to cause a gstreamer segfault. So we stop the whole
# pipeline while manipulating it.
# http://dev.laptop.org/ticket/10183
- self._pipeline.set_state(gst.STATE_NULL)
+ self._pipeline.set_state(Gst.State.NULL)
self.model.shutter_sound()
self._pipeline.remove(self._audiobin)
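The commented-out _get_state() fails because in 1.0 get_state() takes a timeout and returns a (return-code, state, pending) tuple; a sketch of the equivalent:

def _get_state(self):
    # timeout 0 returns immediately; Gst.CLOCK_TIME_NONE would block until
    # a pending state change completes
    ret, state, pending = self._pipeline.get_state(0)
    return state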
@@ -379,20 +415,20 @@ class Glive:
audioBus.add_signal_watch()
self._audio_transcode_handler = audioBus.connect('message', self._onMuxedAudioMessageCb, audioline)
self._transcode_id = GObject.timeout_add(200, self._transcodeUpdateCb, audioline)
- audioline.set_state(gst.STATE_PLAYING)
+ audioline.set_state(Gst.State.PLAYING)
def _get_tags(self, type):
- tl = gst.TagList()
- tl[gst.TAG_ARTIST] = self.model.get_nickname()
- tl[gst.TAG_COMMENT] = "olpc"
+ tl = Gst.TagList()
+ tl[Gst.TAG_ARTIST] = self.model.get_nickname()
+ tl[Gst.TAG_COMMENT] = "olpc"
#this is unfortunately, unreliable
#record.Record.log.debug("self.ca.metadata['title']->" + str(self.ca.metadata['title']) )
- tl[gst.TAG_ALBUM] = "olpc" #self.ca.metadata['title']
- tl[gst.TAG_DATE] = utils.getDateString(int(time.time()))
+ tl[Gst.TAG_ALBUM] = "olpc" #self.ca.metadata['title']
+ tl[Gst.TAG_DATE] = utils.getDateString(int(time.time()))
stringType = constants.MEDIA_INFO[type]['istr']
# Translators: photo by photographer, e.g. "Photo by Mary"
- tl[gst.TAG_TITLE] = _('%(type)s by %(name)s') % {'type': stringType,
+ tl[Gst.TAG_TITLE] = _('%(type)s by %(name)s') % {'type': stringType,
'name': self.model.get_nickname()}
return tl
@@ -404,7 +440,7 @@ class Glive:
self._pic_exposure_open = True
pad = self._photobin.get_static_pad("sink")
self._pipeline.add(self._photobin)
- self._photobin.set_state(gst.STATE_PLAYING)
+ self._photobin.set_state(Gst.State.PLAYING)
self._pipeline.get_by_name("tee").link(self._photobin)
def take_photo(self):
@@ -448,7 +484,7 @@ class Glive:
# If we pause the pipeline while adjusting it, the A/V sync is better
# but not perfect :(
# so we stop the whole thing while reconfiguring to get the best results
- self._pipeline.set_state(gst.STATE_NULL)
+ self._pipeline.set_state(Gst.State.NULL)
self._pipeline.add(self._videobin)
self._pipeline.get_by_name("tee").link(self._videobin)
self._pipeline.add(self._audiobin)
@@ -463,7 +499,7 @@ class Glive:
# this results in several seconds of silence being added at the start
# of the recording. So we stop the whole pipeline while adjusting it.
# SL#2040
- self._pipeline.set_state(gst.STATE_NULL)
+ self._pipeline.set_state(Gst.State.NULL)
self._pipeline.add(self._audiobin)
self.play()
@@ -481,7 +517,7 @@ class Glive:
self._audiobin.get_by_name('absrc').send_event(gst.event_new_eos())
def _video_eos(self):
- self._pipeline.set_state(gst.STATE_NULL)
+ self._pipeline.set_state(Gst.State.NULL)
self._pipeline.get_by_name("tee").unlink(self._videobin)
self._pipeline.remove(self._videobin)
self._pipeline.remove(self._audiobin)
@@ -498,7 +534,7 @@ class Glive:
return
line = 'filesrc location=' + ogg_path + ' name=thumbFilesrc ! oggdemux name=thumbOggdemux ! theoradec name=thumbTheoradec ! tee name=thumb_tee ! queue name=thumb_queue ! ffmpegcolorspace name=thumbFfmpegcolorspace ! jpegenc name=thumbJPegenc ! fakesink name=thumb_fakesink'
- thumbline = gst.parse_launch(line)
+ thumbline = Gst.parse_launch(line)
thumb_queue = thumbline.get_by_name('thumb_queue')
thumb_queue.set_property("leaky", True)
thumb_queue.set_property("max-size-buffers", 1)
@@ -508,7 +544,7 @@ class Glive:
thumb_fakesink.set_property("signal-handoffs", True)
self._thumb_pipes.append(thumbline)
self._thumb_exposure_open = True
- thumbline.set_state(gst.STATE_PLAYING)
+ thumbline.set_state(Gst.State.PLAYING)
def copyThumbPic(self, fsink, buffer, pad, user_data=None):
if not self._thumb_exposure_open:
@@ -527,10 +563,10 @@ class Glive:
wavFilepath = os.path.join(Instance.instancePath, "output.wav")
muxFilepath = os.path.join(Instance.instancePath, "mux.ogg") #ogv
- muxline = gst.parse_launch('filesrc location=' + str(oggFilepath) + ' name=muxVideoFilesrc ! oggdemux name=muxOggdemux ! theoraparse ! oggmux name=muxOggmux ! filesink location=' + str(muxFilepath) + ' name=muxFilesink filesrc location=' + str(wavFilepath) + ' name=muxAudioFilesrc ! wavparse name=muxWavparse ! audioconvert name=muxAudioconvert ! vorbisenc name=muxVorbisenc ! muxOggmux.')
+ muxline = Gst.parse_launch('filesrc location=' + str(oggFilepath) + ' name=muxVideoFilesrc ! oggdemux name=muxOggdemux ! theoraparse ! oggmux name=muxOggmux ! filesink location=' + str(muxFilepath) + ' name=muxFilesink filesrc location=' + str(wavFilepath) + ' name=muxAudioFilesrc ! wavparse name=muxWavparse ! audioconvert name=muxAudioconvert ! vorbisenc name=muxVorbisenc ! muxOggmux.')
taglist = self._get_tags(constants.TYPE_VIDEO)
vorbis_enc = muxline.get_by_name('muxVorbisenc')
- vorbis_enc.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)
+ vorbis_enc.merge_tags(taglist, Gst.TAG_MERGE_REPLACE_ALL)
muxBus = muxline.get_bus()
muxBus.add_signal_watch()
@@ -538,11 +574,11 @@ class Glive:
self._mux_pipes.append(muxline)
#add a listener here to monitor % of transcoding...
self._transcode_id = GObject.timeout_add(200, self._transcodeUpdateCb, muxline)
- muxline.set_state(gst.STATE_PLAYING)
+ muxline.set_state(Gst.State.PLAYING)
def _transcodeUpdateCb( self, pipe ):
position, duration = self._query_position( pipe )
- if position != gst.CLOCK_TIME_NONE:
+ if position != Gst.CLOCK_TIME_NONE:
value = position * 100.0 / duration
value = value/100.0
self.model.set_progress(value, _('Saving...'))
@@ -550,26 +586,26 @@ class Glive:
def _query_position(self, pipe):
try:
- position, format = pipe.query_position(gst.FORMAT_TIME)
+ position, format = pipe.query_position(Gst.FORMAT_TIME)
except:
- position = gst.CLOCK_TIME_NONE
+ position = Gst.CLOCK_TIME_NONE
try:
- duration, format = pipe.query_duration(gst.FORMAT_TIME)
+ duration, format = pipe.query_duration(Gst.FORMAT_TIME)
except:
- duration = gst.CLOCK_TIME_NONE
+ duration = Gst.CLOCK_TIME_NONE
return (position, duration)
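gst.FORMAT_TIME has no 1.0 counterpart (it is Gst.Format.TIME), and the 1.0 query methods return a success flag instead of raising gst.QueryError, so _query_position() will eventually need reworking along these lines (a sketch):

ok, position = pipe.query_position(Gst.Format.TIME)
if not ok:
    position = Gst.CLOCK_TIME_NONE
ok, duration = pipe.query_duration(Gst.Format.TIME)
if not ok:
    duration = Gst.CLOCK_TIME_NONE
return (position, duration)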
def _onMuxedVideoMessageCb(self, bus, message, pipe):
- if message.type != gst.MESSAGE_EOS:
+ if message.type != Gst.MESSAGE_EOS:
return True
GObject.source_remove(self._video_transcode_handler)
self._video_transcode_handler = None
GObject.source_remove(self._transcode_id)
self._transcode_id = None
- pipe.set_state(gst.STATE_NULL)
+ pipe.set_state(Gst.State.NULL)
pipe.get_bus().remove_signal_watch()
pipe.get_bus().disable_sync_message_emission()
@@ -582,14 +618,14 @@ class Glive:
return False
def _onMuxedAudioMessageCb(self, bus, message, pipe):
- if message.type != gst.MESSAGE_EOS:
+ if message.type != Gst.MESSAGE_EOS:
return True
GObject.source_remove(self._audio_transcode_handler)
self._audio_transcode_handler = None
GObject.source_remove(self._transcode_id)
self._transcode_id = None
- pipe.set_state(gst.STATE_NULL)
+ pipe.set_state(Gst.State.NULL)
pipe.get_bus().remove_signal_watch()
pipe.get_bus().disable_sync_message_emission()
@@ -601,12 +637,12 @@ class Glive:
def _bus_message_handler(self, bus, message):
t = message.type
- if t == gst.MESSAGE_EOS:
+ if t == Gst.MESSAGE_EOS:
if self._eos_cb:
cb = self._eos_cb
self._eos_cb = None
cb()
- elif t == gst.MESSAGE_ERROR:
+ elif t == Gst.MESSAGE_ERROR:
#todo: if we come out of suspend/resume with errors, then get us back up and running...
#todo: handle "No space left on the resource.gstfilesink.c"
#err, debug = message.parse_error()
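The message and tag constants used in the callbacks above also moved into enums in 1.0: gst.MESSAGE_EOS/MESSAGE_ERROR become Gst.MessageType.EOS/ERROR, and gst.TAG_MERGE_REPLACE_ALL becomes Gst.TagMergeMode.REPLACE_ALL. A minimal bus handler in the 1.0 style, assuming pipeline already exists:

def on_message(bus, message):
    if message.type == Gst.MessageType.EOS:
        print('end of stream')
    elif message.type == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        print('error: %s (%s)' % (err, debug))

bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', on_message)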
diff --git a/gplay.py b/gplay.py
index 6749950..104a247 100644
--- a/gplay.py
+++ b/gplay.py
@@ -37,7 +37,7 @@ class Gplay(GObject.GObject):
super(Gplay, self).__init__()
self.activity = activity_obj
self._playback_monitor_handler = None
- self._player = gst.element_factory_make('playbin')
+ self._player = Gst.ElementFactory.make("playbin", "player")
bus = self._player.get_bus()
bus.add_signal_watch()
@@ -56,43 +56,49 @@ class Gplay(GObject.GObject):
self.seek(0)
return
- self._player.set_state(gst.STATE_READY)
+ self._player.set_state(Gst.State.READY)
self._player.set_property('uri', location)
def seek(self, position):
if position == 0:
location = 0
else:
- duration = self._player.query_duration(gst.FORMAT_TIME, None)[0]
+ duration = self._player.query_duration(Gst.Format.TIME, None)[0]
location = duration * (position / 100)
- event = gst.event_new_seek(1.0, gst.FORMAT_TIME, gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE, gst.SEEK_TYPE_SET, location, gst.SEEK_TYPE_NONE, 0)
+ event = Gst.Event.new_seek(
+ 1.0, Gst.Format.TIME,
+ Gst.SeekFlags.FLUSH |
+ Gst.SeekFlags.ACCURATE,
+ Gst.SeekType.SET, location,
+ Gst.SeekType.NONE, 0)
+
res = self._player.send_event(event)
if res:
self._player.set_new_stream_time(0L)
def pause(self):
- self._player.set_state(gst.STATE_PAUSED)
+ self._player.set_state(Gst.State.PAUSED)
def play(self):
- if self.get_state() == gst.STATE_PLAYING:
+ if self.get_state() == Gst.State.PLAYING:
return
if not self._player.props.video_sink:
- sink = gst.element_factory_make('xvimagesink')
+ sink = Gst.ElementFactory.make('xvimagesink', 'xvimagesink')
sink.props.force_aspect_ratio = True
self._player.props.video_sink = sink
self.activity.set_gplay_sink(self._player.props.video_sink)
- self._player.set_state(gst.STATE_PLAYING)
+ self._player.set_state(Gst.State.PLAYING)
self._emit_playback_status(0)
self._playback_monitor_handler = GObject.timeout_add(500, self._playback_monitor)
def _playback_monitor(self):
try:
- position = self._player.query_position(gst.FORMAT_TIME)[0]
- duration = self._player.query_duration(gst.FORMAT_TIME)[0]
+ position = self._player.query_position(Gst.Format.TIME)[0]
+ duration = self._player.query_duration(Gst.Format.TIME)[0]
except gst.QueryError:
return True
@@ -101,17 +107,21 @@ class Gplay(GObject.GObject):
return True
def _emit_playback_status(self, position):
- state = self._player.get_state()[1]
- self.emit('playback-status-changed', state, position)
+ # FIXME: TypeError: get_state() takes exactly 2 arguments (1 given)
+ #state = self._player.get_state()[1]
+ #self.emit('playback-status-changed', state, position)
+ pass
def get_state(self):
- return self._player.get_state()[1]
+ # FIXME: TypeError: get_state() takes exactly 2 arguments (1 given)
+ #return self._player.get_state()[1]
+ pass
def stop(self):
if self._playback_monitor_handler:
GObject.source_remove(self._playback_monitor_handler)
self._playback_monitor_handler = None
- self._player.set_state(gst.STATE_NULL)
+ self._player.set_state(Gst.State.NULL)
self._emit_playback_status(0)
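For the seek above, 1.0 also offers seek_simple(), which builds the seek event internally; a sketch, with location computed in nanoseconds as in Gplay.seek():

def seek_to(player, location):
    player.seek_simple(Gst.Format.TIME,
                       Gst.SeekFlags.FLUSH | Gst.SeekFlags.ACCURATE,
                       location)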
diff --git a/mediaview.py b/mediaview.py
index 34c8267..87ef4eb 100644
--- a/mediaview.py
+++ b/mediaview.py
@@ -117,7 +117,9 @@ class InfoView(Gtk.EventBox):
self.show_all()
def hide(self):
- self.hide_all()
+ # FIXME: RuntimeError: maximum recursion depth exceeded
+ #self.hide()
+ pass
def set_author(self, name, stroke, fill):
self._xo_icon.set_colors(stroke, fill)
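The recursion in InfoView.hide() comes from the override calling itself; hide_all() is gone in GTK 3 and hiding the container is enough, so chaining up to the parent class is the usual fix (a sketch):

def hide(self):
    # chain up instead of calling self.hide(), which would recurse
    Gtk.EventBox.hide(self)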
diff --git a/model.py b/model.py
index 5fdb2f5..671879f 100644
--- a/model.py
+++ b/model.py
@@ -324,7 +324,7 @@ class Model:
def _playback_status_changed(self, widget, status, value):
self.activity.set_playback_scale(value)
- if status == gst.STATE_NULL:
+ if status == Gst.State.NULL:
self.activity.set_paused(True)
def play_audio(self, recd):
@@ -340,7 +340,7 @@ class Model:
self.activity.set_paused(False)
def play_pause(self):
- if self.gplay.get_state() == gst.STATE_PLAYING:
+ if self.gplay.get_state() == Gst.State.PLAYING:
self.gplay.pause()
self.activity.set_paused(True)
else:
diff --git a/record.py b/record.py
index 329460d..6a9cc47 100644
--- a/record.py
+++ b/record.py
@@ -21,12 +21,15 @@
import os
import logging
import shutil
+import cairo
+
from gettext import gettext as _
from gettext import ngettext
+
import gi
from gi.repository import Gtk
from gi.repository import Gdk
-import cairo
+from gi.repository import GObject
from gi.repository import Pango
from gi.repository import PangoCairo
@@ -41,12 +44,15 @@ from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.activity.widgets import StopButton
from sugar3.activity.widgets import ActivityToolbarButton
+GObject.threads_init()
+Gst.init([])
+
from model import Model
from button import RecdButton
import constants
from instance import Instance
import utils
-from tray import HTray
+#from tray import HTray # FIXME: no more HIPPO
from mediaview import MediaView
import hw
from iconcombobox import IconComboBox
@@ -55,15 +61,12 @@ logger = logging.getLogger('record.py')
COLOR_BLACK = Gdk.color_parse('#000000')
COLOR_WHITE = Gdk.color_parse('#ffffff')
-GObject.threads_init()
-Gst.init([])
-
-gst.debug_set_active(True)
-gst.debug_set_colored(False)
+Gst.debug_set_active(True)
+Gst.debug_set_colored(False)
if logging.getLogger().level <= logging.DEBUG:
- gst.debug_set_default_threshold(gst.LEVEL_WARNING)
+ Gst.debug_set_default_threshold(Gst.DebugLevel.WARNING)
else:
- gst.debug_set_default_threshold(gst.LEVEL_ERROR)
+ Gst.debug_set_default_threshold(Gst.DebugLevel.ERROR)
class Record(activity.Activity):
def __init__(self, handle):
@@ -220,11 +223,11 @@ class Record(activity.Activity):
self._record_container = RecordContainer(self._media_view, self._controls_hbox)
main_box.pack_start(self._record_container, True, True, 6)
self._record_container.show()
-
- self._thumb_tray = HTray()
- self._thumb_tray.set_size_request(-1, 150)
- main_box.pack_end(self._thumb_tray, False, False, 0)
- self._thumb_tray.show_all()
+ # FIXME: no more Hippo
+ #self._thumb_tray = HTray()
+ #self._thumb_tray.set_size_request(-1, 150)
+ #main_box.pack_end(self._thumb_tray, False, False, 0)
+ #self._thumb_tray.show_all()
def serialize(self):
data = {}
@@ -350,10 +353,10 @@ class Record(activity.Activity):
def _toggle_fullscreen(self):
if not self._fullscreen:
self._toolbar_box.hide()
- self._thumb_tray.hide()
+ #self._thumb_tray.hide()
else:
self._toolbar_box.show()
- self._thumb_tray.show()
+ #self._thumb_tray.show()
self._fullscreen = not self._fullscreen
self._media_view.set_fullscreen(self._fullscreen)
@@ -419,10 +422,10 @@ class Record(activity.Activity):
remove_handler = button.connect("remove-requested", self._remove_recd)
clipboard_handler = button.connect("copy-clipboard-requested", self._thumbnail_copy_clipboard)
button.set_data('handler-ids', (clicked_handler, remove_handler, clipboard_handler))
- self._thumb_tray.add_item(button)
+ #self._thumb_tray.add_item(button)
button.show()
- if scroll_to_end:
- self._thumb_tray.scroll_to_end()
+ #if scroll_to_end:
+ #self._thumb_tray.scroll_to_end()
def _copy_to_clipboard(self, recd):
if recd == None:
@@ -458,12 +461,13 @@ class Record(activity.Activity):
for handler in handlers:
recdbutton.disconnect(handler)
- self._thumb_tray.remove_item(recdbutton)
+ #self._thumb_tray.remove_item(recdbutton)
recdbutton.cleanup()
def remove_all_thumbnails(self):
- for child in self._thumb_tray.get_children():
- self._remove_thumbnail(child)
+ #for child in self._thumb_tray.get_children():
+ # self._remove_thumbnail(child)
+ pass
def show_still(self, pixbuf):
self._media_view.show_still(pixbuf)
@@ -574,15 +578,16 @@ class RecordContainer(Gtk.Container):
self._show_title = False
self._controls_hbox_height = 0
super(RecordContainer, self).__init__()
- # FIXME: widget.flags() & Gtk.REALIZED ?
for widget in (self._media_view, self._controls_hbox):
- if widget.flags() & Gtk.REALIZED:
- widget.set_parent_window(self.window)
+ # FIXME: AttributeError: 'MediaView' object has no attribute 'flags'
+ #if widget.flags() & Gtk.REALIZED:
+ #widget.set_parent_window(self.window)
widget.set_parent(self)
# FIXME: Gdk.WINDOW_CHILD ? Gdk.INPUT_OUTPUT ?
def do_realize(self):
- self.set_flags(Gtk.REALIZED)
+ # FIXME: AttributeError: 'RecordContainer' object has no attribute 'set_flags'
+ #self.set_flags(Gtk.REALIZED)
self.window = Gdk.Window(
self.get_parent_window(),
@@ -677,9 +682,9 @@ class RecordContainer(Gtk.Container):
alloc.width = media_view_width
alloc.height = self._controls_hbox_height
self._controls_hbox.size_allocate(alloc)
-
- if self.flags() & Gtk.REALIZED:
- self.window.move_resize(*allocation)
+ # FIXME: AttributeError: 'RecordContainer' object has no attribute 'flags'
+ #if self.flags() & Gtk.REALIZED:
+ # self.window.move_resize(*allocation)
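The flags()/set_flags() calls above have no GTK 3 equivalent; the realized flag is read and written through get_realized()/set_realized() instead. A sketch of the two spots, assuming the rest of do_realize() stays as written:

# in do_realize(), replaces self.set_flags(Gtk.REALIZED):
self.set_realized(True)

# replaces the  widget.flags() & Gtk.REALIZED  tests:
if self.get_realized():
    widget.set_parent_window(self.get_window())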
def do_forall(self, include_internals, callback, data):
for widget in (self._media_view, self._controls_hbox):
@@ -693,12 +698,12 @@ class PlaybackScale(Gtk.Scale):
def __init__(self, model):
self.model = model
self._change_handler = None
- self._playback_adjustment = Gtk.Adjustment()
- self._playback_adjustment.set(0.0, 0.00, 100.0, 0.1, 1.0, 1.0)
+ self._playback_adjustment = Gtk.Adjustment(0.0, 0.00, 100.0, 0.1, 1.0, 1.0)
super(PlaybackScale, self).__init__(orientation = Gtk.Orientation.HORIZONTAL)
self.set_adjustment(self._playback_adjustment)
self.set_draw_value(False)
- self.set_update_policy(Gtk.UPDATE_CONTINUOUS)
+ # FIXME: AttributeError: 'PlaybackScale' object has no attribute 'set_update_policy'
+ #self.set_update_policy(Gtk.UPDATE_CONTINUOUS)
self.connect('button-press-event', self._button_press)
self.connect('button-release-event', self._button_release)
@@ -797,7 +802,8 @@ class CountdownImage(Gtk.Image):
class ShutterButton(Gtk.Button):
def __init__(self):
Gtk.Button.__init__(self)
- self.set_relief(Gtk.RELIEF_NONE) # FIXME: Gtk.RELIEF_NONE ?
+ # FIXME: Gtk.RELIEF_NONE ?
+ #self.set_relief(Gtk.RELIEF_NONE)
self.set_focus_on_click(False)
self.modify_bg(Gtk.StateType.ACTIVE, COLOR_BLACK)
@@ -829,7 +835,8 @@ class ShutterButton(Gtk.Button):
class PlayButton(Gtk.Button):
def __init__(self):
super(PlayButton, self).__init__()
- self.set_relief(Gtk.RELIEF_NONE)
+ # FIXME: Gtk.RELIEF_NONE ?
+ #self.set_relief(Gtk.RELIEF_NONE)
self.set_focus_on_click(False)
self.modify_bg(Gtk.StateType.ACTIVE, COLOR_BLACK)
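The relief constant moved into an enum in GTK 3, so the commented-out calls in ShutterButton and PlayButton can come back as (a sketch):

self.set_relief(Gtk.ReliefStyle.NONE)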
@@ -858,7 +865,7 @@ class RecordControl():
toolbar.insert(self._duration_combo, -1)
preferences_toolbar = Gtk.Toolbar()
- combo = Gtk.combo_box_new_text()
+ combo = Gtk.ComboBoxText()
self.quality = ToolComboBox(combo=combo, label_text=_('Quality:'))
self.quality.combo.append_text(_('Low'))
if hw.get_xo_version() != 1: