-rw-r--r--  NEWS        4
-rw-r--r--  glive.py  767
-rw-r--r--  glivex.py 147
-rw-r--r--  gplay.py   88
-rw-r--r--  model.py   16
-rw-r--r--  record.py   7
-rw-r--r--  ui.py      47
-rw-r--r--  utils.py   10
8 files changed, 354 insertions, 732 deletions
diff --git a/NEWS b/NEWS
index a6f687d..05ca93f 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,7 @@
+* Use uuid and do not md5 video files
+* Fallback to ximagesink if xvimagesink is inaccessible
+* Use one pass for encodings
+
60
* pipeline rework; makes the activity work with new gstreamer
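
The entries above summarize the commit: recorded media are now identified by uuid values instead of an md5 hash of the file contents, the live preview rebuilds itself around ximagesink whenever xvimagesink cannot get an Xv port, and video is encoded and muxed in a single pass while recording instead of being transcoded afterwards. A minimal sketch of the sink-fallback idea the new glive.py implements; the helper and its names are illustrative, not part of the patch:

    import gst

    def build_preview(src_str, fallback=False):
        # Prefer the Xv sink; on a bus error, rebuild around the plain X sink
        # behind an ffmpegcolorspace converter, as Glive.play() below does.
        if fallback:
            sink_str = 'ffmpegcolorspace ! ximagesink force-aspect-ratio=true name=xsink'
        else:
            sink_str = 'xvimagesink force-aspect-ratio=true name=xsink'
        pipe = gst.parse_launch('%s ! queue ! %s' % (src_str, sink_str))

        def message_cb(bus, message):
            # First error from the Xv sink: throw the pipeline away and
            # retry once with the fallback sink.
            if message.type == gst.MESSAGE_ERROR and not fallback:
                pipe.set_state(gst.STATE_NULL)
                build_preview(src_str, fallback=True).set_state(gst.STATE_PLAYING)

        bus = pipe.get_bus()
        bus.add_signal_watch()
        bus.connect('message', message_cb)
        return pipe
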
diff --git a/glive.py b/glive.py
index e010444..6bf0f1a 100644
--- a/glive.py
+++ b/glive.py
@@ -23,522 +23,300 @@ import os
import gtk
import pygtk
pygtk.require('2.0')
-import sys
import gst
-import gst.interfaces
import pygst
pygst.require('0.10')
import time
-import threading
import gobject
gobject.threads_init()
import logging
-logger = logging.getLogger('record')
+logger = logging.getLogger('record:glive.py')
+
+from sugar.activity.activity import get_activity_root
from instance import Instance
from constants import Constants
-import record
import utils
import ui
-class Glive:
- def __init__(self, pca):
- self.window = None
- self.ca = pca
- self._eos_cb = None
-
- self.playing = False
- self.picExposureOpen = False
-
- self.AUDIO_TRANSCODE_ID = 0
- self.TRANSCODE_ID = 0
- self.VIDEO_TRANSCODE_ID = 0
-
- self.PHOTO_MODE_PHOTO = 0
- self.PHOTO_MODE_AUDIO = 1
-
- self.TRANSCODE_UPDATE_INTERVAL = 200
-
-
- self.VIDEO_WIDTH_SMALL = 160
- self.VIDEO_HEIGHT_SMALL = 120
- self.VIDEO_FRAMERATE_SMALL = 10
-
- self.VIDEO_WIDTH_LARGE = 200
- self.VIDEO_HEIGHT_LARGE = 150
- self.VIDEO_FRAMERATE_SMALL = 10
-
- self.pipeline = gst.Pipeline("my-pipeline")
- self.createPhotoBin()
- self.createAudioBin()
- self.createVideoBin()
- self.createPipeline()
-
- self.thumbPipes = []
- self.muxPipes = []
-
- bus = self.pipeline.get_bus()
- bus.enable_sync_message_emission()
- bus.add_signal_watch()
- self.SYNC_ID = bus.connect('sync-message::element', self._onSyncMessageCb)
- self.MESSAGE_ID = bus.connect('message', self._onMessageCb)
-
- def createPhotoBin ( self ):
- queue = gst.element_factory_make("queue", "pbqueue")
- queue.set_property("leaky", True)
- queue.set_property("max-size-buffers", 1)
-
- colorspace = gst.element_factory_make("ffmpegcolorspace", "pbcolorspace")
- jpeg = gst.element_factory_make("jpegenc", "pbjpeg")
-
- sink = gst.element_factory_make("fakesink", "pbsink")
- self.HANDOFF_ID = sink.connect("handoff", self.copyPic)
- sink.set_property("signal-handoffs", True)
-
- self.photobin = gst.Bin("photobin")
- self.photobin.add(queue, colorspace, jpeg, sink)
-
- gst.element_link_many(queue, colorspace, jpeg, sink)
-
- pad = queue.get_static_pad("sink")
- self.photobin.add_pad(gst.GhostPad("sink", pad))
-
- def createAudioBin ( self ):
- src = gst.element_factory_make("alsasrc", "absrc")
- srccaps = gst.Caps("audio/x-raw-int,rate=16000,channels=1,depth=16")
-
- enc = gst.element_factory_make("wavenc", "abenc")
-
- sink = gst.element_factory_make("filesink", "absink")
- sink.set_property("location", os.path.join(Instance.instancePath, "output.wav"))
-
- self.audiobin = gst.Bin("audiobin")
- self.audiobin.add(src, enc, sink)
-
- src.link(enc, srccaps)
- enc.link(sink)
-
- def createVideoBin ( self ):
- queue = gst.element_factory_make("queue", "vbqueue")
-
- rate = gst.element_factory_make("videorate", "vbrate")
- ratecaps = gst.Caps('video/x-raw-yuv,framerate='+str(self.VIDEO_FRAMERATE_SMALL)+'/1')
-
- scale = gst.element_factory_make("videoscale", "vbscale")
- scalecaps = gst.Caps('video/x-raw-yuv,width='+str(self.VIDEO_WIDTH_SMALL)+',height='+str(self.VIDEO_HEIGHT_SMALL))
-
- colorspace = gst.element_factory_make("ffmpegcolorspace", "vbcolorspace")
+TMP_OGG = os.path.join(get_activity_root(), 'instance', 'output.ogg')
- enc = gst.element_factory_make("theoraenc", "vbenc")
- enc.set_property("quality", 16)
+PLAYBACK_WIDTH = 640
+PLAYBACK_HEIGHT = 480
- mux = gst.element_factory_make("oggmux", "vbmux")
+OGG_WIDTH = 160
+OGG_HEIGHT = 120
- sink = gst.element_factory_make("filesink", "vbfile")
- sink.set_property("location", os.path.join(Instance.instancePath, "output.ogg"))
-
- self.videobin = gst.Bin("videobin")
- self.videobin.add(queue, rate, scale, colorspace, enc, mux, sink)
-
- queue.link(rate)
- rate.link(scale, ratecaps)
- scale.link(colorspace, scalecaps)
- gst.element_link_many(colorspace, enc, mux, sink)
-
- pad = queue.get_static_pad("sink")
- self.videobin.add_pad(gst.GhostPad("sink", pad))
-
- def createPipeline ( self ):
- src = gst.element_factory_make("v4l2src", "camsrc")
- try:
- # old gst-plugins-good does not have this property
- src.set_property("queue-size", 2)
- except:
- pass
-
- tee = gst.element_factory_make("tee", "tee")
- self.queue = gst.element_factory_make("queue", "dispqueue")
- xvsink = gst.element_factory_make("xvimagesink", "xvsink")
- self.pipeline.add(src, tee, self.queue, xvsink)
- gst.element_link_many(src, tee, self.queue, xvsink)
-
- def thumbPipe(self):
- return self.thumbPipes[ len(self.thumbPipes)-1 ]
-
-
- def thumbEl(self, name):
- return self.thumbPipe().get_by_name(name)
-
-
- def muxPipe(self):
- return self.muxPipes[ len(self.muxPipes)-1 ]
+class Glive:
+ def play(self):
+ logger.debug('play')
+
+ if not self.play_pipe:
+ self.src_str = \
+ 'v4l2src ' \
+ '! video/x-raw-yuv,width=%s,height=%s ' \
+ % (PLAYBACK_WIDTH, PLAYBACK_HEIGHT)
+ self.play_str = \
+ 'xvimagesink force-aspect-ratio=true name=xsink'
+ self.play_pipe = gst.parse_launch(
+ '%s ' \
+ '! queue ' \
+ '! %s' \
+ % (self.src_str, self.play_str))
+
+ def message_cb(bus, message):
+ if message.type == gst.MESSAGE_ERROR:
+ err, debug = message.parse_error()
+ logger.error('play_pipe: %s %s' % (err, debug))
+
+ if not self.fallback:
+ logger.warning('use fallback_bin')
+ self.fallback = True
+
+ self.play_str = \
+ 'ffmpegcolorspace ' \
+ '! ximagesink force-aspect-ratio=true ' \
+ ' name=xsink'
+ self.play_pipe = gst.parse_launch(
+ '%s ' \
+ '! queue ' \
+ '! %s' \
+ % (self.src_str, self.play_str))
+
+ if [i for i in self.pipeline.get_state() \
+ if id(i) == id(gst.STATE_PLAYING)]:
+ self.pipeline = None
+ self._switch_pipe(self.play_pipe)
+
+ bus = self.play_pipe.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message', message_cb)
+
+ self._switch_pipe(self.play_pipe)
+
+ def thumb_play(self, use_fallback=False):
+ if not self.fallback and not use_fallback:
+ # use xv to scale video
+ self.play()
+ return
+ logger.debug('thumb_play')
- def muxEl(self, name):
- return self.muxPipe().get_by_name(name)
+ if not self.fallback_pipe:
+ self.fallback_pipe = gst.parse_launch(
+ '%s ' \
+ '! queue ' \
+ '! videoscale ' \
+ '! video/x-raw-yuv,width=%s,height=%s ' \
+ '! ffmpegcolorspace ' \
+ '! ximagesink force-aspect-ratio=true name=xsink' \
+ % (self.src_str, ui.UI.dim_PIPW, ui.UI.dim_PIPH))
+ def message_cb(bus, message):
+ if message.type == gst.MESSAGE_ERROR:
+ err, debug = message.parse_error()
+ logger.error('fallback_pipe: %s %s' % (err, debug))
- def play(self):
- logger.debug('Glive.play')
+ bus = self.fallback_pipe.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message', message_cb)
- self.pipeline.set_state(gst.STATE_PLAYING)
- self.playing = True
+ self._switch_pipe(self.fallback_pipe)
def pause(self):
- logger.debug('Glive.pause')
-
- self.pipeline.set_state(gst.STATE_PAUSED)
- self.playing = False
-
+ logger.debug('pause')
+ if self.pipeline:
+ self.pipeline.set_state(gst.STATE_PAUSED)
def stop(self):
- logger.debug('Glive.stop')
-
- self.pipeline.set_state(gst.STATE_NULL)
- self.playing = False
-
- def is_playing(self):
- return self.playing
+ logger.debug('stop')
+ if self.pipeline:
+ self.pipeline.set_state(gst.STATE_NULL)
+
+ def takePhoto(self, after_photo_cb=None):
+ logger.debug('takePhoto')
+
+ if not self.photo_pipe:
+ def valve_handoff(valve, buffer):
+ valve.props.drop_probability = 1
+
+ def sink_handoff(sink, buffer, pad, pipeline):
+ pixbuf = gtk.gdk.pixbuf_loader_new_with_mime_type('image/jpeg')
+ pixbuf.write(buffer)
+ pixbuf.close()
+
+ structure = gst.Structure('record.photo')
+ structure['pixbuf'] = pixbuf.get_pixbuf()
+ msg = gst.message_new_custom(gst.MESSAGE_APPLICATION, sink,
+ structure)
+ pipeline.get_bus().post(msg)
+
+ def message_cb(bus, message, self):
+ if message.type == gst.MESSAGE_ERROR:
+ err, debug = message.parse_error()
+ logger.error('photo_pipe: %s %s' % (err, debug))
+
+ elif message.type == gst.MESSAGE_APPLICATION:
+ name = message.structure.get_name()
+ logger.debug('photo_pipe: name=%s' % name)
+
+ if name == 'record.photo':
+ self.after_photo_cb(self, message.structure['pixbuf'])
+
+ self.photo_pipe = gst.parse_launch(
+ '%s ' \
+ '! ffmpegcolorspace ' \
+ '! jpegenc ' \
+ '! identity signal-handoffs=true silent=true name=valve ' \
+ '! fakesink signal-handoffs=true name=sink ' \
+ % self.src_str)
+
+ self.photo_pipe.get_by_name('valve').connect('handoff',
+ valve_handoff)
+ self.photo_pipe.get_by_name('sink').connect('handoff',
+ sink_handoff, self.photo_pipe)
+
+ bus = self.photo_pipe.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message', message_cb, self)
+
+ def process_cb(self, pixbuf):
+ self.ca.m.savePhoto(pixbuf)
+ self._switch_pipe(self.play_pipe)
- def idlePlayElement(self, element):
- element.set_state(gst.STATE_PLAYING)
- return False
+ self.after_photo_cb = after_photo_cb and after_photo_cb or process_cb
+ self._switch_pipe(self.photo_pipe)
+ self.photo_pipe.get_by_name('valve').props.drop_probability = 0
- def stopRecordingAudio( self ):
- self.audiobin.set_state(gst.STATE_NULL)
- self.pipeline.remove(self.audiobin)
- gobject.idle_add( self.stoppedRecordingAudio )
+ def startRecordingVideo(self):
+ logger.debug('startRecordingVideo')
+
+ if True:
+            # XXX re-create the pipe every time
+            # to suppress gst glitches on the second invocation
+ if self.video_pipe:
+ del self.video_pipe
+
+ self.video_pipe = gst.parse_launch( \
+ '%s ' \
+ '! tee name=tee ' \
+ 'tee.! queue ! %s ' \
+ 'tee.! queue ' \
+ '! ffmpegcolorspace ' \
+ '! videorate skip_to_first=true ' \
+ '! video/x-raw-yuv,framerate=10/1 ' \
+ '! videoscale ' \
+ '! video/x-raw-yuv,width=%s,height=%s ' \
+ '! theoraenc quality=16 ' \
+ '! oggmux name=mux ' \
+ '! filesink location=%s ' \
+ 'alsasrc ' \
+ '! queue ' \
+ '! audioconvert ' \
+ '! vorbisenc name=vorbisenc ' \
+ '! mux.' \
+ % (self.src_str, self.play_str,
+ OGG_WIDTH, OGG_HEIGHT, TMP_OGG))
+
+ def message_cb(bus, message, self):
+ if message.type == gst.MESSAGE_ERROR:
+ err, debug = message.parse_error()
+ logger.error('video_pipe: %s %s' % (err, debug))
+
+ bus = self.video_pipe.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message', message_cb, self)
+
+ def process_cb(self, pixbuf):
+ taglist = self.getTags(Constants.TYPE_VIDEO)
+ vorbisenc = self.video_pipe.get_by_name('vorbisenc')
+ vorbisenc.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)
+
+ self.pixbuf = pixbuf
+ self._switch_pipe(self.video_pipe)
+
+ # take photo first
+ self.takePhoto(process_cb)
+ def stopRecordingVideo(self):
+ logger.debug('stopRecordingVideo')
- def stoppedRecordingVideo(self):
- if ( len(self.thumbPipes) > 0 ):
- thumbline = self.thumbPipes[len(self.thumbPipes)-1]
- thumbline.get_by_name('thumbFakesink').disconnect(self.THUMB_HANDOFF_ID)
+ self._switch_pipe(self.play_pipe)
- oggFilepath = os.path.join(Instance.instancePath, "output.ogg") #ogv
- if (not os.path.exists(oggFilepath)):
- self.record = False
+ if (not os.path.exists(TMP_OGG)):
self.ca.m.cannotSaveVideo()
self.ca.m.stoppedRecordingVideo()
return
- oggSize = os.path.getsize(oggFilepath)
- if (oggSize <= 0):
- self.record = False
+
+ if (os.path.getsize(TMP_OGG) <= 0):
self.ca.m.cannotSaveVideo()
self.ca.m.stoppedRecordingVideo()
return
- line = 'filesrc location=' + str(oggFilepath) + ' name=thumbFilesrc ! oggdemux name=thumbOggdemux ! theoradec name=thumbTheoradec ! tee name=thumbTee ! queue name=thumbQueue ! ffmpegcolorspace name=thumbFfmpegcolorspace ! jpegenc name=thumbJPegenc ! fakesink name=thumbFakesink'
- thumbline = gst.parse_launch(line)
- thumbQueue = thumbline.get_by_name('thumbQueue')
- thumbQueue.set_property("leaky", True)
- thumbQueue.set_property("max-size-buffers", 1)
- thumbTee = thumbline.get_by_name('thumbTee')
- thumbFakesink = thumbline.get_by_name('thumbFakesink')
- self.THUMB_HANDOFF_ID = thumbFakesink.connect("handoff", self.copyThumbPic)
- thumbFakesink.set_property("signal-handoffs", True)
- self.thumbPipes.append(thumbline)
- self.thumbExposureOpen = True
- gobject.idle_add( self.idlePlayElement, thumbline )
-
-
- def stoppedRecordingAudio( self ):
- record.Record.log.debug("stoppedRecordingAudio")
- if (self.audioPixbuf != None):
- audioFilepath = os.path.join(Instance.instancePath, "output.wav")#self.el("audioFilesink").get_property("location")
- if (not os.path.exists(audioFilepath)):
- self.record = False
- self.audio = False
- self.ca.m.cannotSaveVideo()
- return
- wavSize = os.path.getsize(audioFilepath)
- if (wavSize <= 0):
- self.record = False
- self.ca.m.cannotSaveVideo()
- return
-
- self.ca.ui.setPostProcessPixBuf(self.audioPixbuf)
-
- line = 'filesrc location=' + str(audioFilepath) + ' name=audioFilesrc ! wavparse name=audioWavparse ! audioconvert name=audioAudioconvert ! vorbisenc name=audioVorbisenc ! oggmux name=audioOggmux ! filesink name=audioFilesink'
- audioline = gst.parse_launch(line)
+ thumb = self.pixbuf.scale_simple(OGG_WIDTH, OGG_HEIGHT,
+ gtk.gdk.INTERP_HYPER)
+ self.ca.ui.setPostProcessPixBuf(thumb)
+ self.ca.m.saveVideo(thumb, TMP_OGG, OGG_WIDTH, OGG_HEIGHT)
+ self.ca.m.stoppedRecordingVideo()
+ self.ca.ui.updateVideoComponents()
+ def startRecordingAudio(self):
+ logger.debug('startRecordingAudio')
+
+ if not self.audio_pipe:
+ self.audio_pipe = gst.parse_launch( \
+ '%s ' \
+ '! queue ' \
+ '! %s ' \
+ 'alsasrc ' \
+ '! queue ' \
+ '! audioconvert ' \
+ '! vorbisenc name=vorbisenc ' \
+ '! oggmux ' \
+ '! filesink location=%s ' \
+ % (self.src_str, self.play_str, TMP_OGG))
+
+ def message_cb(bus, message, self):
+ if message.type == gst.MESSAGE_ERROR:
+ err, debug = message.parse_error()
+ logger.error('audio_pipe: %s %s' % (err, debug))
+
+ bus = self.audio_pipe.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message', message_cb, self)
+
+ def process_cb(self, pixbuf):
taglist = self.getTags(Constants.TYPE_AUDIO)
- base64AudioSnapshot = utils.getStringFromPixbuf(self.audioPixbuf)
- taglist[gst.TAG_EXTENDED_COMMENT] = "coverart="+str(base64AudioSnapshot)
- vorbisEnc = audioline.get_by_name('audioVorbisenc')
- vorbisEnc.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)
-
- audioFilesink = audioline.get_by_name('audioFilesink')
- audioOggFilepath = os.path.join(Instance.instancePath, "output.ogg")
- audioFilesink.set_property("location", audioOggFilepath )
-
- audioBus = audioline.get_bus()
- audioBus.add_signal_watch()
- self.AUDIO_TRANSCODE_ID = audioBus.connect('message', self._onMuxedAudioMessageCb, audioline)
- self.TRANSCODE_ID = gobject.timeout_add(self.TRANSCODE_UPDATE_INTERVAL, self._transcodeUpdateCb, audioline)
- gobject.idle_add( self.idlePlayElement, audioline )
- else:
- self.record = False
- self.audio = False
- self.ca.m.cannotSaveVideo()
-
+ cover = utils.getStringFromPixbuf(pixbuf)
+ taglist[gst.TAG_EXTENDED_COMMENT] = 'coverart=%s' % cover
- def getTags( self, type ):
- tl = gst.TagList()
- tl[gst.TAG_ARTIST] = str(Instance.nickName)
- tl[gst.TAG_COMMENT] = "olpc"
- #this is unfortunately, unreliable
- #record.Record.log.debug("self.ca.metadata['title']->" + str(self.ca.metadata['title']) )
- tl[gst.TAG_ALBUM] = "olpc" #self.ca.metadata['title']
- tl[gst.TAG_DATE] = utils.getDateString(int(time.time()))
- stringType = Constants.mediaTypes[type][Constants.keyIstr]
- tl[gst.TAG_TITLE] = Constants.istrBy % {"1":stringType, "2":str(Instance.nickName)}
- return tl
+ vorbisenc = self.audio_pipe.get_by_name('vorbisenc')
+ vorbisenc.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)
- def blockedCb(self, x, y, z):
- pass
+ self.pixbuf = pixbuf
+ self._switch_pipe(self.audio_pipe)
- def _takePhoto(self):
- if self.picExposureOpen:
- return
+ # take photo first
+ self.takePhoto(process_cb)
- self.picExposureOpen = True
- pad = self.photobin.get_static_pad("sink")
- pad.set_blocked_async(True, self.blockedCb, None)
- self.pipeline.add(self.photobin)
- self.photobin.set_state(gst.STATE_PLAYING)
- self.pipeline.get_by_name("tee").link(self.photobin)
- pad.set_blocked_async(False, self.blockedCb, None)
+ def stopRecordingAudio( self ):
+ logger.debug('stopRecordingAudio')
- def takePhoto(self):
- self.photoMode = self.PHOTO_MODE_PHOTO
- self._takePhoto()
+ self._switch_pipe(self.play_pipe)
- def copyPic(self, fsink, buffer, pad, user_data=None):
- if not self.picExposureOpen:
+ if (not os.path.exists(TMP_OGG)):
+ self.ca.m.cannotSaveVideo()
return
-
- pad = self.photobin.get_static_pad("sink")
- pad.set_blocked_async(True, self.blockedCb, None)
- self.pipeline.get_by_name("tee").unlink(self.photobin)
- self.pipeline.remove(self.photobin)
- pad.set_blocked_async(False, self.blockedCb, None)
-
- self.picExposureOpen = False
- pic = gtk.gdk.pixbuf_loader_new_with_mime_type("image/jpeg")
- pic.write( buffer )
- pic.close()
- pixBuf = pic.get_pixbuf()
- del pic
-
- self.savePhoto( pixBuf )
-
-
- def savePhoto(self, pixbuf):
- if self.photoMode == self.PHOTO_MODE_AUDIO:
- self.audioPixbuf = pixbuf
- else:
- self.ca.m.savePhoto(pixbuf)
-
-
- def startRecordingVideo(self):
- logger.debug('Glive.startRecordingVideo')
-
- self.record = True
- self.audio = True
-
- # It would be nicer to connect the video/audio-recording elements
- # without stopping the pipeline. However, that seems to cause a
- # very long delay at the start of the video recording where the first
- # frame is 'frozen' for several seconds. MikeS from #gstreamer
- # suggested that the videorate element might not be receiving a
- # "new segment" signal soon enough.
- #
- # Stopping the pipeline while we reshuffle neatly works around this
- # with minimal user experience impact.
- self.pipeline.set_state(gst.STATE_NULL)
- self.pipeline.add(self.videobin)
- self.pipeline.get_by_name("tee").link(self.videobin)
- self.pipeline.add(self.audiobin)
- self.pipeline.set_state(gst.STATE_PLAYING)
-
- def startRecordingAudio(self):
- self.audioPixbuf = None
-
- self.photoMode = self.PHOTO_MODE_AUDIO
- self._takePhoto()
-
- self.record = True
- self.pipeline.add(self.audiobin)
- self.audiobin.set_state(gst.STATE_PLAYING)
-
- def stopRecordingVideo(self):
- logger.debug('Glive.stopRecordingVideo')
-
- # Similarly to as when we start recording, we also stop the pipeline
- # while we are adjusting the pipeline to stop recording. If we do
- # it on-the-fly, the following video live feed to the screen becomes
- # several seconds delayed. Weird!
- self._eos_cb = self.stopRecordingVideoEOS
- self.pipeline.get_by_name('camsrc').send_event(gst.event_new_eos())
- self.audiobin.get_by_name('absrc').send_event(gst.event_new_eos())
-
- def stopRecordingVideoEOS(self):
- logger.debug('Glive.stopRecordingVideoEOS')
-
- self.pipeline.set_state(gst.STATE_NULL)
- self.pipeline.get_by_name("tee").unlink(self.videobin)
- self.pipeline.remove(self.videobin)
- self.pipeline.remove(self.audiobin)
- self.pipeline.set_state(gst.STATE_PLAYING)
- gobject.idle_add( self.stoppedRecordingVideo )
-
-
- def copyThumbPic(self, fsink, buffer, pad, user_data=None):
- if (self.thumbExposureOpen):
- self.thumbExposureOpen = False
- pic = gtk.gdk.pixbuf_loader_new_with_mime_type("image/jpeg")
- pic.write(buffer)
- pic.close()
- self.thumbBuf = pic.get_pixbuf()
- del pic
- self.thumbEl('thumbTee').unlink(self.thumbEl('thumbQueue'))
-
- oggFilepath = os.path.join(Instance.instancePath, "output.ogg") #ogv
- if (self.audio):
- self.ca.ui.setPostProcessPixBuf(self.thumbBuf)
-
- wavFilepath = os.path.join(Instance.instancePath, "output.wav")
- muxFilepath = os.path.join(Instance.instancePath, "mux.ogg") #ogv
-
- muxline = gst.parse_launch('filesrc location=' + str(oggFilepath) + ' name=muxVideoFilesrc ! oggdemux name=muxOggdemux ! theoradec name=muxTheoradec ! theoraenc name=muxTheoraenc ! oggmux name=muxOggmux ! filesink location=' + str(muxFilepath) + ' name=muxFilesink filesrc location=' + str(wavFilepath) + ' name=muxAudioFilesrc ! wavparse name=muxWavparse ! audioconvert name=muxAudioconvert ! vorbisenc name=muxVorbisenc ! muxOggmux.')
- taglist = self.getTags(Constants.TYPE_VIDEO)
- vorbisEnc = muxline.get_by_name('muxVorbisenc')
- vorbisEnc.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)
-
- muxBus = muxline.get_bus()
- muxBus.add_signal_watch()
- self.VIDEO_TRANSCODE_ID = muxBus.connect('message', self._onMuxedVideoMessageCb, muxline)
- self.muxPipes.append(muxline)
- #add a listener here to monitor % of transcoding...
- self.TRANSCODE_ID = gobject.timeout_add(self.TRANSCODE_UPDATE_INTERVAL, self._transcodeUpdateCb, muxline)
- muxline.set_state(gst.STATE_PLAYING)
- else:
- self.record = False
- self.audio = False
- self.ca.m.saveVideo(self.thumbBuf, str(oggFilepath), self.VIDEO_WIDTH_SMALL, self.VIDEO_HEIGHT_SMALL)
- self.ca.m.stoppedRecordingVideo()
-
-
- def _transcodeUpdateCb( self, pipe ):
- position, duration = self.queryPosition( pipe )
- if position != gst.CLOCK_TIME_NONE:
- value = position * 100.0 / duration
- value = value/100.0
- self.ca.ui.progressWindow.updateProgress(value, Constants.istrSaving)
- return True
-
-
- def queryPosition( self, pipe ):
- try:
- position, format = pipe.query_position(gst.FORMAT_TIME)
- except:
- position = gst.CLOCK_TIME_NONE
-
- try:
- duration, format = pipe.query_duration(gst.FORMAT_TIME)
- except:
- duration = gst.CLOCK_TIME_NONE
-
- return (position, duration)
-
-
- def _onMuxedVideoMessageCb(self, bus, message, pipe):
- t = message.type
- if (t == gst.MESSAGE_EOS):
- self.record = False
- self.audio = False
- gobject.source_remove(self.VIDEO_TRANSCODE_ID)
- self.VIDEO_TRANSCODE_ID = 0
- gobject.source_remove(self.TRANSCODE_ID)
- self.TRANSCODE_ID = 0
- pipe.set_state(gst.STATE_NULL)
- pipe.get_bus().remove_signal_watch()
- pipe.get_bus().disable_sync_message_emission()
-
- wavFilepath = os.path.join(Instance.instancePath, "output.wav")
- oggFilepath = os.path.join(Instance.instancePath, "output.ogg") #ogv
- muxFilepath = os.path.join(Instance.instancePath, "mux.ogg") #ogv
- os.remove( wavFilepath )
- os.remove( oggFilepath )
- self.ca.m.saveVideo(self.thumbBuf, str(muxFilepath), self.VIDEO_WIDTH_SMALL, self.VIDEO_HEIGHT_SMALL)
- self.ca.m.stoppedRecordingVideo()
- return False
- else:
- return True
-
-
- def _onMuxedAudioMessageCb(self, bus, message, pipe):
- t = message.type
- if (t == gst.MESSAGE_EOS):
- record.Record.log.debug("audio gst.MESSAGE_EOS")
- self.record = False
- self.audio = False
- gobject.source_remove(self.AUDIO_TRANSCODE_ID)
- self.AUDIO_TRANSCODE_ID = 0
- gobject.source_remove(self.TRANSCODE_ID)
- self.TRANSCODE_ID = 0
- pipe.set_state(gst.STATE_NULL)
- pipe.get_bus().remove_signal_watch()
- pipe.get_bus().disable_sync_message_emission()
-
- wavFilepath = os.path.join(Instance.instancePath, "output.wav")
- oggFilepath = os.path.join(Instance.instancePath, "output.ogg")
- os.remove( wavFilepath )
- self.ca.m.saveAudio(oggFilepath, self.audioPixbuf)
- return False
- else:
- return True
-
-
- def _onSyncMessageCb(self, bus, message):
- if message.structure is None:
+ if (os.path.getsize(TMP_OGG) <= 0):
+ self.ca.m.cannotSaveVideo()
return
- if message.structure.get_name() == 'prepare-xwindow-id':
- self.window.set_sink(message.src)
- message.src.set_property('force-aspect-ratio', True)
-
-
- def _onMessageCb(self, bus, message):
- t = message.type
- if t == gst.MESSAGE_EOS:
- if self._eos_cb:
- cb = self._eos_cb
- self._eos_cb = None
- cb()
- elif t == gst.MESSAGE_ERROR:
- #todo: if we come out of suspend/resume with errors, then get us back up and running...
- #todo: handle "No space left on the resource.gstfilesink.c"
- err, debug = message.parse_error()
- logger.error('GST_MESSAGE_ERROR: error=%s debug=%s' % (err, debug))
-
- xvsink = self.pipeline.get_by_name('xvsink')
-
- if xvsink:
- logger.warning('fallback to ximagesink')
-
- self.pipeline.remove(xvsink)
- colorspace = gst.element_factory_make('ffmpegcolorspace',
- 'colorspace')
- xsink = gst.element_factory_make('ximagesink', 'xsink')
- self.pipeline.add(colorspace, xsink)
- gst.element_link_many(self.queue, colorspace, xsink)
-
- if self.playing:
- self.pipeline.set_state(gst.STATE_PLAYING)
+
+ self.ca.ui.setPostProcessPixBuf(self.pixbuf)
+ self.ca.m.saveAudio(TMP_OGG, self.pixbuf)
def abandonMedia(self):
+ logger.debug('abandonMedia')
self.stop()
if (self.AUDIO_TRANSCODE_ID != 0):
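
The startRecordingVideo() hunk above is where the "one pass for encodings" entry lands: a tee feeds one branch to the on-screen sink and a second branch through videorate, videoscale and theoraenc, while alsasrc feeds vorbisenc, and both streams meet in a single oggmux writing straight to the output file, so nothing has to be transcoded after recording stops. A rough standalone sketch of that launch string, assuming the same source and preview descriptions the patch keeps in src_str and play_str:

    import gst

    def build_record_pipe(src_str, play_str, width, height, ogg_path):
        # Single-pass capture: preview and Theora/Vorbis/Ogg encoding share
        # one tee, so the file is complete the moment recording stops.
        return gst.parse_launch(
            '%s ! tee name=tee '
            'tee.! queue ! %s '
            'tee.! queue ! ffmpegcolorspace '
            '! videorate skip_to_first=true '
            '! video/x-raw-yuv,framerate=10/1 '
            '! videoscale ! video/x-raw-yuv,width=%s,height=%s '
            '! theoraenc quality=16 ! oggmux name=mux '
            '! filesink location=%s '
            'alsasrc ! queue ! audioconvert '
            '! vorbisenc name=vorbisenc ! mux.'
            % (src_str, play_str, width, height, ogg_path))

In the patch itself the named vorbisenc also receives the artist/title tags via merge_tags(), and a still photo is taken first so the recording gets a thumbnail.
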
@@ -551,22 +329,50 @@ class Glive:
gobject.source_remove(self.VIDEO_TRANSCODE_ID)
self.VIDEO_TRANSCODE_ID = 0
- wavFilepath = os.path.join(Instance.instancePath, "output.wav")
- if (os.path.exists(wavFilepath)):
- os.remove(wavFilepath)
- oggFilepath = os.path.join(Instance.instancePath, "output.ogg") #ogv
- if (os.path.exists(oggFilepath)):
- os.remove(oggFilepath)
- muxFilepath = os.path.join(Instance.instancePath, "mux.ogg") #ogv
- if (os.path.exists(muxFilepath)):
- os.remove(muxFilepath)
+ if (os.path.exists(TMP_OGG)):
+ os.remove(TMP_OGG)
+
+ def __init__(self, pca):
+ self.window = None
+ self.ca = pca
+
+ self.pipeline = None
+ self.play_pipe = None
+ self.fallback_pipe = None
+ self.photo_pipe = None
+ self.video_pipe = None
+ self.audio_pipe = None
+
+ self.fallback = False
+ def _switch_pipe(self, new_pipe):
+ if self.pipeline != new_pipe:
+ if self.pipeline:
+ self.pipeline.set_state(gst.STATE_NULL)
+ self.pipeline = new_pipe
+
+ if self.pipeline:
+ xsink = new_pipe.get_by_name('xsink')
+ if xsink:
+ xsink.set_xwindow_id(self.window.window.xid)
+ self.pipeline.set_state(gst.STATE_PLAYING)
+
+ def getTags( self, type ):
+ tl = gst.TagList()
+ tl[gst.TAG_ARTIST] = str(Instance.nickName)
+ tl[gst.TAG_COMMENT] = "sugar"
+ #this is unfortunately, unreliable
+ #record.Record.log.debug("self.ca.metadata['title']->" + str(self.ca.metadata['title']) )
+ tl[gst.TAG_ALBUM] = "sugar" #self.ca.metadata['title']
+ tl[gst.TAG_DATE] = utils.getDateString(int(time.time()))
+ stringType = Constants.mediaTypes[type][Constants.keyIstr]
+ tl[gst.TAG_TITLE] = Constants.istrBy % {"1":stringType, "2":str(Instance.nickName)}
+ return tl
class LiveVideoWindow(gtk.Window):
def __init__(self, bgd ):
gtk.Window.__init__(self)
- self.imagesink = None
self.glive = None
self.modify_bg( gtk.STATE_NORMAL, bgd )
@@ -577,12 +383,3 @@ class LiveVideoWindow(gtk.Window):
def set_glive(self, pglive):
self.glive = pglive
self.glive.window = self
-
- def set_sink(self, sink):
- if (self.imagesink != None):
- assert self.window.xid
- self.imagesink = None
- del self.imagesink
-
- self.imagesink = sink
- self.imagesink.set_xwindow_id(self.window.xid)
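
With the prepare-xwindow-id sync handler and LiveVideoWindow.set_sink() removed, all pipeline hand-overs now go through Glive._switch_pipe(): the running pipeline is set to NULL, the named xsink of the newly selected one is pointed at the live window's X window id (the GstXOverlay call that set_sink() used to make), and the new pipeline is started. A condensed restatement, assuming the window is already realized so window.window.xid exists:

    import gst

    def switch_pipe(glive, new_pipe):
        # Stop whatever was running, re-target the sink at the
        # LiveVideoWindow, then start the newly selected pipeline.
        if glive.pipeline is not new_pipe:
            if glive.pipeline:
                glive.pipeline.set_state(gst.STATE_NULL)
            glive.pipeline = new_pipe
        if glive.pipeline:
            xsink = new_pipe.get_by_name('xsink')
            if xsink:
                xsink.set_xwindow_id(glive.window.window.xid)
            glive.pipeline.set_state(gst.STATE_PLAYING)
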
diff --git a/glivex.py b/glivex.py
deleted file mode 100644
index 7c2955d..0000000
--- a/glivex.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#Copyright (c) 2008, Media Modifications Ltd.
-
-#Permission is hereby granted, free of charge, to any person obtaining a copy
-#of this software and associated documentation files (the "Software"), to deal
-#in the Software without restriction, including without limitation the rights
-#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-#copies of the Software, and to permit persons to whom the Software is
-#furnished to do so, subject to the following conditions:
-
-#The above copyright notice and this permission notice shall be included in
-#all copies or substantial portions of the Software.
-
-#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-#THE SOFTWARE.
-
-# This class is a cut-down version of glive which uses an ximagesink
-# rather than an xvimagesink. This is used in video playback mode, where
-# our only Xv port is used for Theora playback.
-#
-# I tried to modify the glive pipeline to allow swapping an xvimagesink for
-# an ximagesink and vice-versa, but that didn't work out (all kinds of strange
-# behaviour, perhaps a gstreamer bug). So we resort to using a separate
-# pipeline - ugly, but it works...
-
-import os
-import gtk
-import pygtk
-pygtk.require('2.0')
-import sys
-import gst
-import gst.interfaces
-import pygst
-pygst.require('0.10')
-import time
-import threading
-import gobject
-gobject.threads_init()
-
-from instance import Instance
-from constants import Constants
-import record
-import utils
-import ui
-
-class GliveX:
- def __init__(self, pca):
- self.window = None
- self.ca = pca
-
- self.playing = False
-
- self.pipeline = gst.Pipeline("slow-pipeline")
- self.createPipeline()
-
- bus = self.pipeline.get_bus()
- bus.enable_sync_message_emission()
- bus.add_signal_watch()
- self.SYNC_ID = bus.connect('sync-message::element', self._onSyncMessageCb)
- self.MESSAGE_ID = bus.connect('message', self._onMessageCb)
-
- def createPipeline ( self ):
- src = gst.element_factory_make("v4l2src", "camsrc")
- try:
- # old gst-plugins-good does not have this property
- src.set_property("queue-size", 2)
- except:
- pass
-
- queue = gst.element_factory_make("queue", "dispqueue")
- queue.set_property("leaky", True)
- queue.set_property('max-size-buffers', 1)
-
- scale = gst.element_factory_make("videoscale", "scale")
- scalecaps = gst.Caps('video/x-raw-yuv,width='+str(ui.UI.dim_PIPW)+',height='+str(ui.UI.dim_PIPH))
- colorspace = gst.element_factory_make("ffmpegcolorspace", "colorspace")
- xsink = gst.element_factory_make("ximagesink", "xsink")
- self.pipeline.add(src, queue, scale, colorspace, xsink)
- gst.element_link_many(src, queue, scale)
- scale.link(colorspace, scalecaps)
- colorspace.link(xsink)
-
- def play(self):
- self.pipeline.set_state(gst.STATE_PLAYING)
- self.playing = True
-
- def pause(self):
- self.pipeline.set_state(gst.STATE_PAUSED)
- self.playing = False
-
- def stop(self):
- self.pipeline.set_state(gst.STATE_NULL)
- self.playing = False
-
- def is_playing(self):
- return self.playing
-
- def idlePlayElement(self, element):
- element.set_state(gst.STATE_PLAYING)
- return False
-
- def _onSyncMessageCb(self, bus, message):
- if message.structure is None:
- return
- if message.structure.get_name() == 'prepare-xwindow-id':
- self.window.set_sink(message.src)
- message.src.set_property('force-aspect-ratio', True)
-
- def _onMessageCb(self, bus, message):
- t = message.type
- if t == gst.MESSAGE_EOS:
- #print("MESSAGE_EOS")
- pass
- elif t == gst.MESSAGE_ERROR:
- #todo: if we come out of suspend/resume with errors, then get us back up and running...
- #todo: handle "No space left on the resource.gstfilesink.c"
- #err, debug = message.parse_error()
- pass
-
-class SlowLiveVideoWindow(gtk.Window):
- def __init__(self, bgd ):
- gtk.Window.__init__(self)
-
- self.imagesink = None
- self.glivex = None
-
- self.modify_bg( gtk.STATE_NORMAL, bgd )
- self.modify_bg( gtk.STATE_INSENSITIVE, bgd )
- self.unset_flags(gtk.DOUBLE_BUFFERED)
- self.set_flags(gtk.APP_PAINTABLE)
-
- def set_glivex(self, pglivex):
- self.glivex = pglivex
- self.glivex.window = self
-
- def set_sink(self, sink):
- if (self.imagesink != None):
- assert self.window.xid
- self.imagesink = None
- del self.imagesink
-
- self.imagesink = sink
- self.imagesink.set_xwindow_id(self.window.xid)
diff --git a/gplay.py b/gplay.py
index b6cc7dd..e459563 100644
--- a/gplay.py
+++ b/gplay.py
@@ -32,54 +32,38 @@ import gobject
import time
gobject.threads_init()
+import logging
+logger = logging.getLogger('record:gplay.py')
+
import record
class Gplay:
- def __init__(self):
+ def __init__(self, ca):
+ self.ca = ca
self.window = None
self.players = []
self.playing = False
- self.nextMovie()
-
- def nextMovie(self):
- if ( len(self.players) > 0 ):
- self.playing = False
- self.getPlayer().set_property("video-sink", None)
- self.getPlayer().get_bus().disconnect(self.SYNC_ID)
- self.getPlayer().get_bus().remove_signal_watch()
- self.getPlayer().get_bus().disable_sync_message_emission()
-
- player = gst.element_factory_make("playbin", "playbin")
- xis = gst.element_factory_make("xvimagesink", "xvimagesink")
- player.set_property("video-sink", xis)
- bus = player.get_bus()
- bus.enable_sync_message_emission()
- bus.add_signal_watch()
- self.SYNC_ID = bus.connect('sync-message::element', self.onSyncMessage)
- self.players.append(player)
+ self.player = gst.element_factory_make('playbin')
- def getPlayer(self):
- return self.players[len(self.players)-1]
-
-
- def onSyncMessage(self, bus, message):
- if message.structure is None:
- return True
- if message.structure.get_name() == 'prepare-xwindow-id':
- self.window.set_sink(message.src)
- message.src.set_property('force-aspect-ratio', True)
- return True
+ bus = self.player.get_bus()
+ bus.enable_sync_message_emission()
+ bus.add_signal_watch()
+ bus.connect('message', self._onMessageCb)
+ def _onMessageCb(self, bus, message):
+ if message.type == gst.MESSAGE_ERROR:
+ err, debug = message.parse_error()
+ logger.error('_onMessageCb: error=%s debug=%s' % (err, debug))
def setLocation(self, location):
- if (self.getPlayer().get_property('uri') == location):
+ if (self.player.get_property('uri') == location):
self.seek(gst.SECOND*0)
return
- self.getPlayer().set_state(gst.STATE_READY)
- self.getPlayer().set_property('uri', location)
+ self.player.set_state(gst.STATE_READY)
+ self.player.set_property('uri', location)
ext = location[len(location)-3:]
record.Record.log.debug("setLocation: ext->"+str(ext))
if (ext == "jpg"):
@@ -89,12 +73,12 @@ class Gplay:
def queryPosition(self):
"Returns a (position, duration) tuple"
try:
- position, format = self.getPlayer().query_position(gst.FORMAT_TIME)
+ position, format = self.player.query_position(gst.FORMAT_TIME)
except:
position = gst.CLOCK_TIME_NONE
try:
- duration, format = self.getPlayer().query_duration(gst.FORMAT_TIME)
+ duration, format = self.player.query_duration(gst.FORMAT_TIME)
except:
duration = gst.CLOCK_TIME_NONE
@@ -103,29 +87,37 @@ class Gplay:
def seek(self, location):
event = gst.event_new_seek(1.0, gst.FORMAT_TIME, gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE, gst.SEEK_TYPE_SET, location, gst.SEEK_TYPE_NONE, 0)
- res = self.getPlayer().send_event(event)
+ res = self.player.send_event(event)
if res:
- self.getPlayer().set_new_stream_time(0L)
+ self.player.set_new_stream_time(0L)
def pause(self):
self.playing = False
- self.getPlayer().set_state(gst.STATE_PAUSED)
+ self.player.set_state(gst.STATE_PAUSED)
def play(self):
+ if not self.player.props.video_sink:
+ if self.ca.glive.fallback:
+ sink = gst.element_factory_make('ximagesink')
+ else:
+ sink = gst.element_factory_make('xvimagesink')
+ sink.props.force_aspect_ratio = True
+ self.player.props.video_sink = sink
+
+ self.player.props.video_sink.set_xwindow_id(self.window.window.xid)
self.playing = True
- self.getPlayer().set_state(gst.STATE_PLAYING)
+ self.player.set_state(gst.STATE_PLAYING)
def stop(self):
self.playing = False
- self.getPlayer().set_state(gst.STATE_NULL)
- self.nextMovie()
+ self.player.set_state(gst.STATE_NULL)
def get_state(self, timeout=1):
- return self.getPlayer().get_state(timeout=timeout)
+ return self.player.get_state(timeout=timeout)
def is_playing(self):
@@ -136,19 +128,7 @@ class PlayVideoWindow(gtk.Window):
def __init__(self, bgd):
gtk.Window.__init__(self)
- self.imagesink = None
-
self.modify_bg( gtk.STATE_NORMAL, bgd )
self.modify_bg( gtk.STATE_INSENSITIVE, bgd )
self.unset_flags(gtk.DOUBLE_BUFFERED)
self.set_flags(gtk.APP_PAINTABLE)
-
-
- def set_sink(self, sink):
- if (self.imagesink != None):
- assert self.window.xid
- self.imagesink = None
- del self.imagesink
-
- self.imagesink = sink
- self.imagesink.set_xwindow_id(self.window.xid)
\ No newline at end of file
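
gplay.py follows the same pattern: instead of waiting for prepare-xwindow-id sync messages, play() now creates the playbin's video sink explicitly, ximagesink when Glive has already fallen back and xvimagesink otherwise, and binds it to the PlayVideoWindow before starting playback. A small sketch of that selection, assuming the fallback flag the patch adds to Glive; the helper name is illustrative:

    import gst

    def attach_video_sink(player, window, use_fallback):
        # Match the live preview's sink choice so playback and preview
        # never compete for the single Xv port, then bind the sink to
        # the playback window's X window id.
        if use_fallback:
            sink = gst.element_factory_make('ximagesink')
        else:
            sink = gst.element_factory_make('xvimagesink')
        sink.props.force_aspect_ratio = True
        player.props.video_sink = sink
        sink.set_xwindow_id(window.window.xid)
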
diff --git a/model.py b/model.py
index 2eb0b1d..44ae2d3 100644
--- a/model.py
+++ b/model.py
@@ -20,6 +20,7 @@
#THE SOFTWARE.
+import uuid
import urllib
import string
import fnmatch
@@ -37,6 +38,9 @@ from time import strftime
import gobject
import operator
+import logging
+logger = logging.getLogger('record:model.py')
+
import sugar.env
from constants import Constants
@@ -249,10 +253,8 @@ class Model:
def stopRecordingVideo( self ):
self.ca.glive.stopRecordingVideo()
gobject.source_remove( self.ca.ui.UPDATE_DURATION_ID )
- self.ca.ui.progressWindow.updateProgress( 0, "" )
self.setUpdating( True )
self.setRecording( False )
- self.ca.ui.TRANSCODING = True
self.ca.ui.FULLSCREEN = False
self.ca.ui.updateVideoComponents()
@@ -276,9 +278,9 @@ class Model:
def meshShareRecd( self, recd ):
- record.Record.log.debug('meshShareRecd')
#hey, i just took a cool video.audio.photo! let me show you!
if (self.ca.recTube != None):
+ logger.debug('meshShareRecd')
recdXml = serialize.getRecdXmlMeshString(recd)
self.ca.recTube.notifyBudsOfNewRecd( Instance.keyHashPrintable, recdXml )
@@ -383,15 +385,15 @@ class Model:
recd.colorStroke = Instance.colorStroke
recd.colorFill = Instance.colorFill
- record.Record.log.debug('createNewRecorded: ' + str(recd) + ", thumbFilename:" + str(recd.thumbFilename))
+ logger.debug('createNewRecorded: ' + str(recd) + ", thumbFilename:" + str(recd.thumbFilename))
return recd
def createNewRecordedMd5Sums( self, recd ):
+ recd.thumbMd5 = recd.mediaMd5 = uuid.uuid4()
+
#load the thumbfile
thumbFile = os.path.join(Instance.instancePath, recd.thumbFilename)
- thumbMd5 = utils.md5File( thumbFile )
- recd.thumbMd5 = thumbMd5
tBytes = os.stat(thumbFile)[6]
recd.thumbBytes = tBytes
@@ -399,8 +401,6 @@ class Model:
#load the mediafile
mediaFile = os.path.join(Instance.instancePath, recd.mediaFilename)
- mediaMd5 = utils.md5File( mediaFile )
- recd.mediaMd5 = mediaMd5
mBytes = os.stat(mediaFile)[6]
recd.mediaBytes = mBytes
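
The model.py hunk implements the first NEWS entry: rather than reading each finished recording back in and md5-hashing it just to obtain an identifier (what utils.md5File() did), every recording now gets a random uuid for both its thumb and media ids, which only need to be unique, not content-derived. A minimal before/after illustration; the helper names are not from the patch:

    import uuid
    from hashlib import md5

    def old_style_id(path):
        # Previous behaviour: hash the whole media file for an identifier,
        # which meant reading a potentially large video back off disk.
        return md5(open(path, 'rb').read()).hexdigest()

    def new_style_id():
        # New behaviour: a random uuid4 is unique enough and costs nothing.
        return uuid.uuid4()
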
diff --git a/record.py b/record.py
index 94333d8..0b2f032 100644
--- a/record.py
+++ b/record.py
@@ -37,7 +37,6 @@ from model import Model
from ui import UI
from recordtube import RecordTube
from glive import Glive
-from glivex import GliveX
from gplay import Gplay
from greplay import Greplay
from recorded import Recorded
@@ -81,8 +80,7 @@ class Record(activity.Activity):
#the main classes
self.m = Model(self)
self.glive = Glive(self)
- self.glivex = GliveX(self)
- self.gplay = Gplay()
+ self.gplay = Gplay(self)
self.ui = UI(self)
#CSCL
@@ -139,7 +137,6 @@ class Record(activity.Activity):
self.m.doShutter()
else:
self.glive.stop()
- self.glivex.stop()
def restartPipes(self):
@@ -160,8 +157,6 @@ class Record(activity.Activity):
self.gplay.stop( )
if (self.glive != None):
self.glive.stop( )
- if (self.glivex != None):
- self.glivex.stop( )
#this calls write_file
activity.Activity.close( self )
diff --git a/ui.py b/ui.py
index eca4fb1..12ff80e 100644
--- a/ui.py
+++ b/ui.py
@@ -34,6 +34,9 @@ import time
import pango
import hippo
+import logging
+logger = logging.getLogger('record:ui.py')
+
#from sugar.graphics.toolcombobox import ToolComboBox
#from sugar.graphics.tray import HTray
from sugar.graphics.toolbutton import ToolButton
@@ -50,7 +53,6 @@ from p5_button import P5Button
from p5_button import Polygon
from p5_button import Button
from glive import LiveVideoWindow
-from glivex import SlowLiveVideoWindow
from gplay import PlayVideoWindow
from recorded import Recorded
from button import RecdButton
@@ -402,14 +404,6 @@ class UI:
self.liveVideoWindow.add_events(gtk.gdk.VISIBILITY_NOTIFY_MASK)
self.liveVideoWindow.connect("visibility-notify-event", self._visibleNotifyCb)
- self.slowLiveVideoWindow = SlowLiveVideoWindow(Constants.colorBlack.gColor)
- self.addToWindowStack( self.slowLiveVideoWindow, self.windowStack[len(self.windowStack)-1] )
- self.slowLiveVideoWindow.set_glivex(self.ca.glivex)
- self.slowLiveVideoWindow.set_events(gtk.gdk.BUTTON_RELEASE_MASK)
- self.slowLiveVideoWindow.connect("button_release_event", self._returnButtonReleaseCb)
- self.slowLiveVideoWindow.add_events(gtk.gdk.VISIBILITY_NOTIFY_MASK)
- self.slowLiveVideoWindow.connect("visibility-notify-event", self._visibleNotifyCb)
-
self.recordWindow = RecordWindow(self)
self.addToWindowStack( self.recordWindow, self.windowStack[len(self.windowStack)-1] )
@@ -546,7 +540,6 @@ class UI:
self.moveWinOffscreen( self.maxWindow )
self.moveWinOffscreen( self.pipBgdWindow )
self.moveWinOffscreen( self.infWindow )
- self.moveWinOffscreen( self.slowLiveVideoWindow )
if (self.FULLSCREEN):
self.moveWinOffscreen( self.recordWindow )
@@ -742,6 +735,7 @@ class UI:
img = camerac.cairo_surface_from_gdk_pixbuf(pixbuf)
self.livePhotoCanvas.setImage( img )
+ self.ca.glive.thumb_play()
self.LIVEMODE = False
self.updateVideoComponents()
@@ -831,12 +825,7 @@ class UI:
def _liveButtonReleaseCb(self, widget, event):
- self.resumeLiveVideo()
-
-
- def _returnButtonReleaseCb(self, widget, event):
self.ca.gplay.stop()
- self.ca.glivex.stop()
self.ca.glive.play()
self.resumeLiveVideo()
@@ -1309,6 +1298,17 @@ class UI:
def updateVideoComponents( self ):
+ logger.debug('updateVideoComponents: MODE=(%s,%s) FULLSCREEN=(%s,%s)' \
+ ' LIVE=(%s,%s) RECD_INFO=(%s,%s) TRANSCODING=(%s,%s)' \
+ ' MESHING=(%s,%s) windowStack=%s' \
+ % (self.LAST_MODE, self.ca.m.MODE,
+ self.LAST_FULLSCREEN, self.FULLSCREEN,
+ self.LAST_LIVE, self.LIVEMODE,
+ self.LAST_RECD_INFO, self.RECD_INFO_ON,
+ self.LAST_TRANSCODING, self.TRANSCODING,
+ self.LAST_MESHING, self.MESHING,
+ len(self.windowStack)))
+
if ( (self.LAST_MODE == self.ca.m.MODE)
and (self.LAST_FULLSCREEN == self.FULLSCREEN)
and (self.LAST_LIVE == self.LIVEMODE)
@@ -1330,7 +1330,7 @@ class UI:
pos.append({"position":"inf", "window":self.infWindow} )
elif (self.ca.m.MODE == Constants.MODE_VIDEO):
pos.append({"position":"pgd", "window":self.pipBgdWindow} )
- pos.append({"position":"pip", "window":self.slowLiveVideoWindow} )
+ pos.append({"position":"pip", "window":self.liveVideoWindow} )
pos.append({"position":"inb", "window":self.playOggWindow} )
pos.append({"position":"inf", "window":self.infWindow} )
elif (self.ca.m.MODE == Constants.MODE_AUDIO):
@@ -1362,7 +1362,7 @@ class UI:
else:
pos.append({"position":"img", "window":self.playOggWindow} )
pos.append({"position":"pgd", "window":self.pipBgdWindow} )
- pos.append({"position":"pip", "window":self.slowLiveVideoWindow} )
+ pos.append({"position":"pip", "window":self.liveVideoWindow} )
if (not self.MESHING):
pos.append({"position":"max", "window":self.maxWindow} )
pos.append({"position":"scr", "window":self.scrubWindow} )
@@ -1423,7 +1423,7 @@ class UI:
elif (self.ca.m.MODE == Constants.MODE_VIDEO):
if (not self.LIVEMODE):
pos.append({"position":"pgd", "window":self.pipBgdWindow} )
- pos.append({"position":"pip", "window":self.slowLiveVideoWindow} )
+ pos.append({"position":"pip", "window":self.liveVideoWindow} )
if (not self.MESHING):
pos.append({"position":"max", "window":self.maxWindow} )
pos.append({"position":"scr", "window":self.scrubWindow} )
@@ -1595,6 +1595,7 @@ class UI:
def showAudio( self, recd ):
+ self.ca.glive.thumb_play()
self.LIVEMODE = False
#if (recd != self.shownRecd):
@@ -1620,6 +1621,8 @@ class UI:
def showVideo( self, recd ):
+ logger.debug('showVideo')
+
downloading = self.ca.requestMeshDownload(recd)
if (not downloading):
@@ -1639,18 +1642,18 @@ class UI:
if (not downloading):
mediaFilepath = recd.getMediaFilepath()
if (mediaFilepath != None):
- self.ca.glive.stop()
- self.ca.glivex.play()
+ self.ca.glive.thumb_play(use_fallback=True)
+ logger.debug('showVideo2 file=%s' % mediaFilepath)
videoUrl = "file://" + str( mediaFilepath )
self.ca.gplay.setLocation(videoUrl)
self.scrubWindow.doPlay()
ableToShowVideo = True
if (not ableToShowVideo):
+ self.ca.glive.thumb_play(use_fallback=True)
# FIXME is this correct?
- self.ca.glive.stop()
- self.ca.glivex.play()
thumbFilepath = recd.getThumbFilepath( )
+ logger.debug('showVideo3 file=%s' % thumbFilepath)
thumbUrl = "file://" + str( thumbFilepath )
self.ca.gplay.setLocation(thumbUrl)
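
The ui.py changes retire slowLiveVideoWindow: the picture-in-picture slot reuses liveVideoWindow, and Glive.thumb_play() decides whether Xv can keep scaling the preview or whether the ximagesink path has to be used, which showVideo() forces because Ogg playback needs the Xv port. Condensed from the hunk above, with only the essential calls kept:

    # Entering video playback: hand the Xv port to gplay and keep the
    # camera preview alive in the PIP window via the fallback sink.
    def show_video(ui, media_filepath):
        ui.ca.glive.thumb_play(use_fallback=True)
        ui.ca.gplay.setLocation('file://' + str(media_filepath))
        ui.scrubWindow.doPlay()
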
diff --git a/utils.py b/utils.py
index 36159ba..e9aa6cf 100644
--- a/utils.py
+++ b/utils.py
@@ -6,7 +6,6 @@ import statvfs
import cairo
import gc
import gtk
-from hashlib import md5
import time
from time import strftime
@@ -55,15 +54,6 @@ def getUniqueFilepath( path, i ):
return os.path.abspath( newPath )
-def md5File( filepath ):
- md = md5()
- f = file( filepath, 'rb' )
- md.update( f.read() )
- digest = md.hexdigest()
- hash = util.printable_hash(digest)
- return hash
-
-
def generateThumbnail( pixbuf, scale, thumbw, thumbh ):
#need to generate thumbnail version here
thumbImg = cairo.ImageSurface(cairo.FORMAT_ARGB32, thumbw, thumbh)