-rw-r--r--  glive.py   296
-rw-r--r--  model.py     3
-rw-r--r--  ui.py       32
3 files changed, 148 insertions, 183 deletions
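
This commit replaces Glive's four parse_launch() pipelines (one per PIPETYPE) with a single persistent preview pipeline — v4l2src ! tee ! queue ! xvimagesink — plus three pre-built gst.Bin branches (photobin, audiobin, videobin) that are attached to the tee only while a snapshot or recording is in progress. A minimal standalone sketch of the new topology, using the GStreamer 0.10 Python bindings (element names match createPipeline() in the patch; everything else is illustrative):

    import pygst
    pygst.require("0.10")
    import gst

    # Persistent preview path: camera -> tee -> queue -> Xv sink.
    pipeline = gst.Pipeline("preview")
    src = gst.element_factory_make("v4l2src", "camsrc")
    tee = gst.element_factory_make("tee", "tee")
    queue = gst.element_factory_make("queue", "dispqueue")
    xvsink = gst.element_factory_make("xvimagesink", "xvsink")
    pipeline.add(src, tee, queue, xvsink)
    gst.element_link_many(src, tee, queue, xvsink)

    # Photo/audio/video branches are built once as gst.Bins and linked to
    # the tee on demand; see createPhotoBin() and friends in the diff below.
    pipeline.set_state(gst.STATE_PLAYING)
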
diff --git a/glive.py b/glive.py
index bf27919..80fa3f9 100644
--- a/glive.py
+++ b/glive.py
@@ -43,21 +43,16 @@ class Glive:
def __init__(self, pca):
self.window = None
self.ca = pca
- self.pipes = []
self.playing = False
+ self.picExposureOpen = False
self.AUDIO_TRANSCODE_ID = 0
self.TRANSCODE_ID = 0
self.VIDEO_TRANSCODE_ID = 0
- self.PIPETYPE_SUGAR_JHBUILD = 0
- self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD = 1
- self.PIPETYPE_X_VIDEO_DISPLAY = 2
- self.PIPETYPE_AUDIO_RECORD = 3
- self._PIPETYPE = self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD
- self._LAST_PIPETYPE = self._PIPETYPE
- self._NEXT_PIPETYPE = -1
+ self.PHOTO_MODE_PHOTO = 0
+ self.PHOTO_MODE_AUDIO = 1
self.TRANSCODE_UPDATE_INTERVAL = 200
@@ -70,27 +65,94 @@ class Glive:
self.VIDEO_HEIGHT_LARGE = 150
self.VIDEO_FRAMERATE_SMALL = 10
+ self.pipeline = gst.Pipeline("my-pipeline")
+ self.createPhotoBin()
+ self.createAudioBin()
+ self.createVideoBin()
+ self.createPipeline()
self.thumbPipes = []
self.muxPipes = []
- self._nextPipe()
+ bus = self.pipeline.get_bus()
+ bus.enable_sync_message_emission()
+ bus.add_signal_watch()
+ self.SYNC_ID = bus.connect('sync-message::element', self._onSyncMessageCb)
+ self.MESSAGE_ID = bus.connect('message', self._onMessageCb)
- def setPipeType( self, type ):
- self._NEXT_PIPETYPE = type
+ def createPhotoBin ( self ):
+ queue = gst.element_factory_make("queue", "pbqueue")
+ queue.set_property("leaky", True)
+ queue.set_property("max-size-buffers", 1)
+ colorspace = gst.element_factory_make("ffmpegcolorspace", "pbcolorspace")
+ jpeg = gst.element_factory_make("jpegenc", "pbjpeg")
- def getPipeType( self ):
- return self._PIPETYPE
+ sink = gst.element_factory_make("fakesink", "pbsink")
+ self.HANDOFF_ID = sink.connect("handoff", self.copyPic)
+ sink.set_property("signal-handoffs", True)
+ self.photobin = gst.Bin("photobin")
+ self.photobin.add(queue, colorspace, jpeg, sink)
- def pipe(self):
- return self.pipes[ len(self.pipes)-1 ]
+ gst.element_link_many(queue, colorspace, jpeg, sink)
+ pad = queue.get_static_pad("sink")
+ self.photobin.add_pad(gst.GhostPad("sink", pad))
- def el(self, name):
- return self.pipe().get_by_name(name)
+ def createAudioBin ( self ):
+ src = gst.element_factory_make("alsasrc", "absrc")
+ srccaps = gst.Caps("audio/x-raw-int,rate=16000,channels=1,depth=16")
+ enc = gst.element_factory_make("wavenc", "abenc")
+
+ sink = gst.element_factory_make("filesink", "absink")
+ sink.set_property("location", os.path.join(Instance.instancePath, "output.wav"))
+
+ self.audiobin = gst.Bin("audiobin")
+ self.audiobin.add(src, enc, sink)
+
+ src.link(enc, srccaps)
+ enc.link(sink)
+
+ def createVideoBin ( self ):
+ queue = gst.element_factory_make("queue", "vbqueue")
+
+ rate = gst.element_factory_make("videorate", "vbrate")
+ ratecaps = gst.Caps('video/x-raw-yuv,framerate='+str(self.VIDEO_FRAMERATE_SMALL)+'/1')
+
+ scale = gst.element_factory_make("videoscale", "vbscale")
+ scalecaps = gst.Caps('video/x-raw-yuv,width='+str(self.VIDEO_WIDTH_SMALL)+',height='+str(self.VIDEO_HEIGHT_SMALL))
+
+ colorspace = gst.element_factory_make("ffmpegcolorspace", "vbcolorspace")
+
+ enc = gst.element_factory_make("theoraenc", "vbenc")
+ enc.set_property("quality", 16)
+
+ mux = gst.element_factory_make("oggmux", "vbmux")
+
+ sink = gst.element_factory_make("filesink", "vbfile")
+ sink.set_property("location", os.path.join(Instance.instancePath, "output.ogg"))
+
+ self.videobin = gst.Bin("videobin")
+ self.videobin.add(queue, rate, scale, colorspace, enc, mux, sink)
+
+ queue.link(rate)
+ rate.link(scale, ratecaps)
+ scale.link(colorspace, scalecaps)
+ gst.element_link_many(colorspace, enc, mux, sink)
+
+ pad = queue.get_static_pad("sink")
+ self.videobin.add_pad(gst.GhostPad("sink", pad))
+
+ def createPipeline ( self ):
+ src = gst.element_factory_make("v4l2src", "camsrc")
+ src.set_property("queue-size", 2)
+ tee = gst.element_factory_make("tee", "tee")
+ queue = gst.element_factory_make("queue", "dispqueue")
+ xvsink = gst.element_factory_make("xvimagesink", "xvsink")
+ self.pipeline.add(src, tee, queue, xvsink)
+ gst.element_link_many(src, tee, queue, xvsink)
def thumbPipe(self):
return self.thumbPipes[ len(self.thumbPipes)-1 ]
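
Each branch bin exposes a single "sink" ghost pad that proxies the sink pad of its first element, which is what lets the tee later link to the whole bin in one call. A reduced sketch of the pattern used by createPhotoBin() above (gst 0.10 bindings; the handoff callback is a stand-in for Glive.copyPic):

    def create_photo_bin(handoff_cb):
        # queue ! ffmpegcolorspace ! jpegenc ! fakesink, wrapped in a Bin.
        queue = gst.element_factory_make("queue", "pbqueue")
        queue.set_property("leaky", True)           # drop stale frames,
        queue.set_property("max-size-buffers", 1)   # keep only the newest
        csp = gst.element_factory_make("ffmpegcolorspace", "pbcolorspace")
        jpeg = gst.element_factory_make("jpegenc", "pbjpeg")
        sink = gst.element_factory_make("fakesink", "pbsink")
        sink.set_property("signal-handoffs", True)
        sink.connect("handoff", handoff_cb)         # receives each encoded JPEG buffer

        photobin = gst.Bin("photobin")
        photobin.add(queue, csp, jpeg, sink)
        gst.element_link_many(queue, csp, jpeg, sink)

        # Expose the queue's sink pad as the bin's own "sink" pad so that
        # tee.link(photobin) behaves as if the bin were a single element.
        photobin.add_pad(gst.GhostPad("sink", queue.get_static_pad("sink")))
        return photobin
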
@@ -109,134 +171,27 @@ class Glive:
def play(self):
- self.pipe().set_state(gst.STATE_PLAYING)
self.playing = True
-
def pause(self):
self.pipe().set_state(gst.STATE_PAUSED)
self.playing = False
def stop(self):
- self.pipe().set_state(gst.STATE_NULL)
+ self.pipeline.set_state(gst.STATE_NULL)
self.playing = False
- self._LAST_PIPETYPE = self._PIPETYPE
- if (self._NEXT_PIPETYPE != -1):
- self._PIPETYPE = self._NEXT_PIPETYPE
- self._nextPipe()
- self._NEXT_PIPETYPE = -1
-
-
def is_playing(self):
return self.playing
-
def idlePlayElement(self, element):
element.set_state(gst.STATE_PLAYING)
return False
- def _nextPipe(self):
- if ( len(self.pipes) > 0 ):
-
- pipe = self.pipe()
- bus = pipe.get_bus()
- n = len(self.pipes)-1
- n = str(n)
-
- #only disconnect what was connected based on the last pipetype
- if ((self._LAST_PIPETYPE == self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD)
- or (self._LAST_PIPETYPE == self.PIPETYPE_X_VIDEO_DISPLAY)
- or (self._LAST_PIPETYPE == self.PIPETYPE_AUDIO_RECORD) ):
- bus.disconnect(self.SYNC_ID)
- bus.remove_signal_watch()
- bus.disable_sync_message_emission()
- if (self._LAST_PIPETYPE == self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD):
- pipe.get_by_name("picFakesink").disconnect(self.HANDOFF_ID)
- if (self._LAST_PIPETYPE == self.PIPETYPE_AUDIO_RECORD):
- pipe.get_by_name("picFakesink").disconnect(self.HANDOFF_ID)
-
- v4l2 = False
- if (self._PIPETYPE == self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD):
- pipeline = gst.parse_launch("v4l2src name=v4l2src ! tee name=videoTee ! queue name=movieQueue ! videorate name=movieVideorate ! video/x-raw-yuv,framerate="+str(self.VIDEO_FRAMERATE_SMALL)+"/1 ! videoscale name=movieVideoscale ! video/x-raw-yuv,width="+str(self.VIDEO_WIDTH_SMALL)+",height="+str(self.VIDEO_HEIGHT_SMALL)+" ! ffmpegcolorspace name=movieFfmpegcolorspace ! theoraenc quality=16 name=movieTheoraenc ! oggmux name=movieOggmux ! filesink name=movieFilesink videoTee. ! xvimagesink name=xvimagesink videoTee. ! queue name=picQueue ! ffmpegcolorspace name=picFfmpegcolorspace ! jpegenc name=picJPegenc ! fakesink name=picFakesink alsasrc name=audioAlsasrc ! audio/x-raw-int,rate=16000,channels=1,depth=16 ! tee name=audioTee ! wavenc name=audioWavenc ! filesink name=audioFilesink audioTee. ! fakesink name=audioFakesink" )
- v4l2 = True
-
- videoTee = pipeline.get_by_name('videoTee')
-
- picQueue = pipeline.get_by_name('picQueue')
- picQueue.set_property("leaky", True)
- picQueue.set_property("max-size-buffers", 1)
- picFakesink = pipeline.get_by_name("picFakesink")
- self.HANDOFF_ID = picFakesink.connect("handoff", self.copyPic)
- picFakesink.set_property("signal-handoffs", True)
- self.picExposureOpen = False
-
- movieQueue = pipeline.get_by_name("movieQueue")
- movieFilesink = pipeline.get_by_name("movieFilesink")
- movieFilepath = os.path.join(Instance.instancePath, "output.ogg" ) #ogv
- movieFilesink.set_property("location", movieFilepath )
-
- audioFilesink = pipeline.get_by_name('audioFilesink')
- audioFilepath = os.path.join(Instance.instancePath, "output.wav")
- audioFilesink.set_property("location", audioFilepath )
- audioTee = pipeline.get_by_name('audioTee')
- audioWavenc = pipeline.get_by_name('audioWavenc')
-
- audioTee.unlink(audioWavenc)
- videoTee.unlink(movieQueue)
- videoTee.unlink(picQueue)
-
- elif (self._PIPETYPE == self.PIPETYPE_X_VIDEO_DISPLAY ):
- pipeline = gst.parse_launch("v4l2src name=v4l2src ! queue name=xQueue ! videorate ! video/x-raw-yuv,framerate=2/1 ! videoscale ! video/x-raw-yuv,width="+str(ui.UI.dim_PIPW)+",height="+str(ui.UI.dim_PIPH)+" ! ffmpegcolorspace ! ximagesink name=ximagesink")
- v4l2 = True
-
- elif (self._PIPETYPE == self.PIPETYPE_AUDIO_RECORD):
- pipeline = gst.parse_launch("v4l2src name=v4l2src ! tee name=videoTee ! xvimagesink name=xvimagesink videoTee. ! queue name=picQueue ! ffmpegcolorspace name=picFfmpegcolorspace ! jpegenc name=picJPegenc ! fakesink name=picFakesink alsasrc name=audioAlsasrc ! audio/x-raw-int,rate=16000,channels=1,depth=16 ! queue name=audioQueue ! audioconvert name=audioAudioconvert ! wavenc name=audioWavenc ! filesink name=audioFilesink" )
- v4l2 = True
-
- audioQueue = pipeline.get_by_name('audioQueue')
- audioAudioconvert = pipeline.get_by_name('audioAudioconvert')
- audioQueue.unlink(audioAudioconvert)
-
- videoTee = pipeline.get_by_name('videoTee')
- picQueue = pipeline.get_by_name('picQueue')
- picQueue.set_property("leaky", True)
- picQueue.set_property("max-size-buffers", 1)
- picFakesink = pipeline.get_by_name('picFakesink')
- self.HANDOFF_ID = picFakesink.connect("handoff", self.copyPic)
- picFakesink.set_property("signal-handoffs", True)
- self.picExposureOpen = False
- videoTee.unlink(picQueue)
-
- audioFilesink = pipeline.get_by_name('audioFilesink')
- audioFilepath = os.path.join(Instance.instancePath, "output.wav")
- audioFilesink.set_property("location", audioFilepath )
-
- elif (self._PIPETYPE == self.PIPETYPE_SUGAR_JHBUILD):
- pipeline = gst.parse_launch("fakesrc ! queue name=xQueue ! videorate ! video/x-raw-yuv,framerate=2/1 ! videoscale ! video/x-raw-yuv,width=160,height=120 ! ffmpegcolorspace ! ximagesink name=ximagesink")
-
- if (v4l2):
- v4l2src = pipeline.get_by_name('v4l2src')
- try:
- v4l2src.set_property("queue-size", 2)
- except:
- pass
-
- if ((self._PIPETYPE == self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD)
- or (self._PIPETYPE == self.PIPETYPE_X_VIDEO_DISPLAY)
- or (self._PIPETYPE == self.PIPETYPE_AUDIO_RECORD)):
- bus = pipeline.get_bus()
- bus.enable_sync_message_emission()
- bus.add_signal_watch()
- self.SYNC_ID = bus.connect('sync-message::element', self._onSyncMessageCb)
- self.MESSAGE_ID = bus.connect('message', self._onMessageCb)
-
- self.pipes.append(pipeline)
-
-
def stopRecordingAudio( self ):
- self.stop()
+ self.audiobin.set_state(gst.STATE_NULL)
+ self.pipeline.remove(self.audiobin)
gobject.idle_add( self.stoppedRecordingAudio )
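
Note that audiobin has its own alsasrc source and never hangs off the video tee, so stopRecordingAudio() can detach it with just a state change and a remove; only tee-fed bins need the pad-blocking dance shown after the next hunk. A sketch under that assumption (helper name invented):

    def stop_recording_audio(pipeline, audiobin):
        # audiobin is a self-contained alsasrc ! wavenc ! filesink branch,
        # so shutting it down never disturbs the running preview path.
        audiobin.set_state(gst.STATE_NULL)
        pipeline.remove(audiobin)
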
@@ -325,63 +280,85 @@ class Glive:
tl[gst.TAG_TITLE] = Constants.istrBy % {"1":stringType, "2":str(Instance.nickName)}
return tl
+ def blockedCb(self, x, y, z):
+ pass
- def takePhoto(self):
- if not(self.picExposureOpen):
- self.picExposureOpen = True
- self.el("videoTee").link(self.el("picQueue"))
+ def _takePhoto(self):
+ if self.picExposureOpen:
+ return
+ self.picExposureOpen = True
+ pad = self.photobin.get_static_pad("sink")
+ pad.set_blocked_async(True, self.blockedCb, None)
+ self.pipeline.add(self.photobin)
+ self.photobin.set_state(gst.STATE_PLAYING)
+ self.pipeline.get_by_name("tee").link(self.photobin)
+ pad.set_blocked_async(False, self.blockedCb, None)
+
+ def takePhoto(self):
+ self.photoMode = self.PHOTO_MODE_PHOTO
+ self._takePhoto()
def copyPic(self, fsink, buffer, pad, user_data=None):
- if (self.picExposureOpen):
+ if not self.picExposureOpen:
+ return
- self.picExposureOpen = False
- pic = gtk.gdk.pixbuf_loader_new_with_mime_type("image/jpeg")
- pic.write( buffer )
- pic.close()
- pixBuf = pic.get_pixbuf()
- del pic
+ pad = self.photobin.get_static_pad("sink")
+ pad.set_blocked_async(True, self.blockedCb, None)
+ self.pipeline.get_by_name("tee").unlink(self.photobin)
+ self.pipeline.remove(self.photobin)
+ pad.set_blocked_async(False, self.blockedCb, None)
+
+ self.picExposureOpen = False
+ pic = gtk.gdk.pixbuf_loader_new_with_mime_type("image/jpeg")
+ pic.write( buffer )
+ pic.close()
+ pixBuf = pic.get_pixbuf()
+ del pic
- self.el("videoTee").unlink(self.el("picQueue"))
- self.savePhoto( pixBuf )
+ self.savePhoto( pixBuf )
def savePhoto(self, pixbuf):
- if (self._PIPETYPE == self.PIPETYPE_AUDIO_RECORD):
+ if self.photoMode == self.PHOTO_MODE_AUDIO:
self.audioPixbuf = pixbuf
else:
self.ca.m.savePhoto(pixbuf)
def startRecordingVideo(self):
- self.pipe().set_state(gst.STATE_READY)
-
self.record = True
self.audio = True
- if (self.record):
- self.el("videoTee").link(self.el("movieQueue"))
-
- if (self.audio):
- self.el("audioTee").link(self.el("audioWavenc"))
- self.pipe().set_state(gst.STATE_PLAYING)
+ pad = self.videobin.get_static_pad("sink")
+ pad.set_blocked_async(True, self.blockedCb, None)
+ self.pipeline.add(self.videobin)
+ self.videobin.set_state(gst.STATE_PLAYING)
+ self.pipeline.get_by_name("tee").link(self.videobin)
+ pad.set_blocked_async(False, self.blockedCb, None)
+ self.pipeline.add(self.audiobin)
+ self.audiobin.set_state(gst.STATE_PLAYING)
def startRecordingAudio(self):
self.audioPixbuf = None
- self.pipe().set_state(gst.STATE_READY)
- self.takePhoto()
+ self.photoMode = self.PHOTO_MODE_AUDIO
+ self._takePhoto()
self.record = True
- if (self.record):
- self.el("audioQueue").link(self.el("audioAudioconvert"))
-
- self.pipe().set_state(gst.STATE_PLAYING)
-
+ self.pipeline.add(self.audiobin)
+ self.audiobin.set_state(gst.STATE_PLAYING)
def stopRecordingVideo(self):
- self.stop()
+ self.audiobin.set_state(gst.STATE_NULL)
+ self.videobin.set_state(gst.STATE_NULL)
+ pad = self.videobin.get_static_pad("sink")
+ pad.set_blocked_async(True, self.blockedCb, None)
+ self.pipeline.get_by_name("tee").unlink(self.videobin)
+ self.pipeline.remove(self.videobin)
+ pad.set_blocked_async(False, self.blockedCb, None)
+ self.pipeline.remove(self.audiobin)
gobject.idle_add( self.stoppedRecordingVideo )
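
_takePhoto(), copyPic(), startRecordingVideo() and stopRecordingVideo() all follow the same recipe for hot-plugging a branch into (or out of) the playing pipeline: block the bin's ghost sink pad, add or remove the bin, link it to or unlink it from the tee, then unblock. A condensed sketch of both directions (helper names are invented; the callback signature matches blockedCb above):

    def _blocked_cb(pad, blocked, user_data):
        # set_blocked_async() requires a callback; nothing to do here.
        pass

    def attach_branch(pipeline, tee, branch):
        pad = branch.get_static_pad("sink")
        pad.set_blocked_async(True, _blocked_cb, None)   # hold data while relinking
        pipeline.add(branch)
        branch.set_state(gst.STATE_PLAYING)
        tee.link(branch)                                 # tee requests a fresh src pad
        pad.set_blocked_async(False, _blocked_cb, None)

    def detach_branch(pipeline, tee, branch):
        branch.set_state(gst.STATE_NULL)
        pad = branch.get_static_pad("sink")
        pad.set_blocked_async(True, _blocked_cb, None)
        tee.unlink(branch)
        pipeline.remove(branch)
        pad.set_blocked_async(False, _blocked_cb, None)
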
@@ -511,11 +488,6 @@ class Glive:
#err, debug = message.parse_error()
pass
-
- def isXv(self):
- return self._PIPETYPE == self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD
-
-
def abandonMedia(self):
self.stop()
diff --git a/model.py b/model.py
index c9f2a90..15c4553 100644
--- a/model.py
+++ b/model.py
@@ -297,7 +297,6 @@ class Model:
#resume live video from the camera (if the activity is active)
if (self.ca.ui.ACTIVE):
self.ca.ui.updateVideoComponents()
- self.ca.glive.play()
self.ca.ui.progressWindow.updateProgress( 0, "" )
self.setRecording( False )
@@ -457,4 +456,4 @@ class Model:
self.MODE = Constants.MODE_AUDIO
self.setUpdating(True)
- gobject.idle_add( self.setupMode, self.MODE, True )
\ No newline at end of file
+ gobject.idle_add( self.setupMode, self.MODE, True )
diff --git a/ui.py b/ui.py
index 64786b2..ff99ebd 100644
--- a/ui.py
+++ b/ui.py
@@ -554,7 +554,7 @@ class UI:
self.moveWinOffscreen( self.liveVideoWindow )
elif (self.ca.m.MODE == Constants.MODE_VIDEO):
if (not self.LIVEMODE):
- self.moveWinOffscreen( self.playLiveWindow )
+ self.moveWinOffscreen( self.liveVideoWindow )
elif (self.ca.m.MODE == Constants.MODE_AUDIO):
if (not self.LIVEMODE):
self.moveWinOffscreen( self.liveVideoWindow )
@@ -862,7 +862,7 @@ class UI:
self.showLiveVideoTags()
self.LIVEMODE = True
- self.startLiveVideo( self.playLiveWindow, self.ca.glive.PIPETYPE_XV_VIDEO_DISPLAY_RECORD, False )
+ self.startLiveVideo( self.playLiveWindow, False )
self.updateVideoComponents()
@@ -927,11 +927,11 @@ class UI:
#set up the x & xv x-ition (if need be)
self.ca.gplay.stop()
if (self.ca.m.MODE == Constants.MODE_PHOTO):
- self.startLiveVideo( self.liveVideoWindow, self.ca.glive.PIPETYPE_XV_VIDEO_DISPLAY_RECORD, True )
+ self.startLiveVideo( self.liveVideoWindow, True )
elif (self.ca.m.MODE == Constants.MODE_VIDEO):
- self.startLiveVideo( self.playLiveWindow, self.ca.glive.PIPETYPE_XV_VIDEO_DISPLAY_RECORD, True )
+ self.startLiveVideo( self.liveVideoWindow, True )
elif (self.ca.m.MODE == Constants.MODE_AUDIO):
- self.startLiveVideo( self.liveVideoWindow, self.ca.glive.PIPETYPE_AUDIO_RECORD, True )
+ self.startLiveVideo( self.liveVideoWindow, True )
bottomKid = self.bottomCenter.get_child()
if (bottomKid != None):
@@ -944,19 +944,16 @@ class UI:
self.resetWidgetFadeTimer()
- def startLiveVideo(self, window, pipetype, force):
+ def startLiveVideo(self, window, force):
#We need to know which window and which pipe here
#if returning from another activity, active won't be false and needs to be to get started
- if (self.ca.glive.getPipeType() == pipetype
- and self.ca.glive.window == window
+ if (self.ca.glive.window == window
and self.ca.props.active
and not force):
return
- self.ca.glive.setPipeType( pipetype )
window.set_glive(self.ca.glive)
- self.ca.glive.stop()
self.ca.glive.play()
@@ -1352,7 +1349,7 @@ class UI:
pos.append({"position":"tmr", "window":self.progressWindow} )
elif (self.ca.m.MODE == Constants.MODE_VIDEO):
if (self.LIVEMODE):
- pos.append({"position":"img", "window":self.playLiveWindow} )
+ pos.append({"position":"img", "window":self.liveVideoWindow} )
pos.append({"position":"max", "window":self.maxWindow} )
pos.append({"position":"eye", "window":self.recordWindow} )
pos.append({"position":"prg", "window":self.progressWindow} )
@@ -1617,11 +1614,6 @@ class UI:
def showVideo( self, recd ):
- if (self.LIVEMODE):
- if (self.ca.glive.isXv()):
- self.ca.glive.setPipeType( self.ca.glive.PIPETYPE_X_VIDEO_DISPLAY )
- self.ca.glive.stop()
- self.ca.glive.play()
downloading = self.ca.requestMeshDownload(recd)
if (not downloading):
@@ -1641,12 +1633,15 @@ class UI:
if (not downloading):
mediaFilepath = recd.getMediaFilepath()
if (mediaFilepath != None):
+ self.ca.glive.stop()
videoUrl = "file://" + str( mediaFilepath )
self.ca.gplay.setLocation(videoUrl)
self.scrubWindow.doPlay()
ableToShowVideo = True
if (not ableToShowVideo):
+ # FIXME is this correct?
+ self.ca.glive.stop()
thumbFilepath = recd.getThumbFilepath( )
thumbUrl = "file://" + str( thumbFilepath )
self.ca.gplay.setLocation(thumbUrl)
@@ -1664,7 +1659,7 @@ class UI:
self.livePhotoCanvas.setImage( None )
elif (recd.type == Constants.TYPE_VIDEO):
self.ca.gplay.stop()
- self.startLiveVideo( self.playLiveWindow, self.ca.glive.PIPETYPE_XV_VIDEO_DISPLAY_RECORD, False )
+ self.startLiveVideo( self.playLiveWindow, False )
elif (recd.type == Constants.TYPE_AUDIO):
self.livePhotoCanvas.setImage( None )
self.startLiveAudio()
@@ -1679,7 +1674,6 @@ class UI:
self.ca.m.setUpdating(True)
self.ca.gplay.stop()
- self.ca.glive.setPipeType( self.ca.glive.PIPETYPE_AUDIO_RECORD )
self.liveVideoWindow.set_glive(self.ca.glive)
self.showLiveVideoTags()
@@ -2358,4 +2352,4 @@ class AudioToolbar(gtk.Toolbar):
def getDuration(self):
- return 60 * Constants.DURATIONS[self.durCb.combo.get_active()]
\ No newline at end of file
+ return 60 * Constants.DURATIONS[self.durCb.combo.get_active()]