path: root/glive.py
Diffstat (limited to 'glive.py')
-rw-r--r--  glive.py  324
1 file changed, 158 insertions(+), 166 deletions(-)
diff --git a/glive.py b/glive.py
index bf27919..ae7480c 100644
--- a/glive.py
+++ b/glive.py
@@ -43,21 +43,17 @@ class Glive:
def __init__(self, pca):
self.window = None
self.ca = pca
- self.pipes = []
+ self._eos_cb = None
self.playing = False
+ self.picExposureOpen = False
self.AUDIO_TRANSCODE_ID = 0
self.TRANSCODE_ID = 0
self.VIDEO_TRANSCODE_ID = 0
- self.PIPETYPE_SUGAR_JHBUILD = 0
- self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD = 1
- self.PIPETYPE_X_VIDEO_DISPLAY = 2
- self.PIPETYPE_AUDIO_RECORD = 3
- self._PIPETYPE = self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD
- self._LAST_PIPETYPE = self._PIPETYPE
- self._NEXT_PIPETYPE = -1
+ self.PHOTO_MODE_PHOTO = 0
+ self.PHOTO_MODE_AUDIO = 1
self.TRANSCODE_UPDATE_INTERVAL = 200
@@ -70,27 +66,99 @@ class Glive:
self.VIDEO_HEIGHT_LARGE = 150
self.VIDEO_FRAMERATE_SMALL = 10
+ self.pipeline = gst.Pipeline("my-pipeline")
+ self.createPhotoBin()
+ self.createAudioBin()
+ self.createVideoBin()
+ self.createPipeline()
self.thumbPipes = []
self.muxPipes = []
- self._nextPipe()
+ bus = self.pipeline.get_bus()
+ bus.enable_sync_message_emission()
+ bus.add_signal_watch()
+ self.SYNC_ID = bus.connect('sync-message::element', self._onSyncMessageCb)
+ self.MESSAGE_ID = bus.connect('message', self._onMessageCb)
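
Editorial note: the constructor now builds one long-lived pipeline and listens on its bus in two ways: synchronous 'sync-message::element' emission (delivered from the streaming thread, typically used to hand the video sink its X window) and an asynchronous signal watch for EOS and error messages handled in _onMessageCb(). A minimal, self-contained sketch of the same wiring, assuming GStreamer 0.10 (pygst) as used by this file; the test pipeline and names are illustrative, not from glive.py:

    # Bus wiring sketch, GStreamer 0.10 (pygst); illustrative only.
    import pygst
    pygst.require('0.10')
    import gst
    import gobject

    gobject.threads_init()

    def on_sync_message(bus, message):
        # Emitted from the streaming thread; typically used to pass an
        # X window id to the video sink before the first frame is drawn.
        pass

    def on_message(bus, message):
        # Asynchronous messages (EOS, errors) arrive via the main loop.
        if message.type in (gst.MESSAGE_EOS, gst.MESSAGE_ERROR):
            loop.quit()

    pipeline = gst.parse_launch("videotestsrc num-buffers=100 ! fakesink")
    bus = pipeline.get_bus()
    bus.enable_sync_message_emission()
    bus.add_signal_watch()
    bus.connect('sync-message::element', on_sync_message)
    bus.connect('message', on_message)

    pipeline.set_state(gst.STATE_PLAYING)
    loop = gobject.MainLoop()
    loop.run()
    pipeline.set_state(gst.STATE_NULL)
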
- def setPipeType( self, type ):
- self._NEXT_PIPETYPE = type
+ def createPhotoBin ( self ):
+ queue = gst.element_factory_make("queue", "pbqueue")
+ queue.set_property("leaky", True)
+ queue.set_property("max-size-buffers", 1)
+ colorspace = gst.element_factory_make("ffmpegcolorspace", "pbcolorspace")
+ jpeg = gst.element_factory_make("jpegenc", "pbjpeg")
- def getPipeType( self ):
- return self._PIPETYPE
+ sink = gst.element_factory_make("fakesink", "pbsink")
+ self.HANDOFF_ID = sink.connect("handoff", self.copyPic)
+ sink.set_property("signal-handoffs", True)
+ self.photobin = gst.Bin("photobin")
+ self.photobin.add(queue, colorspace, jpeg, sink)
- def pipe(self):
- return self.pipes[ len(self.pipes)-1 ]
+ gst.element_link_many(queue, colorspace, jpeg, sink)
+ pad = queue.get_static_pad("sink")
+ self.photobin.add_pad(gst.GhostPad("sink", pad))
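
Editorial note: createPhotoBin() wraps a one-buffer leaky queue, colourspace conversion, a JPEG encoder and a fakesink behind a single ghosted "sink" pad, so the whole bin can later be linked to the camera tee; the encoded frame reaches Python through the fakesink 'handoff' signal. A standalone sketch of the same pattern, assuming GStreamer 0.10 (pygst), with a test source standing in for the camera:

    # Leaky-queue + fakesink 'handoff' sketch; videotestsrc stands in
    # for the camera, and all names here are illustrative.
    import pygst
    pygst.require('0.10')
    import gst
    import gobject

    gobject.threads_init()

    def on_handoff(sink, buf, pad):
        # One JPEG-encoded frame per buffer; glive.py feeds these bytes
        # to a gtk.gdk.PixbufLoader in copyPic().
        print("got one JPEG-encoded frame")

    queue = gst.element_factory_make("queue")
    queue.set_property("leaky", True)          # drop stale frames
    queue.set_property("max-size-buffers", 1)  # keep only the newest one
    colorspace = gst.element_factory_make("ffmpegcolorspace")
    jpeg = gst.element_factory_make("jpegenc")
    sink = gst.element_factory_make("fakesink")
    sink.set_property("signal-handoffs", True)
    sink.connect("handoff", on_handoff)

    photobin = gst.Bin("photobin")
    photobin.add(queue, colorspace, jpeg, sink)
    gst.element_link_many(queue, colorspace, jpeg, sink)
    photobin.add_pad(gst.GhostPad("sink", queue.get_static_pad("sink")))

    pipeline = gst.Pipeline()
    src = gst.element_factory_make("videotestsrc")
    src.set_property("num-buffers", 10)        # a short, finite run
    pipeline.add(src, photobin)
    src.link(photobin)

    def on_message(bus, message):
        if message.type in (gst.MESSAGE_EOS, gst.MESSAGE_ERROR):
            loop.quit()

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_message)

    pipeline.set_state(gst.STATE_PLAYING)
    loop = gobject.MainLoop()
    loop.run()
    pipeline.set_state(gst.STATE_NULL)
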
- def el(self, name):
- return self.pipe().get_by_name(name)
+ def createAudioBin ( self ):
+ src = gst.element_factory_make("alsasrc", "absrc")
+ srccaps = gst.Caps("audio/x-raw-int,rate=16000,channels=1,depth=16")
+ enc = gst.element_factory_make("wavenc", "abenc")
+
+ sink = gst.element_factory_make("filesink", "absink")
+ sink.set_property("location", os.path.join(Instance.instancePath, "output.wav"))
+
+ self.audiobin = gst.Bin("audiobin")
+ self.audiobin.add(src, enc, sink)
+
+ src.link(enc, srccaps)
+ enc.link(sink)
+
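
Editorial note: createAudioBin() is a plain alsasrc → wavenc → filesink chain; the capture format (16 kHz, mono, 16-bit) is pinned by passing caps to link(), which performs a filtered link in pygst 0.10. A self-contained sketch of the same idea, with a test source and a temporary output path standing in for the ALSA source and the activity's instance path:

    # Filtered-link sketch, GStreamer 0.10 (pygst); audiotestsrc and
    # /tmp/output.wav are illustrative stand-ins.
    import pygst
    pygst.require('0.10')
    import gst
    import gobject

    src = gst.element_factory_make("audiotestsrc")
    src.set_property("num-buffers", 100)   # record a short, finite clip
    caps = gst.Caps("audio/x-raw-int,rate=16000,channels=1,depth=16")
    enc = gst.element_factory_make("wavenc")
    sink = gst.element_factory_make("filesink")
    sink.set_property("location", "/tmp/output.wav")

    pipeline = gst.Pipeline()
    pipeline.add(src, enc, sink)
    src.link(enc, caps)                    # filtered link pins the format
    enc.link(sink)

    def on_message(bus, message):
        if message.type in (gst.MESSAGE_EOS, gst.MESSAGE_ERROR):
            loop.quit()

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_message)

    pipeline.set_state(gst.STATE_PLAYING)
    loop = gobject.MainLoop()
    loop.run()
    pipeline.set_state(gst.STATE_NULL)
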
+ def createVideoBin ( self ):
+ queue = gst.element_factory_make("queue", "vbqueue")
+
+ rate = gst.element_factory_make("videorate", "vbrate")
+ ratecaps = gst.Caps('video/x-raw-yuv,framerate='+str(self.VIDEO_FRAMERATE_SMALL)+'/1')
+
+ scale = gst.element_factory_make("videoscale", "vbscale")
+ scalecaps = gst.Caps('video/x-raw-yuv,width='+str(self.VIDEO_WIDTH_SMALL)+',height='+str(self.VIDEO_HEIGHT_SMALL))
+
+ colorspace = gst.element_factory_make("ffmpegcolorspace", "vbcolorspace")
+
+ enc = gst.element_factory_make("theoraenc", "vbenc")
+ enc.set_property("quality", 16)
+
+ mux = gst.element_factory_make("oggmux", "vbmux")
+
+ sink = gst.element_factory_make("filesink", "vbfile")
+ sink.set_property("location", os.path.join(Instance.instancePath, "output.ogg"))
+
+ self.videobin = gst.Bin("videobin")
+ self.videobin.add(queue, rate, scale, colorspace, enc, mux, sink)
+
+ queue.link(rate)
+ rate.link(scale, ratecaps)
+ scale.link(colorspace, scalecaps)
+ gst.element_link_many(colorspace, enc, mux, sink)
+
+ pad = queue.get_static_pad("sink")
+ self.videobin.add_pad(gst.GhostPad("sink", pad))
+
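
Editorial note: createVideoBin() hand-assembles the recording branch (rate- and size-constrained raw video into theoraenc and oggmux, ending in a filesink), all behind one ghosted "sink" pad. For reference, the same shape can be written as a launch description; the sketch below assumes pygst 0.10, matches the 10 fps VIDEO_FRAMERATE_SMALL value, and uses placeholder width/height/location values:

    # Rough launch-description equivalent of the bin built above;
    # width, height and the output path are placeholders.
    import pygst
    pygst.require('0.10')
    import gst

    desc = ("queue ! videorate ! video/x-raw-yuv,framerate=10/1 "
            "! videoscale ! video/x-raw-yuv,width=320,height=240 "
            "! ffmpegcolorspace ! theoraenc quality=16 ! oggmux "
            "! filesink location=/tmp/output.ogg")
    # The second argument asks parse_bin_from_description() to ghost the
    # bin's unlinked pads, so the queue's sink is exposed on the bin much
    # like the explicit GhostPad("sink", ...) above.
    videobin = gst.parse_bin_from_description(desc, True)
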
+ def createPipeline ( self ):
+ src = gst.element_factory_make("v4l2src", "camsrc")
+ try:
+ # old gst-plugins-good does not have this property
+ src.set_property("queue-size", 2)
+ except:
+ pass
+
+ tee = gst.element_factory_make("tee", "tee")
+ queue = gst.element_factory_make("queue", "dispqueue")
+ xvsink = gst.element_factory_make("xvimagesink", "xvsink")
+ self.pipeline.add(src, tee, queue, xvsink)
+ gst.element_link_many(src, tee, queue, xvsink)
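
Editorial note: createPipeline() keeps the always-on part minimal: camera source into a tee, with one branch (queue ! xvimagesink) for the live preview. The photo and video bins are linked to the tee on demand, and the audio bin is added alongside while recording. A standalone sketch of the tee layout, assuming GStreamer 0.10 (pygst), with test elements standing in for the camera and the X video sink:

    # Tee layout sketch; videotestsrc and fakesinks are stand-ins.
    import pygst
    pygst.require('0.10')
    import gst
    import gobject

    pipeline = gst.Pipeline()
    src = gst.element_factory_make("videotestsrc")
    src.set_property("num-buffers", 100)
    tee = gst.element_factory_make("tee")
    # Each tee branch gets its own queue so a slow branch cannot stall
    # the others.
    preview_q = gst.element_factory_make("queue")
    preview_sink = gst.element_factory_make("fakesink")
    other_q = gst.element_factory_make("queue")
    other_sink = gst.element_factory_make("fakesink")

    pipeline.add(src, tee, preview_q, preview_sink, other_q, other_sink)
    gst.element_link_many(src, tee, preview_q, preview_sink)
    gst.element_link_many(tee, other_q, other_sink)

    def on_message(bus, message):
        if message.type in (gst.MESSAGE_EOS, gst.MESSAGE_ERROR):
            loop.quit()

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_message)

    pipeline.set_state(gst.STATE_PLAYING)
    loop = gobject.MainLoop()
    loop.run()
    pipeline.set_state(gst.STATE_NULL)
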
def thumbPipe(self):
return self.thumbPipes[ len(self.thumbPipes)-1 ]
@@ -109,134 +177,28 @@ class Glive:
def play(self):
- self.pipe().set_state(gst.STATE_PLAYING)
+ self.pipeline.set_state(gst.STATE_PLAYING)
self.playing = True
-
def pause(self):
- self.pipe().set_state(gst.STATE_PAUSED)
+ self.pipeline.set_state(gst.STATE_PAUSED)
self.playing = False
def stop(self):
- self.pipe().set_state(gst.STATE_NULL)
+ self.pipeline.set_state(gst.STATE_NULL)
self.playing = False
- self._LAST_PIPETYPE = self._PIPETYPE
- if (self._NEXT_PIPETYPE != -1):
- self._PIPETYPE = self._NEXT_PIPETYPE
- self._nextPipe()
- self._NEXT_PIPETYPE = -1
-
-
def is_playing(self):
return self.playing
-
def idlePlayElement(self, element):
element.set_state(gst.STATE_PLAYING)
return False
- def _nextPipe(self):
- if ( len(self.pipes) > 0 ):
-
- pipe = self.pipe()
- bus = pipe.get_bus()
- n = len(self.pipes)-1
- n = str(n)
-
- #only disconnect what was connected based on the last pipetype
- if ((self._LAST_PIPETYPE == self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD)
- or (self._LAST_PIPETYPE == self.PIPETYPE_X_VIDEO_DISPLAY)
- or (self._LAST_PIPETYPE == self.PIPETYPE_AUDIO_RECORD) ):
- bus.disconnect(self.SYNC_ID)
- bus.remove_signal_watch()
- bus.disable_sync_message_emission()
- if (self._LAST_PIPETYPE == self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD):
- pipe.get_by_name("picFakesink").disconnect(self.HANDOFF_ID)
- if (self._LAST_PIPETYPE == self.PIPETYPE_AUDIO_RECORD):
- pipe.get_by_name("picFakesink").disconnect(self.HANDOFF_ID)
-
- v4l2 = False
- if (self._PIPETYPE == self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD):
- pipeline = gst.parse_launch("v4l2src name=v4l2src ! tee name=videoTee ! queue name=movieQueue ! videorate name=movieVideorate ! video/x-raw-yuv,framerate="+str(self.VIDEO_FRAMERATE_SMALL)+"/1 ! videoscale name=movieVideoscale ! video/x-raw-yuv,width="+str(self.VIDEO_WIDTH_SMALL)+",height="+str(self.VIDEO_HEIGHT_SMALL)+" ! ffmpegcolorspace name=movieFfmpegcolorspace ! theoraenc quality=16 name=movieTheoraenc ! oggmux name=movieOggmux ! filesink name=movieFilesink videoTee. ! xvimagesink name=xvimagesink videoTee. ! queue name=picQueue ! ffmpegcolorspace name=picFfmpegcolorspace ! jpegenc name=picJPegenc ! fakesink name=picFakesink alsasrc name=audioAlsasrc ! audio/x-raw-int,rate=16000,channels=1,depth=16 ! tee name=audioTee ! wavenc name=audioWavenc ! filesink name=audioFilesink audioTee. ! fakesink name=audioFakesink" )
- v4l2 = True
-
- videoTee = pipeline.get_by_name('videoTee')
-
- picQueue = pipeline.get_by_name('picQueue')
- picQueue.set_property("leaky", True)
- picQueue.set_property("max-size-buffers", 1)
- picFakesink = pipeline.get_by_name("picFakesink")
- self.HANDOFF_ID = picFakesink.connect("handoff", self.copyPic)
- picFakesink.set_property("signal-handoffs", True)
- self.picExposureOpen = False
-
- movieQueue = pipeline.get_by_name("movieQueue")
- movieFilesink = pipeline.get_by_name("movieFilesink")
- movieFilepath = os.path.join(Instance.instancePath, "output.ogg" ) #ogv
- movieFilesink.set_property("location", movieFilepath )
-
- audioFilesink = pipeline.get_by_name('audioFilesink')
- audioFilepath = os.path.join(Instance.instancePath, "output.wav")
- audioFilesink.set_property("location", audioFilepath )
- audioTee = pipeline.get_by_name('audioTee')
- audioWavenc = pipeline.get_by_name('audioWavenc')
-
- audioTee.unlink(audioWavenc)
- videoTee.unlink(movieQueue)
- videoTee.unlink(picQueue)
-
- elif (self._PIPETYPE == self.PIPETYPE_X_VIDEO_DISPLAY ):
- pipeline = gst.parse_launch("v4l2src name=v4l2src ! queue name=xQueue ! videorate ! video/x-raw-yuv,framerate=2/1 ! videoscale ! video/x-raw-yuv,width="+str(ui.UI.dim_PIPW)+",height="+str(ui.UI.dim_PIPH)+" ! ffmpegcolorspace ! ximagesink name=ximagesink")
- v4l2 = True
-
- elif (self._PIPETYPE == self.PIPETYPE_AUDIO_RECORD):
- pipeline = gst.parse_launch("v4l2src name=v4l2src ! tee name=videoTee ! xvimagesink name=xvimagesink videoTee. ! queue name=picQueue ! ffmpegcolorspace name=picFfmpegcolorspace ! jpegenc name=picJPegenc ! fakesink name=picFakesink alsasrc name=audioAlsasrc ! audio/x-raw-int,rate=16000,channels=1,depth=16 ! queue name=audioQueue ! audioconvert name=audioAudioconvert ! wavenc name=audioWavenc ! filesink name=audioFilesink" )
- v4l2 = True
-
- audioQueue = pipeline.get_by_name('audioQueue')
- audioAudioconvert = pipeline.get_by_name('audioAudioconvert')
- audioQueue.unlink(audioAudioconvert)
-
- videoTee = pipeline.get_by_name('videoTee')
- picQueue = pipeline.get_by_name('picQueue')
- picQueue.set_property("leaky", True)
- picQueue.set_property("max-size-buffers", 1)
- picFakesink = pipeline.get_by_name('picFakesink')
- self.HANDOFF_ID = picFakesink.connect("handoff", self.copyPic)
- picFakesink.set_property("signal-handoffs", True)
- self.picExposureOpen = False
- videoTee.unlink(picQueue)
-
- audioFilesink = pipeline.get_by_name('audioFilesink')
- audioFilepath = os.path.join(Instance.instancePath, "output.wav")
- audioFilesink.set_property("location", audioFilepath )
-
- elif (self._PIPETYPE == self.PIPETYPE_SUGAR_JHBUILD):
- pipeline = gst.parse_launch("fakesrc ! queue name=xQueue ! videorate ! video/x-raw-yuv,framerate=2/1 ! videoscale ! video/x-raw-yuv,width=160,height=120 ! ffmpegcolorspace ! ximagesink name=ximagesink")
-
- if (v4l2):
- v4l2src = pipeline.get_by_name('v4l2src')
- try:
- v4l2src.set_property("queue-size", 2)
- except:
- pass
-
- if ((self._PIPETYPE == self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD)
- or (self._PIPETYPE == self.PIPETYPE_X_VIDEO_DISPLAY)
- or (self._PIPETYPE == self.PIPETYPE_AUDIO_RECORD)):
- bus = pipeline.get_bus()
- bus.enable_sync_message_emission()
- bus.add_signal_watch()
- self.SYNC_ID = bus.connect('sync-message::element', self._onSyncMessageCb)
- self.MESSAGE_ID = bus.connect('message', self._onMessageCb)
-
- self.pipes.append(pipeline)
-
-
def stopRecordingAudio( self ):
- self.stop()
+ self.audiobin.set_state(gst.STATE_NULL)
+ self.pipeline.remove(self.audiobin)
gobject.idle_add( self.stoppedRecordingAudio )
@@ -325,63 +287,96 @@ class Glive:
tl[gst.TAG_TITLE] = Constants.istrBy % {"1":stringType, "2":str(Instance.nickName)}
return tl
+ def blockedCb(self, x, y, z):
+ pass
- def takePhoto(self):
- if not(self.picExposureOpen):
- self.picExposureOpen = True
- self.el("videoTee").link(self.el("picQueue"))
+ def _takePhoto(self):
+ if self.picExposureOpen:
+ return
+
+ self.picExposureOpen = True
+ pad = self.photobin.get_static_pad("sink")
+ pad.set_blocked_async(True, self.blockedCb, None)
+ self.pipeline.add(self.photobin)
+ self.photobin.set_state(gst.STATE_PLAYING)
+ self.pipeline.get_by_name("tee").link(self.photobin)
+ pad.set_blocked_async(False, self.blockedCb, None)
+ def takePhoto(self):
+ self.photoMode = self.PHOTO_MODE_PHOTO
+ self._takePhoto()
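
Editorial note: _takePhoto() attaches the photo bin while the pipeline keeps running: block the bin's ghost sink pad, add and start the bin, link it to the tee, then unblock. set_blocked_async() requires a completion callback even when there is nothing to do, which is all blockedCb() is for. A standalone sketch that exercises the same sequence of calls against a running test pipeline; the element names and the two-second trigger are illustrative:

    # Block / add / start / link / unblock sketch, GStreamer 0.10
    # (pygst); videotestsrc and fakesink stand in for the camera and the
    # JPEG branch.
    import pygst
    pygst.require('0.10')
    import gst
    import gobject

    gobject.threads_init()

    pipeline = gst.parse_launch(
        "videotestsrc ! tee name=tee ! queue ! fakesink")

    def on_handoff(sink, buf, pad):
        print("new branch received its first buffer")
        loop.quit()

    queue = gst.element_factory_make("queue")
    sink = gst.element_factory_make("fakesink")
    sink.set_property("signal-handoffs", True)
    sink.connect("handoff", on_handoff)
    branch = gst.Bin("branch")
    branch.add(queue, sink)
    queue.link(sink)
    branch.add_pad(gst.GhostPad("sink", queue.get_static_pad("sink")))

    def blocked_cb(pad, blocked, user_data):
        pass  # no-op completion callback, like blockedCb() above

    def attach():
        # Same order of calls as _takePhoto().
        pad = branch.get_static_pad("sink")
        pad.set_blocked_async(True, blocked_cb, None)
        pipeline.add(branch)
        branch.set_state(gst.STATE_PLAYING)
        pipeline.get_by_name("tee").link(branch)
        pad.set_blocked_async(False, blocked_cb, None)
        return False  # one-shot timeout

    pipeline.set_state(gst.STATE_PLAYING)
    gobject.timeout_add(2000, attach)
    loop = gobject.MainLoop()
    loop.run()
    pipeline.set_state(gst.STATE_NULL)
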
def copyPic(self, fsink, buffer, pad, user_data=None):
- if (self.picExposureOpen):
+ if not self.picExposureOpen:
+ return
- self.picExposureOpen = False
- pic = gtk.gdk.pixbuf_loader_new_with_mime_type("image/jpeg")
- pic.write( buffer )
- pic.close()
- pixBuf = pic.get_pixbuf()
- del pic
+ pad = self.photobin.get_static_pad("sink")
+ pad.set_blocked_async(True, self.blockedCb, None)
+ self.pipeline.get_by_name("tee").unlink(self.photobin)
+ self.pipeline.remove(self.photobin)
+ pad.set_blocked_async(False, self.blockedCb, None)
+
+ self.picExposureOpen = False
+ pic = gtk.gdk.pixbuf_loader_new_with_mime_type("image/jpeg")
+ pic.write( buffer )
+ pic.close()
+ pixBuf = pic.get_pixbuf()
+ del pic
- self.el("videoTee").unlink(self.el("picQueue"))
- self.savePhoto( pixBuf )
+ self.savePhoto( pixBuf )
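
Editorial note: copyPic() first detaches the photo bin (the mirror image of _takePhoto()), then decodes the JPEG buffer with a gtk.gdk.PixbufLoader. The decoding step on its own, assuming PyGTK 2, with an on-disk file standing in for the GStreamer buffer (the path is a placeholder):

    # PixbufLoader sketch; /tmp/example.jpg is a placeholder for the
    # JPEG bytes delivered by the fakesink 'handoff' signal.
    import gtk

    jpeg_bytes = open("/tmp/example.jpg", "rb").read()
    loader = gtk.gdk.pixbuf_loader_new_with_mime_type("image/jpeg")
    loader.write(jpeg_bytes)
    loader.close()
    pixbuf = loader.get_pixbuf()
    print("decoded %dx%d pixels" % (pixbuf.get_width(), pixbuf.get_height()))
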
def savePhoto(self, pixbuf):
- if (self._PIPETYPE == self.PIPETYPE_AUDIO_RECORD):
+ if self.photoMode == self.PHOTO_MODE_AUDIO:
self.audioPixbuf = pixbuf
else:
self.ca.m.savePhoto(pixbuf)
def startRecordingVideo(self):
- self.pipe().set_state(gst.STATE_READY)
-
self.record = True
self.audio = True
- if (self.record):
- self.el("videoTee").link(self.el("movieQueue"))
-
- if (self.audio):
- self.el("audioTee").link(self.el("audioWavenc"))
-
- self.pipe().set_state(gst.STATE_PLAYING)
+ # It would be nicer to connect the video/audio-recording elements
+ # without stopping the pipeline. However, that seems to cause a
+ # very long delay at the start of the video recording where the first
+ # frame is 'frozen' for several seconds. MikeS from #gstreamer
+ # suggested that the videorate element might not be receiving a
+ # "new segment" signal soon enough.
+ #
+ # Stopping the pipeline while we reshuffle neatly works around this
+ # with minimal user experience impact.
+ self.pipeline.set_state(gst.STATE_NULL)
+ self.pipeline.add(self.videobin)
+ self.pipeline.get_by_name("tee").link(self.videobin)
+ self.pipeline.add(self.audiobin)
+ self.pipeline.set_state(gst.STATE_PLAYING)
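
Editorial note: as the comment above explains, the recording bins are attached with the pipeline stopped to avoid a frozen first frame. The pattern boils down to: drop to NULL, add and link the bins, go back to PLAYING. A compressed sketch with stand-in elements, assuming GStreamer 0.10 (pygst); 'recordbin' is hypothetical:

    # Stop / attach / restart sketch; elements are stand-ins.
    import pygst
    pygst.require('0.10')
    import gst

    pipeline = gst.parse_launch(
        "videotestsrc ! tee name=tee ! queue ! fakesink")
    recordbin = gst.parse_bin_from_description("queue ! fakesink", True)

    pipeline.set_state(gst.STATE_PLAYING)   # live preview running

    # ... later, when recording starts:
    pipeline.set_state(gst.STATE_NULL)      # stop while we reshuffle
    pipeline.add(recordbin)
    pipeline.get_by_name("tee").link(recordbin)
    pipeline.set_state(gst.STATE_PLAYING)   # preview + recording again
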
def startRecordingAudio(self):
self.audioPixbuf = None
- self.pipe().set_state(gst.STATE_READY)
- self.takePhoto()
+ self.photoMode = self.PHOTO_MODE_AUDIO
+ self._takePhoto()
self.record = True
- if (self.record):
- self.el("audioQueue").link(self.el("audioAudioconvert"))
-
- self.pipe().set_state(gst.STATE_PLAYING)
-
+ self.pipeline.add(self.audiobin)
+ self.audiobin.set_state(gst.STATE_PLAYING)
def stopRecordingVideo(self):
- self.stop()
+        # As when we start recording, we stop the pipeline while we
+        # adjust it to stop recording. If we reshuffle on-the-fly, the
+        # live video feed that follows lags several seconds behind. Weird!
+ self._eos_cb = self.stopRecordingVideoEOS
+ self.pipeline.get_by_name('camsrc').send_event(gst.event_new_eos())
+ self.audiobin.get_by_name('absrc').send_event(gst.event_new_eos())
+
+ def stopRecordingVideoEOS(self):
+ self.pipeline.set_state(gst.STATE_NULL)
+ self.pipeline.get_by_name("tee").unlink(self.videobin)
+ self.pipeline.remove(self.videobin)
+ self.pipeline.remove(self.audiobin)
+ self.pipeline.set_state(gst.STATE_PLAYING)
gobject.idle_add( self.stoppedRecordingVideo )
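
Editorial note: stopRecordingVideo() does not tear the pipeline down directly. It sends EOS into both sources so theoraenc/oggmux and wavenc can finalize their files, and the actual dismantling happens in stopRecordingVideoEOS() once _onMessageCb() sees the EOS message. A self-contained sketch of that send-EOS-then-tear-down pattern, assuming GStreamer 0.10 (pygst); the output path and the two-second timer are illustrative:

    # EOS-then-teardown sketch; /tmp/clip.ogg is a placeholder.
    import pygst
    pygst.require('0.10')
    import gst
    import gobject

    pipeline = gst.parse_launch(
        "videotestsrc name=src ! theoraenc ! oggmux ! "
        "filesink location=/tmp/clip.ogg")

    def on_message(bus, message):
        if message.type == gst.MESSAGE_EOS:
            # The muxer has written its final pages; safe to stop now.
            pipeline.set_state(gst.STATE_NULL)
            loop.quit()

    def stop_recording():
        # Ask the source to emit EOS downstream instead of yanking the
        # pipeline to NULL, so the ogg file is properly finalized.
        pipeline.get_by_name("src").send_event(gst.event_new_eos())
        return False  # one-shot timeout

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_message)

    pipeline.set_state(gst.STATE_PLAYING)
    gobject.timeout_add(2000, stop_recording)
    loop = gobject.MainLoop()
    loop.run()
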
@@ -503,19 +498,16 @@ class Glive:
def _onMessageCb(self, bus, message):
t = message.type
if t == gst.MESSAGE_EOS:
- #print("MESSAGE_EOS")
- pass
+ if self._eos_cb:
+ cb = self._eos_cb
+ self._eos_cb = None
+ cb()
elif t == gst.MESSAGE_ERROR:
#todo: if we come out of suspend/resume with errors, then get us back up and running...
#todo: handle "No space left on the resource.gstfilesink.c"
#err, debug = message.parse_error()
pass
-
- def isXv(self):
- return self._PIPETYPE == self.PIPETYPE_XV_VIDEO_DISPLAY_RECORD
-
-
def abandonMedia(self):
self.stop()