Web   ·   Wiki   ·   Activities   ·   Blog   ·   Lists   ·   Chat   ·   Meeting   ·   Bugs   ·   Git   ·   Translate   ·   Archive   ·   People   ·   Donate
summaryrefslogtreecommitdiffstats
path: root/glive.py
diff options
context:
space:
mode:
Diffstat (limited to 'glive.py')
-rw-r--r--glive.py927
1 files changed, 413 insertions, 514 deletions
diff --git a/glive.py b/glive.py
index ae7480c..99d6b7a 100644
--- a/glive.py
+++ b/glive.py
@@ -18,541 +18,440 @@
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
-
+import re
import os
import gtk
import pygtk
pygtk.require('2.0')
-import sys
import gst
-import gst.interfaces
import pygst
pygst.require('0.10')
import time
-import threading
import gobject
gobject.threads_init()
+import logging
+logger = logging.getLogger('record:glive.py')
+
+from sugar.activity.activity import get_activity_root
+
from instance import Instance
from constants import Constants
-import record
import utils
import ui
-class Glive:
- def __init__(self, pca):
- self.window = None
- self.ca = pca
- self._eos_cb = None
-
- self.playing = False
- self.picExposureOpen = False
-
- self.AUDIO_TRANSCODE_ID = 0
- self.TRANSCODE_ID = 0
- self.VIDEO_TRANSCODE_ID = 0
-
- self.PHOTO_MODE_PHOTO = 0
- self.PHOTO_MODE_AUDIO = 1
-
- self.TRANSCODE_UPDATE_INTERVAL = 200
-
-
- self.VIDEO_WIDTH_SMALL = 160
- self.VIDEO_HEIGHT_SMALL = 120
- self.VIDEO_FRAMERATE_SMALL = 10
-
- self.VIDEO_WIDTH_LARGE = 200
- self.VIDEO_HEIGHT_LARGE = 150
- self.VIDEO_FRAMERATE_SMALL = 10
-
- self.pipeline = gst.Pipeline("my-pipeline")
- self.createPhotoBin()
- self.createAudioBin()
- self.createVideoBin()
- self.createPipeline()
-
- self.thumbPipes = []
- self.muxPipes = []
-
- bus = self.pipeline.get_bus()
- bus.enable_sync_message_emission()
- bus.add_signal_watch()
- self.SYNC_ID = bus.connect('sync-message::element', self._onSyncMessageCb)
- self.MESSAGE_ID = bus.connect('message', self._onMessageCb)
-
- def createPhotoBin ( self ):
- queue = gst.element_factory_make("queue", "pbqueue")
- queue.set_property("leaky", True)
- queue.set_property("max-size-buffers", 1)
-
- colorspace = gst.element_factory_make("ffmpegcolorspace", "pbcolorspace")
- jpeg = gst.element_factory_make("jpegenc", "pbjpeg")
-
- sink = gst.element_factory_make("fakesink", "pbsink")
- self.HANDOFF_ID = sink.connect("handoff", self.copyPic)
- sink.set_property("signal-handoffs", True)
-
- self.photobin = gst.Bin("photobin")
- self.photobin.add(queue, colorspace, jpeg, sink)
-
- gst.element_link_many(queue, colorspace, jpeg, sink)
-
- pad = queue.get_static_pad("sink")
- self.photobin.add_pad(gst.GhostPad("sink", pad))
-
- def createAudioBin ( self ):
- src = gst.element_factory_make("alsasrc", "absrc")
- srccaps = gst.Caps("audio/x-raw-int,rate=16000,channels=1,depth=16")
-
- enc = gst.element_factory_make("wavenc", "abenc")
-
- sink = gst.element_factory_make("filesink", "absink")
- sink.set_property("location", os.path.join(Instance.instancePath, "output.wav"))
-
- self.audiobin = gst.Bin("audiobin")
- self.audiobin.add(src, enc, sink)
-
- src.link(enc, srccaps)
- enc.link(sink)
-
- def createVideoBin ( self ):
- queue = gst.element_factory_make("queue", "vbqueue")
-
- rate = gst.element_factory_make("videorate", "vbrate")
- ratecaps = gst.Caps('video/x-raw-yuv,framerate='+str(self.VIDEO_FRAMERATE_SMALL)+'/1')
+TMP_OGV = os.path.join(get_activity_root(), 'instance', 'output.ogv')
+TMP_OGG = os.path.join(get_activity_root(), 'instance', 'output.ogg')
+TMP_WAV = os.path.join(get_activity_root(), 'instance', 'output.wav')
- scale = gst.element_factory_make("videoscale", "vbscale")
- scalecaps = gst.Caps('video/x-raw-yuv,width='+str(self.VIDEO_WIDTH_SMALL)+',height='+str(self.VIDEO_HEIGHT_SMALL))
+PLAYBACK_WIDTH = 640
+PLAYBACK_HEIGHT = 480
- colorspace = gst.element_factory_make("ffmpegcolorspace", "vbcolorspace")
-
- enc = gst.element_factory_make("theoraenc", "vbenc")
- enc.set_property("quality", 16)
-
- mux = gst.element_factory_make("oggmux", "vbmux")
-
- sink = gst.element_factory_make("filesink", "vbfile")
- sink.set_property("location", os.path.join(Instance.instancePath, "output.ogg"))
-
- self.videobin = gst.Bin("videobin")
- self.videobin.add(queue, rate, scale, colorspace, enc, mux, sink)
-
- queue.link(rate)
- rate.link(scale, ratecaps)
- scale.link(colorspace, scalecaps)
- gst.element_link_many(colorspace, enc, mux, sink)
-
- pad = queue.get_static_pad("sink")
- self.videobin.add_pad(gst.GhostPad("sink", pad))
-
- def createPipeline ( self ):
- src = gst.element_factory_make("v4l2src", "camsrc")
- try:
- # old gst-plugins-good does not have this property
- src.set_property("queue-size", 2)
- except:
- pass
-
- tee = gst.element_factory_make("tee", "tee")
- queue = gst.element_factory_make("queue", "dispqueue")
- xvsink = gst.element_factory_make("xvimagesink", "xvsink")
- self.pipeline.add(src, tee, queue, xvsink)
- gst.element_link_many(src, tee, queue, xvsink)
-
- def thumbPipe(self):
- return self.thumbPipes[ len(self.thumbPipes)-1 ]
-
-
- def thumbEl(self, name):
- return self.thumbPipe().get_by_name(name)
-
-
- def muxPipe(self):
- return self.muxPipes[ len(self.muxPipes)-1 ]
-
-
- def muxEl(self, name):
- return self.muxPipe().get_by_name(name)
-
-
- def play(self):
- self.pipeline.set_state(gst.STATE_PLAYING)
- self.playing = True
-
- def pause(self):
- self.pipeline.set_state(gst.STATE_PAUSED)
- self.playing = False
-
-
- def stop(self):
- self.pipeline.set_state(gst.STATE_NULL)
- self.playing = False
-
- def is_playing(self):
- return self.playing
-
- def idlePlayElement(self, element):
- element.set_state(gst.STATE_PLAYING)
- return False
-
- def stopRecordingAudio( self ):
- self.audiobin.set_state(gst.STATE_NULL)
- self.pipeline.remove(self.audiobin)
- gobject.idle_add( self.stoppedRecordingAudio )
-
-
- def stoppedRecordingVideo(self):
- if ( len(self.thumbPipes) > 0 ):
- thumbline = self.thumbPipes[len(self.thumbPipes)-1]
- thumbline.get_by_name('thumbFakesink').disconnect(self.THUMB_HANDOFF_ID)
-
- oggFilepath = os.path.join(Instance.instancePath, "output.ogg") #ogv
- if (not os.path.exists(oggFilepath)):
- self.record = False
- self.ca.m.cannotSaveVideo()
- self.ca.m.stoppedRecordingVideo()
- return
- oggSize = os.path.getsize(oggFilepath)
- if (oggSize <= 0):
- self.record = False
- self.ca.m.cannotSaveVideo()
- self.ca.m.stoppedRecordingVideo()
- return
-
- line = 'filesrc location=' + str(oggFilepath) + ' name=thumbFilesrc ! oggdemux name=thumbOggdemux ! theoradec name=thumbTheoradec ! tee name=thumbTee ! queue name=thumbQueue ! ffmpegcolorspace name=thumbFfmpegcolorspace ! jpegenc name=thumbJPegenc ! fakesink name=thumbFakesink'
- thumbline = gst.parse_launch(line)
- thumbQueue = thumbline.get_by_name('thumbQueue')
- thumbQueue.set_property("leaky", True)
- thumbQueue.set_property("max-size-buffers", 1)
- thumbTee = thumbline.get_by_name('thumbTee')
- thumbFakesink = thumbline.get_by_name('thumbFakesink')
- self.THUMB_HANDOFF_ID = thumbFakesink.connect("handoff", self.copyThumbPic)
- thumbFakesink.set_property("signal-handoffs", True)
- self.thumbPipes.append(thumbline)
- self.thumbExposureOpen = True
- gobject.idle_add( self.idlePlayElement, thumbline )
-
-
- def stoppedRecordingAudio( self ):
- record.Record.log.debug("stoppedRecordingAudio")
- if (self.audioPixbuf != None):
- audioFilepath = os.path.join(Instance.instancePath, "output.wav")#self.el("audioFilesink").get_property("location")
- if (not os.path.exists(audioFilepath)):
- self.record = False
- self.audio = False
- self.ca.m.cannotSaveVideo()
- return
- wavSize = os.path.getsize(audioFilepath)
- if (wavSize <= 0):
- self.record = False
- self.ca.m.cannotSaveVideo()
- return
-
- self.ca.ui.setPostProcessPixBuf(self.audioPixbuf)
-
- line = 'filesrc location=' + str(audioFilepath) + ' name=audioFilesrc ! wavparse name=audioWavparse ! audioconvert name=audioAudioconvert ! vorbisenc name=audioVorbisenc ! oggmux name=audioOggmux ! filesink name=audioFilesink'
- audioline = gst.parse_launch(line)
-
- taglist = self.getTags(Constants.TYPE_AUDIO)
- base64AudioSnapshot = utils.getStringFromPixbuf(self.audioPixbuf)
- taglist[gst.TAG_EXTENDED_COMMENT] = "coverart="+str(base64AudioSnapshot)
- vorbisEnc = audioline.get_by_name('audioVorbisenc')
- vorbisEnc.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)
-
- audioFilesink = audioline.get_by_name('audioFilesink')
- audioOggFilepath = os.path.join(Instance.instancePath, "output.ogg")
- audioFilesink.set_property("location", audioOggFilepath )
-
- audioBus = audioline.get_bus()
- audioBus.add_signal_watch()
- self.AUDIO_TRANSCODE_ID = audioBus.connect('message', self._onMuxedAudioMessageCb, audioline)
- self.TRANSCODE_ID = gobject.timeout_add(self.TRANSCODE_UPDATE_INTERVAL, self._transcodeUpdateCb, audioline)
- gobject.idle_add( self.idlePlayElement, audioline )
- else:
- self.record = False
- self.audio = False
- self.ca.m.cannotSaveVideo()
-
-
- def getTags( self, type ):
- tl = gst.TagList()
- tl[gst.TAG_ARTIST] = str(Instance.nickName)
- tl[gst.TAG_COMMENT] = "olpc"
- #this is unfortunately, unreliable
- #record.Record.log.debug("self.ca.metadata['title']->" + str(self.ca.metadata['title']) )
- tl[gst.TAG_ALBUM] = "olpc" #self.ca.metadata['title']
- tl[gst.TAG_DATE] = utils.getDateString(int(time.time()))
- stringType = Constants.mediaTypes[type][Constants.keyIstr]
- tl[gst.TAG_TITLE] = Constants.istrBy % {"1":stringType, "2":str(Instance.nickName)}
- return tl
-
- def blockedCb(self, x, y, z):
- pass
-
- def _takePhoto(self):
- if self.picExposureOpen:
- return
-
- self.picExposureOpen = True
- pad = self.photobin.get_static_pad("sink")
- pad.set_blocked_async(True, self.blockedCb, None)
- self.pipeline.add(self.photobin)
- self.photobin.set_state(gst.STATE_PLAYING)
- self.pipeline.get_by_name("tee").link(self.photobin)
- pad.set_blocked_async(False, self.blockedCb, None)
-
- def takePhoto(self):
- self.photoMode = self.PHOTO_MODE_PHOTO
- self._takePhoto()
-
- def copyPic(self, fsink, buffer, pad, user_data=None):
- if not self.picExposureOpen:
- return
-
- pad = self.photobin.get_static_pad("sink")
- pad.set_blocked_async(True, self.blockedCb, None)
- self.pipeline.get_by_name("tee").unlink(self.photobin)
- self.pipeline.remove(self.photobin)
- pad.set_blocked_async(False, self.blockedCb, None)
-
- self.picExposureOpen = False
- pic = gtk.gdk.pixbuf_loader_new_with_mime_type("image/jpeg")
- pic.write( buffer )
- pic.close()
- pixBuf = pic.get_pixbuf()
- del pic
-
- self.savePhoto( pixBuf )
-
-
- def savePhoto(self, pixbuf):
- if self.photoMode == self.PHOTO_MODE_AUDIO:
- self.audioPixbuf = pixbuf
- else:
- self.ca.m.savePhoto(pixbuf)
-
-
- def startRecordingVideo(self):
- self.record = True
- self.audio = True
-
- # It would be nicer to connect the video/audio-recording elements
- # without stopping the pipeline. However, that seems to cause a
- # very long delay at the start of the video recording where the first
- # frame is 'frozen' for several seconds. MikeS from #gstreamer
- # suggested that the videorate element might not be receiving a
- # "new segment" signal soon enough.
- #
- # Stopping the pipeline while we reshuffle neatly works around this
- # with minimal user experience impact.
- self.pipeline.set_state(gst.STATE_NULL)
- self.pipeline.add(self.videobin)
- self.pipeline.get_by_name("tee").link(self.videobin)
- self.pipeline.add(self.audiobin)
- self.pipeline.set_state(gst.STATE_PLAYING)
-
- def startRecordingAudio(self):
- self.audioPixbuf = None
-
- self.photoMode = self.PHOTO_MODE_AUDIO
- self._takePhoto()
-
- self.record = True
- self.pipeline.add(self.audiobin)
- self.audiobin.set_state(gst.STATE_PLAYING)
-
- def stopRecordingVideo(self):
- # Similarly to as when we start recording, we also stop the pipeline
- # while we are adjusting the pipeline to stop recording. If we do
- # it on-the-fly, the following video live feed to the screen becomes
- # several seconds delayed. Weird!
- self._eos_cb = self.stopRecordingVideoEOS
- self.pipeline.get_by_name('camsrc').send_event(gst.event_new_eos())
- self.audiobin.get_by_name('absrc').send_event(gst.event_new_eos())
-
- def stopRecordingVideoEOS(self):
- self.pipeline.set_state(gst.STATE_NULL)
- self.pipeline.get_by_name("tee").unlink(self.videobin)
- self.pipeline.remove(self.videobin)
- self.pipeline.remove(self.audiobin)
- self.pipeline.set_state(gst.STATE_PLAYING)
- gobject.idle_add( self.stoppedRecordingVideo )
-
-
- def copyThumbPic(self, fsink, buffer, pad, user_data=None):
- if (self.thumbExposureOpen):
- self.thumbExposureOpen = False
- pic = gtk.gdk.pixbuf_loader_new_with_mime_type("image/jpeg")
- pic.write(buffer)
- pic.close()
- self.thumbBuf = pic.get_pixbuf()
- del pic
- self.thumbEl('thumbTee').unlink(self.thumbEl('thumbQueue'))
-
- oggFilepath = os.path.join(Instance.instancePath, "output.ogg") #ogv
- if (self.audio):
- self.ca.ui.setPostProcessPixBuf(self.thumbBuf)
-
- wavFilepath = os.path.join(Instance.instancePath, "output.wav")
- muxFilepath = os.path.join(Instance.instancePath, "mux.ogg") #ogv
-
- muxline = gst.parse_launch('filesrc location=' + str(oggFilepath) + ' name=muxVideoFilesrc ! oggdemux name=muxOggdemux ! theoradec name=muxTheoradec ! theoraenc name=muxTheoraenc ! oggmux name=muxOggmux ! filesink location=' + str(muxFilepath) + ' name=muxFilesink filesrc location=' + str(wavFilepath) + ' name=muxAudioFilesrc ! wavparse name=muxWavparse ! audioconvert name=muxAudioconvert ! vorbisenc name=muxVorbisenc ! muxOggmux.')
- taglist = self.getTags(Constants.TYPE_VIDEO)
- vorbisEnc = muxline.get_by_name('muxVorbisenc')
- vorbisEnc.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)
-
- muxBus = muxline.get_bus()
- muxBus.add_signal_watch()
- self.VIDEO_TRANSCODE_ID = muxBus.connect('message', self._onMuxedVideoMessageCb, muxline)
- self.muxPipes.append(muxline)
- #add a listener here to monitor % of transcoding...
- self.TRANSCODE_ID = gobject.timeout_add(self.TRANSCODE_UPDATE_INTERVAL, self._transcodeUpdateCb, muxline)
- muxline.set_state(gst.STATE_PLAYING)
- else:
- self.record = False
- self.audio = False
- self.ca.m.saveVideo(self.thumbBuf, str(oggFilepath), self.VIDEO_WIDTH_SMALL, self.VIDEO_HEIGHT_SMALL)
- self.ca.m.stoppedRecordingVideo()
-
-
- def _transcodeUpdateCb( self, pipe ):
- position, duration = self.queryPosition( pipe )
- if position != gst.CLOCK_TIME_NONE:
- value = position * 100.0 / duration
- value = value/100.0
- self.ca.ui.progressWindow.updateProgress(value, Constants.istrSaving)
- return True
-
-
- def queryPosition( self, pipe ):
- try:
- position, format = pipe.query_position(gst.FORMAT_TIME)
- except:
- position = gst.CLOCK_TIME_NONE
-
- try:
- duration, format = pipe.query_duration(gst.FORMAT_TIME)
- except:
- duration = gst.CLOCK_TIME_NONE
-
- return (position, duration)
-
-
- def _onMuxedVideoMessageCb(self, bus, message, pipe):
- t = message.type
- if (t == gst.MESSAGE_EOS):
- self.record = False
- self.audio = False
- gobject.source_remove(self.VIDEO_TRANSCODE_ID)
- self.VIDEO_TRANSCODE_ID = 0
- gobject.source_remove(self.TRANSCODE_ID)
- self.TRANSCODE_ID = 0
- pipe.set_state(gst.STATE_NULL)
- pipe.get_bus().remove_signal_watch()
- pipe.get_bus().disable_sync_message_emission()
-
- wavFilepath = os.path.join(Instance.instancePath, "output.wav")
- oggFilepath = os.path.join(Instance.instancePath, "output.ogg") #ogv
- muxFilepath = os.path.join(Instance.instancePath, "mux.ogg") #ogv
- os.remove( wavFilepath )
- os.remove( oggFilepath )
- self.ca.m.saveVideo(self.thumbBuf, str(muxFilepath), self.VIDEO_WIDTH_SMALL, self.VIDEO_HEIGHT_SMALL)
- self.ca.m.stoppedRecordingVideo()
- return False
- else:
- return True
-
-
- def _onMuxedAudioMessageCb(self, bus, message, pipe):
- t = message.type
- if (t == gst.MESSAGE_EOS):
- record.Record.log.debug("audio gst.MESSAGE_EOS")
- self.record = False
- self.audio = False
- gobject.source_remove(self.AUDIO_TRANSCODE_ID)
- self.AUDIO_TRANSCODE_ID = 0
- gobject.source_remove(self.TRANSCODE_ID)
- self.TRANSCODE_ID = 0
- pipe.set_state(gst.STATE_NULL)
- pipe.get_bus().remove_signal_watch()
- pipe.get_bus().disable_sync_message_emission()
-
- wavFilepath = os.path.join(Instance.instancePath, "output.wav")
- oggFilepath = os.path.join(Instance.instancePath, "output.ogg")
- os.remove( wavFilepath )
- self.ca.m.saveAudio(oggFilepath, self.audioPixbuf)
- return False
- else:
- return True
-
-
- def _onSyncMessageCb(self, bus, message):
- if message.structure is None:
- return
- if message.structure.get_name() == 'prepare-xwindow-id':
- self.window.set_sink(message.src)
- message.src.set_property('force-aspect-ratio', True)
-
-
- def _onMessageCb(self, bus, message):
- t = message.type
- if t == gst.MESSAGE_EOS:
- if self._eos_cb:
- cb = self._eos_cb
- self._eos_cb = None
- cb()
- elif t == gst.MESSAGE_ERROR:
- #todo: if we come out of suspend/resume with errors, then get us back up and running...
- #todo: handle "No space left on the resource.gstfilesink.c"
- #err, debug = message.parse_error()
- pass
-
- def abandonMedia(self):
- self.stop()
-
- if (self.AUDIO_TRANSCODE_ID != 0):
- gobject.source_remove(self.AUDIO_TRANSCODE_ID)
- self.AUDIO_TRANSCODE_ID = 0
- if (self.TRANSCODE_ID != 0):
- gobject.source_remove(self.TRANSCODE_ID)
- self.TRANSCODE_ID = 0
- if (self.VIDEO_TRANSCODE_ID != 0):
- gobject.source_remove(self.VIDEO_TRANSCODE_ID)
- self.VIDEO_TRANSCODE_ID = 0
-
- wavFilepath = os.path.join(Instance.instancePath, "output.wav")
- if (os.path.exists(wavFilepath)):
- os.remove(wavFilepath)
- oggFilepath = os.path.join(Instance.instancePath, "output.ogg") #ogv
- if (os.path.exists(oggFilepath)):
- os.remove(oggFilepath)
- muxFilepath = os.path.join(Instance.instancePath, "mux.ogg") #ogv
- if (os.path.exists(muxFilepath)):
- os.remove(muxFilepath)
+OGG_TRAITS = {
+ 0: { 'width': 160, 'height': 120, 'quality': 16 },
+ 1: { 'width': 400, 'height': 300, 'quality': 16 },
+ 2: { 'width': 640, 'height': 480, 'quality': 32 } }
+class Glive:
+ def play(self):
+ logger.debug('play')
+
+ if not self.play_pipe:
+ self.src_str = \
+ 'v4l2src ' \
+ '! video/x-raw-yuv,width=%s,height=%s ' \
+ % (PLAYBACK_WIDTH, PLAYBACK_HEIGHT)
+ self.play_str = \
+ 'xvimagesink force-aspect-ratio=true name=xsink'
+
+ self.play_pipe = gst.parse_launch(
+ '%s ' \
+ '! valve name=valve ' \
+ '! queue name=queue ' \
+ '! %s' \
+ % (self.src_str, self.play_str))
+ self.valve = self.play_pipe.get_by_name('valve')
+
+ def message_cb(bus, message):
+ if message.type == gst.MESSAGE_ERROR:
+ err, debug = message.parse_error()
+ logger.error('play_pipe: %s %s' % (err, debug))
+
+ if not self.fallback:
+ logger.warning('use fallback_bin')
+ self.fallback = True
+
+ self.play_str = \
+ 'ffmpegcolorspace ' \
+ '! ximagesink force-aspect-ratio=true ' \
+ ' name=xsink'
+
+ self.play_pipe.remove(
+ self.play_pipe.get_by_name('xsink'))
+
+ c = gst.element_factory_make('ffmpegcolorspace')
+ s = gst.element_factory_make('ximagesink', 'xsink')
+ s.props.force_aspect_ratio = True
+
+ self.play_pipe.add(c, s)
+ gst.element_link_many(
+ self.play_pipe.get_by_name('queue'), c, s)
+
+ if [i for i in self.pipeline.get_state() \
+ if id(i) == id(gst.STATE_PLAYING)]:
+ self.pipeline = None
+ self._switch_pipe(self.play_pipe)
+
+ bus = self.play_pipe.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message', message_cb)
+
+ self._switch_pipe(self.play_pipe)
+
+ def thumb_play(self, use_fallback=False):
+ if not self.fallback and not use_fallback:
+ # use xv to scale video
+ self.play()
+ return
+
+ logger.debug('thumb_play')
+
+ if not self.fallback_pipe:
+ self.fallback_pipe = gst.parse_launch(
+ '%s ' \
+ '! queue ' \
+ '! videoscale ' \
+ '! video/x-raw-yuv,width=%s,height=%s ' \
+ '! ffmpegcolorspace ' \
+ '! ximagesink force-aspect-ratio=true name=xsink' \
+ % (self.src_str, ui.UI.dim_PIPW, ui.UI.dim_PIPH))
+
+ def message_cb(bus, message):
+ if message.type == gst.MESSAGE_ERROR:
+ err, debug = message.parse_error()
+ logger.error('fallback_pipe: %s %s' % (err, debug))
+
+ bus = self.fallback_pipe.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message', message_cb)
+
+ self._switch_pipe(self.fallback_pipe)
+
+ def pause(self):
+ logger.debug('pause')
+ if self.pipeline:
+ self.pipeline.set_state(gst.STATE_PAUSED)
+
+ def stop(self):
+ logger.debug('stop')
+ if self.pipeline:
+ self.pipeline.set_state(gst.STATE_NULL)
+
+ def takePhoto(self, after_photo_cb=None):
+ logger.debug('takePhoto')
+
+ if not self.photo:
+ def sink_handoff(sink, buffer, pad, self):
+ sink.props.signal_handoffs = False
+
+ pixbuf = gtk.gdk.pixbuf_loader_new_with_mime_type('image/jpeg')
+ pixbuf.write(buffer)
+ pixbuf.close()
+
+ structure = gst.Structure('record.photo')
+ structure['pixbuf'] = pixbuf.get_pixbuf()
+ msg = gst.message_new_custom(gst.MESSAGE_APPLICATION, sink,
+ structure)
+ self.play_pipe.get_bus().post(msg)
+
+ self.photo = gst.element_factory_make('ffmpegcolorspace')
+ self.photo_jpegenc = gst.element_factory_make('jpegenc')
+ self.photo_sink = gst.element_factory_make('fakesink')
+ self.photo_sink.connect('handoff', sink_handoff, self)
+
+ def message_cb(bus, message, self):
+ if message.type == gst.MESSAGE_APPLICATION \
+ and message.structure.get_name() == 'record.photo':
+ self.valve.props.drop = True
+ self.play_pipe.remove(self.photo)
+ self.play_pipe.remove(self.photo_jpegenc)
+ self.play_pipe.remove(self.photo_sink)
+ self.valve.link(self.play_pipe.get_by_name('queue'))
+ self.valve.props.drop = False
+ self.after_photo_cb(self, message.structure['pixbuf'])
+
+ bus = self.play_pipe.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message', message_cb, self)
+
+ def process_cb(self, pixbuf):
+ self.ca.m.savePhoto(pixbuf)
+ self._switch_pipe(self.play_pipe)
+
+ self.after_photo_cb = after_photo_cb and after_photo_cb or process_cb
+
+ self.valve.props.drop = True
+ self.valve.unlink(self.play_pipe.get_by_name('queue'))
+ self.play_pipe.add(self.photo, self.photo_jpegenc, self.photo_sink)
+ gst.element_link_many(self.valve, self.photo, self.photo_jpegenc,
+ self.photo_sink)
+ self.photo_sink.props.signal_handoffs = True
+ self.valve.props.drop = False
+
+ self._switch_pipe(self.play_pipe)
+
+ def startRecordingVideo(self, quality):
+ logger.debug('startRecordingVideo quality=%s' % quality)
+
+ if not self.video_pipe or quality != self.ogg_quality:
+ self.video_pipe = gst.parse_launch( \
+ '%s ' \
+ '! tee name=tee ' \
+ 'tee.! queue ! %s ' \
+ 'tee.! queue ' \
+ '! ffmpegcolorspace ' \
+ '! videorate skip_to_first=true ' \
+ '! video/x-raw-yuv,framerate=10/1 ' \
+ '! videoscale ' \
+ '! video/x-raw-yuv,width=%s,height=%s ' \
+ '! theoraenc quality=%s ' \
+ '! oggmux ' \
+ '! filesink location=%s ' \
+ 'alsasrc ' \
+ '! queue ' \
+ '! audio/x-raw-int,rate=16000,channels=1,depth=16 ' \
+ '! wavenc ' \
+ '! filesink location=%s ' \
+ % (self.src_str, self.play_str,
+ OGG_TRAITS[quality]['width'],
+ OGG_TRAITS[quality]['height'],
+ OGG_TRAITS[quality]['quality'],
+ TMP_OGV, TMP_WAV))
+
+ def message_cb(bus, message, self):
+ if message.type == gst.MESSAGE_ERROR:
+ err, debug = message.parse_error()
+ logger.error('video_pipe: %s %s' % (err, debug))
+
+ bus = self.video_pipe.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message', message_cb, self)
+
+ def process_cb(self, pixbuf):
+ self.pixbuf = pixbuf
+ self._switch_pipe(self.video_pipe)
+
+ self.ogg_quality = quality
+ # take photo first
+ self.takePhoto(process_cb)
+
+ def stopRecordingVideo(self):
+ logger.debug('stopRecordingVideo')
+
+ self._switch_pipe(self.play_pipe)
+
+ if not os.path.exists(TMP_OGV) \
+ or not os.path.exists(TMP_WAV):
+ self.ca.m.cannotSaveVideo()
+ self.ca.m.stoppedRecordingVideo()
+ return
+
+ if os.path.getsize(TMP_OGV) <= 0 \
+ or os.path.getsize(TMP_WAV) <= 0:
+ self.ca.m.cannotSaveVideo()
+ self.ca.m.stoppedRecordingVideo()
+ return
+
+ if self.mux_pipe:
+ self.mux_pipe.set_state(gst.STATE_NULL)
+ del self.mux_pipe
+
+ self.mux_pipe = gst.parse_launch( \
+ 'filesrc location=%s ' \
+ '! oggdemux ' \
+ '! theoraparse ' \
+ '! oggmux name=mux ' \
+ '! filesink location=%s ' \
+ 'filesrc location=%s ' \
+ '! wavparse ' \
+ '! audioconvert ' \
+ '! vorbisenc name=vorbisenc ' \
+ '! mux.' \
+ % (TMP_OGV, TMP_OGG, TMP_WAV))
+
+ taglist = self.getTags(Constants.TYPE_VIDEO)
+ vorbisenc = self.mux_pipe.get_by_name('vorbisenc')
+ vorbisenc.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)
+
+ def done(bus, message, self):
+ if message.type == gst.MESSAGE_ERROR:
+ err, debug = message.parse_error()
+ logger.error('audio_pipe: %s %s' % (err, debug))
+ return
+ elif message.type != gst.MESSAGE_EOS:
+ return
+
+ logger.debug('stopRecordingVideo.done')
+ self.mux_pipe.set_state(gst.STATE_NULL)
+
+ os.remove(TMP_OGV)
+ os.remove(TMP_WAV)
+
+ ogg_w = OGG_TRAITS[self.ogg_quality]['width']
+ ogg_h = OGG_TRAITS[self.ogg_quality]['height']
+
+ thumb = self.pixbuf.scale_simple(ogg_w, ogg_h,
+ gtk.gdk.INTERP_HYPER)
+
+ self.ca.ui.setPostProcessPixBuf(thumb)
+ self.ca.m.saveVideo(thumb, TMP_OGG, ogg_w, ogg_h)
+ self.ca.m.stoppedRecordingVideo()
+ self.ca.ui.updateVideoComponents()
+
+ bus = self.mux_pipe.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message', done, self)
+
+ self.mux_pipe.set_state(gst.STATE_PLAYING)
+
+ def startRecordingAudio(self):
+ logger.debug('startRecordingAudio')
+
+        # XXX re-create pipe every time
+        # to suppress gst glitches during the second invocation
+ if True:
+ self.audio_pipe = gst.parse_launch( \
+ '%s ' \
+ '! queue ' \
+ '! %s ' \
+ 'alsasrc ' \
+ '! queue ' \
+ '! audioconvert ' \
+ '! vorbisenc name=vorbisenc ' \
+ '! oggmux ' \
+ '! filesink location=%s ' \
+ % (self.src_str, self.play_str, TMP_OGG))
+
+ def message_cb(bus, message, self):
+ if message.type == gst.MESSAGE_ERROR:
+ err, debug = message.parse_error()
+ logger.error('audio_pipe: %s %s' % (err, debug))
+
+ bus = self.audio_pipe.get_bus()
+ bus.add_signal_watch()
+ bus.connect('message', message_cb, self)
+
+ def process_cb(self, pixbuf):
+ taglist = self.getTags(Constants.TYPE_AUDIO)
+ cover = utils.getStringFromPixbuf(pixbuf)
+ taglist[gst.TAG_EXTENDED_COMMENT] = 'coverart=%s' % cover
+
+ vorbisenc = self.audio_pipe.get_by_name('vorbisenc')
+ vorbisenc.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)
+
+ self.pixbuf = pixbuf
+ self._switch_pipe(self.audio_pipe)
+
+ # take photo first
+ self.takePhoto(process_cb)
+
+ def stopRecordingAudio( self ):
+ logger.debug('stopRecordingAudio')
+
+ self._switch_pipe(self.play_pipe)
+
+ if (not os.path.exists(TMP_OGG)):
+ self.ca.m.cannotSaveVideo()
+ return
+ if (os.path.getsize(TMP_OGG) <= 0):
+ self.ca.m.cannotSaveVideo()
+ return
+
+ self.ca.ui.setPostProcessPixBuf(self.pixbuf)
+ self.ca.m.saveAudio(TMP_OGG, self.pixbuf)
+
+ def abandonMedia(self):
+ logger.debug('abandonMedia')
+ self.stop()
+
+ if (self.AUDIO_TRANSCODE_ID != 0):
+ gobject.source_remove(self.AUDIO_TRANSCODE_ID)
+ self.AUDIO_TRANSCODE_ID = 0
+ if (self.TRANSCODE_ID != 0):
+ gobject.source_remove(self.TRANSCODE_ID)
+ self.TRANSCODE_ID = 0
+ if (self.VIDEO_TRANSCODE_ID != 0):
+ gobject.source_remove(self.VIDEO_TRANSCODE_ID)
+ self.VIDEO_TRANSCODE_ID = 0
+
+ if (os.path.exists(TMP_OGG)):
+ os.remove(TMP_OGG)
+
+ def __init__(self, pca):
+ self.window = None
+ self.ca = pca
+
+ self.pipeline = None
+ self.play_pipe = None
+ self.fallback_pipe = None
+ self.photo = None
+ self.video_pipe = None
+ self.mux_pipe = None
+ self.audio_pipe = None
+
+ self.fallback = False
+
+        # XXX since sugar doesn't control capture volumes (see #800)
+        # we have to do it ourselves
+ alsasrc = gst.element_factory_make('alsasrc')
+ alsasrc.set_state(gst.STATE_PAUSED)
+ for i in alsasrc.list_tracks():
+ if i.props.flags & gst.interfaces.MIXER_TRACK_INPUT \
+ and re.search('capture', i.label, flags=re.IGNORECASE):
+ alsasrc.set_record(i, True)
+ volume = i.props.min_volume \
+ + int((i.props.max_volume - i.props.min_volume) \
+ / 100. * 90.)
+ alsasrc.set_volume(i, tuple([volume] * i.props.num_channels))
+ logger.debug('Set volume %s to %s' % (volume, i.label))
+ alsasrc.set_state(gst.STATE_NULL)
+ del alsasrc
+
+ def _switch_pipe(self, new_pipe):
+ if self.pipeline != new_pipe:
+ if self.pipeline:
+ self.pipeline.set_state(gst.STATE_NULL)
+ self.pipeline = new_pipe
+
+ if self.pipeline:
+ xsink = new_pipe.get_by_name('xsink')
+ if xsink:
+ xsink.set_xwindow_id(self.window.window.xid)
+ self.pipeline.set_state(gst.STATE_PLAYING)
+
+ def getTags( self, type ):
+ tl = gst.TagList()
+ tl[gst.TAG_ARTIST] = str(Instance.nickName)
+ tl[gst.TAG_COMMENT] = "sugar"
+        #this is, unfortunately, unreliable
+ #record.Record.log.debug("self.ca.metadata['title']->" + str(self.ca.metadata['title']) )
+ tl[gst.TAG_ALBUM] = "sugar" #self.ca.metadata['title']
+ tl[gst.TAG_DATE] = utils.getDateString(int(time.time()))
+ stringType = Constants.mediaTypes[type][Constants.keyIstr]
+ tl[gst.TAG_TITLE] = Constants.istrBy % {"1":stringType, "2":str(Instance.nickName)}
+ return tl
class LiveVideoWindow(gtk.Window):
- def __init__(self, bgd ):
- gtk.Window.__init__(self)
-
- self.imagesink = None
- self.glive = None
-
- self.modify_bg( gtk.STATE_NORMAL, bgd )
- self.modify_bg( gtk.STATE_INSENSITIVE, bgd )
- self.unset_flags(gtk.DOUBLE_BUFFERED)
- self.set_flags(gtk.APP_PAINTABLE)
+ def __init__(self, bgd ):
+ gtk.Window.__init__(self)
- def set_glive(self, pglive):
- self.glive = pglive
- self.glive.window = self
+ self.glive = None
- def set_sink(self, sink):
- if (self.imagesink != None):
- assert self.window.xid
- self.imagesink = None
- del self.imagesink
+ self.modify_bg( gtk.STATE_NORMAL, bgd )
+ self.modify_bg( gtk.STATE_INSENSITIVE, bgd )
+ self.unset_flags(gtk.DOUBLE_BUFFERED)
+ self.set_flags(gtk.APP_PAINTABLE)
- self.imagesink = sink
- self.imagesink.set_xwindow_id(self.window.xid)
+ def set_glive(self, pglive):
+ self.glive = pglive
+ self.glive.window = self