Web   ·   Wiki   ·   Activities   ·   Blog   ·   Lists   ·   Chat   ·   Meeting   ·   Bugs   ·   Git   ·   Translate   ·   Archive   ·   People   ·   Donate
summaryrefslogtreecommitdiffstats
path: root/shared/js/media
diff options
context:
space:
mode:
Diffstat (limited to 'shared/js/media')
-rw-r--r--shared/js/media/README1
-rw-r--r--shared/js/media/get_video_rotation.js143
-rw-r--r--shared/js/media/jpeg_metadata_parser.js314
-rw-r--r--shared/js/media/media_frame.js537
-rw-r--r--shared/js/media/video_player.js313
5 files changed, 1308 insertions, 0 deletions
diff --git a/shared/js/media/README b/shared/js/media/README
new file mode 100644
index 0000000..05a67a0
--- /dev/null
+++ b/shared/js/media/README
@@ -0,0 +1 @@
+This directory contains files shared by the Camera, Gallery and Video apps.
diff --git a/shared/js/media/get_video_rotation.js b/shared/js/media/get_video_rotation.js
new file mode 100644
index 0000000..3359b92
--- /dev/null
+++ b/shared/js/media/get_video_rotation.js
@@ -0,0 +1,143 @@
+'use strict';
+
+//
+// Given an MP4/Quicktime based video file as a blob, read through its
+// atoms to find the track header "tkhd" atom and extract the rotation
+// matrix from it. Convert the matrix value to rotation in degrees and
+// pass that number to the specified callback function. If no value is
+// found but the video file is valid, pass null to the callback. If
+// any errors occur, pass an error message (a string) to the callback.
+//
+// See also:
+// http://androidxref.com/4.0.4/xref/frameworks/base/media/libstagefright/MPEG4Writer.cpp
+// https://developer.apple.com/library/mac/#documentation/QuickTime/QTFF/QTFFChap2/qtff2.html
+//
+function getVideoRotation(blob, rotationCallback) {
+
+ // A utility for traversing the tree of atoms in an MP4 file
+ function MP4Parser(blob, handlers) {
+ // Start off with a 1024 chunk from the start of the blob.
+ BlobView.get(blob, 0, 1024, function(data, error) {
+ // Make sure that the blob is, in fact, some kind of MP4 file.
+ // All MP4/QuickTime files carry an 'ftyp' atom at byte offset 4.
+ if (data.getASCIIText(4, 4) !== 'ftyp') {
+ handlers.errorHandler('not an MP4 file');
+ return;
+ }
+ parseAtom(data);
+ });
+
+ // Call this with a BlobView object that includes the first 16 bytes of
+ // an atom. It doesn't matter whether the body of the atom is included.
+ function parseAtom(data) {
+ var offset = data.sliceOffset + data.viewOffset; // atom position in blob
+ var size = data.readUnsignedInt(); // atom length
+ var type = data.readASCIIText(4); // atom type
+ var contentOffset = 8; // position of content
+
+ if (size === 0) {
+ // Zero size means the rest of the file
+ size = blob.size - offset;
+ }
+ else if (size === 1) {
+ // A size of 1 means the size is in bytes 8-15.
+ // Combine the high and low 32-bit words into a 64-bit byte count.
+ size = data.readUnsignedInt() * 4294967296 + data.readUnsignedInt();
+ contentOffset = 16;
+ }
+
+ var handler = handlers[type] || handlers.defaultHandler;
+ if (typeof handler === 'function') {
+ // If the handler is a function, pass that function a
+ // DataView object that contains the entire atom
+ // including size and type. Then use the return value
+ // of the function as instructions on what to do next.
+ data.getMore(data.sliceOffset + data.viewOffset, size, function(atom) {
+ // Pass the entire atom to the handler function
+ var rv = handler(atom);
+
+ // If the return value is 'done', stop parsing.
+ // Otherwise, continue with the next atom.
+ // XXX: For more general parsing we need a way to pop some
+ // stack levels. A return value that is an atom name should mean
+ // pop back up to this atom type and go on to the next atom
+ // after that.
+ if (rv !== 'done') {
+ parseAtomAt(data, offset + size);
+ }
+ });
+ }
+ else if (handler === 'children') {
+ // If the handler is this string, then assume that the atom is
+ // a container atom and do its next child atom next
+ var skip = (type === 'meta') ? 4 : 0; // meta atoms have a 4-byte version/flags field before their children
+ parseAtomAt(data, offset + contentOffset + skip);
+ }
+ else if (handler === 'skip' || !handler) {
+ // Skip the atom entirely and go on to the next one.
+ // If there is no next one, call the eofHandler or just return
+ parseAtomAt(data, offset + size);
+ }
+ else if (handler === 'done') {
+ // Stop parsing
+ return;
+ }
+ }
+
+ // Continue parsing with the atom at the given absolute blob offset,
+ // or call the eofHandler if we have run off the end of the blob.
+ function parseAtomAt(data, offset) {
+ if (offset >= blob.size) {
+ if (handlers.eofHandler)
+ handlers.eofHandler();
+ return;
+ }
+ else {
+ data.getMore(offset, 8, parseAtom);
+ }
+ }
+ }
+
+ // We want to loop through the top-level atoms until we find the 'moov'
+ // atom. Then, within this atom, there are one or more 'trak' atoms.
+ // Each 'trak' should begin with a 'tkhd' atom. The tkhd atom has
+ // a transformation matrix at byte 48. The matrix is 9 32 bit integers.
+ // We'll interpret those numbers as a rotation of 0, 90, 180 or 270.
+ // If the video has more than one track, we expect all of them to have
+ // the same rotation, so we'll only look at the first 'trak' atom that
+ // we find.
+ MP4Parser(blob, {
+ errorHandler: function(msg) { rotationCallback(msg); },
+ eofHandler: function() { rotationCallback(null); },
+ defaultHandler: 'skip', // Skip all atoms other than those listed below
+ moov: 'children', // Enumerate children of the moov atom
+ trak: 'children', // Enumerate children of the trak atom
+ tkhd: function(data) { // Pass the tkhd atom to this function
+ // The matrix begins at byte 48
+ data.advance(48);
+
+ // Read the a, b, c, d terms of the 2x2 part of the matrix.
+ // In QuickTime's 16.16 fixed-point matrix format,
+ // 0x00010000 represents 1.0 and 0xFFFF0000 represents -1.0.
+ var a = data.readUnsignedInt();
+ var b = data.readUnsignedInt();
+ data.advance(4); // we don't care about this number
+ var c = data.readUnsignedInt();
+ var d = data.readUnsignedInt();
+
+ if (a === 0 && d === 0) { // 90 or 270 degrees
+ if (b === 0x00010000 && c === 0xFFFF0000)
+ rotationCallback(90);
+ else if (b === 0xFFFF0000 && c === 0x00010000)
+ rotationCallback(270);
+ else
+ rotationCallback('unexpected rotation matrix');
+ }
+ else if (b === 0 && c === 0) { // 0 or 180 degrees
+ if (a === 0x00010000 && d === 0x00010000)
+ rotationCallback(0);
+ else if (a === 0xFFFF0000 && d === 0xFFFF0000)
+ rotationCallback(180);
+ else
+ rotationCallback('unexpected rotation matrix');
+ }
+ else {
+ rotationCallback('unexpected rotation matrix');
+ }
+ return 'done';
+ }
+ });
+}
diff --git a/shared/js/media/jpeg_metadata_parser.js b/shared/js/media/jpeg_metadata_parser.js
new file mode 100644
index 0000000..d8b2b02
--- /dev/null
+++ b/shared/js/media/jpeg_metadata_parser.js
@@ -0,0 +1,314 @@
+'use strict';
+
+//
+// This file defines a single function that asynchronously reads a
+// JPEG file (or blob) to determine its width and height and find the
+// location and size of the embedded preview image, if it has one. If
+// it succeeds, it passes an object containing this data to the
+// specified callback function. If it fails, it passes an error message
+// to the specified error function instead.
+//
+// This function is capable of parsing and returning EXIF data for a
+// JPEG file, but for speed, it ignores all EXIF data except the embedded
+// preview image.
+//
+// This function requires the BlobView utility class
+//
+function parseJPEGMetadata(file, metadataCallback, metadataError) {
+ // This is the object we'll pass to metadataCallback
+ var metadata = {};
+
+ // Start off reading a 16kb slice of the JPEG file.
+ // Hopefully, this will be all we need and everything else will
+ // be synchronous
+ BlobView.get(file, 0, Math.min(16 * 1024, file.size), function(data) {
+ // Every JPEG file begins with the SOI marker bytes 0xFF 0xD8
+ if (data.byteLength < 2 ||
+ data.getUint8(0) !== 0xFF ||
+ data.getUint8(1) !== 0xD8) {
+ metadataError('Not a JPEG file');
+ return;
+ }
+
+ // Now start reading JPEG segments
+ // getSegment() and segmentHandler() are defined below.
+ getSegment(data, 2, segmentHandler);
+ });
+
+ // Read the JPEG segment at the specified offset and
+ // pass it to the callback function.
+ // Offset is relative to the current data offsets.
+ // We assume that data has enough data in it that we can
+ // determine the size of the segment, and we guarantee that
+ // we read extra bytes so the next call works
+ function getSegment(data, offset, callback) {
+ try {
+ var header = data.getUint8(offset);
+ if (header !== 0xFF) {
+ metadataError('Malformed JPEG file: bad segment header');
+ return;
+ }
+
+ var type = data.getUint8(offset + 1);
+ // The segment length field does not count the 2-byte marker, so add 2
+ var size = data.getUint16(offset + 2) + 2;
+
+ // the absolute position of the segment
+ var start = data.sliceOffset + data.viewOffset + offset;
+ // If this isn't the last segment in the file, add 4 bytes
+ // so we can read the size of the next segment
+ var isLast = (start + size >= file.size);
+ var length = isLast ? size : size + 4;
+
+ data.getMore(start, length,
+ function(data) {
+ callback(type, size, data, isLast);
+ });
+ }
+ catch (e) {
+ metadataError(e.toString() + '\n' + e.stack);
+ }
+ }
+
+ // This is a callback function for getNextSegment that handles the
+ // various types of segments we expect to see in a jpeg file
+ function segmentHandler(type, size, data, isLastSegment) {
+ try {
+ switch (type) {
+ case 0xC0: // Some actual image data, including image dimensions
+ case 0xC1: // (these are start-of-frame markers)
+ case 0xC2:
+ case 0xC3:
+ // Get image dimensions
+ metadata.height = data.getUint16(5);
+ metadata.width = data.getUint16(7);
+
+ // We're done. All the EXIF data will come before this segment
+ // So call the callback
+ metadataCallback(metadata);
+ break;
+
+ case 0xE1: // APP1 segment. Probably holds EXIF metadata
+ parseAPP1(data);
+ /* fallthrough */
+
+ default:
+ // A segment we don't care about, so just go on and read the next one
+ if (isLastSegment) {
+ metadataError('unexpected end of JPEG file');
+ return;
+ }
+ getSegment(data, size, segmentHandler);
+ }
+ }
+ catch (e) {
+ metadataError(e.toString() + '\n' + e.stack);
+ }
+ }
+
+ // If this APP1 segment holds EXIF data, parse it and record the byte
+ // range of the embedded thumbnail image (if any) in metadata.preview.
+ function parseAPP1(data) {
+ if (data.getUint32(4, false) === 0x45786966) { // "Exif"
+ var exif = parseEXIFData(data);
+
+ if (exif.THUMBNAIL && exif.THUMBNAILLENGTH) {
+ // The thumbnail offset is relative to the TIFF header at byte 10
+ var start = data.sliceOffset + data.viewOffset + 10 + exif.THUMBNAIL;
+ metadata.preview = {
+ start: start,
+ end: start + exif.THUMBNAILLENGTH
+ };
+ }
+ }
+ }
+
+ // Parse an EXIF segment from a JPEG file and return an object
+ // of metadata attributes. The argument must be a DataView object
+ // Note: offsets inside the EXIF data are relative to the TIFF header,
+ // which begins at byte 10 of the segment; hence the +10 adjustments below.
+ function parseEXIFData(data) {
+ var exif = {};
+
+ var byteorder = data.getUint8(10);
+ if (byteorder === 0x4D) { // big endian
+ byteorder = false;
+ } else if (byteorder === 0x49) { // little endian
+ byteorder = true;
+ } else {
+ throw Error('invalid byteorder in EXIF segment');
+ }
+
+ if (data.getUint16(12, byteorder) !== 42) { // magic number
+ throw Error('bad magic number in EXIF segment');
+ }
+
+ var offset = data.getUint32(14, byteorder);
+
+ /*
+ * This is how we would parse all EXIF metadata more generally.
+ * I'm leaving this code in as a comment in case we need other EXIF
+ * data in the future.
+ *
+ parseIFD(data, offset + 10, byteorder, exif);
+
+ if (exif.EXIFIFD) {
+ parseIFD(data, exif.EXIFIFD + 10, byteorder, exif);
+ delete exif.EXIFIFD;
+ }
+
+ if (exif.GPSIFD) {
+ parseIFD(data, exif.GPSIFD + 10, byteorder, exif);
+ delete exif.GPSIFD;
+ }
+ */
+
+ // Instead of a general purpose EXIF parse, we're going to drill
+ // down directly to the thumbnail image.
+ // We're in IFD0 here. We want the offset of IFD1, which is the
+ // 32-bit value stored right after IFD0's array of 12-byte entries.
+ var ifd0entries = data.getUint16(offset + 10, byteorder);
+ var ifd1 = data.getUint32(offset + 12 + 12 * ifd0entries, byteorder);
+ // If there is an offset for IFD1, parse that
+ if (ifd1 !== 0)
+ parseIFD(data, ifd1 + 10, byteorder, exif, true);
+
+ return exif;
+ }
+
+ // Parse one Image File Directory: a 2-byte entry count, that many
+ // 12-byte entries, then a 4-byte offset to the next IFD (0 if none).
+ function parseIFD(data, offset, byteorder, exif, onlyParseOne) {
+ var numentries = data.getUint16(offset, byteorder);
+ for (var i = 0; i < numentries; i++) {
+ parseEntry(data, offset + 2 + 12 * i, byteorder, exif);
+ }
+
+ if (onlyParseOne)
+ return;
+
+ // Sanity-check the next-IFD offset against the file size before recursing
+ var next = data.getUint32(offset + 2 + 12 * numentries, byteorder);
+ if (next !== 0 && next < file.size) {
+ parseIFD(data, next + 10, byteorder, exif);
+ }
+ }
+
+ // size, in bytes, of each TIFF data type
+ var typesize = [
+ 0, // Unused
+ 1, // BYTE
+ 1, // ASCII
+ 2, // SHORT
+ 4, // LONG
+ 8, // RATIONAL
+ 1, // SBYTE
+ 1, // UNDEFINED
+ 2, // SSHORT
+ 4, // SLONG
+ 8, // SRATIONAL
+ 4, // FLOAT
+ 8 // DOUBLE
+ ];
+
+ // This object maps EXIF tag numbers to their names.
+ // Only list the ones we want to bother parsing and returning.
+ // All others will be ignored.
+ var tagnames = {
+ /*
+ * We don't currently use any of these EXIF tags for anything.
+ *
+ *
+ '256': 'ImageWidth',
+ '257': 'ImageHeight',
+ '40962': 'PixelXDimension',
+ '40963': 'PixelYDimension',
+ '306': 'DateTime',
+ '315': 'Artist',
+ '33432': 'Copyright',
+ '36867': 'DateTimeOriginal',
+ '33434': 'ExposureTime',
+ '33437': 'FNumber',
+ '34850': 'ExposureProgram',
+ '34867': 'ISOSpeed',
+ '37377': 'ShutterSpeedValue',
+ '37378': 'ApertureValue',
+ '37379': 'BrightnessValue',
+ '37380': 'ExposureBiasValue',
+ '37382': 'SubjectDistance',
+ '37383': 'MeteringMode',
+ '37384': 'LightSource',
+ '37385': 'Flash',
+ '37386': 'FocalLength',
+ '41986': 'ExposureMode',
+ '41987': 'WhiteBalance',
+ '41991': 'GainControl',
+ '41992': 'Contrast',
+ '41993': 'Saturation',
+ '41994': 'Sharpness',
+ // These are special tags that we handle internally
+ '34665': 'EXIFIFD', // Offset of EXIF data
+ '34853': 'GPSIFD', // Offset of GPS data
+ */
+ '513': 'THUMBNAIL', // Offset of thumbnail
+ '514': 'THUMBNAILLENGTH' // Length of thumbnail
+ };
+
+ // Parse a single 12-byte IFD entry (tag, type, count, value) and,
+ // if it is a tag we care about, record its value in the exif object.
+ function parseEntry(data, offset, byteorder, exif) {
+ var tag = data.getUint16(offset, byteorder);
+ var tagname = tagnames[tag];
+
+ if (!tagname) // If we don't know about this tag type, skip it
+ return;
+
+ var type = data.getUint16(offset + 2, byteorder);
+ var count = data.getUint32(offset + 4, byteorder);
+
+ // Values that fit in 4 bytes are stored inline in the entry itself;
+ // larger values are stored elsewhere, at the offset in bytes 8-11
+ var total = count * typesize[type];
+ var valueOffset = total <= 4 ? offset + 8 :
+ data.getUint32(offset + 8, byteorder);
+ exif[tagname] = parseValue(data, valueOffset, type, count, byteorder);
+ }
+
+ // Parse an entry's value: a string for ASCII, a single value for
+ // count === 1, or an array of values otherwise.
+ function parseValue(data, offset, type, count, byteorder) {
+ if (type === 2) { // ASCII string
+ var codes = [];
+ // count - 1 skips the trailing NUL terminator
+ for (var i = 0; i < count - 1; i++) {
+ codes[i] = data.getUint8(offset + i);
+ }
+ return String.fromCharCode.apply(String, codes);
+ } else {
+ if (count == 1) {
+ return parseOneValue(data, offset, type, byteorder);
+ } else {
+ var values = [];
+ var size = typesize[type];
+ for (var i = 0; i < count; i++) {
+ values[i] = parseOneValue(data, offset + size * i, type, byteorder);
+ }
+ return values;
+ }
+ }
+ }
+
+ // Read a single value of the given TIFF data type at the given offset.
+ function parseOneValue(data, offset, type, byteorder) {
+ switch (type) {
+ case 1: // BYTE
+ case 7: // UNDEFINED
+ return data.getUint8(offset);
+ case 2: // ASCII
+ // This case is handled in parseValue
+ return null;
+ case 3: // SHORT
+ return data.getUint16(offset, byteorder);
+ case 4: // LONG
+ return data.getUint32(offset, byteorder);
+ case 5: // RATIONAL
+ return data.getUint32(offset, byteorder) /
+ data.getUint32(offset + 4, byteorder);
+ case 6: // SBYTE
+ return data.getInt8(offset);
+ case 8: // SSHORT
+ return data.getInt16(offset, byteorder);
+ case 9: // SLONG
+ return data.getInt32(offset, byteorder);
+ case 10: // SRATIONAL
+ return data.getInt32(offset, byteorder) /
+ data.getInt32(offset + 4, byteorder);
+ case 11: // FLOAT
+ return data.getFloat32(offset, byteorder);
+ case 12: // DOUBLE
+ return data.getFloat64(offset, byteorder);
+ }
+ return null;
+ }
+}
diff --git a/shared/js/media/media_frame.js b/shared/js/media/media_frame.js
new file mode 100644
index 0000000..aaf8fbe
--- /dev/null
+++ b/shared/js/media/media_frame.js
@@ -0,0 +1,537 @@
+/*
+ * media_frame.js:
+ *
+ * A MediaFrame displays a photo or a video. The gallery app uses
+ * three side by side to support smooth panning from one item to the
+ * next. The Camera app uses one for image and video preview. The
+ * Gallery app's open activity uses one of these to display the opened
+ * item.
+ *
+ * MediaFrames have different behavior depending on whether they display
+ * images or videos. Photo frames allow the user to zoom and pan on the photo.
+ * Video frames allow the user to play and pause but don't allow zooming.
+ *
+ * When a frame is displaying a video, it handles mouse events.
+ * When displaying a picture, however, it expects the client to handle events
+ * and call the pan() and zoom() methods.
+ *
+ * The pan() method is a little unusual. It "uses" as much of the pan
+ * event as it can, and returns a number indicating how much of the
+ * horizontal motion it did not use. The gallery uses this returned
+ * value for transitioning between frames. If a frame displays a
+ * photo that is not zoomed in at all, then it can't use any of the
+ * pan, and returns the full amount which the gallery app turns into a
+ * panning motion between frames. But if the photo is zoomed in, then
+ * the MediaFrame will just move the photo within itself, if it can, and
+ * return 0.
+ *
+ * Much of the code in this file used to be part of the PhotoState class.
+ */
+function MediaFrame(container, includeVideo) {
+ if (typeof container === 'string')
+ container = document.getElementById(container);
+ this.container = container;
+ this.image = document.createElement('img');
+ this.container.appendChild(this.image);
+ this.image.style.display = 'none';
+ // Video support is optional: pass includeVideo === false to omit the
+ // VideoPlayer (e.g. for frames that will only ever show images)
+ if (includeVideo !== false) {
+ this.video = new VideoPlayer(container);
+ this.video.hide();
+ }
+ this.displayingVideo = false;
+ this.displayingImage = false;
+ this.blob = null;
+ this.url = null;
+}
+
+MediaFrame.prototype.displayImage = function displayImage(blob, width, height,
+ preview)
+{
+ this.clear(); // Reset everything
+
+ // Remember what we're displaying
+ this.blob = blob;
+ this.fullsizeWidth = width;
+ this.fullsizeHeight = height;
+ // preview, if specified, holds {start, end, width, height}: the byte
+ // range of an embedded preview image within the blob and its dimensions
+ this.preview = preview;
+
+ // Keep track of what kind of content we have
+ this.displayingImage = true;
+
+ // Make the image element visible
+ this.image.style.display = 'block';
+
+ // If the preview is at least as big as the screen, display that.
+ // Otherwise, display the full-size image.
+ if (preview &&
+ (preview.width >= window.innerWidth ||
+ preview.height >= window.innerHeight)) {
+ this.displayingPreview = true;
+ this._displayImage(blob.slice(preview.start, preview.end, 'image/jpeg'),
+ preview.width, preview.height);
+ }
+ else {
+ this._displayImage(blob, width, height);
+ }
+};
+
+// A utility function we use to display the full-size image or the
+// preview The last two arguments are optimizations used by
+// switchToFullSizeImage() to make the transition from preview to
+// fullscreen smooth. If waitForPaint is true, then this function will
+// keep the old image on the screen until the new image is painted
+// over it so we (hopefully) don't end up with a blank screen or
+// flash. And if callback is specified, it will call the callback
+// when the new image is visible on the screen. If either of those
+// arguments are specified, the width and height must be specified.
+MediaFrame.prototype._displayImage = function _displayImage(blob, width, height,
+ waitForPaint,
+ callback)
+{
+ var self = this;
+ var oldImage;
+
+ // Create a URL for the blob (or preview blob), releasing any
+ // previously-created URL first so we don't leak it
+ if (this.url)
+ URL.revokeObjectURL(this.url);
+ this.url = URL.createObjectURL(blob);
+
+ // If we don't know the width or the height yet, then set up an event
+ // handler to set the image size and position once it is loaded.
+ // This happens for the open activity.
+ if (!width || !height) {
+ this.image.src = this.url;
+ this.image.addEventListener('load', function onload() {
+ this.removeEventListener('load', onload);
+ self.itemWidth = this.naturalWidth;
+ self.itemHeight = this.naturalHeight;
+ self.computeFit();
+ self.setPosition();
+ });
+ return;
+ }
+
+ // Otherwise, we have a width and height, and we may also have to handle
+ // the waitForPaint and callback arguments
+
+ // If waitForPaint is set, then keep the old image around and displayed
+ // until the new image is loaded.
+ if (waitForPaint) {
+ // Remember the old image
+ oldImage = this.image;
+
+ // Create a new element to load the new image into.
+ // Insert it into the frame, but don't remove the old image yet
+ this.image = document.createElement('img');
+ this.container.appendChild(this.image);
+
+ // Change the old image slightly to give the user some immediate
+ // feedback that something is happening
+ oldImage.classList.add('swapping');
+ }
+
+ // Start loading the new image
+ this.image.src = this.url;
+ // Update image size and position
+ this.itemWidth = width;
+ this.itemHeight = height;
+ this.computeFit();
+ this.setPosition();
+
+ // If waitForPaint is set, or if there is a callback, then we need to
+ // run some code when the new image has loaded and been painted.
+ if (waitForPaint || callback) {
+ whenLoadedAndVisible(this.image, 1000, function() {
+ if (waitForPaint) {
+ // Remove the old image now that the new one is visible
+ self.container.removeChild(oldImage);
+ oldImage.src = null;
+ }
+
+ if (callback) {
+ // Let the caller know that the new image is ready, but
+ // wait for an animation frame before doing it. The point of
+ // using mozRequestAnimationFrame here is that it gives the
+ // removeChild() call above a chance to take effect.
+ mozRequestAnimationFrame(function() {
+ callback();
+ });
+ }
+ });
+ }
+
+ // Wait until the load event on the image fires, and then wait for a
+ // MozAfterPaint event after that, and then, finally, invoke the
+ // callback. Don't wait more than the timeout, though: we need to
+ // ensure that we always call the callback even if the image does not
+ // load or if we don't get a MozAfterPaint event.
+ function whenLoadedAndVisible(image, timeout, callback) {
+ // The called flag prevents a double invocation if the paint
+ // event arrives after the timeout has already fired the callback
+ var called = false;
+ var timer = setTimeout(function()
+ {
+ called = true;
+ callback();
+ },
+ timeout || 1000);
+
+ image.addEventListener('load', function onload() {
+ image.removeEventListener('load', onload);
+ window.addEventListener('MozAfterPaint', function onpaint() {
+ window.removeEventListener('MozAfterPaint', onpaint);
+ clearTimeout(timer);
+ if (!called) {
+ callback();
+ }
+ });
+ });
+ }
+};
+
+// Swap the preview image for the full-size image, keeping the preview
+// on screen until the full-size image has painted (waitForPaint=true).
+// The optional callback fires once the full-size image is visible.
+MediaFrame.prototype._switchToFullSizeImage = function _switchToFull(cb) {
+ if (this.displayingImage && this.displayingPreview) {
+ this.displayingPreview = false;
+ this._displayImage(this.blob, this.fullsizeWidth, this.fullsizeHeight,
+ true, cb);
+ }
+};
+
+// Swap the full-size image for the embedded preview image, sliced
+// out of the original blob using the byte range saved by displayImage()
+MediaFrame.prototype._switchToPreviewImage = function _switchToPreview() {
+ if (this.displayingImage && !this.displayingPreview) {
+ this.displayingPreview = true;
+ this._displayImage(this.blob.slice(this.preview.start,
+ this.preview.end,
+ 'image/jpeg'),
+ this.preview.width,
+ this.preview.height);
+ }
+};
+
+MediaFrame.prototype.displayVideo = function displayVideo(blob, width, height,
+ rotation)
+{
+ // If this frame was created without video support, do nothing
+ if (!this.video)
+ return;
+
+ this.clear(); // reset everything
+
+ // Keep track of what kind of content we have
+ this.displayingVideo = true;
+
+ // Show the video player and hide the image
+ this.video.show();
+
+ // Remember the blob
+ this.blob = blob;
+
+ // Get a new URL for this blob
+ this.url = URL.createObjectURL(blob);
+
+ // Display it in the video element.
+ // The VideoPlayer class takes care of positioning itself, so we
+ // don't have to do anything here with computeFit() or setPosition()
+ this.video.load(this.url, rotation || 0);
+};
+
+// Reset the frame state, release any urls and and hide everything
+MediaFrame.prototype.clear = function clear() {
+ // Reset the saved state
+ this.displayingImage = false;
+ this.displayingPreview = false;
+ this.displayingVideo = false;
+ this.itemWidth = this.itemHeight = null;
+ this.blob = null;
+ this.fullsizeWidth = this.fullsizeHeight = null;
+ this.preview = null;
+ this.fit = null;
+ // Revoke the object URL so the underlying blob can be released
+ if (this.url) {
+ URL.revokeObjectURL(this.url);
+ this.url = null;
+ }
+
+ // Hide the image
+ this.image.style.display = 'none';
+ this.image.src = null; // XXX: use about:blank or '' here?
+
+ // Hide the video player
+ if (this.video) {
+ this.video.hide();
+
+ // If the video player has its src set, clear it and release resources
+ // We do this in a roundabout way to avoid getting a warning in the console
+ if (this.video.player.src) {
+ this.video.player.removeAttribute('src');
+ this.video.player.load();
+ }
+ }
+};
+
+// Set the item's position based on this.fit
+// The VideoPlayer object fits itself to its container, and it
+// can't be zoomed or panned, so we only need to do this for images
+MediaFrame.prototype.setPosition = function setPosition() {
+ if (!this.fit || !this.displayingImage)
+ return;
+
+ // Apply the current fit as a CSS transform: translate to (left, top),
+ // then scale about the element's origin
+ this.image.style.transform =
+ 'translate(' + this.fit.left + 'px,' + this.fit.top + 'px) ' +
+ 'scale(' + this.fit.scale + ')';
+};
+
+// Compute this.fit: the scale and position that letterboxes the image
+// within the container, centered, without ever scaling above 1 (i.e.
+// never enlarging the image beyond its native resolution)
+MediaFrame.prototype.computeFit = function computeFit() {
+ if (!this.displayingImage)
+ return;
+ this.viewportWidth = this.container.offsetWidth;
+ this.viewportHeight = this.container.offsetHeight;
+
+ var scalex = this.viewportWidth / this.itemWidth;
+ var scaley = this.viewportHeight / this.itemHeight;
+ // Use the smaller of the two scales so the whole image fits, and
+ // cap at 1 so small images are not blown up
+ var scale = Math.min(Math.min(scalex, scaley), 1);
+
+ // Set the image size and position
+ var width = Math.floor(this.itemWidth * scale);
+ var height = Math.floor(this.itemHeight * scale);
+
+ this.fit = {
+ width: width,
+ height: height,
+ left: Math.floor((this.viewportWidth - width) / 2),
+ top: Math.floor((this.viewportHeight - height) / 2),
+ scale: scale,
+ baseScale: scale // baseScale records the unzoomed fit-to-screen scale
+ };
+};
+
+// Discard any zoom/pan state and re-fit the current item to the screen,
+// switching between preview and full-size image if the screen size warrants
+MediaFrame.prototype.reset = function reset() {
+ // If we're not displaying the preview image, but we have one,
+ // and it is the right size, then switch to it
+ if (this.displayingImage && !this.displayingPreview && this.preview &&
+ (this.preview.width >= window.innerWidth ||
+ this.preview.height >= window.innerHeight)) {
+ this._switchToPreviewImage(); // resets image size and position
+ return;
+ }
+
+ // Otherwise, if we are displaying the preview image but it is no
+ // longer big enough for the screen (such as after a resize event)
+ // then switch to full size. This case should be rare.
+ if (this.displayingImage && this.displayingPreview &&
+ this.preview.width < window.innerWidth &&
+ this.preview.height < window.innerHeight) {
+ this._switchToFullSizeImage(); // resets image size and position
+ return;
+ }
+
+ // Otherwise, just resize and position the item we're already displaying
+ this.computeFit();
+ this.setPosition();
+};
+
+// We call this from the resize handler when the user rotates the
+// screen or when going into or out of fullscreen mode. If the user
+// has not zoomed in, then we just fit the image to the new size (same
+// as reset). But if the user has zoomed in (and we need to stay
+// zoomed for the new size) then we adjust the fit properties so that
+// the pixel that was at the center of the screen before remains at
+// the center now, or as close as possible
+MediaFrame.prototype.resize = function resize() {
+ var oldWidth = this.viewportWidth;
+ var oldHeight = this.viewportHeight;
+ var newWidth = this.container.offsetWidth;
+ var newHeight = this.container.offsetHeight;
+
+ var oldfit = this.fit; // The current image fit
+
+ // If this is triggered by a resize event before the frame has computed
+ // its size, then there is nothing we can do yet.
+ if (!oldfit)
+ return;
+
+ // Compute the new fit.
+ // This updates the viewportWidth, viewportHeight and fit properties
+ this.computeFit();
+
+ // This is how the image would fit at the new screen size
+ var newfit = this.fit;
+
+ // If no zooming has been done, then a resize is just a reset.
+ // The same is true if the new fit base scale is greater than the
+ // old scale.
+ if (oldfit.scale === oldfit.baseScale || newfit.baseScale > oldfit.scale) {
+ this.reset();
+ return;
+ }
+
+ // Otherwise, just adjust the old fit as needed and use that so we
+ // retain the zoom factor. Shifting left/top by half the size change
+ // keeps the previously-centered pixel at the center of the screen.
+ oldfit.left += (newWidth - oldWidth) / 2;
+ oldfit.top += (newHeight - oldHeight) / 2;
+ oldfit.baseScale = newfit.baseScale;
+ this.fit = oldfit;
+
+ // Reposition this image without resetting the zoom
+ this.setPosition();
+};
+
+// Zoom in by the specified factor, adjusting the pan amount so that
+// the image pixels at (centerX, centerY) remain at that position.
+// Assume that zoom gestures can't be done in the middle of swipes, so
+// if we're calling zoom, then the swipe property will be 0.
+// If time is specified and non-zero, then we set a CSS transition
+// to animate the zoom.
+MediaFrame.prototype.zoom = function zoom(scale, centerX, centerY, time) {
+ // Ignore zooms if we're not displaying an image
+ if (!this.displayingImage)
+ return;
+
+ // If we were displaying the preview, switch to the full-size image
+ if (this.displayingPreview) {
+ // If we want to animate the zoom, then switch images, wait
+ // for the new one to load, and call this function again to process
+ // the zoom and animation. But if we're not animating, then just
+ // switch images and continue.
+ if (time) { // if animating
+ var self = this;
+ this._switchToFullSizeImage(function() {
+ self.zoom(scale, centerX, centerY, time);
+ });
+ return;
+ }
+ else {
+ // The switching flag suppresses the setPosition() call below
+ // until the image swap has completed
+ this.switching = true;
+ var self = this;
+ this._switchToFullSizeImage(function() { self.switching = false; });
+ }
+ }
+
+ // Never zoom in farther than the native resolution of the image
+ if (this.fit.scale * scale > 1) {
+ scale = 1 / (this.fit.scale);
+ }
+ // And never zoom out to make the image smaller than it would normally be
+ else if (this.fit.scale * scale < this.fit.baseScale) {
+ scale = this.fit.baseScale / this.fit.scale;
+ }
+
+ this.fit.scale = this.fit.scale * scale;
+
+ // Change the size of the photo
+ this.fit.width = Math.floor(this.itemWidth * this.fit.scale);
+ this.fit.height = Math.floor(this.itemHeight * this.fit.scale);
+
+ // centerX and centerY are in viewport coordinates.
+ // These are the photo coordinates displayed at that point in the viewport
+ var photoX = centerX - this.fit.left;
+ var photoY = centerY - this.fit.top;
+
+ // After zooming, these are the new photo coordinates.
+ // Note we just use the relative scale amount here, not this.fit.scale
+ // NOTE(review): photoX/photoY are re-declared with var here; harmless
+ // in JS (var is function-scoped) but a linter would flag it
+ var photoX = Math.floor(photoX * scale);
+ var photoY = Math.floor(photoY * scale);
+
+ // To keep that point still, here are the new left and top values we need
+ this.fit.left = centerX - photoX;
+ this.fit.top = centerY - photoY;
+
+ // Now make sure we didn't pan too much: If the image fits on the
+ // screen, center it. If the image is bigger than the screen, then
+ // make sure we haven't gone past any edges
+ if (this.fit.width <= this.viewportWidth) {
+ this.fit.left = (this.viewportWidth - this.fit.width) / 2;
+ }
+ else {
+ // Don't let the left of the photo be past the left edge of the screen
+ if (this.fit.left > 0)
+ this.fit.left = 0;
+
+ // Right of photo shouldn't be to the left of the right edge
+ if (this.fit.left + this.fit.width < this.viewportWidth) {
+ this.fit.left = this.viewportWidth - this.fit.width;
+ }
+ }
+
+ if (this.fit.height <= this.viewportHeight) {
+ this.fit.top = (this.viewportHeight - this.fit.height) / 2;
+ }
+ else {
+ // Don't let the top of the photo be below the top of the screen
+ if (this.fit.top > 0)
+ this.fit.top = 0;
+
+ // bottom of photo shouldn't be above the bottom of screen
+ if (this.fit.top + this.fit.height < this.viewportHeight) {
+ this.fit.top = this.viewportHeight - this.fit.height;
+ }
+ }
+
+ // While an image swap is in progress, skip repositioning; the swap's
+ // completion will pick up the updated fit
+ if (this.switching)
+ return;
+
+ // If a time was specified, set up a transition so that the
+ // call to setPosition() below is animated
+ if (time) {
+ // If a time was specified, animate the transformation
+ this.image.style.transition = 'transform ' + time + 'ms ease';
+ var self = this;
+ this.image.addEventListener('transitionend', function done(e) {
+ self.image.removeEventListener('transitionend', done);
+ self.image.style.transition = null;
+ });
+ }
+
+ this.setPosition();
+};
+
+// If the item being displayed is larger than the container, pan it by
+// the specified amounts. Return the "unused" dx amount for the gallery app
+// to use for sideways swiping
+MediaFrame.prototype.pan = function(dx, dy) {
+ // We can only pan images, so return the entire dx amount
+ if (!this.displayingImage) {
+ return dx;
+ }
+
+ // Handle panning in the y direction first, since it is easier.
+ // Don't pan in the y direction if we already fit on the screen
+ if (this.fit.height > this.viewportHeight) {
+ this.fit.top += dy;
+
+ // Don't let the top of the photo be below the top of the screen
+ if (this.fit.top > 0)
+ this.fit.top = 0;
+
+ // bottom of photo shouldn't be above the bottom of screen
+ if (this.fit.top + this.fit.height < this.viewportHeight)
+ this.fit.top = this.viewportHeight - this.fit.height;
+ }
+
+ // Now handle the X dimension. If we've already panned as far as we can
+ // within the image (or if it isn't zoomed in) then return the "extra"
+ // unused dx amount to the caller so that the caller can use them to
+ // shift the frame left or right.
+ var extra = 0;
+
+ if (this.fit.width <= this.viewportWidth) {
+ // In this case, the photo isn't zoomed in, so it is all extra
+ extra = dx;
+ }
+ else {
+ this.fit.left += dx;
+
+ // If this would take the left edge of the photo past the
+ // left edge of the screen, then some of the motion is extra
+ if (this.fit.left > 0) {
+ extra = this.fit.left;
+ this.fit.left = 0;
+ }
+
+ // Or, if this would take the right edge of the photo past the
+ // right edge of the screen, then we've got extra.
+ // (extra is negative in this case: leftover leftward motion)
+ if (this.fit.left + this.fit.width < this.viewportWidth) {
+ extra = this.fit.left + this.fit.width - this.viewportWidth;
+ this.fit.left = this.viewportWidth - this.fit.width;
+ }
+ }
+
+ this.setPosition();
+ return extra;
+};
diff --git a/shared/js/media/video_player.js b/shared/js/media/video_player.js
new file mode 100644
index 0000000..c79bb8b
--- /dev/null
+++ b/shared/js/media/video_player.js
@@ -0,0 +1,313 @@
+'use strict';
+
+// Create a <video> element and <div> containing a video player UI and
+// add them to the specified container. The UI requires a GestureDetector
+// to be running for the container or one of its ancestors.
+function VideoPlayer(container) {
+ if (typeof container === 'string')
+ container = document.getElementById(container);
+
+ // Utility: create a child element of the given type, optionally with
+ // the given class attribute, and append it to parent.
+ function newelt(parent, type, classes) {
+ var e = document.createElement(type);
+ if (classes)
+ e.className = classes;
+ parent.appendChild(e);
+ return e;
+ }
+
+ // This copies the controls structure of the Video app
+ var player = newelt(container, 'video', 'videoPlayer');
+ var controls = newelt(container, 'div', 'videoPlayerControls');
+ var playbutton = newelt(controls, 'button', 'videoPlayerPlayButton');
+ var footer = newelt(controls, 'div', 'videoPlayerFooter hidden');
+ var pausebutton = newelt(footer, 'button', 'videoPlayerPauseButton');
+ var slider = newelt(footer, 'div', 'videoPlayerSlider');
+ var elapsedText = newelt(slider, 'span', 'videoPlayerElapsedText');
+ var progress = newelt(slider, 'div', 'videoPlayerProgress');
+ var backgroundBar = newelt(progress, 'div', 'videoPlayerBackgroundBar');
+ var elapsedBar = newelt(progress, 'div', 'videoPlayerElapsedBar');
+ var playHead = newelt(progress, 'div', 'videoPlayerPlayHead');
+ var durationText = newelt(slider, 'span', 'videoPlayerDurationText');
+
+ // Expose the <video> and the controls container so that callers
+ // (and the prototype hide()/show() methods) can reach them.
+ this.player = player;
+ this.controls = controls;
+
+ // Only fetch metadata up front; the media itself loads when played.
+ player.preload = 'metadata';
+
+ var self = this;
+ var controlsHidden = false;
+ var dragging = false;
+ var pausedBeforeDragging = false;
+ var screenLock; // keep the screen on when playing
+ var endedTimer;
+ var rotation; // Do we have to rotate the video? Set by load()
+
+ // Point the player at a new video. rotate is the clockwise rotation
+ // in degrees (0, 90, 180 or 270) needed to display it upright;
+ // it defaults to 0.
+ this.load = function(url, rotate) {
+ rotation = rotate || 0;
+ // mozAudioChannelType is a Firefox OS-specific audio channel hint.
+ player.mozAudioChannelType = 'content';
+ player.src = url;
+ };
+
+ // Call this when the container size changes
+ this.setPlayerSize = setPlayerSize;
+
+ // Set up everything for the initial paused state
+ this.pause = function pause() {
+ // Pause video playback
+ player.pause();
+
+ // Hide the pause button and slider
+ footer.classList.add('hidden');
+ controlsHidden = true;
+
+ // Show the big central play button
+ playbutton.classList.remove('hidden');
+
+ // Unlock the screen so it can sleep on idle
+ if (screenLock) {
+ screenLock.unlock();
+ screenLock = null;
+ }
+
+ // Notify the client, if it registered a callback
+ if (this.onpaused)
+ this.onpaused();
+ };
+
+ // Set up the playing state
+ this.play = function play() {
+ // If we're at the end of the video, restart at the beginning.
+ // This seems to happen automatically when an 'ended' event was fired.
+ // But some media types don't generate the ended event and don't
+ // automatically go back to the start.
+ if (player.currentTime >= player.duration - 0.5)
+ player.currentTime = 0;
+
+ // Start playing the video
+ player.play();
+
+ // Hide the play button
+ playbutton.classList.add('hidden');
+
+ // Show the controls
+ footer.classList.remove('hidden');
+ controlsHidden = false;
+
+ // Don't let the screen go to sleep
+ if (!screenLock)
+ screenLock = navigator.requestWakeLock('screen');
+
+ // Notify the client, if it registered a callback
+ if (this.onplaying)
+ this.onplaying();
+ };
+
+ // Hook up the play button
+ playbutton.addEventListener('tap', function(e) {
+ // If we're paused, go to the play state
+ if (player.paused) {
+ self.play();
+ }
+ e.stopPropagation();
+ });
+
+ // Hook up the pause button
+ pausebutton.addEventListener('tap', function(e) {
+ self.pause();
+ e.stopPropagation();
+ });
+
+ // A click anywhere else on the screen should toggle the footer
+ // But only when the video is playing.
+ controls.addEventListener('tap', function(e) {
+ if (e.target === controls && !player.paused) {
+ footer.classList.toggle('hidden');
+ controlsHidden = !controlsHidden;
+ }
+ });
+
+ // Set the video size and duration when we get metadata
+ player.onloadedmetadata = function() {
+ durationText.textContent = formatTime(player.duration);
+ setPlayerSize();
+ // start off in the paused state
+ self.pause();
+ };
+
+ // Also resize the player on a resize event
+ // (when the user rotates the phone)
+ window.addEventListener('resize', function() {
+ setPlayerSize();
+ });
+
+ // If we reach the end of a video, reset to beginning
+ // This isn't always reliable, so we also set a timer in updateTime()
+ player.onended = ended;
+
+ // Shared end-of-playback handler, used by both the 'ended' event
+ // and the fallback timer set in updateTime().
+ function ended() {
+ if (dragging)
+ return;
+ if (endedTimer) {
+ clearTimeout(endedTimer);
+ endedTimer = null;
+ }
+ self.pause();
+ };
+
+ // Update the slider and elapsed time as the video plays
+ player.ontimeupdate = updateTime;
+
+ // Set the elapsed time and slider position
+ function updateTime() {
+ if (!controlsHidden) {
+ elapsedText.textContent = formatTime(player.currentTime);
+
+ // We can't update a progress bar if we don't know how long
+ // the video is. It is kind of a bug that the <video> element
+ // can't figure this out for ogv videos.
+ if (player.duration === Infinity || player.duration === 0)
+ return;
+
+ var percent = (player.currentTime / player.duration) * 100 + '%';
+ elapsedBar.style.width = percent;
+ playHead.style.left = percent;
+ }
+
+ // Since we don't always get reliable 'ended' events, see if
+ // we've reached the end this way.
+ // See: https://bugzilla.mozilla.org/show_bug.cgi?id=783512
+ // If we're within 1 second of the end of the video, register
+ // a timeout half a second after we'd expect an ended event.
+ if (!endedTimer) {
+ if (!dragging && player.currentTime >= player.duration - 1) {
+ var timeUntilEnd = (player.duration - player.currentTime + .5);
+ endedTimer = setTimeout(ended, timeUntilEnd * 1000);
+ }
+ }
+ else if (dragging && player.currentTime < player.duration - 1) {
+ // If there is a timer set and we drag away from the end, cancel the timer
+ clearTimeout(endedTimer);
+ endedTimer = null;
+ }
+ }
+
+ // Make the video fit the container
+ function setPlayerSize() {
+ var containerWidth = container.clientWidth;
+ var containerHeight = container.clientHeight;
+
+ // Don't do anything if we don't know our size.
+ // This could happen if we get a resize event before our metadata loads
+ if (!player.videoWidth || !player.videoHeight)
+ return;
+
+ var width, height; // The size the video will appear, after rotation
+ switch (rotation) {
+ case 0:
+ case 180:
+ width = player.videoWidth;
+ height = player.videoHeight;
+ break;
+ case 90:
+ case 270:
+ width = player.videoHeight;
+ height = player.videoWidth;
+ }
+
+ // NOTE(review): there is no default case above -- a rotation value
+ // other than 0/90/180/270 leaves width and height undefined and
+ // produces a NaN transform. load() defaults rotation to 0; confirm
+ // callers only ever pass one of these four values.
+ var xscale = containerWidth / width;
+ var yscale = containerHeight / height;
+ var scale = Math.min(xscale, yscale);
+
+ // Scale large videos down, and scale small videos up.
+ // This might reduce image quality for small videos.
+ width *= scale;
+ height *= scale;
+
+ // Center the (rotated, scaled) video within the container.
+ var left = ((containerWidth - width) / 2);
+ var top = ((containerHeight - height) / 2);
+
+ // Build a translate + rotate transform. The translation differs per
+ // rotation because rotation happens around the element's origin, so
+ // we pre-shift by the rotated edge that ends up at the top-left.
+ var transform;
+ switch (rotation) {
+ case 0:
+ transform = 'translate(' + left + 'px,' + top + 'px)';
+ break;
+ case 90:
+ transform =
+ 'translate(' + (left + width) + 'px,' + top + 'px) ' +
+ 'rotate(90deg)';
+ break;
+ case 180:
+ transform =
+ 'translate(' + (left + width) + 'px,' + (top + height) + 'px) ' +
+ 'rotate(180deg)';
+ break;
+ case 270:
+ transform =
+ 'translate(' + left + 'px,' + (top + height) + 'px) ' +
+ 'rotate(270deg)';
+ break;
+ }
+
+ transform += ' scale(' + scale + ')';
+
+ player.style.transform = transform;
+ }
+
+ // handle drags on the time slider
+ slider.addEventListener('pan', function pan(e) {
+ e.stopPropagation();
+ // We can't do anything if we don't know our duration
+ if (player.duration === Infinity)
+ return;
+
+ if (!dragging) { // Do this stuff on the first pan event only
+ dragging = true;
+ pausedBeforeDragging = player.paused;
+ if (!pausedBeforeDragging) {
+ player.pause();
+ }
+ }
+
+ // Map the pointer x coordinate (supplied by GestureDetector in
+ // e.detail.position) to a 0..1 fraction of the slider bar, clamped.
+ var rect = backgroundBar.getBoundingClientRect();
+ var position = (e.detail.position.clientX - rect.left) / rect.width;
+ var pos = Math.min(Math.max(position, 0), 1);
+ player.currentTime = player.duration * pos;
+ updateTime();
+ });
+
+ // A 'swipe' gesture marks the end of a drag: resume playback if the
+ // video was playing before the drag started.
+ slider.addEventListener('swipe', function swipe(e) {
+ e.stopPropagation();
+ dragging = false;
+ if (player.currentTime >= player.duration) {
+ self.pause();
+ } else if (!pausedBeforeDragging) {
+ player.play();
+ }
+ });
+
+ // Format a time in seconds as "MM:SS" for the elapsed/duration labels.
+ function formatTime(time) {
+ function padLeft(num, length) {
+ var r = String(num);
+ while (r.length < length) {
+ r = '0' + r;
+ }
+ return r;
+ }
+
+ time = Math.round(time);
+ var minutes = Math.floor(time / 60);
+ var seconds = time % 60;
+ if (minutes < 60) {
+ return padLeft(minutes, 2) + ':' + padLeft(seconds, 2);
+ }
+ // NOTE(review): durations of an hour or more return '' -- no time
+ // text is displayed for long videos. Confirm this is intentional.
+ return '';
+ }
+}
+
+// Hide the <video> element and its controls (undone by show()).
+VideoPlayer.prototype.hide = function() {
+ this.player.style.display = 'none';
+ this.controls.style.display = 'none';
+};
+
+// Make the <video> element and its controls visible again after hide().
+VideoPlayer.prototype.show = function() {
+ this.player.style.display = 'block';
+ this.controls.style.display = 'block';
+};