First release
This commit is contained in: commit fa6c85266e
2339 changed files with 761050 additions and 0 deletions
node_modules/mux.js/lib/flv/coalesce-stream.js (generated, vendored, normal file, 151 lines added)
@@ -0,0 +1,151 @@
/**
 * mux.js
 *
 * Copyright (c) Brightcove
 * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
 */
'use strict';

var Stream = require('../utils/stream.js');

/**
 * The final stage of the transmuxer that emits the flv tags
 * for audio, video, and metadata. Also translates in time and
 * outputs caption data and id3 cues.
 */
var CoalesceStream = function(options) {
  // Number of Tracks per output segment
  // If greater than 1, we combine multiple
  // tracks into a single segment
  this.numberOfTracks = 0;
  this.metadataStream = options.metadataStream;

  this.videoTags = [];
  this.audioTags = [];
  this.videoTrack = null;
  this.audioTrack = null;
  this.pendingCaptions = [];
  this.pendingMetadata = [];
  this.pendingTracks = 0;
  this.processedTracks = 0;

  CoalesceStream.prototype.init.call(this);
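  // init() comes from Stream (../utils/stream.js), which supplies the event
  // plumbing behind the trigger('data') / trigger('done') calls in flush()
  // below.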

  // Take output from multiple streams and buffer it until flush
  this.push = function(output) {
    // buffer incoming captions until the associated video segment
    // finishes
    if (output.text) {
      return this.pendingCaptions.push(output);
    }
    // buffer incoming id3 tags until the final flush
    if (output.frames) {
      return this.pendingMetadata.push(output);
    }

    if (output.track.type === 'video') {
      this.videoTrack = output.track;
      this.videoTags = output.tags;
      this.pendingTracks++;
    }
    if (output.track.type === 'audio') {
      this.audioTrack = output.track;
      this.audioTags = output.tags;
      this.pendingTracks++;
    }
  };
};

CoalesceStream.prototype = new Stream();
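// Called when an upstream stream flushes; flushSource names the stream that
// triggered it. The combined segment is only emitted once every expected
// track (numberOfTracks) has either delivered tags or been flushed without
// data.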
CoalesceStream.prototype.flush = function(flushSource) {
  var
    id3,
    caption,
    i,
    timelineStartPts,
    event = {
      tags: {},
      captions: [],
      captionStreams: {},
      metadata: []
    };

  if (this.pendingTracks < this.numberOfTracks) {
    if (flushSource !== 'VideoSegmentStream' &&
        flushSource !== 'AudioSegmentStream') {
      // Return because we haven't received a flush from a data-generating
      // portion of the segment (meaning that we have only received meta-data
      // or captions.)
      return;
    } else if (this.pendingTracks === 0) {
      // In the case where we receive a flush without any data having been
      // received we consider it an emitted track for the purposes of coalescing
      // `done` events.
      // We do this for the case where there is an audio and video track in the
      // segment but no audio data. (seen in several playlists with alternate
      // audio tracks and no audio present in the main TS segments.)
      this.processedTracks++;

      if (this.processedTracks < this.numberOfTracks) {
        return;
      }
    }
  }

  this.processedTracks += this.pendingTracks;
  this.pendingTracks = 0;

  if (this.processedTracks < this.numberOfTracks) {
    return;
  }

  if (this.videoTrack) {
    timelineStartPts = this.videoTrack.timelineStartInfo.pts;
  } else if (this.audioTrack) {
    timelineStartPts = this.audioTrack.timelineStartInfo.pts;
  }

  event.tags.videoTags = this.videoTags;
  event.tags.audioTags = this.audioTags;

  // Translate caption PTS times into second offsets into the
  // video timeline for the segment, and add track info
  for (i = 0; i < this.pendingCaptions.length; i++) {
    caption = this.pendingCaptions[i];
    caption.startTime = caption.startPts - timelineStartPts;
    caption.startTime /= 90e3;
    caption.endTime = caption.endPts - timelineStartPts;
    caption.endTime /= 90e3;
    event.captionStreams[caption.stream] = true;
    event.captions.push(caption);
  }

  // Translate ID3 frame PTS times into second offsets into the
  // video timeline for the segment
  for (i = 0; i < this.pendingMetadata.length; i++) {
    id3 = this.pendingMetadata[i];
    id3.cueTime = id3.pts - timelineStartPts;
    id3.cueTime /= 90e3;
    event.metadata.push(id3);
  }
  // We add this to every single emitted segment even though we only need
  // it for the first
  event.metadata.dispatchType = this.metadataStream.dispatchType;

  // Reset stream state
  this.videoTrack = null;
  this.audioTrack = null;
  this.videoTags = [];
  this.audioTags = [];
  this.pendingCaptions.length = 0;
  this.pendingMetadata.length = 0;
  this.pendingTracks = 0;
  this.processedTracks = 0;

  // Emit the final segment
  this.trigger('data', event);

  this.trigger('done');
};

module.exports = CoalesceStream;
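
For orientation, here is a minimal sketch (not part of the vendored file) of how this stage could be driven by hand. The metadataStream stub, the numberOfTracks value, and the pushed track/tag shapes are assumptions derived from the push() and flush() code above; on()/trigger() are the event helpers supplied by the Stream base class required at the top of the file.

var CoalesceStream = require('mux.js/lib/flv/coalesce-stream');

// Hypothetical metadata stream stub; flush() only reads dispatchType from it.
var coalesce = new CoalesceStream({ metadataStream: { dispatchType: '15' } });
coalesce.numberOfTracks = 1; // expect one (video) track per segment

coalesce.on('data', function(segment) {
  // segment.tags.videoTags / audioTags hold the buffered flv tags;
  // segment.captions and segment.metadata hold the time-translated cues
  console.log(segment.tags.videoTags.length, 'video tags');
});

// Shape derived from push()/flush(): a track with a 90kHz timelineStartInfo.pts
// and the flv tags produced for this segment
coalesce.push({
  track: { type: 'video', timelineStartInfo: { pts: 90000 } },
  tags: []
});

// 'data' and 'done' are only emitted once a data-generating stream flushes
coalesce.flush('VideoSegmentStream');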