First release

Owen Quinlan 2021-07-02 19:29:34 +10:00
commit fa6c85266e
2339 changed files with 761050 additions and 0 deletions

node_modules/mux.js/lib/partial/audio-segment-stream.js generated vendored Normal file

@@ -0,0 +1,154 @@
'use strict';
var Stream = require('../utils/stream.js');
var mp4 = require('../mp4/mp4-generator.js');
var audioFrameUtils = require('../mp4/audio-frame-utils');
var trackInfo = require('../mp4/track-decode-info.js');
var ONE_SECOND_IN_TS = require('../utils/clock').ONE_SECOND_IN_TS;
var AUDIO_PROPERTIES = require('../constants/audio-properties.js');
/**
* Constructs a single-track, ISO BMFF media segment from AAC data
* events. The output of this stream can be fed to a SourceBuffer
* configured with a suitable initialization segment.
*/
var AudioSegmentStream = function(track, options) {
var
adtsFrames = [],
sequenceNumber = 0,
earliestAllowedDts = 0,
audioAppendStartTs = 0,
videoBaseMediaDecodeTime = Infinity,
segmentStartPts = null,
segmentEndPts = null;
options = options || {};
AudioSegmentStream.prototype.init.call(this);
this.push = function(data) {
trackInfo.collectDtsInfo(track, data);
if (track) {
AUDIO_PROPERTIES.forEach(function(prop) {
track[prop] = data[prop];
});
}
// buffer audio data until a flush is requested (flush/partialFlush)
adtsFrames.push(data);
};
this.setEarliestDts = function(earliestDts) {
earliestAllowedDts = earliestDts;
};
this.setVideoBaseMediaDecodeTime = function(baseMediaDecodeTime) {
videoBaseMediaDecodeTime = baseMediaDecodeTime;
};
this.setAudioAppendStart = function(timestamp) {
audioAppendStartTs = timestamp;
};
this.processFrames_ = function() {
var
frames,
moof,
mdat,
boxes,
timingInfo;
// return early if no audio data has been observed
if (adtsFrames.length === 0) {
return;
}
frames = audioFrameUtils.trimAdtsFramesByEarliestDts(
adtsFrames, track, earliestAllowedDts);
if (frames.length === 0) {
// return early if the frames were all before the earliest allowed DTS
// TODO should we clear the adtsFrames?
return;
}
track.baseMediaDecodeTime = trackInfo.calculateTrackBaseMediaDecodeTime(
track, options.keepOriginalTimestamps);
audioFrameUtils.prefixWithSilence(
track, frames, audioAppendStartTs, videoBaseMediaDecodeTime);
// we have to build the index from byte locations to
// samples (that is, adts frames) in the audio data
track.samples = audioFrameUtils.generateSampleTable(frames);
// concatenate the audio data to construct the mdat
mdat = mp4.mdat(audioFrameUtils.concatenateFrameData(frames));
adtsFrames = [];
moof = mp4.moof(sequenceNumber, [track]);
// bump the sequence number for next time
sequenceNumber++;
track.initSegment = mp4.initSegment([track]);
// it would be great to allocate this array up front instead of
// throwing away hundreds of media segment fragments
boxes = new Uint8Array(moof.byteLength + mdat.byteLength);
boxes.set(moof);
boxes.set(mdat, moof.byteLength);
trackInfo.clearDtsInfo(track);
if (segmentStartPts === null) {
segmentEndPts = segmentStartPts = frames[0].pts;
}
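// each AAC frame carries 1024 samples, so one frame lasts
// (1024 / samplerate) seconds; scale by ONE_SECOND_IN_TS for 90kHz ticks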
segmentEndPts += frames.length * (ONE_SECOND_IN_TS * 1024 / track.samplerate);
timingInfo = { start: segmentStartPts };
this.trigger('timingInfo', timingInfo);
this.trigger('data', {track: track, boxes: boxes});
};
this.flush = function() {
this.processFrames_();
// trigger final timing info
this.trigger('timingInfo', {
start: segmentStartPts,
end: segmentEndPts
});
this.resetTiming_();
this.trigger('done', 'AudioSegmentStream');
};
this.partialFlush = function() {
this.processFrames_();
this.trigger('partialdone', 'AudioSegmentStream');
};
this.endTimeline = function() {
this.flush();
this.trigger('endedtimeline', 'AudioSegmentStream');
};
this.resetTiming_ = function() {
trackInfo.clearDtsInfo(track);
segmentStartPts = null;
segmentEndPts = null;
};
this.reset = function() {
this.resetTiming_();
adtsFrames = [];
this.trigger('reset');
};
};
AudioSegmentStream.prototype = new Stream();
module.exports = AudioSegmentStream;
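
For reference, a minimal driver sketch for this stream. This is hedged: the track and frame shapes below are illustrative assumptions inferred from the code above (the AUDIO_PROPERTIES copy and the trackInfo calls), not a documented API.

// usage sketch, not part of the vendored file; all field values are illustrative
var AudioSegmentStream = require('mux.js/lib/partial/audio-segment-stream');

var track = {
  type: 'audio',
  timelineStartInfo: { baseMediaDecodeTime: 0 }
};
var audioStream = new AudioSegmentStream(track, { keepOriginalTimestamps: true });

audioStream.on('timingInfo', function(info) {
  // info.start is the segment start PTS in 90kHz clock ticks
});
audioStream.on('data', function(event) {
  // event.boxes is the concatenated moof + mdat for the audio segment
});

// frames normally arrive piped from AdtsStream; this object mirrors AUDIO_PROPERTIES
audioStream.push({
  pts: 0, dts: 0, data: new Uint8Array(1024),
  audioobjecttype: 2, channelcount: 2,
  samplerate: 44100, samplingfrequencyindex: 4, samplesize: 16
});
audioStream.flush(); // emits 'data' and 'timingInfo', then 'done'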

node_modules/mux.js/lib/partial/index.js generated vendored Normal file

@@ -0,0 +1,3 @@
module.exports = {
Transmuxer: require('./transmuxer')
};
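
Since the partial index re-exports only the Transmuxer, a consumer would typically pull it in like this (the require path assumes the vendored node_modules layout above):

// usage sketch, not part of the vendored file
var partial = require('mux.js/lib/partial');
var transmuxer = new partial.Transmuxer({ baseMediaDecodeTime: 0 });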

node_modules/mux.js/lib/partial/transmuxer.js generated vendored Normal file

@@ -0,0 +1,378 @@
var Stream = require('../utils/stream.js');
var m2ts = require('../m2ts/m2ts.js');
var codecs = require('../codecs/index.js');
var AudioSegmentStream = require('./audio-segment-stream.js');
var VideoSegmentStream = require('./video-segment-stream.js');
var trackInfo = require('../mp4/track-decode-info.js');
var isLikelyAacData = require('../aac/utils').isLikelyAacData;
var AdtsStream = require('../codecs/adts');
var AacStream = require('../aac/index');
var clock = require('../utils/clock');
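// createPipeline installs the Stream instance methods (on/off/trigger) onto a
// plain pipeline object so it can emit events to the owning transmuxer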
var createPipeline = function(object) {
object.prototype = new Stream();
object.prototype.init.call(object);
return object;
};
var tsPipeline = function(options) {
var
pipeline = {
type: 'ts',
tracks: {
audio: null,
video: null
},
packet: new m2ts.TransportPacketStream(),
parse: new m2ts.TransportParseStream(),
elementary: new m2ts.ElementaryStream(),
timestampRollover: new m2ts.TimestampRolloverStream(),
adts: new codecs.Adts(),
h264: new codecs.h264.H264Stream(),
captionStream: new m2ts.CaptionStream(options),
metadataStream: new m2ts.MetadataStream()
};
pipeline.headOfPipeline = pipeline.packet;
// Transport Stream
pipeline.packet
.pipe(pipeline.parse)
.pipe(pipeline.elementary)
.pipe(pipeline.timestampRollover);
// H264
pipeline.timestampRollover
.pipe(pipeline.h264);
// Hook up CEA-608/708 caption stream
pipeline.h264
.pipe(pipeline.captionStream);
pipeline.timestampRollover
.pipe(pipeline.metadataStream);
// ADTS
pipeline.timestampRollover
.pipe(pipeline.adts);
pipeline.elementary.on('data', function(data) {
if (data.type !== 'metadata') {
return;
}
for (var i = 0; i < data.tracks.length; i++) {
if (!pipeline.tracks[data.tracks[i].type]) {
pipeline.tracks[data.tracks[i].type] = data.tracks[i];
pipeline.tracks[data.tracks[i].type].timelineStartInfo.baseMediaDecodeTime = options.baseMediaDecodeTime;
}
}
if (pipeline.tracks.video && !pipeline.videoSegmentStream) {
pipeline.videoSegmentStream = new VideoSegmentStream(pipeline.tracks.video, options);
pipeline.videoSegmentStream.on('timelineStartInfo', function(timelineStartInfo) {
if (pipeline.tracks.audio && !options.keepOriginalTimestamps) {
pipeline.audioSegmentStream.setEarliestDts(timelineStartInfo.dts - options.baseMediaDecodeTime);
}
});
pipeline.videoSegmentStream.on('timingInfo',
pipeline.trigger.bind(pipeline, 'videoTimingInfo'));
pipeline.videoSegmentStream.on('data', function(data) {
pipeline.trigger('data', {
type: 'video',
data: data
});
});
pipeline.videoSegmentStream.on('done',
pipeline.trigger.bind(pipeline, 'done'));
pipeline.videoSegmentStream.on('partialdone',
pipeline.trigger.bind(pipeline, 'partialdone'));
pipeline.videoSegmentStream.on('endedtimeline',
pipeline.trigger.bind(pipeline, 'endedtimeline'));
pipeline.h264
.pipe(pipeline.videoSegmentStream);
}
if (pipeline.tracks.audio && !pipeline.audioSegmentStream) {
pipeline.audioSegmentStream = new AudioSegmentStream(pipeline.tracks.audio, options);
pipeline.audioSegmentStream.on('data', function(data) {
pipeline.trigger('data', {
type: 'audio',
data: data
});
});
pipeline.audioSegmentStream.on('done',
pipeline.trigger.bind(pipeline, 'done'));
pipeline.audioSegmentStream.on('partialdone',
pipeline.trigger.bind(pipeline, 'partialdone'));
pipeline.audioSegmentStream.on('endedtimeline',
pipeline.trigger.bind(pipeline, 'endedtimeline'));
pipeline.audioSegmentStream.on('timingInfo',
pipeline.trigger.bind(pipeline, 'audioTimingInfo'));
pipeline.adts
.pipe(pipeline.audioSegmentStream);
}
// emit track info (hasAudio/hasVideo) derived from the PMT
pipeline.trigger('trackinfo', {
hasAudio: !!pipeline.tracks.audio,
hasVideo: !!pipeline.tracks.video
});
});
pipeline.captionStream.on('data', function(caption) {
var timelineStartPts;
if (pipeline.tracks.video) {
timelineStartPts = pipeline.tracks.video.timelineStartInfo.pts || 0;
} else {
// This will only happen if we encounter caption packets before
// video data in a segment. This is an unusual/unlikely scenario,
// so we assume the timeline starts at zero for now.
timelineStartPts = 0;
}
// Translate caption PTS times into second offsets into the
// video timeline for the segment
caption.startTime = clock.metadataTsToSeconds(caption.startPts, timelineStartPts, options.keepOriginalTimestamps);
caption.endTime = clock.metadataTsToSeconds(caption.endPts, timelineStartPts, options.keepOriginalTimestamps);
pipeline.trigger('caption', caption);
});
pipeline = createPipeline(pipeline);
pipeline.metadataStream.on('data', pipeline.trigger.bind(pipeline, 'id3Frame'));
return pipeline;
};
var aacPipeline = function(options) {
var
pipeline = {
type: 'aac',
tracks: {
audio: null
},
metadataStream: new m2ts.MetadataStream(),
aacStream: new AacStream(),
audioRollover: new m2ts.TimestampRolloverStream('audio'),
timedMetadataRollover: new m2ts.TimestampRolloverStream('timed-metadata'),
adtsStream: new AdtsStream(true)
};
// set up the parsing pipeline
pipeline.headOfPipeline = pipeline.aacStream;
pipeline.aacStream
.pipe(pipeline.audioRollover)
.pipe(pipeline.adtsStream);
pipeline.aacStream
.pipe(pipeline.timedMetadataRollover)
.pipe(pipeline.metadataStream);
pipeline.metadataStream.on('timestamp', function(frame) {
pipeline.aacStream.setTimestamp(frame.timeStamp);
});
pipeline.aacStream.on('data', function(data) {
if ((data.type !== 'timed-metadata' && data.type !== 'audio') || pipeline.audioSegmentStream) {
return;
}
pipeline.tracks.audio = pipeline.tracks.audio || {
timelineStartInfo: {
baseMediaDecodeTime: options.baseMediaDecodeTime
},
codec: 'adts',
type: 'audio'
};
// hook up the audio segment stream to the first track with aac data
pipeline.audioSegmentStream = new AudioSegmentStream(pipeline.tracks.audio, options);
pipeline.audioSegmentStream.on('data', function(data) {
pipeline.trigger('data', {
type: 'audio',
data: data
});
});
pipeline.audioSegmentStream.on('partialdone',
pipeline.trigger.bind(pipeline, 'partialdone'));
pipeline.audioSegmentStream.on('done', pipeline.trigger.bind(pipeline, 'done'));
pipeline.audioSegmentStream.on('endedtimeline',
pipeline.trigger.bind(pipeline, 'endedtimeline'));
pipeline.audioSegmentStream.on('timingInfo',
pipeline.trigger.bind(pipeline, 'audioTimingInfo'));
// Set up the final part of the audio pipeline
pipeline.adtsStream
.pipe(pipeline.audioSegmentStream);
pipeline.trigger('trackinfo', {
hasAudio: !!pipeline.tracks.audio,
hasVideo: !!pipeline.tracks.video
});
});
// set the pipeline up as a stream before binding to get access to the trigger function
pipeline = createPipeline(pipeline);
pipeline.metadataStream.on('data', pipeline.trigger.bind(pipeline, 'id3Frame'));
return pipeline;
};
var setupPipelineListeners = function(pipeline, transmuxer) {
pipeline.on('data', transmuxer.trigger.bind(transmuxer, 'data'));
pipeline.on('done', transmuxer.trigger.bind(transmuxer, 'done'));
pipeline.on('partialdone', transmuxer.trigger.bind(transmuxer, 'partialdone'));
pipeline.on('endedtimeline', transmuxer.trigger.bind(transmuxer, 'endedtimeline'));
pipeline.on('audioTimingInfo', transmuxer.trigger.bind(transmuxer, 'audioTimingInfo'));
pipeline.on('videoTimingInfo', transmuxer.trigger.bind(transmuxer, 'videoTimingInfo'));
pipeline.on('trackinfo', transmuxer.trigger.bind(transmuxer, 'trackinfo'));
pipeline.on('id3Frame', function(event) {
// add this to every single emitted segment even though it's only needed for the first
event.dispatchType = pipeline.metadataStream.dispatchType;
// keep original time, can be adjusted if needed at a higher level
event.cueTime = clock.videoTsToSeconds(event.pts);
transmuxer.trigger('id3Frame', event);
});
pipeline.on('caption', function(event) {
transmuxer.trigger('caption', event);
});
};
var Transmuxer = function(options) {
var
pipeline = null,
hasFlushed = true;
options = options || {};
Transmuxer.prototype.init.call(this);
options.baseMediaDecodeTime = options.baseMediaDecodeTime || 0;
this.push = function(bytes) {
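// on the first push, and after every flush, sniff the bytes and swap in the
// matching pipeline (raw AAC/ADTS vs. MPEG-TS) before forwarding data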
if (hasFlushed) {
var isAac = isLikelyAacData(bytes);
if (isAac && (!pipeline || pipeline.type !== 'aac')) {
pipeline = aacPipeline(options);
setupPipelineListeners(pipeline, this);
} else if (!isAac && (!pipeline || pipeline.type !== 'ts')) {
pipeline = tsPipeline(options);
setupPipelineListeners(pipeline, this);
}
hasFlushed = false;
}
pipeline.headOfPipeline.push(bytes);
};
this.flush = function() {
if (!pipeline) {
return;
}
hasFlushed = true;
pipeline.headOfPipeline.flush();
};
this.partialFlush = function() {
if (!pipeline) {
return;
}
pipeline.headOfPipeline.partialFlush();
};
this.endTimeline = function() {
if (!pipeline) {
return;
}
pipeline.headOfPipeline.endTimeline();
};
this.reset = function() {
if (!pipeline) {
return;
}
pipeline.headOfPipeline.reset();
};
this.setBaseMediaDecodeTime = function(baseMediaDecodeTime) {
if (!options.keepOriginalTimestamps) {
options.baseMediaDecodeTime = baseMediaDecodeTime;
}
if (!pipeline) {
return;
}
if (pipeline.tracks.audio) {
pipeline.tracks.audio.timelineStartInfo.dts = undefined;
pipeline.tracks.audio.timelineStartInfo.pts = undefined;
trackInfo.clearDtsInfo(pipeline.tracks.audio);
if (pipeline.audioRollover) {
pipeline.audioRollover.discontinuity();
}
}
if (pipeline.tracks.video) {
if (pipeline.videoSegmentStream) {
pipeline.videoSegmentStream.gopCache_ = [];
}
pipeline.tracks.video.timelineStartInfo.dts = undefined;
pipeline.tracks.video.timelineStartInfo.pts = undefined;
trackInfo.clearDtsInfo(pipeline.tracks.video);
// pipeline.captionStream.reset();
}
if (pipeline.timestampRollover) {
pipeline.timestampRollover.discontinuity();
}
};
this.setRemux = function(val) {
options.remux = val;
if (pipeline && pipeline.coalesceStream) {
pipeline.coalesceStream.setRemux(val);
}
};
this.setAudioAppendStart = function(audioAppendStart) {
if (!pipeline || !pipeline.tracks.audio || !pipeline.audioSegmentStream) {
return;
}
pipeline.audioSegmentStream.setAudioAppendStart(audioAppendStart);
};
// TODO GOP alignment support
// Support may be a bit trickier than with full segment appends, as GOPs may be split
// and processed in a more granular fashion
this.alignGopsWith = function(gopsToAlignWith) {
return;
};
};
Transmuxer.prototype = new Stream();
module.exports = Transmuxer;
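
To tie the pieces together, a hedged end-to-end sketch (the require path and the source of the TS bytes are assumptions; any Uint8Array of MPEG-TS packets would do):

// usage sketch, not part of the vendored file
var Transmuxer = require('mux.js/lib/partial/transmuxer');

var transmuxer = new Transmuxer({ keepOriginalTimestamps: false });

transmuxer.on('trackinfo', function(info) {
  // info.hasAudio / info.hasVideo, derived from the PMT
});
transmuxer.on('data', function(event) {
  // event.type is 'audio' or 'video'; event.data.boxes holds the fMP4 bytes
});
transmuxer.on('partialdone', function() {
  // a partial flush completed; incomplete frames stay cached for the next push
});

var tsBytes = new Uint8Array(0); // placeholder: real 188-byte TS packets go here
transmuxer.push(tsBytes);
transmuxer.partialFlush(); // emit whatever complete frames are available
transmuxer.flush();        // emit the remainder plus final timing info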

node_modules/mux.js/lib/partial/video-segment-stream.js generated vendored Normal file

@@ -0,0 +1,208 @@
/**
* Constructs a single-track, ISO BMFF media segment from H264 data
* events. The output of this stream can be fed to a SourceBuffer
* configured with a suitable initialization segment.
* @param track {object} track metadata configuration
* @param options {object} transmuxer options object
* @param options.alignGopsAtEnd {boolean} If true, start from the end of the
* gopsToAlignWith list when attempting to align GOP PTS values
*/
'use strict';
var Stream = require('../utils/stream.js');
var mp4 = require('../mp4/mp4-generator.js');
var trackInfo = require('../mp4/track-decode-info.js');
var frameUtils = require('../mp4/frame-utils');
var VIDEO_PROPERTIES = require('../constants/video-properties.js');
var VideoSegmentStream = function(track, options) {
var
sequenceNumber = 0,
nalUnits = [],
frameCache = [],
// gopsToAlignWith = [],
config,
pps,
segmentStartPts = null,
segmentEndPts = null,
gops,
ensureNextFrameIsKeyFrame = true;
options = options || {};
VideoSegmentStream.prototype.init.call(this);
this.push = function(nalUnit) {
trackInfo.collectDtsInfo(track, nalUnit);
if (typeof track.timelineStartInfo.dts === 'undefined') {
track.timelineStartInfo.dts = nalUnit.dts;
}
// record the track config
if (nalUnit.nalUnitType === 'seq_parameter_set_rbsp' && !config) {
config = nalUnit.config;
track.sps = [nalUnit.data];
VIDEO_PROPERTIES.forEach(function(prop) {
track[prop] = config[prop];
}, this);
}
if (nalUnit.nalUnitType === 'pic_parameter_set_rbsp' && !pps) {
pps = nalUnit.data;
track.pps = [nalUnit.data];
}
// buffer video until flush() is called
nalUnits.push(nalUnit);
};
this.processNals_ = function(cacheLastFrame) {
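// when cacheLastFrame is set (partial flush), the trailing frame is held back
// below because more of its NAL units may still arrive on a later push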
var i;
nalUnits = frameCache.concat(nalUnits);
// Throw away nalUnits at the start of the byte stream until
// we find the first AUD
while (nalUnits.length) {
if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {
break;
}
nalUnits.shift();
}
// Return early if no video data has been observed
if (nalUnits.length === 0) {
return;
}
var frames = frameUtils.groupNalsIntoFrames(nalUnits);
if (!frames.length) {
return;
}
// note that the frame cache may also protect us from cases where we haven't
// pushed data for the entire first or last frame yet
frameCache = frames[frames.length - 1];
if (cacheLastFrame) {
frames.pop();
frames.duration -= frameCache.duration;
frames.nalCount -= frameCache.length;
frames.byteLength -= frameCache.byteLength;
}
if (!frames.length) {
nalUnits = [];
return;
}
this.trigger('timelineStartInfo', track.timelineStartInfo);
if (ensureNextFrameIsKeyFrame) {
gops = frameUtils.groupFramesIntoGops(frames);
if (!gops[0][0].keyFrame) {
gops = frameUtils.extendFirstKeyFrame(gops);
if (!gops[0][0].keyFrame) {
// we haven't yet gotten a key frame, so reset nal units to wait for more nal
// units
nalUnits = ([].concat.apply([], frames)).concat(frameCache);
frameCache = [];
return;
}
frames = [].concat.apply([], gops);
frames.duration = gops.duration;
}
ensureNextFrameIsKeyFrame = false;
}
if (segmentStartPts === null) {
segmentStartPts = frames[0].pts;
segmentEndPts = segmentStartPts;
}
segmentEndPts += frames.duration;
this.trigger('timingInfo', {
start: segmentStartPts,
end: segmentEndPts
});
for (i = 0; i < frames.length; i++) {
var frame = frames[i];
track.samples = frameUtils.generateSampleTableForFrame(frame);
var mdat = mp4.mdat(frameUtils.concatenateNalDataForFrame(frame));
trackInfo.clearDtsInfo(track);
trackInfo.collectDtsInfo(track, frame);
track.baseMediaDecodeTime = trackInfo.calculateTrackBaseMediaDecodeTime(
track, options.keepOriginalTimestamps);
var moof = mp4.moof(sequenceNumber, [track]);
sequenceNumber++;
track.initSegment = mp4.initSegment([track]);
var boxes = new Uint8Array(moof.byteLength + mdat.byteLength);
boxes.set(moof);
boxes.set(mdat, moof.byteLength);
this.trigger('data', {
track: track,
boxes: boxes,
sequence: sequenceNumber,
videoFrameDts: frame.dts,
videoFramePts: frame.pts
});
}
nalUnits = [];
};
this.resetTimingAndConfig_ = function() {
config = undefined;
pps = undefined;
segmentStartPts = null;
segmentEndPts = null;
};
this.partialFlush = function() {
this.processNals_(true);
this.trigger('partialdone', 'VideoSegmentStream');
};
this.flush = function() {
this.processNals_(false);
// reset config and pps because they may differ across segments
// for instance, when we are rendition switching
this.resetTimingAndConfig_();
this.trigger('done', 'VideoSegmentStream');
};
this.endTimeline = function() {
this.flush();
this.trigger('endedtimeline', 'VideoSegmentStream');
};
this.reset = function() {
this.resetTimingAndConfig_();
frameCache = [];
nalUnits = [];
ensureNextFrameIsKeyFrame = true;
this.trigger('reset');
};
};
VideoSegmentStream.prototype = new Stream();
module.exports = VideoSegmentStream;
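
And a matching wiring sketch for the video side (hedged: in the real pipeline the NAL units are piped in from the H264Stream, so only the event wiring is shown):

// usage sketch, not part of the vendored file
var VideoSegmentStream = require('mux.js/lib/partial/video-segment-stream');

var track = { type: 'video', timelineStartInfo: { baseMediaDecodeTime: 0 } };
var videoStream = new VideoSegmentStream(track, { keepOriginalTimestamps: true });

videoStream.on('timingInfo', function(info) {
  // info.start / info.end are segment PTS bounds in 90kHz clock ticks
});
videoStream.on('data', function(event) {
  // one moof/mdat pair is emitted per processed frame
});

// h264Stream.pipe(videoStream) would normally feed push(); afterwards:
videoStream.partialFlush(); // emit complete frames, cache the trailing one
videoStream.flush();        // emit everything and reset the cached SPS/PPS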