First release

Owen Quinlan 2021-07-02 19:29:34 +10:00
commit fa6c85266e
2339 changed files with 761050 additions and 0 deletions

node_modules/@videojs/http-streaming/src/util/codecs.js generated vendored Normal file

@ -0,0 +1,128 @@
/**
* @file - codecs.js - Handles tasks regarding codec strings, such as translating them
* into standard codec strings or parsing codec strings into objects that can be examined.
*/
import {
translateLegacyCodec,
parseCodecs,
codecsFromDefault
} from '@videojs/vhs-utils/es/codecs.js';
import logger from './logger.js';
const logFn = logger('CodecUtils');
/**
* Returns a set of codec strings parsed from the playlist or the default
* codec strings if no codecs were specified in the playlist
*
* @param {Playlist} media the current media playlist
* @return {Object} an object with the video and audio codecs
*/
const getCodecs = function(media) {
// if the codecs were explicitly specified, use them instead of the
// defaults
const mediaAttributes = media.attributes || {};
if (mediaAttributes.CODECS) {
return parseCodecs(mediaAttributes.CODECS);
}
};
export const isMaat = (master, media) => {
const mediaAttributes = media.attributes || {};
return master && master.mediaGroups && master.mediaGroups.AUDIO &&
mediaAttributes.AUDIO &&
master.mediaGroups.AUDIO[mediaAttributes.AUDIO];
};
export const isMuxed = (master, media) => {
if (!isMaat(master, media)) {
return true;
}
const mediaAttributes = media.attributes || {};
const audioGroup = master.mediaGroups.AUDIO[mediaAttributes.AUDIO];
for (const groupId in audioGroup) {
// If an audio group has a URI (the case for HLS, as HLS will use external playlists),
// or there are listed playlists (the case for DASH, as the manifest will have already
// provided all of the details necessary to generate the audio playlist, as opposed to
// HLS' externally requested playlists), then the content is demuxed.
if (!audioGroup[groupId].uri && !audioGroup[groupId].playlists) {
return true;
}
}
return false;
};
export const unwrapCodecList = function(codecList) {
const codecs = {};
codecList.forEach(({mediaType, type, details}) => {
codecs[mediaType] = codecs[mediaType] || [];
codecs[mediaType].push(translateLegacyCodec(`${type}${details}`));
});
Object.keys(codecs).forEach(function(mediaType) {
if (codecs[mediaType].length > 1) {
logFn(`multiple ${mediaType} codecs found as attributes: ${codecs[mediaType].join(', ')}. Setting playlist codecs to null so that we wait for mux.js to probe segments for real codecs.`);
codecs[mediaType] = null;
return;
}
codecs[mediaType] = codecs[mediaType][0];
});
return codecs;
};
export const codecCount = function(codecObj) {
let count = 0;
if (codecObj.audio) {
count++;
}
if (codecObj.video) {
count++;
}
return count;
};
/**
* Calculates the codec strings for a working configuration of
* SourceBuffers to play variant streams in a master playlist. If
* there is no possible working configuration, an empty object will be
* returned.
*
* @param master {Object} the m3u8 object for the master playlist
* @param media {Object} the m3u8 object for the variant playlist
* @return {Object} the codec strings.
*
* @private
*/
export const codecsForPlaylist = function(master, media) {
const mediaAttributes = media.attributes || {};
const codecInfo = unwrapCodecList(getCodecs(media) || []);
// HLS with multiple-audio tracks must always get an audio codec.
// Put another way, there is no way to have a video-only multiple-audio HLS!
if (isMaat(master, media) && !codecInfo.audio) {
if (!isMuxed(master, media)) {
// It is possible for codecs to be specified on the audio media group playlist but
// not on the rendition playlist. This is mostly the case for DASH, where audio and
// video are always separate (and separately specified).
const defaultCodecs = unwrapCodecList(codecsFromDefault(master, mediaAttributes.AUDIO) || []);
if (defaultCodecs.audio) {
codecInfo.audio = defaultCodecs.audio;
}
}
}
return codecInfo;
};
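// Illustrative usage sketch (not part of the original file): the master/media
// objects below are hypothetical sample data; parseCodecs comes from
// @videojs/vhs-utils as imported above.
//
// const master = { mediaGroups: { AUDIO: {} } };
// const media = { attributes: { CODECS: 'avc1.4d401e,mp4a.40.2' } };
//
// codecsForPlaylist(master, media);
// // => roughly { video: 'avc1.4d401e', audio: 'mp4a.40.2' }, since the CODECS
// //    attribute is parsed directly and no audio media group applies here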


@ -0,0 +1,86 @@
import {getId3Offset} from '@videojs/vhs-utils/es/id3-helpers';
import {detectContainerForBytes} from '@videojs/vhs-utils/es/containers';
import {stringToBytes, concatTypedArrays} from '@videojs/vhs-utils/es/byte-helpers';
import {callbackWrapper} from '../xhr';
// calls back if the request is readyState DONE
// which will only happen if the request is complete.
const callbackOnCompleted = (request, cb) => {
if (request.readyState === 4) {
return cb();
}
return;
};
const containerRequest = (uri, xhr, cb) => {
let bytes = [];
let id3Offset;
let finished = false;
const endRequestAndCallback = function(err, req, type, _bytes) {
req.abort();
finished = true;
return cb(err, req, type, _bytes);
};
const progressListener = function(error, request) {
if (finished) {
return;
}
if (error) {
return endRequestAndCallback(error, request, '', bytes);
}
// grab the new part of the content that was just downloaded
const newPart = request.responseText.substring(
bytes && bytes.byteLength || 0,
request.responseText.length
);
// add that onto bytes
bytes = concatTypedArrays(bytes, stringToBytes(newPart, true));
id3Offset = id3Offset || getId3Offset(bytes);
// we need at least 10 bytes to determine a type
// or we need at least two bytes after an id3Offset
if (bytes.length < 10 || (id3Offset && bytes.length < id3Offset + 2)) {
return callbackOnCompleted(request, () => endRequestAndCallback(error, request, '', bytes));
}
const type = detectContainerForBytes(bytes);
// if this looks like a ts segment but we don't have enough data
// to see the second sync byte, wait until we have enough data
// before declaring it ts
if (type === 'ts' && bytes.length < 188) {
return callbackOnCompleted(request, () => endRequestAndCallback(error, request, '', bytes));
}
// this may be an unsynced ts segment, so
// wait for 376 bytes before deciding there is no container
if (!type && bytes.length < 376) {
return callbackOnCompleted(request, () => endRequestAndCallback(error, request, '', bytes));
}
return endRequestAndCallback(null, request, type, bytes);
};
const options = {
uri,
beforeSend(request) {
// this forces the browser to pass the bytes to us unprocessed
request.overrideMimeType('text/plain; charset=x-user-defined');
request.addEventListener('progress', function({total, loaded}) {
return callbackWrapper(request, null, {statusCode: request.status}, progressListener);
});
}
};
const request = xhr(options, function(error, response) {
return callbackWrapper(request, error, response, progressListener);
});
return request;
};
export default containerRequest;
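// Illustrative usage sketch (not part of the original file): the uri below is a
// hypothetical placeholder and `vhsXhr` stands in for the xhr function imported
// from '../xhr' above.
//
// const probe = containerRequest('https://example.com/segment.bin', vhsXhr, (err, req, type, bytes) => {
//   // `type` is e.g. 'ts' or 'mp4', or '' if no container could be detected
//   // from the bytes downloaded so far
// });
// // probe.abort() cancels the underlying request early if needed.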

node_modules/@videojs/http-streaming/src/util/gops.js generated vendored Normal file

@ -0,0 +1,119 @@
import { ONE_SECOND_IN_TS } from 'mux.js/lib/utils/clock';
/**
* Returns a list of gops in the buffer that have a pts value of 3 seconds or more in
* front of current time.
*
* @param {Array} buffer
* The current buffer of gop information
* @param {number} currentTime
* The current time
* @param {Double} mapping
* Offset to map display time to stream presentation time
* @return {Array}
* List of gops considered safe to append over
*/
export const gopsSafeToAlignWith = (buffer, currentTime, mapping) => {
if (typeof currentTime === 'undefined' || currentTime === null || !buffer.length) {
return [];
}
// pts value for current time + 3 seconds to give a bit more wiggle room
const currentTimePts = Math.ceil((currentTime - mapping + 3) * ONE_SECOND_IN_TS);
let i;
for (i = 0; i < buffer.length; i++) {
if (buffer[i].pts > currentTimePts) {
break;
}
}
return buffer.slice(i);
};
/**
* Appends gop information (timing and byteLength) received by the transmuxer for the
* gops appended in the last call to appendBuffer
*
* @param {Array} buffer
* The current buffer of gop information
* @param {Array} gops
* List of new gop information
* @param {boolean} replace
* If true, replace the buffer with the new gop information. If false, merge the
* new gop information into the buffer at the correct time-ordered position.
* @return {Array}
* Updated list of gop information
*/
export const updateGopBuffer = (buffer, gops, replace) => {
if (!gops.length) {
return buffer;
}
if (replace) {
// If we are in safe append mode, then completely overwrite the gop buffer
// with the most recent appended data. This will make sure that when appending
// future segments, we only try to align with gops that are both ahead of current
// time and in the last segment appended.
return gops.slice();
}
const start = gops[0].pts;
let i = 0;
for (i; i < buffer.length; i++) {
if (buffer[i].pts >= start) {
break;
}
}
return buffer.slice(0, i).concat(gops);
};
/**
* Removes gop information in buffer that overlaps with provided start and end
*
* @param {Array} buffer
* The current buffer of gop information
* @param {Double} start
* position to start the remove at
* @param {Double} end
* position to end the remove at
* @param {Double} mapping
* Offset to map display time to stream presentation time
*/
export const removeGopBuffer = (buffer, start, end, mapping) => {
const startPts = Math.ceil((start - mapping) * ONE_SECOND_IN_TS);
const endPts = Math.ceil((end - mapping) * ONE_SECOND_IN_TS);
const updatedBuffer = buffer.slice();
let i = buffer.length;
while (i--) {
if (buffer[i].pts <= endPts) {
break;
}
}
if (i === -1) {
// no removal because end of remove range is before start of buffer
return updatedBuffer;
}
let j = i + 1;
while (j--) {
if (buffer[j].pts <= startPts) {
break;
}
}
// clamp remove range start to 0 index
j = Math.max(j, 0);
updatedBuffer.splice(j, i - j + 1);
return updatedBuffer;
};
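// Illustrative usage sketch (not part of the original file): gop entries carry
// pts values in 90kHz clock ticks (ONE_SECOND_IN_TS); the buffer below is
// hypothetical sample data.
//
// const gopBuffer = [
//   { pts: 1 * ONE_SECOND_IN_TS, byteLength: 1000 },
//   { pts: 5 * ONE_SECOND_IN_TS, byteLength: 1000 },
//   { pts: 9 * ONE_SECOND_IN_TS, byteLength: 1000 }
// ];
//
// // with currentTime 0 and mapping 0, only gops more than 3 seconds ahead remain
// gopsSafeToAlignWith(gopBuffer, 0, 0); // => the 5s and 9s entries
//
// // merging a new gop at 5s (replace === false) keeps everything before it
// updateGopBuffer(gopBuffer, [{ pts: 5 * ONE_SECOND_IN_TS, byteLength: 500 }], false);
// // => [ the 1s entry, the new 5s entry ]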


@ -0,0 +1,11 @@
import videojs from 'video.js';
const logger = (source) => {
if (videojs.log.debug) {
return videojs.log.debug.bind(videojs, 'VHS:', `${source} >`);
}
return function() {};
};
export default logger;


@ -0,0 +1 @@
export default function noop() {}


@ -0,0 +1,23 @@
/**
* Combine all segments into a single Uint8Array
*
* @param {Object} segmentObj
* @return {Uint8Array} concatenated bytes
* @private
*/
export const concatSegments = (segmentObj) => {
let offset = 0;
let tempBuffer;
if (segmentObj.bytes) {
tempBuffer = new Uint8Array(segmentObj.bytes);
// combine the individual segments into one large typed-array
segmentObj.segments.forEach((segment) => {
tempBuffer.set(segment, offset);
offset += segment.byteLength;
});
}
return tempBuffer;
};
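// Illustrative usage sketch (not part of the original file): `bytes` is the
// combined byte length and `segments` holds the individual chunks; the values
// below are hypothetical sample data.
//
// concatSegments({
//   bytes: 4,
//   segments: [new Uint8Array([1, 2]), new Uint8Array([3, 4])]
// });
// // => Uint8Array [1, 2, 3, 4]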


@ -0,0 +1,41 @@
const shallowEqual = function(a, b) {
// if both are undefined
// or one or the other is undefined
// they are not equal
if ((!a && !b) || (!a && b) || (a && !b)) {
return false;
}
// they are the same object and thus, equal
if (a === b) {
return true;
}
// sort keys so we can make sure they have
// all the same keys later.
const akeys = Object.keys(a).sort();
const bkeys = Object.keys(b).sort();
// different number of keys, not equal
if (akeys.length !== bkeys.length) {
return false;
}
for (let i = 0; i < akeys.length; i++) {
const key = akeys[i];
// different sorted keys, not equal
if (key !== bkeys[i]) {
return false;
}
// different values, not equal
if (a[key] !== b[key]) {
return false;
}
}
return true;
};
export default shallowEqual;
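// Illustrative usage sketch (not part of the original file): comparisons use
// strict equality on own keys, so key order does not matter but nested objects
// must be the same reference.
//
// shallowEqual({ a: 1, b: 2 }, { b: 2, a: 1 }); // => true
// shallowEqual({ a: {} }, { a: {} });           // => false (different references)
// shallowEqual(undefined, undefined);           // => false (both missing counts as not equal)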


@ -0,0 +1,9 @@
export const stringToArrayBuffer = (string) => {
const view = new Uint8Array(new ArrayBuffer(string.length));
for (let i = 0; i < string.length; i++) {
view[i] = string.charCodeAt(i);
}
return view.buffer;
};


@ -0,0 +1,2 @@
export const uint8ToUtf8 = (uintArray) =>
decodeURIComponent(escape(String.fromCharCode.apply(null, uintArray)));


@ -0,0 +1,311 @@
/**
* @file text-tracks.js
*/
import window from 'global/window';
import videojs from 'video.js';
/**
* Create captions text tracks on video.js if they do not exist
*
* @param {Object} inbandTextTracks a reference to current inbandTextTracks
* @param {Object} tech the video.js tech
* @param {Object} captionStream the caption stream to create
* @private
*/
export const createCaptionsTrackIfNotExists = function(inbandTextTracks, tech, captionStream) {
if (!inbandTextTracks[captionStream]) {
tech.trigger({type: 'usage', name: 'vhs-608'});
tech.trigger({type: 'usage', name: 'hls-608'});
let instreamId = captionStream;
// we need to translate SERVICEn for 708 to how mux.js currently labels them
if (/^cc708_/.test(captionStream)) {
instreamId = 'SERVICE' + captionStream.split('_')[1];
}
const track = tech.textTracks().getTrackById(instreamId);
if (track) {
// Reuse an existing track with a CC# id because this was
// very likely created by videojs-contrib-hls from information
// in the m3u8 for us to use
inbandTextTracks[captionStream] = track;
} else {
// This section gets called when we have caption services that aren't specified in the manifest.
// Manifest level caption services are handled in media-groups.js under CLOSED-CAPTIONS.
const captionServices = tech.options_.vhs && tech.options_.vhs.captionServices || {};
let label = captionStream;
let language = captionStream;
let def = false;
const captionService = captionServices[instreamId];
if (captionService) {
label = captionService.label;
language = captionService.language;
def = captionService.default;
}
// Otherwise, create a track with the default `CC#` label and
// without a language
inbandTextTracks[captionStream] = tech.addRemoteTextTrack({
kind: 'captions',
id: instreamId,
// TODO: investigate why this doesn't seem to turn the caption on by default
default: def,
label,
language
}, false).track;
}
}
};
/**
* Add caption text track data to a source handler given an array of captions
*
* @param {Object}
* @param {Object} inbandTextTracks the inband text tracks
* @param {number} timestampOffset the timestamp offset of the source buffer
* @param {Array} captionArray an array of caption data
* @private
*/
export const addCaptionData = function({
inbandTextTracks,
captionArray,
timestampOffset
}) {
if (!captionArray) {
return;
}
const Cue = window.WebKitDataCue || window.VTTCue;
captionArray.forEach((caption) => {
const track = caption.stream;
inbandTextTracks[track].addCue(new Cue(
caption.startTime + timestampOffset,
caption.endTime + timestampOffset,
caption.text
));
});
};
/**
* Define properties on a cue for backwards compatibility,
* but warn the user that the way that they are using it
* is deprecated and will be removed at a later date.
*
* @param {Cue} cue the cue to add the properties on
* @private
*/
const deprecateOldCue = function(cue) {
Object.defineProperties(cue.frame, {
id: {
get() {
videojs.log.warn('cue.frame.id is deprecated. Use cue.value.key instead.');
return cue.value.key;
}
},
value: {
get() {
videojs.log.warn('cue.frame.value is deprecated. Use cue.value.data instead.');
return cue.value.data;
}
},
privateData: {
get() {
videojs.log.warn('cue.frame.privateData is deprecated. Use cue.value.data instead.');
return cue.value.data;
}
}
});
};
/**
* Add metadata text track data to a source handler given an array of metadata
*
* @param {Object}
* @param {Object} inbandTextTracks the inband text tracks
* @param {Array} metadataArray an array of meta data
* @param {number} timestampOffset the timestamp offset of the source buffer
* @param {number} videoDuration the duration of the video
* @private
*/
export const addMetadata = ({
inbandTextTracks,
metadataArray,
timestampOffset,
videoDuration
}) => {
if (!metadataArray) {
return;
}
const Cue = window.WebKitDataCue || window.VTTCue;
const metadataTrack = inbandTextTracks.metadataTrack_;
if (!metadataTrack) {
return;
}
metadataArray.forEach((metadata) => {
const time = metadata.cueTime + timestampOffset;
// if time isn't a finite number between 0 and Infinity, like NaN,
// ignore this bit of metadata.
// This likely occurs when you have a non-timed ID3 tag like TIT2,
// which is the "Title/Songname/Content description" frame
if (typeof time !== 'number' || window.isNaN(time) || time < 0 || !(time < Infinity)) {
return;
}
metadata.frames.forEach((frame) => {
const cue = new Cue(
time,
time,
frame.value || frame.url || frame.data || ''
);
cue.frame = frame;
cue.value = frame;
deprecateOldCue(cue);
metadataTrack.addCue(cue);
});
});
if (!metadataTrack.cues || !metadataTrack.cues.length) {
return;
}
// Updating the metadata cues so that
// the endTime of each cue is the startTime of the next cue
// and the endTime of the last cue is the duration of the video
const cues = metadataTrack.cues;
const cuesArray = [];
// Create a copy of the TextTrackCueList...
// ...disregarding cues with a falsy value
for (let i = 0; i < cues.length; i++) {
if (cues[i]) {
cuesArray.push(cues[i]);
}
}
// Group cues by their startTime value
const cuesGroupedByStartTime = cuesArray.reduce((obj, cue) => {
const timeSlot = obj[cue.startTime] || [];
timeSlot.push(cue);
obj[cue.startTime] = timeSlot;
return obj;
}, {});
// Sort startTimes by ascending order
const sortedStartTimes = Object.keys(cuesGroupedByStartTime)
.sort((a, b) => Number(a) - Number(b));
// Map each cue group's endTime to the next group's startTime
sortedStartTimes.forEach((startTime, idx) => {
const cueGroup = cuesGroupedByStartTime[startTime];
const nextTime = Number(sortedStartTimes[idx + 1]) || videoDuration;
// Map each cue's endTime to the next group's startTime
cueGroup.forEach((cue) => {
cue.endTime = nextTime;
});
});
};
/**
* Create metadata text track on video.js if it does not exist
*
* @param {Object} inbandTextTracks a reference to current inbandTextTracks
* @param {string} dispatchType the inband metadata track dispatch type
* @param {Object} tech the video.js tech
* @private
*/
export const createMetadataTrackIfNotExists = (inbandTextTracks, dispatchType, tech) => {
if (inbandTextTracks.metadataTrack_) {
return;
}
inbandTextTracks.metadataTrack_ = tech.addRemoteTextTrack({
kind: 'metadata',
label: 'Timed Metadata'
}, false).track;
inbandTextTracks.metadataTrack_.inBandMetadataTrackDispatchType = dispatchType;
};
/**
* Remove cues from a track on video.js.
*
* @param {Double} start start of where we should remove the cue
* @param {Double} end end of where we should remove the cue
* @param {Object} track the text track to remove the cues from
* @private
*/
export const removeCuesFromTrack = function(start, end, track) {
let i;
let cue;
if (!track) {
return;
}
if (!track.cues) {
return;
}
i = track.cues.length;
while (i--) {
cue = track.cues[i];
// Remove any cue within the provided start and end time
if (cue.startTime >= start && cue.endTime <= end) {
track.removeCue(cue);
}
}
};
/**
* Remove duplicate cues from a track on video.js (a cue is considered a
* duplicate if it has the same time interval and text as another)
*
* @param {Object} track the text track to remove the duplicate cues from
* @private
*/
export const removeDuplicateCuesFromTrack = function(track) {
const cues = track.cues;
if (!cues) {
return;
}
for (let i = 0; i < cues.length; i++) {
const duplicates = [];
let occurrences = 0;
for (let j = 0; j < cues.length; j++) {
if (
cues[i].startTime === cues[j].startTime &&
cues[i].endTime === cues[j].endTime &&
cues[i].text === cues[j].text
) {
occurrences++;
if (occurrences > 1) {
duplicates.push(cues[j]);
}
}
}
if (duplicates.length) {
duplicates.forEach(dupe => track.removeCue(dupe));
}
}
};
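// Illustrative usage sketch (not part of the original file): `inbandTextTracks`
// and `tech` below are hypothetical stand-ins for the caller's track map and a
// video.js tech.
//
// // ensure a 608 captions track exists before cues are added for it
// createCaptionsTrackIfNotExists(inbandTextTracks, tech, 'CC1');
//
// // drop every cue that falls entirely within the 10s-20s window of that track
// removeCuesFromTrack(10, 20, inbandTextTracks.CC1);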

node_modules/@videojs/http-streaming/src/util/time.js generated vendored Normal file

@ -0,0 +1,402 @@
// TODO handle fmp4 case where the timing info is accurate and doesn't involve transmux
/**
* @file time.js
*/
import Playlist from '../playlist';
// Add 25% to the segment duration to account for small discrepancies in segment timing.
// 25% was arbitrarily chosen, and may need to be refined over time.
const SEGMENT_END_FUDGE_PERCENT = 0.25;
/**
* Converts a player time (any time that can be gotten/set from player.currentTime(),
* e.g., any time within player.seekable().start(0) to player.seekable().end(0)) to a
* program time (any time referencing the real world (e.g., EXT-X-PROGRAM-DATE-TIME)).
*
* The containing segment is required as the EXT-X-PROGRAM-DATE-TIME serves as an "anchor
* point" (a point where we have a mapping from program time to player time, with player
* time being the post transmux start of the segment).
*
* For more details, see [this doc](../../docs/program-time-from-player-time.md).
*
* @param {number} playerTime the player time
* @param {Object} segment the segment which contains the player time
* @return {Date} program time
*/
export const playerTimeToProgramTime = (playerTime, segment) => {
if (!segment.dateTimeObject) {
// Can't convert without an "anchor point" for the program time (i.e., a time that can
// be used to map the start of a segment with a real world time).
return null;
}
const transmuxerPrependedSeconds = segment.videoTimingInfo.transmuxerPrependedSeconds;
const transmuxedStart = segment.videoTimingInfo.transmuxedPresentationStart;
// get the start of the content from before old content is prepended
const startOfSegment = transmuxedStart + transmuxerPrependedSeconds;
const offsetFromSegmentStart = playerTime - startOfSegment;
return new Date(segment.dateTimeObject.getTime() + offsetFromSegmentStart * 1000);
};
export const originalSegmentVideoDuration = (videoTimingInfo) => {
return videoTimingInfo.transmuxedPresentationEnd -
videoTimingInfo.transmuxedPresentationStart -
videoTimingInfo.transmuxerPrependedSeconds;
};
/**
* Finds a segment that contains the time requested given as an ISO-8601 string. The
* returned segment might be an estimate or an accurate match.
*
* @param {string} programTime The ISO-8601 programTime to find a match for
* @param {Object} playlist A playlist object to search within
*/
export const findSegmentForProgramTime = (programTime, playlist) => {
// Assumptions:
// - verifyProgramDateTimeTags has already been run
// - live streams have been started
let dateTimeObject;
try {
dateTimeObject = new Date(programTime);
} catch (e) {
return null;
}
if (!playlist || !playlist.segments || playlist.segments.length === 0) {
return null;
}
let segment = playlist.segments[0];
if (dateTimeObject < segment.dateTimeObject) {
// Requested time is before stream start.
return null;
}
for (let i = 0; i < playlist.segments.length - 1; i++) {
segment = playlist.segments[i];
const nextSegmentStart = playlist.segments[i + 1].dateTimeObject;
if (dateTimeObject < nextSegmentStart) {
break;
}
}
const lastSegment = playlist.segments[playlist.segments.length - 1];
const lastSegmentStart = lastSegment.dateTimeObject;
const lastSegmentDuration = lastSegment.videoTimingInfo ?
originalSegmentVideoDuration(lastSegment.videoTimingInfo) :
lastSegment.duration + lastSegment.duration * SEGMENT_END_FUDGE_PERCENT;
const lastSegmentEnd =
new Date(lastSegmentStart.getTime() + lastSegmentDuration * 1000);
if (dateTimeObject > lastSegmentEnd) {
// Beyond the end of the stream, or our best guess of the end of the stream.
return null;
}
if (dateTimeObject > lastSegmentStart) {
segment = lastSegment;
}
return {
segment,
estimatedStart: segment.videoTimingInfo ?
segment.videoTimingInfo.transmuxedPresentationStart :
Playlist.duration(
playlist,
playlist.mediaSequence + playlist.segments.indexOf(segment)
),
// Although, given that all segments have accurate date time objects, the segment
// selected should be accurate, unless the video has been transmuxed at some point
// (determined by the presence of the videoTimingInfo object), the segment's "player
// time" (the start time in the player) can't be considered accurate.
type: segment.videoTimingInfo ? 'accurate' : 'estimate'
};
};
/**
* Finds a segment that contains the given player time(in seconds).
*
* @param {number} time The player time to find a match for
* @param {Object} playlist A playlist object to search within
*/
export const findSegmentForPlayerTime = (time, playlist) => {
// Assumptions:
// - there will always be a segment.duration
// - we can start from zero
// - segments are in time order
if (!playlist || !playlist.segments || playlist.segments.length === 0) {
return null;
}
let segmentEnd = 0;
let segment;
for (let i = 0; i < playlist.segments.length; i++) {
segment = playlist.segments[i];
// videoTimingInfo is set after the segment is downloaded and transmuxed, and
// should contain the most accurate values we have for the segment's player times.
//
// Use the accurate transmuxedPresentationEnd value if it is available, otherwise fall
// back to an estimate based on the manifest derived (inaccurate) segment.duration, to
// calculate an end value.
segmentEnd = segment.videoTimingInfo ?
segment.videoTimingInfo.transmuxedPresentationEnd : segmentEnd + segment.duration;
if (time <= segmentEnd) {
break;
}
}
const lastSegment = playlist.segments[playlist.segments.length - 1];
if (lastSegment.videoTimingInfo &&
lastSegment.videoTimingInfo.transmuxedPresentationEnd < time) {
// The time requested is beyond the stream end.
return null;
}
if (time > segmentEnd) {
// The time is within or beyond the last segment.
//
// Check to see if the time is beyond a reasonable guess of the end of the stream.
if (time > segmentEnd + (lastSegment.duration * SEGMENT_END_FUDGE_PERCENT)) {
// Technically, because the duration value is only an estimate, the time may still
// exist in the last segment, however, there isn't enough information to make even
// a reasonable estimate.
return null;
}
segment = lastSegment;
}
return {
segment,
estimatedStart: segment.videoTimingInfo ?
segment.videoTimingInfo.transmuxedPresentationStart : segmentEnd - segment.duration,
// Because videoTimingInfo is only set after transmux, it is the only way to get
// accurate timing values.
type: segment.videoTimingInfo ? 'accurate' : 'estimate'
};
};
/**
* Gives the offset of the comparisonTimestamp from the programTime timestamp in seconds.
* If the offset returned is positive, the programTime occurs after the
* comparisonTimestamp.
* If the offset is negative, the programTime occurs before the comparisonTimestamp.
*
* @param {string} comparisonTimeStamp An ISO-8601 timestamp to compare against
* @param {string} programTime The programTime as an ISO-8601 string
* @return {number} offset
*/
export const getOffsetFromTimestamp = (comparisonTimeStamp, programTime) => {
let segmentDateTime;
let programDateTime;
try {
segmentDateTime = new Date(comparisonTimeStamp);
programDateTime = new Date(programTime);
} catch (e) {
// TODO handle error
}
const segmentTimeEpoch = segmentDateTime.getTime();
const programTimeEpoch = programDateTime.getTime();
return (programTimeEpoch - segmentTimeEpoch) / 1000;
};
/**
* Checks that all segments in this playlist have programDateTime tags.
*
* @param {Object} playlist A playlist object
*/
export const verifyProgramDateTimeTags = (playlist) => {
if (!playlist.segments || playlist.segments.length === 0) {
return false;
}
for (let i = 0; i < playlist.segments.length; i++) {
const segment = playlist.segments[i];
if (!segment.dateTimeObject) {
return false;
}
}
return true;
};
/**
* Returns the programTime of the media given a playlist and a playerTime.
* The playlist must have programDateTime tags for a programDateTime tag to be returned.
* If the segments containing the time requested have not been buffered yet, an estimate
* may be returned to the callback.
*
* @param {Object} args
* @param {Object} args.playlist A playlist object to search within
* @param {number} time A playerTime in seconds
* @param {Function} callback(err, programTime)
* @return {string} err.message A detailed error message
* @return {Object} programTime
* @return {number} programTime.mediaSeconds The player time in seconds
* @return {string} programTime.programDateTime The programTime as an ISO-8601 String
*/
export const getProgramTime = ({
playlist,
time = undefined,
callback
}) => {
if (!callback) {
throw new Error('getProgramTime: callback must be provided');
}
if (!playlist || time === undefined) {
return callback({
message: 'getProgramTime: playlist and time must be provided'
});
}
const matchedSegment = findSegmentForPlayerTime(time, playlist);
if (!matchedSegment) {
return callback({
message: 'valid programTime was not found'
});
}
if (matchedSegment.type === 'estimate') {
return callback({
message:
'Accurate programTime could not be determined.' +
' Please seek to e.seekTime and try again',
seekTime: matchedSegment.estimatedStart
});
}
const programTimeObject = {
mediaSeconds: time
};
const programTime = playerTimeToProgramTime(time, matchedSegment.segment);
if (programTime) {
programTimeObject.programDateTime = programTime.toISOString();
}
return callback(null, programTimeObject);
};
/**
* Seeks in the player to a time that matches the given programTime ISO-8601 string.
*
* @param {Object} args
* @param {string} args.programTime A programTime to seek to as an ISO-8601 String
* @param {Object} args.playlist A playlist to look within
* @param {number} args.retryCount The number of times to try for an accurate seek. Default is 2.
* @param {Function} args.seekTo A method to perform a seek
* @param {boolean} args.pauseAfterSeek Whether to end in a paused state after seeking. Default is true.
* @param {Object} args.tech The tech to seek on
* @param {Function} args.callback(err, newTime) A callback to return the new time to
* @return {string} err.message A detailed error message
* @return {number} newTime The exact time that was seeked to in seconds
*/
export const seekToProgramTime = ({
programTime,
playlist,
retryCount = 2,
seekTo,
pauseAfterSeek = true,
tech,
callback
}) => {
if (!callback) {
throw new Error('seekToProgramTime: callback must be provided');
}
if (typeof programTime === 'undefined' || !playlist || !seekTo) {
return callback({
message: 'seekToProgramTime: programTime, seekTo and playlist must be provided'
});
}
if (!playlist.endList && !tech.hasStarted_) {
return callback({
message: 'player must be playing a live stream to start buffering'
});
}
if (!verifyProgramDateTimeTags(playlist)) {
return callback({
message: 'programDateTime tags must be provided in the manifest ' + playlist.resolvedUri
});
}
const matchedSegment = findSegmentForProgramTime(programTime, playlist);
// no match
if (!matchedSegment) {
return callback({
message: `${programTime} was not found in the stream`
});
}
const segment = matchedSegment.segment;
const mediaOffset = getOffsetFromTimestamp(
segment.dateTimeObject,
programTime
);
if (matchedSegment.type === 'estimate') {
// we've run out of retries
if (retryCount === 0) {
return callback({
message: `${programTime} is not buffered yet. Try again`
});
}
seekTo(matchedSegment.estimatedStart + mediaOffset);
tech.one('seeked', () => {
seekToProgramTime({
programTime,
playlist,
retryCount: retryCount - 1,
seekTo,
pauseAfterSeek,
tech,
callback
});
});
return;
}
// Since the segment.start value is determined from the buffered end or ending time
// of the prior segment, the seekToTime doesn't need to account for any transmuxer
// modifications.
const seekToTime = segment.start + mediaOffset;
const seekedCallback = () => {
return callback(null, tech.currentTime());
};
// listen for seeked event
tech.one('seeked', seekedCallback);
// pause before seeking as video.js will restore this state
if (pauseAfterSeek) {
tech.pause();
}
seekTo(seekToTime);
};
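// Illustrative usage sketch (not part of the original file): the playlist
// reference below is hypothetical; any playlist whose segments carry
// dateTimeObject and (after transmuxing) videoTimingInfo will do.
//
// getProgramTime({
//   playlist: currentMediaPlaylist, // hypothetical reference to the active media playlist
//   time: 35,
//   callback: (err, programTime) => {
//     if (err) {
//       // when only an estimate was possible, err.seekTime suggests where to seek first
//       return;
//     }
//     // programTime.mediaSeconds === 35
//     // programTime.programDateTime is an ISO-8601 string, when it could be derived
//   }
// });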


@ -0,0 +1,9 @@
const toTitleCase = function(string) {
if (typeof string !== 'string') {
return string;
}
return string.replace(/./, (w) => w.toUpperCase());
};
export default toTitleCase;
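// Illustrative note (not part of the original file): the regex /./ matches only
// the first character, so only that character is uppercased.
//
// toTitleCase('audio');      // => 'Audio'
// toTitleCase('main audio'); // => 'Main audio'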


@ -0,0 +1,42 @@
export const workerCallback = function(options) {
const transmuxer = options.transmuxer;
const endAction = options.endAction || options.action;
const callback = options.callback;
const message = Object.assign({}, options, {endAction: null, transmuxer: null, callback: null});
const listenForEndEvent = (event) => {
if (event.data.action !== endAction) {
return;
}
transmuxer.removeEventListener('message', listenForEndEvent);
// transfer ownership of bytes back to us.
if (event.data.data) {
event.data.data = new Uint8Array(
event.data.data,
options.byteOffset || 0,
options.byteLength || event.data.data.byteLength
);
if (options.data) {
options.data = event.data.data;
}
}
callback(event.data);
};
transmuxer.addEventListener('message', listenForEndEvent);
if (options.data) {
const isArrayBuffer = options.data instanceof ArrayBuffer;
message.byteOffset = isArrayBuffer ? 0 : options.data.byteOffset;
message.byteLength = options.data.byteLength;
const transfers = [isArrayBuffer ? options.data : options.data.buffer];
transmuxer.postMessage(message, transfers);
} else {
transmuxer.postMessage(message);
}
};
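// Illustrative usage sketch (not part of the original file): the action name,
// worker, and bytes below are hypothetical and depend on what the transmuxer
// worker actually implements.
//
// workerCallback({
//   action: 'probeTs',              // hypothetical worker action
//   data: segmentBytes,             // a Uint8Array; its underlying buffer is transferred
//   transmuxer: transmuxerWorker,   // a Web Worker exposing postMessage/addEventListener
//   callback: (result) => {
//     // result.data, when present, is a Uint8Array view over the transferred bytes
//   }
// });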