App-MHFS
view release on metacpan or search on metacpan
share/public_html/static/hls.js view on Meta::CPAN
var prevStartFrag = findFirstFragWithCC(prevFrags, curFrags[0].cc);
if (!prevStartFrag || prevStartFrag && !prevStartFrag.startPTS) {
logger["b" /* logger */].log('No frag in previous level to align on');
return;
}
return prevStartFrag;
}
// Shift every fragment of a playlist by `sliding` seconds and flag the
// playlist's PTS as known. Mutates `details` in place.
function adjustPts(sliding, details) {
  for (var i = 0; i < details.fragments.length; i++) {
    var fragment = details.fragments[i];
    if (!fragment) {
      continue;
    }
    var shiftedStart = fragment.start + sliding;
    fragment.start = fragment.startPTS = shiftedStart;
    fragment.endPTS = shiftedStart + fragment.duration;
  }
  details.PTSKnown = true;
}
// If a change in CC is detected, the PTS can no longer be relied upon
// Attempt to align the level by using the last level - find the last frag matching the current CC and use its PTS
// as a reference
// When a continuity-counter (CC) change makes PTS unreliable, re-anchor the
// new playlist against the previous level: first by borrowing the start of a
// matching-CC fragment from the last level, then — if sliding is still
// unknown — via the EXT-X-PROGRAM-DATE-TIME delta between both playlists.
function alignDiscontinuities(lastFrag, lastLevel, details) {
  if (shouldAlignOnDiscontinuities(lastFrag, lastLevel, details)) {
    var referenceFrag = findDiscontinuousReferenceFrag(lastLevel.details, details);
    if (referenceFrag) {
      logger["b" /* logger */].log('Adjusting PTS using last level due to CC increase within current level');
      adjustPts(referenceFrag.start, details);
    }
  }
  // try to align using programDateTime attribute (if available)
  if (details.PTSKnown !== false) {
    return;
  }
  var lastDetails = lastLevel && lastLevel.details;
  if (!(lastDetails && lastDetails.fragments && lastDetails.fragments.length)) {
    return;
  }
  // Example: if the last level slid by 1000s with a first-frag PDT of
  // 1:10:00 AM and the new playlist's first-frag PDT is 1:10:08 AM, the new
  // playlist's sliding is 1000 + 8 = 1008s.
  // The PDT difference is in milliseconds while frag.start is in seconds.
  var sliding = (details.programDateTime - lastDetails.programDateTime) / 1000 + lastDetails.fragments[0].start;
  if (!isNaN(sliding)) {
    logger["b" /* logger */].log('adjusting PTS using programDateTime delta, sliding:' + sliding.toFixed(3));
    adjustPts(sliding, details);
  }
}
// CONCATENATED MODULE: ./src/task-loop.js
// Babel helper: throws when a class constructor is invoked without `new`.
function task_loop__classCallCheck(instance, Constructor) {
  if (instance instanceof Constructor) {
    return;
  }
  throw new TypeError("Cannot call a class as a function");
}
// Babel helper: returns the super() call result when it is an object or a
// function (explicit constructor return), otherwise the derived `this`.
// Throws if `self` is unset, i.e. super() was never called.
function task_loop__possibleConstructorReturn(self, call) {
  if (!self) {
    throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
  }
  var callType = typeof call;
  if (call && (callType === "object" || callType === "function")) {
    return call;
  }
  return self;
}
// Babel `_inherits` helper: validates that superClass is a function or null,
// then wires up the prototype chain. NOTE(review): this line is truncated by
// the file viewer ("Object.cre..."); the remainder is not visible here.
function task_loop__inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.cre...
/**
* Sub-class specialization of EventHandler base class.
*
* TaskLoop allows to schedule a task function being called (optionally repeatedly) on the main loop,
* scheduled asynchronously, avoiding recursive calls in the same tick.
*
* The task itself is implemented in `doTick`. It can be requested and called for single execution
* using the `tick` method.
*
* It will be assured that the task execution method (`tick`) only gets called once per main loop "tick",
* no matter how often it gets requested for execution. Execution in further ticks will be scheduled accordingly.
*
* If further execution requests have already been scheduled on the next tick, it can be checked with `hasNextTick`,
* and cancelled with `clearNextTick`.
*
* The task can be scheduled as an interval repeatedly with a period as parameter (see `setInterval`, `clearInterval`).
*
* Sub-classes need to implement the `doTick` method which will effectively have the task execution routine.
*
* Further explanations:
*
* The baseclass has a `tick` method that will schedule the doTick call. It may be called synchronously
* only for a stack-depth of one. On re-entrant calls, subsequent calls are scheduled for next main loop ticks.
*
* When the task execution (`tick` method) is called in a re-entrant way, this is detected and
* we are limiting the task execution per call stack to exactly one, but scheduling/postponing further
* task processing on the next main loop iteration (also known as "next tick" in the Node/JS runtime lingo).
*/
var TaskLoop = function (_EventHandler) {
task_loop__inherits(TaskLoop, _EventHandler);
// Constructor: forwards `hls` plus any extra event-name arguments to the
// EventHandler base class, then initialises the tick bookkeeping fields.
function TaskLoop(hls) {
  task_loop__classCallCheck(this, TaskLoop);
  // Collect the rest arguments (event names) to pass through to the base class.
  var extraEvents = Array.prototype.slice.call(arguments, 1);
  var _this = task_loop__possibleConstructorReturn(this, _EventHandler.call.apply(_EventHandler, [this, hls].concat(extraEvents)));
  _this._tickInterval = null;
  _this._tickTimer = null;
  _this._tickCallCount = 0;
  // Pre-bound tick so timers always invoke it with the right `this`.
  _this._boundTick = _this.tick.bind(_this);
  return _this;
}
/**
* @override
*/
TaskLoop.prototype.onHandlerDestroying = function onHandlerDestroying() {
// clear all timers (the one-shot next-tick timer and the repeating interval) before unregistering from event bus,
// so no scheduled tick can fire on a destroyed handler
this.clearNextTick();
this.clearInterval();
};
/**
* @returns {boolean}
*/
// Reports whether a repeating tick interval is currently scheduled.
TaskLoop.prototype.hasInterval = function hasInterval() {
  return Boolean(this._tickInterval);
};
/**
* @returns {boolean}
*/
// Reports whether a one-shot tick is already scheduled for the next loop turn.
TaskLoop.prototype.hasNextTick = function hasNextTick() {
  return Boolean(this._tickTimer);
};
/**
* @param {number} millis Interval time (ms)
* @returns {boolean} True when interval has been scheduled, false when already scheduled (no effect)
*/
share/public_html/static/hls.js view on Meta::CPAN
} else {
// VoD playlist: if bufferEnd before start of playlist, load first fragment
if (bufferEnd < start) {
frag = fragments[0];
}
}
}
if (!frag) {
frag = this._findFragment(start, fragPrevious, fragLen, fragments, bufferEnd, end, levelDetails);
}
if (frag) {
if (frag.encrypted) {
logger["b" /* logger */].log('Loading key for ' + frag.sn + ' of [' + levelDetails.startSN + ' ,' + levelDetails.endSN + '],level ' + level);
this._loadKey(frag);
} else {
logger["b" /* logger */].log('Loading ' + frag.sn + ' of [' + levelDetails.startSN + ' ,' + levelDetails.endSN + '],level ' + level + ', currentTime:' + pos.toFixed(3) + ',bufferEnd:' + bufferEnd.toFixed(3));
this._loadFragment(frag);
}
}
};
/**
 * Chooses which fragment to load for a live playlist, keeping playback close
 * to the live edge.
 * - If the buffer end drifted too far behind the live window, resets
 *   currentTime/nextLoadPosition to the computed live sync position.
 * - Returns null when the buffer already reaches past the playlist end.
 * - On a level switch without PTS info, picks a fragment by SN, by matching
 *   continuity counter, by PDT, or falls back to the middle fragment.
 * Returns the chosen fragment, or undefined to let the caller's generic
 * _findFragment logic run, or null to load nothing this tick.
 */
StreamController.prototype._ensureFragmentAtLivePoint = function _ensureFragmentAtLivePoint(levelDetails, bufferEnd, start, end, fragPrevious, fragments, fragLen) {
var config = this.hls.config,
media = this.media;
var frag = void 0;
// check if requested position is within seekable boundaries :
// logger.log(`start/pos/bufEnd/seeking:${start.toFixed(3)}/${pos.toFixed(3)}/${bufferEnd.toFixed(3)}/${this.media.seeking}`);
// maxLatency: explicit duration takes precedence over count * target duration
var maxLatency = config.liveMaxLatencyDuration !== undefined ? config.liveMaxLatencyDuration : config.liveMaxLatencyDurationCount * levelDetails.targetduration;
if (bufferEnd < Math.max(start - config.maxFragLookUpTolerance, end - maxLatency)) {
var liveSyncPosition = this.liveSyncPosition = this.computeLivePosition(start, levelDetails);
logger["b" /* logger */].log('buffer end: ' + bufferEnd.toFixed(3) + ' is located too far from the end of live sliding playlist, reset currentTime to : ' + liveSyncPosition.toFixed(3));
bufferEnd = liveSyncPosition;
// only seek the media element once it has data and the target is seekable
if (media && media.readyState && media.duration > liveSyncPosition) {
media.currentTime = liveSyncPosition;
}
this.nextLoadPosition = liveSyncPosition;
}
// if end of buffer greater than live edge, don't load any fragment
// this could happen if live playlist intermittently slides in the past.
// level 1 loaded [182580161,182580167]
// level 1 loaded [182580162,182580169]
// Loading 182580168 of [182580162 ,182580169],level 1 ..
// Loading 182580169 of [182580162 ,182580169],level 1 ..
// level 1 loaded [182580162,182580168] <============= here we should have bufferEnd > end. in that case break to avoid reloading 182580168
// level 1 loaded [182580164,182580171]
//
// don't return null in case media not loaded yet (readystate === 0)
if (levelDetails.PTSKnown && bufferEnd > end && media && media.readyState) {
return null;
}
if (this.startFragRequested && !levelDetails.PTSKnown) {
/* we are switching level on live playlist, but we don't have any PTS info for that quality level ...
try to load frag matching with next SN.
even if SN are not synchronized between playlists, loading this frag will help us
compute playlist sliding and find the right one after in case it was not the right consecutive one */
if (fragPrevious) {
if (!levelDetails.programDateTime) {
// Uses buffer and sequence number to calculate switch segment (required if using EXT-X-DISCONTINUITY-SEQUENCE)
var targetSN = fragPrevious.sn + 1;
if (targetSN >= levelDetails.startSN && targetSN <= levelDetails.endSN) {
var fragNext = fragments[targetSN - levelDetails.startSN];
// only take the next-SN frag if it shares the previous frag's continuity counter
if (fragPrevious.cc === fragNext.cc) {
frag = fragNext;
logger["b" /* logger */].log('live playlist, switching playlist, load frag with next SN: ' + frag.sn);
}
}
// next frag SN not available (or not with same continuity counter)
// look for a frag sharing the same CC
if (!frag) {
frag = binary_search.search(fragments, function (frag) {
return fragPrevious.cc - frag.cc;
});
if (frag) {
logger["b" /* logger */].log('live playlist, switching playlist, load frag with same CC: ' + frag.sn);
}
}
} else {
// Relies on PDT in order to switch bitrates (Support EXT-X-DISCONTINUITY without EXT-X-DISCONTINUITY-SEQUENCE)
frag = findFragmentByPDT(fragments, fragPrevious.endPdt + 1);
}
}
if (!frag) {
/* we have no idea about which fragment should be loaded.
so let's load mid fragment. it will help computing playlist sliding and find the right one
*/
frag = fragments[Math.min(fragLen - 1, Math.round(fragLen / 2))];
logger["b" /* logger */].log('live playlist, switching playlist, unknown, load middle frag : ' + frag.sn);
}
}
return frag;
};
StreamController.prototype._findFragment = function _findFragment(start, fragPrevious, fragLen, fragments, bufferEnd, end, levelDetails) {
var config = this.hls.config;
var fragBySN = function fragBySN() {
return findFragmentBySN(fragPrevious, fragments, bufferEnd, end, config.maxFragLookUpTolerance);
};
var frag = void 0;
var foundFrag = void 0;
if (bufferEnd < end) {
if (!levelDetails.programDateTime) {
// Uses buffer and sequence number to calculate switch segment (required if using EXT-X-DISCONTINUITY-SEQUENCE)
foundFrag = findFragmentBySN(fragPrevious, fragments, bufferEnd, end, config.maxFragLookUpTolerance);
} else {
// Relies on PDT in order to switch bitrates (Support EXT-X-DISCONTINUITY without EXT-X-DISCONTINUITY-SEQUENCE)
foundFrag = findFragmentByPDT(fragments, calculateNextPDT(start, bufferEnd, levelDetails));
if (!foundFrag || fragment_finders_fragmentWithinToleranceTest(bufferEnd, config.maxFragLookUpTolerance, foundFrag)) {
// Fall back to SN order if finding by PDT returns a frag which won't fit within the stream
// fragmentWithToleranceTest returns 0 if the frag is within tolerance; 1 or -1 otherwise
logger["b" /* logger */].warn('Frag found by PDT search did not fit within tolerance; falling back to finding by SN');
foundFrag = fragBySN();
}
}
share/public_html/static/hls.js view on Meta::CPAN
/**
 * FRAG_PARSING_DATA handler for the main stream controller.
 * For the currently loading fragment (matched by sn/level while PARSING):
 * fills in missing endPTS/endDTS from the fragment duration, records which
 * elementary streams were found, backtracks when video frames were dropped
 * before a keyframe, updates level PTS/DTS bookkeeping, and forwards the
 * remuxed payload(s) to the buffer controller.
 * NOTE(review): the log line below is truncated in this file view.
 */
StreamController.prototype.onFragParsingData = function onFragParsingData(data) {
var _this2 = this;
var fragCurrent = this.fragCurrent;
var fragNew = data.frag;
if (fragCurrent && data.id === 'main' && fragNew.sn === fragCurrent.sn && fragNew.level === fragCurrent.level && !(data.type === 'audio' && this.altAudio) && // filter out main audio if audio track is loaded through audio stream controller
this.state === State.PARSING) {
var level = this.levels[this.level],
frag = fragCurrent;
// remuxer may not report an endPTS; derive it from the playlist duration
if (isNaN(data.endPTS)) {
data.endPTS = data.startPTS + fragCurrent.duration;
data.endDTS = data.startDTS + fragCurrent.duration;
}
if (data.hasAudio === true) {
frag.addElementaryStream(loader_fragment.ElementaryStreamTypes.AUDIO);
}
if (data.hasVideo === true) {
frag.addElementaryStream(loader_fragment.ElementaryStreamTypes.VIDEO);
}
logger["b" /* logger */].log('Parsed ' + data.type + ',PTS:[' + data.startPTS.toFixed(3) + ',' + data.endPTS.toFixed(3) + '],DTS:[' + data.startDTS.toFixed(3) + '/' + data.endDTS.toFixed(3) + '],nb:' + data.nb + ',dropped:' + (data.dropped || 0...
// Detect gaps in a fragment and try to fix it by finding a keyframe in the previous fragment (see _findFragments)
if (data.type === 'video') {
frag.dropped = data.dropped;
if (frag.dropped) {
if (!frag.backtracked) {
var levelDetails = level.details;
if (levelDetails && frag.sn === levelDetails.startSN) {
// nothing earlier to backtrack into: append with the gap
logger["b" /* logger */].warn('missing video frame(s) on first frag, appending with gap', frag.sn);
} else {
logger["b" /* logger */].warn('missing video frame(s), backtracking fragment', frag.sn);
// Return back to the IDLE state without appending to buffer
// Causes findFragments to backtrack a segment and find the keyframe
// Audio fragments arriving before video sets the nextLoadPosition, causing _findFragments to skip the backtracked fragment
this.fragmentTracker.removeFragment(frag);
frag.backtracked = true;
this.nextLoadPosition = data.startPTS;
this.state = State.IDLE;
this.fragPrevious = frag;
this.tick();
return;
}
} else {
// second drop on the same fragment: give up backtracking, append with the gap
logger["b" /* logger */].warn('Already backtracked on this fragment, appending with the gap', frag.sn);
}
} else {
// Only reset the backtracked flag if we've loaded the frag without any dropped frames
frag.backtracked = false;
}
}
var drift = updateFragPTSDTS(level.details, frag, data.startPTS, data.endPTS, data.startDTS, data.endDTS),
hls = this.hls;
hls.trigger(events["a" /* default */].LEVEL_PTS_UPDATED, { details: level.details, level: this.level, drift: drift, type: data.type, start: data.startPTS, end: data.endPTS });
// has remuxer dropped video frames located before first keyframe ?
[data.data1, data.data2].forEach(function (buffer) {
// only append in PARSING state (rationale is that an appending error could happen synchronously on first segment appending)
// in that case it is useless to append following segments
if (buffer && buffer.length && _this2.state === State.PARSING) {
_this2.appended = true;
// arm pending Buffering flag before appending a segment
_this2.pendingBuffering = true;
hls.trigger(events["a" /* default */].BUFFER_APPENDING, { type: data.type, data: buffer, parent: 'main', content: 'data' });
}
});
// trigger handler right now
this.tick();
}
};
// FRAG_PARSED handler for the main stream: once demuxing of the current
// fragment finishes, record the parse timestamp, move to PARSED state and
// check whether all pending buffer appends have completed.
StreamController.prototype.onFragParsed = function onFragParsed(data) {
  var current = this.fragCurrent;
  if (!current || data.id !== 'main') {
    return;
  }
  var parsedFrag = data.frag;
  if (parsedFrag.sn !== current.sn || parsedFrag.level !== current.level || this.state !== State.PARSING) {
    return;
  }
  this.stats.tparsed = window.performance.now();
  this.state = State.PARSED;
  this._checkAppendedParsed();
};
/**
 * AUDIO_TRACK_SWITCHING handler.
 * When switching to the embedded main audio (no track URL), re-syncs fragment
 * scheduling on media.buffered, aborts any in-flight main fragment load,
 * resets the demuxer so a fresh init segment is generated, flushes buffered
 * audio and announces the switch. Alternate-audio switches are handled by the
 * audio stream controller instead.
 */
StreamController.prototype.onAudioTrackSwitching = function onAudioTrackSwitching(data) {
// if any URL found on new audio track, it is an alternate audio track
var altAudio = !!data.url,
trackId = data.id;
// if we switch on main audio, ensure that main fragment scheduling is synced with media.buffered
// don't do anything if we switch to alt audio: audio stream controller is handling it.
// we will just have to change buffer scheduling on audioTrackSwitched
if (!altAudio) {
if (this.mediaBuffer !== this.media) {
logger["b" /* logger */].log('switching on main audio, use media.buffered to schedule main fragment loading');
this.mediaBuffer = this.media;
var fragCurrent = this.fragCurrent;
// we need to refill audio buffer from main: cancel any frag loading to speed up audio switch
// FIX: fragCurrent is null when no fragment load is in flight (it is reset
// below and in other handlers); guard before dereferencing `.loader`.
if (fragCurrent && fragCurrent.loader) {
logger["b" /* logger */].log('switching to main audio track, cancel main fragment load');
fragCurrent.loader.abort();
}
this.fragCurrent = null;
this.fragPrevious = null;
// destroy demuxer to force init segment generation (following audio switch)
if (this.demuxer) {
this.demuxer.destroy();
this.demuxer = null;
}
// switch to IDLE state to load new fragment
this.state = State.IDLE;
}
var hls = this.hls;
// switching to main audio, flush all audio and trigger track switched
hls.trigger(events["a" /* default */].BUFFER_FLUSHING, { startOffset: 0, endOffset: Number.POSITIVE_INFINITY, type: 'audio' });
hls.trigger(events["a" /* default */].AUDIO_TRACK_SWITCHED, { id: trackId });
this.altAudio = false;
}
};
StreamController.prototype.onAudioTrackSwitched = function onAudioTrackSwitched(data) {
var trackId = data.id,
share/public_html/static/hls.js view on Meta::CPAN
if (sb) {
if (!sb.updating) {
// reset sourceBuffer ended flag before appending segment
sb.ended = false;
// logger.log(`appending ${segment.content} ${type} SB, size:${segment.data.length}, ${segment.parent}`);
this.parent = segment.parent;
sb.appendBuffer(segment.data);
this.appendError = 0;
this.appended++;
this.appending = true;
} else {
segments.unshift(segment);
}
} else {
// in case we don't have any source buffer matching with this segment type,
// it means that Mediasource fails to create sourcebuffer
// discard this segment, and trigger update end
this.onSBUpdateEnd();
}
} catch (err) {
// in case any error occured while appending, put back segment in segments table
logger["b" /* logger */].error('error while trying to append buffer:' + err.message);
segments.unshift(segment);
var event = { type: errors["b" /* ErrorTypes */].MEDIA_ERROR, parent: segment.parent };
if (err.code !== 22) {
if (this.appendError) {
this.appendError++;
} else {
this.appendError = 1;
}
event.details = errors["a" /* ErrorDetails */].BUFFER_APPEND_ERROR;
/* with UHD content, we could get loop of quota exceeded error until
browser is able to evict some data from sourcebuffer. retrying help recovering this
*/
if (this.appendError > hls.config.appendErrorMaxRetry) {
logger["b" /* logger */].log('fail ' + hls.config.appendErrorMaxRetry + ' times to append segment in sourceBuffer');
segments = [];
event.fatal = true;
hls.trigger(events["a" /* default */].ERROR, event);
} else {
event.fatal = false;
hls.trigger(events["a" /* default */].ERROR, event);
}
} else {
// QuotaExceededError: http://www.w3.org/TR/html5/infrastructure.html#quotaexceedederror
// let's stop appending any segments, and report BUFFER_FULL_ERROR error
this.segments = [];
event.details = errors["a" /* ErrorDetails */].BUFFER_FULL_ERROR;
event.fatal = false;
hls.trigger(events["a" /* default */].ERROR, event);
}
}
}
}
};
/*
flush specified buffered range,
return true once range has been flushed.
as sourceBuffer.remove() is asynchronous, flushBuffer will be retriggered on sourceBuffer update end
*/
BufferController.prototype.flushBuffer = function flushBuffer(startOffset, endOffset, typeIn) {
var sb = void 0,
i = void 0,
bufStart = void 0,
bufEnd = void 0,
flushStart = void 0,
flushEnd = void 0,
sourceBuffer = this.sourceBuffer;
if (Object.keys(sourceBuffer).length) {
logger["b" /* logger */].log('flushBuffer,pos/start/end: ' + this.media.currentTime.toFixed(3) + '/' + startOffset + '/' + endOffset);
// safeguard to avoid infinite looping : don't try to flush more than the nb of appended segments
if (this.flushBufferCounter < this.appended) {
for (var type in sourceBuffer) {
// check if sourcebuffer type is defined (typeIn): if yes, let's only flush this one
// if no, let's flush all sourcebuffers
if (typeIn && type !== typeIn) {
continue;
}
sb = sourceBuffer[type];
// we are going to flush buffer, mark source buffer as 'not ended'
sb.ended = false;
if (!sb.updating) {
try {
for (i = 0; i < sb.buffered.length; i++) {
bufStart = sb.buffered.start(i);
bufEnd = sb.buffered.end(i);
// workaround firefox not able to properly flush multiple buffered range.
if (navigator.userAgent.toLowerCase().indexOf('firefox') !== -1 && endOffset === Number.POSITIVE_INFINITY) {
flushStart = startOffset;
flushEnd = endOffset;
} else {
flushStart = Math.max(bufStart, startOffset);
flushEnd = Math.min(bufEnd, endOffset);
}
/* sometimes sourcebuffer.remove() does not flush
the exact expected time range.
to avoid rounding issues/infinite loop,
only flush buffer range of length greater than 500ms.
*/
if (Math.min(flushEnd, bufEnd) - flushStart > 0.5) {
this.flushBufferCounter++;
logger["b" /* logger */].log('flush ' + type + ' [' + flushStart + ',' + flushEnd + '], of [' + bufStart + ',' + bufEnd + '], pos:' + this.media.currentTime);
sb.remove(flushStart, flushEnd);
return false;
}
}
} catch (e) {
logger["b" /* logger */].warn('exception while accessing sourcebuffer, it might have been removed from MediaSource');
}
} else {
// logger.log('abort ' + type + ' append in progress');
// this will abort any appending in progress
// sb.abort();
logger["b" /* logger */].warn('cannot flush, sb updating in progress');
return false;
}
share/public_html/static/hls.js view on Meta::CPAN
/**
 * FRAG_PARSING_DATA handler for the audio stream controller.
 * For the currently loading audio fragment (matched by sn/level while
 * PARSING): fills in missing endPTS/endDTS, updates track PTS bookkeeping,
 * handles the audio-track-switch flush once the new track's PTS is known,
 * and queues/forwards remuxed payloads to the buffer controller.
 */
AudioStreamController.prototype.onFragParsingData = function onFragParsingData(data) {
var _this2 = this;
var fragCurrent = this.fragCurrent;
var fragNew = data.frag;
if (fragCurrent && data.id === 'audio' && data.type === 'audio' && fragNew.sn === fragCurrent.sn && fragNew.level === fragCurrent.level && this.state === audio_stream_controller_State.PARSING) {
var trackId = this.trackId,
track = this.tracks[trackId],
hls = this.hls;
// remuxer may not report an endPTS; derive it from the playlist duration
if (isNaN(data.endPTS)) {
data.endPTS = data.startPTS + fragCurrent.duration;
data.endDTS = data.startDTS + fragCurrent.duration;
}
fragCurrent.addElementaryStream(loader_fragment.ElementaryStreamTypes.AUDIO);
logger["b" /* logger */].log('parsed ' + data.type + ',PTS:[' + data.startPTS.toFixed(3) + ',' + data.endPTS.toFixed(3) + '],DTS:[' + data.startDTS.toFixed(3) + '/' + data.endDTS.toFixed(3) + '],nb:' + data.nb);
updateFragPTSDTS(track.details, fragCurrent, data.startPTS, data.endPTS);
var audioSwitch = this.audioSwitch,
media = this.media,
appendOnBufferFlush = false;
// Only flush audio from old audio tracks when PTS is known on new audio track
if (audioSwitch && media) {
if (media.readyState) {
var currentTime = media.currentTime;
logger["b" /* logger */].log('switching audio track : currentTime:' + currentTime);
if (currentTime >= data.startPTS) {
logger["b" /* logger */].log('switching audio track : flushing all audio');
this.state = audio_stream_controller_State.BUFFER_FLUSHING;
hls.trigger(events["a" /* default */].BUFFER_FLUSHING, { startOffset: 0, endOffset: Number.POSITIVE_INFINITY, type: 'audio' });
appendOnBufferFlush = true;
// announce that the initial audio-track-switch flush occurred
this.audioSwitch = false;
hls.trigger(events["a" /* default */].AUDIO_TRACK_SWITCHED, { id: trackId });
}
} else {
// media not ready yet: announce the switch without flushing
this.audioSwitch = false;
hls.trigger(events["a" /* default */].AUDIO_TRACK_SWITCHED, { id: trackId });
}
}
var pendingData = this.pendingData;
// pendingData is created on init-segment parsing; if absent, codec init data never arrived
if (!pendingData) {
logger["b" /* logger */].warn('Apparently attempt to enqueue media payload without codec initialization data upfront');
hls.trigger(events["a" /* default */].ERROR, { type: errors["b" /* ErrorTypes */].MEDIA_ERROR, details: null, fatal: true });
return;
}
if (!this.audioSwitch) {
[data.data1, data.data2].forEach(function (buffer) {
if (buffer && buffer.length) {
pendingData.push({ type: data.type, data: buffer, parent: 'audio', content: 'data' });
}
});
// when a flush is pending, keep the data queued until the flush completes
if (!appendOnBufferFlush && pendingData.length) {
pendingData.forEach(function (appendObj) {
// only append in PARSING state (rationale is that an appending error could happen synchronously on first segment appending)
// in that case it is useless to append following segments
if (_this2.state === audio_stream_controller_State.PARSING) {
// arm pending Buffering flag before appending a segment
_this2.pendingBuffering = true;
_this2.hls.trigger(events["a" /* default */].BUFFER_APPENDING, appendObj);
}
});
this.pendingData = [];
this.appended = true;
}
}
// trigger handler right now
this.tick();
}
};
// FRAG_PARSED handler for the audio stream: record the parse timestamp, move
// to PARSED state and check whether all pending buffer appends completed.
AudioStreamController.prototype.onFragParsed = function onFragParsed(data) {
  var current = this.fragCurrent;
  if (!current || data.id !== 'audio') {
    return;
  }
  var parsedFrag = data.frag;
  if (parsedFrag.sn !== current.sn || parsedFrag.level !== current.level || this.state !== audio_stream_controller_State.PARSING) {
    return;
  }
  this.stats.tparsed = audio_stream_controller_performance.now();
  this.state = audio_stream_controller_State.PARSED;
  this._checkAppendedParsed();
};
// BUFFER_RESET handler: drop sourcebuffer references and the metadata flag.
AudioStreamController.prototype.onBufferReset = function onBufferReset() {
  this.mediaBuffer = null;
  this.videoBuffer = null;
  this.loadedmetadata = false;
};
// BUFFER_CREATED handler: capture the freshly created audio (and, when
// present, video) sourcebuffers used for scheduling.
AudioStreamController.prototype.onBufferCreated = function onBufferCreated(data) {
  var tracks = data.tracks;
  if (tracks.audio) {
    this.mediaBuffer = tracks.audio.buffer;
    this.loadedmetadata = true;
  }
  if (tracks.video) {
    this.videoBuffer = tracks.video.buffer;
  }
};
// BUFFER_APPENDED handler: while parsing/parsed, track whether appends are
// still pending for the audio buffer and re-check parse completion.
AudioStreamController.prototype.onBufferAppended = function onBufferAppended(data) {
  if (data.parent !== 'audio') {
    return;
  }
  var state = this.state;
  if (state !== audio_stream_controller_State.PARSING && state !== audio_stream_controller_State.PARSED) {
    return;
  }
  // check if all buffers have been appended
  this.pendingBuffering = data.pending > 0;
  this._checkAppendedParsed();
};
AudioStreamController.prototype._checkAppendedParsed = function _checkAppendedParsed() {
// trigger handler right now
if (this.state === audio_stream_controller_State.PARSED && (!this.appended || !this.pendingBuffering)) {
var frag = this.fragCurrent,
stats = this.stats,
hls = this.hls;
share/public_html/static/hls.js view on Meta::CPAN
/**
 * TimelineController constructor: wires up event handlers and initialises
 * subtitle/caption state (text tracks, unparsed VTT fragment queue, cue
 * ranges, CEA-608 channel parsers when enabled).
 * NOTE(review): the EventHandler.call line below is truncated in this view.
 */
function TimelineController(hls) {
timeline_controller__classCallCheck(this, TimelineController);
var _this = timeline_controller__possibleConstructorReturn(this, _EventHandler.call(this, hls, events["a" /* default */].MEDIA_ATTACHING, events["a" /* default */].MEDIA_DETACHING, events["a" /* default */].FRAG_PARSING_USERDATA, events["a" /* de...
_this.hls = hls;
_this.config = hls.config;
_this.enabled = true;
_this.Cues = hls.config.cueHandler;
_this.textTracks = [];
_this.tracks = [];
// VTT fragments received before the initial PTS is known are queued here
_this.unparsedVttFrags = [];
_this.initPTS = undefined;
_this.cueRanges = [];
_this.captionsTracks = {};
// label/language for the two CEA-608 caption channels, from config
_this.captionsProperties = {
textTrack1: {
label: _this.config.captionsTextTrack1Label,
languageCode: _this.config.captionsTextTrack1LanguageCode
},
textTrack2: {
label: _this.config.captionsTextTrack2Label,
languageCode: _this.config.captionsTextTrack2LanguageCode
}
};
if (_this.config.enableCEA708Captions) {
var channel1 = new output_filter(_this, 'textTrack1');
var channel2 = new output_filter(_this, 'textTrack2');
_this.cea608Parser = new cea_608_parser(0, channel1, channel2);
}
return _this;
}
// Adds a caption cue for `trackName`, merging its time span into previously
// seen ranges; cues overlapping an existing range by more than 50% are
// considered duplicates and dropped.
TimelineController.prototype.addCues = function addCues(trackName, startTime, endTime, screen) {
  var ranges = this.cueRanges;
  var merged = false;
  var idx = ranges.length;
  while (idx--) {
    var cueRange = ranges[idx];
    var overlap = intersection(cueRange[0], cueRange[1], startTime, endTime);
    if (overlap < 0) {
      continue;
    }
    // extend the existing range to cover the new cue
    cueRange[0] = Math.min(cueRange[0], startTime);
    cueRange[1] = Math.max(cueRange[1], endTime);
    merged = true;
    if (overlap / (endTime - startTime) > 0.5) {
      return;
    }
  }
  if (!merged) {
    ranges.push([startTime, endTime]);
  }
  this.Cues.newCue(this.captionsTracks[trackName], startTime, endTime, screen);
};
// Triggered when an initial PTS is found; used for synchronisation of WebVTT.
TimelineController.prototype.onInitPtsFound = function onInitPtsFound(data) {
var _this2 = this;
if (typeof this.initPTS === 'undefined') {
this.initPTS = data.initPTS;
}
// Due to asynchrony, initial PTS may arrive later than the first VTT fragments are loaded.
// Parse any unparsed fragments upon receiving the initial PTS.
if (this.unparsedVttFrags.length) {
this.unparsedVttFrags.forEach(function (frag) {
_this2.onFragLoaded(frag);
});
this.unparsedVttFrags = [];
}
};
// Returns the first media text track carrying the `trackName` marker property
// (set by createCaptionsTrack to tag Hls.js-managed tracks), or null.
TimelineController.prototype.getExistingTrack = function getExistingTrack(trackName) {
  var media = this.media;
  if (!media) {
    return null;
  }
  var textTracks = media.textTracks;
  for (var idx = 0; idx < textTracks.length; idx++) {
    if (textTracks[idx][trackName]) {
      return textTracks[idx];
    }
  }
  return null;
};
// Lazily creates (or reuses) the native captions TextTrack backing the given
// CEA-608 channel name ('textTrack1' / 'textTrack2').
TimelineController.prototype.createCaptionsTrack = function createCaptionsTrack(trackName) {
  var captionsTracks = this.captionsTracks;
  if (captionsTracks[trackName]) {
    return;
  }
  var props = this.captionsProperties[trackName];
  var label = props.label;
  var languageCode = props.languageCode;
  // Enable reuse of existing text track.
  var existingTrack = this.getExistingTrack(trackName);
  if (existingTrack) {
    captionsTracks[trackName] = existingTrack;
    clearCurrentCues(captionsTracks[trackName]);
    sendAddTrackEvent(captionsTracks[trackName], this.media);
    return;
  }
  var textTrack = this.createTextTrack('captions', label, languageCode);
  if (textTrack) {
    // Set a special property on the track so we know it's managed by Hls.js
    textTrack[trackName] = true;
    captionsTracks[trackName] = textTrack;
  }
};
// Thin wrapper over HTMLMediaElement#addTextTrack; yields undefined when no
// media element is attached yet.
TimelineController.prototype.createTextTrack = function createTextTrack(kind, label, lang) {
  var media = this.media;
  if (!media) {
    return;
  }
  return media.addTextTrack(kind, label, lang);
};
// Tears the controller down through the shared EventHandler destroy logic
// (unregisters all event-bus listeners).
TimelineController.prototype.destroy = function destroy() {
event_handler.prototype.destroy.call(this);
};
TimelineController.prototype.onMediaAttaching = function onMediaAttaching(data) {
this.media = data.media;
share/public_html/static/hls.js view on Meta::CPAN
}
};
/**
 * MANIFEST_LOADED handler: resets subtitle state and, when WebVTT is enabled,
 * (re)builds one text track per subtitle rendition declared in the manifest,
 * reusing compatible in-use tracks where possible.
 */
TimelineController.prototype.onManifestLoaded = function onManifestLoaded(data) {
var _this3 = this;
this.textTracks = [];
this.unparsedVttFrags = this.unparsedVttFrags || [];
this.initPTS = undefined;
this.cueRanges = [];
if (this.config.enableWebVTT) {
this.tracks = data.subtitles || [];
var inUseTracks = this.media ? this.media.textTracks : [];
this.tracks.forEach(function (track, index) {
var textTrack = void 0;
if (index < inUseTracks.length) {
var inUseTrack = inUseTracks[index];
// Reuse tracks with the same label, but do not reuse 608/708 tracks
if (reuseVttTextTrack(inUseTrack, track)) {
textTrack = inUseTrack;
}
}
if (!textTrack) {
textTrack = _this3.createTextTrack('subtitles', track.name, track.lang);
}
// FIX: createTextTrack returns undefined when no media element is attached
// yet; guard before touching `mode` to avoid a TypeError. The (possibly
// undefined) entry is still pushed so textTracks stays index-aligned with
// this.tracks (see _parseVTTs, which indexes by frag.trackId).
if (textTrack) {
if (track.default) {
textTrack.mode = _this3.hls.subtitleDisplay ? 'showing' : 'hidden';
} else {
textTrack.mode = 'disabled';
}
}
_this3.textTracks.push(textTrack);
});
}
};
// CEA-608 caption decoding is enabled only when the new level advertises
// captions (CLOSED-CAPTIONS attribute other than 'NONE').
TimelineController.prototype.onLevelSwitching = function onLevelSwitching() {
  var closedCaptions = this.hls.currentLevel.closedCaptions;
  this.enabled = closedCaptions !== 'NONE';
};
// FRAG_LOADED handler. Main fragments drive CEA-608 parser continuity
// tracking; subtitle fragments are parsed as WebVTT (or queued until the
// initial PTS arrives, or skipped while awaiting decryption).
TimelineController.prototype.onFragLoaded = function onFragLoaded(data) {
  var frag = data.frag;
  var payload = data.payload;
  if (frag.type === 'main') {
    var sn = frag.sn;
    // if this frag isn't contiguous, clear the parser so cues with bad start/end times aren't added to the textTrack
    if (sn !== this.lastSn + 1 && this.cea608Parser) {
      this.cea608Parser.reset();
    }
    this.lastSn = sn;
    return;
  }
  // If fragment is subtitle type, parse as WebVTT.
  if (frag.type !== 'subtitle') {
    return;
  }
  if (!payload.byteLength) {
    // In case there is no payload, finish unsuccessfully.
    this.hls.trigger(events["a" /* default */].SUBTITLE_FRAG_PROCESSED, { success: false, frag: frag });
    return;
  }
  // We need an initial synchronisation PTS. Store fragments as long as none has arrived.
  if (typeof this.initPTS === 'undefined') {
    this.unparsedVttFrags.push(data);
    return;
  }
  var decryptData = frag.decryptdata;
  // If the subtitles are not encrypted, parse VTTs now. Otherwise, we need to wait.
  if (decryptData == null || decryptData.key == null || decryptData.method !== 'AES-128') {
    this._parseVTTs(frag, payload);
  }
};
/**
 * Parses a WebVTT subtitle fragment and adds its cues to the matching text
 * track. Tracks continuity-counter state in vttCCs for timestamp alignment,
 * skips cues already present (duplicate IDs across overlapping VTT files),
 * and signals SUBTITLE_FRAG_PROCESSED with success true/false.
 */
TimelineController.prototype._parseVTTs = function _parseVTTs(frag, payload) {
var vttCCs = this.vttCCs;
// first fragment seen for this CC: record its start and the previous CC
if (!vttCCs[frag.cc]) {
vttCCs[frag.cc] = { start: frag.start, prevCC: this.prevCC, new: true };
this.prevCC = frag.cc;
}
var textTracks = this.textTracks,
hls = this.hls;
// Parse the WebVTT file contents.
webvtt_parser.parse(payload, this.initPTS, vttCCs, frag.cc, function (cues) {
var currentTrack = textTracks[frag.trackId];
// WebVTTParser.parse is an async method and if the currently selected text track mode is set to "disabled"
// before parsing is done then don't try to access currentTrack.cues.getCueById as cues will be null
// and trying to access getCueById method of cues will throw an exception
if (currentTrack.mode === 'disabled') {
hls.trigger(events["a" /* default */].SUBTITLE_FRAG_PROCESSED, { success: false, frag: frag });
return;
}
// Add cues and trigger event with success true.
cues.forEach(function (cue) {
// Sometimes there are cue overlaps on segmented vtts so the same
// cue can appear more than once in different vtt files.
// This avoid showing duplicated cues with same timecode and text.
if (!currentTrack.cues.getCueById(cue.id)) {
try {
currentTrack.addCue(cue);
} catch (err) {
// some browsers reject foreign VTTCue objects: rebuild a native TextTrackCue and retry
var textTrackCue = new window.TextTrackCue(cue.startTime, cue.endTime, cue.text);
textTrackCue.id = cue.id;
currentTrack.addCue(textTrackCue);
}
}
});
hls.trigger(events["a" /* default */].SUBTITLE_FRAG_PROCESSED, { success: true, frag: frag });
}, function (e) {
// Something went wrong while parsing. Trigger event with success false.
logger["b" /* logger */].log('Failed to parse VTT cue: ' + e);
hls.trigger(events["a" /* default */].SUBTITLE_FRAG_PROCESSED, { success: false, frag: frag });
});
};
TimelineController.prototype.onFragDecrypted = function onFragDecrypted(data) {
( run in 0.804 second using v1.01-cache-2.11-cpan-39bf76dae61 )