mirror of https://github.com/DanielnetoDotCom/YouPHPTube
DanieL 2022-08-24 16:05:41 -03:00
parent 6efee3a800
commit 051401c6fc
803 changed files with 70126 additions and 6014 deletions

node_modules/hls.js/src/config.ts

@@ -139,11 +139,17 @@ export type LatencyControllerConfig = {
maxLiveSyncPlaybackRate: number;
};
export type MetadataControllerConfig = {
enableDateRangeMetadataCues: boolean;
enableEmsgMetadataCues: boolean;
enableID3MetadataCues: boolean;
};
export type TimelineControllerConfig = {
cueHandler: CuesInterface;
enableCEA708Captions: boolean;
enableWebVTT: boolean;
enableIMSC1: boolean;
enableCEA708Captions: boolean;
captionsTextTrack1Label: string;
captionsTextTrack1LanguageCode: string;
captionsTextTrack2Label: string;
@@ -164,6 +170,7 @@ export type HlsConfig = {
enableWorker: boolean;
enableSoftwareAES: boolean;
minAutoBitrate: number;
ignoreDevicePixelRatio: boolean;
loader: { new (confg: HlsConfig): Loader<LoaderContext> };
fetchSetup?: (context: LoaderContext, initParams: any) => Request;
xhrSetup?: (xhr: XMLHttpRequest, url: string) => void;
@@ -198,6 +205,7 @@ export type HlsConfig = {
PlaylistLoaderConfig &
StreamControllerConfig &
LatencyControllerConfig &
MetadataControllerConfig &
TimelineControllerConfig &
TSDemuxerConfig;
@@ -211,6 +219,7 @@ export const hlsDefaultConfig: HlsConfig = {
debug: false, // used by logger
capLevelOnFPSDrop: false, // used by fps-controller
capLevelToPlayerSize: false, // used by cap-level-controller
ignoreDevicePixelRatio: false, // used by cap-level-controller
initialLiveManifestSize: 1, // used by stream-controller
maxBufferLength: 30, // used by stream-controller
backBufferLength: Infinity, // used by buffer-controller
@@ -280,6 +289,9 @@ export const hlsDefaultConfig: HlsConfig = {
progressive: false,
lowLatencyMode: true,
cmcd: undefined,
enableDateRangeMetadataCues: true,
enableEmsgMetadataCues: true,
enableID3MetadataCues: true,
// Dynamic Modules
...timelineConfig(),
@@ -299,9 +311,9 @@ export const hlsDefaultConfig: HlsConfig = {
function timelineConfig(): TimelineControllerConfig {
return {
cueHandler: Cues, // used by timeline-controller
enableCEA708Captions: __USE_SUBTITLES__, // used by timeline-controller
enableWebVTT: __USE_SUBTITLES__, // used by timeline-controller
enableIMSC1: __USE_SUBTITLES__, // used by timeline-controller
enableCEA708Captions: __USE_SUBTITLES__, // used by timeline-controller
captionsTextTrack1Label: 'English', // used by timeline-controller
captionsTextTrack1LanguageCode: 'en', // used by timeline-controller
captionsTextTrack2Label: 'Spanish', // used by timeline-controller
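The hunks above add one capping option (ignoreDevicePixelRatio, off by default) and three metadata-cue switches (all on by default) to HlsConfig. A minimal sketch of opting in from player code; the option names come from this diff, the rest is standard hls.js setup:

import Hls from 'hls.js';

const hls = new Hls({
  capLevelToPlayerSize: true,
  ignoreDevicePixelRatio: true, // cap by CSS pixels, ignoring devicePixelRatio
  enableDateRangeMetadataCues: true, // EXT-X-DATERANGE tags -> metadata cues
  enableEmsgMetadataCues: true, // fmp4 emsg boxes -> metadata cues
  enableID3MetadataCues: true, // timed ID3 -> metadata cues
});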

node_modules/hls.js/src/controller/audio-stream-controller.ts

@@ -55,6 +55,7 @@ class AudioStreamController
private waitingData: WaitingForPTSData | null = null;
private mainDetails: LevelDetails | null = null;
private bufferFlushed: boolean = false;
private cachedTrackLoadedData: TrackLoadedData | null = null;
constructor(hls: Hls, fragmentTracker: FragmentTracker) {
super(hls, fragmentTracker, '[audio-stream-controller]');
@@ -299,8 +300,12 @@ class AudioStreamController
if (bufferInfo === null) {
return;
}
const mainBufferInfo = this.getFwdBufferInfo(
this.videoBuffer ? this.videoBuffer : this.media,
PlaylistLevelType.MAIN
);
const bufferLen = bufferInfo.len;
const maxBufLen = this.getMaxBufferLength();
const maxBufLen = this.getMaxBufferLength(mainBufferInfo?.len);
const audioSwitch = this.audioSwitch;
// if buffer length is less than maxBufLen try to load a new fragment
@@ -333,6 +338,18 @@
}
}
// buffer audio up to one target duration ahead of main buffer
if (
mainBufferInfo &&
targetBufferTime > mainBufferInfo.end + trackDetails.targetduration
) {
return;
}
// wait for main buffer after buffering some audio
if ((!mainBufferInfo || !mainBufferInfo.len) && bufferInfo.len) {
return;
}
const frag = this.getNextFragment(targetBufferTime, trackDetails);
if (!frag) {
this.bufferFlushed = true;
@@ -346,16 +363,12 @@
}
}
protected getMaxBufferLength(): number {
protected getMaxBufferLength(mainBufferLength?: number): number {
const maxConfigBuffer = super.getMaxBufferLength();
const mainBufferInfo = this.getFwdBufferInfo(
this.videoBuffer ? this.videoBuffer : this.media,
PlaylistLevelType.MAIN
);
if (mainBufferInfo === null) {
if (!mainBufferLength) {
return maxConfigBuffer;
}
return Math.max(maxConfigBuffer, mainBufferInfo.len);
return Math.max(maxConfigBuffer, mainBufferLength);
}
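With this refactor the audio controller's buffer target tracks the main (video) forward buffer instead of re-deriving it. A worked example with assumed values:

const maxConfigBuffer = 30; // super.getMaxBufferLength(), seconds
const mainBufferLength = 45; // forward main buffer passed in, seconds
const maxBufLen = Math.max(maxConfigBuffer, mainBufferLength); // 45: audio may keep pace with video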
onMediaDetaching() {
@@ -413,9 +426,17 @@
onLevelLoaded(event: Events.LEVEL_LOADED, data: LevelLoadedData) {
this.mainDetails = data.details;
if (this.cachedTrackLoadedData !== null) {
this.hls.trigger(Events.AUDIO_TRACK_LOADED, this.cachedTrackLoadedData);
this.cachedTrackLoadedData = null;
}
}
onAudioTrackLoaded(event: Events.AUDIO_TRACK_LOADED, data: TrackLoadedData) {
if (this.mainDetails == null) {
this.cachedTrackLoadedData = data;
return;
}
const { levels } = this;
const { details: newDetails, id: trackId } = data;
if (!levels) {
@@ -671,12 +692,16 @@
this.resetLiveStartWhenNotLoaded(chunkMeta.level);
return;
}
const { frag, part } = context;
const {
frag,
part,
level: { details },
} = context;
const { audio, text, id3, initSegment } = remuxResult;
// Check if the current fragment has been aborted. We check this by first seeing if we're still playing the current level.
// If we are, subsequently check if the currently loading fragment (fragCurrent) has changed.
if (this.fragContextChanged(frag)) {
if (this.fragContextChanged(frag) || !details) {
return;
}
@@ -717,8 +742,9 @@
if (id3?.samples?.length) {
const emittedID3: FragParsingMetadataData = Object.assign(
{
frag,
id,
frag,
details,
},
id3
);
@@ -727,8 +753,9 @@
if (text) {
const emittedText: FragParsingUserdataData = Object.assign(
{
frag,
id,
frag,
details,
},
text
);

node_modules/hls.js/src/controller/base-playlist-controller.ts

@@ -65,7 +65,7 @@ export default class BasePlaylistController implements NetworkComponentAPI {
for (let i = 0; i < renditionReports.length; i++) {
const attr = renditionReports[i];
const uri = '' + attr.URI;
if (uri === playlistUri.substr(-uri.length)) {
if (uri === playlistUri.slice(-uri.length)) {
const msn = parseInt(attr['LAST-MSN']);
let part = parseInt(attr['LAST-PART']);
if (previous && this.hls.config.lowLatencyMode) {
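String.prototype.substr is deprecated (Annex B), while slice with a negative start reliably returns the trailing characters, so the two calls are interchangeable here. A quick check with a hypothetical URI:

const playlistUri = 'https://example.com/live/media.m3u8'; // hypothetical
const uri = 'media.m3u8';
console.log(playlistUri.slice(-uri.length) === uri); // true, same result as the old substr(-uri.length)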

node_modules/hls.js/src/controller/base-stream-controller.ts

@@ -57,7 +57,6 @@ export const State = {
WAITING_TRACK: 'WAITING_TRACK',
PARSING: 'PARSING',
PARSED: 'PARSED',
BACKTRACKING: 'BACKTRACKING',
ENDED: 'ENDED',
ERROR: 'ERROR',
WAITING_INIT_PTS: 'WAITING_INIT_PTS',
@@ -351,7 +350,6 @@ export default class BaseStreamController
if (this.fragContextChanged(frag)) {
if (
state === State.FRAG_LOADING ||
state === State.BACKTRACKING ||
(!this.fragCurrent && state === State.PARSING)
) {
this.fragmentTracker.removeFragment(frag);
@@ -363,20 +361,15 @@ export default class BaseStreamController
if ('payload' in data) {
this.log(`Loaded fragment ${frag.sn} of level ${frag.level}`);
this.hls.trigger(Events.FRAG_LOADED, data);
// Tracker backtrack must be called after onFragLoaded to update the fragment entity state to BACKTRACKED
// This happens after handleTransmuxComplete when the worker or progressive is disabled
if (this.state === State.BACKTRACKING) {
this.fragmentTracker.backtrack(frag, data);
this.resetFragmentLoading(frag);
return;
}
}
// Pass through the whole payload; controllers not implementing progressive loading receive data from this callback
this._handleFragmentLoadComplete(data);
})
.catch((reason) => {
if (this.state === State.STOPPED) {
return;
}
this.warn(reason);
this.resetFragmentLoading(frag);
});
@@ -1015,27 +1008,7 @@ export default class BaseStreamController
const curSNIdx = frag.sn - levelDetails.startSN;
const sameLevel = fragPrevious && frag.level === fragPrevious.level;
const nextFrag = fragments[curSNIdx + 1];
const fragState = this.fragmentTracker.getState(frag);
if (fragState === FragmentState.BACKTRACKED) {
frag = null;
let i = curSNIdx;
while (
fragments[i] &&
this.fragmentTracker.getState(fragments[i]) ===
FragmentState.BACKTRACKED
) {
// When fragPrevious is null, backtrack to the first fragment that is not BACKTRACKED for loading
// When fragPrevious is set, we want the first BACKTRACKED fragment for parsing and buffering
if (!fragPrevious) {
frag = fragments[--i];
} else {
frag = fragments[i--];
}
}
if (!frag) {
frag = nextFrag;
}
} else if (fragPrevious && frag.sn === fragPrevious.sn && !loadingParts) {
if (fragPrevious && frag.sn === fragPrevious.sn && !loadingParts) {
// Force the next fragment to load if the previous one was already selected. This can occasionally happen with
// non-uniform fragment durations
if (sameLevel) {
@@ -1140,6 +1113,7 @@ export default class BaseStreamController
return (
details.live &&
details.canBlockReload &&
details.partTarget &&
details.tuneInGoal >
Math.max(details.partHoldBack, details.partTarget * advancePartLimit)
);
@@ -1329,9 +1303,8 @@ export default class BaseStreamController
// The new transmuxer will be configured with a time offset matching the next fragment start,
// preventing the timeline from shifting.
this.warn(
`Could not parse fragment ${frag.sn} ${type} duration reliably (${parsedDuration}) resetting transmuxer to fallback to playlist timing`
`Could not parse fragment ${frag.sn} ${type} duration reliably (${parsedDuration})`
);
this.resetTransmuxer();
return result || false;
}
const drift = partial
@@ -1359,12 +1332,14 @@
},
false
);
if (parsed) {
this.state = State.PARSED;
this.hls.trigger(Events.FRAG_PARSED, { frag, part });
} else {
this.resetLoadingState();
if (!parsed) {
this.warn(
`Found no media in fragment ${frag.sn} of level ${level.id} resetting transmuxer to fallback to playlist timing`
);
this.resetTransmuxer();
}
this.state = State.PARSED;
this.hls.trigger(Events.FRAG_PARSED, { frag, part });
}
protected resetTransmuxer() {

node_modules/hls.js/src/controller/buffer-controller.ts

@@ -243,7 +243,8 @@ export default class BufferController implements ComponentAPI {
// check if SourceBuffer codec needs to change
const track = this.tracks[trackName];
if (track && typeof track.buffer.changeType === 'function') {
const { codec, levelCodec, container } = data[trackName];
const { id, codec, levelCodec, container, metadata } =
data[trackName];
const currentCodec = (track.levelCodec || track.codec).replace(
VIDEO_CODEC_PROFILE_REPACE,
'$1'
@@ -255,6 +256,17 @@
if (currentCodec !== nextCodec) {
const mimeType = `${container};codecs=${levelCodec || codec}`;
this.appendChangeType(trackName, mimeType);
logger.log(
`[buffer-controller]: switching codec ${currentCodec} to ${nextCodec}`
);
this.tracks[trackName] = {
buffer: track.buffer,
codec,
container,
levelCodec,
metadata,
id,
};
}
}
} else {
@@ -714,6 +726,7 @@
codec: codec,
container: track.container,
levelCodec: track.levelCodec,
metadata: track.metadata,
id: track.id,
};
tracksCreated++;
@@ -845,14 +858,14 @@
// resolve, the onUnblocked function is executed. Functions calling this method do not need to unblock the queue
// upon completion, since we already do it here
private blockBuffers(
onUnblocked: Function,
onUnblocked: () => void,
buffers: Array<SourceBufferName> = this.getSourceBufferTypes()
) {
if (!buffers.length) {
logger.log(
'[buffer-controller]: Blocking operation requested, but no SourceBuffers exist'
);
Promise.resolve(onUnblocked);
Promise.resolve().then(onUnblocked);
return;
}
const { operationQueue } = this;
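The one-line change above fixes a real bug: Promise.resolve(onUnblocked) merely wraps the callback in a resolved promise without ever calling it, so the unblock callback was silently dropped whenever no SourceBuffers existed. A minimal illustration:

const onUnblocked = () => console.log('unblocked');
Promise.resolve(onUnblocked); // never invokes the callback
Promise.resolve().then(onUnblocked); // invokes it on the next microtask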

node_modules/hls.js/src/controller/cap-level-controller.ts

@@ -214,20 +214,23 @@ class CapLevelController implements ComponentAPI {
}
get mediaWidth(): number {
return this.getDimensions().width * CapLevelController.contentScaleFactor;
return this.getDimensions().width * this.contentScaleFactor;
}
get mediaHeight(): number {
return this.getDimensions().height * CapLevelController.contentScaleFactor;
return this.getDimensions().height * this.contentScaleFactor;
}
static get contentScaleFactor(): number {
get contentScaleFactor(): number {
let pixelRatio = 1;
try {
pixelRatio = self.devicePixelRatio;
} catch (e) {
/* no-op */
if (!this.hls.config.ignoreDevicePixelRatio) {
try {
pixelRatio = self.devicePixelRatio;
} catch (e) {
/* no-op */
}
}
return pixelRatio;
}
@@ -249,7 +252,7 @@
// Levels can have the same dimensions but differing bandwidths - since levels are ordered, we can look to the next
// to determine whether we've chosen the greatest bandwidth for the media's dimensions
const atGreatestBandiwdth = (curLevel, nextLevel) => {
const atGreatestBandwidth = (curLevel, nextLevel) => {
if (!nextLevel) {
return true;
}
@@ -268,7 +271,7 @@
const level = levels[i];
if (
(level.width >= width || level.height >= height) &&
atGreatestBandiwdth(level, levels[i + 1])
atGreatestBandwidth(level, levels[i + 1])
) {
maxLevelIndex = i;
break;

node_modules/hls.js/src/controller/cmcd-controller.ts

@@ -396,7 +396,7 @@ export default class CMCDController implements ComponentAPI {
const url = URL.createObjectURL(new Blob());
const uuid = url.toString();
URL.revokeObjectURL(url);
return uuid.substr(uuid.lastIndexOf('/') + 1);
return uuid.slice(uuid.lastIndexOf('/') + 1);
}
/**

node_modules/hls.js/src/controller/eme-controller.ts

@@ -316,7 +316,9 @@ class EMEController implements ComponentAPI {
data ? data.byteLength : data
}), updating key-session`
);
keySession.update(data);
keySession.update(data).catch((err) => {
logger.warn(`Updating key-session failed: ${err}`);
});
});
}

node_modules/hls.js/src/controller/fragment-tracker.ts

@@ -17,7 +17,6 @@ import type Hls from '../hls';
export enum FragmentState {
NOT_LOADED = 'NOT_LOADED',
BACKTRACKED = 'BACKTRACKED',
APPENDING = 'APPENDING',
PARTIAL = 'PARTIAL',
OK = 'OK',
@@ -199,7 +198,7 @@ export class FragmentTracker implements ComponentAPI {
timeRange
);
});
fragmentEntity.backtrack = fragmentEntity.loaded = null;
fragmentEntity.loaded = null;
if (Object.keys(fragmentEntity.range).length) {
fragmentEntity.buffered = true;
} else {
@@ -212,7 +211,7 @@
const fragKey = getFragmentKey(frag);
const fragmentEntity = this.fragments[fragKey];
if (fragmentEntity) {
fragmentEntity.backtrack = fragmentEntity.loaded = null;
fragmentEntity.loaded = null;
fragmentEntity.buffered = true;
}
}
@@ -295,9 +294,6 @@
if (fragmentEntity) {
if (!fragmentEntity.buffered) {
if (fragmentEntity.backtrack) {
return FragmentState.BACKTRACKED;
}
return FragmentState.APPENDING;
} else if (isPartial(fragmentEntity)) {
return FragmentState.PARTIAL;
@@ -309,37 +305,6 @@
return FragmentState.NOT_LOADED;
}
public backtrack(
frag: Fragment,
data?: FragLoadedData
): FragLoadedData | null {
const fragKey = getFragmentKey(frag);
const fragmentEntity = this.fragments[fragKey];
if (!fragmentEntity || fragmentEntity.backtrack) {
return null;
}
const backtrack = (fragmentEntity.backtrack = data
? data
: fragmentEntity.loaded);
fragmentEntity.loaded = null;
return backtrack;
}
public getBacktrackData(fragment: Fragment): FragLoadedData | null {
const fragKey = getFragmentKey(fragment);
const fragmentEntity = this.fragments[fragKey];
if (fragmentEntity) {
const { backtrack } = fragmentEntity;
// If data was already sent to the Worker it is detached and no longer available
if (backtrack?.payload?.byteLength) {
return backtrack;
} else {
this.removeFragment(fragment);
}
}
return null;
}
private isTimeBuffered(
startPTS: number,
endPTS: number,
@@ -376,7 +341,6 @@
this.fragments[fragKey] = {
body: frag,
loaded: data,
backtrack: null,
buffered: false,
range: Object.create(null),
};

node_modules/hls.js/src/controller/gap-controller.ts

@@ -5,8 +5,8 @@ import { Events } from '../events';
import { logger } from '../utils/logger';
import type Hls from '../hls';
import type { HlsConfig } from '../config';
import type { Fragment } from '../loader/fragment';
import type { FragmentTracker } from './fragment-tracker';
import { Fragment } from '../loader/fragment';
export const STALL_MINIMUM_DURATION_MS = 250;
export const MAX_START_GAP_JUMP = 2.0;
@@ -15,7 +15,7 @@
export default class GapController {
private config: HlsConfig;
private media: HTMLMediaElement;
private media: HTMLMediaElement | null = null;
private fragmentTracker: FragmentTracker;
private hls: Hls;
private nudgeRetry: number = 0;
@@ -32,8 +32,9 @@
}
public destroy() {
this.media = null;
// @ts-ignore
this.hls = this.fragmentTracker = this.media = null;
this.hls = this.fragmentTracker = null;
}
/**
@@ -42,8 +43,11 @@
*
* @param {number} lastCurrentTime Previously read playhead position
*/
public poll(lastCurrentTime: number) {
public poll(lastCurrentTime: number, activeFrag: Fragment | null) {
const { config, media, stalled } = this;
if (media === null) {
return;
}
const { currentTime, seeking } = media;
const seeked = this.seeking && !seeking;
const beginSeek = !this.seeking && seeking;
@@ -77,7 +81,7 @@
// The playhead should not be moving
if (
media.paused ||
(media.paused && !seeking) ||
media.ended ||
media.playbackRate === 0 ||
!BufferHelper.getBuffered(media).length
@@ -100,6 +104,7 @@
// Next buffered range is too far ahead to jump to while still seeking
const noBufferGap =
!nextStart ||
(activeFrag && activeFrag.start <= currentTime) ||
(nextStart - currentTime > MAX_START_GAP_JUMP &&
!this.fragmentTracker.getPartialFragment(currentTime));
if (hasEnoughBuffer || noBufferGap) {
@@ -142,7 +147,10 @@
const stalledDuration = tnow - stalled;
if (!seeking && stalledDuration >= STALL_MINIMUM_DURATION_MS) {
// Report stalling after trying to fix
this._reportStall(bufferInfo.len);
this._reportStall(bufferInfo);
if (!this.media) {
return;
}
}
const bufferedWithHoles = BufferHelper.bufferInfo(
@@ -164,6 +172,9 @@
stalledDurationMs: number
) {
const { config, fragmentTracker, media } = this;
if (media === null) {
return;
}
const currentTime = media.currentTime;
const partial = fragmentTracker.getPartialFragment(currentTime);
@@ -173,7 +184,7 @@
const targetTime = this._trySkipBufferHole(partial);
// we return here in this case, meaning
// the branch below only executes when we don't handle a partial fragment
if (targetTime) {
if (targetTime || !this.media) {
return;
}
}
@@ -200,19 +211,21 @@
* @param bufferLen - The playhead distance from the end of the current buffer segment.
* @private
*/
private _reportStall(bufferLen) {
private _reportStall(bufferInfo: BufferInfo) {
const { hls, media, stallReported } = this;
if (!stallReported) {
if (!stallReported && media) {
// Report stalled error once
this.stallReported = true;
logger.warn(
`Playback stalling at @${media.currentTime} due to low buffer (buffer=${bufferLen})`
`Playback stalling at @${
media.currentTime
} due to low buffer (${JSON.stringify(bufferInfo)})`
);
hls.trigger(Events.ERROR, {
type: ErrorTypes.MEDIA_ERROR,
details: ErrorDetails.BUFFER_STALLED_ERROR,
fatal: false,
buffer: bufferLen,
buffer: bufferInfo.len,
});
}
}
@@ -224,6 +237,9 @@
*/
private _trySkipBufferHole(partial: Fragment | null): number {
const { config, hls, media } = this;
if (media === null) {
return 0;
}
const currentTime = media.currentTime;
let lastEndTime = 0;
// Check if currentTime is between unbuffered regions of partial fragments
@@ -265,13 +281,15 @@
* @private
*/
private _tryNudgeBuffer() {
const { config, hls, media } = this;
const { config, hls, media, nudgeRetry } = this;
if (media === null) {
return;
}
const currentTime = media.currentTime;
const nudgeRetry = (this.nudgeRetry || 0) + 1;
this.nudgeRetry = nudgeRetry;
this.nudgeRetry++;
if (nudgeRetry < config.nudgeMaxRetry) {
const targetTime = currentTime + nudgeRetry * config.nudgeOffset;
const targetTime = currentTime + (nudgeRetry + 1) * config.nudgeOffset;
// playback stalled in buffered area ... let's nudge currentTime to try to overcome this
logger.warn(`Nudging 'currentTime' from ${currentTime} to ${targetTime}`);
media.currentTime = targetTime;

node_modules/hls.js/src/controller/id3-track-controller.ts

@@ -5,9 +5,12 @@ import {
removeCuesInRange,
} from '../utils/texttrack-utils';
import * as ID3 from '../demux/id3';
import { DateRange, DateRangeAttribute } from '../loader/date-range';
import { MetadataSchema } from '../types/demuxer';
import type {
BufferFlushingData,
FragParsingMetadataData,
LevelUpdatedData,
MediaAttachedData,
} from '../types/events';
import type { ComponentAPI } from '../types/component-api';
@@ -19,12 +22,38 @@ declare global {
}
}
type Cue = VTTCue | TextTrackCue;
const MIN_CUE_DURATION = 0.25;
function getCueClass() {
// Attempt to recreate Safari functionality by creating
// WebKitDataCue objects when available and store the decoded
// ID3 data in the value property of the cue
return (self.WebKitDataCue || self.VTTCue || self.TextTrackCue) as any;
}
function dateRangeDateToTimelineSeconds(date: Date, offset: number): number {
return date.getTime() / 1000 - offset;
}
function hexToArrayBuffer(str): ArrayBuffer {
return Uint8Array.from(
str
.replace(/^0x/, '')
.replace(/([\da-fA-F]{2}) ?/g, '0x$1 ')
.replace(/ +$/, '')
.split(' ')
).buffer;
}
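hexToArrayBuffer turns SCTE35-OUT/SCTE35-IN attribute values, which arrive as hex strings, into binary cue payloads (see its use further down). A small behavior check with a hypothetical prefix:

const buf = hexToArrayBuffer('0xFC3020'); // hypothetical SCTE-35 bytes
console.log(new Uint8Array(buf)); // Uint8Array [ 252, 48, 32 ]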
class ID3TrackController implements ComponentAPI {
private hls: Hls;
private id3Track: TextTrack | null = null;
private media: HTMLMediaElement | null = null;
private dateRangeCuesAppended: Record<
string,
{ cues: Record<string, Cue>; dateRange: DateRange; durationKnown: boolean }
> = {};
constructor(hls) {
this.hls = hls;
@@ -33,22 +62,31 @@ class ID3TrackController implements ComponentAPI {
destroy() {
this._unregisterListeners();
this.id3Track = null;
this.media = null;
this.dateRangeCuesAppended = {};
// @ts-ignore
this.hls = null;
}
private _registerListeners() {
const { hls } = this;
hls.on(Events.MEDIA_ATTACHED, this.onMediaAttached, this);
hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);
hls.on(Events.FRAG_PARSING_METADATA, this.onFragParsingMetadata, this);
hls.on(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);
hls.on(Events.LEVEL_UPDATED, this.onLevelUpdated, this);
}
private _unregisterListeners() {
const { hls } = this;
hls.off(Events.MEDIA_ATTACHED, this.onMediaAttached, this);
hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);
hls.off(Events.FRAG_PARSING_METADATA, this.onFragParsingMetadata, this);
hls.off(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);
hls.off(Events.LEVEL_UPDATED, this.onLevelUpdated, this);
}
// Add ID3 metadata text track.
@@ -66,6 +104,17 @@ class ID3TrackController implements ComponentAPI {
clearCurrentCues(this.id3Track);
this.id3Track = null;
this.media = null;
this.dateRangeCuesAppended = {};
}
private onManifestLoading() {
this.dateRangeCuesAppended = {};
}
createTrack(media: HTMLMediaElement): TextTrack {
const track = this.getID3Track(media.textTracks) as TextTrack;
track.mode = 'hidden';
return track;
}
getID3Track(textTracks: TextTrackList): TextTrack | void {
@@ -92,26 +141,42 @@ class ID3TrackController implements ComponentAPI {
if (!this.media) {
return;
}
const fragment = data.frag;
const samples = data.samples;
const {
hls: {
config: { enableEmsgMetadataCues, enableID3MetadataCues },
},
} = this;
if (!enableEmsgMetadataCues && !enableID3MetadataCues) {
return;
}
const { frag: fragment, samples, details } = data;
// create track dynamically
if (!this.id3Track) {
this.id3Track = this.getID3Track(this.media.textTracks) as TextTrack;
this.id3Track.mode = 'hidden';
this.id3Track = this.createTrack(this.media);
}
// Attempt to recreate Safari functionality by creating
// WebKitDataCue objects when available and store the decoded
// ID3 data in the value property of the cue
const Cue = (self.WebKitDataCue || self.VTTCue || self.TextTrackCue) as any;
// VTTCue end time must be finite, so use playlist edge or fragment end until next fragment with same frame type is found
const maxCueTime = details.edge || fragment.end;
const Cue = getCueClass();
let updateCueRanges = false;
const frameTypesAdded: Record<string, number | null> = {};
for (let i = 0; i < samples.length; i++) {
const type = samples[i].type;
if (
(type === MetadataSchema.emsg && !enableEmsgMetadataCues) ||
!enableID3MetadataCues
) {
continue;
}
const frames = ID3.getID3Frames(samples[i].data);
if (frames) {
const startTime = samples[i].pts;
let endTime: number =
i < samples.length - 1 ? samples[i + 1].pts : fragment.end;
let endTime: number = maxCueTime;
const timeDiff = endTime - startTime;
if (timeDiff <= 0) {
@@ -124,23 +189,186 @@ class ID3TrackController implements ComponentAPI {
if (!ID3.isTimeStampFrame(frame)) {
const cue = new Cue(startTime, endTime, '');
cue.value = frame;
if (type) {
cue.type = type;
}
this.id3Track.addCue(cue);
frameTypesAdded[frame.key] = null;
updateCueRanges = true;
}
}
}
}
if (updateCueRanges) {
this.updateId3CueEnds(frameTypesAdded);
}
}
updateId3CueEnds(frameTypesAdded: Record<string, number | null>) {
// Update endTime of previous cue with same IDR frame.type (Ex: TXXX cue spans to next TXXX)
const cues = this.id3Track?.cues;
if (cues) {
for (let i = cues.length; i--; ) {
const cue = cues[i] as any;
const frameType = cue.value?.key;
if (frameType && frameType in frameTypesAdded) {
const startTime = frameTypesAdded[frameType];
if (startTime && cue.endTime !== startTime) {
cue.endTime = startTime;
}
frameTypesAdded[frameType] = cue.startTime;
}
}
}
}
onBufferFlushing(
event: Events.BUFFER_FLUSHING,
{ startOffset, endOffset, type }: BufferFlushingData
) {
if (!type || type === 'audio') {
// id3 cues come from parsed audio; only remove cues when the audio buffer is cleared
const { id3Track } = this;
if (id3Track) {
removeCuesInRange(id3Track, startOffset, endOffset);
const { id3Track, hls } = this;
if (!hls) {
return;
}
const {
config: { enableEmsgMetadataCues, enableID3MetadataCues },
} = hls;
if (id3Track && (enableEmsgMetadataCues || enableID3MetadataCues)) {
let predicate;
if (type === 'audio') {
predicate = (cue) =>
(cue as any).type === MetadataSchema.audioId3 &&
enableID3MetadataCues;
} else if (type === 'video') {
predicate = (cue) =>
(cue as any).type === MetadataSchema.emsg && enableEmsgMetadataCues;
} else {
predicate = (cue) =>
((cue as any).type === MetadataSchema.audioId3 &&
enableID3MetadataCues) ||
((cue as any).type === MetadataSchema.emsg && enableEmsgMetadataCues);
}
removeCuesInRange(id3Track, startOffset, endOffset, predicate);
}
}
onLevelUpdated(event: Events.LEVEL_UPDATED, { details }: LevelUpdatedData) {
if (
!this.media ||
!details.hasProgramDateTime ||
!this.hls.config.enableDateRangeMetadataCues
) {
return;
}
const { dateRangeCuesAppended, id3Track } = this;
const { dateRanges } = details;
const ids = Object.keys(dateRanges);
// Remove cues from track not found in details.dateRanges
if (id3Track) {
const idsToRemove = Object.keys(dateRangeCuesAppended).filter(
(id) => !ids.includes(id)
);
for (let i = idsToRemove.length; i--; ) {
const id = idsToRemove[i];
Object.keys(dateRangeCuesAppended[id].cues).forEach((key) => {
id3Track.removeCue(dateRangeCuesAppended[id].cues[key]);
});
delete dateRangeCuesAppended[id];
}
}
// Exit if the playlist does not have Date Ranges or does not have Program Date Time
const lastFragment = details.fragments[details.fragments.length - 1];
if (ids.length === 0 || !Number.isFinite(lastFragment?.programDateTime)) {
return;
}
if (!this.id3Track) {
this.id3Track = this.createTrack(this.media);
}
const dateTimeOffset =
(lastFragment.programDateTime as number) / 1000 - lastFragment.start;
const maxCueTime = details.edge || lastFragment.end;
const Cue = getCueClass();
for (let i = 0; i < ids.length; i++) {
const id = ids[i];
const dateRange = dateRanges[id];
const appendedDateRangeCues = dateRangeCuesAppended[id];
const cues = appendedDateRangeCues?.cues || {};
let durationKnown = appendedDateRangeCues?.durationKnown || false;
const startTime = dateRangeDateToTimelineSeconds(
dateRange.startDate,
dateTimeOffset
);
let endTime = maxCueTime;
const endDate = dateRange.endDate;
if (endDate) {
endTime = dateRangeDateToTimelineSeconds(endDate, dateTimeOffset);
durationKnown = true;
} else if (dateRange.endOnNext && !durationKnown) {
const nextDateRangeWithSameClass = ids
.reduce((filterMapArray, id) => {
const candidate = dateRanges[id];
if (
candidate.class === dateRange.class &&
candidate.id !== id &&
candidate.startDate > dateRange.startDate
) {
filterMapArray.push(candidate);
}
return filterMapArray;
}, [] as DateRange[])
.sort((a, b) => a.startDate.getTime() - b.startDate.getTime())[0];
if (nextDateRangeWithSameClass) {
endTime = dateRangeDateToTimelineSeconds(
nextDateRangeWithSameClass.startDate,
dateTimeOffset
);
durationKnown = true;
}
}
const attributes = Object.keys(dateRange.attr);
for (let j = 0; j < attributes.length; j++) {
const key = attributes[j];
if (
key === DateRangeAttribute.ID ||
key === DateRangeAttribute.CLASS ||
key === DateRangeAttribute.START_DATE ||
key === DateRangeAttribute.DURATION ||
key === DateRangeAttribute.END_DATE ||
key === DateRangeAttribute.END_ON_NEXT
) {
continue;
}
let cue = cues[key] as any;
if (cue) {
if (durationKnown && !appendedDateRangeCues.durationKnown) {
cue.endTime = endTime;
}
} else {
let data = dateRange.attr[key];
cue = new Cue(startTime, endTime, '');
if (
key === DateRangeAttribute.SCTE35_OUT ||
key === DateRangeAttribute.SCTE35_IN
) {
data = hexToArrayBuffer(data);
}
cue.value = { key, data };
cue.type = MetadataSchema.dateRange;
this.id3Track.addCue(cue);
cues[key] = cue;
}
}
dateRangeCuesAppended[id] = {
cues,
dateRange,
durationKnown,
};
}
}
}
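The mapping from DATERANGE wall-clock dates onto the media timeline hinges on the dateTimeOffset computed above from the last fragment's program date time. A worked example with assumed values:

const lastFragPDT = 1661371541000; // lastFragment.programDateTime in ms (assumed)
const lastFragStart = 120; // lastFragment.start, seconds on the media timeline
const dateTimeOffset = lastFragPDT / 1000 - lastFragStart; // 1661371421
const startDate = new Date(1661371550000); // a DATERANGE START-DATE (assumed)
const cueStartTime = startDate.getTime() / 1000 - dateTimeOffset; // 129 s on the timeline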

node_modules/hls.js/src/controller/latency-controller.ts

@@ -114,9 +114,11 @@ export default class LatencyController implements ComponentAPI {
return 0;
}
const bufferedRanges = media.buffered.length;
return bufferedRanges
? media.buffered.end(bufferedRanges - 1)
: levelDetails.edge - this.currentTime;
return (
(bufferedRanges
? media.buffered.end(bufferedRanges - 1)
: levelDetails.edge) - this.currentTime
);
}
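The reshaped return fixes a branch bug: the old expression only subtracted currentTime in the non-buffered branch, so a buffered media element reported its absolute buffered end as latency. With assumed values:

const currentTime = 25; // seconds, assumed
const bufferedEnd = 28; // media.buffered.end(...), assumed
const before = bufferedEnd; // 28: subtraction was skipped when buffered
const after = bufferedEnd - currentTime; // 3: the actual forward distance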
public destroy(): void {

node_modules/hls.js/src/controller/level-helper.ts

@@ -9,6 +9,7 @@ import { LevelDetails } from '../loader/level-details';
import type { Level } from '../types/level';
import type { LoaderStats } from '../types/loader';
import type { MediaPlaylist } from '../types/media-playlist';
import { DateRange } from '../loader/date-range';
type FragmentIntersection = (oldFrag: Fragment, newFrag: Fragment) => void;
type PartIntersection = (oldPart: Part, newPart: Part) => void;
@@ -252,6 +253,12 @@ export function mergeDetails(
}
newDetails.startSN = newDetails.fragments[0].sn as number;
newDetails.startCC = newDetails.fragments[0].cc;
} else if (newDetails.canSkipDateRanges) {
newDetails.dateRanges = mergeDateRanges(
oldDetails.dateRanges,
newDetails.dateRanges,
newDetails.recentlyRemovedDateranges
);
}
}
@@ -315,6 +322,32 @@ export function mergeDetails(
}
}
function mergeDateRanges(
oldDateRanges: Record<string, DateRange>,
deltaDateRanges: Record<string, DateRange>,
recentlyRemovedDateranges: string[] | undefined
): Record<string, DateRange> {
const dateRanges = Object.assign({}, oldDateRanges);
if (recentlyRemovedDateranges) {
recentlyRemovedDateranges.forEach((id) => {
delete dateRanges[id];
});
}
Object.keys(deltaDateRanges).forEach((id) => {
const dateRange = new DateRange(deltaDateRanges[id].attr, dateRanges[id]);
if (dateRange.isValid) {
dateRanges[id] = dateRange;
} else {
logger.warn(
`Ignoring invalid Playlist Delta Update DATERANGE tag: "${JSON.stringify(
deltaDateRanges[id].attr
)}"`
);
}
});
return dateRanges;
}
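mergeDateRanges follows the playlist delta-update rules: drop ids listed as recently removed, then overlay the delta's ranges when valid. A behavior sketch with plain strings standing in for DateRange objects (ids hypothetical, validity check omitted):

const oldRanges = { ad1: 'ad1-range', ad2: 'ad2-range' };
const deltaRanges = { ad3: 'ad3-range' };
const recentlyRemoved = ['ad1'];
const merged: Record<string, string> = { ...oldRanges, ...deltaRanges };
recentlyRemoved.forEach((id) => delete merged[id]);
console.log(Object.keys(merged)); // [ 'ad2', 'ad3' ]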
export function mapPartIntersection(
oldParts: Part[] | null,
newParts: Part[] | null,

node_modules/hls.js/src/controller/stream-controller.ts

@@ -54,6 +54,7 @@ export default class StreamController
private fragLastKbps: number = 0;
private stalled: boolean = false;
private couldBacktrack: boolean = false;
private backtrackFragment: Fragment | null = null;
private audioCodecSwitch: boolean = false;
private videoBuffer: any | null = null;
@@ -244,10 +245,7 @@ export default class StreamController
return;
}
const bufferInfo = this.getFwdBufferInfo(
this.mediaBuffer ? this.mediaBuffer : media,
PlaylistLevelType.MAIN
);
const bufferInfo = this.getMainFwdBufferInfo();
if (bufferInfo === null) {
return;
}
@@ -272,20 +270,33 @@ export default class StreamController
return;
}
const targetBufferTime = bufferInfo.end;
if (
this.backtrackFragment &&
this.backtrackFragment.start > bufferInfo.end
) {
this.backtrackFragment = null;
}
const targetBufferTime = this.backtrackFragment
? this.backtrackFragment.start
: bufferInfo.end;
let frag = this.getNextFragment(targetBufferTime, levelDetails);
// Avoid backtracking after seeking or switching by loading an earlier segment in streams that could backtrack
// Avoid backtracking by loading an earlier segment in streams with segments that do not start with a key frame (flagged by `couldBacktrack`)
if (
this.couldBacktrack &&
!this.fragPrevious &&
frag &&
frag.sn !== 'initSegment'
frag.sn !== 'initSegment' &&
this.fragmentTracker.getState(frag) !== FragmentState.OK
) {
const fragIdx = frag.sn - levelDetails.startSN;
if (fragIdx > 1) {
frag = levelDetails.fragments[fragIdx - 1];
this.fragmentTracker.removeFragment(frag);
const backtrackSn = (this.backtrackFragment ?? frag).sn as number;
const fragIdx = backtrackSn - levelDetails.startSN;
const backtrackFrag = levelDetails.fragments[fragIdx - 1];
if (backtrackFrag && frag.cc === backtrackFrag.cc) {
frag = backtrackFrag;
this.fragmentTracker.removeFragment(backtrackFrag);
}
} else if (this.backtrackFragment && bufferInfo.len) {
this.backtrackFragment = null;
}
// Avoid loop loading by using nextLoadPosition set for backtracking
if (
@@ -323,19 +334,8 @@
targetBufferTime: number
) {
// Check if fragment is not loaded
let fragState = this.fragmentTracker.getState(frag);
const fragState = this.fragmentTracker.getState(frag);
this.fragCurrent = frag;
// Use data from loaded backtracked fragment if available
if (fragState === FragmentState.BACKTRACKED) {
const data = this.fragmentTracker.getBacktrackData(frag);
if (data) {
this._handleFragmentLoadProgress(data);
this._handleFragmentLoadComplete(data);
return;
} else {
fragState = FragmentState.NOT_LOADED;
}
}
if (
fragState === FragmentState.NOT_LOADED ||
fragState === FragmentState.PARTIAL
@@ -467,11 +467,18 @@
private abortCurrentFrag() {
const fragCurrent = this.fragCurrent;
this.fragCurrent = null;
this.backtrackFragment = null;
if (fragCurrent?.loader) {
fragCurrent.loader.abort();
}
if (this.state === State.KEY_LOADING) {
this.state = State.IDLE;
switch (this.state) {
case State.KEY_LOADING:
case State.FRAG_LOADING:
case State.FRAG_LOADING_WAITING_RETRY:
case State.PARSING:
case State.PARSED:
this.state = State.IDLE;
break;
}
this.nextLoadPosition = this.getLoadPosition();
}
@@ -542,6 +549,7 @@
this.couldBacktrack = this.stalled = false;
this.startPosition = this.lastCurrentTime = 0;
this.fragPlaying = null;
this.backtrackFragment = null;
}
private onManifestParsed(
@@ -612,6 +620,7 @@
) {
if (fragCurrent.level !== data.level && fragCurrent.loader) {
this.state = State.IDLE;
this.backtrackFragment = null;
fragCurrent.loader.abort();
}
}
@@ -916,7 +925,8 @@
this.seekToStartPos();
} else {
// Resolve gaps using the main buffer, whose ranges are the intersections of the A/V sourcebuffers
gapController.poll(this.lastCurrentTime);
const activeFrag = this.state !== State.IDLE ? this.fragCurrent : null;
gapController.poll(this.lastCurrentTime, activeFrag);
}
this.lastCurrentTime = media.currentTime;
@@ -1048,6 +1058,7 @@
}
const { frag, part, level } = context;
const { video, text, id3, initSegment } = remuxResult;
const { details } = level;
// The audio-stream-controller handles audio buffering if Hls.js is playing an alternate audio track
const audio = this.altAudio ? undefined : remuxResult.audio;
@@ -1080,7 +1091,7 @@
// Avoid buffering if backtracking this fragment
if (video && remuxResult.independent !== false) {
if (level.details) {
if (details) {
const { startPTS, endPTS, startDTS, endDTS } = video;
if (part) {
part.elementaryStreams[video.type] = {
@@ -1095,8 +1106,15 @@
}
if (video.dropped && video.independent) {
// Backtrack if dropped frames create a gap after currentTime
const pos = this.getLoadPosition() + this.config.maxBufferHole;
if (pos < startPTS) {
const bufferInfo = this.getMainFwdBufferInfo();
const targetBufferTime =
(bufferInfo ? bufferInfo.end : this.getLoadPosition()) +
this.config.maxBufferHole;
const startTime = video.firstKeyFramePTS
? video.firstKeyFramePTS
: startPTS;
if (targetBufferTime < startTime - this.config.maxBufferHole) {
this.backtrack(frag);
return;
}
@@ -1118,6 +1136,9 @@
startDTS,
endDTS
);
if (this.backtrackFragment) {
this.backtrackFragment = frag;
}
this.bufferFragmentData(video, frag, part, chunkMeta);
}
} else if (remuxResult.independent === false) {
@@ -1145,18 +1166,20 @@
this.bufferFragmentData(audio, frag, part, chunkMeta);
}
if (id3?.samples?.length) {
if (details && id3?.samples?.length) {
const emittedID3: FragParsingMetadataData = {
frag,
id,
frag,
details,
samples: id3.samples,
};
hls.trigger(Events.FRAG_PARSING_METADATA, emittedID3);
}
if (text) {
if (details && text) {
const emittedText: FragParsingUserdataData = {
frag,
id,
frag,
details,
samples: text.samples,
};
hls.trigger(Events.FRAG_PARSING_USERDATA, emittedText);
@@ -1260,20 +1283,23 @@
this.tick();
}
private getMainFwdBufferInfo() {
return this.getFwdBufferInfo(
this.mediaBuffer ? this.mediaBuffer : this.media,
PlaylistLevelType.MAIN
);
}
private backtrack(frag: Fragment) {
this.couldBacktrack = true;
// Causes findFragments to backtrack through fragments to find the keyframe
this.backtrackFragment = frag;
this.resetTransmuxer();
this.flushBufferGap(frag);
const data = this.fragmentTracker.backtrack(frag);
this.fragmentTracker.removeFragment(frag);
this.fragPrevious = null;
this.nextLoadPosition = frag.start;
if (data) {
this.resetFragmentLoading(frag);
} else {
// Change state to BACKTRACKING so that fragmentEntity.backtrack data can be added after _doFragLoad
this.state = State.BACKTRACKING;
}
this.state = State.IDLE;
}
private checkFragmentChanged() {
@@ -1299,6 +1325,7 @@
fragPlayingCurrent = this.getAppendedFrag(currentTime + 0.1);
}
if (fragPlayingCurrent) {
this.backtrackFragment = null;
const fragPlaying = this.fragPlaying;
const fragCurrentLevel = fragPlayingCurrent.level;
if (
@@ -1319,35 +1346,54 @@
}
}
get nextLevel() {
get nextLevel(): number {
const frag = this.nextBufferedFrag;
if (frag) {
return frag.level;
} else {
return -1;
}
return -1;
}
get currentLevel() {
get currentFrag(): Fragment | null {
const media = this.media;
if (media) {
const fragPlayingCurrent = this.getAppendedFrag(media.currentTime);
if (fragPlayingCurrent) {
return fragPlayingCurrent.level;
return this.fragPlaying || this.getAppendedFrag(media.currentTime);
}
return null;
}
get currentProgramDateTime(): Date | null {
const media = this.media;
if (media) {
const currentTime = media.currentTime;
const frag = this.currentFrag;
if (
frag &&
Number.isFinite(currentTime) &&
Number.isFinite(frag.programDateTime)
) {
const epocMs =
(frag.programDateTime as number) + (currentTime - frag.start) * 1000;
return new Date(epocMs);
}
}
return null;
}
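The new currentProgramDateTime getter interpolates wall-clock time from the playing fragment's EXT-X-PROGRAM-DATE-TIME. With assumed values:

const programDateTime = 1661371541000; // fragment PDT in epoch ms (assumed)
const fragStart = 100; // fragment start on the media timeline, seconds
const currentTime = 102.5; // media.currentTime
const playingAt = new Date(programDateTime + (currentTime - fragStart) * 1000); // PDT + 2.5 s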
get currentLevel(): number {
const frag = this.currentFrag;
if (frag) {
return frag.level;
}
return -1;
}
get nextBufferedFrag() {
const media = this.media;
if (media) {
// first get end range of current fragment
const fragPlayingCurrent = this.getAppendedFrag(media.currentTime);
return this.followingBufferedFrag(fragPlayingCurrent);
} else {
return null;
const frag = this.currentFrag;
if (frag) {
return this.followingBufferedFrag(frag);
}
return null;
}
get forceStartLoad() {

node_modules/hls.js/src/controller/subtitle-stream-controller.ts

@@ -1,5 +1,4 @@
import { Events } from '../events';
import { logger } from '../utils/logger';
import { BufferHelper } from '../utils/buffer-helper';
import { findFragmentByPTS } from './fragment-finders';
import { alignMediaPlaylistByPDT } from '../utils/discontinuities';
@@ -379,7 +378,7 @@ export class SubtitleStreamController
foundFrag = findFragmentByPTS(
fragPrevious,
fragments,
targetBufferTime,
Math.max(fragments[0].start, targetBufferTime),
maxFragLookUpTolerance
);
if (

node_modules/hls.js/src/controller/subtitle-track-controller.ts

@@ -26,8 +26,7 @@ class SubtitleTrackController extends BasePlaylistController {
private asyncPollTrackChange: () => void = () => this.pollTrackChange(0);
private useTextTrackPolling: boolean = false;
private subtitlePollingInterval: number = -1;
public subtitleDisplay: boolean = true; // Enable/disable subtitle display rendering
private _subtitleDisplay: boolean = true;
constructor(hls: Hls) {
super(hls, '[subtitle-track-controller]');
@@ -42,6 +41,17 @@ class SubtitleTrackController extends BasePlaylistController {
super.destroy();
}
public get subtitleDisplay(): boolean {
return this._subtitleDisplay;
}
public set subtitleDisplay(value: boolean) {
this._subtitleDisplay = value;
if (this.trackId > -1) {
this.toggleTrackModes(this.trackId);
}
}
private registerListeners() {
const { hls } = this;
hls.on(Events.MEDIA_ATTACHED, this.onMediaAttached, this);
@@ -295,7 +305,7 @@
* A value of -1 will disable all subtitle tracks.
*/
private toggleTrackModes(newId: number): void {
const { media, subtitleDisplay, trackId } = this;
const { media, trackId } = this;
if (!media) {
return;
}
@@ -317,7 +327,7 @@
const nextTrack = groupTracks[newId];
if (nextTrack) {
nextTrack.mode = subtitleDisplay ? 'showing' : 'hidden';
nextTrack.mode = this.subtitleDisplay ? 'showing' : 'hidden';
}
}

node_modules/hls.js/src/controller/timeline-controller.ts

@@ -587,12 +587,15 @@ export class TimelineController implements ComponentAPI {
// before parsing is done then don't try to access currentTrack.cues.getCueById as cues will be null
// and trying to access getCueById method of cues will throw an exception
// Because we check if the mode is disabled, we can force check `cues` below. They can't be null.
if (textTrack.mode === 'disabled') {
if (!textTrack || textTrack.mode === 'disabled') {
return;
}
cues.forEach((cue) => addCueToTrack(textTrack, cue));
} else {
const currentTrack = this.tracks[fragLevel];
if (!currentTrack) {
return;
}
const track = currentTrack.default ? 'default' : 'subtitles' + fragLevel;
hls.trigger(Events.CUES_PARSED, { type: 'subtitles', cues, track });
}
@@ -670,23 +673,25 @@
}
private extractCea608Data(byteArray: Uint8Array): number[][] {
const count = byteArray[0] & 31;
let position = 2;
const actualCCBytes: number[][] = [[], []];
const count = byteArray[0] & 0x1f;
let position = 2;
for (let j = 0; j < count; j++) {
const tmpByte = byteArray[position++];
const ccbyte1 = 0x7f & byteArray[position++];
const ccbyte2 = 0x7f & byteArray[position++];
const ccValid = (4 & tmpByte) !== 0;
const ccType = 3 & tmpByte;
if (ccbyte1 === 0 && ccbyte2 === 0) {
continue;
}
const ccValid = (0x04 & tmpByte) !== 0; // Support all four channels
if (ccValid) {
if (ccType === 0 || ccType === 1) {
const ccType = 0x03 & tmpByte;
if (
0x00 /* CEA608 field1*/ === ccType ||
0x01 /* CEA608 field2*/ === ccType
) {
// Exclude CEA708 CC data.
actualCCBytes[ccType].push(ccbyte1);
actualCCBytes[ccType].push(ccbyte2);
}
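The rewritten loop reads standard cc_data triplets: byte 0 carries cc_count in its low 5 bits, and each triplet starting at offset 2 is [flags, cc_data_1, cc_data_2], where flags holds cc_valid (0x04) and cc_type (0x03). A decoded sketch for one valid field-1 pair (bytes assumed):

const byteArray = new Uint8Array([0x01, 0xff, 0xfc, 0x94, 0x2c]);
const count = byteArray[0] & 0x1f; // 1 triplet
const ccValid = (byteArray[2] & 0x04) !== 0; // true
const ccType = byteArray[2] & 0x03; // 0 => CEA-608 field 1
const pair = [byteArray[3] & 0x7f, byteArray[4] & 0x7f]; // [0x14, 0x2c]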

node_modules/hls.js/src/demux/aacdemuxer.ts

@@ -19,18 +19,23 @@ class AACDemuxer extends BaseAudioDemuxer {
this.config = config;
}
resetInitSegment(audioCodec, videoCodec, duration) {
super.resetInitSegment(audioCodec, videoCodec, duration);
resetInitSegment(
initSegment: Uint8Array | undefined,
audioCodec: string | undefined,
videoCodec: string | undefined,
trackDuration: number
) {
super.resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration);
this._audioTrack = {
container: 'audio/adts',
type: 'audio',
id: 2,
pid: -1,
sequenceNumber: 0,
isAAC: true,
segmentCodec: 'aac',
samples: [],
manifestCodec: audioCodec,
duration: duration,
duration: trackDuration,
inputTimeScale: 90000,
dropped: 0,
};

node_modules/hls.js/src/demux/adts.ts

@@ -23,7 +23,6 @@
type FrameHeader = {
headerLength: number;
frameLength: number;
stamp: number;
};
export function getAudioConfig(
@@ -255,21 +254,17 @@ export function getFrameDuration(samplerate: number): number {
export function parseFrameHeader(
data: Uint8Array,
offset: number,
pts: number,
frameIndex: number,
frameDuration: number
offset: number
): FrameHeader | void {
// The protection skip bit tells us if we have 2 bytes of CRC data at the end of the ADTS header
const headerLength = getHeaderLength(data, offset);
// retrieve frame size
let frameLength = getFullFrameLength(data, offset);
frameLength -= headerLength;
if (frameLength > 0) {
const stamp = pts + frameIndex * frameDuration;
// logger.log(`AAC frame, offset/length/total/pts:${offset+headerLength}/${frameLength}/${data.byteLength}/${(stamp/90).toFixed(0)}`);
return { headerLength, frameLength, stamp };
if (offset + headerLength <= data.length) {
// retrieve frame size
const frameLength = getFullFrameLength(data, offset) - headerLength;
if (frameLength > 0) {
// logger.log(`AAC frame, offset/length/total/pts:${offset+headerLength}/${frameLength}/${data.byteLength}`);
return { headerLength, frameLength };
}
}
}
@@ -279,15 +274,16 @@ export function appendFrame(
offset: number,
pts: number,
frameIndex: number
): AudioFrame | void {
): AudioFrame {
const frameDuration = getFrameDuration(track.samplerate as number);
const header = parseFrameHeader(data, offset, pts, frameIndex, frameDuration);
const stamp = pts + frameIndex * frameDuration;
const header = parseFrameHeader(data, offset);
let unit: Uint8Array;
if (header) {
const { frameLength, headerLength, stamp } = header;
const { frameLength, headerLength } = header;
const length = headerLength + frameLength;
const missing = Math.max(0, offset + length - data.length);
// logger.log(`AAC frame ${frameIndex}, pts:${stamp} length@offset/total: ${frameLength}@${offset+headerLength}/${data.byteLength} missing: ${missing}`);
let unit: Uint8Array;
if (missing) {
unit = new Uint8Array(length - headerLength);
unit.set(data.subarray(offset + headerLength, data.length), 0);
@@ -305,4 +301,13 @@
return { sample, length, missing };
}
// overflow incomplete header
const length = data.length - offset;
unit = new Uint8Array(length);
unit.set(data.subarray(offset, data.length), 0);
const sample: AudioSample = {
unit,
pts: stamp,
};
return { sample, length, missing: -1 };
}
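The stamp that moved out of parseFrameHeader and into appendFrame is plain frame math: an AAC frame carries 1024 samples, timestamped on the 90 kHz MPEG-TS clock. With an assumed sample rate:

const samplerate = 44100;
const frameDuration = (1024 * 90000) / samplerate; // ≈ 2089.8 ticks per frame
const stampOf = (pts: number, frameIndex: number) => pts + frameIndex * frameDuration;
console.log(stampOf(0, 3)); // ≈ 6269.4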

node_modules/hls.js/src/demux/base-audio-demuxer.ts

@@ -1,13 +1,14 @@
import * as ID3 from '../demux/id3';
import type {
import {
DemuxerResult,
Demuxer,
DemuxedAudioTrack,
AudioFrame,
DemuxedMetadataTrack,
DemuxedAvcTrack,
DemuxedVideoTrack,
DemuxedUserdataTrack,
KeyData,
MetadataSchema,
} from '../types/demuxer';
import { dummyTrack } from './dummy-demuxed-track';
import { appendUint8Array } from '../utils/mp4-tools';
@@ -20,7 +21,12 @@ class BaseAudioDemuxer implements Demuxer {
protected cachedData: Uint8Array | null = null;
protected initPTS: number | null = null;
resetInitSegment(audioCodec: string, videoCodec: string, duration: number) {
resetInitSegment(
initSegment: Uint8Array | undefined,
audioCodec: string | undefined,
videoCodec: string | undefined,
trackDuration: number
) {
this._id3Track = {
type: 'id3',
id: 3,
@@ -72,6 +78,7 @@ class BaseAudioDemuxer implements Demuxer {
pts: this.initPTS,
dts: this.initPTS,
data: id3Data,
type: MetadataSchema.audioId3,
});
}
@@ -91,7 +98,12 @@
} else if (ID3.canParse(data, offset)) {
// after a ID3.canParse, a call to ID3.getID3Data *should* always returns some data
id3Data = ID3.getID3Data(data, offset)!;
id3Track.samples.push({ pts: pts, dts: pts, data: id3Data });
id3Track.samples.push({
pts: pts,
dts: pts,
data: id3Data,
type: MetadataSchema.audioId3,
});
offset += id3Data.length;
lastDataIndex = offset;
} else {
@@ -109,7 +121,7 @@
return {
audioTrack: track,
avcTrack: dummyTrack() as DemuxedAvcTrack,
videoTrack: dummyTrack() as DemuxedVideoTrack,
id3Track,
textTrack: dummyTrack() as DemuxedUserdataTrack,
};
@@ -137,7 +149,7 @@
return {
audioTrack: this._audioTrack,
avcTrack: dummyTrack() as DemuxedAvcTrack,
videoTrack: dummyTrack() as DemuxedVideoTrack,
id3Track: this._id3Track,
textTrack: dummyTrack() as DemuxedUserdataTrack,
};

node_modules/hls.js/src/demux/dummy-demuxed-track.ts

@@ -1,11 +1,11 @@
import type { DemuxedTrack } from '../types/demuxer';
export function dummyTrack(): DemuxedTrack {
export function dummyTrack(type = '', inputTimeScale = 90000): DemuxedTrack {
return {
type: '',
type,
id: -1,
pid: -1,
inputTimeScale: 90000,
inputTimeScale,
sequenceNumber: -1,
samples: [],
dropped: 0,

node_modules/hls.js/src/demux/mp3demuxer.ts

@@ -9,18 +9,23 @@ class MP3Demuxer extends BaseAudioDemuxer {
class MP3Demuxer extends BaseAudioDemuxer {
static readonly minProbeByteLength: number = 4;
resetInitSegment(audioCodec, videoCodec, duration) {
super.resetInitSegment(audioCodec, videoCodec, duration);
resetInitSegment(
initSegment: Uint8Array | undefined,
audioCodec: string | undefined,
videoCodec: string | undefined,
trackDuration: number
) {
super.resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration);
this._audioTrack = {
container: 'audio/mpeg',
type: 'audio',
id: 2,
pid: -1,
sequenceNumber: 0,
isAAC: false,
segmentCodec: 'mp3',
samples: [],
manifestCodec: audioCodec,
duration: duration,
duration: trackDuration,
inputTimeScale: 90000,
dropped: 0,
};

node_modules/hls.js/src/demux/mp4demuxer.ts

@@ -4,82 +4,171 @@
import {
Demuxer,
DemuxerResult,
PassthroughVideoTrack,
PassthroughTrack,
DemuxedAudioTrack,
DemuxedUserdataTrack,
DemuxedMetadataTrack,
KeyData,
MetadataSchema,
} from '../types/demuxer';
import {
findBox,
segmentValidRange,
appendUint8Array,
parseEmsg,
parseSamples,
parseInitSegment,
RemuxerTrackIdConfig,
} from '../utils/mp4-tools';
import { dummyTrack } from './dummy-demuxed-track';
import type { HlsEventEmitter } from '../events';
import type { HlsConfig } from '../config';
const emsgSchemePattern = /\/emsg[-/]ID3/i;
class MP4Demuxer implements Demuxer {
static readonly minProbeByteLength = 1024;
private remainderData: Uint8Array | null = null;
private timeOffset: number = 0;
private config: HlsConfig;
private videoTrack?: PassthroughTrack;
private audioTrack?: DemuxedAudioTrack;
private id3Track?: DemuxedMetadataTrack;
private txtTrack?: DemuxedUserdataTrack;
constructor(observer: HlsEventEmitter, config: HlsConfig) {
this.config = config;
}
resetTimeStamp() {}
public resetTimeStamp() {}
resetInitSegment() {}
public resetInitSegment(
initSegment: Uint8Array,
audioCodec: string | undefined,
videoCodec: string | undefined,
trackDuration: number
) {
const initData = parseInitSegment(initSegment);
const videoTrack = (this.videoTrack = dummyTrack(
'video',
1
) as PassthroughTrack);
const audioTrack = (this.audioTrack = dummyTrack(
'audio',
1
) as DemuxedAudioTrack);
const captionTrack = (this.txtTrack = dummyTrack(
'text',
1
) as DemuxedUserdataTrack);
resetContiguity(): void {}
this.id3Track = dummyTrack('id3', 1) as DemuxedMetadataTrack;
this.timeOffset = 0;
static probe(data) {
// ensure we find a moof box in the first 16 kB
return (
findBox({ data: data, start: 0, end: Math.min(data.length, 16384) }, [
'moof',
]).length > 0
);
if (initData.video) {
const { id, timescale, codec } = initData.video;
videoTrack.id = id;
videoTrack.timescale = captionTrack.timescale = timescale;
videoTrack.codec = codec;
}
if (initData.audio) {
const { id, timescale, codec } = initData.audio;
audioTrack.id = id;
audioTrack.timescale = timescale;
audioTrack.codec = codec;
}
captionTrack.id = RemuxerTrackIdConfig.text;
videoTrack.sampleDuration = 0;
videoTrack.duration = audioTrack.duration = trackDuration;
}
demux(data): DemuxerResult {
public resetContiguity(): void {}
static probe(data: Uint8Array) {
// ensure we find a moof box in the first 16 kB
data = data.length > 16384 ? data.subarray(0, 16384) : data;
return findBox(data, ['moof']).length > 0;
}
public demux(data: Uint8Array, timeOffset: number): DemuxerResult {
this.timeOffset = timeOffset;
// Load all data into the avc track. The CMAF remuxer will look for the data in the samples object; the rest of the fields do not matter
let avcSamples = data;
const avcTrack = dummyTrack() as PassthroughVideoTrack;
let videoSamples = data;
const videoTrack = this.videoTrack as PassthroughTrack;
const textTrack = this.txtTrack as DemuxedUserdataTrack;
if (this.config.progressive) {
// Split the bytestream into two ranges: one encompassing all data up until the start of the last moof, and everything else.
// This is done to guarantee that we're sending valid data to MSE - when demuxing progressively, we have no guarantee
// that the fetch loader gives us flush moof+mdat pairs. If we push jagged data to MSE, it will throw an exception.
if (this.remainderData) {
avcSamples = appendUint8Array(this.remainderData, data);
videoSamples = appendUint8Array(this.remainderData, data);
}
const segmentedData = segmentValidRange(avcSamples);
const segmentedData = segmentValidRange(videoSamples);
this.remainderData = segmentedData.remainder;
avcTrack.samples = segmentedData.valid || new Uint8Array();
videoTrack.samples = segmentedData.valid || new Uint8Array();
} else {
avcTrack.samples = avcSamples;
videoTrack.samples = videoSamples;
}
const id3Track = this.extractID3Track(videoTrack, timeOffset);
textTrack.samples = parseSamples(timeOffset, videoTrack);
return {
videoTrack,
audioTrack: this.audioTrack as DemuxedAudioTrack,
id3Track,
textTrack: this.txtTrack as DemuxedUserdataTrack,
};
}
public flush() {
const timeOffset = this.timeOffset;
const videoTrack = this.videoTrack as PassthroughTrack;
const textTrack = this.txtTrack as DemuxedUserdataTrack;
videoTrack.samples = this.remainderData || new Uint8Array();
this.remainderData = null;
const id3Track = this.extractID3Track(videoTrack, this.timeOffset);
textTrack.samples = parseSamples(timeOffset, videoTrack);
return {
videoTrack,
audioTrack: dummyTrack() as DemuxedAudioTrack,
avcTrack,
id3Track: dummyTrack() as DemuxedMetadataTrack,
id3Track,
textTrack: dummyTrack() as DemuxedUserdataTrack,
};
}
flush() {
const avcTrack = dummyTrack() as PassthroughVideoTrack;
avcTrack.samples = this.remainderData || new Uint8Array();
this.remainderData = null;
return {
audioTrack: dummyTrack() as DemuxedAudioTrack,
avcTrack,
id3Track: dummyTrack() as DemuxedMetadataTrack,
textTrack: dummyTrack() as DemuxedUserdataTrack,
};
private extractID3Track(
videoTrack: PassthroughTrack,
timeOffset: number
): DemuxedMetadataTrack {
const id3Track = this.id3Track as DemuxedMetadataTrack;
if (videoTrack.samples.length) {
const emsgs = findBox(videoTrack.samples, ['emsg']);
if (emsgs) {
emsgs.forEach((data: Uint8Array) => {
const emsgInfo = parseEmsg(data);
if (emsgSchemePattern.test(emsgInfo.schemeIdUri)) {
const pts = Number.isFinite(emsgInfo.presentationTime)
? emsgInfo.presentationTime! / emsgInfo.timeScale
: timeOffset +
emsgInfo.presentationTimeDelta! / emsgInfo.timeScale;
const payload = emsgInfo.payload;
id3Track.samples.push({
data: payload,
len: payload.byteLength,
dts: pts,
pts: pts,
type: MetadataSchema.emsg,
});
}
});
}
}
return id3Track;
}
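The timing rule in extractID3Track mirrors the emsg box versions: a version-1 emsg carries an absolute presentationTime, while version 0 only has a presentationTimeDelta relative to the segment start. With assumed values:

const timeScale = 90000;
const timeOffset = 10; // segment start, seconds
const v1pts = 900000 / timeScale; // 10 s, absolute presentationTime
const v0pts = timeOffset + 450000 / timeScale; // 15 s, delta-based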
demuxSampleAes(

node_modules/hls.js/src/demux/sample-aes.ts

@@ -45,6 +45,11 @@ class SampleAesDecrypter {
sync: boolean
) {
const curUnit = samples[sampleIndex].unit;
if (curUnit.length <= 16) {
// No encrypted portion in this sample (the first 16 bytes are not
// encrypted; see https://developer.apple.com/library/archive/documentation/AudioVideo/Conceptual/HLS_Sample_Encryption/Encryption/Encryption.html)
return;
}
const encryptedData = curUnit.subarray(
16,
curUnit.length - (curUnit.length % 16)

node_modules/hls.js/src/demux/transmuxer-interface.ts

@@ -125,6 +125,7 @@ export default class TransmuxerInterface {
w.removeEventListener('message', this.onwmsg);
w.terminate();
this.worker = null;
this.onwmsg = undefined;
} else {
const transmuxer = this.transmuxer;
if (transmuxer) {
@@ -136,8 +137,11 @@
if (observer) {
observer.removeAllListeners();
}
this.frag = null;
// @ts-ignore
this.observer = null;
// @ts-ignore
this.hls = null;
}
push(
@ -161,7 +165,7 @@ export default class TransmuxerInterface {
const discontinuity = !(lastFrag && frag.cc === lastFrag.cc);
const trackSwitch = !(lastFrag && chunkMeta.level === lastFrag.level);
const snDiff = lastFrag ? chunkMeta.sn - (lastFrag.sn as number) : -1;
const partDiff = this.part ? chunkMeta.part - this.part.index : -1;
const contiguous =
!trackSwitch && (snDiff === 1 || (snDiff === 0 && partDiff === 1));
const now = self.performance.now();
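The partDiff default switching from 1 to -1 matters for low-latency streams: when there is no previous part, a part-indexed chunk should not be treated as contiguous by default. A sketch of the combined test, simplified from the code above:

// snDiff: sequence-number delta to the last fragment; partDiff: part-index
// delta to the last part, with -1 when no previous part exists.
function isContiguous(
  trackSwitch: boolean,
  snDiff: number,
  partDiff: number
): boolean {
  return !trackSwitch && (snDiff === 1 || (snDiff === 0 && partDiff === 1));
}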

View file

@ -74,9 +74,12 @@ export default function TransmuxerWorker(self) {
});
}
function emitTransmuxComplete(
self: any,
transmuxResult: TransmuxerResult
): boolean {
if (isEmptyResult(transmuxResult.remuxResult)) {
return false;
}
const transferable: Array<ArrayBuffer> = [];
const { audio, video } = transmuxResult.remuxResult;
@ -90,6 +93,7 @@ function emitTransmuxComplete(self: any, transmuxResult: TransmuxerResult) {
{ event: 'transmuxComplete', data: transmuxResult },
transferable
);
return true;
}
// Converts data to a transferable object https://developers.google.com/web/updates/2011/12/Transferable-Objects-Lightning-Fast)
@ -111,9 +115,14 @@ function handleFlushResult(
results: Array<TransmuxerResult>,
chunkMeta: ChunkMetadata
) {
const parsed = results.reduce(
(parsed, result) => emitTransmuxComplete(self, result) || parsed,
false
);
if (!parsed) {
// Emit at least one "transmuxComplete" message even if media is not found to update stream-controller state to PARSING
self.postMessage({ event: 'transmuxComplete', data: results[0] });
}
self.postMessage({ event: 'flush', data: chunkMeta });
}
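Note the accumulator order: `emitTransmuxComplete(self, result) || parsed` calls the emitter for every result, whereas `parsed || emitTransmuxComplete(...)` would short-circuit after the first success. A toy model of the same pattern:

// Toy reduction mirroring handleFlushResult; emit() stands in for
// emitTransmuxComplete and returns whether the result carried media.
function demoFlush(results: boolean[], emit: (r: boolean) => boolean): boolean {
  return results.reduce((parsed, result) => emit(result) || parsed, false);
}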

View file

@ -11,7 +11,7 @@ import PassThroughRemuxer from '../remux/passthrough-remuxer';
import ChunkCache from './chunk-cache';
import { appendUint8Array } from '../utils/mp4-tools';
import { logger } from '../utils/logger';
import type { Demuxer, DemuxerResult, KeyData } from '../types/demuxer';
import type { Remuxer } from '../types/remuxer';
import type { TransmuxerResult, ChunkMetadata } from '../types/transmuxer';
import type { HlsConfig } from '../config';
@ -202,7 +202,7 @@ export default class Transmuxer {
});
}
const transmuxResults: TransmuxerResult[] = [];
const { timeOffset } = currentTransmuxState;
if (decrypter) {
// The decrypter may have data cached, which needs to be demuxed. In this case we'll have two TransmuxResults
@ -247,8 +247,12 @@ export default class Transmuxer {
return transmuxResults;
}
private flushRemux(
transmuxResults: TransmuxerResult[],
demuxResult: DemuxerResult,
chunkMeta: ChunkMetadata
) {
const { audioTrack, videoTrack, id3Track, textTrack } = demuxResult;
const { accurateTimeOffset, timeOffset } = this.currentTransmuxState;
logger.log(
`[transmuxer.ts]: Flushed fragment ${chunkMeta.sn}${
@ -257,7 +261,7 @@ export default class Transmuxer {
);
const remuxResult = this.remuxer!.remux(
audioTrack,
videoTrack,
id3Track,
textTrack,
timeOffset,
@ -295,13 +299,18 @@ export default class Transmuxer {
initSegmentData: Uint8Array | undefined,
audioCodec: string | undefined,
videoCodec: string | undefined,
trackDuration: number
) {
const { demuxer, remuxer } = this;
if (!demuxer || !remuxer) {
return;
}
demuxer.resetInitSegment(
initSegmentData,
audioCodec,
videoCodec,
trackDuration
);
remuxer.resetInitSegment(initSegmentData, audioCodec, videoCodec);
}
@ -349,12 +358,12 @@ export default class Transmuxer {
accurateTimeOffset: boolean,
chunkMeta: ChunkMetadata
): TransmuxerResult {
const { audioTrack, videoTrack, id3Track, textTrack } = (
this.demuxer as Demuxer
).demux(data, timeOffset, false, !this.config.progressive);
const remuxResult = this.remuxer!.remux(
audioTrack,
videoTrack,
id3Track,
textTrack,
timeOffset,
@ -380,7 +389,7 @@ export default class Transmuxer {
.then((demuxResult) => {
const remuxResult = this.remuxer!.remux(
demuxResult.audioTrack,
demuxResult.videoTrack,
demuxResult.id3Track,
demuxResult.textTrack,
timeOffset,

View file

@ -12,15 +12,18 @@
import * as ADTS from './adts';
import * as MpegAudio from './mpegaudio';
import ExpGolomb from './exp-golomb';
import SampleAesDecrypter from './sample-aes';
import { Events } from '../events';
import {
appendUint8Array,
parseSEIMessageFromNALu,
RemuxerTrackIdConfig,
} from '../utils/mp4-tools';
import { logger } from '../utils/logger';
import { ErrorTypes, ErrorDetails } from '../errors';
import type { HlsConfig } from '../config';
import type { HlsEventEmitter } from '../events';
import {
DemuxedAvcTrack,
DemuxedAudioTrack,
DemuxedTrack,
@ -31,24 +34,10 @@ import type {
DemuxedUserdataTrack,
ElementaryStreamData,
KeyData,
MetadataSchema,
} from '../types/demuxer';
import { AudioFrame } from '../types/demuxer';
type ParsedTimestamp = {
pts?: number;
dts?: number;
@ -76,18 +65,15 @@ class TSDemuxer implements Demuxer {
private sampleAes: SampleAesDecrypter | null = null;
private pmtParsed: boolean = false;
private audioCodec?: string;
private videoCodec?: string;
private _duration: number = 0;
private _pmtId: number = -1;
private _avcTrack?: DemuxedAvcTrack;
private _audioTrack?: DemuxedAudioTrack;
private _id3Track?: DemuxedMetadataTrack;
private _txtTrack?: DemuxedUserdataTrack;
private aacOverFlow: AudioFrame | null = null;
private avcSample: ParsedAvcSample | null = null;
private remainderData: Uint8Array | null = null;
@ -145,7 +131,7 @@ class TSDemuxer implements Demuxer {
*/
static createTrack(
type: 'audio' | 'video' | 'id3' | 'text',
duration?: number
): DemuxedTrack {
return {
container:
@ -166,38 +152,29 @@ class TSDemuxer implements Demuxer {
* Resets all internal track instances of the demuxer.
*/
public resetInitSegment(
initSegment: Uint8Array | undefined,
audioCodec: string,
videoCodec: string,
trackDuration: number
) {
this.pmtParsed = false;
this._pmtId = -1;
this._avcTrack = TSDemuxer.createTrack('video') as DemuxedAvcTrack;
this._audioTrack = TSDemuxer.createTrack(
'audio',
trackDuration
) as DemuxedAudioTrack;
this._id3Track = TSDemuxer.createTrack('id3') as DemuxedMetadataTrack;
this._txtTrack = TSDemuxer.createTrack('text') as DemuxedUserdataTrack;
this._audioTrack.segmentCodec = 'aac';
// flush any partial content
this.aacOverFlow = null;
this.avcSample = null;
this.audioCodec = audioCodec;
this.videoCodec = videoCodec;
this._duration = trackDuration;
}
public resetTimeStamp() {}
@ -214,7 +191,6 @@ class TSDemuxer implements Demuxer {
_id3Track.pesData = null;
}
this.aacOverFlow = null;
}
public demux(
@ -229,12 +205,13 @@ class TSDemuxer implements Demuxer {
let pes: PES | null;
const videoTrack = this._avcTrack as DemuxedAvcTrack;
const audioTrack = this._audioTrack as DemuxedAudioTrack;
const id3Track = this._id3Track as DemuxedMetadataTrack;
const textTrack = this._txtTrack as DemuxedUserdataTrack;
let avcId = videoTrack.pid;
let avcData = videoTrack.pesData;
let audioId = audioTrack.pid;
let id3Id = id3Track.pid;
let audioData = audioTrack.pesData;
@ -254,9 +231,9 @@ class TSDemuxer implements Demuxer {
this.remainderData = data;
return {
audioTrack,
videoTrack,
id3Track,
textTrack,
};
}
@ -295,7 +272,7 @@ class TSDemuxer implements Demuxer {
case avcId:
if (stt) {
if (avcData && (pes = parsePES(avcData))) {
this.parseAVCPES(videoTrack, textTrack, pes, false);
}
avcData = { data: [], size: 0 };
@ -308,10 +285,13 @@ class TSDemuxer implements Demuxer {
case audioId:
if (stt) {
if (audioData && (pes = parsePES(audioData))) {
switch (audioTrack.segmentCodec) {
case 'aac':
this.parseAACPES(audioTrack, pes);
break;
case 'mp3':
this.parseMPEGPES(audioTrack, pes);
break;
}
}
audioData = { data: [], size: 0 };
@ -324,7 +304,7 @@ class TSDemuxer implements Demuxer {
case id3Id:
if (stt) {
if (id3Data && (pes = parsePES(id3Data))) {
this.parseID3PES(id3Track, pes);
}
id3Data = { data: [], size: 0 };
@ -349,8 +329,7 @@ class TSDemuxer implements Demuxer {
const parsedPIDs = parsePMT(
data,
offset,
this.typeSupported,
isSampleAes
);
@ -362,13 +341,13 @@ class TSDemuxer implements Demuxer {
// but we are not using this for MP4 track IDs.
avcId = parsedPIDs.avc;
if (avcId > 0) {
videoTrack.pid = avcId;
}
audioId = parsedPIDs.audio;
if (audioId > 0) {
audioTrack.pid = audioId;
audioTrack.segmentCodec = parsedPIDs.segmentCodec;
}
id3Id = parsedPIDs.id3;
if (id3Id > 0) {
@ -405,15 +384,15 @@ class TSDemuxer implements Demuxer {
});
}
videoTrack.pesData = avcData;
audioTrack.pesData = audioData;
id3Track.pesData = id3Data;
const demuxResult: DemuxerResult = {
audioTrack,
videoTrack,
id3Track,
textTrack,
};
if (flush) {
@ -431,10 +410,10 @@ class TSDemuxer implements Demuxer {
result = this.demux(remainderData, -1, false, true);
} else {
result = {
videoTrack: this._avcTrack as DemuxedAvcTrack,
audioTrack: this._audioTrack as DemuxedAudioTrack,
id3Track: this._id3Track as DemuxedMetadataTrack,
textTrack: this._txtTrack as DemuxedUserdataTrack,
};
}
this.extractRemainingSamples(result);
@ -445,27 +424,34 @@ class TSDemuxer implements Demuxer {
}
private extractRemainingSamples(demuxResult: DemuxerResult) {
const { audioTrack, videoTrack, id3Track, textTrack } = demuxResult;
const avcData = videoTrack.pesData;
const audioData = audioTrack.pesData;
const id3Data = id3Track.pesData;
// try to parse last PES packets
let pes: PES | null;
if (avcData && (pes = parsePES(avcData))) {
this.parseAVCPES(
videoTrack as DemuxedAvcTrack,
textTrack as DemuxedUserdataTrack,
pes,
true
);
videoTrack.pesData = null;
} else {
// either avcData null or PES truncated, keep it for next frag parsing
videoTrack.pesData = avcData;
}
if (audioData && (pes = parsePES(audioData))) {
switch (audioTrack.segmentCodec) {
case 'aac':
this.parseAACPES(audioTrack, pes);
break;
case 'mp3':
this.parseMPEGPES(audioTrack, pes);
break;
}
audioTrack.pesData = null;
} else {
if (audioData?.size) {
@ -479,7 +465,7 @@ class TSDemuxer implements Demuxer {
}
if (id3Data && (pes = parsePES(id3Data))) {
this.parseID3PES(id3Track, pes);
id3Track.pesData = null;
} else {
// either id3Data null or PES truncated, keep it for next frag parsing
@ -511,19 +497,19 @@ class TSDemuxer implements Demuxer {
sampleAes: SampleAesDecrypter
): Promise<DemuxerResult> {
return new Promise((resolve) => {
const { audioTrack, videoTrack } = demuxResult;
if (audioTrack.samples && audioTrack.segmentCodec === 'aac') {
sampleAes.decryptAacSamples(audioTrack.samples, 0, () => {
if (videoTrack.samples) {
sampleAes.decryptAvcSamples(videoTrack.samples, 0, 0, () => {
resolve(demuxResult);
});
} else {
resolve(demuxResult);
}
});
} else if (videoTrack.samples) {
sampleAes.decryptAvcSamples(videoTrack.samples, 0, 0, () => {
resolve(demuxResult);
});
}
@ -531,13 +517,16 @@ class TSDemuxer implements Demuxer {
}
public destroy() {
this._duration = 0;
}
private parseAVCPES(
track: DemuxedAvcTrack,
textTrack: DemuxedUserdataTrack,
pes: PES,
last: boolean
) {
const units = this.parseAVCNALu(track, pes.data);
const debug = false;
let avcSample = this.avcSample;
let push: boolean;
@ -618,106 +607,11 @@ class TSDemuxer implements Demuxer {
if (debug && avcSample) {
avcSample.debug += 'SEI ';
}
parseSEIMessageFromNALu(
discardEPB(unit.data),
pes.pts as number,
textTrack.samples
);
break;
// SPS
}
@ -780,7 +674,7 @@ class TSDemuxer implements Demuxer {
break;
// Filler Data
case 12:
push = true;
break;
default:
push = false;
@ -802,12 +696,11 @@ class TSDemuxer implements Demuxer {
}
}
private getLastNalUnit(samples: AvcSample[]) {
let avcSample = this.avcSample;
let lastUnit;
// try to fallback to previous sample if current one is empty
if (!avcSample || avcSample.units.length === 0) {
avcSample = samples[samples.length - 1];
}
if (avcSample?.units) {
@ -817,13 +710,15 @@ class TSDemuxer implements Demuxer {
return lastUnit;
}
private parseAVCNALu(
track: DemuxedAvcTrack,
array: Uint8Array
): Array<{
data: Uint8Array;
type: number;
state?: number;
}> {
const len = array.byteLength;
let state = track.naluState || 0;
const lastState = state;
const units = [] as Array<{
@ -875,7 +770,7 @@ class TSDemuxer implements Demuxer {
// first check if start code delimiter is overlapping between 2 PES packets,
// ie it started in last packet (lastState not zero)
// and ended at the beginning of this PES packet (i <= 4 - lastState)
const lastUnit = this.getLastNalUnit(track.samples);
if (lastUnit) {
if (lastState && i <= 4 - lastState) {
// start delimiter overlapping between PES packets
@ -928,7 +823,7 @@ class TSDemuxer implements Demuxer {
// no NALu found
if (units.length === 0) {
// append pes.data to previous NAL unit
const lastUnit = this.getLastNalUnit(track.samples);
if (lastUnit) {
const tmp = new Uint8Array(lastUnit.data.byteLength + array.byteLength);
tmp.set(lastUnit.data, 0);
@ -940,24 +835,29 @@ class TSDemuxer implements Demuxer {
return units;
}
private parseAACPES(track: DemuxedAudioTrack, pes: PES) {
let startOffset = 0;
const aacOverFlow = this.aacOverFlow;
let data = pes.data;
if (aacOverFlow) {
this.aacOverFlow = null;
const sampleLength = aacOverFlow.sample.unit.byteLength;
const frameMissingBytes = Math.min(aacOverFlow.missing, sampleLength);
// logger.log(`AAC: append overflowing ${sampleLength} bytes to beginning of new PES`);
if (frameMissingBytes === -1) {
const tmp = new Uint8Array(sampleLength + data.byteLength);
tmp.set(aacOverFlow.sample.unit, 0);
tmp.set(data, sampleLength);
data = tmp;
} else {
const frameOverflowBytes = sampleLength - frameMissingBytes;
aacOverFlow.sample.unit.set(
data.subarray(0, frameMissingBytes),
frameOverflowBytes
);
track.samples.push(aacOverFlow.sample);
startOffset = aacOverFlow.missing;
}
}
// look for ADTS header (0xFFFx)
let offset: number;
@ -990,7 +890,13 @@ class TSDemuxer implements Demuxer {
}
}
ADTS.initTrackConfig(
track,
this.observer,
data,
offset,
this.audioCodec as string
);
let pts: number;
if (pes.pts !== undefined) {
@ -1007,31 +913,25 @@ class TSDemuxer implements Demuxer {
// scan for aac samples
let frameIndex = 0;
let frame;
while (offset < len) {
if (ADTS.isHeader(data, offset)) {
frame = ADTS.appendFrame(track, data, offset, pts, frameIndex);
offset += frame.length;
if (!frame.missing) {
frameIndex++;
for (; offset < len - 1; offset++) {
if (ADTS.isHeader(data, offset)) {
break;
}
}
} else {
this.aacOverFlow = frame;
break;
}
} else {
// nothing found, keep looking
offset++;
}
}
}
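The rewritten loop always advances past the bytes appendFrame consumed; a complete frame triggers a scan to the next ADTS header, while a frame with missing bytes is parked in aacOverFlow. In the overflow-stitching branch earlier, `missing === -1` appears to mean even the ADTS header was truncated, so the buffered unit is simply prepended to the next payload. A sketch of that stitch (assumed semantics, not the library API):

// prev: the buffered partial frame unit; missing: bytes still needed
// (-1 when the header itself was cut off); next: the new PES payload.
function stitchOverflow(
  prev: Uint8Array,
  missing: number,
  next: Uint8Array
): Uint8Array {
  if (missing === -1) {
    const tmp = new Uint8Array(prev.byteLength + next.byteLength);
    tmp.set(prev, 0);
    tmp.set(next, prev.byteLength);
    return tmp; // re-scan the joined buffer from the start
  }
  // Fill the tail of the buffered unit, then continue after the used bytes.
  prev.set(next.subarray(0, missing), prev.byteLength - missing);
  return next.subarray(missing);
}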
private parseMPEGPES(track: DemuxedAudioTrack, pes: PES) {
const data = pes.data;
const length = data.length;
let frameIndex = 0;
@ -1045,7 +945,7 @@ class TSDemuxer implements Demuxer {
while (offset < length) {
if (MpegAudio.isHeader(data, offset)) {
const frame = MpegAudio.appendFrame(
track,
data,
offset,
pts,
@ -1065,12 +965,15 @@ class TSDemuxer implements Demuxer {
}
}
private parseID3PES(id3Track: DemuxedMetadataTrack, pes: PES) {
if (pes.pts === undefined) {
logger.warn('[tsdemuxer]: ID3 PES unknown PTS');
return;
}
const id3Sample = Object.assign({}, pes as Required<PES>, {
type: this._avcTrack ? MetadataSchema.emsg : MetadataSchema.audioId3,
});
id3Track.samples.push(id3Sample);
}
}
@ -1097,8 +1000,8 @@ function parsePAT(data, offset) {
// logger.log('PMT PID:' + this._pmtId);
}
function parsePMT(data, offset, typeSupported, isSampleAes) {
const result = { audio: -1, avc: -1, id3: -1, segmentCodec: 'aac' };
const sectionLength = ((data[offset + 1] & 0x0f) << 8) | data[offset + 2];
const tableEnd = offset + 3 + sectionLength - 4;
// to determine where the table is, we have to figure out how
@ -1156,11 +1059,11 @@ function parsePMT(data, offset, mpegSupported, isSampleAes) {
case 0x03:
case 0x04:
// logger.log('MPEG PID:' + pid);
if (typeSupported.mpeg !== true && typeSupported.mp3 !== true) {
logger.log('MPEG audio found, not supported in this browser');
} else if (result.audio === -1) {
result.audio = pid;
result.segmentCodec = 'mp3';
}
break;
@ -1304,24 +1207,6 @@ function pushAccessUnit(avcSample: ParsedAvcSample, avcTrack: DemuxedAvcTrack) {
}
}
/**
* remove Emulation Prevention bytes from a RBSP
*/

6
node_modules/hls.js/src/events.ts generated vendored
View file

@ -58,7 +58,7 @@ export enum Events {
MEDIA_ATTACHING = 'hlsMediaAttaching',
// Fired when MediaSource has been successfully attached to media element
MEDIA_ATTACHED = 'hlsMediaAttached',
// Fired before detaching MediaSource from media element
MEDIA_DETACHING = 'hlsMediaDetaching',
// Fired when MediaSource has been detached from media element
MEDIA_DETACHED = 'hlsMediaDetached',
@ -150,9 +150,9 @@ export enum Events {
FRAG_BUFFERED = 'hlsFragBuffered',
// fired when fragment matching with current media position is changing - data : { id : demuxer id, frag : fragment object }
FRAG_CHANGED = 'hlsFragChanged',
// Identifier for a FPS drop event - data: { currentDropped, currentDecoded, totalDroppedFrames }
FPS_DROP = 'hlsFpsDrop',
// triggered when FPS drop triggers auto level capping - data: { level, droppedLevel }
FPS_DROP_LEVEL_CAPPING = 'hlsFpsDropLevelCapping',
// Identifier for an error event - data: { type : error type, details : error details, fatal : if true, hls.js cannot/will not try to recover, if false, hls.js will try to recover, other error specific data }
ERROR = 'hlsError',

18
node_modules/hls.js/src/hls.ts generated vendored
View file

@ -622,7 +622,7 @@ export default class Hls implements HlsEventEmitter {
const len = levels.length;
for (let i = 0; i < len; i++) {
if (levels[i].maxBitrate >= minAutoBitrate) {
return i;
}
}
@ -671,6 +671,14 @@ export default class Hls implements HlsEventEmitter {
this.abrController.nextAutoLevel = Math.max(this.minAutoLevel, nextLevel);
}
/**
* get the datetime value relative to media.currentTime for the active level Program Date Time if present
* @type {Date}
*/
public get playingDate(): Date | null {
return this.streamController.currentProgramDateTime;
}
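A usage sketch for the new getter, assuming an attached Hls instance and a playlist that carries EXT-X-PROGRAM-DATE-TIME (the getter returns null otherwise):

declare const hls: { playingDate: Date | null }; // an attached Hls instance
const wallClock = hls.playingDate;
if (wallClock) {
  console.log(`media position corresponds to ${wallClock.toISOString()}`);
}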
/**
* @type {AudioTrack[]}
*/
@ -851,16 +859,22 @@ export type {
PlaylistLoaderConstructor,
StreamControllerConfig,
LatencyControllerConfig,
MetadataControllerConfig,
TimelineControllerConfig,
TSDemuxerConfig,
} from './config';
export type { CuesInterface } from './utils/cues';
export type { MediaKeyFunc, KeySystems } from './utils/mediakeys-helper';
export type { DateRange } from './loader/date-range';
export type { LoadStats } from './loader/load-stats';
export type { LevelKey } from './loader/level-key';
export type { LevelDetails } from './loader/level-details';
export type { SourceBufferName } from './types/buffer';
export type {
MetadataSample,
MetadataSchema,
UserdataSample,
} from './types/demuxer';
export type {
LevelParsed,
LevelAttributes,

113
node_modules/hls.js/src/loader/date-range.ts generated vendored Normal file
View file

@ -0,0 +1,113 @@
import { AttrList } from '../utils/attr-list';
import { logger } from '../utils/logger';
export enum DateRangeAttribute {
ID = 'ID',
CLASS = 'CLASS',
START_DATE = 'START-DATE',
DURATION = 'DURATION',
END_DATE = 'END-DATE',
END_ON_NEXT = 'END-ON-NEXT',
PLANNED_DURATION = 'PLANNED-DURATION',
SCTE35_OUT = 'SCTE35-OUT',
SCTE35_IN = 'SCTE35-IN',
}
export class DateRange {
public attr: AttrList;
private _startDate: Date;
private _endDate?: Date;
private _badValueForSameId?: string;
constructor(dateRangeAttr: AttrList, dateRangeWithSameId?: DateRange) {
if (dateRangeWithSameId) {
const previousAttr = dateRangeWithSameId.attr;
for (const key in previousAttr) {
if (
Object.prototype.hasOwnProperty.call(dateRangeAttr, key) &&
dateRangeAttr[key] !== previousAttr[key]
) {
logger.warn(
`DATERANGE tag attribute: "${key}" does not match for tags with ID: "${dateRangeAttr.ID}"`
);
this._badValueForSameId = key;
break;
}
}
// Merge DateRange tags with the same ID
dateRangeAttr = Object.assign(
new AttrList({}),
previousAttr,
dateRangeAttr
);
}
this.attr = dateRangeAttr;
this._startDate = new Date(dateRangeAttr[DateRangeAttribute.START_DATE]);
if (DateRangeAttribute.END_DATE in this.attr) {
const endDate = new Date(this.attr[DateRangeAttribute.END_DATE]);
if (Number.isFinite(endDate.getTime())) {
this._endDate = endDate;
}
}
}
get id(): string {
return this.attr.ID;
}
get class(): string {
return this.attr.CLASS;
}
get startDate(): Date {
return this._startDate;
}
get endDate(): Date | null {
if (this._endDate) {
return this._endDate;
}
const duration = this.duration;
if (duration !== null) {
return new Date(this._startDate.getTime() + duration * 1000);
}
return null;
}
get duration(): number | null {
if (DateRangeAttribute.DURATION in this.attr) {
const duration = this.attr.decimalFloatingPoint(
DateRangeAttribute.DURATION
);
if (Number.isFinite(duration)) {
return duration;
}
} else if (this._endDate) {
return (this._endDate.getTime() - this._startDate.getTime()) / 1000;
}
return null;
}
get plannedDuration(): number | null {
if (DateRangeAttribute.PLANNED_DURATION in this.attr) {
return this.attr.decimalFloatingPoint(
DateRangeAttribute.PLANNED_DURATION
);
}
return null;
}
get endOnNext(): boolean {
return this.attr.bool(DateRangeAttribute.END_ON_NEXT);
}
get isValid(): boolean {
return (
!!this.id &&
!this._badValueForSameId &&
Number.isFinite(this.startDate.getTime()) &&
(this.duration === null || this.duration >= 0) &&
(!this.endOnNext || !!this.class)
);
}
}
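A usage sketch for the class, assuming AttrList accepts the raw attribute string of a #EXT-X-DATERANGE tag (the attribute values shown are illustrative):

const attr = new AttrList(
  'ID="splice-1",START-DATE="2022-08-24T16:00:00.000Z",DURATION=15.0'
);
const range = new DateRange(attr);
// range.isValid === true; range.endDate is startDate + 15 s, derived
// from DURATION because no END-DATE attribute was given.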

View file

@ -75,7 +75,7 @@ export default class FragmentLoader {
maxRetry: 0,
retryDelay: 0,
maxRetryDelay: config.fragLoadingMaxRetryTimeout,
highWaterMark: frag.sn === 'initSegment' ? Infinity : MIN_CHUNK_SIZE,
};
// Assign frag stats to the loader's stats reference
frag.stats = loader.stats;
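MIN_CHUNK_SIZE lets media segments stream into the transmuxer progressively, but an init segment is only usable once complete, so the change disables chunking for it. The rule, as a standalone sketch:

// sn is 'initSegment' for init segments, a number otherwise.
function loaderHighWaterMark(
  sn: number | 'initSegment',
  minChunkSize: number
): number {
  return sn === 'initSegment' ? Infinity : minChunkSize;
}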

View file

@ -97,7 +97,7 @@ export class Fragment extends BaseSegment {
public programDateTime: number | null = null;
public tagList: Array<string[]> = [];
// EXTINF has to be present for a m3u8 to be considered valid
public duration: number = 0;
// sn notates the sequence number for a segment, and if set to a string can be 'initSegment'
public sn: number | 'initSegment' = 0;

View file

@ -1,6 +1,7 @@
import { Part } from './fragment';
import type { Fragment } from './fragment';
import type { AttrList } from '../utils/attr-list';
import type { DateRange } from './date-range';
const DEFAULT_TARGET_DURATION = 10;
@ -13,6 +14,7 @@ export class LevelDetails {
public fragments: Fragment[];
public fragmentHint?: Fragment;
public partList: Part[] | null = null;
public dateRanges: Record<string, DateRange>;
public live: boolean = true;
public ageHeader: number = 0;
public advancedDateTime?: number;
@ -49,6 +51,7 @@ export class LevelDetails {
constructor(baseUrl) {
this.fragments = [];
this.dateRanges = {};
this.url = baseUrl;
}

View file

@ -1,5 +1,6 @@
import * as URLToolkit from 'url-toolkit';
import { DateRange } from './date-range';
import { Fragment, Part } from './fragment';
import { LevelDetails } from './level-details';
import { LevelKey } from './level-key';
@ -37,24 +38,11 @@ const LEVEL_PLAYLIST_REGEX_FAST = new RegExp(
const LEVEL_PLAYLIST_REGEX_SLOW = new RegExp(
[
/#(EXTM3U)/.source,
/#EXT-X-(DATERANGE|KEY|MAP|PART|PART-INF|PLAYLIST-TYPE|PRELOAD-HINT|RENDITION-REPORT|SERVER-CONTROL|SKIP|START):(.+)/
.source,
/#EXT-X-(BITRATE|DISCONTINUITY-SEQUENCE|MEDIA-SEQUENCE|TARGETDURATION|VERSION): *(\d+)/
.source,
/#EXT-X-(DISCONTINUITY|ENDLIST|GAP)/.source,
/(#)([^:]*):(.*)/.source,
/(#)(.*)(?:.*)\r?\n?/.source,
].join('|')
@ -85,7 +73,7 @@ export default class M3U8Parser {
if (avcdata.length > 2) {
let result = avcdata.shift() + '.';
result += parseInt(avcdata.shift()).toString(16);
result += ('000' + parseInt(avcdata.shift()).toString(16)).slice(-4);
return result;
}
return codec;
@ -228,6 +216,7 @@ export default class M3U8Parser {
if (currentInitSegment) {
frag.initSegment = currentInitSegment;
frag.rawProgramDateTime = currentInitSegment.rawProgramDateTime;
currentInitSegment.rawProgramDateTime = null;
}
}
@ -337,16 +326,32 @@ export default class M3U8Parser {
frag.tagList.push(value2 ? [value1, value2] : [value1]);
}
break;
case 'DISCONTINUITY':
discontinuityCounter++;
frag.tagList.push(['DIS']);
break;
case 'GAP':
frag.tagList.push([tag]);
break;
case 'BITRATE':
frag.tagList.push([tag, value1]);
break;
case 'DATERANGE': {
const dateRangeAttr = new AttrList(value1);
const dateRange = new DateRange(
dateRangeAttr,
level.dateRanges[dateRangeAttr.ID]
);
if (dateRange.isValid || level.skippedSegments) {
level.dateRanges[dateRange.id] = dateRange;
} else {
logger.warn(`Ignoring invalid DATERANGE tag: "${value1}"`);
}
// Add to fragment tag list for backwards compatibility (< v1.2.0)
frag.tagList.push(['EXT-X-DATERANGE', value1]);
break;
}
case 'DISCONTINUITY-SEQUENCE':
discontinuityCounter = parseInt(value1);
break;
case 'KEY': {
@ -425,18 +430,26 @@ export default class M3U8Parser {
}
case 'MAP': {
const mapAttrs = new AttrList(value1);
if (frag.duration) {
// Initial segment tag is after segment duration tag.
// #EXTINF: 6.0
// #EXT-X-MAP:URI="init.mp4
const init = new Fragment(type, baseurl);
setInitSegment(init, mapAttrs, id, levelkey);
currentInitSegment = init;
frag.initSegment = currentInitSegment;
if (
currentInitSegment.rawProgramDateTime &&
!frag.rawProgramDateTime
) {
frag.rawProgramDateTime = currentInitSegment.rawProgramDateTime;
}
} else {
// Initial segment tag is before segment duration tag
setInitSegment(frag, mapAttrs, id, levelkey);
currentInitSegment = frag;
createNextFrag = true;
}
break;
}
case 'SERVER-CONTROL': {
@ -622,3 +635,21 @@ function assignProgramDateTime(frag, prevFrag) {
frag.rawProgramDateTime = null;
}
}
function setInitSegment(
frag: Fragment,
mapAttrs: AttrList,
id: number,
levelkey: LevelKey | undefined
) {
frag.relurl = mapAttrs.URI;
if (mapAttrs.BYTERANGE) {
frag.setByteRange(mapAttrs.BYTERANGE);
}
frag.level = id;
frag.sn = 'initSegment';
if (levelkey) {
frag.levelkey = levelkey;
}
frag.initSegment = null;
}
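The frag.duration test distinguishes the two legal tag orders. Two hypothetical playlist excerpts showing what each branch of the MAP case above handles:

// MAP after EXTINF: frag already has a duration, so a separate init
// Fragment is created and attached to it.
const mapAfterExtinf = '#EXTINF:6.0,\n#EXT-X-MAP:URI="init.mp4"';

// MAP before EXTINF: frag.duration is still 0, so the current frag
// itself becomes the 'initSegment' entry and a new frag is started.
const mapBeforeExtinf = '#EXT-X-MAP:URI="init.mp4"\n#EXTINF:6.0,';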

View file

@ -852,7 +852,7 @@ class MP4 {
static stsd(track) {
if (track.type === 'audio') {
if (track.segmentCodec === 'mp3' && track.codec === 'mp3') {
return MP4.box(MP4.types.stsd, MP4.STSD, MP4.mp3(track));
}

View file

@ -44,6 +44,7 @@ export default class MP4Remuxer implements Remuxer {
private _initDTS!: number;
private nextAvcDts: number | null = null;
private nextAudioPts: number | null = null;
private videoSampleDuration: number | null = null;
private isAudioContiguous: boolean = false;
private isVideoContiguous: boolean = false;
@ -67,9 +68,10 @@ export default class MP4Remuxer implements Remuxer {
const result = navigator.userAgent.match(/Safari\/(\d+)/i);
safariWebkitVersion = result ? parseInt(result[1]) : 0;
}
requiresPositiveDts = !(
(!!chromeVersion && chromeVersion >= 75) ||
(!!safariWebkitVersion && safariWebkitVersion >= 600)
);
}
destroy() {}
@ -138,7 +140,7 @@ export default class MP4Remuxer implements Remuxer {
const hasVideo = videoTrack.pid > -1;
const length = videoTrack.samples.length;
const enoughAudioSamples = audioTrack.samples.length > 0;
const enoughVideoSamples = (flush && length > 0) || length > 1;
const canRemuxAvc =
((!hasAudio || enoughAudioSamples) &&
(!hasVideo || enoughVideoSamples)) ||
@ -152,6 +154,7 @@ export default class MP4Remuxer implements Remuxer {
const isVideoContiguous = this.isVideoContiguous;
let firstKeyFrameIndex = -1;
let firstKeyFramePTS;
if (enoughVideoSamples) {
firstKeyFrameIndex = findKeyframeIndex(videoTrack.samples);
@ -166,7 +169,8 @@ export default class MP4Remuxer implements Remuxer {
videoTrack.dropped += firstKeyFrameIndex;
videoTimeOffset +=
(videoTrack.samples[0].pts - startPTS) /
videoTrack.inputTimeScale;
firstKeyFramePTS = videoTimeOffset;
} else if (firstKeyFrameIndex === -1) {
logger.warn(
`[mp4-remuxer]: No keyframe found out of ${length} video samples`
@ -237,6 +241,7 @@ export default class MP4Remuxer implements Remuxer {
if (video) {
video.firstKeyFrame = firstKeyFrameIndex;
video.independent = firstKeyFrameIndex !== -1;
video.firstKeyFramePTS = firstKeyFramePTS;
}
}
}
@ -244,11 +249,20 @@ export default class MP4Remuxer implements Remuxer {
// Allow ID3 and text to remux, even if more audio/video samples are required
if (this.ISGenerated) {
if (id3Track.samples.length) {
id3 = flushTextTrackMetadataCueSamples(
id3Track,
timeOffset,
this._initPTS,
this._initDTS
);
}
if (textTrack.samples.length) {
text = flushTextTrackUserdataCueSamples(
textTrack,
timeOffset,
this._initPTS
);
}
}
@ -287,22 +301,24 @@ export default class MP4Remuxer implements Remuxer {
// using audio sampling rate here helps having an integer MP4 frame duration
// this avoids potential rounding issue and AV sync issue
audioTrack.timescale = audioTrack.samplerate;
switch (audioTrack.segmentCodec) {
case 'mp3':
if (typeSupported.mpeg) {
// Chrome and Safari
container = 'audio/mpeg';
audioTrack.codec = '';
} else if (typeSupported.mp3) {
// Firefox
audioTrack.codec = 'mp3';
}
break;
}
tracks.audio = {
id: 'audio',
container: container,
codec: audioTrack.codec,
initSegment:
audioTrack.segmentCodec === 'mp3' && typeSupported.mpeg
? new Uint8Array(0)
: MP4.initSegment([audioTrack]),
metadata: {
@ -371,7 +387,7 @@ export default class MP4Remuxer implements Remuxer {
const initPTS: number = this._initPTS;
let nextAvcDts = this.nextAvcDts;
let offset = 8;
let mp4SampleDuration = this.videoSampleDuration;
let firstDTS;
let lastDTS;
let minPTS: number = Number.POSITIVE_INFINITY;
@ -423,9 +439,10 @@ export default class MP4Remuxer implements Remuxer {
// on Safari let's signal the same sample duration for all samples
// sample duration (as expected by trun MP4 boxes), should be the delta between sample DTS
// set this constant duration as being the avg delta between consecutive DTS.
const inputDuration = lastDTS - firstDTS;
const averageSampleDuration = inputDuration
? Math.round(inputDuration / (nbSamples - 1))
: mp4SampleDuration || track.inputTimeScale / 30;
// handle broken streams with PTS < DTS, tolerance up 0.2 seconds
if (ptsDtsShift < 0) {
@ -549,6 +566,7 @@ export default class MP4Remuxer implements Remuxer {
view.setUint32(0, mdatSize);
mdat.set(MP4.types.mdat, 4);
let stretchedLastFrame = false;
for (let i = 0; i < nbSamples; i++) {
const avcSample = inputSamples[i];
const avcSampleUnits = avcSample.units;
@ -571,7 +589,9 @@ export default class MP4Remuxer implements Remuxer {
} else {
const config = this.config;
const lastFrameDuration =
i > 0
? avcSample.dts - inputSamples[i - 1].dts
: averageSampleDuration;
if (config.stretchShortVideoTrack && this.nextAudioPts !== null) {
// In some cases, a segment's audio track duration may exceed the video track duration.
// Since we've already remuxed audio, and we know how long the audio track is, we look to
@ -589,6 +609,8 @@ export default class MP4Remuxer implements Remuxer {
mp4SampleDuration = deltaToFrameEnd - lastFrameDuration;
if (mp4SampleDuration < 0) {
mp4SampleDuration = lastFrameDuration;
} else {
stretchedLastFrame = true;
}
logger.log(
`[mp4-remuxer]: It is approximately ${
@ -625,11 +647,16 @@ export default class MP4Remuxer implements Remuxer {
}
console.assert(
mp4SampleDuration !== null,
'mp4SampleDuration must be computed'
);
// next AVC sample DTS should be equal to last sample DTS + last sample duration (in PES timescale)
mp4SampleDuration =
stretchedLastFrame || !mp4SampleDuration
? averageSampleDuration
: mp4SampleDuration;
this.nextAvcDts = nextAvcDts = lastDTS + mp4SampleDuration;
this.videoSampleDuration = mp4SampleDuration;
this.isVideoContiguous = true;
const moof = MP4.moof(
track.sequenceNumber++,
@ -673,13 +700,16 @@ export default class MP4Remuxer implements Remuxer {
? track.samplerate
: inputTimeScale;
const scaleFactor: number = inputTimeScale / mp4timeScale;
const mp4SampleDuration: number =
track.segmentCodec === 'aac'
? AAC_SAMPLES_PER_FRAME
: MPEG_AUDIO_SAMPLE_PER_FRAME;
const inputSampleDuration: number = mp4SampleDuration * scaleFactor;
const initPTS: number = this._initPTS;
const rawMPEG: boolean =
track.segmentCodec === 'mp3' && this.typeSupported.mpeg;
const outputSamples: Array<Mp4Sample> = [];
const alignedWithVideo = videoTimeOffset !== undefined;
let inputSamples: Array<AudioSample> = track.samples;
let offset: number = rawMPEG ? 0 : 8;
@ -727,7 +757,7 @@ export default class MP4Remuxer implements Remuxer {
if (videoTimeOffset === 0) {
// Set the start to 0 to match video so that start gaps larger than inputSampleDuration are filled with silence
nextAudioPts = 0;
} else if (accurateTimeOffset && !alignedWithVideo) {
// When not seeking, not live, and LevelDetails.PTSKnown, use fragment start as predicted next audio PTS
nextAudioPts = Math.max(0, timeOffsetMpegTS);
} else {
@ -742,8 +772,7 @@ export default class MP4Remuxer implements Remuxer {
// When possible, we inject a silent frame; when that's not possible, we duplicate the last
// frame.
if (track.segmentCodec === 'aac') {
const maxAudioFramesDrift = this.config.maxAudioFramesDrift;
for (let i = 0, nextPts = nextAudioPts; i < inputSamples.length; i++) {
// First, let's see how far off this frame is from where we expect it to be
@ -839,7 +868,7 @@ export default class MP4Remuxer implements Remuxer {
const prevSample = outputSamples[j - 1];
prevSample.duration = Math.round((pts - lastPTS) / scaleFactor);
} else {
if (contiguous && track.segmentCodec === 'aac') {
// set PTS/DTS to expected PTS/DTS
pts = nextAudioPts;
}
@ -971,62 +1000,6 @@ export default class MP4Remuxer implements Remuxer {
return this.remuxAudio(track, timeOffset, contiguous, false);
}
}
export function normalizePts(value: number, reference: number | null): number {
@ -1061,6 +1034,62 @@ function findKeyframeIndex(samples: Array<AvcSample>): number {
return -1;
}
export function flushTextTrackMetadataCueSamples(
track: DemuxedMetadataTrack,
timeOffset: number,
initPTS: number,
initDTS: number
): RemuxedMetadata | undefined {
const length = track.samples.length;
if (!length) {
return;
}
const inputTimeScale = track.inputTimeScale;
for (let index = 0; index < length; index++) {
const sample = track.samples[index];
// setting id3 pts, dts to relative time
// using this._initPTS and this._initDTS to calculate relative time
sample.pts =
normalizePts(sample.pts - initPTS, timeOffset * inputTimeScale) /
inputTimeScale;
sample.dts =
normalizePts(sample.dts - initDTS, timeOffset * inputTimeScale) /
inputTimeScale;
}
const samples = track.samples;
track.samples = [];
return {
samples,
};
}
export function flushTextTrackUserdataCueSamples(
track: DemuxedUserdataTrack,
timeOffset: number,
initPTS: number
): RemuxedUserdata | undefined {
const length = track.samples.length;
if (!length) {
return;
}
const inputTimeScale = track.inputTimeScale;
for (let index = 0; index < length; index++) {
const sample = track.samples[index];
// setting text pts, dts to relative time
// using this._initPTS and this._initDTS to calculate relative time
sample.pts =
normalizePts(sample.pts - initPTS, timeOffset * inputTimeScale) /
inputTimeScale;
}
track.samples.sort((a, b) => a.pts - b.pts);
const samples = track.samples;
track.samples = [];
return {
samples,
};
}
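A worked example of the PTS mapping both helpers apply, assuming a 90 kHz MPEG-TS timescale and an initPTS captured at 10 seconds (normalizePts only adjusts the value when a 2^33 rollover is detected, so it is a pass-through here):

const inputTimeScale = 90000; // MPEG-TS clock
const initPTS = 900000; // initPTS captured at 10 s (in 90 kHz ticks)
const samplePts = 1080000; // sample at 12 s (in 90 kHz ticks)
// timeOffset * inputTimeScale serves only as the rollover reference; with
// no wraparound the cue lands at:
const cueTime = (samplePts - initPTS) / inputTimeScale; // = 2 seconds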
class Mp4Sample {
public size: number;
public duration: number;

View file

@ -1,3 +1,7 @@
import {
flushTextTrackMetadataCueSamples,
flushTextTrackUserdataCueSamples,
} from './mp4-remuxer';
import type { InitData, InitDataTrack } from '../utils/mp4-tools';
import {
getDuration,
@ -18,7 +22,7 @@ import type {
DemuxedAudioTrack,
DemuxedMetadataTrack,
DemuxedUserdataTrack,
PassthroughTrack,
} from '../types/demuxer';
class PassThroughRemuxer implements Remuxer {
@ -30,18 +34,18 @@ class PassThroughRemuxer implements Remuxer {
private initTracks?: TrackSet;
private lastEndDTS: number | null = null;
public destroy() {}
public resetTimeStamp(defaultInitPTS) {
this.initPTS = defaultInitPTS;
this.lastEndDTS = null;
}
public resetNextTimestamp() {
this.lastEndDTS = null;
}
public resetInitSegment(
initSegment: Uint8Array | undefined,
audioCodec: string | undefined,
videoCodec: string | undefined
@ -52,7 +56,7 @@ class PassThroughRemuxer implements Remuxer {
this.emitInitSegment = true;
}
private generateInitSegment(initSegment: Uint8Array | undefined): void {
let { audioCodec, videoCodec } = this;
if (!initSegment || !initSegment.byteLength) {
this.initTracks = undefined;
@ -106,9 +110,9 @@ class PassThroughRemuxer implements Remuxer {
this.initTracks = tracks;
}
public remux(
audioTrack: DemuxedAudioTrack,
videoTrack: PassthroughTrack,
id3Track: DemuxedMetadataTrack,
textTrack: DemuxedUserdataTrack,
timeOffset: number
@ -201,9 +205,22 @@ class PassThroughRemuxer implements Remuxer {
result.audio = track.type === 'audio' ? track : undefined;
result.video = track.type !== 'audio' ? track : undefined;
result.initSegment = initSegment;
const initPtsNum = this.initPTS ?? 0;
result.id3 = flushTextTrackMetadataCueSamples(
id3Track,
timeOffset,
initPtsNum,
initPtsNum
);
if (textTrack.samples.length) {
result.text = flushTextTrackUserdataCueSamples(
textTrack,
timeOffset,
initPtsNum
);
}
return result;
}
@ -223,7 +240,7 @@ function getParsedTrackCodec(
// Since mp4-tools cannot parse full codec string (see 'TODO: Parse codec details'... in mp4-tools)
// Provide defaults based on codec type
// This allows for some playback of some fmp4 playlists without CODECS defined in manifest
if (parsedCodec === 'hvc1' || parsedCodec === 'hev1') {
return 'hvc1.1.c.L120.90';
}
if (parsedCodec === 'av01') {

View file

@ -13,9 +13,10 @@ export interface Demuxer {
flush(timeOffset?: number): DemuxerResult | Promise<DemuxerResult>;
destroy(): void;
resetInitSegment(
initSegment: Uint8Array | undefined,
audioCodec: string | undefined,
videoCodec: string | undefined,
trackDuration: number
);
resetTimeStamp(defaultInitPTS?: number | null): void;
resetContiguity(): void;
@ -23,7 +24,7 @@ export interface Demuxer {
export interface DemuxerResult {
audioTrack: DemuxedAudioTrack;
videoTrack: DemuxedVideoTrack;
id3Track: DemuxedMetadataTrack;
textTrack: DemuxedUserdataTrack;
}
@ -48,10 +49,17 @@ export interface DemuxedTrack {
codec?: string;
}
export interface PassthroughTrack extends DemuxedTrack {
sampleDuration: number;
samples: Uint8Array;
timescale: number;
duration: number;
codec: string;
}
export interface DemuxedAudioTrack extends DemuxedTrack {
config?: number[];
samplerate?: number;
segmentCodec?: string;
channelCount?: number;
manifestCodec?: string;
samples: AudioSample[];
@ -72,10 +80,6 @@ export interface DemuxedAvcTrack extends DemuxedVideoTrack {
samples: AvcSample[];
}
export interface DemuxedMetadataTrack extends DemuxedTrack {
samples: MetadataSample[];
}
@ -84,16 +88,27 @@ export interface DemuxedUserdataTrack extends DemuxedTrack {
samples: UserdataSample[];
}
export enum MetadataSchema {
audioId3 = 'org.id3',
dateRange = 'com.apple.quicktime.HLS',
emsg = 'https://aomedia.org/emsg/ID3',
}
export interface MetadataSample {
pts: number;
dts: number;
len?: number;
data: Uint8Array;
type: MetadataSchema;
}
export interface UserdataSample {
pts: number;
bytes?: Uint8Array;
type?: number;
payloadType?: number;
uuid?: string;
userData?: string;
userDataBytes?: Uint8Array;
}
export interface AvcSample {

View file

@ -305,12 +305,14 @@ export interface FragParsingInitSegmentData {}
export interface FragParsingUserdataData {
id: string;
frag: Fragment;
details: LevelDetails;
samples: UserdataSample[];
}
export interface FragParsingMetadataData {
id: string;
frag: Fragment;
details: LevelDetails;
samples: MetadataSample[];
}

View file

@ -5,7 +5,6 @@ import type { FragLoadedData } from './events';
export interface FragmentEntity {
body: Fragment;
loaded: FragLoadedData | null;
buffered: boolean;
range: { [key in SourceBufferName]: FragmentBufferedRange };
}

View file

@ -43,6 +43,7 @@ export interface RemuxedTrack {
hasVideo: boolean;
independent?: boolean;
firstKeyFrame?: number;
firstKeyFramePTS?: number;
nb: number;
transferredData1?: ArrayBuffer;
transferredData2?: ArrayBuffer;

View file

@ -210,7 +210,7 @@ export function alignMediaPlaylistByPDT(
const refPDT = refDetails.fragments[0].programDateTime!; // hasProgramDateTime check above makes this safe.
const refStart = refDetails.fragments[0].start;
// Use the delta between the reference details' presentation timeline's start time and its PDT
// to align the other rendition's timeline.
const delta = refPDT - refStart * 1000;
// Per spec: "If any Media Playlist in a Master Playlist contains an EXT-X-PROGRAM-DATE-TIME tag, then all
// Media Playlists in that Master Playlist MUST contain EXT-X-PROGRAM-DATE-TIME tags with consistent mappings

View file

@ -152,11 +152,13 @@ class FetchLoader implements Loader<LoaderContext> {
return;
}
// CORS errors result in an undefined code. Set it to 0 here to align with XHR's behavior
// when destroying, 'error' itself can be undefined
const code: number = !error ? 0 : error.code || 0;
const text: string = !error ? null : error.message;
callbacks.onError(
{ code, text },
context,
error ? error.details : null
);
});
}

View file

@ -33,14 +33,13 @@ export function parseIMSC1(
errorCallBack(new Error('Could not parse IMSC1 mdat'));
return;
}
const ttmlList = results.map((mdat) => utf8ArrayToStr(mdat));
const syncTime = toTimescaleFromScale(initPTS, 1, timescale);
try {
ttmlList.forEach((ttml) => callBack(parseTTML(ttml, syncTime)));
} catch (error) {
errorCallBack(error);
}
@ -111,7 +110,7 @@ function parseTTML(ttml: string, syncTime: number): Array<VTTCue> {
cue.size = 80;
// Apply styles to cue
const styles = getTtmlStyles(region, style, styleElements);
const { textAlign } = styles;
if (textAlign) {
// cue.positionAlign not settable in FF~2016
@ -166,8 +165,13 @@ function getTextContent(element, trim): string {
}, '');
}
function getTtmlStyles(
region,
style,
styleElements
): { [style: string]: string } {
const ttsNs = 'http://www.w3.org/ns/ttml#styling';
let regionStyle = null;
const styleAttributes = [
'displayAlign',
'textAlign',
@ -182,9 +186,20 @@ function getTtmlStyles(region, style): { [style: string]: string } {
// 'direction',
// 'writingMode'
];
const regionStyleName = region?.hasAttribute('style')
? region.getAttribute('style')
: null;
if (regionStyleName && styleElements.hasOwnProperty(regionStyleName)) {
regionStyle = styleElements[regionStyleName];
}
return styleAttributes.reduce((styles, name) => {
const value =
getAttributeNS(style, ttsNs, name) ||
getAttributeNS(region, ttsNs, name) ||
getAttributeNS(regionStyle, ttsNs, name);
if (value) {
styles[name] = value;
}
@ -193,6 +208,9 @@ function getTtmlStyles(region, style): { [style: string]: string } {
}
function getAttributeNS(element, ns, name): string | null {
if (!element) {
return null;
}
return element.hasAttributeNS(ns, name)
? element.getAttributeNS(ns, name)
: null;
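The lookup order introduced above is: inline style first, then the region element, then the style element referenced by the region's own style attribute. A compact sketch of that fallback chain (the arguments stand in for getAttributeNS results with a fixed namespace):

function firstAttr(...values: Array<string | null>): string | null {
  for (const value of values) {
    if (value) {
      return value;
    }
  }
  return null;
}
// usage: firstAttr(fromStyle, fromRegion, fromRegionStyle)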

View file

@ -1,59 +1,50 @@
import { sliceUint8 } from './typed-array';
import { ElementaryStreamTypes } from '../loader/fragment';
import { PassthroughTrack, UserdataSample } from '../types/demuxer';
import { utf8ArrayToStr } from '../demux/id3';
const UINT32_MAX = Math.pow(2, 32) - 1;
const push = [].push;
// We are using fixed track IDs for driving the MP4 remuxer
// instead of following the TS PIDs.
// There is no reason not to do this and some browsers/SourceBuffer-demuxers
// may not like if there are TrackID "switches"
// See https://github.com/video-dev/hls.js/issues/1331
// Here we are mapping our internal track types to constant MP4 track IDs
// With MSE currently one can only have one track of each, and we are muxing
// whatever video/audio rendition in them.
export const RemuxerTrackIdConfig = {
video: 1,
audio: 2,
id3: 3,
text: 4,
};
export function bin2str(data: Uint8Array): string {
return String.fromCharCode.apply(null, data);
}
export function readUint16(buffer: Uint8Array, offset: number): number {
const val = (buffer[offset] << 8) | buffer[offset + 1];
return val < 0 ? 65536 + val : val;
}
export function readUint32(buffer: Uint8Array, offset: number): number {
const val = readSint32(buffer, offset);
return val < 0 ? 4294967296 + val : val;
}
export function readSint32(buffer: Uint8Array, offset: number): number {
return (
(buffer[offset] << 24) |
(buffer[offset + 1] << 16) |
(buffer[offset + 2] << 8) |
buffer[offset + 3]
);
}
export function writeUint32(buffer: Uint8Array, offset: number, value: number) {
buffer[offset] = value >> 24;
buffer[offset + 1] = (value >> 16) & 0xff;
buffer[offset + 2] = (value >> 8) & 0xff;
@ -61,30 +52,15 @@ export function writeUint32(
}
// Find the data for a box specified by its path
export function findBox(
input: Uint8Array | Mp4BoxData,
path: Array<string>
): Array<Mp4BoxData> {
const results = [] as Array<Mp4BoxData>;
export function findBox(data: Uint8Array, path: string[]): Uint8Array[] {
const results = [] as Uint8Array[];
if (!path.length) {
// short-circuit the search for empty paths
return results;
}
const end = data.byteLength;
let data: Uint8Array;
let start;
let end;
if ('data' in input) {
data = input.data;
start = input.start;
end = input.end;
} else {
data = input;
start = 0;
end = data.byteLength;
}
for (let i = start; i < end; ) {
for (let i = 0; i < end; ) {
const size = readUint32(data, i);
const type = bin2str(data.subarray(i + 4, i + 8));
const endbox = size > 1 ? i + size : end;
@ -93,13 +69,10 @@ export function findBox(
if (path.length === 1) {
// this is the end of the path and we've found the box we were
// looking for
results.push({ data: data, start: i + 8, end: endbox });
results.push(data.subarray(i + 8, endbox));
} else {
// recursively search for the next box along the path
const subresults = findBox(
{ data: data, start: i + 8, end: endbox },
path.slice(1)
);
const subresults = findBox(data.subarray(i + 8, endbox), path.slice(1));
if (subresults.length) {
push.apply(results, subresults);
}
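// Illustrative sketch (assumed input, not part of the diff): findBox now
// returns zero-copy Uint8Array views instead of { data, start, end }
// descriptors. Because subarray views keep their byteOffset into the parent
// buffer, callers can still recover a box's absolute position; the 8-byte
// size/type header precedes each returned view, which is why code further
// down (segmentValidRange, parseSamples) subtracts 8 from byteOffset.
declare const fragment: Uint8Array; // assumed: a complete fMP4 fragment at buffer offset 0
for (const tfdt of findBox(fragment, ['moof', 'traf', 'tfdt'])) {
  const boxStart = tfdt.byteOffset - 8; // start of the tfdt box within `fragment`
  const version = tfdt[0];
  console.log(boxStart, version);
}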
@ -124,7 +97,7 @@ type SidxInfo = {
export function parseSegmentIndex(initSegment: Uint8Array): SidxInfo | null {
const moovBox = findBox(initSegment, ['moov']);
const moov = moovBox[0];
const moovEndOffset = moov ? moov.end : null; // we need this in case we need to chop off garbage at the end of the current data
const moovEndOffset = moov ? moov.length : null; // we need this in case we need to chop off garbage at the end of the current data
const sidxBox = findBox(initSegment, ['sidx']);
@ -135,10 +108,10 @@ export function parseSegmentIndex(initSegment: Uint8Array): SidxInfo | null {
const references: any[] = [];
const sidx = sidxBox[0];
const version = sidx.data[0];
const version = sidx[0];
// set initial offset, we skip the reference ID (not needed)
let index = version === 0 ? 8 : 16;
let index = 8;
const timescale = readUint32(sidx, index);
index += 4;
@ -157,7 +130,7 @@ export function parseSegmentIndex(initSegment: Uint8Array): SidxInfo | null {
// skip reserved
index += 2;
let startByte = sidx.end + firstOffset;
let startByte = sidx.length + firstOffset;
const referencesCount = readUint16(sidx, index);
index += 2;
@ -251,6 +224,7 @@ export interface InitData extends Array<any> {
| undefined;
audio?: InitDataTrack;
video?: InitDataTrack;
caption?: InitDataTrack;
}
export function parseInitSegment(initSegment: Uint8Array): InitData {
@ -260,19 +234,17 @@ export function parseInitSegment(initSegment: Uint8Array): InitData {
const trak = traks[i];
const tkhd = findBox(trak, ['tkhd'])[0];
if (tkhd) {
let version = tkhd.data[tkhd.start];
let version = tkhd[0];
let index = version === 0 ? 12 : 20;
const trackId = readUint32(tkhd, index);
const mdhd = findBox(trak, ['mdia', 'mdhd'])[0];
if (mdhd) {
version = mdhd.data[mdhd.start];
version = mdhd[0];
index = version === 0 ? 12 : 20;
const timescale = readUint32(mdhd, index);
const hdlr = findBox(trak, ['mdia', 'hdlr'])[0];
if (hdlr) {
const hdlrType = bin2str(
hdlr.data.subarray(hdlr.start + 8, hdlr.start + 12)
);
const hdlrType = bin2str(hdlr.subarray(8, 12));
const type: HdlrType | undefined = {
soun: ElementaryStreamTypes.AUDIO as const,
vide: ElementaryStreamTypes.VIDEO as const,
@ -282,9 +254,7 @@ export function parseInitSegment(initSegment: Uint8Array): InitData {
const stsd = findBox(trak, ['mdia', 'minf', 'stbl', 'stsd'])[0];
let codec;
if (stsd) {
codec = bin2str(
stsd.data.subarray(stsd.start + 12, stsd.start + 16)
);
codec = bin2str(stsd.subarray(12, 16));
// TODO: Parse codec details to be able to build MIME type.
// stsd.start += 8;
// const codecBox = findBox(stsd, [codec])[0];
@ -337,7 +307,7 @@ export function getStartDTS(initData: InitData, fmp4: Uint8Array): number {
return (
findBox(fmp4, ['moof', 'traf']).reduce((result: number | null, traf) => {
const tfdt = findBox(traf, ['tfdt'])[0];
const version = tfdt.data[tfdt.start];
const version = tfdt[0];
const start = findBox(traf, ['tfhd']).reduce(
(result: number | null, tfhd) => {
// get the track id from the tfhd
@ -517,8 +487,8 @@ export function offsetStartDTS(
fmp4: Uint8Array,
timeOffset: number
) {
findBox(fmp4, ['moof', 'traf']).forEach(function (traf) {
findBox(traf, ['tfhd']).forEach(function (tfhd) {
findBox(fmp4, ['moof', 'traf']).forEach((traf) => {
findBox(traf, ['tfhd']).forEach((tfhd) => {
// get the track id from the tfhd
const id = readUint32(tfhd, 4);
const track = initData[id];
@ -528,11 +498,14 @@ export function offsetStartDTS(
// assume a 90kHz clock if no timescale was specified
const timescale = track.timescale || 90e3;
// get the base media decode time from the tfdt
findBox(traf, ['tfdt']).forEach(function (tfdt) {
const version = tfdt.data[tfdt.start];
findBox(traf, ['tfdt']).forEach((tfdt) => {
const version = tfdt[0];
let baseMediaDecodeTime = readUint32(tfdt, 4);
if (version === 0) {
writeUint32(tfdt, 4, baseMediaDecodeTime - timeOffset * timescale);
baseMediaDecodeTime -= timeOffset * timescale;
baseMediaDecodeTime = Math.max(baseMediaDecodeTime, 0);
writeUint32(tfdt, 4, baseMediaDecodeTime);
} else {
baseMediaDecodeTime *= Math.pow(2, 32);
baseMediaDecodeTime += readUint32(tfdt, 8);
@ -564,8 +537,8 @@ export function segmentValidRange(data: Uint8Array): SegmentedRange {
}
const last = moofs[moofs.length - 1];
// Offset by 8 bytes; findBox offsets the start by as much
segmentedRange.valid = sliceUint8(data, 0, last.start - 8);
segmentedRange.remainder = sliceUint8(data, last.start - 8);
segmentedRange.valid = sliceUint8(data, 0, last.byteOffset - 8);
segmentedRange.remainder = sliceUint8(data, last.byteOffset - 8);
return segmentedRange;
}
@ -584,3 +557,415 @@ export function appendUint8Array(
return temp;
}
export interface IEmsgParsingData {
schemeIdUri: string;
value: string;
timeScale: number;
presentationTimeDelta?: number;
presentationTime?: number;
eventDuration: number;
id: number;
payload: Uint8Array;
}
export function parseSamples(
timeOffset: number,
track: PassthroughTrack
): UserdataSample[] {
const seiSamples = [] as UserdataSample[];
const videoData = track.samples;
const timescale = track.timescale;
const trackId = track.id;
let isHEVCFlavor = false;
const moofs = findBox(videoData, ['moof']);
moofs.map((moof) => {
const moofOffset = moof.byteOffset - 8;
const trafs = findBox(moof, ['traf']);
trafs.map((traf) => {
// get the base media decode time from the tfdt
const baseTime = findBox(traf, ['tfdt']).map((tfdt) => {
const version = tfdt[0];
let result = readUint32(tfdt, 4);
if (version === 1) {
result *= Math.pow(2, 32);
result += readUint32(tfdt, 8);
}
return result / timescale;
})[0];
if (baseTime !== undefined) {
timeOffset = baseTime;
}
return findBox(traf, ['tfhd']).map((tfhd) => {
const id = readUint32(tfhd, 4);
const tfhdFlags = readUint32(tfhd, 0) & 0xffffff;
const baseDataOffsetPresent = (tfhdFlags & 0x000001) !== 0;
const sampleDescriptionIndexPresent = (tfhdFlags & 0x000002) !== 0;
const defaultSampleDurationPresent = (tfhdFlags & 0x000008) !== 0;
let defaultSampleDuration = 0;
const defaultSampleSizePresent = (tfhdFlags & 0x000010) !== 0;
let defaultSampleSize = 0;
const defaultSampleFlagsPresent = (tfhdFlags & 0x000020) !== 0;
let tfhdOffset = 8;
if (id === trackId) {
if (baseDataOffsetPresent) {
tfhdOffset += 8;
}
if (sampleDescriptionIndexPresent) {
tfhdOffset += 4;
}
if (defaultSampleDurationPresent) {
defaultSampleDuration = readUint32(tfhd, tfhdOffset);
tfhdOffset += 4;
}
if (defaultSampleSizePresent) {
defaultSampleSize = readUint32(tfhd, tfhdOffset);
tfhdOffset += 4;
}
if (defaultSampleFlagsPresent) {
tfhdOffset += 4;
}
if (track.type === 'video') {
isHEVCFlavor = isHEVC(track.codec);
}
findBox(traf, ['trun']).map((trun) => {
const version = trun[0];
const flags = readUint32(trun, 0) & 0xffffff;
const dataOffsetPresent = (flags & 0x000001) !== 0;
let dataOffset = 0;
const firstSampleFlagsPresent = (flags & 0x000004) !== 0;
const sampleDurationPresent = (flags & 0x000100) !== 0;
let sampleDuration = 0;
const sampleSizePresent = (flags & 0x000200) !== 0;
let sampleSize = 0;
const sampleFlagsPresent = (flags & 0x000400) !== 0;
const sampleCompositionOffsetsPresent = (flags & 0x000800) !== 0;
let compositionOffset = 0;
const sampleCount = readUint32(trun, 4);
let trunOffset = 8; // past version, flags, and sample count
if (dataOffsetPresent) {
dataOffset = readUint32(trun, trunOffset);
trunOffset += 4;
}
if (firstSampleFlagsPresent) {
trunOffset += 4;
}
let sampleOffset = dataOffset + moofOffset;
for (let ix = 0; ix < sampleCount; ix++) {
if (sampleDurationPresent) {
sampleDuration = readUint32(trun, trunOffset);
trunOffset += 4;
} else {
sampleDuration = defaultSampleDuration;
}
if (sampleSizePresent) {
sampleSize = readUint32(trun, trunOffset);
trunOffset += 4;
} else {
sampleSize = defaultSampleSize;
}
if (sampleFlagsPresent) {
trunOffset += 4;
}
if (sampleCompositionOffsetsPresent) {
if (version === 0) {
compositionOffset = readUint32(trun, trunOffset);
} else {
compositionOffset = readSint32(trun, trunOffset);
}
trunOffset += 4;
}
if (track.type === ElementaryStreamTypes.VIDEO) {
let naluTotalSize = 0;
while (naluTotalSize < sampleSize) {
const naluSize = readUint32(videoData, sampleOffset);
sampleOffset += 4;
const naluType = videoData[sampleOffset] & 0x1f;
if (isSEIMessage(isHEVCFlavor, naluType)) {
const data = videoData.subarray(
sampleOffset,
sampleOffset + naluSize
);
parseSEIMessageFromNALu(
data,
timeOffset + compositionOffset / timescale,
seiSamples
);
}
sampleOffset += naluSize;
naluTotalSize += naluSize + 4;
}
}
timeOffset += sampleDuration / timescale;
}
});
}
});
});
});
return seiSamples;
}
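// Hedged sketch of the timing model applied above (assumed helper, not part
// of the diff): a sample's presentation time is the track fragment's base
// decode time plus the durations of all earlier samples plus its own
// composition offset, converted from timescale units to seconds.
function samplePresentationTime(
  baseMediaDecodeTime: number, // from the tfdt box
  priorDurations: number[], // trun sample durations before this sample
  compositionOffset: number, // trun sample composition time offset
  timescale: number // from the mdhd box
): number {
  const decodeTime = priorDurations.reduce((a, d) => a + d, baseMediaDecodeTime);
  return (decodeTime + compositionOffset) / timescale;
}
// samplePresentationTime(900000, [3000, 3000], 1500, 90000) === 10.083333333333334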
function isHEVC(codec: string) {
if (!codec) {
return false;
}
const delimit = codec.indexOf('.');
const baseCodec = delimit < 0 ? codec : codec.substring(0, delimit);
return (
baseCodec === 'hvc1' ||
baseCodec === 'hev1' ||
// Dolby Vision
baseCodec === 'dvh1' ||
baseCodec === 'dvhe'
);
}
function isSEIMessage(isHEVCFlavor: boolean, naluType: number) {
return isHEVCFlavor ? naluType === 39 || naluType === 40 : naluType === 6;
}
export function parseSEIMessageFromNALu(
unescapedData: Uint8Array,
pts: number,
samples: UserdataSample[]
) {
const data = discardEPB(unescapedData);
let seiPtr = 0;
// skip frameType
seiPtr++;
let payloadType = 0;
let payloadSize = 0;
let endOfCaptions = false;
let b = 0;
while (seiPtr < data.length) {
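// Parse payload type.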
payloadType = 0;
do {
if (seiPtr >= data.length) {
break;
}
b = data[seiPtr++];
payloadType += b;
} while (b === 0xff);
// Parse payload size.
payloadSize = 0;
do {
if (seiPtr >= data.length) {
break;
}
b = data[seiPtr++];
payloadSize += b;
} while (b === 0xff);
const leftOver = data.length - seiPtr;
if (!endOfCaptions && payloadType === 4 && seiPtr < data.length) {
endOfCaptions = true;
const countryCode = data[seiPtr++];
if (countryCode === 181) {
const providerCode = readUint16(data, seiPtr);
seiPtr += 2;
if (providerCode === 49) {
const userStructure = readUint32(data, seiPtr);
seiPtr += 4;
if (userStructure === 0x47413934) {
const userDataType = data[seiPtr++];
// Raw CEA-608 bytes wrapped in CEA-708 packet
if (userDataType === 3) {
const firstByte = data[seiPtr++];
const totalCCs = 0x1f & firstByte;
const enabled = 0x40 & firstByte;
const totalBytes = enabled ? 2 + totalCCs * 3 : 0;
const byteArray = new Uint8Array(totalBytes);
if (enabled) {
byteArray[0] = firstByte;
for (let i = 1; i < totalBytes; i++) {
byteArray[i] = data[seiPtr++];
}
}
samples.push({
type: userDataType,
payloadType,
pts,
bytes: byteArray,
});
}
}
}
}
} else if (payloadType === 5 && payloadSize < leftOver) {
endOfCaptions = true;
if (payloadSize > 16) {
const uuidStrArray: Array<string> = [];
for (let i = 0; i < 16; i++) {
const b = data[seiPtr++].toString(16);
uuidStrArray.push(b.length === 1 ? '0' + b : b);
if (i === 3 || i === 5 || i === 7 || i === 9) {
uuidStrArray.push('-');
}
}
const length = payloadSize - 16;
const userDataBytes = new Uint8Array(length);
for (let i = 0; i < length; i++) {
userDataBytes[i] = data[seiPtr++];
}
samples.push({
payloadType,
pts,
uuid: uuidStrArray.join(''),
userData: utf8ArrayToStr(userDataBytes),
userDataBytes,
});
}
} else if (payloadSize < leftOver) {
seiPtr += payloadSize;
} else if (payloadSize > leftOver) {
break;
}
}
}
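// Worked example of the 0xff length coding read above: each 0xff byte adds
// 255, and the first non-0xff byte both adds its value and terminates, so
// the bytes ff ff 04 decode to 514.
const seiLengthBytes = new Uint8Array([0xff, 0xff, 0x04]);
let decoded = 0;
let idx = 0;
let current = 0;
do {
  current = seiLengthBytes[idx++];
  decoded += current;
} while (current === 0xff);
// decoded === 514 (255 + 255 + 4)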
/**
 * Remove emulation prevention bytes (0x00 0x00 0x03 becomes 0x00 0x00) from an RBSP
*/
function discardEPB(data: Uint8Array): Uint8Array {
const length = data.byteLength;
const EPBPositions = [] as Array<number>;
let i = 1;
// Find all `Emulation Prevention Bytes`
while (i < length - 2) {
if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {
EPBPositions.push(i + 2);
i += 2;
} else {
i++;
}
}
// If no Emulation Prevention Bytes were found just return the original
// array
if (EPBPositions.length === 0) {
return data;
}
// Create a new array to hold the NAL unit data
const newLength = length - EPBPositions.length;
const newData = new Uint8Array(newLength);
let sourceIndex = 0;
for (i = 0; i < newLength; sourceIndex++, i++) {
if (sourceIndex === EPBPositions[0]) {
// Skip this byte
sourceIndex++;
// Remove this position index
EPBPositions.shift();
}
newData[i] = data[sourceIndex];
}
return newData;
}
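// Worked example for discardEPB: the escape byte 0x03 that follows two zero
// bytes is dropped, while all other bytes pass through unchanged.
const escaped = new Uint8Array([0x06, 0x00, 0x00, 0x03, 0x01, 0x42]);
discardEPB(escaped); // Uint8Array [0x06, 0x00, 0x00, 0x01, 0x42]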
export function parseEmsg(data: Uint8Array): IEmsgParsingData {
const version = data[0];
let schemeIdUri: string = '';
let value: string = '';
let timeScale: number = 0;
let presentationTimeDelta: number = 0;
let presentationTime: number = 0;
let eventDuration: number = 0;
let id: number = 0;
let offset: number = 0;
if (version === 0) {
while (bin2str(data.subarray(offset, offset + 1)) !== '\0') {
schemeIdUri += bin2str(data.subarray(offset, offset + 1));
offset += 1;
}
schemeIdUri += bin2str(data.subarray(offset, offset + 1));
offset += 1;
while (bin2str(data.subarray(offset, offset + 1)) !== '\0') {
value += bin2str(data.subarray(offset, offset + 1));
offset += 1;
}
value += bin2str(data.subarray(offset, offset + 1));
offset += 1;
timeScale = readUint32(data, 12);
presentationTimeDelta = readUint32(data, 16);
eventDuration = readUint32(data, 20);
id = readUint32(data, 24);
offset = 28;
} else if (version === 1) {
offset += 4;
timeScale = readUint32(data, offset);
offset += 4;
const leftPresentationTime = readUint32(data, offset);
offset += 4;
const rightPresentationTime = readUint32(data, offset);
offset += 4;
presentationTime = 2 ** 32 * leftPresentationTime + rightPresentationTime;
if (!Number.isSafeInteger(presentationTime)) {
presentationTime = Number.MAX_SAFE_INTEGER;
// eslint-disable-next-line no-console
console.warn(
'Presentation time exceeds safe integer limit and wrapped to max safe integer in parsing emsg box'
);
}
eventDuration = readUint32(data, offset);
offset += 4;
id = readUint32(data, offset);
offset += 4;
while (bin2str(data.subarray(offset, offset + 1)) !== '\0') {
schemeIdUri += bin2str(data.subarray(offset, offset + 1));
offset += 1;
}
schemeIdUri += bin2str(data.subarray(offset, offset + 1));
offset += 1;
while (bin2str(data.subarray(offset, offset + 1)) !== '\0') {
value += bin2str(data.subarray(offset, offset + 1));
offset += 1;
}
value += bin2str(data.subarray(offset, offset + 1));
offset += 1;
}
const payload = data.subarray(offset, data.byteLength);
return {
schemeIdUri,
value,
timeScale,
presentationTime,
presentationTimeDelta,
eventDuration,
id,
payload,
};
}
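// Hedged sketch (assumed helper, not part of the diff): deriving an event
// start time in seconds from a parsed emsg. Version 0 boxes carry a delta
// relative to the containing fragment; version 1 boxes carry an absolute
// media time. The caller is assumed to know the box version, for example
// from data[0] before calling parseEmsg.
function emsgStartTime(
  emsg: IEmsgParsingData,
  fragmentStart: number, // seconds; only used for version-0 deltas
  isVersion0: boolean
): number {
  return isVersion0
    ? fragmentStart + (emsg.presentationTimeDelta ?? 0) / emsg.timeScale
    : (emsg.presentationTime ?? 0) / emsg.timeScale;
}
// With timeScale 90000 and presentationTimeDelta 180000, the event starts
// 2 seconds into the fragment.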

View file

@ -64,7 +64,8 @@ export function clearCurrentCues(track: TextTrack) {
export function removeCuesInRange(
track: TextTrack,
start: number,
end: number
end: number,
predicate?: (cue: TextTrackCue) => boolean
) {
const mode = track.mode;
if (mode === 'disabled') {
@ -74,7 +75,9 @@ export function removeCuesInRange(
if (track.cues && track.cues.length > 0) {
const cues = getCuesInRange(track.cues, start, end);
for (let i = 0; i < cues.length; i++) {
track.removeCue(cues[i]);
if (!predicate || predicate(cues[i])) {
track.removeCue(cues[i]);
}
}
}
if (mode === 'disabled') {
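// Hypothetical usage of the new predicate parameter (the track and cue shape
// are assumptions, not part of the diff): remove only metadata-style cues
// carrying a `value` field between 0s and 10s, leaving other cues in that
// range untouched.
declare const metadataTrack: TextTrack;
removeCuesInRange(metadataTrack, 0, 10, (cue) => 'value' in cue);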

View file

@ -235,13 +235,13 @@ function parseCue(input: string, cue: VTTCue, regionList: Region[]) {
skipWhitespace();
cue.startTime = consumeTimeStamp(); // (1) collect cue start time
skipWhitespace();
if (input.substr(0, 3) !== '-->') {
if (input.slice(0, 3) !== '-->') {
// (3) next characters must match '-->'
throw new Error(
"Malformed time stamp (time stamps must be separated by '-->'): " + oInput
);
}
input = input.substr(3);
input = input.slice(3);
skipWhitespace();
cue.endTime = consumeTimeStamp(); // (5) collect cue end time
@ -302,7 +302,7 @@ export class VTTParser {
++pos;
}
const line: string = buffer.substr(0, pos);
const line: string = buffer.slice(0, pos);
// Advance the buffer early in case we fail below.
if (buffer[pos] === '\r') {
++pos;
@ -312,7 +312,7 @@ export class VTTParser {
++pos;
}
_this.buffer = buffer.substr(pos);
_this.buffer = buffer.slice(pos);
return line;
}

View file

@ -12,16 +12,18 @@ const startsWith = function (
searchString: string,
position: number = 0
) {
return inputString.substr(position, searchString.length) === searchString;
return (
inputString.slice(position, position + searchString.length) === searchString
);
};
const cueString2millis = function (timeString: string) {
let ts = parseInt(timeString.substr(-3));
const secs = parseInt(timeString.substr(-6, 2));
const mins = parseInt(timeString.substr(-9, 2));
let ts = parseInt(timeString.slice(-3));
const secs = parseInt(timeString.slice(-6, -4));
const mins = parseInt(timeString.slice(-9, -7));
const hours =
timeString.length > 9
? parseInt(timeString.substr(0, timeString.indexOf(':')))
? parseInt(timeString.substring(0, timeString.indexOf(':')))
: 0;
if (
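// Equivalence check for the substr-to-slice conversion above, using the
// timestamp '00:01:02.345': slice with two negative indices selects the
// same window substr selected with a (start, length) pair.
'00:01:02.345'.slice(-3); // '345' (milliseconds)
'00:01:02.345'.slice(-6, -4); // '02' (seconds)
'00:01:02.345'.slice(-9, -7); // '01' (minutes)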
@ -109,7 +111,6 @@ export function parseWebVTT(
let timestampMapLOCAL = 0;
let parsingError: Error;
let inHeader = true;
let timestampMap = false;
parser.oncue = function (cue: VTTCue) {
// Adjust cue timing; clamp cues to start no earlier than 0 on the timeline, and drop cues that don't end after 0.
@ -134,16 +135,14 @@ export function parseWebVTT(
cueOffset = webVttMpegTsMapOffset - vttCCs.presentationOffset;
}
if (timestampMap) {
const duration = cue.endTime - cue.startTime;
const startTime =
normalizePts(
(cue.startTime + cueOffset - timestampMapLOCAL) * 90000,
timeOffset * 90000
) / 90000;
cue.startTime = startTime;
cue.endTime = startTime + duration;
}
const duration = cue.endTime - cue.startTime;
const startTime =
normalizePts(
(cue.startTime + cueOffset - timestampMapLOCAL) * 90000,
timeOffset * 90000
) / 90000;
cue.startTime = Math.max(startTime, 0);
cue.endTime = Math.max(startTime + duration, 0);
// trim trailing WebVTT block whitespace
const text = cue.text.trim();
@ -180,23 +179,21 @@ export function parseWebVTT(
if (startsWith(line, 'X-TIMESTAMP-MAP=')) {
// Once found, no more are allowed anyway, so stop searching.
inHeader = false;
timestampMap = true;
// Extract LOCAL and MPEGTS.
line
.substr(16)
.slice(16)
.split(',')
.forEach((timestamp) => {
if (startsWith(timestamp, 'LOCAL:')) {
cueTime = timestamp.substr(6);
cueTime = timestamp.slice(6);
} else if (startsWith(timestamp, 'MPEGTS:')) {
timestampMapMPEGTS = parseInt(timestamp.substr(7));
timestampMapMPEGTS = parseInt(timestamp.slice(7));
}
});
try {
// Convert cue time to seconds
timestampMapLOCAL = cueString2millis(cueTime) / 1000;
} catch (error) {
timestampMap = false;
parsingError = error;
}
// Return without parsing X-TIMESTAMP-MAP line.
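// Worked example of the mapping parsed above: with
// X-TIMESTAMP-MAP=LOCAL:00:00:00.000,MPEGTS:900000 a cue authored at
// LOCAL 1.0s lands at 900000 / 90000 + 1.0 = 11.0s on the media timeline
// (before any discontinuity or presentation offsets are applied).
const exampleMpegTs = 900000; // 90kHz ticks
const exampleLocal = 0; // seconds
const cueLocalStart = 1.0; // seconds
const mediaTime = exampleMpegTs / 90000 + (cueLocalStart - exampleLocal);
// mediaTime === 11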