Mirror of https://github.com/DanielnetoDotCom/YouPHPTube

Chromecast moved to PlayerSkins plugin

Airplay option added on PlayerSkins
Daniel Neto 2024-10-30 20:19:49 -03:00
parent 480ae72b99
commit efd0665a44
286 changed files with 72588 additions and 1487 deletions

node_modules/mux.js/cjs/mp4/caption-parser.js generated vendored
View file

@@ -17,11 +17,11 @@ var findBox = require('../mp4/find-box.js');
var parseTfdt = require('../tools/parse-tfdt.js');
var parseTrun = require('../tools/parse-trun.js');
var parseTfhd = require('../tools/parse-tfhd.js');
var window = require('global/window');
var _require = require('./samples.js'),
  getMdatTrafPairs = _require.getMdatTrafPairs,
  parseSamples = _require.parseSamples;
/**
 * Maps an offset in the mdat to a sample based on the size of the samples.
 * Assumes that `parseSamples` has been called first.
@@ -123,61 +123,6 @@ var findSeiNals = function findSeiNals(avcStream, samples, trackId) {
  return result;
};
/**
 * Parses sample information out of Track Run Boxes and calculates
 * the absolute presentation and decode timestamps of each sample.
 *
 * @param {Array<Uint8Array>} truns - The Track Run (trun) boxes to be parsed
 * @param {Number|BigInt} baseMediaDecodeTime - base media decode time from tfdt
 *   @see ISO-BMFF-12/2015, Section 8.8.12
 * @param {Object} tfhd - The parsed Track Fragment Header
 *   @see inspect.parseTfhd
 * @return {Object[]} the parsed samples
 *
 * @see ISO-BMFF-12/2015, Section 8.8.8
 **/
var parseSamples = function parseSamples(truns, baseMediaDecodeTime, tfhd) {
  var currentDts = baseMediaDecodeTime;
  var defaultSampleDuration = tfhd.defaultSampleDuration || 0;
  var defaultSampleSize = tfhd.defaultSampleSize || 0;
  var trackId = tfhd.trackId;
  var allSamples = [];
  truns.forEach(function (trun) {
    // Note: We currently do not parse the sample table as well
    // as the trun. It's possible some sources will require this.
    // moov > trak > mdia > minf > stbl
    var trackRun = parseTrun(trun);
    var samples = trackRun.samples;
    samples.forEach(function (sample) {
      if (sample.duration === undefined) {
        sample.duration = defaultSampleDuration;
      }
      if (sample.size === undefined) {
        sample.size = defaultSampleSize;
      }
      sample.trackId = trackId;
      sample.dts = currentDts;
      if (sample.compositionTimeOffset === undefined) {
        sample.compositionTimeOffset = 0;
      }
      if (typeof currentDts === 'bigint') {
        sample.pts = currentDts + window.BigInt(sample.compositionTimeOffset);
        currentDts += window.BigInt(sample.duration);
      } else {
        sample.pts = currentDts + sample.compositionTimeOffset;
        currentDts += sample.duration;
      }
    });
    allSamples = allSamples.concat(samples);
  });
  return allSamples;
};
/**
 * Parses out caption nals from an FMP4 segment's video tracks.
 *
@@ -189,20 +134,8 @@ var parseSamples = function parseSamples(truns, baseMediaDecodeTime, tfhd) {
var parseCaptionNals = function parseCaptionNals(segment, videoTrackId) {
  // To get the samples
  var trafs = findBox(segment, ['moof', 'traf']); // To get SEI NAL units
  var mdats = findBox(segment, ['mdat']);
  var captionNals = {};
  var mdatTrafPairs = []; // Pair up each traf with a mdat as moofs and mdats are in pairs
  mdats.forEach(function (mdat, index) {
    var matchingTraf = trafs[index];
    mdatTrafPairs.push({
      mdat: mdat,
      traf: matchingTraf
    });
  });
  var mdatTrafPairs = getMdatTrafPairs(segment);
  mdatTrafPairs.forEach(function (pair) {
    var mdat = pair.mdat;
    var traf = pair.traf;
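The removed inline pairing and its one-line replacement are behaviorally identical; the loop now lives in the shared samples.js helper added later in this commit, so caption-parser.js and the new webvtt-parser.js consume the same positional moof/traf-to-mdat pairing. A minimal sketch of the helper's contract (illustrative, not part of the diff):

// Illustrative only: what the shared helper hands back to both parsers.
var getMdatTrafPairs = require('mux.js/cjs/mp4/samples.js').getMdatTrafPairs;

var segmentBytes = new Uint8Array(0); // stand-in for real FMP4 segment bytes
var pairs = getMdatTrafPairs(segmentBytes);
pairs.forEach(function (pair) {
  // pair.mdat -> payload of an mdat box
  // pair.traf -> payload of the traf from the same moof
});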

node_modules/mux.js/cjs/mp4/index.js generated vendored
View file

@@ -12,5 +12,6 @@ module.exports = {
  Transmuxer: require('./transmuxer').Transmuxer,
  AudioSegmentStream: require('./transmuxer').AudioSegmentStream,
  VideoSegmentStream: require('./transmuxer').VideoSegmentStream,
  CaptionParser: require('./caption-parser')
  CaptionParser: require('./caption-parser'),
  WebVttParser: require('./webvtt-parser')
};
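For consumers, the new export should be reachable through mux.js's public entry point, assuming its top-level index re-exports this mp4 module as it does for Transmuxer. A minimal sketch (not part of the diff):

// Hypothetical consumer code: the mp4 namespace re-exported by mux.js's
// top-level index now carries WebVttParser alongside CaptionParser.
var muxjs = require('mux.js');

var vttParser = new muxjs.mp4.WebVttParser();
var captionParser = new muxjs.mp4.CaptionParser();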

90 node_modules/mux.js/cjs/mp4/samples.js generated vendored Normal file
View file

@@ -0,0 +1,90 @@
"use strict";
var _require = require("../tools/mp4-inspector"),
parseTrun = _require.parseTrun;
var _require2 = require("./probe"),
findBox = _require2.findBox;
var window = require('global/window');
/**
* Utility function for parsing data from mdat boxes.
* @param {Array<Uint8Array>} segment the segment data to create mdat/traf pairs from.
* @returns mdat and traf boxes paired up for easier parsing.
*/
var getMdatTrafPairs = function getMdatTrafPairs(segment) {
var trafs = findBox(segment, ['moof', 'traf']);
var mdats = findBox(segment, ['mdat']);
var mdatTrafPairs = []; // Pair up each traf with a mdat as moofs and mdats are in pairs
mdats.forEach(function (mdat, index) {
var matchingTraf = trafs[index];
mdatTrafPairs.push({
mdat: mdat,
traf: matchingTraf
});
});
return mdatTrafPairs;
};
/**
 * Parses sample information out of Track Run Boxes and calculates
 * the absolute presentation and decode timestamps of each sample.
 *
 * @param {Array<Uint8Array>} truns - The Track Run (trun) boxes to be parsed
 * @param {Number|BigInt} baseMediaDecodeTime - base media decode time from tfdt
 *   @see ISO-BMFF-12/2015, Section 8.8.12
 * @param {Object} tfhd - The parsed Track Fragment Header
 *   @see inspect.parseTfhd
 * @return {Object[]} the parsed samples
 *
 * @see ISO-BMFF-12/2015, Section 8.8.8
 **/
var parseSamples = function parseSamples(truns, baseMediaDecodeTime, tfhd) {
  var currentDts = baseMediaDecodeTime;
  var defaultSampleDuration = tfhd.defaultSampleDuration || 0;
  var defaultSampleSize = tfhd.defaultSampleSize || 0;
  var trackId = tfhd.trackId;
  var allSamples = [];
  truns.forEach(function (trun) {
    // Note: We currently do not parse the sample table as well
    // as the trun. It's possible some sources will require this.
    // moov > trak > mdia > minf > stbl
    var trackRun = parseTrun(trun);
    var samples = trackRun.samples;
    samples.forEach(function (sample) {
      if (sample.duration === undefined) {
        sample.duration = defaultSampleDuration;
      }
      if (sample.size === undefined) {
        sample.size = defaultSampleSize;
      }
      sample.trackId = trackId;
      sample.dts = currentDts;
      if (sample.compositionTimeOffset === undefined) {
        sample.compositionTimeOffset = 0;
      }
      if (typeof currentDts === 'bigint') {
        sample.pts = currentDts + window.BigInt(sample.compositionTimeOffset);
        currentDts += window.BigInt(sample.duration);
      } else {
        sample.pts = currentDts + sample.compositionTimeOffset;
        currentDts += sample.duration;
      }
    });
    allSamples = allSamples.concat(samples);
  });
  return allSamples;
};
module.exports = {
  getMdatTrafPairs: getMdatTrafPairs,
  parseSamples: parseSamples
};
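To make the timestamp bookkeeping concrete, here is a small, self-contained sketch that feeds parseSamples one hand-built trun payload (two samples, with the sample-duration and sample-size flags set). The byte values and the require path are illustrative assumptions, not part of the diff:

// Illustrative sketch: exercise parseSamples with a hand-built trun payload
// (the bytes findBox would return for a trun box, i.e. without the 8-byte header).
var parseSamples = require('mux.js/cjs/mp4/samples.js').parseSamples;

var trun = new Uint8Array([
  0x00,                   // version 0
  0x00, 0x03, 0x00,       // flags: sample-duration and sample-size present
  0x00, 0x00, 0x00, 0x02, // sample_count = 2
  0x00, 0x00, 0x03, 0xe8, // sample 1: duration 1000
  0x00, 0x00, 0x00, 0x0a, // sample 1: size 10
  0x00, 0x00, 0x03, 0xe8, // sample 2: duration 1000
  0x00, 0x00, 0x00, 0x14  // sample 2: size 20
]);
// Stand-in for a parsed tfhd; only trackId matters here because both
// samples carry explicit durations and sizes.
var tfhd = { trackId: 1 };

var samples = parseSamples([trun], 90000, tfhd);
// samples[0] -> { duration: 1000, size: 10, trackId: 1, dts: 90000, pts: 90000, ... }
// samples[1] -> dts 91000, pts 91000: currentDts accumulates each sample's duration.
console.log(samples);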

128 node_modules/mux.js/cjs/mp4/webvtt-parser.js generated vendored Normal file
View file

@@ -0,0 +1,128 @@
"use strict";
var _require = require("../tools/mp4-inspector"),
parseTfdt = _require.parseTfdt;
var findBox = require("./find-box");
var _require2 = require("./probe"),
getTimescaleFromMediaHeader = _require2.getTimescaleFromMediaHeader;
var _require3 = require("./samples"),
parseSamples = _require3.parseSamples,
getMdatTrafPairs = _require3.getMdatTrafPairs;
/**
* Module for parsing WebVTT text and styles from FMP4 segments.
* Based on the ISO/IEC 14496-30.
*/
var WebVttParser = function WebVttParser() {
  // default timescale to 90k
  var timescale = 90e3;
  /**
   * Parses the timescale from the init segment.
   * @param {Array<Uint8Array>} segment The initialization segment to parse the timescale from.
   */
  this.init = function (segment) {
    // We just need the timescale from the init segment.
    var mdhd = findBox(segment, ['moov', 'trak', 'mdia', 'mdhd'])[0];
    if (mdhd) {
      timescale = getTimescaleFromMediaHeader(mdhd);
    }
  };
  /**
   * Parses a WebVTT FMP4 segment.
   * @param {Array<Uint8Array>} segment The content segment to parse the WebVTT cues from.
   * @returns The WebVTT cue text, styling, and timing info as an array of cue objects.
   */
  this.parseSegment = function (segment) {
    var vttCues = [];
    var mdatTrafPairs = getMdatTrafPairs(segment);
    var baseMediaDecodeTime = 0;
    mdatTrafPairs.forEach(function (pair) {
      var mdatBox = pair.mdat;
      var trafBox = pair.traf; // zero or one.
      var tfdtBox = findBox(trafBox, ['tfdt'])[0]; // zero or one.
      var tfhdBox = findBox(trafBox, ['tfhd'])[0]; // zero or more.
      var trunBoxes = findBox(trafBox, ['trun']);
      if (tfdtBox) {
        var tfdt = parseTfdt(tfdtBox);
        baseMediaDecodeTime = tfdt.baseMediaDecodeTime;
      }
      if (trunBoxes.length && tfhdBox) {
        var samples = parseSamples(trunBoxes, baseMediaDecodeTime, tfhdBox);
        var mdatOffset = 0;
        samples.forEach(function (sample) {
          // decode utf8 payload
          var UTF_8 = 'utf-8';
          var textDecoder = new TextDecoder(UTF_8); // extract sample data from the mdat box.
          // WebVTT Sample format:
          // Exactly one VTTEmptyCueBox box
          // OR one or more VTTCueBox boxes.
          var sampleData = mdatBox.slice(mdatOffset, mdatOffset + sample.size); // single vtte box.
          var vtteBox = findBox(sampleData, ['vtte'])[0]; // empty box
          if (vtteBox) {
            mdatOffset += sample.size;
            return;
          } // TODO: Support 'vtta' boxes.
          // VTTAdditionalTextBoxes can be interleaved between VTTCueBoxes.
          var vttcBoxes = findBox(sampleData, ['vttc']);
          vttcBoxes.forEach(function (vttcBox) {
            // mandatory payload box.
            var paylBox = findBox(vttcBox, ['payl'])[0]; // optional settings box
            var sttgBox = findBox(vttcBox, ['sttg'])[0];
            var start = sample.pts / timescale;
            var end = (sample.pts + sample.duration) / timescale;
            var cueText, settings; // contains cue text.
            if (paylBox) {
              try {
                cueText = textDecoder.decode(paylBox);
              } catch (e) {
                console.error(e);
              }
            } // settings box contains styling.
            if (sttgBox) {
              try {
                settings = textDecoder.decode(sttgBox);
              } catch (e) {
                console.error(e);
              }
            }
            if (sample.duration && cueText) {
              vttCues.push({
                cueText: cueText,
                start: start,
                end: end,
                settings: settings
              });
            }
          });
          mdatOffset += sample.size;
        });
      }
    });
    return vttCues;
  };
};
module.exports = WebVttParser;
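A short usage sketch for the new parser follows. The require path assumes the vendored layout above resolves from your project, and the segment file names are hypothetical placeholders:

// Hypothetical usage: init.mp4 / segment.m4s are placeholder file names.
var fs = require('fs');
var WebVttParser = require('mux.js/cjs/mp4/webvtt-parser.js');

var parser = new WebVttParser();
// Reads the timescale from the init segment's moov > trak > mdia > mdhd
// (falls back to the 90k default if absent).
parser.init(new Uint8Array(fs.readFileSync('init.mp4')));
// Returns [{ cueText, start, end, settings }], with start/end in seconds.
var cues = parser.parseSegment(new Uint8Array(fs.readFileSync('segment.m4s')));
cues.forEach(function (cue) {
  console.log(cue.start + ' --> ' + cue.end, cue.cueText, cue.settings || '');
});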