mirror of https://github.com/DanielnetoDotCom/YouPHPTube

Git update

Daniel committed 2022-03-17 11:43:59 -03:00
parent bd8d7eedb6
commit 602ca1128e
3123 changed files with 521005 additions and 521005 deletions


@@ -1,108 +1,108 @@
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
var ONE_SECOND_IN_TS = require('../utils/clock').ONE_SECOND_IN_TS;
/**
* Store information about the start and end of the track and the
* duration for each frame/sample we process in order to calculate
* the baseMediaDecodeTime
*/
var collectDtsInfo = function collectDtsInfo(track, data) {
if (typeof data.pts === 'number') {
if (track.timelineStartInfo.pts === undefined) {
track.timelineStartInfo.pts = data.pts;
}
if (track.minSegmentPts === undefined) {
track.minSegmentPts = data.pts;
} else {
track.minSegmentPts = Math.min(track.minSegmentPts, data.pts);
}
if (track.maxSegmentPts === undefined) {
track.maxSegmentPts = data.pts;
} else {
track.maxSegmentPts = Math.max(track.maxSegmentPts, data.pts);
}
}
if (typeof data.dts === 'number') {
if (track.timelineStartInfo.dts === undefined) {
track.timelineStartInfo.dts = data.dts;
}
if (track.minSegmentDts === undefined) {
track.minSegmentDts = data.dts;
} else {
track.minSegmentDts = Math.min(track.minSegmentDts, data.dts);
}
if (track.maxSegmentDts === undefined) {
track.maxSegmentDts = data.dts;
} else {
track.maxSegmentDts = Math.max(track.maxSegmentDts, data.dts);
}
}
};
/**
* Clear values used to calculate the baseMediaDecodeTime between
* tracks
*/
var clearDtsInfo = function clearDtsInfo(track) {
delete track.minSegmentDts;
delete track.maxSegmentDts;
delete track.minSegmentPts;
delete track.maxSegmentPts;
};
/**
* Calculate the track's baseMediaDecodeTime based on the earliest
* DTS the transmuxer has ever seen and the minimum DTS for the
* current track
* @param track {object} track metadata configuration
* @param keepOriginalTimestamps {boolean} If true, keep the timestamps
* in the source; false to adjust the first segment to start at 0.
*/
var calculateTrackBaseMediaDecodeTime = function calculateTrackBaseMediaDecodeTime(track, keepOriginalTimestamps) {
var baseMediaDecodeTime,
scale,
minSegmentDts = track.minSegmentDts; // Optionally adjust the time so the first segment starts at zero.
if (!keepOriginalTimestamps) {
minSegmentDts -= track.timelineStartInfo.dts;
} // track.timelineStartInfo.baseMediaDecodeTime is the location, in time, where
// we want the start of the first segment to be placed
baseMediaDecodeTime = track.timelineStartInfo.baseMediaDecodeTime; // Add to that the distance this segment is from the very first
baseMediaDecodeTime += minSegmentDts; // baseMediaDecodeTime must not become negative
baseMediaDecodeTime = Math.max(0, baseMediaDecodeTime);
if (track.type === 'audio') {
// Audio has a different clock equal to the sampling_rate so we need to
// scale the PTS values into the clock rate of the track
scale = track.samplerate / ONE_SECOND_IN_TS;
baseMediaDecodeTime *= scale;
baseMediaDecodeTime = Math.floor(baseMediaDecodeTime);
}
return baseMediaDecodeTime;
};
module.exports = {
clearDtsInfo: clearDtsInfo,
calculateTrackBaseMediaDecodeTime: calculateTrackBaseMediaDecodeTime,
collectDtsInfo: collectDtsInfo
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
var ONE_SECOND_IN_TS = require('../utils/clock').ONE_SECOND_IN_TS;
/**
* Store information about the start and end of the track and the
* duration for each frame/sample we process in order to calculate
* the baseMediaDecodeTime
*/
var collectDtsInfo = function collectDtsInfo(track, data) {
if (typeof data.pts === 'number') {
if (track.timelineStartInfo.pts === undefined) {
track.timelineStartInfo.pts = data.pts;
}
if (track.minSegmentPts === undefined) {
track.minSegmentPts = data.pts;
} else {
track.minSegmentPts = Math.min(track.minSegmentPts, data.pts);
}
if (track.maxSegmentPts === undefined) {
track.maxSegmentPts = data.pts;
} else {
track.maxSegmentPts = Math.max(track.maxSegmentPts, data.pts);
}
}
if (typeof data.dts === 'number') {
if (track.timelineStartInfo.dts === undefined) {
track.timelineStartInfo.dts = data.dts;
}
if (track.minSegmentDts === undefined) {
track.minSegmentDts = data.dts;
} else {
track.minSegmentDts = Math.min(track.minSegmentDts, data.dts);
}
if (track.maxSegmentDts === undefined) {
track.maxSegmentDts = data.dts;
} else {
track.maxSegmentDts = Math.max(track.maxSegmentDts, data.dts);
}
}
};
/**
* Clear values used to calculate the baseMediaDecodeTime between
* tracks
*/
var clearDtsInfo = function clearDtsInfo(track) {
delete track.minSegmentDts;
delete track.maxSegmentDts;
delete track.minSegmentPts;
delete track.maxSegmentPts;
};
/**
* Calculate the track's baseMediaDecodeTime based on the earliest
* DTS the transmuxer has ever seen and the minimum DTS for the
* current track
* @param track {object} track metadata configuration
* @param keepOriginalTimestamps {boolean} If true, keep the timestamps
* in the source; false to adjust the first segment to start at 0.
*/
var calculateTrackBaseMediaDecodeTime = function calculateTrackBaseMediaDecodeTime(track, keepOriginalTimestamps) {
var baseMediaDecodeTime,
scale,
minSegmentDts = track.minSegmentDts; // Optionally adjust the time so the first segment starts at zero.
if (!keepOriginalTimestamps) {
minSegmentDts -= track.timelineStartInfo.dts;
} // track.timelineStartInfo.baseMediaDecodeTime is the location, in time, where
// we want the start of the first segment to be placed
baseMediaDecodeTime = track.timelineStartInfo.baseMediaDecodeTime; // Add to that the distance this segment is from the very first
baseMediaDecodeTime += minSegmentDts; // baseMediaDecodeTime must not become negative
baseMediaDecodeTime = Math.max(0, baseMediaDecodeTime);
if (track.type === 'audio') {
// Audio has a different clock equal to the sampling_rate so we need to
// scale the PTS values into the clock rate of the track
scale = track.samplerate / ONE_SECOND_IN_TS;
baseMediaDecodeTime *= scale;
baseMediaDecodeTime = Math.floor(baseMediaDecodeTime);
}
return baseMediaDecodeTime;
};
module.exports = {
clearDtsInfo: clearDtsInfo,
calculateTrackBaseMediaDecodeTime: calculateTrackBaseMediaDecodeTime,
collectDtsInfo: collectDtsInfo
};
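A minimal usage sketch of the three exports above, assuming mux.js's 90 kHz MPEG-TS clock (ONE_SECOND_IN_TS = 90000); the track shape and timestamp values are illustrative, not the transmuxer's actual wiring:

var trackInfo = require('../mp4/track-decode-info.js');

// Hypothetical 44.1 kHz audio track; field names follow the module above.
var track = {
  type: 'audio',
  samplerate: 44100,
  timelineStartInfo: { baseMediaDecodeTime: 0 }
};

// collectDtsInfo records min/max PTS and DTS as frames arrive.
trackInfo.collectDtsInfo(track, { pts: 90000, dts: 90000 });
trackInfo.collectDtsInfo(track, { pts: 91024, dts: 91024 });

// minSegmentDts is 90000 ticks (one second at 90 kHz); for an audio
// track it is rescaled into the sample-rate clock:
// 90000 * (44100 / 90000) = 44100.
var bmdt = trackInfo.calculateTrackBaseMediaDecodeTime(track, true);
console.log(bmdt); // 44100

// Reset the per-segment min/max before starting the next segment.
trackInfo.clearDtsInfo(track);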

File diff suppressed because it is too large


@@ -1,141 +1,141 @@
'use strict';

var Stream = require('../utils/stream.js');
var mp4 = require('../mp4/mp4-generator.js');
var audioFrameUtils = require('../mp4/audio-frame-utils');
var trackInfo = require('../mp4/track-decode-info.js');
var ONE_SECOND_IN_TS = require('../utils/clock').ONE_SECOND_IN_TS;
var AUDIO_PROPERTIES = require('../constants/audio-properties.js');

/**
 * Constructs a single-track, ISO BMFF media segment from AAC data
 * events. The output of this stream can be fed to a SourceBuffer
 * configured with a suitable initialization segment.
 */
var AudioSegmentStream = function AudioSegmentStream(track, options) {
  var adtsFrames = [],
      sequenceNumber = 0,
      earliestAllowedDts = 0,
      audioAppendStartTs = 0,
      videoBaseMediaDecodeTime = Infinity,
      segmentStartPts = null,
      segmentEndPts = null;

  options = options || {};
  AudioSegmentStream.prototype.init.call(this);

  this.push = function (data) {
    trackInfo.collectDtsInfo(track, data);

    if (track) {
      AUDIO_PROPERTIES.forEach(function (prop) {
        track[prop] = data[prop];
      });
    }

    // buffer audio data until end() is called
    adtsFrames.push(data);
  };

  this.setEarliestDts = function (earliestDts) {
    earliestAllowedDts = earliestDts;
  };

  this.setVideoBaseMediaDecodeTime = function (baseMediaDecodeTime) {
    videoBaseMediaDecodeTime = baseMediaDecodeTime;
  };

  this.setAudioAppendStart = function (timestamp) {
    audioAppendStartTs = timestamp;
  };

  this.processFrames_ = function () {
    var frames, moof, mdat, boxes, timingInfo;

    // return early if no audio data has been observed
    if (adtsFrames.length === 0) {
      return;
    }

    frames = audioFrameUtils.trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts);

    if (frames.length === 0) {
      // return early if the frames are all after the earliest allowed DTS
      // TODO should we clear the adtsFrames?
      return;
    }

    track.baseMediaDecodeTime = trackInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);
    audioFrameUtils.prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime);

    // we have to build the index from byte locations to
    // samples (that is, adts frames) in the audio data
    track.samples = audioFrameUtils.generateSampleTable(frames);

    // concatenate the audio data to construct the mdat
    mdat = mp4.mdat(audioFrameUtils.concatenateFrameData(frames));
    adtsFrames = [];
    moof = mp4.moof(sequenceNumber, [track]);

    // bump the sequence number for next time
    sequenceNumber++;
    track.initSegment = mp4.initSegment([track]);

    // it would be great to allocate this array up front instead of
    // throwing away hundreds of media segment fragments
    boxes = new Uint8Array(moof.byteLength + mdat.byteLength);
    boxes.set(moof);
    boxes.set(mdat, moof.byteLength);
    trackInfo.clearDtsInfo(track);

    if (segmentStartPts === null) {
      segmentEndPts = segmentStartPts = frames[0].pts;
    }

    segmentEndPts += frames.length * (ONE_SECOND_IN_TS * 1024 / track.samplerate);

    timingInfo = {
      start: segmentStartPts
    };

    this.trigger('timingInfo', timingInfo);
    this.trigger('data', {
      track: track,
      boxes: boxes
    });
  };

  this.flush = function () {
    this.processFrames_();

    // trigger final timing info
    this.trigger('timingInfo', {
      start: segmentStartPts,
      end: segmentEndPts
    });
    this.resetTiming_();
    this.trigger('done', 'AudioSegmentStream');
  };

  this.partialFlush = function () {
    this.processFrames_();
    this.trigger('partialdone', 'AudioSegmentStream');
  };

  this.endTimeline = function () {
    this.flush();
    this.trigger('endedtimeline', 'AudioSegmentStream');
  };

  this.resetTiming_ = function () {
    trackInfo.clearDtsInfo(track);
    segmentStartPts = null;
    segmentEndPts = null;
  };

  this.reset = function () {
    this.resetTiming_();
    adtsFrames = [];
    this.trigger('reset');
  };
};

AudioSegmentStream.prototype = new Stream();
module.exports = AudioSegmentStream;
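And a minimal sketch of driving AudioSegmentStream directly; inside mux.js this stream is normally fed by the transmuxer's ADTS parsing pipeline, so the require path, track shape, and frame fields below are illustrative assumptions:

var AudioSegmentStream = require('./audio-segment-stream.js'); // illustrative path

var track = {
  id: 1,
  type: 'audio',
  samplerate: 44100,
  timelineStartInfo: { baseMediaDecodeTime: 0 }
};
var audioSegmentStream = new AudioSegmentStream(track, { keepOriginalTimestamps: true });

audioSegmentStream.on('data', function (segment) {
  // segment.boxes is a Uint8Array holding the moof + mdat pair, ready for a
  // SourceBuffer initialized with segment.track.initSegment.
  console.log('segment bytes:', segment.boxes.byteLength);
});
audioSegmentStream.on('timingInfo', function (timingInfo) {
  // Each AAC frame covers 1024 samples, so segmentEndPts advances by
  // ONE_SECOND_IN_TS * 1024 / samplerate (about 2090 ticks per frame at 44.1 kHz).
  console.log('segment timing:', timingInfo);
});

// push() buffers one parsed ADTS frame: 90 kHz pts/dts, the raw AAC payload,
// and the AUDIO_PROPERTIES fields that get copied onto the track.
audioSegmentStream.push({
  pts: 90000,
  dts: 90000,
  data: new Uint8Array(13), // stands in for real AAC frame bytes
  audioobjecttype: 2,
  channelcount: 2,
  samplerate: 44100,
  samplingfrequencyindex: 4,
  samplesize: 16
});

// flush() cuts a segment from everything buffered, emitting 'data',
// 'timingInfo', and finally 'done'.
audioSegmentStream.flush();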