mirror of https://github.com/DanielnetoDotCom/YouPHPTube synced 2025-10-03 09:49:28 +02:00

Git update

Daniel 2022-03-17 11:43:59 -03:00
parent bd8d7eedb6
commit 602ca1128e
3123 changed files with 521005 additions and 521005 deletions


@@ -1,108 +1,108 @@
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
var ONE_SECOND_IN_TS = require('../utils/clock').ONE_SECOND_IN_TS;
/**
* Store information about the start and end of the track and the
* duration for each frame/sample we process in order to calculate
* the baseMediaDecodeTime
*/
var collectDtsInfo = function collectDtsInfo(track, data) {
if (typeof data.pts === 'number') {
if (track.timelineStartInfo.pts === undefined) {
track.timelineStartInfo.pts = data.pts;
}
if (track.minSegmentPts === undefined) {
track.minSegmentPts = data.pts;
} else {
track.minSegmentPts = Math.min(track.minSegmentPts, data.pts);
}
if (track.maxSegmentPts === undefined) {
track.maxSegmentPts = data.pts;
} else {
track.maxSegmentPts = Math.max(track.maxSegmentPts, data.pts);
}
}
if (typeof data.dts === 'number') {
if (track.timelineStartInfo.dts === undefined) {
track.timelineStartInfo.dts = data.dts;
}
if (track.minSegmentDts === undefined) {
track.minSegmentDts = data.dts;
} else {
track.minSegmentDts = Math.min(track.minSegmentDts, data.dts);
}
if (track.maxSegmentDts === undefined) {
track.maxSegmentDts = data.dts;
} else {
track.maxSegmentDts = Math.max(track.maxSegmentDts, data.dts);
}
}
};
/**
* Clear values used to calculate the baseMediaDecodeTime between
* tracks
*/
var clearDtsInfo = function clearDtsInfo(track) {
delete track.minSegmentDts;
delete track.maxSegmentDts;
delete track.minSegmentPts;
delete track.maxSegmentPts;
};
/**
* Calculate the track's baseMediaDecodeTime based on the earliest
* DTS the transmuxer has ever seen and the minimum DTS for the
* current track
* @param track {object} track metadata configuration
* @param keepOriginalTimestamps {boolean} If true, keep the timestamps
* in the source; false to adjust the first segment to start at 0.
*/
var calculateTrackBaseMediaDecodeTime = function calculateTrackBaseMediaDecodeTime(track, keepOriginalTimestamps) {
var baseMediaDecodeTime,
scale,
minSegmentDts = track.minSegmentDts; // Optionally adjust the time so the first segment starts at zero.
if (!keepOriginalTimestamps) {
minSegmentDts -= track.timelineStartInfo.dts;
} // track.timelineStartInfo.baseMediaDecodeTime is the location, in time, where
// we want the start of the first segment to be placed
baseMediaDecodeTime = track.timelineStartInfo.baseMediaDecodeTime; // Add to that the distance this segment is from the very first
baseMediaDecodeTime += minSegmentDts; // baseMediaDecodeTime must not become negative
baseMediaDecodeTime = Math.max(0, baseMediaDecodeTime);
if (track.type === 'audio') {
// Audio has a different clock equal to the sampling_rate so we need to
// scale the PTS values into the clock rate of the track
scale = track.samplerate / ONE_SECOND_IN_TS;
baseMediaDecodeTime *= scale;
baseMediaDecodeTime = Math.floor(baseMediaDecodeTime);
}
return baseMediaDecodeTime;
};
module.exports = {
clearDtsInfo: clearDtsInfo,
calculateTrackBaseMediaDecodeTime: calculateTrackBaseMediaDecodeTime,
collectDtsInfo: collectDtsInfo
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
var ONE_SECOND_IN_TS = require('../utils/clock').ONE_SECOND_IN_TS;
/**
* Store information about the start and end of the track and the
* duration for each frame/sample we process in order to calculate
* the baseMediaDecodeTime
*/
var collectDtsInfo = function collectDtsInfo(track, data) {
if (typeof data.pts === 'number') {
if (track.timelineStartInfo.pts === undefined) {
track.timelineStartInfo.pts = data.pts;
}
if (track.minSegmentPts === undefined) {
track.minSegmentPts = data.pts;
} else {
track.minSegmentPts = Math.min(track.minSegmentPts, data.pts);
}
if (track.maxSegmentPts === undefined) {
track.maxSegmentPts = data.pts;
} else {
track.maxSegmentPts = Math.max(track.maxSegmentPts, data.pts);
}
}
if (typeof data.dts === 'number') {
if (track.timelineStartInfo.dts === undefined) {
track.timelineStartInfo.dts = data.dts;
}
if (track.minSegmentDts === undefined) {
track.minSegmentDts = data.dts;
} else {
track.minSegmentDts = Math.min(track.minSegmentDts, data.dts);
}
if (track.maxSegmentDts === undefined) {
track.maxSegmentDts = data.dts;
} else {
track.maxSegmentDts = Math.max(track.maxSegmentDts, data.dts);
}
}
};
/**
* Clear values used to calculate the baseMediaDecodeTime between
* tracks
*/
var clearDtsInfo = function clearDtsInfo(track) {
delete track.minSegmentDts;
delete track.maxSegmentDts;
delete track.minSegmentPts;
delete track.maxSegmentPts;
};
/**
* Calculate the track's baseMediaDecodeTime based on the earliest
* DTS the transmuxer has ever seen and the minimum DTS for the
* current track
* @param track {object} track metadata configuration
* @param keepOriginalTimestamps {boolean} If true, keep the timestamps
* in the source; false to adjust the first segment to start at 0.
*/
var calculateTrackBaseMediaDecodeTime = function calculateTrackBaseMediaDecodeTime(track, keepOriginalTimestamps) {
var baseMediaDecodeTime,
scale,
minSegmentDts = track.minSegmentDts; // Optionally adjust the time so the first segment starts at zero.
if (!keepOriginalTimestamps) {
minSegmentDts -= track.timelineStartInfo.dts;
} // track.timelineStartInfo.baseMediaDecodeTime is the location, in time, where
// we want the start of the first segment to be placed
baseMediaDecodeTime = track.timelineStartInfo.baseMediaDecodeTime; // Add to that the distance this segment is from the very first
baseMediaDecodeTime += minSegmentDts; // baseMediaDecodeTime must not become negative
baseMediaDecodeTime = Math.max(0, baseMediaDecodeTime);
if (track.type === 'audio') {
// Audio has a different clock equal to the sampling_rate so we need to
// scale the PTS values into the clock rate of the track
scale = track.samplerate / ONE_SECOND_IN_TS;
baseMediaDecodeTime *= scale;
baseMediaDecodeTime = Math.floor(baseMediaDecodeTime);
}
return baseMediaDecodeTime;
};
module.exports = {
clearDtsInfo: clearDtsInfo,
calculateTrackBaseMediaDecodeTime: calculateTrackBaseMediaDecodeTime,
collectDtsInfo: collectDtsInfo
};
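
For context, a minimal usage sketch of the helpers above; the require path and the track shape are assumptions inferred from this file, not taken from mux.js documentation:

var trackInfo = require('./track-decode-info'); // path assumed
// minimal track object with the fields the helpers read
var track = {
  type: 'audio',
  samplerate: 44100,
  timelineStartInfo: { baseMediaDecodeTime: 0 }
};
// record min/max PTS/DTS for two fake samples (90kHz MPEG-TS clock)
trackInfo.collectDtsInfo(track, { pts: 90000, dts: 90000 });
trackInfo.collectDtsInfo(track, { pts: 180000, dts: 180000 });
// keepOriginalTimestamps = false shifts the first segment to start at 0;
// for audio the result is rescaled from 90kHz to the track sample rate
var baseMediaDecodeTime = trackInfo.calculateTrackBaseMediaDecodeTime(track, false);
// reset per-segment state before the next segment
trackInfo.clearDtsInfo(track);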

File diff suppressed because it is too large


@@ -1,141 +1,141 @@
'use strict';
var Stream = require('../utils/stream.js');
var mp4 = require('../mp4/mp4-generator.js');
var audioFrameUtils = require('../mp4/audio-frame-utils');
var trackInfo = require('../mp4/track-decode-info.js');
var ONE_SECOND_IN_TS = require('../utils/clock').ONE_SECOND_IN_TS;
var AUDIO_PROPERTIES = require('../constants/audio-properties.js');
/**
* Constructs a single-track, ISO BMFF media segment from AAC data
* events. The output of this stream can be fed to a SourceBuffer
* configured with a suitable initialization segment.
*/
var AudioSegmentStream = function AudioSegmentStream(track, options) {
var adtsFrames = [],
sequenceNumber = 0,
earliestAllowedDts = 0,
audioAppendStartTs = 0,
videoBaseMediaDecodeTime = Infinity,
segmentStartPts = null,
segmentEndPts = null;
options = options || {};
AudioSegmentStream.prototype.init.call(this);
this.push = function (data) {
trackInfo.collectDtsInfo(track, data);
if (track) {
AUDIO_PROPERTIES.forEach(function (prop) {
track[prop] = data[prop];
});
} // buffer audio data until end() is called
adtsFrames.push(data);
};
this.setEarliestDts = function (earliestDts) {
earliestAllowedDts = earliestDts;
};
this.setVideoBaseMediaDecodeTime = function (baseMediaDecodeTime) {
videoBaseMediaDecodeTime = baseMediaDecodeTime;
};
this.setAudioAppendStart = function (timestamp) {
audioAppendStartTs = timestamp;
};
this.processFrames_ = function () {
var frames, moof, mdat, boxes, timingInfo; // return early if no audio data has been observed
if (adtsFrames.length === 0) {
return;
}
frames = audioFrameUtils.trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts);
if (frames.length === 0) {
// return early if the frames are all after the earliest allowed DTS
// TODO should we clear the adtsFrames?
return;
}
track.baseMediaDecodeTime = trackInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);
audioFrameUtils.prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime); // we have to build the index from byte locations to
// samples (that is, adts frames) in the audio data
track.samples = audioFrameUtils.generateSampleTable(frames); // concatenate the audio data to construct the mdat
mdat = mp4.mdat(audioFrameUtils.concatenateFrameData(frames));
adtsFrames = [];
moof = mp4.moof(sequenceNumber, [track]); // bump the sequence number for next time
sequenceNumber++;
track.initSegment = mp4.initSegment([track]); // it would be great to allocate this array up front instead of
// throwing away hundreds of media segment fragments
boxes = new Uint8Array(moof.byteLength + mdat.byteLength);
boxes.set(moof);
boxes.set(mdat, moof.byteLength);
trackInfo.clearDtsInfo(track);
if (segmentStartPts === null) {
segmentEndPts = segmentStartPts = frames[0].pts;
}
segmentEndPts += frames.length * (ONE_SECOND_IN_TS * 1024 / track.samplerate);
timingInfo = {
start: segmentStartPts
};
this.trigger('timingInfo', timingInfo);
this.trigger('data', {
track: track,
boxes: boxes
});
};
this.flush = function () {
this.processFrames_(); // trigger final timing info
this.trigger('timingInfo', {
start: segmentStartPts,
end: segmentEndPts
});
this.resetTiming_();
this.trigger('done', 'AudioSegmentStream');
};
this.partialFlush = function () {
this.processFrames_();
this.trigger('partialdone', 'AudioSegmentStream');
};
this.endTimeline = function () {
this.flush();
this.trigger('endedtimeline', 'AudioSegmentStream');
};
this.resetTiming_ = function () {
trackInfo.clearDtsInfo(track);
segmentStartPts = null;
segmentEndPts = null;
};
this.reset = function () {
this.resetTiming_();
adtsFrames = [];
this.trigger('reset');
};
};
AudioSegmentStream.prototype = new Stream();
module.exports = AudioSegmentStream;
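
A hedged usage sketch for the stream above, assuming ADTS frames with pts/dts/data fields arrive from an upstream parser; the require path is an assumption:

var AudioSegmentStream = require('./audio-segment-stream'); // path assumed
var track = {
  type: 'audio',
  samplerate: 44100,
  timelineStartInfo: { baseMediaDecodeTime: 0 }
};
var audioStream = new AudioSegmentStream(track, { keepOriginalTimestamps: false });
audioStream.on('data', function (segment) {
  // segment.boxes is a Uint8Array holding one moof/mdat pair
  console.log('segment bytes:', segment.boxes.byteLength);
});
audioStream.on('done', function () {
  console.log('audio segment finished');
});
// in the real pipeline an AdtsStream is piped in instead of manual pushes:
// audioStream.push(adtsFrame);
// audioStream.flush();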

File diff suppressed because it is too large



@@ -1,3 +1,3 @@
module.exports = {
Transmuxer: require('./transmuxer')
};
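
For reference, a two-line sketch of how this index is consumed; the 'mux.js/lib/partial' path is an assumption based on the partial-transmuxer requires elsewhere in this commit:

var partial = require('mux.js/lib/partial'); // path assumed
var transmuxer = new partial.Transmuxer({ baseMediaDecodeTime: 0 });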


@@ -1,322 +1,322 @@
var Stream = require('../utils/stream.js');
var m2ts = require('../m2ts/m2ts.js');
var codecs = require('../codecs/index.js');
var AudioSegmentStream = require('./audio-segment-stream.js');
var VideoSegmentStream = require('./video-segment-stream.js');
var trackInfo = require('../mp4/track-decode-info.js');
var isLikelyAacData = require('../aac/utils').isLikelyAacData;
var AdtsStream = require('../codecs/adts');
var AacStream = require('../aac/index');
var clock = require('../utils/clock');
var createPipeline = function createPipeline(object) {
object.prototype = new Stream();
object.prototype.init.call(object);
return object;
};
var tsPipeline = function tsPipeline(options) {
var pipeline = {
type: 'ts',
tracks: {
audio: null,
video: null
},
packet: new m2ts.TransportPacketStream(),
parse: new m2ts.TransportParseStream(),
elementary: new m2ts.ElementaryStream(),
timestampRollover: new m2ts.TimestampRolloverStream(),
adts: new codecs.Adts(),
h264: new codecs.h264.H264Stream(),
captionStream: new m2ts.CaptionStream(options),
metadataStream: new m2ts.MetadataStream()
};
pipeline.headOfPipeline = pipeline.packet; // Transport Stream
pipeline.packet.pipe(pipeline.parse).pipe(pipeline.elementary).pipe(pipeline.timestampRollover); // H264
pipeline.timestampRollover.pipe(pipeline.h264); // Hook up CEA-608/708 caption stream
pipeline.h264.pipe(pipeline.captionStream);
pipeline.timestampRollover.pipe(pipeline.metadataStream); // ADTS
pipeline.timestampRollover.pipe(pipeline.adts);
pipeline.elementary.on('data', function (data) {
if (data.type !== 'metadata') {
return;
}
for (var i = 0; i < data.tracks.length; i++) {
if (!pipeline.tracks[data.tracks[i].type]) {
pipeline.tracks[data.tracks[i].type] = data.tracks[i];
pipeline.tracks[data.tracks[i].type].timelineStartInfo.baseMediaDecodeTime = options.baseMediaDecodeTime;
}
}
if (pipeline.tracks.video && !pipeline.videoSegmentStream) {
pipeline.videoSegmentStream = new VideoSegmentStream(pipeline.tracks.video, options);
pipeline.videoSegmentStream.on('timelineStartInfo', function (timelineStartInfo) {
if (pipeline.tracks.audio && !options.keepOriginalTimestamps) {
pipeline.audioSegmentStream.setEarliestDts(timelineStartInfo.dts - options.baseMediaDecodeTime);
}
});
pipeline.videoSegmentStream.on('timingInfo', pipeline.trigger.bind(pipeline, 'videoTimingInfo'));
pipeline.videoSegmentStream.on('data', function (data) {
pipeline.trigger('data', {
type: 'video',
data: data
});
});
pipeline.videoSegmentStream.on('done', pipeline.trigger.bind(pipeline, 'done'));
pipeline.videoSegmentStream.on('partialdone', pipeline.trigger.bind(pipeline, 'partialdone'));
pipeline.videoSegmentStream.on('endedtimeline', pipeline.trigger.bind(pipeline, 'endedtimeline'));
pipeline.h264.pipe(pipeline.videoSegmentStream);
}
if (pipeline.tracks.audio && !pipeline.audioSegmentStream) {
pipeline.audioSegmentStream = new AudioSegmentStream(pipeline.tracks.audio, options);
pipeline.audioSegmentStream.on('data', function (data) {
pipeline.trigger('data', {
type: 'audio',
data: data
});
});
pipeline.audioSegmentStream.on('done', pipeline.trigger.bind(pipeline, 'done'));
pipeline.audioSegmentStream.on('partialdone', pipeline.trigger.bind(pipeline, 'partialdone'));
pipeline.audioSegmentStream.on('endedtimeline', pipeline.trigger.bind(pipeline, 'endedtimeline'));
pipeline.audioSegmentStream.on('timingInfo', pipeline.trigger.bind(pipeline, 'audioTimingInfo'));
pipeline.adts.pipe(pipeline.audioSegmentStream);
} // emit PMT info
pipeline.trigger('trackinfo', {
hasAudio: !!pipeline.tracks.audio,
hasVideo: !!pipeline.tracks.video
});
});
pipeline.captionStream.on('data', function (caption) {
var timelineStartPts;
if (pipeline.tracks.video) {
timelineStartPts = pipeline.tracks.video.timelineStartInfo.pts || 0;
} else {
// This will only happen if we encounter caption packets before
// video data in a segment. This is an unusual/unlikely scenario,
// so we assume the timeline starts at zero for now.
timelineStartPts = 0;
} // Translate caption PTS times into second offsets into the
// video timeline for the segment
caption.startTime = clock.metadataTsToSeconds(caption.startPts, timelineStartPts, options.keepOriginalTimestamps);
caption.endTime = clock.metadataTsToSeconds(caption.endPts, timelineStartPts, options.keepOriginalTimestamps);
pipeline.trigger('caption', caption);
});
pipeline = createPipeline(pipeline);
pipeline.metadataStream.on('data', pipeline.trigger.bind(pipeline, 'id3Frame'));
return pipeline;
};
var aacPipeline = function aacPipeline(options) {
var pipeline = {
type: 'aac',
tracks: {
audio: null
},
metadataStream: new m2ts.MetadataStream(),
aacStream: new AacStream(),
audioRollover: new m2ts.TimestampRolloverStream('audio'),
timedMetadataRollover: new m2ts.TimestampRolloverStream('timed-metadata'),
adtsStream: new AdtsStream(true)
}; // set up the parsing pipeline
pipeline.headOfPipeline = pipeline.aacStream;
pipeline.aacStream.pipe(pipeline.audioRollover).pipe(pipeline.adtsStream);
pipeline.aacStream.pipe(pipeline.timedMetadataRollover).pipe(pipeline.metadataStream);
pipeline.metadataStream.on('timestamp', function (frame) {
pipeline.aacStream.setTimestamp(frame.timeStamp);
});
pipeline.aacStream.on('data', function (data) {
if (data.type !== 'timed-metadata' && data.type !== 'audio' || pipeline.audioSegmentStream) {
return;
}
pipeline.tracks.audio = pipeline.tracks.audio || {
timelineStartInfo: {
baseMediaDecodeTime: options.baseMediaDecodeTime
},
codec: 'adts',
type: 'audio'
}; // hook up the audio segment stream to the first track with aac data
pipeline.audioSegmentStream = new AudioSegmentStream(pipeline.tracks.audio, options);
pipeline.audioSegmentStream.on('data', function (data) {
pipeline.trigger('data', {
type: 'audio',
data: data
});
});
pipeline.audioSegmentStream.on('partialdone', pipeline.trigger.bind(pipeline, 'partialdone'));
pipeline.audioSegmentStream.on('done', pipeline.trigger.bind(pipeline, 'done'));
pipeline.audioSegmentStream.on('endedtimeline', pipeline.trigger.bind(pipeline, 'endedtimeline'));
pipeline.audioSegmentStream.on('timingInfo', pipeline.trigger.bind(pipeline, 'audioTimingInfo')); // Set up the final part of the audio pipeline
pipeline.adtsStream.pipe(pipeline.audioSegmentStream);
pipeline.trigger('trackinfo', {
hasAudio: !!pipeline.tracks.audio,
hasVideo: !!pipeline.tracks.video
});
}); // set the pipeline up as a stream before binding to get access to the trigger function
pipeline = createPipeline(pipeline);
pipeline.metadataStream.on('data', pipeline.trigger.bind(pipeline, 'id3Frame'));
return pipeline;
};
var setupPipelineListeners = function setupPipelineListeners(pipeline, transmuxer) {
pipeline.on('data', transmuxer.trigger.bind(transmuxer, 'data'));
pipeline.on('done', transmuxer.trigger.bind(transmuxer, 'done'));
pipeline.on('partialdone', transmuxer.trigger.bind(transmuxer, 'partialdone'));
pipeline.on('endedtimeline', transmuxer.trigger.bind(transmuxer, 'endedtimeline'));
pipeline.on('audioTimingInfo', transmuxer.trigger.bind(transmuxer, 'audioTimingInfo'));
pipeline.on('videoTimingInfo', transmuxer.trigger.bind(transmuxer, 'videoTimingInfo'));
pipeline.on('trackinfo', transmuxer.trigger.bind(transmuxer, 'trackinfo'));
pipeline.on('id3Frame', function (event) {
// add this to every single emitted segment even though it's only needed for the first
event.dispatchType = pipeline.metadataStream.dispatchType; // keep original time, can be adjusted if needed at a higher level
event.cueTime = clock.videoTsToSeconds(event.pts);
transmuxer.trigger('id3Frame', event);
});
pipeline.on('caption', function (event) {
transmuxer.trigger('caption', event);
});
};
var Transmuxer = function Transmuxer(options) {
var pipeline = null,
hasFlushed = true;
options = options || {};
Transmuxer.prototype.init.call(this);
options.baseMediaDecodeTime = options.baseMediaDecodeTime || 0;
this.push = function (bytes) {
if (hasFlushed) {
var isAac = isLikelyAacData(bytes);
if (isAac && (!pipeline || pipeline.type !== 'aac')) {
pipeline = aacPipeline(options);
setupPipelineListeners(pipeline, this);
} else if (!isAac && (!pipeline || pipeline.type !== 'ts')) {
pipeline = tsPipeline(options);
setupPipelineListeners(pipeline, this);
}
hasFlushed = false;
}
pipeline.headOfPipeline.push(bytes);
};
this.flush = function () {
if (!pipeline) {
return;
}
hasFlushed = true;
pipeline.headOfPipeline.flush();
};
this.partialFlush = function () {
if (!pipeline) {
return;
}
pipeline.headOfPipeline.partialFlush();
};
this.endTimeline = function () {
if (!pipeline) {
return;
}
pipeline.headOfPipeline.endTimeline();
};
this.reset = function () {
if (!pipeline) {
return;
}
pipeline.headOfPipeline.reset();
};
this.setBaseMediaDecodeTime = function (baseMediaDecodeTime) {
if (!options.keepOriginalTimestamps) {
options.baseMediaDecodeTime = baseMediaDecodeTime;
}
if (!pipeline) {
return;
}
if (pipeline.tracks.audio) {
pipeline.tracks.audio.timelineStartInfo.dts = undefined;
pipeline.tracks.audio.timelineStartInfo.pts = undefined;
trackInfo.clearDtsInfo(pipeline.tracks.audio);
if (pipeline.audioRollover) {
pipeline.audioRollover.discontinuity();
}
}
if (pipeline.tracks.video) {
if (pipeline.videoSegmentStream) {
pipeline.videoSegmentStream.gopCache_ = [];
}
pipeline.tracks.video.timelineStartInfo.dts = undefined;
pipeline.tracks.video.timelineStartInfo.pts = undefined;
trackInfo.clearDtsInfo(pipeline.tracks.video); // pipeline.captionStream.reset();
}
if (pipeline.timestampRollover) {
pipeline.timestampRollover.discontinuity();
}
};
this.setRemux = function (val) {
options.remux = val;
if (pipeline && pipeline.coalesceStream) {
pipeline.coalesceStream.setRemux(val);
}
};
this.setAudioAppendStart = function (audioAppendStart) {
if (!pipeline || !pipeline.tracks.audio || !pipeline.audioSegmentStream) {
return;
}
pipeline.audioSegmentStream.setAudioAppendStart(audioAppendStart);
}; // TODO GOP alignment support
// Support may be a bit trickier than with full segment appends, as GOPs may be split
// and processed in a more granular fashion
this.alignGopsWith = function (gopsToAlignWith) {
return;
};
};
Transmuxer.prototype = new Stream();
module.exports = Transmuxer;
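
A minimal end-to-end sketch for the Transmuxer above, assuming bytes is a Uint8Array of MPEG-TS or raw AAC/ADTS data; per the push() logic, the pipeline type is chosen on the first push after a flush:

var Transmuxer = require('./transmuxer'); // path assumed
var transmuxer = new Transmuxer({ baseMediaDecodeTime: 0 });
transmuxer.on('data', function (event) {
  // event.type is 'audio' or 'video'; event.data.boxes holds the fMP4 bytes
  console.log(event.type, event.data.boxes.byteLength);
});
transmuxer.on('trackinfo', function (info) {
  console.log('hasAudio:', info.hasAudio, 'hasVideo:', info.hasVideo);
});
// transmuxer.push(bytes);    // routes into an 'aac' or 'ts' pipeline
// transmuxer.partialFlush(); // emit whatever is parseable so far
// transmuxer.flush();        // end of segment; allows a pipeline switch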


@@ -1,195 +1,195 @@
/**
* Constructs a single-track, ISO BMFF media segment from H264 data
* events. The output of this stream can be fed to a SourceBuffer
* configured with a suitable initialization segment.
* @param track {object} track metadata configuration
* @param options {object} transmuxer options object
* @param options.alignGopsAtEnd {boolean} If true, start from the end of the
* gopsToAlignWith list when attempting to align gop pts
*/
'use strict';
var Stream = require('../utils/stream.js');
var mp4 = require('../mp4/mp4-generator.js');
var trackInfo = require('../mp4/track-decode-info.js');
var frameUtils = require('../mp4/frame-utils');
var VIDEO_PROPERTIES = require('../constants/video-properties.js');
var VideoSegmentStream = function VideoSegmentStream(track, options) {
var sequenceNumber = 0,
nalUnits = [],
frameCache = [],
// gopsToAlignWith = [],
config,
pps,
segmentStartPts = null,
segmentEndPts = null,
gops,
ensureNextFrameIsKeyFrame = true;
options = options || {};
VideoSegmentStream.prototype.init.call(this);
this.push = function (nalUnit) {
trackInfo.collectDtsInfo(track, nalUnit);
if (typeof track.timelineStartInfo.dts === 'undefined') {
track.timelineStartInfo.dts = nalUnit.dts;
} // record the track config
if (nalUnit.nalUnitType === 'seq_parameter_set_rbsp' && !config) {
config = nalUnit.config;
track.sps = [nalUnit.data];
VIDEO_PROPERTIES.forEach(function (prop) {
track[prop] = config[prop];
}, this);
}
if (nalUnit.nalUnitType === 'pic_parameter_set_rbsp' && !pps) {
pps = nalUnit.data;
track.pps = [nalUnit.data];
} // buffer video until flush() is called
nalUnits.push(nalUnit);
};
this.processNals_ = function (cacheLastFrame) {
var i;
nalUnits = frameCache.concat(nalUnits); // Throw away nalUnits at the start of the byte stream until
// we find the first AUD
while (nalUnits.length) {
if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {
break;
}
nalUnits.shift();
} // Return early if no video data has been observed
if (nalUnits.length === 0) {
return;
}
var frames = frameUtils.groupNalsIntoFrames(nalUnits);
if (!frames.length) {
return;
} // note that the frame cache may also protect us from cases where we haven't
// pushed data for the entire first or last frame yet
frameCache = frames[frames.length - 1];
if (cacheLastFrame) {
frames.pop();
frames.duration -= frameCache.duration;
frames.nalCount -= frameCache.length;
frames.byteLength -= frameCache.byteLength;
}
if (!frames.length) {
nalUnits = [];
return;
}
this.trigger('timelineStartInfo', track.timelineStartInfo);
if (ensureNextFrameIsKeyFrame) {
gops = frameUtils.groupFramesIntoGops(frames);
if (!gops[0][0].keyFrame) {
gops = frameUtils.extendFirstKeyFrame(gops);
if (!gops[0][0].keyFrame) {
// we haven't yet gotten a key frame, so reset nal units to wait for more nal
// units
nalUnits = [].concat.apply([], frames).concat(frameCache);
frameCache = [];
return;
}
frames = [].concat.apply([], gops);
frames.duration = gops.duration;
}
ensureNextFrameIsKeyFrame = false;
}
if (segmentStartPts === null) {
segmentStartPts = frames[0].pts;
segmentEndPts = segmentStartPts;
}
segmentEndPts += frames.duration;
this.trigger('timingInfo', {
start: segmentStartPts,
end: segmentEndPts
});
for (i = 0; i < frames.length; i++) {
var frame = frames[i];
track.samples = frameUtils.generateSampleTableForFrame(frame);
var mdat = mp4.mdat(frameUtils.concatenateNalDataForFrame(frame));
trackInfo.clearDtsInfo(track);
trackInfo.collectDtsInfo(track, frame);
track.baseMediaDecodeTime = trackInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);
var moof = mp4.moof(sequenceNumber, [track]);
sequenceNumber++;
track.initSegment = mp4.initSegment([track]);
var boxes = new Uint8Array(moof.byteLength + mdat.byteLength);
boxes.set(moof);
boxes.set(mdat, moof.byteLength);
this.trigger('data', {
track: track,
boxes: boxes,
sequence: sequenceNumber,
videoFrameDts: frame.dts,
videoFramePts: frame.pts
});
}
nalUnits = [];
};
this.resetTimingAndConfig_ = function () {
config = undefined;
pps = undefined;
segmentStartPts = null;
segmentEndPts = null;
};
this.partialFlush = function () {
this.processNals_(true);
this.trigger('partialdone', 'VideoSegmentStream');
};
this.flush = function () {
this.processNals_(false); // reset config and pps because they may differ across segments
// for instance, when we are rendition switching
this.resetTimingAndConfig_();
this.trigger('done', 'VideoSegmentStream');
};
this.endTimeline = function () {
this.flush();
this.trigger('endedtimeline', 'VideoSegmentStream');
};
this.reset = function () {
this.resetTimingAndConfig_();
frameCache = [];
nalUnits = [];
ensureNextFrameIsKeyFrame = true;
this.trigger('reset');
};
};
VideoSegmentStream.prototype = new Stream();
module.exports = VideoSegmentStream;
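
A usage sketch under the same assumptions as the audio example: parsed NAL units with nalUnitType/pts/dts/data fields arrive from an upstream H264Stream, and the require path is assumed:

var VideoSegmentStream = require('./video-segment-stream'); // path assumed
var track = {
  type: 'video',
  timelineStartInfo: { baseMediaDecodeTime: 0 }
};
var videoStream = new VideoSegmentStream(track, { keepOriginalTimestamps: false });
videoStream.on('data', function (segment) {
  // one moof/mdat pair per frame, with the frame's timestamps for bookkeeping
  console.log(segment.sequence, segment.videoFramePts, segment.boxes.byteLength);
});
// videoStream.push(nalUnit);  // buffered until a flush
// videoStream.partialFlush(); // keeps the last, possibly incomplete, frame cached
// videoStream.flush();        // emits all complete frames and resets sps/pps state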


@@ -1,151 +1,151 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var Stream = require('../utils/stream.js');
/**
* The final stage of the transmuxer that emits the flv tags
* for audio, video, and metadata. Also translates in time and
* outputs caption data and id3 cues.
*/
var CoalesceStream = function(options) {
// Number of Tracks per output segment
// If greater than 1, we combine multiple
// tracks into a single segment
this.numberOfTracks = 0;
this.metadataStream = options.metadataStream;
this.videoTags = [];
this.audioTags = [];
this.videoTrack = null;
this.audioTrack = null;
this.pendingCaptions = [];
this.pendingMetadata = [];
this.pendingTracks = 0;
this.processedTracks = 0;
CoalesceStream.prototype.init.call(this);
// Take output from multiple
this.push = function(output) {
// buffer incoming captions until the associated video segment
// finishes
if (output.text) {
return this.pendingCaptions.push(output);
}
// buffer incoming id3 tags until the final flush
if (output.frames) {
return this.pendingMetadata.push(output);
}
if (output.track.type === 'video') {
this.videoTrack = output.track;
this.videoTags = output.tags;
this.pendingTracks++;
}
if (output.track.type === 'audio') {
this.audioTrack = output.track;
this.audioTags = output.tags;
this.pendingTracks++;
}
};
};
CoalesceStream.prototype = new Stream();
CoalesceStream.prototype.flush = function(flushSource) {
var
id3,
caption,
i,
timelineStartPts,
event = {
tags: {},
captions: [],
captionStreams: {},
metadata: []
};
if (this.pendingTracks < this.numberOfTracks) {
if (flushSource !== 'VideoSegmentStream' &&
flushSource !== 'AudioSegmentStream') {
// Return because we haven't received a flush from a data-generating
// portion of the segment (meaning that we have only recieved meta-data
// or captions.)
return;
} else if (this.pendingTracks === 0) {
// In the case where we receive a flush without any data having been
// received we consider it an emitted track for the purposes of coalescing
// `done` events.
// We do this for the case where there is an audio and video track in the
// segment but no audio data. (seen in several playlists with alternate
// audio tracks and no audio present in the main TS segments.)
this.processedTracks++;
if (this.processedTracks < this.numberOfTracks) {
return;
}
}
}
this.processedTracks += this.pendingTracks;
this.pendingTracks = 0;
if (this.processedTracks < this.numberOfTracks) {
return;
}
if (this.videoTrack) {
timelineStartPts = this.videoTrack.timelineStartInfo.pts;
} else if (this.audioTrack) {
timelineStartPts = this.audioTrack.timelineStartInfo.pts;
}
event.tags.videoTags = this.videoTags;
event.tags.audioTags = this.audioTags;
// Translate caption PTS times into second offsets into the
// video timeline for the segment, and add track info
for (i = 0; i < this.pendingCaptions.length; i++) {
caption = this.pendingCaptions[i];
caption.startTime = caption.startPts - timelineStartPts;
caption.startTime /= 90e3;
caption.endTime = caption.endPts - timelineStartPts;
caption.endTime /= 90e3;
event.captionStreams[caption.stream] = true;
event.captions.push(caption);
}
// Translate ID3 frame PTS times into second offsets into the
// video timeline for the segment
for (i = 0; i < this.pendingMetadata.length; i++) {
id3 = this.pendingMetadata[i];
id3.cueTime = id3.pts - timelineStartPts;
id3.cueTime /= 90e3;
event.metadata.push(id3);
}
// We add this to every single emitted segment even though we only need
// it for the first
event.metadata.dispatchType = this.metadataStream.dispatchType;
// Reset stream state
this.videoTrack = null;
this.audioTrack = null;
this.videoTags = [];
this.audioTags = [];
this.pendingCaptions.length = 0;
this.pendingMetadata.length = 0;
this.pendingTracks = 0;
this.processedTracks = 0;
// Emit the final segment
this.trigger('data', event);
this.trigger('done');
};
module.exports = CoalesceStream;

/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var FlvTag = require('./flv-tag.js');
// For information on the FLV format, see
// http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf.
// Technically, this function returns the header and a metadata FLV tag
// if duration is greater than zero
// duration in seconds
// @return {object} the bytes of the FLV header as a Uint8Array
var getFlvHeader = function(duration, audio, video) { // :ByteArray {
var
headBytes = new Uint8Array(3 + 1 + 1 + 4),
head = new DataView(headBytes.buffer),
metadata,
result,
metadataLength;
// default arguments
duration = duration || 0;
audio = audio === undefined ? true : audio;
video = video === undefined ? true : video;
// signature
head.setUint8(0, 0x46); // 'F'
head.setUint8(1, 0x4c); // 'L'
head.setUint8(2, 0x56); // 'V'
// version
head.setUint8(3, 0x01);
// flags
head.setUint8(4, (audio ? 0x04 : 0x00) | (video ? 0x01 : 0x00));
// data offset, should be 9 for FLV v1
head.setUint32(5, headBytes.byteLength);
// init the first FLV tag
if (duration <= 0) {
// no duration available so just write the first field of the first
// FLV tag
result = new Uint8Array(headBytes.byteLength + 4);
result.set(headBytes);
result.set([0, 0, 0, 0], headBytes.byteLength);
return result;
}
// write out the duration metadata tag
metadata = new FlvTag(FlvTag.METADATA_TAG);
metadata.pts = metadata.dts = 0;
metadata.writeMetaDataDouble('duration', duration);
metadataLength = metadata.finalize().length;
result = new Uint8Array(headBytes.byteLength + metadataLength);
result.set(headBytes);
// copy the finalized metadata tag in after the header; note that
// Uint8Array#set takes the source array first and the offset second
result.set(metadata.bytes, headBytes.byteLength);
return result;
};
module.exports = getFlvHeader;
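// Example (illustration only): with no duration, getFlvHeader returns
// the 9-byte FLV header plus the 4-byte PreviousTagSize0 field, i.e.
// 13 bytes in total; a positive duration appends an onMetaData tag.
// var header = getFlvHeader(0, true, true); // header.byteLength === 13
// var headerWithDuration = getFlvHeader(30, true, true); // adds metadata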

/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* An object that stores the bytes of an FLV tag and methods for
* querying and manipulating that data.
* @see http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf
*/
'use strict';
var FlvTag;
// (type:uint, extraData:Boolean = false) extends ByteArray
FlvTag = function(type, extraData) {
var
// Counter if this is a metadata tag, NAL start marker if this is a video
// tag. Unused if this is an audio tag
adHoc = 0, // :uint
// The default size is 16kb but this is not enough to hold iframe
// data and the resizing algorithm costs a bit so we create a larger
// starting buffer for video tags
bufferStartSize = 16384,
// checks whether the FLV tag has enough capacity to accept the proposed
// write and re-allocates the internal buffers if necessary
prepareWrite = function(flv, count) {
var
bytes,
minLength = flv.position + count;
if (minLength < flv.bytes.byteLength) {
// there's enough capacity so do nothing
return;
}
// allocate a new buffer and copy over the data that will not be modified
bytes = new Uint8Array(minLength * 2);
bytes.set(flv.bytes.subarray(0, flv.position), 0);
flv.bytes = bytes;
flv.view = new DataView(flv.bytes.buffer);
},
// commonly used metadata properties
widthBytes = FlvTag.widthBytes || new Uint8Array('width'.length),
heightBytes = FlvTag.heightBytes || new Uint8Array('height'.length),
videocodecidBytes = FlvTag.videocodecidBytes || new Uint8Array('videocodecid'.length),
i;
if (!FlvTag.widthBytes) {
// calculating the bytes of common metadata names ahead of time makes the
// corresponding writes faster because we don't have to loop over the
// characters
// re-test with test/perf.html if you're planning on changing this
for (i = 0; i < 'width'.length; i++) {
widthBytes[i] = 'width'.charCodeAt(i);
}
for (i = 0; i < 'height'.length; i++) {
heightBytes[i] = 'height'.charCodeAt(i);
}
for (i = 0; i < 'videocodecid'.length; i++) {
videocodecidBytes[i] = 'videocodecid'.charCodeAt(i);
}
FlvTag.widthBytes = widthBytes;
FlvTag.heightBytes = heightBytes;
FlvTag.videocodecidBytes = videocodecidBytes;
}
this.keyFrame = false; // :Boolean
switch (type) {
case FlvTag.VIDEO_TAG:
this.length = 16;
// Use a larger starting buffer for video tags (16384 * 6 bytes)
bufferStartSize *= 6;
break;
case FlvTag.AUDIO_TAG:
this.length = 13;
this.keyFrame = true;
break;
case FlvTag.METADATA_TAG:
this.length = 29;
this.keyFrame = true;
break;
default:
throw new Error('Unknown FLV tag type');
}
this.bytes = new Uint8Array(bufferStartSize);
this.view = new DataView(this.bytes.buffer);
this.bytes[0] = type;
this.position = this.length;
this.keyFrame = extraData; // Defaults to false
// presentation timestamp
this.pts = 0;
// decoder timestamp
this.dts = 0;
// ByteArray#writeBytes(bytes:ByteArray, offset:uint = 0, length:uint = 0)
this.writeBytes = function(bytes, offset, length) {
var
start = offset || 0,
end;
length = length || bytes.byteLength;
end = start + length;
prepareWrite(this, length);
this.bytes.set(bytes.subarray(start, end), this.position);
this.position += length;
this.length = Math.max(this.length, this.position);
};
// ByteArray#writeByte(value:int):void
this.writeByte = function(byte) {
prepareWrite(this, 1);
this.bytes[this.position] = byte;
this.position++;
this.length = Math.max(this.length, this.position);
};
// ByteArray#writeShort(value:int):void
this.writeShort = function(short) {
prepareWrite(this, 2);
this.view.setUint16(this.position, short);
this.position += 2;
this.length = Math.max(this.length, this.position);
};
// Negative index into array
// (pos:uint):int
this.negIndex = function(pos) {
return this.bytes[this.length - pos];
};
// The functions below ONLY work when this[0] == VIDEO_TAG.
// We are not going to check for that because we don't want the overhead
// (nal:ByteArray = null):int
this.nalUnitSize = function() {
if (adHoc === 0) {
return 0;
}
return this.length - (adHoc + 4);
};
this.startNalUnit = function() {
// remember position and add 4 bytes
if (adHoc > 0) {
throw new Error('Attempted to create new NAL without closing the old one');
}
// reserve 4 bytes for nal unit size
adHoc = this.length;
this.length += 4;
this.position = this.length;
};
// (nal:ByteArray = null):void
this.endNalUnit = function(nalContainer) {
var
nalStart, // :uint
nalLength; // :uint
// Rewind to the marker and write the size
if (this.length === adHoc + 4) {
// we started a nal unit, but didn't write one, so roll back the 4 byte size value
this.length -= 4;
} else if (adHoc > 0) {
nalStart = adHoc + 4;
nalLength = this.length - nalStart;
this.position = adHoc;
this.view.setUint32(this.position, nalLength);
this.position = this.length;
if (nalContainer) {
// Add the tag to the NAL unit
nalContainer.push(this.bytes.subarray(nalStart, nalStart + nalLength));
}
}
adHoc = 0;
};
/**
* Write out a 64-bit floating point valued metadata property. This method is
* called frequently during a typical parse and needs to be fast.
*/
// (key:String, val:Number):void
this.writeMetaDataDouble = function(key, val) {
var i;
prepareWrite(this, 2 + key.length + 9);
// write size of property name
this.view.setUint16(this.position, key.length);
this.position += 2;
// this next part looks terrible but it improves parser throughput by
// 10kB/s in my testing
// write property name
if (key === 'width') {
this.bytes.set(widthBytes, this.position);
this.position += 5;
} else if (key === 'height') {
this.bytes.set(heightBytes, this.position);
this.position += 6;
} else if (key === 'videocodecid') {
this.bytes.set(videocodecidBytes, this.position);
this.position += 12;
} else {
for (i = 0; i < key.length; i++) {
this.bytes[this.position] = key.charCodeAt(i);
this.position++;
}
}
// skip null byte
this.position++;
// write property value
this.view.setFloat64(this.position, val);
this.position += 8;
// update flv tag length
this.length = Math.max(this.length, this.position);
++adHoc;
};
// (key:String, val:Boolean):void
this.writeMetaDataBoolean = function(key, val) {
var i;
prepareWrite(this, 2);
this.view.setUint16(this.position, key.length);
this.position += 2;
for (i = 0; i < key.length; i++) {
// if key.charCodeAt(i) >= 255, handle error
prepareWrite(this, 1);
this.bytes[this.position] = key.charCodeAt(i);
this.position++;
}
prepareWrite(this, 2);
this.view.setUint8(this.position, 0x01);
this.position++;
this.view.setUint8(this.position, val ? 0x01 : 0x00);
this.position++;
this.length = Math.max(this.length, this.position);
++adHoc;
};
// ():ByteArray
this.finalize = function() {
var
dtsDelta, // :int
len; // :int
switch (this.bytes[0]) {
// Video Data
case FlvTag.VIDEO_TAG:
// We only support AVC, 1 = key frame (for AVC, a seekable
// frame), 2 = inter frame (for AVC, a non-seekable frame)
this.bytes[11] = ((this.keyFrame || extraData) ? 0x10 : 0x20) | 0x07;
this.bytes[12] = extraData ? 0x00 : 0x01;
dtsDelta = this.pts - this.dts;
this.bytes[13] = (dtsDelta & 0x00FF0000) >>> 16;
this.bytes[14] = (dtsDelta & 0x0000FF00) >>> 8;
this.bytes[15] = (dtsDelta & 0x000000FF) >>> 0;
break;
case FlvTag.AUDIO_TAG:
this.bytes[11] = 0xAF; // AAC, 44 kHz, 16-bit stereo
this.bytes[12] = extraData ? 0x00 : 0x01;
break;
case FlvTag.METADATA_TAG:
this.position = 11;
this.view.setUint8(this.position, 0x02); // String type
this.position++;
this.view.setUint16(this.position, 0x0A); // 10 Bytes
this.position += 2;
// set "onMetaData"
this.bytes.set([0x6f, 0x6e, 0x4d, 0x65,
0x74, 0x61, 0x44, 0x61,
0x74, 0x61], this.position);
this.position += 10;
this.bytes[this.position] = 0x08; // Array type
this.position++;
this.view.setUint32(this.position, adHoc);
this.position = this.length;
this.bytes.set([0, 0, 9], this.position);
this.position += 3; // End Data Tag
this.length = this.position;
break;
}
len = this.length - 11;
// write the DataSize field
this.bytes[ 1] = (len & 0x00FF0000) >>> 16;
this.bytes[ 2] = (len & 0x0000FF00) >>> 8;
this.bytes[ 3] = (len & 0x000000FF) >>> 0;
// write the Timestamp
this.bytes[ 4] = (this.dts & 0x00FF0000) >>> 16;
this.bytes[ 5] = (this.dts & 0x0000FF00) >>> 8;
this.bytes[ 6] = (this.dts & 0x000000FF) >>> 0;
this.bytes[ 7] = (this.dts & 0xFF000000) >>> 24;
// write the StreamID
this.bytes[ 8] = 0;
this.bytes[ 9] = 0;
this.bytes[10] = 0;
// We may be near the end of the buffer with room for less than a
// uint32, so prepareWrite 4 bytes before the setUint32 below
prepareWrite(this, 4);
this.view.setUint32(this.length, this.length);
this.length += 4;
this.position += 4;
// trim down the byte buffer to what is actually being used
this.bytes = this.bytes.subarray(0, this.length);
this.frameTime = FlvTag.frameTime(this.bytes);
// if bytes.bytelength isn't equal to this.length, handle error
return this;
};
};
FlvTag.AUDIO_TAG = 0x08; // == 8, :uint
FlvTag.VIDEO_TAG = 0x09; // == 9, :uint
FlvTag.METADATA_TAG = 0x12; // == 18, :uint
// (tag:ByteArray):Boolean {
FlvTag.isAudioFrame = function(tag) {
return FlvTag.AUDIO_TAG === tag[0];
};
// (tag:ByteArray):Boolean {
FlvTag.isVideoFrame = function(tag) {
return FlvTag.VIDEO_TAG === tag[0];
};
// (tag:ByteArray):Boolean {
FlvTag.isMetaData = function(tag) {
return FlvTag.METADATA_TAG === tag[0];
};
// (tag:ByteArray):Boolean {
FlvTag.isKeyFrame = function(tag) {
if (FlvTag.isVideoFrame(tag)) {
return tag[11] === 0x17;
}
if (FlvTag.isAudioFrame(tag)) {
return true;
}
if (FlvTag.isMetaData(tag)) {
return true;
}
return false;
};
// (tag:ByteArray):uint {
FlvTag.frameTime = function(tag) {
var pts = tag[ 4] << 16; // :uint
pts |= tag[ 5] << 8;
pts |= tag[ 6] << 0;
pts |= tag[ 7] << 24;
return pts;
};
module.exports = FlvTag;
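// Example (illustration only): building and finalizing a metadata tag.
// var tag = new FlvTag(FlvTag.METADATA_TAG);
// tag.pts = tag.dts = 0;
// tag.writeMetaDataDouble('duration', 10);
// tag.finalize(); // fills in DataSize, timestamp and PreviousTagSize
// FlvTag.isMetaData(tag.bytes); // true, since tag.bytes[0] === 0x12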

/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
module.exports = {
tag: require('./flv-tag'),
Transmuxer: require('./transmuxer'),
getFlvHeader: require('./flv-header')
};
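// Example (illustration only): consumers typically reach these pieces
// through this entry point rather than the individual files.
// var flv = require('mux.js/lib/flv');
// var transmuxer = new flv.Transmuxer();
// var header = flv.getFlvHeader(0, true, true);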

/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var TagList = function() {
var self = this;
this.list = [];
this.push = function(tag) {
this.list.push({
bytes: tag.bytes,
dts: tag.dts,
pts: tag.pts,
keyFrame: tag.keyFrame,
metaDataTag: tag.metaDataTag
});
};
Object.defineProperty(this, 'length', {
get: function() {
return self.list.length;
}
});
};
module.exports = TagList;
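// Example (illustration only): TagList copies just the fields the
// coalesce stage needs, so the backing FlvTag buffers can be reused.
// var tags = new TagList();
// tags.push(finalizedFlvTag); // stores bytes, pts, dts, keyFrame, metaDataTag
// tags.length; // 1
// tags.list; // plain objects, ready to hand to CoalesceStream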

/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var Stream = require('../utils/stream.js');
var FlvTag = require('./flv-tag.js');
var m2ts = require('../m2ts/m2ts.js');
var AdtsStream = require('../codecs/adts.js');
var H264Stream = require('../codecs/h264').H264Stream;
var CoalesceStream = require('./coalesce-stream.js');
var TagList = require('./tag-list.js');
var
Transmuxer,
VideoSegmentStream,
AudioSegmentStream,
collectTimelineInfo,
metaDataTag,
extraDataTag;
/**
 * Store information about the start and end of the track and the
* duration for each frame/sample we process in order to calculate
* the baseMediaDecodeTime
*/
collectTimelineInfo = function(track, data) {
if (typeof data.pts === 'number') {
if (track.timelineStartInfo.pts === undefined) {
track.timelineStartInfo.pts = data.pts;
} else {
track.timelineStartInfo.pts =
Math.min(track.timelineStartInfo.pts, data.pts);
}
}
if (typeof data.dts === 'number') {
if (track.timelineStartInfo.dts === undefined) {
track.timelineStartInfo.dts = data.dts;
} else {
track.timelineStartInfo.dts =
Math.min(track.timelineStartInfo.dts, data.dts);
}
}
};
metaDataTag = function(track, pts) {
var
tag = new FlvTag(FlvTag.METADATA_TAG); // :FlvTag
tag.dts = pts;
tag.pts = pts;
tag.writeMetaDataDouble('videocodecid', 7);
tag.writeMetaDataDouble('width', track.width);
tag.writeMetaDataDouble('height', track.height);
return tag;
};
extraDataTag = function(track, pts) {
var
i,
tag = new FlvTag(FlvTag.VIDEO_TAG, true);
tag.dts = pts;
tag.pts = pts;
tag.writeByte(0x01);// version
tag.writeByte(track.profileIdc);// profile
tag.writeByte(track.profileCompatibility);// compatibility
tag.writeByte(track.levelIdc);// level
tag.writeByte(0xFC | 0x03); // reserved (6 bits), NALU length size - 1 (2 bits)
tag.writeByte(0xE0 | 0x01); // reserved (3 bits), num of SPS (5 bits)
tag.writeShort(track.sps[0].length); // 2 bytes for length of SPS
tag.writeBytes(track.sps[0]); // SPS
tag.writeByte(track.pps.length); // num of PPS (will there ever be more than 1 PPS?)
for (i = 0; i < track.pps.length; ++i) {
tag.writeShort(track.pps[i].length); // 2 bytes for length of PPS
tag.writeBytes(track.pps[i]); // data of PPS
}
return tag;
};
/**
* Constructs a single-track, media segment from AAC data
* events. The output of this stream can be fed to flash.
*/
AudioSegmentStream = function(track) {
var
adtsFrames = [],
videoKeyFrames = [],
oldExtraData;
AudioSegmentStream.prototype.init.call(this);
this.push = function(data) {
collectTimelineInfo(track, data);
if (track) {
track.audioobjecttype = data.audioobjecttype;
track.channelcount = data.channelcount;
track.samplerate = data.samplerate;
track.samplingfrequencyindex = data.samplingfrequencyindex;
track.samplesize = data.samplesize;
track.extraData = (track.audioobjecttype << 11) |
(track.samplingfrequencyindex << 7) |
(track.channelcount << 3);
}
data.pts = Math.round(data.pts / 90);
data.dts = Math.round(data.dts / 90);
// buffer audio data until flush() is called
adtsFrames.push(data);
};
this.flush = function() {
var currentFrame, adtsFrame, lastMetaPts, tags = new TagList();
// return early if no audio data has been observed
if (adtsFrames.length === 0) {
this.trigger('done', 'AudioSegmentStream');
return;
}
lastMetaPts = -Infinity;
while (adtsFrames.length) {
currentFrame = adtsFrames.shift();
// write out a metadata frame at every video key frame
if (videoKeyFrames.length && currentFrame.pts >= videoKeyFrames[0]) {
lastMetaPts = videoKeyFrames.shift();
this.writeMetaDataTags(tags, lastMetaPts);
}
// also write out metadata tags every 1 second so that the decoder
// is re-initialized quickly after seeking into a different
// audio configuration.
if (track.extraData !== oldExtraData || currentFrame.pts - lastMetaPts >= 1000) {
this.writeMetaDataTags(tags, currentFrame.pts);
oldExtraData = track.extraData;
lastMetaPts = currentFrame.pts;
}
adtsFrame = new FlvTag(FlvTag.AUDIO_TAG);
adtsFrame.pts = currentFrame.pts;
adtsFrame.dts = currentFrame.dts;
adtsFrame.writeBytes(currentFrame.data);
tags.push(adtsFrame.finalize());
}
videoKeyFrames.length = 0;
oldExtraData = null;
this.trigger('data', {track: track, tags: tags.list});
this.trigger('done', 'AudioSegmentStream');
};
this.writeMetaDataTags = function(tags, pts) {
var adtsFrame;
adtsFrame = new FlvTag(FlvTag.METADATA_TAG);
// For audio, DTS is always the same as PTS. We want to set the DTS
// however so we can compare with video DTS to determine approximate
// packet order
adtsFrame.pts = pts;
adtsFrame.dts = pts;
// AAC is always 10
adtsFrame.writeMetaDataDouble('audiocodecid', 10);
adtsFrame.writeMetaDataBoolean('stereo', track.channelcount === 2);
adtsFrame.writeMetaDataDouble('audiosamplerate', track.samplerate);
// Is AAC always 16 bit?
adtsFrame.writeMetaDataDouble('audiosamplesize', 16);
tags.push(adtsFrame.finalize());
adtsFrame = new FlvTag(FlvTag.AUDIO_TAG, true);
// For audio, DTS is always the same as PTS. We want to set the DTS
// however so we can compare with video DTS to determine approximate
// packet order
adtsFrame.pts = pts;
adtsFrame.dts = pts;
adtsFrame.view.setUint16(adtsFrame.position, track.extraData);
adtsFrame.position += 2;
adtsFrame.length = Math.max(adtsFrame.length, adtsFrame.position);
tags.push(adtsFrame.finalize());
};
this.onVideoKeyFrame = function(pts) {
videoKeyFrames.push(pts);
};
};
AudioSegmentStream.prototype = new Stream();
/**
* Store FlvTags for the h264 stream
* @param track {object} track metadata configuration
*/
VideoSegmentStream = function(track) {
var
nalUnits = [],
config,
h264Frame;
VideoSegmentStream.prototype.init.call(this);
this.finishFrame = function(tags, frame) {
if (!frame) {
return;
}
// Check if keyframe and the length of tags.
// This makes sure we write metadata on the first frame of a segment.
if (config && track && track.newMetadata &&
(frame.keyFrame || tags.length === 0)) {
// Push extra data on every IDR frame in case we did a stream change + seek
var metaTag = metaDataTag(config, frame.dts).finalize();
var extraTag = extraDataTag(track, frame.dts).finalize();
metaTag.metaDataTag = extraTag.metaDataTag = true;
tags.push(metaTag);
tags.push(extraTag);
track.newMetadata = false;
this.trigger('keyframe', frame.dts);
}
frame.endNalUnit();
tags.push(frame.finalize());
h264Frame = null;
};
this.push = function(data) {
collectTimelineInfo(track, data);
data.pts = Math.round(data.pts / 90);
data.dts = Math.round(data.dts / 90);
// buffer video until flush() is called
nalUnits.push(data);
};
this.flush = function() {
var
currentNal,
tags = new TagList();
// Throw away nalUnits at the start of the byte stream until we find
// the first AUD
while (nalUnits.length) {
if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {
break;
}
nalUnits.shift();
}
// return early if no video data has been observed
if (nalUnits.length === 0) {
this.trigger('done', 'VideoSegmentStream');
return;
}
while (nalUnits.length) {
currentNal = nalUnits.shift();
// record the track config
if (currentNal.nalUnitType === 'seq_parameter_set_rbsp') {
track.newMetadata = true;
config = currentNal.config;
track.width = config.width;
track.height = config.height;
track.sps = [currentNal.data];
track.profileIdc = config.profileIdc;
track.levelIdc = config.levelIdc;
track.profileCompatibility = config.profileCompatibility;
h264Frame.endNalUnit();
} else if (currentNal.nalUnitType === 'pic_parameter_set_rbsp') {
track.newMetadata = true;
track.pps = [currentNal.data];
h264Frame.endNalUnit();
} else if (currentNal.nalUnitType === 'access_unit_delimiter_rbsp') {
if (h264Frame) {
this.finishFrame(tags, h264Frame);
}
h264Frame = new FlvTag(FlvTag.VIDEO_TAG);
h264Frame.pts = currentNal.pts;
h264Frame.dts = currentNal.dts;
} else {
if (currentNal.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') {
// the current sample is a key frame
h264Frame.keyFrame = true;
}
h264Frame.endNalUnit();
}
h264Frame.startNalUnit();
h264Frame.writeBytes(currentNal.data);
}
if (h264Frame) {
this.finishFrame(tags, h264Frame);
}
this.trigger('data', {track: track, tags: tags.list});
// Continue with the flush process now
this.trigger('done', 'VideoSegmentStream');
};
};
VideoSegmentStream.prototype = new Stream();
/**
 * An object that incrementally transmuxes MPEG-2 Transport Stream
* chunks into an FLV.
*/
Transmuxer = function(options) {
var
self = this,
packetStream, parseStream, elementaryStream,
videoTimestampRolloverStream, audioTimestampRolloverStream,
timedMetadataTimestampRolloverStream,
adtsStream, h264Stream,
videoSegmentStream, audioSegmentStream, captionStream,
coalesceStream;
Transmuxer.prototype.init.call(this);
options = options || {};
// expose the metadata stream
this.metadataStream = new m2ts.MetadataStream();
options.metadataStream = this.metadataStream;
// set up the parsing pipeline
packetStream = new m2ts.TransportPacketStream();
parseStream = new m2ts.TransportParseStream();
elementaryStream = new m2ts.ElementaryStream();
videoTimestampRolloverStream = new m2ts.TimestampRolloverStream('video');
audioTimestampRolloverStream = new m2ts.TimestampRolloverStream('audio');
timedMetadataTimestampRolloverStream = new m2ts.TimestampRolloverStream('timed-metadata');
adtsStream = new AdtsStream();
h264Stream = new H264Stream();
coalesceStream = new CoalesceStream(options);
// disassemble MPEG2-TS packets into elementary streams
packetStream
.pipe(parseStream)
.pipe(elementaryStream);
// !!THIS ORDER IS IMPORTANT!!
// demux the streams
elementaryStream
.pipe(videoTimestampRolloverStream)
.pipe(h264Stream);
elementaryStream
.pipe(audioTimestampRolloverStream)
.pipe(adtsStream);
elementaryStream
.pipe(timedMetadataTimestampRolloverStream)
.pipe(this.metadataStream)
.pipe(coalesceStream);
// if CEA-708 parsing is available, hook up a caption stream
captionStream = new m2ts.CaptionStream(options);
h264Stream.pipe(captionStream)
.pipe(coalesceStream);
// hook up the segment streams once track metadata is delivered
elementaryStream.on('data', function(data) {
var i, videoTrack, audioTrack;
if (data.type === 'metadata') {
i = data.tracks.length;
// scan the tracks listed in the metadata
while (i--) {
if (data.tracks[i].type === 'video') {
videoTrack = data.tracks[i];
} else if (data.tracks[i].type === 'audio') {
audioTrack = data.tracks[i];
}
}
// hook up the video segment stream to the first track with h264 data
if (videoTrack && !videoSegmentStream) {
coalesceStream.numberOfTracks++;
videoSegmentStream = new VideoSegmentStream(videoTrack);
// Set up the final part of the video pipeline
h264Stream
.pipe(videoSegmentStream)
.pipe(coalesceStream);
}
if (audioTrack && !audioSegmentStream) {
// hook up the audio segment stream to the first track with aac data
coalesceStream.numberOfTracks++;
audioSegmentStream = new AudioSegmentStream(audioTrack);
// Set up the final part of the audio pipeline
adtsStream
.pipe(audioSegmentStream)
.pipe(coalesceStream);
if (videoSegmentStream) {
videoSegmentStream.on('keyframe', audioSegmentStream.onVideoKeyFrame);
}
}
}
});
// feed incoming data to the front of the parsing pipeline
this.push = function(data) {
packetStream.push(data);
};
// flush any buffered data
this.flush = function() {
// Start at the top of the pipeline and flush all pending work
packetStream.flush();
};
// Caption data has to be reset when seeking outside buffered range
this.resetCaptions = function() {
captionStream.reset();
};
// Re-emit any data coming from the coalesce stream to the outside world
coalesceStream.on('data', function(event) {
self.trigger('data', event);
});
// Let the consumer know we have finished flushing the entire pipeline
coalesceStream.on('done', function() {
self.trigger('done');
});
};
Transmuxer.prototype = new Stream();
// forward compatibility
module.exports = Transmuxer;
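// Usage sketch (illustration only): feeding MPEG-2 TS bytes through
// the FLV transmuxer. tsBytes, a Uint8Array of transport stream data,
// is an assumption for the example.
// var transmuxer = new Transmuxer();
// transmuxer.on('data', function(segment) {
//   // segment.tags.videoTags and segment.tags.audioTags are FLV tags
// });
// transmuxer.push(tsBytes); // may be called repeatedly with partial data
// transmuxer.flush(); // drain the pipeline; fires 'data' then 'done'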

node_modules/mux.js/lib/m2ts/m2ts.js: diff suppressed because it is too large
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Accepts program elementary stream (PES) data events and parses out
* ID3 metadata from them, if present.
* @see http://id3.org/id3v2.3.0
*/
'use strict';
var
Stream = require('../utils/stream'),
StreamTypes = require('./stream-types'),
// return a percent-encoded representation of the specified byte range
// @see http://en.wikipedia.org/wiki/Percent-encoding
percentEncode = function(bytes, start, end) {
var i, result = '';
for (i = start; i < end; i++) {
result += '%' + ('00' + bytes[i].toString(16)).slice(-2);
}
return result;
},
// return the string representation of the specified byte range,
// interpreted as UTF-8.
parseUtf8 = function(bytes, start, end) {
return decodeURIComponent(percentEncode(bytes, start, end));
},
// return the string representation of the specified byte range,
// interpreted as ISO-8859-1.
parseIso88591 = function(bytes, start, end) {
return unescape(percentEncode(bytes, start, end)); // jshint ignore:line
},
parseSyncSafeInteger = function(data) {
return (data[0] << 21) |
(data[1] << 14) |
(data[2] << 7) |
(data[3]);
},
tagParsers = {
TXXX: function(tag) {
var i;
if (tag.data[0] !== 3) {
// ignore frames with unrecognized character encodings
return;
}
for (i = 1; i < tag.data.length; i++) {
if (tag.data[i] === 0) {
// parse the text fields
tag.description = parseUtf8(tag.data, 1, i);
// do not include the null terminator in the tag value
tag.value = parseUtf8(tag.data, i + 1, tag.data.length).replace(/\0*$/, '');
break;
}
}
tag.data = tag.value;
},
WXXX: function(tag) {
var i;
if (tag.data[0] !== 3) {
// ignore frames with unrecognized character encodings
return;
}
for (i = 1; i < tag.data.length; i++) {
if (tag.data[i] === 0) {
// parse the description and URL fields
tag.description = parseUtf8(tag.data, 1, i);
tag.url = parseUtf8(tag.data, i + 1, tag.data.length);
break;
}
}
},
PRIV: function(tag) {
var i;
for (i = 0; i < tag.data.length; i++) {
if (tag.data[i] === 0) {
// parse the owner identifier up to the null byte
tag.owner = parseIso88591(tag.data, 0, i);
break;
}
}
tag.privateData = tag.data.subarray(i + 1);
tag.data = tag.privateData;
}
},
MetadataStream;
MetadataStream = function(options) {
var
settings = {
// the bytes of the program-level descriptor field in MP2T
// see ISO/IEC 13818-1:2013 (E), section 2.6 "Program and
// program element descriptors"
descriptor: options && options.descriptor
},
// the total size in bytes of the ID3 tag being parsed
tagSize = 0,
// tag data that is not complete enough to be parsed
buffer = [],
// the total number of bytes currently in the buffer
bufferSize = 0,
i;
MetadataStream.prototype.init.call(this);
// calculate the text track in-band metadata track dispatch type
// https://html.spec.whatwg.org/multipage/embedded-content.html#steps-to-expose-a-media-resource-specific-text-track
this.dispatchType = StreamTypes.METADATA_STREAM_TYPE.toString(16);
if (settings.descriptor) {
for (i = 0; i < settings.descriptor.length; i++) {
this.dispatchType += ('00' + settings.descriptor[i].toString(16)).slice(-2);
}
}
this.push = function(chunk) {
var tag, frameStart, frameSize, frame, i, frameHeader;
if (chunk.type !== 'timed-metadata') {
return;
}
// if data_alignment_indicator is set in the PES header,
// we must have the start of a new ID3 tag. Assume anything
// remaining in the buffer was malformed and throw it out
if (chunk.dataAlignmentIndicator) {
bufferSize = 0;
buffer.length = 0;
}
// ignore events that don't look like ID3 data
if (buffer.length === 0 &&
(chunk.data.length < 10 ||
chunk.data[0] !== 'I'.charCodeAt(0) ||
chunk.data[1] !== 'D'.charCodeAt(0) ||
chunk.data[2] !== '3'.charCodeAt(0))) {
this.trigger('log', {
level: 'warn',
message: 'Skipping unrecognized metadata packet'
});
return;
}
// add this chunk to the data we've collected so far
buffer.push(chunk);
bufferSize += chunk.data.byteLength;
// grab the size of the entire tag from the ID3 header
if (buffer.length === 1) {
// the tag size is transmitted as a 28-bit syncsafe integer in the
// last four bytes of the ID3 header.
// The most significant bit of each byte is dropped and the
// results concatenated to recover the actual value.
tagSize = parseSyncSafeInteger(chunk.data.subarray(6, 10));
// ID3 reports the tag size excluding the header but it's more
// convenient for our comparisons to include it
tagSize += 10;
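// Worked example (illustration only): syncsafe size bytes
// [0x00, 0x00, 0x02, 0x01] decode as (2 << 7) | 1 = 257, so
// tagSize becomes 257 + 10 = 267 once the header is included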
}
// if the entire frame has not arrived, wait for more data
if (bufferSize < tagSize) {
return;
}
// collect the entire frame so it can be parsed
tag = {
data: new Uint8Array(tagSize),
frames: [],
pts: buffer[0].pts,
dts: buffer[0].dts
};
for (i = 0; i < tagSize;) {
tag.data.set(buffer[0].data.subarray(0, tagSize - i), i);
i += buffer[0].data.byteLength;
bufferSize -= buffer[0].data.byteLength;
buffer.shift();
}
// find the start of the first frame and the end of the tag
frameStart = 10;
if (tag.data[5] & 0x40) {
// advance the frame start past the extended header
frameStart += 4; // header size field
frameStart += parseSyncSafeInteger(tag.data.subarray(10, 14));
// clip any padding off the end
tagSize -= parseSyncSafeInteger(tag.data.subarray(16, 20));
}
// parse one or more ID3 frames
// http://id3.org/id3v2.3.0#ID3v2_frame_overview
do {
// determine the number of bytes in this frame
frameSize = parseSyncSafeInteger(tag.data.subarray(frameStart + 4, frameStart + 8));
if (frameSize < 1) {
this.trigger('log', {
level: 'warn',
message: 'Malformed ID3 frame encountered. Skipping metadata parsing.'
});
return;
}
frameHeader = String.fromCharCode(tag.data[frameStart],
tag.data[frameStart + 1],
tag.data[frameStart + 2],
tag.data[frameStart + 3]);
frame = {
id: frameHeader,
data: tag.data.subarray(frameStart + 10, frameStart + frameSize + 10)
};
frame.key = frame.id;
if (tagParsers[frame.id]) {
tagParsers[frame.id](frame);
// handle the special PRIV frame used to indicate the start
// time for raw AAC data
if (frame.owner === 'com.apple.streaming.transportStreamTimestamp') {
var
d = frame.data,
size = ((d[3] & 0x01) << 30) |
(d[4] << 22) |
(d[5] << 14) |
(d[6] << 6) |
(d[7] >>> 2);
size *= 4;
size += d[7] & 0x03;
frame.timeStamp = size;
// in raw AAC, all subsequent data will be timestamped based
// on the value of this frame. We couldn't have known the
// appropriate pts and dts before parsing this ID3 tag, so set
// those values now
if (tag.pts === undefined && tag.dts === undefined) {
tag.pts = frame.timeStamp;
tag.dts = frame.timeStamp;
}
this.trigger('timestamp', frame);
}
}
tag.frames.push(frame);
frameStart += 10; // advance past the frame header
frameStart += frameSize; // advance past the frame body
} while (frameStart < tagSize);
this.trigger('data', tag);
};
};
MetadataStream.prototype = new Stream();
module.exports = MetadataStream;
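// A minimal usage sketch (illustrative only; the chunk below mimics the shape
// emitted by the transport stream parser, and id3TagBytes stands in for a
// complete ID3v2 tag as a Uint8Array):
//
// var metadataStream = new MetadataStream();
// metadataStream.on('data', function(tag) {
//   console.log(tag.frames.map(function(frame) { return frame.id; }));
// });
// metadataStream.push({
//   type: 'timed-metadata',
//   dataAlignmentIndicator: true,
//   pts: 90000,
//   dts: 90000,
//   data: id3TagBytes
// });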
/**
* Constructs a single-track, ISO BMFF media segment from H264 data
* events. The output of this stream can be fed to a SourceBuffer
* configured with a suitable initialization segment.
* @param track {object} track metadata configuration
* @param options {object} transmuxer options object
* @param options.alignGopsAtEnd {boolean} If true, start from the end of the
* gopsToAlignWith list when attempting to align gop pts
*/
'use strict';
var Stream = require('../utils/stream.js');
var mp4 = require('../mp4/mp4-generator.js');
var trackInfo = require('../mp4/track-decode-info.js');
var frameUtils = require('../mp4/frame-utils');
var VIDEO_PROPERTIES = require('../constants/video-properties.js');
var VideoSegmentStream = function(track, options) {
var
sequenceNumber = 0,
nalUnits = [],
frameCache = [],
// gopsToAlignWith = [],
config,
pps,
segmentStartPts = null,
segmentEndPts = null,
gops,
ensureNextFrameIsKeyFrame = true;
options = options || {};
VideoSegmentStream.prototype.init.call(this);
this.push = function(nalUnit) {
trackInfo.collectDtsInfo(track, nalUnit);
if (typeof track.timelineStartInfo.dts === 'undefined') {
track.timelineStartInfo.dts = nalUnit.dts;
}
// record the track config
if (nalUnit.nalUnitType === 'seq_parameter_set_rbsp' && !config) {
config = nalUnit.config;
track.sps = [nalUnit.data];
VIDEO_PROPERTIES.forEach(function(prop) {
track[prop] = config[prop];
}, this);
}
if (nalUnit.nalUnitType === 'pic_parameter_set_rbsp' &&
!pps) {
pps = nalUnit.data;
track.pps = [nalUnit.data];
}
// buffer video until flush() is called
nalUnits.push(nalUnit);
};
this.processNals_ = function(cacheLastFrame) {
var i;
nalUnits = frameCache.concat(nalUnits);
// Throw away nalUnits at the start of the byte stream until
// we find the first AUD
while (nalUnits.length) {
if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {
break;
}
nalUnits.shift();
}
// Return early if no video data has been observed
if (nalUnits.length === 0) {
return;
}
var frames = frameUtils.groupNalsIntoFrames(nalUnits);
if (!frames.length) {
return;
}
// note that the frame cache may also protect us from cases where we haven't
// pushed data for the entire first or last frame yet
frameCache = frames[frames.length - 1];
if (cacheLastFrame) {
frames.pop();
frames.duration -= frameCache.duration;
frames.nalCount -= frameCache.length;
frames.byteLength -= frameCache.byteLength;
}
if (!frames.length) {
nalUnits = [];
return;
}
this.trigger('timelineStartInfo', track.timelineStartInfo);
if (ensureNextFrameIsKeyFrame) {
gops = frameUtils.groupFramesIntoGops(frames);
if (!gops[0][0].keyFrame) {
gops = frameUtils.extendFirstKeyFrame(gops);
if (!gops[0][0].keyFrame) {
// we haven't yet gotten a key frame, so reset nal units to wait for more nal
// units
nalUnits = ([].concat.apply([], frames)).concat(frameCache);
frameCache = [];
return;
}
frames = [].concat.apply([], gops);
frames.duration = gops.duration;
}
ensureNextFrameIsKeyFrame = false;
}
if (segmentStartPts === null) {
segmentStartPts = frames[0].pts;
segmentEndPts = segmentStartPts;
}
segmentEndPts += frames.duration;
this.trigger('timingInfo', {
start: segmentStartPts,
end: segmentEndPts
});
for (i = 0; i < frames.length; i++) {
var frame = frames[i];
track.samples = frameUtils.generateSampleTableForFrame(frame);
var mdat = mp4.mdat(frameUtils.concatenateNalDataForFrame(frame));
trackInfo.clearDtsInfo(track);
trackInfo.collectDtsInfo(track, frame);
track.baseMediaDecodeTime = trackInfo.calculateTrackBaseMediaDecodeTime(
track, options.keepOriginalTimestamps);
var moof = mp4.moof(sequenceNumber, [track]);
sequenceNumber++;
track.initSegment = mp4.initSegment([track]);
var boxes = new Uint8Array(moof.byteLength + mdat.byteLength);
boxes.set(moof);
boxes.set(mdat, moof.byteLength);
this.trigger('data', {
track: track,
boxes: boxes,
sequence: sequenceNumber,
videoFrameDts: frame.dts,
videoFramePts: frame.pts
});
}
nalUnits = [];
};
this.resetTimingAndConfig_ = function() {
config = undefined;
pps = undefined;
segmentStartPts = null;
segmentEndPts = null;
};
this.partialFlush = function() {
this.processNals_(true);
this.trigger('partialdone', 'VideoSegmentStream');
};
this.flush = function() {
this.processNals_(false);
// reset config and pps because they may differ across segments
// for instance, when we are rendition switching
this.resetTimingAndConfig_();
this.trigger('done', 'VideoSegmentStream');
};
this.endTimeline = function() {
this.flush();
this.trigger('endedtimeline', 'VideoSegmentStream');
};
this.reset = function() {
this.resetTimingAndConfig_();
frameCache = [];
nalUnits = [];
ensureNextFrameIsKeyFrame = true;
this.trigger('reset');
};
};
VideoSegmentStream.prototype = new Stream();
module.exports = VideoSegmentStream;
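// A minimal wiring sketch (illustrative; the track object normally comes from
// the demuxer, so the fields shown here are assumptions):
//
// var videoTrack = {
//   id: 1,
//   type: 'video',
//   codec: 'avc',
//   timelineStartInfo: { baseMediaDecodeTime: 0 }
// };
// var videoSegmentStream = new VideoSegmentStream(videoTrack, {
//   keepOriginalTimestamps: false
// });
// videoSegmentStream.on('data', function(segment) {
//   // segment.boxes is a Uint8Array holding a complete moof/mdat pair
// });
// h264Stream.pipe(videoSegmentStream); // h264Stream emits parsed NAL units
// videoSegmentStream.flush(); // emit whatever has buffered so far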
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Reads in-band caption information from a video elementary
* stream. Captions must follow the CEA-708 standard for injection
* into an MPEG-2 transport stream.
* @see https://en.wikipedia.org/wiki/CEA-708
* @see https://www.gpo.gov/fdsys/pkg/CFR-2007-title47-vol1/pdf/CFR-2007-title47-vol1-sec15-119.pdf
*/
'use strict';
// Supplemental enhancement information (SEI) NAL units have a
// payload type field to indicate how they are to be
// interpreted. CEA-708 caption content is always transmitted with
// payload type 0x04.
var USER_DATA_REGISTERED_ITU_T_T35 = 4,
RBSP_TRAILING_BITS = 128;
/**
* Parse a supplemental enhancement information (SEI) NAL unit.
* Stops parsing once a message of type ITU T T35 has been found.
*
* @param bytes {Uint8Array} the bytes of a SEI NAL unit
* @return {object} the parsed SEI payload
* @see Rec. ITU-T H.264, 7.3.2.3.1
*/
var parseSei = function(bytes) {
var
i = 0,
result = {
payloadType: -1,
payloadSize: 0
},
payloadType = 0,
payloadSize = 0;
// go through the sei_rbsp parsing each individual sei_message
while (i < bytes.byteLength) {
// stop once we have hit the end of the sei_rbsp
if (bytes[i] === RBSP_TRAILING_BITS) {
break;
}
// Parse payload type
while (bytes[i] === 0xFF) {
payloadType += 255;
i++;
}
payloadType += bytes[i++];
// Parse payload size
while (bytes[i] === 0xFF) {
payloadSize += 255;
i++;
}
payloadSize += bytes[i++];
// this sei_message is a 608/708 caption so save it and break
// there can only ever be one caption message in a frame's sei
if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) {
var userIdentifier = String.fromCharCode(
bytes[i + 3],
bytes[i + 4],
bytes[i + 5],
bytes[i + 6]);
if (userIdentifier === 'GA94') {
result.payloadType = payloadType;
result.payloadSize = payloadSize;
result.payload = bytes.subarray(i, i + payloadSize);
break;
} else {
result.payload = void 0;
}
}
// skip the payload and parse the next message
i += payloadSize;
payloadType = 0;
payloadSize = 0;
}
return result;
};
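// for example, payload type bytes of 0xFF 0x04 decode to 255 + 4 = 259,
// while a single leading byte of 0x04 is the CEA-708 payload type itself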
// see ANSI/SCTE 128-1 (2013), section 8.1
var parseUserData = function(sei) {
// itu_t_t35_country_code must be 181 (United States) for
// captions
if (sei.payload[0] !== 181) {
return null;
}
// itu_t_t35_provider_code should be 49 (ATSC) for captions
if (((sei.payload[1] << 8) | sei.payload[2]) !== 49) {
return null;
}
// the user_identifier should be "GA94" to indicate ATSC1 data
if (String.fromCharCode(sei.payload[3],
sei.payload[4],
sei.payload[5],
sei.payload[6]) !== 'GA94') {
return null;
}
// finally, user_data_type_code should be 0x03 for caption data
if (sei.payload[7] !== 0x03) {
return null;
}
// return the user_data_type_structure and strip the trailing
// marker bits
return sei.payload.subarray(8, sei.payload.length - 1);
};
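// so a payload that passes every check begins
// 0xB5 0x00 0x31 0x47 0x41 0x39 0x34 0x03
// (country code 181, provider code 49, 'GA94', user_data_type_code 3)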
// see CEA-708-D, section 4.4
var parseCaptionPackets = function(pts, userData) {
var results = [], i, count, offset, data;
// if this is just filler, return immediately
if (!(userData[0] & 0x40)) {
return results;
}
// parse out the cc_data_1 and cc_data_2 fields
count = userData[0] & 0x1f;
for (i = 0; i < count; i++) {
offset = i * 3;
data = {
type: userData[offset + 2] & 0x03,
pts: pts
};
// capture cc data when cc_valid is 1
if (userData[offset + 2] & 0x04) {
data.ccData = (userData[offset + 3] << 8) | userData[offset + 4];
results.push(data);
}
}
return results;
};
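// worked example: userData of [0x41, 0x00, 0xFC, 0x94, 0x2C] has the
// process_cc_data flag (0x40) set and a cc_count of 1; the packet byte 0xFC
// has cc_valid (0x04) set with cc_type 0, so the parser returns
// [{ type: 0, pts: pts, ccData: 0x942C }]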
var discardEmulationPreventionBytes = function(data) {
var
length = data.byteLength,
emulationPreventionBytesPositions = [],
i = 1,
newLength, newData;
// Find all `Emulation Prevention Bytes`
while (i < length - 2) {
if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {
emulationPreventionBytesPositions.push(i + 2);
i += 2;
} else {
i++;
}
}
// If no Emulation Prevention Bytes were found just return the original
// array
if (emulationPreventionBytesPositions.length === 0) {
return data;
}
// Create a new array to hold the NAL unit data
newLength = length - emulationPreventionBytesPositions.length;
newData = new Uint8Array(newLength);
var sourceIndex = 0;
for (i = 0; i < newLength; sourceIndex++, i++) {
if (sourceIndex === emulationPreventionBytesPositions[0]) {
// Skip this byte
sourceIndex++;
// Remove this position index
emulationPreventionBytesPositions.shift();
}
newData[i] = data[sourceIndex];
}
return newData;
};
// exports
module.exports = {
parseSei: parseSei,
parseUserData: parseUserData,
parseCaptionPackets: parseCaptionPackets,
discardEmulationPreventionBytes: discardEmulationPreventionBytes,
USER_DATA_REGISTERED_ITU_T_T35: USER_DATA_REGISTERED_ITU_T_T35
};
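// A quick worked example for discardEmulationPreventionBytes (the input bytes
// are invented for illustration): encoders insert 0x03 after each 0x00 0x00
// pair, so
//
// discardEmulationPreventionBytes(new Uint8Array([0x67, 0x00, 0x00, 0x03, 0x01]));
// // -> Uint8Array [0x67, 0x00, 0x00, 0x01]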
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var
tagTypes = {
0x08: 'audio',
0x09: 'video',
0x12: 'metadata'
},
hex = function(val) {
return '0x' + ('00' + val.toString(16)).slice(-2).toUpperCase();
},
hexStringList = function(data) {
var arr = [], i;
while (data.byteLength > 0) {
i = 0;
arr.push(hex(data[i++]));
data = data.subarray(i);
}
return arr.join(' ');
},
parseAVCTag = function(tag, obj) {
var
avcPacketTypes = [
'AVC Sequence Header',
'AVC NALU',
'AVC End-of-Sequence'
],
compositionTime = ((tag[1] & parseInt('01111111', 2)) << 16) | (tag[2] << 8) | tag[3];
obj = obj || {};
obj.avcPacketType = avcPacketTypes[tag[0]];
obj.CompositionTime = (tag[1] & parseInt('10000000', 2)) ? -compositionTime : compositionTime;
if (tag[0] === 1) {
obj.nalUnitTypeRaw = hexStringList(tag.subarray(4, 100));
} else {
obj.data = hexStringList(tag.subarray(4));
}
return obj;
},
parseVideoTag = function(tag, obj) {
var
frameTypes = [
'Unknown',
'Keyframe (for AVC, a seekable frame)',
'Inter frame (for AVC, a nonseekable frame)',
'Disposable inter frame (H.263 only)',
'Generated keyframe (reserved for server use only)',
'Video info/command frame'
],
codecID = tag[0] & parseInt('00001111', 2);
obj = obj || {};
obj.frameType = frameTypes[(tag[0] & parseInt('11110000', 2)) >>> 4];
obj.codecID = codecID;
if (codecID === 7) {
return parseAVCTag(tag.subarray(1), obj);
}
return obj;
},
parseAACTag = function(tag, obj) {
var packetTypes = [
'AAC Sequence Header',
'AAC Raw'
];
obj = obj || {};
obj.aacPacketType = packetTypes[tag[0]];
obj.data = hexStringList(tag.subarray(1));
return obj;
},
parseAudioTag = function(tag, obj) {
var
formatTable = [
'Linear PCM, platform endian',
'ADPCM',
'MP3',
'Linear PCM, little endian',
'Nellymoser 16-kHz mono',
'Nellymoser 8-kHz mono',
'Nellymoser',
'G.711 A-law logarithmic PCM',
'G.711 mu-law logarithmic PCM',
'reserved',
'AAC',
'Speex',
'MP3 8-kHz',
'Device-specific sound'
],
samplingRateTable = [
'5.5-kHz',
'11-kHz',
'22-kHz',
'44-kHz'
],
soundFormat = (tag[0] & parseInt('11110000', 2)) >>> 4;
obj = obj || {};
obj.soundFormat = formatTable[soundFormat];
obj.soundRate = samplingRateTable[(tag[0] & parseInt('00001100', 2)) >>> 2];
obj.soundSize = ((tag[0] & parseInt('00000010', 2)) >>> 1) ? '16-bit' : '8-bit';
obj.soundType = (tag[0] & parseInt('00000001', 2)) ? 'Stereo' : 'Mono';
if (soundFormat === 10) {
return parseAACTag(tag.subarray(1), obj);
}
return obj;
},
parseGenericTag = function(tag) {
return {
tagType: tagTypes[tag[0]],
dataSize: (tag[1] << 16) | (tag[2] << 8) | tag[3],
timestamp: (tag[7] << 24) | (tag[4] << 16) | (tag[5] << 8) | tag[6],
streamID: (tag[8] << 16) | (tag[9] << 8) | tag[10]
};
},
inspectFlvTag = function(tag) {
var header = parseGenericTag(tag);
switch (tag[0]) {
case 0x08:
parseAudioTag(tag.subarray(11), header);
break;
case 0x09:
parseVideoTag(tag.subarray(11), header);
break;
case 0x12:
// metadata tags need no parsing beyond the generic header fields
}
return header;
},
inspectFlv = function(bytes) {
var i = 9, // header
dataSize,
parsedResults = [],
tag;
// traverse the tags
i += 4; // skip previous tag size
while (i < bytes.byteLength) {
dataSize = bytes[i + 1] << 16;
dataSize |= bytes[i + 2] << 8;
dataSize |= bytes[i + 3];
dataSize += 11;
tag = bytes.subarray(i, i + dataSize);
parsedResults.push(inspectFlvTag(tag));
i += dataSize + 4;
}
return parsedResults;
},
textifyFlv = function(flvTagArray) {
return JSON.stringify(flvTagArray, null, 2);
};
module.exports = {
inspectTag: inspectFlvTag,
inspect: inspectFlv,
textify: textifyFlv
};
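// A sketch of inspecting a hand-built FLV buffer (the byte values are
// invented, and the require path is an assumption): 9 header bytes, a 4-byte
// previous-tag size, then one metadata tag with an 11-byte header and 2 bytes
// of data.
//
// var flvInspector = require('./flv-inspector');
// var bytes = new Uint8Array(26);
// bytes[13] = 0x12; // tagType: metadata
// bytes[16] = 2;    // dataSize: 2
// console.log(flvInspector.textify(flvInspector.inspect(bytes)));
// // -> [{ "tagType": "metadata", "dataSize": 2, "timestamp": 0, "streamID": 0 }]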
var parseSampleFlags = function(flags) {
return {
isLeading: (flags[0] & 0x0c) >>> 2,
dependsOn: flags[0] & 0x03,
isDependedOn: (flags[1] & 0xc0) >>> 6,
hasRedundancy: (flags[1] & 0x30) >>> 4,
paddingValue: (flags[1] & 0x0e) >>> 1,
isNonSyncSample: flags[1] & 0x01,
degradationPriority: (flags[2] << 8) | flags[3]
};
};
module.exports = parseSampleFlags;
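// worked example:
// parseSampleFlags(new Uint8Array([0x01, 0x01, 0x00, 0x64]));
// // -> { isLeading: 0, dependsOn: 1, isDependedOn: 0, hasRedundancy: 0,
// //      paddingValue: 0, isNonSyncSample: 1, degradationPriority: 100 }
// i.e. a non-sync sample that depends on other samples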
var MAX_UINT32 = Math.pow(2, 32);
var parseSidx = function(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
references: [],
referenceId: view.getUint32(4),
timescale: view.getUint32(8)
},
i = 12;
if (result.version === 0) {
result.earliestPresentationTime = view.getUint32(i);
result.firstOffset = view.getUint32(i + 4);
i += 8;
} else {
// read 64 bits
result.earliestPresentationTime = (view.getUint32(i) * MAX_UINT32) + view.getUint32(i + 4);
result.firstOffset = (view.getUint32(i + 8) * MAX_UINT32) + view.getUint32(i + 12);
i += 16;
}
i += 2; // reserved
var referenceCount = view.getUint16(i);
i += 2; // start of references
for (; referenceCount > 0; i += 12, referenceCount--) {
result.references.push({
referenceType: (data[i] & 0x80) >>> 7,
referencedSize: view.getUint32(i) & 0x7FFFFFFF,
subsegmentDuration: view.getUint32(i + 4),
startsWithSap: !!(data[i + 8] & 0x80),
sapType: (data[i + 8] & 0x70) >>> 4,
sapDeltaTime: view.getUint32(i + 8) & 0x0FFFFFFF
});
}
return result;
};
module.exports = parseSidx;
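// A sketch parsing a hand-built version-0 sidx payload (the bytes that follow
// the box length and type fields; every value is invented for illustration):
//
// var sidxBytes = new Uint8Array([
//   0x00, 0x00, 0x00, 0x00, // version 0, no flags
//   0x00, 0x00, 0x00, 0x01, // referenceId: 1
//   0x00, 0x01, 0x5F, 0x90, // timescale: 90000
//   0x00, 0x00, 0x00, 0x00, // earliestPresentationTime: 0
//   0x00, 0x00, 0x00, 0x00, // firstOffset: 0
//   0x00, 0x00,             // reserved
//   0x00, 0x01,             // referenceCount: 1
//   0x00, 0x00, 0x03, 0xE8, // referenceType 0, referencedSize: 1000
//   0x00, 0x01, 0x5F, 0x90, // subsegmentDuration: 90000 (one second)
//   0x90, 0x00, 0x00, 0x00  // startsWithSap, sapType 1, sapDeltaTime 0
// ]);
// parseSidx(sidxBytes).references[0].referencedSize; // -> 1000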