mirror of https://github.com/DanielnetoDotCom/YouPHPTube

Chromecast moved to PlayerSkins plugin

Airplay option added on PlayerSkins
Daniel Neto 2024-10-30 20:19:49 -03:00
parent 480ae72b99
commit efd0665a44
286 changed files with 72588 additions and 1487 deletions

node_modules/.package-lock.json generated vendored

@@ -201,6 +201,14 @@
"url": "https://opencollective.com/popperjs"
}
},
"node_modules/@silvermine/videojs-airplay": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/@silvermine/videojs-airplay/-/videojs-airplay-1.3.0.tgz",
"integrity": "sha512-Oxq31DIEuKVt0qLj8/n5aaC9RRAc0hryarPVD9SFxPwCQ3A9Ef7bkRkGJz2i7XQxpIhPQ4SkM9BudUj6oHsPzA==",
"peerDependencies": {
"video.js": ">= 6.0.0"
}
},
"node_modules/@silvermine/videojs-chromecast": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/@silvermine/videojs-chromecast/-/videojs-chromecast-1.5.0.tgz",
@@ -213,16 +221,16 @@
}
},
"node_modules/@videojs/http-streaming": {
"version": "3.14.2",
"resolved": "https://registry.npmjs.org/@videojs/http-streaming/-/http-streaming-3.14.2.tgz",
"integrity": "sha512-c+sg+rrrSrRekBZxd+sNpzjRteIcOEQRJllqCBcz6MrgSaGJGDzV1xhGSAFnxX8E/xfqQeF060us5474WwYi3Q==",
"version": "3.15.0",
"resolved": "https://registry.npmjs.org/@videojs/http-streaming/-/http-streaming-3.15.0.tgz",
"integrity": "sha512-6rjaqEa87gVFqDFsHaLKXGrDqL3NhNZRNi6wkMw+uyt1lrLD2OFY0SfRQRNl7Vmmx0pt5FRJoRJYlnKsowyElA==",
"dependencies": {
"@babel/runtime": "^7.12.5",
"@videojs/vhs-utils": "^4.1.1",
"aes-decrypter": "^4.0.2",
"global": "^4.4.0",
"m3u8-parser": "^7.2.0",
"mpd-parser": "^1.3.0",
"mpd-parser": "^1.3.1",
"mux.js": "7.0.3",
"video.js": "^7 || ^8"
},
@@ -231,7 +239,23 @@
"npm": ">=5"
},
"peerDependencies": {
"video.js": "^8.14.0"
"video.js": "^8.19.0"
}
},
"node_modules/@videojs/http-streaming/node_modules/mux.js": {
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/mux.js/-/mux.js-7.0.3.tgz",
"integrity": "sha512-gzlzJVEGFYPtl2vvEiJneSWAWD4nfYRHD5XgxmB2gWvXraMPOYk+sxfvexmNfjQUFpmk6hwLR5C6iSFmuwCHdQ==",
"dependencies": {
"@babel/runtime": "^7.11.2",
"global": "^4.4.0"
},
"bin": {
"muxjs-transmux": "bin/transmux.js"
},
"engines": {
"node": ">=8",
"npm": ">=5"
}
},
"node_modules/@videojs/vhs-utils": {
@@ -457,9 +481,9 @@
}
},
"node_modules/dexie": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/dexie/-/dexie-4.0.8.tgz",
"integrity": "sha512-1G6cJevS17KMDK847V3OHvK2zei899GwpDiqfEXHP1ASvme6eWJmAp9AU4s1son2TeGkWmC0g3y8ezOBPnalgQ=="
"version": "4.0.9",
"resolved": "https://registry.npmjs.org/dexie/-/dexie-4.0.9.tgz",
"integrity": "sha512-VQG1huEVSAdDZssb9Bb9mFy+d3jAE0PT4d1nIRYlT46ip1fzbs1tXi0SlUayRDgV3tTbJG8ZRqAo2um49gtynA=="
},
"node_modules/dom-walk": {
"version": "0.1.2",
@@ -1025,9 +1049,9 @@
}
},
"node_modules/mpd-parser": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/mpd-parser/-/mpd-parser-1.3.0.tgz",
"integrity": "sha512-WgeIwxAqkmb9uTn4ClicXpEQYCEduDqRKfmUdp4X8vmghKfBNXZLYpREn9eqrDx/Tf5LhzRcJLSpi4ohfV742Q==",
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/mpd-parser/-/mpd-parser-1.3.1.tgz",
"integrity": "sha512-1FuyEWI5k2HcmhS1HkKnUAQV7yFPfXPht2DnRRGtoiiAAW+ESTbtEXIDpRkwdU+XyrQuwrIym7UkoPKsZ0SyFw==",
"dependencies": {
"@babel/runtime": "^7.12.5",
"@videojs/vhs-utils": "^4.0.0",
@@ -1039,9 +1063,9 @@
}
},
"node_modules/mux.js": {
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/mux.js/-/mux.js-7.0.3.tgz",
"integrity": "sha512-gzlzJVEGFYPtl2vvEiJneSWAWD4nfYRHD5XgxmB2gWvXraMPOYk+sxfvexmNfjQUFpmk6hwLR5C6iSFmuwCHdQ==",
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/mux.js/-/mux.js-7.1.0.tgz",
"integrity": "sha512-NTxawK/BBELJrYsZThEulyUMDVlLizKdxyAsMuzoCD1eFj97BVaA8D/CvKsKu6FOLYkFojN5CbM9h++ZTZtknA==",
"dependencies": {
"@babel/runtime": "^7.11.2",
"global": "^4.4.0"
@@ -1390,18 +1414,18 @@
}
},
"node_modules/video.js": {
"version": "8.18.1",
"resolved": "https://registry.npmjs.org/video.js/-/video.js-8.18.1.tgz",
"integrity": "sha512-oQ4M/HD2fFgEPHfmVMWxGykRFIpOmVhK0XZ4PSsPTgN2jH6E6+92f/RI2mDXDb0yu+Fxv9fxMUm0M7Z2K3Zo9w==",
"version": "8.19.1",
"resolved": "https://registry.npmjs.org/video.js/-/video.js-8.19.1.tgz",
"integrity": "sha512-MVuayhXpzTBv5Jk3nYEU2akawPhuBBlizEbpQGx2i+6FiBmqxGjkrkLdDLOzG54ut7xapjp26IfWQLGSpeLmcQ==",
"dependencies": {
"@babel/runtime": "^7.12.5",
"@videojs/http-streaming": "^3.14.2",
"@videojs/http-streaming": "^3.15.0",
"@videojs/vhs-utils": "^4.1.1",
"@videojs/xhr": "2.7.0",
"aes-decrypter": "^4.0.2",
"global": "4.4.0",
"m3u8-parser": "^7.2.0",
"mpd-parser": "^1.2.2",
"mpd-parser": "^1.3.1",
"mux.js": "^7.0.1",
"videojs-contrib-quality-levels": "4.1.0",
"videojs-font": "4.2.0",


@@ -0,0 +1,38 @@
name: CI
on: [ push, pull_request ]
jobs:
build:
runs-on: ubuntu-latest
steps:
-
uses: actions/checkout@v3
with:
fetch-depth: 0 # Fetch all history
-
uses: actions/setup-node@v3
with:
node-version-file: '.nvmrc'
- run: npm i -g npm@8.5.5
- run: npm ci
- run: npm run standards
test:
needs: [ build ]
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
node-version: [ 12, 14, 16, 'lts/*', 'latest' ]
steps:
- uses: actions/checkout@v3
-
name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node-version }}
- run: npm i -g npm@8.5.5
- run: npm ci # Reinstall the dependencies to ensure they install with the current version of node
- run: npm test
- name: Coveralls
uses: coverallsapp/github-action@v1

node_modules/@silvermine/videojs-airplay/.jsdoc generated vendored Normal file

@@ -0,0 +1,3 @@
{
"plugins": [ "plugins/markdown" ]
}

node_modules/@silvermine/videojs-airplay/.nvmrc generated vendored Normal file

@@ -0,0 +1 @@
16.15.0

node_modules/@silvermine/videojs-airplay/.nycrc.json generated vendored Normal file

@@ -0,0 +1,16 @@
{
"include": [
"src/**/*.js"
],
"extension": [
".js"
],
"reporter": [
"text-summary",
"html",
"lcov"
],
"instrument": true,
"sourceMap": true,
"all": true
}


@@ -0,0 +1 @@
extends: ./node_modules/@silvermine/standardization/.stylelintrc.yml

node_modules/@silvermine/videojs-airplay/CHANGELOG.md generated vendored Normal file

@@ -0,0 +1,27 @@
# Changelog
All notable changes to this project will be documented in this file.
See [our coding standards][commit-messages] for commit guidelines.
## [1.3.0](https://github.com/silvermine/videojs-airplay/compare/v1.2.0...v1.3.0) (2023-11-15)
### Bug Fixes
* remove deprecated `.extend` method ([1e4aa2a](https://github.com/silvermine/videojs-airplay/commit/1e4aa2a5980ca843e92efc7165a67908be2843d0))
## [1.2.0](https://github.com/silvermine/videojs-airplay/compare/v1.1.0...v1.2.0) (2023-03-21)
### Features
* Add optional "AirPlay" label to button component ([23ab737](https://github.com/silvermine/videojs-airplay/commit/23ab73773c54f60cd7f0fced5de64db22bd96722))
### Bug Fixes
* Remove target availability event listener on dispose ([#29](https://github.com/silvermine/videojs-airplay/issues/29)) ([f732699](https://github.com/silvermine/videojs-airplay/commit/f732699d8ef20278e8c089cc015abaf018a413e5))
[commit-messages]: https://github.com/silvermine/silvermine-info/blob/master/commit-history.md#commit-messages

node_modules/@silvermine/videojs-airplay/LICENSE generated vendored Normal file

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2017 Jeremy Thomerson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

node_modules/@silvermine/videojs-airplay/README.md generated vendored Normal file

@@ -0,0 +1,186 @@
# Silvermine Video.js AirPlay Plugin
<!-- markdownlint-disable line-length -->
[![Build Status](https://travis-ci.org/silvermine/videojs-airplay.svg?branch=master)](https://travis-ci.org/silvermine/videojs-airplay)
[![Coverage Status](https://coveralls.io/repos/github/silvermine/videojs-airplay/badge.svg?branch=master)](https://coveralls.io/github/silvermine/videojs-airplay?branch=master)
[![Dependency Status](https://david-dm.org/silvermine/videojs-airplay.svg)](https://david-dm.org/silvermine/videojs-airplay)
[![Dev Dependency Status](https://david-dm.org/silvermine/videojs-airplay/dev-status.svg)](https://david-dm.org/silvermine/videojs-airplay#info=devDependencies&view=table)
<!-- markdownlint-enable line-length -->
## What is it?
A plugin for [Video.js](http://videojs.com/) versions 6+ that adds a button to the control
bar that will open the AirPlay menu if it is available on the user's device.
_NOTE: there is a [`videojs-airplay`](https://www.npmjs.com/package/videojs-airplay)
package that is in no way associated with `@silvermine/videojs-airplay`. The
`videojs-airplay` module appears to only support VideoJS version 5.x, whereas our
`@silvermine/videojs-airplay` module supports VideoJS 6.x._
## How do I use it?
The `@silvermine/videojs-airplay` plugin includes 3 types of assets: javascript, CSS and
images.
You can either build the plugin locally and use the assets that are output from the build
process directly, or you can install the plugin as an npm module, include the
javascript and SCSS source in your project using a Common-JS module loader and SASS build
process, and copy the images from the image source folder to your project.
### Building the plugin locally
1. Either clone this repository or install the `@silvermine/videojs-airplay` module
using `npm install @silvermine/videojs-airplay`.
2. Ensure that the project's `devDependencies` are installed by running `npm install`
from within the folder you cloned or installed the project.
3. Run `grunt build` to build and copy the javascript, CSS and image files to the
`dist` folder.
4. Copy the plugin's files from the `dist` folder into your project as needed.
5. Ensure that the images in the `dist/images` folder are accessible at `./images/`,
relative to where the plugin's CSS is located. If, for example, your CSS is located
at `https://example.com/plugins/silvermine-videojs-airplay.css`, then the plugin's
images should be located at `https://example.com/plugins/images/`.
Note: when adding the plugin's javascript to your web page, include the
`silvermine-videojs-airplay.min.js` javascript file in your HTML _after_ loading Video.js. The
plugin's built javascript file expects there to be a reference to Video.js at
`window.videojs` and will throw an error if it does not exist.
After both Video.js and `@silvermine/videojs-airplay` have loaded, follow the steps in the
"Configuration" section below.
### Configuration
Once the plugin has been loaded and registered, add it to your Video.js player using
Video.js' plugin configuration option (see the section under the heading "Setting up a
Plugin" on [Video.js' plugin documentation page][videojs-docs]. Use these options to
configure the plugin:
* **`plugins.airPlay.addButtonToControlBar`** - a `boolean` flag that tells the plugin
whether or not it should automatically add the AirPlay button to the Video.js
player's control bar component. Defaults to `true`.
* **`plugins.airPlay.buttonPositionIndex`** - a zero-based number specifying the index
of the AirPlay button among the control bar's child components (if
`addButtonToControlBar` is set to `true`). By default the AirPlay Button is added as
the last child of the control bar. A value less than 0 puts the button at the
specified position from the end of the control bar. Note that it's likely not all
child components of the control bar are visible.
* **`plugins.airPlay.addAirPlayLabelToButton`** (default: `false`) - by default, the
AirPlay button component will display only an icon. Setting `addAirPlayLabelToButton`
to `true` will display a label titled `"AirPlay"` alongside the default icon.
For example:
```js
var options;
options = {
controls: true,
plugins: {
airPlay: {
addButtonToControlBar: false, // defaults to `true`
}
}
};
videojs(document.getElementById('myVideoElement'), options);
```
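And a sketch that also positions the button using `buttonPositionIndex` (the value
shown is illustrative):
```js
videojs(document.getElementById('myVideoElement'), {
   controls: true,
   plugins: {
      airPlay: {
         // A negative index counts from the end of the control bar's children,
         // so -1 places the button just before the last child component.
         buttonPositionIndex: -1
      }
   }
});
```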
Even if you do not need to change any of the configuration options above, to enable the
plugin you must either provide an `airPlay` entry in the `plugins` option as shown above
or call the `airPlay` plugin function manually:
```js
var player = videojs(document.getElementById('myVideoElement'));
player.airPlay(); // initializes the AirPlay plugin
```
#### Localization
The `AirPlayButton` component has two translated strings: "Start AirPlay" and "AirPlay".
* The "Start AirPlay" string appears in both of the standard places for Button
component text: inside the `.vjs-control-text` span and as the `<button>` element's
`title` attribute.
* The "AirPlay" string appears in an optional label within the Button component: inside
the `.vjs-airplay-button-label` span.
To localize the AirPlay button text, follow the steps in the [Video.js Languages
tutorial][videojs-docs] to add `"Start AirPlay"` and `"AirPlay"` keys to the map of
translation strings.
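For example, a minimal sketch using Video.js' `addLanguage` API (the locale code and
translated strings here are illustrative):
```js
var videojs = require('video.js');

// Register translations for the two strings used by the AirPlay button.
videojs.addLanguage('es', {
   'Start AirPlay': 'Iniciar AirPlay',
   'AirPlay': 'AirPlay'
});
```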
### Using the npm module
If you are using a module loader such as Browserify or Webpack, first install
`@silvermine/videojs-airplay` using `npm install`. Then, use
`require('@silvermine/videojs-airplay')` to require `@silvermine/videojs-airplay` into
your project's source code. `require('@silvermine/videojs-airplay')` returns a function
that you can use to register the plugin with videojs by passing in a reference to
`videojs`:
```js
var videojs = require('video.js');
// Initialize the AirPlay plugin
require('@silvermine/videojs-airplay')(videojs);
```
Then, follow the steps in the "Configuration" section above.
> [!WARNING]
> This plugin's source code uses ES6+ syntax and keywords, such as `class` and `static`.
> If you need to support [browsers that do not support newer JavaScript
> syntax](https://caniuse.com/es6), you will need to use a tool like
> [Babel](https://babeljs.io/) to transpile and polyfill your code.
>
> Alternatively, you can
> `require('@silvermine/videojs-airplay/dist/silvermine-videojs-airplay.js')`
> to use a JavaScript file that has already been polyfilled/transpiled down to ES5
> compatibility.
### Using the CSS and images
If you are using SCSS in your project, you can simply reference the plugin's main SCSS
file in your project's SCSS:
```scss
@import "path/to/node_modules/@silvermine/videojs-airplay/src/scss/videojs-airplay";
```
Optionally, you can override the SCSS variables that contain the paths to the icon
image files:
* **`$icon-airplay--default`** - the path to the icon image that is displayed when the
AirPlay button is in its normal, default state. Defaults to
`"images/ic_airplay_white_24px.svg"`.
* **`$icon-airplay--hover`** - the path to the icon image that is displayed when the
user hovers over the AirPlay button. Defaults to
`"images/ic_airplay_white_24px.svg"`.
* **`$airplay-icon-size`** - the width and height of the icon (the button and icon are
square). Defaults to `12px`.
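For example, a sketch that overrides the icon variables (the paths and size shown are
illustrative); because the plugin declares them with `!default`, your values must come
*before* the `@import`:
```scss
$icon-airplay--default: 'my-icons/airplay.svg';
$icon-airplay--hover: 'my-icons/airplay-hover.svg';
$airplay-icon-size: 16px;

@import "path/to/node_modules/@silvermine/videojs-airplay/src/scss/videojs-airplay";
```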
#### Images
The plugin's images are located at `@silvermine/videojs-airplay/src/images`. If you have
not overridden the icon image path variables in the SCSS, then copy the images from the
`src/images` folder to a folder that is accessible at `./images/`, relative to where the
plugin's CSS is located. If, for example, your CSS is located at
`https://example.com/plugins/silvermine-videojs-airplay.css`, then the plugin's images
should be located at `https://example.com/plugins/images/`.
## How do I contribute?
We genuinely appreciate external contributions. See [our extensive
documentation][contributing] on how to contribute.
## License
This software is released under the MIT license. See [the license file](LICENSE) for more
details.
[videojs-docs]: http://docs.videojs.com/tutorial-plugins.html
[contributing]: https://github.com/silvermine/silvermine-info#contributing


@@ -0,0 +1,3 @@
'use strict';
module.exports = { extends: [ '@silvermine/standardization/commitlint' ] };


@@ -0,0 +1,15 @@
<svg fill="#000000" height="24" viewBox="0 0 24 24" width="24" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path d="M0 0h24v24H0V0z" id="a"/>
</defs>
<defs>
<path d="M0 0h24v24H0V0z" id="c"/>
</defs>
<clipPath id="b">
<use overflow="visible" xlink:href="#a"/>
</clipPath>
<clipPath clip-path="url(#b)" id="d">
<use overflow="visible" xlink:href="#c"/>
</clipPath>
<path clip-path="url(#d)" d="M6 22h12l-6-6zM21 3H3c-1.1 0-2 .9-2 2v12c0 1.1.9 2 2 2h4v-2H3V5h18v12h-4v2h4c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2z"/>
</svg>



@@ -0,0 +1,15 @@
<svg fill="#5C7CB0" height="24" viewBox="0 0 24 24" width="24" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path d="M0 0h24v24H0V0z" id="a"/>
</defs>
<defs>
<path d="M0 0h24v24H0V0z" id="c"/>
</defs>
<clipPath id="b">
<use overflow="visible" xlink:href="#a"/>
</clipPath>
<clipPath clip-path="url(#b)" id="d">
<use overflow="visible" xlink:href="#c"/>
</clipPath>
<path clip-path="url(#d)" d="M6 22h12l-6-6zM21 3H3c-1.1 0-2 .9-2 2v12c0 1.1.9 2 2 2h4v-2H3V5h18v12h-4v2h4c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2z"/>
</svg>



@@ -0,0 +1,15 @@
<svg fill="#FFFFFF" height="24" viewBox="0 0 24 24" width="24" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path d="M0 0h24v24H0V0z" id="a"/>
</defs>
<defs>
<path d="M0 0h24v24H0V0z" id="c"/>
</defs>
<clipPath id="b">
<use overflow="visible" xlink:href="#a"/>
</clipPath>
<clipPath clip-path="url(#b)" id="d">
<use overflow="visible" xlink:href="#c"/>
</clipPath>
<path clip-path="url(#d)" d="M6 22h12l-6-6zM21 3H3c-1.1 0-2 .9-2 2v12c0 1.1.9 2 2 2h4v-2H3V5h18v12h-4v2h4c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2z"/>
</svg>



@@ -0,0 +1 @@
.vjs-airplay-button .vjs-icon-placeholder{background:url("images/ic_airplay_white_24px.svg") center center no-repeat;background-size:contain;display:inline-block;width:12px;height:12px}.vjs-airplay-button:hover{cursor:pointer}.vjs-airplay-button:hover .vjs-icon-placeholder{background-image:url("images/ic_airplay_white_24px.svg")}.vjs-airplay-button.vjs-airplay-button-lg:not(.vjs-hidden){display:flex;align-items:center;width:auto;padding:0 4px}.vjs-airplay-button.vjs-airplay-button-lg:not(.vjs-hidden) .vjs-airplay-button-label{flex-grow:1;margin-left:4px}.vjs-airplay-button.vjs-airplay-button-lg:not(.vjs-hidden) .vjs-icon-placeholder{flex-grow:1}

File diff suppressed because it is too large

File diff suppressed because it is too large

node_modules/@silvermine/videojs-airplay/package.json generated vendored Normal file

@@ -0,0 +1,74 @@
{
"name": "@silvermine/videojs-airplay",
"version": "1.3.0",
"description": "video.js plugin for casting to airplay",
"main": "src/js/index.js",
"scripts": {
"test": "check-node-version --npm 8.5.5 && nyc mocha -- 'tests/**/*.test.js'",
"prepublish": "grunt build",
"commitlint": "commitlint --from a9277dd",
"markdownlint": "markdownlint-cli2",
"eslint": "eslint '{,!(node_modules|dist)/**/}*.js'",
"stylelint": "stylelint './src/scss/**/*.scss'",
"standards": "npm run commitlint && npm run markdownlint && npm run stylelint && npm run eslint",
"release:preview": "node ./node_modules/@silvermine/standardization/scripts/release.js preview",
"release:prep-changelog": "node ./node_modules/@silvermine/standardization/scripts/release.js prep-changelog",
"release:finalize": "node ./node_modules/@silvermine/standardization/scripts/release.js finalize"
},
"author": "Jeremy Thomerson",
"license": "MIT",
"repository": {
"type": "git",
"url": "git+https://github.com/silvermine/videojs-airplay.git"
},
"keywords": [
"video.js",
"videojs",
"plugin",
"apple",
"appletv",
"airplay",
"cast"
],
"bugs": {
"url": "https://github.com/silvermine/videojs-airplay/issues"
},
"homepage": "https://github.com/silvermine/videojs-airplay#readme",
"devDependencies": {
"@babel/core": "7.21.0",
"@babel/preset-env": "7.20.2",
"@silvermine/eslint-config": "3.0.1",
"@silvermine/standardization": "2.2.1",
"autoprefixer": "8.6.5",
"babel-eslint": "10.1.0",
"babelify": "10.0.0",
"check-node-version": "4.0.2",
"core-js": "3.28.0",
"coveralls": "3.0.2",
"eslint": "6.8.0",
"expect.js": "0.3.1",
"grunt": "1.4.0",
"grunt-browserify": "5.3.0",
"grunt-contrib-clean": "1.1.0",
"grunt-contrib-copy": "1.0.0",
"grunt-contrib-uglify": "3.0.1",
"grunt-contrib-watch": "1.1.0",
"grunt-postcss": "0.9.0",
"grunt-sass": "3.1.0",
"mocha": "8.4.0",
"mocha-lcov-reporter": "1.3.0",
"nyc": "15.1.0",
"rewire": "2.5.2",
"sass": "1.49.7",
"silvermine-serverless-utils": "git+https://github.com/silvermine/serverless-utils.git#910f1149af824fc8d0fa840878079c7d3df0f414",
"sinon": "2.3.5"
},
"peerDependencies": {
"video.js": ">= 6.0.0"
},
"config": {
"commitizen": {
"path": "./node_modules/cz-conventional-changelog"
}
}
}


@@ -0,0 +1,15 @@
<svg fill="#000000" height="24" viewBox="0 0 24 24" width="24" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path d="M0 0h24v24H0V0z" id="a"/>
</defs>
<defs>
<path d="M0 0h24v24H0V0z" id="c"/>
</defs>
<clipPath id="b">
<use overflow="visible" xlink:href="#a"/>
</clipPath>
<clipPath clip-path="url(#b)" id="d">
<use overflow="visible" xlink:href="#c"/>
</clipPath>
<path clip-path="url(#d)" d="M6 22h12l-6-6zM21 3H3c-1.1 0-2 .9-2 2v12c0 1.1.9 2 2 2h4v-2H3V5h18v12h-4v2h4c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2z"/>
</svg>



@@ -0,0 +1,15 @@
<svg fill="#5C7CB0" height="24" viewBox="0 0 24 24" width="24" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path d="M0 0h24v24H0V0z" id="a"/>
</defs>
<defs>
<path d="M0 0h24v24H0V0z" id="c"/>
</defs>
<clipPath id="b">
<use overflow="visible" xlink:href="#a"/>
</clipPath>
<clipPath clip-path="url(#b)" id="d">
<use overflow="visible" xlink:href="#c"/>
</clipPath>
<path clip-path="url(#d)" d="M6 22h12l-6-6zM21 3H3c-1.1 0-2 .9-2 2v12c0 1.1.9 2 2 2h4v-2H3V5h18v12h-4v2h4c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2z"/>
</svg>



@@ -0,0 +1,15 @@
<svg fill="#FFFFFF" height="24" viewBox="0 0 24 24" width="24" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path d="M0 0h24v24H0V0z" id="a"/>
</defs>
<defs>
<path d="M0 0h24v24H0V0z" id="c"/>
</defs>
<clipPath id="b">
<use overflow="visible" xlink:href="#a"/>
</clipPath>
<clipPath clip-path="url(#b)" id="d">
<use overflow="visible" xlink:href="#c"/>
</clipPath>
<path clip-path="url(#d)" d="M6 22h12l-6-6zM21 3H3c-1.1 0-2 .9-2 2v12c0 1.1.9 2 2 2h4v-2H3V5h18v12h-4v2h4c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2z"/>
</svg>



@@ -0,0 +1,147 @@
var hasAirPlayAPISupport = require('../lib/hasAirPlayAPISupport');
/**
* Registers the AirPlayButton Component with Video.js. Calls
* {@link http://docs.videojs.com/Component.html#.registerComponent}, which will add a
* component called `airPlayButton` to the list of globally registered Video.js
* components. The `airPlayButton` is added to the player's control bar UI automatically
* once {@link module:enableAirPlay} has been called. If you would like to specify the
* order of the buttons that appear in the control bar, including this button, you can do
* so in the options that you pass to the `videojs` function when creating a player:
*
* ```
* videojs('playerID', {
* controlBar: {
* children: [
* 'playToggle',
* 'progressControl',
* 'volumePanel',
* 'fullscreenToggle',
* 'airPlayButton',
* ],
* }
* });
* ```
*
* @param videojs {object} A reference to {@link http://docs.videojs.com/module-videojs.html|Video.js}
* @see http://docs.videojs.com/module-videojs.html#~registerPlugin
*/
module.exports = function(videojs) {
/**
* The AirPlayButton module contains both the AirPlayButton class definition and the
* function used to register the button as a Video.js Component.
*
* @module AirPlayButton
*/
const ButtonComponent = videojs.getComponent('Button');
/**
* The Video.js Button class is the base class for UI button components.
*
* @external Button
* @see {@link http://docs.videojs.com/Button.html|Button}
*/
/** @lends AirPlayButton.prototype */
class AirPlayButton extends ButtonComponent {
/**
* This class is a button component designed to be displayed in the
* player UI's control bar. It displays an Apple AirPlay selection
* list when clicked.
*
* @constructs
* @extends external:Button
*/
constructor(player, options) {
super(player, options);
if (!hasAirPlayAPISupport()) {
this.hide();
}
this._reactToAirPlayAvailableEvents();
if (options.addAirPlayLabelToButton) {
this.el().classList.add('vjs-airplay-button-lg');
this._labelEl = document.createElement('span');
this._labelEl.classList.add('vjs-airplay-button-label');
this._labelEl.textContent = this.localize('AirPlay');
this.el().appendChild(this._labelEl);
} else {
this.controlText('Start AirPlay');
}
}
/**
* Overrides Button#buildCSSClass to return the classes used on the button element.
*
* @param {DOMElement} el
* @see {@link http://docs.videojs.com/Button.html#buildCSSClass|Button#buildCSSClass}
*/
buildCSSClass() {
return 'vjs-airplay-button ' + super.buildCSSClass();
}
/**
* Overrides Button#handleClick to handle button click events. AirPlay
* functionality is handled outside of this class, which should be limited
* to UI related logic. This function simply triggers an event on the player.
*
* @fires AirPlayButton#airPlayRequested
* @param {DOMElement} el
* @see {@link http://docs.videojs.com/Button.html#handleClick|Button#handleClick}
*/
handleClick() {
this.player().trigger('airPlayRequested');
}
/**
* Gets the underlying DOMElement used by the player.
*
* @private
* @returns {DOMElement} either an <audio> or <video> tag, depending on the type of
* player
*/
_getMediaEl() {
var playerEl = this.player().el();
return playerEl.querySelector('video, audio');
}
/**
* Binds a listener to the `webkitplaybacktargetavailabilitychanged` event, if it is
* supported, that will show or hide this button Component based on the availability
* of the AirPlay function.
*
* @private
*/
_reactToAirPlayAvailableEvents() {
var mediaEl = this._getMediaEl(),
self = this;
if (!mediaEl || !hasAirPlayAPISupport()) {
return;
}
function onTargetAvailabilityChanged(event) {
if (event.availability === 'available') {
self.show();
} else {
self.hide();
}
}
mediaEl.addEventListener('webkitplaybacktargetavailabilitychanged', onTargetAvailabilityChanged);
this.on('dispose', function() {
mediaEl.removeEventListener('webkitplaybacktargetavailabilitychanged', onTargetAvailabilityChanged);
});
}
}
videojs.registerComponent('airPlayButton', AirPlayButton);
};


@@ -0,0 +1,109 @@
/**
* @module enableAirPlay
*/
var hasAirPlayAPISupport = require('./lib/hasAirPlayAPISupport');
/**
* @private
* @param {object} the Video.js Player instance
* @returns {AirPlayButton} or `undefined` if it does not exist
*/
function getExistingAirPlayButton(player) {
return player.controlBar.getChild('airPlayButton');
}
/**
* Adds the AirPlayButton Component to the player's ControlBar component, if the
* AirPlayButton does not already exist in the ControlBar.
* @private
* @param player {object} the Video.js Player instance
* @param options {object}
*/
function ensureAirPlayButtonExists(player, options) {
var existingAirPlayButton = getExistingAirPlayButton(player),
indexOpt;
if (options.addButtonToControlBar && !existingAirPlayButton) {
// Figure out AirPlay button's index
indexOpt = player.controlBar.children().length;
if (typeof options.buttonPositionIndex !== 'undefined') {
indexOpt = options.buttonPositionIndex >= 0
? options.buttonPositionIndex
: player.controlBar.children().length + options.buttonPositionIndex;
}
player.controlBar.addChild('airPlayButton', options, indexOpt);
}
}
/**
* Handles requests for AirPlay triggered by the AirPlayButton Component.
*
* @private
* @param player {object} the Video.js Player instance
*/
function onAirPlayRequested(player) {
var mediaEl = player.el().querySelector('video, audio');
if (mediaEl && mediaEl.webkitShowPlaybackTargetPicker) {
mediaEl.webkitShowPlaybackTargetPicker();
}
}
/**
* Adds an event listener for the `airPlayRequested` event triggered by the AirPlayButton
* Component.
*
* @private
* @param player {object} the Video.js Player instance
*/
function listenForAirPlayEvents(player) {
// Respond to requests for AirPlay. The AirPlayButton component triggers this event
// when the user clicks the AirPlay button.
player.on('airPlayRequested', onAirPlayRequested.bind(null, player));
}
/**
* Sets up the AirPlay plugin.
*
* @private
* @param player {object} the Video.js player
* @param options {object} the plugin options
*/
function enableAirPlay(player, options) {
if (!player.controlBar) {
return;
}
if (hasAirPlayAPISupport()) {
listenForAirPlayEvents(player);
ensureAirPlayButtonExists(player, options);
}
}
/**
* Registers the AirPlay plugin with Video.js. Calls
* {@link http://docs.videojs.com/module-videojs.html#~registerPlugin|videojs#registerPlugin},
* which will add a plugin function called `airPlay` to any instance of a Video.js player
* that is created after calling this function. Call `player.airPlay(options)`, passing in
* configuration options, to enable the AirPlay plugin on your Player instance.
*
* Currently, the only configuration option is:
*
* * **buttonText** - the text to display inside of the button component. By default,
* this text is hidden and is used for accessibility purposes.
*
* @param {object} videojs
* @see http://docs.videojs.com/module-videojs.html#~registerPlugin
*/
module.exports = function(videojs) {
videojs.registerPlugin('airPlay', function(options) {
var pluginOptions = Object.assign({ addButtonToControlBar: true }, options || {});
// `this` is an instance of a Video.js Player.
// Wait until the player is "ready" so that the player's control bar component has
// been created.
this.ready(enableAirPlay.bind(this, this, pluginOptions));
});
};


@@ -0,0 +1,21 @@
var createAirPlayButton = require('./components/AirPlayButton'),
createAirPlayPlugin = require('./enableAirPlay');
/**
* @module index
*/
/**
* Registers the AirPlay plugin and AirPlayButton Component with Video.js. See
* {@link module:AirPlayButton} and {@link module:enableAirPlay} for more details about
* how the plugin and button are registered and configured.
*
* @param {object} videojs
* @see module:enableAirPlay
* @see module:AirPlayButton
*/
module.exports = function(videojs) {
videojs = videojs || window.videojs;
createAirPlayButton(videojs);
createAirPlayPlugin(videojs);
};


@@ -0,0 +1,13 @@
/**
* @module hasAirPlayAPISupport
*/
/**
* Returns whether or not the current browser environment supports AirPlay.
*
* @private
* @returns {boolean} true if AirPlay support is available
*/
module.exports = function() {
return !!window.WebKitPlaybackTargetAvailabilityEvent;
};


@@ -0,0 +1,13 @@
/**
* This module is used as an entry point for the build system to bundle this plugin into a
* single javascript file that can be loaded by a script tag on a web page. The javascript
* file that is built assumes that `videojs` is available globally at `window.videojs`, so
* Video.js must be loaded **before** this plugin is loaded.
*
* Run `npm install` and then `grunt build` to build the plugin's bundled javascript
* file, as well as the CSS and image assets into the project's `./dist/` folder.
*
* @module standalone
*/
require('./index')();


@@ -0,0 +1,38 @@
// Icon files
$icon-airplay--default: 'images/ic_airplay_white_24px.svg' !default;
$icon-airplay--hover: 'images/ic_airplay_white_24px.svg' !default;
// Sizes
$airplay-icon-size: 12px !default;
$airplay-button-spacing: 4px !default;
.vjs-airplay-button {
.vjs-icon-placeholder {
background: url($icon-airplay--default) center center no-repeat;
background-size: contain;
display: inline-block;
width: $airplay-icon-size;
height: $airplay-icon-size;
}
&:hover {
cursor: pointer;
.vjs-icon-placeholder {
background-image: url($icon-airplay--hover);
}
}
}
.vjs-airplay-button.vjs-airplay-button-lg:not(.vjs-hidden) {
// Fits both the icon and the label on the same control
display: flex;
align-items: center;
width: auto;
padding: 0 $airplay-button-spacing;
.vjs-airplay-button-label {
flex-grow: 1;
margin-left: $airplay-button-spacing;
}
.vjs-icon-placeholder {
flex-grow: 1;
}
}


@@ -1,3 +1,15 @@
<a name="3.15.0"></a>
# [3.15.0](https://github.com/videojs/http-streaming/compare/v3.14.2...v3.15.0) (2024-10-10)
### Features
* Add Airplay support when overriding native HLS in Safari/iOS ([#1543](https://github.com/videojs/http-streaming/issues/1543)) ([bfc17b4](https://github.com/videojs/http-streaming/commit/bfc17b4))
* Add support for ManagedMediaSource 'startstreaming' and 'endstreaming' event handling ([#1542](https://github.com/videojs/http-streaming/issues/1542)) ([ae1ae70](https://github.com/videojs/http-streaming/commit/ae1ae70))
### Chores
* update mpd-parser to v1.3.1 ([#1544](https://github.com/videojs/http-streaming/issues/1544)) ([a9dd790](https://github.com/videojs/http-streaming/commit/a9dd790))
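As a hedged illustration (not part of the upstream changelog), the new AirPlay behavior
applies when native HLS playback is overridden in Safari, e.g. with the standard VHS options:
```js
// Sketch: force MSE-based (VHS) playback in Safari/iOS so the 3.15.0
// AirPlay source-element logic takes effect.
var player = videojs('myVideoElement', {
   html5: {
      vhs: { overrideNative: true },
      nativeAudioTracks: false,
      nativeVideoTracks: false
   }
});
```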
<a name="3.14.2"></a>
## [3.14.2](https://github.com/videojs/http-streaming/compare/v3.14.1...v3.14.2) (2024-09-17)


@@ -1,4 +1,4 @@
/*! @name @videojs/http-streaming @version 3.14.2 @license Apache-2.0 */
/*! @name @videojs/http-streaming @version 3.15.0 @license Apache-2.0 */
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('video.js'), require('@xmldom/xmldom')) :
typeof define === 'function' && define.amd ? define(['exports', 'video.js', '@xmldom/xmldom'], factory) :
@@ -6199,7 +6199,7 @@
});
};
/*! @name mpd-parser @version 1.3.0 @license Apache-2.0 */
/*! @name mpd-parser @version 1.3.1 @license Apache-2.0 */
const isObject = obj => {
return !!obj && typeof obj === 'object';
@@ -7174,10 +7174,11 @@
const organizeVttPlaylists = (playlists, sidxMapping = {}) => {
return playlists.reduce((a, playlist) => {
const label = playlist.attributes.label || playlist.attributes.lang || 'text';
const language = playlist.attributes.lang || 'und';
if (!a[label]) {
a[label] = {
language: label,
language,
default: false,
autoselect: false,
playlists: [],
@@ -31256,10 +31257,14 @@ ${segmentInfoString(segmentInfo)}`); // If there's an init segment associated wi
this.handleDurationChange_ = this.handleDurationChange_.bind(this);
this.handleSourceOpen_ = this.handleSourceOpen_.bind(this);
this.handleSourceEnded_ = this.handleSourceEnded_.bind(this);
this.load = this.load.bind(this);
this.pause = this.pause.bind(this);
this.mediaSource.addEventListener('durationchange', this.handleDurationChange_); // load the media source into the player
this.mediaSource.addEventListener('sourceopen', this.handleSourceOpen_);
this.mediaSource.addEventListener('sourceended', this.handleSourceEnded_); // we don't have to handle sourceclose since dispose will handle termination of
this.mediaSource.addEventListener('sourceended', this.handleSourceEnded_);
this.mediaSource.addEventListener('startstreaming', this.load);
this.mediaSource.addEventListener('endstreaming', this.pause); // we don't have to handle sourceclose since dispose will handle termination of
// everything, and the MediaSource should not be detached without a proper disposal
this.seekable_ = createTimeRanges();
@@ -32063,6 +32068,22 @@ ${segmentInfoString(segmentInfo)}`); // If there's an init segment associated wi
this.subtitleSegmentLoader_.load();
}
}
/**
* Call pause on our SegmentLoaders
*/
pause() {
this.mainSegmentLoader_.pause();
if (this.mediaTypes_.AUDIO.activePlaylistLoader) {
this.audioSegmentLoader_.pause();
}
if (this.mediaTypes_.SUBTITLES.activePlaylistLoader) {
this.subtitleSegmentLoader_.pause();
}
}
/**
* Re-tune playback quality level for the current player
* conditions. This method will perform destructive actions like removing
@@ -34415,11 +34436,11 @@ ${segmentInfoString(segmentInfo)}`); // If there's an init segment associated wi
initPlugin(this, options);
};
var version$4 = "3.14.2";
var version$4 = "3.15.0";
var version$3 = "7.0.3";
var version$2 = "1.3.0";
var version$2 = "1.3.1";
var version$1 = "7.2.0";
@@ -35397,8 +35418,15 @@ ${segmentInfoString(segmentInfo)}`); // If there's an init segment associated wi
return;
}
this.mediaSourceUrl_ = window.URL.createObjectURL(this.playlistController_.mediaSource);
this.tech_.src(this.mediaSourceUrl_);
this.mediaSourceUrl_ = window.URL.createObjectURL(this.playlistController_.mediaSource); // If we are playing HLS with MSE in Safari, add source elements for both the blob and manifest URLs.
// The latter will enable Airplay playback on receiver devices.
if ((videojs__default["default"].browser.IS_ANY_SAFARI || videojs__default["default"].browser.IS_IOS) && this.options_.overrideNative && this.options_.sourceType === 'hls' && typeof this.tech_.addSourceElement === 'function') {
this.tech_.addSourceElement(this.mediaSourceUrl_);
this.tech_.addSourceElement(this.source_.src);
} else {
this.tech_.src(this.mediaSourceUrl_);
}
}
createKeySessions_() {


@@ -1,4 +1,4 @@
/*! @name @videojs/http-streaming @version 3.14.2 @license Apache-2.0 */
/*! @name @videojs/http-streaming @version 3.15.0 @license Apache-2.0 */
'use strict';
Object.defineProperty(exports, '__esModule', { value: true });
@@ -25149,10 +25149,14 @@ class PlaylistController extends videojs__default["default"].EventTarget {
this.handleDurationChange_ = this.handleDurationChange_.bind(this);
this.handleSourceOpen_ = this.handleSourceOpen_.bind(this);
this.handleSourceEnded_ = this.handleSourceEnded_.bind(this);
this.load = this.load.bind(this);
this.pause = this.pause.bind(this);
this.mediaSource.addEventListener('durationchange', this.handleDurationChange_); // load the media source into the player
this.mediaSource.addEventListener('sourceopen', this.handleSourceOpen_);
this.mediaSource.addEventListener('sourceended', this.handleSourceEnded_); // we don't have to handle sourceclose since dispose will handle termination of
this.mediaSource.addEventListener('sourceended', this.handleSourceEnded_);
this.mediaSource.addEventListener('startstreaming', this.load);
this.mediaSource.addEventListener('endstreaming', this.pause); // we don't have to handle sourceclose since dispose will handle termination of
// everything, and the MediaSource should not be detached without a proper disposal
this.seekable_ = createTimeRanges();
@@ -25956,6 +25960,22 @@ class PlaylistController extends videojs__default["default"].EventTarget {
this.subtitleSegmentLoader_.load();
}
}
/**
* Call pause on our SegmentLoaders
*/
pause() {
this.mainSegmentLoader_.pause();
if (this.mediaTypes_.AUDIO.activePlaylistLoader) {
this.audioSegmentLoader_.pause();
}
if (this.mediaTypes_.SUBTITLES.activePlaylistLoader) {
this.subtitleSegmentLoader_.pause();
}
}
/**
* Re-tune playback quality level for the current player
* conditions. This method will perform destructive actions like removing
@@ -28308,11 +28328,11 @@ const reloadSourceOnError = function (options) {
initPlugin(this, options);
};
var version$4 = "3.14.2";
var version$4 = "3.15.0";
var version$3 = "7.0.3";
var version$2 = "1.3.0";
var version$2 = "1.3.1";
var version$1 = "7.2.0";
@@ -29290,8 +29310,15 @@ class VhsHandler extends Component {
return;
}
this.mediaSourceUrl_ = window__default["default"].URL.createObjectURL(this.playlistController_.mediaSource);
this.tech_.src(this.mediaSourceUrl_);
this.mediaSourceUrl_ = window__default["default"].URL.createObjectURL(this.playlistController_.mediaSource); // If we are playing HLS with MSE in Safari, add source elements for both the blob and manifest URLs.
// The latter will enable Airplay playback on receiver devices.
if ((videojs__default["default"].browser.IS_ANY_SAFARI || videojs__default["default"].browser.IS_IOS) && this.options_.overrideNative && this.options_.sourceType === 'hls' && typeof this.tech_.addSourceElement === 'function') {
this.tech_.addSourceElement(this.mediaSourceUrl_);
this.tech_.addSourceElement(this.source_.src);
} else {
this.tech_.src(this.mediaSourceUrl_);
}
}
createKeySessions_() {


@@ -1,4 +1,4 @@
/*! @name @videojs/http-streaming @version 3.14.2 @license Apache-2.0 */
/*! @name @videojs/http-streaming @version 3.15.0 @license Apache-2.0 */
import _extends from '@babel/runtime/helpers/extends';
import document from 'global/document';
import window$1 from 'global/window';
@@ -25137,10 +25137,14 @@ class PlaylistController extends videojs.EventTarget {
this.handleDurationChange_ = this.handleDurationChange_.bind(this);
this.handleSourceOpen_ = this.handleSourceOpen_.bind(this);
this.handleSourceEnded_ = this.handleSourceEnded_.bind(this);
this.load = this.load.bind(this);
this.pause = this.pause.bind(this);
this.mediaSource.addEventListener('durationchange', this.handleDurationChange_); // load the media source into the player
this.mediaSource.addEventListener('sourceopen', this.handleSourceOpen_);
this.mediaSource.addEventListener('sourceended', this.handleSourceEnded_); // we don't have to handle sourceclose since dispose will handle termination of
this.mediaSource.addEventListener('sourceended', this.handleSourceEnded_);
this.mediaSource.addEventListener('startstreaming', this.load);
this.mediaSource.addEventListener('endstreaming', this.pause); // we don't have to handle sourceclose since dispose will handle termination of
// everything, and the MediaSource should not be detached without a proper disposal
this.seekable_ = createTimeRanges();
@@ -25944,6 +25948,22 @@ class PlaylistController extends videojs.EventTarget {
this.subtitleSegmentLoader_.load();
}
}
/**
* Call pause on our SegmentLoaders
*/
pause() {
this.mainSegmentLoader_.pause();
if (this.mediaTypes_.AUDIO.activePlaylistLoader) {
this.audioSegmentLoader_.pause();
}
if (this.mediaTypes_.SUBTITLES.activePlaylistLoader) {
this.subtitleSegmentLoader_.pause();
}
}
/**
* Re-tune playback quality level for the current player
* conditions. This method will perform destructive actions like removing
@@ -28296,11 +28316,11 @@ const reloadSourceOnError = function (options) {
initPlugin(this, options);
};
var version$4 = "3.14.2";
var version$4 = "3.15.0";
var version$3 = "7.0.3";
var version$2 = "1.3.0";
var version$2 = "1.3.1";
var version$1 = "7.2.0";
@@ -29278,8 +29298,15 @@ class VhsHandler extends Component {
return;
}
this.mediaSourceUrl_ = window$1.URL.createObjectURL(this.playlistController_.mediaSource);
this.tech_.src(this.mediaSourceUrl_);
this.mediaSourceUrl_ = window$1.URL.createObjectURL(this.playlistController_.mediaSource); // If we are playing HLS with MSE in Safari, add source elements for both the blob and manifest URLs.
// The latter will enable Airplay playback on receiver devices.
if ((videojs.browser.IS_ANY_SAFARI || videojs.browser.IS_IOS) && this.options_.overrideNative && this.options_.sourceType === 'hls' && typeof this.tech_.addSourceElement === 'function') {
this.tech_.addSourceElement(this.mediaSourceUrl_);
this.tech_.addSourceElement(this.source_.src);
} else {
this.tech_.src(this.mediaSourceUrl_);
}
}
createKeySessions_() {


@@ -1,4 +1,4 @@
/*! @name @videojs/http-streaming @version 3.14.2 @license Apache-2.0 */
/*! @name @videojs/http-streaming @version 3.15.0 @license Apache-2.0 */
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('video.js'), require('@xmldom/xmldom')) :
typeof define === 'function' && define.amd ? define(['exports', 'video.js', '@xmldom/xmldom'], factory) :
@@ -6199,7 +6199,7 @@
});
};
/*! @name mpd-parser @version 1.3.0 @license Apache-2.0 */
/*! @name mpd-parser @version 1.3.1 @license Apache-2.0 */
const isObject = obj => {
return !!obj && typeof obj === 'object';
@@ -7174,10 +7174,11 @@
const organizeVttPlaylists = (playlists, sidxMapping = {}) => {
return playlists.reduce((a, playlist) => {
const label = playlist.attributes.label || playlist.attributes.lang || 'text';
const language = playlist.attributes.lang || 'und';
if (!a[label]) {
a[label] = {
language: label,
language,
default: false,
autoselect: false,
playlists: [],
@@ -31206,10 +31207,14 @@ ${segmentInfoString(segmentInfo)}`); // If there's an init segment associated wi
this.handleDurationChange_ = this.handleDurationChange_.bind(this);
this.handleSourceOpen_ = this.handleSourceOpen_.bind(this);
this.handleSourceEnded_ = this.handleSourceEnded_.bind(this);
this.load = this.load.bind(this);
this.pause = this.pause.bind(this);
this.mediaSource.addEventListener('durationchange', this.handleDurationChange_); // load the media source into the player
this.mediaSource.addEventListener('sourceopen', this.handleSourceOpen_);
this.mediaSource.addEventListener('sourceended', this.handleSourceEnded_); // we don't have to handle sourceclose since dispose will handle termination of
this.mediaSource.addEventListener('sourceended', this.handleSourceEnded_);
this.mediaSource.addEventListener('startstreaming', this.load);
this.mediaSource.addEventListener('endstreaming', this.pause); // we don't have to handle sourceclose since dispose will handle termination of
// everything, and the MediaSource should not be detached without a proper disposal
this.seekable_ = createTimeRanges();
@@ -32013,6 +32018,22 @@ ${segmentInfoString(segmentInfo)}`); // If there's an init segment associated wi
this.subtitleSegmentLoader_.load();
}
}
/**
* Call pause on our SegmentLoaders
*/
pause() {
this.mainSegmentLoader_.pause();
if (this.mediaTypes_.AUDIO.activePlaylistLoader) {
this.audioSegmentLoader_.pause();
}
if (this.mediaTypes_.SUBTITLES.activePlaylistLoader) {
this.subtitleSegmentLoader_.pause();
}
}
/**
* Re-tune playback quality level for the current player
* conditions. This method will perform destructive actions like removing
@@ -34365,11 +34386,11 @@ ${segmentInfoString(segmentInfo)}`); // If there's an init segment associated wi
initPlugin(this, options);
};
var version$4 = "3.14.2";
var version$4 = "3.15.0";
var version$3 = "7.0.3";
var version$2 = "1.3.0";
var version$2 = "1.3.1";
var version$1 = "7.2.0";
@@ -35347,8 +35368,15 @@ ${segmentInfoString(segmentInfo)}`); // If there's an init segment associated wi
return;
}
this.mediaSourceUrl_ = window.URL.createObjectURL(this.playlistController_.mediaSource);
this.tech_.src(this.mediaSourceUrl_);
this.mediaSourceUrl_ = window.URL.createObjectURL(this.playlistController_.mediaSource); // If we are playing HLS with MSE in Safari, add source elements for both the blob and manifest URLs.
// The latter will enable Airplay playback on receiver devices.
if ((videojs__default["default"].browser.IS_ANY_SAFARI || videojs__default["default"].browser.IS_IOS) && this.options_.overrideNative && this.options_.sourceType === 'hls' && typeof this.tech_.addSourceElement === 'function') {
this.tech_.addSourceElement(this.mediaSourceUrl_);
this.tech_.addSourceElement(this.source_.src);
} else {
this.tech_.src(this.mediaSourceUrl_);
}
}
createKeySessions_() {

File diff suppressed because one or more lines are too long


@@ -0,0 +1,12 @@
#!/bin/sh
basedir=$(dirname "$(echo "$0" | sed -e 's,\\,/,g')")
case `uname` in
*CYGWIN*|*MINGW*|*MSYS*) basedir=`cygpath -w "$basedir"`;;
esac
if [ -x "$basedir/node" ]; then
exec "$basedir/node" "$basedir/../mux.js/bin/transmux.js" "$@"
else
exec node "$basedir/../mux.js/bin/transmux.js" "$@"
fi


@@ -0,0 +1,17 @@
@ECHO off
GOTO start
:find_dp0
SET dp0=%~dp0
EXIT /b
:start
SETLOCAL
CALL :find_dp0
IF EXIST "%dp0%\node.exe" (
SET "_prog=%dp0%\node.exe"
) ELSE (
SET "_prog=node"
SET PATHEXT=%PATHEXT:;.JS;=;%
)
endLocal & goto #_undefined_# 2>NUL || title %COMSPEC% & "%_prog%" "%dp0%\..\mux.js\bin\transmux.js" %*


@@ -0,0 +1,28 @@
#!/usr/bin/env pwsh
$basedir=Split-Path $MyInvocation.MyCommand.Definition -Parent
$exe=""
if ($PSVersionTable.PSVersion -lt "6.0" -or $IsWindows) {
# Fix case when both the Windows and Linux builds of Node
# are installed in the same directory
$exe=".exe"
}
$ret=0
if (Test-Path "$basedir/node$exe") {
# Support pipeline input
if ($MyInvocation.ExpectingInput) {
$input | & "$basedir/node$exe" "$basedir/../mux.js/bin/transmux.js" $args
} else {
& "$basedir/node$exe" "$basedir/../mux.js/bin/transmux.js" $args
}
$ret=$LASTEXITCODE
} else {
# Support pipeline input
if ($MyInvocation.ExpectingInput) {
$input | & "node$exe" "$basedir/../mux.js/bin/transmux.js" $args
} else {
& "node$exe" "$basedir/../mux.js/bin/transmux.js" $args
}
$ret=$LASTEXITCODE
}
exit $ret


@@ -0,0 +1,240 @@
<a name="7.0.3"></a>
## [7.0.3](https://github.com/videojs/mux.js/compare/v7.0.2...v7.0.3) (2024-03-12)
### Bug Fixes
* 608 caption out of bound rows ([#442](https://github.com/videojs/mux.js/issues/442)) ([37ec801](https://github.com/videojs/mux.js/commit/37ec801))
### Chores
* change code example transmuxer event listeners ([#438](https://github.com/videojs/mux.js/issues/438)) ([2d61f49](https://github.com/videojs/mux.js/commit/2d61f49))
<a name="7.0.2"></a>
## [7.0.2](https://github.com/videojs/mux.js/compare/v7.0.1...v7.0.2) (2023-11-27)
### Bug Fixes
* Ignore non-PES packets in the rollover stream ([#440](https://github.com/videojs/mux.js/issues/440)) ([2015be8](https://github.com/videojs/mux.js/commit/2015be8))
<a name="7.0.1"></a>
## [7.0.1](https://github.com/videojs/mux.js/compare/v7.0.0...v7.0.1) (2023-10-12)
### Bug Fixes
* 708 captions multi-byte char fix ([#439](https://github.com/videojs/mux.js/issues/439)) ([ec31749](https://github.com/videojs/mux.js/commit/ec31749))
### Chores
* update v7.0.0 documentation ([#435](https://github.com/videojs/mux.js/issues/435)) ([21e55aa](https://github.com/videojs/mux.js/commit/21e55aa))
<a name="7.0.0"></a>
# [7.0.0](https://github.com/videojs/mux.js/compare/v6.3.0...v7.0.0) (2023-07-21)
### Features
* add position data to captions ([#434](https://github.com/videojs/mux.js/issues/434)) ([30f2132](https://github.com/videojs/mux.js/commit/30f2132))
### Chores
* add npm publish step to the release workflow ([a8306cd](https://github.com/videojs/mux.js/commit/a8306cd))
* rename workflow name from github-release to release and add discussion category name for github releases ([4ba1607](https://github.com/videojs/mux.js/commit/4ba1607))
* Update CI and release workflows ([#431](https://github.com/videojs/mux.js/issues/431)) ([dc56f1b](https://github.com/videojs/mux.js/commit/dc56f1b))
* update collaborator guide md ([51b3ed4](https://github.com/videojs/mux.js/commit/51b3ed4))
* update git push suggestion in collaborator guide md ([73a5b60](https://github.com/videojs/mux.js/commit/73a5b60))
### BREAKING CHANGES
* In the case of CEA-608 captions, mux.js will now be returning captions in the form of caption sets.
This means that rather than returning a single text of combined caption cues, an array of caption cues is returned in the `content` property.
```js
transmuxer.on('data', function (segment) {
// create a VTTCue for all the parsed CEA-608 captions:
segment.captions.forEach(function(captionSet) {
// Caption sets contains multiple captions with text and position data.
captionSet.content.forEach(function(cue) {
const newCue = new VTTCue(cue.startTime, cue.endTime, cue.text);
newCue.line = cue.line;
newCue.position = cue.position;
captionTextTrack.addCue(newCue);
});
});
});
```
<a name="6.3.0"></a>
# [6.3.0](https://github.com/videojs/mux.js/compare/v6.2.0...v6.3.0) (2023-02-22)
### Features
* support emsg box parsing ([2e77285](https://github.com/videojs/mux.js/commit/2e77285))
### Bug Fixes
* emsg ie11 test failures ([528e9ed](https://github.com/videojs/mux.js/commit/528e9ed))
<a name="6.2.0"></a>
# [6.2.0](https://github.com/videojs/mux.js/compare/v6.1.0...v6.2.0) (2022-07-08)
### Features
* add ID3 parsing for text, link, and APIC frames ([#412](https://github.com/videojs/mux.js/issues/412)) ([5454bdd](https://github.com/videojs/mux.js/commit/5454bdd))
### Bug Fixes
* replace indexOf with typedArrayIndexOf for IE11 support ([#417](https://github.com/videojs/mux.js/issues/417)) ([4e1b195](https://github.com/videojs/mux.js/commit/4e1b195))
<a name="6.1.0"></a>
# [6.1.0](https://github.com/videojs/mux.js/compare/v6.0.1...v6.1.0) (2022-05-26)
### Features
* send ID3 tag even when a frame has malformed content ([#408](https://github.com/videojs/mux.js/issues/408)) ([1da5d23](https://github.com/videojs/mux.js/commit/1da5d23))
<a name="6.0.1"></a>
## [6.0.1](https://github.com/videojs/mux.js/compare/v6.0.0...v6.0.1) (2021-12-20)
### Bug Fixes
* fix IE11 by replacing arrow function ([#406](https://github.com/videojs/mux.js/issues/406)) ([47302fe](https://github.com/videojs/mux.js/commit/47302fe))
<a name="6.0.0"></a>
# [6.0.0](https://github.com/videojs/mux.js/compare/v5.14.1...v6.0.0) (2021-11-29)
### Features
* use bigint for 64 bit ints if needed and available. ([#383](https://github.com/videojs/mux.js/issues/383)) ([83779b9](https://github.com/videojs/mux.js/commit/83779b9))
### Chores
* don't run tests on version ([#404](https://github.com/videojs/mux.js/issues/404)) ([45623ea](https://github.com/videojs/mux.js/commit/45623ea))
### BREAKING CHANGES
* In some cases, mux.js will now be returning a BigInt rather than a regular Number value. This means that consumers of this library will need to add checks for BigInt for optimal operation.
<a name="5.14.1"></a>
## [5.14.1](https://github.com/videojs/mux.js/compare/v5.14.0...v5.14.1) (2021-10-14)
### Bug Fixes
* avoid mismatch with avc1 and hvc1 codec ([#400](https://github.com/videojs/mux.js/issues/400)) ([8a58d6e](https://github.com/videojs/mux.js/commit/8a58d6e))
* prevent adding duplicate log listeners on every push after a flush ([#402](https://github.com/videojs/mux.js/issues/402)) ([eb332c1](https://github.com/videojs/mux.js/commit/eb332c1))
<a name="5.14.0"></a>
# [5.14.0](https://github.com/videojs/mux.js/compare/v5.13.0...v5.14.0) (2021-09-21)
### Features
* Add multibyte character support ([#398](https://github.com/videojs/mux.js/issues/398)) ([0849e0a](https://github.com/videojs/mux.js/commit/0849e0a))
<a name="5.13.0"></a>
# [5.13.0](https://github.com/videojs/mux.js/compare/v5.12.2...v5.13.0) (2021-08-24)
### Features
* add firstSequenceNumber option to Transmuxer to start sequence somewhere other than zero ([#395](https://github.com/videojs/mux.js/issues/395)) ([6ff42f4](https://github.com/videojs/mux.js/commit/6ff42f4))
### Chores
* add github release ci action ([#397](https://github.com/videojs/mux.js/issues/397)) ([abe7936](https://github.com/videojs/mux.js/commit/abe7936))
* update ci workflow to fix ci ([#396](https://github.com/videojs/mux.js/issues/396)) ([86cfdca](https://github.com/videojs/mux.js/commit/86cfdca))
<a name="5.12.2"></a>
## [5.12.2](https://github.com/videojs/mux.js/compare/v5.12.1...v5.12.2) (2021-07-14)
### Bug Fixes
* Do not scale width by sarRatio, let decoder handle it via the pasp box ([#393](https://github.com/videojs/mux.js/issues/393)) ([9e9982f](https://github.com/videojs/mux.js/commit/9e9982f))
<a name="5.12.1"></a>
## [5.12.1](https://github.com/videojs/mux.js/compare/v5.12.0...v5.12.1) (2021-07-09)
### Code Refactoring
* rename warn event to log, change console logs to log events ([#392](https://github.com/videojs/mux.js/issues/392)) ([4995603](https://github.com/videojs/mux.js/commit/4995603))
<a name="5.12.0"></a>
# [5.12.0](https://github.com/videojs/mux.js/compare/v5.11.3...v5.12.0) (2021-07-02)
### Features
* add general error/warn/debug log events and log skipped adts data ([#391](https://github.com/videojs/mux.js/issues/391)) ([6588d48](https://github.com/videojs/mux.js/commit/6588d48))
<a name="5.11.3"></a>
## [5.11.3](https://github.com/videojs/mux.js/compare/v5.11.2...v5.11.3) (2021-06-30)
### Bug Fixes
* Prevent skipping frames when we have garbage data between adts sync words ([#390](https://github.com/videojs/mux.js/issues/390)) ([71bac64](https://github.com/videojs/mux.js/commit/71bac64))
<a name="5.11.2"></a>
## [5.11.2](https://github.com/videojs/mux.js/compare/v5.11.1...v5.11.2) (2021-06-24)
### Bug Fixes
* on flush if a pmt has not been emitted and we have one, emit it ([#388](https://github.com/videojs/mux.js/issues/388)) ([67b4aab](https://github.com/videojs/mux.js/commit/67b4aab))
<a name="5.11.1"></a>
## [5.11.1](https://github.com/videojs/mux.js/compare/v5.11.0...v5.11.1) (2021-06-22)
### Bug Fixes
* inspect all program map tables for stream types ([#386](https://github.com/videojs/mux.js/issues/386)) ([bac4da9](https://github.com/videojs/mux.js/commit/bac4da9))
<a name="5.11.0"></a>
# [5.11.0](https://github.com/videojs/mux.js/compare/v5.10.0...v5.11.0) (2021-03-29)
### Features
* parse ctts atom in mp4 inspector ([#379](https://github.com/videojs/mux.js/issues/379)) ([b75a7a4](https://github.com/videojs/mux.js/commit/b75a7a4))
* stss atom parsing ([#380](https://github.com/videojs/mux.js/issues/380)) ([305eb4f](https://github.com/videojs/mux.js/commit/305eb4f))
<a name="5.10.0"></a>
# [5.10.0](https://github.com/videojs/mux.js/compare/v5.9.2...v5.10.0) (2021-03-05)
### Features
* parse edts boxes ([#375](https://github.com/videojs/mux.js/issues/375)) ([989bffd](https://github.com/videojs/mux.js/commit/989bffd))
### Bug Fixes
* Check if baseTimestamp is NaN ([#370](https://github.com/videojs/mux.js/issues/370)) ([b4e61dd](https://github.com/videojs/mux.js/commit/b4e61dd))
* only parse PES packets as PES packets ([#378](https://github.com/videojs/mux.js/issues/378)) ([bb984db](https://github.com/videojs/mux.js/commit/bb984db))
<a name="5.9.2"></a>
## [5.9.2](https://github.com/videojs/mux.js/compare/v5.9.1...v5.9.2) (2021-02-24)
### Features
* add a nodejs binary for transmux via command line ([#366](https://github.com/videojs/mux.js/issues/366)) ([b87ed0f](https://github.com/videojs/mux.js/commit/b87ed0f))
### Bug Fixes
* ts inspect ptsTime/dtsTime typo ([#377](https://github.com/videojs/mux.js/issues/377)) ([112e6e1](https://github.com/videojs/mux.js/commit/112e6e1))
### Chores
* switch to rollup-plugin-data-files ([#369](https://github.com/videojs/mux.js/issues/369)) ([0bb1556](https://github.com/videojs/mux.js/commit/0bb1556))
* update vjsverify to fix publish failure ([cb06bb5](https://github.com/videojs/mux.js/commit/cb06bb5))
<a name="5.9.1"></a>
## [5.9.1](https://github.com/videojs/mux.js/compare/v5.9.0...v5.9.1) (2021-01-20)
### Chores
* **package:** fixup browser field ([#368](https://github.com/videojs/mux.js/issues/368)) ([8926506](https://github.com/videojs/mux.js/commit/8926506))
<a name="5.9.0"></a>
# [5.9.0](https://github.com/videojs/mux.js/compare/v5.8.0...v5.9.0) (2021-01-20)
### Features
* **CaptionStream:** add flag to turn off 708 captions ([#365](https://github.com/videojs/mux.js/issues/365)) ([8a7cdb6](https://github.com/videojs/mux.js/commit/8a7cdb6))
### Chores
* update this project to use the generator ([#352](https://github.com/videojs/mux.js/issues/352)) ([fa920a6](https://github.com/videojs/mux.js/commit/fa920a6))

View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright Brightcove, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,371 @@
# mux.js
[![Build Status](https://travis-ci.org/videojs/mux.js.svg?branch=main)](https://travis-ci.org/videojs/mux.js)[![Greenkeeper badge](https://badges.greenkeeper.io/videojs/mux.js.svg)](https://greenkeeper.io/)
[![Slack Status](http://slack.videojs.com/badge.svg)](http://slack.videojs.com)
Lightweight utilities for inspecting and manipulating video container formats.
Maintenance Status: Stable
## Table of Contents
- [Installation](#installation)
- [NPM](#npm)
- [Manual Build](#manual-build)
- [Building](#building)
- [Collaborator](#collaborator)
- [Contributing](#contributing)
- [Options](#options)
- [Background](#background)
- [fmp4](#fmp4)
- [MPEG2-TS to fMP4 Transmuxer](#mpeg2-ts-to-fmp4-transmuxer)
- [Diagram](#diagram)
- [Usage Examples](#usage-examples)
- [Basic Usage](#basic-usage)
- [Metadata](#metadata)
- [MP4 Inspector](#mp4-inspector)
- [Documentation](#documentation)
- [Talk to Us](#talk-to-us)
## Installation
### NPM
To install `mux.js` with npm run
```bash
npm install --save mux.js
```
### Manual Build
Download a copy of this git repository and then follow the steps in [Building](#building)
## Building
If you're using this project in a node-like environment, just `require()` whatever you need. If you'd like to package up a distribution to include separately, run `npm run build`. See the `package.json` for other handy scripts if you're thinking about contributing.
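For instance, a minimal sketch of pulling the library into a Node script:
```js
// grab the whole bundle and use whichever pieces you need
var muxjs = require('mux.js');

var transmuxer = new muxjs.mp4.Transmuxer();
```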
## Collaborator
If you are a collaborator, we have a guide on how to [release](./COLLABORATOR_GUIDE.md#releasing) the project.
## Contributing
If you are interested in contributing to `mux.js`, take a look at our docs on [streams](/docs/streams.md) to get started.
## Options
The exported `muxjs` object contains the following modules:
- [codecs](#codecs): a module for handling various codecs
- [mp4](#mp4): a module for handling ISOBMFF MP4 boxes
- [flv](#flv): a module for handling Flash content
- [mp2t](#mp2t): a module for handling MPEG 2 Transport Stream content
### Codecs
#### Adts
`muxjs.codecs.Adts`
The Adts (Audio Data Transport Stream) module handles audio data, specifically AAC. It includes an `AdtsStream` that takes ADTS audio and parses out AAC frames to pass on to the next Stream component in a pipeline.
#### h264
`muxjs.codecs.h264`
The h264 module handles H264 bitstreams, including a `NalByteStream` and an `H264Stream` that parse out NAL units and pass them on to the next Stream component in a pipeline.
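As a rough sketch of how these codec streams are driven (based on the packet shape `AdtsStream`'s `push()` expects; `adtsBytes` is an assumed `Uint8Array` of ADTS data):
```js
var adtsStream = new muxjs.codecs.Adts();

// one 'data' event is emitted per AAC frame parsed out of the input
adtsStream.on('data', function(frame) {
  console.log('AAC frame at pts', frame.pts, 'of', frame.data.byteLength, 'bytes');
});

// push() expects an elementary-stream style packet; non-audio packets are ignored
adtsStream.push({
  type: 'audio',
  data: adtsBytes,
  pts: 0,
  dts: 0
});
```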
### mp4
#### MP4 Generator
`muxjs.mp4.generator`
The MP4 Generator module contains multiple functions that can be used to generate fragmented MP4s (fmp4s) that can be used in MSE.
#### MP4 Probe
`muxjs.mp4.probe`
The MP4 Probe contains basic utilities that can be used to parse metadata about an MP4 segment, for example reading the `timescale` or getting the base media decode time of a fragment in seconds.
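A hedged sketch of probing a stream (`initSegment` and `mediaSegment` are assumed `Uint8Array`s; the `timescale()` and `startTime()` helper names reflect the probe module but may differ between versions):
```js
// map of track id -> timescale, read from the init segment's headers
var timescales = muxjs.mp4.probe.timescale(initSegment);

// the fragment's base media decode time, converted to seconds
var startTime = muxjs.mp4.probe.startTime(timescales, mediaSegment);
```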
#### MP4 Transmuxer
`muxjs.mp4.Transmuxer`
Takes MPEG2-TS segments and transmuxes them into fmp4 segments.
Options:
##### baseMediaDecodeTime
Type: `number`
Default: `0`
The Base Media Decode Time of the first segment to be passed into the transmuxer.
##### keepOriginalTimestamps
Type: `boolean`
Default: `false`
The default behavior of the MP4 Transmuxer is to rewrite the timestamps of media segments to ensure that they begin at `0` on the media timeline in MSE. To avoid this behavior, you may set this option to `true`.
**Note**: This will affect behavior of captions and metadata, and these may not align with audio and video without additional manipulation of timing metadata.
##### remux
Type: `boolean`
Default: `true`
Set to `true` to remux audio and video into a single MP4 segment.
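Putting these options together, a minimal sketch (the values shown are the defaults):
```js
var transmuxer = new muxjs.mp4.Transmuxer({
  baseMediaDecodeTime: 0,        // BMDT of the first segment pushed in
  keepOriginalTimestamps: false, // rewrite timestamps so media starts at 0
  remux: true                    // combine audio and video into one segment
});
```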
#### CaptionParser
`muxjs.mp4.CaptionParser`
This module reads CEA-608 captions out of FMP4 segments.
#### Tools
`muxjs.mp4.tools`
This module includes utilities to parse MP4s into an equivalent javascript object, primarily for debugging purposes.
### flv
#### Transmuxer
`muxjs.flv.Transmuxer`
Takes MPEG2-TS segments and transmuxes them into FLV segments. This module is in maintenance mode and will not have further major development.
#### Tools
`muxjs.flv.tools`
This module includes utilities to parse FLV tags into an equivalent javascript object, primarily for debugging purposes.
### mp2t
`muxjs.mp2t`
Contains Streams specifically to handle MPEG2-TS data, for example `ElementaryStream` and `TransportPacketStream`. This is used in the MP4 module.
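As a rough sketch of wiring these Streams together (the intermediate `TransportParseStream`, the `pipe()` helper, and `tsBytes`, a `Uint8Array` of MPEG2-TS data, are assumptions here):
```js
var packetStream = new muxjs.mp2t.TransportPacketStream();
var parseStream = new muxjs.mp2t.TransportParseStream();
var elementaryStream = new muxjs.mp2t.ElementaryStream();

// 188-byte TS packets -> parsed PAT/PMT/PES packets -> reassembled elementary packets
packetStream.pipe(parseStream).pipe(elementaryStream);

elementaryStream.on('data', function(packet) {
  console.log(packet.type); // 'video', 'audio' or 'timed-metadata'
});

packetStream.push(tsBytes);
packetStream.flush();
```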
#### CaptionStream
`muxjs.mp2t.CaptionStream`
Handles the bulk of parsing CEA-608 captions out of MPEG2-TS segments.
#### Tools
`muxjs.mp2t.tools`
This module contains utilities to parse basic timing information out of MPEG2-TS segments.
## Background
### fMP4
Before making use of the Transmuxer it is best to understand the structure of a fragmented MP4 (fMP4).
fMP4s are structured in *boxes* as described in the ISOBMFF spec.
For a basic fMP4 to be valid it needs to have the following boxes:
1) ftyp (File Type Box)
2) moov (Movie Header Box)
3) moof (Movie Fragment Box)
4) mdat (Movie Data Box)
Every fMP4 stream needs to start with an `ftyp` and `moov` box which is then followed by many `moof` and `mdat` pairs.
It is important to understand that the first segment you append to [Media Source Extensions](https://www.w3.org/TR/media-source/) must start with an `ftyp` and `moov` followed by a `moof` and `mdat`. A segment containing the `ftyp` and `moov` boxes is often referred to as an Initialization Segment (`init` segment), while segments containing `moof` and `mdat` boxes, which carry the media itself, are referred to as Media Segments.
If you would like to see a clearer representation of your fMP4 you can use the `muxjs.mp4.tools.inspect()` method.
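For example, a quick sanity check that an init segment leads with the boxes MSE expects (a sketch; `bytes` and the `type` field on parsed boxes are assumptions):
```js
var boxes = muxjs.mp4.tools.inspect(bytes);

// the first two boxes of an init segment should be ftyp and moov
console.log(boxes[0].type, boxes[1].type); // expected: 'ftyp' 'moov'
```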
## MPEG2-TS to fMP4 Transmuxer
### Diagram
![mux.js diagram](/docs/diagram.png)
## Usage Examples
### Basic Usage
To make use of the Transmuxer you will need to push data into the transmuxer you have created.
Feed in `Uint8Array`s of an MPEG-2 transport stream and get out a fragmented MP4.
Let's look at a very basic representation of what needs to happen the first time you want to append an fMP4 to an MSE buffer.
```js
// Create your transmuxer:
// initOptions is optional and can be omitted at this time.
var transmuxer = new muxjs.mp4.Transmuxer(initOptions);
// Create an event listener which will be triggered after the transmuxer processes data:
// 'data' events signal a new fMP4 segment is ready
transmuxer.on('data', function (segment) {
// This code will be executed when the event listener is triggered by a Transmuxer.push() method execution.
// Create an empty Uint8Array with the summed value of both the initSegment and data byteLength properties.
let data = new Uint8Array(segment.initSegment.byteLength + segment.data.byteLength);
// Add the segment.initSegment (ftyp/moov) starting at position 0
data.set(segment.initSegment, 0);
// Add the segment.data (moof/mdat) starting after the initSegment
data.set(segment.data, segment.initSegment.byteLength);
// Uncomment this line below to see the structure of your new fMP4
// console.log(muxjs.mp4.tools.inspect(data));
// Add your brand new fMP4 segment to your MSE Source Buffer
sourceBuffer.appendBuffer(data);
});
// When you push your starting MPEG-TS segment it will cause the 'data' event listener above to run.
// It is important to push after your event listener has been defined.
transmuxer.push(transportStreamSegment);
transmuxer.flush();
```
Above we are adding in the `initSegment` (ftyp/moov) to our data array before appending to the MSE Source Buffer.
This is required for the first piece of data we append to the MSE Source Buffer, but we will omit the `initSegment` for the remaining (moof/mdat) chunks of video we append to the Source Buffer.
When appending additional segments after the first, we just need the following anonymous event listener:
```js
transmuxer.on('data', function(segment){
sourceBuffer.appendBuffer(new Uint8Array(segment.data));
});
```
Here we put all of this together in a very basic example player.
```html
<html>
<head>
<title>Basic Transmuxer Test</title>
</head>
<body>
<video controls width="80%"></video>
<script src="https://github.com/videojs/mux.js/releases/latest/download/mux.js"></script>
<script>
// Create array of TS files to play
segments = [
"segment-0.ts",
"segment-1.ts",
"segment-2.ts",
];
// Replace this value with your files codec info
mime = 'video/mp4; codecs="mp4a.40.2,avc1.64001f"';
let mediaSource = new MediaSource();
let transmuxer = new muxjs.mp4.Transmuxer();
video = document.querySelector('video');
video.src = URL.createObjectURL(mediaSource);
mediaSource.addEventListener("sourceopen", appendFirstSegment);
function appendFirstSegment(){
if (segments.length == 0){
return;
}
URL.revokeObjectURL(video.src);
sourceBuffer = mediaSource.addSourceBuffer(mime);
sourceBuffer.addEventListener('updateend', appendNextSegment);
transmuxer.on('data', (segment) => {
let data = new Uint8Array(segment.initSegment.byteLength + segment.data.byteLength);
data.set(segment.initSegment, 0);
data.set(segment.data, segment.initSegment.byteLength);
console.log(muxjs.mp4.tools.inspect(data));
sourceBuffer.appendBuffer(data);
// reset the 'data' event listener to just append (moof/mdat) boxes to the Source Buffer
transmuxer.off('data');
})
fetch(segments.shift()).then((response)=>{
return response.arrayBuffer();
}).then((response)=>{
transmuxer.push(new Uint8Array(response));
transmuxer.flush();
})
}
function appendNextSegment(){
transmuxer.on('data', (segment) =>{
sourceBuffer.appendBuffer(new Uint8Array(segment.data));
transmuxer.off('data');
})
if (segments.length == 0){
// notify MSE that we have no more segments to append.
mediaSource.endOfStream();
return;
}
// fetch the next segment from the segments array and pass it into the transmuxer.push method
fetch(segments.shift()).then((response)=>{
return response.arrayBuffer();
}).then((response)=>{
transmuxer.push(new Uint8Array(response));
transmuxer.flush();
})
}
</script>
</body>
</html>
```
*NOTE: This player is only an example and should not be used in production.*
### Metadata
The transmuxer can also parse out supplementary video data like timed ID3 metadata and CEA-608 captions.
You can find both attached to the data event object:
```js
transmuxer.on('data', function (segment) {
// create a metadata text track cue for each ID3 frame:
segment.metadata.frames.forEach(function(frame) {
metadataTextTrack.addCue(new VTTCue(time, time, frame.value));
});
// create a VTTCue for all the parsed CEA-608 captions:
segment.captions.forEach(function(captionSet) {
// Caption sets contain multiple caption cues with text and position data.
captionSet.content.forEach(function(cue) {
const newCue = new VTTCue(cue.startTime, cue.endTime, cue.text);
newCue.line = cue.line;
newCue.position = cue.position;
captionTextTrack.addCue(newCue);
});
});
});
```
### MP4 Inspector
Parse MP4s into javascript objects or a text representation for display or debugging:
```js
// drop in a Uint8Array of an MP4:
var parsed = muxjs.mp4.tools.inspect(bytes);
// dig into the boxes:
console.log('The major brand of the first box:', parsed[0].majorBrand);
// print out the structure of the MP4:
document.body.appendChild(document.createTextNode(muxjs.textifyMp4(parsed)));
```
The MP4 inspector is used extensively as a debugging tool for the transmuxer. You can see it in action by cloning the project and opening [the debug page](https://github.com/videojs/mux.js/blob/master/debug/index.html) in your browser.
## Documentation
Check out our [troubleshooting guide](/docs/troubleshooting.md).
We have some tips on [creating test content](/docs/test-content.md).
Also, check out our guide on [working with captions in Mux.js](/docs/captions.md).
## Talk to us
Drop by our slack channel (#playback) on the [Video.js slack](http://slack.videojs.com).

View file

@ -0,0 +1,126 @@
#!/usr/bin/env node
/* eslint-disable no-console */
const fs = require('fs');
const path = require('path');
const {Transmuxer} = require('../lib/mp4');
const {version} = require('../package.json');
const {concatTypedArrays} = require('@videojs/vhs-utils/cjs/byte-helpers');
const {ONE_SECOND_IN_TS} = require('../lib/utils/clock.js');
const showHelp = function() {
console.log(`
transmux media-file > foo.mp4
transmux media-file -o foo.mp4
curl -s 'some-media-url' | transmux.js -o foo.mp4
wget -O - -o /dev/null 'some-media-url' | transmux.js -o foo.mp4
transmux a supported segment (ts or adts) into an fmp4
-h, --help print help
-v, --version print the version
-o, --output <string> write to a file instead of stdout
-d, --debugger add a break point just before data goes to transmuxer
`);
};
const parseArgs = function(args) {
const options = {};
for (let i = 0; i < args.length; i++) {
const arg = args[i];
if ((/^--version|-v$/).test(arg)) {
console.log(`transmux.js v${version}`);
process.exit(0);
} else if ((/^--help|-h$/).test(arg)) {
showHelp();
process.exit(0);
} else if ((/^--debugger|-d$/).test(arg)) {
options.debugger = true;
} else if ((/^--output|-o$/).test(arg)) {
i++;
options.output = args[i];
} else {
options.file = arg;
}
}
return options;
};
const cli = function(stdin) {
const options = parseArgs(process.argv.slice(2));
let inputStream;
let outputStream;
// if stdin was provided
if (stdin && options.file) {
console.error(`You cannot pass in a file ${options.file} and pipe from stdin!`);
process.exit(1);
}
if (stdin) {
inputStream = process.stdin;
} else if (options.file) {
inputStream = fs.createReadStream(path.resolve(options.file));
}
if (!inputStream) {
console.error('A file or stdin must be passed in as an argument or piped to this script!');
process.exit(1);
}
if (options.output) {
outputStream = fs.createWriteStream(path.resolve(options.output), {
encoding: null
});
} else {
outputStream = process.stdout;
}
return new Promise(function(resolve, reject) {
let allData;
inputStream.on('data', (chunk) => {
allData = concatTypedArrays(allData, chunk);
});
inputStream.on('error', reject);
inputStream.on('close', () => {
if (!allData || !allData.length) {
return reject('file is empty');
}
resolve(allData);
});
}).then(function(inputData) {
const transmuxer = new Transmuxer();
// Setting the BMDT to ensure that captions and id3 tags are not
// time-shifted by this value when they are output and instead are
// zero-based
transmuxer.setBaseMediaDecodeTime(ONE_SECOND_IN_TS);
transmuxer.on('data', function(data) {
if (data.initSegment) {
outputStream.write(concatTypedArrays(data.initSegment, data.data));
} else {
outputStream.write(data.data);
}
});
if (options.debugger) {
// eslint-disable-next-line
debugger;
}
transmuxer.push(inputData);
transmuxer.flush();
process.exit(0);
}).catch(function(e) {
console.error(e);
process.exit(1);
});
};
// no stdin if isTTY is set
cli(!process.stdin.isTTY ? process.stdin : null);

View file

@ -0,0 +1,125 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* A stream-based aac to mp4 converter. This utility can be used to
* deliver mp4s to a SourceBuffer on platforms that support native
* Media Source Extensions.
*/
'use strict';
var Stream = require('../utils/stream.js');
var aacUtils = require('./utils'); // Constants
var _AacStream;
/**
* Splits an incoming stream of binary data into ADTS and ID3 Frames.
*/
_AacStream = function AacStream() {
var everything = new Uint8Array(),
timeStamp = 0;
_AacStream.prototype.init.call(this);
this.setTimestamp = function (timestamp) {
timeStamp = timestamp;
};
this.push = function (bytes) {
var frameSize = 0,
byteIndex = 0,
bytesLeft,
chunk,
packet,
tempLength; // If there are bytes remaining from the last segment, prepend them to the
// bytes that were pushed in
if (everything.length) {
tempLength = everything.length;
// keep a reference to the old buffer; the newly allocated array below is zero-filled
var oldEverything = everything;
everything = new Uint8Array(bytes.byteLength + tempLength);
everything.set(oldEverything.subarray(0, tempLength));
everything.set(bytes, tempLength);
} else {
everything = bytes;
}
while (everything.length - byteIndex >= 3) {
if (everything[byteIndex] === 'I'.charCodeAt(0) && everything[byteIndex + 1] === 'D'.charCodeAt(0) && everything[byteIndex + 2] === '3'.charCodeAt(0)) {
// Exit early because we don't have enough to parse
// the ID3 tag header
if (everything.length - byteIndex < 10) {
break;
} // check framesize
frameSize = aacUtils.parseId3TagSize(everything, byteIndex); // Exit early if we don't have enough in the buffer
// to emit a full packet
// Add to byteIndex to support multiple ID3 tags in sequence
if (byteIndex + frameSize > everything.length) {
break;
}
chunk = {
type: 'timed-metadata',
data: everything.subarray(byteIndex, byteIndex + frameSize)
};
this.trigger('data', chunk);
byteIndex += frameSize;
continue;
} else if ((everything[byteIndex] & 0xff) === 0xff && (everything[byteIndex + 1] & 0xf0) === 0xf0) {
// Exit early because we don't have enough to parse
// the ADTS frame header
if (everything.length - byteIndex < 7) {
break;
}
frameSize = aacUtils.parseAdtsSize(everything, byteIndex); // Exit early if we don't have enough in the buffer
// to emit a full packet
if (byteIndex + frameSize > everything.length) {
break;
}
packet = {
type: 'audio',
data: everything.subarray(byteIndex, byteIndex + frameSize),
pts: timeStamp,
dts: timeStamp
};
this.trigger('data', packet);
byteIndex += frameSize;
continue;
}
byteIndex++;
}
bytesLeft = everything.length - byteIndex;
if (bytesLeft > 0) {
everything = everything.subarray(byteIndex);
} else {
everything = new Uint8Array();
}
};
this.reset = function () {
everything = new Uint8Array();
this.trigger('reset');
};
this.endTimeline = function () {
everything = new Uint8Array();
this.trigger('endedtimeline');
};
};
_AacStream.prototype = new Stream();
module.exports = _AacStream;

View file

@ -0,0 +1,160 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Utilities to detect basic properties and metadata about Aac data.
*/
'use strict';
var ADTS_SAMPLING_FREQUENCIES = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];
var parseId3TagSize = function parseId3TagSize(header, byteIndex) {
var returnSize = header[byteIndex + 6] << 21 | header[byteIndex + 7] << 14 | header[byteIndex + 8] << 7 | header[byteIndex + 9],
flags = header[byteIndex + 5],
footerPresent = (flags & 16) >> 4; // if we get a negative returnSize clamp it to 0
returnSize = returnSize >= 0 ? returnSize : 0;
if (footerPresent) {
return returnSize + 20;
}
return returnSize + 10;
};
var getId3Offset = function getId3Offset(data, offset) {
if (data.length - offset < 10 || data[offset] !== 'I'.charCodeAt(0) || data[offset + 1] !== 'D'.charCodeAt(0) || data[offset + 2] !== '3'.charCodeAt(0)) {
return offset;
}
offset += parseId3TagSize(data, offset);
return getId3Offset(data, offset);
}; // TODO: use vhs-utils
var isLikelyAacData = function isLikelyAacData(data) {
var offset = getId3Offset(data, 0);
return data.length >= offset + 2 && (data[offset] & 0xFF) === 0xFF && (data[offset + 1] & 0xF0) === 0xF0 && // verify that the 2 layer bits are 0, aka this
// is not mp3 data but aac data.
(data[offset + 1] & 0x16) === 0x10;
};
var parseSyncSafeInteger = function parseSyncSafeInteger(data) {
return data[0] << 21 | data[1] << 14 | data[2] << 7 | data[3];
}; // return a percent-encoded representation of the specified byte range
// @see http://en.wikipedia.org/wiki/Percent-encoding
var percentEncode = function percentEncode(bytes, start, end) {
var i,
result = '';
for (i = start; i < end; i++) {
result += '%' + ('00' + bytes[i].toString(16)).slice(-2);
}
return result;
}; // return the string representation of the specified byte range,
// interpreted as ISO-8859-1.
var parseIso88591 = function parseIso88591(bytes, start, end) {
return unescape(percentEncode(bytes, start, end)); // jshint ignore:line
};
var parseAdtsSize = function parseAdtsSize(header, byteIndex) {
var lowThree = (header[byteIndex + 5] & 0xE0) >> 5,
middle = header[byteIndex + 4] << 3,
highTwo = (header[byteIndex + 3] & 0x3) << 11; // mask the two high bits before shifting
return highTwo | middle | lowThree;
};
var parseType = function parseType(header, byteIndex) {
if (header[byteIndex] === 'I'.charCodeAt(0) && header[byteIndex + 1] === 'D'.charCodeAt(0) && header[byteIndex + 2] === '3'.charCodeAt(0)) {
return 'timed-metadata';
} else if ((header[byteIndex] & 0xff) === 0xff && (header[byteIndex + 1] & 0xf0) === 0xf0) {
return 'audio';
}
return null;
};
var parseSampleRate = function parseSampleRate(packet) {
var i = 0;
while (i + 5 < packet.length) {
if (packet[i] !== 0xFF || (packet[i + 1] & 0xF6) !== 0xF0) {
// If a valid header was not found, jump one forward and attempt to
// find a valid ADTS header starting at the next byte
i++;
continue;
}
return ADTS_SAMPLING_FREQUENCIES[(packet[i + 2] & 0x3c) >>> 2];
}
return null;
};
var parseAacTimestamp = function parseAacTimestamp(packet) {
var frameStart, frameSize, frame, frameHeader; // find the start of the first frame and the end of the tag
frameStart = 10;
if (packet[5] & 0x40) {
// advance the frame start past the extended header
frameStart += 4; // header size field
frameStart += parseSyncSafeInteger(packet.subarray(10, 14));
} // parse one or more ID3 frames
// http://id3.org/id3v2.3.0#ID3v2_frame_overview
do {
// determine the number of bytes in this frame
frameSize = parseSyncSafeInteger(packet.subarray(frameStart + 4, frameStart + 8));
if (frameSize < 1) {
return null;
}
frameHeader = String.fromCharCode(packet[frameStart], packet[frameStart + 1], packet[frameStart + 2], packet[frameStart + 3]);
if (frameHeader === 'PRIV') {
frame = packet.subarray(frameStart + 10, frameStart + frameSize + 10);
for (var i = 0; i < frame.byteLength; i++) {
if (frame[i] === 0) {
var owner = parseIso88591(frame, 0, i);
if (owner === 'com.apple.streaming.transportStreamTimestamp') {
var d = frame.subarray(i + 1);
var size = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 | d[6] << 6 | d[7] >>> 2;
size *= 4;
size += d[7] & 0x03;
return size;
}
break;
}
}
}
frameStart += 10; // advance past the frame header
frameStart += frameSize; // advance past the frame body
} while (frameStart < packet.byteLength);
return null;
};
module.exports = {
isLikelyAacData: isLikelyAacData,
parseId3TagSize: parseId3TagSize,
parseAdtsSize: parseAdtsSize,
parseType: parseType,
parseSampleRate: parseSampleRate,
parseAacTimestamp: parseAacTimestamp
};

View file

@ -0,0 +1,149 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var Stream = require('../utils/stream.js');
var ONE_SECOND_IN_TS = require('../utils/clock').ONE_SECOND_IN_TS;
var _AdtsStream;
var ADTS_SAMPLING_FREQUENCIES = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];
/*
* Accepts a ElementaryStream and emits data events with parsed
* AAC Audio Frames of the individual packets. Input audio in ADTS
* format is unpacked and re-emitted as AAC frames.
*
* @see http://wiki.multimedia.cx/index.php?title=ADTS
* @see http://wiki.multimedia.cx/?title=Understanding_AAC
*/
_AdtsStream = function AdtsStream(handlePartialSegments) {
var buffer,
frameNum = 0;
_AdtsStream.prototype.init.call(this);
this.skipWarn_ = function (start, end) {
this.trigger('log', {
level: 'warn',
message: "adts skiping bytes " + start + " to " + end + " in frame " + frameNum + " outside syncword"
});
};
this.push = function (packet) {
var i = 0,
frameLength,
protectionSkipBytes,
frameEnd,
oldBuffer,
sampleCount,
adtsFrameDuration;
if (!handlePartialSegments) {
frameNum = 0;
}
if (packet.type !== 'audio') {
// ignore non-audio data
return;
} // Prepend any data in the buffer to the input data so that we can parse
// aac frames that cross a PES packet boundary
if (buffer && buffer.length) {
oldBuffer = buffer;
buffer = new Uint8Array(oldBuffer.byteLength + packet.data.byteLength);
buffer.set(oldBuffer);
buffer.set(packet.data, oldBuffer.byteLength);
} else {
buffer = packet.data;
} // unpack any ADTS frames which have been fully received
// for details on the ADTS header, see http://wiki.multimedia.cx/index.php?title=ADTS
var skip; // We use i + 7 here because we want to be able to parse the entire header.
// If we don't have enough bytes to do that, then we definitely won't have a full frame.
while (i + 7 < buffer.length) {
// Look for the start of an ADTS header..
if (buffer[i] !== 0xFF || (buffer[i + 1] & 0xF6) !== 0xF0) {
if (typeof skip !== 'number') {
skip = i;
} // If a valid header was not found, jump one forward and attempt to
// find a valid ADTS header starting at the next byte
i++;
continue;
}
if (typeof skip === 'number') {
this.skipWarn_(skip, i);
skip = null;
} // The protection skip bit tells us if we have 2 bytes of CRC data at the
// end of the ADTS header
protectionSkipBytes = (~buffer[i + 1] & 0x01) * 2; // Frame length is a 13 bit integer starting 16 bits from the
// end of the sync sequence
// NOTE: frame length includes the size of the header
frameLength = (buffer[i + 3] & 0x03) << 11 | buffer[i + 4] << 3 | (buffer[i + 5] & 0xe0) >> 5;
sampleCount = ((buffer[i + 6] & 0x03) + 1) * 1024;
adtsFrameDuration = sampleCount * ONE_SECOND_IN_TS / ADTS_SAMPLING_FREQUENCIES[(buffer[i + 2] & 0x3c) >>> 2]; // If we don't have enough data to actually finish this ADTS frame,
// then we have to wait for more data
if (buffer.byteLength - i < frameLength) {
break;
} // Otherwise, deliver the complete AAC frame
this.trigger('data', {
pts: packet.pts + frameNum * adtsFrameDuration,
dts: packet.dts + frameNum * adtsFrameDuration,
sampleCount: sampleCount,
audioobjecttype: (buffer[i + 2] >>> 6 & 0x03) + 1,
channelcount: (buffer[i + 2] & 1) << 2 | (buffer[i + 3] & 0xc0) >>> 6,
samplerate: ADTS_SAMPLING_FREQUENCIES[(buffer[i + 2] & 0x3c) >>> 2],
samplingfrequencyindex: (buffer[i + 2] & 0x3c) >>> 2,
// assume ISO/IEC 14496-12 AudioSampleEntry default of 16
samplesize: 16,
// data is the frame without its header
data: buffer.subarray(i + 7 + protectionSkipBytes, i + frameLength)
});
frameNum++;
i += frameLength;
}
if (typeof skip === 'number') {
this.skipWarn_(skip, i);
skip = null;
} // remove processed bytes from the buffer.
buffer = buffer.subarray(i);
};
this.flush = function () {
frameNum = 0;
this.trigger('done');
};
this.reset = function () {
buffer = void 0;
this.trigger('reset');
};
this.endTimeline = function () {
buffer = void 0;
this.trigger('endedtimeline');
};
};
_AdtsStream.prototype = new Stream();
module.exports = _AdtsStream;

View file

@ -0,0 +1,571 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var Stream = require('../utils/stream.js');
var ExpGolomb = require('../utils/exp-golomb.js');
var _H264Stream, _NalByteStream;
var PROFILES_WITH_OPTIONAL_SPS_DATA;
/**
* Accepts a NAL unit byte stream and unpacks the embedded NAL units.
*/
_NalByteStream = function NalByteStream() {
var syncPoint = 0,
i,
buffer;
_NalByteStream.prototype.init.call(this);
/*
* Scans a byte stream and triggers a data event with the NAL units found.
* @param {Object} data Event received from H264Stream
* @param {Uint8Array} data.data The h264 byte stream to be scanned
*
* @see H264Stream.push
*/
this.push = function (data) {
var swapBuffer;
if (!buffer) {
buffer = data.data;
} else {
swapBuffer = new Uint8Array(buffer.byteLength + data.data.byteLength);
swapBuffer.set(buffer);
swapBuffer.set(data.data, buffer.byteLength);
buffer = swapBuffer;
}
var len = buffer.byteLength; // Rec. ITU-T H.264, Annex B
// scan for NAL unit boundaries
// a match looks like this:
// 0 0 1 .. NAL .. 0 0 1
// ^ sync point ^ i
// or this:
// 0 0 1 .. NAL .. 0 0 0
// ^ sync point ^ i
// advance the sync point to a NAL start, if necessary
for (; syncPoint < len - 3; syncPoint++) {
if (buffer[syncPoint + 2] === 1) {
// the sync point is properly aligned
i = syncPoint + 5;
break;
}
}
while (i < len) {
// look at the current byte to determine if we've hit the end of
// a NAL unit boundary
switch (buffer[i]) {
case 0:
// skip past non-sync sequences
if (buffer[i - 1] !== 0) {
i += 2;
break;
} else if (buffer[i - 2] !== 0) {
i++;
break;
} // deliver the NAL unit if it isn't empty
if (syncPoint + 3 !== i - 2) {
this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));
} // drop trailing zeroes
do {
i++;
} while (buffer[i] !== 1 && i < len);
syncPoint = i - 2;
i += 3;
break;
case 1:
// skip past non-sync sequences
if (buffer[i - 1] !== 0 || buffer[i - 2] !== 0) {
i += 3;
break;
} // deliver the NAL unit
this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));
syncPoint = i - 2;
i += 3;
break;
default:
// the current byte isn't a one or zero, so it cannot be part
// of a sync sequence
i += 3;
break;
}
} // filter out the NAL units that were delivered
buffer = buffer.subarray(syncPoint);
i -= syncPoint;
syncPoint = 0;
};
this.reset = function () {
buffer = null;
syncPoint = 0;
this.trigger('reset');
};
this.flush = function () {
// deliver the last buffered NAL unit
if (buffer && buffer.byteLength > 3) {
this.trigger('data', buffer.subarray(syncPoint + 3));
} // reset the stream state
buffer = null;
syncPoint = 0;
this.trigger('done');
};
this.endTimeline = function () {
this.flush();
this.trigger('endedtimeline');
};
};
_NalByteStream.prototype = new Stream(); // values of profile_idc that indicate additional fields are included in the SPS
// see Recommendation ITU-T H.264 (4/2013),
// 7.3.2.1.1 Sequence parameter set data syntax
PROFILES_WITH_OPTIONAL_SPS_DATA = {
100: true,
110: true,
122: true,
244: true,
44: true,
83: true,
86: true,
118: true,
128: true,
// TODO: the three profiles below don't
// appear to have sps data in the specification anymore?
138: true,
139: true,
134: true
};
/**
* Accepts input from a ElementaryStream and produces H.264 NAL unit data
* events.
*/
_H264Stream = function H264Stream() {
var nalByteStream = new _NalByteStream(),
self,
trackId,
currentPts,
currentDts,
discardEmulationPreventionBytes,
readSequenceParameterSet,
skipScalingList;
_H264Stream.prototype.init.call(this);
self = this;
/*
* Pushes a packet from a stream onto the NalByteStream
*
* @param {Object} packet - A packet received from a stream
* @param {Uint8Array} packet.data - The raw bytes of the packet
* @param {Number} packet.dts - Decode timestamp of the packet
* @param {Number} packet.pts - Presentation timestamp of the packet
* @param {Number} packet.trackId - The id of the h264 track this packet came from
* @param {('video'|'audio')} packet.type - The type of packet
*
*/
this.push = function (packet) {
if (packet.type !== 'video') {
return;
}
trackId = packet.trackId;
currentPts = packet.pts;
currentDts = packet.dts;
nalByteStream.push(packet);
};
/*
* Identify NAL unit types and pass on the NALU, trackId, presentation and decode timestamps
* for the NALUs to the next stream component.
* Also, preprocess caption and sequence parameter NALUs.
*
* @param {Uint8Array} data - A NAL unit identified by `NalByteStream.push`
* @see NalByteStream.push
*/
nalByteStream.on('data', function (data) {
var event = {
trackId: trackId,
pts: currentPts,
dts: currentDts,
data: data,
nalUnitTypeCode: data[0] & 0x1f
};
switch (event.nalUnitTypeCode) {
case 0x05:
event.nalUnitType = 'slice_layer_without_partitioning_rbsp_idr';
break;
case 0x06:
event.nalUnitType = 'sei_rbsp';
event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));
break;
case 0x07:
event.nalUnitType = 'seq_parameter_set_rbsp';
event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));
event.config = readSequenceParameterSet(event.escapedRBSP);
break;
case 0x08:
event.nalUnitType = 'pic_parameter_set_rbsp';
break;
case 0x09:
event.nalUnitType = 'access_unit_delimiter_rbsp';
break;
default:
break;
} // This triggers data on the H264Stream
self.trigger('data', event);
});
nalByteStream.on('done', function () {
self.trigger('done');
});
nalByteStream.on('partialdone', function () {
self.trigger('partialdone');
});
nalByteStream.on('reset', function () {
self.trigger('reset');
});
nalByteStream.on('endedtimeline', function () {
self.trigger('endedtimeline');
});
this.flush = function () {
nalByteStream.flush();
};
this.partialFlush = function () {
nalByteStream.partialFlush();
};
this.reset = function () {
nalByteStream.reset();
};
this.endTimeline = function () {
nalByteStream.endTimeline();
};
/**
* Advance the ExpGolomb decoder past a scaling list. The scaling
* list is optionally transmitted as part of a sequence parameter
* set and is not relevant to transmuxing.
* @param count {number} the number of entries in this scaling list
* @param expGolombDecoder {object} an ExpGolomb pointed to the
* start of a scaling list
* @see Recommendation ITU-T H.264, Section 7.3.2.1.1.1
*/
skipScalingList = function skipScalingList(count, expGolombDecoder) {
var lastScale = 8,
nextScale = 8,
j,
deltaScale;
for (j = 0; j < count; j++) {
if (nextScale !== 0) {
deltaScale = expGolombDecoder.readExpGolomb();
nextScale = (lastScale + deltaScale + 256) % 256;
}
lastScale = nextScale === 0 ? lastScale : nextScale;
}
};
/**
* Expunge any "Emulation Prevention" bytes from a "Raw Byte
* Sequence Payload"
* @param data {Uint8Array} the bytes of a RBSP from a NAL
* unit
* @return {Uint8Array} the RBSP without any Emulation
* Prevention Bytes
*/
discardEmulationPreventionBytes = function discardEmulationPreventionBytes(data) {
var length = data.byteLength,
emulationPreventionBytesPositions = [],
i = 1,
newLength,
newData; // Find all `Emulation Prevention Bytes`
while (i < length - 2) {
if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {
emulationPreventionBytesPositions.push(i + 2);
i += 2;
} else {
i++;
}
} // If no Emulation Prevention Bytes were found just return the original
// array
if (emulationPreventionBytesPositions.length === 0) {
return data;
} // Create a new array to hold the NAL unit data
newLength = length - emulationPreventionBytesPositions.length;
newData = new Uint8Array(newLength);
var sourceIndex = 0;
for (i = 0; i < newLength; sourceIndex++, i++) {
if (sourceIndex === emulationPreventionBytesPositions[0]) {
// Skip this byte
sourceIndex++; // Remove this position index
emulationPreventionBytesPositions.shift();
}
newData[i] = data[sourceIndex];
}
return newData;
};
/**
* Read a sequence parameter set and return some interesting video
* properties. A sequence parameter set is the H264 metadata that
* describes the properties of upcoming video frames.
* @param data {Uint8Array} the bytes of a sequence parameter set
* @return {object} an object with configuration parsed from the
* sequence parameter set, including the dimensions of the
* associated video frames.
*/
readSequenceParameterSet = function readSequenceParameterSet(data) {
var frameCropLeftOffset = 0,
frameCropRightOffset = 0,
frameCropTopOffset = 0,
frameCropBottomOffset = 0,
sarScale = 1,
expGolombDecoder,
profileIdc,
levelIdc,
profileCompatibility,
chromaFormatIdc,
picOrderCntType,
numRefFramesInPicOrderCntCycle,
picWidthInMbsMinus1,
picHeightInMapUnitsMinus1,
frameMbsOnlyFlag,
scalingListCount,
sarRatio = [1, 1],
aspectRatioIdc,
i;
expGolombDecoder = new ExpGolomb(data);
profileIdc = expGolombDecoder.readUnsignedByte(); // profile_idc
profileCompatibility = expGolombDecoder.readUnsignedByte(); // constraint_set[0-5]_flag
levelIdc = expGolombDecoder.readUnsignedByte(); // level_idc u(8)
expGolombDecoder.skipUnsignedExpGolomb(); // seq_parameter_set_id
// some profiles have more optional data we don't need
if (PROFILES_WITH_OPTIONAL_SPS_DATA[profileIdc]) {
chromaFormatIdc = expGolombDecoder.readUnsignedExpGolomb();
if (chromaFormatIdc === 3) {
expGolombDecoder.skipBits(1); // separate_colour_plane_flag
}
expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_luma_minus8
expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_chroma_minus8
expGolombDecoder.skipBits(1); // qpprime_y_zero_transform_bypass_flag
if (expGolombDecoder.readBoolean()) {
// seq_scaling_matrix_present_flag
scalingListCount = chromaFormatIdc !== 3 ? 8 : 12;
for (i = 0; i < scalingListCount; i++) {
if (expGolombDecoder.readBoolean()) {
// seq_scaling_list_present_flag[ i ]
if (i < 6) {
skipScalingList(16, expGolombDecoder);
} else {
skipScalingList(64, expGolombDecoder);
}
}
}
}
}
expGolombDecoder.skipUnsignedExpGolomb(); // log2_max_frame_num_minus4
picOrderCntType = expGolombDecoder.readUnsignedExpGolomb();
if (picOrderCntType === 0) {
expGolombDecoder.readUnsignedExpGolomb(); // log2_max_pic_order_cnt_lsb_minus4
} else if (picOrderCntType === 1) {
expGolombDecoder.skipBits(1); // delta_pic_order_always_zero_flag
expGolombDecoder.skipExpGolomb(); // offset_for_non_ref_pic
expGolombDecoder.skipExpGolomb(); // offset_for_top_to_bottom_field
numRefFramesInPicOrderCntCycle = expGolombDecoder.readUnsignedExpGolomb();
for (i = 0; i < numRefFramesInPicOrderCntCycle; i++) {
expGolombDecoder.skipExpGolomb(); // offset_for_ref_frame[ i ]
}
}
expGolombDecoder.skipUnsignedExpGolomb(); // max_num_ref_frames
expGolombDecoder.skipBits(1); // gaps_in_frame_num_value_allowed_flag
picWidthInMbsMinus1 = expGolombDecoder.readUnsignedExpGolomb();
picHeightInMapUnitsMinus1 = expGolombDecoder.readUnsignedExpGolomb();
frameMbsOnlyFlag = expGolombDecoder.readBits(1);
if (frameMbsOnlyFlag === 0) {
expGolombDecoder.skipBits(1); // mb_adaptive_frame_field_flag
}
expGolombDecoder.skipBits(1); // direct_8x8_inference_flag
if (expGolombDecoder.readBoolean()) {
// frame_cropping_flag
frameCropLeftOffset = expGolombDecoder.readUnsignedExpGolomb();
frameCropRightOffset = expGolombDecoder.readUnsignedExpGolomb();
frameCropTopOffset = expGolombDecoder.readUnsignedExpGolomb();
frameCropBottomOffset = expGolombDecoder.readUnsignedExpGolomb();
}
if (expGolombDecoder.readBoolean()) {
// vui_parameters_present_flag
if (expGolombDecoder.readBoolean()) {
// aspect_ratio_info_present_flag
aspectRatioIdc = expGolombDecoder.readUnsignedByte();
switch (aspectRatioIdc) {
case 1:
sarRatio = [1, 1];
break;
case 2:
sarRatio = [12, 11];
break;
case 3:
sarRatio = [10, 11];
break;
case 4:
sarRatio = [16, 11];
break;
case 5:
sarRatio = [40, 33];
break;
case 6:
sarRatio = [24, 11];
break;
case 7:
sarRatio = [20, 11];
break;
case 8:
sarRatio = [32, 11];
break;
case 9:
sarRatio = [80, 33];
break;
case 10:
sarRatio = [18, 11];
break;
case 11:
sarRatio = [15, 11];
break;
case 12:
sarRatio = [64, 33];
break;
case 13:
sarRatio = [160, 99];
break;
case 14:
sarRatio = [4, 3];
break;
case 15:
sarRatio = [3, 2];
break;
case 16:
sarRatio = [2, 1];
break;
case 255:
{
sarRatio = [expGolombDecoder.readUnsignedByte() << 8 | expGolombDecoder.readUnsignedByte(), expGolombDecoder.readUnsignedByte() << 8 | expGolombDecoder.readUnsignedByte()];
break;
}
}
if (sarRatio) {
sarScale = sarRatio[0] / sarRatio[1];
}
}
}
return {
profileIdc: profileIdc,
levelIdc: levelIdc,
profileCompatibility: profileCompatibility,
width: (picWidthInMbsMinus1 + 1) * 16 - frameCropLeftOffset * 2 - frameCropRightOffset * 2,
height: (2 - frameMbsOnlyFlag) * (picHeightInMapUnitsMinus1 + 1) * 16 - frameCropTopOffset * 2 - frameCropBottomOffset * 2,
// sar is sample aspect ratio
sarRatio: sarRatio
};
};
};
_H264Stream.prototype = new Stream();
module.exports = {
H264Stream: _H264Stream,
NalByteStream: _NalByteStream
};

View file

@ -0,0 +1,12 @@
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
module.exports = {
Adts: require('./adts'),
h264: require('./h264')
};

View file

@ -0,0 +1,5 @@
"use strict";
// constants
var AUDIO_PROPERTIES = ['audioobjecttype', 'channelcount', 'samplerate', 'samplingfrequencyindex', 'samplesize'];
module.exports = AUDIO_PROPERTIES;

View file

@ -0,0 +1,4 @@
"use strict";
var VIDEO_PROPERTIES = ['width', 'height', 'profileIdc', 'levelIdc', 'profileCompatibility', 'sarRatio'];
module.exports = VIDEO_PROPERTIES;

View file

@ -0,0 +1,53 @@
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
var highPrefix = [33, 16, 5, 32, 164, 27];
var lowPrefix = [33, 65, 108, 84, 1, 2, 4, 8, 168, 2, 4, 8, 17, 191, 252];
var zeroFill = function zeroFill(count) {
var a = [];
while (count--) {
a.push(0);
}
return a;
};
var makeTable = function makeTable(metaTable) {
return Object.keys(metaTable).reduce(function (obj, key) {
obj[key] = new Uint8Array(metaTable[key].reduce(function (arr, part) {
return arr.concat(part);
}, []));
return obj;
}, {});
};
var silence;
module.exports = function () {
if (!silence) {
// Frames-of-silence to use for filling in missing AAC frames
var coneOfSilence = {
96000: [highPrefix, [227, 64], zeroFill(154), [56]],
88200: [highPrefix, [231], zeroFill(170), [56]],
64000: [highPrefix, [248, 192], zeroFill(240), [56]],
48000: [highPrefix, [255, 192], zeroFill(268), [55, 148, 128], zeroFill(54), [112]],
44100: [highPrefix, [255, 192], zeroFill(268), [55, 163, 128], zeroFill(84), [112]],
32000: [highPrefix, [255, 192], zeroFill(268), [55, 234], zeroFill(226), [112]],
24000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 112], zeroFill(126), [224]],
16000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 255], zeroFill(269), [223, 108], zeroFill(195), [1, 192]],
12000: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 253, 128], zeroFill(259), [56]],
11025: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 255, 192], zeroFill(268), [55, 175, 128], zeroFill(108), [112]],
8000: [lowPrefix, zeroFill(268), [3, 121, 16], zeroFill(47), [7]]
};
silence = makeTable(coneOfSilence);
}
return silence;
};

View file

@ -0,0 +1,147 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var Stream = require('../utils/stream.js');
/**
* The final stage of the transmuxer that emits the flv tags
* for audio, video, and metadata. Also translates in time and
* outputs caption data and id3 cues.
*/
var CoalesceStream = function CoalesceStream(options) {
// Number of Tracks per output segment
// If greater than 1, we combine multiple
// tracks into a single segment
this.numberOfTracks = 0;
this.metadataStream = options.metadataStream;
this.videoTags = [];
this.audioTags = [];
this.videoTrack = null;
this.audioTrack = null;
this.pendingCaptions = [];
this.pendingMetadata = [];
this.pendingTracks = 0;
this.processedTracks = 0;
CoalesceStream.prototype.init.call(this); // Take output from multiple
this.push = function (output) {
// buffer incoming captions until the associated video segment
// finishes
if (output.content || output.text) {
return this.pendingCaptions.push(output);
} // buffer incoming id3 tags until the final flush
if (output.frames) {
return this.pendingMetadata.push(output);
}
if (output.track.type === 'video') {
this.videoTrack = output.track;
this.videoTags = output.tags;
this.pendingTracks++;
}
if (output.track.type === 'audio') {
this.audioTrack = output.track;
this.audioTags = output.tags;
this.pendingTracks++;
}
};
};
CoalesceStream.prototype = new Stream();
CoalesceStream.prototype.flush = function (flushSource) {
var id3,
caption,
i,
timelineStartPts,
event = {
tags: {},
captions: [],
captionStreams: {},
metadata: []
};
if (this.pendingTracks < this.numberOfTracks) {
if (flushSource !== 'VideoSegmentStream' && flushSource !== 'AudioSegmentStream') {
// Return because we haven't received a flush from a data-generating
// portion of the segment (meaning that we have only received meta-data
// or captions.)
return;
} else if (this.pendingTracks === 0) {
// In the case where we receive a flush without any data having been
// received we consider it an emitted track for the purposes of coalescing
// `done` events.
// We do this for the case where there is an audio and video track in the
// segment but no audio data. (seen in several playlists with alternate
// audio tracks and no audio present in the main TS segments.)
this.processedTracks++;
if (this.processedTracks < this.numberOfTracks) {
return;
}
}
}
this.processedTracks += this.pendingTracks;
this.pendingTracks = 0;
if (this.processedTracks < this.numberOfTracks) {
return;
}
if (this.videoTrack) {
timelineStartPts = this.videoTrack.timelineStartInfo.pts;
} else if (this.audioTrack) {
timelineStartPts = this.audioTrack.timelineStartInfo.pts;
}
event.tags.videoTags = this.videoTags;
event.tags.audioTags = this.audioTags; // Translate caption PTS times into second offsets into the
// video timeline for the segment, and add track info
for (i = 0; i < this.pendingCaptions.length; i++) {
caption = this.pendingCaptions[i];
caption.startTime = caption.startPts - timelineStartPts;
caption.startTime /= 90e3;
caption.endTime = caption.endPts - timelineStartPts;
caption.endTime /= 90e3;
event.captionStreams[caption.stream] = true;
event.captions.push(caption);
} // Translate ID3 frame PTS times into second offsets into the
// video timeline for the segment
for (i = 0; i < this.pendingMetadata.length; i++) {
id3 = this.pendingMetadata[i];
id3.cueTime = id3.pts - timelineStartPts;
id3.cueTime /= 90e3;
event.metadata.push(id3);
} // We add this to every single emitted segment even though we only need
// it for the first
event.metadata.dispatchType = this.metadataStream.dispatchType; // Reset stream state
this.videoTrack = null;
this.audioTrack = null;
this.videoTags = [];
this.audioTags = [];
this.pendingCaptions.length = 0;
this.pendingMetadata.length = 0;
this.pendingTracks = 0;
this.processedTracks = 0; // Emit the final segment
this.trigger('data', event);
this.trigger('done');
};
module.exports = CoalesceStream;
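// A minimal wiring sketch (the segment-stream and metadata-stream names are
// illustrative): numberOfTracks is set by the caller, and flush() emits a
// combined segment only once every track's segment stream has flushed.
var coalesceStream = new CoalesceStream({ metadataStream: metadataStream });
coalesceStream.numberOfTracks = 2;
videoSegmentStream.pipe(coalesceStream);
audioSegmentStream.pipe(coalesceStream);
coalesceStream.on('data', function (segment) {
  // segment.tags.videoTags / segment.tags.audioTags, plus captions and metadata
});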

View file

@@ -0,0 +1,62 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var FlvTag = require('./flv-tag.js'); // For information on the FLV format, see
// http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf.
// Technically, this function returns the header and a metadata FLV tag
// if duration is greater than zero
// duration in seconds
// @return {Uint8Array} the bytes of the FLV header
var getFlvHeader = function getFlvHeader(duration, audio, video) {
// :ByteArray {
var headBytes = new Uint8Array(3 + 1 + 1 + 4),
head = new DataView(headBytes.buffer),
metadata,
result,
metadataLength; // default arguments
duration = duration || 0;
audio = audio === undefined ? true : audio;
video = video === undefined ? true : video; // signature
head.setUint8(0, 0x46); // 'F'
head.setUint8(1, 0x4c); // 'L'
head.setUint8(2, 0x56); // 'V'
// version
head.setUint8(3, 0x01); // flags
head.setUint8(4, (audio ? 0x04 : 0x00) | (video ? 0x01 : 0x00)); // data offset, should be 9 for FLV v1
head.setUint32(5, headBytes.byteLength); // init the first FLV tag
if (duration <= 0) {
// no duration available so just write the first field of the first
// FLV tag
result = new Uint8Array(headBytes.byteLength + 4);
result.set(headBytes);
result.set([0, 0, 0, 0], headBytes.byteLength);
return result;
} // write out the duration metadata tag
metadata = new FlvTag(FlvTag.METADATA_TAG);
metadata.pts = metadata.dts = 0;
metadata.writeMetaDataDouble('duration', duration);
metadataLength = metadata.finalize().length;
result = new Uint8Array(headBytes.byteLength + metadataLength);
result.set(headBytes);
result.set(metadata.bytes, headBytes.byteLength); // copy the finalized metadata tag in after the header
return result;
};
module.exports = getFlvHeader;
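// A usage sketch: with duration > 0 the returned bytes also include the
// finalized onMetaData tag; with duration 0 only the 9-byte header plus the
// 4-byte PreviousTagSize0 field is returned.
var header = getFlvHeader(10, true, true); // 10s, audio + video
// header[0..2] === 'F','L','V'; header[4] === 0x05 (audio | video flags)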

View file

@@ -0,0 +1,372 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* An object that stores the bytes of an FLV tag and methods for
* querying and manipulating that data.
* @see http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf
*/
'use strict';
var _FlvTag; // (type:uint, extraData:Boolean = false) extends ByteArray
_FlvTag = function FlvTag(type, extraData) {
var // Counter if this is a metadata tag, nal start marker if this is a video
// tag. unused if this is an audio tag
adHoc = 0,
// :uint
// The default size is 16kb but this is not enough to hold iframe
// data and the resizing algorithm costs a bit so we create a larger
// starting buffer for video tags
bufferStartSize = 16384,
// checks whether the FLV tag has enough capacity to accept the proposed
// write and re-allocates the internal buffers if necessary
prepareWrite = function prepareWrite(flv, count) {
var bytes,
minLength = flv.position + count;
if (minLength < flv.bytes.byteLength) {
// there's enough capacity so do nothing
return;
} // allocate a new buffer and copy over the data that will not be modified
bytes = new Uint8Array(minLength * 2);
bytes.set(flv.bytes.subarray(0, flv.position), 0);
flv.bytes = bytes;
flv.view = new DataView(flv.bytes.buffer);
},
// commonly used metadata properties
widthBytes = _FlvTag.widthBytes || new Uint8Array('width'.length),
heightBytes = _FlvTag.heightBytes || new Uint8Array('height'.length),
videocodecidBytes = _FlvTag.videocodecidBytes || new Uint8Array('videocodecid'.length),
i;
if (!_FlvTag.widthBytes) {
// calculating the bytes of common metadata names ahead of time makes the
// corresponding writes faster because we don't have to loop over the
// characters
// re-test with test/perf.html if you're planning on changing this
for (i = 0; i < 'width'.length; i++) {
widthBytes[i] = 'width'.charCodeAt(i);
}
for (i = 0; i < 'height'.length; i++) {
heightBytes[i] = 'height'.charCodeAt(i);
}
for (i = 0; i < 'videocodecid'.length; i++) {
videocodecidBytes[i] = 'videocodecid'.charCodeAt(i);
}
_FlvTag.widthBytes = widthBytes;
_FlvTag.heightBytes = heightBytes;
_FlvTag.videocodecidBytes = videocodecidBytes;
}
this.keyFrame = false; // :Boolean
switch (type) {
case _FlvTag.VIDEO_TAG:
this.length = 16; // Start the buffer at 256k
bufferStartSize *= 6;
break;
case _FlvTag.AUDIO_TAG:
this.length = 13;
this.keyFrame = true;
break;
case _FlvTag.METADATA_TAG:
this.length = 29;
this.keyFrame = true;
break;
default:
throw new Error('Unknown FLV tag type');
}
this.bytes = new Uint8Array(bufferStartSize);
this.view = new DataView(this.bytes.buffer);
this.bytes[0] = type;
this.position = this.length;
this.keyFrame = extraData; // Defaults to false
// presentation timestamp
this.pts = 0; // decoder timestamp
this.dts = 0; // ByteArray#writeBytes(bytes:ByteArray, offset:uint = 0, length:uint = 0)
this.writeBytes = function (bytes, offset, length) {
var start = offset || 0,
end;
length = length || bytes.byteLength;
end = start + length;
prepareWrite(this, length);
this.bytes.set(bytes.subarray(start, end), this.position);
this.position += length;
this.length = Math.max(this.length, this.position);
}; // ByteArray#writeByte(value:int):void
this.writeByte = function (byte) {
prepareWrite(this, 1);
this.bytes[this.position] = byte;
this.position++;
this.length = Math.max(this.length, this.position);
}; // ByteArray#writeShort(value:int):void
this.writeShort = function (short) {
prepareWrite(this, 2);
this.view.setUint16(this.position, short);
this.position += 2;
this.length = Math.max(this.length, this.position);
}; // Negative index into array
// (pos:uint):int
this.negIndex = function (pos) {
return this.bytes[this.length - pos];
}; // The functions below ONLY work when this[0] == VIDEO_TAG.
// We are not going to check for that because we don't want the overhead
// (nal:ByteArray = null):int
this.nalUnitSize = function () {
if (adHoc === 0) {
return 0;
}
return this.length - (adHoc + 4);
};
this.startNalUnit = function () {
// remember position and add 4 bytes
if (adHoc > 0) {
throw new Error('Attempted to create new NAL without closing the old one');
} // reserve 4 bytes for nal unit size
adHoc = this.length;
this.length += 4;
this.position = this.length;
}; // (nal:ByteArray = null):void
this.endNalUnit = function (nalContainer) {
var nalStart, // :uint
nalLength; // :uint
// Rewind to the marker and write the size
if (this.length === adHoc + 4) {
// we started a nal unit, but didn't write one, so roll back the 4 byte size value
this.length -= 4;
} else if (adHoc > 0) {
nalStart = adHoc + 4;
nalLength = this.length - nalStart;
this.position = adHoc;
this.view.setUint32(this.position, nalLength);
this.position = this.length;
if (nalContainer) {
// Add the tag to the NAL unit
nalContainer.push(this.bytes.subarray(nalStart, nalStart + nalLength));
}
}
adHoc = 0;
};
/**
* Write out a 64-bit floating point valued metadata property. This method is
* called frequently during a typical parse and needs to be fast.
*/
// (key:String, val:Number):void
this.writeMetaDataDouble = function (key, val) {
var i;
prepareWrite(this, 2 + key.length + 9); // write size of property name
this.view.setUint16(this.position, key.length);
this.position += 2; // this next part looks terrible but it improves parser throughput by
// 10kB/s in my testing
// write property name
if (key === 'width') {
this.bytes.set(widthBytes, this.position);
this.position += 5;
} else if (key === 'height') {
this.bytes.set(heightBytes, this.position);
this.position += 6;
} else if (key === 'videocodecid') {
this.bytes.set(videocodecidBytes, this.position);
this.position += 12;
} else {
for (i = 0; i < key.length; i++) {
this.bytes[this.position] = key.charCodeAt(i);
this.position++;
}
} // skip null byte
this.position++; // write property value
this.view.setFloat64(this.position, val);
this.position += 8; // update flv tag length
this.length = Math.max(this.length, this.position);
++adHoc;
}; // (key:String, val:Boolean):void
this.writeMetaDataBoolean = function (key, val) {
var i;
prepareWrite(this, 2);
this.view.setUint16(this.position, key.length);
this.position += 2;
for (i = 0; i < key.length; i++) {
// if key.charCodeAt(i) >= 255, handle error
prepareWrite(this, 1);
this.bytes[this.position] = key.charCodeAt(i);
this.position++;
}
prepareWrite(this, 2);
this.view.setUint8(this.position, 0x01);
this.position++;
this.view.setUint8(this.position, val ? 0x01 : 0x00);
this.position++;
this.length = Math.max(this.length, this.position);
++adHoc;
}; // ():ByteArray
this.finalize = function () {
var dtsDelta, // :int
len; // :int
switch (this.bytes[0]) {
// Video Data
case _FlvTag.VIDEO_TAG:
// We only support AVC, 1 = key frame (for AVC, a seekable
// frame), 2 = inter frame (for AVC, a non-seekable frame)
this.bytes[11] = (this.keyFrame || extraData ? 0x10 : 0x20) | 0x07;
this.bytes[12] = extraData ? 0x00 : 0x01;
dtsDelta = this.pts - this.dts;
this.bytes[13] = (dtsDelta & 0x00FF0000) >>> 16;
this.bytes[14] = (dtsDelta & 0x0000FF00) >>> 8;
this.bytes[15] = (dtsDelta & 0x000000FF) >>> 0;
break;
case _FlvTag.AUDIO_TAG:
this.bytes[11] = 0xAF; // 44 kHz, 16-bit stereo
this.bytes[12] = extraData ? 0x00 : 0x01;
break;
case _FlvTag.METADATA_TAG:
this.position = 11;
this.view.setUint8(this.position, 0x02); // String type
this.position++;
this.view.setUint16(this.position, 0x0A); // 10 Bytes
this.position += 2; // set "onMetaData"
this.bytes.set([0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61], this.position);
this.position += 10;
this.bytes[this.position] = 0x08; // Array type
this.position++;
this.view.setUint32(this.position, adHoc);
this.position = this.length;
this.bytes.set([0, 0, 9], this.position);
this.position += 3; // End Data Tag
this.length = this.position;
break;
}
len = this.length - 11; // write the DataSize field
this.bytes[1] = (len & 0x00FF0000) >>> 16;
this.bytes[2] = (len & 0x0000FF00) >>> 8;
this.bytes[3] = (len & 0x000000FF) >>> 0; // write the Timestamp
this.bytes[4] = (this.dts & 0x00FF0000) >>> 16;
this.bytes[5] = (this.dts & 0x0000FF00) >>> 8;
this.bytes[6] = (this.dts & 0x000000FF) >>> 0;
this.bytes[7] = (this.dts & 0xFF000000) >>> 24; // write the StreamID
this.bytes[8] = 0;
this.bytes[9] = 0;
this.bytes[10] = 0; // Sometimes we're at the end of the view and have one slot left to write a
// uint32, so prepareWrite with a count of 4, since the view is uint8
prepareWrite(this, 4);
this.view.setUint32(this.length, this.length);
this.length += 4;
this.position += 4; // trim down the byte buffer to what is actually being used
this.bytes = this.bytes.subarray(0, this.length);
this.frameTime = _FlvTag.frameTime(this.bytes); // if bytes.byteLength isn't equal to this.length, handle error
return this;
};
};
_FlvTag.AUDIO_TAG = 0x08; // == 8, :uint
_FlvTag.VIDEO_TAG = 0x09; // == 9, :uint
_FlvTag.METADATA_TAG = 0x12; // == 18, :uint
// (tag:ByteArray):Boolean {
_FlvTag.isAudioFrame = function (tag) {
return _FlvTag.AUDIO_TAG === tag[0];
}; // (tag:ByteArray):Boolean {
_FlvTag.isVideoFrame = function (tag) {
return _FlvTag.VIDEO_TAG === tag[0];
}; // (tag:ByteArray):Boolean {
_FlvTag.isMetaData = function (tag) {
return _FlvTag.METADATA_TAG === tag[0];
}; // (tag:ByteArray):Boolean {
_FlvTag.isKeyFrame = function (tag) {
if (_FlvTag.isVideoFrame(tag)) {
return tag[11] === 0x17;
}
if (_FlvTag.isAudioFrame(tag)) {
return true;
}
if (_FlvTag.isMetaData(tag)) {
return true;
}
return false;
}; // (tag:ByteArray):uint {
_FlvTag.frameTime = function (tag) {
var pts = tag[4] << 16; // :uint
pts |= tag[5] << 8;
pts |= tag[6] << 0;
pts |= tag[7] << 24;
return pts;
};
module.exports = _FlvTag;
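// A minimal construction sketch: write one metadata property and finalize.
// finalize() fills in DataSize, Timestamp and StreamID, appends the
// PreviousTagSize field, and trims the buffer.
var tag = new _FlvTag(_FlvTag.METADATA_TAG);
tag.pts = tag.dts = 0;
tag.writeMetaDataDouble('duration', 10);
var tagBytes = tag.finalize().bytes; // Uint8Array starting with 0x12 (METADATA_TAG)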

View file

@@ -0,0 +1,13 @@
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
module.exports = {
tag: require('./flv-tag'),
Transmuxer: require('./transmuxer'),
getFlvHeader: require('./flv-header')
};

View file

@@ -0,0 +1,30 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var TagList = function TagList() {
var self = this;
this.list = [];
this.push = function (tag) {
this.list.push({
bytes: tag.bytes,
dts: tag.dts,
pts: tag.pts,
keyFrame: tag.keyFrame,
metaDataTag: tag.metaDataTag
});
};
Object.defineProperty(this, 'length', {
get: function get() {
return self.list.length;
}
});
};
module.exports = TagList;

View file

@@ -0,0 +1,425 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var Stream = require('../utils/stream.js');
var FlvTag = require('./flv-tag.js');
var m2ts = require('../m2ts/m2ts.js');
var AdtsStream = require('../codecs/adts.js');
var H264Stream = require('../codecs/h264').H264Stream;
var CoalesceStream = require('./coalesce-stream.js');
var TagList = require('./tag-list.js');
var _Transmuxer, _VideoSegmentStream, _AudioSegmentStream, collectTimelineInfo, metaDataTag, extraDataTag;
/**
* Store information about the start and end of the track and the
* duration for each frame/sample we process in order to calculate
* the baseMediaDecodeTime
*/
collectTimelineInfo = function collectTimelineInfo(track, data) {
if (typeof data.pts === 'number') {
if (track.timelineStartInfo.pts === undefined) {
track.timelineStartInfo.pts = data.pts;
} else {
track.timelineStartInfo.pts = Math.min(track.timelineStartInfo.pts, data.pts);
}
}
if (typeof data.dts === 'number') {
if (track.timelineStartInfo.dts === undefined) {
track.timelineStartInfo.dts = data.dts;
} else {
track.timelineStartInfo.dts = Math.min(track.timelineStartInfo.dts, data.dts);
}
}
};
metaDataTag = function metaDataTag(track, pts) {
var tag = new FlvTag(FlvTag.METADATA_TAG); // :FlvTag
tag.dts = pts;
tag.pts = pts;
tag.writeMetaDataDouble('videocodecid', 7);
tag.writeMetaDataDouble('width', track.width);
tag.writeMetaDataDouble('height', track.height);
return tag;
};
extraDataTag = function extraDataTag(track, pts) {
var i,
tag = new FlvTag(FlvTag.VIDEO_TAG, true);
tag.dts = pts;
tag.pts = pts;
tag.writeByte(0x01); // version
tag.writeByte(track.profileIdc); // profile
tag.writeByte(track.profileCompatibility); // compatibility
tag.writeByte(track.levelIdc); // level
tag.writeByte(0xFC | 0x03); // reserved (6 bits), NALU length size - 1 (2 bits)
tag.writeByte(0xE0 | 0x01); // reserved (3 bits), num of SPS (5 bits)
tag.writeShort(track.sps[0].length); // data of SPS
tag.writeBytes(track.sps[0]); // SPS
tag.writeByte(track.pps.length); // num of PPS (will there ever be more than 1 PPS?)
for (i = 0; i < track.pps.length; ++i) {
tag.writeShort(track.pps[i].length); // 2 bytes for length of PPS
tag.writeBytes(track.pps[i]); // data of PPS
}
return tag;
};
/**
* Constructs a single-track, media segment from AAC data
* events. The output of this stream can be fed to flash.
*/
_AudioSegmentStream = function AudioSegmentStream(track) {
var adtsFrames = [],
videoKeyFrames = [],
oldExtraData;
_AudioSegmentStream.prototype.init.call(this);
this.push = function (data) {
collectTimelineInfo(track, data);
if (track) {
track.audioobjecttype = data.audioobjecttype;
track.channelcount = data.channelcount;
track.samplerate = data.samplerate;
track.samplingfrequencyindex = data.samplingfrequencyindex;
track.samplesize = data.samplesize;
track.extraData = track.audioobjecttype << 11 | track.samplingfrequencyindex << 7 | track.channelcount << 3;
}
data.pts = Math.round(data.pts / 90);
data.dts = Math.round(data.dts / 90); // buffer audio data until flush() is called
adtsFrames.push(data);
};
this.flush = function () {
var currentFrame,
adtsFrame,
lastMetaPts,
tags = new TagList(); // return early if no audio data has been observed
if (adtsFrames.length === 0) {
this.trigger('done', 'AudioSegmentStream');
return;
}
lastMetaPts = -Infinity;
while (adtsFrames.length) {
currentFrame = adtsFrames.shift(); // write out a metadata frame at every video key frame
if (videoKeyFrames.length && currentFrame.pts >= videoKeyFrames[0]) {
lastMetaPts = videoKeyFrames.shift();
this.writeMetaDataTags(tags, lastMetaPts);
} // also write out metadata tags every 1 second so that the decoder
// is re-initialized quickly after seeking into a different
// audio configuration.
if (track.extraData !== oldExtraData || currentFrame.pts - lastMetaPts >= 1000) {
this.writeMetaDataTags(tags, currentFrame.pts);
oldExtraData = track.extraData;
lastMetaPts = currentFrame.pts;
}
adtsFrame = new FlvTag(FlvTag.AUDIO_TAG);
adtsFrame.pts = currentFrame.pts;
adtsFrame.dts = currentFrame.dts;
adtsFrame.writeBytes(currentFrame.data);
tags.push(adtsFrame.finalize());
}
videoKeyFrames.length = 0;
oldExtraData = null;
this.trigger('data', {
track: track,
tags: tags.list
});
this.trigger('done', 'AudioSegmentStream');
};
this.writeMetaDataTags = function (tags, pts) {
var adtsFrame;
adtsFrame = new FlvTag(FlvTag.METADATA_TAG); // For audio, DTS is always the same as PTS. We want to set the DTS
// however so we can compare with video DTS to determine approximate
// packet order
adtsFrame.pts = pts;
adtsFrame.dts = pts; // AAC is always 10
adtsFrame.writeMetaDataDouble('audiocodecid', 10);
adtsFrame.writeMetaDataBoolean('stereo', track.channelcount === 2);
adtsFrame.writeMetaDataDouble('audiosamplerate', track.samplerate); // Is AAC always 16 bit?
adtsFrame.writeMetaDataDouble('audiosamplesize', 16);
tags.push(adtsFrame.finalize());
adtsFrame = new FlvTag(FlvTag.AUDIO_TAG, true); // For audio, DTS is always the same as PTS. We want to set the DTS
// however so we can compare with video DTS to determine approximate
// packet order
adtsFrame.pts = pts;
adtsFrame.dts = pts;
adtsFrame.view.setUint16(adtsFrame.position, track.extraData);
adtsFrame.position += 2;
adtsFrame.length = Math.max(adtsFrame.length, adtsFrame.position);
tags.push(adtsFrame.finalize());
};
this.onVideoKeyFrame = function (pts) {
videoKeyFrames.push(pts);
};
};
_AudioSegmentStream.prototype = new Stream();
/**
* Store FlvTags for the h264 stream
* @param track {object} track metadata configuration
*/
_VideoSegmentStream = function VideoSegmentStream(track) {
var nalUnits = [],
config,
h264Frame;
_VideoSegmentStream.prototype.init.call(this);
this.finishFrame = function (tags, frame) {
if (!frame) {
return;
} // Check if keyframe and the length of tags.
// This makes sure we write metadata on the first frame of a segment.
if (config && track && track.newMetadata && (frame.keyFrame || tags.length === 0)) {
// Push extra data on every IDR frame in case we did a stream change + seek
var metaTag = metaDataTag(config, frame.dts).finalize();
var extraTag = extraDataTag(track, frame.dts).finalize();
metaTag.metaDataTag = extraTag.metaDataTag = true;
tags.push(metaTag);
tags.push(extraTag);
track.newMetadata = false;
this.trigger('keyframe', frame.dts);
}
frame.endNalUnit();
tags.push(frame.finalize());
h264Frame = null;
};
this.push = function (data) {
collectTimelineInfo(track, data);
data.pts = Math.round(data.pts / 90);
data.dts = Math.round(data.dts / 90); // buffer video until flush() is called
nalUnits.push(data);
};
this.flush = function () {
var currentNal,
tags = new TagList(); // Throw away nalUnits at the start of the byte stream until we find
// the first AUD
while (nalUnits.length) {
if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {
break;
}
nalUnits.shift();
} // return early if no video data has been observed
if (nalUnits.length === 0) {
this.trigger('done', 'VideoSegmentStream');
return;
}
while (nalUnits.length) {
currentNal = nalUnits.shift(); // record the track config
if (currentNal.nalUnitType === 'seq_parameter_set_rbsp') {
track.newMetadata = true;
config = currentNal.config;
track.width = config.width;
track.height = config.height;
track.sps = [currentNal.data];
track.profileIdc = config.profileIdc;
track.levelIdc = config.levelIdc;
track.profileCompatibility = config.profileCompatibility;
h264Frame.endNalUnit();
} else if (currentNal.nalUnitType === 'pic_parameter_set_rbsp') {
track.newMetadata = true;
track.pps = [currentNal.data];
h264Frame.endNalUnit();
} else if (currentNal.nalUnitType === 'access_unit_delimiter_rbsp') {
if (h264Frame) {
this.finishFrame(tags, h264Frame);
}
h264Frame = new FlvTag(FlvTag.VIDEO_TAG);
h264Frame.pts = currentNal.pts;
h264Frame.dts = currentNal.dts;
} else {
if (currentNal.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') {
// the current sample is a key frame
h264Frame.keyFrame = true;
}
h264Frame.endNalUnit();
}
h264Frame.startNalUnit();
h264Frame.writeBytes(currentNal.data);
}
if (h264Frame) {
this.finishFrame(tags, h264Frame);
}
this.trigger('data', {
track: track,
tags: tags.list
}); // Continue with the flush process now
this.trigger('done', 'VideoSegmentStream');
};
};
_VideoSegmentStream.prototype = new Stream();
/**
* An object that incrementally transmuxes MPEG2 Transport Stream
* chunks into an FLV.
*/
_Transmuxer = function Transmuxer(options) {
var self = this,
packetStream,
parseStream,
elementaryStream,
videoTimestampRolloverStream,
audioTimestampRolloverStream,
timedMetadataTimestampRolloverStream,
adtsStream,
h264Stream,
videoSegmentStream,
audioSegmentStream,
captionStream,
coalesceStream;
_Transmuxer.prototype.init.call(this);
options = options || {}; // expose the metadata stream
this.metadataStream = new m2ts.MetadataStream();
options.metadataStream = this.metadataStream; // set up the parsing pipeline
packetStream = new m2ts.TransportPacketStream();
parseStream = new m2ts.TransportParseStream();
elementaryStream = new m2ts.ElementaryStream();
videoTimestampRolloverStream = new m2ts.TimestampRolloverStream('video');
audioTimestampRolloverStream = new m2ts.TimestampRolloverStream('audio');
timedMetadataTimestampRolloverStream = new m2ts.TimestampRolloverStream('timed-metadata');
adtsStream = new AdtsStream();
h264Stream = new H264Stream();
coalesceStream = new CoalesceStream(options); // disassemble MPEG2-TS packets into elementary streams
packetStream.pipe(parseStream).pipe(elementaryStream); // !!THIS ORDER IS IMPORTANT!!
// demux the streams
elementaryStream.pipe(videoTimestampRolloverStream).pipe(h264Stream);
elementaryStream.pipe(audioTimestampRolloverStream).pipe(adtsStream);
elementaryStream.pipe(timedMetadataTimestampRolloverStream).pipe(this.metadataStream).pipe(coalesceStream); // if CEA-708 parsing is available, hook up a caption stream
captionStream = new m2ts.CaptionStream(options);
h264Stream.pipe(captionStream).pipe(coalesceStream); // hook up the segment streams once track metadata is delivered
elementaryStream.on('data', function (data) {
var i, videoTrack, audioTrack;
if (data.type === 'metadata') {
i = data.tracks.length; // scan the tracks listed in the metadata
while (i--) {
if (data.tracks[i].type === 'video') {
videoTrack = data.tracks[i];
} else if (data.tracks[i].type === 'audio') {
audioTrack = data.tracks[i];
}
} // hook up the video segment stream to the first track with h264 data
if (videoTrack && !videoSegmentStream) {
coalesceStream.numberOfTracks++;
videoSegmentStream = new _VideoSegmentStream(videoTrack); // Set up the final part of the video pipeline
h264Stream.pipe(videoSegmentStream).pipe(coalesceStream);
}
if (audioTrack && !audioSegmentStream) {
// hook up the audio segment stream to the first track with aac data
coalesceStream.numberOfTracks++;
audioSegmentStream = new _AudioSegmentStream(audioTrack); // Set up the final part of the audio pipeline
adtsStream.pipe(audioSegmentStream).pipe(coalesceStream);
if (videoSegmentStream) {
videoSegmentStream.on('keyframe', audioSegmentStream.onVideoKeyFrame);
}
}
}
}); // feed incoming data to the front of the parsing pipeline
this.push = function (data) {
packetStream.push(data);
}; // flush any buffered data
this.flush = function () {
// Start at the top of the pipeline and flush all pending work
packetStream.flush();
}; // Caption data has to be reset when seeking outside buffered range
this.resetCaptions = function () {
captionStream.reset();
}; // Re-emit any data coming from the coalesce stream to the outside world
coalesceStream.on('data', function (event) {
self.trigger('data', event);
}); // Let the consumer know we have finished flushing the entire pipeline
coalesceStream.on('done', function () {
self.trigger('done');
});
};
_Transmuxer.prototype = new Stream(); // forward compatibility
module.exports = _Transmuxer;
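// A minimal usage sketch: push raw MPEG2-TS bytes in, collect FLV tags out.
// tsBytes is assumed here to be a Uint8Array of 188-byte TS packets.
var transmuxer = new _Transmuxer();
transmuxer.on('data', function (segment) {
  // segment.tags.videoTags and segment.tags.audioTags hold finalized FLV tags
});
transmuxer.on('done', function () {
  // the whole pipeline has been flushed
});
transmuxer.push(tsBytes);
transmuxer.flush();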

View file

@@ -0,0 +1,20 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var muxjs = {
codecs: require('./codecs'),
mp4: require('./mp4'),
flv: require('./flv'),
mp2t: require('./m2ts'),
partial: require('./partial')
}; // include all the tools when the full library is required
muxjs.mp4.tools = require('./tools/mp4-inspector');
muxjs.flv.tools = require('./tools/flv-inspector');
muxjs.mp2t.tools = require('./tools/ts-inspector');
module.exports = muxjs;

File diff suppressed because it is too large

View file

@@ -0,0 +1,9 @@
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
module.exports = require('./m2ts');

View file

@@ -0,0 +1,572 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* A stream-based mp2t to mp4 converter. This utility can be used to
* deliver mp4s to a SourceBuffer on platforms that support native
* Media Source Extensions.
*/
'use strict';
var Stream = require('../utils/stream.js'),
CaptionStream = require('./caption-stream'),
StreamTypes = require('./stream-types'),
TimestampRolloverStream = require('./timestamp-rollover-stream').TimestampRolloverStream; // object types
var _TransportPacketStream, _TransportParseStream, _ElementaryStream; // constants
var MP2T_PACKET_LENGTH = 188,
// bytes
SYNC_BYTE = 0x47;
/**
* Splits an incoming stream of binary data into MPEG-2 Transport
* Stream packets.
*/
_TransportPacketStream = function TransportPacketStream() {
var buffer = new Uint8Array(MP2T_PACKET_LENGTH),
bytesInBuffer = 0;
_TransportPacketStream.prototype.init.call(this); // Deliver new bytes to the stream.
/**
* Split a stream of data into M2TS packets
**/
this.push = function (bytes) {
var startIndex = 0,
endIndex = MP2T_PACKET_LENGTH,
everything; // If there are bytes remaining from the last segment, prepend them to the
// bytes that were pushed in
if (bytesInBuffer) {
everything = new Uint8Array(bytes.byteLength + bytesInBuffer);
everything.set(buffer.subarray(0, bytesInBuffer));
everything.set(bytes, bytesInBuffer);
bytesInBuffer = 0;
} else {
everything = bytes;
} // While we have enough data for a packet
while (endIndex < everything.byteLength) {
// Look for a pair of start and end sync bytes in the data.
if (everything[startIndex] === SYNC_BYTE && everything[endIndex] === SYNC_BYTE) {
// We found a packet so emit it and jump one whole packet forward in
// the stream
this.trigger('data', everything.subarray(startIndex, endIndex));
startIndex += MP2T_PACKET_LENGTH;
endIndex += MP2T_PACKET_LENGTH;
continue;
} // If we get here, we have somehow become de-synchronized and we need to step
// forward one byte at a time until we find a pair of sync bytes that denote
// a packet
startIndex++;
endIndex++;
} // If there was some data left over at the end of the segment that couldn't
// possibly be a whole packet, keep it because it might be the start of a packet
// that continues in the next segment
if (startIndex < everything.byteLength) {
buffer.set(everything.subarray(startIndex), 0);
bytesInBuffer = everything.byteLength - startIndex;
}
};
/**
* Passes identified M2TS packets to the TransportParseStream to be parsed
**/
this.flush = function () {
// If the buffer contains a whole packet when we are being flushed, emit it
// and empty the buffer. Otherwise hold onto the data because it may be
// important for decoding the next segment
if (bytesInBuffer === MP2T_PACKET_LENGTH && buffer[0] === SYNC_BYTE) {
this.trigger('data', buffer);
bytesInBuffer = 0;
}
this.trigger('done');
};
this.endTimeline = function () {
this.flush();
this.trigger('endedtimeline');
};
this.reset = function () {
bytesInBuffer = 0;
this.trigger('reset');
};
};
_TransportPacketStream.prototype = new Stream();
/**
* Accepts an MP2T TransportPacketStream and emits data events with parsed
* forms of the individual transport stream packets.
*/
_TransportParseStream = function TransportParseStream() {
var parsePsi, parsePat, parsePmt, self;
_TransportParseStream.prototype.init.call(this);
self = this;
this.packetsWaitingForPmt = [];
this.programMapTable = undefined;
parsePsi = function parsePsi(payload, psi) {
var offset = 0; // PSI packets may be split into multiple sections and those
// sections may be split into multiple packets. If a PSI
// section starts in this packet, the payload_unit_start_indicator
// will be true and the first byte of the payload will indicate
// the offset from the current position to the start of the
// section.
if (psi.payloadUnitStartIndicator) {
offset += payload[offset] + 1;
}
if (psi.type === 'pat') {
parsePat(payload.subarray(offset), psi);
} else {
parsePmt(payload.subarray(offset), psi);
}
};
parsePat = function parsePat(payload, pat) {
pat.section_number = payload[7]; // eslint-disable-line camelcase
pat.last_section_number = payload[8]; // eslint-disable-line camelcase
// skip the PSI header and parse the first PMT entry
self.pmtPid = (payload[10] & 0x1F) << 8 | payload[11];
pat.pmtPid = self.pmtPid;
};
/**
* Parse out the relevant fields of a Program Map Table (PMT).
* @param payload {Uint8Array} the PMT-specific portion of an MP2T
* packet. The first byte in this array should be the table_id
* field.
* @param pmt {object} the object that should be decorated with
* fields parsed from the PMT.
*/
parsePmt = function parsePmt(payload, pmt) {
var sectionLength, tableEnd, programInfoLength, offset; // PMTs can be sent ahead of the time when they should actually
// take effect. We don't believe this should ever be the case
// for HLS but we'll ignore "forward" PMT declarations if we see
// them. Future PMT declarations have the current_next_indicator
// set to zero.
if (!(payload[5] & 0x01)) {
return;
} // overwrite any existing program map table
self.programMapTable = {
video: null,
audio: null,
'timed-metadata': {}
}; // the mapping table ends at the end of the current section
sectionLength = (payload[1] & 0x0f) << 8 | payload[2];
tableEnd = 3 + sectionLength - 4; // to determine where the table is, we have to figure out how
// long the program info descriptors are
programInfoLength = (payload[10] & 0x0f) << 8 | payload[11]; // advance the offset to the first entry in the mapping table
offset = 12 + programInfoLength;
while (offset < tableEnd) {
var streamType = payload[offset];
var pid = (payload[offset + 1] & 0x1F) << 8 | payload[offset + 2]; // only map a single elementary_pid for audio and video stream types
// TODO: should this be done for metadata too? for now maintain behavior of
// multiple metadata streams
if (streamType === StreamTypes.H264_STREAM_TYPE && self.programMapTable.video === null) {
self.programMapTable.video = pid;
} else if (streamType === StreamTypes.ADTS_STREAM_TYPE && self.programMapTable.audio === null) {
self.programMapTable.audio = pid;
} else if (streamType === StreamTypes.METADATA_STREAM_TYPE) {
// map pid to stream type for metadata streams
self.programMapTable['timed-metadata'][pid] = streamType;
} // move to the next table entry
// skip past the elementary stream descriptors, if present
offset += ((payload[offset + 3] & 0x0F) << 8 | payload[offset + 4]) + 5;
} // record the map on the packet as well
pmt.programMapTable = self.programMapTable;
};
/**
* Deliver a new MP2T packet to the next stream in the pipeline.
*/
this.push = function (packet) {
var result = {},
offset = 4;
result.payloadUnitStartIndicator = !!(packet[1] & 0x40); // pid is a 13-bit field starting at the last bit of packet[1]
result.pid = packet[1] & 0x1f;
result.pid <<= 8;
result.pid |= packet[2]; // if an adaptation field is present, its length is specified by the
// fifth byte of the TS packet header. The adaptation field is
// used to add stuffing to PES packets that don't fill a complete
// TS packet, and to specify some forms of timing and control data
// that we do not currently use.
if ((packet[3] & 0x30) >>> 4 > 0x01) {
offset += packet[offset] + 1;
} // parse the rest of the packet based on the type
if (result.pid === 0) {
result.type = 'pat';
parsePsi(packet.subarray(offset), result);
this.trigger('data', result);
} else if (result.pid === this.pmtPid) {
result.type = 'pmt';
parsePsi(packet.subarray(offset), result);
this.trigger('data', result); // if there are any packets waiting for a PMT to be found, process them now
while (this.packetsWaitingForPmt.length) {
this.processPes_.apply(this, this.packetsWaitingForPmt.shift());
}
} else if (this.programMapTable === undefined) {
// When we have not seen a PMT yet, defer further processing of
// PES packets until one has been parsed
this.packetsWaitingForPmt.push([packet, offset, result]);
} else {
this.processPes_(packet, offset, result);
}
};
this.processPes_ = function (packet, offset, result) {
// set the appropriate stream type
if (result.pid === this.programMapTable.video) {
result.streamType = StreamTypes.H264_STREAM_TYPE;
} else if (result.pid === this.programMapTable.audio) {
result.streamType = StreamTypes.ADTS_STREAM_TYPE;
} else {
// if not video or audio, it is timed-metadata or unknown
// if unknown, streamType will be undefined
result.streamType = this.programMapTable['timed-metadata'][result.pid];
}
result.type = 'pes';
result.data = packet.subarray(offset);
this.trigger('data', result);
};
};
_TransportParseStream.prototype = new Stream();
_TransportParseStream.STREAM_TYPES = {
h264: 0x1b,
adts: 0x0f
};
/**
* Reconstitutes program elementary stream (PES) packets from parsed
* transport stream packets. That is, if you pipe an
* mp2t.TransportParseStream into a mp2t.ElementaryStream, the output
* events will be events which capture the bytes for individual PES
* packets plus relevant metadata that has been extracted from the
* container.
*/
_ElementaryStream = function ElementaryStream() {
var self = this,
segmentHadPmt = false,
// PES packet fragments
video = {
data: [],
size: 0
},
audio = {
data: [],
size: 0
},
timedMetadata = {
data: [],
size: 0
},
programMapTable,
parsePes = function parsePes(payload, pes) {
var ptsDtsFlags;
var startPrefix = payload[0] << 16 | payload[1] << 8 | payload[2]; // default to an empty array
pes.data = new Uint8Array(); // In certain live streams, the start of a TS fragment contains TS
// packets whose frame data continues from the previous fragment. This check
// ensures the PES data is the start of a new PES payload
if (startPrefix !== 1) {
return;
} // get the packet length, this will be 0 for video
pes.packetLength = 6 + (payload[4] << 8 | payload[5]); // find out if this packet starts a new keyframe
pes.dataAlignmentIndicator = (payload[6] & 0x04) !== 0; // PES packets may be annotated with a PTS value, or a PTS value
// and a DTS value. Determine what combination of values is
// available to work with.
ptsDtsFlags = payload[7]; // PTS and DTS are normally stored as a 33-bit number. JavaScript
// performs all bitwise operations on 32-bit integers, but supports a much
// greater integer range (52 bits) using standard mathematical operations.
// We construct a 31-bit value using bitwise operators over the 31
// most significant bits and then multiply by 4 (equal to a left-shift
// of 2) before we add the final 2 least significant bits of the
// timestamp (equal to an OR.)
if (ptsDtsFlags & 0xC0) {
// the PTS and DTS are not written out directly. For information
// on how they are encoded, see
// http://dvd.sourceforge.net/dvdinfo/pes-hdr.html
pes.pts = (payload[9] & 0x0E) << 27 | (payload[10] & 0xFF) << 20 | (payload[11] & 0xFE) << 12 | (payload[12] & 0xFF) << 5 | (payload[13] & 0xFE) >>> 3;
pes.pts *= 4; // Left shift by 2
pes.pts += (payload[13] & 0x06) >>> 1; // OR by the two LSBs
pes.dts = pes.pts;
if (ptsDtsFlags & 0x40) {
pes.dts = (payload[14] & 0x0E) << 27 | (payload[15] & 0xFF) << 20 | (payload[16] & 0xFE) << 12 | (payload[17] & 0xFF) << 5 | (payload[18] & 0xFE) >>> 3;
pes.dts *= 4; // Left shift by 2
pes.dts += (payload[18] & 0x06) >>> 1; // OR by the two LSBs
}
} // the data section starts immediately after the PES header.
// pes_header_data_length specifies the number of header bytes
// that follow the last byte of the field.
pes.data = payload.subarray(9 + payload[8]);
},
/**
* Pass completely parsed PES packets to the next stream in the pipeline
**/
flushStream = function flushStream(stream, type, forceFlush) {
var packetData = new Uint8Array(stream.size),
event = {
type: type
},
i = 0,
offset = 0,
packetFlushable = false,
fragment; // do nothing if there is not enough buffered data for a complete
// PES header
if (!stream.data.length || stream.size < 9) {
return;
}
event.trackId = stream.data[0].pid; // reassemble the packet
for (i = 0; i < stream.data.length; i++) {
fragment = stream.data[i];
packetData.set(fragment.data, offset);
offset += fragment.data.byteLength;
} // parse assembled packet's PES header
parsePes(packetData, event); // non-video PES packets MUST have a non-zero PES_packet_length
// check that there is enough stream data to fill the packet
packetFlushable = type === 'video' || event.packetLength <= stream.size; // flush pending packets if the conditions are right
if (forceFlush || packetFlushable) {
stream.size = 0;
stream.data.length = 0;
} // only emit packets that are complete. this is to avoid assembling
// incomplete PES packets due to poor segmentation
if (packetFlushable) {
self.trigger('data', event);
}
};
_ElementaryStream.prototype.init.call(this);
/**
* Identifies M2TS packet types and parses PES packets using metadata
* parsed from the PMT
**/
this.push = function (data) {
({
pat: function pat() {
// we have to wait for the PMT to arrive as well before we
// have any meaningful metadata
},
pes: function pes() {
var stream, streamType;
switch (data.streamType) {
case StreamTypes.H264_STREAM_TYPE:
stream = video;
streamType = 'video';
break;
case StreamTypes.ADTS_STREAM_TYPE:
stream = audio;
streamType = 'audio';
break;
case StreamTypes.METADATA_STREAM_TYPE:
stream = timedMetadata;
streamType = 'timed-metadata';
break;
default:
// ignore unknown stream types
return;
} // if a new packet is starting, we can flush the completed
// packet
if (data.payloadUnitStartIndicator) {
flushStream(stream, streamType, true);
} // buffer this fragment until we are sure we've received the
// complete payload
stream.data.push(data);
stream.size += data.data.byteLength;
},
pmt: function pmt() {
var event = {
type: 'metadata',
tracks: []
};
programMapTable = data.programMapTable; // translate audio and video streams to tracks
if (programMapTable.video !== null) {
event.tracks.push({
timelineStartInfo: {
baseMediaDecodeTime: 0
},
id: +programMapTable.video,
codec: 'avc',
type: 'video'
});
}
if (programMapTable.audio !== null) {
event.tracks.push({
timelineStartInfo: {
baseMediaDecodeTime: 0
},
id: +programMapTable.audio,
codec: 'adts',
type: 'audio'
});
}
segmentHadPmt = true;
self.trigger('data', event);
}
})[data.type]();
};
this.reset = function () {
video.size = 0;
video.data.length = 0;
audio.size = 0;
audio.data.length = 0;
this.trigger('reset');
};
/**
* Flush any remaining input. Video PES packets may be of variable
* length. Normally, the start of a new video packet can trigger the
* finalization of the previous packet. That is not possible if no
* more video is forthcoming, however. In that case, some other
* mechanism (like the end of the file) has to be employed. When it is
* clear that no additional data is forthcoming, calling this method
* will flush the buffered packets.
*/
this.flushStreams_ = function () {
// !!THIS ORDER IS IMPORTANT!!
// video first then audio
flushStream(video, 'video');
flushStream(audio, 'audio');
flushStream(timedMetadata, 'timed-metadata');
};
this.flush = function () {
// if on flush we haven't had a pmt emitted
// and we have a pmt to emit. emit the pmt
// so that we trigger a trackinfo downstream.
if (!segmentHadPmt && programMapTable) {
var pmt = {
type: 'metadata',
tracks: []
}; // translate audio and video streams to tracks
if (programMapTable.video !== null) {
pmt.tracks.push({
timelineStartInfo: {
baseMediaDecodeTime: 0
},
id: +programMapTable.video,
codec: 'avc',
type: 'video'
});
}
if (programMapTable.audio !== null) {
pmt.tracks.push({
timelineStartInfo: {
baseMediaDecodeTime: 0
},
id: +programMapTable.audio,
codec: 'adts',
type: 'audio'
});
}
self.trigger('data', pmt);
}
segmentHadPmt = false;
this.flushStreams_();
this.trigger('done');
};
};
_ElementaryStream.prototype = new Stream();
var m2ts = {
PAT_PID: 0x0000,
MP2T_PACKET_LENGTH: MP2T_PACKET_LENGTH,
TransportPacketStream: _TransportPacketStream,
TransportParseStream: _TransportParseStream,
ElementaryStream: _ElementaryStream,
TimestampRolloverStream: TimestampRolloverStream,
CaptionStream: CaptionStream.CaptionStream,
Cea608Stream: CaptionStream.Cea608Stream,
Cea708Stream: CaptionStream.Cea708Stream,
MetadataStream: require('./metadata-stream')
};
for (var type in StreamTypes) {
if (StreamTypes.hasOwnProperty(type)) {
m2ts[type] = StreamTypes[type];
}
}
module.exports = m2ts;
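// A wiring sketch of the three parsing stages exported above; tsBytes is
// assumed to be a Uint8Array of raw transport-stream data.
var packetStream = new m2ts.TransportPacketStream();
var parseStream = new m2ts.TransportParseStream();
var elementaryStream = new m2ts.ElementaryStream();
packetStream.pipe(parseStream).pipe(elementaryStream);
elementaryStream.on('data', function (event) {
  // event.type is 'metadata' (tracks parsed from the PMT) or a reassembled
  // PES packet with type 'video', 'audio' or 'timed-metadata'
});
packetStream.push(tsBytes);
packetStream.flush();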

View file

@@ -0,0 +1,181 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Accepts program elementary stream (PES) data events and parses out
* ID3 metadata from them, if present.
* @see http://id3.org/id3v2.3.0
*/
'use strict';
var Stream = require('../utils/stream'),
StreamTypes = require('./stream-types'),
id3 = require('../tools/parse-id3'),
_MetadataStream;
_MetadataStream = function MetadataStream(options) {
var settings = {
// the bytes of the program-level descriptor field in MP2T
// see ISO/IEC 13818-1:2013 (E), section 2.6 "Program and
// program element descriptors"
descriptor: options && options.descriptor
},
// the total size in bytes of the ID3 tag being parsed
tagSize = 0,
// tag data that is not complete enough to be parsed
buffer = [],
// the total number of bytes currently in the buffer
bufferSize = 0,
i;
_MetadataStream.prototype.init.call(this); // calculate the text track in-band metadata track dispatch type
// https://html.spec.whatwg.org/multipage/embedded-content.html#steps-to-expose-a-media-resource-specific-text-track
this.dispatchType = StreamTypes.METADATA_STREAM_TYPE.toString(16);
if (settings.descriptor) {
for (i = 0; i < settings.descriptor.length; i++) {
this.dispatchType += ('00' + settings.descriptor[i].toString(16)).slice(-2);
}
}
this.push = function (chunk) {
var tag, frameStart, frameSize, frame, i, frameHeader;
if (chunk.type !== 'timed-metadata') {
return;
} // if data_alignment_indicator is set in the PES header,
// we must have the start of a new ID3 tag. Assume anything
// remaining in the buffer was malformed and throw it out
if (chunk.dataAlignmentIndicator) {
bufferSize = 0;
buffer.length = 0;
} // ignore events that don't look like ID3 data
if (buffer.length === 0 && (chunk.data.length < 10 || chunk.data[0] !== 'I'.charCodeAt(0) || chunk.data[1] !== 'D'.charCodeAt(0) || chunk.data[2] !== '3'.charCodeAt(0))) {
this.trigger('log', {
level: 'warn',
message: 'Skipping unrecognized metadata packet'
});
return;
} // add this chunk to the data we've collected so far
buffer.push(chunk);
bufferSize += chunk.data.byteLength; // grab the size of the entire tag from the ID3 header
if (buffer.length === 1) {
// the tag size is transmitted as a 28-bit integer in the
// last four bytes of the ID3 header.
// The most significant bit of each byte is dropped and the
// results concatenated to recover the actual value.
tagSize = id3.parseSyncSafeInteger(chunk.data.subarray(6, 10)); // ID3 reports the tag size excluding the header but it's more
// convenient for our comparisons to include it
tagSize += 10;
} // if the entire tag has not arrived, wait for more data
if (bufferSize < tagSize) {
return;
} // collect the entire tag so it can be parsed
tag = {
data: new Uint8Array(tagSize),
frames: [],
pts: buffer[0].pts,
dts: buffer[0].dts
};
for (i = 0; i < tagSize;) {
tag.data.set(buffer[0].data.subarray(0, tagSize - i), i);
i += buffer[0].data.byteLength;
bufferSize -= buffer[0].data.byteLength;
buffer.shift();
} // find the start of the first frame and the end of the tag
frameStart = 10;
if (tag.data[5] & 0x40) {
// advance the frame start past the extended header
frameStart += 4; // header size field
frameStart += id3.parseSyncSafeInteger(tag.data.subarray(10, 14)); // clip any padding off the end
tagSize -= id3.parseSyncSafeInteger(tag.data.subarray(16, 20));
} // parse one or more ID3 frames
// http://id3.org/id3v2.3.0#ID3v2_frame_overview
do {
// determine the number of bytes in this frame
frameSize = id3.parseSyncSafeInteger(tag.data.subarray(frameStart + 4, frameStart + 8));
if (frameSize < 1) {
this.trigger('log', {
level: 'warn',
message: 'Malformed ID3 frame encountered. Skipping remaining metadata parsing.'
}); // If the frame is malformed, don't parse any further frames but allow previous valid parsed frames
// to be sent along.
break;
}
frameHeader = String.fromCharCode(tag.data[frameStart], tag.data[frameStart + 1], tag.data[frameStart + 2], tag.data[frameStart + 3]);
frame = {
id: frameHeader,
data: tag.data.subarray(frameStart + 10, frameStart + frameSize + 10)
};
frame.key = frame.id; // parse frame values
if (id3.frameParsers[frame.id]) {
// use frame specific parser
id3.frameParsers[frame.id](frame);
} else if (frame.id[0] === 'T') {
// use text frame generic parser
id3.frameParsers['T*'](frame);
} else if (frame.id[0] === 'W') {
// use URL link frame generic parser
id3.frameParsers['W*'](frame);
} // handle the special PRIV frame used to indicate the start
// time for raw AAC data
if (frame.owner === 'com.apple.streaming.transportStreamTimestamp') {
var d = frame.data,
size = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 | d[6] << 6 | d[7] >>> 2;
size *= 4;
size += d[7] & 0x03;
frame.timeStamp = size; // in raw AAC, all subsequent data will be timestamped based
// on the value of this frame
// we couldn't have known the appropriate pts and dts before
// parsing this ID3 tag so set those values now
if (tag.pts === undefined && tag.dts === undefined) {
tag.pts = frame.timeStamp;
tag.dts = frame.timeStamp;
}
this.trigger('timestamp', frame);
}
tag.frames.push(frame);
frameStart += 10; // advance past the frame header
frameStart += frameSize; // advance past the frame body
} while (frameStart < tagSize);
this.trigger('data', tag);
};
};
_MetadataStream.prototype = new Stream();
module.exports = _MetadataStream;
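// A worked example of the sync-safe size decoding used above: each header
// byte contributes 7 bits (the most significant bit is dropped), so an
// equivalent of id3.parseSyncSafeInteger is:
var parseSyncSafeInteger = function (data) {
  return data[0] << 21 | data[1] << 14 | data[2] << 7 | data[3];
};
parseSyncSafeInteger(new Uint8Array([0x00, 0x00, 0x02, 0x01])); // 257 (2 << 7 | 1)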

View file

@@ -0,0 +1,299 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Utilities to detect basic properties and metadata about TS Segments.
*/
'use strict';
var StreamTypes = require('./stream-types.js');
var parsePid = function parsePid(packet) {
var pid = packet[1] & 0x1f;
pid <<= 8;
pid |= packet[2];
return pid;
};
var parsePayloadUnitStartIndicator = function parsePayloadUnitStartIndicator(packet) {
return !!(packet[1] & 0x40);
};
var parseAdaptionField = function parseAdaptionField(packet) {
var offset = 0; // if an adaptation field is present, its length is specified by the
// fifth byte of the TS packet header. The adaptation field is
// used to add stuffing to PES packets that don't fill a complete
// TS packet, and to specify some forms of timing and control data
// that we do not currently use.
if ((packet[3] & 0x30) >>> 4 > 0x01) {
offset += packet[4] + 1;
}
return offset;
};
var parseType = function parseType(packet, pmtPid) {
var pid = parsePid(packet);
if (pid === 0) {
return 'pat';
} else if (pid === pmtPid) {
return 'pmt';
} else if (pmtPid) {
return 'pes';
}
return null;
};
var parsePat = function parsePat(packet) {
var pusi = parsePayloadUnitStartIndicator(packet);
var offset = 4 + parseAdaptionField(packet);
if (pusi) {
offset += packet[offset] + 1;
}
return (packet[offset + 10] & 0x1f) << 8 | packet[offset + 11];
};
var parsePmt = function parsePmt(packet) {
var programMapTable = {};
var pusi = parsePayloadUnitStartIndicator(packet);
var payloadOffset = 4 + parseAdaptionField(packet);
if (pusi) {
payloadOffset += packet[payloadOffset] + 1;
} // PMTs can be sent ahead of the time when they should actually
// take effect. We don't believe this should ever be the case
// for HLS but we'll ignore "forward" PMT declarations if we see
// them. Future PMT declarations have the current_next_indicator
// set to zero.
if (!(packet[payloadOffset + 5] & 0x01)) {
return;
}
var sectionLength, tableEnd, programInfoLength; // the mapping table ends at the end of the current section
sectionLength = (packet[payloadOffset + 1] & 0x0f) << 8 | packet[payloadOffset + 2];
tableEnd = 3 + sectionLength - 4; // to determine where the table is, we have to figure out how
// long the program info descriptors are
programInfoLength = (packet[payloadOffset + 10] & 0x0f) << 8 | packet[payloadOffset + 11]; // advance the offset to the first entry in the mapping table
var offset = 12 + programInfoLength;
while (offset < tableEnd) {
var i = payloadOffset + offset; // add an entry that maps the elementary_pid to the stream_type
programMapTable[(packet[i + 1] & 0x1F) << 8 | packet[i + 2]] = packet[i]; // move to the next table entry
// skip past the elementary stream descriptors, if present
offset += ((packet[i + 3] & 0x0F) << 8 | packet[i + 4]) + 5;
}
return programMapTable;
};
var parsePesType = function parsePesType(packet, programMapTable) {
var pid = parsePid(packet);
var type = programMapTable[pid];
switch (type) {
case StreamTypes.H264_STREAM_TYPE:
return 'video';
case StreamTypes.ADTS_STREAM_TYPE:
return 'audio';
case StreamTypes.METADATA_STREAM_TYPE:
return 'timed-metadata';
default:
return null;
}
};
var parsePesTime = function parsePesTime(packet) {
var pusi = parsePayloadUnitStartIndicator(packet);
if (!pusi) {
return null;
}
var offset = 4 + parseAdaptionField(packet);
if (offset >= packet.byteLength) {
// From the H 222.0 MPEG-TS spec
// "For transport stream packets carrying PES packets, stuffing is needed when there
// is insufficient PES packet data to completely fill the transport stream packet
// payload bytes. Stuffing is accomplished by defining an adaptation field longer than
// the sum of the lengths of the data elements in it, so that the payload bytes
// remaining after the adaptation field exactly accommodates the available PES packet
// data."
//
// If the offset is >= the length of the packet, then the packet contains no data
// and instead is just adaptation field stuffing bytes
return null;
}
var pes = null;
var ptsDtsFlags; // PES packets may be annotated with a PTS value, or a PTS value
// and a DTS value. Determine what combination of values is
// available to work with.
ptsDtsFlags = packet[offset + 7]; // PTS and DTS are normally stored as a 33-bit number. JavaScript
// performs all bitwise operations on 32-bit integers, but supports a much
// greater integer range (52 bits) using standard mathematical operations.
// We construct a 31-bit value using bitwise operators over the 31
// most significant bits and then multiply by 4 (equal to a left-shift
// of 2) before we add the final 2 least significant bits of the
// timestamp (equal to an OR.)
if (ptsDtsFlags & 0xC0) {
pes = {}; // the PTS and DTS are not written out directly. For information
// on how they are encoded, see
// http://dvd.sourceforge.net/dvdinfo/pes-hdr.html
pes.pts = (packet[offset + 9] & 0x0E) << 27 | (packet[offset + 10] & 0xFF) << 20 | (packet[offset + 11] & 0xFE) << 12 | (packet[offset + 12] & 0xFF) << 5 | (packet[offset + 13] & 0xFE) >>> 3;
pes.pts *= 4; // Left shift by 2
pes.pts += (packet[offset + 13] & 0x06) >>> 1; // OR by the two LSBs
pes.dts = pes.pts;
if (ptsDtsFlags & 0x40) {
pes.dts = (packet[offset + 14] & 0x0E) << 27 | (packet[offset + 15] & 0xFF) << 20 | (packet[offset + 16] & 0xFE) << 12 | (packet[offset + 17] & 0xFF) << 5 | (packet[offset + 18] & 0xFE) >>> 3;
pes.dts *= 4; // Left shift by 2
pes.dts += (packet[offset + 18] & 0x06) >>> 1; // OR by the two LSBs
}
}
return pes;
};
var parseNalUnitType = function parseNalUnitType(type) {
switch (type) {
case 0x05:
return 'slice_layer_without_partitioning_rbsp_idr';
case 0x06:
return 'sei_rbsp';
case 0x07:
return 'seq_parameter_set_rbsp';
case 0x08:
return 'pic_parameter_set_rbsp';
case 0x09:
return 'access_unit_delimiter_rbsp';
default:
return null;
}
};
var videoPacketContainsKeyFrame = function videoPacketContainsKeyFrame(packet) {
var offset = 4 + parseAdaptionField(packet);
var frameBuffer = packet.subarray(offset);
var frameI = 0;
var frameSyncPoint = 0;
var foundKeyFrame = false;
var nalType; // advance the sync point to a NAL start, if necessary
for (; frameSyncPoint < frameBuffer.byteLength - 3; frameSyncPoint++) {
if (frameBuffer[frameSyncPoint + 2] === 1) {
// the sync point is properly aligned
frameI = frameSyncPoint + 5;
break;
}
}
while (frameI < frameBuffer.byteLength) {
// look at the current byte to determine if we've hit the end of
// a NAL unit boundary
switch (frameBuffer[frameI]) {
case 0:
// skip past non-sync sequences
if (frameBuffer[frameI - 1] !== 0) {
frameI += 2;
break;
} else if (frameBuffer[frameI - 2] !== 0) {
frameI++;
break;
}
if (frameSyncPoint + 3 !== frameI - 2) {
nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);
if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {
foundKeyFrame = true;
}
} // drop trailing zeroes
do {
frameI++;
} while (frameBuffer[frameI] !== 1 && frameI < frameBuffer.length);
frameSyncPoint = frameI - 2;
frameI += 3;
break;
case 1:
// skip past non-sync sequences
if (frameBuffer[frameI - 1] !== 0 || frameBuffer[frameI - 2] !== 0) {
frameI += 3;
break;
}
nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);
if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {
foundKeyFrame = true;
}
frameSyncPoint = frameI - 2;
frameI += 3;
break;
default:
// the current byte isn't a one or zero, so it cannot be part
// of a sync sequence
frameI += 3;
break;
}
}
frameBuffer = frameBuffer.subarray(frameSyncPoint);
frameI -= frameSyncPoint;
frameSyncPoint = 0; // parse the final nal
if (frameBuffer && frameBuffer.byteLength > 3) {
nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);
if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {
foundKeyFrame = true;
}
}
return foundKeyFrame;
};
module.exports = {
parseType: parseType,
parsePat: parsePat,
parsePmt: parsePmt,
parsePayloadUnitStartIndicator: parsePayloadUnitStartIndicator,
parsePesType: parsePesType,
parsePesTime: parsePesTime,
videoPacketContainsKeyFrame: videoPacketContainsKeyFrame
};

View file

@ -0,0 +1,13 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
module.exports = {
H264_STREAM_TYPE: 0x1B,
ADTS_STREAM_TYPE: 0x0F,
METADATA_STREAM_TYPE: 0x15
};

View file

@ -0,0 +1,107 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Accepts program elementary stream (PES) data events and corrects
* decode and presentation time stamps to account for a rollover
* of the 33 bit value.
*/
'use strict';
var Stream = require('../utils/stream');
var MAX_TS = 8589934592;
var RO_THRESH = 4294967296;
var TYPE_SHARED = 'shared';
var handleRollover = function handleRollover(value, reference) {
var direction = 1;
if (value > reference) {
// If the current timestamp value is greater than our reference timestamp and we detect a
// timestamp rollover, this means the roll over is happening in the opposite direction.
// Example scenario: Enter a long stream/video just after a rollover occurred. The reference
// point will be set to a small number, e.g. 1. The user then seeks backwards over the
// rollover point. In loading this segment, the timestamp values will be very large,
// e.g. 2^33 - 1. Since this comes before the data we loaded previously, we want to adjust
// the time stamp to be `value - 2^33`.
direction = -1;
} // Note: A seek forwards or back that is greater than the RO_THRESH (2^32, ~13 hours) will
// cause an incorrect adjustment.
while (Math.abs(reference - value) > RO_THRESH) {
value += direction * MAX_TS;
}
return value;
};
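// Illustrative sketch, not part of mux.js: handleRollover with concrete values
// (MAX_TS is 2^33, RO_THRESH is 2^32). A stream that wraps past 2^33 - 1
// restarts near zero, so a small value seen against a large reference is
// pushed forward by one full rollover:
// handleRollover(1000, 8589930000) === 8589935592 (1000 + 2^33)
// Seeking backwards across the rollover point adjusts the other way:
// handleRollover(8589934000, 1000) === -592 (8589934000 - 2^33)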
var TimestampRolloverStream = function TimestampRolloverStream(type) {
var lastDTS, referenceDTS;
TimestampRolloverStream.prototype.init.call(this); // The "shared" type is used in cases where a stream will contain muxed
// video and audio. We could use `undefined` here, but having a string
// makes debugging a little clearer.
this.type_ = type || TYPE_SHARED;
this.push = function (data) {
/**
* Rollover stream expects data from elementary stream.
* Elementary stream can push forward 2 types of data
* - Parsed Video/Audio/Timed-metadata PES (packetized elementary stream) packets
* - Tracks metadata from PMT (Program Map Table)
* Rollover stream expects pts/dts info to be available, since it stores lastDTS
* We should ignore non-PES packets since they may override lastDTS to undefined.
* lastDTS is important to signal the next segments
* about rollover from the previous segments.
*/
if (data.type === 'metadata') {
this.trigger('data', data);
return;
} // Any "shared" rollover streams will accept _all_ data. Otherwise,
// streams will only accept data that matches their type.
if (this.type_ !== TYPE_SHARED && data.type !== this.type_) {
return;
}
if (referenceDTS === undefined) {
referenceDTS = data.dts;
}
data.dts = handleRollover(data.dts, referenceDTS);
data.pts = handleRollover(data.pts, referenceDTS);
lastDTS = data.dts;
this.trigger('data', data);
};
this.flush = function () {
referenceDTS = lastDTS;
this.trigger('done');
};
this.endTimeline = function () {
this.flush();
this.trigger('endedtimeline');
};
this.discontinuity = function () {
referenceDTS = void 0;
lastDTS = void 0;
};
this.reset = function () {
this.discontinuity();
this.trigger('reset');
};
};
TimestampRolloverStream.prototype = new Stream();
module.exports = {
TimestampRolloverStream: TimestampRolloverStream,
handleRollover: handleRollover
};

View file

@ -0,0 +1,148 @@
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
var coneOfSilence = require('../data/silence');
var clock = require('../utils/clock');
/**
* Sum the `byteLength` properties of the data in each AAC frame
*/
var sumFrameByteLengths = function sumFrameByteLengths(array) {
var i,
currentObj,
sum = 0; // sum the byteLengths of all the nal units in the frame
for (i = 0; i < array.length; i++) {
currentObj = array[i];
sum += currentObj.data.byteLength;
}
return sum;
}; // Possibly pad (prefix) the audio track with silence if appending this track
// would lead to the introduction of a gap in the audio buffer
var prefixWithSilence = function prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime) {
var baseMediaDecodeTimeTs,
frameDuration = 0,
audioGapDuration = 0,
audioFillFrameCount = 0,
audioFillDuration = 0,
silentFrame,
i,
firstFrame;
if (!frames.length) {
return;
}
baseMediaDecodeTimeTs = clock.audioTsToVideoTs(track.baseMediaDecodeTime, track.samplerate); // determine frame clock duration based on sample rate, round up to avoid overfills
frameDuration = Math.ceil(clock.ONE_SECOND_IN_TS / (track.samplerate / 1024));
if (audioAppendStartTs && videoBaseMediaDecodeTime) {
// insert the shortest possible amount (audio gap or audio to video gap)
audioGapDuration = baseMediaDecodeTimeTs - Math.max(audioAppendStartTs, videoBaseMediaDecodeTime); // number of full frames in the audio gap
audioFillFrameCount = Math.floor(audioGapDuration / frameDuration);
audioFillDuration = audioFillFrameCount * frameDuration;
} // don't attempt to fill gaps smaller than a single frame or larger
// than a half second
if (audioFillFrameCount < 1 || audioFillDuration > clock.ONE_SECOND_IN_TS / 2) {
return;
}
silentFrame = coneOfSilence()[track.samplerate];
if (!silentFrame) {
// we don't have a silent frame pregenerated for the sample rate, so use a frame
// from the content instead
silentFrame = frames[0].data;
}
for (i = 0; i < audioFillFrameCount; i++) {
firstFrame = frames[0];
frames.splice(0, 0, {
data: silentFrame,
dts: firstFrame.dts - frameDuration,
pts: firstFrame.pts - frameDuration
});
}
track.baseMediaDecodeTime -= Math.floor(clock.videoTsToAudioTs(audioFillDuration, track.samplerate));
return audioFillDuration;
}; // If the audio segment extends before the earliest allowed DTS
// value, remove AAC frames until the segment starts at or after the
// earliest allowed DTS so that we don't end up with a negative
// baseMediaDecodeTime for the audio track
var trimAdtsFramesByEarliestDts = function trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts) {
if (track.minSegmentDts >= earliestAllowedDts) {
return adtsFrames;
} // We will need to recalculate the earliest segment Dts
track.minSegmentDts = Infinity;
return adtsFrames.filter(function (currentFrame) {
// If this is an allowed frame, keep it and record its DTS
if (currentFrame.dts >= earliestAllowedDts) {
track.minSegmentDts = Math.min(track.minSegmentDts, currentFrame.dts);
track.minSegmentPts = track.minSegmentDts;
return true;
} // Otherwise, discard it
return false;
});
}; // generate the track's sample table from an array of frames
var generateSampleTable = function generateSampleTable(frames) {
var i,
currentFrame,
samples = [];
for (i = 0; i < frames.length; i++) {
currentFrame = frames[i];
samples.push({
size: currentFrame.data.byteLength,
duration: 1024 // For AAC audio, each frame contains 1024 samples
});
}
return samples;
}; // generate the track's raw mdat data from an array of frames
var concatenateFrameData = function concatenateFrameData(frames) {
var i,
currentFrame,
dataOffset = 0,
data = new Uint8Array(sumFrameByteLengths(frames));
for (i = 0; i < frames.length; i++) {
currentFrame = frames[i];
data.set(currentFrame.data, dataOffset);
dataOffset += currentFrame.data.byteLength;
}
return data;
};
module.exports = {
prefixWithSilence: prefixWithSilence,
trimAdtsFramesByEarliestDts: trimAdtsFramesByEarliestDts,
generateSampleTable: generateSampleTable,
concatenateFrameData: concatenateFrameData
};
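// Illustrative sketch, not part of mux.js: the fill math prefixWithSilence
// performs, with concrete numbers. Each AAC frame carries 1024 samples and
// timestamps are on the 90kHz MPEG-TS clock (clock.ONE_SECOND_IN_TS), so at
// a 44100 Hz sample rate one frame lasts:
// Math.ceil(90000 / (44100 / 1024)) === 2090 ticks
// A 10000-tick gap before the first frame is then padded with
// Math.floor(10000 / 2090) === 4 whole silent frames (8360 ticks of fill).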

View file

@ -0,0 +1,490 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Reads in-band CEA-708 captions out of FMP4 segments.
* @see https://en.wikipedia.org/wiki/CEA-708
*/
'use strict';
var discardEmulationPreventionBytes = require('../tools/caption-packet-parser').discardEmulationPreventionBytes;
var CaptionStream = require('../m2ts/caption-stream').CaptionStream;
var findBox = require('../mp4/find-box.js');
var parseTfdt = require('../tools/parse-tfdt.js');
var parseTrun = require('../tools/parse-trun.js');
var parseTfhd = require('../tools/parse-tfhd.js');
var window = require('global/window');
/**
* Maps an offset in the mdat to a sample based on the the size of the samples.
* Assumes that `parseSamples` has been called first.
*
* @param {Number} offset - The offset into the mdat
* @param {Object[]} samples - An array of samples, parsed using `parseSamples`
* @return {?Object} The matching sample, or null if no match was found.
*
* @see ISO-BMFF-12/2015, Section 8.8.8
**/
var mapToSample = function mapToSample(offset, samples) {
var approximateOffset = offset;
for (var i = 0; i < samples.length; i++) {
var sample = samples[i];
if (approximateOffset < sample.size) {
return sample;
}
approximateOffset -= sample.size;
}
return null;
};
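// Illustrative sketch, not part of mux.js: mapToSample subtracts sample sizes
// in order until the remaining offset falls inside a sample. With samples of
// sizes [100, 200, 300]:
// offset 50 -> the first sample (50 < 100)
// offset 150 -> the second sample (150 - 100 = 50, and 50 < 200)
// offset 650 -> null (beyond the 600 bytes the samples cover)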
/**
* Finds SEI nal units contained in a Media Data Box.
* Assumes that `parseSamples` has been called first.
*
* @param {Uint8Array} avcStream - The bytes of the mdat
* @param {Object[]} samples - The samples parsed out by `parseSamples`
* @param {Number} trackId - The trackId of this video track
* @return {Object[]} seiNals - the parsed SEI NALUs found.
* The contents of the seiNal should match what is expected by
* CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts)
*
* @see ISO-BMFF-12/2015, Section 8.1.1
* @see Rec. ITU-T H.264, 7.3.2.3.1
**/
var findSeiNals = function findSeiNals(avcStream, samples, trackId) {
var avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),
result = {
logs: [],
seiNals: []
},
seiNal,
i,
length,
lastMatchedSample;
for (i = 0; i + 4 < avcStream.length; i += length) {
length = avcView.getUint32(i);
i += 4; // Bail if this doesn't appear to be an H264 stream
if (length <= 0) {
continue;
}
switch (avcStream[i] & 0x1F) {
case 0x06:
var data = avcStream.subarray(i + 1, i + 1 + length);
var matchingSample = mapToSample(i, samples);
seiNal = {
nalUnitType: 'sei_rbsp',
size: length,
data: data,
escapedRBSP: discardEmulationPreventionBytes(data),
trackId: trackId
};
if (matchingSample) {
seiNal.pts = matchingSample.pts;
seiNal.dts = matchingSample.dts;
lastMatchedSample = matchingSample;
} else if (lastMatchedSample) {
// If a matching sample cannot be found, use the last
// sample's values as they should be as close as possible
seiNal.pts = lastMatchedSample.pts;
seiNal.dts = lastMatchedSample.dts;
} else {
result.logs.push({
level: 'warn',
message: 'We\'ve encountered a nal unit without data at ' + i + ' for trackId ' + trackId + '. See mux.js#223.'
});
break;
}
result.seiNals.push(seiNal);
break;
default:
break;
}
}
return result;
};
/**
* Parses sample information out of Track Run Boxes and calculates
* the absolute presentation and decode timestamps of each sample.
*
* @param {Array<Uint8Array>} truns - The Track Run (trun) boxes to be parsed
* @param {Number|BigInt} baseMediaDecodeTime - base media decode time from tfdt
* @see ISO-BMFF-12/2015, Section 8.8.12
* @param {Object} tfhd - The parsed Track Fragment Header
* @see inspect.parseTfhd
* @return {Object[]} the parsed samples
*
* @see ISO-BMFF-12/2015, Section 8.8.8
**/
var parseSamples = function parseSamples(truns, baseMediaDecodeTime, tfhd) {
var currentDts = baseMediaDecodeTime;
var defaultSampleDuration = tfhd.defaultSampleDuration || 0;
var defaultSampleSize = tfhd.defaultSampleSize || 0;
var trackId = tfhd.trackId;
var allSamples = [];
truns.forEach(function (trun) {
// Note: We currently do not parse the sample table as well
// as the trun. It's possible some sources will require this.
// moov > trak > mdia > minf > stbl
var trackRun = parseTrun(trun);
var samples = trackRun.samples;
samples.forEach(function (sample) {
if (sample.duration === undefined) {
sample.duration = defaultSampleDuration;
}
if (sample.size === undefined) {
sample.size = defaultSampleSize;
}
sample.trackId = trackId;
sample.dts = currentDts;
if (sample.compositionTimeOffset === undefined) {
sample.compositionTimeOffset = 0;
}
if (typeof currentDts === 'bigint') {
sample.pts = currentDts + window.BigInt(sample.compositionTimeOffset);
currentDts += window.BigInt(sample.duration);
} else {
sample.pts = currentDts + sample.compositionTimeOffset;
currentDts += sample.duration;
}
});
allSamples = allSamples.concat(samples);
});
return allSamples;
};
/**
* Parses out caption nals from an FMP4 segment's video tracks.
*
* @param {Uint8Array} segment - The bytes of a single segment
* @param {Number} videoTrackId - The trackId of a video track in the segment
* @return {Object.<Number, Object[]>} A mapping of video trackId to
* a list of seiNals found in that track
**/
var parseCaptionNals = function parseCaptionNals(segment, videoTrackId) {
// To get the samples
var trafs = findBox(segment, ['moof', 'traf']); // To get SEI NAL units
var mdats = findBox(segment, ['mdat']);
var captionNals = {};
var mdatTrafPairs = []; // Pair up each traf with a mdat as moofs and mdats are in pairs
mdats.forEach(function (mdat, index) {
var matchingTraf = trafs[index];
mdatTrafPairs.push({
mdat: mdat,
traf: matchingTraf
});
});
mdatTrafPairs.forEach(function (pair) {
var mdat = pair.mdat;
var traf = pair.traf;
var tfhd = findBox(traf, ['tfhd']); // Exactly 1 tfhd per traf
var headerInfo = parseTfhd(tfhd[0]);
var trackId = headerInfo.trackId;
var tfdt = findBox(traf, ['tfdt']); // Either 0 or 1 tfdt per traf
var baseMediaDecodeTime = tfdt.length > 0 ? parseTfdt(tfdt[0]).baseMediaDecodeTime : 0;
var truns = findBox(traf, ['trun']);
var samples;
var result; // Only parse video data for the chosen video track
if (videoTrackId === trackId && truns.length > 0) {
samples = parseSamples(truns, baseMediaDecodeTime, headerInfo);
result = findSeiNals(mdat, samples, trackId);
if (!captionNals[trackId]) {
captionNals[trackId] = {
seiNals: [],
logs: []
};
}
captionNals[trackId].seiNals = captionNals[trackId].seiNals.concat(result.seiNals);
captionNals[trackId].logs = captionNals[trackId].logs.concat(result.logs);
}
});
return captionNals;
};
/**
* Parses out inband captions from an MP4 container and returns
* caption objects that can be used by WebVTT and the TextTrack API.
* @see https://developer.mozilla.org/en-US/docs/Web/API/VTTCue
* @see https://developer.mozilla.org/en-US/docs/Web/API/TextTrack
* Assumes that `probe.getVideoTrackIds` and `probe.timescale` have been called first
*
* @param {Uint8Array} segment - The fmp4 segment containing embedded captions
* @param {Number} trackId - The id of the video track to parse
* @param {Number} timescale - The timescale for the video track from the init segment
*
* @return {?Object[]} parsedCaptions - A list of captions or null if no video tracks
* @return {Number} parsedCaptions[].startTime - The time to show the caption in seconds
* @return {Number} parsedCaptions[].endTime - The time to stop showing the caption in seconds
* @return {Object[]} parsedCaptions[].content - A list of individual caption segments
* @return {String} parsedCaptions[].content.text - The visible content of the caption segment
* @return {Number} parsedCaptions[].content.line - The line height from 1-15 for positioning of the caption segment
* @return {Number} parsedCaptions[].content.position - The column indent percentage for cue positioning from 10-80
**/
var parseEmbeddedCaptions = function parseEmbeddedCaptions(segment, trackId, timescale) {
var captionNals; // the ISO-BMFF spec says that trackId can't be zero, but there's some broken content out there
if (trackId === null) {
return null;
}
captionNals = parseCaptionNals(segment, trackId);
var trackNals = captionNals[trackId] || {};
return {
seiNals: trackNals.seiNals,
logs: trackNals.logs,
timescale: timescale
};
};
/**
* Converts SEI NALUs into captions that can be used by video.js
**/
var CaptionParser = function CaptionParser() {
var isInitialized = false;
var captionStream; // Stores segments seen before trackId and timescale are set
var segmentCache; // Stores video track ID of the track being parsed
var trackId; // Stores the timescale of the track being parsed
var timescale; // Stores captions parsed so far
var parsedCaptions; // Stores whether we are receiving partial data or not
var parsingPartial;
/**
* A method to indicate whether a CaptionParser has been initialized
* @returns {Boolean}
**/
this.isInitialized = function () {
return isInitialized;
};
/**
* Initializes the underlying CaptionStream, SEI NAL parsing
* and management, and caption collection
**/
this.init = function (options) {
captionStream = new CaptionStream();
isInitialized = true;
parsingPartial = options ? options.isPartial : false; // Collect dispatched captions
captionStream.on('data', function (event) {
// Convert to seconds in the source's timescale
event.startTime = event.startPts / timescale;
event.endTime = event.endPts / timescale;
parsedCaptions.captions.push(event);
parsedCaptions.captionStreams[event.stream] = true;
});
captionStream.on('log', function (log) {
parsedCaptions.logs.push(log);
});
};
/**
* Determines if a new video track will be selected
* or if the timescale changed
* @return {Boolean}
**/
this.isNewInit = function (videoTrackIds, timescales) {
if (videoTrackIds && videoTrackIds.length === 0 || timescales && typeof timescales === 'object' && Object.keys(timescales).length === 0) {
return false;
}
return trackId !== videoTrackIds[0] || timescale !== timescales[trackId];
};
/**
* Parses out SEI captions and interacts with underlying
* CaptionStream to return dispatched captions
*
* @param {Uint8Array} segment - The fmp4 segment containing embedded captions
* @param {Number[]} videoTrackIds - A list of video tracks found in the init segment
* @param {Object.<Number, Number>} timescales - The timescales found in the init segment
* @see parseEmbeddedCaptions
* @see m2ts/caption-stream.js
**/
this.parse = function (segment, videoTrackIds, timescales) {
var parsedData;
if (!this.isInitialized()) {
return null; // This is not likely to be a video segment
} else if (!videoTrackIds || !timescales) {
return null;
} else if (this.isNewInit(videoTrackIds, timescales)) {
// Use the first video track only as there is no
// mechanism to switch to other video tracks
trackId = videoTrackIds[0];
timescale = timescales[trackId]; // If an init segment has not been seen yet, hold onto segment
// data until we have one.
// the ISO-BMFF spec says that trackId can't be zero, but there's some broken content out there
} else if (trackId === null || !timescale) {
segmentCache.push(segment);
return null;
} // Now that a timescale and trackId is set, parse cached segments
while (segmentCache.length > 0) {
var cachedSegment = segmentCache.shift();
this.parse(cachedSegment, videoTrackIds, timescales);
}
parsedData = parseEmbeddedCaptions(segment, trackId, timescale);
if (parsedData && parsedData.logs) {
parsedCaptions.logs = parsedCaptions.logs.concat(parsedData.logs);
}
if (parsedData === null || !parsedData.seiNals) {
if (parsedCaptions.logs.length) {
return {
logs: parsedCaptions.logs,
captions: [],
captionStreams: []
};
}
return null;
}
this.pushNals(parsedData.seiNals); // Force the parsed captions to be dispatched
this.flushStream();
return parsedCaptions;
};
/**
* Pushes SEI NALUs onto CaptionStream
* @param {Object[]} nals - A list of SEI nals parsed using `parseCaptionNals`
* Assumes that `parseCaptionNals` has been called first
* @see m2ts/caption-stream.js
**/
this.pushNals = function (nals) {
if (!this.isInitialized() || !nals || nals.length === 0) {
return null;
}
nals.forEach(function (nal) {
captionStream.push(nal);
});
};
/**
* Flushes underlying CaptionStream to dispatch processed, displayable captions
* @see m2ts/caption-stream.js
**/
this.flushStream = function () {
if (!this.isInitialized()) {
return null;
}
if (!parsingPartial) {
captionStream.flush();
} else {
captionStream.partialFlush();
}
};
/**
* Reset caption buckets for new data
**/
this.clearParsedCaptions = function () {
parsedCaptions.captions = [];
parsedCaptions.captionStreams = {};
parsedCaptions.logs = [];
};
/**
* Resets underlying CaptionStream
* @see m2ts/caption-stream.js
**/
this.resetCaptionStream = function () {
if (!this.isInitialized()) {
return null;
}
captionStream.reset();
};
/**
* Convenience method to clear all captions flushed from the
* CaptionStream and still being parsed
* @see m2ts/caption-stream.js
**/
this.clearAllCaptions = function () {
this.clearParsedCaptions();
this.resetCaptionStream();
};
/**
* Reset caption parser
**/
this.reset = function () {
segmentCache = [];
trackId = null;
timescale = null;
if (!parsedCaptions) {
parsedCaptions = {
captions: [],
// CC1, CC2, CC3, CC4
captionStreams: {},
logs: []
};
} else {
this.clearParsedCaptions();
}
this.resetCaptionStream();
};
this.reset();
};
module.exports = CaptionParser;
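// Illustrative usage, not part of mux.js: a minimal caption-parsing flow.
// The segment bytes, track id and timescale are assumptions for the sketch;
// real values come from the init segment (see parseEmbeddedCaptions above).
// var parser = new CaptionParser();
// parser.init();
// var parsed = parser.parse(segmentBytes, [1], { 1: 90000 });
// if (parsed) {
//   parsed.captions.forEach(function (caption) {
//     // caption.startTime / caption.endTime are in seconds, ready for VTTCue
//   });
// }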

View file

@ -0,0 +1,105 @@
"use strict";
var uint8ToCString = require('../utils/string.js').uint8ToCString;
var getUint64 = require('../utils/numbers.js').getUint64;
/**
* Based on: ISO/IEC 23009 Section: 5.10.3.3
* References:
* https://dashif-documents.azurewebsites.net/Events/master/event.html#emsg-format
* https://aomediacodec.github.io/id3-emsg/
*
* Takes emsg box data as a uint8 array and returns a emsg box object
* @param {UInt8Array} boxData data from emsg box
* @returns A parsed emsg box object
*/
var parseEmsgBox = function parseEmsgBox(boxData) {
// version + flags
var offset = 4;
var version = boxData[0];
var scheme_id_uri, value, timescale, presentation_time, presentation_time_delta, event_duration, id, message_data;
if (version === 0) {
scheme_id_uri = uint8ToCString(boxData.subarray(offset));
offset += scheme_id_uri.length;
value = uint8ToCString(boxData.subarray(offset));
offset += value.length;
var dv = new DataView(boxData.buffer);
timescale = dv.getUint32(offset);
offset += 4;
presentation_time_delta = dv.getUint32(offset);
offset += 4;
event_duration = dv.getUint32(offset);
offset += 4;
id = dv.getUint32(offset);
offset += 4;
} else if (version === 1) {
var dv = new DataView(boxData.buffer);
timescale = dv.getUint32(offset);
offset += 4;
presentation_time = getUint64(boxData.subarray(offset));
offset += 8;
event_duration = dv.getUint32(offset);
offset += 4;
id = dv.getUint32(offset);
offset += 4;
scheme_id_uri = uint8ToCString(boxData.subarray(offset));
offset += scheme_id_uri.length;
value = uint8ToCString(boxData.subarray(offset));
offset += value.length;
}
message_data = new Uint8Array(boxData.subarray(offset, boxData.byteLength));
var emsgBox = {
scheme_id_uri: scheme_id_uri,
value: value,
// if timescale is undefined or 0 set to 1
timescale: timescale ? timescale : 1,
presentation_time: presentation_time,
presentation_time_delta: presentation_time_delta,
event_duration: event_duration,
id: id,
message_data: message_data
};
return isValidEmsgBox(version, emsgBox) ? emsgBox : undefined;
};
/**
* Scales a presentation time or time delta with an offset with a provided timescale
* @param {number} presentationTime
* @param {number} timescale
* @param {number} timeDelta
* @param {number} offset
* @returns the scaled time as a number
*/
var scaleTime = function scaleTime(presentationTime, timescale, timeDelta, offset) {
return presentationTime || presentationTime === 0 ? presentationTime / timescale : offset + timeDelta / timescale;
};
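// Illustrative sketch, not part of mux.js: scaleTime with concrete values.
// A version 1 emsg carries an absolute presentation_time:
// scaleTime(180000, 90000, undefined, 0) === 2 (seconds)
// A version 0 emsg only carries presentation_time_delta, so the caller's
// offset is used instead:
// scaleTime(undefined, 90000, 45000, 10) === 10 + 45000 / 90000 === 10.5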
/**
* Checks the emsg box data for validity based on the version
* @param {number} version of the emsg box to validate
* @param {Object} emsg the emsg data to validate
* @returns if the box is valid as a boolean
*/
var isValidEmsgBox = function isValidEmsgBox(version, emsg) {
var hasScheme = emsg.scheme_id_uri !== '\0';
var isValidV0Box = version === 0 && isDefined(emsg.presentation_time_delta) && hasScheme;
var isValidV1Box = version === 1 && isDefined(emsg.presentation_time) && hasScheme; // Only valid versions of emsg are 0 and 1
return !(version > 1) && isValidV0Box || isValidV1Box;
}; // Utility function to check if an object is defined
var isDefined = function isDefined(data) {
return data !== undefined && data !== null;
};
module.exports = {
parseEmsgBox: parseEmsgBox,
scaleTime: scaleTime
};

View file

@ -0,0 +1,47 @@
"use strict";
var toUnsigned = require('../utils/bin').toUnsigned;
var parseType = require('./parse-type.js');
var findBox = function findBox(data, path) {
var results = [],
i,
size,
type,
end,
subresults;
if (!path.length) {
// short-circuit the search for empty paths
return null;
}
for (i = 0; i < data.byteLength;) {
size = toUnsigned(data[i] << 24 | data[i + 1] << 16 | data[i + 2] << 8 | data[i + 3]);
type = parseType(data.subarray(i + 4, i + 8));
end = size > 1 ? i + size : data.byteLength;
if (type === path[0]) {
if (path.length === 1) {
// this is the end of the path and we've found the box we were
// looking for
results.push(data.subarray(i + 8, end));
} else {
// recursively search for the next box along the path
subresults = findBox(data.subarray(i + 8, end), path.slice(1));
if (subresults.length) {
results = results.concat(subresults);
}
}
}
i = end;
} // we've finished searching all of data
return results;
};
module.exports = findBox;
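// Illustrative usage, not part of mux.js: findBox takes raw segment bytes and
// a path of box types, returning the payload of every match. These mirror the
// calls made by caption-parser.js and probe.js:
// var trafs = findBox(segmentBytes, ['moof', 'traf']); // every traf in every moof
// var tfhd = findBox(trafs[0], ['tfhd'])[0]; // first tfhd within the first traf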

View file

@ -0,0 +1,304 @@
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
// Convert an array of nal units into an array of frames with each frame being
// composed of the nal units that make up that frame
// Also keep track of cumulative data about the frame from the nal units such
// as the frame duration, starting pts, etc.
var groupNalsIntoFrames = function groupNalsIntoFrames(nalUnits) {
var i,
currentNal,
currentFrame = [],
frames = []; // TODO added for LHLS, make sure this is OK
frames.byteLength = 0;
frames.nalCount = 0;
frames.duration = 0;
currentFrame.byteLength = 0;
for (i = 0; i < nalUnits.length; i++) {
currentNal = nalUnits[i]; // Split on 'aud'-type nal units
if (currentNal.nalUnitType === 'access_unit_delimiter_rbsp') {
// Since the very first nal unit is expected to be an AUD
// only push to the frames array when currentFrame is not empty
if (currentFrame.length) {
currentFrame.duration = currentNal.dts - currentFrame.dts; // TODO added for LHLS, make sure this is OK
frames.byteLength += currentFrame.byteLength;
frames.nalCount += currentFrame.length;
frames.duration += currentFrame.duration;
frames.push(currentFrame);
}
currentFrame = [currentNal];
currentFrame.byteLength = currentNal.data.byteLength;
currentFrame.pts = currentNal.pts;
currentFrame.dts = currentNal.dts;
} else {
// Specifically flag key frames for ease of use later
if (currentNal.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') {
currentFrame.keyFrame = true;
}
currentFrame.duration = currentNal.dts - currentFrame.dts;
currentFrame.byteLength += currentNal.data.byteLength;
currentFrame.push(currentNal);
}
} // For the last frame, use the duration of the previous frame if we
// have nothing better to go on
if (frames.length && (!currentFrame.duration || currentFrame.duration <= 0)) {
currentFrame.duration = frames[frames.length - 1].duration;
} // Push the final frame
// TODO added for LHLS, make sure this is OK
frames.byteLength += currentFrame.byteLength;
frames.nalCount += currentFrame.length;
frames.duration += currentFrame.duration;
frames.push(currentFrame);
return frames;
}; // Convert an array of frames into an array of Gop with each Gop being composed
// of the frames that make up that Gop
// Also keep track of cumulative data about the Gop from the frames such as the
// Gop duration, starting pts, etc.
var groupFramesIntoGops = function groupFramesIntoGops(frames) {
var i,
currentFrame,
currentGop = [],
gops = []; // We must pre-set some of the values on the Gop since we
// keep running totals of these values
currentGop.byteLength = 0;
currentGop.nalCount = 0;
currentGop.duration = 0;
currentGop.pts = frames[0].pts;
currentGop.dts = frames[0].dts; // store some metadata about all the Gops
gops.byteLength = 0;
gops.nalCount = 0;
gops.duration = 0;
gops.pts = frames[0].pts;
gops.dts = frames[0].dts;
for (i = 0; i < frames.length; i++) {
currentFrame = frames[i];
if (currentFrame.keyFrame) {
// Since the very first frame is expected to be a keyframe
// only push to the gops array when currentGop is not empty
if (currentGop.length) {
gops.push(currentGop);
gops.byteLength += currentGop.byteLength;
gops.nalCount += currentGop.nalCount;
gops.duration += currentGop.duration;
}
currentGop = [currentFrame];
currentGop.nalCount = currentFrame.length;
currentGop.byteLength = currentFrame.byteLength;
currentGop.pts = currentFrame.pts;
currentGop.dts = currentFrame.dts;
currentGop.duration = currentFrame.duration;
} else {
currentGop.duration += currentFrame.duration;
currentGop.nalCount += currentFrame.length;
currentGop.byteLength += currentFrame.byteLength;
currentGop.push(currentFrame);
}
}
if (gops.length && currentGop.duration <= 0) {
currentGop.duration = gops[gops.length - 1].duration;
}
gops.byteLength += currentGop.byteLength;
gops.nalCount += currentGop.nalCount;
gops.duration += currentGop.duration; // push the final Gop
gops.push(currentGop);
return gops;
};
/*
* Search for the first keyframe in the GOPs and throw away all frames
* until that keyframe. Then extend the duration of the pulled keyframe
* and pull the PTS and DTS of the keyframe so that it covers the time
* range of the frames that were disposed.
*
* @param {Array} gops video GOPs
* @returns {Array} modified video GOPs
*/
var extendFirstKeyFrame = function extendFirstKeyFrame(gops) {
var currentGop;
if (!gops[0][0].keyFrame && gops.length > 1) {
// Remove the first GOP
currentGop = gops.shift();
gops.byteLength -= currentGop.byteLength;
gops.nalCount -= currentGop.nalCount; // Extend the first frame of what is now the
// first gop to cover the time period of the
// frames we just removed
gops[0][0].dts = currentGop.dts;
gops[0][0].pts = currentGop.pts;
gops[0][0].duration += currentGop.duration;
}
return gops;
};
/**
* Default sample object
* see ISO/IEC 14496-12:2012, section 8.6.4.3
*/
var createDefaultSample = function createDefaultSample() {
return {
size: 0,
flags: {
isLeading: 0,
dependsOn: 1,
isDependedOn: 0,
hasRedundancy: 0,
degradationPriority: 0,
isNonSyncSample: 1
}
};
};
/*
* Collates information from a video frame into an object for eventual
* entry into an MP4 sample table.
*
* @param {Object} frame the video frame
* @param {Number} dataOffset the byte offset to position the sample
* @return {Object} object containing sample table info for a frame
*/
var sampleForFrame = function sampleForFrame(frame, dataOffset) {
var sample = createDefaultSample();
sample.dataOffset = dataOffset;
sample.compositionTimeOffset = frame.pts - frame.dts;
sample.duration = frame.duration;
sample.size = 4 * frame.length; // Space for nal unit size
sample.size += frame.byteLength;
if (frame.keyFrame) {
sample.flags.dependsOn = 2;
sample.flags.isNonSyncSample = 0;
}
return sample;
}; // generate the track's sample table from an array of gops
var generateSampleTable = function generateSampleTable(gops, baseDataOffset) {
var h,
i,
sample,
currentGop,
currentFrame,
dataOffset = baseDataOffset || 0,
samples = [];
for (h = 0; h < gops.length; h++) {
currentGop = gops[h];
for (i = 0; i < currentGop.length; i++) {
currentFrame = currentGop[i];
sample = sampleForFrame(currentFrame, dataOffset);
dataOffset += sample.size;
samples.push(sample);
}
}
return samples;
}; // generate the track's raw mdat data from an array of gops
var concatenateNalData = function concatenateNalData(gops) {
var h,
i,
j,
currentGop,
currentFrame,
currentNal,
dataOffset = 0,
nalsByteLength = gops.byteLength,
numberOfNals = gops.nalCount,
totalByteLength = nalsByteLength + 4 * numberOfNals,
data = new Uint8Array(totalByteLength),
view = new DataView(data.buffer); // For each Gop..
for (h = 0; h < gops.length; h++) {
currentGop = gops[h]; // For each Frame..
for (i = 0; i < currentGop.length; i++) {
currentFrame = currentGop[i]; // For each NAL..
for (j = 0; j < currentFrame.length; j++) {
currentNal = currentFrame[j];
view.setUint32(dataOffset, currentNal.data.byteLength);
dataOffset += 4;
data.set(currentNal.data, dataOffset);
dataOffset += currentNal.data.byteLength;
}
}
}
return data;
}; // generate the track's sample table from a frame
var generateSampleTableForFrame = function generateSampleTableForFrame(frame, baseDataOffset) {
var sample,
dataOffset = baseDataOffset || 0,
samples = [];
sample = sampleForFrame(frame, dataOffset);
samples.push(sample);
return samples;
}; // generate the track's raw mdat data from a frame
var concatenateNalDataForFrame = function concatenateNalDataForFrame(frame) {
var i,
currentNal,
dataOffset = 0,
nalsByteLength = frame.byteLength,
numberOfNals = frame.length,
totalByteLength = nalsByteLength + 4 * numberOfNals,
data = new Uint8Array(totalByteLength),
view = new DataView(data.buffer); // For each NAL..
for (i = 0; i < frame.length; i++) {
currentNal = frame[i];
view.setUint32(dataOffset, currentNal.data.byteLength);
dataOffset += 4;
data.set(currentNal.data, dataOffset);
dataOffset += currentNal.data.byteLength;
}
return data;
};
module.exports = {
groupNalsIntoFrames: groupNalsIntoFrames,
groupFramesIntoGops: groupFramesIntoGops,
extendFirstKeyFrame: extendFirstKeyFrame,
generateSampleTable: generateSampleTable,
concatenateNalData: concatenateNalData,
generateSampleTableForFrame: generateSampleTableForFrame,
concatenateNalDataForFrame: concatenateNalDataForFrame
};
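// Illustrative sketch, not part of mux.js: how a video segment stream would be
// expected to chain these helpers (nalUnits is an assumed array of parsed NAL
// unit objects carrying nalUnitType, data, pts and dts):
// var frames = groupNalsIntoFrames(nalUnits);
// var gops = extendFirstKeyFrame(groupFramesIntoGops(frames));
// var samples = generateSampleTable(gops); // entries for the trun box
// var mdatPayload = concatenateNalData(gops); // bytes for the mdat box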

View file

@ -0,0 +1,16 @@
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
module.exports = {
generator: require('./mp4-generator'),
probe: require('./probe'),
Transmuxer: require('./transmuxer').Transmuxer,
AudioSegmentStream: require('./transmuxer').AudioSegmentStream,
VideoSegmentStream: require('./transmuxer').VideoSegmentStream,
CaptionParser: require('./caption-parser')
};

View file

@ -0,0 +1,611 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Functions that generate fragmented MP4s suitable for use with Media
* Source Extensions.
*/
'use strict';
var MAX_UINT32 = require('../utils/numbers.js').MAX_UINT32;
var box, dinf, esds, ftyp, mdat, mfhd, minf, moof, moov, mvex, mvhd, trak, tkhd, mdia, mdhd, hdlr, sdtp, stbl, stsd, traf, trex, trun, types, MAJOR_BRAND, MINOR_VERSION, AVC1_BRAND, VIDEO_HDLR, AUDIO_HDLR, HDLR_TYPES, VMHD, SMHD, DREF, STCO, STSC, STSZ, STTS; // pre-calculate constants
(function () {
var i;
types = {
avc1: [],
// codingname
avcC: [],
btrt: [],
dinf: [],
dref: [],
esds: [],
ftyp: [],
hdlr: [],
mdat: [],
mdhd: [],
mdia: [],
mfhd: [],
minf: [],
moof: [],
moov: [],
mp4a: [],
// codingname
mvex: [],
mvhd: [],
pasp: [],
sdtp: [],
smhd: [],
stbl: [],
stco: [],
stsc: [],
stsd: [],
stsz: [],
stts: [],
styp: [],
tfdt: [],
tfhd: [],
traf: [],
trak: [],
trun: [],
trex: [],
tkhd: [],
vmhd: []
}; // In environments where Uint8Array is undefined (e.g., IE8), skip set up so that we
// don't throw an error
if (typeof Uint8Array === 'undefined') {
return;
}
for (i in types) {
if (types.hasOwnProperty(i)) {
types[i] = [i.charCodeAt(0), i.charCodeAt(1), i.charCodeAt(2), i.charCodeAt(3)];
}
}
MAJOR_BRAND = new Uint8Array(['i'.charCodeAt(0), 's'.charCodeAt(0), 'o'.charCodeAt(0), 'm'.charCodeAt(0)]);
AVC1_BRAND = new Uint8Array(['a'.charCodeAt(0), 'v'.charCodeAt(0), 'c'.charCodeAt(0), '1'.charCodeAt(0)]);
MINOR_VERSION = new Uint8Array([0, 0, 0, 1]);
VIDEO_HDLR = new Uint8Array([0x00, // version 0
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x00, // pre_defined
0x76, 0x69, 0x64, 0x65, // handler_type: 'vide'
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00, // reserved
0x56, 0x69, 0x64, 0x65, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'VideoHandler'
]);
AUDIO_HDLR = new Uint8Array([0x00, // version 0
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x00, // pre_defined
0x73, 0x6f, 0x75, 0x6e, // handler_type: 'soun'
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00, // reserved
0x53, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'SoundHandler'
]);
HDLR_TYPES = {
video: VIDEO_HDLR,
audio: AUDIO_HDLR
};
DREF = new Uint8Array([0x00, // version 0
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x01, // entry_count
0x00, 0x00, 0x00, 0x0c, // entry_size
0x75, 0x72, 0x6c, 0x20, // 'url' type
0x00, // version 0
0x00, 0x00, 0x01 // entry_flags
]);
SMHD = new Uint8Array([0x00, // version
0x00, 0x00, 0x00, // flags
0x00, 0x00, // balance, 0 means centered
0x00, 0x00 // reserved
]);
STCO = new Uint8Array([0x00, // version
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x00 // entry_count
]);
STSC = STCO;
STSZ = new Uint8Array([0x00, // version
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x00, // sample_size
0x00, 0x00, 0x00, 0x00 // sample_count
]);
STTS = STCO;
VMHD = new Uint8Array([0x00, // version
0x00, 0x00, 0x01, // flags
0x00, 0x00, // graphicsmode
0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // opcolor
]);
})();
box = function box(type) {
var payload = [],
size = 0,
i,
result,
view;
for (i = 1; i < arguments.length; i++) {
payload.push(arguments[i]);
}
i = payload.length; // calculate the total size we need to allocate
while (i--) {
size += payload[i].byteLength;
}
result = new Uint8Array(size + 8);
view = new DataView(result.buffer, result.byteOffset, result.byteLength);
view.setUint32(0, result.byteLength);
result.set(type, 4); // copy the payload into the result
for (i = 0, size = 8; i < payload.length; i++) {
result.set(payload[i], size);
size += payload[i].byteLength;
}
return result;
};
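// Illustrative sketch, not part of mux.js: box() prepends an 8-byte header
// (big-endian 32-bit size, then the 4-byte type) to the concatenated payloads,
// so a 4-byte payload yields a 12-byte box:
// var b = box(types.ftyp, new Uint8Array([1, 2, 3, 4]));
// b.byteLength === 12 && b[3] === 12; // size occupies bytes 0-3
// String.fromCharCode(b[4], b[5], b[6], b[7]) === 'ftyp'; // type at bytes 4-7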
dinf = function dinf() {
return box(types.dinf, box(types.dref, DREF));
};
esds = function esds(track) {
return box(types.esds, new Uint8Array([0x00, // version
0x00, 0x00, 0x00, // flags
// ES_Descriptor
0x03, // tag, ES_DescrTag
0x19, // length
0x00, 0x00, // ES_ID
0x00, // streamDependenceFlag, URL_flag, reserved, streamPriority
// DecoderConfigDescriptor
0x04, // tag, DecoderConfigDescrTag
0x11, // length
0x40, // object type
0x15, // streamType
0x00, 0x06, 0x00, // bufferSizeDB
0x00, 0x00, 0xda, 0xc0, // maxBitrate
0x00, 0x00, 0xda, 0xc0, // avgBitrate
// DecoderSpecificInfo
0x05, // tag, DecoderSpecificInfoTag
0x02, // length
// ISO/IEC 14496-3, AudioSpecificConfig
// for samplingFrequencyIndex see ISO/IEC 13818-7:2006, 8.1.3.2.2, Table 35
track.audioobjecttype << 3 | track.samplingfrequencyindex >>> 1, track.samplingfrequencyindex << 7 | track.channelcount << 3, 0x06, 0x01, 0x02 // GASpecificConfig
]));
};
ftyp = function ftyp() {
return box(types.ftyp, MAJOR_BRAND, MINOR_VERSION, MAJOR_BRAND, AVC1_BRAND);
};
hdlr = function hdlr(type) {
return box(types.hdlr, HDLR_TYPES[type]);
};
mdat = function mdat(data) {
return box(types.mdat, data);
};
mdhd = function mdhd(track) {
var result = new Uint8Array([0x00, // version 0
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x02, // creation_time
0x00, 0x00, 0x00, 0x03, // modification_time
0x00, 0x01, 0x5f, 0x90, // timescale, 90,000 "ticks" per second
track.duration >>> 24 & 0xFF, track.duration >>> 16 & 0xFF, track.duration >>> 8 & 0xFF, track.duration & 0xFF, // duration
0x55, 0xc4, // 'und' language (undetermined)
0x00, 0x00]); // Use the sample rate from the track metadata, when it is
// defined. The sample rate can be parsed out of an ADTS header, for
// instance.
if (track.samplerate) {
result[12] = track.samplerate >>> 24 & 0xFF;
result[13] = track.samplerate >>> 16 & 0xFF;
result[14] = track.samplerate >>> 8 & 0xFF;
result[15] = track.samplerate & 0xFF;
}
return box(types.mdhd, result);
};
mdia = function mdia(track) {
return box(types.mdia, mdhd(track), hdlr(track.type), minf(track));
};
mfhd = function mfhd(sequenceNumber) {
return box(types.mfhd, new Uint8Array([0x00, 0x00, 0x00, 0x00, // flags
(sequenceNumber & 0xFF000000) >> 24, (sequenceNumber & 0xFF0000) >> 16, (sequenceNumber & 0xFF00) >> 8, sequenceNumber & 0xFF // sequence_number
]));
};
minf = function minf(track) {
return box(types.minf, track.type === 'video' ? box(types.vmhd, VMHD) : box(types.smhd, SMHD), dinf(), stbl(track));
};
moof = function moof(sequenceNumber, tracks) {
var trackFragments = [],
i = tracks.length; // build traf boxes for each track fragment
while (i--) {
trackFragments[i] = traf(tracks[i]);
}
return box.apply(null, [types.moof, mfhd(sequenceNumber)].concat(trackFragments));
};
/**
* Returns a movie box.
* @param tracks {array} the tracks associated with this movie
* @see ISO/IEC 14496-12:2012(E), section 8.2.1
*/
moov = function moov(tracks) {
var i = tracks.length,
boxes = [];
while (i--) {
boxes[i] = trak(tracks[i]);
}
return box.apply(null, [types.moov, mvhd(0xffffffff)].concat(boxes).concat(mvex(tracks)));
};
mvex = function mvex(tracks) {
var i = tracks.length,
boxes = [];
while (i--) {
boxes[i] = trex(tracks[i]);
}
return box.apply(null, [types.mvex].concat(boxes));
};
mvhd = function mvhd(duration) {
var bytes = new Uint8Array([0x00, // version 0
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x01, // creation_time
0x00, 0x00, 0x00, 0x02, // modification_time
0x00, 0x01, 0x5f, 0x90, // timescale, 90,000 "ticks" per second
(duration & 0xFF000000) >> 24, (duration & 0xFF0000) >> 16, (duration & 0xFF00) >> 8, duration & 0xFF, // duration
0x00, 0x01, 0x00, 0x00, // 1.0 rate
0x01, 0x00, // 1.0 volume
0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, // transformation: unity matrix
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // pre_defined
0xff, 0xff, 0xff, 0xff // next_track_ID
]);
return box(types.mvhd, bytes);
};
sdtp = function sdtp(track) {
var samples = track.samples || [],
bytes = new Uint8Array(4 + samples.length),
flags,
i; // leave the full box header (4 bytes) all zero
// write the sample table
for (i = 0; i < samples.length; i++) {
flags = samples[i].flags;
bytes[i + 4] = flags.dependsOn << 4 | flags.isDependedOn << 2 | flags.hasRedundancy;
}
return box(types.sdtp, bytes);
};
stbl = function stbl(track) {
return box(types.stbl, stsd(track), box(types.stts, STTS), box(types.stsc, STSC), box(types.stsz, STSZ), box(types.stco, STCO));
};
(function () {
var videoSample, audioSample;
stsd = function stsd(track) {
return box(types.stsd, new Uint8Array([0x00, // version 0
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x01]), track.type === 'video' ? videoSample(track) : audioSample(track));
};
videoSample = function videoSample(track) {
var sps = track.sps || [],
pps = track.pps || [],
sequenceParameterSets = [],
pictureParameterSets = [],
i,
avc1Box; // assemble the SPSs
for (i = 0; i < sps.length; i++) {
sequenceParameterSets.push((sps[i].byteLength & 0xFF00) >>> 8);
sequenceParameterSets.push(sps[i].byteLength & 0xFF); // sequenceParameterSetLength
sequenceParameterSets = sequenceParameterSets.concat(Array.prototype.slice.call(sps[i])); // SPS
} // assemble the PPSs
for (i = 0; i < pps.length; i++) {
pictureParameterSets.push((pps[i].byteLength & 0xFF00) >>> 8);
pictureParameterSets.push(pps[i].byteLength & 0xFF);
pictureParameterSets = pictureParameterSets.concat(Array.prototype.slice.call(pps[i]));
}
avc1Box = [types.avc1, new Uint8Array([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x01, // data_reference_index
0x00, 0x00, // pre_defined
0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // pre_defined
(track.width & 0xff00) >> 8, track.width & 0xff, // width
(track.height & 0xff00) >> 8, track.height & 0xff, // height
0x00, 0x48, 0x00, 0x00, // horizresolution
0x00, 0x48, 0x00, 0x00, // vertresolution
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x01, // frame_count
0x13, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x6a, 0x73, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x69, 0x62, 0x2d, 0x68, 0x6c, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // compressorname
0x00, 0x18, // depth = 24
0x11, 0x11 // pre_defined = -1
]), box(types.avcC, new Uint8Array([0x01, // configurationVersion
track.profileIdc, // AVCProfileIndication
track.profileCompatibility, // profile_compatibility
track.levelIdc, // AVCLevelIndication
0xff // lengthSizeMinusOne, hard-coded to 4 bytes
].concat([sps.length], // numOfSequenceParameterSets
sequenceParameterSets, // "SPS"
[pps.length], // numOfPictureParameterSets
pictureParameterSets // "PPS"
))), box(types.btrt, new Uint8Array([0x00, 0x1c, 0x9c, 0x80, // bufferSizeDB
0x00, 0x2d, 0xc6, 0xc0, // maxBitrate
0x00, 0x2d, 0xc6, 0xc0 // avgBitrate
]))];
if (track.sarRatio) {
var hSpacing = track.sarRatio[0],
vSpacing = track.sarRatio[1];
avc1Box.push(box(types.pasp, new Uint8Array([(hSpacing & 0xFF000000) >> 24, (hSpacing & 0xFF0000) >> 16, (hSpacing & 0xFF00) >> 8, hSpacing & 0xFF, (vSpacing & 0xFF000000) >> 24, (vSpacing & 0xFF0000) >> 16, (vSpacing & 0xFF00) >> 8, vSpacing & 0xFF])));
}
return box.apply(null, avc1Box);
};
audioSample = function audioSample(track) {
return box(types.mp4a, new Uint8Array([// SampleEntry, ISO/IEC 14496-12
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x01, // data_reference_index
// AudioSampleEntry, ISO/IEC 14496-12
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00, // reserved
(track.channelcount & 0xff00) >> 8, track.channelcount & 0xff, // channelcount
(track.samplesize & 0xff00) >> 8, track.samplesize & 0xff, // samplesize
0x00, 0x00, // pre_defined
0x00, 0x00, // reserved
(track.samplerate & 0xff00) >> 8, track.samplerate & 0xff, 0x00, 0x00 // samplerate, 16.16
// MP4AudioSampleEntry, ISO/IEC 14496-14
]), esds(track));
};
})();
tkhd = function tkhd(track) {
var result = new Uint8Array([0x00, // version 0
0x00, 0x00, 0x07, // flags
0x00, 0x00, 0x00, 0x00, // creation_time
0x00, 0x00, 0x00, 0x00, // modification_time
(track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF, // track_ID
0x00, 0x00, 0x00, 0x00, // reserved
(track.duration & 0xFF000000) >> 24, (track.duration & 0xFF0000) >> 16, (track.duration & 0xFF00) >> 8, track.duration & 0xFF, // duration
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x00, // layer
0x00, 0x00, // alternate_group
0x01, 0x00, // non-audio track volume
0x00, 0x00, // reserved
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, // transformation: unity matrix
(track.width & 0xFF00) >> 8, track.width & 0xFF, 0x00, 0x00, // width
(track.height & 0xFF00) >> 8, track.height & 0xFF, 0x00, 0x00 // height
]);
return box(types.tkhd, result);
};
/**
* Generate a track fragment (traf) box. A traf box collects metadata
* about tracks in a movie fragment (moof) box.
*/
traf = function traf(track) {
var trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun, sampleDependencyTable, dataOffset, upperWordBaseMediaDecodeTime, lowerWordBaseMediaDecodeTime;
trackFragmentHeader = box(types.tfhd, new Uint8Array([0x00, // version 0
0x00, 0x00, 0x3a, // flags
(track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF, // track_ID
0x00, 0x00, 0x00, 0x01, // sample_description_index
0x00, 0x00, 0x00, 0x00, // default_sample_duration
0x00, 0x00, 0x00, 0x00, // default_sample_size
0x00, 0x00, 0x00, 0x00 // default_sample_flags
]));
upperWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime / MAX_UINT32);
lowerWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime % MAX_UINT32);
trackFragmentDecodeTime = box(types.tfdt, new Uint8Array([0x01, // version 1
0x00, 0x00, 0x00, // flags
// baseMediaDecodeTime
upperWordBaseMediaDecodeTime >>> 24 & 0xFF, upperWordBaseMediaDecodeTime >>> 16 & 0xFF, upperWordBaseMediaDecodeTime >>> 8 & 0xFF, upperWordBaseMediaDecodeTime & 0xFF, lowerWordBaseMediaDecodeTime >>> 24 & 0xFF, lowerWordBaseMediaDecodeTime >>> 16 & 0xFF, lowerWordBaseMediaDecodeTime >>> 8 & 0xFF, lowerWordBaseMediaDecodeTime & 0xFF])); // the data offset specifies the number of bytes from the start of
// the containing moof to the first payload byte of the associated
// mdat
dataOffset = 32 + // tfhd
20 + // tfdt
8 + // traf header
16 + // mfhd
8 + // moof header
8; // mdat header
// audio tracks require less metadata
if (track.type === 'audio') {
trackFragmentRun = trun(track, dataOffset);
return box(types.traf, trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun);
} // video tracks should contain an independent and disposable samples
// box (sdtp)
// generate one and adjust offsets to match
sampleDependencyTable = sdtp(track);
trackFragmentRun = trun(track, sampleDependencyTable.length + dataOffset);
return box(types.traf, trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun, sampleDependencyTable);
};
/**
* Generate a track box.
* @param track {object} a track definition
* @return {Uint8Array} the track box
*/
trak = function trak(track) {
track.duration = track.duration || 0xffffffff;
return box(types.trak, tkhd(track), mdia(track));
};
trex = function trex(track) {
var result = new Uint8Array([0x00, // version 0
0x00, 0x00, 0x00, // flags
(track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF, // track_ID
0x00, 0x00, 0x00, 0x01, // default_sample_description_index
0x00, 0x00, 0x00, 0x00, // default_sample_duration
0x00, 0x00, 0x00, 0x00, // default_sample_size
0x00, 0x01, 0x00, 0x01 // default_sample_flags
]); // the last two bytes of default_sample_flags is the sample
// degradation priority, a hint about the importance of this sample
// relative to others. Lower the degradation priority for all sample
// types other than video.
if (track.type !== 'video') {
result[result.length - 1] = 0x00;
}
return box(types.trex, result);
};
(function () {
var audioTrun, videoTrun, trunHeader; // This method assumes all samples are uniform. That is, if a
// duration is present for the first sample, it will be present for
// all subsequent samples.
// see ISO/IEC 14496-12:2012, Section 8.8.8.1
trunHeader = function trunHeader(samples, offset) {
var durationPresent = 0,
sizePresent = 0,
flagsPresent = 0,
compositionTimeOffset = 0; // trun flag constants
if (samples.length) {
if (samples[0].duration !== undefined) {
durationPresent = 0x1;
}
if (samples[0].size !== undefined) {
sizePresent = 0x2;
}
if (samples[0].flags !== undefined) {
flagsPresent = 0x4;
}
if (samples[0].compositionTimeOffset !== undefined) {
compositionTimeOffset = 0x8;
}
}
return [0x00, // version 0
0x00, durationPresent | sizePresent | flagsPresent | compositionTimeOffset, 0x01, // flags
(samples.length & 0xFF000000) >>> 24, (samples.length & 0xFF0000) >>> 16, (samples.length & 0xFF00) >>> 8, samples.length & 0xFF, // sample_count
(offset & 0xFF000000) >>> 24, (offset & 0xFF0000) >>> 16, (offset & 0xFF00) >>> 8, offset & 0xFF // data_offset
];
};
videoTrun = function videoTrun(track, offset) {
var bytesOffest, bytes, header, samples, sample, i;
samples = track.samples || [];
offset += 8 + 12 + 16 * samples.length;
header = trunHeader(samples, offset);
bytes = new Uint8Array(header.length + samples.length * 16);
bytes.set(header);
bytesOffest = header.length;
for (i = 0; i < samples.length; i++) {
sample = samples[i];
bytes[bytesOffest++] = (sample.duration & 0xFF000000) >>> 24;
bytes[bytesOffest++] = (sample.duration & 0xFF0000) >>> 16;
bytes[bytesOffest++] = (sample.duration & 0xFF00) >>> 8;
bytes[bytesOffest++] = sample.duration & 0xFF; // sample_duration
bytes[bytesOffest++] = (sample.size & 0xFF000000) >>> 24;
bytes[bytesOffest++] = (sample.size & 0xFF0000) >>> 16;
bytes[bytesOffest++] = (sample.size & 0xFF00) >>> 8;
bytes[bytesOffest++] = sample.size & 0xFF; // sample_size
bytes[bytesOffest++] = sample.flags.isLeading << 2 | sample.flags.dependsOn;
bytes[bytesOffest++] = sample.flags.isDependedOn << 6 | sample.flags.hasRedundancy << 4 | sample.flags.paddingValue << 1 | sample.flags.isNonSyncSample;
bytes[bytesOffest++] = sample.flags.degradationPriority & 0xF0 << 8;
bytes[bytesOffest++] = sample.flags.degradationPriority & 0x0F; // sample_flags
bytes[bytesOffest++] = (sample.compositionTimeOffset & 0xFF000000) >>> 24;
bytes[bytesOffest++] = (sample.compositionTimeOffset & 0xFF0000) >>> 16;
bytes[bytesOffest++] = (sample.compositionTimeOffset & 0xFF00) >>> 8;
bytes[bytesOffest++] = sample.compositionTimeOffset & 0xFF; // sample_composition_time_offset
}
return box(types.trun, bytes);
};
audioTrun = function audioTrun(track, offset) {
var bytes, bytesOffest, header, samples, sample, i;
samples = track.samples || [];
offset += 8 + 12 + 8 * samples.length;
header = trunHeader(samples, offset);
bytes = new Uint8Array(header.length + samples.length * 8);
bytes.set(header);
bytesOffest = header.length;
for (i = 0; i < samples.length; i++) {
sample = samples[i];
bytes[bytesOffest++] = (sample.duration & 0xFF000000) >>> 24;
bytes[bytesOffest++] = (sample.duration & 0xFF0000) >>> 16;
bytes[bytesOffest++] = (sample.duration & 0xFF00) >>> 8;
bytes[bytesOffest++] = sample.duration & 0xFF; // sample_duration
bytes[bytesOffest++] = (sample.size & 0xFF000000) >>> 24;
bytes[bytesOffest++] = (sample.size & 0xFF0000) >>> 16;
bytes[bytesOffest++] = (sample.size & 0xFF00) >>> 8;
bytes[bytesOffest++] = sample.size & 0xFF; // sample_size
}
return box(types.trun, bytes);
};
trun = function trun(track, offset) {
if (track.type === 'audio') {
return audioTrun(track, offset);
}
return videoTrun(track, offset);
};
})();
module.exports = {
ftyp: ftyp,
mdat: mdat,
moof: moof,
moov: moov,
initSegment: function initSegment(tracks) {
var fileType = ftyp(),
movie = moov(tracks),
result;
result = new Uint8Array(fileType.byteLength + movie.byteLength);
result.set(fileType);
result.set(movie, fileType.byteLength);
return result;
}
};
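// Illustrative usage, not part of mux.js: generating an init segment for one
// audio track. The field values below are assumptions for the sketch; real
// track objects are produced by the transmuxer from parsed stream metadata.
// var init = module.exports.initSegment([{
//   id: 1,
//   type: 'audio',
//   duration: 0,
//   channelcount: 2,
//   samplesize: 16,
//   samplerate: 44100,
//   samplingfrequencyindex: 4, // 44100 Hz, per ISO/IEC 13818-7 Table 35
//   audioobjecttype: 2 // AAC LC
// }]);
// `init` is an ftyp box followed by a moov box, suitable for appending to a
// Media Source Extensions SourceBuffer ahead of the media segments.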

View file

@ -0,0 +1,12 @@
"use strict";
var parseType = function parseType(buffer) {
var result = '';
result += String.fromCharCode(buffer[0]);
result += String.fromCharCode(buffer[1]);
result += String.fromCharCode(buffer[2]);
result += String.fromCharCode(buffer[3]);
return result;
};
module.exports = parseType;
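// Illustrative usage, not part of mux.js: a box type is four ASCII bytes, e.g.
// parseType(new Uint8Array([0x6d, 0x6f, 0x6f, 0x66])) === 'moof'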

View file

@ -0,0 +1,393 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Utilities to detect basic properties and metadata about MP4s.
*/
'use strict';
var toUnsigned = require('../utils/bin').toUnsigned;
var toHexString = require('../utils/bin').toHexString;
var findBox = require('../mp4/find-box.js');
var parseType = require('../mp4/parse-type.js');
var emsg = require('../mp4/emsg.js');
var parseTfhd = require('../tools/parse-tfhd.js');
var parseTrun = require('../tools/parse-trun.js');
var parseTfdt = require('../tools/parse-tfdt.js');
var getUint64 = require('../utils/numbers.js').getUint64;
var timescale, startTime, compositionStartTime, getVideoTrackIds, getTracks, getTimescaleFromMediaHeader, getEmsgID3;
var window = require('global/window');
var parseId3Frames = require('../tools/parse-id3.js').parseId3Frames;
/**
* Parses an MP4 initialization segment and extracts the timescale
* values for any declared tracks. Timescale values indicate the
* number of clock ticks per second to assume for time-based values
* elsewhere in the MP4.
*
* To determine the start time of an MP4, you need two pieces of
* information: the timescale unit and the earliest base media decode
* time. Multiple timescales can be specified within an MP4 but the
* base media decode time is always expressed in the timescale from
* the media header box for the track:
* ```
* moov > trak > mdia > mdhd.timescale
* ```
* @param init {Uint8Array} the bytes of the init segment
* @return {object} a hash of track ids to timescale values or null if
* the init segment is malformed.
*/
timescale = function timescale(init) {
var result = {},
traks = findBox(init, ['moov', 'trak']); // mdhd timescale
return traks.reduce(function (result, trak) {
var tkhd, version, index, id, mdhd;
tkhd = findBox(trak, ['tkhd'])[0];
if (!tkhd) {
return null;
}
version = tkhd[0];
index = version === 0 ? 12 : 20;
id = toUnsigned(tkhd[index] << 24 | tkhd[index + 1] << 16 | tkhd[index + 2] << 8 | tkhd[index + 3]);
mdhd = findBox(trak, ['mdia', 'mdhd'])[0];
if (!mdhd) {
return null;
}
version = mdhd[0];
index = version === 0 ? 12 : 20;
result[id] = toUnsigned(mdhd[index] << 24 | mdhd[index + 1] << 16 | mdhd[index + 2] << 8 | mdhd[index + 3]);
return result;
}, result);
};
/**
* Determine the base media decode start time, in seconds, for an MP4
* fragment. If multiple fragments are specified, the earliest time is
* returned.
*
* The base media decode time can be parsed from track fragment
* metadata:
* ```
* moof > traf > tfdt.baseMediaDecodeTime
* ```
* It requires the timescale value from the mdhd to interpret.
*
* @param timescale {object} a hash of track ids to timescale values.
* @return {number} the earliest base media decode start time for the
* fragment, in seconds
*/
startTime = function startTime(timescale, fragment) {
var trafs, result; // we need info from two children of each track fragment box
trafs = findBox(fragment, ['moof', 'traf']); // determine the start times for each track
var lowestTime = trafs.reduce(function (acc, traf) {
var tfhd = findBox(traf, ['tfhd'])[0]; // get the track id from the tfhd
var id = toUnsigned(tfhd[4] << 24 | tfhd[5] << 16 | tfhd[6] << 8 | tfhd[7]); // assume a 90kHz clock if no timescale was specified
var scale = timescale[id] || 90e3; // get the base media decode time from the tfdt
var tfdt = findBox(traf, ['tfdt'])[0];
var dv = new DataView(tfdt.buffer, tfdt.byteOffset, tfdt.byteLength);
var baseTime; // version 1 is 64 bit
if (tfdt[0] === 1) {
baseTime = getUint64(tfdt.subarray(4, 12));
} else {
baseTime = dv.getUint32(4);
} // convert base time to seconds if it is a valid number.
var seconds;
if (typeof baseTime === 'bigint') {
seconds = baseTime / window.BigInt(scale);
} else if (typeof baseTime === 'number' && !isNaN(baseTime)) {
seconds = baseTime / scale;
}
if (seconds < Number.MAX_SAFE_INTEGER) {
seconds = Number(seconds);
}
if (seconds < acc) {
acc = seconds;
}
return acc;
}, Infinity);
return typeof lowestTime === 'bigint' || isFinite(lowestTime) ? lowestTime : 0;
};
/**
* Determine the composition start, in seconds, for an MP4
* fragment.
*
* The composition start time of a fragment can be calculated using the base
* media decode time, composition time offset, and timescale, as follows:
*
* compositionStartTime = (baseMediaDecodeTime + compositionTimeOffset) / timescale
*
* All of the aforementioned information is contained within a media fragment's
* `traf` box, except for timescale info, which comes from the initialization
* segment, so a track id (also contained within a `traf`) is also necessary to
* associate it with a timescale
*
*
* @param timescales {object} - a hash of track ids to timescale values.
* @param fragment {Uint8Array} - the bytes of a media segment
* @return {number} the composition start time for the fragment, in seconds
**/
compositionStartTime = function compositionStartTime(timescales, fragment) {
var trafBoxes = findBox(fragment, ['moof', 'traf']);
var baseMediaDecodeTime = 0;
var compositionTimeOffset = 0;
var trackId;
if (trafBoxes && trafBoxes.length) {
// The spec states that track run samples contained within a `traf` box are contiguous, but
// it does not explicitly state whether the `traf` boxes themselves are contiguous.
// We will assume that they are, so we only need the first to calculate start time.
var tfhd = findBox(trafBoxes[0], ['tfhd'])[0];
var trun = findBox(trafBoxes[0], ['trun'])[0];
var tfdt = findBox(trafBoxes[0], ['tfdt'])[0];
if (tfhd) {
var parsedTfhd = parseTfhd(tfhd);
trackId = parsedTfhd.trackId;
}
if (tfdt) {
var parsedTfdt = parseTfdt(tfdt);
baseMediaDecodeTime = parsedTfdt.baseMediaDecodeTime;
}
if (trun) {
var parsedTrun = parseTrun(trun);
if (parsedTrun.samples && parsedTrun.samples.length) {
compositionTimeOffset = parsedTrun.samples[0].compositionTimeOffset || 0;
}
}
} // Get timescale for this specific track. Assume a 90kHz clock if no timescale was
// specified.
var timescale = timescales[trackId] || 90e3; // return the composition start time, in seconds
if (typeof baseMediaDecodeTime === 'bigint') {
compositionTimeOffset = window.BigInt(compositionTimeOffset);
timescale = window.BigInt(timescale);
}
var result = (baseMediaDecodeTime + compositionTimeOffset) / timescale;
if (typeof result === 'bigint' && result < Number.MAX_SAFE_INTEGER) {
result = Number(result);
}
return result;
};
/**
* Find the trackIds of the video tracks in this source.
* Found by parsing the Handler Reference and Track Header Boxes:
* moov > trak > mdia > hdlr
* moov > trak > tkhd
*
* @param {Uint8Array} init - The bytes of the init segment for this source
* @return {Number[]} A list of trackIds
*
* @see ISO-BMFF-12/2015, Section 8.4.3
**/
getVideoTrackIds = function getVideoTrackIds(init) {
var traks = findBox(init, ['moov', 'trak']);
var videoTrackIds = [];
traks.forEach(function (trak) {
var hdlrs = findBox(trak, ['mdia', 'hdlr']);
var tkhds = findBox(trak, ['tkhd']);
hdlrs.forEach(function (hdlr, index) {
var handlerType = parseType(hdlr.subarray(8, 12));
var tkhd = tkhds[index];
var view;
var version;
var trackId;
if (handlerType === 'vide') {
view = new DataView(tkhd.buffer, tkhd.byteOffset, tkhd.byteLength);
version = view.getUint8(0);
trackId = version === 0 ? view.getUint32(12) : view.getUint32(20);
videoTrackIds.push(trackId);
}
});
});
return videoTrackIds;
};
getTimescaleFromMediaHeader = function getTimescaleFromMediaHeader(mdhd) {
// mdhd is a FullBox, meaning it will have its own version as the first byte
var version = mdhd[0];
var index = version === 0 ? 12 : 20;
return toUnsigned(mdhd[index] << 24 | mdhd[index + 1] << 16 | mdhd[index + 2] << 8 | mdhd[index + 3]);
};
/**
* Get all the video, audio, and hint tracks from a non-fragmented
* MP4 segment
*/
getTracks = function getTracks(init) {
var traks = findBox(init, ['moov', 'trak']);
var tracks = [];
traks.forEach(function (trak) {
var track = {};
var tkhd = findBox(trak, ['tkhd'])[0];
var view, tkhdVersion; // id
if (tkhd) {
view = new DataView(tkhd.buffer, tkhd.byteOffset, tkhd.byteLength);
tkhdVersion = view.getUint8(0);
track.id = tkhdVersion === 0 ? view.getUint32(12) : view.getUint32(20);
}
var hdlr = findBox(trak, ['mdia', 'hdlr'])[0]; // type
if (hdlr) {
var type = parseType(hdlr.subarray(8, 12));
if (type === 'vide') {
track.type = 'video';
} else if (type === 'soun') {
track.type = 'audio';
} else {
track.type = type;
}
} // codec
var stsd = findBox(trak, ['mdia', 'minf', 'stbl', 'stsd'])[0];
if (stsd) {
var sampleDescriptions = stsd.subarray(8); // gives the codec type string
track.codec = parseType(sampleDescriptions.subarray(4, 8));
var codecBox = findBox(sampleDescriptions, [track.codec])[0];
var codecConfig, codecConfigType;
if (codecBox) {
// https://tools.ietf.org/html/rfc6381#section-3.3
if (/^[asm]vc[1-9]$/i.test(track.codec)) {
// we don't need anything but the "config" parameter of the
// avc1 codecBox
codecConfig = codecBox.subarray(78);
codecConfigType = parseType(codecConfig.subarray(4, 8));
if (codecConfigType === 'avcC' && codecConfig.length > 11) {
track.codec += '.'; // left padded with zeroes for single digit hex
// profile idc
track.codec += toHexString(codecConfig[9]); // the byte containing the constraint_set flags
track.codec += toHexString(codecConfig[10]); // level idc
track.codec += toHexString(codecConfig[11]);
} else {
// TODO: show a warning that we couldn't parse the codec
// and are using the default
track.codec = 'avc1.4d400d';
}
} else if (/^mp4[av]$/i.test(track.codec)) {
// we do not need anything but the streamDescriptor of the mp4a codecBox
codecConfig = codecBox.subarray(28);
codecConfigType = parseType(codecConfig.subarray(4, 8));
if (codecConfigType === 'esds' && codecConfig.length > 20 && codecConfig[19] !== 0) {
track.codec += '.' + toHexString(codecConfig[19]); // this value is only a single digit
track.codec += '.' + toHexString(codecConfig[20] >>> 2 & 0x3f).replace(/^0/, '');
} else {
// TODO: show a warning that we couldn't parse the codec
// and are using the default
track.codec = 'mp4a.40.2';
}
} else {
// flac, opus, etc
track.codec = track.codec.toLowerCase();
}
}
}
var mdhd = findBox(trak, ['mdia', 'mdhd'])[0];
if (mdhd) {
track.timescale = getTimescaleFromMediaHeader(mdhd);
}
tracks.push(track);
});
return tracks;
};
/**
* Returns an array of emsg ID3 data from the provided segmentData.
* An offset can also be provided as the Latest Arrival Time to calculate
* the Event Start Time of v0 EMSG boxes.
* See: https://dashif-documents.azurewebsites.net/Events/master/event.html#Inband-event-timing
*
* @param {Uint8Array} segmentData the segment byte array.
* @param {number} offset the segment start time or Latest Arrival Time,
* @return {Object[]} an array of ID3 parsed from EMSG boxes
*/
getEmsgID3 = function getEmsgID3(segmentData, offset) {
if (offset === void 0) {
offset = 0;
}
var emsgBoxes = findBox(segmentData, ['emsg']);
return emsgBoxes.map(function (data) {
var parsedBox = emsg.parseEmsgBox(new Uint8Array(data));
var parsedId3Frames = parseId3Frames(parsedBox.message_data);
return {
cueTime: emsg.scaleTime(parsedBox.presentation_time, parsedBox.timescale, parsedBox.presentation_time_delta, offset),
duration: emsg.scaleTime(parsedBox.event_duration, parsedBox.timescale),
frames: parsedId3Frames
};
});
};
module.exports = {
// export mp4 inspector's findBox and parseType for backwards compatibility
findBox: findBox,
parseType: parseType,
timescale: timescale,
startTime: startTime,
compositionStartTime: compositionStartTime,
videoTrackIds: getVideoTrackIds,
tracks: getTracks,
getTimescaleFromMediaHeader: getTimescaleFromMediaHeader,
getEmsgID3: getEmsgID3
};
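// Usage sketch (illustrative; the require path is an assumption): the
// two-step flow described in the comments above: read per-track timescales
// from the init segment, then interpret a fragment's tfdt with them.
var probe = require('./probe');
function fragmentStartSeconds(initBytes, fragmentBytes) {
  var timescales = probe.timescale(initBytes); // { trackId: ticks per second }
  return probe.startTime(timescales, fragmentBytes); // earliest base media decode time, in seconds
}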

View file

@ -0,0 +1,108 @@
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
var ONE_SECOND_IN_TS = require('../utils/clock').ONE_SECOND_IN_TS;
/**
* Store information about the start and end of the track and the
* duration for each frame/sample we process in order to calculate
* the baseMediaDecodeTime
*/
var collectDtsInfo = function collectDtsInfo(track, data) {
if (typeof data.pts === 'number') {
if (track.timelineStartInfo.pts === undefined) {
track.timelineStartInfo.pts = data.pts;
}
if (track.minSegmentPts === undefined) {
track.minSegmentPts = data.pts;
} else {
track.minSegmentPts = Math.min(track.minSegmentPts, data.pts);
}
if (track.maxSegmentPts === undefined) {
track.maxSegmentPts = data.pts;
} else {
track.maxSegmentPts = Math.max(track.maxSegmentPts, data.pts);
}
}
if (typeof data.dts === 'number') {
if (track.timelineStartInfo.dts === undefined) {
track.timelineStartInfo.dts = data.dts;
}
if (track.minSegmentDts === undefined) {
track.minSegmentDts = data.dts;
} else {
track.minSegmentDts = Math.min(track.minSegmentDts, data.dts);
}
if (track.maxSegmentDts === undefined) {
track.maxSegmentDts = data.dts;
} else {
track.maxSegmentDts = Math.max(track.maxSegmentDts, data.dts);
}
}
};
/**
* Clear values used to calculate the baseMediaDecodeTime between
* tracks
*/
var clearDtsInfo = function clearDtsInfo(track) {
delete track.minSegmentDts;
delete track.maxSegmentDts;
delete track.minSegmentPts;
delete track.maxSegmentPts;
};
/**
* Calculate the track's baseMediaDecodeTime based on the earliest
* DTS the transmuxer has ever seen and the minimum DTS for the
* current track
* @param track {object} track metadata configuration
* @param keepOriginalTimestamps {boolean} If true, keep the timestamps
* in the source; false to adjust the first segment to start at 0.
*/
var calculateTrackBaseMediaDecodeTime = function calculateTrackBaseMediaDecodeTime(track, keepOriginalTimestamps) {
var baseMediaDecodeTime,
scale,
minSegmentDts = track.minSegmentDts; // Optionally adjust the time so the first segment starts at zero.
if (!keepOriginalTimestamps) {
minSegmentDts -= track.timelineStartInfo.dts;
} // track.timelineStartInfo.baseMediaDecodeTime is the location, in time, where
// we want the start of the first segment to be placed
baseMediaDecodeTime = track.timelineStartInfo.baseMediaDecodeTime; // Add to that the distance this segment is from the very first
baseMediaDecodeTime += minSegmentDts; // baseMediaDecodeTime must not become negative
baseMediaDecodeTime = Math.max(0, baseMediaDecodeTime);
if (track.type === 'audio') {
// Audio has a different clock equal to the sampling_rate so we need to
// scale the PTS values into the clock rate of the track
scale = track.samplerate / ONE_SECOND_IN_TS;
baseMediaDecodeTime *= scale;
baseMediaDecodeTime = Math.floor(baseMediaDecodeTime);
}
return baseMediaDecodeTime;
};
module.exports = {
clearDtsInfo: clearDtsInfo,
calculateTrackBaseMediaDecodeTime: calculateTrackBaseMediaDecodeTime,
collectDtsInfo: collectDtsInfo
};
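// Call-order sketch with illustrative values (the require path is an
// assumption): collect PTS/DTS extremes as frames arrive, compute the
// segment's baseMediaDecodeTime, then reset the per-segment state.
var trackInfo = require('./track-decode-info');
var track = { type: 'video', timelineStartInfo: { dts: 0, pts: 0, baseMediaDecodeTime: 0 } };
trackInfo.collectDtsInfo(track, { dts: 9000, pts: 12000 });
trackInfo.collectDtsInfo(track, { dts: 18000, pts: 21000 });
// minSegmentDts is 9000 and the timeline starts at 0, so the fragment lands at 9000
var baseMediaDecodeTime = trackInfo.calculateTrackBaseMediaDecodeTime(track, false);
trackInfo.clearDtsInfo(track);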

File diff suppressed because it is too large

View file

@ -0,0 +1,141 @@
'use strict';
var Stream = require('../utils/stream.js');
var mp4 = require('../mp4/mp4-generator.js');
var audioFrameUtils = require('../mp4/audio-frame-utils');
var trackInfo = require('../mp4/track-decode-info.js');
var ONE_SECOND_IN_TS = require('../utils/clock').ONE_SECOND_IN_TS;
var AUDIO_PROPERTIES = require('../constants/audio-properties.js');
/**
* Constructs a single-track, ISO BMFF media segment from AAC data
* events. The output of this stream can be fed to a SourceBuffer
* configured with a suitable initialization segment.
*/
var AudioSegmentStream = function AudioSegmentStream(track, options) {
var adtsFrames = [],
sequenceNumber = 0,
earliestAllowedDts = 0,
audioAppendStartTs = 0,
videoBaseMediaDecodeTime = Infinity,
segmentStartPts = null,
segmentEndPts = null;
options = options || {};
AudioSegmentStream.prototype.init.call(this);
this.push = function (data) {
trackInfo.collectDtsInfo(track, data);
if (track) {
AUDIO_PROPERTIES.forEach(function (prop) {
track[prop] = data[prop];
});
} // buffer audio data until end() is called
adtsFrames.push(data);
};
this.setEarliestDts = function (earliestDts) {
earliestAllowedDts = earliestDts;
};
this.setVideoBaseMediaDecodeTime = function (baseMediaDecodeTime) {
videoBaseMediaDecodeTime = baseMediaDecodeTime;
};
this.setAudioAppendStart = function (timestamp) {
audioAppendStartTs = timestamp;
};
this.processFrames_ = function () {
var frames, moof, mdat, boxes, timingInfo; // return early if no audio data has been observed
if (adtsFrames.length === 0) {
return;
}
frames = audioFrameUtils.trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts);
if (frames.length === 0) {
// return early if the frames are all after the earliest allowed DTS
// TODO should we clear the adtsFrames?
return;
}
track.baseMediaDecodeTime = trackInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);
audioFrameUtils.prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime); // we have to build the index from byte locations to
// samples (that is, adts frames) in the audio data
track.samples = audioFrameUtils.generateSampleTable(frames); // concatenate the audio data to construct the mdat
mdat = mp4.mdat(audioFrameUtils.concatenateFrameData(frames));
adtsFrames = [];
moof = mp4.moof(sequenceNumber, [track]); // bump the sequence number for next time
sequenceNumber++;
track.initSegment = mp4.initSegment([track]); // it would be great to allocate this array up front instead of
// throwing away hundreds of media segment fragments
boxes = new Uint8Array(moof.byteLength + mdat.byteLength);
boxes.set(moof);
boxes.set(mdat, moof.byteLength);
trackInfo.clearDtsInfo(track);
if (segmentStartPts === null) {
segmentEndPts = segmentStartPts = frames[0].pts;
}
segmentEndPts += frames.length * (ONE_SECOND_IN_TS * 1024 / track.samplerate);
timingInfo = {
start: segmentStartPts
};
this.trigger('timingInfo', timingInfo);
this.trigger('data', {
track: track,
boxes: boxes
});
};
this.flush = function () {
this.processFrames_(); // trigger final timing info
this.trigger('timingInfo', {
start: segmentStartPts,
end: segmentEndPts
});
this.resetTiming_();
this.trigger('done', 'AudioSegmentStream');
};
this.partialFlush = function () {
this.processFrames_();
this.trigger('partialdone', 'AudioSegmentStream');
};
this.endTimeline = function () {
this.flush();
this.trigger('endedtimeline', 'AudioSegmentStream');
};
this.resetTiming_ = function () {
trackInfo.clearDtsInfo(track);
segmentStartPts = null;
segmentEndPts = null;
};
this.reset = function () {
this.resetTiming_();
adtsFrames = [];
this.trigger('reset');
};
};
AudioSegmentStream.prototype = new Stream();
module.exports = AudioSegmentStream;
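// Wiring sketch (illustrative; the require path and minimal track fields
// are assumptions): feed parsed ADTS frames in, collect fMP4 bytes out.
var AudioSegmentStream = require('./audio-segment-stream');
var audioTrack = { type: 'audio', samplerate: 44100, timelineStartInfo: { baseMediaDecodeTime: 0 } };
var audioSegmentStream = new AudioSegmentStream(audioTrack, { keepOriginalTimestamps: false });
audioSegmentStream.on('data', function (event) {
  // event.boxes is a Uint8Array holding the generated moof + mdat pair
});
// an upstream ADTS parser would call audioSegmentStream.push(frame) per
// frame and audioSegmentStream.flush() at the end of each segment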

View file

@ -0,0 +1,5 @@
"use strict";
module.exports = {
Transmuxer: require('./transmuxer')
};

View file

@ -0,0 +1,324 @@
"use strict";
var Stream = require('../utils/stream.js');
var m2ts = require('../m2ts/m2ts.js');
var codecs = require('../codecs/index.js');
var AudioSegmentStream = require('./audio-segment-stream.js');
var VideoSegmentStream = require('./video-segment-stream.js');
var trackInfo = require('../mp4/track-decode-info.js');
var isLikelyAacData = require('../aac/utils').isLikelyAacData;
var AdtsStream = require('../codecs/adts');
var AacStream = require('../aac/index');
var clock = require('../utils/clock');
var createPipeline = function createPipeline(object) {
object.prototype = new Stream();
object.prototype.init.call(object);
return object;
};
var tsPipeline = function tsPipeline(options) {
var pipeline = {
type: 'ts',
tracks: {
audio: null,
video: null
},
packet: new m2ts.TransportPacketStream(),
parse: new m2ts.TransportParseStream(),
elementary: new m2ts.ElementaryStream(),
timestampRollover: new m2ts.TimestampRolloverStream(),
adts: new codecs.Adts(),
h264: new codecs.h264.H264Stream(),
captionStream: new m2ts.CaptionStream(options),
metadataStream: new m2ts.MetadataStream()
};
pipeline.headOfPipeline = pipeline.packet; // Transport Stream
pipeline.packet.pipe(pipeline.parse).pipe(pipeline.elementary).pipe(pipeline.timestampRollover); // H264
pipeline.timestampRollover.pipe(pipeline.h264); // Hook up CEA-608/708 caption stream
pipeline.h264.pipe(pipeline.captionStream);
pipeline.timestampRollover.pipe(pipeline.metadataStream); // ADTS
pipeline.timestampRollover.pipe(pipeline.adts);
pipeline.elementary.on('data', function (data) {
if (data.type !== 'metadata') {
return;
}
for (var i = 0; i < data.tracks.length; i++) {
if (!pipeline.tracks[data.tracks[i].type]) {
pipeline.tracks[data.tracks[i].type] = data.tracks[i];
pipeline.tracks[data.tracks[i].type].timelineStartInfo.baseMediaDecodeTime = options.baseMediaDecodeTime;
}
}
if (pipeline.tracks.video && !pipeline.videoSegmentStream) {
pipeline.videoSegmentStream = new VideoSegmentStream(pipeline.tracks.video, options);
pipeline.videoSegmentStream.on('timelineStartInfo', function (timelineStartInfo) {
if (pipeline.tracks.audio && !options.keepOriginalTimestamps) {
pipeline.audioSegmentStream.setEarliestDts(timelineStartInfo.dts - options.baseMediaDecodeTime);
}
});
pipeline.videoSegmentStream.on('timingInfo', pipeline.trigger.bind(pipeline, 'videoTimingInfo'));
pipeline.videoSegmentStream.on('data', function (data) {
pipeline.trigger('data', {
type: 'video',
data: data
});
});
pipeline.videoSegmentStream.on('done', pipeline.trigger.bind(pipeline, 'done'));
pipeline.videoSegmentStream.on('partialdone', pipeline.trigger.bind(pipeline, 'partialdone'));
pipeline.videoSegmentStream.on('endedtimeline', pipeline.trigger.bind(pipeline, 'endedtimeline'));
pipeline.h264.pipe(pipeline.videoSegmentStream);
}
if (pipeline.tracks.audio && !pipeline.audioSegmentStream) {
pipeline.audioSegmentStream = new AudioSegmentStream(pipeline.tracks.audio, options);
pipeline.audioSegmentStream.on('data', function (data) {
pipeline.trigger('data', {
type: 'audio',
data: data
});
});
pipeline.audioSegmentStream.on('done', pipeline.trigger.bind(pipeline, 'done'));
pipeline.audioSegmentStream.on('partialdone', pipeline.trigger.bind(pipeline, 'partialdone'));
pipeline.audioSegmentStream.on('endedtimeline', pipeline.trigger.bind(pipeline, 'endedtimeline'));
pipeline.audioSegmentStream.on('timingInfo', pipeline.trigger.bind(pipeline, 'audioTimingInfo'));
pipeline.adts.pipe(pipeline.audioSegmentStream);
} // emit pmt info
pipeline.trigger('trackinfo', {
hasAudio: !!pipeline.tracks.audio,
hasVideo: !!pipeline.tracks.video
});
});
pipeline.captionStream.on('data', function (caption) {
var timelineStartPts;
if (pipeline.tracks.video) {
timelineStartPts = pipeline.tracks.video.timelineStartInfo.pts || 0;
} else {
// This will only happen if we encounter caption packets before
// video data in a segment. This is an unusual/unlikely scenario,
// so we assume the timeline starts at zero for now.
timelineStartPts = 0;
} // Translate caption PTS times into second offsets into the
// video timeline for the segment
caption.startTime = clock.metadataTsToSeconds(caption.startPts, timelineStartPts, options.keepOriginalTimestamps);
caption.endTime = clock.metadataTsToSeconds(caption.endPts, timelineStartPts, options.keepOriginalTimestamps);
pipeline.trigger('caption', caption);
});
pipeline = createPipeline(pipeline);
pipeline.metadataStream.on('data', pipeline.trigger.bind(pipeline, 'id3Frame'));
return pipeline;
};
var aacPipeline = function aacPipeline(options) {
var pipeline = {
type: 'aac',
tracks: {
audio: null
},
metadataStream: new m2ts.MetadataStream(),
aacStream: new AacStream(),
audioRollover: new m2ts.TimestampRolloverStream('audio'),
timedMetadataRollover: new m2ts.TimestampRolloverStream('timed-metadata'),
adtsStream: new AdtsStream(true)
}; // set up the parsing pipeline
pipeline.headOfPipeline = pipeline.aacStream;
pipeline.aacStream.pipe(pipeline.audioRollover).pipe(pipeline.adtsStream);
pipeline.aacStream.pipe(pipeline.timedMetadataRollover).pipe(pipeline.metadataStream);
pipeline.metadataStream.on('timestamp', function (frame) {
pipeline.aacStream.setTimestamp(frame.timeStamp);
});
pipeline.aacStream.on('data', function (data) {
if (data.type !== 'timed-metadata' && data.type !== 'audio' || pipeline.audioSegmentStream) {
return;
}
pipeline.tracks.audio = pipeline.tracks.audio || {
timelineStartInfo: {
baseMediaDecodeTime: options.baseMediaDecodeTime
},
codec: 'adts',
type: 'audio'
}; // hook up the audio segment stream to the first track with aac data
pipeline.audioSegmentStream = new AudioSegmentStream(pipeline.tracks.audio, options);
pipeline.audioSegmentStream.on('data', function (data) {
pipeline.trigger('data', {
type: 'audio',
data: data
});
});
pipeline.audioSegmentStream.on('partialdone', pipeline.trigger.bind(pipeline, 'partialdone'));
pipeline.audioSegmentStream.on('done', pipeline.trigger.bind(pipeline, 'done'));
pipeline.audioSegmentStream.on('endedtimeline', pipeline.trigger.bind(pipeline, 'endedtimeline'));
pipeline.audioSegmentStream.on('timingInfo', pipeline.trigger.bind(pipeline, 'audioTimingInfo')); // Set up the final part of the audio pipeline
pipeline.adtsStream.pipe(pipeline.audioSegmentStream);
pipeline.trigger('trackinfo', {
hasAudio: !!pipeline.tracks.audio,
hasVideo: !!pipeline.tracks.video
});
}); // set the pipeline up as a stream before binding to get access to the trigger function
pipeline = createPipeline(pipeline);
pipeline.metadataStream.on('data', pipeline.trigger.bind(pipeline, 'id3Frame'));
return pipeline;
};
var setupPipelineListeners = function setupPipelineListeners(pipeline, transmuxer) {
pipeline.on('data', transmuxer.trigger.bind(transmuxer, 'data'));
pipeline.on('done', transmuxer.trigger.bind(transmuxer, 'done'));
pipeline.on('partialdone', transmuxer.trigger.bind(transmuxer, 'partialdone'));
pipeline.on('endedtimeline', transmuxer.trigger.bind(transmuxer, 'endedtimeline'));
pipeline.on('audioTimingInfo', transmuxer.trigger.bind(transmuxer, 'audioTimingInfo'));
pipeline.on('videoTimingInfo', transmuxer.trigger.bind(transmuxer, 'videoTimingInfo'));
pipeline.on('trackinfo', transmuxer.trigger.bind(transmuxer, 'trackinfo'));
pipeline.on('id3Frame', function (event) {
// add this to every single emitted segment even though it's only needed for the first
event.dispatchType = pipeline.metadataStream.dispatchType; // keep original time, can be adjusted if needed at a higher level
event.cueTime = clock.videoTsToSeconds(event.pts);
transmuxer.trigger('id3Frame', event);
});
pipeline.on('caption', function (event) {
transmuxer.trigger('caption', event);
});
};
var Transmuxer = function Transmuxer(options) {
var pipeline = null,
hasFlushed = true;
options = options || {};
Transmuxer.prototype.init.call(this);
options.baseMediaDecodeTime = options.baseMediaDecodeTime || 0;
this.push = function (bytes) {
if (hasFlushed) {
var isAac = isLikelyAacData(bytes);
if (isAac && (!pipeline || pipeline.type !== 'aac')) {
pipeline = aacPipeline(options);
setupPipelineListeners(pipeline, this);
} else if (!isAac && (!pipeline || pipeline.type !== 'ts')) {
pipeline = tsPipeline(options);
setupPipelineListeners(pipeline, this);
}
hasFlushed = false;
}
pipeline.headOfPipeline.push(bytes);
};
this.flush = function () {
if (!pipeline) {
return;
}
hasFlushed = true;
pipeline.headOfPipeline.flush();
};
this.partialFlush = function () {
if (!pipeline) {
return;
}
pipeline.headOfPipeline.partialFlush();
};
this.endTimeline = function () {
if (!pipeline) {
return;
}
pipeline.headOfPipeline.endTimeline();
};
this.reset = function () {
if (!pipeline) {
return;
}
pipeline.headOfPipeline.reset();
};
this.setBaseMediaDecodeTime = function (baseMediaDecodeTime) {
if (!options.keepOriginalTimestamps) {
options.baseMediaDecodeTime = baseMediaDecodeTime;
}
if (!pipeline) {
return;
}
if (pipeline.tracks.audio) {
pipeline.tracks.audio.timelineStartInfo.dts = undefined;
pipeline.tracks.audio.timelineStartInfo.pts = undefined;
trackInfo.clearDtsInfo(pipeline.tracks.audio);
if (pipeline.audioRollover) {
pipeline.audioRollover.discontinuity();
}
}
if (pipeline.tracks.video) {
if (pipeline.videoSegmentStream) {
pipeline.videoSegmentStream.gopCache_ = [];
}
pipeline.tracks.video.timelineStartInfo.dts = undefined;
pipeline.tracks.video.timelineStartInfo.pts = undefined;
trackInfo.clearDtsInfo(pipeline.tracks.video); // pipeline.captionStream.reset();
}
if (pipeline.timestampRollover) {
pipeline.timestampRollover.discontinuity();
}
};
this.setRemux = function (val) {
options.remux = val;
if (pipeline && pipeline.coalesceStream) {
pipeline.coalesceStream.setRemux(val);
}
};
this.setAudioAppendStart = function (audioAppendStart) {
if (!pipeline || !pipeline.tracks.audio || !pipeline.audioSegmentStream) {
return;
}
pipeline.audioSegmentStream.setAudioAppendStart(audioAppendStart);
}; // TODO GOP alignment support
// Support may be a bit trickier than with full segment appends, as GOPs may be split
// and processed in a more granular fashion
this.alignGopsWith = function (gopsToAlignWith) {
return;
};
};
Transmuxer.prototype = new Stream();
module.exports = Transmuxer;
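// End-to-end sketch (illustrative; the require path is an assumption): the
// transmuxer picks the AAC or TS pipeline on the first push after a flush,
// then emits per-track fMP4 data events.
var Transmuxer = require('./transmuxer');
function transmuxChunk(bytes, onData) {
  var transmuxer = new Transmuxer({ keepOriginalTimestamps: false });
  transmuxer.on('data', onData); // event: { type: 'audio'|'video', data: { track, boxes } }
  transmuxer.push(bytes); // Uint8Array of MPEG-TS or ADTS/AAC bytes
  transmuxer.partialFlush(); // emit whatever frames are already complete
  transmuxer.flush(); // finish the segment
}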

View file

@ -0,0 +1,195 @@
/**
* Constructs a single-track, ISO BMFF media segment from H264 data
* events. The output of this stream can be fed to a SourceBuffer
* configured with a suitable initialization segment.
* @param track {object} track metadata configuration
* @param options {object} transmuxer options object
* @param options.alignGopsAtEnd {boolean} If true, start from the end of the
* gopsToAlignWith list when attempting to align gop pts
*/
'use strict';
var Stream = require('../utils/stream.js');
var mp4 = require('../mp4/mp4-generator.js');
var trackInfo = require('../mp4/track-decode-info.js');
var frameUtils = require('../mp4/frame-utils');
var VIDEO_PROPERTIES = require('../constants/video-properties.js');
var VideoSegmentStream = function VideoSegmentStream(track, options) {
var sequenceNumber = 0,
nalUnits = [],
frameCache = [],
// gopsToAlignWith = [],
config,
pps,
segmentStartPts = null,
segmentEndPts = null,
gops,
ensureNextFrameIsKeyFrame = true;
options = options || {};
VideoSegmentStream.prototype.init.call(this);
this.push = function (nalUnit) {
trackInfo.collectDtsInfo(track, nalUnit);
if (typeof track.timelineStartInfo.dts === 'undefined') {
track.timelineStartInfo.dts = nalUnit.dts;
} // record the track config
if (nalUnit.nalUnitType === 'seq_parameter_set_rbsp' && !config) {
config = nalUnit.config;
track.sps = [nalUnit.data];
VIDEO_PROPERTIES.forEach(function (prop) {
track[prop] = config[prop];
}, this);
}
if (nalUnit.nalUnitType === 'pic_parameter_set_rbsp' && !pps) {
pps = nalUnit.data;
track.pps = [nalUnit.data];
} // buffer video until flush() is called
nalUnits.push(nalUnit);
};
this.processNals_ = function (cacheLastFrame) {
var i;
nalUnits = frameCache.concat(nalUnits); // Throw away nalUnits at the start of the byte stream until
// we find the first AUD
while (nalUnits.length) {
if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {
break;
}
nalUnits.shift();
} // Return early if no video data has been observed
if (nalUnits.length === 0) {
return;
}
var frames = frameUtils.groupNalsIntoFrames(nalUnits);
if (!frames.length) {
return;
} // note that the frame cache may also protect us from cases where we haven't
// pushed data for the entire first or last frame yet
frameCache = frames[frames.length - 1];
if (cacheLastFrame) {
frames.pop();
frames.duration -= frameCache.duration;
frames.nalCount -= frameCache.length;
frames.byteLength -= frameCache.byteLength;
}
if (!frames.length) {
nalUnits = [];
return;
}
this.trigger('timelineStartInfo', track.timelineStartInfo);
if (ensureNextFrameIsKeyFrame) {
gops = frameUtils.groupFramesIntoGops(frames);
if (!gops[0][0].keyFrame) {
gops = frameUtils.extendFirstKeyFrame(gops);
if (!gops[0][0].keyFrame) {
// we haven't yet gotten a key frame, so reset nal units to wait for more nal
// units
nalUnits = [].concat.apply([], frames).concat(frameCache);
frameCache = [];
return;
}
frames = [].concat.apply([], gops);
frames.duration = gops.duration;
}
ensureNextFrameIsKeyFrame = false;
}
if (segmentStartPts === null) {
segmentStartPts = frames[0].pts;
segmentEndPts = segmentStartPts;
}
segmentEndPts += frames.duration;
this.trigger('timingInfo', {
start: segmentStartPts,
end: segmentEndPts
});
for (i = 0; i < frames.length; i++) {
var frame = frames[i];
track.samples = frameUtils.generateSampleTableForFrame(frame);
var mdat = mp4.mdat(frameUtils.concatenateNalDataForFrame(frame));
trackInfo.clearDtsInfo(track);
trackInfo.collectDtsInfo(track, frame);
track.baseMediaDecodeTime = trackInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);
var moof = mp4.moof(sequenceNumber, [track]);
sequenceNumber++;
track.initSegment = mp4.initSegment([track]);
var boxes = new Uint8Array(moof.byteLength + mdat.byteLength);
boxes.set(moof);
boxes.set(mdat, moof.byteLength);
this.trigger('data', {
track: track,
boxes: boxes,
sequence: sequenceNumber,
videoFrameDts: frame.dts,
videoFramePts: frame.pts
});
}
nalUnits = [];
};
this.resetTimingAndConfig_ = function () {
config = undefined;
pps = undefined;
segmentStartPts = null;
segmentEndPts = null;
};
this.partialFlush = function () {
this.processNals_(true);
this.trigger('partialdone', 'VideoSegmentStream');
};
this.flush = function () {
this.processNals_(false); // reset config and pps because they may differ across segments
// for instance, when we are rendition switching
this.resetTimingAndConfig_();
this.trigger('done', 'VideoSegmentStream');
};
this.endTimeline = function () {
this.flush();
this.trigger('endedtimeline', 'VideoSegmentStream');
};
this.reset = function () {
this.resetTimingAndConfig_();
frameCache = [];
nalUnits = [];
ensureNextFrameIsKeyFrame = true;
this.trigger('reset');
};
};
VideoSegmentStream.prototype = new Stream();
module.exports = VideoSegmentStream;
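// Wiring sketch (illustrative; the require path and minimal track fields
// are assumptions): the stream consumes parsed H264 NAL units and emits one
// moof/mdat pair per frame once a keyframe has been seen.
var VideoSegmentStream = require('./video-segment-stream');
var videoTrack = { type: 'video', timelineStartInfo: { baseMediaDecodeTime: 0 } };
var videoSegmentStream = new VideoSegmentStream(videoTrack, { keepOriginalTimestamps: false });
videoSegmentStream.on('data', function (event) {
  // event.boxes: Uint8Array (moof + mdat); event.videoFramePts / event.videoFrameDts
});
// an H264 parser would be piped in upstream: h264Stream.pipe(videoSegmentStream);
// partialFlush() caches the trailing frame, flush() drains everything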

View file

@ -0,0 +1,189 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Reads in-band caption information from a video elementary
* stream. Captions must follow the CEA-708 standard for injection
* into an MPEG-2 transport streams.
* @see https://en.wikipedia.org/wiki/CEA-708
* @see https://www.gpo.gov/fdsys/pkg/CFR-2007-title47-vol1/pdf/CFR-2007-title47-vol1-sec15-119.pdf
*/
'use strict'; // Supplemental enhancement information (SEI) NAL units have a
// payload type field to indicate how they are to be
// interpreted. CEA-708 caption content is always transmitted with
// payload type 0x04.
var USER_DATA_REGISTERED_ITU_T_T35 = 4,
RBSP_TRAILING_BITS = 128;
/**
* Parse a supplemental enhancement information (SEI) NAL unit.
* Stops parsing once a message of type ITU T T35 has been found.
*
* @param bytes {Uint8Array} the bytes of a SEI NAL unit
* @return {object} the parsed SEI payload
* @see Rec. ITU-T H.264, 7.3.2.3.1
*/
var parseSei = function parseSei(bytes) {
var i = 0,
result = {
payloadType: -1,
payloadSize: 0
},
payloadType = 0,
payloadSize = 0; // go through the sei_rbsp parsing each individual sei_message
while (i < bytes.byteLength) {
// stop once we have hit the end of the sei_rbsp
if (bytes[i] === RBSP_TRAILING_BITS) {
break;
} // Parse payload type
while (bytes[i] === 0xFF) {
payloadType += 255;
i++;
}
payloadType += bytes[i++]; // Parse payload size
while (bytes[i] === 0xFF) {
payloadSize += 255;
i++;
}
payloadSize += bytes[i++]; // this sei_message is a 608/708 caption so save it and break
// there can only ever be one caption message in a frame's sei
if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) {
var userIdentifier = String.fromCharCode(bytes[i + 3], bytes[i + 4], bytes[i + 5], bytes[i + 6]);
if (userIdentifier === 'GA94') {
result.payloadType = payloadType;
result.payloadSize = payloadSize;
result.payload = bytes.subarray(i, i + payloadSize);
break;
} else {
result.payload = void 0;
}
} // skip the payload and parse the next message
i += payloadSize;
payloadType = 0;
payloadSize = 0;
}
return result;
}; // see ANSI/SCTE 128-1 (2013), section 8.1
var parseUserData = function parseUserData(sei) {
// itu_t_t35_country_code must be 181 (United States) for
// captions
if (sei.payload[0] !== 181) {
return null;
} // itu_t_t35_provider_code should be 49 (ATSC) for captions
if ((sei.payload[1] << 8 | sei.payload[2]) !== 49) {
return null;
} // the user_identifier should be "GA94" to indicate ATSC1 data
if (String.fromCharCode(sei.payload[3], sei.payload[4], sei.payload[5], sei.payload[6]) !== 'GA94') {
return null;
} // finally, user_data_type_code should be 0x03 for caption data
if (sei.payload[7] !== 0x03) {
return null;
} // return the user_data_type_structure and strip the trailing
// marker bits
return sei.payload.subarray(8, sei.payload.length - 1);
}; // see CEA-708-D, section 4.4
var parseCaptionPackets = function parseCaptionPackets(pts, userData) {
var results = [],
i,
count,
offset,
data; // if this is just filler, return immediately
if (!(userData[0] & 0x40)) {
return results;
} // parse out the cc_data_1 and cc_data_2 fields
count = userData[0] & 0x1f;
for (i = 0; i < count; i++) {
offset = i * 3;
data = {
type: userData[offset + 2] & 0x03,
pts: pts
}; // capture cc data when cc_valid is 1
if (userData[offset + 2] & 0x04) {
data.ccData = userData[offset + 3] << 8 | userData[offset + 4];
results.push(data);
}
}
return results;
};
var discardEmulationPreventionBytes = function discardEmulationPreventionBytes(data) {
var length = data.byteLength,
emulationPreventionBytesPositions = [],
i = 1,
newLength,
newData; // Find all `Emulation Prevention Bytes`
while (i < length - 2) {
if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {
emulationPreventionBytesPositions.push(i + 2);
i += 2;
} else {
i++;
}
} // If no Emulation Prevention Bytes were found just return the original
// array
if (emulationPreventionBytesPositions.length === 0) {
return data;
} // Create a new array to hold the NAL unit data
newLength = length - emulationPreventionBytesPositions.length;
newData = new Uint8Array(newLength);
var sourceIndex = 0;
for (i = 0; i < newLength; sourceIndex++, i++) {
if (sourceIndex === emulationPreventionBytesPositions[0]) {
// Skip this byte
sourceIndex++; // Remove this position index
emulationPreventionBytesPositions.shift();
}
newData[i] = data[sourceIndex];
}
return newData;
}; // exports
module.exports = {
parseSei: parseSei,
parseUserData: parseUserData,
parseCaptionPackets: parseCaptionPackets,
discardEmulationPreventionBytes: discardEmulationPreventionBytes,
USER_DATA_REGISTERED_ITU_T_T35: USER_DATA_REGISTERED_ITU_T_T35
};
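// Usage sketch (illustrative; the require path is an assumption, and
// seiRbsp is assumed to be an SEI payload with emulation prevention bytes
// still present): extract 608/708 caption packets from one SEI NAL unit.
var captionParser = require('./caption-packet-parser');
function captionPacketsFrom(seiRbsp, pts) {
  var sei = captionParser.parseSei(captionParser.discardEmulationPreventionBytes(seiRbsp));
  if (sei.payloadType !== captionParser.USER_DATA_REGISTERED_ITU_T_T35) {
    return []; // not a GA94 caption SEI
  }
  var userData = captionParser.parseUserData(sei);
  return userData ? captionParser.parseCaptionPackets(pts, userData) : [];
}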

View file

@ -0,0 +1,134 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var tagTypes = {
0x08: 'audio',
0x09: 'video',
0x12: 'metadata'
},
hex = function hex(val) {
return '0x' + ('00' + val.toString(16)).slice(-2).toUpperCase();
},
hexStringList = function hexStringList(data) {
var arr = [],
i;
while (data.byteLength > 0) {
i = 0;
arr.push(hex(data[i++]));
data = data.subarray(i);
}
return arr.join(' ');
},
parseAVCTag = function parseAVCTag(tag, obj) {
var avcPacketTypes = ['AVC Sequence Header', 'AVC NALU', 'AVC End-of-Sequence'],
compositionTime = (tag[1] & parseInt('01111111', 2)) << 16 | tag[2] << 8 | tag[3],
obj = obj || {};
obj.avcPacketType = avcPacketTypes[tag[0]];
obj.CompositionTime = tag[1] & parseInt('10000000', 2) ? -compositionTime : compositionTime;
if (tag[0] === 1) {
obj.nalUnitTypeRaw = hexStringList(tag.subarray(4, 100));
} else {
obj.data = hexStringList(tag.subarray(4));
}
return obj;
},
parseVideoTag = function parseVideoTag(tag, obj) {
var frameTypes = ['Unknown', 'Keyframe (for AVC, a seekable frame)', 'Inter frame (for AVC, a nonseekable frame)', 'Disposable inter frame (H.263 only)', 'Generated keyframe (reserved for server use only)', 'Video info/command frame'],
codecID = tag[0] & parseInt('00001111', 2);
obj = obj || {};
obj.frameType = frameTypes[(tag[0] & parseInt('11110000', 2)) >>> 4];
obj.codecID = codecID;
if (codecID === 7) {
return parseAVCTag(tag.subarray(1), obj);
}
return obj;
},
parseAACTag = function parseAACTag(tag, obj) {
var packetTypes = ['AAC Sequence Header', 'AAC Raw'];
obj = obj || {};
obj.aacPacketType = packetTypes[tag[0]];
obj.data = hexStringList(tag.subarray(1));
return obj;
},
parseAudioTag = function parseAudioTag(tag, obj) {
var formatTable = ['Linear PCM, platform endian', 'ADPCM', 'MP3', 'Linear PCM, little endian', 'Nellymoser 16-kHz mono', 'Nellymoser 8-kHz mono', 'Nellymoser', 'G.711 A-law logarithmic PCM', 'G.711 mu-law logarithmic PCM', 'reserved', 'AAC', 'Speex', 'MP3 8-Khz', 'Device-specific sound'],
samplingRateTable = ['5.5-kHz', '11-kHz', '22-kHz', '44-kHz'],
soundFormat = (tag[0] & parseInt('11110000', 2)) >>> 4;
obj = obj || {};
obj.soundFormat = formatTable[soundFormat];
obj.soundRate = samplingRateTable[(tag[0] & parseInt('00001100', 2)) >>> 2];
obj.soundSize = (tag[0] & parseInt('00000010', 2)) >>> 1 ? '16-bit' : '8-bit';
obj.soundType = tag[0] & parseInt('00000001', 2) ? 'Stereo' : 'Mono';
if (soundFormat === 10) {
return parseAACTag(tag.subarray(1), obj);
}
return obj;
},
parseGenericTag = function parseGenericTag(tag) {
return {
tagType: tagTypes[tag[0]],
dataSize: tag[1] << 16 | tag[2] << 8 | tag[3],
timestamp: tag[7] << 24 | tag[4] << 16 | tag[5] << 8 | tag[6],
streamID: tag[8] << 16 | tag[9] << 8 | tag[10]
};
},
inspectFlvTag = function inspectFlvTag(tag) {
var header = parseGenericTag(tag);
switch (tag[0]) {
case 0x08:
parseAudioTag(tag.subarray(11), header);
break;
case 0x09:
parseVideoTag(tag.subarray(11), header);
break;
case 0x12:
}
return header;
},
inspectFlv = function inspectFlv(bytes) {
var i = 9,
// header
dataSize,
parsedResults = [],
tag; // traverse the tags
i += 4; // skip previous tag size
while (i < bytes.byteLength) {
dataSize = bytes[i + 1] << 16;
dataSize |= bytes[i + 2] << 8;
dataSize |= bytes[i + 3];
dataSize += 11;
tag = bytes.subarray(i, i + dataSize);
parsedResults.push(inspectFlvTag(tag));
i += dataSize + 4;
}
return parsedResults;
},
textifyFlv = function textifyFlv(flvTagArray) {
return JSON.stringify(flvTagArray, null, 2);
};
module.exports = {
inspectTag: inspectFlvTag,
inspect: inspectFlv,
textify: textifyFlv
};
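// Usage sketch (illustrative; the require path is an assumption): turn an
// FLV byte stream into a readable JSON summary of its tags.
var flvInspector = require('./flv-inspector');
function describeFlv(flvBytes) {
  return flvInspector.textify(flvInspector.inspect(flvBytes));
}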

View file

@ -0,0 +1,756 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Parse the internal MP4 structure into an equivalent javascript
* object.
*/
'use strict';
var numberHelpers = require('../utils/numbers.js');
var MAX_UINT32 = numberHelpers.MAX_UINT32;
var getUint64 = numberHelpers.getUint64;
var inspectMp4,
_textifyMp,
parseMp4Date = function parseMp4Date(seconds) {
return new Date(seconds * 1000 - 2082844800000);
},
parseType = require('../mp4/parse-type'),
findBox = require('../mp4/find-box'),
nalParse = function nalParse(avcStream) {
var avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),
result = [],
i,
length;
for (i = 0; i + 4 < avcStream.length; i += length) {
length = avcView.getUint32(i);
i += 4; // bail if this doesn't appear to be an H264 stream
if (length <= 0) {
result.push('<span style=\'color:red;\'>MALFORMED DATA</span>');
continue;
}
switch (avcStream[i] & 0x1F) {
case 0x01:
result.push('slice_layer_without_partitioning_rbsp');
break;
case 0x05:
result.push('slice_layer_without_partitioning_rbsp_idr');
break;
case 0x06:
result.push('sei_rbsp');
break;
case 0x07:
result.push('seq_parameter_set_rbsp');
break;
case 0x08:
result.push('pic_parameter_set_rbsp');
break;
case 0x09:
result.push('access_unit_delimiter_rbsp');
break;
default:
result.push('UNKNOWN NAL - ' + (avcStream[i] & 0x1F));
break;
}
}
return result;
},
// registry of handlers for individual mp4 box types
parse = {
// codingname, not a first-class box type. stsd entries share the
// same format as real boxes so the parsing infrastructure can be
// shared
avc1: function avc1(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
return {
dataReferenceIndex: view.getUint16(6),
width: view.getUint16(24),
height: view.getUint16(26),
horizresolution: view.getUint16(28) + view.getUint16(30) / 16,
vertresolution: view.getUint16(32) + view.getUint16(34) / 16,
frameCount: view.getUint16(40),
depth: view.getUint16(74),
config: inspectMp4(data.subarray(78, data.byteLength))
};
},
avcC: function avcC(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
configurationVersion: data[0],
avcProfileIndication: data[1],
profileCompatibility: data[2],
avcLevelIndication: data[3],
lengthSizeMinusOne: data[4] & 0x03,
sps: [],
pps: []
},
numOfSequenceParameterSets = data[5] & 0x1f,
numOfPictureParameterSets,
nalSize,
offset,
i; // iterate past any SPSs
offset = 6;
for (i = 0; i < numOfSequenceParameterSets; i++) {
nalSize = view.getUint16(offset);
offset += 2;
result.sps.push(new Uint8Array(data.subarray(offset, offset + nalSize)));
offset += nalSize;
} // iterate past any PPSs
numOfPictureParameterSets = data[offset];
offset++;
for (i = 0; i < numOfPictureParameterSets; i++) {
nalSize = view.getUint16(offset);
offset += 2;
result.pps.push(new Uint8Array(data.subarray(offset, offset + nalSize)));
offset += nalSize;
}
return result;
},
btrt: function btrt(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
return {
bufferSizeDB: view.getUint32(0),
maxBitrate: view.getUint32(4),
avgBitrate: view.getUint32(8)
};
},
edts: function edts(data) {
return {
boxes: inspectMp4(data)
};
},
elst: function elst(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: view.getUint8(0),
flags: new Uint8Array(data.subarray(1, 4)),
edits: []
},
entryCount = view.getUint32(4),
i;
for (i = 8; entryCount; entryCount--) {
if (result.version === 0) {
result.edits.push({
segmentDuration: view.getUint32(i),
mediaTime: view.getInt32(i + 4),
mediaRate: view.getUint16(i + 8) + view.getUint16(i + 10) / (256 * 256)
});
i += 12;
} else {
result.edits.push({
segmentDuration: getUint64(data.subarray(i)),
mediaTime: getUint64(data.subarray(i + 8)),
mediaRate: view.getUint16(i + 16) + view.getUint16(i + 18) / (256 * 256)
});
i += 20;
}
}
return result;
},
esds: function esds(data) {
return {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
esId: data[6] << 8 | data[7],
streamPriority: data[8] & 0x1f,
decoderConfig: {
objectProfileIndication: data[11],
streamType: data[12] >>> 2 & 0x3f,
bufferSize: data[13] << 16 | data[14] << 8 | data[15],
maxBitrate: data[16] << 24 | data[17] << 16 | data[18] << 8 | data[19],
avgBitrate: data[20] << 24 | data[21] << 16 | data[22] << 8 | data[23],
decoderConfigDescriptor: {
tag: data[24],
length: data[25],
audioObjectType: data[26] >>> 3 & 0x1f,
samplingFrequencyIndex: (data[26] & 0x07) << 1 | data[27] >>> 7 & 0x01,
channelConfiguration: data[27] >>> 3 & 0x0f
}
}
};
},
ftyp: function ftyp(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
majorBrand: parseType(data.subarray(0, 4)),
minorVersion: view.getUint32(4),
compatibleBrands: []
},
i = 8;
while (i < data.byteLength) {
result.compatibleBrands.push(parseType(data.subarray(i, i + 4)));
i += 4;
}
return result;
},
dinf: function dinf(data) {
return {
boxes: inspectMp4(data)
};
},
dref: function dref(data) {
return {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
dataReferences: inspectMp4(data.subarray(8))
};
},
hdlr: function hdlr(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: view.getUint8(0),
flags: new Uint8Array(data.subarray(1, 4)),
handlerType: parseType(data.subarray(8, 12)),
name: ''
},
i = 8; // parse out the name field
for (i = 24; i < data.byteLength; i++) {
if (data[i] === 0x00) {
// the name field is null-terminated
i++;
break;
}
result.name += String.fromCharCode(data[i]);
} // decode UTF-8 to javascript's internal representation
// see http://ecmanaut.blogspot.com/2006/07/encoding-decoding-utf8-in-javascript.html
result.name = decodeURIComponent(escape(result.name));
return result;
},
mdat: function mdat(data) {
return {
byteLength: data.byteLength,
nals: nalParse(data)
};
},
mdhd: function mdhd(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
i = 4,
language,
result = {
version: view.getUint8(0),
flags: new Uint8Array(data.subarray(1, 4)),
language: ''
};
if (result.version === 1) {
i += 4;
result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes
i += 8;
result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes
i += 4;
result.timescale = view.getUint32(i);
i += 8;
result.duration = view.getUint32(i); // truncating top 4 bytes
} else {
result.creationTime = parseMp4Date(view.getUint32(i));
i += 4;
result.modificationTime = parseMp4Date(view.getUint32(i));
i += 4;
result.timescale = view.getUint32(i);
i += 4;
result.duration = view.getUint32(i);
}
i += 4; // language is stored as an ISO-639-2/T code in an array of three 5-bit fields
// each field is the packed difference between its ASCII value and 0x60
language = view.getUint16(i);
result.language += String.fromCharCode((language >> 10) + 0x60);
result.language += String.fromCharCode(((language & 0x03e0) >> 5) + 0x60);
result.language += String.fromCharCode((language & 0x1f) + 0x60);
return result;
},
mdia: function mdia(data) {
return {
boxes: inspectMp4(data)
};
},
mfhd: function mfhd(data) {
return {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
sequenceNumber: data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]
};
},
minf: function minf(data) {
return {
boxes: inspectMp4(data)
};
},
// codingname, not a first-class box type. stsd entries share the
// same format as real boxes so the parsing infrastructure can be
// shared
mp4a: function mp4a(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
// 6 bytes reserved
dataReferenceIndex: view.getUint16(6),
// 4 + 4 bytes reserved
channelcount: view.getUint16(16),
samplesize: view.getUint16(18),
// 2 bytes pre_defined
// 2 bytes reserved
samplerate: view.getUint16(24) + view.getUint16(26) / 65536
}; // if there are more bytes to process, assume this is an ISO/IEC
// 14496-14 MP4AudioSampleEntry and parse the ESDBox
if (data.byteLength > 28) {
result.streamDescriptor = inspectMp4(data.subarray(28))[0];
}
return result;
},
moof: function moof(data) {
return {
boxes: inspectMp4(data)
};
},
moov: function moov(data) {
return {
boxes: inspectMp4(data)
};
},
mvex: function mvex(data) {
return {
boxes: inspectMp4(data)
};
},
mvhd: function mvhd(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
i = 4,
result = {
version: view.getUint8(0),
flags: new Uint8Array(data.subarray(1, 4))
};
if (result.version === 1) {
i += 4;
result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes
i += 8;
result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes
i += 4;
result.timescale = view.getUint32(i);
i += 8;
result.duration = view.getUint32(i); // truncating top 4 bytes
} else {
result.creationTime = parseMp4Date(view.getUint32(i));
i += 4;
result.modificationTime = parseMp4Date(view.getUint32(i));
i += 4;
result.timescale = view.getUint32(i);
i += 4;
result.duration = view.getUint32(i);
}
i += 4; // convert fixed-point, base 16 back to a number
result.rate = view.getUint16(i) + view.getUint16(i + 2) / 16;
i += 4;
result.volume = view.getUint8(i) + view.getUint8(i + 1) / 8;
i += 2;
i += 2;
i += 2 * 4;
result.matrix = new Uint32Array(data.subarray(i, i + 9 * 4));
i += 9 * 4;
i += 6 * 4;
result.nextTrackId = view.getUint32(i);
return result;
},
pdin: function pdin(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
return {
version: view.getUint8(0),
flags: new Uint8Array(data.subarray(1, 4)),
rate: view.getUint32(4),
initialDelay: view.getUint32(8)
};
},
sdtp: function sdtp(data) {
var result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
samples: []
},
i;
for (i = 4; i < data.byteLength; i++) {
result.samples.push({
dependsOn: (data[i] & 0x30) >> 4,
isDependedOn: (data[i] & 0x0c) >> 2,
hasRedundancy: data[i] & 0x03
});
}
return result;
},
sidx: require('./parse-sidx.js'),
smhd: function smhd(data) {
return {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
balance: data[4] + data[5] / 256
};
},
stbl: function stbl(data) {
return {
boxes: inspectMp4(data)
};
},
ctts: function ctts(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: view.getUint8(0),
flags: new Uint8Array(data.subarray(1, 4)),
compositionOffsets: []
},
entryCount = view.getUint32(4),
i;
for (i = 8; entryCount; i += 8, entryCount--) {
result.compositionOffsets.push({
sampleCount: view.getUint32(i),
sampleOffset: view[result.version === 0 ? 'getUint32' : 'getInt32'](i + 4)
});
}
return result;
},
stss: function stss(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: view.getUint8(0),
flags: new Uint8Array(data.subarray(1, 4)),
syncSamples: []
},
entryCount = view.getUint32(4),
i;
for (i = 8; entryCount; i += 4, entryCount--) {
result.syncSamples.push(view.getUint32(i));
}
return result;
},
stco: function stco(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
chunkOffsets: []
},
entryCount = view.getUint32(4),
i;
for (i = 8; entryCount; i += 4, entryCount--) {
result.chunkOffsets.push(view.getUint32(i));
}
return result;
},
stsc: function stsc(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
entryCount = view.getUint32(4),
result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
sampleToChunks: []
},
i;
for (i = 8; entryCount; i += 12, entryCount--) {
result.sampleToChunks.push({
firstChunk: view.getUint32(i),
samplesPerChunk: view.getUint32(i + 4),
sampleDescriptionIndex: view.getUint32(i + 8)
});
}
return result;
},
stsd: function stsd(data) {
return {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
sampleDescriptions: inspectMp4(data.subarray(8))
};
},
stsz: function stsz(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
sampleSize: view.getUint32(4),
entries: []
},
i;
for (i = 12; i < data.byteLength; i += 4) {
result.entries.push(view.getUint32(i));
}
return result;
},
stts: function stts(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
timeToSamples: []
},
entryCount = view.getUint32(4),
i;
for (i = 8; entryCount; i += 8, entryCount--) {
result.timeToSamples.push({
sampleCount: view.getUint32(i),
sampleDelta: view.getUint32(i + 4)
});
}
return result;
},
styp: function styp(data) {
return parse.ftyp(data);
},
tfdt: require('./parse-tfdt.js'),
tfhd: require('./parse-tfhd.js'),
tkhd: function tkhd(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
i = 4,
result = {
version: view.getUint8(0),
flags: new Uint8Array(data.subarray(1, 4))
};
if (result.version === 1) {
i += 4;
result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes
i += 8;
result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes
i += 4;
result.trackId = view.getUint32(i);
i += 4;
i += 8;
result.duration = view.getUint32(i); // truncating top 4 bytes
} else {
result.creationTime = parseMp4Date(view.getUint32(i));
i += 4;
result.modificationTime = parseMp4Date(view.getUint32(i));
i += 4;
result.trackId = view.getUint32(i);
i += 4;
i += 4;
result.duration = view.getUint32(i);
}
i += 4;
i += 2 * 4;
result.layer = view.getUint16(i);
i += 2;
result.alternateGroup = view.getUint16(i);
i += 2; // convert fixed-point, base 16 back to a number
result.volume = view.getUint8(i) + view.getUint8(i + 1) / 8;
i += 2;
i += 2;
result.matrix = new Uint32Array(data.subarray(i, i + 9 * 4));
i += 9 * 4;
result.width = view.getUint16(i) + view.getUint16(i + 2) / 65536;
i += 4;
result.height = view.getUint16(i) + view.getUint16(i + 2) / 65536;
return result;
},
traf: function traf(data) {
return {
boxes: inspectMp4(data)
};
},
trak: function trak(data) {
return {
boxes: inspectMp4(data)
};
},
trex: function trex(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
return {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
trackId: view.getUint32(4),
defaultSampleDescriptionIndex: view.getUint32(8),
defaultSampleDuration: view.getUint32(12),
defaultSampleSize: view.getUint32(16),
sampleDependsOn: data[20] & 0x03,
sampleIsDependedOn: (data[21] & 0xc0) >> 6,
sampleHasRedundancy: (data[21] & 0x30) >> 4,
samplePaddingValue: (data[21] & 0x0e) >> 1,
sampleIsDifferenceSample: !!(data[21] & 0x01),
sampleDegradationPriority: view.getUint16(22)
};
},
trun: require('./parse-trun.js'),
'url ': function url(data) {
return {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4))
};
},
vmhd: function vmhd(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
return {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
graphicsmode: view.getUint16(4),
opcolor: new Uint16Array([view.getUint16(6), view.getUint16(8), view.getUint16(10)])
};
}
};
/**
* Return a javascript array of box objects parsed from an ISO base
* media file.
* @param data {Uint8Array} the binary data of the media to be inspected
* @return {array} a javascript array of potentially nested box objects
*/
inspectMp4 = function inspectMp4(data) {
var i = 0,
result = [],
view,
size,
type,
end,
box; // Convert data from Uint8Array to ArrayBuffer, to follow the DataView API
var ab = new ArrayBuffer(data.length);
var v = new Uint8Array(ab);
for (var z = 0; z < data.length; ++z) {
v[z] = data[z];
}
view = new DataView(ab);
while (i < data.byteLength) {
// parse box data
size = view.getUint32(i);
type = parseType(data.subarray(i + 4, i + 8));
end = size > 1 ? i + size : data.byteLength; // parse type-specific data
box = (parse[type] || function (data) {
return {
data: data
};
})(data.subarray(i + 8, end));
box.size = size;
box.type = type; // store this box and move to the next
result.push(box);
i = end;
}
return result;
};
/**
* Returns a textual representation of the javascript representation
* of an MP4 file. You can use it as an alternative to
* JSON.stringify() to compare inspected MP4s.
* @param inspectedMp4 {array} the parsed array of boxes in an MP4
* file
* @param depth {number} (optional) the number of ancestor boxes of
* the elements of inspectedMp4. Assumed to be zero if unspecified.
* @return {string} a text representation of the parsed MP4
*/
_textifyMp = function textifyMp4(inspectedMp4, depth) {
var indent;
depth = depth || 0;
indent = new Array(depth * 2 + 1).join(' '); // iterate over all the boxes
return inspectedMp4.map(function (box, index) {
// list the box type first at the current indentation level
    return indent + box.type + '\n' + // the type was printed above; child boxes are handled separately
Object.keys(box).filter(function (key) {
return key !== 'type' && key !== 'boxes'; // output all the box properties
}).map(function (key) {
var prefix = indent + ' ' + key + ': ',
      value = box[key]; // print out raw bytes as hexadecimal
if (value instanceof Uint8Array || value instanceof Uint32Array) {
var bytes = Array.prototype.slice.call(new Uint8Array(value.buffer, value.byteOffset, value.byteLength)).map(function (byte) {
return ' ' + ('00' + byte.toString(16)).slice(-2);
}).join('').match(/.{1,24}/g);
if (!bytes) {
return prefix + '<>';
}
if (bytes.length === 1) {
return prefix + '<' + bytes.join('').slice(1) + '>';
}
return prefix + '<\n' + bytes.map(function (line) {
return indent + ' ' + line;
}).join('\n') + '\n' + indent + ' >';
} // stringify generic objects
return prefix + JSON.stringify(value, null, 2).split('\n').map(function (line, index) {
if (index === 0) {
return line;
}
return indent + ' ' + line;
}).join('\n');
}).join('\n') + ( // recursively textify the child boxes
box.boxes ? '\n' + _textifyMp(box.boxes, depth + 1) : '');
}).join('\n');
};
module.exports = {
inspect: inspectMp4,
textify: _textifyMp,
parseType: parseType,
findBox: findBox,
parseTraf: parse.traf,
parseTfdt: parse.tfdt,
parseHdlr: parse.hdlr,
parseTfhd: parse.tfhd,
parseTrun: parse.trun,
parseSidx: parse.sidx
};
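// A minimal usage sketch (the file name and synchronous read are illustrative
// assumptions): inspect() yields nested box objects; textify() renders them
// as an indented tree for eyeballing or diffing.
var exampleBytes = new Uint8Array(require('fs').readFileSync('segment.mp4'));
var exampleBoxes = inspectMp4(exampleBytes);
console.log(_textifyMp(exampleBoxes, 0));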


@@ -0,0 +1,243 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Tools for parsing ID3 frame data
* @see http://id3.org/id3v2.3.0
*/
'use strict';
var typedArrayIndexOf = require('../utils/typed-array').typedArrayIndexOf,
// Frames that allow different types of text encoding contain a text
// encoding description byte [ID3v2.4.0 section 4.]
textEncodingDescriptionByte = {
Iso88591: 0x00,
// ISO-8859-1, terminated with \0.
Utf16: 0x01,
// UTF-16 encoded Unicode BOM, terminated with \0\0
Utf16be: 0x02,
// UTF-16BE encoded Unicode, without BOM, terminated with \0\0
Utf8: 0x03 // UTF-8 encoded Unicode, terminated with \0
},
// return a percent-encoded representation of the specified byte range
// @see http://en.wikipedia.org/wiki/Percent-encoding
percentEncode = function percentEncode(bytes, start, end) {
var i,
result = '';
for (i = start; i < end; i++) {
result += '%' + ('00' + bytes[i].toString(16)).slice(-2);
}
return result;
},
// return the string representation of the specified byte range,
  // interpreted as UTF-8.
parseUtf8 = function parseUtf8(bytes, start, end) {
return decodeURIComponent(percentEncode(bytes, start, end));
},
// return the string representation of the specified byte range,
// interpreted as ISO-8859-1.
parseIso88591 = function parseIso88591(bytes, start, end) {
return unescape(percentEncode(bytes, start, end)); // jshint ignore:line
},
parseSyncSafeInteger = function parseSyncSafeInteger(data) {
return data[0] << 21 | data[1] << 14 | data[2] << 7 | data[3];
},
frameParsers = {
'APIC': function APIC(frame) {
var i = 1,
mimeTypeEndIndex,
descriptionEndIndex,
LINK_MIME_TYPE = '-->';
if (frame.data[0] !== textEncodingDescriptionByte.Utf8) {
// ignore frames with unrecognized character encodings
return;
} // parsing fields [ID3v2.4.0 section 4.14.]
mimeTypeEndIndex = typedArrayIndexOf(frame.data, 0, i);
if (mimeTypeEndIndex < 0) {
// malformed frame
return;
} // parsing Mime type field (terminated with \0)
frame.mimeType = parseIso88591(frame.data, i, mimeTypeEndIndex);
i = mimeTypeEndIndex + 1; // parsing 1-byte Picture Type field
frame.pictureType = frame.data[i];
i++;
descriptionEndIndex = typedArrayIndexOf(frame.data, 0, i);
if (descriptionEndIndex < 0) {
// malformed frame
return;
} // parsing Description field (terminated with \0)
frame.description = parseUtf8(frame.data, i, descriptionEndIndex);
i = descriptionEndIndex + 1;
if (frame.mimeType === LINK_MIME_TYPE) {
// parsing Picture Data field as URL (always represented as ISO-8859-1 [ID3v2.4.0 section 4.])
frame.url = parseIso88591(frame.data, i, frame.data.length);
} else {
// parsing Picture Data field as binary data
frame.pictureData = frame.data.subarray(i, frame.data.length);
}
},
'T*': function T(frame) {
if (frame.data[0] !== textEncodingDescriptionByte.Utf8) {
// ignore frames with unrecognized character encodings
return;
} // parse text field, do not include null terminator in the frame value
// frames that allow different types of encoding contain terminated text [ID3v2.4.0 section 4.]
frame.value = parseUtf8(frame.data, 1, frame.data.length).replace(/\0*$/, ''); // text information frames supports multiple strings, stored as a terminator separated list [ID3v2.4.0 section 4.2.]
frame.values = frame.value.split('\0');
},
'TXXX': function TXXX(frame) {
var descriptionEndIndex;
if (frame.data[0] !== textEncodingDescriptionByte.Utf8) {
// ignore frames with unrecognized character encodings
return;
}
descriptionEndIndex = typedArrayIndexOf(frame.data, 0, 1);
if (descriptionEndIndex === -1) {
return;
} // parse the text fields
frame.description = parseUtf8(frame.data, 1, descriptionEndIndex); // do not include the null terminator in the tag value
// frames that allow different types of encoding contain terminated text
// [ID3v2.4.0 section 4.]
frame.value = parseUtf8(frame.data, descriptionEndIndex + 1, frame.data.length).replace(/\0*$/, '');
frame.data = frame.value;
},
'W*': function W(frame) {
// parse URL field; URL fields are always represented as ISO-8859-1 [ID3v2.4.0 section 4.]
// if the value is followed by a string termination all the following information should be ignored [ID3v2.4.0 section 4.3]
frame.url = parseIso88591(frame.data, 0, frame.data.length).replace(/\0.*$/, '');
},
'WXXX': function WXXX(frame) {
var descriptionEndIndex;
if (frame.data[0] !== textEncodingDescriptionByte.Utf8) {
// ignore frames with unrecognized character encodings
return;
}
descriptionEndIndex = typedArrayIndexOf(frame.data, 0, 1);
if (descriptionEndIndex === -1) {
return;
} // parse the description and URL fields
frame.description = parseUtf8(frame.data, 1, descriptionEndIndex); // URL fields are always represented as ISO-8859-1 [ID3v2.4.0 section 4.]
// if the value is followed by a string termination all the following information
// should be ignored [ID3v2.4.0 section 4.3]
frame.url = parseIso88591(frame.data, descriptionEndIndex + 1, frame.data.length).replace(/\0.*$/, '');
},
'PRIV': function PRIV(frame) {
var i;
for (i = 0; i < frame.data.length; i++) {
if (frame.data[i] === 0) {
// parse the description and URL fields
frame.owner = parseIso88591(frame.data, 0, i);
break;
}
}
frame.privateData = frame.data.subarray(i + 1);
frame.data = frame.privateData;
}
};
var parseId3Frames = function parseId3Frames(data) {
var frameSize,
frameHeader,
frameStart = 10,
tagSize = 0,
frames = []; // If we don't have enough data for a header, 10 bytes,
// or 'ID3' in the first 3 bytes this is not a valid ID3 tag.
if (data.length < 10 || data[0] !== 'I'.charCodeAt(0) || data[1] !== 'D'.charCodeAt(0) || data[2] !== '3'.charCodeAt(0)) {
return;
} // the frame size is transmitted as a 28-bit integer in the
// last four bytes of the ID3 header.
// The most significant bit of each byte is dropped and the
// results concatenated to recover the actual value.
tagSize = parseSyncSafeInteger(data.subarray(6, 10)); // ID3 reports the tag size excluding the header but it's more
// convenient for our comparisons to include it
tagSize += 10; // check bit 6 of byte 5 for the extended header flag.
var hasExtendedHeader = data[5] & 0x40;
if (hasExtendedHeader) {
// advance the frame start past the extended header
frameStart += 4; // header size field
frameStart += parseSyncSafeInteger(data.subarray(10, 14));
tagSize -= parseSyncSafeInteger(data.subarray(16, 20)); // clip any padding off the end
} // parse one or more ID3 frames
// http://id3.org/id3v2.3.0#ID3v2_frame_overview
do {
// determine the number of bytes in this frame
frameSize = parseSyncSafeInteger(data.subarray(frameStart + 4, frameStart + 8));
if (frameSize < 1) {
break;
}
frameHeader = String.fromCharCode(data[frameStart], data[frameStart + 1], data[frameStart + 2], data[frameStart + 3]);
var frame = {
id: frameHeader,
data: data.subarray(frameStart + 10, frameStart + frameSize + 10)
};
frame.key = frame.id; // parse frame values
if (frameParsers[frame.id]) {
// use frame specific parser
frameParsers[frame.id](frame);
} else if (frame.id[0] === 'T') {
// use text frame generic parser
frameParsers['T*'](frame);
} else if (frame.id[0] === 'W') {
// use URL link frame generic parser
frameParsers['W*'](frame);
}
frames.push(frame);
frameStart += 10; // advance past the frame header
frameStart += frameSize; // advance past the frame body
} while (frameStart < tagSize);
return frames;
};
module.exports = {
parseId3Frames: parseId3Frames,
parseSyncSafeInteger: parseSyncSafeInteger,
frameParsers: frameParsers
};
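// A hedged worked example. Sync-safe integers keep bit 7 of every byte clear,
// so four bytes carry 28 bits: [0x00, 0x00, 0x02, 0x01] is (2 << 7) | 1 = 257.
console.log(parseSyncSafeInteger(new Uint8Array([0x00, 0x00, 0x02, 0x01]))); // 257
// A hand-built ID3v2.4 tag holding a single TIT2 text frame ('Hi', UTF-8);
// every byte below is an illustrative assumption, not captured data.
var exampleTag = new Uint8Array([
  0x49, 0x44, 0x33,       // 'ID3'
  0x04, 0x00,             // version 2.4.0
  0x00,                   // flags (no extended header)
  0x00, 0x00, 0x00, 0x0d, // tag size: 13 bytes of frames (sync-safe)
  0x54, 0x49, 0x54, 0x32, // frame id 'TIT2'
  0x00, 0x00, 0x00, 0x03, // frame size: 3 (sync-safe)
  0x00, 0x00,             // frame flags
  0x03, 0x48, 0x69        // UTF-8 encoding byte, then 'Hi'
]);
console.log(parseId3Frames(exampleTag)[0].value); // 'Hi'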


@@ -0,0 +1,15 @@
"use strict";
var parseSampleFlags = function parseSampleFlags(flags) {
return {
isLeading: (flags[0] & 0x0c) >>> 2,
dependsOn: flags[0] & 0x03,
isDependedOn: (flags[1] & 0xc0) >>> 6,
hasRedundancy: (flags[1] & 0x30) >>> 4,
paddingValue: (flags[1] & 0x0e) >>> 1,
isNonSyncSample: flags[1] & 0x01,
degradationPriority: flags[2] << 8 | flags[3]
};
};
module.exports = parseSampleFlags;


@@ -0,0 +1,46 @@
"use strict";
var getUint64 = require('../utils/numbers.js').getUint64;
var parseSidx = function parseSidx(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
references: [],
referenceId: view.getUint32(4),
timescale: view.getUint32(8)
},
i = 12;
if (result.version === 0) {
result.earliestPresentationTime = view.getUint32(i);
result.firstOffset = view.getUint32(i + 4);
i += 8;
} else {
// read 64 bits
result.earliestPresentationTime = getUint64(data.subarray(i));
result.firstOffset = getUint64(data.subarray(i + 8));
i += 16;
}
i += 2; // reserved
var referenceCount = view.getUint16(i);
i += 2; // start of references
for (; referenceCount > 0; i += 12, referenceCount--) {
result.references.push({
referenceType: (data[i] & 0x80) >>> 7,
referencedSize: view.getUint32(i) & 0x7FFFFFFF,
subsegmentDuration: view.getUint32(i + 4),
startsWithSap: !!(data[i + 8] & 0x80),
sapType: (data[i + 8] & 0x70) >>> 4,
sapDeltaTime: view.getUint32(i + 8) & 0x0FFFFFFF
});
}
return result;
};
module.exports = parseSidx;


@@ -0,0 +1,22 @@
"use strict";
var toUnsigned = require('../utils/bin').toUnsigned;
var getUint64 = require('../utils/numbers.js').getUint64;
var tfdt = function tfdt(data) {
var result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4))
};
if (result.version === 1) {
result.baseMediaDecodeTime = getUint64(data.subarray(4));
} else {
result.baseMediaDecodeTime = toUnsigned(data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]);
}
return result;
};
module.exports = tfdt;
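// A hedged example: a version-0 tfdt carries baseMediaDecodeTime as a 32-bit
// value in bytes 4-7. The payload below is illustrative.
console.log(tfdt(new Uint8Array([
  0x00,                  // version 0
  0x00, 0x00, 0x00,      // flags
  0x00, 0x00, 0x0b, 0xb8 // baseMediaDecodeTime = 3000
])).baseMediaDecodeTime); // 3000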


@@ -0,0 +1,58 @@
"use strict";
var tfhd = function tfhd(data) {
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
trackId: view.getUint32(4)
},
baseDataOffsetPresent = result.flags[2] & 0x01,
sampleDescriptionIndexPresent = result.flags[2] & 0x02,
defaultSampleDurationPresent = result.flags[2] & 0x08,
defaultSampleSizePresent = result.flags[2] & 0x10,
defaultSampleFlagsPresent = result.flags[2] & 0x20,
durationIsEmpty = result.flags[0] & 0x010000,
defaultBaseIsMoof = result.flags[0] & 0x020000,
i;
i = 8;
if (baseDataOffsetPresent) {
i += 4; // truncate top 4 bytes
// FIXME: should we read the full 64 bits?
result.baseDataOffset = view.getUint32(12);
i += 4;
}
if (sampleDescriptionIndexPresent) {
result.sampleDescriptionIndex = view.getUint32(i);
i += 4;
}
if (defaultSampleDurationPresent) {
result.defaultSampleDuration = view.getUint32(i);
i += 4;
}
if (defaultSampleSizePresent) {
result.defaultSampleSize = view.getUint32(i);
i += 4;
}
if (defaultSampleFlagsPresent) {
result.defaultSampleFlags = view.getUint32(i);
}
if (durationIsEmpty) {
result.durationIsEmpty = true;
}
if (!baseDataOffsetPresent && defaultBaseIsMoof) {
result.baseDataOffsetIsMoof = true;
}
return result;
};
module.exports = tfhd;
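// A hedged example: with flags 0x000002 only sample-description-index-present
// is set, so a single optional field follows trackId. Bytes are illustrative.
console.log(tfhd(new Uint8Array([
  0x00,                   // version 0
  0x00, 0x00, 0x02,       // flags: sample-description-index present
  0x00, 0x00, 0x00, 0x01, // track_ID = 1
  0x00, 0x00, 0x00, 0x02  // sample_description_index = 2
])));
// -> { version: 0, flags: Uint8Array [0, 0, 2], trackId: 1,
//      sampleDescriptionIndex: 2 }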


@@ -0,0 +1,101 @@
"use strict";
var parseSampleFlags = require('./parse-sample-flags.js');
var trun = function trun(data) {
var result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
samples: []
},
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
// Flag interpretation
dataOffsetPresent = result.flags[2] & 0x01,
// compare with 2nd byte of 0x1
firstSampleFlagsPresent = result.flags[2] & 0x04,
// compare with 2nd byte of 0x4
sampleDurationPresent = result.flags[1] & 0x01,
// compare with 2nd byte of 0x100
sampleSizePresent = result.flags[1] & 0x02,
// compare with 2nd byte of 0x200
sampleFlagsPresent = result.flags[1] & 0x04,
// compare with 2nd byte of 0x400
sampleCompositionTimeOffsetPresent = result.flags[1] & 0x08,
// compare with 2nd byte of 0x800
sampleCount = view.getUint32(4),
offset = 8,
sample;
if (dataOffsetPresent) {
// 32 bit signed integer
result.dataOffset = view.getInt32(offset);
offset += 4;
} // Overrides the flags for the first sample only. The order of
// optional values will be: duration, size, compositionTimeOffset
if (firstSampleFlagsPresent && sampleCount) {
sample = {
flags: parseSampleFlags(data.subarray(offset, offset + 4))
};
offset += 4;
if (sampleDurationPresent) {
sample.duration = view.getUint32(offset);
offset += 4;
}
if (sampleSizePresent) {
sample.size = view.getUint32(offset);
offset += 4;
}
if (sampleCompositionTimeOffsetPresent) {
if (result.version === 1) {
sample.compositionTimeOffset = view.getInt32(offset);
} else {
sample.compositionTimeOffset = view.getUint32(offset);
}
offset += 4;
}
result.samples.push(sample);
sampleCount--;
}
while (sampleCount--) {
sample = {};
if (sampleDurationPresent) {
sample.duration = view.getUint32(offset);
offset += 4;
}
if (sampleSizePresent) {
sample.size = view.getUint32(offset);
offset += 4;
}
if (sampleFlagsPresent) {
sample.flags = parseSampleFlags(data.subarray(offset, offset + 4));
offset += 4;
}
if (sampleCompositionTimeOffsetPresent) {
if (result.version === 1) {
sample.compositionTimeOffset = view.getInt32(offset);
} else {
sample.compositionTimeOffset = view.getUint32(offset);
}
offset += 4;
}
result.samples.push(sample);
}
return result;
};
module.exports = trun;
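// A hedged round trip: a version-0 trun with the data-offset (0x000001) and
// sample-duration (0x000100) flags set and a single sample. Bytes are
// illustrative.
console.log(trun(new Uint8Array([
  0x00,                   // version 0
  0x00, 0x01, 0x01,       // flags: data-offset | sample-duration present
  0x00, 0x00, 0x00, 0x01, // sample_count = 1
  0x00, 0x00, 0x00, 0x10, // data_offset = 16
  0x00, 0x00, 0x0b, 0xb8  // sample_duration = 3000
])));
// -> { version: 0, flags: ..., samples: [ { duration: 3000 } ], dataOffset: 16 }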


@@ -0,0 +1,555 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
* Parse mpeg2 transport stream packets to extract basic timing information
*/
'use strict';
var StreamTypes = require('../m2ts/stream-types.js');
var handleRollover = require('../m2ts/timestamp-rollover-stream.js').handleRollover;
var probe = {};
probe.ts = require('../m2ts/probe.js');
probe.aac = require('../aac/utils.js');
var ONE_SECOND_IN_TS = require('../utils/clock').ONE_SECOND_IN_TS;
var MP2T_PACKET_LENGTH = 188,
// bytes
SYNC_BYTE = 0x47;
/**
* walks through segment data looking for pat and pmt packets to parse out
* program map table information
*/
var parsePsi_ = function parsePsi_(bytes, pmt) {
var startIndex = 0,
endIndex = MP2T_PACKET_LENGTH,
packet,
type;
while (endIndex < bytes.byteLength) {
    // Look for a pair of start and end sync bytes in the data.
if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {
// We found a packet
packet = bytes.subarray(startIndex, endIndex);
type = probe.ts.parseType(packet, pmt.pid);
switch (type) {
case 'pat':
pmt.pid = probe.ts.parsePat(packet);
break;
case 'pmt':
var table = probe.ts.parsePmt(packet);
pmt.table = pmt.table || {};
Object.keys(table).forEach(function (key) {
pmt.table[key] = table[key];
});
break;
default:
break;
}
startIndex += MP2T_PACKET_LENGTH;
endIndex += MP2T_PACKET_LENGTH;
continue;
} // If we get here, we have somehow become de-synchronized and we need to step
// forward one byte at a time until we find a pair of sync bytes that denote
// a packet
startIndex++;
endIndex++;
}
};
/**
* walks through the segment data from the start and end to get timing information
* for the first and last audio pes packets
*/
var parseAudioPes_ = function parseAudioPes_(bytes, pmt, result) {
var startIndex = 0,
endIndex = MP2T_PACKET_LENGTH,
packet,
type,
pesType,
pusi,
parsed;
var endLoop = false; // Start walking from start of segment to get first audio packet
while (endIndex <= bytes.byteLength) {
    // Look for a pair of start and end sync bytes in the data.
if (bytes[startIndex] === SYNC_BYTE && (bytes[endIndex] === SYNC_BYTE || endIndex === bytes.byteLength)) {
// We found a packet
packet = bytes.subarray(startIndex, endIndex);
type = probe.ts.parseType(packet, pmt.pid);
switch (type) {
case 'pes':
pesType = probe.ts.parsePesType(packet, pmt.table);
pusi = probe.ts.parsePayloadUnitStartIndicator(packet);
if (pesType === 'audio' && pusi) {
parsed = probe.ts.parsePesTime(packet);
if (parsed) {
parsed.type = 'audio';
result.audio.push(parsed);
endLoop = true;
}
}
break;
default:
break;
}
if (endLoop) {
break;
}
startIndex += MP2T_PACKET_LENGTH;
endIndex += MP2T_PACKET_LENGTH;
continue;
} // If we get here, we have somehow become de-synchronized and we need to step
// forward one byte at a time until we find a pair of sync bytes that denote
// a packet
startIndex++;
endIndex++;
} // Start walking from end of segment to get last audio packet
endIndex = bytes.byteLength;
startIndex = endIndex - MP2T_PACKET_LENGTH;
endLoop = false;
while (startIndex >= 0) {
    // Look for a pair of start and end sync bytes in the data.
if (bytes[startIndex] === SYNC_BYTE && (bytes[endIndex] === SYNC_BYTE || endIndex === bytes.byteLength)) {
// We found a packet
packet = bytes.subarray(startIndex, endIndex);
type = probe.ts.parseType(packet, pmt.pid);
switch (type) {
case 'pes':
pesType = probe.ts.parsePesType(packet, pmt.table);
pusi = probe.ts.parsePayloadUnitStartIndicator(packet);
if (pesType === 'audio' && pusi) {
parsed = probe.ts.parsePesTime(packet);
if (parsed) {
parsed.type = 'audio';
result.audio.push(parsed);
endLoop = true;
}
}
break;
default:
break;
}
if (endLoop) {
break;
}
startIndex -= MP2T_PACKET_LENGTH;
endIndex -= MP2T_PACKET_LENGTH;
continue;
} // If we get here, we have somehow become de-synchronized and we need to step
    // backward one byte at a time until we find a pair of sync bytes that denote
// a packet
startIndex--;
endIndex--;
}
};
/**
* walks through the segment data from the start and end to get timing information
* for the first and last video pes packets as well as timing information for the first
* key frame.
*/
var parseVideoPes_ = function parseVideoPes_(bytes, pmt, result) {
var startIndex = 0,
endIndex = MP2T_PACKET_LENGTH,
packet,
type,
pesType,
pusi,
parsed,
frame,
i,
pes;
var endLoop = false;
var currentFrame = {
data: [],
size: 0
}; // Start walking from start of segment to get first video packet
while (endIndex < bytes.byteLength) {
    // Look for a pair of start and end sync bytes in the data.
if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {
// We found a packet
packet = bytes.subarray(startIndex, endIndex);
type = probe.ts.parseType(packet, pmt.pid);
switch (type) {
case 'pes':
pesType = probe.ts.parsePesType(packet, pmt.table);
pusi = probe.ts.parsePayloadUnitStartIndicator(packet);
if (pesType === 'video') {
if (pusi && !endLoop) {
parsed = probe.ts.parsePesTime(packet);
if (parsed) {
parsed.type = 'video';
result.video.push(parsed);
endLoop = true;
}
}
if (!result.firstKeyFrame) {
if (pusi) {
if (currentFrame.size !== 0) {
frame = new Uint8Array(currentFrame.size);
i = 0;
while (currentFrame.data.length) {
pes = currentFrame.data.shift();
frame.set(pes, i);
i += pes.byteLength;
}
if (probe.ts.videoPacketContainsKeyFrame(frame)) {
var firstKeyFrame = probe.ts.parsePesTime(frame); // PTS/DTS may not be available. Simply *not* setting
// the keyframe seems to work fine with HLS playback
// and definitely preferable to a crash with TypeError...
if (firstKeyFrame) {
result.firstKeyFrame = firstKeyFrame;
result.firstKeyFrame.type = 'video';
} else {
// eslint-disable-next-line
console.warn('Failed to extract PTS/DTS from PES at first keyframe. ' + 'This could be an unusual TS segment, or else mux.js did not ' + 'parse your TS segment correctly. If you know your TS ' + 'segments do contain PTS/DTS on keyframes please file a bug ' + 'report! You can try ffprobe to double check for yourself.');
}
}
currentFrame.size = 0;
}
}
currentFrame.data.push(packet);
currentFrame.size += packet.byteLength;
}
}
break;
default:
break;
}
if (endLoop && result.firstKeyFrame) {
break;
}
startIndex += MP2T_PACKET_LENGTH;
endIndex += MP2T_PACKET_LENGTH;
continue;
} // If we get here, we have somehow become de-synchronized and we need to step
// forward one byte at a time until we find a pair of sync bytes that denote
// a packet
startIndex++;
endIndex++;
} // Start walking from end of segment to get last video packet
endIndex = bytes.byteLength;
startIndex = endIndex - MP2T_PACKET_LENGTH;
endLoop = false;
while (startIndex >= 0) {
    // Look for a pair of start and end sync bytes in the data.
if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {
// We found a packet
packet = bytes.subarray(startIndex, endIndex);
type = probe.ts.parseType(packet, pmt.pid);
switch (type) {
case 'pes':
pesType = probe.ts.parsePesType(packet, pmt.table);
pusi = probe.ts.parsePayloadUnitStartIndicator(packet);
if (pesType === 'video' && pusi) {
parsed = probe.ts.parsePesTime(packet);
if (parsed) {
parsed.type = 'video';
result.video.push(parsed);
endLoop = true;
}
}
break;
default:
break;
}
if (endLoop) {
break;
}
startIndex -= MP2T_PACKET_LENGTH;
endIndex -= MP2T_PACKET_LENGTH;
continue;
} // If we get here, we have somehow become de-synchronized and we need to step
    // backward one byte at a time until we find a pair of sync bytes that denote
// a packet
startIndex--;
endIndex--;
}
};
/**
* Adjusts the timestamp information for the segment to account for
 * rollover and convert to seconds based on pes packet timescale (90kHz clock)
*/
var adjustTimestamp_ = function adjustTimestamp_(segmentInfo, baseTimestamp) {
if (segmentInfo.audio && segmentInfo.audio.length) {
var audioBaseTimestamp = baseTimestamp;
if (typeof audioBaseTimestamp === 'undefined' || isNaN(audioBaseTimestamp)) {
audioBaseTimestamp = segmentInfo.audio[0].dts;
}
segmentInfo.audio.forEach(function (info) {
info.dts = handleRollover(info.dts, audioBaseTimestamp);
info.pts = handleRollover(info.pts, audioBaseTimestamp); // time in seconds
info.dtsTime = info.dts / ONE_SECOND_IN_TS;
info.ptsTime = info.pts / ONE_SECOND_IN_TS;
});
}
if (segmentInfo.video && segmentInfo.video.length) {
var videoBaseTimestamp = baseTimestamp;
if (typeof videoBaseTimestamp === 'undefined' || isNaN(videoBaseTimestamp)) {
videoBaseTimestamp = segmentInfo.video[0].dts;
}
segmentInfo.video.forEach(function (info) {
info.dts = handleRollover(info.dts, videoBaseTimestamp);
info.pts = handleRollover(info.pts, videoBaseTimestamp); // time in seconds
info.dtsTime = info.dts / ONE_SECOND_IN_TS;
info.ptsTime = info.pts / ONE_SECOND_IN_TS;
});
if (segmentInfo.firstKeyFrame) {
var frame = segmentInfo.firstKeyFrame;
frame.dts = handleRollover(frame.dts, videoBaseTimestamp);
frame.pts = handleRollover(frame.pts, videoBaseTimestamp); // time in seconds
frame.dtsTime = frame.dts / ONE_SECOND_IN_TS;
frame.ptsTime = frame.pts / ONE_SECOND_IN_TS;
}
}
};
/**
* inspects the aac data stream for start and end time information
*/
var inspectAac_ = function inspectAac_(bytes) {
var endLoop = false,
audioCount = 0,
sampleRate = null,
timestamp = null,
frameSize = 0,
byteIndex = 0,
packet;
while (bytes.length - byteIndex >= 3) {
var type = probe.aac.parseType(bytes, byteIndex);
switch (type) {
case 'timed-metadata':
// Exit early because we don't have enough to parse
// the ID3 tag header
if (bytes.length - byteIndex < 10) {
endLoop = true;
break;
}
frameSize = probe.aac.parseId3TagSize(bytes, byteIndex); // Exit early if we don't have enough in the buffer
// to emit a full packet
if (frameSize > bytes.length) {
endLoop = true;
break;
}
if (timestamp === null) {
packet = bytes.subarray(byteIndex, byteIndex + frameSize);
timestamp = probe.aac.parseAacTimestamp(packet);
}
byteIndex += frameSize;
break;
case 'audio':
// Exit early because we don't have enough to parse
// the ADTS frame header
if (bytes.length - byteIndex < 7) {
endLoop = true;
break;
}
frameSize = probe.aac.parseAdtsSize(bytes, byteIndex); // Exit early if we don't have enough in the buffer
// to emit a full packet
if (frameSize > bytes.length) {
endLoop = true;
break;
}
if (sampleRate === null) {
packet = bytes.subarray(byteIndex, byteIndex + frameSize);
sampleRate = probe.aac.parseSampleRate(packet);
}
audioCount++;
byteIndex += frameSize;
break;
default:
byteIndex++;
break;
}
if (endLoop) {
return null;
}
}
if (sampleRate === null || timestamp === null) {
return null;
}
var audioTimescale = ONE_SECOND_IN_TS / sampleRate;
var result = {
audio: [{
type: 'audio',
dts: timestamp,
pts: timestamp
}, {
type: 'audio',
dts: timestamp + audioCount * 1024 * audioTimescale,
pts: timestamp + audioCount * 1024 * audioTimescale
}]
};
return result;
};
/**
* inspects the transport stream segment data for start and end time information
* of the audio and video tracks (when present) as well as the first key frame's
* start time.
*/
var inspectTs_ = function inspectTs_(bytes) {
var pmt = {
pid: null,
table: null
};
var result = {};
parsePsi_(bytes, pmt);
for (var pid in pmt.table) {
if (pmt.table.hasOwnProperty(pid)) {
var type = pmt.table[pid];
switch (type) {
case StreamTypes.H264_STREAM_TYPE:
result.video = [];
parseVideoPes_(bytes, pmt, result);
if (result.video.length === 0) {
delete result.video;
}
break;
case StreamTypes.ADTS_STREAM_TYPE:
result.audio = [];
parseAudioPes_(bytes, pmt, result);
if (result.audio.length === 0) {
delete result.audio;
}
break;
default:
break;
}
}
}
return result;
};
/**
* Inspects segment byte data and returns an object with start and end timing information
*
* @param {Uint8Array} bytes The segment byte data
* @param {Number} baseTimestamp Relative reference timestamp used when adjusting frame
 * timestamps for rollover. This value must be in the 90kHz clock.
* @return {Object} Object containing start and end frame timing info of segment.
*/
var inspect = function inspect(bytes, baseTimestamp) {
var isAacData = probe.aac.isLikelyAacData(bytes);
var result;
if (isAacData) {
result = inspectAac_(bytes);
} else {
result = inspectTs_(bytes);
}
if (!result || !result.audio && !result.video) {
return null;
}
adjustTimestamp_(result, baseTimestamp);
return result;
};
module.exports = {
inspect: inspect,
parseAudioPes_: parseAudioPes_
};
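// A minimal usage sketch; the segment file name and the zero base timestamp
// are assumptions. inspect() returns null when no timing info can be found.
var exampleSegment = new Uint8Array(require('fs').readFileSync('segment.ts'));
var exampleTiming = inspect(exampleSegment, 0); // baseTimestamp in 90kHz ticks
if (exampleTiming && exampleTiming.video) {
  console.log('first video PTS (seconds):', exampleTiming.video[0].ptsTime);
}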


@@ -0,0 +1,20 @@
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
var toUnsigned = function toUnsigned(value) {
return value >>> 0;
};
var toHexString = function toHexString(value) {
return ('00' + value.toString(16)).slice(-2);
};
module.exports = {
toUnsigned: toUnsigned,
toHexString: toHexString
};


@@ -0,0 +1,61 @@
"use strict";
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
var ONE_SECOND_IN_TS = 90000,
// 90kHz clock
secondsToVideoTs,
secondsToAudioTs,
videoTsToSeconds,
audioTsToSeconds,
audioTsToVideoTs,
videoTsToAudioTs,
metadataTsToSeconds;
secondsToVideoTs = function secondsToVideoTs(seconds) {
return seconds * ONE_SECOND_IN_TS;
};
secondsToAudioTs = function secondsToAudioTs(seconds, sampleRate) {
return seconds * sampleRate;
};
videoTsToSeconds = function videoTsToSeconds(timestamp) {
return timestamp / ONE_SECOND_IN_TS;
};
audioTsToSeconds = function audioTsToSeconds(timestamp, sampleRate) {
return timestamp / sampleRate;
};
audioTsToVideoTs = function audioTsToVideoTs(timestamp, sampleRate) {
return secondsToVideoTs(audioTsToSeconds(timestamp, sampleRate));
};
videoTsToAudioTs = function videoTsToAudioTs(timestamp, sampleRate) {
return secondsToAudioTs(videoTsToSeconds(timestamp), sampleRate);
};
/**
* Adjust ID3 tag or caption timing information by the timeline pts values
* (if keepOriginalTimestamps is false) and convert to seconds
*/
metadataTsToSeconds = function metadataTsToSeconds(timestamp, timelineStartPts, keepOriginalTimestamps) {
return videoTsToSeconds(keepOriginalTimestamps ? timestamp : timestamp - timelineStartPts);
};
module.exports = {
ONE_SECOND_IN_TS: ONE_SECOND_IN_TS,
secondsToVideoTs: secondsToVideoTs,
secondsToAudioTs: secondsToAudioTs,
videoTsToSeconds: videoTsToSeconds,
audioTsToSeconds: audioTsToSeconds,
audioTsToVideoTs: audioTsToVideoTs,
videoTsToAudioTs: videoTsToAudioTs,
metadataTsToSeconds: metadataTsToSeconds
};
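// A hedged worked example on the shared 90kHz clock: 2 seconds is 180,000
// video ticks, and 96,000 samples at a 48kHz audio rate land on the same
// instant.
console.log(secondsToVideoTs(2));            // 180000
console.log(audioTsToSeconds(96000, 48000)); // 2
console.log(audioTsToVideoTs(96000, 48000)); // 180000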


@@ -0,0 +1,154 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*/
'use strict';
var ExpGolomb;
/**
* Parser for exponential Golomb codes, a variable-bitwidth number encoding
* scheme used by h264.
*/
ExpGolomb = function ExpGolomb(workingData) {
var // the number of bytes left to examine in workingData
workingBytesAvailable = workingData.byteLength,
// the current word being examined
workingWord = 0,
// :uint
// the number of bits left to examine in the current word
workingBitsAvailable = 0; // :uint;
// ():uint
this.length = function () {
return 8 * workingBytesAvailable;
}; // ():uint
this.bitsAvailable = function () {
return 8 * workingBytesAvailable + workingBitsAvailable;
}; // ():void
this.loadWord = function () {
var position = workingData.byteLength - workingBytesAvailable,
workingBytes = new Uint8Array(4),
availableBytes = Math.min(4, workingBytesAvailable);
if (availableBytes === 0) {
throw new Error('no bytes available');
}
workingBytes.set(workingData.subarray(position, position + availableBytes));
workingWord = new DataView(workingBytes.buffer).getUint32(0); // track the amount of workingData that has been processed
workingBitsAvailable = availableBytes * 8;
workingBytesAvailable -= availableBytes;
}; // (count:int):void
this.skipBits = function (count) {
var skipBytes; // :int
if (workingBitsAvailable > count) {
workingWord <<= count;
workingBitsAvailable -= count;
} else {
count -= workingBitsAvailable;
skipBytes = Math.floor(count / 8);
count -= skipBytes * 8;
workingBytesAvailable -= skipBytes;
this.loadWord();
workingWord <<= count;
workingBitsAvailable -= count;
}
}; // (size:int):uint
this.readBits = function (size) {
var bits = Math.min(workingBitsAvailable, size),
// :uint
valu = workingWord >>> 32 - bits; // :uint
// if size > 31, handle error
workingBitsAvailable -= bits;
if (workingBitsAvailable > 0) {
workingWord <<= bits;
} else if (workingBytesAvailable > 0) {
this.loadWord();
}
bits = size - bits;
if (bits > 0) {
return valu << bits | this.readBits(bits);
}
return valu;
}; // ():uint
this.skipLeadingZeros = function () {
var leadingZeroCount; // :uint
for (leadingZeroCount = 0; leadingZeroCount < workingBitsAvailable; ++leadingZeroCount) {
if ((workingWord & 0x80000000 >>> leadingZeroCount) !== 0) {
// the first bit of working word is 1
workingWord <<= leadingZeroCount;
workingBitsAvailable -= leadingZeroCount;
return leadingZeroCount;
}
} // we exhausted workingWord and still have not found a 1
this.loadWord();
return leadingZeroCount + this.skipLeadingZeros();
}; // ():void
this.skipUnsignedExpGolomb = function () {
this.skipBits(1 + this.skipLeadingZeros());
}; // ():void
this.skipExpGolomb = function () {
this.skipBits(1 + this.skipLeadingZeros());
}; // ():uint
this.readUnsignedExpGolomb = function () {
var clz = this.skipLeadingZeros(); // :uint
return this.readBits(clz + 1) - 1;
}; // ():int
this.readExpGolomb = function () {
var valu = this.readUnsignedExpGolomb(); // :int
if (0x01 & valu) {
// the number is odd if the low order bit is set
return 1 + valu >>> 1; // add 1 to make it even, and divide by 2
}
return -1 * (valu >>> 1); // divide by two then make it negative
}; // Some convenience functions
// :Boolean
this.readBoolean = function () {
return this.readBits(1) === 1;
}; // ():int
this.readUnsignedByte = function () {
return this.readBits(8);
};
this.loadWord();
};
module.exports = ExpGolomb;
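// A hedged example: 0x20 is 00100000 in binary, so the stream starts with the
// Exp-Golomb codeword 00100, which decodes to readBits(3) - 1 = 3.
var exampleReader = new ExpGolomb(new Uint8Array([0x20, 0x00, 0x00, 0x00]));
console.log(exampleReader.readUnsignedExpGolomb()); // 3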


@@ -0,0 +1,25 @@
"use strict";
var MAX_UINT32 = Math.pow(2, 32);
var getUint64 = function getUint64(uint8) {
var dv = new DataView(uint8.buffer, uint8.byteOffset, uint8.byteLength);
var value;
if (dv.getBigUint64) {
value = dv.getBigUint64(0);
if (value < Number.MAX_SAFE_INTEGER) {
return Number(value);
}
return value;
}
return dv.getUint32(0) * MAX_UINT32 + dv.getUint32(4);
};
module.exports = {
getUint64: getUint64,
MAX_UINT32: MAX_UINT32
};
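// A hedged example: the high 32-bit word is weighted by 2^32, so the bytes
// [0,0,0,1, 0,0,0,0] decode to 4294967296. Engines with getBigUint64 return a
// BigInt only when the value cannot be represented safely as a Number.
console.log(getUint64(new Uint8Array([0, 0, 0, 1, 0, 0, 0, 0]))); // 4294967296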


@@ -0,0 +1,153 @@
/**
* mux.js
*
* Copyright (c) Brightcove
* Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
*
 * A lightweight readable stream implementation that handles event dispatching.
* Objects that inherit from streams should call init in their constructors.
*/
'use strict';
var Stream = function Stream() {
this.init = function () {
var listeners = {};
/**
* Add a listener for a specified event type.
* @param type {string} the event name
* @param listener {function} the callback to be invoked when an event of
* the specified type occurs
*/
this.on = function (type, listener) {
if (!listeners[type]) {
listeners[type] = [];
}
listeners[type] = listeners[type].concat(listener);
};
/**
* Remove a listener for a specified event type.
* @param type {string} the event name
* @param listener {function} a function previously registered for this
* type of event through `on`
*/
this.off = function (type, listener) {
var index;
if (!listeners[type]) {
return false;
}
index = listeners[type].indexOf(listener);
listeners[type] = listeners[type].slice();
listeners[type].splice(index, 1);
return index > -1;
};
/**
* Trigger an event of the specified type on this stream. Any additional
* arguments to this function are passed as parameters to event listeners.
* @param type {string} the event name
*/
this.trigger = function (type) {
var callbacks, i, length, args;
callbacks = listeners[type];
if (!callbacks) {
return;
} // Slicing the arguments on every invocation of this method
// can add a significant amount of overhead. Avoid the
// intermediate object creation for the common case of a
// single callback argument
if (arguments.length === 2) {
length = callbacks.length;
for (i = 0; i < length; ++i) {
callbacks[i].call(this, arguments[1]);
}
} else {
args = [];
i = arguments.length;
for (i = 1; i < arguments.length; ++i) {
args.push(arguments[i]);
}
length = callbacks.length;
for (i = 0; i < length; ++i) {
callbacks[i].apply(this, args);
}
}
};
/**
* Destroys the stream and cleans up.
*/
this.dispose = function () {
listeners = {};
};
};
};
/**
* Forwards all `data` events on this stream to the destination stream. The
* destination stream should provide a method `push` to receive the data
* events as they arrive.
* @param destination {stream} the stream that will receive all `data` events
* @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options
*/
Stream.prototype.pipe = function (destination) {
this.on('data', function (data) {
destination.push(data);
});
this.on('done', function (flushSource) {
destination.flush(flushSource);
});
this.on('partialdone', function (flushSource) {
destination.partialFlush(flushSource);
});
this.on('endedtimeline', function (flushSource) {
destination.endTimeline(flushSource);
});
this.on('reset', function (flushSource) {
destination.reset(flushSource);
});
return destination;
}; // Default stream functions that are expected to be overridden to perform
// actual work. These are provided by the prototype as a sort of no-op
// implementation so that we don't have to check for their existence in the
// `pipe` function above.
Stream.prototype.push = function (data) {
this.trigger('data', data);
};
Stream.prototype.flush = function (flushSource) {
this.trigger('done', flushSource);
};
Stream.prototype.partialFlush = function (flushSource) {
this.trigger('partialdone', flushSource);
};
Stream.prototype.endTimeline = function (flushSource) {
this.trigger('endedtimeline', flushSource);
};
Stream.prototype.reset = function (flushSource) {
this.trigger('reset', flushSource);
};
module.exports = Stream;
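// A hedged sketch of the intended subclassing pattern: call init() in the
// constructor, override push(), and re-emit 'data' so pipe() keeps working.
// Doubler is a hypothetical example stream, not part of mux.js.
var Doubler = function () {
  Doubler.prototype.init.call(this);
  this.push = function (value) {
    this.trigger('data', value * 2);
  };
};
Doubler.prototype = new Stream();
var exampleDoubler = new Doubler();
exampleDoubler.on('data', function (value) {
  console.log(value); // 42
});
exampleDoubler.push(21);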


@@ -0,0 +1,26 @@
"use strict";
/**
* Returns the first string in the data array ending with a null char '\0'
 * @param {Uint8Array} data
* @returns the string with the null char
*/
var uint8ToCString = function uint8ToCString(data) {
var index = 0;
var curChar = String.fromCharCode(data[index]);
var retString = '';
while (curChar !== '\0') {
retString += curChar;
index++;
curChar = String.fromCharCode(data[index]);
} // Add nullChar
retString += curChar;
return retString;
};
module.exports = {
uint8ToCString: uint8ToCString
};
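// A hedged example: the terminating null is kept, so the returned string's
// length counts the '\0'.
console.log(uint8ToCString(new Uint8Array([0x68, 0x69, 0x00]))); // 'hi\u0000'
console.log(uint8ToCString(new Uint8Array([0x68, 0x69, 0x00])).length); // 3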


@@ -0,0 +1,23 @@
"use strict";
// IE11 doesn't support indexOf for TypedArrays.
// Once IE11 support is dropped, this function should be removed.
var typedArrayIndexOf = function typedArrayIndexOf(typedArray, element, fromIndex) {
if (!typedArray) {
return -1;
}
var currentIndex = fromIndex;
for (; currentIndex < typedArray.length; currentIndex++) {
if (typedArray[currentIndex] === element) {
return currentIndex;
}
}
return -1;
};
module.exports = {
typedArrayIndexOf: typedArrayIndexOf
};
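// A hedged example: behaves like Array.prototype.indexOf with a fromIndex,
// which typed arrays lacked on IE11.
console.log(typedArrayIndexOf(new Uint8Array([1, 0, 2, 0]), 0, 2)); // 3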


@@ -0,0 +1,37 @@
# In-Band Captions
Captions come in two varieties, based on their relationship to the
video. Typically on the web, captions are delivered as a separate file
and associated with a video through the `<track>` element. This type
of caption is sometimes referred to as *out-of-band*.
The alternative method involves embedding the caption data directly into
the video content and is sometimes called *in-band captions*. In-band
captions exist in many videos today that were originally encoded for
broadcast and they are also a standard method used to provide captions
for live events. In-band HLS captions follow the CEA-708 standard.
In this project, in-band captions are parsed using a [CaptionStream][caption-stream]. For MPEG2-TS sources, the CaptionStream is used as part of the [Transmuxer TS Pipeline][transmuxer]. For ISOBMFF sources, the CaptionStream is used as part of the [MP4 CaptionParser][mp4-caption-parser].
## Is my stream CEA-608/CEA-708 compatible?
If you are having difficulties getting caption data as you expect out of Mux.js, take a look at our [Troubleshooting Guide](/docs/troubleshooting.md#608/708-caption-parsing) to ensure your content is compatible.
# Useful Tools
- [CCExtractor][cc-extractor]
- [Thumbcoil][thumbcoil]
# References
- [Rec. ITU-T H.264][h264-spec]: H.264 video data specification. CEA-708 captions are encapsulated in supplemental enhancement information (SEI) network abstraction layer (NAL) units within the video stream.
- [ANSI/SCTE 128-1][ansi-scte-spec]: the binary encapsulation of caption data within an SEI user_data_registered_itu_t_t35 payload.
- CEA-708-E: describes the framing and interpretation of caption data reassembled out of the picture user data blobs.
- CEA-608-E: specifies the hex to character mapping for extended language characters.
- [Closed Captioning Intro by Technology Connections](https://www.youtube.com/watch?v=6SL6zs2bDks)
[h264-spec]: https://www.itu.int/rec/T-REC-H.264
[ansi-scte-spec]: https://www.scte.org/documents/pdf/Standards/ANSI_SCTE%20128-1%202013.pdf
[caption-stream]: /lib/m2ts/caption-stream.js
[transmuxer]: /lib/mp4/transmuxer.js
[mp4-caption-parser]: /lib/mp4/caption-parser.js
[thumbcoil]: http://thumb.co.il/
[cc-extractor]: https://github.com/CCExtractor/ccextractor
