1
0
Fork 0
mirror of https://github.com/librespot-org/librespot.git synced 2025-10-03 09:49:31 +02:00

Compare commits

..

1 commit
dev ... v0.4.2

Author SHA1 Message Date
Roderick van Domburg
22f8aed3fc
Update Cargo.lock 2022-07-29 21:43:59 +02:00
667 changed files with 7707 additions and 39262 deletions

View file

@ -1,31 +0,0 @@
# syntax=docker/dockerfile:1
ARG debian_version=slim-bookworm
ARG rust_version=1.85.0
FROM rust:${rust_version}-${debian_version}
ARG DEBIAN_FRONTEND=noninteractive
ENV CARGO_REGISTRIES_CRATES_IO_PROTOCOL="sparse"
ENV RUST_BACKTRACE=1
ENV RUSTFLAGS="-D warnings"
RUN apt-get update && \
apt-get install -y --no-install-recommends \
git \
nano\
openssh-server \
# for rust-analyzer vscode plugin
pkg-config \
# developer dependencies
libunwind-dev \
libpulse-dev \
portaudio19-dev \
libasound2-dev \
libsdl2-dev \
gstreamer1.0-dev \
libgstreamer-plugins-base1.0-dev \
libavahi-compat-libdnssd-dev && \
rm -rf /var/lib/apt/lists/*
RUN rustup component add rustfmt && \
rustup component add clippy && \
cargo install cargo-hack

View file

@ -1,32 +0,0 @@
# syntax=docker/dockerfile:1
ARG alpine_version=alpine3.20
ARG rust_version=1.85.0
FROM rust:${rust_version}-${alpine_version}
ENV CARGO_REGISTRIES_CRATES_IO_PROTOCOL="sparse"
ENV RUST_BACKTRACE=1
ENV RUSTFLAGS="-D warnings -C target-feature=-crt-static"
RUN apk add --no-cache \
git \
nano\
openssh-server \
# for rust-analyzer vscode plugin
pkgconf \
musl-dev \
# developer dependencies
openssl-dev \
libunwind-dev \
pulseaudio-dev \
portaudio-dev \
alsa-lib-dev \
sdl2-dev \
gstreamer-dev \
gst-plugins-base-dev \
jack-dev \
avahi-dev && \
rm -rf /lib/apk/db/*
RUN rustup component add rustfmt && \
rustup component add clippy && \
cargo install cargo-hack

View file

@ -1,20 +0,0 @@
{
"name": "Librespot Devcontainer",
"dockerFile": "Dockerfile.alpine",
"_postCreateCommand_comment": "Uncomment 'postCreateCommand' to run commands after the container is created.",
"_postCreateCommand": "",
"customizations": {
"_comment": "Configure properties specific to VS Code.",
"vscode": {
"settings": {
"dev.containers.copyGitConfig": true
},
"extensions": ["eamodio.gitlens", "github.vscode-github-actions", "rust-lang.rust-analyzer"]
}
},
"containerEnv": {
"GIT_EDITOR": "nano"
},
"_remoteUser_comment": "Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root",
"_remoteUser": "root"
}

View file

@ -1,39 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''
---
### Look for similar bugs
Please check if there's [already an issue](https://github.com/librespot-org/librespot/issues) for your problem.
If you've only a "me too" comment to make, consider if a :+1: [reaction](https://github.blog/news-insights/product-news/add-reactions-to-pull-requests-issues-and-comments/)
will suffice.
### Description
A clear and concise description of what the problem is.
### Version
What version(s) of *librespot* does this problem exist in?
### How to reproduce
Steps to reproduce the behavior in *librespot* e.g.
1. Launch `librespot` with '...'
2. Connect with '...'
3. In the client click on '...'
4. See some error/problem
### Log
* A *full* **debug** log so we may trace your problem (launch `librespot` with `--verbose`).
* Ideally contains your above steps to reproduce.
* Format the log as code ([help](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks)) or use a *non-expiring* [pastebin](https://pastebin.com/).
* Redact data you consider personal but do not remove/trim anything else.
### Host (what you are running `librespot` on):
- OS: [e.g. Linux]
- Platform: [e.g. RPi 3B+]
### Additional context
Add any other context about the problem here. If your issue is related to sound playback, at a minimum specify the type and make of your output device.

View file

@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

View file

@ -1,10 +0,0 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: weekly
day: saturday
time: "10:00"
open-pull-requests-limit: 10
target-branch: dev

View file

@ -1,101 +0,0 @@
---
# Note, this is used in the badge URL!
name: build
"on":
push:
branches: [dev, master]
paths-ignore:
- "**.md"
- "docs/**"
- "contrib/**"
- "LICENSE"
- "*.sh"
- "**/Dockerfile*"
- "publish.sh"
- "test.sh"
pull_request:
paths-ignore:
- "**.md"
- "docs/**"
- "contrib/**"
- "LICENSE"
- "*.sh"
- "**/Dockerfile*"
- "publish.sh"
- "test.sh"
schedule:
# Run CI every week
- cron: "00 01 * * 0"
env:
RUST_BACKTRACE: 1
RUSTFLAGS: -D warnings
jobs:
test:
name: cargo +${{ matrix.toolchain }} test (${{ matrix.os }})
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, windows-latest]
toolchain:
- "1.85" # MSRV (Minimum supported rust version)
- stable
steps:
- name: Checkout code
uses: actions/checkout@v5
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ matrix.toolchain }}
- name: Cache Rust dependencies
uses: Swatinem/rust-cache@v2
- name: Install developer package dependencies (Linux)
if: runner.os == 'Linux'
run: >
sudo apt-get update && sudo apt-get install -y
libpulse-dev portaudio19-dev libasound2-dev libsdl2-dev
gstreamer1.0-dev libgstreamer-plugins-base1.0-dev
libavahi-compat-libdnssd-dev
- name: Fetch dependencies
run: cargo fetch --locked
- name: Build workspace with examples
run: cargo build --frozen --workspace --examples
- name: Run tests
run: cargo test --workspace
- name: Install cargo-hack
uses: taiki-e/install-action@cargo-hack
- name: Check packages without TLS requirements
run: cargo hack check -p librespot-protocol --each-feature
- name: Check workspace with native-tls
run: >
cargo hack check -p librespot --each-feature --exclude-all-features
--include-features native-tls
--exclude-features rustls-tls-native-roots,rustls-tls-webpki-roots
- name: Check workspace with rustls-tls-native-roots
run: >
cargo hack check -p librespot --each-feature --exclude-all-features
--include-features rustls-tls-native-roots
--exclude-features native-tls,rustls-tls-webpki-roots
- name: Build binary with default features
run: cargo build --frozen
- name: Upload debug artifacts
uses: actions/upload-artifact@v4
with:
name: librespot-${{ matrix.os }}-${{ matrix.toolchain }}
path: >
target/debug/librespot${{ runner.os == 'Windows' && '.exe' || '' }}
if-no-files-found: error

View file

@ -1,78 +0,0 @@
---
name: cross-compile
"on":
push:
branches: [dev, master]
paths-ignore:
- "**.md"
- "docs/**"
- "contrib/**"
- "LICENSE"
- "*.sh"
- "**/Dockerfile*"
pull_request:
paths-ignore:
- "**.md"
- "docs/**"
- "contrib/**"
- "LICENSE"
- "*.sh"
- "**/Dockerfile*"
env:
RUST_BACKTRACE: 1
RUSTFLAGS: -D warnings
jobs:
cross-compile:
name: cross +${{ matrix.toolchain }} build ${{ matrix.platform.target }}
runs-on: ${{ matrix.platform.runs-on }}
continue-on-error: false
strategy:
matrix:
platform:
- arch: armv7
runs-on: ubuntu-latest
target: armv7-unknown-linux-gnueabihf
- arch: aarch64
runs-on: ubuntu-latest
target: aarch64-unknown-linux-gnu
- arch: riscv64gc
runs-on: ubuntu-latest
target: riscv64gc-unknown-linux-gnu
toolchain:
- "1.85" # MSRV (Minimum Supported Rust Version)
- stable
steps:
- name: Checkout code
uses: actions/checkout@v5
- name: Build binary with default features
if: matrix.platform.target != 'riscv64gc-unknown-linux-gnu'
uses: houseabsolute/actions-rust-cross@v1
with:
command: build
target: ${{ matrix.platform.target }}
toolchain: ${{ matrix.toolchain }}
args: --locked --verbose
- name: Build binary without system dependencies
if: matrix.platform.target == 'riscv64gc-unknown-linux-gnu'
uses: houseabsolute/actions-rust-cross@v1
with:
command: build
target: ${{ matrix.platform.target }}
toolchain: ${{ matrix.toolchain }}
args: --locked --verbose --no-default-features --features rustls-tls-webpki-roots
- name: Upload debug artifacts
uses: actions/upload-artifact@v4
with:
name: librespot-${{ matrix.platform.runs-on }}-${{ matrix.platform.arch }}-${{ matrix.toolchain }} # yamllint disable-line rule:line-length
path: target/${{ matrix.platform.target }}/debug/librespot
if-no-files-found: error

View file

@ -1,87 +0,0 @@
---
name: code-quality
"on":
push:
branches: [dev, master]
paths-ignore:
- "**.md"
- "docs/**"
- "contrib/**"
- "LICENSE"
- "*.sh"
- "**/Dockerfile*"
pull_request:
paths-ignore:
- "**.md"
- "docs/**"
- "contrib/**"
- "LICENSE"
- "*.sh"
- "**/Dockerfile*"
schedule:
# Run CI every week
- cron: "00 01 * * 0"
env:
RUST_BACKTRACE: 1
RUSTFLAGS: -D warnings
jobs:
fmt:
name: cargo fmt
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v5
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Check formatting
run: cargo fmt --all -- --check
clippy:
needs: fmt
name: cargo clippy
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v5
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Cache Rust dependencies
uses: Swatinem/rust-cache@v2
- name: Install developer package dependencies
run: >
sudo apt-get update && sudo apt-get install -y
libpulse-dev portaudio19-dev libasound2-dev libsdl2-dev
gstreamer1.0-dev libgstreamer-plugins-base1.0-dev
libavahi-compat-libdnssd-dev
- name: Install cargo-hack
uses: taiki-e/install-action@cargo-hack
- name: Run clippy on packages without TLS requirements
run: cargo hack clippy -p librespot-protocol --each-feature
- name: Run clippy with native-tls
run: >
cargo hack clippy -p librespot --each-feature --exclude-all-features
--include-features native-tls
--exclude-features rustls-tls-native-roots,rustls-tls-webpki-roots
- name: Run clippy with rustls-tls-native-roots
run: >
cargo hack clippy -p librespot --each-feature --exclude-all-features
--include-features rustls-tls-native-roots
--exclude-features native-tls,rustls-tls-webpki-roots
- name: Run clippy with rustls-tls-webpki-roots
run: >
cargo hack clippy -p librespot --each-feature --exclude-all-features
--include-features rustls-tls-webpki-roots
--exclude-features native-tls,rustls-tls-native-roots

199
.github/workflows/test.yml vendored Normal file
View file

@ -0,0 +1,199 @@
# Note, this is used in the badge URL!
name: test
on:
push:
branches: [master, dev]
paths:
[
"**.rs",
"Cargo.toml",
"Cargo.lock",
"rustfmt.toml",
".github/workflows/*",
"!*.md",
"!contrib/*",
"!docs/*",
"!LICENSE",
"!*.sh",
]
pull_request:
paths:
[
"**.rs",
"Cargo.toml",
"Cargo.lock",
"rustfmt.toml",
".github/workflows/*",
"!*.md",
"!contrib/*",
"!docs/*",
"!LICENSE",
"!*.sh",
]
schedule:
# Run CI every week
- cron: "00 01 * * 0"
env:
RUST_BACKTRACE: 1
jobs:
fmt:
name: rustfmt
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Install toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
components: rustfmt
- run: cargo fmt --all -- --check
test-linux:
needs: fmt
name: cargo +${{ matrix.toolchain }} build (${{ matrix.os }})
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.experimental }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
toolchain:
- 1.56 # MSRV (Minimum supported rust version)
- stable
- beta
experimental: [false]
# Ignore failures in nightly
include:
- os: ubuntu-latest
toolchain: nightly
experimental: true
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Install toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{ matrix.toolchain }}
override: true
- name: Get Rustc version
id: get-rustc-version
run: echo "::set-output name=version::$(rustc -V)"
shell: bash
- name: Cache Rust dependencies
uses: actions/cache@v2
with:
path: |
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git
target
key: ${{ runner.os }}-${{ steps.get-rustc-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}
- name: Install developer package dependencies
run: sudo apt-get update && sudo apt-get install libpulse-dev portaudio19-dev libasound2-dev libsdl2-dev gstreamer1.0-dev libgstreamer-plugins-base1.0-dev libavahi-compat-libdnssd-dev
- run: cargo build --workspace --examples
- run: cargo test --workspace
- run: cargo install cargo-hack
- run: cargo hack --workspace --remove-dev-deps
- run: cargo build -p librespot-core --no-default-features
- run: cargo build -p librespot-core
- run: cargo hack build --each-feature -p librespot-discovery
- run: cargo hack build --each-feature -p librespot-playback
- run: cargo hack build --each-feature
test-windows:
needs: fmt
name: cargo build (${{ matrix.os }})
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [windows-latest]
toolchain: [stable]
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Install toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.toolchain }}
profile: minimal
override: true
- name: Get Rustc version
id: get-rustc-version
run: echo "::set-output name=version::$(rustc -V)"
shell: bash
- name: Cache Rust dependencies
uses: actions/cache@v2
with:
path: |
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git
target
key: ${{ runner.os }}-${{ steps.get-rustc-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}
- run: cargo build --workspace --examples
- run: cargo test --workspace
- run: cargo install cargo-hack
- run: cargo hack --workspace --remove-dev-deps
- run: cargo build --no-default-features
- run: cargo build
test-cross-arm:
needs: fmt
runs-on: ${{ matrix.os }}
continue-on-error: false
strategy:
fail-fast: false
matrix:
include:
- os: ubuntu-latest
target: armv7-unknown-linux-gnueabihf
toolchain: stable
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Install toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
target: ${{ matrix.target }}
toolchain: ${{ matrix.toolchain }}
override: true
- name: Get Rustc version
id: get-rustc-version
run: echo "::set-output name=version::$(rustc -V)"
shell: bash
- name: Cache Rust dependencies
uses: actions/cache@v2
with:
path: |
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git
target
key: ${{ runner.os }}-${{ matrix.target }}-${{ steps.get-rustc-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}
- name: Install cross
run: cargo install cross || true
- name: Build
run: cross build --target ${{ matrix.target }} --no-default-features

5
.gitignore vendored
View file

@ -1,10 +1,9 @@
target
.cargo
spotify_appkey.key
.idea/
.vagrant/
.project
.history
.cache
*.save
*.*~

View file

@ -2,326 +2,36 @@
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) since v0.2.0.
## [Unreleased]
### Added
- [core] Add `SpotifyUri` type to represent more types of URI than `SpotifyId` can
### Changed
- [playback] Changed type of `SpotifyId` fields in `PlayerEvent` members to `SpotifyUri` (breaking)
- [metadata] Changed arguments for `Metadata` trait from `&SpotifyId` to `&SpotifyUri` (breaking)
- [player] `load` function changed from accepting a `SpotifyId` to accepting a `SpotifyUri` (breaking)
- [player] `preload` function changed from accepting a `SpotifyId` to accepting a `SpotifyUri` (breaking)
- [spclient] `get_radio_for_track` function changed from accepting a `SpotifyId` to accepting a `SpotifyUri` (breaking)
### Removed
- [core] Removed `SpotifyItemType` enum; the new `SpotifyUri` is an enum over all item types and so which variant it is
describes its item type (breaking)
- [core] Removed `NamedSpotifyId` struct; it was made obsolete by `SpotifyUri` (breaking)
- [core] The following methods have been removed from `SpotifyId` and moved to `SpotifyUri` (breaking):
- `is_playable`
- `from_uri`
- `to_uri`
## [v0.7.1] - 2025-08-31
### Changed
- [connect] Shuffling was adjusted, so that shuffle and repeat can be used combined
### Fixed
- [connect] Repeat context will not go into autoplay anymore and triggering autoplay while shuffling shouldn't reshuffle anymore
- [connect] Only deletes the connect state on dealer shutdown instead on disconnecting
- [core] Fixed a problem where in `spclient` where an HTTP/411 error was thrown because the header was set wrong
- [main] Use the config instead of the type default for values that are not provided by the user
## [0.7.0] - 2025-08-24
### Changed
- [core] MSRV is now 1.85 with Rust edition 2024 (breaking)
- [core] AP connect and handshake have a combined 5 second timeout.
- [core] `stream_from_cdn` now accepts the URL as `TryInto<Uri>` instead of `CdnUrl` (breaking)
- [core] Add TLS backend selection with native-tls and rustls-tls options, defaulting to native-tls
- [connect] Replaced `has_volume_ctrl` with `disable_volume` in `ConnectConfig` (breaking)
- [connect] Changed `initial_volume` from `Option<u16>` to `u16` in `ConnectConfig` (breaking)
- [connect] Replaced `SpircLoadCommand` with `LoadRequest`, `LoadRequestOptions` and `LoadContextOptions` (breaking)
- [connect] Moved all public items to the highest level (breaking)
- [connect] Replaced Mercury usage in `Spirc` with Dealer
- [metadata] Replaced `AudioFileFormat` with own enum. (breaking)
- [playback] Changed trait `Mixer::open` to return `Result<Self, Error>` instead of `Self` (breaking)
- [playback] Changed type alias `MixerFn` to return `Result<Arc<dyn Mixer>, Error>` instead of `Arc<dyn Mixer>` (breaking)
- [playback] Optimize audio conversion to always dither at 16-bit level, and improve performance
- [playback] Normalizer maintains better stereo imaging, while also being faster
- [oauth] Remove loopback address requirement from `redirect_uri` when spawning callback handling server versus using stdin.
### Added
- [connect] Add command line parameter for setting volume steps.
- [connect] Add support for `seek_to`, `repeat_track` and `autoplay` for `Spirc` loading
- [connect] Add `pause` parameter to `Spirc::disconnect` method (breaking)
- [connect] Add `volume_steps` to `ConnectConfig` (breaking)
- [connect] Add and enforce rustdoc
- [playback] Add `track` field to `PlayerEvent::RepeatChanged` (breaking)
- [playback] Add `PlayerEvent::PositionChanged` event to notify about the current playback position
- [core] Add `request_with_options` and `request_with_protobuf_and_options` to `SpClient`
- [core] Add `try_get_urls` to `CdnUrl`
- [oauth] Add `OAuthClient` and `OAuthClientBuilder` structs to achieve a more customizable login process
### Fixed
- [test] Missing bindgen breaks crossbuild on recent runners. Now installing latest bindgen in addition.
- [core] Fix "no native root CA certificates found" on platforms unsupported
by `rustls-native-certs`.
- [core] Fix all APs rejecting with "TryAnotherAP" when connecting session
on Android platform.
- [core] Fix "Invalid Credentials" when using a Keymaster access token and
client ID on Android platform.
- [connect] Fix "play" command not handled if missing "offset" property
- [discovery] Fix libmdns zeroconf setup errors not propagating to the main task.
- [metadata] `Show::trailer_uri` is now optional since it isn't always present (breaking)
- [metadata] Fix incorrect parsing of audio format
- [connect] Handle transfer of playback with empty "uri" field
- [connect] Correctly apply playing/paused state when transferring playback
- [player] Saturate invalid seek positions to track duration
- [audio] Fall back to other URLs in case of a failure when downloading from CDN
- [core] Metadata requests failing with 500 Internal Server Error
- [player] Rodio backend did not honor audio output format request
### Deprecated
- [oauth] `get_access_token()` function marked for deprecation
- [core] `try_get_url()` function marked for deprecation
### Removed
- [core] Removed `get_canvases` from SpClient (breaking)
- [core] DeviceType `homething` removed due to crashes on Android (breaking)
- [metadata] Removed `genres` from Album (breaking)
- [metadata] Removed `genre` from Artists (breaking)
## [0.6.0] - 2024-10-30
This version takes another step into the direction of the HTTP API, fixes a
couple of bugs, and makes it easier for developers to mock a certain platform.
Also it adds the option to choose avahi, dnssd or libmdns as your zeroconf
backend for Spotify Connect discovery.
### Changed
- [core] The `access_token` for http requests is now acquired by `login5`
- [core] MSRV is now 1.75 (breaking)
- [discovery] librespot can now be compiled with multiple MDNS/DNS-SD backends
(avahi, dns_sd, libmdns) which can be selected using a CLI flag. The defaults
are unchanged (breaking).
### Added
- [core] Add `get_token_with_client_id()` to get a token for a specific client ID
- [core] Add `login` (mobile) and `auth_token` retrieval via login5
- [core] Add `OS` and `os_version` to `config.rs`
- [discovery] Added a new MDNS/DNS-SD backend which connects to Avahi via D-Bus.
### Fixed
- [connect] Fixes initial volume showing zero despite playing at full volume
- [core] Fix "source slice length (16) does not match destination slice length
(20)" panic on some tracks
## [0.5.0] - 2024-10-15
This version is a major departure from the architecture up until now. It
focuses on implementing the "new Spotify API". This means moving large parts
of the Spotify protocol from Mercury to HTTP. A lot of this was reverse
engineered before by @devgianlu of librespot-java. It was long overdue that we
started implementing it too, not in the least because new features like the
hopefully upcoming Spotify HiFi depend on it.
Splitting up the work on the new Spotify API, v0.5.0 brings HTTP-based file
downloads and metadata access. Implementing the "dealer" (replacing the current
Mercury-based SPIRC message bus with WebSockets, also required for social plays)
is a large and separate effort, slated for some later release.
While at it, we are taking the liberty to do some major refactoring to make
librespot more robust. Consequently not only the Spotify API changed but large
parts of the librespot API too. For downstream maintainers, we realise that it
can be a lot to move from the current codebase to this one, but believe us it
will be well worth it.
All these changes are likely to introduce new bugs as well as some regressions.
We appreciate all your testing and contributions to the repository:
https://github.com/librespot-org/librespot
### Changed
- [all] Assertions were changed into `Result` or removed (breaking)
- [all] Purge use of `unwrap`, `expect` and return `Result` (breaking)
- [all] `chrono` replaced with `time` (breaking)
- [all] `time` updated (CVE-2020-26235)
- [all] Improve lock contention and performance (breaking)
- [all] Use a single `player` instance. Eliminates occasional `player` and
`audio backend` restarts, which can cause issues with some playback
configurations.
- [all] Updated and removed unused dependencies
- [audio] Files are now downloaded over the HTTPS CDN (breaking)
- [audio] Improve file opening and seeking performance (breaking)
- [core] MSRV is now 1.74 (breaking)
- [connect] `DeviceType` moved out of `connect` into `core` (breaking)
- [connect] Update and expose all `spirc` context fields (breaking)
- [connect] Add `Clone, Default` traits to `spirc` contexts
- [connect] Autoplay contexts are now retrieved with the `spclient` (breaking)
- [contrib] Updated Docker image
- [core] Message listeners are registered before authenticating. As a result
there now is a separate `Session::new` and subsequent `session.connect`.
(breaking)
- [core] `ConnectConfig` moved out of `core` into `connect` (breaking)
- [core] `client_id` for `get_token` moved to `SessionConfig` (breaking)
- [core] Mercury code has been refactored for better legibility (breaking)
- [core] Cache resolved access points during runtime (breaking)
- [core] `FileId` is moved out of `SpotifyId`. For now it will be re-exported.
- [core] Report actual platform data on login
- [core] Support `Session` authentication with a Spotify access token
- [core] `Credentials.username` is now an `Option` (breaking)
- [core] `Session::connect` tries multiple access points, retrying each one.
- [core] Each access point connection now times out after 3 seconds.
- [core] Listen on both IPV4 and IPV6 on non-windows hosts
- [main] `autoplay {on|off}` now acts as an override. If unspecified, `librespot`
now follows the setting in the Connect client that controls it. (breaking)
- [metadata] Most metadata is now retrieved with the `spclient` (breaking)
- [metadata] Playlists are moved to the `playlist4_external` protobuf (breaking)
- [metadata] Handle playlists that are sent with microsecond-based timestamps
- [playback] The audio decoder has been switched from `lewton` to `Symphonia`.
This improves the Vorbis sound quality, adds support for MP3 as well as for
FLAC in the future. (breaking)
- [playback] Improve reporting of actual playback cursor
- [playback] The passthrough decoder is now feature-gated (breaking)
- [playback] `rodio`: call play and pause
- [protocol] protobufs have been updated
### Added
- [all] Check that array indexes are within bounds (panic safety)
- [all] Wrap errors in librespot `Error` type (breaking)
- [audio] Make audio fetch parameters tunable
- [connect] Add option on which zeroconf will bind. Defaults to all interfaces. Ignored by DNS-SD.
- [connect] Add session events
- [connect] Add `repeat`, `set_position_ms` and `set_volume` to `spirc.rs`
- [contrib] Add `event_handler_example.py`
- [core] Send metrics with metadata queries: client ID, country & product
- [core] Verify Spotify server certificates (prevents man-in-the-middle attacks)
- [core] User attributes are stored in `Session` upon login, accessible with a
getter and setter, and automatically updated as changes are pushed by the
Spotify infrastructure (breaking)
- [core] HTTPS is now supported, including for proxies (breaking)
- [core] Resolve `spclient` and `dealer` access points (breaking)
- [core] Get and cache tokens through new token provider (breaking)
- [core] `spclient` is the API for HTTP-based calls to the Spotify servers.
It supports a lot of functionality, including audio previews and image
downloads even if librespot doesn't use that for playback itself.
- [core] Support downloading of lyrics
- [core] Support parsing `SpotifyId` for local files
- [core] Support parsing `SpotifyId` for named playlists
- [core] Add checks and handling for stale server connections.
- [core] Fix potential deadlock waiting for audio decryption keys.
- [discovery] Add option to show playback device as a group
- [main] Add all player events to `player_event_handler.rs`
- [main] Add an event worker thread that runs async to the main thread(s) but
sync to itself to prevent potential data races for event consumers
- [metadata] All metadata fields in the protobufs are now exposed (breaking)
- [oauth] Standalone module to obtain Spotify access token using OAuth authorization code flow.
- [playback] Explicit tracks are skipped if the controlling Connect client has
disabled such content. Applications that use librespot as a library without
Connect should use the 'filter-explicit-content' user attribute in the session.
- [playback] Add metadata support via a `TrackChanged` event
- [connect] Add `activate` and `load` functions to `Spirc`, allowing control over local connect sessions
- [metadata] Add `Lyrics`
- [discovery] Add discovery initialisation retries if within the 1st min of uptime
### Fixed
- [connect] Set `PlayStatus` to the correct value when Player is loading to
avoid blanking out the controls when `self.play_status` is `LoadingPlay` or
`LoadingPause` in `spirc.rs`
- [connect] Handle attempts to play local files better by basically ignoring
attempts to load them in `handle_remote_update` in `spirc.rs`
- [connect] Loading previous or next tracks, or looping back on repeat, will
only start playback when we were already playing
- [connect, playback] Clean up and de-noise events and event firing
- [core] Fixed frequent disconnections for some users
- [core] More strict Spotify ID parsing
- [discovery] Update active user field upon connection
- [playback] Handle invalid track start positions by just starting the track
from the beginning
- [playback] Handle disappearing and invalid devices better
- [playback] Handle seek, pause, and play commands while loading
- [playback] Handle disabled normalisation correctly when using fixed volume
- [playback] Do not stop sink in gapless mode
- [metadata] Fix missing colon when converting named spotify IDs to URIs
## [0.4.2] - 2022-07-29
Besides a couple of small fixes, this point release is mainly to blacklist the
ap-gew4 and ap-gue1 access points that caused librespot to fail to play back
anything.
Development will now shift to the new HTTP-based API, targeted for a future
v0.5.0 release. The new-api branch will therefore be promoted to dev. This is a
major departure from the old API and although it brings many exciting new
things, it is also likely to introduce new bugs and some regressions.
Long story short, this v0.4.2 release is the most stable that librespot has yet
to offer. But, unless anything big comes up, it is also intended as the last
release to be based on the old API. Happy listening.
### Changed
- [playback] `pipe`: Better error handling
- [playback] `subprocess`: Better error handling
### Added
- [core] `apresolve`: Blacklist ap-gew4 and ap-gue1 access points that cause channel errors
- [playback] `pipe`: Implement stop
### Fixed
- [main] fix `--opt=value` line argument logging
- [playback] `alsamixer`: make `--volume-ctrl fixed` work as expected when combined with `--mixer alsa`
## Removed
## [0.4.1] - 2022-05-23
This release fixes dependency issues when installing from crates.
### Changed
- [chore] The MSRV is now 1.56
### Fixed
- [playback] Fixed dependency issues when installing from crate
## [0.4.0] - 2022-05-21
Note: This version was yanked, because a corrupt package was uploaded and failed
to install.
This is a polishing release, adding a few little extras and improving on many
others. We had to break a couple of API's to do so, and therefore bumped the
minor version number. v0.4.x may be the last in series before we migrate from
the current channel-based Spotify backend to a more HTTP-based backend.
Targeting that major effort for a v0.5 release sometime, we intend to maintain
v0.4.x as a stable branch until then.
### Changed
- [chore] The MSRV is now 1.53
- [contrib] Hardened security of the `systemd` service units
- [core] `Session`: `connect()` now returns the long-term credentials
@ -334,7 +44,6 @@ v0.4.x as a stable branch until then.
- [playback] `Sink`: `write()` now receives ownership of the packet (breaking)
### Added
- [main] Enforce reasonable ranges for option values (breaking)
- [main] Add the ability to parse environment variables
- [main] Log now emits warning when trying to use options that would otherwise have no effect
@ -347,7 +56,6 @@ v0.4.x as a stable branch until then.
- [playback] `pulseaudio`: set values to: `PULSE_PROP_application.version`, `PULSE_PROP_application.process.binary`, `PULSE_PROP_stream.description`, `PULSE_PROP_media.software` and `PULSE_PROP_media.role` environment variables (user set env var values take precedence) (breaking)
### Fixed
- [connect] Don't panic when activating shuffle without previous interaction
- [core] Removed unsafe code (breaking)
- [main] Fix crash when built with Avahi support but Avahi is locally unavailable
@ -358,24 +66,20 @@ v0.4.x as a stable branch until then.
- [playback] `alsa`: make `--volume-range` overrides apply to Alsa softvol controls
### Removed
- [playback] `alsamixer`: previously deprecated options `mixer-card`, `mixer-name` and `mixer-index` have been removed
## [0.3.1] - 2021-10-24
### Changed
- Include build profile in the displayed version information
- [playback] Improve dithering CPU usage by about 33%
### Fixed
- [connect] Partly fix behavior after last track of an album/playlist
## [0.3.0] - 2021-10-13
### Added
- [discovery] The crate `librespot-discovery` for discovery in LAN was created. Its functionality was previously part of `librespot-connect`.
- [playback] Add support for dithering with `--dither` for lower requantization error (breaking)
- [playback] Add `--volume-range` option to set dB range and control `log` and `cubic` volume control curves
@ -384,7 +88,6 @@ v0.4.x as a stable branch until then.
- [playback] Add `--normalisation-gain-type auto` that switches between album and track automatically
### Changed
- [audio, playback] Moved `VorbisDecoder`, `VorbisError`, `AudioPacket`, `PassthroughDecoder`, `PassthroughError`, `DecoderError`, `AudioDecoder` and the `convert` module from `librespot-audio` to `librespot-playback`. The underlying crates `vorbis`, `librespot-tremor`, `lewton` and `ogg` should be used directly. (breaking)
- [audio, playback] Use `Duration` for time constants and functions (breaking)
- [connect, playback] Moved volume controls from `librespot-connect` to `librespot-playback` crate
@ -401,20 +104,17 @@ v0.4.x as a stable branch until then.
- [playback] `player`: default normalisation type is now `auto`
### Deprecated
- [connect] The `discovery` module was deprecated in favor of the `librespot-discovery` crate
- [playback] `alsamixer`: renamed `mixer-card` to `alsa-mixer-device`
- [playback] `alsamixer`: renamed `mixer-name` to `alsa-mixer-control`
- [playback] `alsamixer`: renamed `mixer-index` to `alsa-mixer-index`
### Removed
- [connect] Removed no-op mixer started/stopped logic (breaking)
- [playback] Removed `with-vorbis` and `with-tremor` features
- [playback] `alsamixer`: removed `--mixer-linear-volume` option, now that `--volume-ctrl {linear|log}` work as expected on Alsa
### Fixed
- [connect] Fix step size on volume up/down events
- [connect] Fix looping back to the first track after the last track of an album or playlist
- [playback] Incorrect `PlayerConfig::default().normalisation_threshold` caused distortion when using dynamic volume normalisation downstream
@ -441,20 +141,15 @@ v0.4.x as a stable branch until then.
## [0.1.0] - 2019-11-06
[unreleased]: https://github.com/librespot-org/librespot/compare/v0.7.1...HEAD
[0.7.1]: https://github.com/librespot-org/librespot/compare/v0.7.0...v0.7.1
[0.7.0]: https://github.com/librespot-org/librespot/compare/v0.6.0...v0.7.0
[0.6.0]: https://github.com/librespot-org/librespot/compare/v0.5.0...v0.6.0
[0.5.0]: https://github.com/librespot-org/librespot/compare/v0.4.2...v0.5.0
[0.4.2]: https://github.com/librespot-org/librespot/compare/v0.4.1...v0.4.2
[0.4.1]: https://github.com/librespot-org/librespot/compare/v0.4.0...v0.4.1
[0.4.0]: https://github.com/librespot-org/librespot/compare/v0.3.1...v0.4.0
[0.3.1]: https://github.com/librespot-org/librespot/compare/v0.3.0...v0.3.1
[0.3.0]: https://github.com/librespot-org/librespot/compare/v0.2.0...v0.3.0
[0.2.0]: https://github.com/librespot-org/librespot/compare/v0.1.6...v0.2.0
[0.1.6]: https://github.com/librespot-org/librespot/compare/v0.1.5...v0.1.6
[0.1.5]: https://github.com/librespot-org/librespot/compare/v0.1.3...v0.1.5
[0.1.3]: https://github.com/librespot-org/librespot/compare/v0.1.2...v0.1.3
[0.1.2]: https://github.com/librespot-org/librespot/compare/v0.1.1...v0.1.2
[0.1.1]: https://github.com/librespot-org/librespot/compare/v0.1.0...v0.1.1
[0.4.2]: https://github.com/librespot-org/librespot/compare/v0.4.1..v0.4.2
[0.4.1]: https://github.com/librespot-org/librespot/compare/v0.4.0..v0.4.1
[0.4.0]: https://github.com/librespot-org/librespot/compare/v0.3.1..v0.4.0
[0.3.1]: https://github.com/librespot-org/librespot/compare/v0.3.0..v0.3.1
[0.3.0]: https://github.com/librespot-org/librespot/compare/v0.2.0..v0.3.0
[0.2.0]: https://github.com/librespot-org/librespot/compare/v0.1.6..v0.2.0
[0.1.6]: https://github.com/librespot-org/librespot/compare/v0.1.5..v0.1.6
[0.1.5]: https://github.com/librespot-org/librespot/compare/v0.1.3..v0.1.5
[0.1.3]: https://github.com/librespot-org/librespot/compare/v0.1.2..v0.1.3
[0.1.2]: https://github.com/librespot-org/librespot/compare/v0.1.1..v0.1.2
[0.1.1]: https://github.com/librespot-org/librespot/compare/v0.1.0..v0.1.1
[0.1.0]: https://github.com/librespot-org/librespot/releases/tag/v0.1.0

View file

@ -7,17 +7,19 @@ In order to compile librespot, you will first need to set up a suitable Rust bui
### Install Rust
The easiest, and recommended way to get Rust is to use [rustup](https://rustup.rs). Once that's installed, Rust's standard tools should be set up and ready to use.
*Note: The current minimum required Rust version at the time of writing is 1.56, you can find the current minimum version specified in the `.github/workflow/test.yml` file.*
#### Additional Rust tools - `rustfmt`
To ensure a consistent codebase, we utilise [`rustfmt`](https://github.com/rust-lang/rustfmt) and [`clippy`](https://github.com/rust-lang/rust-clippy), which are installed by default with `rustup` these days, else they can be installed manually with:
```bash
rustup component add rustfmt
rustup component add clippy
```
Using `cargo fmt` and `cargo clippy` is not optional, as our CI checks against this repo's rules.
Using `rustfmt` is not optional, as our CI checks against this repo's rules.
### General dependencies
Along with Rust, you will also require a C compiler.
Along with Rust, you will also require a C compiler.
On Debian/Ubuntu, install with:
```shell
sudo apt-get install build-essential
@ -25,10 +27,10 @@ sudo apt-get install build-essential
```
On Fedora systems, install with:
```shell
sudo dnf install gcc
sudo dnf install gcc
```
### Audio library dependencies
Depending on the chosen backend, specific development libraries are required.
Depending on the chosen backend, specific development libraries are required.
*_Note this is a non-exhaustive list, open a PR to add to it!_*
@ -56,91 +58,12 @@ On Fedora systems:
sudo dnf install alsa-lib-devel
```
### Zeroconf library dependencies
Depending on the chosen backend, specific development libraries are required.
*_Note this is a non-exhaustive list, open a PR to add to it!_*
| Zeroconf backend | Debian/Ubuntu | Fedora | macOS |
|--------------------|------------------------------|-----------------------------------|-------------|
|avahi | | | |
|dns_sd | `libavahi-compat-libdnssd-dev pkg-config` | `avahi-compat-libdns_sd-devel` | |
|libmdns (default) | | | |
### TLS library dependencies
librespot requires a TLS implementation for secure connections to Spotify's servers. You can choose between two mutually exclusive options:
#### native-tls (default)
Uses your system's native TLS implementation:
- **Linux**: OpenSSL
- **macOS**: Secure Transport (Security.framework)
- **Windows**: SChannel (Windows TLS)
This is the **default choice** and provides the best compatibility. It integrates with your system's certificate store and is well-tested across platforms.
**When to choose native-tls:**
- You want maximum compatibility
- You're using system-managed certificates
- You're on a standard Linux distribution with OpenSSL
- You're deploying on platforms where OpenSSL is already present
**Dependencies:**
On Debian/Ubuntu:
```shell
sudo apt-get install libssl-dev pkg-config
```
On Fedora:
```shell
sudo dnf install openssl-devel pkg-config
```
#### rustls-tls
Uses a Rust-based TLS implementation with certificate authority (CA) verification. Two certificate store options are available:
**rustls-tls-native-roots**:
- **Linux**: Uses system ca-certificates package
- **macOS**: Uses Security.framework for CA verification
- **Windows**: Uses Windows certificate store
- Integrates with system certificate management and security updates
**rustls-tls-webpki-roots**:
- Uses Mozilla's compiled-in certificate store (webpki-roots)
- Certificate trust is independent of host system
- Best for reproducible builds, containers, or embedded systems
**When to choose rustls-tls:**
- You want to avoid external OpenSSL dependencies
- You're building for reproducible/deterministic builds
- You're targeting platforms where OpenSSL is unavailable or problematic (musl, embedded, static linking)
- You're cross-compiling and want to avoid OpenSSL build complexity
- You prefer having cryptographic operations implemented in Rust
**No additional system dependencies required** - rustls is implemented in Rust (with some assembly for performance-critical cryptographic operations) and doesn't require external libraries like OpenSSL.
#### Building with specific TLS backends
```bash
# Default (native-tls)
cargo build
# Explicitly use native-tls
cargo build --no-default-features --features "native-tls rodio-backend with-libmdns"
# Use rustls-tls with native certificate stores
cargo build --no-default-features --features "rustls-tls-native-roots rodio-backend with-libmdns"
# Use rustls-tls with Mozilla's webpki certificate store
cargo build --no-default-features --features "rustls-tls-webpki-roots rodio-backend with-libmdns"
```
**Important:** The TLS backends are mutually exclusive. Attempting to enable both will result in a compile-time error.
### Getting the Source
The recommended method is to first fork the repo, so that you have a copy that you have read/write access to. After that, its a simple case of cloning your fork.
```bash
git clone git@github.com:YOUR_USERNAME/librespot.git
git clone git@github.com:YOURUSERNAME/librespot.git
```
## Compiling & Running
@ -163,21 +86,17 @@ cargo build --release
You will most likely want to build debug builds when developing, as they compile faster, and more verbose, and as the name suggests, are for the purposes of debugging. When submitting a bug report, it is recommended to use a debug build to capture stack traces.
There are also a number of compiler feature flags that you can add, in the event that you want to have certain additional features also compiled. All available features and their descriptions are documented in the main [Cargo.toml](Cargo.toml) file. Additional platform-specific information is available on the [wiki](https://github.com/librespot-org/librespot/wiki/Compiling#addition-features).
There are also a number of compiler feature flags that you can add, in the event that you want to have certain additional features also compiled. The list of these is available on the [wiki](https://github.com/librespot-org/librespot/wiki/Compiling#addition-features).
By default, librespot compiles with the ```native-tls```, ```rodio-backend```, and ```with-libmdns``` features.
**Note:** librespot requires at least one TLS backend to function. Building with `--no-default-features` alone will fail compilation. For custom feature selection, you must specify at least one TLS backend along with your desired audio and discovery backends.
For example, to build with the ALSA audio, libmdns discovery, and native-tls backends:
By default, librespot compiles with the ```rodio-backend``` feature. To compile without default features, you can run with:
```bash
cargo build --no-default-features --features "native-tls alsa-backend with-libmdns"
cargo build --no-default-features
```
Or to use rustls-tls with ALSA:
Similarly, to build with the ALSA backend:
```bash
cargo build --no-default-features --features "rustls-tls alsa-backend with-libmdns"
cargo build --no-default-features --features "alsa-backend"
```
### Running

4325
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,137 +1,15 @@
[package]
name = "librespot"
version.workspace = true
rust-version.workspace = true
authors.workspace = true
license.workspace = true
description = "An open source client library for Spotify, with support for Spotify Connect"
keywords = ["audio", "spotify", "music", "streaming", "connect"]
categories = ["multimedia::audio"]
repository.workspace = true
readme = "README.md"
edition.workspace = true
include = [
"src/**/*",
"audio/**/*",
"connect/**/*",
"core/**/*",
"discovery/**/*",
"examples/**/*",
"metadata/**/*",
"oauth/**/*",
"playback/**/*",
"protocol/**/*",
"Cargo.toml",
"README.md",
"LICENSE",
"COMPILING.md",
"CONTRIBUTING.md",
]
[workspace.package]
version = "0.7.1"
rust-version = "1.85"
version = "0.4.2"
authors = ["Librespot Org"]
license = "MIT"
description = "An open source client library for Spotify, with support for Spotify Connect"
keywords = ["spotify"]
repository = "https://github.com/librespot-org/librespot"
edition = "2024"
readme = "README.md"
edition = "2018"
[features]
default = ["native-tls", "rodio-backend", "with-libmdns"]
# TLS backends (mutually exclusive - compile-time checks in oauth/src/lib.rs)
# Note: Feature validation is in oauth crate since it's compiled first in the dependency tree.
# See COMPILING.md for more details on TLS backend selection.
# native-tls: Uses the system's native TLS stack (OpenSSL on Linux, Secure Transport on macOS,
# SChannel on Windows). This is the default as it's well-tested, widely compatible, and integrates
# with system certificate stores. Choose this for maximum compatibility and when you want to use
# system-managed certificates.
native-tls = ["librespot-core/native-tls", "librespot-oauth/native-tls"]
# rustls-tls: Uses the Rust-based rustls TLS implementation with certificate authority (CA)
# verification. This provides a Rust TLS stack (with assembly optimizations). Choose this for
# avoiding external OpenSSL dependencies, reproducible builds, or when targeting platforms where
# native TLS dependencies are unavailable or problematic (musl, embedded, static linking).
#
# Two certificate store options are available:
#
# - rustls-tls-native-roots: Uses rustls with native system certificate stores (ca-certificates on
# Linux, Security.framework on macOS, Windows certificate store on Windows). Best for most users as
# it integrates with system-managed certificates and gets security updates through the OS.
rustls-tls-native-roots = [
"librespot-core/rustls-tls-native-roots",
"librespot-oauth/rustls-tls-native-roots",
]
# rustls-tls-webpki-roots: Uses rustls with Mozilla's compiled-in certificate store (webpki-roots).
# Best for reproducible builds, containerized environments, or when you want certificate handling
# to be independent of the host system.
rustls-tls-webpki-roots = [
"librespot-core/rustls-tls-webpki-roots",
"librespot-oauth/rustls-tls-webpki-roots",
]
# Audio backends - see README.md for audio backend selection guide
# Cross-platform backends:
# rodio-backend: Cross-platform audio backend using Rodio (default). Provides good cross-platform
# compatibility with automatic backend selection. Uses ALSA on Linux, WASAPI on Windows, CoreAudio
# on macOS.
rodio-backend = ["librespot-playback/rodio-backend"]
# rodiojack-backend: Rodio backend with JACK support for professional audio setups.
rodiojack-backend = ["librespot-playback/rodiojack-backend"]
# gstreamer-backend: Uses GStreamer multimedia framework for audio output.
# Provides extensive audio processing capabilities.
gstreamer-backend = ["librespot-playback/gstreamer-backend"]
# portaudio-backend: Cross-platform audio I/O library backend.
portaudio-backend = ["librespot-playback/portaudio-backend"]
# sdl-backend: Simple DirectMedia Layer audio backend.
sdl-backend = ["librespot-playback/sdl-backend"]
# Platform-specific backends:
# alsa-backend: Advanced Linux Sound Architecture backend (Linux only).
# Provides low-latency audio output on Linux systems.
alsa-backend = ["librespot-playback/alsa-backend"]
# pulseaudio-backend: PulseAudio backend (Linux only).
# Integrates with the PulseAudio sound server for advanced audio routing.
pulseaudio-backend = ["librespot-playback/pulseaudio-backend"]
# jackaudio-backend: JACK Audio Connection Kit backend.
# Professional audio backend for low-latency, high-quality audio routing.
jackaudio-backend = ["librespot-playback/jackaudio-backend"]
# Network discovery backends - choose one for Spotify Connect device discovery
# See COMPILING.md for dependencies and platform support.
# with-libmdns: Pure-Rust mDNS implementation (default).
# No external dependencies, works on all platforms. Choose this for simple deployments or when
# avoiding system dependencies.
with-libmdns = ["librespot-discovery/with-libmdns"]
# with-avahi: Uses Avahi daemon for mDNS (Linux only).
# Integrates with system's Avahi service for network discovery. Choose this when you want to
# integrate with existing Avahi infrastructure or need advanced mDNS features. Requires
# libavahi-client-dev.
with-avahi = ["librespot-discovery/with-avahi"]
# with-dns-sd: Uses DNS Service Discovery (cross-platform).
# On macOS uses Bonjour, on Linux uses Avahi compatibility layer. Choose this for tight system
# integration on macOS or when using Avahi's dns-sd compatibility mode on Linux.
with-dns-sd = ["librespot-discovery/with-dns-sd"]
# Audio processing features:
# passthrough-decoder: Enables direct passthrough of Ogg Vorbis streams without decoding.
# Useful for custom audio processing pipelines or when you want to handle audio decoding
# externally. When enabled, audio is not decoded by librespot but passed through as raw Ogg Vorbis
# data.
passthrough-decoder = ["librespot-playback/passthrough-decoder"]
[workspace]
[lib]
name = "librespot"
@ -142,71 +20,76 @@ name = "librespot"
path = "src/main.rs"
doc = false
[workspace.dependencies]
librespot-audio = { version = "0.7.1", path = "audio", default-features = false }
librespot-connect = { version = "0.7.1", path = "connect", default-features = false }
librespot-core = { version = "0.7.1", path = "core", default-features = false }
librespot-discovery = { version = "0.7.1", path = "discovery", default-features = false }
librespot-metadata = { version = "0.7.1", path = "metadata", default-features = false }
librespot-oauth = { version = "0.7.1", path = "oauth", default-features = false }
librespot-playback = { version = "0.7.1", path = "playback", default-features = false }
librespot-protocol = { version = "0.7.1", path = "protocol", default-features = false }
[dependencies.librespot-audio]
path = "audio"
version = "0.4.2"
[dependencies.librespot-connect]
path = "connect"
version = "0.4.2"
[dependencies.librespot-core]
path = "core"
version = "0.4.2"
[dependencies.librespot-discovery]
path = "discovery"
version = "0.4.2"
[dependencies.librespot-metadata]
path = "metadata"
version = "0.4.2"
[dependencies.librespot-playback]
path = "playback"
version = "0.4.2"
[dependencies.librespot-protocol]
path = "protocol"
version = "0.4.2"
[dependencies]
librespot-audio.workspace = true
librespot-connect.workspace = true
librespot-core.workspace = true
librespot-discovery.workspace = true
librespot-metadata.workspace = true
librespot-oauth.workspace = true
librespot-playback.workspace = true
librespot-protocol.workspace = true
data-encoding = "2.5"
env_logger = { version = "0.11.2", default-features = false, features = [
"color",
"humantime",
"auto-color",
] }
futures-util = { version = "0.3", default-features = false }
getopts = "0.2"
base64 = "0.13"
env_logger = {version = "0.9", default-features = false, features = ["termcolor","humantime","atty"]}
futures-util = { version = "0.3", default_features = false }
getopts = "0.2.21"
hex = "0.4"
hyper = "0.14"
log = "0.4"
sha1 = "0.10"
sysinfo = { version = "0.36", default-features = false, features = ["system"] }
thiserror = "2"
tokio = { version = "1", features = [
"rt",
"macros",
"signal",
"sync",
"process",
] }
rpassword = "6.0"
thiserror = "1.0"
tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros", "signal", "sync", "process"] }
url = "2.2"
sha-1 = "0.9"
[features]
alsa-backend = ["librespot-playback/alsa-backend"]
portaudio-backend = ["librespot-playback/portaudio-backend"]
pulseaudio-backend = ["librespot-playback/pulseaudio-backend"]
jackaudio-backend = ["librespot-playback/jackaudio-backend"]
rodio-backend = ["librespot-playback/rodio-backend"]
rodiojack-backend = ["librespot-playback/rodiojack-backend"]
sdl-backend = ["librespot-playback/sdl-backend"]
gstreamer-backend = ["librespot-playback/gstreamer-backend"]
with-dns-sd = ["librespot-discovery/with-dns-sd"]
default = ["rodio-backend"]
[package.metadata.deb]
maintainer = "Librespot Organization <noreply@github.com>"
copyright = "2015, Paul Liétar"
maintainer = "librespot-org"
copyright = "2018 Paul Liétar"
license-file = ["LICENSE", "4"]
depends = "$auto"
recommends = "avahi-daemon"
extended-description = """\
librespot is an open source client library for Spotify. It enables applications \
to use Spotify's service to control and play music via various backends, and to \
act as a Spotify Connect receiver. It is an alternative to the official and now \
deprecated closed-source libspotify. Additionally, it provides extra features \
which are not available in the official library.
.
This package provides the librespot binary for headless Spotify Connect playback. \
.
Note: librespot only works with Spotify Premium accounts."""
to use Spotify's service, without using the official but closed-source \
libspotify. Additionally, it will provide extra features which are not \
available in the official library."""
section = "sound"
priority = "optional"
assets = [
# Main binary
["target/release/librespot", "usr/bin/", "755"],
# Documentation
["README.md", "usr/share/doc/librespot/", "644"],
# Systemd services
["contrib/librespot.service", "lib/systemd/system/", "644"],
["contrib/librespot.user.service", "lib/systemd/user/", "644"],
["contrib/librespot.user.service", "lib/systemd/user/", "644"]
]

View file

@ -1,12 +0,0 @@
[build]
pre-build = [
"dpkg --add-architecture $CROSS_DEB_ARCH",
"apt-get update",
"apt-get --assume-yes install libssl-dev:$CROSS_DEB_ARCH libasound2-dev:$CROSS_DEB_ARCH",
]
[target.riscv64gc-unknown-linux-gnu]
# RISC-V: Uses rustls-tls (no system dependencies needed)
# Building with --no-default-features --features rustls-tls
# No pre-build steps required - rustls is pure Rust
pre-build = []

View file

@ -4,28 +4,23 @@
Read through this paragraph in its entirety before running anything.
The Bash script in the root of the project, named `publish.sh` can be used to publish a new version of librespot and its corresponding crates. the command should be used as follows from the project root: `./publish 0.1.0` from the project root, substituting the new version number that you wish to publish. *Note the lack of a v prefix on the version number. This is important, do not add one.* The v prefix is added where appropriate by the script.
The Bash script in the root of the project, named `publish.sh` can be used to publish a new version of librespot and it's corresponding crates. the command should be used as follows from the project root: `./publish 0.1.0` from the project root, substituting the new version number that you wish to publish. *Note the lack of a v prefix on the version number. This is important, do not add one.* The v prefix is added where appropriate by the script.
Make sure that you are starting from a clean working directory for both `dev` and `master`, completely up to date with remote and all local changes either committed and pushed or stashed.
Note that the script will update the crates and lockfile, so in case you did not do so before, you really should to make sure none of the dependencies introduce some SemVer breaking change. Then commit so you again have a clean working directory.
Also don't forget to update `CHANGELOG.md` with the version number, release date, and at the bottom the comparison links.
You will want to perform a dry run first: `./publish --dry-run 0.1.0`. Please make note of any errors or warnings. In particular, you may need to explicitly inform Git which remote you want to track for the `master` branch like so: `git --track origin/master` (or whatever you have called the `librespot-org` remote `master` branch).
Depending on your system the script may fail to publish the main `librespot` crate after having published all the `librespot-xyz` sub-crates. If so then make sure the working directory is committed and pushed (watch `Cargo.toml`) and then run `cargo publish` manually after `publish.sh` finished.
To publish the crates your GitHub account needs to be authorized on `crates.io` by `librespot-org`. First time you should run `cargo login` and follow the on-screen instructions.
To publish the crates your GitHub account needs to be authorized on `crates.io` by `librespot-org`.
## What the script does
This is briefly how the script works:
- Change to branch master, pull latest version, merge development branch.
- Change to working directory.
- CD to working dir.
- Change version number in all files.
- Update crates and lockfile.
- Commit and tag changes.
- Publish crates in given order.
- Push version commit and tags to master.
@ -40,4 +35,4 @@ The `protocol` package needs to be published with `cargo publish --no-verify` du
Publishing can be done using the command `cargo publish` in each of the directories of the respective crate.
The script is meant to cover the standard publishing process. There are various improvements that could be made, such as adding options such as the user being able to add a changelog, though this is not the main focus, as the script is intended to be run by a CI. Feel free to improve and extend functionality, keeping in mind that it should always be possible for the script to be run in a non-interactive fashion.
The script is meant to cover the standard publishing process. There are various improvements that could be made, such as adding options such as the user being able to add a change log, though this is not the main focus, as the script is intended to be run by a CI. Feel free to improve and extend functionality, keeping in mind that it should always be possible for the script to be run in a non-interactive fashion.

View file

@ -1,4 +1,4 @@
[![Build Status](https://github.com/librespot-org/librespot/workflows/build/badge.svg)](https://github.com/librespot-org/librespot/actions)
[![Build Status](https://github.com/librespot-org/librespot/workflows/test/badge.svg)](https://github.com/librespot-org/librespot/actions)
[![Gitter chat](https://badges.gitter.im/librespot-org/librespot.png)](https://gitter.im/librespot-org/spotify-connect-resources)
[![Crates.io](https://img.shields.io/crates/v/librespot.svg)](https://crates.io/crates/librespot)
@ -62,15 +62,13 @@ SDL
Pipe
Subprocess
```
Please check [COMPILING.md](COMPILING.md) for detailed information on TLS, audio, and discovery backend dependencies, or the [Compiling](https://github.com/librespot-org/librespot/wiki/Compiling#general-dependencies) entry on the wiki for additional backend specific dependencies.
Please check the corresponding [Compiling](https://github.com/librespot-org/librespot/wiki/Compiling#general-dependencies) entry on the wiki for backend specific dependencies.
Once you've installed the dependencies and cloned this repository you can build *librespot* with the default features using Cargo.
Once you've installed the dependencies and cloned this repository you can build *librespot* with the default backend using Cargo.
```shell
cargo build --release
```
By default, this builds with native-tls (system TLS), rodio audio backend, and libmdns discovery. See [COMPILING.md](COMPILING.md) for information on selecting different TLS, audio, and discovery backends.
# Packages
librespot is also available via official package system on various operating systems such as Linux, FreeBSD, NetBSD. [Repology](https://repology.org/project/librespot/versions) offers a good overview.
@ -110,13 +108,13 @@ This is a non exhaustive list of projects that either use or have modified libre
- [librespot-golang](https://github.com/librespot-org/librespot-golang) - A golang port of librespot.
- [plugin.audio.spotify](https://github.com/marcelveldt/plugin.audio.spotify) - A Kodi plugin for Spotify.
- [raspotify](https://github.com/dtcooper/raspotify) - A Spotify Connect client that mostly Just Works™
- [raspotify](https://github.com/dtcooper/raspotify) - Spotify Connect client for the Raspberry Pi that Just Works™
- [Spotifyd](https://github.com/Spotifyd/spotifyd) - A stripped down librespot UNIX daemon.
- [rpi-audio-receiver](https://github.com/nicokaiser/rpi-audio-receiver) - easy Raspbian install scripts for Spotifyd, Bluetooth, Shairport and other audio receivers
- [Spotcontrol](https://github.com/badfortrains/spotcontrol) - A golang implementation of a Spotify Connect controller. No Playback functionality.
- [Spotcontrol](https://github.com/badfortrains/spotcontrol) - A golang implementation of a Spotify Connect controller. No playback
functionality.
- [librespot-java](https://github.com/devgianlu/librespot-java) - A Java port of librespot.
- [ncspot](https://github.com/hrkfdn/ncspot) - Cross-platform ncurses Spotify client.
- [ansible-role-librespot](https://github.com/xMordax/ansible-role-librespot/tree/master) - Ansible role that will build, install and configure Librespot.
- [Spot](https://github.com/xou816/spot) - Gtk/Rust native Spotify client for the GNOME desktop.
- [Spot](https://github.com/xou816/spot) - Gtk/Rust native Spotify client for the GNOME desktop.
- [Snapcast](https://github.com/badaix/snapcast) - synchronised multi-room audio player that uses librespot as its source for Spotify content
- [MuPiBox](https://mupibox.de/) - Portable music box for Spotify and local media based on Raspberry Pi. Operated via touchscreen. Suitable for children and older people.

View file

@ -1,20 +0,0 @@
# Security Policy
## Supported Versions
We will support the latest release and main development branch with security updates.
## Reporting a Vulnerability
If you believe to have found a vulnerability in `librespot` itself or as a result from
one of its dependencies, please report it by contacting one or more of the active
maintainers directly, allowing no less than three calendar days to receive a response.
If you believe that the vulnerability is public knowledge or already being exploited
in the wild, regardless of having received a response to your direct messages or not,
please create an issue report to warn other users about continued use and instruct
them on any known workarounds.
On your report you may expect feedback on whether we believe that the vulnerability
is indeed applicable and if so, when and how it may be fixed. You may expect to
be asked for assistance with review and testing.

View file

@ -1,33 +1,21 @@
[package]
name = "librespot-audio"
version.workspace = true
rust-version.workspace = true
version = "0.4.2"
authors = ["Paul Lietar <paul@lietar.net>"]
license.workspace = true
description = "The audio fetching logic for librespot"
repository.workspace = true
edition.workspace = true
license = "MIT"
repository = "https://github.com/librespot-org/librespot"
edition = "2018"
[features]
# Refer to the workspace Cargo.toml for the list of features
default = ["native-tls"]
# TLS backend propagation
native-tls = ["librespot-core/native-tls"]
rustls-tls-native-roots = ["librespot-core/rustls-tls-native-roots"]
rustls-tls-webpki-roots = ["librespot-core/rustls-tls-webpki-roots"]
[dependencies.librespot-core]
path = "../core"
version = "0.4.2"
[dependencies]
librespot-core = { version = "0.7.1", path = "../core", default-features = false }
aes = "0.8"
bytes = "1"
ctr = "0.9"
futures-util = { version = "0.3", default-features = false, features = ["std"] }
http-body-util = "0.1"
hyper = { version = "1.6", features = ["http1", "http2"] }
hyper-util = { version = "0.1", features = ["client", "http2"] }
aes-ctr = "0.6"
byteorder = "1.4"
bytes = "1.0"
log = "0.4"
tempfile = "3"
thiserror = "2"
tokio = { version = "1", features = ["macros", "sync"] }
futures-util = { version = "0.3", default_features = false }
tempfile = "3.1"
tokio = { version = "1", features = ["sync", "macros"] }

View file

@ -1,8 +1,8 @@
use std::io;
use aes::cipher::{KeyIvInit, StreamCipher, StreamCipherSeek};
type Aes128Ctr = ctr::Ctr128BE<aes::Aes128>;
use aes_ctr::cipher::generic_array::GenericArray;
use aes_ctr::cipher::{NewStreamCipher, SyncStreamCipher, SyncStreamCipherSeek};
use aes_ctr::Aes128Ctr;
use librespot_core::audio_key::AudioKey;
@ -11,20 +11,16 @@ const AUDIO_AESIV: [u8; 16] = [
];
pub struct AudioDecrypt<T: io::Read> {
// a `None` cipher is a convenience to make `AudioDecrypt` pass files unaltered
cipher: Option<Aes128Ctr>,
cipher: Aes128Ctr,
reader: T,
}
impl<T: io::Read> AudioDecrypt<T> {
pub fn new(key: Option<AudioKey>, reader: T) -> AudioDecrypt<T> {
let cipher = if let Some(key) = key {
Aes128Ctr::new_from_slices(&key.0, &AUDIO_AESIV).ok()
} else {
// some files are unencrypted
None
};
pub fn new(key: AudioKey, reader: T) -> AudioDecrypt<T> {
let cipher = Aes128Ctr::new(
GenericArray::from_slice(&key.0),
GenericArray::from_slice(&AUDIO_AESIV),
);
AudioDecrypt { cipher, reader }
}
}
@ -33,9 +29,7 @@ impl<T: io::Read> io::Read for AudioDecrypt<T> {
fn read(&mut self, output: &mut [u8]) -> io::Result<usize> {
let len = self.reader.read(output)?;
if let Some(ref mut cipher) = self.cipher {
cipher.apply_keystream(&mut output[..len]);
}
self.cipher.apply_keystream(&mut output[..len]);
Ok(len)
}
@ -45,9 +39,7 @@ impl<T: io::Read + io::Seek> io::Seek for AudioDecrypt<T> {
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
let newpos = self.reader.seek(pos)?;
if let Some(ref mut cipher) = self.cipher {
cipher.seek(newpos);
}
self.cipher.seek(newpos);
Ok(newpos)
}

View file

@ -1,133 +1,87 @@
mod receive;
use std::{
cmp::min,
fs,
io::{self, Read, Seek, SeekFrom},
sync::{
Arc, OnceLock,
atomic::{AtomicBool, AtomicUsize, Ordering},
},
sync::{Condvar, Mutex},
time::Duration,
};
use futures_util::{StreamExt, TryFutureExt, future::IntoStream};
use hyper::{Response, StatusCode, body::Incoming, header::CONTENT_RANGE};
use hyper_util::client::legacy::ResponseFuture;
use std::cmp::{max, min};
use std::fs;
use std::io::{self, Read, Seek, SeekFrom};
use std::sync::atomic::{self, AtomicUsize};
use std::sync::{Arc, Condvar, Mutex};
use std::time::{Duration, Instant};
use byteorder::{BigEndian, ByteOrder};
use futures_util::{future, StreamExt, TryFutureExt, TryStreamExt};
use librespot_core::channel::{ChannelData, ChannelError, ChannelHeaders};
use librespot_core::session::Session;
use librespot_core::spotify_id::FileId;
use tempfile::NamedTempFile;
use thiserror::Error;
use tokio::sync::{Semaphore, mpsc, oneshot};
use librespot_core::{Error, FileId, Session, cdn_url::CdnUrl};
use self::receive::audio_file_fetch;
use tokio::sync::{mpsc, oneshot};
use self::receive::{audio_file_fetch, request_range};
use crate::range_set::{Range, RangeSet};
pub type AudioFileResult = Result<(), librespot_core::Error>;
/// The minimum size of a block that is requested from the Spotify servers in one request.
/// This is the block size that is typically requested while doing a `seek()` on a file.
/// Note: smaller requests can happen if part of the block is downloaded already.
const MINIMUM_DOWNLOAD_SIZE: usize = 1024 * 16;
const DOWNLOAD_STATUS_POISON_MSG: &str = "audio download status mutex should not be poisoned";
/// The amount of data that is requested when initially opening a file.
/// Note: if the file is opened to play from the beginning, the amount of data to
/// read ahead is requested in addition to this amount. If the file is opened to seek to
/// another position, then only this amount is requested on the first request.
const INITIAL_DOWNLOAD_SIZE: usize = 1024 * 16;
#[derive(Error, Debug)]
pub enum AudioFileError {
#[error("other end of channel disconnected")]
Channel,
#[error("required header not found")]
Header,
#[error("streamer received no data")]
NoData,
#[error("no output available")]
Output,
#[error("invalid status code {0}")]
StatusCode(StatusCode),
#[error("wait timeout exceeded")]
WaitTimeout,
}
/// The ping time that is used for calculations before a ping time was actually measured.
const INITIAL_PING_TIME_ESTIMATE: Duration = Duration::from_millis(500);
impl From<AudioFileError> for Error {
fn from(err: AudioFileError) -> Self {
match err {
AudioFileError::Channel => Error::aborted(err),
AudioFileError::Header => Error::unavailable(err),
AudioFileError::NoData => Error::unavailable(err),
AudioFileError::Output => Error::aborted(err),
AudioFileError::StatusCode(_) => Error::failed_precondition(err),
AudioFileError::WaitTimeout => Error::deadline_exceeded(err),
}
}
}
/// If the measured ping time to the Spotify server is larger than this value, it is capped
/// to avoid run-away block sizes and pre-fetching.
const MAXIMUM_ASSUMED_PING_TIME: Duration = Duration::from_millis(1500);
#[derive(Clone)]
pub struct AudioFetchParams {
/// The minimum size of a block that is requested from the Spotify servers in one request.
/// This is the block size that is typically requested while doing a `seek()` on a file.
/// The Symphonia decoder requires this to be a power of 2 and > 32 kB.
/// Note: smaller requests can happen if part of the block is downloaded already.
pub minimum_download_size: usize,
/// Before playback starts, this many seconds of data must be present.
/// Note: the calculations are done using the nominal bitrate of the file. The actual amount
/// of audio data may be larger or smaller.
pub const READ_AHEAD_BEFORE_PLAYBACK: Duration = Duration::from_secs(1);
/// The minimum network throughput that we expect. Together with the minimum download size,
/// this will determine the time we will wait for a response.
pub minimum_throughput: usize,
/// Same as `READ_AHEAD_BEFORE_PLAYBACK`, but the time is taken as a factor of the ping
/// time to the Spotify server. Both `READ_AHEAD_BEFORE_PLAYBACK` and
/// `READ_AHEAD_BEFORE_PLAYBACK_ROUNDTRIPS` are obeyed.
/// Note: the calculations are done using the nominal bitrate of the file. The actual amount
/// of audio data may be larger or smaller.
pub const READ_AHEAD_BEFORE_PLAYBACK_ROUNDTRIPS: f32 = 2.0;
/// The ping time that is used for calculations before a ping time was actually measured.
pub initial_ping_time_estimate: Duration,
/// While playing back, this many seconds of data ahead of the current read position are
/// requested.
/// Note: the calculations are done using the nominal bitrate of the file. The actual amount
/// of audio data may be larger or smaller.
pub const READ_AHEAD_DURING_PLAYBACK: Duration = Duration::from_secs(5);
/// If the measured ping time to the Spotify server is larger than this value, it is capped
/// to avoid run-away block sizes and pre-fetching.
pub maximum_assumed_ping_time: Duration,
/// Same as `READ_AHEAD_DURING_PLAYBACK`, but the time is taken as a factor of the ping
/// time to the Spotify server.
/// Note: the calculations are done using the nominal bitrate of the file. The actual amount
/// of audio data may be larger or smaller.
pub const READ_AHEAD_DURING_PLAYBACK_ROUNDTRIPS: f32 = 10.0;
/// Before playback starts, this many seconds of data must be present.
/// Note: the calculations are done using the nominal bitrate of the file. The actual amount
/// of audio data may be larger or smaller.
pub read_ahead_before_playback: Duration,
/// If the amount of data that is pending (requested but not received) is less than a certain amount,
/// data is pre-fetched in addition to the read ahead settings above. The threshold for requesting more
/// data is calculated as `<pending bytes> < PREFETCH_THRESHOLD_FACTOR * <ping time> * <nominal data rate>`
const PREFETCH_THRESHOLD_FACTOR: f32 = 4.0;
/// While playing back, this many seconds of data ahead of the current read position are
/// requested.
/// Note: the calculations are done using the nominal bitrate of the file. The actual amount
/// of audio data may be larger or smaller.
pub read_ahead_during_playback: Duration,
/// Similar to `PREFETCH_THRESHOLD_FACTOR`, but it also takes the current download rate into account.
/// The formula used is `<pending bytes> < FAST_PREFETCH_THRESHOLD_FACTOR * <ping time> * <measured download rate>`
/// This mechanism allows for fast downloading of the remainder of the file. The number should be larger
/// than `1.0` so the download rate ramps up until the bandwidth is saturated. The larger the value, the faster
/// the download rate ramps up. However, this comes at the cost that it might hurt ping time if a seek is
/// performed while downloading. Values smaller than `1.0` cause the download rate to collapse and effectively
/// only `PREFETCH_THRESHOLD_FACTOR` is in effect. Thus, set to `0.0` if bandwidth saturation is not wanted.
const FAST_PREFETCH_THRESHOLD_FACTOR: f32 = 1.5;
/// If the amount of data that is pending (requested but not received) is less than a certain amount,
/// data is pre-fetched in addition to the read ahead settings above. The threshold for requesting more
/// data is calculated as `<pending bytes> < PREFETCH_THRESHOLD_FACTOR * <ping time> * <nominal data rate>`
pub prefetch_threshold_factor: f32,
/// Limit the number of requests that are pending simultaneously before pre-fetching data. Pending
/// requests share bandwidth. Thus, having too many requests can lead to the one that is needed next
/// for playback to be delayed leading to a buffer underrun. This limit has the effect that a new
/// pre-fetch request is only sent if less than `MAX_PREFETCH_REQUESTS` are pending.
const MAX_PREFETCH_REQUESTS: usize = 4;
/// The time we will wait to obtain status updates on downloading.
pub download_timeout: Duration,
}
impl Default for AudioFetchParams {
fn default() -> Self {
let minimum_download_size = 64 * 1024;
let minimum_throughput = 8 * 1024;
Self {
minimum_download_size,
minimum_throughput,
initial_ping_time_estimate: Duration::from_millis(500),
maximum_assumed_ping_time: Duration::from_millis(1500),
read_ahead_before_playback: Duration::from_secs(1),
read_ahead_during_playback: Duration::from_secs(5),
prefetch_threshold_factor: 4.0,
download_timeout: Duration::from_secs(
(minimum_download_size / minimum_throughput) as u64,
),
}
}
}
static AUDIO_FETCH_PARAMS: OnceLock<AudioFetchParams> = OnceLock::new();
impl AudioFetchParams {
pub fn set(params: AudioFetchParams) -> Result<(), AudioFetchParams> {
AUDIO_FETCH_PARAMS.set(params)
}
pub fn get() -> &'static AudioFetchParams {
AUDIO_FETCH_PARAMS.get_or_init(AudioFetchParams::default)
}
}
/// The time we will wait to obtain status updates on downloading.
const DOWNLOAD_TIMEOUT: Duration = Duration::from_secs(1);
pub enum AudioFile {
Cached(fs::File),
@ -135,17 +89,11 @@ pub enum AudioFile {
}
#[derive(Debug)]
pub struct StreamingRequest {
streamer: IntoStream<ResponseFuture>,
initial_response: Option<Response<Incoming>>,
offset: usize,
length: usize,
}
#[derive(Debug)]
pub enum StreamLoaderCommand {
Fetch(Range), // signal the stream loader to fetch a range of the file
Close, // terminate and don't load any more data
enum StreamLoaderCommand {
Fetch(Range), // signal the stream loader to fetch a range of the file
RandomAccessMode(), // optimise download strategy for random access
StreamMode(), // optimise download strategy for streaming
Close(), // terminate and don't load any more data
}
#[derive(Clone)]
@ -166,11 +114,7 @@ impl StreamLoaderController {
pub fn range_available(&self, range: Range) -> bool {
if let Some(ref shared) = self.stream_shared {
let download_status = shared
.download_status
.lock()
.expect(DOWNLOAD_STATUS_POISON_MSG);
let download_status = shared.download_status.lock().unwrap();
range.length
<= download_status
.downloaded
@ -181,23 +125,21 @@ impl StreamLoaderController {
}
pub fn range_to_end_available(&self) -> bool {
match self.stream_shared {
Some(ref shared) => {
let read_position = shared.read_position();
self.range_available(Range::new(read_position, self.len() - read_position))
}
None => true,
}
self.stream_shared.as_ref().map_or(true, |shared| {
let read_position = shared.read_position.load(atomic::Ordering::Relaxed);
self.range_available(Range::new(read_position, self.len() - read_position))
})
}
pub fn ping_time(&self) -> Option<Duration> {
self.stream_shared.as_ref().map(|shared| shared.ping_time())
pub fn ping_time(&self) -> Duration {
Duration::from_millis(self.stream_shared.as_ref().map_or(0, |shared| {
shared.ping_time_ms.load(atomic::Ordering::Relaxed) as u64
}))
}
fn send_stream_loader_command(&self, command: StreamLoaderCommand) {
if let Some(ref channel) = self.channel_tx {
// Ignore the error in case the channel has been closed already.
// This means that the file was completely downloaded.
// ignore the error in case the channel has been closed already.
let _ = channel.send(command);
}
}
@ -207,7 +149,7 @@ impl StreamLoaderController {
self.send_stream_loader_command(StreamLoaderCommand::Fetch(range));
}
pub fn fetch_blocking(&self, mut range: Range) -> AudioFileResult {
pub fn fetch_blocking(&self, mut range: Range) {
// signal the stream loader to fetch a range of the file and block until it is loaded.
// ensure the range is within the file's bounds.
@ -220,27 +162,17 @@ impl StreamLoaderController {
self.fetch(range);
if let Some(ref shared) = self.stream_shared {
let mut download_status = shared
.download_status
.lock()
.expect(DOWNLOAD_STATUS_POISON_MSG);
let download_timeout = AudioFetchParams::get().download_timeout;
let mut download_status = shared.download_status.lock().unwrap();
while range.length
> download_status
.downloaded
.contained_length_from_value(range.start)
{
let (new_download_status, wait_result) = shared
download_status = shared
.cond
.wait_timeout(download_status, download_timeout)
.expect(DOWNLOAD_STATUS_POISON_MSG);
download_status = new_download_status;
if wait_result.timed_out() {
return Err(AudioFileError::WaitTimeout.into());
}
.wait_timeout(download_status, DOWNLOAD_TIMEOUT)
.unwrap()
.0;
if range.length
> (download_status
.downloaded
@ -253,52 +185,41 @@ impl StreamLoaderController {
}
}
}
Ok(())
}
pub fn fetch_next_and_wait(
&self,
request_length: usize,
wait_length: usize,
) -> AudioFileResult {
match self.stream_shared {
Some(ref shared) => {
let start = shared.read_position();
pub fn fetch_next(&self, length: usize) {
if let Some(ref shared) = self.stream_shared {
let range = Range {
start: shared.read_position.load(atomic::Ordering::Relaxed),
length,
};
self.fetch(range)
}
}
let request_range = Range {
start,
length: request_length,
};
self.fetch(request_range);
let wait_range = Range {
start,
length: wait_length,
};
self.fetch_blocking(wait_range)
}
None => Ok(()),
pub fn fetch_next_blocking(&self, length: usize) {
if let Some(ref shared) = self.stream_shared {
let range = Range {
start: shared.read_position.load(atomic::Ordering::Relaxed),
length,
};
self.fetch_blocking(range);
}
}
pub fn set_random_access_mode(&self) {
// optimise download strategy for random access
if let Some(ref shared) = self.stream_shared {
shared.set_download_streaming(false)
}
self.send_stream_loader_command(StreamLoaderCommand::RandomAccessMode());
}
pub fn set_stream_mode(&self) {
// optimise download strategy for streaming
if let Some(ref shared) = self.stream_shared {
shared.set_download_streaming(true)
}
self.send_stream_loader_command(StreamLoaderCommand::StreamMode());
}
pub fn close(&self) {
// terminate stream loading and don't load any more data for this file.
self.send_stream_loader_command(StreamLoaderCommand::Close);
self.send_stream_loader_command(StreamLoaderCommand::Close());
}
}
@ -314,58 +235,22 @@ struct AudioFileDownloadStatus {
downloaded: RangeSet,
}
struct AudioFileShared {
cdn_url: String,
file_size: usize,
bytes_per_second: usize,
cond: Condvar,
download_status: Mutex<AudioFileDownloadStatus>,
download_streaming: AtomicBool,
download_slots: Semaphore,
ping_time_ms: AtomicUsize,
read_position: AtomicUsize,
throughput: AtomicUsize,
#[derive(Copy, Clone, PartialEq, Eq)]
enum DownloadStrategy {
RandomAccess(),
Streaming(),
}
impl AudioFileShared {
fn is_download_streaming(&self) -> bool {
self.download_streaming.load(Ordering::Acquire)
}
fn set_download_streaming(&self, streaming: bool) {
self.download_streaming.store(streaming, Ordering::Release)
}
fn ping_time(&self) -> Duration {
let ping_time_ms = self.ping_time_ms.load(Ordering::Acquire);
if ping_time_ms > 0 {
Duration::from_millis(ping_time_ms as u64)
} else {
AudioFetchParams::get().initial_ping_time_estimate
}
}
fn set_ping_time(&self, duration: Duration) {
self.ping_time_ms
.store(duration.as_millis() as usize, Ordering::Release)
}
fn throughput(&self) -> usize {
self.throughput.load(Ordering::Acquire)
}
fn set_throughput(&self, throughput: usize) {
self.throughput.store(throughput, Ordering::Release)
}
fn read_position(&self) -> usize {
self.read_position.load(Ordering::Acquire)
}
fn set_read_position(&self, position: u64) {
self.read_position
.store(position as usize, Ordering::Release)
}
struct AudioFileShared {
file_id: FileId,
file_size: usize,
stream_data_rate: usize,
cond: Condvar,
download_status: Mutex<AudioFileDownloadStatus>,
download_strategy: Mutex<DownloadStrategy>,
number_of_open_requests: AtomicUsize,
ping_time_ms: AtomicUsize,
read_position: AtomicUsize,
}
impl AudioFile {
@ -373,52 +258,69 @@ impl AudioFile {
session: &Session,
file_id: FileId,
bytes_per_second: usize,
) -> Result<AudioFile, Error> {
play_from_beginning: bool,
) -> Result<AudioFile, ChannelError> {
if let Some(file) = session.cache().and_then(|cache| cache.file(file_id)) {
debug!("File {file_id} already in cache");
debug!("File {} already in cache", file_id);
return Ok(AudioFile::Cached(file));
}
debug!("Downloading file {file_id}");
debug!("Downloading file {}", file_id);
let (complete_tx, complete_rx) = oneshot::channel();
let mut initial_data_length = if play_from_beginning {
INITIAL_DOWNLOAD_SIZE
+ max(
(READ_AHEAD_DURING_PLAYBACK.as_secs_f32() * bytes_per_second as f32) as usize,
(INITIAL_PING_TIME_ESTIMATE.as_secs_f32()
* READ_AHEAD_DURING_PLAYBACK_ROUNDTRIPS
* bytes_per_second as f32) as usize,
)
} else {
INITIAL_DOWNLOAD_SIZE
};
if initial_data_length % 4 != 0 {
initial_data_length += 4 - (initial_data_length % 4);
}
let (headers, data) = request_range(session, file_id, 0, initial_data_length).split();
let streaming =
AudioFileStreaming::open(session.clone(), file_id, complete_tx, bytes_per_second);
let streaming = AudioFileStreaming::open(
session.clone(),
data,
initial_data_length,
Instant::now(),
headers,
file_id,
complete_tx,
bytes_per_second,
);
let session_ = session.clone();
session.spawn(complete_rx.map_ok(move |mut file| {
debug!("Downloading file {file_id} complete");
if let Some(cache) = session_.cache() {
if let Some(cache_id) = cache.file_path(file_id) {
if let Err(e) = cache.save_file(file_id, &mut file) {
error!("Error caching file {file_id} to {cache_id:?}: {e}");
} else {
debug!("File {file_id} cached to {cache_id:?}");
}
}
debug!("File {} complete, saving to cache", file_id);
cache.save_file(file_id, &mut file);
} else {
debug!("File {} complete", file_id);
}
}));
Ok(AudioFile::Streaming(streaming.await?))
}
pub fn get_stream_loader_controller(&self) -> Result<StreamLoaderController, Error> {
let controller = match self {
AudioFile::Streaming(stream) => StreamLoaderController {
pub fn get_stream_loader_controller(&self) -> StreamLoaderController {
match self {
AudioFile::Streaming(ref stream) => StreamLoaderController {
channel_tx: Some(stream.stream_loader_command_tx.clone()),
stream_shared: Some(stream.shared.clone()),
file_size: stream.shared.file_size,
},
AudioFile::Cached(file) => StreamLoaderController {
AudioFile::Cached(ref file) => StreamLoaderController {
channel_tx: None,
stream_shared: None,
file_size: file.metadata()?.len() as usize,
file_size: file.metadata().unwrap().len() as usize,
},
};
Ok(controller)
}
}
pub fn is_cached(&self) -> bool {
@ -429,104 +331,53 @@ impl AudioFile {
impl AudioFileStreaming {
pub async fn open(
session: Session,
initial_data_rx: ChannelData,
initial_data_length: usize,
initial_request_sent_time: Instant,
headers: ChannelHeaders,
file_id: FileId,
complete_tx: oneshot::Sender<NamedTempFile>,
bytes_per_second: usize,
) -> Result<AudioFileStreaming, Error> {
let cdn_url = CdnUrl::new(file_id).resolve_audio(&session).await?;
streaming_data_rate: usize,
) -> Result<AudioFileStreaming, ChannelError> {
let (_, data) = headers
.try_filter(|(id, _)| future::ready(*id == 0x3))
.next()
.await
.unwrap()?;
let minimum_download_size = AudioFetchParams::get().minimum_download_size;
let mut response_streamer_url = None;
let urls = cdn_url.try_get_urls()?;
for url in &urls {
// When the audio file is really small, this `download_size` may turn out to be
// larger than the audio file we're going to stream later on. This is OK; requesting
// `Content-Range` > `Content-Length` will return the complete file with status code
// 206 Partial Content.
let mut streamer =
session
.spclient()
.stream_from_cdn(*url, 0, minimum_download_size)?;
// Get the first chunk with the headers to get the file size.
// The remainder of that chunk with possibly also a response body is then
// further processed in `audio_file_fetch`.
let streamer_result = tokio::time::timeout(Duration::from_secs(10), streamer.next())
.await
.map_err(|_| AudioFileError::WaitTimeout.into())
.and_then(|x| x.ok_or_else(|| AudioFileError::NoData.into()))
.and_then(|x| x.map_err(Error::from));
match streamer_result {
Ok(r) => {
response_streamer_url = Some((r, streamer, url));
break;
}
Err(e) => warn!("Fetching {url} failed with error {e:?}, trying next"),
}
}
let Some((response, streamer, url)) = response_streamer_url else {
return Err(Error::unavailable(format!(
"{} URLs failed, none left to try",
urls.len()
)));
};
trace!("Streaming from {url}");
let code = response.status();
if code != StatusCode::PARTIAL_CONTENT {
debug!("Opening audio file expected partial content but got: {code}");
return Err(AudioFileError::StatusCode(code).into());
}
let header_value = response
.headers()
.get(CONTENT_RANGE)
.ok_or(AudioFileError::Header)?;
let str_value = header_value.to_str()?;
let hyphen_index = str_value.find('-').unwrap_or_default();
let slash_index = str_value.find('/').unwrap_or_default();
let upper_bound: usize = str_value[hyphen_index + 1..slash_index].parse()?;
let file_size = str_value[slash_index + 1..].parse()?;
let initial_request = StreamingRequest {
streamer,
initial_response: Some(response),
offset: 0,
length: upper_bound + 1,
};
let size = BigEndian::read_u32(&data) as usize * 4;
let shared = Arc::new(AudioFileShared {
cdn_url: url.to_string(),
file_size,
bytes_per_second,
file_id,
file_size: size,
stream_data_rate: streaming_data_rate,
cond: Condvar::new(),
download_status: Mutex::new(AudioFileDownloadStatus {
requested: RangeSet::new(),
downloaded: RangeSet::new(),
}),
download_streaming: AtomicBool::new(false),
download_slots: Semaphore::new(1),
download_strategy: Mutex::new(DownloadStrategy::RandomAccess()), // start with random access mode until someone tells us otherwise
number_of_open_requests: AtomicUsize::new(0),
ping_time_ms: AtomicUsize::new(0),
read_position: AtomicUsize::new(0),
throughput: AtomicUsize::new(0),
});
let write_file = NamedTempFile::new_in(session.config().tmp_dir.clone())?;
write_file.as_file().set_len(file_size as u64)?;
let mut write_file = NamedTempFile::new().unwrap();
write_file.as_file().set_len(size as u64).unwrap();
write_file.seek(SeekFrom::Start(0)).unwrap();
let read_file = write_file.reopen()?;
let read_file = write_file.reopen().unwrap();
// let (seek_tx, seek_rx) = mpsc::unbounded();
let (stream_loader_command_tx, stream_loader_command_rx) =
mpsc::unbounded_channel::<StreamLoaderCommand>();
session.spawn(audio_file_fetch(
session.clone(),
shared.clone(),
initial_request,
initial_data_rx,
initial_request_sent_time,
initial_data_length,
write_file,
stream_loader_command_rx,
complete_tx,
@ -550,68 +401,83 @@ impl Read for AudioFileStreaming {
}
let length = min(output.len(), self.shared.file_size - offset);
if length == 0 {
return Ok(0);
}
let read_ahead_during_playback = AudioFetchParams::get().read_ahead_during_playback;
let length_to_request = if self.shared.is_download_streaming() {
let length_to_request = length
+ (read_ahead_during_playback.as_secs_f32() * self.shared.bytes_per_second as f32)
as usize;
let length_to_request = match *(self.shared.download_strategy.lock().unwrap()) {
DownloadStrategy::RandomAccess() => length,
DownloadStrategy::Streaming() => {
// Due to the read-ahead stuff, we potentially request more than the actual request demanded.
let ping_time_seconds = Duration::from_millis(
self.shared.ping_time_ms.load(atomic::Ordering::Relaxed) as u64,
)
.as_secs_f32();
// Due to the read-ahead stuff, we potentially request more than the actual request demanded.
min(length_to_request, self.shared.file_size - offset)
} else {
length
let length_to_request = length
+ max(
(READ_AHEAD_DURING_PLAYBACK.as_secs_f32()
* self.shared.stream_data_rate as f32) as usize,
(READ_AHEAD_DURING_PLAYBACK_ROUNDTRIPS
* ping_time_seconds
* self.shared.stream_data_rate as f32) as usize,
);
min(length_to_request, self.shared.file_size - offset)
}
};
let mut ranges_to_request = RangeSet::new();
ranges_to_request.add_range(&Range::new(offset, length_to_request));
let mut download_status = self
.shared
.download_status
.lock()
.expect(DOWNLOAD_STATUS_POISON_MSG);
let mut download_status = self.shared.download_status.lock().unwrap();
ranges_to_request.subtract_range_set(&download_status.downloaded);
ranges_to_request.subtract_range_set(&download_status.requested);
for &range in ranges_to_request.iter() {
self.stream_loader_command_tx
.send(StreamLoaderCommand::Fetch(range))
.map_err(|err| io::Error::new(io::ErrorKind::BrokenPipe, err))?;
.unwrap();
}
let download_timeout = AudioFetchParams::get().download_timeout;
if length == 0 {
return Ok(0);
}
let mut download_message_printed = false;
while !download_status.downloaded.contains(offset) {
let (new_download_status, wait_result) = self
if let DownloadStrategy::Streaming() = *self.shared.download_strategy.lock().unwrap() {
if !download_message_printed {
debug!("Stream waiting for download of file position {}. Downloaded ranges: {}. Pending ranges: {}", offset, download_status.downloaded, download_status.requested.minus(&download_status.downloaded));
download_message_printed = true;
}
}
download_status = self
.shared
.cond
.wait_timeout(download_status, download_timeout)
.expect(DOWNLOAD_STATUS_POISON_MSG);
download_status = new_download_status;
if wait_result.timed_out() {
return Err(io::Error::new(
io::ErrorKind::TimedOut,
Error::deadline_exceeded(AudioFileError::WaitTimeout),
));
}
.wait_timeout(download_status, DOWNLOAD_TIMEOUT)
.unwrap()
.0;
}
let available_length = download_status
.downloaded
.contained_length_from_value(offset);
assert!(available_length > 0);
drop(download_status);
self.position = self.read_file.seek(SeekFrom::Start(offset as u64))?;
self.position = self.read_file.seek(SeekFrom::Start(offset as u64)).unwrap();
let read_len = min(length, available_length);
let read_len = self.read_file.read(&mut output[..read_len])?;
if download_message_printed {
debug!(
"Read at postion {} completed. {} bytes returned, {} bytes were requested.",
offset,
read_len,
output.len()
);
}
self.position += read_len as u64;
self.shared.set_read_position(self.position);
self.shared
.read_position
.store(self.position as usize, atomic::Ordering::Relaxed);
Ok(read_len)
}
@ -619,45 +485,11 @@ impl Read for AudioFileStreaming {
impl Seek for AudioFileStreaming {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
// If we are already at this position, we don't need to switch download mode.
// These checks and locks are less expensive than interrupting streaming.
let current_position = self.position as i64;
let requested_pos = match pos {
SeekFrom::Start(pos) => pos as i64,
SeekFrom::End(pos) => self.shared.file_size as i64 - pos - 1,
SeekFrom::Current(pos) => current_position + pos,
};
if requested_pos == current_position {
return Ok(current_position as u64);
}
// Again if we have already downloaded this part.
let available = self
.shared
.download_status
.lock()
.expect(DOWNLOAD_STATUS_POISON_MSG)
.downloaded
.contains(requested_pos as usize);
let mut was_streaming = false;
if !available {
// Ensure random access mode if we need to download this part.
// Checking whether we are streaming now is a micro-optimization
// to save an atomic load.
was_streaming = self.shared.is_download_streaming();
if was_streaming {
self.shared.set_download_streaming(false);
}
}
self.position = self.read_file.seek(pos)?;
self.shared.set_read_position(self.position);
if !available && was_streaming {
self.shared.set_download_streaming(true);
}
// Do not seek past EOF
self.shared
.read_position
.store(self.position as usize, atomic::Ordering::Relaxed);
Ok(self.position)
}
}

View file

@ -1,150 +1,143 @@
use std::{
cmp::{max, min},
io::{Seek, SeekFrom, Write},
sync::Arc,
time::{Duration, Instant},
};
use std::cmp::{max, min};
use std::io::{Seek, SeekFrom, Write};
use std::sync::{atomic, Arc};
use std::time::{Duration, Instant};
use atomic::Ordering;
use byteorder::{BigEndian, WriteBytesExt};
use bytes::Bytes;
use futures_util::StreamExt;
use http_body_util::BodyExt;
use hyper::StatusCode;
use librespot_core::channel::{Channel, ChannelData};
use librespot_core::session::Session;
use librespot_core::spotify_id::FileId;
use tempfile::NamedTempFile;
use tokio::sync::{mpsc, oneshot};
use librespot_core::{Error, http_client::HttpClient, session::Session};
use crate::range_set::{Range, RangeSet};
use super::{AudioFileShared, DownloadStrategy, StreamLoaderCommand};
use super::{
AudioFetchParams, AudioFileError, AudioFileResult, AudioFileShared, StreamLoaderCommand,
StreamingRequest,
FAST_PREFETCH_THRESHOLD_FACTOR, MAXIMUM_ASSUMED_PING_TIME, MAX_PREFETCH_REQUESTS,
MINIMUM_DOWNLOAD_SIZE, PREFETCH_THRESHOLD_FACTOR,
};
pub fn request_range(session: &Session, file: FileId, offset: usize, length: usize) -> Channel {
assert!(
offset % 4 == 0,
"Range request start positions must be aligned by 4 bytes."
);
assert!(
length % 4 == 0,
"Range request range lengths must be aligned by 4 bytes."
);
let start = offset / 4;
let end = (offset + length) / 4;
let (id, channel) = session.channel().allocate();
let mut data: Vec<u8> = Vec::new();
data.write_u16::<BigEndian>(id).unwrap();
data.write_u8(0).unwrap();
data.write_u8(1).unwrap();
data.write_u16::<BigEndian>(0x0000).unwrap();
data.write_u32::<BigEndian>(0x00000000).unwrap();
data.write_u32::<BigEndian>(0x00009C40).unwrap();
data.write_u32::<BigEndian>(0x00020000).unwrap();
data.write(&file.0).unwrap();
data.write_u32::<BigEndian>(start as u32).unwrap();
data.write_u32::<BigEndian>(end as u32).unwrap();
session.send_packet(0x8, data);
channel
}
struct PartialFileData {
offset: usize,
data: Bytes,
}
enum ReceivedData {
Throughput(usize),
ResponseTime(Duration),
Data(PartialFileData),
}
const ONE_SECOND: Duration = Duration::from_secs(1);
const DOWNLOAD_STATUS_POISON_MSG: &str = "audio download status mutex should not be poisoned";
async fn receive_data(
shared: Arc<AudioFileShared>,
file_data_tx: mpsc::UnboundedSender<ReceivedData>,
mut request: StreamingRequest,
) -> AudioFileResult {
let mut offset = request.offset;
let mut actual_length = 0;
mut data_rx: ChannelData,
initial_data_offset: usize,
initial_request_length: usize,
request_sent_time: Instant,
) {
let mut data_offset = initial_data_offset;
let mut request_length = initial_request_length;
let permit = shared.download_slots.acquire().await?;
let old_number_of_request = shared
.number_of_open_requests
.fetch_add(1, Ordering::SeqCst);
let request_time = Instant::now();
let mut measure_ping_time = true;
let mut measure_throughput = true;
let mut measure_ping_time = old_number_of_request == 0;
let result: Result<_, Error> = loop {
let response = match request.initial_response.take() {
Some(data) => {
// the request was already made outside of this function
measure_ping_time = false;
measure_throughput = false;
data
}
None => match request.streamer.next().await {
Some(Ok(response)) => response,
Some(Err(e)) => break Err(e.into()),
None => {
if actual_length != request.length {
let msg = format!("did not expect body to contain {actual_length} bytes");
break Err(Error::data_loss(msg));
}
break Ok(());
}
},
let result = loop {
let data = match data_rx.next().await {
Some(Ok(data)) => data,
Some(Err(e)) => break Err(e),
None => break Ok(()),
};
if measure_ping_time {
let duration = Instant::now().duration_since(request_time);
// may be zero if we are handling an initial response
if duration.as_millis() > 0 {
file_data_tx.send(ReceivedData::ResponseTime(duration))?;
measure_ping_time = false;
let mut duration = Instant::now() - request_sent_time;
if duration > MAXIMUM_ASSUMED_PING_TIME {
duration = MAXIMUM_ASSUMED_PING_TIME;
}
let _ = file_data_tx.send(ReceivedData::ResponseTime(duration));
measure_ping_time = false;
}
let code = response.status();
if code != StatusCode::PARTIAL_CONTENT {
if code == StatusCode::TOO_MANY_REQUESTS {
if let Some(duration) = HttpClient::get_retry_after(response.headers()) {
warn!(
"Rate limiting, retrying in {} seconds...",
duration.as_secs()
);
// sleeping here means we hold onto this streamer "slot"
// (we don't decrease the number of open requests)
tokio::time::sleep(duration).await;
}
}
break Err(AudioFileError::StatusCode(code).into());
}
let body = response.into_body();
let data = match body.collect().await.map(|b| b.to_bytes()) {
Ok(bytes) => bytes,
Err(e) => break Err(e.into()),
};
let data_size = data.len();
file_data_tx.send(ReceivedData::Data(PartialFileData { offset, data }))?;
let _ = file_data_tx.send(ReceivedData::Data(PartialFileData {
offset: data_offset,
data,
}));
data_offset += data_size;
if request_length < data_size {
warn!(
"Data receiver for range {} (+{}) received more data from server than requested.",
initial_data_offset, initial_request_length
);
request_length = 0;
} else {
request_length -= data_size;
}
actual_length += data_size;
offset += data_size;
if request_length == 0 {
break Ok(());
}
};
drop(request.streamer);
if request_length > 0 {
let missing_range = Range::new(data_offset, request_length);
if measure_throughput {
let duration = Instant::now().duration_since(request_time).as_millis();
if actual_length > 0 && duration > 0 {
let throughput = ONE_SECOND.as_millis() as usize * actual_length / duration as usize;
file_data_tx.send(ReceivedData::Throughput(throughput))?;
}
let mut download_status = shared.download_status.lock().unwrap();
download_status.requested.subtract_range(&missing_range);
shared.cond.notify_all();
}
let bytes_remaining = request.length - actual_length;
if bytes_remaining > 0 {
{
let missing_range = Range::new(offset, bytes_remaining);
let mut download_status = shared
.download_status
.lock()
.expect(DOWNLOAD_STATUS_POISON_MSG);
download_status.requested.subtract_range(&missing_range);
shared.cond.notify_all();
}
}
shared
.number_of_open_requests
.fetch_sub(1, Ordering::SeqCst);
drop(permit);
if let Err(e) = result {
error!(
"Streamer error requesting range {} +{}: {:?}",
request.offset, request.length, e
if result.is_err() {
warn!(
"Error from channel for data receiver for range {} (+{}).",
initial_data_offset, initial_request_length
);
} else if request_length > 0 {
warn!(
"Data receiver for range {} (+{}) received less data from server than requested.",
initial_data_offset, initial_request_length
);
return Err(e);
}
Ok(())
}
struct AudioFileFetch {
@ -155,8 +148,6 @@ struct AudioFileFetch {
file_data_tx: mpsc::UnboundedSender<ReceivedData>,
complete_tx: Option<oneshot::Sender<NamedTempFile>>,
network_response_times: Vec<Duration>,
params: AudioFetchParams,
}
// Might be replaced by enum from std once stable
@ -167,150 +158,116 @@ enum ControlFlow {
}
impl AudioFileFetch {
fn has_download_slots_available(&self) -> bool {
self.shared.download_slots.available_permits() > 0
fn get_download_strategy(&mut self) -> DownloadStrategy {
*(self.shared.download_strategy.lock().unwrap())
}
fn download_range(&mut self, offset: usize, mut length: usize) -> AudioFileResult {
if length < self.params.minimum_download_size {
length = self.params.minimum_download_size;
fn download_range(&mut self, mut offset: usize, mut length: usize) {
if length < MINIMUM_DOWNLOAD_SIZE {
length = MINIMUM_DOWNLOAD_SIZE;
}
// If we are in streaming mode (so not seeking) then start downloading as large
// of chunks as possible for better throughput and improved CPU usage, while
// still being reasonably responsive (~1 second) in case we want to seek.
if self.shared.is_download_streaming() {
let throughput = self.shared.throughput();
length = max(length, throughput);
// ensure the values are within the bounds and align them by 4 for the spotify protocol.
if offset >= self.shared.file_size {
return;
}
if length == 0 {
return;
}
if offset + length > self.shared.file_size {
length = self.shared.file_size - offset;
}
if offset % 4 != 0 {
length += offset % 4;
offset -= offset % 4;
}
if length % 4 != 0 {
length += 4 - (length % 4);
}
let mut ranges_to_request = RangeSet::new();
ranges_to_request.add_range(&Range::new(offset, length));
// The iteration that follows spawns streamers fast, without awaiting them,
// so holding the lock for the entire scope of this function should be faster
// than locking and unlocking multiple times.
let mut download_status = self
.shared
.download_status
.lock()
.expect(DOWNLOAD_STATUS_POISON_MSG);
let mut download_status = self.shared.download_status.lock().unwrap();
ranges_to_request.subtract_range_set(&download_status.downloaded);
ranges_to_request.subtract_range_set(&download_status.requested);
// TODO : refresh cdn_url when the token expired
for range in ranges_to_request.iter() {
let streamer = self.session.spclient().stream_from_cdn(
&self.shared.cdn_url,
let (_headers, data) = request_range(
&self.session,
self.shared.file_id,
range.start,
range.length,
)?;
)
.split();
download_status.requested.add_range(range);
let streaming_request = StreamingRequest {
streamer,
initial_response: None,
offset: range.start,
length: range.length,
};
self.session.spawn(receive_data(
self.shared.clone(),
self.file_data_tx.clone(),
streaming_request,
data,
range.start,
range.length,
Instant::now(),
));
}
Ok(())
}
fn pre_fetch_more_data(&mut self, bytes: usize) -> AudioFileResult {
// determine what is still missing
let mut missing_data = RangeSet::new();
missing_data.add_range(&Range::new(0, self.shared.file_size));
{
let download_status = self
.shared
.download_status
.lock()
.expect(DOWNLOAD_STATUS_POISON_MSG);
missing_data.subtract_range_set(&download_status.downloaded);
missing_data.subtract_range_set(&download_status.requested);
}
fn pre_fetch_more_data(&mut self, bytes: usize, max_requests_to_send: usize) {
let mut bytes_to_go = bytes;
let mut requests_to_go = max_requests_to_send;
// download data from after the current read position first
let mut tail_end = RangeSet::new();
let read_position = self.shared.read_position();
tail_end.add_range(&Range::new(
read_position,
self.shared.file_size - read_position,
));
let tail_end = tail_end.intersection(&missing_data);
if !tail_end.is_empty() {
let range = tail_end.get_range(0);
let offset = range.start;
let length = min(range.length, bytes);
self.download_range(offset, length)?;
} else if !missing_data.is_empty() {
// ok, the tail is downloaded, download something from the beginning.
let range = missing_data.get_range(0);
let offset = range.start;
let length = min(range.length, bytes);
self.download_range(offset, length)?;
}
Ok(())
}
fn handle_file_data(&mut self, data: ReceivedData) -> Result<ControlFlow, Error> {
match data {
ReceivedData::Throughput(mut throughput) => {
if throughput < self.params.minimum_throughput {
warn!(
"Throughput {} kbps lower than minimum {}, setting to minimum",
throughput / 1000,
self.params.minimum_throughput / 1000,
);
throughput = self.params.minimum_throughput;
}
let old_throughput = self.shared.throughput();
let avg_throughput = if old_throughput > 0 {
(old_throughput + throughput) / 2
} else {
throughput
};
// print when the new estimate deviates by more than 10% from the last
if f32::abs((avg_throughput as f32 - old_throughput as f32) / old_throughput as f32)
> 0.1
{
trace!(
"Throughput now estimated as: {} kbps",
avg_throughput / 1000
);
}
self.shared.set_throughput(avg_throughput);
while bytes_to_go > 0 && requests_to_go > 0 {
// determine what is still missing
let mut missing_data = RangeSet::new();
missing_data.add_range(&Range::new(0, self.shared.file_size));
{
let download_status = self.shared.download_status.lock().unwrap();
missing_data.subtract_range_set(&download_status.downloaded);
missing_data.subtract_range_set(&download_status.requested);
}
ReceivedData::ResponseTime(mut response_time) => {
if response_time > self.params.maximum_assumed_ping_time {
warn!(
"Time to first byte {} ms exceeds maximum {}, setting to maximum",
response_time.as_millis(),
self.params.maximum_assumed_ping_time.as_millis()
);
response_time = self.params.maximum_assumed_ping_time;
}
let old_ping_time_ms = self.shared.ping_time().as_millis();
// download data from after the current read position first
let mut tail_end = RangeSet::new();
let read_position = self.shared.read_position.load(Ordering::Relaxed);
tail_end.add_range(&Range::new(
read_position,
self.shared.file_size - read_position,
));
let tail_end = tail_end.intersection(&missing_data);
if !tail_end.is_empty() {
let range = tail_end.get_range(0);
let offset = range.start;
let length = min(range.length, bytes_to_go);
self.download_range(offset, length);
requests_to_go -= 1;
bytes_to_go -= length;
} else if !missing_data.is_empty() {
// ok, the tail is downloaded, download something from the beginning.
let range = missing_data.get_range(0);
let offset = range.start;
let length = min(range.length, bytes_to_go);
self.download_range(offset, length);
requests_to_go -= 1;
bytes_to_go -= length;
} else {
return;
}
}
}
fn handle_file_data(&mut self, data: ReceivedData) -> ControlFlow {
match data {
ReceivedData::ResponseTime(response_time) => {
// chatty
// trace!("Ping time estimated as: {}ms", response_time.as_millis());
// prune old response times. Keep at most two so we can push a third.
while self.network_response_times.len() >= 3 {
@ -321,197 +278,165 @@ impl AudioFileFetch {
self.network_response_times.push(response_time);
// stats::median is experimental. So we calculate the median of up to three ourselves.
let ping_time = {
match self.network_response_times.len() {
1 => self.network_response_times[0],
2 => (self.network_response_times[0] + self.network_response_times[1]) / 2,
3 => {
let mut times = self.network_response_times.clone();
times.sort_unstable();
times[1]
}
_ => unreachable!(),
let ping_time = match self.network_response_times.len() {
1 => self.network_response_times[0],
2 => (self.network_response_times[0] + self.network_response_times[1]) / 2,
3 => {
let mut times = self.network_response_times.clone();
times.sort_unstable();
times[1]
}
_ => unreachable!(),
};
// print when the new estimate deviates by more than 10% from the last
if f32::abs(
(ping_time.as_millis() as f32 - old_ping_time_ms as f32)
/ old_ping_time_ms as f32,
) > 0.1
{
trace!(
"Time to first byte now estimated as: {} ms",
ping_time.as_millis()
);
}
// store our new estimate for everyone to see
self.shared.set_ping_time(ping_time);
self.shared
.ping_time_ms
.store(ping_time.as_millis() as usize, Ordering::Relaxed);
}
ReceivedData::Data(data) => {
match self.output.as_mut() {
Some(output) => {
output.seek(SeekFrom::Start(data.offset as u64))?;
output.write_all(data.data.as_ref())?;
}
None => return Err(AudioFileError::Output.into()),
}
self.output
.as_mut()
.unwrap()
.seek(SeekFrom::Start(data.offset as u64))
.unwrap();
self.output
.as_mut()
.unwrap()
.write_all(data.data.as_ref())
.unwrap();
let mut download_status = self.shared.download_status.lock().unwrap();
let received_range = Range::new(data.offset, data.data.len());
download_status.downloaded.add_range(&received_range);
self.shared.cond.notify_all();
let full = {
let mut download_status = self
.shared
.download_status
.lock()
.expect(DOWNLOAD_STATUS_POISON_MSG);
download_status.downloaded.add_range(&received_range);
self.shared.cond.notify_all();
let full = download_status.downloaded.contained_length_from_value(0)
>= self.shared.file_size;
download_status.downloaded.contained_length_from_value(0)
>= self.shared.file_size
};
drop(download_status);
if full {
self.finish()?;
return Ok(ControlFlow::Break);
self.finish();
return ControlFlow::Break;
}
}
}
Ok(ControlFlow::Continue)
ControlFlow::Continue
}
fn handle_stream_loader_command(
&mut self,
cmd: StreamLoaderCommand,
) -> Result<ControlFlow, Error> {
fn handle_stream_loader_command(&mut self, cmd: StreamLoaderCommand) -> ControlFlow {
match cmd {
StreamLoaderCommand::Fetch(request) => {
self.download_range(request.start, request.length)?
self.download_range(request.start, request.length);
}
StreamLoaderCommand::Close => return Ok(ControlFlow::Break),
StreamLoaderCommand::RandomAccessMode() => {
*(self.shared.download_strategy.lock().unwrap()) = DownloadStrategy::RandomAccess();
}
StreamLoaderCommand::StreamMode() => {
*(self.shared.download_strategy.lock().unwrap()) = DownloadStrategy::Streaming();
}
StreamLoaderCommand::Close() => return ControlFlow::Break,
}
Ok(ControlFlow::Continue)
ControlFlow::Continue
}
fn finish(&mut self) -> AudioFileResult {
let output = self.output.take();
fn finish(&mut self) {
let mut output = self.output.take().unwrap();
let complete_tx = self.complete_tx.take().unwrap();
let complete_tx = self.complete_tx.take();
if let Some(mut output) = output {
output.rewind()?;
if let Some(complete_tx) = complete_tx {
complete_tx
.send(output)
.map_err(|_| AudioFileError::Channel)?;
}
}
Ok(())
output.seek(SeekFrom::Start(0)).unwrap();
let _ = complete_tx.send(output);
}
}
pub(super) async fn audio_file_fetch(
session: Session,
shared: Arc<AudioFileShared>,
initial_request: StreamingRequest,
initial_data_rx: ChannelData,
initial_request_sent_time: Instant,
initial_data_length: usize,
output: NamedTempFile,
mut stream_loader_command_rx: mpsc::UnboundedReceiver<StreamLoaderCommand>,
complete_tx: oneshot::Sender<NamedTempFile>,
) -> AudioFileResult {
) {
let (file_data_tx, mut file_data_rx) = mpsc::unbounded_channel();
{
let requested_range = Range::new(
initial_request.offset,
initial_request.offset + initial_request.length,
);
let mut download_status = shared
.download_status
.lock()
.expect(DOWNLOAD_STATUS_POISON_MSG);
let requested_range = Range::new(0, initial_data_length);
let mut download_status = shared.download_status.lock().unwrap();
download_status.requested.add_range(&requested_range);
}
session.spawn(receive_data(
shared.clone(),
file_data_tx.clone(),
initial_request,
initial_data_rx,
0,
initial_data_length,
initial_request_sent_time,
));
let params = AudioFetchParams::get();
let mut fetch = AudioFileFetch {
session: session.clone(),
session,
shared,
output: Some(output),
file_data_tx,
complete_tx: Some(complete_tx),
network_response_times: Vec::with_capacity(3),
params: params.clone(),
};
loop {
tokio::select! {
cmd = stream_loader_command_rx.recv() => {
match cmd {
Some(cmd) => {
if fetch.handle_stream_loader_command(cmd)? == ControlFlow::Break {
break;
}
}
None => break,
}
}
data = file_data_rx.recv() => {
match data {
Some(data) => {
if fetch.handle_file_data(data)? == ControlFlow::Break {
break;
}
}
None => break,
if cmd.map_or(true, |cmd| fetch.handle_stream_loader_command(cmd) == ControlFlow::Break) {
break;
}
},
else => (),
data = file_data_rx.recv() => {
if data.map_or(true, |data| fetch.handle_file_data(data) == ControlFlow::Break) {
break;
}
}
}
if fetch.shared.is_download_streaming() && fetch.has_download_slots_available() {
let bytes_pending: usize = {
let download_status = fetch
.shared
.download_status
.lock()
.expect(DOWNLOAD_STATUS_POISON_MSG);
if fetch.get_download_strategy() == DownloadStrategy::Streaming() {
let number_of_open_requests =
fetch.shared.number_of_open_requests.load(Ordering::SeqCst);
if number_of_open_requests < MAX_PREFETCH_REQUESTS {
let max_requests_to_send = MAX_PREFETCH_REQUESTS - number_of_open_requests;
download_status
.requested
.minus(&download_status.downloaded)
.len()
};
let bytes_pending: usize = {
let download_status = fetch.shared.download_status.lock().unwrap();
download_status
.requested
.minus(&download_status.downloaded)
.len()
};
let ping_time_seconds = fetch.shared.ping_time().as_secs_f32();
let throughput = fetch.shared.throughput();
let ping_time_seconds =
Duration::from_millis(fetch.shared.ping_time_ms.load(Ordering::Relaxed) as u64)
.as_secs_f32();
let download_rate = fetch.session.channel().get_download_rate_estimate();
let desired_pending_bytes = max(
(params.prefetch_threshold_factor
* ping_time_seconds
* fetch.shared.bytes_per_second as f32) as usize,
(ping_time_seconds * throughput as f32) as usize,
);
let desired_pending_bytes = max(
(PREFETCH_THRESHOLD_FACTOR
* ping_time_seconds
* fetch.shared.stream_data_rate as f32) as usize,
(FAST_PREFETCH_THRESHOLD_FACTOR * ping_time_seconds * download_rate as f32)
as usize,
);
if bytes_pending < desired_pending_bytes {
fetch.pre_fetch_more_data(desired_pending_bytes - bytes_pending)?;
if bytes_pending < desired_pending_bytes {
fetch.pre_fetch_more_data(
desired_pending_bytes - bytes_pending,
max_requests_to_send,
);
}
}
}
}
Ok(())
}

View file

@ -1,3 +1,5 @@
#![allow(clippy::unused_io_amount, clippy::too_many_arguments)]
#[macro_use]
extern crate log;
@ -7,4 +9,8 @@ mod fetch;
mod range_set;
pub use decrypt::AudioDecrypt;
pub use fetch::{AudioFetchParams, AudioFile, AudioFileError, StreamLoaderController};
pub use fetch::{AudioFile, StreamLoaderController};
pub use fetch::{
READ_AHEAD_BEFORE_PLAYBACK, READ_AHEAD_BEFORE_PLAYBACK_ROUNDTRIPS, READ_AHEAD_DURING_PLAYBACK,
READ_AHEAD_DURING_PLAYBACK_ROUNDTRIPS,
};

View file

@ -1,8 +1,6 @@
use std::{
cmp::{max, min},
fmt,
slice::Iter,
};
use std::cmp::{max, min};
use std::fmt;
use std::slice::Iter;
#[derive(Copy, Clone, Debug)]
pub struct Range {
@ -12,7 +10,7 @@ pub struct Range {
impl fmt::Display for Range {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "[{}, {}]", self.start, self.start + self.length - 1)
return write!(f, "[{}, {}]", self.start, self.start + self.length - 1);
}
}
@ -26,16 +24,16 @@ impl Range {
}
}
#[derive(Debug, Clone)]
#[derive(Clone)]
pub struct RangeSet {
ranges: Vec<Range>,
}
impl fmt::Display for RangeSet {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(")?;
write!(f, "(").unwrap();
for range in self.ranges.iter() {
write!(f, "{range}")?;
write!(f, "{}", range).unwrap();
}
write!(f, ")")
}
@ -60,7 +58,7 @@ impl RangeSet {
self.ranges[index]
}
pub fn iter(&self) -> Iter<'_, Range> {
pub fn iter(&self) -> Iter<Range> {
self.ranges.iter()
}
@ -229,6 +227,7 @@ impl RangeSet {
self.ranges[self_index].end(),
other.ranges[other_index].end(),
);
assert!(new_start <= new_end);
result.add_range(&Range::new(new_start, new_end - new_start));
if self.ranges[self_index].end() <= other.ranges[other_index].end() {
self_index += 1;

View file

@ -1,33 +1,38 @@
[package]
name = "librespot-connect"
version.workspace = true
rust-version.workspace = true
version = "0.4.2"
authors = ["Paul Lietar <paul@lietar.net>"]
license.workspace = true
description = "The Spotify Connect logic for librespot"
repository.workspace = true
edition.workspace = true
[features]
# Refer to the workspace Cargo.toml for the list of features
default = ["native-tls"]
# TLS backend propagation
native-tls = ["librespot-core/native-tls"]
rustls-tls-native-roots = ["librespot-core/rustls-tls-native-roots"]
rustls-tls-webpki-roots = ["librespot-core/rustls-tls-webpki-roots"]
description = "The discovery and Spotify Connect logic for librespot"
license = "MIT"
repository = "https://github.com/librespot-org/librespot"
edition = "2018"
[dependencies]
librespot-core = { version = "0.7.1", path = "../core", default-features = false }
librespot-playback = { version = "0.7.1", path = "../playback", default-features = false }
librespot-protocol = { version = "0.7.1", path = "../protocol", default-features = false }
futures-util = { version = "0.3", default-features = false, features = ["std"] }
form_urlencoded = "1.0"
futures-util = { version = "0.3.5", default_features = false }
log = "0.4"
protobuf = "3.7"
rand = { version = "0.9", default-features = false, features = ["small_rng"] }
protobuf = "2.14.0"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
thiserror = "2"
tokio = { version = "1", features = ["macros", "sync"] }
tokio-stream = { version = "0.1", default-features = false }
uuid = { version = "1.18", default-features = false, features = ["v4"] }
tokio = { version = "1.0", features = ["macros", "sync"] }
tokio-stream = "0.1.1"
[dependencies.librespot-core]
path = "../core"
version = "0.4.2"
[dependencies.librespot-playback]
path = "../playback"
version = "0.4.2"
[dependencies.librespot-protocol]
path = "../protocol"
version = "0.4.2"
[dependencies.librespot-discovery]
path = "../discovery"
version = "0.4.2"
[features]
with-dns-sd = ["librespot-discovery/with-dns-sd"]

View file

@ -1,63 +0,0 @@
[//]: # (This readme is optimized for inline rustdoc, if some links don't work, they will when included in lib.rs)
# Connect
The connect module of librespot. Provides the option to create your own connect device
and stream to it like any other official spotify client.
The [`Spirc`] is the entrypoint to creating your own connect device. It can be
configured with the given [`ConnectConfig`] options and requires some additional data
to start up the device.
Creating a new [`Spirc`] returns two items: the [`Spirc`] itself, which can be used
to control the local connect device, and a [`Future`](std::future::Future) — let's
name it `SpircTask` — that starts and executes the event loop of the connect device
when awaited.
A basic example in which the `Spirc` and `SpircTask` is used can be found here:
[`examples/play_connect.rs`](../examples/play_connect.rs).
# Example
```rust
use std::{future::Future, thread};
use librespot_connect::{ConnectConfig, Spirc};
use librespot_core::{authentication::Credentials, Error, Session, SessionConfig};
use librespot_playback::{
audio_backend, mixer,
config::{AudioFormat, PlayerConfig},
mixer::{MixerConfig, NoOpVolume},
player::Player
};
async fn create_basic_spirc() -> Result<(), Error> {
let credentials = Credentials::with_access_token("access-token-here");
let session = Session::new(SessionConfig::default(), None);
let backend = audio_backend::find(None).expect("will default to rodio");
let player = Player::new(
PlayerConfig::default(),
session.clone(),
Box::new(NoOpVolume),
move || {
let format = AudioFormat::default();
let device = None;
backend(device, format)
},
);
let mixer = mixer::find(None).expect("will default to SoftMixer");
let (spirc, spirc_task): (Spirc, _) = Spirc::new(
ConnectConfig::default(),
session,
credentials,
player,
mixer(MixerConfig::default())?
).await?;
Ok(())
}
```

88
connect/src/context.rs Normal file
View file

@ -0,0 +1,88 @@
use crate::core::spotify_id::SpotifyId;
use crate::protocol::spirc::TrackRef;
use serde::Deserialize;
/// A radio-station context as returned by the station endpoint.
#[derive(Deserialize, Debug)]
pub struct StationContext {
    pub uri: Option<String>,
    // URL for fetching the next page of tracks for this station.
    pub next_page_url: String,
    // JSON track objects are converted into protobuf `TrackRef`s during
    // deserialization.
    #[serde(deserialize_with = "deserialize_protobuf_TrackRef")]
    pub tracks: Vec<TrackRef>,
    // Not required for core functionality
    // pub seeds: Vec<String>,
    // #[serde(rename = "imageUri")]
    // pub image_uri: String,
    // pub subtitle: Option<String>,
    // pub subtitles: Vec<String>,
    // #[serde(rename = "subtitleUri")]
    // pub subtitle_uri: Option<String>,
    // pub title: String,
    // #[serde(rename = "titleUri")]
    // pub title_uri: String,
    // pub related_artists: Vec<ArtistContext>,
}
/// One page of a paged context response.
#[derive(Deserialize, Debug)]
pub struct PageContext {
    pub uri: String,
    // URL for fetching the next page.
    pub next_page_url: String,
    // JSON track objects are converted into protobuf `TrackRef`s during
    // deserialization.
    #[serde(deserialize_with = "deserialize_protobuf_TrackRef")]
    pub tracks: Vec<TrackRef>,
    // Not required for core functionality
    // pub url: String,
    // // pub restrictions:
}
/// A single track entry within a context response.
#[derive(Deserialize, Debug)]
pub struct TrackContext {
    // Base62-encoded track id; the JSON field is named `original_gid`.
    #[serde(rename = "original_gid")]
    pub gid: String,
    pub uri: String,
    pub uid: String,
    // Not required for core functionality
    // pub album_uri: String,
    // pub artist_uri: String,
    // pub metadata: MetadataContext,
}
/// Artist metadata as it appears in context responses.
/// Currently deserialized but not read anywhere, hence `dead_code`.
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct ArtistContext {
    artist_name: String,
    artist_uri: String,
    image_uri: String,
}
/// Per-track metadata as it appears in context responses.
/// Currently deserialized but not read anywhere, hence `dead_code`.
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
pub struct MetadataContext {
    album_title: String,
    artist_name: String,
    artist_uri: String,
    image_url: String,
    title: String,
    uid: String,
}
#[allow(non_snake_case)]
fn deserialize_protobuf_TrackRef<'d, D>(de: D) -> Result<Vec<TrackRef>, D::Error>
where
D: serde::Deserializer<'d>,
{
let v: Vec<TrackContext> = serde::Deserialize::deserialize(de)?;
let track_vec = v
.iter()
.map(|v| {
let mut t = TrackRef::new();
// This has got to be the most round about way of doing this.
t.set_gid(SpotifyId::from_base62(&v.gid).unwrap().to_raw().to_vec());
t.set_uri(v.uri.to_owned());
t
})
.collect::<Vec<TrackRef>>();
Ok(track_vec)
}

View file

@ -1,346 +0,0 @@
use crate::{
core::{Error, Session},
protocol::{
autoplay_context_request::AutoplayContextRequest, context::Context,
transfer_state::TransferState,
},
state::{ConnectState, context::ContextType},
};
use std::{
cmp::PartialEq,
collections::{HashMap, VecDeque},
fmt::{Display, Formatter},
hash::Hash,
time::Duration,
};
use thiserror::Error as ThisError;
use tokio::time::Instant;
/// The source a context is resolved from: either a bare uri that still has
/// to be fetched, or an already available [`Context`].
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
enum Resolve {
    Uri(String),
    Context(Context),
}
/// How a resolved context should be applied to the current state.
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
pub(super) enum ContextAction {
    // Append the resolved page to the existing context.
    Append,
    // Replace the current context with the resolved one.
    Replace,
}
/// A queued request to resolve a context, together with how the result
/// should be applied to the state.
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
pub(super) struct ResolveContext {
    resolve: Resolve,
    // Fallback uri used when `resolve` does not yield a valid uri.
    fallback: Option<String>,
    // Which context type (default/autoplay) this request updates.
    update: ContextType,
    action: ContextAction,
}
impl ResolveContext {
    /// Builds a request that appends the given uri to the default context.
    fn append_context(uri: impl Into<String>) -> Self {
        Self {
            resolve: Resolve::Uri(uri.into()),
            fallback: None,
            update: ContextType::Default,
            action: ContextAction::Append,
        }
    }

    /// Builds a request from a bare uri with an optional fallback uri
    /// (an empty fallback counts as "no fallback").
    pub fn from_uri(
        uri: impl Into<String>,
        fallback: impl Into<String>,
        update: ContextType,
        action: ContextAction,
    ) -> Self {
        let fallback = Some(fallback.into()).filter(|uri| !uri.is_empty());
        Self {
            resolve: Resolve::Uri(uri.into()),
            fallback,
            update,
            action,
        }
    }

    /// Builds a request from an already available context.
    pub fn from_context(context: Context, update: ContextType, action: ContextAction) -> Self {
        Self {
            resolve: Resolve::Context(context),
            fallback: None,
            update,
            action,
        }
    }

    /// The uri which should be used to resolve the context; this might not
    /// be the context uri itself.
    fn resolve_uri(&self) -> Option<&str> {
        // it's important to call this always, or at least for every ResolveContext,
        // otherwise we might not even check whether we need to fall back and
        // would just use the fallback uri
        let valid = match &self.resolve {
            Resolve::Uri(uri) => ConnectState::valid_resolve_uri(uri),
            Resolve::Context(ctx) => {
                ConnectState::find_valid_uri(ctx.uri.as_deref(), ctx.pages.first())
            }
        };
        valid.or(self.fallback.as_deref())
    }

    /// The actual context uri.
    fn context_uri(&self) -> &str {
        match &self.resolve {
            Resolve::Uri(uri) => uri.as_str(),
            Resolve::Context(ctx) => ctx.uri.as_deref().unwrap_or_default(),
        }
    }
}
impl Display for ResolveContext {
    /// Human-readable summary used in debug/trace log messages.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "resolve_uri: <{:?}>, context_uri: <{}>, update: <{:?}>",
            self.resolve_uri(),
            self.context_uri(),
            self.update,
        )
    }
}
/// Errors that can occur while resolving or applying contexts.
#[derive(Debug, ThisError)]
enum ContextResolverError {
    #[error("no next context to resolve")]
    NoNext,
    #[error("tried appending context with {0} pages")]
    UnexpectedPagesSize(usize),
    #[error("tried resolving not allowed context: {0:?}")]
    NotAllowedContext(String),
}
// Map resolver errors into the crate-wide error type as failed preconditions.
impl From<ContextResolverError> for Error {
    fn from(value: ContextResolverError) -> Self {
        Error::failed_precondition(value)
    }
}
/// Resolves queued context requests against the Spotify API and applies
/// the results to the connect state.
pub struct ContextResolver {
    session: Session,
    // FIFO queue of contexts still to be resolved.
    queue: VecDeque<ResolveContext>,
    // Contexts that failed to resolve, mapped to when they were marked
    // unavailable.
    unavailable_contexts: HashMap<ResolveContext, Instant>,
}
/// Time after which a context previously marked unavailable may be retried.
const RETRY_UNAVAILABLE: Duration = Duration::from_secs(3600);
impl ContextResolver {
pub fn new(session: Session) -> Self {
Self {
session,
queue: VecDeque::new(),
unavailable_contexts: HashMap::new(),
}
}
pub fn add(&mut self, resolve: ResolveContext) {
let last_try = self
.unavailable_contexts
.get(&resolve)
.map(|i| i.duration_since(Instant::now()));
let last_try = if matches!(last_try, Some(last_try) if last_try > RETRY_UNAVAILABLE) {
let _ = self.unavailable_contexts.remove(&resolve);
debug!(
"context was requested {}s ago, trying again to resolve the requested context",
last_try.expect("checked by condition").as_secs()
);
None
} else {
last_try
};
if last_try.is_some() {
debug!("tried loading unavailable context: {resolve}");
return;
} else if self.queue.contains(&resolve) {
debug!("update for {resolve} is already added");
return;
} else {
trace!(
"added {} to resolver queue",
resolve.resolve_uri().unwrap_or(resolve.context_uri())
)
}
self.queue.push_back(resolve)
}
pub fn add_list(&mut self, resolve: Vec<ResolveContext>) {
for resolve in resolve {
self.add(resolve)
}
}
pub fn remove_used_and_invalid(&mut self) {
if let Some((_, _, remove)) = self.find_next() {
let _ = self.queue.drain(0..remove); // remove invalid
}
self.queue.pop_front(); // remove used
}
pub fn clear(&mut self) {
self.queue = VecDeque::new()
}
fn find_next(&self) -> Option<(&ResolveContext, &str, usize)> {
for idx in 0..self.queue.len() {
let next = self.queue.get(idx)?;
match next.resolve_uri() {
None => {
warn!("skipped {idx} because of invalid resolve_uri: {next}");
continue;
}
Some(uri) => return Some((next, uri, idx)),
}
}
None
}
pub fn has_next(&self) -> bool {
self.find_next().is_some()
}
pub async fn get_next_context(
&self,
recent_track_uri: impl Fn() -> Vec<String>,
) -> Result<Context, Error> {
let (next, resolve_uri, _) = self.find_next().ok_or(ContextResolverError::NoNext)?;
match next.update {
ContextType::Default => {
let mut ctx = self.session.spclient().get_context(resolve_uri).await;
if let Ok(ctx) = ctx.as_mut() {
ctx.uri = Some(next.context_uri().to_string());
ctx.url = ctx.uri.as_ref().map(|s| format!("context://{s}"));
}
ctx
}
ContextType::Autoplay => {
if resolve_uri.contains("spotify:show:") || resolve_uri.contains("spotify:episode:")
{
// autoplay is not supported for podcasts
Err(ContextResolverError::NotAllowedContext(
resolve_uri.to_string(),
))?
}
let request = AutoplayContextRequest {
context_uri: Some(resolve_uri.to_string()),
recent_track_uri: recent_track_uri(),
..Default::default()
};
self.session.spclient().get_autoplay_context(&request).await
}
}
}
pub fn mark_next_unavailable(&mut self) {
if let Some((next, _, _)) = self.find_next() {
self.unavailable_contexts
.insert(next.clone(), Instant::now());
}
}
pub fn apply_next_context(
&self,
state: &mut ConnectState,
mut context: Context,
) -> Result<Option<Vec<ResolveContext>>, Error> {
let (next, _, _) = self.find_next().ok_or(ContextResolverError::NoNext)?;
let remaining = match next.action {
ContextAction::Append if context.pages.len() == 1 => state
.fill_context_from_page(context.pages.remove(0))
.map(|_| None),
ContextAction::Replace => {
let remaining = state.update_context(context, next.update);
if let Resolve::Context(ref ctx) = next.resolve {
state.merge_context(ctx.pages.clone().pop());
}
remaining
}
ContextAction::Append => {
warn!("unexpected page size: {context:#?}");
Err(ContextResolverError::UnexpectedPagesSize(context.pages.len()).into())
}
}?;
Ok(remaining.map(|remaining| {
remaining
.into_iter()
.map(ResolveContext::append_context)
.collect::<Vec<_>>()
}))
}
    /// Finalizes the state setup once the last resolve entry of a kind was
    /// processed.
    ///
    /// Returns `false` (without touching the state) when there is no pending
    /// entry, when more entries of the same [ContextType] are still queued,
    /// or when a default context finished while it isn't the active context.
    /// Otherwise the playback state is finished up (transfer, new shuffle,
    /// position reset, or just topping up the next tracks) and `true` is
    /// returned.
    pub fn try_finish(
        &self,
        state: &mut ConnectState,
        transfer_state: &mut Option<TransferState>,
    ) -> bool {
        let (next, _, _) = match self.find_next() {
            None => return false,
            Some(next) => next,
        };
        // when there is only one update type, we are the last of our kind, so we should update the state
        if self
            .queue
            .iter()
            .filter(|resolve| resolve.update == next.update)
            .count()
            != 1
        {
            return false;
        }
        match (next.update, state.active_context) {
            (ContextType::Default, ContextType::Default) | (ContextType::Autoplay, _) => {
                debug!(
                    "last item of type <{:?}>, finishing state setup",
                    next.update
                );
            }
            (ContextType::Default, _) => {
                debug!("skipped finishing default, because it isn't the active context");
                return false;
            }
        }
        let active_ctx = state.get_context(state.active_context);
        let res = if let Some(transfer_state) = transfer_state.take() {
            // a transfer is in flight: restore the transferred playback
            state.finish_transfer(transfer_state)
        } else if state.shuffling_context() && next.update == ContextType::Default {
            state.shuffle_new()
        } else if matches!(active_ctx, Ok(ctx) if ctx.index.track == 0) {
            // has context, and context is not touched
            // when the index is not zero, the next index was already evaluated elsewhere
            let ctx = active_ctx.expect("checked by precondition");
            let idx = ConnectState::find_index_in_context(ctx, |t| {
                state.current_track(|c| t.uri == c.uri)
            })
            .ok();
            state.reset_playback_to_position(idx)
        } else {
            // context already progressed, just top up the upcoming tracks
            state.fill_up_next_tracks()
        };
        if let Err(why) = res {
            error!("setup of state failed: {why}, last used resolve {next:#?}")
        }
        state.update_restrictions();
        state.update_queue_revision();
        true
    }
}

31
connect/src/discovery.rs Normal file
View file

@ -0,0 +1,31 @@
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures_util::Stream;
use librespot_core::authentication::Credentials;
use librespot_core::config::ConnectConfig;
/// A stream of [`Credentials`] produced by zeroconf discovery, wrapping
/// [`librespot_discovery::Discovery`].
pub struct DiscoveryStream(librespot_discovery::Discovery);
impl Stream for DiscoveryStream {
    type Item = Credentials;

    /// Forwards polling to the wrapped discovery stream.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let inner = &mut self.get_mut().0;
        Pin::new(inner).poll_next(cx)
    }
}
pub fn discovery(
config: ConnectConfig,
device_id: String,
port: u16,
) -> io::Result<DiscoveryStream> {
librespot_discovery::Discovery::builder(device_id)
.device_type(config.device_type)
.port(port)
.name(config.name)
.launch()
.map(DiscoveryStream)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
}

View file

@ -1,6 +1,3 @@
#![warn(missing_docs)]
#![doc=include_str!("../README.md")]
#[macro_use]
extern crate log;
@ -8,12 +5,10 @@ use librespot_core as core;
use librespot_playback as playback;
use librespot_protocol as protocol;
mod context_resolver;
mod model;
mod shuffle_vec;
mod spirc;
mod state;
pub use model::*;
pub use spirc::*;
pub use state::*;
pub mod context;
#[deprecated(
since = "0.2.1",
note = "Please use the crate `librespot_discovery` instead."
)]
pub mod discovery;
pub mod spirc;

View file

@ -1,167 +0,0 @@
use crate::{
core::dealer::protocol::SkipTo, protocol::context_player_options::ContextPlayerOptionOverrides,
};
use std::ops::Deref;
/// Request for loading playback
#[derive(Debug, Clone)]
pub struct LoadRequest {
    // the context (uri or explicit track list) that should be loaded
    pub(super) context: PlayContext,
    // options deciding how playback of the context is started
    pub(super) options: LoadRequestOptions,
}
impl Deref for LoadRequest {
    type Target = LoadRequestOptions;

    /// Makes the request's [`LoadRequestOptions`] accessible directly on the
    /// request itself.
    fn deref(&self) -> &Self::Target {
        let Self { options, .. } = self;
        options
    }
}
/// The playable content of a [LoadRequest]: either a context uri or an
/// explicit list of track uris.
#[derive(Debug, Clone)]
pub(super) enum PlayContext {
    // a spotify context uri
    Uri(String),
    // an explicit list of track uris
    Tracks(Vec<String>),
}
/// The parameters for creating a load request
#[derive(Debug, Default, Clone)]
pub struct LoadRequestOptions {
    /// Whether the given tracks should immediately start playing, or just be initially loaded.
    pub start_playing: bool,
    /// Start the playback at a specific point of the track.
    ///
    /// The provided value is used as milliseconds. Providing a value greater
    /// than the track duration will start the track at the beginning.
    pub seek_to: u32,
    /// Options that decide how the context starts playing
    pub context_options: Option<LoadContextOptions>,
    /// Decides the starting position in the given context.
    ///
    /// If the provided item doesn't exist or is out of range,
    /// the playback starts at the beginning of the context.
    ///
    /// If `None` is provided and `shuffle` is `true`, a random track is played, otherwise the first
    /// track of the context is played.
    pub playing_track: Option<PlayingTrack>,
}
/// The options which decide how the playback is started
///
/// Separated into an `enum` to exclude the other variants from being used
/// simultaneously, as they are not compatible.
#[derive(Debug, Clone)]
pub enum LoadContextOptions {
    /// Starts the context with options (shuffle/repeat)
    Options(Options),
    /// Starts the playback as the autoplay variant of the context
    ///
    /// This is the same as finishing a context and
    /// automatically continuing playback of similar tracks
    Autoplay,
}
/// The available options that indicate how to start the context
#[derive(Debug, Default, Clone)]
pub struct Options {
    /// Start the context in shuffle mode
    pub shuffle: bool,
    /// Start the context in repeat mode
    pub repeat: bool,
    /// Start the context, repeating the first track until skipped or manually disabled
    pub repeat_track: bool,
}
impl From<ContextPlayerOptionOverrides> for Options {
fn from(value: ContextPlayerOptionOverrides) -> Self {
Self {
shuffle: value.shuffling_context.unwrap_or_default(),
repeat: value.repeating_context.unwrap_or_default(),
repeat_track: value.repeating_track.unwrap_or_default(),
}
}
}
impl LoadRequest {
    /// Builds a load request that plays the given spotify context.
    ///
    /// For supported `context_uri` see [`SpClient::get_context`](librespot_core::spclient::SpClient::get_context)
    ///
    /// Equivalent to using [`/me/player/play`](https://developer.spotify.com/documentation/web-api/reference/start-a-users-playback)
    /// and providing `context_uri`
    pub fn from_context_uri(context_uri: String, options: LoadRequestOptions) -> Self {
        let context = PlayContext::Uri(context_uri);
        Self { context, options }
    }

    /// Builds a load request that plays an explicit list of tracks.
    ///
    /// Equivalent to using [`/me/player/play`](https://developer.spotify.com/documentation/web-api/reference/start-a-users-playback)
    /// and providing `uris`
    pub fn from_tracks(tracks: Vec<String>, options: LoadRequestOptions) -> Self {
        let context = PlayContext::Tracks(tracks);
        Self { context, options }
    }
}
/// An item that represent a track to play
#[derive(Debug, Clone)]
pub enum PlayingTrack {
    /// Represent the track at a given index.
    Index(u32),
    /// Represent the uri of a track.
    Uri(String),
    #[doc(hidden)]
    /// Represent an internal identifier from spotify.
    ///
    /// The internal identifier is not the id contained in the uri. It is
    /// rather an unrelated id, probably unique in spotify's internal database.
    /// But that's just speculation.
    ///
    /// This identifier is not available by any public api. It's used in
    /// various ways by all spotify clients, e.g. for sorting, displaying which
    /// track is currently played and skipping to a track. Mobile uses it
    /// pretty intensively, but web and desktop seem to make use of it too.
    Uid(String),
}
impl TryFrom<SkipTo> for PlayingTrack {
type Error = ();
fn try_from(value: SkipTo) -> Result<Self, Self::Error> {
// order of checks is important, as the index can be 0, but still has an uid or uri provided,
// so we only use the index as last resort
if let Some(uri) = value.track_uri {
Ok(PlayingTrack::Uri(uri))
} else if let Some(uid) = value.track_uid {
Ok(PlayingTrack::Uid(uid))
} else if let Some(index) = value.track_index {
Ok(PlayingTrack::Index(index))
} else {
Err(())
}
}
}
/// The playback status as tracked by spirc.
#[derive(Debug)]
pub(super) enum SpircPlayStatus {
    // nothing is loaded
    Stopped,
    // a track is loading and will start playing
    LoadingPlay {
        position_ms: u32,
    },
    // a track is loading and will stay paused
    LoadingPause {
        position_ms: u32,
    },
    Playing {
        nominal_start_time: i64,
        preloading_of_next_track_triggered: bool,
    },
    Paused {
        position_ms: u32,
        preloading_of_next_track_triggered: bool,
    },
}

View file

@ -1,198 +0,0 @@
use rand::{Rng, SeedableRng, rngs::SmallRng};
use std::{
ops::{Deref, DerefMut},
vec::IntoIter,
};
/// A [Vec] wrapper that supports deterministic, reversible shuffling.
#[derive(Debug, Clone, Default)]
pub struct ShuffleVec<T> {
    // the items, in their current (possibly shuffled) order
    vec: Vec<T>,
    // the random swap partners recorded during shuffling; `Some` while the
    // vec is shuffled, used by `unshuffle` to restore the original order
    indices: Option<Vec<usize>>,
    /// This is primarily necessary to ensure that shuffle does not behave out of place.
    ///
    /// For that reason we swap the first track with the currently playing track. By that we ensure
    /// that the shuffle state is consistent between resets of the state because the first track is
    /// always the track with which we started playing when switching to shuffle.
    original_first_position: Option<usize>,
}
impl<T: PartialEq> PartialEq for ShuffleVec<T> {
    /// Two [ShuffleVec]s are equal when their items match in their current
    /// order; the shuffle bookkeeping is deliberately ignored.
    fn eq(&self, other: &Self) -> bool {
        self.vec.eq(&other.vec)
    }
}
impl<T> Deref for ShuffleVec<T> {
    type Target = Vec<T>;
    // expose the inner vec read-only
    fn deref(&self) -> &Self::Target {
        &self.vec
    }
}
impl<T> DerefMut for ShuffleVec<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // borrow the inner vec directly, mirroring the `Deref` impl;
        // the previous `self.vec.as_mut()` went through `AsMut` resolution
        // for the same `&mut Vec<T>` result
        &mut self.vec
    }
}
impl<T> IntoIterator for ShuffleVec<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;

    /// Consumes the wrapper, iterating the items in their current
    /// (possibly shuffled) order.
    fn into_iter(self) -> Self::IntoIter {
        let Self { vec, .. } = self;
        vec.into_iter()
    }
}
impl<T> From<Vec<T>> for ShuffleVec<T> {
    /// Wraps a plain vec in its un-shuffled state.
    fn from(vec: Vec<T>) -> Self {
        Self {
            vec,
            indices: None,
            original_first_position: None,
        }
    }
}
impl<T> ShuffleVec<T> {
    /// Shuffles the vec deterministically with the given `seed`.
    ///
    /// `is_first` selects the item that should end up at position 0 after
    /// shuffling (e.g. the currently playing track), see
    /// `original_first_position`.
    pub fn shuffle_with_seed<F: Fn(&T) -> bool>(&mut self, seed: u64, is_first: F) {
        self.shuffle_with_rng(SmallRng::seed_from_u64(seed), is_first)
    }
    /// Shuffles the vec with the given rng, recording the applied swaps so
    /// [ShuffleVec::unshuffle] can restore the original order.
    pub fn shuffle_with_rng<F: Fn(&T) -> bool>(&mut self, mut rng: impl Rng, is_first: F) {
        if self.vec.len() <= 1 {
            info!("skipped shuffling for less or equal one item");
            return;
        }
        // never stack shuffles: restore the original order first
        if self.indices.is_some() {
            self.unshuffle()
        }
        // Fisher-Yates: pre-draw the random swap partner for every position,
        // so the swaps can later be replayed in reverse by `unshuffle`
        let indices: Vec<_> = {
            (1..self.vec.len())
                .rev()
                .map(|i| rng.random_range(0..i + 1))
                .collect()
        };
        for (i, &rnd_ind) in (1..self.vec.len()).rev().zip(&indices) {
            self.vec.swap(i, rnd_ind);
        }
        self.indices = Some(indices);
        // move the selected item to the front so the shuffled order always
        // starts with it (see the `original_first_position` field docs)
        self.original_first_position = self.vec.iter().position(is_first);
        if let Some(first_pos) = self.original_first_position {
            self.vec.swap(0, first_pos)
        }
    }
    /// Restores the order the vec had before shuffling, if it is currently
    /// shuffled; otherwise does nothing.
    pub fn unshuffle(&mut self) {
        let indices = match self.indices.take() {
            Some(indices) => indices,
            None => return,
        };
        // undo the first-item swap before replaying the recorded swaps
        if let Some(first_pos) = self.original_first_position {
            self.vec.swap(0, first_pos);
            self.original_first_position = None;
        }
        // replay the recorded swaps in reverse order
        for i in 1..self.vec.len() {
            match indices.get(self.vec.len() - i - 1) {
                None => return,
                Some(n) => self.vec.swap(*n, i),
            }
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use rand::Rng;
    use std::ops::Range;
    // builds an un-shuffled vec for the given range plus a random seed
    fn base(range: Range<usize>) -> (ShuffleVec<usize>, u64) {
        let seed = rand::rng().random_range(0..10_000_000_000_000);
        let vec = range.collect::<Vec<_>>();
        (vec.into(), seed)
    }
    #[test]
    fn test_shuffle_without_first() {
        let (base_vec, seed) = base(0..100);
        // same seed => same shuffled order
        let mut shuffled_vec = base_vec.clone();
        shuffled_vec.shuffle_with_seed(seed, |_| false);
        let mut different_shuffled_vec = base_vec.clone();
        different_shuffled_vec.shuffle_with_seed(seed, |_| false);
        assert_eq!(
            shuffled_vec, different_shuffled_vec,
            "shuffling with the same seed has the same result"
        );
        let mut unshuffled_vec = shuffled_vec.clone();
        unshuffled_vec.unshuffle();
        assert_eq!(
            base_vec, unshuffled_vec,
            "unshuffle restores the original state"
        );
    }
    #[test]
    fn test_shuffle_with_first() {
        const MAX_RANGE: usize = 200;
        let (base_vec, seed) = base(0..MAX_RANGE);
        let rand_first = rand::rng().random_range(0..MAX_RANGE);
        let mut shuffled_with_first = base_vec.clone();
        shuffled_with_first.shuffle_with_seed(seed, |i| i == &rand_first);
        assert_eq!(
            Some(&rand_first),
            shuffled_with_first.first(),
            "after shuffling the first is expected to be the given item"
        );
        let mut shuffled_without_first = base_vec.clone();
        shuffled_without_first.shuffle_with_seed(seed, |_| false);
        // shuffling with/without a chosen first item may only differ by the
        // single swap that moves the chosen item to the front: find the two
        // positions where the results diverge
        let mut switched_positions = Vec::with_capacity(2);
        for (i, without_first_value) in shuffled_without_first.iter().enumerate() {
            if without_first_value != &shuffled_with_first[i] {
                switched_positions.push(i);
            } else {
                assert_eq!(
                    without_first_value, &shuffled_with_first[i],
                    "shuffling with the same seed has the same result"
                );
            }
        }
        assert_eq!(
            switched_positions.len(),
            2,
            "only the switched positions should be different"
        );
        assert_eq!(
            shuffled_with_first[switched_positions[0]],
            shuffled_without_first[switched_positions[1]],
            "the switched values should be equal"
        );
        assert_eq!(
            shuffled_with_first[switched_positions[1]],
            shuffled_without_first[switched_positions[0]],
            "the switched values should be equal"
        )
    }
}

File diff suppressed because it is too large Load diff

View file

@ -1,495 +0,0 @@
pub(super) mod context;
mod handle;
mod metadata;
mod options;
pub(super) mod provider;
mod restrictions;
mod tracks;
mod transfer;
use crate::{
core::{
Error, Session, config::DeviceType, date::Date, dealer::protocol::Request,
spclient::SpClientResult, version,
},
model::SpircPlayStatus,
protocol::{
connect::{Capabilities, Device, DeviceInfo, MemberType, PutStateReason, PutStateRequest},
media::AudioQuality,
player::{
ContextIndex, ContextPlayerOptions, PlayOrigin, PlayerState, ProvidedTrack,
Suppressions,
},
},
state::{
context::{ContextType, ResetContext, StateContext},
options::ShuffleState,
provider::{IsProvider, Provider},
},
};
use log::LevelFilter;
use protobuf::{EnumOrUnknown, MessageField};
use std::{
collections::hash_map::DefaultHasher,
hash::{Hash, Hasher},
time::{Duration, SystemTime, UNIX_EPOCH},
};
use thiserror::Error;
// these limitations are essential, otherwise too many tracks will overload the web-player
const SPOTIFY_MAX_PREV_TRACKS_SIZE: usize = 10;
const SPOTIFY_MAX_NEXT_TRACKS_SIZE: usize = 80;
/// Errors that can occur while manipulating the connect state.
#[derive(Debug, Error)]
pub(super) enum StateError {
    #[error("the current track couldn't be resolved from the transfer state")]
    CouldNotResolveTrackFromTransfer,
    #[error("context is not available. type: {0:?}")]
    NoContext(ContextType),
    #[error("could not find track {0:?} in context of {1}")]
    CanNotFindTrackInContext(Option<usize>, usize),
    #[error("currently {action} is not allowed because {reason}")]
    CurrentlyDisallowed {
        action: &'static str,
        reason: String,
    },
    #[error("the provided context has no tracks")]
    ContextHasNoTracks,
    #[error("playback of local files is not supported")]
    UnsupportedLocalPlayback,
    #[error("track uri <{0:?}> contains invalid characters")]
    InvalidTrackUri(Option<String>),
}
impl From<StateError> for Error {
    fn from(err: StateError) -> Self {
        use StateError::*;
        // the match is kept exhaustive on purpose, so adding a variant
        // forces choosing an error category here
        match err {
            CouldNotResolveTrackFromTransfer
            | NoContext(_)
            | CanNotFindTrackInContext(_, _)
            | ContextHasNoTracks
            | InvalidTrackUri(_) => Error::failed_precondition(err),
            CurrentlyDisallowed { .. } | UnsupportedLocalPlayback => Error::unavailable(err),
        }
    }
}
/// Configuration of the connect device
#[derive(Debug, Clone)]
pub struct ConnectConfig {
    /// The name of the connect device (default: librespot)
    pub name: String,
    /// The icon type of the connect device (default: [DeviceType::Speaker])
    pub device_type: DeviceType,
    /// Displays the [DeviceType] twice in the ui to show up as a group (default: false)
    pub is_group: bool,
    /// The volume with which the connect device will be initialized (default: 50%)
    pub initial_volume: u16,
    /// Disables the option to control the volume remotely (default: false)
    pub disable_volume: bool,
    /// Number of incremental steps the volume range is divided into (default: 64)
    pub volume_steps: u16,
}
impl Default for ConnectConfig {
    /// The defaults documented on [ConnectConfig].
    fn default() -> Self {
        Self {
            name: String::from("librespot"),
            device_type: DeviceType::Speaker,
            is_group: false,
            // half of the u16 range, i.e. 50% volume
            initial_volume: u16::MAX / 2,
            disable_volume: false,
            volume_steps: 64,
        }
    }
}
/// The local model of the connect state that is synced to the remote server.
#[derive(Default, Debug)]
pub(super) struct ConnectState {
    /// the entire state that is updated to the remote server
    request: PutStateRequest,
    // NOTE(review): appears to collect uris that turned out unplayable — confirm usage
    unavailable_uri: Vec<String>,
    // when this device became active, used to fill `started_playing_at`
    active_since: Option<SystemTime>,
    // NOTE(review): presumably counts queued items — confirm against queue handling
    queue_count: u64,
    // separation is necessary because we could have already loaded
    // the autoplay context but are still playing from the default context
    /// to update the active context use [switch_active_context](ConnectState::set_active_context)
    pub active_context: ContextType,
    // the context from which prev/next tracks are currently topped up
    fill_up_context: ContextType,
    /// the context from which we play, is used to top up prev and next tracks
    context: Option<StateContext>,
    /// seed extracted in [ConnectState::handle_initial_transfer] and used in [ConnectState::finish_transfer]
    transfer_shuffle: Option<ShuffleState>,
    /// a context to keep track of the autoplay context
    autoplay_context: Option<StateContext>,
    /// The volume adjustment per step when handling individual volume adjustments.
    pub volume_step_size: u16,
}
impl ConnectState {
    /// Creates a new connect state, announcing the device with the
    /// capabilities supported by librespot.
    pub fn new(cfg: ConnectConfig, session: &Session) -> Self {
        // fall back to a sane step size if `volume_steps` is zero
        let volume_step_size = u16::MAX.checked_div(cfg.volume_steps).unwrap_or(1024);
        let device_info = DeviceInfo {
            can_play: true,
            volume: cfg.initial_volume.into(),
            name: cfg.name,
            device_id: session.device_id().to_string(),
            device_type: EnumOrUnknown::new(cfg.device_type.into()),
            device_software_version: version::SEMVER.to_string(),
            spirc_version: version::SPOTIFY_SPIRC_VERSION.to_string(),
            client_id: session.client_id(),
            is_group: cfg.is_group,
            capabilities: MessageField::some(Capabilities {
                volume_steps: cfg.volume_steps.into(),
                disable_volume: cfg.disable_volume,
                gaia_eq_connect_id: true,
                can_be_player: true,
                needs_full_player_state: true,
                is_observable: true,
                is_controllable: true,
                hidden: false,
                supports_gzip_pushes: true,
                // todo: enable after logout handling is implemented, see spirc logout_request
                supports_logout: false,
                supported_types: vec!["audio/episode".into(), "audio/track".into()],
                supports_playlist_v2: true,
                supports_transfer_command: true,
                supports_command_request: true,
                supports_set_options_command: true,
                is_voice_enabled: false,
                restrict_to_local: false,
                connect_disabled: false,
                supports_rename: false,
                supports_external_episodes: false,
                supports_set_backend_metadata: false,
                supports_hifi: MessageField::none(),
                // that "AI" dj thingy only available to specific regions/users
                supports_dj: false,
                supports_rooms: false,
                // AudioQuality::HIFI is available, further investigation necessary
                supported_audio_quality: EnumOrUnknown::new(AudioQuality::VERY_HIGH),
                command_acks: true,
                ..Default::default()
            }),
            ..Default::default()
        };
        let mut state = Self {
            request: PutStateRequest {
                member_type: EnumOrUnknown::new(MemberType::CONNECT_STATE),
                put_state_reason: EnumOrUnknown::new(PutStateReason::PLAYER_STATE_CHANGED),
                device: MessageField::some(Device {
                    device_info: MessageField::some(device_info),
                    player_state: MessageField::some(PlayerState {
                        session_id: session.session_id(),
                        ..Default::default()
                    }),
                    ..Default::default()
                }),
                ..Default::default()
            },
            volume_step_size,
            ..Default::default()
        };
        state.reset();
        state
    }
    /// Resets the player state to its initial values, keeping the session id.
    fn reset(&mut self) {
        self.set_active(false);
        self.queue_count = 0;
        // preserve the session_id
        let session_id = self.player().session_id.clone();
        self.device_mut().player_state = MessageField::some(PlayerState {
            session_id,
            is_system_initiated: true,
            playback_speed: 1.,
            play_origin: MessageField::some(PlayOrigin::new()),
            suppressions: MessageField::some(Suppressions::new()),
            options: MessageField::some(ContextPlayerOptions::new()),
            // + 1, so that we have a buffer where we can swap elements
            prev_tracks: Vec::with_capacity(SPOTIFY_MAX_PREV_TRACKS_SIZE + 1),
            next_tracks: Vec::with_capacity(SPOTIFY_MAX_NEXT_TRACKS_SIZE + 1),
            ..Default::default()
        });
    }
    // mutable access to the device in the request, which is always present
    fn device_mut(&mut self) -> &mut Device {
        self.request
            .device
            .as_mut()
            .expect("the request is always available")
    }
    // mutable access to the player state in the request, which is always present
    fn player_mut(&mut self) -> &mut PlayerState {
        self.device_mut()
            .player_state
            .as_mut()
            .expect("the player_state has to be always given")
    }
    /// The device info that is reported to the remote server.
    pub fn device_info(&self) -> &DeviceInfo {
        &self.request.device.device_info
    }
    /// The player state that is reported to the remote server.
    pub fn player(&self) -> &PlayerState {
        &self.request.device.player_state
    }
    /// Whether this device is currently the active connect device.
    pub fn is_active(&self) -> bool {
        self.request.is_active
    }
    /// Returns the `is_playing` value as perceived by other connect devices
    ///
    /// see [ConnectState::set_status]
    pub fn is_playing(&self) -> bool {
        let player = self.player();
        player.is_playing && !player.is_paused
    }
    /// Returns the `is_paused` state value as perceived by other connect devices
    ///
    /// see [ConnectState::set_status]
    pub fn is_pause(&self) -> bool {
        let player = self.player();
        player.is_playing && player.is_paused && player.is_buffering
    }
    /// Updates the volume that is reported to the remote server.
    pub fn set_volume(&mut self, volume: u32) {
        self.device_mut()
            .device_info
            .as_mut()
            .expect("the device_info has to be always given")
            .volume = volume;
    }
    /// Records the last received dealer command for command acknowledgement.
    pub fn set_last_command(&mut self, command: Request) {
        self.request.last_command_message_id = command.message_id;
        self.request.last_command_sent_by_device_id = command.sent_by_device_id;
    }
    /// Updates the client side timestamp and, while active, the
    /// `started_playing_at` value derived from [ConnectState::active_since].
    pub fn set_now(&mut self, now: u64) {
        self.request.client_side_timestamp = now;
        if let Some(active_since) = self.active_since {
            if let Ok(active_since_duration) = active_since.duration_since(UNIX_EPOCH) {
                match active_since_duration.as_millis().try_into() {
                    Ok(active_since_ms) => self.request.started_playing_at = active_since_ms,
                    Err(why) => warn!("couldn't update active since because {why}"),
                }
            }
        }
    }
    /// Marks the device as (in)active, tracking the time it became active.
    pub fn set_active(&mut self, value: bool) {
        if value {
            // already active, keep the original activation time
            if self.request.is_active {
                return;
            }
            self.request.is_active = true;
            self.active_since = Some(SystemTime::now())
        } else {
            self.request.is_active = false;
            self.active_since = None
        }
    }
    /// Sets the origin from which playback was started.
    pub fn set_origin(&mut self, origin: PlayOrigin) {
        self.player_mut().play_origin = MessageField::some(origin)
    }
    /// Sets the session id of the player state.
    pub fn set_session_id(&mut self, session_id: String) {
        self.player_mut().session_id = session_id;
    }
    /// Derives the playing/paused/buffering flags from the spirc status.
    pub(crate) fn set_status(&mut self, status: &SpircPlayStatus) {
        let player = self.player_mut();
        player.is_paused = matches!(
            status,
            SpircPlayStatus::LoadingPause { .. }
                | SpircPlayStatus::Paused { .. }
                | SpircPlayStatus::Stopped
        );
        if player.is_paused {
            player.playback_speed = 0.;
        } else {
            player.playback_speed = 1.;
        }
        // desktop and mobile require all 'states' set to true, when we are paused,
        // otherwise the play button (desktop) is grayed out or the preview (mobile) can't be opened
        player.is_buffering = player.is_paused
            || matches!(
                status,
                SpircPlayStatus::LoadingPause { .. } | SpircPlayStatus::LoadingPlay { .. }
            );
        player.is_playing = player.is_paused
            || matches!(
                status,
                SpircPlayStatus::LoadingPlay { .. } | SpircPlayStatus::Playing { .. }
            );
        debug!(
            "updated connect play status playing: {}, paused: {}, buffering: {}",
            player.is_playing, player.is_paused, player.is_buffering
        );
        self.update_restrictions()
    }
    /// index is 0 based, so the first track is index 0
    pub fn update_current_index(&mut self, f: impl Fn(&mut ContextIndex)) {
        match self.player_mut().index.as_mut() {
            Some(player_index) => f(player_index),
            None => {
                let mut new_index = ContextIndex::new();
                f(&mut new_index);
                self.player_mut().index = MessageField::some(new_index)
            }
        }
    }
    /// Sets the playback position (in ms) as of the given timestamp.
    pub fn update_position(&mut self, position_ms: u32, timestamp: i64) {
        let player = self.player_mut();
        player.position_as_of_timestamp = position_ms.into();
        player.timestamp = timestamp;
    }
    /// Sets the duration (in ms) of the current track.
    pub fn update_duration(&mut self, duration: u32) {
        self.player_mut().duration = duration.into()
    }
    /// Recomputes the queue revision as a hash over the upcoming track uris.
    pub fn update_queue_revision(&mut self) {
        let mut state = DefaultHasher::new();
        self.next_tracks()
            .iter()
            .for_each(|t| t.uri.hash(&mut state));
        self.player_mut().queue_revision = state.finish().to_string()
    }
    /// Resets the playback to the given index (or 0) of the active context,
    /// rebuilding the prev and next track lists around it.
    pub fn reset_playback_to_position(&mut self, new_index: Option<usize>) -> Result<(), Error> {
        debug!(
            "reset_playback with active ctx <{:?}> fill_up ctx <{:?}>",
            self.active_context, self.fill_up_context
        );
        let new_index = new_index.unwrap_or(0);
        self.update_current_index(|i| i.track = new_index as u32);
        self.update_context_index(self.active_context, new_index + 1)?;
        // from now on, top up tracks from the active context again
        self.fill_up_context = self.active_context;
        if !self.current_track(|t| t.is_queue() || self.is_skip_track(t, None)) {
            self.set_current_track(new_index)?;
        }
        self.clear_prev_track();
        if new_index > 0 {
            // rebuild the prev tracks from the context slice before the new index
            let context = self.get_context(self.active_context)?;
            let before_new_track = context.tracks.len() - new_index;
            self.player_mut().prev_tracks = context
                .tracks
                .iter()
                .rev()
                .skip(before_new_track)
                .take(SPOTIFY_MAX_PREV_TRACKS_SIZE)
                .rev()
                .cloned()
                .collect();
            debug!("has {} prev tracks", self.prev_tracks().len())
        }
        self.clear_next_tracks();
        self.fill_up_next_tracks()?;
        self.update_restrictions();
        Ok(())
    }
    // flags the track as unavailable if it matches the given uri
    fn mark_as_unavailable_for_match(track: &mut ProvidedTrack, uri: &str) {
        if track.uri == uri {
            debug!("Marked <{}:{}> as unavailable", track.provider, track.uri);
            track.set_provider(Provider::Unavailable);
        }
    }
    /// Advances the playback position to the given timestamp, keeping the
    /// position consistent with the elapsed time.
    pub fn update_position_in_relation(&mut self, timestamp: i64) {
        let player = self.player_mut();
        let diff = timestamp - player.timestamp;
        player.position_as_of_timestamp += diff;
        if log::max_level() >= LevelFilter::Debug {
            let pos = Duration::from_millis(player.position_as_of_timestamp as u64);
            let time = Date::from_timestamp_ms(timestamp)
                .map(|d| d.time().to_string())
                .unwrap_or_else(|_| timestamp.to_string());
            let sec = pos.as_secs();
            let (min, sec) = (sec / 60, sec % 60);
            debug!("update position to {min}:{sec:0>2} at {time}");
        }
        player.timestamp = timestamp;
    }
    /// Resets the state and notifies the server that this device went inactive.
    pub async fn became_inactive(&mut self, session: &Session) -> SpClientResult {
        self.reset();
        self.reset_context(ResetContext::Completely);
        session.spclient().put_connect_state_inactive(false).await
    }
    // sends the state with a one-off put-state reason,
    // restoring the previous reason afterwards
    async fn send_with_reason(
        &mut self,
        session: &Session,
        reason: PutStateReason,
    ) -> SpClientResult {
        let prev_reason = self.request.put_state_reason;
        self.request.put_state_reason = EnumOrUnknown::new(reason);
        let res = self.send_state(session).await;
        self.request.put_state_reason = prev_reason;
        res
    }
    /// Notifies the remote server about a new device
    pub async fn notify_new_device_appeared(&mut self, session: &Session) -> SpClientResult {
        self.send_with_reason(session, PutStateReason::NEW_DEVICE)
            .await
    }
    /// Notifies the remote server about a new volume
    pub async fn notify_volume_changed(&mut self, session: &Session) -> SpClientResult {
        self.send_with_reason(session, PutStateReason::VOLUME_CHANGED)
            .await
    }
    /// Sends the connect state for the connect session to the remote server
    pub async fn send_state(&self, session: &Session) -> SpClientResult {
        session
            .spclient()
            .put_connect_state_request(&self.request)
            .await
    }
}

View file

@ -1,520 +0,0 @@
use crate::{
core::{Error, SpotifyId, SpotifyUri},
protocol::{
context::Context,
context_page::ContextPage,
context_track::ContextTrack,
player::{ContextIndex, ProvidedTrack},
restrictions::Restrictions,
},
shuffle_vec::ShuffleVec,
state::{
ConnectState, SPOTIFY_MAX_NEXT_TRACKS_SIZE, StateError,
metadata::Metadata,
provider::{IsProvider, Provider},
},
};
use protobuf::MessageField;
use std::collections::HashMap;
use uuid::Uuid;
const LOCAL_FILES_IDENTIFIER: &str = "spotify:local-files";
const SEARCH_IDENTIFIER: &str = "spotify:search";
/// A fully loaded context together with the metadata needed to play from it.
#[derive(Debug)]
pub struct StateContext {
    pub tracks: ShuffleVec<ProvidedTrack>,
    pub metadata: HashMap<String, String>,
    pub restrictions: Option<Restrictions>,
    /// is used to keep track which tracks are already loaded into the next_tracks
    pub index: ContextIndex,
}
/// The kind of context the state can play from.
#[derive(Default, Debug, Copy, Clone, PartialEq, Hash, Eq)]
pub enum ContextType {
    /// the context that was explicitly loaded
    #[default]
    Default,
    /// the automatically resolved continuation context
    Autoplay,
}
/// How [ConnectState::reset_context] should reset the loaded contexts.
pub enum ResetContext<'s> {
    /// drop all contexts and clear the context uri/url
    Completely,
    /// keep the contexts but reset their indices to the start
    DefaultIndex,
    /// reset completely when the given uri differs from the current context,
    /// otherwise keep the contexts (only un-shuffling the default context)
    WhenDifferent(&'s str),
}
/// Extracts the spotify uri from a given page_url
///
/// Just extracts "spotify/album/5LFzwirfFwBKXJQGfwmiMY" and replaces the slash's with colon's
///
/// Expected `page_url` should look something like the following:
/// `hm://artistplaycontext/v1/page/spotify/album/5LFzwirfFwBKXJQGfwmiMY/km_artist`
fn page_url_to_uri(page_url: &str) -> String {
    // tolerate a missing scheme, but log it
    let stripped = page_url.strip_prefix("hm://").unwrap_or_else(|| {
        warn!("page_url didn't start with hm://. got page_url: {page_url}");
        page_url
    });
    stripped
        .split('/')
        .skip_while(|&segment| segment != "spotify")
        .take(3)
        .collect::<Vec<&str>>()
        .join(":")
}
impl ConnectState {
pub fn find_index_in_context<F: Fn(&ProvidedTrack) -> bool>(
ctx: &StateContext,
f: F,
) -> Result<usize, StateError> {
ctx.tracks
.iter()
.position(f)
.ok_or(StateError::CanNotFindTrackInContext(None, ctx.tracks.len()))
}
pub fn get_context(&self, ty: ContextType) -> Result<&StateContext, StateError> {
match ty {
ContextType::Default => self.context.as_ref(),
ContextType::Autoplay => self.autoplay_context.as_ref(),
}
.ok_or(StateError::NoContext(ty))
}
pub fn get_context_mut(&mut self, ty: ContextType) -> Result<&mut StateContext, StateError> {
match ty {
ContextType::Default => self.context.as_mut(),
ContextType::Autoplay => self.autoplay_context.as_mut(),
}
.ok_or(StateError::NoContext(ty))
}
    /// The uri of the context the player currently plays from.
    pub fn context_uri(&self) -> &String {
        &self.player().context_uri
    }
fn different_context_uri(&self, uri: &str) -> bool {
// search identifier is always different
self.context_uri() != uri || uri.starts_with(SEARCH_IDENTIFIER)
}
    /// Resets the loaded contexts as described by `reset_as`.
    ///
    /// Always un-shuffles the default context (dropping its shuffle seed and
    /// initial track) and resets the fill-up and active context back to
    /// [ContextType::Default].
    pub fn reset_context(&mut self, mut reset_as: ResetContext) {
        // a different (or search) uri means nothing can be kept
        if matches!(reset_as, ResetContext::WhenDifferent(ctx) if self.different_context_uri(ctx)) {
            reset_as = ResetContext::Completely
        }
        if let Ok(ctx) = self.get_context_mut(ContextType::Default) {
            ctx.remove_shuffle_seed();
            ctx.remove_initial_track();
            ctx.tracks.unshuffle()
        }
        match reset_as {
            ResetContext::WhenDifferent(_) => debug!("context didn't change, no reset"),
            ResetContext::Completely => {
                self.context = None;
                self.autoplay_context = None;
                let player = self.player_mut();
                player.context_uri.clear();
                player.context_url.clear();
            }
            ResetContext::DefaultIndex => {
                // rewind both contexts to the start
                for ctx in [self.context.as_mut(), self.autoplay_context.as_mut()]
                    .into_iter()
                    .flatten()
                {
                    ctx.index.track = 0;
                    ctx.index.page = 0;
                }
            }
        }
        self.fill_up_context = ContextType::Default;
        self.set_active_context(ContextType::Default);
        self.update_restrictions()
    }
pub fn valid_resolve_uri(uri: &str) -> Option<&str> {
if uri.is_empty() || uri.starts_with(SEARCH_IDENTIFIER) {
None
} else {
Some(uri)
}
}
pub fn find_valid_uri<'s>(
context_uri: Option<&'s str>,
first_page: Option<&'s ContextPage>,
) -> Option<&'s str> {
context_uri
.and_then(Self::valid_resolve_uri)
.or_else(|| first_page.and_then(|p| p.tracks.first().and_then(|t| t.uri.as_deref())))
}
    /// Switches the active context, copying its restrictions and metadata
    /// into the player state.
    pub fn set_active_context(&mut self, new_context: ContextType) {
        self.active_context = new_context;
        // clear the previous context's metadata/restrictions first
        let player = self.player_mut();
        player.context_metadata = Default::default();
        player.context_restrictions = MessageField::some(Default::default());
        player.restrictions = MessageField::some(Default::default());
        let ctx = match self.get_context(new_context) {
            Err(why) => {
                warn!("couldn't load context info because: {why}");
                return;
            }
            Ok(ctx) => ctx,
        };
        // clone out of the context so the player can be borrowed mutably below
        let mut restrictions = ctx.restrictions.clone();
        let metadata = ctx.metadata.clone();
        let player = self.player_mut();
        if let Some(restrictions) = restrictions.take() {
            player.restrictions = MessageField::some(restrictions.into());
        }
        for (key, value) in metadata {
            player.context_metadata.insert(key, value);
        }
    }
    /// Installs a resolved [Context] as the context of the given type.
    ///
    /// The context is split into its first non-empty page (which becomes the
    /// loaded page) and the remaining pages. Remaining pages that already
    /// carry tracks are filled into the context directly; pages that only
    /// have a `page_url` are returned as uris that still have to be resolved.
    ///
    /// # Errors
    ///
    /// - [StateError::ContextHasNoTracks] when no page has any tracks
    /// - [StateError::UnsupportedLocalPlayback] for local-file contexts
    pub fn update_context(
        &mut self,
        mut context: Context,
        ty: ContextType,
    ) -> Result<Option<Vec<String>>, Error> {
        if context.pages.iter().all(|p| p.tracks.is_empty()) {
            error!("context didn't have any tracks: {context:#?}");
            Err(StateError::ContextHasNoTracks)?;
        } else if matches!(context.uri, Some(ref uri) if uri.starts_with(LOCAL_FILES_IDENTIFIER)) {
            Err(StateError::UnsupportedLocalPlayback)?;
        }
        // split into the first non-empty page and the remaining pages
        let mut next_contexts = Vec::new();
        let mut first_page = None;
        for page in context.pages {
            if first_page.is_none() && !page.tracks.is_empty() {
                first_page = Some(page);
            } else {
                next_contexts.push(page)
            }
        }
        let page = match first_page {
            None => Err(StateError::ContextHasNoTracks)?,
            Some(p) => p,
        };
        debug!(
            "updated context {ty:?} to <{:?}> ({} tracks)",
            context.uri,
            page.tracks.len()
        );
        match ty {
            ContextType::Default => {
                let mut new_context = self.state_context_from_page(
                    page,
                    context.metadata,
                    context.restrictions.take(),
                    context.uri.as_deref(),
                    Some(0),
                    None,
                );
                // when we update the same context, we should try to preserve the previous position
                // otherwise we might load the entire context twice, unless it's the search context
                if !self.context_uri().starts_with(SEARCH_IDENTIFIER)
                    && matches!(context.uri, Some(ref uri) if uri == self.context_uri())
                {
                    if let Some(new_index) = self.find_last_index_in_new_context(&new_context) {
                        new_context.index.track = match new_index {
                            Ok(i) => i,
                            Err(i) => {
                                // the index couldn't be mapped exactly,
                                // drop the player index
                                self.player_mut().index = MessageField::none();
                                i
                            }
                        };
                        // enforce reloading the context
                        if let Ok(autoplay_ctx) = self.get_context_mut(ContextType::Autoplay) {
                            autoplay_ctx.index.track = 0
                        }
                        self.clear_next_tracks();
                    }
                }
                self.context = Some(new_context);
                // don't expose a search context url to the remote server
                if !matches!(context.url, Some(ref url) if url.contains(SEARCH_IDENTIFIER)) {
                    self.player_mut().context_url = context.url.take().unwrap_or_default();
                } else {
                    self.player_mut().context_url.clear()
                }
                self.player_mut().context_uri = context.uri.take().unwrap_or_default();
            }
            ContextType::Autoplay => {
                self.autoplay_context = Some(self.state_context_from_page(
                    page,
                    context.metadata,
                    context.restrictions.take(),
                    context.uri.as_deref(),
                    None,
                    Some(Provider::Autoplay),
                ))
            }
        }
        if next_contexts.is_empty() {
            return Ok(None);
        }
        // load remaining contexts
        let next_contexts = next_contexts
            .into_iter()
            .flat_map(|page| {
                if !page.tracks.is_empty() {
                    // page already has tracks: fill it in directly
                    self.fill_context_from_page(page).ok()?;
                    None
                } else if matches!(page.page_url, Some(ref url) if !url.is_empty()) {
                    // page only references a url: hand back the uri to resolve
                    Some(page_url_to_uri(
                        &page.page_url.expect("checked by precondition"),
                    ))
                } else {
                    warn!("unhandled context page: {page:#?}");
                    None
                }
            })
            .collect();
        Ok(Some(next_contexts))
    }
/// Searches the play history (newest entry first) for a track that is still
/// part of `ctx` and returns that track's index within `ctx`.
fn find_first_prev_track_index(&self, ctx: &StateContext) -> Option<usize> {
    self.prev_tracks()
        .iter()
        .rev()
        .find_map(|prev| Self::find_index_in_context(ctx, |t| prev.uri == t.uri).ok())
}
/// Tries to carry the current playback position over into `new_context`.
///
/// Returns `None` when there is no current context. Otherwise `Ok(index)`
/// when a position could be derived, or `Err(fallback_index)` when it
/// couldn't (the caller then also invalidates the player index).
fn find_last_index_in_new_context(
    &self,
    new_context: &StateContext,
) -> Option<Result<u32, u32>> {
    let ctx = self.context.as_ref()?;

    let is_queued_item = self.current_track(|t| t.is_queue() || t.is_from_queue());

    let new_index = if ctx.index.track as usize >= SPOTIFY_MAX_NEXT_TRACKS_SIZE {
        // NOTE(review): presumably compensates for the next-tracks window size,
        // confirm why the stored index is shifted by SPOTIFY_MAX_NEXT_TRACKS_SIZE
        Some(ctx.index.track as usize - SPOTIFY_MAX_NEXT_TRACKS_SIZE)
    } else if is_queued_item {
        // a queued item has no own context position, anchor on the newest
        // previous track that still exists in the new context
        self.find_first_prev_track_index(new_context)
    } else {
        Self::find_index_in_context(new_context, |current| {
            self.current_track(|t| t.uri == current.uri)
        })
        .ok()
    }
    .map(|i| i as u32 + 1);

    Some(new_index.ok_or_else(|| {
        info!(
            "couldn't distinguish index from current or previous tracks in the updated context"
        );
        let fallback_index = self
            .player()
            .index
            .as_ref()
            .map(|i| i.track)
            .unwrap_or_default();
        info!("falling back to index {fallback_index}");
        fallback_index
    }))
}
/// Converts a [ContextPage] into a [StateContext].
///
/// Tracks that can't be converted into a [ProvidedTrack] are logged and
/// dropped. When `context_length` is given, each track gets a context index
/// of `context_length + position`.
fn state_context_from_page(
    &mut self,
    page: ContextPage,
    metadata: HashMap<String, String>,
    restrictions: Option<Restrictions>,
    new_context_uri: Option<&str>,
    context_length: Option<usize>,
    provider: Option<Provider>,
) -> StateContext {
    // fall back to the uri of the currently loaded context
    let new_context_uri = new_context_uri.unwrap_or(self.context_uri());

    let tracks = page
        .tracks
        .iter()
        .enumerate()
        .flat_map(|(i, track)| {
            match self.context_to_provided_track(
                track,
                Some(new_context_uri),
                context_length.map(|l| l + i),
                Some(&page.metadata),
                provider.clone(),
            ) {
                Ok(t) => Some(t),
                Err(why) => {
                    error!("couldn't convert {track:#?} into ProvidedTrack: {why}");
                    None
                }
            }
        })
        .collect::<Vec<_>>();

    StateContext {
        tracks: tracks.into(),
        restrictions,
        metadata,
        index: ContextIndex::new(),
    }
}
/// Whether `track` is the "initial track" of the active context that should
/// be skipped — only on the first iteration, if one is given.
pub fn is_skip_track(&self, track: &ProvidedTrack, iteration: Option<u32>) -> bool {
    let Ok(ctx) = self.get_context(self.active_context) else {
        return false;
    };

    match ctx.get_initial_track() {
        Some(initial_uri) if initial_uri == &track.uri => iteration.is_none_or(|i| i == 0),
        _ => false,
    }
}
/// Merges metadata (and, when provided, the uid) of the tracks in `new_page`
/// into the matching tracks of the default context.
///
/// Tracks without a usable uri are skipped; tracks that don't exist in the
/// current context are ignored. Returns `None` when there is no default
/// context or no page was given.
pub fn merge_context(&mut self, new_page: Option<ContextPage>) -> Option<()> {
    let current_context = self.get_context_mut(ContextType::Default).ok()?;

    for new_track in new_page?.tracks {
        if new_track.uri.is_none() || matches!(new_track.uri, Some(ref uri) if uri.is_empty()) {
            continue;
        }

        let new_track_uri = new_track.uri.unwrap_or_default();
        if let Ok(position) =
            Self::find_index_in_context(current_context, |t| t.uri == new_track_uri)
        {
            let context_track = current_context.tracks.get_mut(position)?;

            for (key, value) in new_track.metadata {
                context_track.metadata.insert(key, value);
            }

            // the uid provided from another context might be actual uid of an item
            // fix: only adopt a non-empty uid — the previous condition
            // (`is_some() || matches!(.., if uid.is_empty())`) also matched
            // `Some("")` and clobbered an existing uid with an empty string
            if let Some(uid) = new_track.uid {
                if !uid.is_empty() {
                    context_track.uid = uid;
                }
            }
        }
    }

    Some(())
}
/// Persists `new_index` as the fill-up position of the given context type.
pub(super) fn update_context_index(
    &mut self,
    ty: ContextType,
    new_index: usize,
) -> Result<(), StateError> {
    self.get_context_mut(ty)
        .map(|context| context.index.track = new_index as u32)
}
/// Converts a [ContextTrack] into a [ProvidedTrack].
///
/// The id is resolved from the track's uri (preferred) or raw gid; uris
/// containing query/escape characters (`?`, `%`) are rejected. Page metadata
/// is merged in first, so track metadata wins on conflicting keys.
///
/// # Errors
/// [StateError::InvalidTrackUri] when neither a usable uri nor gid is present.
pub fn context_to_provided_track(
    &self,
    ctx_track: &ContextTrack,
    context_uri: Option<&str>,
    context_index: Option<usize>,
    page_metadata: Option<&HashMap<String, String>>,
    provider: Option<Provider>,
) -> Result<ProvidedTrack, Error> {
    let id = match (ctx_track.uri.as_ref(), ctx_track.gid.as_ref()) {
        (Some(uri), _) if uri.contains(['?', '%']) => {
            Err(StateError::InvalidTrackUri(Some(uri.clone())))?
        }
        (Some(uri), _) if !uri.is_empty() => SpotifyUri::from_uri(uri)?,
        (_, Some(gid)) if !gid.is_empty() => SpotifyUri::Track {
            id: SpotifyId::from_raw(gid)?,
        },
        _ => Err(StateError::InvalidTrackUri(None))?,
    };

    // NOTE(review): gid-derived uris apparently serialize with an "unknown"
    // item type which is normalized to "track" here — confirm
    let uri = id.to_uri()?.replace("unknown", "track");

    let provider = if self.unavailable_uri.contains(&uri) {
        Provider::Unavailable
    } else {
        provider.unwrap_or(Provider::Context)
    };

    // assumption: the uid is used as unique-id of any item
    // - queue resorting is done by each client and orients itself by the given uid
    // - if no uid is present, resorting doesn't work or behaves not as intended
    let uid = match ctx_track.uid.as_ref() {
        Some(uid) if !uid.is_empty() => uid.to_string(),
        // so providing a unique id should allow to resort the queue
        _ => Uuid::new_v4().as_simple().to_string(),
    };

    // page metadata first, then track metadata on top
    let mut metadata = page_metadata.cloned().unwrap_or_default();
    for (k, v) in &ctx_track.metadata {
        metadata.insert(k.to_string(), v.to_string());
    }

    let mut track = ProvidedTrack {
        uri,
        uid,
        metadata,
        provider: provider.to_string(),
        ..Default::default()
    };

    if let Some(context_uri) = context_uri {
        track.set_entity_uri(context_uri);
        track.set_context_uri(context_uri);
    }

    if let Some(index) = context_index {
        track.set_context_index(index);
    }

    if matches!(provider, Provider::Autoplay) {
        track.set_from_autoplay(true)
    }

    Ok(track)
}
/// Converts `page` into provided tracks and appends them to the default
/// context, continuing the context-index numbering at the current length.
pub fn fill_context_from_page(&mut self, page: ContextPage) -> Result<(), Error> {
    let current_len = self.context.as_ref().map(|c| c.tracks.len());
    let converted =
        self.state_context_from_page(page, HashMap::new(), None, None, current_len, None);

    match self.context.as_mut() {
        None => Err(StateError::NoContext(ContextType::Default))?,
        Some(ctx) => {
            for track in converted.tracks {
                ctx.tracks.push(track)
            }
        }
    }

    Ok(())
}
}

View file

@ -1,57 +0,0 @@
use crate::{
core::{Error, dealer::protocol::SetQueueCommand},
state::{
ConnectState,
context::{ContextType, ResetContext},
metadata::Metadata,
},
};
use protobuf::MessageField;
impl ConnectState {
    /// Applies a shuffle toggle.
    ///
    /// Enabling shuffles the default context with a fresh seed. Disabling
    /// restores the linear order and re-anchors playback at the current track.
    pub fn handle_shuffle(&mut self, shuffle: bool) -> Result<(), Error> {
        self.set_shuffle(shuffle);

        if shuffle {
            return self.shuffle_new();
        }

        self.reset_context(ResetContext::DefaultIndex);

        // nothing to re-anchor when no track is playing
        if self.current_track(MessageField::is_none) {
            return Ok(());
        }

        match self.current_track(|t| t.get_context_index()) {
            Some(current_index) => self.reset_playback_to_position(Some(current_index)),
            None => {
                // no stored index: locate the current track in the context by uri
                let ctx = self.get_context(ContextType::Default)?;
                let current_index = ConnectState::find_index_in_context(ctx, |c| {
                    self.current_track(|t| c.uri == t.uri)
                })?;
                self.reset_playback_to_position(Some(current_index))
            }
        }
    }

    /// Replaces next and prev tracks from a set-queue command and bumps the
    /// queue revision so clients refresh their view.
    pub fn handle_set_queue(&mut self, set_queue: SetQueueCommand) {
        self.set_next_tracks(set_queue.next_tracks);
        self.set_prev_tracks(set_queue.prev_tracks);
        self.update_queue_revision();
    }

    /// Applies a repeat-context toggle and re-anchors playback at the
    /// current track.
    pub fn handle_set_repeat_context(&mut self, repeat: bool) -> Result<(), Error> {
        self.set_repeat_context(repeat);

        if repeat {
            // repeating loops the default context, so fill up from it again
            // instead of from autoplay
            if let ContextType::Autoplay = self.fill_up_context {
                self.fill_up_context = ContextType::Default;
            }
        }

        let ctx = self.get_context(ContextType::Default)?;
        let current_track =
            ConnectState::find_index_in_context(ctx, |t| self.current_track(|t| &t.uri) == &t.uri)?;
        self.reset_playback_to_position(Some(current_track))
    }
}

View file

@ -1,87 +0,0 @@
use crate::{
protocol::{context::Context, context_track::ContextTrack, player::ProvidedTrack},
state::context::StateContext,
};
use std::collections::HashMap;
use std::fmt::Display;
// metadata keys as they appear in the player/context protobuf maps
const CONTEXT_URI: &str = "context_uri";
const ENTITY_URI: &str = "entity_uri";
const IS_QUEUED: &str = "is_queued";
const IS_AUTOPLAY: &str = "autoplay.is_autoplay";
const HIDDEN: &str = "hidden";
const ITERATION: &str = "iteration";

// custom keys (librespot-specific, see the CUSTOM_ prefix)
const CUSTOM_CONTEXT_INDEX: &str = "context_index";
const CUSTOM_SHUFFLE_SEED: &str = "shuffle_seed";
const CUSTOM_INITIAL_TRACK: &str = "initial_track";
/// Generates a getter, setter and remover for one metadata entry.
///
/// Short form: the getter is backed by `get` and returns `Option<&String>`.
/// Long form (`$get_key use $get ... -> $ty`): picks the backing accessor
/// (e.g. `get_bool`, `get_usize`) and its return type explicitly.
macro_rules! metadata_entry {
    ( $get:ident, $set:ident, $clear:ident ($key:ident: $entry:ident)) => {
        metadata_entry!( $get use get, $set, $clear ($key: $entry) -> Option<&String> );
    };
    ( $get_key:ident use $get:ident, $set:ident, $clear:ident ($key:ident: $entry:ident) -> $ty:ty ) => {
        fn $get_key (&self) -> $ty {
            self.$get($entry)
        }

        // the setter accepts anything printable and stores its string form
        fn $set (&mut self, $key: impl Display) {
            self.metadata_mut().insert($entry.to_string(), $key.to_string());
        }

        fn $clear(&mut self) {
            self.metadata_mut().remove($entry);
        }
    };
}
/// Allows easy access of known metadata fields
#[allow(dead_code)]
pub(super) trait Metadata {
    /// Read access to the underlying key/value map.
    fn metadata(&self) -> &HashMap<String, String>;
    /// Write access to the underlying key/value map.
    fn metadata_mut(&mut self) -> &mut HashMap<String, String>;

    /// True only when the entry exists and is literally the string "true".
    fn get_bool(&self, entry: &str) -> bool {
        matches!(self.metadata().get(entry), Some(entry) if entry.eq("true"))
    }

    /// Parses the entry as usize; `None` when absent or not a number.
    fn get_usize(&self, entry: &str) -> Option<usize> {
        self.metadata().get(entry)?.parse().ok()
    }

    fn get(&self, entry: &str) -> Option<&String> {
        self.metadata().get(entry)
    }

    // generated accessors (see `metadata_entry!`): getter, setter, remover
    metadata_entry!(is_from_queue use get_bool, set_from_queue, remove_from_queue (is_queued: IS_QUEUED) -> bool);
    metadata_entry!(is_from_autoplay use get_bool, set_from_autoplay, remove_from_autoplay (is_autoplay: IS_AUTOPLAY) -> bool);
    metadata_entry!(is_hidden use get_bool, set_hidden, remove_hidden (is_hidden: HIDDEN) -> bool);
    metadata_entry!(get_context_index use get_usize, set_context_index, remove_context_index (context_index: CUSTOM_CONTEXT_INDEX) -> Option<usize>);
    metadata_entry!(get_context_uri, set_context_uri, remove_context_uri (context_uri: CONTEXT_URI));
    metadata_entry!(get_entity_uri, set_entity_uri, remove_entity_uri (entity_uri: ENTITY_URI));
    metadata_entry!(get_iteration, set_iteration, remove_iteration (iteration: ITERATION));
    metadata_entry!(get_shuffle_seed, set_shuffle_seed, remove_shuffle_seed (shuffle_seed: CUSTOM_SHUFFLE_SEED));
    metadata_entry!(get_initial_track, set_initial_track, remove_initial_track (initial_track: CUSTOM_INITIAL_TRACK));
}
/// Implements [Metadata] for a type that has a `metadata` map field.
macro_rules! impl_metadata {
    ($impl_for:ident) => {
        impl Metadata for $impl_for {
            fn metadata(&self) -> &HashMap<String, String> {
                &self.metadata
            }

            fn metadata_mut(&mut self) -> &mut HashMap<String, String> {
                &mut self.metadata
            }
        }
    };
}
// wire up the Metadata accessors for every type carrying a metadata map
impl_metadata!(ContextTrack);
impl_metadata!(ProvidedTrack);
impl_metadata!(Context);
impl_metadata!(StateContext);

View file

@ -1,113 +0,0 @@
use crate::{
core::Error,
protocol::player::ContextPlayerOptions,
state::{
ConnectState, StateError,
context::{ContextType, ResetContext},
metadata::Metadata,
},
};
use protobuf::MessageField;
use rand::Rng;
/// Snapshot of a shuffle session: enough to reproduce the exact same order.
#[derive(Default, Debug)]
pub(crate) struct ShuffleState {
    // seed for the deterministic shuffle
    pub seed: u64,
    // uri of the track the shuffle was started from
    pub initial_track: String,
}
impl ConnectState {
    /// Ensures the player options message exists before it is mutated.
    fn add_options_if_empty(&mut self) {
        if self.player().options.is_none() {
            self.player_mut().options = MessageField::some(ContextPlayerOptions::new())
        }
    }

    /// Sets the repeat-context flag in the player options.
    pub fn set_repeat_context(&mut self, repeat: bool) {
        self.add_options_if_empty();
        if let Some(options) = self.player_mut().options.as_mut() {
            options.repeating_context = repeat;
        }
    }

    /// Sets the repeat-track flag in the player options.
    pub fn set_repeat_track(&mut self, repeat: bool) {
        self.add_options_if_empty();
        if let Some(options) = self.player_mut().options.as_mut() {
            options.repeating_track = repeat;
        }
    }

    /// Sets the shuffle flag in the player options.
    pub fn set_shuffle(&mut self, shuffle: bool) {
        self.add_options_if_empty();
        if let Some(options) = self.player_mut().options.as_mut() {
            options.shuffling_context = shuffle;
        }
    }

    /// Disables shuffle and both repeat modes.
    pub fn reset_options(&mut self) {
        self.set_shuffle(false);
        self.set_repeat_track(false);
        self.set_repeat_context(false);
    }

    /// Errors when the current restrictions disallow toggling shuffle.
    fn validate_shuffle_allowed(&self) -> Result<(), Error> {
        if let Some(reason) = self
            .player()
            .restrictions
            .disallow_toggling_shuffle_reasons
            .first()
        {
            Err(StateError::CurrentlyDisallowed {
                action: "shuffle",
                reason: reason.clone(),
            })?
        } else {
            Ok(())
        }
    }

    /// Re-applies a previous shuffle (same seed and anchor track), e.g. after
    /// a transfer from another device.
    pub fn shuffle_restore(&mut self, shuffle_state: ShuffleState) -> Result<(), Error> {
        self.validate_shuffle_allowed()?;
        self.shuffle(shuffle_state.seed, &shuffle_state.initial_track)
    }

    /// Shuffles the context with a freshly generated seed, anchored on the
    /// currently playing track.
    pub fn shuffle_new(&mut self) -> Result<(), Error> {
        self.validate_shuffle_allowed()?;

        // 12-digit seed, matching the magnitude of seeds seen from spotify
        let new_seed = rand::rng().random_range(100_000_000_000..1_000_000_000_000);

        let current_track = self.current_track(|t| t.uri.clone());
        self.shuffle(new_seed, &current_track)
    }

    /// Performs the actual shuffle: clears history and queue, reshuffles the
    /// default context deterministically and refills the next tracks.
    fn shuffle(&mut self, seed: u64, initial_track: &str) -> Result<(), Error> {
        self.clear_prev_track();
        self.clear_next_tracks();
        self.reset_context(ResetContext::DefaultIndex);

        let ctx = self.get_context_mut(ContextType::Default)?;
        ctx.tracks
            .shuffle_with_seed(seed, |f| f.uri == initial_track);
        // remember seed and anchor so the shuffle can be restored later
        ctx.set_initial_track(initial_track);
        ctx.set_shuffle_seed(seed);

        self.fill_up_next_tracks()?;

        Ok(())
    }

    pub fn shuffling_context(&self) -> bool {
        self.player().options.shuffling_context
    }

    pub fn repeat_context(&self) -> bool {
        self.player().options.repeating_context
    }

    pub fn repeat_track(&self) -> bool {
        self.player().options.repeating_track
    }
}

View file

@ -1,66 +0,0 @@
use librespot_protocol::player::ProvidedTrack;
use std::fmt::{Display, Formatter};
// provider strings used by the official spotify clients
const PROVIDER_CONTEXT: &str = "context";
const PROVIDER_QUEUE: &str = "queue";
const PROVIDER_AUTOPLAY: &str = "autoplay";

// custom providers, used to identify certain states that we can't handle preemptively, yet
/// it seems like spotify just knows that the track isn't available, currently we don't have an
/// option to do the same, so we stay with the old solution for now
const PROVIDER_UNAVAILABLE: &str = "unavailable";
/// The source a track in the player state originates from.
#[derive(Debug, Clone)]
pub enum Provider {
    // part of the loaded context
    Context,
    // explicitly queued by the user
    Queue,
    // provided by the autoplay continuation
    Autoplay,
    // known to be unplayable (see PROVIDER_UNAVAILABLE above)
    Unavailable,
}
impl Display for Provider {
    /// Writes the wire-format provider string (see the PROVIDER_* consts).
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            Provider::Context => PROVIDER_CONTEXT,
            Provider::Queue => PROVIDER_QUEUE,
            Provider::Autoplay => PROVIDER_AUTOPLAY,
            Provider::Unavailable => PROVIDER_UNAVAILABLE,
        };
        f.write_str(name)
    }
}
/// Convenience checks for the string-typed `provider` field of a track.
pub trait IsProvider {
    fn is_autoplay(&self) -> bool;
    fn is_context(&self) -> bool;
    fn is_queue(&self) -> bool;
    fn is_unavailable(&self) -> bool;

    /// Overwrites the provider with the given variant's wire string.
    fn set_provider(&mut self, provider: Provider);
}
impl IsProvider for ProvidedTrack {
    // compare against the wire-format provider strings
    fn is_autoplay(&self) -> bool {
        self.provider.as_str() == PROVIDER_AUTOPLAY
    }

    fn is_context(&self) -> bool {
        self.provider.as_str() == PROVIDER_CONTEXT
    }

    fn is_queue(&self) -> bool {
        self.provider.as_str() == PROVIDER_QUEUE
    }

    fn is_unavailable(&self) -> bool {
        self.provider.as_str() == PROVIDER_UNAVAILABLE
    }

    fn set_provider(&mut self, provider: Provider) {
        self.provider = provider.to_string()
    }
}

View file

@ -1,62 +0,0 @@
use crate::state::ConnectState;
use crate::state::provider::IsProvider;
use librespot_protocol::player::Restrictions;
use protobuf::MessageField;
impl ConnectState {
    /// Resets both restriction messages to empty, i.e. everything allowed.
    pub fn clear_restrictions(&mut self) {
        let player = self.player_mut();
        // use MessageField::some(..) for consistency with the rest of the state code
        player.context_restrictions = MessageField::some(Default::default());
        player.restrictions = MessageField::some(Default::default());
    }

    /// Recomputes the player restrictions from the current playback state:
    /// pause/resume, skipping/peeking prev and (for autoplay tracks) the
    /// shuffle/repeat toggles.
    pub fn update_restrictions(&mut self) {
        const NO_PREV: &str = "no previous tracks";
        const AUTOPLAY: &str = "autoplay";

        let prev_tracks_is_empty = self.prev_tracks().is_empty();
        let is_paused = self.is_pause();
        let is_playing = self.is_playing();

        let player = self.player_mut();
        // create the restrictions message up front; previously the
        // pause/resume reasons below were silently skipped whenever the
        // message didn't exist yet
        if player.restrictions.is_none() {
            player.restrictions = MessageField::some(Restrictions::new())
        }

        if let Some(restrictions) = player.restrictions.as_mut() {
            if is_playing {
                restrictions.disallow_pausing_reasons.clear();
                restrictions.disallow_resuming_reasons = vec!["not_paused".to_string()]
            }

            if is_paused {
                restrictions.disallow_resuming_reasons.clear();
                restrictions.disallow_pausing_reasons = vec!["not_playing".to_string()]
            }

            if prev_tracks_is_empty {
                restrictions.disallow_peeking_prev_reasons = vec![NO_PREV.to_string()];
                restrictions.disallow_skipping_prev_reasons = vec![NO_PREV.to_string()];
            } else {
                restrictions.disallow_peeking_prev_reasons.clear();
                restrictions.disallow_skipping_prev_reasons.clear();
            }

            // shuffle/repeat make no sense while playing an autoplay track
            if player.track.is_autoplay() {
                restrictions.disallow_toggling_shuffle_reasons = vec![AUTOPLAY.to_string()];
                restrictions.disallow_toggling_repeat_context_reasons = vec![AUTOPLAY.to_string()];
                restrictions.disallow_toggling_repeat_track_reasons = vec![AUTOPLAY.to_string()];
            } else {
                restrictions.disallow_toggling_shuffle_reasons.clear();
                restrictions
                    .disallow_toggling_repeat_context_reasons
                    .clear();
                restrictions.disallow_toggling_repeat_track_reasons.clear();
            }
        }
    }
}

View file

@ -1,439 +0,0 @@
use crate::{
core::{Error, SpotifyUri},
protocol::player::ProvidedTrack,
state::{
ConnectState, SPOTIFY_MAX_NEXT_TRACKS_SIZE, SPOTIFY_MAX_PREV_TRACKS_SIZE, StateError,
context::ContextType,
metadata::Metadata,
provider::{IsProvider, Provider},
},
};
use protobuf::MessageField;
use rand::Rng;
// identifier used as part of the uid of delimiter tracks, which mark the
// boundary between repeat/autoplay iterations in the queue
pub const IDENTIFIER_DELIMITER: &str = "delimiter";
impl<'ct> ConnectState {
/// Builds a hidden marker track separating two iterations in the queue.
fn new_delimiter(iteration: i64) -> ProvidedTrack {
    let uri = format!("spotify:{IDENTIFIER_DELIMITER}");
    let uid = format!("{IDENTIFIER_DELIMITER}{iteration}");

    let mut delimiter = ProvidedTrack {
        uri,
        uid,
        provider: Provider::Context.to_string(),
        ..Default::default()
    };
    // hidden so clients don't render it; the iteration is kept as metadata
    delimiter.set_hidden(true);
    delimiter.set_iteration(iteration);

    delimiter
}
/// Appends `prev` to the play history, capping it at
/// SPOTIFY_MAX_PREV_TRACKS_SIZE by dropping the oldest entry.
fn push_prev(&mut self, prev: ProvidedTrack) {
    let prev_tracks = self.prev_tracks_mut();
    // add prev track, while preserving a length of 10
    if prev_tracks.len() >= SPOTIFY_MAX_PREV_TRACKS_SIZE {
        // todo: O(n), but technically only maximal O(SPOTIFY_MAX_PREV_TRACKS_SIZE) aka O(10)
        let _ = prev_tracks.remove(0);
    }
    prev_tracks.push(prev)
}
/// Pops the upcoming track from the front of the next-tracks list, if any.
fn get_next_track(&mut self) -> Option<ProvidedTrack> {
    let next_tracks = self.next_tracks_mut();
    if next_tracks.is_empty() {
        None
    } else {
        // todo: O(n), but technically only maximal O(SPOTIFY_MAX_NEXT_TRACKS_SIZE) aka O(80)
        Some(next_tracks.remove(0))
    }
}
/// Play history, bottom => top, aka the last track of the list is the prev track
fn prev_tracks_mut(&mut self) -> &mut Vec<ProvidedTrack> {
    &mut self.player_mut().prev_tracks
}

/// Play history, bottom => top, aka the last track of the list is the prev track
pub(super) fn prev_tracks(&self) -> &Vec<ProvidedTrack> {
    &self.player().prev_tracks
}

/// Upcoming tracks, top => bottom, aka the first track of the list is the next track
fn next_tracks_mut(&mut self) -> &mut Vec<ProvidedTrack> {
    &mut self.player_mut().next_tracks
}

/// Upcoming tracks, top => bottom, aka the first track of the list is the next track
pub(super) fn next_tracks(&self) -> &Vec<ProvidedTrack> {
    &self.player().next_tracks
}
/// Jumps to a uniformly random track of the active context.
// NOTE(review): `random_range(0..0)` panics, so this assumes the active
// context is non-empty — confirm callers guarantee that
pub fn set_current_track_random(&mut self) -> Result<(), Error> {
    let max_tracks = self.get_context(self.active_context)?.tracks.len();
    let rng_track = rand::rng().random_range(0..max_tracks);
    self.set_current_track(rng_track)
}
/// Sets the current track to the track at `index` of the active context and
/// updates the player index accordingly.
///
/// # Errors
/// [StateError::CanNotFindTrackInContext] when `index` is out of bounds.
pub fn set_current_track(&mut self, index: usize) -> Result<(), Error> {
    let context = self.get_context(self.active_context)?;

    let new_track = context
        .tracks
        .get(index)
        .ok_or(StateError::CanNotFindTrackInContext(
            Some(index),
            context.tracks.len(),
        ))?;

    debug!(
        "set track to: {} at {} of {} tracks",
        new_track.uri,
        index,
        context.tracks.len()
    );

    self.set_track(new_track.clone());
    self.update_current_index(|i| i.track = index as u32);

    Ok(())
}
/// Move to the next track
///
/// Updates the current track to the next track. Adds the old track
/// to prev tracks and fills up the next tracks from the current context.
/// Returns the new track index, or `None` when the queue is exhausted.
pub fn next_track(&mut self) -> Result<Option<u32>, Error> {
    // when we skip in repeat track, we don't repeat the current track anymore
    if self.repeat_track() {
        self.set_repeat_track(false);
    }

    let old_track = self.player_mut().track.take();

    if let Some(old_track) = old_track {
        // only add songs from our context to our previous tracks
        if old_track.is_context() || old_track.is_autoplay() {
            self.push_prev(old_track)
        }
    }

    // skip over delimiters (moved into history) and unavailable tracks (dropped)
    let new_track = loop {
        match self.get_next_track() {
            Some(next) if next.uid.starts_with(IDENTIFIER_DELIMITER) => {
                self.push_prev(next);
                continue;
            }
            Some(next) if next.is_unavailable() => continue,
            other => break other,
        };
    };

    let new_track = match new_track {
        None => return Ok(None),
        Some(t) => t,
    };

    self.fill_up_next_tracks()?;

    let update_index = if new_track.is_queue() {
        // queued tracks have no position inside the context
        None
    } else if new_track.is_autoplay() {
        self.set_active_context(ContextType::Autoplay);
        None
    } else {
        match new_track.get_context_index() {
            Some(new_index) => Some(new_index as u32),
            None => {
                error!("the given context track had no set context_index");
                None
            }
        }
    };

    if let Some(update_index) = update_index {
        self.update_current_index(|i| i.track = update_index)
    } else {
        self.player_mut().index.clear()
    }

    self.set_track(new_track);
    self.update_restrictions();

    Ok(Some(self.player().index.track))
}
/// Move to the prev track
///
/// Updates the current track to the prev track. Adds the old track
/// to next tracks (when from the context) and fills up the prev tracks from the
/// current context. Returns `None` when there is no previous track.
pub fn prev_track(&mut self) -> Result<Option<&MessageField<ProvidedTrack>>, Error> {
    let old_track = self.player_mut().track.take();

    if let Some(old_track) = old_track {
        if old_track.is_context() || old_track.is_autoplay() {
            // todo: O(n)
            self.next_tracks_mut().insert(0, old_track);
        }
    }

    // handle possible delimiter: move it back to the front of next tracks
    if matches!(self.prev_tracks().last(), Some(prev) if prev.uid.starts_with(IDENTIFIER_DELIMITER))
    {
        let delimiter = self
            .prev_tracks_mut()
            .pop()
            .expect("item that was prechecked");

        let next_tracks = self.next_tracks_mut();
        if next_tracks.len() >= SPOTIFY_MAX_NEXT_TRACKS_SIZE {
            let _ = next_tracks.pop();
        }
        // todo: O(n)
        next_tracks.insert(0, delimiter)
    }

    // enforce the maximum queue length after the inserts above
    while self.next_tracks().len() > SPOTIFY_MAX_NEXT_TRACKS_SIZE {
        let _ = self.next_tracks_mut().pop();
    }

    let new_track = match self.prev_tracks_mut().pop() {
        None => return Ok(None),
        Some(t) => t,
    };

    if matches!(self.active_context, ContextType::Autoplay if new_track.is_context()) {
        // transition back to default context
        self.set_active_context(ContextType::Default);
    }

    self.fill_up_next_tracks()?;

    self.set_track(new_track);

    if self.player().index.track == 0 {
        warn!("prev: trying to skip into negative, index update skipped")
    } else {
        self.update_current_index(|i| i.track -= 1)
    }

    self.update_restrictions();

    Ok(Some(self.current_track(|t| t)))
}
/// Applies `access` to the current track field (which may be none).
pub fn current_track<F: Fn(&'ct MessageField<ProvidedTrack>) -> R, R>(
    &'ct self,
    access: F,
) -> R {
    access(&self.player().track)
}

/// Replaces the currently playing track.
pub fn set_track(&mut self, track: ProvidedTrack) {
    self.player_mut().track = MessageField::some(track)
}
/// Replaces the upcoming tracks with a client-provided list, normalizing
/// queued entries and repairing empty uris.
pub fn set_next_tracks(&mut self, mut tracks: Vec<ProvidedTrack>) {
    // mobile only sends a set_queue command instead of an add_to_queue command
    // in addition to handling the mobile add_to_queue handling, this should also handle
    // a mass queue addition
    tracks
        .iter_mut()
        .filter(|t| t.is_from_queue())
        .for_each(|t| {
            t.set_provider(Provider::Queue);
            // technically we could preserve the queue-uid here,
            // but it seems to work without that, so we just override it
            t.uid = format!("q{}", self.queue_count);
            self.queue_count += 1;
        });

    // when you drag 'n drop the current track in the queue view into the "Next from: ..."
    // section, it is only sent as an empty item with just the provider and metadata, so we
    // have to set the uri from the current track manually
    tracks
        .iter_mut()
        .filter(|t| t.uri.is_empty())
        .for_each(|t| t.uri = self.current_track(|ct| ct.uri.clone()));

    self.player_mut().next_tracks = tracks;
}
/// Replaces the play history with the given tracks.
pub fn set_prev_tracks(&mut self, tracks: Vec<ProvidedTrack>) {
    self.player_mut().prev_tracks = tracks;
}

/// Drops the entire play history.
pub fn clear_prev_track(&mut self) {
    self.prev_tracks_mut().clear()
}
/// Clears the upcoming tracks while keeping the user's queued tracks.
///
/// Queued tracks always sit at the front of the next-tracks list, so the
/// leading run of queued entries is preserved and everything after the
/// first non-queued track is dropped.
pub fn clear_next_tracks(&mut self) {
    // respect queued tracks and don't throw them out of our next played tracks
    let queued = self
        .next_tracks()
        .iter()
        .take_while(|track| track.is_queue())
        .count();
    // same effect as the previous find + pop-loop, but a single truncate
    self.next_tracks_mut().truncate(queued);
}
/// Refills the next-tracks list up to SPOTIFY_MAX_NEXT_TRACKS_SIZE from the
/// current fill-up context.
///
/// When the context runs out it either wraps around (repeat-context),
/// transitions to the autoplay context, or stops. Delimiter tracks mark each
/// wrap/transition. The final fill-up position is persisted and the queue
/// revision is bumped.
pub fn fill_up_next_tracks(&mut self) -> Result<(), Error> {
    let ctx = self.get_context(self.fill_up_context)?;
    let mut new_index = ctx.index.track as usize;
    let mut iteration = ctx.index.page;

    while self.next_tracks().len() < SPOTIFY_MAX_NEXT_TRACKS_SIZE {
        let ctx = self.get_context(self.fill_up_context)?;
        let track = match ctx.tracks.get(new_index) {
            // context exhausted + repeat: wrap around to the start
            None if self.repeat_context() => {
                let delimiter = Self::new_delimiter(iteration.into());
                iteration += 1;
                new_index = 0;
                delimiter
            }
            // context exhausted, autoplay available: switch fill-up source
            None if !matches!(self.fill_up_context, ContextType::Autoplay)
                && self.autoplay_context.is_some()
                && !self.repeat_context() =>
            {
                self.update_context_index(self.fill_up_context, new_index)?;

                // transition to autoplay as fill up context
                self.fill_up_context = ContextType::Autoplay;
                new_index = self.get_context(ContextType::Autoplay)?.index.track as usize;

                // add delimiter to only display the current context
                Self::new_delimiter(iteration.into())
            }
            None if self.autoplay_context.is_some() => {
                match self
                    .get_context(ContextType::Autoplay)?
                    .tracks
                    .get(new_index)
                {
                    None => break,
                    Some(ct) => {
                        new_index += 1;
                        ct.clone()
                    }
                }
            }
            None => break,
            Some(ct) if ct.is_unavailable() || self.is_skip_track(ct, Some(iteration)) => {
                debug!(
                    "skipped track {} during fillup as it's unavailable or should be skipped",
                    ct.uri
                );
                new_index += 1;
                continue;
            }
            Some(ct) => {
                new_index += 1;
                ct.clone()
            }
        };

        self.next_tracks_mut().push(track);
    }

    debug!(
        "finished filling up next_tracks ({})",
        self.next_tracks().len()
    );

    self.update_context_index(self.fill_up_context, new_index)?;

    // the web-player needs a revision update, otherwise the queue isn't updated in the ui
    self.update_queue_revision();

    Ok(())
}
/// Returns the uri of the track that would play next, without advancing.
pub fn preview_next_track(&mut self) -> Option<SpotifyUri> {
    let upcoming_uri = match self.repeat_track() {
        // repeating a single track: the current one plays again
        true => self.current_track(|t| &t.uri),
        false => &self.next_tracks().first()?.uri,
    };
    SpotifyUri::from_uri(upcoming_uri).ok()
}

/// Whether at least `min` (default: one) upcoming tracks are available.
pub fn has_next_tracks(&self, min: Option<usize>) -> bool {
    let available = self.next_tracks().len();
    min.map_or(available > 0, |min| available >= min)
}
/// Returns the uris of all previous tracks followed by the current track,
/// oldest first.
pub fn recent_track_uris(&self) -> Vec<String> {
    let current_uri = self.current_track(|t| t.uri.clone());
    self.prev_tracks()
        .iter()
        .map(|t| t.uri.clone())
        .chain(std::iter::once(current_uri))
        .collect()
}
/// Marks `id` as unavailable: removes all occurrences from next and prev
/// tracks, remembers the uri and refills the queue.
pub fn mark_unavailable(&mut self, id: &SpotifyUri) -> Result<(), Error> {
    let uri = id.to_uri()?;
    debug!("marking {uri} as unavailable");

    let next_tracks = self.next_tracks_mut();
    while let Some(pos) = next_tracks.iter().position(|t| t.uri == uri) {
        let _ = next_tracks.remove(pos);
    }

    // NOTE(review): after the removals above no exact matches remain, so
    // `mark_as_unavailable_for_match` presumably matches differently — verify
    for next_track in next_tracks {
        Self::mark_as_unavailable_for_match(next_track, &uri)
    }

    let prev_tracks = self.prev_tracks_mut();
    while let Some(pos) = prev_tracks.iter().position(|t| t.uri == uri) {
        let _ = prev_tracks.remove(pos);
    }

    for prev_track in prev_tracks {
        Self::mark_as_unavailable_for_match(prev_track, &uri)
    }

    self.unavailable_uri.push(uri);
    self.fill_up_next_tracks()?;
    self.update_queue_revision();

    Ok(())
}
/// Inserts `track` behind the last queued track, assigning a fresh queue
/// uid. Bumps the queue revision unless `rev_update` is false (used when
/// adding many tracks in a row).
pub fn add_to_queue(&mut self, mut track: ProvidedTrack, rev_update: bool) {
    // queue uids are monotonically numbered: q0, q1, ...
    track.uid = format!("q{}", self.queue_count);
    self.queue_count += 1;

    track.set_provider(Provider::Queue);
    if !track.is_from_queue() {
        track.set_from_queue(true);
    }

    let next_tracks = self.next_tracks_mut();
    if let Some(next_not_queued_track) = next_tracks.iter().position(|t| !t.is_queue()) {
        next_tracks.insert(next_not_queued_track, track);
    } else {
        next_tracks.push(track)
    }

    // enforce the maximum queue length
    while next_tracks.len() > SPOTIFY_MAX_NEXT_TRACKS_SIZE {
        next_tracks.pop();
    }

    if rev_update {
        self.update_queue_revision();
    }
    self.update_restrictions();
}
}

View file

@ -1,188 +0,0 @@
use crate::{
core::Error,
protocol::{player::ProvidedTrack, transfer_state::TransferState},
state::{
context::ContextType,
metadata::Metadata,
options::ShuffleState,
provider::{IsProvider, Provider},
{ConnectState, StateError},
},
};
use protobuf::MessageField;
impl ConnectState {
/// Resolves the track to play from a transfer payload: the first queued
/// track when the remote device was playing from the queue, otherwise the
/// transferred current track.
pub fn current_track_from_transfer(
    &self,
    transfer: &TransferState,
) -> Result<ProvidedTrack, Error> {
    let track = if transfer.queue.is_playing_queue.unwrap_or_default() {
        transfer.queue.tracks.first()
    } else {
        transfer.playback.current_track.as_ref()
    }
    .ok_or(StateError::CouldNotResolveTrackFromTransfer)?;

    self.context_to_provided_track(
        track,
        transfer.current_session.context.uri.as_deref(),
        None,
        None,
        // tag the track as queued when it came from the queue
        transfer
            .queue
            .is_playing_queue
            .unwrap_or_default()
            .then_some(Provider::Queue),
    )
}
/// Handles the initially transferable data: playback flags, options,
/// play origin, restrictions and context metadata. Also captures a possible
/// shuffle state so it can be restored in `finish_transfer`.
pub fn handle_initial_transfer(&mut self, transfer: &mut TransferState) {
    let current_context_metadata = self.context.as_ref().map(|c| c.metadata.clone());
    let player = self.player_mut();

    player.is_buffering = false;

    if let Some(options) = transfer.options.take() {
        player.options = MessageField::some(options.into());
    }
    player.is_paused = transfer.playback.is_paused.unwrap_or_default();
    player.is_playing = !player.is_paused;

    // normalize an unset/zero speed to 1.0
    match transfer.playback.playback_speed {
        Some(speed) if speed != 0. => player.playback_speed = speed,
        _ => player.playback_speed = 1.,
    }

    let mut shuffle_seed = None;
    let mut initial_track = None;

    if let Some(session) = transfer.current_session.as_mut() {
        player.play_origin = session.play_origin.take().map(Into::into).into();
        player.suppressions = session.suppressions.take().map(Into::into).into();

        // maybe at some point we can use the shuffle seed provided by spotify,
        // but I doubt it, as spotify doesn't use true randomness but rather an algorithm
        // based shuffle
        trace!(
            "shuffle_seed: <{:?}> (spotify), <{:?}> (own)",
            session.shuffle_seed,
            session.context.get_shuffle_seed()
        );

        shuffle_seed = session
            .context
            .get_shuffle_seed()
            .and_then(|seed| seed.parse().ok());
        initial_track = session.context.get_initial_track().cloned();

        if let Some(mut ctx) = session.context.take() {
            player.restrictions = ctx.restrictions.take().map(Into::into).into();
            for (key, value) in ctx.metadata {
                player.context_metadata.insert(key, value);
            }
        }
    }

    player.context_url.clear();
    player.context_uri.clear();

    // keep the metadata of the context we already had loaded
    if let Some(metadata) = current_context_metadata {
        for (key, value) in metadata {
            player.context_metadata.insert(key, value);
        }
    }

    // a restorable shuffle needs both the seed and the anchor track
    self.transfer_shuffle = match (shuffle_seed, initial_track) {
        (Some(seed), Some(initial_track)) => Some(ShuffleState {
            seed,
            initial_track,
        }),
        _ => None,
    };

    self.clear_prev_track();
    self.clear_next_tracks();
    self.update_queue_revision()
}
/// Completes the transfer: resolves the current track and its index in the
/// (default or autoplay) context, restores the queue, and re-applies a
/// transferred shuffle state when present.
pub fn finish_transfer(&mut self, transfer: TransferState) -> Result<(), Error> {
    let track = match self.player().track.as_ref() {
        None => self.current_track_from_transfer(&transfer)?,
        Some(track) => track.clone(),
    };

    let context_ty = if self.current_track(|t| t.is_from_autoplay()) {
        ContextType::Autoplay
    } else {
        ContextType::Default
    };

    self.set_active_context(context_ty);
    self.fill_up_context = context_ty;

    let ctx = self.get_context(self.active_context)?;

    let current_index = match transfer.current_session.current_uid.as_ref() {
        // playing from the queue: the uid points at the *next* context
        // track, so step one back
        Some(uid) if track.is_queue() => Self::find_index_in_context(ctx, |c| &c.uid == uid)
            .map(|i| if i > 0 { i - 1 } else { i }),
        _ => Self::find_index_in_context(ctx, |c| c.uri == track.uri || c.uid == track.uid),
    };

    debug!(
        "active track is <{}> with index {current_index:?} in {:?} context, has {} tracks",
        track.uri,
        self.active_context,
        ctx.tracks.len()
    );

    if self.player().track.is_none() {
        self.set_track(track);
    }

    let current_index = current_index.ok();
    if let Some(current_index) = current_index {
        self.update_current_index(|i| i.track = current_index as u32);
    }

    debug!(
        "setting up next and prev: index is at {current_index:?} while shuffle {}",
        self.shuffling_context()
    );

    for (i, track) in transfer.queue.tracks.iter().enumerate() {
        if transfer.queue.is_playing_queue.unwrap_or_default() && i == 0 {
            // if we are currently playing from the queue,
            // don't add the first queued item, because we are currently playing that item
            continue;
        }

        if let Ok(queued_track) = self.context_to_provided_track(
            track,
            Some(self.context_uri()),
            None,
            None,
            Some(Provider::Queue),
        ) {
            self.add_to_queue(queued_track, false);
        }
    }

    if self.shuffling_context() {
        self.set_current_track(current_index.unwrap_or_default())?;
        self.set_shuffle(true);

        // prefer the transferred shuffle state so the order matches the
        // sending device; otherwise reshuffle with a fresh seed
        match self.transfer_shuffle.take() {
            None => self.shuffle_new(),
            Some(state) => self.shuffle_restore(state),
        }?
    } else {
        self.reset_playback_to_position(current_index)?;
    }

    self.update_restrictions();

    Ok(())
}
}

View file

@ -2,63 +2,54 @@
# Build the docker image from the root of the project with the following command :
# $ docker build -t librespot-cross -f contrib/Dockerfile .
#
# The resulting image can be used to build librespot for linux x86_64, armhf, armel, aarch64
# The resulting image can be used to build librespot for linux x86_64, armhf(with support for armv6hf), armel, mipsel, aarch64
# $ docker run -v /tmp/librespot-build:/build librespot-cross
#
# The compiled binaries will be located in /tmp/librespot-build
#
# If only one architecture is desired, cargo can be invoked directly with the appropriate options :
# $ docker run -v /tmp/librespot-build:/build librespot-cross cargo build --release --no-default-features --features "alsa-backend with-libmdns rustls-tls-native-roots"
# $ docker run -v /tmp/librespot-build:/build librespot-cross cargo build --release --target arm-unknown-linux-gnueabihf --no-default-features --features "alsa-backend with-libmdns rustls-tls-native-roots"
# $ docker run -v /tmp/librespot-build:/build librespot-cross cargo build --release --target arm-unknown-linux-gnueabi --no-default-features --features "alsa-backend with-libmdns rustls-tls-native-roots"
# $ docker run -v /tmp/librespot-build:/build librespot-cross cargo build --release --target aarch64-unknown-linux-gnu --no-default-features --features "alsa-backend with-libmdns rustls-tls-native-roots"
# $ docker run -v /tmp/librespot-build:/build librespot-cross cargo build --release --no-default-features --features "alsa-backend"
# $ docker run -v /tmp/librespot-build:/build librespot-cross cargo build --release --target arm-unknown-linux-gnueabihf --no-default-features --features alsa-backend
# $ docker run -v /tmp/librespot-build:/build librespot-cross cargo build --release --target arm-unknown-linux-gnueabi --no-default-features --features alsa-backend
# $ docker run -v /tmp/librespot-build:/build librespot-cross cargo build --release --target aarch64-unknown-linux-gnu --no-default-features --features alsa-backend
FROM debian:bookworm
# $ docker run -v /tmp/librespot-build:/build librespot-cross contrib/docker-build-pi-armv6hf.sh
RUN echo "deb http://deb.debian.org/debian bookworm main" > /etc/apt/sources.list && \
echo "deb http://deb.debian.org/debian bookworm-updates main" >> /etc/apt/sources.list && \
echo "deb http://deb.debian.org/debian-security bookworm-security main" >> /etc/apt/sources.list
FROM debian:stretch
RUN dpkg --add-architecture arm64 && \
dpkg --add-architecture armhf && \
dpkg --add-architecture armel && \
apt-get update && \
apt-get install -y \
build-essential \
cmake \
crossbuild-essential-arm64 \
crossbuild-essential-armel \
crossbuild-essential-armhf \
curl \
git \
libasound2-dev \
libasound2-dev:arm64 \
libasound2-dev:armel \
libasound2-dev:armhf \
libclang-dev \
libpulse0 \
libpulse0:arm64 \
libpulse0:armel \
libpulse0:armhf \
pkg-config
RUN dpkg --add-architecture arm64
RUN dpkg --add-architecture armhf
RUN dpkg --add-architecture armel
RUN dpkg --add-architecture mipsel
RUN apt-get update
RUN apt-get install -y curl git build-essential crossbuild-essential-arm64 crossbuild-essential-armel crossbuild-essential-armhf crossbuild-essential-mipsel pkg-config
RUN apt-get install -y libasound2-dev libasound2-dev:arm64 libasound2-dev:armel libasound2-dev:armhf libasound2-dev:mipsel
RUN curl https://sh.rustup.rs -sSf | sh -s -- -y
ENV PATH="/root/.cargo/bin/:${PATH}"
RUN curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.85 -y && \
rustup target add aarch64-unknown-linux-gnu && \
rustup target add arm-unknown-linux-gnueabi && \
rustup target add arm-unknown-linux-gnueabihf && \
cargo install bindgen-cli && \
mkdir /.cargo && \
echo '[target.aarch64-unknown-linux-gnu]\nlinker = "aarch64-linux-gnu-gcc"' > /.cargo/config && \
echo '[target.arm-unknown-linux-gnueabihf]\nlinker = "arm-linux-gnueabihf-gcc"' >> /.cargo/config && \
echo '[target.arm-unknown-linux-gnueabi]\nlinker = "arm-linux-gnueabi-gcc"' >> /.cargo/config
RUN rustup target add aarch64-unknown-linux-gnu
RUN rustup target add arm-unknown-linux-gnueabi
RUN rustup target add arm-unknown-linux-gnueabihf
RUN rustup target add mipsel-unknown-linux-gnu
ENV CARGO_TARGET_DIR=/build
ENV CARGO_HOME=/build/cache
RUN mkdir /.cargo && \
echo '[target.aarch64-unknown-linux-gnu]\nlinker = "aarch64-linux-gnu-gcc"' > /.cargo/config && \
echo '[target.arm-unknown-linux-gnueabihf]\nlinker = "arm-linux-gnueabihf-gcc"' >> /.cargo/config && \
echo '[target.arm-unknown-linux-gnueabi]\nlinker = "arm-linux-gnueabi-gcc"' >> /.cargo/config && \
echo '[target.mipsel-unknown-linux-gnu]\nlinker = "mipsel-linux-gnu-gcc"' >> /.cargo/config
RUN mkdir /build && \
mkdir /pi-tools && \
curl -L https://github.com/raspberrypi/tools/archive/648a6eeb1e3c2b40af4eb34d88941ee0edeb3e9a.tar.gz | tar xz --strip-components 1 -C /pi-tools
ENV CARGO_TARGET_DIR /build
ENV CARGO_HOME /build/cache
ENV PKG_CONFIG_ALLOW_CROSS=1
ENV PKG_CONFIG_PATH_aarch64-unknown-linux-gnu=/usr/lib/aarch64-linux-gnu/pkgconfig/
ENV PKG_CONFIG_PATH_arm-unknown-linux-gnueabihf=/usr/lib/arm-linux-gnueabihf/pkgconfig/
ENV PKG_CONFIG_PATH_arm-unknown-linux-gnueabi=/usr/lib/arm-linux-gnueabi/pkgconfig/
ENV PKG_CONFIG_PATH_mipsel-unknown-linux-gnu=/usr/lib/mipsel-linux-gnu/pkgconfig/
ADD . /src
WORKDIR /src

View file

@ -1,44 +0,0 @@
# Cross compilation environment for librespot in armv6hf.
# Build the docker image from the root of the project with the following command:
# $ docker build -t librespot-cross-armv6hf -f contrib/cross-compile-armv6hf/Dockerfile .
#
# The resulting image can be used to build librespot for armv6hf:
# $ docker run -v /tmp/librespot-build-armv6hf:/build librespot-cross-armv6hf
#
# The compiled binary will be located in /tmp/librespot-build-armv6hf/arm-unknown-linux-gnueabihf/release/librespot
# Pin to amd64 so the image is reproducible on non-x86 Docker hosts
# (the Raspberry Pi cross toolchain below is an x86_64 binary).
FROM --platform=linux/amd64 ubuntu:18.04
# Install common packages.
RUN apt-get update
RUN apt-get install -y -qq git curl build-essential cmake clang libclang-dev libasound2-dev libpulse-dev
# Install armhf packages.
# The armhf ALSA libraries are extracted into a private sysroot rather than
# installed, to avoid conflicting with the host's amd64 packages.
RUN echo "deb [arch=armhf] http://ports.ubuntu.com/ubuntu-ports/ bionic main" | tee -a /etc/apt/sources.list
RUN apt-get update
RUN apt-get download libasound2:armhf libasound2-dev:armhf
RUN mkdir /sysroot && \
    dpkg -x libasound2_*.deb /sysroot/ && \
    dpkg -x libasound2-dev*.deb /sysroot/
# Install rust.
RUN curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.85 -y
ENV PATH="/root/.cargo/bin/:${PATH}"
RUN rustup target add arm-unknown-linux-gnueabihf
# Point cargo at the cross linker for the armhf target.
RUN mkdir /.cargo && \
    echo '[target.arm-unknown-linux-gnueabihf]\nlinker = "arm-linux-gnueabihf-gcc"' >> /.cargo/config
# Install Pi tools for armv6.
# The official Raspberry Pi toolchain targets armv6 (the stock Ubuntu armhf
# toolchain assumes armv7, which would not run on Pi 1 / Zero hardware).
RUN mkdir /pi && \
    git -C /pi clone --depth=1 https://github.com/raspberrypi/tools.git
# Build env variables.
ENV CARGO_TARGET_DIR=/build
ENV CARGO_HOME=/build/cache
ENV PATH="/pi/tools/arm-bcm2708/arm-linux-gnueabihf/bin:${PATH}"
ENV PKG_CONFIG_ALLOW_CROSS=1
ENV PKG_CONFIG_PATH_arm-unknown-linux-gnueabihf=/usr/lib/arm-linux-gnueabihf/pkgconfig/
ADD . /src
WORKDIR /src
CMD ["/src/contrib/cross-compile-armv6hf/docker-build.sh"]

View file

@ -1,17 +0,0 @@
#!/usr/bin/env bash
# Build script run inside the armv6hf cross-compile container.
# Compiles librespot for arm-unknown-linux-gnueabihf using the Raspberry Pi
# armv6 toolchain installed under /pi (see the accompanying Dockerfile).
set -eux

# bindgen is needed at build time to generate ALSA FFI bindings.
cargo install --force --locked bindgen-cli

PI1_TOOLS_DIR=/pi/tools/arm-bcm2708/arm-linux-gnueabihf
PI1_TOOLS_SYSROOT_DIR=$PI1_TOOLS_DIR/arm-linux-gnueabihf/sysroot

# Library search paths: the toolchain's own sysroot plus the armhf ALSA
# libraries extracted into /sysroot by the Dockerfile.
PI1_LIB_DIRS=(
  "$PI1_TOOLS_SYSROOT_DIR/lib"
  "$PI1_TOOLS_SYSROOT_DIR/usr/lib"
  "/sysroot/usr/lib/arm-linux-gnueabihf"
)
# ${PI1_LIB_DIRS[*]/#/-L} prefixes every entry with -L and joins with spaces.
export RUSTFLAGS="-C linker=$PI1_TOOLS_DIR/bin/arm-linux-gnueabihf-gcc ${PI1_LIB_DIRS[*]/#/-L}"

# Make bindgen's libclang resolve headers against the Pi sysroot, not the host.
export BINDGEN_EXTRA_CLANG_ARGS=--sysroot=$PI1_TOOLS_SYSROOT_DIR

cargo build --release --target arm-unknown-linux-gnueabihf --no-default-features --features "alsa-backend with-libmdns rustls-tls-native-roots"

View file

@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Snipped and tucked from https://github.com/plietar/librespot/pull/202/commits/21549641d39399cbaec0bc92b36c9951d1b87b90
# and further inputs from https://github.com/kingosticks/librespot/commit/c55dd20bd6c7e44dd75ff33185cf50b2d3bd79c3
#
# Builds librespot for armv6hf (Raspberry Pi 1 / Zero) inside the legacy
# cross-compilation container, fetching the armhf ALSA packages into the
# toolchain sysroot first.
set -eux

# Get alsa lib and headers
ALSA_VER="1.0.25-4"
DEPS=( \
  "http://mirrordirector.raspbian.org/raspbian/pool/main/a/alsa-lib/libasound2_${ALSA_VER}_armhf.deb" \
  "http://mirrordirector.raspbian.org/raspbian/pool/main/a/alsa-lib/libasound2-dev_${ALSA_VER}_armhf.deb" \
)

# Collect Paths
SYSROOT="/pi-tools/arm-bcm2708/arm-bcm2708hardfp-linux-gnueabi/arm-bcm2708hardfp-linux-gnueabi/sysroot"
GCC="/pi-tools/arm-bcm2708/gcc-linaro-arm-linux-gnueabihf-raspbian-x64/bin"
GCC_SYSROOT="$GCC/gcc-sysroot"

export PATH=/pi-tools/arm-bcm2708/gcc-linaro-arm-linux-gnueabihf-raspbian-x64/bin/:$PATH

# Link the compiler
export TARGET_CC="$GCC/arm-linux-gnueabihf-gcc"

# Create wrapper around gcc to point to rpi sysroot
# (cargo invokes "gcc-sysroot" as the linker; the wrapper forwards all
# arguments to the real cross-gcc with --sysroot pre-set).
echo -e '#!/bin/bash' "\n$TARGET_CC --sysroot $SYSROOT \"\$@\"" > $GCC_SYSROOT
chmod +x $GCC_SYSROOT

# Add extra target dependencies to our rpi sysroot
for path in "${DEPS[@]}"; do
  curl -OL $path
  dpkg -x $(basename $path) $SYSROOT
done

# It is unclear why this symlink would be necessary; kept here (disabled)
# in case linking fails with a missing ld-linux-armhf.so.3.
# ln -s ld-linux.so.3 $SYSROOT/lib/ld-linux-armhf.so.3

# point cargo to use gcc wrapper as linker
echo -e '[target.arm-unknown-linux-gnueabihf]\nlinker = "gcc-sysroot"' > /.cargo/config

# Build
cargo build --release --target arm-unknown-linux-gnueabihf --no-default-features --features "alsa-backend"

View file

@ -1,7 +1,8 @@
#!/usr/bin/env bash
set -eux
cargo build --release --no-default-features --features "alsa-backend with-libmdns rustls-tls-native-roots"
cargo build --release --target aarch64-unknown-linux-gnu --no-default-features --features "alsa-backend with-libmdns rustls-tls-native-roots"
cargo build --release --target arm-unknown-linux-gnueabihf --no-default-features --features "alsa-backend with-libmdns rustls-tls-native-roots"
cargo build --release --target arm-unknown-linux-gnueabi --no-default-features --features "alsa-backend with-libmdns rustls-tls-native-roots"
cargo build --release --no-default-features --features alsa-backend
cargo build --release --target aarch64-unknown-linux-gnu --no-default-features --features alsa-backend
cargo build --release --target arm-unknown-linux-gnueabihf --no-default-features --features alsa-backend
cargo build --release --target arm-unknown-linux-gnueabi --no-default-features --features alsa-backend
cargo build --release --target mipsel-unknown-linux-gnu --no-default-features --features alsa-backend

View file

@ -1,77 +0,0 @@
#!/usr/bin/python3
"""Example librespot player-event hook.

Reads the PLAYER_EVENT environment variable set by librespot, collects the
event-specific environment variables into a dictionary, and prints the
result as pretty-printed JSON on stdout.
"""
import os
import json
from datetime import datetime, timezone

player_event = os.getenv('PLAYER_EVENT')

# Common envelope present for every event.
json_dict = {
    'event_time': str(datetime.now()),
    'event': player_event,
}

# Each event type exposes its own set of environment variables; a missing
# variable for a declared event type is a librespot bug, so KeyError is the
# right failure mode (hence os.environ[...] rather than os.getenv).
if player_event in ('session_connected', 'session_disconnected'):
    json_dict['user_name'] = os.environ['USER_NAME']
    json_dict['connection_id'] = os.environ['CONNECTION_ID']
elif player_event == 'session_client_changed':
    json_dict['client_id'] = os.environ['CLIENT_ID']
    json_dict['client_name'] = os.environ['CLIENT_NAME']
    json_dict['client_brand_name'] = os.environ['CLIENT_BRAND_NAME']
    json_dict['client_model_name'] = os.environ['CLIENT_MODEL_NAME']
elif player_event == 'shuffle_changed':
    json_dict['shuffle'] = os.environ['SHUFFLE']
elif player_event == 'repeat_changed':
    json_dict['repeat'] = os.environ['REPEAT']
elif player_event == 'auto_play_changed':
    json_dict['auto_play'] = os.environ['AUTO_PLAY']
elif player_event == 'filter_explicit_content_changed':
    json_dict['filter'] = os.environ['FILTER']
elif player_event == 'volume_changed':
    json_dict['volume'] = os.environ['VOLUME']
elif player_event in ('seeked', 'position_correction', 'playing', 'paused'):
    json_dict['track_id'] = os.environ['TRACK_ID']
    json_dict['position_ms'] = os.environ['POSITION_MS']
elif player_event in ('unavailable', 'end_of_track', 'preload_next', 'preloading', 'loading', 'stopped'):
    json_dict['track_id'] = os.environ['TRACK_ID']
elif player_event == 'track_changed':
    # Metadata shared between tracks and podcast episodes.
    common_metadata_fields = {}
    item_type = os.environ['ITEM_TYPE']
    common_metadata_fields['item_type'] = item_type
    common_metadata_fields['track_id'] = os.environ['TRACK_ID']
    common_metadata_fields['uri'] = os.environ['URI']
    common_metadata_fields['name'] = os.environ['NAME']
    common_metadata_fields['duration_ms'] = os.environ['DURATION_MS']
    common_metadata_fields['is_explicit'] = os.environ['IS_EXPLICIT']
    # Multi-valued fields arrive newline-separated.
    common_metadata_fields['language'] = os.environ['LANGUAGE'].split('\n')
    common_metadata_fields['covers'] = os.environ['COVERS'].split('\n')
    json_dict['common_metadata_fields'] = common_metadata_fields
    if item_type == 'Track':
        track_metadata_fields = {}
        track_metadata_fields['number'] = os.environ['NUMBER']
        track_metadata_fields['disc_number'] = os.environ['DISC_NUMBER']
        track_metadata_fields['popularity'] = os.environ['POPULARITY']
        track_metadata_fields['album'] = os.environ['ALBUM']
        track_metadata_fields['artists'] = os.environ['ARTISTS'].split('\n')
        track_metadata_fields['album_artists'] = os.environ['ALBUM_ARTISTS'].split('\n')
        json_dict['track_metadata_fields'] = track_metadata_fields
    elif item_type == 'Episode':
        episode_metadata_fields = {}
        episode_metadata_fields['show_name'] = os.environ['SHOW_NAME']
        # PUBLISH_TIME is a Unix timestamp (UTC). datetime.utcfromtimestamp
        # is deprecated since Python 3.12; fromtimestamp with an explicit
        # UTC timezone yields the identical '%Y-%m-%d' string.
        publish_time = datetime.fromtimestamp(int(os.environ['PUBLISH_TIME']), tz=timezone.utc).strftime('%Y-%m-%d')
        episode_metadata_fields['publish_time'] = publish_time
        episode_metadata_fields['description'] = os.environ['DESCRIPTION']
        json_dict['episode_metadata_fields'] = episode_metadata_fields

print(json.dumps(json_dict, indent = 4))

View file

@ -1,120 +1,54 @@
[package]
name = "librespot-core"
version.workspace = true
rust-version.workspace = true
version = "0.4.2"
authors = ["Paul Lietar <paul@lietar.net>"]
license.workspace = true
description = "The core functionality provided by librespot"
repository.workspace = true
edition.workspace = true
build = "build.rs"
description = "The core functionality provided by librespot"
license = "MIT"
repository = "https://github.com/librespot-org/librespot"
edition = "2018"
[features]
# Refer to the workspace Cargo.toml for the list of features
default = ["native-tls"]
# TLS backends (mutually exclusive - see oauth/src/lib.rs for compile-time checks)
# Note: Validation is in oauth since it's compiled first in the dependency tree.
native-tls = [
"dep:hyper-tls",
"hyper-proxy2/tls",
"librespot-oauth/native-tls",
"tokio-tungstenite/native-tls",
]
rustls-tls-native-roots = [
"__rustls",
"hyper-proxy2/rustls",
"hyper-rustls/native-tokio",
"librespot-oauth/rustls-tls-native-roots",
"tokio-tungstenite/rustls-tls-native-roots",
]
rustls-tls-webpki-roots = [
"__rustls",
"hyper-proxy2/rustls-webpki",
"hyper-rustls/webpki-tokio",
"librespot-oauth/rustls-tls-webpki-roots",
"tokio-tungstenite/rustls-tls-webpki-roots",
]
# Internal features - these are not meant to be used by end users
__rustls = []
[dependencies.librespot-protocol]
path = "../protocol"
version = "0.4.2"
[dependencies]
librespot-oauth = { version = "0.7.1", path = "../oauth", default-features = false }
librespot-protocol = { version = "0.7.1", path = "../protocol", default-features = false }
aes = "0.8"
base64 = "0.22"
byteorder = "1.5"
bytes = "1"
data-encoding = "2.9"
flate2 = "1.1"
form_urlencoded = "1.2"
futures-core = "0.3"
futures-util = { version = "0.3", default-features = false, features = [
"alloc",
"bilock",
"unstable",
] }
governor = { version = "0.10", default-features = false, features = ["std"] }
hmac = "0.12"
httparse = "1.10"
http = "1.3"
http-body-util = "0.1"
hyper = { version = "1.6", features = ["http1", "http2"] }
hyper-proxy2 = { version = "0.1", default-features = false }
hyper-rustls = { version = "0.27", default-features = false, features = [
"http1",
"http2",
"ring",
], optional = true }
hyper-tls = { version = "0.6", optional = true }
hyper-util = { version = "0.1", default-features = false, features = [
"client",
"http1",
"http2",
] }
aes = "0.6"
base64 = "0.13"
byteorder = "1.4"
bytes = "1.0"
form_urlencoded = "1.0"
futures-core = { version = "0.3", default-features = false }
futures-util = { version = "0.3", default-features = false, features = ["alloc", "bilock", "unstable", "sink"] }
hmac = "0.11"
httparse = "1.3"
http = "0.2"
hyper = { version = "0.14", features = ["client", "tcp", "http1"] }
hyper-proxy = { version = "0.9.1", default-features = false }
log = "0.4"
nonzero_ext = "0.3"
num-bigint = "0.4"
num-derive = "0.4"
num-bigint = { version = "0.4", features = ["rand"] }
num-integer = "0.1"
num-traits = "0.2"
pbkdf2 = { version = "0.12", default-features = false, features = ["hmac"] }
pin-project-lite = "0.2"
priority-queue = "2.5"
protobuf = "3.7"
protobuf-json-mapping = "3.7"
quick-xml = { version = "0.38", features = ["serialize"] }
rand = { version = "0.9", default-features = false, features = ["thread_rng"] }
rsa = "0.9"
once_cell = "1.5.2"
pbkdf2 = { version = "0.8", default-features = false, features = ["hmac"] }
priority-queue = "1.2"
protobuf = "2.14.0"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
sha1 = { version = "0.10", features = ["oid"] }
shannon = "0.2"
sysinfo = { version = "0.36", default-features = false, features = ["system"] }
thiserror = "2"
time = { version = "0.3", features = ["formatting", "parsing"] }
tokio = { version = "1", features = [
"io-util",
"macros",
"net",
"rt",
"sync",
"time",
] }
tokio-stream = { version = "0.1", default-features = false }
tokio-tungstenite = { version = "0.27", default-features = false }
tokio-util = { version = "0.7", default-features = false }
url = "2"
uuid = { version = "1", default-features = false, features = ["v4"] }
sha-1 = "0.9"
shannon = "0.2.0"
thiserror = "1.0.7"
tokio = { version = "1.0", features = ["io-util", "net", "rt", "sync"] }
tokio-stream = "0.1.1"
tokio-util = { version = "0.7", features = ["codec"] }
url = "2.1"
uuid = { version = "1.0", default-features = false, features = ["v4"] }
[build-dependencies]
rand = { version = "0.9", default-features = false, features = ["thread_rng"] }
rand_distr = "0.5"
vergen-gitcl = { version = "1.0", default-features = false, features = [
"build",
] }
rand = "0.8"
vergen = "3.0.4"
[dev-dependencies]
tokio = { version = "1", features = ["macros"] }
env_logger = "0.9"
tokio = {version = "1.0", features = ["macros"] }

View file

@ -1,31 +1,21 @@
use rand::distributions::Alphanumeric;
use rand::Rng;
use rand_distr::Alphanumeric;
use vergen_gitcl::{BuildBuilder, Emitter, GitclBuilder};
use std::env;
use vergen::{generate_cargo_keys, ConstantsFlags};
fn main() -> Result<(), Box<dyn std::error::Error>> {
let gitcl = GitclBuilder::default()
.sha(true) // outputs 'VERGEN_GIT_SHA', and sets the 'short' flag true
.commit_date(true) // outputs 'VERGEN_GIT_COMMIT_DATE'
.build()?;
fn main() {
let mut flags = ConstantsFlags::all();
flags.toggle(ConstantsFlags::REBUILD_ON_HEAD_CHANGE);
generate_cargo_keys(ConstantsFlags::all()).expect("Unable to generate the cargo keys!");
let build = BuildBuilder::default()
.build_date(true) // outputs 'VERGEN_BUILD_DATE'
.build()?;
Emitter::default()
.add_instructions(&build)?
.add_instructions(&gitcl)?
.emit()
.expect("Unable to generate the cargo keys!");
let build_id = match std::env::var("SOURCE_DATE_EPOCH") {
let build_id = match env::var("SOURCE_DATE_EPOCH") {
Ok(val) => val,
Err(_) => rand::rng()
Err(_) => rand::thread_rng()
.sample_iter(Alphanumeric)
.take(8)
.map(char::from)
.collect(),
};
println!("cargo:rustc-env=LIBRESPOT_BUILD_ID={build_id}");
Ok(())
println!("cargo:rustc-env=LIBRESPOT_BUILD_ID={}", build_id);
}

View file

@ -1,153 +1,108 @@
use std::collections::VecDeque;
use std::error::Error;
use bytes::Bytes;
use hyper::{Method, Request};
use hyper::client::HttpConnector;
use hyper::{Body, Client, Method, Request, Uri};
use hyper_proxy::{Intercept, Proxy, ProxyConnector};
use serde::Deserialize;
use url::Url;
use crate::Error;
const APRESOLVE_ENDPOINT: &str = "http://apresolve.spotify.com:80";
const AP_FALLBACK: &str = "ap.spotify.com:443";
const AP_BLACKLIST: [&str; 2] = ["ap-gew4.spotify.com", "ap-gue1.spotify.com"];
pub type SocketAddress = (String, u16);
#[derive(Default)]
pub struct AccessPoints {
accesspoint: VecDeque<SocketAddress>,
dealer: VecDeque<SocketAddress>,
spclient: VecDeque<SocketAddress>,
#[derive(Clone, Debug, Deserialize)]
struct ApResolveData {
ap_list: Vec<String>,
}
#[derive(Deserialize, Default)]
pub struct ApResolveData {
accesspoint: Vec<String>,
dealer: Vec<String>,
spclient: Vec<String>,
}
async fn try_apresolve(
proxy: Option<&Url>,
ap_port: Option<u16>,
) -> Result<String, Box<dyn Error>> {
let port = ap_port.unwrap_or(443);
impl ApResolveData {
// These addresses probably do some geo-location based traffic management or at least DNS-based
// load balancing. They are known to fail when the normal resolvers are up, so that's why they
// should only be used as fallback.
fn fallback() -> Self {
Self {
accesspoint: vec![String::from("ap.spotify.com:443")],
dealer: vec![String::from("dealer.spotify.com:443")],
spclient: vec![String::from("spclient.wg.spotify.com:443")],
}
}
}
let mut req = Request::new(Body::empty());
*req.method_mut() = Method::GET;
// panic safety: APRESOLVE_ENDPOINT above is valid url.
*req.uri_mut() = APRESOLVE_ENDPOINT.parse().expect("invalid AP resolve URL");
impl AccessPoints {
fn is_any_empty(&self) -> bool {
self.accesspoint.is_empty() || self.dealer.is_empty() || self.spclient.is_empty()
}
}
let response = if let Some(url) = proxy {
// Panic safety: all URLs are valid URIs
let uri = url.to_string().parse().unwrap();
let proxy = Proxy::new(Intercept::All, uri);
let connector = HttpConnector::new();
let proxy_connector = ProxyConnector::from_proxy_unsecured(connector, proxy);
Client::builder()
.build(proxy_connector)
.request(req)
.await?
} else {
Client::new().request(req).await?
};
component! {
ApResolver : ApResolverInner {
data: AccessPoints = AccessPoints::default(),
}
}
let body = hyper::body::to_bytes(response.into_body()).await?;
let data: ApResolveData = serde_json::from_slice(body.as_ref())?;
impl ApResolver {
// return a port if a proxy URL and/or a proxy port was specified. This is useful even when
// there is no proxy, but firewalls only allow certain ports (e.g. 443 and not 4070).
pub fn port_config(&self) -> Option<u16> {
if self.session().config().proxy.is_some() || self.session().config().ap_port.is_some() {
Some(self.session().config().ap_port.unwrap_or(443))
} else {
None
}
}
fn process_ap_strings(&self, data: Vec<String>) -> VecDeque<SocketAddress> {
let filter_port = self.port_config();
data.into_iter()
.filter_map(|ap| {
let mut split = ap.rsplitn(2, ':');
let port = split.next()?;
let port: u16 = port.parse().ok()?;
let host = split.next()?.to_owned();
match filter_port {
Some(filter_port) if filter_port != port => None,
_ => Some((host, port)),
}
})
.collect()
}
fn parse_resolve_to_access_points(&self, resolve: ApResolveData) -> AccessPoints {
AccessPoints {
accesspoint: self.process_ap_strings(resolve.accesspoint),
dealer: self.process_ap_strings(resolve.dealer),
spclient: self.process_ap_strings(resolve.spclient),
}
}
pub async fn try_apresolve(&self) -> Result<ApResolveData, Error> {
let req = Request::builder()
.method(Method::GET)
.uri("https://apresolve.spotify.com/?type=accesspoint&type=dealer&type=spclient")
.body(Bytes::new())?;
let body = self.session().http_client().request_body(req).await?;
let data: ApResolveData = serde_json::from_slice(body.as_ref())?;
Ok(data)
}
async fn apresolve(&self) {
let result = self.try_apresolve().await;
self.lock(|inner| {
let (data, error) = match result {
Ok(data) => (data, None),
Err(e) => (ApResolveData::default(), Some(e)),
};
inner.data = self.parse_resolve_to_access_points(data);
if inner.data.is_any_empty() {
warn!("Failed to resolve all access points, using fallbacks");
if let Some(error) = error {
warn!("Resolve access points error: {error}");
}
let fallback = self.parse_resolve_to_access_points(ApResolveData::fallback());
inner.data.accesspoint.extend(fallback.accesspoint);
inner.data.dealer.extend(fallback.dealer);
inner.data.spclient.extend(fallback.spclient);
// filter APs that are known to cause channel errors
let aps: Vec<String> = data
.ap_list
.into_iter()
.filter_map(|ap| {
let host = ap.parse::<Uri>().ok()?.host()?.to_owned();
if !AP_BLACKLIST.iter().any(|&blacklisted| host == blacklisted) {
Some(ap)
} else {
warn!("Ignoring blacklisted access point {}", ap);
None
}
})
}
.collect();
fn is_any_empty(&self) -> bool {
self.lock(|inner| inner.data.is_any_empty())
}
pub async fn resolve(&self, endpoint: &str) -> Result<SocketAddress, Error> {
if self.is_any_empty() {
self.apresolve().await;
}
self.lock(|inner| {
let access_point = match endpoint {
// take the first position instead of the last with `pop`, because Spotify returns
// access points with ports 4070, 443 and 80 in order of preference from highest
// to lowest.
"accesspoint" => inner.data.accesspoint.pop_front(),
"dealer" => inner.data.dealer.pop_front(),
"spclient" => inner.data.spclient.pop_front(),
_ => {
return Err(Error::unimplemented(format!(
"No implementation to resolve access point {endpoint}"
)));
}
};
let access_point = access_point.ok_or_else(|| {
Error::unavailable(format!("No access point available for endpoint {endpoint}"))
})?;
Ok(access_point)
let ap = if ap_port.is_some() || proxy.is_some() {
// filter on ports if specified on the command line...
aps.into_iter().find_map(|ap| {
if ap.parse::<Uri>().ok()?.port()? == port {
Some(ap)
} else {
None
}
})
} else {
// ...or pick the first on the list
aps.into_iter().next()
}
.ok_or("Unable to resolve any viable access points.")?;
Ok(ap)
}
pub async fn apresolve(proxy: Option<&Url>, ap_port: Option<u16>) -> String {
try_apresolve(proxy, ap_port).await.unwrap_or_else(|e| {
warn!("Failed to resolve Access Point: {}", e);
warn!("Using fallback \"{}\"", AP_FALLBACK);
AP_FALLBACK.into()
})
}
#[cfg(test)]
mod test {
use std::net::ToSocketAddrs;
use super::try_apresolve;
#[tokio::test]
async fn test_apresolve() {
let ap = try_apresolve(None, None).await.unwrap();
// Assert that the result contains a valid host and port
ap.to_socket_addrs().unwrap().next().unwrap();
}
#[tokio::test]
async fn test_apresolve_port_443() {
let ap = try_apresolve(None, Some(443)).await.unwrap();
let port = ap.to_socket_addrs().unwrap().next().unwrap().port();
assert_eq!(port, 443);
}
}

View file

@ -1,84 +1,52 @@
use std::{collections::HashMap, io::Write, time::Duration};
use byteorder::{BigEndian, ByteOrder, WriteBytesExt};
use bytes::Bytes;
use thiserror::Error;
use std::collections::HashMap;
use std::io::Write;
use tokio::sync::oneshot;
use crate::{Error, FileId, SpotifyId, packet::PacketType, util::SeqGenerator};
use crate::spotify_id::{FileId, SpotifyId};
use crate::util::SeqGenerator;
#[derive(Debug, Hash, PartialEq, Eq, Copy, Clone)]
pub struct AudioKey(pub [u8; 16]);
#[derive(Debug, Error)]
pub enum AudioKeyError {
#[error("audio key error")]
AesKey,
#[error("other end of channel disconnected")]
Channel,
#[error("unexpected packet type {0}")]
Packet(u8),
#[error("sequence {0} not pending")]
Sequence(u32),
#[error("audio key response timeout")]
Timeout,
}
impl From<AudioKeyError> for Error {
fn from(err: AudioKeyError) -> Self {
match err {
AudioKeyError::AesKey => Error::unavailable(err),
AudioKeyError::Channel => Error::aborted(err),
AudioKeyError::Sequence(_) => Error::aborted(err),
AudioKeyError::Packet(_) => Error::unimplemented(err),
AudioKeyError::Timeout => Error::aborted(err),
}
}
}
#[derive(Debug, Hash, PartialEq, Eq, Copy, Clone)]
pub struct AudioKeyError;
component! {
AudioKeyManager : AudioKeyManagerInner {
sequence: SeqGenerator<u32> = SeqGenerator::new(0),
pending: HashMap<u32, oneshot::Sender<Result<AudioKey, Error>>> = HashMap::new(),
pending: HashMap<u32, oneshot::Sender<Result<AudioKey, AudioKeyError>>> = HashMap::new(),
}
}
impl AudioKeyManager {
pub(crate) fn dispatch(&self, cmd: PacketType, mut data: Bytes) -> Result<(), Error> {
pub(crate) fn dispatch(&self, cmd: u8, mut data: Bytes) {
let seq = BigEndian::read_u32(data.split_to(4).as_ref());
let sender = self
.lock(|inner| inner.pending.remove(&seq))
.ok_or(AudioKeyError::Sequence(seq))?;
let sender = self.lock(|inner| inner.pending.remove(&seq));
match cmd {
PacketType::AesKey => {
let mut key = [0u8; 16];
key.copy_from_slice(data.as_ref());
sender
.send(Ok(AudioKey(key)))
.map_err(|_| AudioKeyError::Channel)?
}
PacketType::AesKeyError => {
error!(
"error audio key {:x} {:x}",
data.as_ref()[0],
data.as_ref()[1]
);
sender
.send(Err(AudioKeyError::AesKey.into()))
.map_err(|_| AudioKeyError::Channel)?
}
_ => {
trace!("Did not expect {cmd:?} AES key packet with data {data:#?}");
return Err(AudioKeyError::Packet(cmd as u8).into());
if let Some(sender) = sender {
match cmd {
0xd => {
let mut key = [0u8; 16];
key.copy_from_slice(data.as_ref());
let _ = sender.send(Ok(AudioKey(key)));
}
0xe => {
warn!(
"error audio key {:x} {:x}",
data.as_ref()[0],
data.as_ref()[1]
);
let _ = sender.send(Err(AudioKeyError));
}
_ => (),
}
}
Ok(())
}
pub async fn request(&self, track: SpotifyId, file: FileId) -> Result<AudioKey, Error> {
pub async fn request(&self, track: SpotifyId, file: FileId) -> Result<AudioKey, AudioKeyError> {
let (tx, rx) = oneshot::channel();
let seq = self.lock(move |inner| {
@ -87,24 +55,17 @@ impl AudioKeyManager {
seq
});
self.send_key_request(seq, track, file)?;
const KEY_RESPONSE_TIMEOUT: Duration = Duration::from_millis(1500);
match tokio::time::timeout(KEY_RESPONSE_TIMEOUT, rx).await {
Err(_) => {
error!("Audio key response timeout");
Err(AudioKeyError::Timeout.into())
}
Ok(k) => k?,
}
self.send_key_request(seq, track, file);
rx.await.map_err(|_| AudioKeyError)?
}
fn send_key_request(&self, seq: u32, track: SpotifyId, file: FileId) -> Result<(), Error> {
fn send_key_request(&self, seq: u32, track: SpotifyId, file: FileId) {
let mut data: Vec<u8> = Vec::new();
data.write_all(&file.0)?;
data.write_all(&track.to_raw())?;
data.write_u32::<BigEndian>(seq)?;
data.write_u16::<BigEndian>(0x0000)?;
data.write(&file.0).unwrap();
data.write(&track.to_raw()).unwrap();
data.write_u32::<BigEndian>(seq).unwrap();
data.write_u16::<BigEndian>(0x0000).unwrap();
self.session().send_packet(PacketType::RequestKey, data)
self.session().send_packet(0xc, data)
}
}

View file

@ -1,35 +1,19 @@
use std::io::{self, Read};
use aes::Aes192;
use base64::engine::Engine as _;
use base64::engine::general_purpose::STANDARD as BASE64;
use byteorder::{BigEndian, ByteOrder};
use pbkdf2::pbkdf2_hmac;
use protobuf::Enum;
use hmac::Hmac;
use pbkdf2::pbkdf2;
use protobuf::ProtobufEnum;
use serde::{Deserialize, Serialize};
use sha1::{Digest, Sha1};
use thiserror::Error;
use crate::{Error, protocol::authentication::AuthenticationType};
#[derive(Debug, Error)]
pub enum AuthenticationError {
#[error("unknown authentication type {0}")]
AuthType(u32),
#[error("invalid key")]
Key,
}
impl From<AuthenticationError> for Error {
fn from(err: AuthenticationError) -> Self {
Error::invalid_argument(err)
}
}
use crate::protocol::authentication::AuthenticationType;
/// The credentials are used to log into the Spotify API.
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Credentials {
pub username: Option<String>,
pub username: String,
#[serde(serialize_with = "serialize_protobuf_enum")]
#[serde(deserialize_with = "deserialize_protobuf_enum")]
@ -50,27 +34,19 @@ impl Credentials {
///
/// let creds = Credentials::with_password("my account", "my password");
/// ```
pub fn with_password(username: impl Into<String>, password: impl Into<String>) -> Self {
Self {
username: Some(username.into()),
pub fn with_password(username: impl Into<String>, password: impl Into<String>) -> Credentials {
Credentials {
username: username.into(),
auth_type: AuthenticationType::AUTHENTICATION_USER_PASS,
auth_data: password.into().into_bytes(),
}
}
pub fn with_access_token(token: impl Into<String>) -> Self {
Self {
username: None,
auth_type: AuthenticationType::AUTHENTICATION_SPOTIFY_TOKEN,
auth_data: token.into().into_bytes(),
}
}
pub fn with_blob(
username: impl Into<String>,
encrypted_blob: impl AsRef<[u8]>,
device_id: impl AsRef<[u8]>,
) -> Result<Self, Error> {
) -> Credentials {
fn read_u8<R: Read>(stream: &mut R) -> io::Result<u8> {
let mut data = [0u8];
stream.read_exact(&mut data)?;
@ -84,7 +60,7 @@ impl Credentials {
}
let hi = read_u8(stream)? as u32;
Ok(lo & 0x7f | (hi << 7))
Ok(lo & 0x7f | hi << 7)
}
fn read_bytes<R: Read>(stream: &mut R) -> io::Result<Vec<u8>> {
@ -101,11 +77,7 @@ impl Credentials {
let key = {
let mut key = [0u8; 24];
if key.len() < 20 {
return Err(AuthenticationError::Key.into());
}
pbkdf2_hmac::<Sha1>(&secret, username.as_bytes(), 0x100, &mut key[0..20]);
pbkdf2::<Hmac<Sha1>>(&secret, username.as_bytes(), 0x100, &mut key[0..20]);
let hash = &Sha1::digest(&key[..20]);
key[..20].copy_from_slice(hash);
@ -115,13 +87,15 @@ impl Credentials {
// decrypt data using ECB mode without padding
let blob = {
use aes::cipher::generic_array::typenum::Unsigned;
use aes::cipher::generic_array::GenericArray;
use aes::cipher::{BlockDecrypt, BlockSizeUser, KeyInit};
use aes::cipher::{BlockCipher, NewBlockCipher};
let mut data = BASE64.decode(encrypted_blob)?;
let mut data = base64::decode(encrypted_blob).unwrap();
let cipher = Aes192::new(GenericArray::from_slice(&key));
let block_size = Aes192::block_size();
let block_size = <Aes192 as BlockCipher>::BlockSize::to_usize();
assert_eq!(data.len() % block_size, 0);
for chunk in data.chunks_exact_mut(block_size) {
cipher.decrypt_block(GenericArray::from_mut_slice(chunk));
}
@ -135,26 +109,25 @@ impl Credentials {
};
let mut cursor = io::Cursor::new(blob.as_slice());
read_u8(&mut cursor)?;
read_bytes(&mut cursor)?;
read_u8(&mut cursor)?;
let auth_type = read_int(&mut cursor)?;
let auth_type = AuthenticationType::from_i32(auth_type as i32)
.ok_or(AuthenticationError::AuthType(auth_type))?;
read_u8(&mut cursor)?;
let auth_data = read_bytes(&mut cursor)?;
read_u8(&mut cursor).unwrap();
read_bytes(&mut cursor).unwrap();
read_u8(&mut cursor).unwrap();
let auth_type = read_int(&mut cursor).unwrap();
let auth_type = AuthenticationType::from_i32(auth_type as i32).unwrap();
read_u8(&mut cursor).unwrap();
let auth_data = read_bytes(&mut cursor).unwrap();
Ok(Self {
username: Some(username),
Credentials {
username,
auth_type,
auth_data,
})
}
}
}
fn serialize_protobuf_enum<T, S>(v: &T, ser: S) -> Result<S::Ok, S::Error>
where
T: Enum,
T: ProtobufEnum,
S: serde::Serializer,
{
serde::Serialize::serialize(&v.value(), ser)
@ -162,7 +135,7 @@ where
fn deserialize_protobuf_enum<'de, T, D>(de: D) -> Result<T, D::Error>
where
T: Enum,
T: ProtobufEnum,
D: serde::Deserializer<'de>,
{
let v: i32 = serde::Deserialize::deserialize(de)?;
@ -174,7 +147,7 @@ where
T: AsRef<[u8]>,
S: serde::Serializer,
{
serde::Serialize::serialize(&BASE64.encode(v.as_ref()), ser)
serde::Serialize::serialize(&base64::encode(v.as_ref()), ser)
}
fn deserialize_base64<'de, D>(de: D) -> Result<Vec<u8>, D::Error>
@ -182,7 +155,5 @@ where
D: serde::Deserializer<'de>,
{
let v: String = serde::Deserialize::deserialize(de)?;
BASE64
.decode(v)
.map_err(|e| serde::de::Error::custom(e.to_string()))
base64::decode(&v).map_err(|e| serde::de::Error::custom(e.to_string()))
}

View file

@ -1,31 +1,15 @@
use std::{
cmp::Reverse,
collections::HashMap,
fs::{self, File},
io::{self, Read, Write},
path::{Path, PathBuf},
sync::{Arc, Mutex},
time::SystemTime,
};
use std::cmp::Reverse;
use std::collections::HashMap;
use std::fs::{self, File};
use std::io::{self, Error, ErrorKind, Read, Write};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::time::SystemTime;
use priority_queue::PriorityQueue;
use thiserror::Error;
use crate::{Error, FileId, authentication::Credentials, error::ErrorKind};
const CACHE_LIMITER_POISON_MSG: &str = "cache limiter mutex should not be poisoned";
#[derive(Debug, Error)]
pub enum CacheError {
#[error("audio cache location is not configured")]
Path,
}
impl From<CacheError> for Error {
fn from(err: CacheError) -> Self {
Error::failed_precondition(err)
}
}
use crate::authentication::Credentials;
use crate::spotify_id::FileId;
/// Some kind of data structure that holds some paths, the size of these files and a timestamp.
/// It keeps track of the file sizes and is able to pop the path with the oldest timestamp if
@ -73,17 +57,16 @@ impl SizeLimiter {
/// to delete the file in the file system.
fn pop(&mut self) -> Option<PathBuf> {
if self.exceeds_limit() {
if let Some((next, _)) = self.queue.pop() {
if let Some(size) = self.sizes.remove(&next) {
self.in_use -= size;
} else {
error!("`queue` and `sizes` should have the same keys.");
}
Some(next)
} else {
error!("in_use was > 0, so the queue should have contained an item.");
None
}
let (next, _) = self
.queue
.pop()
.expect("in_use was > 0, so the queue should have contained an item.");
let size = self
.sizes
.remove(&next)
.expect("`queue` and `sizes` should have the same keys.");
self.in_use -= size;
Some(next)
} else {
None
}
@ -102,11 +85,11 @@ impl SizeLimiter {
return false;
}
if let Some(size) = self.sizes.remove(file) {
self.in_use -= size;
} else {
error!("`queue` and `sizes` should have the same keys.");
}
let size = self
.sizes
.remove(file)
.expect("`queue` and `sizes` should have the same keys.");
self.in_use -= size;
true
}
@ -142,7 +125,7 @@ impl FsSizeLimiter {
let list_dir = match fs::read_dir(path) {
Ok(list_dir) => list_dir,
Err(e) => {
warn!("Could not read directory {path:?} in cache dir: {e}");
warn!("Could not read directory {:?} in cache dir: {}", path, e);
return;
}
};
@ -151,7 +134,7 @@ impl FsSizeLimiter {
let entry = match entry {
Ok(entry) => entry,
Err(e) => {
warn!("Could not directory {path:?} in cache dir: {e}");
warn!("Could not directory {:?} in cache dir: {}", path, e);
return;
}
};
@ -167,7 +150,7 @@ impl FsSizeLimiter {
limiter.add(&path, size, access_time);
}
Err(e) => {
warn!("Could not read file {path:?} in cache dir: {e}")
warn!("Could not read file {:?} in cache dir: {}", path, e)
}
}
}
@ -192,28 +175,21 @@ impl FsSizeLimiter {
fn add(&self, file: &Path, size: u64) {
self.limiter
.lock()
.expect(CACHE_LIMITER_POISON_MSG)
.add(file, size, SystemTime::now())
.unwrap()
.add(file, size, SystemTime::now());
}
fn touch(&self, file: &Path) -> bool {
self.limiter
.lock()
.expect(CACHE_LIMITER_POISON_MSG)
.update(file, SystemTime::now())
self.limiter.lock().unwrap().update(file, SystemTime::now())
}
fn remove(&self, file: &Path) -> bool {
self.limiter
.lock()
.expect(CACHE_LIMITER_POISON_MSG)
.remove(file)
fn remove(&self, file: &Path) {
self.limiter.lock().unwrap().remove(file);
}
fn prune_internal<F: FnMut() -> Option<PathBuf>>(mut pop: F) -> Result<(), Error> {
fn prune_internal<F: FnMut() -> Option<PathBuf>>(mut pop: F) {
let mut first = true;
let mut count = 0;
let mut last_error = None;
while let Some(file) = pop() {
if first {
@ -221,39 +197,31 @@ impl FsSizeLimiter {
first = false;
}
let res = fs::remove_file(&file);
if let Err(e) = res {
warn!("Could not remove file {file:?} from cache dir: {e}");
last_error = Some(e);
if let Err(e) = fs::remove_file(&file) {
warn!("Could not remove file {:?} from cache dir: {}", file, e);
} else {
count += 1;
}
}
if count > 0 {
info!("Removed {count} cache files.");
}
if let Some(err) = last_error {
Err(err.into())
} else {
Ok(())
info!("Removed {} cache files.", count);
}
}
fn prune(&self) -> Result<(), Error> {
Self::prune_internal(|| self.limiter.lock().expect(CACHE_LIMITER_POISON_MSG).pop())
fn prune(&self) {
Self::prune_internal(|| self.limiter.lock().unwrap().pop())
}
fn new(path: &Path, limit: u64) -> Result<Self, Error> {
fn new(path: &Path, limit: u64) -> Self {
let mut limiter = SizeLimiter::new(limit);
Self::init_dir(&mut limiter, path);
Self::prune_internal(|| limiter.pop())?;
Self::prune_internal(|| limiter.pop());
Ok(Self {
Self {
limiter: Mutex::new(limiter),
})
}
}
}
@ -266,13 +234,15 @@ pub struct Cache {
size_limiter: Option<Arc<FsSizeLimiter>>,
}
pub struct RemoveFileError(());
impl Cache {
pub fn new<P: AsRef<Path>>(
credentials_path: Option<P>,
volume_path: Option<P>,
audio_path: Option<P>,
size_limit: Option<u64>,
) -> Result<Self, Error> {
) -> io::Result<Self> {
let mut size_limiter = None;
if let Some(location) = &credentials_path {
@ -293,7 +263,8 @@ impl Cache {
fs::create_dir_all(location)?;
if let Some(limit) = size_limit {
let limiter = FsSizeLimiter::new(location.as_ref(), limit)?;
let limiter = FsSizeLimiter::new(location.as_ref(), limit);
size_limiter = Some(Arc::new(limiter));
}
}
@ -314,11 +285,11 @@ impl Cache {
let location = self.credentials_location.as_ref()?;
// This closure is just convencience to enable the question mark operator
let read = || -> Result<Credentials, Error> {
let read = || {
let mut file = File::open(location)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
Ok(serde_json::from_str(&contents)?)
serde_json::from_str(&contents).map_err(|e| Error::new(ErrorKind::InvalidData, e))
};
match read() {
@ -326,8 +297,8 @@ impl Cache {
Err(e) => {
// If the file did not exist, the file was probably not written
// before. Otherwise, log the error.
if e.kind != ErrorKind::NotFound {
warn!("Error reading credentials from cache: {e}");
if e.kind() != ErrorKind::NotFound {
warn!("Error reading credentials from cache: {}", e);
}
None
}
@ -338,11 +309,11 @@ impl Cache {
if let Some(location) = &self.credentials_location {
let result = File::create(location).and_then(|mut file| {
let data = serde_json::to_string(cred)?;
write!(file, "{data}")
write!(file, "{}", data)
});
if let Err(e) = result {
warn!("Cannot save credentials to cache: {e}")
warn!("Cannot save credentials to cache: {}", e)
}
}
}
@ -350,18 +321,20 @@ impl Cache {
pub fn volume(&self) -> Option<u16> {
let location = self.volume_location.as_ref()?;
let read = || -> Result<u16, Error> {
let read = || {
let mut file = File::open(location)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
Ok(contents.parse()?)
contents
.parse()
.map_err(|e| Error::new(ErrorKind::InvalidData, e))
};
match read() {
Ok(v) => Some(v),
Err(e) => {
if e.kind != ErrorKind::NotFound {
warn!("Error reading volume from cache: {e}");
if e.kind() != ErrorKind::NotFound {
warn!("Error reading volume from cache: {}", e);
}
None
}
@ -370,14 +343,14 @@ impl Cache {
pub fn save_volume(&self, volume: u16) {
if let Some(ref location) = self.volume_location {
let result = File::create(location).and_then(|mut file| write!(file, "{volume}"));
let result = File::create(location).and_then(|mut file| write!(file, "{}", volume));
if let Err(e) = result {
warn!("Cannot save volume to cache: {e}");
warn!("Cannot save volume to cache: {}", e);
}
}
}
pub fn file_path(&self, file: FileId) -> Option<PathBuf> {
fn file_path(&self, file: FileId) -> Option<PathBuf> {
match file.to_base16() {
Ok(name) => self.audio_location.as_ref().map(|location| {
let mut path = location.join(&name[0..2]);
@ -385,7 +358,7 @@ impl Cache {
path
}),
Err(e) => {
warn!("Invalid FileId: {e}");
warn!("Invalid FileId: {}", e.utf8_error());
None
}
}
@ -396,48 +369,51 @@ impl Cache {
match File::open(&path) {
Ok(file) => {
if let Some(limiter) = self.size_limiter.as_deref() {
if !limiter.touch(&path) {
error!("limiter could not touch {path:?}");
}
limiter.touch(&path);
}
Some(file)
}
Err(e) => {
if e.kind() != io::ErrorKind::NotFound {
warn!("Error reading file from cache: {e}")
if e.kind() != ErrorKind::NotFound {
warn!("Error reading file from cache: {}", e)
}
None
}
}
}
pub fn save_file<F: Read>(&self, file: FileId, contents: &mut F) -> Result<PathBuf, Error> {
if let Some(path) = self.file_path(file) {
if let Some(parent) = path.parent() {
if let Ok(size) = fs::create_dir_all(parent)
.and_then(|_| File::create(&path))
.and_then(|mut file| io::copy(contents, &mut file))
{
if let Some(limiter) = self.size_limiter.as_deref() {
limiter.add(&path, size);
limiter.prune()?;
}
return Ok(path);
}
pub fn save_file<F: Read>(&self, file: FileId, contents: &mut F) {
let path = if let Some(path) = self.file_path(file) {
path
} else {
return;
};
let parent = path.parent().unwrap();
let result = fs::create_dir_all(parent)
.and_then(|_| File::create(&path))
.and_then(|mut file| io::copy(contents, &mut file));
if let Ok(size) = result {
if let Some(limiter) = self.size_limiter.as_deref() {
limiter.add(&path, size);
limiter.prune();
}
}
Err(CacheError::Path.into())
}
pub fn remove_file(&self, file: FileId) -> Result<(), Error> {
let path = self.file_path(file).ok_or(CacheError::Path)?;
pub fn remove_file(&self, file: FileId) -> Result<(), RemoveFileError> {
let path = self.file_path(file).ok_or(RemoveFileError(()))?;
fs::remove_file(&path)?;
if let Some(limiter) = self.size_limiter.as_deref() {
limiter.remove(&path);
if let Err(err) = fs::remove_file(&path) {
warn!("Unable to remove file from cache: {}", err);
Err(RemoveFileError(()))
} else {
if let Some(limiter) = self.size_limiter.as_deref() {
limiter.remove(&path);
}
Ok(())
}
Ok(())
}
}

View file

@ -1,259 +0,0 @@
use std::ops::{Deref, DerefMut};
use protobuf::Message;
use thiserror::Error;
use time::Duration;
use url::Url;
use super::{Error, FileId, Session, date::Date};
use librespot_protocol as protocol;
use protocol::storage_resolve::StorageResolveResponse as CdnUrlMessage;
use protocol::storage_resolve::storage_resolve_response::Result as StorageResolveResponse_Result;
#[derive(Debug, Clone)]
pub struct MaybeExpiringUrl(pub String, pub Option<Date>);
const CDN_URL_EXPIRY_MARGIN: Duration = Duration::seconds(5 * 60);
#[derive(Debug, Clone)]
pub struct MaybeExpiringUrls(pub Vec<MaybeExpiringUrl>);
impl Deref for MaybeExpiringUrls {
type Target = Vec<MaybeExpiringUrl>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for MaybeExpiringUrls {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Debug, Error)]
pub enum CdnUrlError {
#[error("all URLs expired")]
Expired,
#[error("resolved storage is not for CDN")]
Storage,
#[error("no URLs resolved")]
Unresolved,
}
impl From<CdnUrlError> for Error {
fn from(err: CdnUrlError) -> Self {
match err {
CdnUrlError::Expired => Error::deadline_exceeded(err),
CdnUrlError::Storage | CdnUrlError::Unresolved => Error::unavailable(err),
}
}
}
#[derive(Debug, Clone)]
pub struct CdnUrl {
pub file_id: FileId,
urls: MaybeExpiringUrls,
}
impl CdnUrl {
pub fn new(file_id: FileId) -> Self {
Self {
file_id,
urls: MaybeExpiringUrls(Vec::new()),
}
}
pub async fn resolve_audio(&self, session: &Session) -> Result<Self, Error> {
let file_id = self.file_id;
let response = session.spclient().get_audio_storage(&file_id).await?;
let msg = CdnUrlMessage::parse_from_bytes(&response)?;
let urls = MaybeExpiringUrls::try_from(msg)?;
let cdn_url = Self { file_id, urls };
trace!("Resolved CDN storage: {cdn_url:#?}");
Ok(cdn_url)
}
#[deprecated = "This function only returns the first valid URL. Use try_get_urls instead, which allows for fallback logic."]
pub fn try_get_url(&self) -> Result<&str, Error> {
if self.urls.is_empty() {
return Err(CdnUrlError::Unresolved.into());
}
let now = Date::now_utc();
let url = self.urls.iter().find(|url| match url.1 {
Some(expiry) => now < expiry,
None => true,
});
if let Some(url) = url {
Ok(&url.0)
} else {
Err(CdnUrlError::Expired.into())
}
}
pub fn try_get_urls(&self) -> Result<Vec<&str>, Error> {
if self.urls.is_empty() {
return Err(CdnUrlError::Unresolved.into());
}
let now = Date::now_utc();
let urls: Vec<&str> = self
.urls
.iter()
.filter_map(|MaybeExpiringUrl(url, expiry)| match *expiry {
Some(expiry) => {
if now < expiry {
Some(url.as_str())
} else {
None
}
}
None => Some(url.as_str()),
})
.collect();
if urls.is_empty() {
Err(CdnUrlError::Expired.into())
} else {
Ok(urls)
}
}
}
impl TryFrom<CdnUrlMessage> for MaybeExpiringUrls {
type Error = crate::Error;
fn try_from(msg: CdnUrlMessage) -> Result<Self, Self::Error> {
if !matches!(
msg.result.enum_value_or_default(),
StorageResolveResponse_Result::CDN
) {
return Err(CdnUrlError::Storage.into());
}
let is_expiring = !msg.fileid.is_empty();
let result = msg
.cdnurl
.iter()
.map(|cdn_url| {
let url = Url::parse(cdn_url)?;
let mut expiry: Option<Date> = None;
if is_expiring {
let mut expiry_str: Option<String> = None;
if let Some(token) = url
.query_pairs()
.into_iter()
.find(|(key, _value)| key == "verify")
{
// https://audio-cf.spotifycdn.com/audio/844ecdb297a87ebfee4399f28892ef85d9ba725f?verify=1750549951-4R3I2w2q7OfNkR%2FGH8qH7xtIKUPlDxywBuADY%2BsvMeU%3D
if let Some((expiry_str_candidate, _)) = token.1.split_once('-') {
expiry_str = Some(expiry_str_candidate.to_string());
}
} else if let Some(token) = url
.query_pairs()
.into_iter()
.find(|(key, _value)| key == "__token__")
{
//"https://audio-ak-spotify-com.akamaized.net/audio/4712bc9e47f7feb4ee3450ef2bb545e1d83c3d54?__token__=exp=1688165560~hmac=4e661527574fab5793adb99cf04e1c2ce12294c71fe1d39ffbfabdcfe8ce3b41",
if let Some(mut start) = token.1.find("exp=") {
start += 4;
if token.1.len() >= start {
let slice = &token.1[start..];
if let Some(end) = slice.find('~') {
// this is the only valid invariant for akamaized.net
expiry_str = Some(String::from(&slice[..end]));
} else {
expiry_str = Some(String::from(slice));
}
}
}
} else if let Some(token) = url
.query_pairs()
.into_iter()
.find(|(key, _value)| key == "Expires")
{
//"https://audio-gm-off.spotifycdn.com/audio/4712bc9e47f7feb4ee3450ef2bb545e1d83c3d54?Expires=1688165560~FullPath~hmac=IIZA28qptl8cuGLq15-SjHKHtLoxzpy_6r_JpAU4MfM=",
if let Some(end) = token.1.find('~') {
// this is the only valid invariant for spotifycdn.com
let slice = &token.1[..end];
expiry_str = Some(String::from(&slice[..end]));
}
} else if let Some(query) = url.query() {
//"https://audio4-fa.scdn.co/audio/4712bc9e47f7feb4ee3450ef2bb545e1d83c3d54?1688165560_0GKSyXjLaTW1BksFOyI4J7Tf9tZDbBUNNPu9Mt4mhH4=",
let mut items = query.split('_');
if let Some(first) = items.next() {
// this is the only valid invariant for scdn.co
expiry_str = Some(String::from(first));
}
}
if let Some(exp_str) = expiry_str {
if let Ok(expiry_parsed) = exp_str.parse::<i64>() {
if let Ok(expiry_at) = Date::from_timestamp_ms(expiry_parsed * 1_000) {
let with_margin = expiry_at.saturating_sub(CDN_URL_EXPIRY_MARGIN);
expiry = Some(Date::from(with_margin));
}
} else {
warn!(
"Cannot parse CDN URL expiry timestamp '{exp_str}' from '{cdn_url}'"
);
}
} else {
warn!("Unknown CDN URL format: {cdn_url}");
}
}
Ok(MaybeExpiringUrl(cdn_url.to_owned(), expiry))
})
.collect::<Result<Vec<MaybeExpiringUrl>, Error>>()?;
Ok(Self(result))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_maybe_expiring_urls() {
let timestamp = 1688165560;
let mut msg = CdnUrlMessage::new();
msg.result = StorageResolveResponse_Result::CDN.into();
msg.cdnurl = vec![
format!(
"https://audio-cf.spotifycdn.com/audio/844ecdb297a87ebfee4399f28892ef85d9ba725f?verify={timestamp}-4R3I2w2q7OfNkR%2FGH8qH7xtIKUPlDxywBuADY%2BsvMeU%3D"
),
format!(
"https://audio-ak-spotify-com.akamaized.net/audio/foo?__token__=exp={timestamp}~hmac=4e661527574fab5793adb99cf04e1c2ce12294c71fe1d39ffbfabdcfe8ce3b41"
),
format!(
"https://audio-gm-off.spotifycdn.com/audio/foo?Expires={timestamp}~FullPath~hmac=IIZA28qptl8cuGLq15-SjHKHtLoxzpy_6r_JpAU4MfM="
),
format!(
"https://audio4-fa.scdn.co/audio/foo?{timestamp}_0GKSyXjLaTW1BksFOyI4J7Tf9tZDbBUNNPu9Mt4mhH4="
),
"https://audio4-fa.scdn.co/foo?baz".to_string(),
];
msg.fileid = vec![0];
let urls = MaybeExpiringUrls::try_from(msg).expect("valid urls");
assert_eq!(urls.len(), 5);
assert!(urls[0].1.is_some());
assert!(urls[1].1.is_some());
assert!(urls[2].1.is_some());
assert!(urls[3].1.is_some());
assert!(urls[4].1.is_none());
let timestamp_margin = Duration::seconds(timestamp) - CDN_URL_EXPIRY_MARGIN;
assert_eq!(
urls[0].1.unwrap().as_timestamp_ms() as i128,
timestamp_margin.whole_milliseconds()
);
}
}

View file

@ -1,20 +1,16 @@
use std::{
collections::HashMap,
fmt,
pin::Pin,
task::{Context, Poll},
time::{Duration, Instant},
};
use std::collections::HashMap;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Instant;
use byteorder::{BigEndian, ByteOrder};
use bytes::Bytes;
use futures_core::Stream;
use futures_util::{StreamExt, lock::BiLock, ready};
use num_traits::FromPrimitive;
use thiserror::Error;
use futures_util::lock::BiLock;
use futures_util::{ready, StreamExt};
use tokio::sync::mpsc;
use crate::{Error, packet::PacketType, util::SeqGenerator};
use crate::util::SeqGenerator;
component! {
ChannelManager : ChannelManagerInner {
@ -27,23 +23,11 @@ component! {
}
}
const ONE_SECOND: Duration = Duration::from_secs(1);
const ONE_SECOND_IN_MS: usize = 1000;
#[derive(Debug, Error, Hash, PartialEq, Eq, Copy, Clone)]
#[derive(Debug, Hash, PartialEq, Eq, Copy, Clone)]
pub struct ChannelError;
impl From<ChannelError> for Error {
fn from(err: ChannelError) -> Self {
Error::aborted(err)
}
}
impl fmt::Display for ChannelError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "channel error")
}
}
pub struct Channel {
receiver: mpsc::UnboundedReceiver<(u8, Bytes)>,
state: ChannelState,
@ -84,7 +68,7 @@ impl ChannelManager {
(seq, channel)
}
pub(crate) fn dispatch(&self, cmd: PacketType, mut data: Bytes) -> Result<(), Error> {
pub(crate) fn dispatch(&self, cmd: u8, mut data: Bytes) {
use std::collections::hash_map::Entry;
let id: u16 = BigEndian::read_u16(data.split_to(2).as_ref());
@ -92,8 +76,10 @@ impl ChannelManager {
self.lock(|inner| {
let current_time = Instant::now();
if let Some(download_measurement_start) = inner.download_measurement_start {
if (current_time - download_measurement_start) > ONE_SECOND {
inner.download_rate_estimate = ONE_SECOND.as_millis() as usize
if (current_time - download_measurement_start).as_millis()
> ONE_SECOND_IN_MS as u128
{
inner.download_rate_estimate = ONE_SECOND_IN_MS
* inner.download_measurement_bytes
/ (current_time - download_measurement_start).as_millis() as usize;
inner.download_measurement_start = Some(current_time);
@ -106,14 +92,9 @@ impl ChannelManager {
inner.download_measurement_bytes += data.len();
if let Entry::Occupied(entry) = inner.channels.entry(id) {
entry
.get()
.send((cmd as u8, data))
.map_err(|_| ChannelError)?;
let _ = entry.get().send((cmd, data));
}
Ok(())
})
});
}
pub fn get_download_rate_estimate(&self) -> usize {
@ -133,8 +114,7 @@ impl Channel {
fn recv_packet(&mut self, cx: &mut Context<'_>) -> Poll<Result<Bytes, ChannelError>> {
let (cmd, packet) = ready!(self.receiver.poll_recv(cx)).ok_or(ChannelError)?;
let packet_type = FromPrimitive::from_u8(cmd);
if let Some(PacketType::ChannelError) = packet_type {
if cmd == 0xa {
let code = BigEndian::read_u16(&packet.as_ref()[..2]);
error!("channel error: {} {}", packet.len(), code);
@ -159,11 +139,7 @@ impl Stream for Channel {
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
loop {
match self.state.clone() {
ChannelState::Closed => {
error!("Polling already terminated channel");
return Poll::Ready(None);
}
ChannelState::Closed => panic!("Polling already terminated channel"),
ChannelState::Header(mut data) => {
if data.is_empty() {
data = ready!(self.recv_packet(cx))?;
@ -171,6 +147,7 @@ impl Stream for Channel {
let length = BigEndian::read_u16(data.split_to(2).as_ref()) as usize;
if length == 0 {
assert_eq!(data.len(), 0);
self.state = ChannelState::Data;
} else {
let header_id = data.split_to(1).as_ref()[0];

View file

@ -1,5 +1,3 @@
pub(crate) const COMPONENT_POISON_MSG: &str = "component mutex should not be poisoned";
macro_rules! component {
($name:ident : $inner:ident { $($key:ident : $ty:ty = $value:expr,)* }) => {
#[derive(Clone)]
@ -16,8 +14,7 @@ macro_rules! component {
#[allow(dead_code)]
fn lock<F: FnOnce(&mut $inner) -> R, R>(&self, f: F) -> R {
let mut inner = (self.0).1.lock()
.expect($crate::component::COMPONENT_POISON_MSG);
let mut inner = (self.0).1.lock().expect("Mutex poisoned");
f(&mut inner)
}

View file

@ -1,70 +1,33 @@
use std::{fmt, path::PathBuf, str::FromStr};
use librespot_protocol::devices::DeviceType as ProtoDeviceType;
use std::fmt;
use std::str::FromStr;
use url::Url;
pub(crate) const KEYMASTER_CLIENT_ID: &str = "65b708073fc0480ea92a077233ca87bd";
pub(crate) const ANDROID_CLIENT_ID: &str = "9a8d2f0ce77a4e248bb71fefcb557637";
pub(crate) const IOS_CLIENT_ID: &str = "58bd3c95768941ea9eb4350aaa033eb3";
// Easily adjust the current platform to mock the behavior on it. If for example
// android or ios needs to be mocked, the `os_version` has to be set to a valid version.
// Otherwise, client-token or login5 requests will fail with a generic invalid-credential error.
/// See [std::env::consts::OS]
pub const OS: &str = std::env::consts::OS;
// valid versions for some os:
// 'android': 30
// 'ios': 17
/// See [sysinfo::System::os_version]
pub fn os_version() -> String {
sysinfo::System::os_version().unwrap_or("0".into())
}
#[derive(Clone, Debug)]
pub struct SessionConfig {
pub client_id: String,
pub user_agent: String,
pub device_id: String,
pub proxy: Option<Url>,
pub ap_port: Option<u16>,
pub tmp_dir: PathBuf,
pub autoplay: Option<bool>,
}
impl SessionConfig {
pub(crate) fn default_for_os(os: &str) -> Self {
let device_id = uuid::Uuid::new_v4().as_hyphenated().to_string();
let client_id = match os {
"android" => ANDROID_CLIENT_ID,
"ios" => IOS_CLIENT_ID,
_ => KEYMASTER_CLIENT_ID,
}
.to_owned();
Self {
client_id,
device_id,
proxy: None,
ap_port: None,
tmp_dir: std::env::temp_dir(),
autoplay: None,
}
}
}
impl Default for SessionConfig {
fn default() -> Self {
Self::default_for_os(OS)
fn default() -> SessionConfig {
let device_id = uuid::Uuid::new_v4().as_hyphenated().to_string();
SessionConfig {
user_agent: crate::version::VERSION_STRING.to_string(),
device_id,
proxy: None,
ap_port: None,
}
}
}
#[derive(Clone, Copy, Debug, Hash, PartialOrd, Ord, PartialEq, Eq, Default)]
#[derive(Clone, Copy, Debug, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub enum DeviceType {
Unknown = 0,
Computer = 1,
Tablet = 2,
Smartphone = 3,
#[default]
Speaker = 4,
Tv = 5,
Avr = 6,
@ -79,6 +42,7 @@ pub enum DeviceType {
UnknownSpotify = 100,
CarThing = 101,
Observer = 102,
HomeThing = 103,
}
impl FromStr for DeviceType {
@ -101,6 +65,7 @@ impl FromStr for DeviceType {
"smartwatch" => Ok(Smartwatch),
"chromebook" => Ok(Chromebook),
"carthing" => Ok(CarThing),
"homething" => Ok(HomeThing),
_ => Err(()),
}
}
@ -128,6 +93,7 @@ impl From<&DeviceType> for &str {
UnknownSpotify => "UnknownSpotify",
CarThing => "CarThing",
Observer => "Observer",
HomeThing => "HomeThing",
}
}
}
@ -139,33 +105,35 @@ impl From<DeviceType> for &str {
}
impl fmt::Display for DeviceType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let str: &str = self.into();
f.write_str(str)
}
}
impl From<DeviceType> for ProtoDeviceType {
fn from(value: DeviceType) -> Self {
match value {
DeviceType::Unknown => ProtoDeviceType::UNKNOWN,
DeviceType::Computer => ProtoDeviceType::COMPUTER,
DeviceType::Tablet => ProtoDeviceType::TABLET,
DeviceType::Smartphone => ProtoDeviceType::SMARTPHONE,
DeviceType::Speaker => ProtoDeviceType::SPEAKER,
DeviceType::Tv => ProtoDeviceType::TV,
DeviceType::Avr => ProtoDeviceType::AVR,
DeviceType::Stb => ProtoDeviceType::STB,
DeviceType::AudioDongle => ProtoDeviceType::AUDIO_DONGLE,
DeviceType::GameConsole => ProtoDeviceType::GAME_CONSOLE,
DeviceType::CastAudio => ProtoDeviceType::CAST_VIDEO,
DeviceType::CastVideo => ProtoDeviceType::CAST_AUDIO,
DeviceType::Automobile => ProtoDeviceType::AUTOMOBILE,
DeviceType::Smartwatch => ProtoDeviceType::SMARTWATCH,
DeviceType::Chromebook => ProtoDeviceType::CHROMEBOOK,
DeviceType::UnknownSpotify => ProtoDeviceType::UNKNOWN_SPOTIFY,
DeviceType::CarThing => ProtoDeviceType::CAR_THING,
DeviceType::Observer => ProtoDeviceType::OBSERVER,
impl Default for DeviceType {
fn default() -> DeviceType {
DeviceType::Speaker
}
}
#[derive(Clone, Debug)]
pub struct ConnectConfig {
pub name: String,
pub device_type: DeviceType,
pub initial_volume: Option<u16>,
pub has_volume_ctrl: bool,
pub autoplay: bool,
}
impl Default for ConnectConfig {
fn default() -> ConnectConfig {
ConnectConfig {
name: "Librespot".to_string(),
device_type: DeviceType::default(),
initial_volume: Some(50),
has_volume_ctrl: true,
autoplay: false,
}
}
}

View file

@ -1,20 +1,12 @@
use std::io;
use byteorder::{BigEndian, ByteOrder};
use bytes::{BufMut, Bytes, BytesMut};
use shannon::Shannon;
use thiserror::Error;
use std::io;
use tokio_util::codec::{Decoder, Encoder};
const HEADER_SIZE: usize = 3;
const MAC_SIZE: usize = 4;
#[derive(Debug, Error)]
pub enum ApCodecError {
#[error("payload was malformed")]
Payload,
}
#[derive(Debug)]
enum DecodeState {
Header,
@ -95,10 +87,7 @@ impl Decoder for ApCodec {
let mut payload = buf.split_to(size + MAC_SIZE);
self.decode_cipher
.decrypt(payload.get_mut(..size).ok_or_else(|| {
io::Error::new(io::ErrorKind::InvalidData, ApCodecError::Payload)
})?);
self.decode_cipher.decrypt(payload.get_mut(..size).unwrap());
let mac = payload.split_off(size);
self.decode_cipher.check_mac(mac.as_ref())?;

View file

@ -1,101 +1,33 @@
use std::{env::consts::ARCH, io};
use byteorder::{BigEndian, ByteOrder, WriteBytesExt};
use hmac::{Hmac, Mac};
use protobuf::Message;
use rand::RngCore;
use rsa::{BigUint, Pkcs1v15Sign, RsaPublicKey};
use sha1::{Digest, Sha1};
use thiserror::Error;
use hmac::{Hmac, Mac, NewMac};
use protobuf::{self, Message};
use rand::{thread_rng, RngCore};
use sha1::Sha1;
use std::io;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tokio_util::codec::{Decoder, Framed};
use super::codec::ApCodec;
use crate::{diffie_hellman::DhLocalKeys, version};
use crate::diffie_hellman::DhLocalKeys;
use crate::protocol;
use crate::protocol::keyexchange::{
APResponseMessage, ClientHello, ClientResponsePlaintext, Platform, ProductFlags,
};
const SERVER_KEY: [u8; 256] = [
0xac, 0xe0, 0x46, 0x0b, 0xff, 0xc2, 0x30, 0xaf, 0xf4, 0x6b, 0xfe, 0xc3, 0xbf, 0xbf, 0x86, 0x3d,
0xa1, 0x91, 0xc6, 0xcc, 0x33, 0x6c, 0x93, 0xa1, 0x4f, 0xb3, 0xb0, 0x16, 0x12, 0xac, 0xac, 0x6a,
0xf1, 0x80, 0xe7, 0xf6, 0x14, 0xd9, 0x42, 0x9d, 0xbe, 0x2e, 0x34, 0x66, 0x43, 0xe3, 0x62, 0xd2,
0x32, 0x7a, 0x1a, 0x0d, 0x92, 0x3b, 0xae, 0xdd, 0x14, 0x02, 0xb1, 0x81, 0x55, 0x05, 0x61, 0x04,
0xd5, 0x2c, 0x96, 0xa4, 0x4c, 0x1e, 0xcc, 0x02, 0x4a, 0xd4, 0xb2, 0x0c, 0x00, 0x1f, 0x17, 0xed,
0xc2, 0x2f, 0xc4, 0x35, 0x21, 0xc8, 0xf0, 0xcb, 0xae, 0xd2, 0xad, 0xd7, 0x2b, 0x0f, 0x9d, 0xb3,
0xc5, 0x32, 0x1a, 0x2a, 0xfe, 0x59, 0xf3, 0x5a, 0x0d, 0xac, 0x68, 0xf1, 0xfa, 0x62, 0x1e, 0xfb,
0x2c, 0x8d, 0x0c, 0xb7, 0x39, 0x2d, 0x92, 0x47, 0xe3, 0xd7, 0x35, 0x1a, 0x6d, 0xbd, 0x24, 0xc2,
0xae, 0x25, 0x5b, 0x88, 0xff, 0xab, 0x73, 0x29, 0x8a, 0x0b, 0xcc, 0xcd, 0x0c, 0x58, 0x67, 0x31,
0x89, 0xe8, 0xbd, 0x34, 0x80, 0x78, 0x4a, 0x5f, 0xc9, 0x6b, 0x89, 0x9d, 0x95, 0x6b, 0xfc, 0x86,
0xd7, 0x4f, 0x33, 0xa6, 0x78, 0x17, 0x96, 0xc9, 0xc3, 0x2d, 0x0d, 0x32, 0xa5, 0xab, 0xcd, 0x05,
0x27, 0xe2, 0xf7, 0x10, 0xa3, 0x96, 0x13, 0xc4, 0x2f, 0x99, 0xc0, 0x27, 0xbf, 0xed, 0x04, 0x9c,
0x3c, 0x27, 0x58, 0x04, 0xb6, 0xb2, 0x19, 0xf9, 0xc1, 0x2f, 0x02, 0xe9, 0x48, 0x63, 0xec, 0xa1,
0xb6, 0x42, 0xa0, 0x9d, 0x48, 0x25, 0xf8, 0xb3, 0x9d, 0xd0, 0xe8, 0x6a, 0xf9, 0x48, 0x4d, 0xa1,
0xc2, 0xba, 0x86, 0x30, 0x42, 0xea, 0x9d, 0xb3, 0x08, 0x6c, 0x19, 0x0e, 0x48, 0xb3, 0x9d, 0x66,
0xeb, 0x00, 0x06, 0xa2, 0x5a, 0xee, 0xa1, 0x1b, 0x13, 0x87, 0x3c, 0xd7, 0x19, 0xe6, 0x55, 0xbd,
];
#[derive(Debug, Error)]
pub enum HandshakeError {
#[error("invalid key length")]
InvalidLength,
#[error("server key verification failed")]
VerificationFailed,
}
use crate::protocol::keyexchange::{APResponseMessage, ClientHello, ClientResponsePlaintext};
pub async fn handshake<T: AsyncRead + AsyncWrite + Unpin>(
mut connection: T,
) -> io::Result<Framed<T, ApCodec>> {
let local_keys = DhLocalKeys::random(&mut rand::rng());
let local_keys = DhLocalKeys::random(&mut thread_rng());
let gc = local_keys.public_key();
let mut accumulator = client_hello(&mut connection, gc).await?;
let message: APResponseMessage = recv_packet(&mut connection, &mut accumulator).await?;
let remote_key = message
.challenge
.get_or_default()
.login_crypto_challenge
.get_or_default()
.diffie_hellman
.get_or_default()
.gs()
.to_owned();
let remote_signature = message
.challenge
.get_or_default()
.login_crypto_challenge
.get_or_default()
.diffie_hellman
.get_or_default()
.gs_signature()
.get_challenge()
.get_login_crypto_challenge()
.get_diffie_hellman()
.get_gs()
.to_owned();
// Prevent man-in-the-middle attacks: check server signature
let n = BigUint::from_bytes_be(&SERVER_KEY);
let e = BigUint::new(vec![65537]);
let public_key = RsaPublicKey::new(n, e).map_err(|_| {
io::Error::new(
io::ErrorKind::InvalidData,
HandshakeError::VerificationFailed,
)
})?;
let hash = Sha1::digest(&remote_key);
let padding = Pkcs1v15Sign::new::<Sha1>();
public_key
.verify(padding, &hash, &remote_signature)
.map_err(|_| {
io::Error::new(
io::ErrorKind::InvalidData,
HandshakeError::VerificationFailed,
)
})?;
// OK to proceed
let shared_secret = local_keys.shared_secret(&remote_key);
let (challenge, send_key, recv_key) = compute_keys(&shared_secret, &accumulator)?;
let (challenge, send_key, recv_key) = compute_keys(&shared_secret, &accumulator);
let codec = ApCodec::new(&send_key, &recv_key);
client_response(&mut connection, challenge).await?;
@ -108,90 +40,34 @@ where
T: AsyncWrite + Unpin,
{
let mut client_nonce = vec![0; 0x10];
rand::rng().fill_bytes(&mut client_nonce);
let platform = match crate::config::OS {
"freebsd" | "netbsd" | "openbsd" => match ARCH {
"x86_64" => Platform::PLATFORM_FREEBSD_X86_64,
_ => Platform::PLATFORM_FREEBSD_X86,
},
"ios" => match ARCH {
"aarch64" => Platform::PLATFORM_IPHONE_ARM64,
_ => Platform::PLATFORM_IPHONE_ARM,
},
// Rather than sending `Platform::PLATFORM_ANDROID_ARM` for "android",
// we are spoofing "android" as "linux", as otherwise during Session::connect
// all APs will reject the client with TryAnotherAP, no matter the credentials
// used was obtained via OAuth using KEYMASTER or ANDROID's client ID or
// Login5Manager::login
"linux" | "android" => match ARCH {
"arm" | "aarch64" => Platform::PLATFORM_LINUX_ARM,
"blackfin" => Platform::PLATFORM_LINUX_BLACKFIN,
"mips" => Platform::PLATFORM_LINUX_MIPS,
"sh" => Platform::PLATFORM_LINUX_SH,
"x86_64" => Platform::PLATFORM_LINUX_X86_64,
_ => Platform::PLATFORM_LINUX_X86,
},
"macos" => match ARCH {
"ppc" | "ppc64" => Platform::PLATFORM_OSX_PPC,
"x86_64" => Platform::PLATFORM_OSX_X86_64,
_ => Platform::PLATFORM_OSX_X86,
},
"windows" => match ARCH {
"arm" | "aarch64" => Platform::PLATFORM_WINDOWS_CE_ARM,
"x86_64" => Platform::PLATFORM_WIN32_X86_64,
_ => Platform::PLATFORM_WIN32_X86,
},
_ => Platform::PLATFORM_LINUX_X86,
};
#[cfg(debug_assertions)]
const PRODUCT_FLAGS: ProductFlags = ProductFlags::PRODUCT_FLAG_DEV_BUILD;
#[cfg(not(debug_assertions))]
const PRODUCT_FLAGS: ProductFlags = ProductFlags::PRODUCT_FLAG_NONE;
thread_rng().fill_bytes(&mut client_nonce);
let mut packet = ClientHello::new();
packet
.build_info
.mut_or_insert_default()
// ProductInfo won't push autoplay and perhaps other settings
// when set to anything else than PRODUCT_CLIENT
.set_product(protocol::keyexchange::Product::PRODUCT_CLIENT);
.mut_build_info()
.set_product(protocol::keyexchange::Product::PRODUCT_PARTNER);
packet
.build_info
.mut_or_insert_default()
.product_flags
.push(PRODUCT_FLAGS.into());
.mut_build_info()
.set_platform(protocol::keyexchange::Platform::PLATFORM_LINUX_X86);
packet.mut_build_info().set_version(109800078);
packet
.build_info
.mut_or_insert_default()
.set_platform(platform);
.mut_cryptosuites_supported()
.push(protocol::keyexchange::Cryptosuite::CRYPTO_SUITE_SHANNON);
packet
.build_info
.mut_or_insert_default()
.set_version(version::SPOTIFY_VERSION);
packet
.cryptosuites_supported
.push(protocol::keyexchange::Cryptosuite::CRYPTO_SUITE_SHANNON.into());
packet
.login_crypto_hello
.mut_or_insert_default()
.diffie_hellman
.mut_or_insert_default()
.mut_login_crypto_hello()
.mut_diffie_hellman()
.set_gc(gc);
packet
.login_crypto_hello
.mut_or_insert_default()
.diffie_hellman
.mut_or_insert_default()
.mut_login_crypto_hello()
.mut_diffie_hellman()
.set_server_keys_known(1);
packet.set_client_nonce(client_nonce);
packet.set_padding(vec![0x1e]);
let mut buffer = vec![0, 4];
let size = 2 + 4 + packet.compute_size();
<Vec<u8> as WriteBytesExt>::write_u32::<BigEndian>(&mut buffer, size.try_into().unwrap())?;
packet.write_to_vec(&mut buffer)?;
<Vec<u8> as WriteBytesExt>::write_u32::<BigEndian>(&mut buffer, size).unwrap();
packet.write_to_vec(&mut buffer).unwrap();
connection.write_all(&buffer[..]).await?;
Ok(buffer)
@ -203,19 +79,16 @@ where
{
let mut packet = ClientResponsePlaintext::new();
packet
.login_crypto_response
.mut_or_insert_default()
.diffie_hellman
.mut_or_insert_default()
.mut_login_crypto_response()
.mut_diffie_hellman()
.set_hmac(challenge);
packet.pow_response.mut_or_insert_default();
packet.crypto_response.mut_or_insert_default();
packet.mut_pow_response();
packet.mut_crypto_response();
let mut buffer = vec![];
let size = 4 + packet.compute_size();
<Vec<u8> as WriteBytesExt>::write_u32::<BigEndian>(&mut buffer, size.try_into().unwrap())?;
packet.write_to_vec(&mut buffer)?;
<Vec<u8> as WriteBytesExt>::write_u32::<BigEndian>(&mut buffer, size).unwrap();
packet.write_to_vec(&mut buffer).unwrap();
connection.write_all(&buffer[..]).await?;
Ok(())
@ -229,12 +102,12 @@ where
let header = read_into_accumulator(connection, 4, acc).await?;
let size = BigEndian::read_u32(header) as usize;
let data = read_into_accumulator(connection, size - 4, acc).await?;
let message = M::parse_from_bytes(data)?;
let message = M::parse_from_bytes(data).unwrap();
Ok(message)
}
async fn read_into_accumulator<'b, T: AsyncRead + Unpin>(
connection: &mut T,
async fn read_into_accumulator<'a, 'b, T: AsyncRead + Unpin>(
connection: &'a mut T,
size: usize,
acc: &'b mut Vec<u8>,
) -> io::Result<&'b mut [u8]> {
@ -245,26 +118,24 @@ async fn read_into_accumulator<'b, T: AsyncRead + Unpin>(
Ok(&mut acc[offset..])
}
fn compute_keys(shared_secret: &[u8], packets: &[u8]) -> io::Result<(Vec<u8>, Vec<u8>, Vec<u8>)> {
fn compute_keys(shared_secret: &[u8], packets: &[u8]) -> (Vec<u8>, Vec<u8>, Vec<u8>) {
type HmacSha1 = Hmac<Sha1>;
let mut data = Vec::with_capacity(0x64);
for i in 1..6 {
let mut mac = HmacSha1::new_from_slice(shared_secret).map_err(|_| {
io::Error::new(io::ErrorKind::InvalidData, HandshakeError::InvalidLength)
})?;
let mut mac =
HmacSha1::new_from_slice(shared_secret).expect("HMAC can take key of any size");
mac.update(packets);
mac.update(&[i]);
data.extend_from_slice(&mac.finalize().into_bytes());
}
let mut mac = HmacSha1::new_from_slice(&data[..0x14])
.map_err(|_| io::Error::new(io::ErrorKind::InvalidData, HandshakeError::InvalidLength))?;
let mut mac = HmacSha1::new_from_slice(&data[..0x14]).expect("HMAC can take key of any size");
mac.update(packets);
Ok((
(
mac.finalize().into_bytes().to_vec(),
data[0x14..0x34].to_vec(),
data[0x34..0x54].to_vec(),
))
)
}

View file

@ -1,21 +1,23 @@
mod codec;
mod handshake;
pub use self::{codec::ApCodec, handshake::handshake};
pub use self::codec::ApCodec;
pub use self::handshake::handshake;
use std::{io, time::Duration};
use std::io::{self, ErrorKind};
use std::net::ToSocketAddrs;
use futures_util::{SinkExt, StreamExt};
use num_traits::FromPrimitive;
use protobuf::Message;
use protobuf::{self, Message, ProtobufError};
use thiserror::Error;
use tokio::net::TcpStream;
use tokio_util::codec::Framed;
use url::Url;
use crate::{Error, authentication::Credentials, packet::PacketType, version};
use crate::authentication::Credentials;
use crate::protocol::keyexchange::{APLoginFailed, ErrorCode};
use crate::proxytunnel;
use crate::version;
pub type Transport = Framed<TcpStream, ApCodec>;
@ -23,8 +25,8 @@ fn login_error_message(code: &ErrorCode) -> &'static str {
pub use ErrorCode::*;
match code {
ProtocolError => "Protocol error",
TryAnotherAP => "Try another access point",
BadConnectionId => "Bad connection ID",
TryAnotherAP => "Try another AP",
BadConnectionId => "Bad connection id",
TravelRestriction => "Travel restriction",
PremiumAccountRequired => "Premium account required",
BadCredentials => "Bad credentials",
@ -40,155 +42,127 @@ fn login_error_message(code: &ErrorCode) -> &'static str {
pub enum AuthenticationError {
#[error("Login failed with reason: {}", login_error_message(.0))]
LoginFailed(ErrorCode),
#[error("invalid packet {0}")]
Packet(u8),
#[error("transport returned no data")]
Transport,
#[error("Authentication failed: {0}")]
IoError(#[from] io::Error),
}
impl From<AuthenticationError> for Error {
fn from(err: AuthenticationError) -> Self {
match err {
AuthenticationError::LoginFailed(_) => Error::permission_denied(err),
AuthenticationError::Packet(_) => Error::unimplemented(err),
AuthenticationError::Transport => Error::unavailable(err),
}
impl From<ProtobufError> for AuthenticationError {
fn from(e: ProtobufError) -> Self {
io::Error::new(ErrorKind::InvalidData, e).into()
}
}
impl From<APLoginFailed> for AuthenticationError {
fn from(login_failure: APLoginFailed) -> Self {
Self::LoginFailed(login_failure.error_code())
Self::LoginFailed(login_failure.get_error_code())
}
}
pub async fn connect(host: &str, port: u16, proxy: Option<&Url>) -> io::Result<Transport> {
const TIMEOUT: Duration = Duration::from_secs(5);
tokio::time::timeout(TIMEOUT, {
let socket = crate::socket::connect(host, port, proxy).await?;
debug!("Connection to AP established.");
handshake(socket)
})
.await?
}
pub async fn connect(addr: String, proxy: Option<&Url>) -> io::Result<Transport> {
let socket = if let Some(proxy_url) = proxy {
info!("Using proxy \"{}\"", proxy_url);
pub async fn connect_with_retry(
host: &str,
port: u16,
proxy: Option<&Url>,
max_retries: u8,
) -> io::Result<Transport> {
let mut num_retries = 0;
loop {
match connect(host, port, proxy).await {
Ok(f) => return Ok(f),
Err(e) => {
debug!("Connection to \"{host}:{port}\" failed: {e}");
if num_retries < max_retries {
num_retries += 1;
debug!("Retry access point...");
continue;
}
return Err(e);
}
}
}
let socket_addr = proxy_url.socket_addrs(|| None).and_then(|addrs| {
addrs.into_iter().next().ok_or_else(|| {
io::Error::new(
io::ErrorKind::NotFound,
"Can't resolve proxy server address",
)
})
})?;
let socket = TcpStream::connect(&socket_addr).await?;
let uri = addr.parse::<http::Uri>().map_err(|_| {
io::Error::new(
io::ErrorKind::InvalidData,
"Can't parse access point address",
)
})?;
let host = uri.host().ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
"The access point address contains no hostname",
)
})?;
let port = uri.port().ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
"The access point address contains no port",
)
})?;
proxytunnel::proxy_connect(socket, host, port.as_str()).await?
} else {
let socket_addr = addr.to_socket_addrs()?.next().ok_or_else(|| {
io::Error::new(
io::ErrorKind::NotFound,
"Can't resolve access point address",
)
})?;
TcpStream::connect(&socket_addr).await?
};
handshake(socket).await
}
pub async fn authenticate(
transport: &mut Transport,
credentials: Credentials,
device_id: &str,
) -> Result<Credentials, Error> {
) -> Result<Credentials, AuthenticationError> {
use crate::protocol::authentication::{APWelcome, ClientResponseEncrypted, CpuFamily, Os};
let cpu_family = match std::env::consts::ARCH {
"blackfin" => CpuFamily::CPU_BLACKFIN,
"arm" | "aarch64" => CpuFamily::CPU_ARM,
"ia64" => CpuFamily::CPU_IA64,
"mips" => CpuFamily::CPU_MIPS,
"ppc" => CpuFamily::CPU_PPC,
"ppc64" => CpuFamily::CPU_PPC_64,
"sh" => CpuFamily::CPU_SH,
"x86" => CpuFamily::CPU_X86,
"x86_64" => CpuFamily::CPU_X86_64,
_ => CpuFamily::CPU_UNKNOWN,
};
let os = match crate::config::OS {
"android" => Os::OS_ANDROID,
"freebsd" | "netbsd" | "openbsd" => Os::OS_FREEBSD,
"ios" => Os::OS_IPHONE,
"linux" => Os::OS_LINUX,
"macos" => Os::OS_OSX,
"windows" => Os::OS_WINDOWS,
_ => Os::OS_UNKNOWN,
};
let mut packet = ClientResponseEncrypted::new();
if let Some(username) = credentials.username {
packet
.login_credentials
.mut_or_insert_default()
.set_username(username);
}
packet
.login_credentials
.mut_or_insert_default()
.mut_login_credentials()
.set_username(credentials.username);
packet
.mut_login_credentials()
.set_typ(credentials.auth_type);
packet
.login_credentials
.mut_or_insert_default()
.mut_login_credentials()
.set_auth_data(credentials.auth_data);
packet
.system_info
.mut_or_insert_default()
.set_cpu_family(cpu_family);
packet.system_info.mut_or_insert_default().set_os(os);
.mut_system_info()
.set_cpu_family(CpuFamily::CPU_UNKNOWN);
packet.mut_system_info().set_os(Os::OS_UNKNOWN);
packet
.system_info
.mut_or_insert_default()
.mut_system_info()
.set_system_information_string(format!(
"librespot-{}-{}",
"librespot_{}_{}",
version::SHA_SHORT,
version::BUILD_ID
));
packet
.system_info
.mut_or_insert_default()
.mut_system_info()
.set_device_id(device_id.to_string());
packet.set_version_string(format!("librespot {}", version::SEMVER));
packet.set_version_string(version::VERSION_STRING.to_string());
let cmd = PacketType::Login;
let data = packet.write_to_bytes()?;
let cmd = 0xab;
let data = packet.write_to_bytes().unwrap();
debug!("Authenticating with AP using {:?}", credentials.auth_type);
transport.send((cmd as u8, data)).await?;
let (cmd, data) = transport
.next()
.await
.ok_or(AuthenticationError::Transport)??;
let packet_type = FromPrimitive::from_u8(cmd);
let result = match packet_type {
Some(PacketType::APWelcome) => {
transport.send((cmd, data)).await?;
let (cmd, data) = transport.next().await.expect("EOF")?;
match cmd {
0xac => {
let welcome_data = APWelcome::parse_from_bytes(data.as_ref())?;
let reusable_credentials = Credentials {
username: Some(welcome_data.canonical_username().to_owned()),
auth_type: welcome_data.reusable_auth_credentials_type(),
auth_data: welcome_data.reusable_auth_credentials().to_owned(),
username: welcome_data.get_canonical_username().to_owned(),
auth_type: welcome_data.get_reusable_auth_credentials_type(),
auth_data: welcome_data.get_reusable_auth_credentials().to_owned(),
};
Ok(reusable_credentials)
}
Some(PacketType::AuthFailure) => {
0xad => {
let error_data = APLoginFailed::parse_from_bytes(data.as_ref())?;
Err(error_data.into())
}
_ => {
trace!("Did not expect {cmd:?} AES key packet with data {data:#?}");
Err(AuthenticationError::Packet(cmd))
let msg = format!("Received invalid packet: {}", cmd);
Err(io::Error::new(ErrorKind::InvalidData, msg).into())
}
};
Ok(result?)
}
}

View file

@ -1,81 +0,0 @@
use std::{fmt::Debug, ops::Deref};
use time::{
Date as _Date, OffsetDateTime, PrimitiveDateTime, Time, error::ComponentRange,
format_description::well_known::Iso8601,
};
use crate::Error;
use librespot_protocol as protocol;
use protocol::metadata::Date as DateMessage;
// Allow `?` on `time` component-range failures (e.g. an out-of-range month
// or day) by mapping them onto the crate-wide `Error` type.
impl From<ComponentRange> for Error {
    fn from(err: ComponentRange) -> Self {
        Error::out_of_range(err)
    }
}
/// Thin newtype around [`OffsetDateTime`] used for timestamps in metadata.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Date(pub OffsetDateTime);

// Deref to the inner `OffsetDateTime` so all of its accessors are
// available directly on `Date`.
impl Deref for Date {
    type Target = OffsetDateTime;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl Date {
    /// Milliseconds since the Unix epoch.
    pub fn as_timestamp_ms(&self) -> i64 {
        (self.0.unix_timestamp_nanos() / 1_000_000) as i64
    }

    /// Builds a `Date` from a millisecond Unix timestamp.
    ///
    /// Fails if the timestamp falls outside the range `time` supports.
    pub fn from_timestamp_ms(timestamp: i64) -> Result<Self, Error> {
        let date_time = OffsetDateTime::from_unix_timestamp_nanos(timestamp as i128 * 1_000_000)?;
        Ok(Self(date_time))
    }

    /// Returns the inner (offset-aware) date-time.
    pub fn as_utc(&self) -> OffsetDateTime {
        self.0
    }

    /// Interprets a naive (offset-less) date-time as UTC.
    pub fn from_utc(date_time: PrimitiveDateTime) -> Self {
        Self(date_time.assume_utc())
    }

    /// The current time in UTC.
    pub fn now_utc() -> Self {
        Self(OffsetDateTime::now_utc())
    }

    /// Parses an ISO-8601 formatted string.
    ///
    /// Fails if `input` is not valid ISO-8601.
    pub fn from_iso8601(input: &str) -> Result<Self, Error> {
        let date_time = OffsetDateTime::parse(input, &Iso8601::DEFAULT)?;
        Ok(Self(date_time))
    }
}
// Converts a protobuf `Date` message (which may omit month, day and the
// time fields) into a concrete UTC `Date`, defaulting the missing parts.
impl TryFrom<&DateMessage> for Date {
    type Error = crate::Error;
    fn try_from(msg: &DateMessage) -> Result<Self, Self::Error> {
        // Some metadata contains a year, but no month. In that case just set January.
        let month = if msg.has_month() {
            msg.month() as u8
        } else {
            1
        };
        // Having no day will work, but may be unexpected: it will imply the last day
        // of the month before. So prevent that, and just set day 1.
        let day = if msg.has_day() { msg.day() as u8 } else { 1 };
        let date = _Date::from_calendar_date(msg.year(), month.try_into()?, day)?;
        // Seconds are not carried by the protobuf message; they are fixed to 0.
        let time = Time::from_hms(msg.hour() as u8, msg.minute() as u8, 0)?;
        Ok(Self::from_utc(PrimitiveDateTime::new(date, time)))
    }
}
// Cheap wrapping conversion; `Date` is a transparent newtype.
impl From<OffsetDateTime> for Date {
    fn from(datetime: OffsetDateTime) -> Self {
        Self(datetime)
    }
}

View file

@ -1,174 +0,0 @@
use futures_core::Stream;
use futures_util::StreamExt;
use std::{pin::Pin, str::FromStr, sync::OnceLock};
use thiserror::Error;
use tokio::sync::mpsc;
use tokio_stream::wrappers::UnboundedReceiverStream;
use url::Url;
use super::{
Builder, Dealer, GetUrlResult, Request, RequestHandler, Responder, Response, Subscription,
protocol::Message,
};
use crate::{Error, Session};
// `component!` is a crate-local macro that generates the `DealerManager`
// session component (holding `DealerManagerInner`) together with the
// `self.lock(..)` accessor used throughout the impl below.
component! {
    DealerManager: DealerManagerInner {
        builder: OnceLock<Builder> = OnceLock::from(Builder::new()),
        dealer: OnceLock<Dealer> = OnceLock::new(),
    }
}

/// A pinned, heap-allocated stream of `T`.
pub type BoxedStream<T> = Pin<Box<dyn Stream<Item = T> + Send>>;
/// A [`BoxedStream`] whose items are fallible.
pub type BoxedStreamResult<T> = BoxedStream<Result<T, Error>>;
/// Internal failure modes of the dealer manager.
#[derive(Error, Debug)]
enum DealerError {
    #[error("Builder wasn't available")]
    BuilderNotAvailable,
    #[error("Websocket couldn't be started because: {0}")]
    LaunchFailure(Error),
    #[error("Failed to set dealer")]
    CouldNotSetDealer,
}

impl From<DealerError> for Error {
    fn from(err: DealerError) -> Self {
        Error::failed_precondition(err)
    }
}

/// Outcome a consumer reports back for a dealer request.
#[derive(Debug)]
pub enum Reply {
    Success,
    Failure,
    /// Deliberately leave the request unanswered (no reply frame is sent).
    Unanswered,
}
/// A dealer request paired with the channel on which to send the [`Reply`].
pub type RequestReply = (Request, mpsc::UnboundedSender<Reply>);
type RequestReceiver = mpsc::UnboundedReceiver<RequestReply>;
type RequestSender = mpsc::UnboundedSender<RequestReply>;

// Adapter that forwards incoming dealer requests onto an unbounded channel
// so they can be handled outside the websocket task.
struct DealerRequestHandler(RequestSender);

impl DealerRequestHandler {
    // Creates the handler together with the receiving end of its channel.
    pub fn new() -> (Self, RequestReceiver) {
        let (tx, rx) = mpsc::unbounded_channel();
        (DealerRequestHandler(tx), rx)
    }
}
impl RequestHandler for DealerRequestHandler {
    // Forwards `request` to the channel and spawns a task that waits for the
    // consumer's `Reply`, translating it into a websocket response.
    fn handle_request(&self, request: Request, responder: Responder) {
        let (tx, mut rx) = mpsc::unbounded_channel();
        if let Err(why) = self.0.send((request, tx)) {
            // No consumer is listening anymore; report failure immediately.
            error!("failed sending dealer request {why}");
            responder.send(Response { success: false });
            return;
        }
        tokio::spawn(async move {
            // If the consumer drops the reply sender, treat it as a failure.
            let reply = rx.recv().await.unwrap_or(Reply::Failure);
            debug!("replying to ws request: {reply:?}");
            match reply {
                Reply::Unanswered => responder.force_unanswered(),
                Reply::Success | Reply::Failure => responder.send(Response {
                    success: matches!(reply, Reply::Success),
                }),
            }
        });
    }
}
impl DealerManager {
    /// Resolves the dealer endpoint and builds a websocket URL carrying a
    /// freshly obtained access token.
    async fn get_url(session: Session) -> GetUrlResult {
        let (host, port) = session.apresolver().resolve("dealer").await?;
        let token = session.login5().auth_token().await?.access_token;
        let url = format!("wss://{host}:{port}/?access_token={token}");
        let url = Url::from_str(&url)?;
        Ok(url)
    }

    /// Subscribes to messages for `url`: on the live dealer once launched,
    /// otherwise on the pending builder.
    pub fn add_listen_for(&self, url: impl Into<String>) -> Result<Subscription, Error> {
        let url = url.into();
        self.lock(|inner| {
            if let Some(dealer) = inner.dealer.get() {
                dealer.subscribe(&[&url])
            } else if let Some(builder) = inner.builder.get_mut() {
                builder.subscribe(&[&url])
            } else {
                Err(DealerError::BuilderNotAvailable.into())
            }
        })
    }

    /// Like [`Self::add_listen_for`], but maps each raw message through `t`.
    pub fn listen_for<T>(
        &self,
        uri: impl Into<String>,
        t: impl Fn(Message) -> Result<T, Error> + Send + 'static,
    ) -> Result<BoxedStreamResult<T>, Error> {
        Ok(Box::pin(self.add_listen_for(uri)?.map(t)))
    }

    /// Registers a request handler for `url` and returns the channel on which
    /// incoming requests (paired with their reply senders) arrive.
    pub fn add_handle_for(&self, url: impl Into<String>) -> Result<RequestReceiver, Error> {
        let url = url.into();
        let (handler, receiver) = DealerRequestHandler::new();
        self.lock(|inner| {
            if let Some(dealer) = inner.dealer.get() {
                dealer.add_handler(&url, handler).map(|_| receiver)
            } else if let Some(builder) = inner.builder.get_mut() {
                builder.add_handler(&url, handler).map(|_| receiver)
            } else {
                Err(DealerError::BuilderNotAvailable.into())
            }
        })
    }

    /// Stream-typed variant of [`Self::add_handle_for`].
    pub fn handle_for(&self, uri: impl Into<String>) -> Result<BoxedStream<RequestReply>, Error> {
        Ok(Box::pin(
            self.add_handle_for(uri).map(UnboundedReceiverStream::new)?,
        ))
    }

    /// Whether a handler or subscription already exists for `uri`.
    pub fn handles(&self, uri: &str) -> bool {
        self.lock(|inner| {
            if let Some(dealer) = inner.dealer.get() {
                dealer.handles(uri)
            } else if let Some(builder) = inner.builder.get() {
                builder.handles(uri)
            } else {
                false
            }
        })
    }

    /// Consumes the builder and launches the websocket connection.
    pub async fn start(&self) -> Result<(), Error> {
        debug!("Launching dealer");
        let session = self.session();
        // the url has to be a function that can retrieve a new url,
        // otherwise when we later try to reconnect with the initial url/token
        // and the token is expired we will just get 401 error
        let get_url = move || Self::get_url(session.clone());
        let dealer = self
            .lock(move |inner| inner.builder.take())
            .ok_or(DealerError::BuilderNotAvailable)?
            .launch(get_url, None)
            .await
            .map_err(DealerError::LaunchFailure)?;
        self.lock(|inner| inner.dealer.set(dealer))
            .map_err(|_| DealerError::CouldNotSetDealer)?;
        Ok(())
    }

    /// Shuts down the dealer, if one was started.
    pub async fn close(&self) {
        if let Some(dealer) = self.lock(|inner| inner.dealer.take()) {
            dealer.close().await
        }
    }
}

View file

@ -1,155 +0,0 @@
use std::collections::HashMap;
use crate::Error;
use thiserror::Error;
/// Raised when registering a handler on a URI path that already has one.
#[derive(Debug, Error)]
pub enum HandlerMapError {
    #[error("request was already handled")]
    AlreadyHandled,
}

impl From<HandlerMapError> for Error {
    fn from(err: HandlerMapError) -> Self {
        Error::aborted(err)
    }
}
/// A trie keyed on URI path components: a `Leaf` terminates a path with its
/// handler, a `Branch` maps the next component to a subtree.
pub enum HandlerMap<T> {
    Leaf(T),
    Branch(HashMap<String, HandlerMap<T>>),
}

impl<T> Default for HandlerMap<T> {
    // Starts out as an empty branch with no handlers registered.
    fn default() -> Self {
        Self::Branch(HashMap::new())
    }
}

impl<T> HandlerMap<T> {
    // NOTE(review): this checks only whether the root branch has a direct
    // child keyed by the literal `path` string — it is not a component-wise
    // path lookup like `get`. Confirm against the caller (`handles` in the
    // dealer module) whether that is the intended semantics.
    pub fn contains(&self, path: &str) -> bool {
        matches!(self, HandlerMap::Branch(map) if map.contains_key(path))
    }

    /// Inserts `handler` at the node addressed by `path`.
    ///
    /// Fails with `AlreadyHandled` when a leaf already exists along the way,
    /// or when the target node already has children of its own.
    pub fn insert<'a>(
        &mut self,
        mut path: impl Iterator<Item = &'a str>,
        handler: T,
    ) -> Result<(), Error> {
        match self {
            Self::Leaf(_) => Err(HandlerMapError::AlreadyHandled.into()),
            Self::Branch(children) => {
                if let Some(component) = path.next() {
                    let node = children.entry(component.to_owned()).or_default();
                    node.insert(path, handler)
                } else if children.is_empty() {
                    // End of the path and no children: this node becomes the leaf.
                    *self = Self::Leaf(handler);
                    Ok(())
                } else {
                    Err(HandlerMapError::AlreadyHandled.into())
                }
            }
        }
    }

    /// Walks `path` and returns the handler found; a leaf encountered before
    /// the path is exhausted also matches (prefix handling).
    pub fn get<'a>(&self, mut path: impl Iterator<Item = &'a str>) -> Option<&T> {
        match self {
            Self::Leaf(t) => Some(t),
            Self::Branch(m) => {
                let component = path.next()?;
                m.get(component)?.get(path)
            }
        }
    }

    /// Removes and returns the handler at `path`, pruning branches that
    /// become empty along the way.
    pub fn remove<'a>(&mut self, mut path: impl Iterator<Item = &'a str>) -> Option<T> {
        match self {
            Self::Leaf(_) => match std::mem::take(self) {
                Self::Leaf(t) => Some(t),
                // `take` only swaps in the default branch; we just matched a
                // leaf, so any other shape here is impossible.
                _ => unreachable!(),
            },
            Self::Branch(map) => {
                let component = path.next()?;
                let next = map.get_mut(component)?;
                let result = next.remove(path);
                match &*next {
                    Self::Branch(b) if b.is_empty() => {
                        map.remove(component);
                    }
                    _ => (),
                }
                result
            }
        }
    }
}
/// A trie of subscribers keyed on URI path components. Subscribers stored at
/// a node are considered interested in everything published at or below it.
pub struct SubscriberMap<T> {
    // Subscribers registered exactly at this node.
    subscribed: Vec<T>,
    // Subtrees keyed by the next path component.
    children: HashMap<String, SubscriberMap<T>>,
}

impl<T> Default for SubscriberMap<T> {
    fn default() -> Self {
        Self {
            subscribed: Vec::new(),
            children: HashMap::new(),
        }
    }
}

impl<T> SubscriberMap<T> {
    /// Registers `handler` at the node addressed by `path`.
    pub fn insert<'a>(&mut self, mut path: impl Iterator<Item = &'a str>, handler: T) {
        if let Some(component) = path.next() {
            self.children
                .entry(component.to_owned())
                .or_default()
                .insert(path, handler);
        } else {
            self.subscribed.push(handler);
        }
    }

    /// Whether any subscriber exists at this node or along/below `path`.
    pub fn contains<'a>(&self, mut path: impl Iterator<Item = &'a str>) -> bool {
        if !self.subscribed.is_empty() {
            return true;
        }
        if let Some(next) = path.next() {
            if let Some(next_map) = self.children.get(next) {
                return next_map.contains(path);
            }
        } else {
            // Path exhausted: any remaining subtree content counts.
            return !self.is_empty();
        }
        false
    }

    /// True when this node has no subscribers and no children.
    pub fn is_empty(&self) -> bool {
        self.children.is_empty() && self.subscribed.is_empty()
    }

    /// Visits every subscriber along `path`, keeping only those for which
    /// `fun` returns `true`, and prunes children that become empty.
    ///
    /// Returns `true` if any subscriber was visited.
    pub fn retain<'a>(
        &mut self,
        mut path: impl Iterator<Item = &'a str>,
        fun: &mut impl FnMut(&T) -> bool,
    ) -> bool {
        let mut handled_by_any = false;
        self.subscribed.retain(|x| {
            // Mark as handled even if `fun` drops the subscriber: it was visited.
            handled_by_any = true;
            fun(x)
        });
        if let Some(next) = path.next() {
            if let Some(child) = self.children.get_mut(next) {
                // Bug fix: this used to be `handled_by_any || child.retain(..)`,
                // which short-circuited once a subscriber was found at this
                // level — deeper subscribers on the same path were then never
                // visited and empty children were never pruned. Recurse
                // unconditionally instead.
                let child_handled = child.retain(path, fun);
                handled_by_any = handled_by_any || child_handled;
                if child.is_empty() {
                    self.children.remove(next);
                }
            }
        }
        handled_by_any
    }
}

View file

@ -1,724 +0,0 @@
pub mod manager;
mod maps;
pub mod protocol;
use std::{
iter,
pin::Pin,
sync::{
Arc, Mutex,
atomic::{self, AtomicBool},
},
task::Poll,
time::Duration,
};
use futures_core::{Future, Stream};
use futures_util::{SinkExt, StreamExt, future::join_all};
use thiserror::Error;
use tokio::{
select,
sync::{
Semaphore,
mpsc::{self, UnboundedReceiver},
},
task::JoinHandle,
};
use tokio_tungstenite::tungstenite;
use tungstenite::error::UrlError;
use url::Url;
use self::{
maps::*,
protocol::{Message, MessageOrRequest, Request, WebsocketMessage, WebsocketRequest},
};
use crate::{
Error, socket,
util::{CancelOnDrop, TimeoutOnDrop, keep_flushing},
};
// Convenience aliases for the tungstenite websocket types and the fallible
// results used throughout this module.
type WsMessage = tungstenite::Message;
type WsError = tungstenite::Error;
type WsResult<T> = Result<T, Error>;
// Result of a `get_url` callback: a fresh dealer websocket URL.
type GetUrlResult = Result<Url, Error>;

impl From<WsError> for Error {
    fn from(err: WsError) -> Self {
        Error::failed_precondition(err)
    }
}

// Timing parameters for the websocket task. NOTE(review): the ping and
// reconnect constants are presumably consumed by the connection loop
// (`run`/`connect`), which is not visible in this chunk — confirm there.
const WEBSOCKET_CLOSE_TIMEOUT: Duration = Duration::from_secs(3);
const PING_INTERVAL: Duration = Duration::from_secs(30);
const PING_TIMEOUT: Duration = Duration::from_secs(3);
const RECONNECT_INTERVAL: Duration = Duration::from_secs(10);

// Panic messages used with `expect` when a handler mutex is poisoned.
const DEALER_REQUEST_HANDLERS_POISON_MSG: &str =
    "dealer request handlers mutex should not be poisoned";
const DEALER_MESSAGE_HANDLERS_POISON_MSG: &str =
    "dealer message handlers mutex should not be poisoned";
// Payload of a reply to a dealer request.
struct Response {
    pub success: bool,
}

// One-shot replier for a dealer request. If dropped without `send` or
// `force_unanswered` being called, it reports `success: false` (see `Drop`).
struct Responder {
    // Request key echoed back in the reply frame.
    key: String,
    // Channel into the websocket send loop.
    tx: mpsc::UnboundedSender<WsMessage>,
    // Whether a reply was sent (or deliberately suppressed).
    sent: bool,
}

impl Responder {
    fn new(key: String, tx: mpsc::UnboundedSender<WsMessage>) -> Self {
        Self {
            key,
            tx,
            sent: false,
        }
    }

    // Should only be called once
    fn send_internal(&mut self, response: Response) {
        let response = serde_json::json!({
            "type": "reply",
            "key": &self.key,
            "payload": {
                "success": response.success,
            }
        })
        .to_string();
        if let Err(e) = self.tx.send(WsMessage::Text(response.into())) {
            warn!("Wasn't able to reply to dealer request: {e}");
        }
    }

    // Sends the reply and marks the request as answered.
    pub fn send(mut self, response: Response) {
        self.send_internal(response);
        self.sent = true;
    }

    // Consumes the responder without sending anything, suppressing the
    // automatic failure reply from `Drop`.
    pub fn force_unanswered(mut self) {
        self.sent = true;
    }
}

impl Drop for Responder {
    fn drop(&mut self) {
        if !self.sent {
            self.send_internal(Response { success: false });
        }
    }
}
// Abstraction over "something that can answer a request": either an
// immediate `Response`, or a future that resolves to one.
trait IntoResponse {
    fn respond(self, responder: Responder);
}

impl IntoResponse for Response {
    fn respond(self, responder: Responder) {
        responder.send(self)
    }
}

// Futures are answered from a spawned task so the dispatch loop never blocks.
impl<F> IntoResponse for F
where
    F: Future<Output = Response> + Send + 'static,
{
    fn respond(self, responder: Responder) {
        tokio::spawn(async move {
            responder.send(self.await);
        });
    }
}

// Blanket impl: any `Fn(Request) -> impl IntoResponse` closure is a handler.
impl<F, R> RequestHandler for F
where
    F: (Fn(Request) -> R) + Send + 'static,
    R: IntoResponse,
{
    fn handle_request(&self, request: Request, responder: Responder) {
        self(request).respond(responder);
    }
}

trait RequestHandler: Send + 'static {
    fn handle_request(&self, request: Request, responder: Responder);
}
// Channel over which subscribed messages are delivered.
type MessageHandler = mpsc::UnboundedSender<Message>;

// TODO: Maybe it's possible to unregister subscription directly when they
// are dropped instead of on next failed attempt.
/// Stream of dealer messages for a set of subscribed URIs.
pub struct Subscription(UnboundedReceiver<Message>);

impl Stream for Subscription {
    type Item = Message;
    // Delegates directly to the underlying unbounded receiver.
    fn poll_next(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> Poll<Option<Self::Item>> {
        self.0.poll_recv(cx)
    }
}
/// Splits a dealer URI into its scheme followed by its path components.
///
/// Recognized shapes: `hm://a/b` (separator `/`), `spotify:a:b`
/// (separator `:`), and bare slash-separated paths (empty scheme label).
/// Anything else yields `None`. A trailing separator is ignored.
fn split_uri(s: &str) -> Option<impl Iterator<Item = &'_ str>> {
    // Pick the scheme label, separator character and remaining tail.
    let (scheme, sep, tail) = match s {
        _ if s.starts_with("hm://") => ("hm", '/', &s["hm://".len()..]),
        _ if s.starts_with("spotify:") => ("spotify", ':', &s["spotify:".len()..]),
        _ if s.contains('/') => ("", '/', s),
        _ => return None,
    };
    let trimmed = tail.trim_end_matches(sep);
    // Yield the scheme first, then each path component in order.
    Some(iter::once(scheme).chain(trimmed.split(sep)))
}
/// Errors from registering a request handler.
#[derive(Debug, Clone, Error)]
enum AddHandlerError {
    #[error("There is already a handler for the given uri")]
    AlreadyHandled,
    #[error("The specified uri {0} is invalid")]
    InvalidUri(String),
}

impl From<AddHandlerError> for Error {
    fn from(err: AddHandlerError) -> Self {
        match err {
            AddHandlerError::AlreadyHandled => Error::aborted(err),
            AddHandlerError::InvalidUri(_) => Error::invalid_argument(err),
        }
    }
}
#[derive(Debug, Clone, Error)]
enum SubscriptionError {
#[error("The specified uri is invalid")]
InvalidUri(String),
}
// Subscription failures map onto the crate-wide invalid-argument error.
impl From<SubscriptionError> for Error {
    fn from(err: SubscriptionError) -> Self {
        Error::invalid_argument(err)
    }
}
// Registers `handler` under `uri` in the request-handler trie.
fn add_handler(
    map: &mut HandlerMap<Box<dyn RequestHandler>>,
    uri: &str,
    handler: impl RequestHandler,
) -> Result<(), Error> {
    let split = split_uri(uri).ok_or_else(|| AddHandlerError::InvalidUri(uri.to_string()))?;
    map.insert(split, Box::new(handler))
}

// Removes and returns the handler registered under `uri`, if any.
fn remove_handler<T>(map: &mut HandlerMap<T>, uri: &str) -> Option<T> {
    map.remove(split_uri(uri)?)
}

// Subscribes a single channel to all the given URIs at once; all of them
// feed the one `Subscription` that is returned.
fn subscribe(
    map: &mut SubscriberMap<MessageHandler>,
    uris: &[&str],
) -> Result<Subscription, Error> {
    let (tx, rx) = mpsc::unbounded_channel();
    for &uri in uris {
        let split = split_uri(uri).ok_or_else(|| SubscriptionError::InvalidUri(uri.to_string()))?;
        map.insert(split, tx.clone());
    }
    Ok(Subscription(rx))
}

// Whether `uri` is covered by either a request handler or a subscription.
fn handles(
    req_map: &HandlerMap<Box<dyn RequestHandler>>,
    msg_map: &SubscriberMap<MessageHandler>,
    uri: &str,
) -> bool {
    if req_map.contains(uri) {
        return true;
    }
    match split_uri(uri) {
        None => false,
        Some(mut split) => msg_map.contains(&mut split),
    }
}
// Collects message subscriptions and request handlers before the dealer's
// websocket task is launched.
#[derive(Default)]
struct Builder {
    message_handlers: SubscriberMap<MessageHandler>,
    request_handlers: HandlerMap<Box<dyn RequestHandler>>,
}
// Builds the shared state from a `Builder`, spawns the connection task given
// by `$body` (with `$shared` bound to a clone of that state), and wraps the
// task handle so dropping the `Dealer` tears it down within a timeout.
macro_rules! create_dealer {
    ($builder:expr, $shared:ident -> $body:expr) => {
        match $builder {
            builder => {
                let shared = Arc::new(DealerShared {
                    message_handlers: Mutex::new(builder.message_handlers),
                    request_handlers: Mutex::new(builder.request_handlers),
                    notify_drop: Semaphore::new(0),
                });
                let handle = {
                    let $shared = Arc::clone(&shared);
                    tokio::spawn($body)
                };
                Dealer {
                    shared,
                    handle: TimeoutOnDrop::new(handle, WEBSOCKET_CLOSE_TIMEOUT),
                }
            }
        }
    };
}
impl Builder {
    pub fn new() -> Self {
        Self::default()
    }

    /// Registers a request handler for `uri` (before launch).
    pub fn add_handler(&mut self, uri: &str, handler: impl RequestHandler) -> Result<(), Error> {
        add_handler(&mut self.request_handlers, uri, handler)
    }

    /// Subscribes to messages for all `uris` (before launch).
    pub fn subscribe(&mut self, uris: &[&str]) -> Result<Subscription, Error> {
        subscribe(&mut self.message_handlers, uris)
    }

    /// Whether a handler or subscription exists for `uri`.
    pub fn handles(&self, uri: &str) -> bool {
        handles(&self.request_handlers, &self.message_handlers, uri)
    }

    /// Starts the dealer without waiting for the first connection attempt;
    /// connecting happens entirely inside the background task.
    pub fn launch_in_background<Fut, F>(self, get_url: F, proxy: Option<Url>) -> Dealer
    where
        Fut: Future<Output = GetUrlResult> + Send + 'static,
        F: (Fn() -> Fut) + Send + 'static,
    {
        create_dealer!(self, shared -> run(shared, None, get_url, proxy))
    }

    /// Starts the dealer, performing the initial connection attempt before
    /// returning; afterwards the connection lives in a background task.
    pub async fn launch<Fut, F>(self, get_url: F, proxy: Option<Url>) -> WsResult<Dealer>
    where
        Fut: Future<Output = GetUrlResult> + Send + 'static,
        F: (Fn() -> Fut) + Send + 'static,
    {
        let dealer = create_dealer!(self, shared -> {
            // Try to connect.
            let url = get_url().await?;
            let tasks = connect(&url, proxy.as_ref(), &shared).await?;
            // If a connection is established, continue in a background task.
            run(shared, Some(tasks), get_url, proxy)
        });
        Ok(dealer)
    }
}
// State shared between the `Dealer` handle and its background task.
struct DealerShared {
    message_handlers: Mutex<SubscriberMap<MessageHandler>>,
    request_handlers: Mutex<HandlerMap<Box<dyn RequestHandler>>>,
    // Semaphore with 0 permits. By closing this semaphore, we indicate
    // that the actual Dealer struct has been dropped.
    notify_drop: Semaphore,
}
impl DealerShared {
    /// Parses a raw websocket message and fans it out to every subscriber
    /// along its URI path; subscribers whose channel is gone are dropped.
    fn dispatch_message(&self, mut msg: WebsocketMessage) {
        let msg = match msg.handle_payload() {
            Ok(value) => Message {
                headers: msg.headers,
                payload: value,
                uri: msg.uri,
            },
            Err(why) => {
                warn!("failure during data parsing for {}: {why}", msg.uri);
                return;
            }
        };
        if let Some(split) = split_uri(&msg.uri) {
            if self
                .message_handlers
                .lock()
                .expect(DEALER_MESSAGE_HANDLERS_POISON_MSG)
                // Keep only subscribers whose receiving end is still alive.
                .retain(split, &mut |tx| tx.send(msg.clone()).is_ok())
            {
                return;
            }
        }
        debug!("No subscriber for msg.uri: {}", msg.uri);
    }

    /// Parses a request and routes it to the registered handler; if nothing
    /// answers, the dropped `Responder` replies `success: false` by itself.
    fn dispatch_request(
        &self,
        request: WebsocketRequest,
        send_tx: &mpsc::UnboundedSender<WsMessage>,
    ) {
        trace!("dealer request {}", &request.message_ident);
        let payload_request = match request.handle_payload() {
            Ok(payload) => payload,
            Err(why) => {
                warn!("request payload handling failed because of {why}");
                return;
            }
        };
        // ResponseSender will automatically send "success: false" if it is dropped without an answer.
        let responder = Responder::new(request.key.clone(), send_tx.clone());
        let split = if let Some(split) = split_uri(&request.message_ident) {
            split
        } else {
            warn!(
                "Dealer request with invalid message_ident: {}",
                &request.message_ident
            );
            return;
        };
        let handler_map = self
            .request_handlers
            .lock()
            .expect(DEALER_REQUEST_HANDLERS_POISON_MSG);
        if let Some(handler) = handler_map.get(split) {
            handler.handle_request(payload_request, responder);
            return;
        }
        warn!("No handler for message_ident: {}", &request.message_ident);
    }

    // Routes an incoming frame to the message or the request path.
    fn dispatch(&self, m: MessageOrRequest, send_tx: &mpsc::UnboundedSender<WsMessage>) {
        match m {
            MessageOrRequest::Message(m) => self.dispatch_message(m),
            MessageOrRequest::Request(r) => self.dispatch_request(r, send_tx),
        }
    }

    /// Resolves once the owning `Dealer` is dropped (the semaphore is closed;
    /// it never holds permits, so `acquire` can only end by closure).
    async fn closed(&self) {
        if self.notify_drop.acquire().await.is_ok() {
            error!("should never have gotten a permit");
        }
    }

    fn is_closed(&self) -> bool {
        self.notify_drop.is_closed()
    }
}
/// Handle to a running dealer connection; closing (or dropping) it shuts
/// down the background task, waiting at most `WEBSOCKET_CLOSE_TIMEOUT`.
struct Dealer {
    shared: Arc<DealerShared>,
    handle: TimeoutOnDrop<Result<(), Error>>,
}
impl Dealer {
    /// Registers `handler` for requests on `uri` on the live connection.
    pub fn add_handler<H>(&self, uri: &str, handler: H) -> Result<(), Error>
    where
        H: RequestHandler,
    {
        add_handler(
            &mut self
                .shared
                .request_handlers
                .lock()
                .expect(DEALER_REQUEST_HANDLERS_POISON_MSG),
            uri,
            handler,
        )
    }

    /// Removes and returns the handler registered for `uri`, if any.
    pub fn remove_handler(&self, uri: &str) -> Option<Box<dyn RequestHandler>> {
        remove_handler(
            &mut self
                .shared
                .request_handlers
                .lock()
                .expect(DEALER_REQUEST_HANDLERS_POISON_MSG),
            uri,
        )
    }

    /// Subscribes to messages for the given URIs on the live connection.
    pub fn subscribe(&self, uris: &[&str]) -> Result<Subscription, Error> {
        subscribe(
            &mut self
                .shared
                .message_handlers
                .lock()
                .expect(DEALER_MESSAGE_HANDLERS_POISON_MSG),
            uris,
        )
    }

    /// Returns whether a request handler or subscription exists for `uri`.
    pub fn handles(&self, uri: &str) -> bool {
        handles(
            &self
                .shared
                .request_handlers
                .lock()
                .expect(DEALER_REQUEST_HANDLERS_POISON_MSG),
            &self
                .shared
                .message_handlers
                .lock()
                .expect(DEALER_MESSAGE_HANDLERS_POISON_MSG),
            uri,
        )
    }

    /// Signals the background task to shut down and waits for it to finish
    /// (aborting it if the owning future is dropped mid-await).
    pub async fn close(mut self) {
        debug!("closing dealer");
        self.shared.notify_drop.close();
        if let Some(handle) = self.handle.take() {
            if let Err(e) = CancelOnDrop(handle).await {
                error!("error aborting dealer operations: {e}");
            }
        }
    }
}
/// Initializes a connection and returns futures that will finish when the connection is closed/lost.
///
/// Spawns two tasks: a *send* task forwarding queued `WsMessage`s to the
/// socket, and a *receive* task that dispatches incoming frames and runs
/// the ping/pong keep-alive. Either task ending means the connection is
/// gone; `run` observes that and reconnects.
async fn connect(
    address: &Url,
    proxy: Option<&Url>,
    shared: &Arc<DealerShared>,
) -> WsResult<(JoinHandle<()>, JoinHandle<()>)> {
    let host = address
        .host_str()
        .ok_or(WsError::Url(UrlError::NoHostName))?;
    let default_port = match address.scheme() {
        "ws" => 80,
        "wss" => 443,
        _ => return Err(WsError::Url(UrlError::UnsupportedUrlScheme).into()),
    };
    let port = address.port().unwrap_or(default_port);
    let stream = socket::connect(host, port, proxy).await?;
    let (mut ws_tx, ws_rx) = tokio_tungstenite::client_async_tls(address.as_str(), stream)
        .await?
        .0
        .split();
    let (send_tx, mut send_rx) = mpsc::unbounded_channel::<WsMessage>();
    // Spawn a task that will forward messages from the channel to the websocket.
    let send_task = {
        let shared = Arc::clone(shared);
        tokio::spawn(async move {
            let result = loop {
                select! {
                    // `biased` so the shutdown signal is always checked first.
                    biased;
                    () = shared.closed() => {
                        break Ok(None);
                    }
                    msg = send_rx.recv() => {
                        if let Some(msg) = msg {
                            // New message arrived through channel
                            if let WsMessage::Close(close_frame) = msg {
                                break Ok(close_frame);
                            }
                            if let Err(e) = ws_tx.feed(msg).await {
                                break Err(e);
                            }
                        } else {
                            break Ok(None);
                        }
                    },
                    e = keep_flushing(&mut ws_tx) => {
                        break Err(e)
                    }
                    else => (),
                }
            };
            // Stop accepting new outgoing messages before closing the socket.
            send_rx.close();
            // I don't trust in tokio_tungstenite's implementation of Sink::close.
            let result = match result {
                Ok(close_frame) => ws_tx.send(WsMessage::Close(close_frame)).await,
                Err(WsError::AlreadyClosed) | Err(WsError::ConnectionClosed) => ws_tx.flush().await,
                Err(e) => {
                    warn!("Dealer finished with an error: {e}");
                    ws_tx.send(WsMessage::Close(None)).await
                }
            };
            if let Err(e) = result {
                warn!("Error while closing websocket: {e}");
            }
            debug!("Dropping send task");
        })
    };
    let shared = Arc::clone(shared);
    // A task that receives messages from the web socket.
    let receive_task = tokio::spawn(async {
        // Starts true so the first ping interval doesn't trip the timeout.
        let pong_received = AtomicBool::new(true);
        let send_tx = send_tx;
        let shared = shared;
        let receive_task = async {
            let mut ws_rx = ws_rx;
            loop {
                match ws_rx.next().await {
                    Some(Ok(msg)) => match msg {
                        WsMessage::Text(t) => match serde_json::from_str(&t) {
                            Ok(m) => shared.dispatch(m, &send_tx),
                            Err(e) => warn!("Message couldn't be parsed: {e}. Message was {t}"),
                        },
                        WsMessage::Binary(_) => {
                            info!("Received invalid binary message");
                        }
                        WsMessage::Pong(_) => {
                            trace!("Received pong");
                            pong_received.store(true, atomic::Ordering::Relaxed);
                        }
                        _ => (), // tungstenite handles Close and Ping automatically
                    },
                    Some(Err(e)) => {
                        warn!("Websocket connection failed: {e}");
                        break;
                    }
                    None => {
                        debug!("Websocket connection closed.");
                        break;
                    }
                }
            }
        };
        // Sends pings and checks whether a pong comes back.
        // NOTE(review): the pong flag is stored with Relaxed but loaded with
        // SeqCst — harmless for a simple bool flag, but worth unifying.
        let ping_task = async {
            use tokio::time::{interval, sleep};
            let mut timer = interval(PING_INTERVAL);
            loop {
                timer.tick().await;
                pong_received.store(false, atomic::Ordering::Relaxed);
                if send_tx
                    .send(WsMessage::Ping(bytes::Bytes::default()))
                    .is_err()
                {
                    // The sender is closed.
                    break;
                }
                trace!("Sent ping");
                sleep(PING_TIMEOUT).await;
                if !pong_received.load(atomic::Ordering::SeqCst) {
                    // No response
                    warn!("Websocket peer does not respond.");
                    break;
                }
            }
        };
        // Exit this task as soon as one of our subtasks fails.
        // In both cases the connection is probably lost.
        select! {
            () = ping_task => (),
            () = receive_task => ()
        }
        // Try to take send_task down with us, in case it's still alive.
        let _ = send_tx.send(WsMessage::Close(None));
        debug!("Dropping receive task");
    });
    Ok((send_task, receive_task))
}
/// The main background task for `Dealer`, which coordinates reconnecting.
///
/// While the dealer is open: if both connection tasks are alive, wait for
/// either to finish (connection lost) or for the dealer to be closed; once
/// either task is gone, fetch a fresh URL and reconnect, sleeping
/// `RECONNECT_INTERVAL` between failed attempts. On shutdown, waits for any
/// remaining tasks before returning.
async fn run<F, Fut>(
    shared: Arc<DealerShared>,
    initial_tasks: Option<(JoinHandle<()>, JoinHandle<()>)>,
    mut get_url: F,
    proxy: Option<Url>,
) -> Result<(), Error>
where
    Fut: Future<Output = GetUrlResult> + Send + 'static,
    F: (FnMut() -> Fut) + Send + 'static,
{
    // Wrap each JoinHandle so it is force-finished within the close timeout
    // if dropped.
    let init_task = |t| Some(TimeoutOnDrop::new(t, WEBSOCKET_CLOSE_TIMEOUT));
    let mut tasks = if let Some((s, r)) = initial_tasks {
        (init_task(s), init_task(r))
    } else {
        (None, None)
    };
    while !shared.is_closed() {
        match &mut tasks {
            // Both tasks alive: connection is up, just monitor.
            (Some(t0), Some(t1)) => {
                select! {
                    () = shared.closed() => break,
                    r = t0 => {
                        if let Err(e) = r {
                            error!("timeout on task 0: {e}");
                        }
                        tasks.0.take();
                    },
                    r = t1 => {
                        if let Err(e) = r {
                            error!("timeout on task 1: {e}");
                        }
                        tasks.1.take();
                    }
                }
            }
            // At least one task ended: (re)connect.
            _ => {
                let url = select! {
                    () = shared.closed() => {
                        break
                    },
                    e = get_url() => e
                }?;
                match connect(&url, proxy.as_ref(), &shared).await {
                    Ok((s, r)) => tasks = (init_task(s), init_task(r)),
                    Err(e) => {
                        error!("Error while connecting: {e}");
                        tokio::time::sleep(RECONNECT_INTERVAL).await;
                    }
                }
            }
        }
    }
    // Drain whatever tasks are still running before finishing.
    let tasks = tasks.0.into_iter().chain(tasks.1);
    let _ = join_all(tasks).await;
    Ok(())
}

View file

@ -1,196 +0,0 @@
pub mod request;
pub use request::*;
use std::collections::HashMap;
use std::io::{Error as IoError, Read};
use crate::{Error, deserialize_with::json_proto};
use base64::{DecodeError, Engine, prelude::BASE64_STANDARD};
use flate2::read::GzDecoder;
use log::LevelFilter;
use serde::Deserialize;
use serde_json::Error as SerdeError;
use thiserror::Error;
// Parse options that skip JSON fields unknown to our protobuf definitions
// instead of failing.
// NOTE(review): not referenced anywhere in this module — a constant with the
// same name exists in `deserialize_with`; confirm before removing.
const IGNORE_UNKNOWN: protobuf_json_mapping::ParseOptions = protobuf_json_mapping::ParseOptions {
    ignore_unknown_fields: true,
    _future_options: (),
};
/// Shorthand for an arbitrary JSON value.
type JsonValue = serde_json::Value;
#[derive(Debug, Error)]
enum ProtocolError {
#[error("base64 decoding failed: {0}")]
Base64(DecodeError),
#[error("gzip decoding failed: {0}")]
GZip(IoError),
#[error("deserialization failed: {0}")]
Deserialization(SerdeError),
#[error("payload had more then one value. had {0} values")]
MoreThenOneValue(usize),
#[error("received unexpected data {0:#?}")]
UnexpectedData(PayloadValue),
#[error("payload was empty")]
Empty,
}
impl From<ProtocolError> for Error {
fn from(err: ProtocolError) -> Self {
match err {
ProtocolError::UnexpectedData(_) => Error::unavailable(err),
_ => Error::failed_precondition(err),
}
}
}
/// Request payload envelope: the body is base64-encoded (and possibly
/// gzip-compressed) data.
#[derive(Clone, Debug, Deserialize)]
pub(super) struct Payload {
    pub compressed: String,
}
/// A request pushed by the dealer that expects a reply correlated by `key`.
#[derive(Clone, Debug, Deserialize)]
pub(super) struct WebsocketRequest {
    #[serde(default)]
    pub headers: HashMap<String, String>,
    // Routing URI used to look up the matching RequestHandler.
    pub message_ident: String,
    // Correlation key echoed back in the reply.
    pub key: String,
    pub payload: Payload,
}
/// A fire-and-forget message pushed by the dealer.
#[derive(Clone, Debug, Deserialize)]
pub(super) struct WebsocketMessage {
    #[serde(default)]
    pub headers: HashMap<String, String>,
    pub method: Option<String>,
    #[serde(default)]
    pub payloads: Vec<MessagePayloadValue>,
    pub uri: String,
}
/// Message payload exactly as it appears on the wire: either a (base64)
/// string, raw bytes, or inline JSON.
#[derive(Clone, Debug, Deserialize)]
#[serde(untagged)]
pub enum MessagePayloadValue {
    String(String),
    Bytes(Vec<u8>),
    Json(JsonValue),
}
/// Discriminates incoming dealer frames by their JSON `type` field.
#[derive(Clone, Debug, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub(super) enum MessageOrRequest {
    Message(WebsocketMessage),
    Request(WebsocketRequest),
}
/// Decoded payload of a dealer message.
#[derive(Clone, Debug)]
pub enum PayloadValue {
    Empty,
    Raw(Vec<u8>),
    Json(String),
}
/// A fully decoded message as handed to subscribers.
#[derive(Clone, Debug)]
pub struct Message {
    pub headers: HashMap<String, String>,
    pub payload: PayloadValue,
    pub uri: String,
}
/// Untagged wrapper: first tries to parse the JSON into the protobuf
/// message `T`; if that fails, keeps the raw JSON value instead.
#[derive(Deserialize)]
#[serde(untagged)]
pub enum FallbackWrapper<T: protobuf::MessageFull> {
    Inner(#[serde(deserialize_with = "json_proto")] T),
    Fallback(JsonValue),
}
impl Message {
    /// Parses a JSON payload into protobuf `M`, keeping the raw JSON in
    /// `FallbackWrapper::Fallback` when strict parsing does not apply.
    ///
    /// Errors with `UnexpectedData` when the payload is not JSON.
    pub fn try_from_json<M: protobuf::MessageFull>(
        value: Self,
    ) -> Result<FallbackWrapper<M>, Error> {
        match value.payload {
            PayloadValue::Json(json) => Ok(serde_json::from_str(&json)?),
            other => Err(ProtocolError::UnexpectedData(other).into()),
        }
    }
    /// Parses a raw binary payload as the protobuf message `M`.
    ///
    /// Errors with `UnexpectedData` when the payload is not raw bytes.
    pub fn from_raw<M: protobuf::Message>(value: Self) -> Result<M, Error> {
        match value.payload {
            PayloadValue::Raw(bytes) => {
                M::parse_from_bytes(&bytes).map_err(Error::failed_precondition)
            }
            other => Err(ProtocolError::UnexpectedData(other).into()),
        }
    }
}
impl WebsocketMessage {
    /// Extracts and decodes this message's single payload.
    ///
    /// Returns `Empty` when there are no payloads and an error when there
    /// is more than one. String payloads are base64-decoded, byte payloads
    /// are used as-is, and JSON payloads are passed through as text.
    /// Byte-like payloads are additionally gunzipped when the headers
    /// request it.
    pub fn handle_payload(&mut self) -> Result<PayloadValue, Error> {
        if self.payloads.is_empty() {
            return Ok(PayloadValue::Empty);
        } else if self.payloads.len() > 1 {
            return Err(ProtocolError::MoreThenOneValue(self.payloads.len()).into());
        }
        // Exactly one element at this point, so `pop` takes it.
        let payload = self.payloads.pop().ok_or(ProtocolError::Empty)?;
        let bytes = match payload {
            MessagePayloadValue::String(string) => BASE64_STANDARD
                .decode(string)
                .map_err(ProtocolError::Base64)?,
            MessagePayloadValue::Bytes(bytes) => bytes,
            MessagePayloadValue::Json(json) => return Ok(PayloadValue::Json(json.to_string())),
        };
        handle_transfer_encoding(&self.headers, bytes).map(PayloadValue::Raw)
    }
}
impl WebsocketRequest {
    /// Decodes the request payload: base64 → optional gunzip → UTF-8 JSON →
    /// [`Request`].
    pub fn handle_payload(&self) -> Result<Request, Error> {
        let payload_bytes = BASE64_STANDARD
            .decode(&self.payload.compressed)
            .map_err(ProtocolError::Base64)?;
        let payload = handle_transfer_encoding(&self.headers, payload_bytes)?;
        let payload = String::from_utf8(payload)?;
        // Only pretty-print the payload when trace logging is enabled.
        if log::max_level() >= LevelFilter::Trace {
            if let Ok(json) = serde_json::from_str::<serde_json::Value>(&payload) {
                trace!("websocket request: {json:#?}");
            } else {
                trace!("websocket request: {payload}");
            }
        }
        serde_json::from_str(&payload)
            .map_err(ProtocolError::Deserialization)
            .map_err(Into::into)
    }
}
/// Decompresses `data` according to the `Transfer-Encoding` header.
///
/// Only `gzip` is handled; any other (or missing) encoding returns the
/// bytes unchanged. Decompression failures surface as `ProtocolError::GZip`.
fn handle_transfer_encoding(
    headers: &HashMap<String, String>,
    data: Vec<u8>,
) -> Result<Vec<u8>, Error> {
    let encoding = headers.get("Transfer-Encoding").map(String::as_str);
    if let Some(encoding) = encoding {
        trace!("message was sent with {encoding} encoding ");
    } else {
        trace!("message was sent with no encoding ");
    }
    if !matches!(encoding, Some("gzip")) {
        return Ok(data);
    }
    let mut gz = GzDecoder::new(&data[..]);
    let mut bytes = Vec::new();
    // `read_to_end` either reads the whole stream or fails; the count it
    // returns always equals the bytes appended to an initially-empty `bytes`,
    // so the previous `i == bytes.len()` mismatch branch was unreachable.
    gz.read_to_end(&mut bytes).map_err(ProtocolError::GZip)?;
    Ok(bytes)
}

View file

@ -1,213 +0,0 @@
use crate::{
deserialize_with::*,
protocol::{
context::Context,
context_player_options::ContextPlayerOptionOverrides,
player::{PlayOrigin, ProvidedTrack},
transfer_state::TransferState,
},
};
use serde::Deserialize;
use serde_json::Value;
use std::fmt::{Display, Formatter};
/// A command request received over the dealer connection.
#[derive(Clone, Debug, Deserialize)]
pub struct Request {
    pub message_id: u32,
    // `target_alias_id` has only ever been observed as null so far, so it is
    // not deserialized for now.
    // pub target_alias_id: Option<()>,
    pub sent_by_device_id: String,
    pub command: Command,
}
/// All dealer command endpoints we understand, discriminated by the JSON
/// `endpoint` field; anything else lands in `Unknown`.
#[derive(Clone, Debug, Deserialize)]
#[serde(tag = "endpoint", rename_all = "snake_case")]
pub enum Command {
    Transfer(TransferCommand),
    // Boxed, which keeps the enum's overall size down.
    #[serde(deserialize_with = "boxed")]
    Play(Box<PlayCommand>),
    Pause(PauseCommand),
    SeekTo(SeekToCommand),
    SetShufflingContext(SetValueCommand),
    SetRepeatingTrack(SetValueCommand),
    SetRepeatingContext(SetValueCommand),
    AddToQueue(AddToQueueCommand),
    SetQueue(SetQueueCommand),
    SetOptions(SetOptionsCommand),
    UpdateContext(UpdateContextCommand),
    SkipNext(SkipNextCommand),
    // commands that don't send any context (at least not usually...)
    SkipPrev(GenericCommand),
    Resume(GenericCommand),
    // catch unknown commands, so that we can implement them later
    #[serde(untagged)]
    Unknown(Value),
}
impl Display for Command {
    /// Formats the command as `endpoint: <name>`, prefixing unrecognized
    /// commands with `unknown ` and showing their reported endpoint (or
    /// `???` when none can be extracted).
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        use Command::*;

        let prefix = if matches!(self, Unknown(_)) {
            "unknown "
        } else {
            ""
        };

        let endpoint = match self {
            Transfer(_) => "transfer",
            Play(_) => "play",
            Pause(_) => "pause",
            SeekTo(_) => "seek_to",
            SetShufflingContext(_) => "set_shuffling_context",
            SetRepeatingContext(_) => "set_repeating_context",
            SetRepeatingTrack(_) => "set_repeating_track",
            AddToQueue(_) => "add_to_queue",
            SetQueue(_) => "set_queue",
            SetOptions(_) => "set_options",
            UpdateContext(_) => "update_context",
            SkipNext(_) => "skip_next",
            SkipPrev(_) => "skip_prev",
            Resume(_) => "resume",
            // `Value::get` returns None for non-objects, matching the
            // previous `as_object().and_then(...)` chain.
            Unknown(json) => json
                .get("endpoint")
                .and_then(Value::as_str)
                .unwrap_or("???"),
        };

        write!(f, "endpoint: {prefix}{endpoint}")
    }
}
/// Takes over playback from another device.
#[derive(Clone, Debug, Deserialize)]
pub struct TransferCommand {
    // The transferred session state, sent base64-encoded as protobuf.
    #[serde(default, deserialize_with = "base64_proto")]
    pub data: Option<TransferState>,
    pub options: TransferOptions,
    pub from_device_identifier: String,
    pub logging_params: LoggingParams,
}
/// Starts playback of a new context.
#[derive(Clone, Debug, Deserialize)]
pub struct PlayCommand {
    #[serde(deserialize_with = "json_proto")]
    pub context: Context,
    #[serde(deserialize_with = "json_proto")]
    pub play_origin: PlayOrigin,
    pub options: PlayOptions,
    pub logging_params: LoggingParams,
}
/// Pauses playback.
#[derive(Clone, Debug, Deserialize)]
pub struct PauseCommand {
    // Also carries an `options` object, but it has only been seen empty —
    // investigate which options can be sent here.
    pub logging_params: LoggingParams,
}
/// Seeks to a position in the current track.
#[derive(Clone, Debug, Deserialize)]
pub struct SeekToCommand {
    // NOTE(review): both `value` and `position` are sent; their exact
    // relationship is not evident from this file — confirm against senders.
    pub value: u32,
    pub position: u32,
    pub logging_params: LoggingParams,
}
/// Skips to the next track, optionally to a specific one.
#[derive(Clone, Debug, Deserialize)]
pub struct SkipNextCommand {
    #[serde(default, deserialize_with = "option_json_proto")]
    pub track: Option<ProvidedTrack>,
    pub logging_params: LoggingParams,
}
/// Sets a single boolean player option (shuffle/repeat variants).
#[derive(Clone, Debug, Deserialize)]
pub struct SetValueCommand {
    pub value: bool,
    pub logging_params: LoggingParams,
}
/// Appends a track to the play queue.
#[derive(Clone, Debug, Deserialize)]
pub struct AddToQueueCommand {
    #[serde(deserialize_with = "json_proto")]
    pub track: ProvidedTrack,
    pub logging_params: LoggingParams,
}
/// Replaces the queued next/previous track lists wholesale.
#[derive(Clone, Debug, Deserialize)]
pub struct SetQueueCommand {
    #[serde(deserialize_with = "vec_json_proto")]
    pub next_tracks: Vec<ProvidedTrack>,
    #[serde(deserialize_with = "vec_json_proto")]
    pub prev_tracks: Vec<ProvidedTrack>,
    // this queue revision is actually the last revision, so using it will not update the web ui
    // might be that internally they use the last revision to create the next revision
    pub queue_revision: String,
    pub logging_params: LoggingParams,
}
/// Sets several player options in one request; absent fields are unchanged.
#[derive(Clone, Debug, Deserialize)]
pub struct SetOptionsCommand {
    pub shuffling_context: Option<bool>,
    pub repeating_context: Option<bool>,
    pub repeating_track: Option<bool>,
    pub options: Option<OptionsOptions>,
    pub logging_params: LoggingParams,
}
/// Replaces the current playback context without restarting playback.
#[derive(Clone, Debug, Deserialize)]
pub struct UpdateContextCommand {
    #[serde(deserialize_with = "json_proto")]
    pub context: Context,
    pub session_id: Option<String>,
}
/// Command that carries no payload beyond logging parameters.
#[derive(Clone, Debug, Deserialize)]
pub struct GenericCommand {
    pub logging_params: LoggingParams,
}
/// Options accompanying a transfer request.
// NOTE(review): these flags arrive as strings rather than booleans —
// presumably "true"/"false"; confirm against the sender before converting.
#[derive(Clone, Debug, Deserialize)]
pub struct TransferOptions {
    pub restore_paused: String,
    pub restore_position: String,
    pub restore_track: String,
    pub retain_session: String,
}
/// Options accompanying a play request.
#[derive(Clone, Debug, Deserialize)]
pub struct PlayOptions {
    pub skip_to: Option<SkipTo>,
    #[serde(default, deserialize_with = "option_json_proto")]
    pub player_options_override: Option<ContextPlayerOptionOverrides>,
    pub license: Option<String>,
    // may also be sent like in the web API
    pub seek_to: Option<u32>,
    // fields below have been observed from mobile clients
    pub always_play_something: Option<bool>,
    pub audio_stream: Option<String>,
    pub initially_paused: Option<bool>,
    pub prefetch_level: Option<String>,
    pub system_initiated: Option<bool>,
}
/// Option flags nested inside `SetOptionsCommand` (currently unread).
#[derive(Clone, Debug, Deserialize)]
pub struct OptionsOptions {
    only_for_local_device: bool,
    override_restrictions: bool,
    system_initiated: bool,
}
/// Identifies where in a context playback should start.
#[derive(Clone, Debug, Deserialize, Default)]
pub struct SkipTo {
    pub track_uid: Option<String>,
    pub track_uri: Option<String>,
    pub track_index: Option<u32>,
}
/// Telemetry/correlation parameters attached to most commands.
#[derive(Clone, Debug, Deserialize)]
pub struct LoggingParams {
    pub interaction_ids: Option<Vec<String>>,
    pub device_identifier: Option<String>,
    pub command_initiated_time: Option<i64>,
    pub page_instance_ids: Option<Vec<String>>,
    pub command_id: Option<String>,
}

View file

@ -1,90 +0,0 @@
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use protobuf::MessageFull;
use serde::de::{Error, Unexpected};
use serde::{Deserialize, Deserializer};
use serde_json::Value;
// Parse options that skip JSON fields unknown to our protobuf definitions
// instead of failing.
const IGNORE_UNKNOWN: protobuf_json_mapping::ParseOptions = protobuf_json_mapping::ParseOptions {
    ignore_unknown_fields: true,
    _future_options: (),
};
/// Parses a JSON value into the protobuf message `T`, ignoring unknown fields.
fn parse_value_to_msg<T: MessageFull>(
    value: &Value,
) -> Result<T, protobuf_json_mapping::ParseError> {
    protobuf_json_mapping::parse_from_str_with_options::<T>(&value.to_string(), &IGNORE_UNKNOWN)
}
/// Serde helper: deserializes a base64-encoded string into the protobuf
/// message `T`.
///
/// Always yields `Some` on success; the `Option` return lets callers pair
/// it with `#[serde(default)]` for absent fields.
pub fn base64_proto<'de, T, D>(de: D) -> Result<Option<T>, D::Error>
where
    T: MessageFull,
    D: Deserializer<'de>,
{
    let v: String = Deserialize::deserialize(de)?;
    let bytes = BASE64_STANDARD
        .decode(v)
        .map_err(|e| Error::custom(e.to_string()))?;
    T::parse_from_bytes(&bytes).map(Some).map_err(Error::custom)
}
/// Serde helper: deserializes a JSON value into the protobuf message `T`,
/// ignoring unknown fields.
pub fn json_proto<'de, T, D>(de: D) -> Result<T, D::Error>
where
    T: MessageFull,
    D: Deserializer<'de>,
{
    let v: Value = Deserialize::deserialize(de)?;
    parse_value_to_msg(&v).map_err(Error::custom)
}
/// Like [`json_proto`], but wrapped in `Some` so it can be combined with
/// `#[serde(default)]`; a present-but-invalid value is still an error.
pub fn option_json_proto<'de, T, D>(de: D) -> Result<Option<T>, D::Error>
where
    T: MessageFull,
    D: Deserializer<'de>,
{
    let v: Value = Deserialize::deserialize(de)?;
    parse_value_to_msg(&v).map(Some).map_err(Error::custom)
}
/// Serde helper: deserializes a JSON array into a vector of protobuf
/// messages.
///
/// NOTE(review): elements that fail to parse are silently skipped
/// (`flat_map` discards the `Err` cases) — confirm this leniency is
/// intentional rather than propagating the first error.
pub fn vec_json_proto<'de, T, D>(de: D) -> Result<Vec<T>, D::Error>
where
    T: MessageFull,
    D: Deserializer<'de>,
{
    let v: Value = Deserialize::deserialize(de)?;
    let array = match v {
        Value::Array(array) => array,
        _ => return Err(Error::custom("the value wasn't an array")),
    };
    let res = array
        .iter()
        .flat_map(parse_value_to_msg)
        .collect::<Vec<T>>();
    Ok(res)
}
pub fn boxed<'de, T, D>(de: D) -> Result<Box<T>, D::Error>
where
T: Deserialize<'de>,
D: Deserializer<'de>,
{
let v: T = Deserialize::deserialize(de)?;
Ok(Box::new(v))
}
pub fn bool_from_string<'de, D>(de: D) -> Result<bool, D::Error>
where
D: Deserializer<'de>,
{
match String::deserialize(de)?.as_ref() {
"true" => Ok(true),
"false" => Ok(false),
other => Err(Error::invalid_value(
Unexpected::Str(other),
&"true or false",
)),
}
}

View file

@ -1,12 +1,11 @@
use std::sync::LazyLock;
use num_bigint::BigUint;
use num_bigint::{BigUint, RandBigInt};
use num_integer::Integer;
use num_traits::{One, Zero};
use once_cell::sync::Lazy;
use rand::{CryptoRng, Rng};
static DH_GENERATOR: LazyLock<BigUint> = LazyLock::new(|| BigUint::from_bytes_be(&[0x02]));
static DH_PRIME: LazyLock<BigUint> = LazyLock::new(|| {
static DH_GENERATOR: Lazy<BigUint> = Lazy::new(|| BigUint::from_bytes_be(&[0x02]));
static DH_PRIME: Lazy<BigUint> = Lazy::new(|| {
BigUint::from_bytes_be(&[
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x0f, 0xda, 0xa2, 0x21, 0x68, 0xc2,
0x34, 0xc4, 0xc6, 0x62, 0x8b, 0x80, 0xdc, 0x1c, 0xd1, 0x29, 0x02, 0x4e, 0x08, 0x8a, 0x67,
@ -41,9 +40,7 @@ pub struct DhLocalKeys {
impl DhLocalKeys {
pub fn random<R: Rng + CryptoRng>(rng: &mut R) -> DhLocalKeys {
let mut bytes = [0u8; 95];
rng.fill_bytes(&mut bytes);
let private_key = BigUint::from_bytes_le(&bytes);
let private_key = rng.gen_biguint(95 * 8);
let public_key = powm(&DH_GENERATOR, &private_key, &DH_PRIME);
DhLocalKeys {

View file

@ -1,507 +0,0 @@
use std::{
error, fmt,
num::{ParseIntError, TryFromIntError},
str::Utf8Error,
string::FromUtf8Error,
};
use base64::DecodeError;
use http::{
header::{InvalidHeaderName, InvalidHeaderValue, ToStrError},
method::InvalidMethod,
status::InvalidStatusCode,
uri::{InvalidUri, InvalidUriParts},
};
use protobuf::Error as ProtobufError;
use thiserror::Error;
use tokio::sync::{
AcquireError, TryAcquireError, mpsc::error::SendError, oneshot::error::RecvError,
};
use url::ParseError;
use librespot_oauth::OAuthError;
/// The crate-wide error type: a coarse category (`kind`) plus the boxed
/// underlying error.
#[derive(Debug)]
pub struct Error {
    pub kind: ErrorKind,
    pub error: Box<dyn error::Error + Send + Sync>,
}
/// Error categories. The discriminants mirror the gRPC canonical status
/// codes, which is why `Unauthenticated = 16` appears out of sequence.
#[derive(Clone, Copy, Debug, Eq, Error, Hash, Ord, PartialEq, PartialOrd)]
pub enum ErrorKind {
    #[error("The operation was cancelled by the caller")]
    Cancelled = 1,
    #[error("Unknown error")]
    Unknown = 2,
    #[error("Client specified an invalid argument")]
    InvalidArgument = 3,
    #[error("Deadline expired before operation could complete")]
    DeadlineExceeded = 4,
    #[error("Requested entity was not found")]
    NotFound = 5,
    #[error("Attempt to create entity that already exists")]
    AlreadyExists = 6,
    #[error("Permission denied")]
    PermissionDenied = 7,
    #[error("No valid authentication credentials")]
    Unauthenticated = 16,
    #[error("Resource has been exhausted")]
    ResourceExhausted = 8,
    #[error("Invalid state")]
    FailedPrecondition = 9,
    #[error("Operation aborted")]
    Aborted = 10,
    #[error("Operation attempted past the valid range")]
    OutOfRange = 11,
    #[error("Not implemented")]
    Unimplemented = 12,
    #[error("Internal error")]
    Internal = 13,
    #[error("Service unavailable")]
    Unavailable = 14,
    #[error("Unrecoverable data loss or corruption")]
    DataLoss = 15,
    #[error("Operation must not be used")]
    DoNotUse = -1,
}
/// String-only wrapper used where the source error itself cannot be boxed
/// (e.g. generic channel payload types); `Display` is implemented manually
/// because the `Error` derive carries no `#[error]` message here.
#[derive(Debug, Error)]
struct ErrorMessage(String);
impl fmt::Display for ErrorMessage {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}
impl Error {
pub fn new<E>(kind: ErrorKind, error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind,
error: error.into(),
}
}
pub fn aborted<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::Aborted,
error: error.into(),
}
}
pub fn already_exists<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::AlreadyExists,
error: error.into(),
}
}
pub fn cancelled<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::Cancelled,
error: error.into(),
}
}
pub fn data_loss<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::DataLoss,
error: error.into(),
}
}
pub fn deadline_exceeded<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::DeadlineExceeded,
error: error.into(),
}
}
pub fn do_not_use<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::DoNotUse,
error: error.into(),
}
}
pub fn failed_precondition<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::FailedPrecondition,
error: error.into(),
}
}
pub fn internal<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::Internal,
error: error.into(),
}
}
pub fn invalid_argument<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::InvalidArgument,
error: error.into(),
}
}
pub fn not_found<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::NotFound,
error: error.into(),
}
}
pub fn out_of_range<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::OutOfRange,
error: error.into(),
}
}
pub fn permission_denied<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::PermissionDenied,
error: error.into(),
}
}
pub fn resource_exhausted<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::ResourceExhausted,
error: error.into(),
}
}
pub fn unauthenticated<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::Unauthenticated,
error: error.into(),
}
}
pub fn unavailable<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::Unavailable,
error: error.into(),
}
}
pub fn unimplemented<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::Unimplemented,
error: error.into(),
}
}
pub fn unknown<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
Self {
kind: ErrorKind::Unknown,
error: error.into(),
}
}
}
impl std::error::Error for Error {
    // Delegates to the inner error's source, i.e. the wrapped error itself
    // is not reported as a separate link in the chain — its text is already
    // part of this error's Display output.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        self.error.source()
    }
}
impl fmt::Display for Error {
    /// Formats as `<kind> { <inner error> }`.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(fmt, "{} {{ {} }}", self.kind, self.error)
    }
}
impl From<OAuthError> for Error {
    /// Classifies OAuth failures: user-facing auth-code problems are
    /// `unavailable`, local I/O and exchange failures are `internal`, and
    /// everything else is a `failed_precondition`.
    fn from(err: OAuthError) -> Self {
        use OAuthError::*;
        match err {
            AuthCodeBadUri { .. }
            | AuthCodeNotFound { .. }
            | AuthCodeListenerRead
            | AuthCodeListenerParse => Error::unavailable(err),
            AuthCodeStdinRead
            | AuthCodeListenerBind { .. }
            | AuthCodeListenerTerminated
            | AuthCodeListenerWrite
            | Recv
            | ExchangeCode { .. } => Error::internal(err),
            _ => Error::failed_precondition(err),
        }
    }
}
// Base64 decode failures indicate malformed input data.
impl From<DecodeError> for Error {
    fn from(err: DecodeError) -> Self {
        Self::new(ErrorKind::FailedPrecondition, err)
    }
}
impl From<http::Error> for Error {
    /// Classifies `http` crate errors by their concrete inner type: builder
    /// input problems are `InvalidArgument`, bad status codes are
    /// `FailedPrecondition`, anything else is `Unknown`.
    fn from(err: http::Error) -> Self {
        if err.is::<InvalidHeaderName>()
            || err.is::<InvalidHeaderValue>()
            || err.is::<InvalidMethod>()
            || err.is::<InvalidUri>()
            || err.is::<InvalidUriParts>()
        {
            return Self::new(ErrorKind::InvalidArgument, err);
        }
        if err.is::<InvalidStatusCode>() {
            return Self::new(ErrorKind::FailedPrecondition, err);
        }
        Self::new(ErrorKind::Unknown, err)
    }
}
impl From<hyper::Error> for Error {
    /// Maps hyper's error predicates onto the closest [`ErrorKind`].
    fn from(err: hyper::Error) -> Self {
        if err.is_parse() || err.is_parse_status() || err.is_user() {
            return Self::new(ErrorKind::Internal, err);
        }
        if err.is_canceled() {
            return Self::new(ErrorKind::Cancelled, err);
        }
        if err.is_incomplete_message() {
            return Self::new(ErrorKind::DataLoss, err);
        }
        if err.is_body_write_aborted() || err.is_closed() {
            return Self::new(ErrorKind::Aborted, err);
        }
        if err.is_timeout() {
            return Self::new(ErrorKind::DeadlineExceeded, err);
        }
        Self::new(ErrorKind::Unknown, err)
    }
}
impl From<hyper_util::client::legacy::Error> for Error {
    // Connection failures are transient (`Unavailable`); everything else is
    // unclassified.
    fn from(err: hyper_util::client::legacy::Error) -> Self {
        if err.is_connect() {
            return Self::new(ErrorKind::Unavailable, err);
        }
        Self::new(ErrorKind::Unknown, err)
    }
}
// The following parse/deserialization failures all mean "the data we got
// was not in the shape we require" → FailedPrecondition.
impl From<time::error::Parse> for Error {
    fn from(err: time::error::Parse) -> Self {
        Self::new(ErrorKind::FailedPrecondition, err)
    }
}
impl From<quick_xml::Error> for Error {
    fn from(err: quick_xml::Error) -> Self {
        Self::new(ErrorKind::FailedPrecondition, err)
    }
}
impl From<serde_json::Error> for Error {
    fn from(err: serde_json::Error) -> Self {
        Self::new(ErrorKind::FailedPrecondition, err)
    }
}
impl From<std::io::Error> for Error {
    /// Maps each `std::io::ErrorKind` onto the closest crate [`ErrorKind`].
    fn from(err: std::io::Error) -> Self {
        use std::io::ErrorKind as IoErrorKind;
        match err.kind() {
            IoErrorKind::NotFound => Self::new(ErrorKind::NotFound, err),
            IoErrorKind::PermissionDenied => Self::new(ErrorKind::PermissionDenied, err),
            IoErrorKind::AddrInUse | IoErrorKind::AlreadyExists => {
                Self::new(ErrorKind::AlreadyExists, err)
            }
            IoErrorKind::AddrNotAvailable
            | IoErrorKind::ConnectionRefused
            | IoErrorKind::NotConnected => Self::new(ErrorKind::Unavailable, err),
            IoErrorKind::BrokenPipe
            | IoErrorKind::ConnectionReset
            | IoErrorKind::ConnectionAborted => Self::new(ErrorKind::Aborted, err),
            IoErrorKind::Interrupted | IoErrorKind::WouldBlock => {
                Self::new(ErrorKind::Cancelled, err)
            }
            IoErrorKind::InvalidData | IoErrorKind::UnexpectedEof => {
                Self::new(ErrorKind::FailedPrecondition, err)
            }
            IoErrorKind::TimedOut => Self::new(ErrorKind::DeadlineExceeded, err),
            IoErrorKind::InvalidInput => Self::new(ErrorKind::InvalidArgument, err),
            IoErrorKind::WriteZero => Self::new(ErrorKind::ResourceExhausted, err),
            _ => Self::new(ErrorKind::Unknown, err),
        }
    }
}
// Straightforward one-kind conversions: malformed data is a
// FailedPrecondition, caller-supplied bad values are InvalidArgument, and a
// dropped oneshot sender is an Internal error.
impl From<FromUtf8Error> for Error {
    fn from(err: FromUtf8Error) -> Self {
        Self::new(ErrorKind::FailedPrecondition, err)
    }
}
impl From<InvalidHeaderValue> for Error {
    fn from(err: InvalidHeaderValue) -> Self {
        Self::new(ErrorKind::InvalidArgument, err)
    }
}
impl From<InvalidUri> for Error {
    fn from(err: InvalidUri) -> Self {
        Self::new(ErrorKind::InvalidArgument, err)
    }
}
impl From<ParseError> for Error {
    fn from(err: ParseError) -> Self {
        Self::new(ErrorKind::FailedPrecondition, err)
    }
}
impl From<ParseIntError> for Error {
    fn from(err: ParseIntError) -> Self {
        Self::new(ErrorKind::FailedPrecondition, err)
    }
}
impl From<TryFromIntError> for Error {
    fn from(err: TryFromIntError) -> Self {
        Self::new(ErrorKind::FailedPrecondition, err)
    }
}
impl From<ProtobufError> for Error {
    fn from(err: ProtobufError) -> Self {
        Self::new(ErrorKind::FailedPrecondition, err)
    }
}
impl From<RecvError> for Error {
    fn from(err: RecvError) -> Self {
        Self::new(ErrorKind::Internal, err)
    }
}
// These are wrapped via `ErrorMessage` text: `SendError<T>` is generic over
// a payload type that cannot be boxed here, and the acquire errors are kept
// consistent with it.
impl<T> From<SendError<T>> for Error {
    fn from(err: SendError<T>) -> Self {
        Self {
            kind: ErrorKind::Internal,
            error: ErrorMessage(err.to_string()).into(),
        }
    }
}
impl From<AcquireError> for Error {
    fn from(err: AcquireError) -> Self {
        Self {
            kind: ErrorKind::ResourceExhausted,
            error: ErrorMessage(err.to_string()).into(),
        }
    }
}
impl From<TryAcquireError> for Error {
    fn from(err: TryAcquireError) -> Self {
        Self {
            kind: ErrorKind::ResourceExhausted,
            error: ErrorMessage(err.to_string()).into(),
        }
    }
}
// Text/protobuf-JSON decoding failures: input data did not satisfy the
// expected format → FailedPrecondition.
impl From<ToStrError> for Error {
    fn from(err: ToStrError) -> Self {
        Self::new(ErrorKind::FailedPrecondition, err)
    }
}
impl From<Utf8Error> for Error {
    fn from(err: Utf8Error) -> Self {
        Self::new(ErrorKind::FailedPrecondition, err)
    }
}
impl From<protobuf_json_mapping::ParseError> for Error {
    fn from(err: protobuf_json_mapping::ParseError) -> Self {
        Self::failed_precondition(err)
    }
}

View file

@ -1,62 +0,0 @@
use std::fmt;
use librespot_protocol as protocol;
use crate::{Error, spotify_id::to_base16};
/// Length in bytes of a file id.
const RAW_LEN: usize = 20;
/// A 20-byte identifier for a media file (audio, image or video).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct FileId(pub [u8; RAW_LEN]);
impl FileId {
    /// Builds a `FileId` from up to 20 raw bytes, zero-padding short input.
    ///
    /// NOTE(review): input longer than 20 bytes is silently ignored and
    /// yields an all-zero id — confirm that is the intended behavior.
    pub fn from_raw(src: &[u8]) -> FileId {
        let mut dst = [0u8; RAW_LEN];
        let len = src.len();
        // some tracks return 16 instead of 20 bytes: #1188
        if len <= RAW_LEN {
            dst[..len].clone_from_slice(src);
        }
        FileId(dst)
    }
    /// Hex-encodes the 20 bytes into a 40-character string.
    #[allow(clippy::wrong_self_convention)]
    pub fn to_base16(&self) -> Result<String, Error> {
        to_base16(&self.0, &mut [0u8; 40])
    }
}
impl fmt::Debug for FileId {
    // Debug shows the hex form, still wrapped in the `Result` from `to_base16`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("FileId").field(&self.to_base16()).finish()
    }
}

impl fmt::Display for FileId {
    // Display is the bare hex string; an encoding failure prints as "".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.to_base16().unwrap_or_default())
    }
}

impl From<&[u8]> for FileId {
    fn from(src: &[u8]) -> Self {
        Self::from_raw(src)
    }
}

// Convenience conversions from protobuf metadata messages carrying a
// `file_id` field.
impl From<&protocol::metadata::Image> for FileId {
    fn from(image: &protocol::metadata::Image) -> Self {
        Self::from(image.file_id())
    }
}

impl From<&protocol::metadata::AudioFile> for FileId {
    fn from(file: &protocol::metadata::AudioFile) -> Self {
        Self::from(file.file_id())
    }
}

impl From<&protocol::metadata::VideoFile> for FileId {
    fn from(video: &protocol::metadata::VideoFile) -> Self {
        Self::from(video.file_id())
    }
}

View file

@ -1,304 +0,0 @@
use std::{
sync::OnceLock,
time::{Duration, Instant},
};
use bytes::Bytes;
use futures_util::{FutureExt, future::IntoStream};
use governor::{
Quota, RateLimiter, clock::MonotonicClock, middleware::NoOpMiddleware,
state::keyed::DefaultKeyedStateStore,
};
use http::{Uri, header::HeaderValue};
use http_body_util::{BodyExt, Full};
use hyper::{HeaderMap, Request, Response, StatusCode, body::Incoming, header::USER_AGENT};
use hyper_proxy2::{Intercept, Proxy, ProxyConnector};
use hyper_util::{
client::legacy::{Client, ResponseFuture, connect::HttpConnector},
rt::TokioExecutor,
};
use nonzero_ext::nonzero;
use thiserror::Error;
use url::Url;
#[cfg(all(feature = "__rustls", not(feature = "native-tls")))]
use hyper_rustls::{HttpsConnector, HttpsConnectorBuilder};
#[cfg(all(feature = "native-tls", not(feature = "__rustls")))]
use hyper_tls::HttpsConnector;
use crate::{
Error,
config::{OS, os_version},
date::Date,
version::{FALLBACK_USER_AGENT, VERSION_STRING, spotify_version},
};
// The 30 seconds interval is documented by Spotify, but the calls per interval
// is a guesstimate and probably subject to licensing (purchasing extra calls)
// and may change at any time.
pub const RATE_LIMIT_INTERVAL: Duration = Duration::from_secs(30);
pub const RATE_LIMIT_MAX_WAIT: Duration = Duration::from_secs(10);
pub const RATE_LIMIT_CALLS_PER_INTERVAL: u32 = 300;
/// Error raised for a non-success HTTP status returned by a request.
#[derive(Debug, Error)]
pub enum HttpClientError {
    #[error("Response status code: {0}")]
    StatusCode(hyper::StatusCode),
}

impl From<HttpClientError> for Error {
    // Maps HTTP status classes onto the crate's gRPC-style error kinds.
    fn from(err: HttpClientError) -> Self {
        match err {
            HttpClientError::StatusCode(code) => {
                // not exhaustive, but what reasonably could be expected
                match code {
                    StatusCode::GATEWAY_TIMEOUT | StatusCode::REQUEST_TIMEOUT => {
                        Error::deadline_exceeded(err)
                    }
                    StatusCode::GONE
                    | StatusCode::NOT_FOUND
                    | StatusCode::MOVED_PERMANENTLY
                    | StatusCode::PERMANENT_REDIRECT
                    | StatusCode::TEMPORARY_REDIRECT => Error::not_found(err),
                    StatusCode::FORBIDDEN | StatusCode::PAYMENT_REQUIRED => {
                        Error::permission_denied(err)
                    }
                    StatusCode::NETWORK_AUTHENTICATION_REQUIRED
                    | StatusCode::PROXY_AUTHENTICATION_REQUIRED
                    | StatusCode::UNAUTHORIZED => Error::unauthenticated(err),
                    StatusCode::EXPECTATION_FAILED
                    | StatusCode::PRECONDITION_FAILED
                    | StatusCode::PRECONDITION_REQUIRED => Error::failed_precondition(err),
                    StatusCode::RANGE_NOT_SATISFIABLE => Error::out_of_range(err),
                    StatusCode::INTERNAL_SERVER_ERROR
                    | StatusCode::MISDIRECTED_REQUEST
                    | StatusCode::SERVICE_UNAVAILABLE
                    | StatusCode::UNAVAILABLE_FOR_LEGAL_REASONS => Error::unavailable(err),
                    StatusCode::BAD_REQUEST
                    | StatusCode::HTTP_VERSION_NOT_SUPPORTED
                    | StatusCode::LENGTH_REQUIRED
                    | StatusCode::METHOD_NOT_ALLOWED
                    | StatusCode::NOT_ACCEPTABLE
                    | StatusCode::PAYLOAD_TOO_LARGE
                    | StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE
                    | StatusCode::UNSUPPORTED_MEDIA_TYPE
                    | StatusCode::URI_TOO_LONG => Error::invalid_argument(err),
                    StatusCode::TOO_MANY_REQUESTS => Error::resource_exhausted(err),
                    StatusCode::NOT_IMPLEMENTED => Error::unimplemented(err),
                    _ => Error::unknown(err),
                }
            }
        }
    }
}
type HyperClient = Client<ProxyConnector<HttpsConnector<HttpConnector>>, Full<bytes::Bytes>>;
/// HTTP client shared by the Spotify endpoints.
///
/// The underlying hyper client is created lazily (`OnceLock`), and every
/// outgoing request passes through a keyed, per-domain rate limiter.
pub struct HttpClient {
    // pre-computed `User-Agent` header value
    user_agent: HeaderValue,
    // optional proxy through which all traffic is tunnelled
    proxy_url: Option<Url>,
    hyper_client: OnceLock<HyperClient>,
    rate_limiter:
        RateLimiter<String, DefaultKeyedStateStore<String>, MonotonicClock, NoOpMiddleware>,
}
impl HttpClient {
    /// Creates a new client, optionally tunnelling all traffic through
    /// `proxy_url`. The hyper client itself is constructed lazily on first use.
    pub fn new(proxy_url: Option<&Url>) -> Self {
        let zero_str = String::from("0");
        let os_version = os_version();

        // Map our OS identifier onto the platform names Spotify expects in the
        // user agent; only Android and iOS report a real OS version.
        let (spotify_platform, os_version) = match OS {
            "android" => ("Android", os_version),
            "ios" => ("iOS", os_version),
            "macos" => ("OSX", zero_str),
            "windows" => ("Win32", zero_str),
            _ => ("Linux", zero_str),
        };

        let user_agent_str = &format!(
            "Spotify/{} {}/{} ({})",
            spotify_version(),
            spotify_platform,
            os_version,
            VERSION_STRING
        );

        // Fall back to a static user agent rather than failing construction.
        let user_agent = HeaderValue::from_str(user_agent_str).unwrap_or_else(|err| {
            error!("Invalid user agent <{user_agent_str}>: {err}");
            HeaderValue::from_static(FALLBACK_USER_AGENT)
        });

        // Spread the allowed calls evenly over the interval, while still
        // allowing a burst up to the full per-interval budget.
        let replenish_interval_ns =
            RATE_LIMIT_INTERVAL.as_nanos() / RATE_LIMIT_CALLS_PER_INTERVAL as u128;
        let quota = Quota::with_period(Duration::from_nanos(replenish_interval_ns as u64))
            .expect("replenish interval should be valid")
            .allow_burst(nonzero![RATE_LIMIT_CALLS_PER_INTERVAL]);
        let rate_limiter = RateLimiter::keyed(quota);

        Self {
            user_agent,
            proxy_url: proxy_url.cloned(),
            hyper_client: OnceLock::new(),
            rate_limiter,
        }
    }

    // Builds the TLS-capable hyper client, wrapped in a proxy connector.
    fn try_create_hyper_client(proxy_url: Option<&Url>) -> Result<HyperClient, Error> {
        // configuring TLS is expensive and should be done once per process
        #[cfg(all(feature = "__rustls", not(feature = "native-tls")))]
        let https_connector = {
            #[cfg(feature = "rustls-tls-native-roots")]
            let tls = HttpsConnectorBuilder::new().with_native_roots()?;
            #[cfg(feature = "rustls-tls-webpki-roots")]
            let tls = HttpsConnectorBuilder::new().with_webpki_roots();
            tls.https_or_http().enable_http1().enable_http2().build()
        };
        #[cfg(all(feature = "native-tls", not(feature = "__rustls")))]
        let https_connector = HttpsConnector::new();

        // When not using a proxy a dummy proxy is configured that will not intercept any traffic.
        // This prevents needing to carry the Client Connector generics through the whole project
        let proxy = match &proxy_url {
            Some(proxy_url) => Proxy::new(Intercept::All, proxy_url.to_string().parse()?),
            None => Proxy::new(Intercept::None, Uri::from_static("0.0.0.0")),
        };
        let proxy_connector = ProxyConnector::from_proxy(https_connector, proxy)?;

        let client = Client::builder(TokioExecutor::new())
            .http2_adaptive_window(true)
            .build(proxy_connector);
        Ok(client)
    }

    // Lazily initializes and returns the shared hyper client.
    // NOTE(review): an initialization failure panics via `unwrap` here —
    // confirm that is acceptable for all call sites.
    fn hyper_client(&self) -> &HyperClient {
        self.hyper_client
            .get_or_init(|| Self::try_create_hyper_client(self.proxy_url.as_ref()).unwrap())
    }

    /// Sends `req`, transparently retrying on HTTP 429 when the server
    /// supplies a short enough retry hint, and mapping any other non-success
    /// status to an [`Error`] via [`HttpClientError`].
    pub async fn request(&self, req: Request<Bytes>) -> Result<Response<Incoming>, Error> {
        debug!("Requesting {}", req.uri());

        // `Request` does not implement `Clone` because its `Body` may be a single-shot stream.
        // As correct as that may be technically, we now need all this boilerplate to clone it
        // ourselves, as any `Request` is moved in the loop.
        let (parts, body_as_bytes) = req.into_parts();

        loop {
            // Rebuild the request from its saved parts for this attempt.
            let mut req = Request::builder()
                .method(parts.method.clone())
                .uri(parts.uri.clone())
                .version(parts.version)
                .body(body_as_bytes.clone())?;
            *req.headers_mut() = parts.headers.clone();

            let request = self.request_fut(req)?;
            let response = request.await;

            if let Ok(response) = &response {
                let code = response.status();
                if code == StatusCode::TOO_MANY_REQUESTS {
                    // A 429 without a usable retry hint falls through to the
                    // non-success branch below instead of retrying.
                    if let Some(duration) = Self::get_retry_after(response.headers()) {
                        warn!(
                            "Rate limited by service, retrying in {} seconds...",
                            duration.as_secs()
                        );
                        tokio::time::sleep(duration).await;
                        continue;
                    }
                }
                if !code.is_success() {
                    return Err(HttpClientError::StatusCode(code).into());
                }
            }

            let response = response?;
            return Ok(response);
        }
    }

    /// Sends `req` and collects the entire response body into memory.
    pub async fn request_body(&self, req: Request<Bytes>) -> Result<Bytes, Error> {
        let response = self.request(req).await?;
        Ok(response.into_body().collect().await?.to_bytes())
    }

    /// Sends `req` and returns the response future as a stream.
    pub fn request_stream(&self, req: Request<Bytes>) -> Result<IntoStream<ResponseFuture>, Error> {
        Ok(self.request_fut(req)?.into_stream())
    }

    /// Low-level send: stamps the `User-Agent` header, applies client-side
    /// rate limiting keyed by second-level domain, then hands off to hyper.
    pub fn request_fut(&self, mut req: Request<Bytes>) -> Result<ResponseFuture, Error> {
        let headers_mut = req.headers_mut();
        headers_mut.insert(USER_AGENT, self.user_agent.clone());

        // For rate limiting we cannot *just* depend on Spotify sending us HTTP/429
        // Retry-After headers. For example, when there is a service interruption
        // and HTTP/500 is returned, we don't want to DoS the Spotify infrastructure.
        let domain = match req.uri().host() {
            Some(host) => {
                // strip the prefix from *.domain.tld (assume rate limit is per domain, not subdomain)
                let mut parts = host
                    .split('.')
                    .map(|s| s.to_string())
                    .collect::<Vec<String>>();
                let n = parts.len().saturating_sub(2);
                parts.drain(n..).collect()
            }
            None => String::from(""),
        };

        // Fail fast with `ResourceExhausted` instead of queueing the request.
        self.rate_limiter.check_key(&domain).map_err(|e| {
            Error::resource_exhausted(format!(
                "rate limited for at least another {} seconds",
                e.wait_time_from(Instant::now()).as_secs()
            ))
        })?;

        Ok(self.hyper_client().request(req.map(Full::new)))
    }

    /// Extracts a retry delay from rate-limit response headers, checking the
    /// CDN-specific headers first and generic `Retry-After` last.
    ///
    /// Returns `None` when no header is present or the indicated wait would
    /// exceed [`RATE_LIMIT_MAX_WAIT`].
    pub fn get_retry_after(headers: &HeaderMap<HeaderValue>) -> Option<Duration> {
        let now = Date::now_utc().as_timestamp_ms();
        let mut retry_after_ms = None;

        if let Some(header_val) = headers.get("X-RateLimit-Next") {
            // *.akamaized.net (Akamai)
            if let Ok(date_str) = header_val.to_str() {
                if let Ok(target) = Date::from_iso8601(date_str) {
                    retry_after_ms = Some(target.as_timestamp_ms().saturating_sub(now))
                }
            }
        } else if let Some(header_val) = headers.get("Fastly-RateLimit-Reset") {
            // *.scdn.co (Fastly)
            if let Ok(timestamp) = header_val.to_str() {
                if let Ok(target) = timestamp.parse::<i64>() {
                    retry_after_ms = Some(target.saturating_sub(now))
                }
            }
        } else if let Some(header_val) = headers.get("Retry-After") {
            // Generic RFC compliant (including *.spotify.com)
            if let Ok(retry_after) = header_val.to_str() {
                if let Ok(duration) = retry_after.parse::<i64>() {
                    retry_after_ms = Some(duration * 1000)
                }
            }
        }

        if let Some(retry_after) = retry_after_ms {
            // A negative value (e.g. clock skew) wraps to a huge u64 in this
            // cast and is then rejected by the maximum-wait check below.
            let duration = Duration::from_millis(retry_after as u64);
            if duration <= RATE_LIMIT_MAX_WAIT {
                return Some(duration);
            } else {
                debug!(
                    "Waiting {} seconds would exceed {} second limit",
                    duration.as_secs(),
                    RATE_LIMIT_MAX_WAIT.as_secs()
                );
            }
        }

        None
    }
}

26
core/src/keymaster.rs Normal file
View file

@ -0,0 +1,26 @@
use serde::Deserialize;
use crate::{mercury::MercuryError, session::Session};
/// Access token as returned by the keymaster service (JSON, camelCase keys).
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Token {
    pub access_token: String,
    // lifetime in seconds from the moment the token was issued
    pub expires_in: u32,
    pub token_type: String,
    pub scope: Vec<String>,
}
/// Requests an access token for `client_id` with the given scopes
/// (comma-separated) via the Mercury keymaster endpoint.
///
/// # Errors
///
/// Returns `MercuryError` when the Mercury request fails, when the response
/// carries no payload, or when the payload is not valid JSON for [`Token`].
pub async fn get_token(
    session: &Session,
    client_id: &str,
    scopes: &str,
) -> Result<Token, MercuryError> {
    let url = format!(
        "hm://keymaster/token/authenticated?client_id={}&scope={}",
        client_id, scopes
    );
    let response = session.mercury().get(url).await?;
    // An empty payload is a protocol violation; report it as an error
    // instead of panicking (the previous `expect` aborted the process).
    let data = response.payload.first().ok_or(MercuryError)?;
    serde_json::from_slice(data.as_ref()).map_err(|_| MercuryError)
}

View file

@ -1,3 +1,5 @@
#![allow(clippy::unused_io_amount)]
#[macro_use]
extern crate log;
@ -6,41 +8,20 @@ use librespot_protocol as protocol;
#[macro_use]
mod component;
pub mod apresolve;
mod apresolve;
pub mod audio_key;
pub mod authentication;
pub mod cache;
pub mod cdn_url;
pub mod channel;
pub mod config;
mod connection;
pub mod date;
#[allow(dead_code)]
pub mod dealer;
pub mod deserialize_with;
#[doc(hidden)]
pub mod diffie_hellman;
pub mod error;
pub mod file_id;
pub mod http_client;
pub mod login5;
pub mod keymaster;
pub mod mercury;
pub mod packet;
mod proxytunnel;
pub mod session;
mod socket;
#[allow(dead_code)]
pub mod spclient;
pub mod spotify_id;
pub mod spotify_uri;
pub mod token;
#[doc(hidden)]
pub mod util;
pub mod version;
pub use config::SessionConfig;
pub use error::Error;
pub use file_id::FileId;
pub use session::Session;
pub use spotify_id::SpotifyId;
pub use spotify_uri::SpotifyUri;

View file

@ -1,270 +0,0 @@
use crate::config::OS;
use crate::spclient::CLIENT_TOKEN;
use crate::token::Token;
use crate::{Error, SessionConfig, util};
use bytes::Bytes;
use http::{HeaderValue, Method, Request, header::ACCEPT};
use librespot_protocol::login5::login_response::Response;
use librespot_protocol::{
client_info::ClientInfo,
credentials::{Password, StoredCredential},
hashcash::HashcashSolution,
login5::{
ChallengeSolution, LoginError, LoginOk, LoginRequest, LoginResponse,
login_request::Login_method,
},
};
use protobuf::well_known_types::duration::Duration as ProtoDuration;
use protobuf::{Message, MessageField};
use std::time::{Duration, Instant};
use thiserror::Error;
use tokio::time::sleep;
const MAX_LOGIN_TRIES: u8 = 3;
const LOGIN_TIMEOUT: Duration = Duration::from_secs(3);
component! {
Login5Manager : Login5ManagerInner {
auth_token: Option<Token> = None,
}
}
/// Failure modes of the login5 authentication flow.
#[derive(Debug, Error)]
enum Login5Error {
    #[error("Login request was denied: {0:?}")]
    FaultyRequest(LoginError),
    #[error("Code challenge is not supported")]
    CodeChallenge,
    #[error("Tried to acquire token without stored credentials")]
    NoStoredCredentials,
    #[error("Couldn't successfully authenticate after {0} times")]
    RetriesFailed(u8),
    #[error("Login via login5 is only allowed for android or ios")]
    OnlyForMobile,
}

impl From<Login5Error> for Error {
    // Maps login5 failures onto the crate's error kinds.
    fn from(err: Login5Error) -> Self {
        match err {
            Login5Error::NoStoredCredentials | Login5Error::OnlyForMobile => {
                Error::unavailable(err)
            }
            Login5Error::RetriesFailed(_) | Login5Error::FaultyRequest(_) => {
                Error::failed_precondition(err)
            }
            Login5Error::CodeChallenge => Error::unimplemented(err),
        }
    }
}
impl Login5Manager {
    // Sends one serialized login5 request, attaching the required client token.
    async fn request(&self, message: &LoginRequest) -> Result<Bytes, Error> {
        let client_token = self.session().spclient().client_token().await?;
        let body = message.write_to_bytes()?;

        let request = Request::builder()
            .method(&Method::POST)
            .uri("https://login5.spotify.com/v3/login")
            .header(ACCEPT, HeaderValue::from_static("application/x-protobuf"))
            .header(CLIENT_TOKEN, HeaderValue::from_str(&client_token)?)
            .body(body.into())?;

        self.session().http_client().request_body(request).await
    }

    // Runs the login5 exchange: retries on timeout responses, solves hashcash
    // challenges, and gives up after MAX_LOGIN_TRIES attempts.
    async fn login5_request(&self, login: Login_method) -> Result<LoginOk, Error> {
        let client_id = match OS {
            "macos" | "windows" => self.session().client_id(),
            // StoredCredential is used to get an access_token from Session credentials.
            // Using the session client_id allows user to use Keymaster on Android/IOS
            // if their Credentials::with_access_token was obtained there, assuming
            // they have overriden the SessionConfig::client_id with the Keymaster's.
            _ if matches!(login, Login_method::StoredCredential(_)) => self.session().client_id(),
            _ => SessionConfig::default().client_id,
        };

        let mut login_request = LoginRequest {
            client_info: MessageField::some(ClientInfo {
                client_id,
                device_id: self.session().device_id().to_string(),
                special_fields: Default::default(),
            }),
            login_method: Some(login),
            ..Default::default()
        };

        let mut response = self.request(&login_request).await?;
        let mut count = 0;

        loop {
            count += 1;

            let message = LoginResponse::parse_from_bytes(&response)?;
            // Success ends the loop immediately.
            if let Some(Response::Ok(ok)) = message.response {
                break Ok(ok);
            }

            if message.has_error() {
                match message.error() {
                    // transient errors: back off, then retry below
                    LoginError::TIMEOUT | LoginError::TOO_MANY_ATTEMPTS => {
                        sleep(LOGIN_TIMEOUT).await
                    }
                    others => return Err(Login5Error::FaultyRequest(others).into()),
                }
            }

            if message.has_challenges() {
                // handles the challenges, and updates the login context with the response
                Self::handle_challenges(&mut login_request, message)?;
            }

            if count < MAX_LOGIN_TRIES {
                response = self.request(&login_request).await?;
            } else {
                return Err(Login5Error::RetriesFailed(MAX_LOGIN_TRIES).into());
            }
        }
    }

    /// Login for android and ios
    ///
    /// This request doesn't require a connected session as it is the entrypoint for android or ios
    ///
    /// This request will only work when:
    /// - client_id => android or ios | can be easily adjusted in [SessionConfig::default_for_os]
    /// - user-agent => android or ios | has to be adjusted in [HttpClient::new](crate::http_client::HttpClient::new)
    pub async fn login(
        &self,
        id: impl Into<String>,
        password: impl Into<String>,
    ) -> Result<(Token, Vec<u8>), Error> {
        if !matches!(OS, "android" | "ios") {
            // by manipulating the user-agent and client-id it can be also used/tested on desktop
            return Err(Login5Error::OnlyForMobile.into());
        }

        let method = Login_method::Password(Password {
            id: id.into(),
            password: password.into(),
            ..Default::default()
        });

        let token_response = self.login5_request(method).await?;
        let auth_token = Self::token_from_login(
            token_response.access_token,
            token_response.access_token_expires_in,
        );

        // Returns both the access token and the reusable stored credential.
        Ok((auth_token, token_response.stored_credential))
    }

    /// Retrieve the access_token via login5
    ///
    /// This request will only work when the store credentials match the client-id. Meaning that
    /// stored credentials generated with the keymaster client-id will not work, for example, with
    /// the android client-id.
    pub async fn auth_token(&self) -> Result<Token, Error> {
        let auth_data = self.session().auth_data();
        if auth_data.is_empty() {
            return Err(Login5Error::NoStoredCredentials.into());
        }

        // Serve the cached token if still valid; drop it once expired.
        let auth_token = self.lock(|inner| {
            if let Some(token) = &inner.auth_token {
                if token.is_expired() {
                    inner.auth_token = None;
                }
            }
            inner.auth_token.clone()
        });
        if let Some(auth_token) = auth_token {
            return Ok(auth_token);
        }

        let method = Login_method::StoredCredential(StoredCredential {
            username: self.session().username().to_string(),
            data: auth_data,
            ..Default::default()
        });

        let token_response = self.login5_request(method).await?;
        let auth_token = Self::token_from_login(
            token_response.access_token,
            token_response.access_token_expires_in,
        );

        // Cache the fresh token for subsequent calls.
        let token = self.lock(|inner| {
            inner.auth_token = Some(auth_token.clone());
            inner.auth_token.clone()
        });

        trace!("Got auth token: {auth_token:?}");

        token.ok_or(Login5Error::NoStoredCredentials.into())
    }

    // Solves the hashcash challenges in `message`, appending the solutions to
    // `login_request`; code challenges are unsupported and abort the login.
    fn handle_challenges(
        login_request: &mut LoginRequest,
        message: LoginResponse,
    ) -> Result<(), Error> {
        let challenges = message.challenges();
        debug!(
            "Received {} challenges, solving...",
            challenges.challenges.len()
        );

        for challenge in &challenges.challenges {
            if challenge.has_code() {
                return Err(Login5Error::CodeChallenge.into());
            } else if !challenge.has_hashcash() {
                debug!("Challenge was empty, skipping...");
                continue;
            }

            let hash_cash_challenge = challenge.hashcash();

            let mut suffix = [0u8; 0x10];
            let duration = util::solve_hash_cash(
                &message.login_context,
                &hash_cash_challenge.prefix,
                hash_cash_challenge.length,
                &mut suffix,
            )?;

            let (seconds, nanos) = (duration.as_secs() as i64, duration.subsec_nanos() as i32);
            debug!("Solving hashcash took {seconds}s {nanos}ns");

            let mut solution = ChallengeSolution::new();
            solution.set_hashcash(HashcashSolution {
                suffix: Vec::from(suffix),
                duration: MessageField::some(ProtoDuration {
                    seconds,
                    nanos,
                    ..Default::default()
                }),
                ..Default::default()
            });

            login_request
                .challenge_solutions
                .mut_or_insert_default()
                .solutions
                .push(solution);
        }

        // The server's login context must be echoed back with the solutions.
        login_request.login_context = message.login_context;

        Ok(())
    }

    // Wraps a login5 access token in the crate's `Token` type; an invalid
    // (negative) expiry falls back to one hour.
    fn token_from_login(token: String, expires_in: i32) -> Token {
        Token {
            access_token: token,
            expires_in: Duration::from_secs(expires_in.try_into().unwrap_or(3600)),
            token_type: "Bearer".to_string(),
            scopes: vec![],
            timestamp: Instant::now(),
        }
    }
}

View file

@ -1,9 +1,9 @@
use std::{
collections::HashMap,
future::Future,
pin::Pin,
task::{Context, Poll},
};
use std::collections::HashMap;
use std::future::Future;
use std::mem;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
use byteorder::{BigEndian, ByteOrder};
use bytes::Bytes;
@ -11,7 +11,8 @@ use futures_util::FutureExt;
use protobuf::Message;
use tokio::sync::{mpsc, oneshot};
use crate::{Error, packet::PacketType, protocol, util::SeqGenerator};
use crate::protocol;
use crate::util::SeqGenerator;
mod types;
pub use self::types::*;
@ -31,18 +32,18 @@ component! {
pub struct MercuryPending {
parts: Vec<Vec<u8>>,
partial: Option<Vec<u8>>,
callback: Option<oneshot::Sender<Result<MercuryResponse, Error>>>,
callback: Option<oneshot::Sender<Result<MercuryResponse, MercuryError>>>,
}
pub struct MercuryFuture<T> {
receiver: oneshot::Receiver<Result<T, Error>>,
receiver: oneshot::Receiver<Result<T, MercuryError>>,
}
impl<T> Future for MercuryFuture<T> {
type Output = Result<T, Error>;
type Output = Result<T, MercuryError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.receiver.poll_unpin(cx)?
self.receiver.poll_unpin(cx).map_err(|_| MercuryError)?
}
}
@ -53,7 +54,7 @@ impl MercuryManager {
seq
}
fn request(&self, req: MercuryRequest) -> Result<MercuryFuture<MercuryResponse>, Error> {
fn request(&self, req: MercuryRequest) -> MercuryFuture<MercuryResponse> {
let (tx, rx) = oneshot::channel();
let pending = MercuryPending {
@ -70,13 +71,13 @@ impl MercuryManager {
});
let cmd = req.method.command();
let data = req.encode(&seq)?;
let data = req.encode(&seq);
self.session().send_packet(cmd, data)?;
Ok(MercuryFuture { receiver: rx })
self.session().send_packet(cmd, data);
MercuryFuture { receiver: rx }
}
pub fn get<T: Into<String>>(&self, uri: T) -> Result<MercuryFuture<MercuryResponse>, Error> {
pub fn get<T: Into<String>>(&self, uri: T) -> MercuryFuture<MercuryResponse> {
self.request(MercuryRequest {
method: MercuryMethod::Get,
uri: uri.into(),
@ -85,11 +86,7 @@ impl MercuryManager {
})
}
pub fn send<T: Into<String>>(
&self,
uri: T,
data: Vec<u8>,
) -> Result<MercuryFuture<MercuryResponse>, Error> {
pub fn send<T: Into<String>>(&self, uri: T, data: Vec<u8>) -> MercuryFuture<MercuryResponse> {
self.request(MercuryRequest {
method: MercuryMethod::Send,
uri: uri.into(),
@ -105,7 +102,7 @@ impl MercuryManager {
pub fn subscribe<T: Into<String>>(
&self,
uri: T,
) -> impl Future<Output = Result<mpsc::UnboundedReceiver<MercuryResponse>, Error>> + 'static
) -> impl Future<Output = Result<mpsc::UnboundedReceiver<MercuryResponse>, MercuryError>> + 'static
{
let uri = uri.into();
let request = self.request(MercuryRequest {
@ -117,7 +114,7 @@ impl MercuryManager {
let manager = self.clone();
async move {
let response = request?.await?;
let response = request.await?;
let (tx, rx) = mpsc::unbounded_channel();
@ -127,18 +124,13 @@ impl MercuryManager {
if !response.payload.is_empty() {
// Old subscription protocol, watch the provided list of URIs
for sub in response.payload {
match protocol::pubsub::Subscription::parse_from_bytes(&sub) {
Ok(mut sub) => {
let sub_uri = sub.take_uri();
let mut sub =
protocol::pubsub::Subscription::parse_from_bytes(&sub).unwrap();
let sub_uri = sub.take_uri();
debug!("subscribed sub_uri={sub_uri}");
debug!("subscribed sub_uri={}", sub_uri);
inner.subscriptions.push((sub_uri, tx.clone()));
}
Err(e) => {
error!("could not subscribe to {uri}: {e}");
}
}
inner.subscriptions.push((sub_uri, tx.clone()));
}
} else {
// New subscription protocol, watch the requested URI
@ -151,28 +143,7 @@ impl MercuryManager {
}
}
pub fn listen_for<T: Into<String>>(
&self,
uri: T,
) -> impl Future<Output = mpsc::UnboundedReceiver<MercuryResponse>> + 'static {
let uri = uri.into();
let manager = self.clone();
async move {
let (tx, rx) = mpsc::unbounded_channel();
manager.lock(move |inner| {
if !inner.invalid {
debug!("listening to uri={uri}");
inner.subscriptions.push((uri, tx));
}
});
rx
}
}
pub(crate) fn dispatch(&self, cmd: PacketType, mut data: Bytes) -> Result<(), Error> {
pub(crate) fn dispatch(&self, cmd: u8, mut data: Bytes) {
let seq_len = BigEndian::read_u16(data.split_to(2).as_ref()) as usize;
let seq = data.split_to(seq_len).as_ref().to_owned();
@ -183,23 +154,20 @@ impl MercuryManager {
let mut pending = match pending {
Some(pending) => pending,
None if cmd == 0xb5 => MercuryPending {
parts: Vec::new(),
partial: None,
callback: None,
},
None => {
if let PacketType::MercuryEvent = cmd {
MercuryPending {
parts: Vec::new(),
partial: None,
callback: None,
}
} else {
warn!("Ignore seq {:?} cmd {:x}", seq, cmd as u8);
return Err(MercuryError::Command(cmd).into());
}
warn!("Ignore seq {:?} cmd {:x}", seq, cmd);
return;
}
};
for i in 0..count {
let mut part = Self::parse_part(&mut data);
if let Some(mut partial) = pending.partial.take() {
if let Some(mut partial) = mem::replace(&mut pending.partial, None) {
partial.extend_from_slice(&part);
part = partial;
}
@ -212,12 +180,10 @@ impl MercuryManager {
}
if flags == 0x1 {
self.complete_request(cmd, pending)?;
self.complete_request(cmd, pending);
} else {
self.lock(move |inner| inner.pending.insert(seq, pending));
}
Ok(())
}
fn parse_part(data: &mut Bytes) -> Vec<u8> {
@ -225,44 +191,40 @@ impl MercuryManager {
data.split_to(size).as_ref().to_owned()
}
fn complete_request(&self, cmd: PacketType, mut pending: MercuryPending) -> Result<(), Error> {
fn complete_request(&self, cmd: u8, mut pending: MercuryPending) {
let header_data = pending.parts.remove(0);
let header = protocol::mercury::Header::parse_from_bytes(&header_data)?;
let header = protocol::mercury::Header::parse_from_bytes(&header_data).unwrap();
let response = MercuryResponse {
uri: header.uri().to_string(),
status_code: header.status_code(),
uri: header.get_uri().to_string(),
status_code: header.get_status_code(),
payload: pending.parts,
};
let status_code = response.status_code;
if status_code >= 500 {
error!("error {} for uri {}", status_code, &response.uri);
Err(MercuryError::Response(response).into())
} else if status_code >= 400 {
error!("error {} for uri {}", status_code, &response.uri);
if response.status_code >= 500 {
panic!("Spotify servers returned an error. Restart librespot.");
} else if response.status_code >= 400 {
warn!("error {} for uri {}", response.status_code, &response.uri);
if let Some(cb) = pending.callback {
cb.send(Err(MercuryError::Response(response.clone()).into()))
.map_err(|_| MercuryError::Channel)?;
let _ = cb.send(Err(MercuryError));
}
Err(MercuryError::Response(response).into())
} else if let PacketType::MercuryEvent = cmd {
// TODO: This is just a workaround to make utf-8 encoded usernames work.
// A better solution would be to use an uri struct and urlencode it directly
// before sending while saving the subscription under its unencoded form.
let mut uri_split = response.uri.split('/');
let encoded_uri = std::iter::once(uri_split.next().unwrap_or_default().to_string())
.chain(uri_split.map(|component| {
form_urlencoded::byte_serialize(component.as_bytes()).collect::<String>()
}))
.collect::<Vec<String>>()
.join("/");
let mut found = false;
} else if cmd == 0xb5 {
self.lock(|inner| {
inner.subscriptions.retain(|(prefix, sub)| {
let mut found = false;
// TODO: This is just a workaround to make utf-8 encoded usernames work.
// A better solution would be to use an uri struct and urlencode it directly
// before sending while saving the subscription under its unencoded form.
let mut uri_split = response.uri.split('/');
let encoded_uri = std::iter::once(uri_split.next().unwrap().to_string())
.chain(uri_split.map(|component| {
form_urlencoded::byte_serialize(component.as_bytes()).collect::<String>()
}))
.collect::<Vec<String>>()
.join("/");
inner.subscriptions.retain(|&(ref prefix, ref sub)| {
if encoded_uri.starts_with(prefix) {
found = true;
@ -274,24 +236,13 @@ impl MercuryManager {
true
}
});
});
if found {
Ok(())
} else if self.session().dealer().handles(&response.uri) {
trace!("mercury response <{}> is handled by dealer", response.uri);
Ok(())
} else {
debug!("unknown subscription uri={}", &response.uri);
trace!("response pushed over Mercury: {response:?}");
Err(MercuryError::Response(response).into())
}
if !found {
debug!("unknown subscription uri={}", response.uri);
}
})
} else if let Some(cb) = pending.callback {
cb.send(Ok(response)).map_err(|_| MercuryError::Channel)?;
Ok(())
} else {
error!("can't handle Mercury response: {response:?}");
Err(MercuryError::Response(response).into())
let _ = cb.send(Ok(response));
}
}

View file

@ -1,8 +1,6 @@
use std::collections::VecDeque;
use super::{MercuryFuture, MercuryManager, MercuryResponse};
use crate::Error;
use super::*;
pub struct MercurySender {
mercury: MercuryManager,
@ -25,13 +23,12 @@ impl MercurySender {
self.buffered_future.is_none() && self.pending.is_empty()
}
pub fn send(&mut self, item: Vec<u8>) -> Result<(), Error> {
let task = self.mercury.send(self.uri.clone(), item)?;
pub fn send(&mut self, item: Vec<u8>) {
let task = self.mercury.send(self.uri.clone(), item);
self.pending.push_back(task);
Ok(())
}
pub async fn flush(&mut self) -> Result<(), Error> {
pub async fn flush(&mut self) -> Result<(), MercuryError> {
if self.buffered_future.is_none() {
self.buffered_future = self.pending.pop_front();
}

View file

@ -1,10 +1,8 @@
use std::io::Write;
use byteorder::{BigEndian, WriteBytesExt};
use protobuf::Message;
use thiserror::Error;
use std::io::Write;
use crate::{Error, packet::PacketType, protocol};
use crate::protocol;
#[derive(Debug, PartialEq, Eq)]
pub enum MercuryMethod {
@ -29,56 +27,40 @@ pub struct MercuryResponse {
pub payload: Vec<Vec<u8>>,
}
#[derive(Debug, Error)]
pub enum MercuryError {
#[error("callback receiver was disconnected")]
Channel,
#[error("error handling packet type: {0:?}")]
Command(PacketType),
#[error("error handling Mercury response: {0:?}")]
Response(MercuryResponse),
}
#[derive(Debug, Hash, PartialEq, Eq, Copy, Clone)]
pub struct MercuryError;
impl From<MercuryError> for Error {
fn from(err: MercuryError) -> Self {
match err {
MercuryError::Channel => Error::aborted(err),
MercuryError::Command(_) => Error::unimplemented(err),
MercuryError::Response(_) => Error::unavailable(err),
}
}
}
impl std::fmt::Display for MercuryMethod {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match *self {
impl ToString for MercuryMethod {
fn to_string(&self) -> String {
match *self {
MercuryMethod::Get => "GET",
MercuryMethod::Sub => "SUB",
MercuryMethod::Unsub => "UNSUB",
MercuryMethod::Send => "SEND",
};
write!(f, "{s}")
}
.to_owned()
}
}
impl MercuryMethod {
pub fn command(&self) -> PacketType {
use PacketType::*;
pub fn command(&self) -> u8 {
match *self {
MercuryMethod::Get | MercuryMethod::Send => MercuryReq,
MercuryMethod::Sub => MercurySub,
MercuryMethod::Unsub => MercuryUnsub,
MercuryMethod::Get | MercuryMethod::Send => 0xb2,
MercuryMethod::Sub => 0xb3,
MercuryMethod::Unsub => 0xb4,
}
}
}
impl MercuryRequest {
pub fn encode(&self, seq: &[u8]) -> Result<Vec<u8>, Error> {
pub fn encode(&self, seq: &[u8]) -> Vec<u8> {
let mut packet = Vec::new();
packet.write_u16::<BigEndian>(seq.len() as u16)?;
packet.write_all(seq)?;
packet.write_u8(1)?; // Flags: FINAL
packet.write_u16::<BigEndian>(1 + self.payload.len() as u16)?; // Part count
packet.write_u16::<BigEndian>(seq.len() as u16).unwrap();
packet.write_all(seq).unwrap();
packet.write_u8(1).unwrap(); // Flags: FINAL
packet
.write_u16::<BigEndian>(1 + self.payload.len() as u16)
.unwrap(); // Part count
let mut header = protocol::mercury::Header::new();
header.set_uri(self.uri.clone());
@ -88,14 +70,16 @@ impl MercuryRequest {
header.set_content_type(content_type.clone());
}
packet.write_u16::<BigEndian>(header.compute_size() as u16)?;
header.write_to_writer(&mut packet)?;
packet
.write_u16::<BigEndian>(header.compute_size() as u16)
.unwrap();
header.write_to_writer(&mut packet).unwrap();
for p in &self.payload {
packet.write_u16::<BigEndian>(p.len() as u16)?;
packet.write_all(p)?;
packet.write_u16::<BigEndian>(p.len() as u16).unwrap();
packet.write(p).unwrap();
}
Ok(packet)
packet
}
}

View file

@ -1,41 +0,0 @@
// Ported from librespot-java. Relicensed under MIT with permission.
use num_derive::{FromPrimitive, ToPrimitive};
/// Command byte identifying the type of a packet on Spotify's encrypted
/// TCP connection. The discriminants are the wire protocol's command codes
/// and must not be changed.
#[derive(Debug, Copy, Clone, FromPrimitive, ToPrimitive)]
pub enum PacketType {
    SecretBlock = 0x02,
    Ping = 0x04,
    StreamChunk = 0x08,
    StreamChunkRes = 0x09,
    ChannelError = 0x0a,
    ChannelAbort = 0x0b,
    RequestKey = 0x0c,
    AesKey = 0x0d,
    AesKeyError = 0x0e,
    Image = 0x19,
    CountryCode = 0x1b,
    Pong = 0x49,
    PongAck = 0x4a,
    Pause = 0x4b,
    ProductInfo = 0x50,
    LegacyWelcome = 0x69,
    LicenseVersion = 0x76,
    Login = 0xab,
    APWelcome = 0xac,
    AuthFailure = 0xad,
    MercuryReq = 0xb2,
    MercurySub = 0xb3,
    MercuryUnsub = 0xb4,
    MercuryEvent = 0xb5,
    TrackEndedTime = 0x82,
    UnknownDataAllZeros = 0x1f,
    PreferredLocale = 0x74,
    Unknown0x0f = 0x0f,
    Unknown0x10 = 0x10,
    Unknown0x4f = 0x4f,
    // TODO - occurs when subscribing with an empty URI. Maybe a MercuryError?
    // Payload: b"\0\x08\0\0\0\0\0\0\0\0\x01\0\x01\0\x03 \xb0\x06"
    Unknown0xb6 = 0xb6,
}

View file

@ -22,7 +22,7 @@ pub async fn proxy_connect<T: AsyncRead + AsyncWrite + Unpin>(
loop {
let bytes_read = proxy_connection.read(&mut buffer[offset..]).await?;
if bytes_read == 0 {
return Err(io::Error::other("Early EOF from proxy"));
return Err(io::Error::new(io::ErrorKind::Other, "Early EOF from proxy"));
}
offset += bytes_read;
@ -31,17 +31,20 @@ pub async fn proxy_connect<T: AsyncRead + AsyncWrite + Unpin>(
let status = response
.parse(&buffer[..offset])
.map_err(io::Error::other)?;
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
if status.is_complete() {
return match response.code {
Some(200) => Ok(proxy_connection), // Proxy says all is well
Some(code) => {
let reason = response.reason.unwrap_or("no reason");
let msg = format!("Proxy responded with {code}: {reason}");
Err(io::Error::other(msg))
let msg = format!("Proxy responded with {}: {}", code, reason);
Err(io::Error::new(io::ErrorKind::Other, msg))
}
None => Err(io::Error::other("Malformed response from proxy")),
None => Err(io::Error::new(
io::ErrorKind::Other,
"Malformed response from proxy",
)),
};
}

File diff suppressed because it is too large Load diff

View file

@ -1,34 +0,0 @@
use std::{io, net::ToSocketAddrs};
use tokio::net::TcpStream;
use url::Url;
use crate::proxytunnel;
/// Opens a TCP connection to `host:port`, tunnelling through an HTTP proxy
/// when one is given.
///
/// With a proxy, the proxy's address is resolved and a CONNECT tunnel to the
/// target is established; otherwise the host is resolved and dialed directly.
/// Only the first resolved address is tried in either case.
pub async fn connect(host: &str, port: u16, proxy: Option<&Url>) -> io::Result<TcpStream> {
    match proxy {
        Some(proxy_url) => {
            info!("Using proxy \"{proxy_url}\"");

            let proxy_addr = proxy_url
                .socket_addrs(|| None)?
                .into_iter()
                .next()
                .ok_or_else(|| {
                    io::Error::new(
                        io::ErrorKind::NotFound,
                        "Can't resolve proxy server address",
                    )
                })?;

            let stream = TcpStream::connect(&proxy_addr).await?;
            // Upgrade the raw connection into a tunnel to the access point.
            proxytunnel::proxy_connect(stream, host, &port.to_string()).await
        }
        None => {
            let ap_addr = (host, port).to_socket_addrs()?.next().ok_or_else(|| {
                io::Error::new(
                    io::ErrorKind::NotFound,
                    "Can't resolve access point address",
                )
            })?;

            TcpStream::connect(&ap_addr).await
        }
    }
}

View file

@ -1,905 +0,0 @@
use std::{
fmt::Write,
time::{Duration, Instant},
};
use crate::config::{OS, os_version};
use crate::{
Error, FileId, SpotifyId, SpotifyUri,
apresolve::SocketAddress,
config::SessionConfig,
error::ErrorKind,
protocol::{
autoplay_context_request::AutoplayContextRequest,
clienttoken_http::{
ChallengeAnswer, ChallengeType, ClientTokenRequest, ClientTokenRequestType,
ClientTokenResponse, ClientTokenResponseType,
},
connect::PutStateRequest,
context::Context,
extended_metadata::BatchedEntityRequest,
},
token::Token,
util,
version::spotify_semantic_version,
};
use bytes::Bytes;
use data_encoding::HEXUPPER_PERMISSIVE;
use futures_util::future::IntoStream;
use http::{Uri, header::HeaderValue};
use hyper::{
HeaderMap, Method, Request,
header::{ACCEPT, AUTHORIZATION, CONTENT_LENGTH, CONTENT_TYPE, HeaderName, RANGE},
};
use hyper_util::client::legacy::ResponseFuture;
use protobuf::{Enum, Message, MessageFull};
use rand::RngCore;
use sysinfo::System;
use thiserror::Error;
// Generates the `SpClient` session component with its lock-guarded inner
// state (see the project's `component!` macro): the memoized spclient access
// point, the request retry strategy, and the cached client token.
component! {
    SpClient : SpClientInner {
        accesspoint: Option<SocketAddress> = None,
        strategy: RequestStrategy = RequestStrategy::default(),
        client_token: Option<Token> = None,
    }
}
// Result type shared by all spclient request methods: raw response bytes.
pub type SpClientResult = Result<Bytes, Error>;

// Custom header carrying the Spotify client token.
#[allow(clippy::declare_interior_mutable_const)]
pub const CLIENT_TOKEN: HeaderName = HeaderName::from_static("client-token");

// Custom header carrying the Spotify Connect connection id.
#[allow(clippy::declare_interior_mutable_const)]
const CONNECTION_ID: HeaderName = HeaderName::from_static("x-spotify-connection-id");

// Used for the context-resolve endpoints, which are called without the
// metrics and cache-busting salt query parameters.
const NO_METRICS_AND_SALT: RequestOptions = RequestOptions {
    metrics: false,
    salt: false,
    base_url: None,
};

// Pins requests to the public spclient hostname instead of a dynamically
// resolved access point (used by `get_metadata`, see the comment there).
const SPCLIENT_FALLBACK_ENDPOINT: RequestOptions = RequestOptions {
    metrics: true,
    salt: true,
    base_url: Some("https://spclient.wg.spotify.com"),
};
/// Errors specific to spclient requests; converted into
/// `Error::failed_precondition` via the `From` impl below.
#[derive(Debug, Error)]
pub enum SpClientError {
    /// A required user attribute (e.g. a URL template) is missing from the session.
    #[error("missing attribute {0}")]
    Attribute(String),
    /// The server replied with an empty body where data was expected.
    #[error("expected data but received none")]
    NoData,
}

impl From<SpClientError> for Error {
    fn from(err: SpClientError) -> Self {
        Self::failed_precondition(err)
    }
}
/// How often a failing spclient request is retried (see `request_with_options`).
#[derive(Copy, Clone, Debug)]
pub enum RequestStrategy {
    /// Give up after the given number of attempts.
    TryTimes(usize),
    /// Keep retrying until the request succeeds.
    Infinitely,
}

impl Default for RequestStrategy {
    /// Defaults to ten attempts per request.
    fn default() -> Self {
        RequestStrategy::TryTimes(10)
    }
}
/// Per-request knobs consumed by `request_with_options`.
pub struct RequestOptions {
    // Append the `product=0&country=...` metrics query parameters to the URL.
    metrics: bool,
    // Append a random `salt=...` query parameter to defeat caches.
    salt: bool,
    // Fixed base URL; `None` resolves an access point dynamically.
    base_url: Option<&'static str>,
}

impl Default for RequestOptions {
    // Defaults: send metrics and salt, resolve the base URL dynamically.
    fn default() -> Self {
        Self {
            metrics: true,
            salt: true,
            base_url: None,
        }
    }
}
impl SpClient {
/// Replaces the retry strategy used by `request_with_options`.
pub fn set_strategy(&self, strategy: RequestStrategy) {
    self.lock(|inner| inner.strategy = strategy)
}

/// Drops the memoized spclient access point so the next request resolves a
/// fresh one.
///
/// NOTE(review): declared `async` although the body never awaits — kept for
/// interface stability with existing `.await` call sites.
pub async fn flush_accesspoint(&self) {
    self.lock(|inner| inner.accesspoint = None)
}
/// Returns the spclient access point, resolving and memoizing one on first use.
pub async fn get_accesspoint(&self) -> Result<SocketAddress, Error> {
    // Fast path: reuse the memoized access point.
    if let Some(cached) = self.lock(|inner| inner.accesspoint.clone()) {
        return Ok(cached);
    }

    // Slow path: resolve a fresh access point and remember it.
    let resolved = self.session().apresolver().resolve("spclient").await?;
    self.lock(|inner| inner.accesspoint = Some(resolved.clone()));
    info!(
        "Resolved \"{}:{}\" as spclient access point",
        resolved.0, resolved.1
    );

    Ok(resolved)
}
/// Returns the `https://host:port` base URL of the (memoized or freshly
/// resolved) spclient access point.
pub async fn base_url(&self) -> Result<String, Error> {
    let ap = self.get_accesspoint().await?;
    Ok(format!("https://{}:{}", ap.0, ap.1))
}

/// POSTs the serialized protobuf `message` to the client-token endpoint and
/// returns the raw response body.
async fn client_token_request<M: Message>(&self, message: &M) -> Result<Bytes, Error> {
    let body = message.write_to_bytes()?;

    let request = Request::builder()
        .method(&Method::POST)
        .uri("https://clienttoken.spotify.com/v1/clienttoken")
        .header(ACCEPT, HeaderValue::from_static("application/x-protobuf"))
        .body(body.into())?;

    self.session().http_client().request_body(request).await
}
pub async fn client_token(&self) -> Result<String, Error> {
let client_token = self.lock(|inner| {
if let Some(token) = &inner.client_token {
if token.is_expired() {
inner.client_token = None;
}
}
inner.client_token.clone()
});
if let Some(client_token) = client_token {
return Ok(client_token.access_token);
}
debug!("Client token unavailable or expired, requesting new token.");
let mut request = ClientTokenRequest::new();
request.request_type = ClientTokenRequestType::REQUEST_CLIENT_DATA_REQUEST.into();
let client_data = request.mut_client_data();
client_data.client_version = spotify_semantic_version();
// Current state of affairs: keymaster ID works on all tested platforms, but may be phased out,
// so it seems a good idea to mimick the real clients. `self.session().client_id()` returns the
// ID of the client that last connected, but requesting a client token with this ID only works
// on macOS and Windows. On Android and iOS we can send a platform-specific client ID and are
// then presented with a hash cash challenge. On Linux, we have to pass the old keymaster ID.
// We delegate most of this logic to `SessionConfig`.
let os = OS;
let client_id = match os {
"macos" | "windows" => self.session().client_id(),
os => SessionConfig::default_for_os(os).client_id,
};
client_data.client_id = client_id;
let connectivity_data = client_data.mut_connectivity_sdk_data();
connectivity_data.device_id = self.session().device_id().to_string();
let platform_data = connectivity_data
.platform_specific_data
.mut_or_insert_default();
let os_version = os_version();
let kernel_version = System::kernel_version().unwrap_or_else(|| String::from("0"));
match os {
"windows" => {
let os_version = os_version.parse::<f32>().unwrap_or(10.) as i32;
let kernel_version = kernel_version.parse::<i32>().unwrap_or(21370);
let (pe, image_file) = match std::env::consts::ARCH {
"arm" => (448, 452),
"aarch64" => (43620, 452),
"x86_64" => (34404, 34404),
_ => (332, 332), // x86
};
let windows_data = platform_data.mut_desktop_windows();
windows_data.os_version = os_version;
windows_data.os_build = kernel_version;
windows_data.platform_id = 2;
windows_data.unknown_value_6 = 9;
windows_data.image_file_machine = image_file;
windows_data.pe_machine = pe;
windows_data.unknown_value_10 = true;
}
"ios" => {
let ios_data = platform_data.mut_ios();
ios_data.user_interface_idiom = 0;
ios_data.target_iphone_simulator = false;
ios_data.hw_machine = "iPhone14,5".to_string();
ios_data.system_version = os_version;
}
"android" => {
let android_data = platform_data.mut_android();
android_data.android_version = os_version;
android_data.api_version = 31;
"Pixel".clone_into(&mut android_data.device_name);
"GF5KQ".clone_into(&mut android_data.model_str);
"Google".clone_into(&mut android_data.vendor);
}
"macos" => {
let macos_data = platform_data.mut_desktop_macos();
macos_data.system_version = os_version;
macos_data.hw_model = "iMac21,1".to_string();
macos_data.compiled_cpu_type = std::env::consts::ARCH.to_string();
}
_ => {
let linux_data = platform_data.mut_desktop_linux();
linux_data.system_name = "Linux".to_string();
linux_data.system_release = kernel_version;
linux_data.system_version = os_version;
linux_data.hardware = std::env::consts::ARCH.to_string();
}
}
let mut response = self.client_token_request(&request).await?;
let mut count = 0;
const MAX_TRIES: u8 = 3;
let token_response = loop {
count += 1;
let message = ClientTokenResponse::parse_from_bytes(&response)?;
match ClientTokenResponseType::from_i32(message.response_type.value()) {
// depending on the platform, you're either given a token immediately
// or are presented a hash cash challenge to solve first
Some(ClientTokenResponseType::RESPONSE_GRANTED_TOKEN_RESPONSE) => {
debug!("Received a granted token");
break message;
}
Some(ClientTokenResponseType::RESPONSE_CHALLENGES_RESPONSE) => {
debug!("Received a hash cash challenge, solving...");
let challenges = message.challenges().clone();
let state = challenges.state;
if let Some(challenge) = challenges.challenges.first() {
let hash_cash_challenge = challenge.evaluate_hashcash_parameters();
let ctx = vec![];
let prefix = HEXUPPER_PERMISSIVE
.decode(hash_cash_challenge.prefix.as_bytes())
.map_err(|e| {
Error::failed_precondition(format!(
"Unable to decode hash cash challenge: {e}"
))
})?;
let length = hash_cash_challenge.length;
let mut suffix = [0u8; 0x10];
let answer = util::solve_hash_cash(&ctx, &prefix, length, &mut suffix);
match answer {
Ok(_) => {
// the suffix must be in uppercase
let suffix = HEXUPPER_PERMISSIVE.encode(&suffix);
let mut answer_message = ClientTokenRequest::new();
answer_message.request_type =
ClientTokenRequestType::REQUEST_CHALLENGE_ANSWERS_REQUEST
.into();
let challenge_answers = answer_message.mut_challenge_answers();
let mut challenge_answer = ChallengeAnswer::new();
challenge_answer.mut_hash_cash().suffix = suffix;
challenge_answer.ChallengeType =
ChallengeType::CHALLENGE_HASH_CASH.into();
challenge_answers.state = state.to_string();
challenge_answers.answers.push(challenge_answer);
trace!("Answering hash cash challenge");
match self.client_token_request(&answer_message).await {
Ok(token) => {
response = token;
continue;
}
Err(e) => {
trace!("Answer not accepted {count}/{MAX_TRIES}: {e}");
}
}
}
Err(e) => trace!(
"Unable to solve hash cash challenge {count}/{MAX_TRIES}: {e}"
),
}
if count < MAX_TRIES {
response = self.client_token_request(&request).await?;
} else {
return Err(Error::failed_precondition(format!(
"Unable to solve any of {MAX_TRIES} hash cash challenges"
)));
}
} else {
return Err(Error::failed_precondition("No challenges found"));
}
}
Some(unknown) => {
return Err(Error::unimplemented(format!(
"Unknown client token response type: {unknown:?}"
)));
}
None => return Err(Error::failed_precondition("No client token response type")),
}
};
let granted_token = token_response.granted_token();
let access_token = granted_token.token.to_owned();
self.lock(|inner| {
let client_token = Token {
access_token: access_token.clone(),
expires_in: Duration::from_secs(
granted_token
.refresh_after_seconds
.try_into()
.unwrap_or(7200),
),
token_type: "client-token".to_string(),
scopes: granted_token
.domains
.iter()
.map(|d| d.domain.clone())
.collect(),
timestamp: Instant::now(),
};
inner.client_token = Some(client_token);
});
trace!("Got client token: {granted_token:?}");
Ok(access_token)
}
/// Sends `message` as an `application/x-protobuf` body using the default
/// `RequestOptions`.
pub async fn request_with_protobuf<M: Message + MessageFull>(
    &self,
    method: &Method,
    endpoint: &str,
    headers: Option<HeaderMap>,
    message: &M,
) -> SpClientResult {
    self.request_with_protobuf_and_options(
        method,
        endpoint,
        headers,
        message,
        &Default::default(),
    )
    .await
}

/// Sends `message` as an `application/x-protobuf` body with explicit
/// `RequestOptions` (metrics/salt/base-URL overrides).
pub async fn request_with_protobuf_and_options<M: Message + MessageFull>(
    &self,
    method: &Method,
    endpoint: &str,
    headers: Option<HeaderMap>,
    message: &M,
    options: &RequestOptions,
) -> SpClientResult {
    let body = message.write_to_bytes()?;

    let mut headers = headers.unwrap_or_default();
    headers.insert(
        CONTENT_TYPE,
        HeaderValue::from_static("application/x-protobuf"),
    );

    self.request_with_options(method, endpoint, Some(headers), Some(&body), options)
        .await
}

/// Performs a request that expects a JSON response (`Accept: application/json`).
pub async fn request_as_json(
    &self,
    method: &Method,
    endpoint: &str,
    headers: Option<HeaderMap>,
    body: Option<&str>,
) -> SpClientResult {
    let mut headers = headers.unwrap_or_default();
    headers.insert(ACCEPT, HeaderValue::from_static("application/json"));

    self.request(method, endpoint, Some(headers), body.map(|s| s.as_bytes()))
        .await
}

/// Performs a raw request against the resolved access point using the
/// default `RequestOptions`.
pub async fn request(
    &self,
    method: &Method,
    endpoint: &str,
    headers: Option<HeaderMap>,
    body: Option<&[u8]>,
) -> SpClientResult {
    self.request_with_options(method, endpoint, headers, body, &Default::default())
        .await
}
/// Core request loop: builds the URL (base + metrics + salt), attaches the
/// auth and client tokens, and retries failures per the configured
/// `RequestStrategy`, rotating access points on network errors.
pub async fn request_with_options(
    &self,
    method: &Method,
    endpoint: &str,
    headers: Option<HeaderMap>,
    body: Option<&[u8]>,
    options: &RequestOptions,
) -> SpClientResult {
    let mut tries: usize = 0;
    let mut last_response;

    // `None` body is sent as an empty byte slice.
    let body = body.unwrap_or_default();

    loop {
        tries += 1;

        // Reconnection logic: retrieve the endpoint every iteration, so we can try
        // another access point when we are experiencing network issues (see below).
        let mut url = match options.base_url {
            Some(base_url) => base_url.to_string(),
            None => self.base_url().await?,
        };
        url.push_str(endpoint);

        // Add metrics. There is also an optional `partner` key with a value like
        // `vodafone-uk` but we've yet to discover how we can find that value.
        // For the sake of documentation you could also do "product=free" but
        // we only support premium anyway.
        if options.metrics && !url.contains("product=0") {
            let _ = write!(
                url,
                "{}product=0&country={}",
                util::get_next_query_separator(&url),
                self.session().country()
            );
        }

        // Defeat caches. Spotify-generated URLs already contain this.
        if options.salt && !url.contains("salt=") {
            let _ = write!(
                url,
                "{}salt={}",
                util::get_next_query_separator(&url),
                rand::rng().next_u32()
            );
        }

        let mut request = Request::builder()
            .method(method)
            .uri(url)
            .header(CONTENT_LENGTH, body.len())
            .body(Bytes::copy_from_slice(body))?;

        // Reconnection logic: keep getting (cached) tokens because they might have expired.
        let token = self.session().login5().auth_token().await?;

        // Caller headers first, then the authorization header on top.
        let headers_mut = request.headers_mut();
        if let Some(ref headers) = headers {
            for (name, value) in headers {
                headers_mut.insert(name, value.clone());
            }
        }
        headers_mut.insert(
            AUTHORIZATION,
            HeaderValue::from_str(&format!("{} {}", token.token_type, token.access_token,))?,
        );

        // The client token is best-effort: log and proceed without it on failure.
        match self.client_token().await {
            Ok(client_token) => {
                let _ = headers_mut.insert(CLIENT_TOKEN, HeaderValue::from_str(&client_token)?);
            }
            Err(e) => {
                // currently these endpoints seem to work fine without it
                warn!("Unable to get client token: {e} Trying to continue without...")
            }
        }

        last_response = self.session().http_client().request_body(request).await;

        if last_response.is_ok() {
            return last_response;
        }

        // Break before the reconnection logic below, so that the current access point
        // is retained when max_tries == 1. Leave it up to the caller when to flush.
        if let RequestStrategy::TryTimes(max_tries) = self.lock(|inner| inner.strategy) {
            if tries >= max_tries {
                break;
            }
        }

        // Reconnection logic: drop the current access point if we are experiencing issues.
        // This will cause the next call to base_url() to resolve a new one.
        if let Err(ref network_error) = last_response {
            match network_error.kind {
                ErrorKind::Unavailable | ErrorKind::DeadlineExceeded => {
                    // Keep trying the current access point three times before dropping it.
                    if tries % 3 == 0 {
                        self.flush_accesspoint().await
                    }
                }
                _ => break, // if we can't build the request now, then we won't ever
            }
        }

        debug!("Error was: {last_response:?}");
    }

    last_response
}
/// Uploads this device's Spotify Connect state, tagged with the session's
/// connection id.
pub async fn put_connect_state_request(&self, state: &PutStateRequest) -> SpClientResult {
    let endpoint = format!("/connect-state/v1/devices/{}", self.session().device_id());

    let mut headers = HeaderMap::new();
    headers.insert(CONNECTION_ID, self.session().connection_id().parse()?);

    self.request_with_protobuf(&Method::PUT, &endpoint, Some(headers), state)
        .await
}

/// Removes this device's Connect state from the server.
pub async fn delete_connect_state_request(&self) -> SpClientResult {
    let endpoint = format!("/connect-state/v1/devices/{}", self.session().device_id());
    self.request(&Method::DELETE, &endpoint, None, None).await
}

/// Marks this device inactive; `notify` controls whether other devices are
/// informed of the change.
pub async fn put_connect_state_inactive(&self, notify: bool) -> SpClientResult {
    let endpoint = format!(
        "/connect-state/v1/devices/{}/inactive?notify={notify}",
        self.session().device_id()
    );

    let mut headers = HeaderMap::new();
    headers.insert(CONNECTION_ID, self.session().connection_id().parse()?);

    self.request(&Method::PUT, &endpoint, Some(headers), None)
        .await
}
/// Fetches `/metadata/4/{scope}/{base16 id}` metadata for an entity.
pub async fn get_metadata(&self, scope: &str, id: &SpotifyId) -> SpClientResult {
    let endpoint = format!("/metadata/4/{}/{}", scope, id.to_base16()?);

    // For unknown reasons, metadata requests must now be sent through spclient.wg.spotify.com.
    // Otherwise, the API will respond with 500 Internal Server Error responses.
    // Context: https://github.com/librespot-org/librespot/issues/1527
    self.request_with_options(
        &Method::GET,
        &endpoint,
        None,
        None,
        &SPCLIENT_FALLBACK_ENDPOINT,
    )
    .await
}

// Convenience wrappers around `get_metadata` for each entity scope.

pub async fn get_track_metadata(&self, track_id: &SpotifyId) -> SpClientResult {
    self.get_metadata("track", track_id).await
}

pub async fn get_episode_metadata(&self, episode_id: &SpotifyId) -> SpClientResult {
    self.get_metadata("episode", episode_id).await
}

pub async fn get_album_metadata(&self, album_id: &SpotifyId) -> SpClientResult {
    self.get_metadata("album", album_id).await
}

pub async fn get_artist_metadata(&self, artist_id: &SpotifyId) -> SpClientResult {
    self.get_metadata("artist", artist_id).await
}

pub async fn get_show_metadata(&self, show_id: &SpotifyId) -> SpClientResult {
    self.get_metadata("show", show_id).await
}
/// Fetches color-lyrics JSON for a track.
pub async fn get_lyrics(&self, track_id: &SpotifyId) -> SpClientResult {
    let endpoint = format!("/color-lyrics/v2/track/{}", track_id.to_base62()?);

    self.request_as_json(&Method::GET, &endpoint, None, None)
        .await
}

/// Fetches color-lyrics JSON keyed to a specific cover image.
pub async fn get_lyrics_for_image(
    &self,
    track_id: &SpotifyId,
    image_id: &FileId,
) -> SpClientResult {
    let endpoint = format!(
        "/color-lyrics/v2/track/{}/image/spotify:image:{}",
        track_id.to_base62()?,
        image_id
    );

    self.request_as_json(&Method::GET, &endpoint, None, None)
        .await
}

/// Fetches a playlist by its base62 id.
pub async fn get_playlist(&self, playlist_id: &SpotifyId) -> SpClientResult {
    let endpoint = format!("/playlist/v2/playlist/{}", playlist_id.to_base62()?);

    self.request(&Method::GET, &endpoint, None, None).await
}
/// Fetches a user's public profile, optionally limiting how many playlists
/// and/or artists are included in the response.
pub async fn get_user_profile(
    &self,
    username: &str,
    playlist_limit: Option<u32>,
    artist_limit: Option<u32>,
) -> SpClientResult {
    // Collect whichever limits are present, then append them as one query string.
    let mut params = Vec::with_capacity(2);
    if let Some(limit) = playlist_limit {
        params.push(format!("playlist_limit={limit}"));
    }
    if let Some(limit) = artist_limit {
        params.push(format!("artist_limit={limit}"));
    }

    let mut endpoint = format!("/user-profile-view/v3/profile/{username}");
    if !params.is_empty() {
        let _ = write!(endpoint, "?{}", params.join("&"));
    }

    self.request_as_json(&Method::GET, &endpoint, None, None)
        .await
}
/// Fetches the list of a user's followers.
pub async fn get_user_followers(&self, username: &str) -> SpClientResult {
    let endpoint = format!("/user-profile-view/v3/profile/{username}/followers");

    self.request_as_json(&Method::GET, &endpoint, None, None)
        .await
}

/// Fetches the list of profiles a user follows.
pub async fn get_user_following(&self, username: &str) -> SpClientResult {
    let endpoint = format!("/user-profile-view/v3/profile/{username}/following");

    self.request_as_json(&Method::GET, &endpoint, None, None)
        .await
}

/// Resolves a radio ("inspired by") playlist seeded by `track_uri`.
pub async fn get_radio_for_track(&self, track_uri: &SpotifyUri) -> SpClientResult {
    let endpoint = format!(
        "/inspiredby-mix/v2/seed_to_playlist/{}?response-format=json",
        track_uri.to_uri()?
    );

    self.request_as_json(&Method::GET, &endpoint, None, None)
        .await
}
// Known working scopes: stations, tracks
// For others see: https://gist.github.com/roderickvd/62df5b74d2179a12de6817a37bb474f9
//
// Seen-in-the-wild but unimplemented query parameters:
// - image_style=gradient_overlay
// - excludeClusters=true
// - language=en
// - count_tracks=0
// - market=from_token
/// Fetches an Apollo radio station for `context_uri`, optionally seeded with
/// previously played tracks and capped at `count` tracks.
pub async fn get_apollo_station(
    &self,
    scope: &str,
    context_uri: &str,
    count: Option<usize>,
    previous_tracks: Vec<SpotifyId>,
    autoplay: bool,
) -> SpClientResult {
    let mut endpoint = format!("/radio-apollo/v3/{scope}/{context_uri}?autoplay={autoplay}");

    // Spotify has a default of 50
    if let Some(count) = count {
        let _ = write!(endpoint, "&count={count}");
    }

    // Base62-encode the seed tracks into a comma-separated list; any invalid
    // id aborts the request via `?`.
    let previous_track_str = previous_tracks
        .iter()
        .map(|track| track.to_base62())
        .collect::<Result<Vec<_>, _>>()?
        .join(",");
    // better than checking `previous_tracks.len() > 0` because the `filter_map` could still return 0 items
    if !previous_track_str.is_empty() {
        let _ = write!(endpoint, "&prev_tracks={previous_track_str}");
    }

    self.request_as_json(&Method::GET, &endpoint, None, None)
        .await
}

/// Follows an `hm:/`-prefixed pagination URI returned by another endpoint.
pub async fn get_next_page(&self, next_page_uri: &str) -> SpClientResult {
    let endpoint = next_page_uri.trim_start_matches("hm:/");
    self.request_as_json(&Method::GET, endpoint, None, None)
        .await
}
// TODO: Seen-in-the-wild but unimplemented endpoints
// - /presence-view/v1/buddylist

/// Batch-fetches extended metadata for multiple entities in one request.
pub async fn get_extended_metadata(&self, request: BatchedEntityRequest) -> SpClientResult {
    let endpoint = "/extended-metadata/v0/extended-metadata";
    self.request_with_protobuf(&Method::POST, endpoint, None, &request)
        .await
}

/// Resolves the storage location of an interactive audio file by its id.
pub async fn get_audio_storage(&self, file_id: &FileId) -> SpClientResult {
    let endpoint = format!(
        "/storage-resolve/files/audio/interactive/{}",
        file_id.to_base16()?
    );

    self.request(&Method::GET, &endpoint, None, None).await
}
pub fn stream_from_cdn<U>(
&self,
cdn_url: U,
offset: usize,
length: usize,
) -> Result<IntoStream<ResponseFuture>, Error>
where
U: TryInto<Uri>,
<U as TryInto<Uri>>::Error: Into<http::Error>,
{
let req = Request::builder()
.method(&Method::GET)
.uri(cdn_url)
.header(
RANGE,
HeaderValue::from_str(&format!("bytes={}-{}", offset, offset + length - 1))?,
)
.body(Bytes::new())?;
let stream = self.session().http_client().request_stream(req)?;
Ok(stream)
}
/// Fetches an absolute `url` (e.g. one handed out by a template or by
/// storage-resolve) and returns the response body.
pub async fn request_url(&self, url: &str) -> SpClientResult {
    let request = Request::builder()
        .method(&Method::GET)
        .uri(url)
        .body(Bytes::new())?;

    self.session().http_client().request_body(request).await
}
// Audio preview in 96 kbps MP3, unencrypted
pub async fn get_audio_preview(&self, preview_id: &FileId) -> SpClientResult {
    // The preview URL template is provided as a per-session user attribute.
    let attribute = "audio-preview-url-template";
    let template = self
        .session()
        .get_user_attribute(attribute)
        .ok_or_else(|| SpClientError::Attribute(attribute.to_string()))?;

    let mut url = template.replace("{id}", &preview_id.to_base16()?);

    // Append the client id, starting a query string only if there is none yet.
    let separator = if url.contains('?') { "&" } else { "?" };
    let _ = write!(url, "{}cid={}", separator, self.session().client_id());

    self.request_url(&url).await
}
// The first 128 kB of a track, unencrypted
pub async fn get_head_file(&self, file_id: &FileId) -> SpClientResult {
    // The URL template is provided as a per-session user attribute.
    let attribute = "head-files-url";
    let template = self
        .session()
        .get_user_attribute(attribute)
        .ok_or_else(|| SpClientError::Attribute(attribute.to_string()))?;

    let url = template.replace("{file_id}", &file_id.to_base16()?);

    self.request_url(&url).await
}

/// Downloads an image by file id, using the session's `image-url` template.
pub async fn get_image(&self, image_id: &FileId) -> SpClientResult {
    let attribute = "image-url";
    let template = self
        .session()
        .get_user_attribute(attribute)
        .ok_or_else(|| SpClientError::Attribute(attribute.to_string()))?;

    let url = template.replace("{file_id}", &image_id.to_base16()?);

    self.request_url(&url).await
}
/// Request the context for an uri
///
/// All [SpotifyId] uris are supported in addition to the following special uris:
/// - liked songs:
///   - all: `spotify:user:<user_id>:collection`
///   - of artist: `spotify:user:<user_id>:collection:artist:<artist_id>`
/// - search: `spotify:search:<search+query>` (whitespaces are replaced with `+`)
///
/// ## Query params found in the wild:
/// - include_video=true
///
/// ## Known results of uri types:
/// - uris of type `track`
///   - returns a single page with a single track
///   - when requesting a single track with a query in the request, the returned track uri
///     **will** contain the query
/// - uris of type `artist`
///   - returns 2 pages with tracks: 10 most popular tracks and latest/popular album
///   - remaining pages are artist albums sorted by popularity (only provided as page_url)
/// - uris of type `search`
///   - is massively influenced by the provided query
///   - the query result shown by the search expects no query at all
///   - uri looks like `spotify:search:never+gonna`
pub async fn get_context(&self, uri: &str) -> Result<Context, Error> {
    let uri = format!("/context-resolve/v1/{uri}");

    let res = self
        .request_with_options(&Method::GET, &uri, None, None, &NO_METRICS_AND_SALT)
        .await?;
    let ctx_json = String::from_utf8(res.to_vec())?;
    // An empty body is surfaced as `SpClientError::NoData`.
    if ctx_json.is_empty() {
        Err(SpClientError::NoData)?
    }

    let ctx = protobuf_json_mapping::parse_from_str::<Context>(&ctx_json);
    // Log the offending JSON before surfacing the parse error via `?`.
    if ctx.is_err() {
        trace!("failed parsing context: {ctx_json}")
    }

    Ok(ctx?)
}
/// Resolves an autoplay continuation context for the given request.
pub async fn get_autoplay_context(
    &self,
    context_request: &AutoplayContextRequest,
) -> Result<Context, Error> {
    let res = self
        .request_with_protobuf_and_options(
            &Method::POST,
            "/context-resolve/v1/autoplay",
            None,
            context_request,
            &NO_METRICS_AND_SALT,
        )
        .await?;
    let ctx_json = String::from_utf8(res.to_vec())?;
    // An empty body is surfaced as `SpClientError::NoData`.
    if ctx_json.is_empty() {
        Err(SpClientError::NoData)?
    }

    let ctx = protobuf_json_mapping::parse_from_str::<Context>(&ctx_json);
    // Log the offending JSON before surfacing the parse error via `?`.
    if ctx.is_err() {
        trace!("failed parsing context: {ctx_json}")
    }

    Ok(ctx?)
}
/// Fetches a slice of the user's root playlist list starting at `from`;
/// `length` defaults to 120 when not given.
pub async fn get_rootlist(&self, from: usize, length: Option<usize>) -> SpClientResult {
    let length = length.unwrap_or(120);
    let user = self.session().username();

    let endpoint = format!(
        "/playlist/v2/user/{user}/rootlist?decorate=revision,attributes,length,owner,capabilities,status_code&from={from}&length={length}"
    );

    self.request(&Method::GET, &endpoint, None, None).await
}
}

View file

@ -1,32 +1,44 @@
#![allow(clippy::wrong_self_convention)]
use std::convert::TryInto;
use std::fmt;
use std::string::FromUtf8Error;
use thiserror::Error;
use crate::{Error, SpotifyUri};
// re-export FileId for historic reasons, when it was part of this mod
pub use crate::FileId;
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct SpotifyId {
pub id: u128,
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SpotifyAudioType {
Track,
Podcast,
NonPlayable,
}
#[derive(Debug, Error, Clone, Copy, PartialEq, Eq)]
pub enum SpotifyIdError {
#[error("ID cannot be parsed")]
InvalidId,
#[error("not a valid Spotify ID")]
InvalidFormat,
}
impl From<SpotifyIdError> for Error {
fn from(err: SpotifyIdError) -> Self {
Error::invalid_argument(err)
impl From<&str> for SpotifyAudioType {
fn from(v: &str) -> Self {
match v {
"track" => SpotifyAudioType::Track,
"episode" => SpotifyAudioType::Podcast,
_ => SpotifyAudioType::NonPlayable,
}
}
}
pub type SpotifyIdResult = Result<SpotifyId, Error>;
impl From<SpotifyAudioType> for &str {
fn from(audio_type: SpotifyAudioType) -> &'static str {
match audio_type {
SpotifyAudioType::Track => "track",
SpotifyAudioType::Podcast => "episode",
SpotifyAudioType::NonPlayable => "unknown",
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct SpotifyId {
pub id: u128,
pub audio_type: SpotifyAudioType,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct SpotifyIdError;
const BASE62_DIGITS: &[u8; 62] = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
const BASE16_DIGITS: &[u8; 16] = b"0123456789abcdef";
@ -36,40 +48,41 @@ impl SpotifyId {
const SIZE_BASE16: usize = 32;
const SIZE_BASE62: usize = 22;
fn track(n: u128) -> SpotifyId {
SpotifyId {
id: n,
audio_type: SpotifyAudioType::Track,
}
}
/// Parses a base16 (hex) encoded [Spotify ID] into a `SpotifyId`.
///
/// `src` is expected to be 32 bytes long and encoded using valid characters.
///
/// [Spotify ID]: https://developer.spotify.com/documentation/web-api/concepts/spotify-uris-ids
pub fn from_base16(src: &str) -> SpotifyIdResult {
if src.len() != 32 {
return Err(SpotifyIdError::InvalidId.into());
}
/// [Spotify ID]: https://developer.spotify.com/documentation/web-api/#spotify-uris-and-ids
pub fn from_base16(src: &str) -> Result<SpotifyId, SpotifyIdError> {
let mut dst: u128 = 0;
for c in src.as_bytes() {
let p = match c {
b'0'..=b'9' => c - b'0',
b'a'..=b'f' => c - b'a' + 10,
_ => return Err(SpotifyIdError::InvalidId.into()),
_ => return Err(SpotifyIdError),
} as u128;
dst <<= 4;
dst += p;
}
Ok(Self { id: dst })
Ok(SpotifyId::track(dst))
}
/// Parses a base62 encoded [Spotify ID] into a `u128`.
/// Parses a base62 encoded [Spotify ID] into a `SpotifyId`.
///
/// `src` is expected to be 22 bytes long and encoded using valid characters.
///
/// [Spotify ID]: https://developer.spotify.com/documentation/web-api/concepts/spotify-uris-ids
pub fn from_base62(src: &str) -> SpotifyIdResult {
if src.len() != Self::SIZE_BASE62 {
return Err(SpotifyIdError::InvalidId.into());
}
/// [Spotify ID]: https://developer.spotify.com/documentation/web-api/#spotify-uris-and-ids
pub fn from_base62(src: &str) -> Result<SpotifyId, SpotifyIdError> {
let mut dst: u128 = 0;
for c in src.as_bytes() {
@ -77,41 +90,62 @@ impl SpotifyId {
b'0'..=b'9' => c - b'0',
b'a'..=b'z' => c - b'a' + 10,
b'A'..=b'Z' => c - b'A' + 36,
_ => return Err(SpotifyIdError::InvalidId.into()),
_ => return Err(SpotifyIdError),
} as u128;
dst = dst.checked_mul(62).ok_or(SpotifyIdError::InvalidId)?;
dst = dst.checked_add(p).ok_or(SpotifyIdError::InvalidId)?;
dst *= 62;
dst += p;
}
Ok(Self { id: dst })
Ok(SpotifyId::track(dst))
}
/// Creates a `u128` from a copy of `SpotifyId::SIZE` (16) bytes in big-endian order.
/// Creates a `SpotifyId` from a copy of `SpotifyId::SIZE` (16) bytes in big-endian order.
///
/// The resulting `SpotifyId` will default to a `SpotifyItemType::Unknown`.
pub fn from_raw(src: &[u8]) -> SpotifyIdResult {
/// The resulting `SpotifyId` will default to a `SpotifyAudioType::TRACK`.
pub fn from_raw(src: &[u8]) -> Result<SpotifyId, SpotifyIdError> {
match src.try_into() {
Ok(dst) => Ok(Self {
id: u128::from_be_bytes(dst),
}),
Err(_) => Err(SpotifyIdError::InvalidId.into()),
Ok(dst) => Ok(SpotifyId::track(u128::from_be_bytes(dst))),
Err(_) => Err(SpotifyIdError),
}
}
/// Parses a [Spotify URI] into a `SpotifyId`.
///
/// `uri` is expected to be in the canonical form `spotify:{type}:{id}`, where `{type}`
/// can be arbitrary while `{id}` is a 22-character long, base62 encoded Spotify ID.
///
/// [Spotify URI]: https://developer.spotify.com/documentation/web-api/#spotify-uris-and-ids
pub fn from_uri(src: &str) -> Result<SpotifyId, SpotifyIdError> {
let src = src.strip_prefix("spotify:").ok_or(SpotifyIdError)?;
if src.len() <= SpotifyId::SIZE_BASE62 {
return Err(SpotifyIdError);
}
let colon_index = src.len() - SpotifyId::SIZE_BASE62 - 1;
if src.as_bytes()[colon_index] != b':' {
return Err(SpotifyIdError);
}
let mut id = SpotifyId::from_base62(&src[colon_index + 1..])?;
id.audio_type = src[..colon_index].into();
Ok(id)
}
/// Returns the `SpotifyId` as a base16 (hex) encoded, `SpotifyId::SIZE_BASE16` (32)
/// character long `String`.
#[allow(clippy::wrong_self_convention)]
pub fn to_base16(&self) -> Result<String, Error> {
to_base16(&self.to_raw(), &mut [0u8; Self::SIZE_BASE16])
pub fn to_base16(&self) -> Result<String, FromUtf8Error> {
to_base16(&self.to_raw(), &mut [0u8; SpotifyId::SIZE_BASE16])
}
/// Returns the `SpotifyId` as a [canonically] base62 encoded, `SpotifyId::SIZE_BASE62` (22)
/// character long `String`.
///
/// [canonically]: https://developer.spotify.com/documentation/web-api/concepts/spotify-uris-ids
#[allow(clippy::wrong_self_convention)]
pub fn to_base62(&self) -> Result<String, Error> {
/// [canonically]: https://developer.spotify.com/documentation/web-api/#spotify-uris-and-ids
pub fn to_base62(&self) -> Result<String, FromUtf8Error> {
let mut dst = [0u8; 22];
let mut i = 0;
let n = self.id;
@ -149,77 +183,61 @@ impl SpotifyId {
dst.reverse();
String::from_utf8(dst.to_vec()).map_err(|_| SpotifyIdError::InvalidId.into())
String::from_utf8(dst.to_vec())
}
/// Returns a copy of the `SpotifyId` as an array of `SpotifyId::SIZE` (16) bytes in
/// big-endian order.
#[allow(clippy::wrong_self_convention)]
pub fn to_raw(&self) -> [u8; Self::SIZE] {
pub fn to_raw(&self) -> [u8; SpotifyId::SIZE] {
self.id.to_be_bytes()
}
}
impl fmt::Debug for SpotifyId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("SpotifyId")
.field(&self.to_base62().unwrap_or_else(|_| "invalid uri".into()))
.finish()
/// Returns the `SpotifyId` as a [Spotify URI] in the canonical form `spotify:{type}:{id}`,
/// where `{type}` is an arbitrary string and `{id}` is a 22-character long, base62 encoded
/// Spotify ID.
///
/// If the `SpotifyId` has an associated type unrecognized by the library, `{type}` will
/// be encoded as `unknown`.
///
/// [Spotify URI]: https://developer.spotify.com/documentation/web-api/#spotify-uris-and-ids
pub fn to_uri(&self) -> Result<String, FromUtf8Error> {
// 8 chars for the "spotify:" prefix + 1 colon + 22 chars base62 encoded ID = 31
// + unknown size audio_type.
let audio_type: &str = self.audio_type.into();
let mut dst = String::with_capacity(31 + audio_type.len());
dst.push_str("spotify:");
dst.push_str(audio_type);
dst.push(':');
let base62 = self.to_base62()?;
dst.push_str(&base62);
Ok(dst)
}
}
impl fmt::Display for SpotifyId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&self.to_base62().unwrap_or_else(|_| "invalid uri".into()))
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct FileId(pub [u8; 20]);
impl FileId {
pub fn to_base16(&self) -> Result<String, FromUtf8Error> {
to_base16(&self.0, &mut [0u8; 40])
}
}
impl TryFrom<&[u8]> for SpotifyId {
type Error = crate::Error;
fn try_from(src: &[u8]) -> Result<Self, Self::Error> {
Self::from_raw(src)
impl fmt::Debug for FileId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("FileId").field(&self.to_base16()).finish()
}
}
impl TryFrom<&str> for SpotifyId {
type Error = crate::Error;
fn try_from(src: &str) -> Result<Self, Self::Error> {
Self::from_base62(src)
impl fmt::Display for FileId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.to_base16().unwrap_or_default())
}
}
impl TryFrom<String> for SpotifyId {
type Error = crate::Error;
fn try_from(src: String) -> Result<Self, Self::Error> {
Self::try_from(src.as_str())
}
}
impl TryFrom<&Vec<u8>> for SpotifyId {
type Error = crate::Error;
fn try_from(src: &Vec<u8>) -> Result<Self, Self::Error> {
Self::try_from(src.as_slice())
}
}
impl TryFrom<&SpotifyUri> for SpotifyId {
type Error = crate::Error;
fn try_from(value: &SpotifyUri) -> Result<Self, Self::Error> {
match value {
SpotifyUri::Album { id }
| SpotifyUri::Artist { id }
| SpotifyUri::Episode { id }
| SpotifyUri::Playlist { id, .. }
| SpotifyUri::Show { id }
| SpotifyUri::Track { id } => Ok(*id),
SpotifyUri::Local { .. } | SpotifyUri::Unknown { .. } => {
Err(SpotifyIdError::InvalidFormat.into())
}
}
}
}
pub fn to_base16(src: &[u8], buf: &mut [u8]) -> Result<String, Error> {
#[inline]
fn to_base16(src: &[u8], buf: &mut [u8]) -> Result<String, FromUtf8Error> {
let mut i = 0;
for v in src {
buf[i] = BASE16_DIGITS[(v >> 4) as usize];
@ -227,7 +245,7 @@ pub fn to_base16(src: &[u8], buf: &mut [u8]) -> Result<String, Error> {
i += 2;
}
String::from_utf8(buf.to_vec()).map_err(|_| SpotifyIdError::InvalidId.into())
String::from_utf8(buf.to_vec())
}
#[cfg(test)]
@ -236,14 +254,18 @@ mod tests {
struct ConversionCase {
id: u128,
kind: SpotifyAudioType,
uri: &'static str,
base16: &'static str,
base62: &'static str,
raw: &'static [u8],
}
static CONV_VALID: [ConversionCase; 5] = [
static CONV_VALID: [ConversionCase; 4] = [
ConversionCase {
id: 238762092608182713602505436543891614649,
kind: SpotifyAudioType::Track,
uri: "spotify:track:5sWHDYs0csV6RS48xBl0tH",
base16: "b39fe8081e1f4c54be38e8d6f9f12bb9",
base62: "5sWHDYs0csV6RS48xBl0tH",
raw: &[
@ -252,6 +274,8 @@ mod tests {
},
ConversionCase {
id: 204841891221366092811751085145916697048,
kind: SpotifyAudioType::Track,
uri: "spotify:track:4GNcXTGWmnZ3ySrqvol3o4",
base16: "9a1b1cfbc6f244569ae0356c77bbe9d8",
base62: "4GNcXTGWmnZ3ySrqvol3o4",
raw: &[
@ -260,6 +284,8 @@ mod tests {
},
ConversionCase {
id: 204841891221366092811751085145916697048,
kind: SpotifyAudioType::Podcast,
uri: "spotify:episode:4GNcXTGWmnZ3ySrqvol3o4",
base16: "9a1b1cfbc6f244569ae0356c77bbe9d8",
base62: "4GNcXTGWmnZ3ySrqvol3o4",
raw: &[
@ -268,23 +294,22 @@ mod tests {
},
ConversionCase {
id: 204841891221366092811751085145916697048,
kind: SpotifyAudioType::NonPlayable,
uri: "spotify:unknown:4GNcXTGWmnZ3ySrqvol3o4",
base16: "9a1b1cfbc6f244569ae0356c77bbe9d8",
base62: "4GNcXTGWmnZ3ySrqvol3o4",
raw: &[
154, 27, 28, 251, 198, 242, 68, 86, 154, 224, 53, 108, 119, 187, 233, 216,
],
},
ConversionCase {
id: 0,
base16: "00000000000000000000000000000000",
base62: "0000000000000000000000",
raw: &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
];
static CONV_INVALID: [ConversionCase; 5] = [
static CONV_INVALID: [ConversionCase; 3] = [
ConversionCase {
id: 0,
kind: SpotifyAudioType::NonPlayable,
// Invalid ID in the URI.
uri: "spotify:arbitrarywhatever:5sWHDYs0Bl0tH",
base16: "ZZZZZ8081e1f4c54be38e8d6f9f12bb9",
base62: "!!!!!Ys0csV6RS48xBl0tH",
raw: &[
@ -294,6 +319,9 @@ mod tests {
},
ConversionCase {
id: 0,
kind: SpotifyAudioType::NonPlayable,
// Missing colon between ID and type.
uri: "spotify:arbitrarywhatever5sWHDYs0csV6RS48xBl0tH",
base16: "--------------------",
base62: "....................",
raw: &[
@ -303,30 +331,11 @@ mod tests {
},
ConversionCase {
id: 0,
// too long, should return error but not panic overflow
base16: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
// too long, should return error but not panic overflow
base62: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
raw: &[
// Invalid length.
154, 27, 28, 251,
],
},
ConversionCase {
id: 0,
kind: SpotifyAudioType::NonPlayable,
// Uri too short
uri: "spotify:azb:aRS48xBl0tH",
base16: "--------------------",
// too short to encode a 128 bits int
base62: "aa",
raw: &[
// Invalid length.
154, 27, 28, 251,
],
},
ConversionCase {
id: 0,
base16: "--------------------",
// too high of a value, this would need a 132 bits int
base62: "ZZZZZZZZZZZZZZZZZZZZZZ",
base62: "....................",
raw: &[
// Invalid length.
154, 27, 28, 251,
@ -341,14 +350,17 @@ mod tests {
}
for c in &CONV_INVALID {
assert!(SpotifyId::from_base62(c.base62).is_err(),);
assert_eq!(SpotifyId::from_base62(c.base62), Err(SpotifyIdError));
}
}
#[test]
fn to_base62() {
for c in &CONV_VALID {
let id = SpotifyId { id: c.id };
let id = SpotifyId {
id: c.id,
audio_type: c.kind,
};
assert_eq!(id.to_base62().unwrap(), c.base62);
}
@ -361,19 +373,48 @@ mod tests {
}
for c in &CONV_INVALID {
assert!(SpotifyId::from_base16(c.base16).is_err(),);
assert_eq!(SpotifyId::from_base16(c.base16), Err(SpotifyIdError));
}
}
#[test]
fn to_base16() {
for c in &CONV_VALID {
let id = SpotifyId { id: c.id };
let id = SpotifyId {
id: c.id,
audio_type: c.kind,
};
assert_eq!(id.to_base16().unwrap(), c.base16);
}
}
#[test]
fn from_uri() {
for c in &CONV_VALID {
let actual = SpotifyId::from_uri(c.uri).unwrap();
assert_eq!(actual.id, c.id);
assert_eq!(actual.audio_type, c.kind);
}
for c in &CONV_INVALID {
assert_eq!(SpotifyId::from_uri(c.uri), Err(SpotifyIdError));
}
}
#[test]
fn to_uri() {
for c in &CONV_VALID {
let id = SpotifyId {
id: c.id,
audio_type: c.kind,
};
assert_eq!(id.to_uri().unwrap(), c.uri);
}
}
#[test]
fn from_raw() {
for c in &CONV_VALID {
@ -381,7 +422,7 @@ mod tests {
}
for c in &CONV_INVALID {
assert!(SpotifyId::from_raw(c.raw).is_err());
assert_eq!(SpotifyId::from_raw(c.raw), Err(SpotifyIdError));
}
}
}

View file

@ -1,583 +0,0 @@
use crate::{Error, SpotifyId};
use std::{borrow::Cow, fmt};
use thiserror::Error;
use librespot_protocol as protocol;
const SPOTIFY_ITEM_TYPE_ALBUM: &str = "album";
const SPOTIFY_ITEM_TYPE_ARTIST: &str = "artist";
const SPOTIFY_ITEM_TYPE_EPISODE: &str = "episode";
const SPOTIFY_ITEM_TYPE_PLAYLIST: &str = "playlist";
const SPOTIFY_ITEM_TYPE_SHOW: &str = "show";
const SPOTIFY_ITEM_TYPE_TRACK: &str = "track";
const SPOTIFY_ITEM_TYPE_LOCAL: &str = "local";
const SPOTIFY_ITEM_TYPE_UNKNOWN: &str = "unknown";
#[derive(Debug, Error, Clone, Copy, PartialEq, Eq)]
pub enum SpotifyUriError {
#[error("not a valid Spotify URI")]
InvalidFormat,
#[error("URI does not belong to Spotify")]
InvalidRoot,
}
impl From<SpotifyUriError> for Error {
fn from(err: SpotifyUriError) -> Self {
Error::invalid_argument(err)
}
}
pub type SpotifyUriResult = Result<SpotifyUri, Error>;
#[derive(Clone, PartialEq, Eq, Hash)]
pub enum SpotifyUri {
Album {
id: SpotifyId,
},
Artist {
id: SpotifyId,
},
Episode {
id: SpotifyId,
},
Playlist {
user: Option<String>,
id: SpotifyId,
},
Show {
id: SpotifyId,
},
Track {
id: SpotifyId,
},
Local {
artist: String,
album_title: String,
track_title: String,
duration: std::time::Duration,
},
Unknown {
kind: Cow<'static, str>,
id: String,
},
}
impl SpotifyUri {
/// Returns whether this `SpotifyUri` is for a playable audio item, if known.
pub fn is_playable(&self) -> bool {
matches!(self, SpotifyUri::Episode { .. } | SpotifyUri::Track { .. })
}
/// Gets the item type of this URI as a static string
pub fn item_type(&self) -> &'static str {
match &self {
SpotifyUri::Album { .. } => SPOTIFY_ITEM_TYPE_ALBUM,
SpotifyUri::Artist { .. } => SPOTIFY_ITEM_TYPE_ARTIST,
SpotifyUri::Episode { .. } => SPOTIFY_ITEM_TYPE_EPISODE,
SpotifyUri::Playlist { .. } => SPOTIFY_ITEM_TYPE_PLAYLIST,
SpotifyUri::Show { .. } => SPOTIFY_ITEM_TYPE_SHOW,
SpotifyUri::Track { .. } => SPOTIFY_ITEM_TYPE_TRACK,
SpotifyUri::Local { .. } => SPOTIFY_ITEM_TYPE_LOCAL,
SpotifyUri::Unknown { .. } => SPOTIFY_ITEM_TYPE_UNKNOWN,
}
}
/// Gets the ID of this URI. The resource ID is the component of the URI that identifies
/// the resource after its type label. If `self` is a named ID, the user will be omitted.
pub fn to_id(&self) -> Result<String, Error> {
match &self {
SpotifyUri::Album { id }
| SpotifyUri::Artist { id }
| SpotifyUri::Episode { id }
| SpotifyUri::Playlist { id, .. }
| SpotifyUri::Show { id }
| SpotifyUri::Track { id } => id.to_base62(),
SpotifyUri::Local {
artist,
album_title,
track_title,
duration,
} => {
let duration_secs = duration.as_secs();
Ok(format!(
"{artist}:{album_title}:{track_title}:{duration_secs}"
))
}
SpotifyUri::Unknown { id, .. } => Ok(id.clone()),
}
}
/// Parses a [Spotify URI] into a `SpotifyUri`.
///
/// `uri` is expected to be in the canonical form `spotify:{type}:{id}`, where `{type}`
/// can be arbitrary while `{id}` is in a format that varies based on the `{type}`:
///
/// - For most item types, a 22-character long, base62 encoded Spotify ID is expected.
/// - For local files, an arbitrary length string with the fields
/// `{artist}:{album_title}:{track_title}:{duration_in_seconds}` is expected.
///
/// Spotify URI: https://developer.spotify.com/documentation/web-api/concepts/spotify-uris-ids
pub fn from_uri(src: &str) -> SpotifyUriResult {
// Basic: `spotify:{type}:{id}`
// Named: `spotify:user:{user}:{type}:{id}`
// Local: `spotify:local:{artist}:{album_title}:{track_title}:{duration_in_seconds}`
let mut parts = src.split(':');
let scheme = parts.next().ok_or(SpotifyUriError::InvalidFormat)?;
if scheme != "spotify" {
return Err(SpotifyUriError::InvalidRoot.into());
}
let mut username: Option<String> = None;
let item_type = {
let next = parts.next().ok_or(SpotifyUriError::InvalidFormat)?;
if next == "user" {
username.replace(
parts
.next()
.ok_or(SpotifyUriError::InvalidFormat)?
.to_owned(),
);
parts.next().ok_or(SpotifyUriError::InvalidFormat)?
} else {
next
}
};
let name = parts.next().ok_or(SpotifyUriError::InvalidFormat)?;
match item_type {
SPOTIFY_ITEM_TYPE_ALBUM => Ok(Self::Album {
id: SpotifyId::from_base62(name)?,
}),
SPOTIFY_ITEM_TYPE_ARTIST => Ok(Self::Artist {
id: SpotifyId::from_base62(name)?,
}),
SPOTIFY_ITEM_TYPE_EPISODE => Ok(Self::Episode {
id: SpotifyId::from_base62(name)?,
}),
SPOTIFY_ITEM_TYPE_PLAYLIST => Ok(Self::Playlist {
id: SpotifyId::from_base62(name)?,
user: username,
}),
SPOTIFY_ITEM_TYPE_SHOW => Ok(Self::Show {
id: SpotifyId::from_base62(name)?,
}),
SPOTIFY_ITEM_TYPE_TRACK => Ok(Self::Track {
id: SpotifyId::from_base62(name)?,
}),
SPOTIFY_ITEM_TYPE_LOCAL => Ok(Self::Local {
artist: "unimplemented".to_owned(),
album_title: "unimplemented".to_owned(),
track_title: "unimplemented".to_owned(),
duration: Default::default(),
}),
_ => Ok(Self::Unknown {
kind: item_type.to_owned().into(),
id: name.to_owned(),
}),
}
}
/// Returns the `SpotifyUri` as a [Spotify URI] in the canonical form `spotify:{type}:{id}`,
/// where `{type}` is an arbitrary string and `{id}` is a 22-character long, base62 encoded
/// Spotify ID.
///
/// If the `SpotifyUri` has an associated type unrecognized by the library, `{type}` will
/// be encoded as `unknown`.
///
/// If the `SpotifyUri` is named, it will be returned in the form
/// `spotify:user:{user}:{type}:{id}`.
///
/// [Spotify URI]: https://developer.spotify.com/documentation/web-api/concepts/spotify-uris-ids
pub fn to_uri(&self) -> Result<String, Error> {
let item_type = self.item_type();
let name = self.to_id()?;
if let SpotifyUri::Playlist {
id,
user: Some(user),
} = self
{
Ok(format!("spotify:user:{user}:{item_type}:{id}"))
} else {
Ok(format!("spotify:{item_type}:{name}"))
}
}
/// Gets the name of this URI. The resource name is the component of the URI that identifies
/// the resource after its type label. If `self` is a named ID, the user will be omitted.
///
/// Deprecated: not all IDs can be represented in Base62, so this function has been renamed to
/// [SpotifyUri::to_id], which this implementation forwards to.
#[deprecated(since = "0.8.0", note = "use to_name instead")]
pub fn to_base62(&self) -> Result<String, Error> {
self.to_id()
}
}
impl fmt::Debug for SpotifyUri {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("SpotifyUri")
.field(&self.to_uri().unwrap_or_else(|_| "invalid uri".into()))
.finish()
}
}
impl fmt::Display for SpotifyUri {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&self.to_uri().unwrap_or_else(|_| "invalid uri".into()))
}
}
impl TryFrom<&protocol::metadata::Album> for SpotifyUri {
type Error = crate::Error;
fn try_from(album: &protocol::metadata::Album) -> Result<Self, Self::Error> {
Ok(Self::Album {
id: SpotifyId::from_raw(album.gid())?,
})
}
}
impl TryFrom<&protocol::metadata::Artist> for SpotifyUri {
type Error = crate::Error;
fn try_from(artist: &protocol::metadata::Artist) -> Result<Self, Self::Error> {
Ok(Self::Artist {
id: SpotifyId::from_raw(artist.gid())?,
})
}
}
impl TryFrom<&protocol::metadata::Episode> for SpotifyUri {
type Error = crate::Error;
fn try_from(episode: &protocol::metadata::Episode) -> Result<Self, Self::Error> {
Ok(Self::Episode {
id: SpotifyId::from_raw(episode.gid())?,
})
}
}
impl TryFrom<&protocol::metadata::Track> for SpotifyUri {
type Error = crate::Error;
fn try_from(track: &protocol::metadata::Track) -> Result<Self, Self::Error> {
Ok(Self::Track {
id: SpotifyId::from_raw(track.gid())?,
})
}
}
impl TryFrom<&protocol::metadata::Show> for SpotifyUri {
type Error = crate::Error;
fn try_from(show: &protocol::metadata::Show) -> Result<Self, Self::Error> {
Ok(Self::Show {
id: SpotifyId::from_raw(show.gid())?,
})
}
}
impl TryFrom<&protocol::metadata::ArtistWithRole> for SpotifyUri {
type Error = crate::Error;
fn try_from(artist: &protocol::metadata::ArtistWithRole) -> Result<Self, Self::Error> {
Ok(Self::Artist {
id: SpotifyId::from_raw(artist.artist_gid())?,
})
}
}
impl TryFrom<&protocol::playlist4_external::Item> for SpotifyUri {
type Error = crate::Error;
fn try_from(item: &protocol::playlist4_external::Item) -> Result<Self, Self::Error> {
Self::from_uri(item.uri())
}
}
// Note that this is the unique revision of an item's metadata on a playlist,
// not the ID of that item or playlist.
impl TryFrom<&protocol::playlist4_external::MetaItem> for SpotifyUri {
type Error = crate::Error;
fn try_from(item: &protocol::playlist4_external::MetaItem) -> Result<Self, Self::Error> {
Ok(Self::Unknown {
kind: "MetaItem".into(),
id: SpotifyId::try_from(item.revision())?.to_base62()?,
})
}
}
// Note that this is the unique revision of a playlist, not the ID of that playlist.
impl TryFrom<&protocol::playlist4_external::SelectedListContent> for SpotifyUri {
type Error = crate::Error;
fn try_from(
playlist: &protocol::playlist4_external::SelectedListContent,
) -> Result<Self, Self::Error> {
Ok(Self::Unknown {
kind: "SelectedListContent".into(),
id: SpotifyId::try_from(playlist.revision())?.to_base62()?,
})
}
}
// TODO: check meaning and format of this field in the wild. This might be a FileId,
// which is why we now don't create a separate `Playlist` enum value yet and choose
// to discard any item type.
impl TryFrom<&protocol::playlist_annotate3::TranscodedPicture> for SpotifyUri {
type Error = crate::Error;
fn try_from(
picture: &protocol::playlist_annotate3::TranscodedPicture,
) -> Result<Self, Self::Error> {
Ok(Self::Unknown {
kind: "TranscodedPicture".into(),
id: picture.uri().to_owned(),
})
}
}
#[cfg(test)]
mod tests {
use super::*;
struct ConversionCase {
parsed: SpotifyUri,
uri: &'static str,
base62: &'static str,
}
static CONV_VALID: [ConversionCase; 4] = [
ConversionCase {
parsed: SpotifyUri::Track {
id: SpotifyId {
id: 238762092608182713602505436543891614649,
},
},
uri: "spotify:track:5sWHDYs0csV6RS48xBl0tH",
base62: "5sWHDYs0csV6RS48xBl0tH",
},
ConversionCase {
parsed: SpotifyUri::Track {
id: SpotifyId {
id: 204841891221366092811751085145916697048,
},
},
uri: "spotify:track:4GNcXTGWmnZ3ySrqvol3o4",
base62: "4GNcXTGWmnZ3ySrqvol3o4",
},
ConversionCase {
parsed: SpotifyUri::Episode {
id: SpotifyId {
id: 204841891221366092811751085145916697048,
},
},
uri: "spotify:episode:4GNcXTGWmnZ3ySrqvol3o4",
base62: "4GNcXTGWmnZ3ySrqvol3o4",
},
ConversionCase {
parsed: SpotifyUri::Show {
id: SpotifyId {
id: 204841891221366092811751085145916697048,
},
},
uri: "spotify:show:4GNcXTGWmnZ3ySrqvol3o4",
base62: "4GNcXTGWmnZ3ySrqvol3o4",
},
];
static CONV_INVALID: [ConversionCase; 5] = [
ConversionCase {
parsed: SpotifyUri::Track {
id: SpotifyId { id: 0 },
},
// Invalid ID in the URI.
uri: "spotify:track:5sWHDYs0Bl0tH",
base62: "!!!!!Ys0csV6RS48xBl0tH",
},
ConversionCase {
parsed: SpotifyUri::Track {
id: SpotifyId { id: 0 },
},
// Missing colon between ID and type.
uri: "spotify:arbitrarywhatever5sWHDYs0csV6RS48xBl0tH",
base62: "....................",
},
ConversionCase {
parsed: SpotifyUri::Track {
id: SpotifyId { id: 0 },
},
// Uri too short
uri: "spotify:track:aRS48xBl0tH",
// too long, should return error but not panic overflow
base62: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
ConversionCase {
parsed: SpotifyUri::Track {
id: SpotifyId { id: 0 },
},
// Uri too short
uri: "spotify:track:aRS48xBl0tH",
// too short to encode a 128 bits int
base62: "aa",
},
ConversionCase {
parsed: SpotifyUri::Track {
id: SpotifyId { id: 0 },
},
uri: "cleary invalid uri",
// too high of a value, this would need a 132 bits int
base62: "ZZZZZZZZZZZZZZZZZZZZZZ",
},
];
struct ItemTypeCase {
uri: SpotifyUri,
expected_type: &'static str,
}
static ITEM_TYPES: [ItemTypeCase; 6] = [
ItemTypeCase {
uri: SpotifyUri::Album {
id: SpotifyId { id: 0 },
},
expected_type: "album",
},
ItemTypeCase {
uri: SpotifyUri::Artist {
id: SpotifyId { id: 0 },
},
expected_type: "artist",
},
ItemTypeCase {
uri: SpotifyUri::Episode {
id: SpotifyId { id: 0 },
},
expected_type: "episode",
},
ItemTypeCase {
uri: SpotifyUri::Playlist {
user: None,
id: SpotifyId { id: 0 },
},
expected_type: "playlist",
},
ItemTypeCase {
uri: SpotifyUri::Show {
id: SpotifyId { id: 0 },
},
expected_type: "show",
},
ItemTypeCase {
uri: SpotifyUri::Track {
id: SpotifyId { id: 0 },
},
expected_type: "track",
},
];
#[test]
fn to_id() {
for c in &CONV_VALID {
assert_eq!(c.parsed.to_id().unwrap(), c.base62);
}
}
#[test]
fn item_type() {
for i in &ITEM_TYPES {
assert_eq!(i.uri.item_type(), i.expected_type);
}
// These need to use methods that can't be used in the static context like to_owned() and
// into().
let local_file = SpotifyUri::Local {
artist: "".to_owned(),
album_title: "".to_owned(),
track_title: "".to_owned(),
duration: Default::default(),
};
assert_eq!(local_file.item_type(), "local");
let unknown = SpotifyUri::Unknown {
kind: "not used".into(),
id: "".to_owned(),
};
assert_eq!(unknown.item_type(), "unknown");
}
#[test]
fn from_uri() {
for c in &CONV_VALID {
let actual = SpotifyUri::from_uri(c.uri).unwrap();
assert_eq!(actual, c.parsed);
}
for c in &CONV_INVALID {
assert!(SpotifyUri::from_uri(c.uri).is_err());
}
}
#[test]
fn from_invalid_type_uri() {
let actual =
SpotifyUri::from_uri("spotify:arbitrarywhatever:5sWHDYs0csV6RS48xBl0tH").unwrap();
assert_eq!(
actual,
SpotifyUri::Unknown {
kind: "arbitrarywhatever".into(),
id: "5sWHDYs0csV6RS48xBl0tH".to_owned()
}
)
}
#[test]
fn from_local_uri() {
let actual = SpotifyUri::from_uri("spotify:local:xyz:123").unwrap();
assert_eq!(
actual,
SpotifyUri::Local {
artist: "unimplemented".to_owned(),
album_title: "unimplemented".to_owned(),
track_title: "unimplemented".to_owned(),
duration: Default::default(),
}
);
}
#[test]
fn from_named_uri() {
let actual =
SpotifyUri::from_uri("spotify:user:spotify:playlist:37i9dQZF1DWSw8liJZcPOI").unwrap();
let SpotifyUri::Playlist { ref user, id } = actual else {
panic!("wrong id type");
};
assert_eq!(*user, Some("spotify".to_owned()));
assert_eq!(
id,
SpotifyId {
id: 136159921382084734723401526672209703396
},
);
}
#[test]
fn to_uri() {
for c in &CONV_VALID {
assert_eq!(c.parsed.to_uri().unwrap(), c.uri);
}
}
#[test]
fn to_named_uri() {
let string = "spotify:user:spotify:playlist:37i9dQZF1DWSw8liJZcPOI";
let actual =
SpotifyUri::from_uri("spotify:user:spotify:playlist:37i9dQZF1DWSw8liJZcPOI").unwrap();
assert_eq!(actual.to_uri().unwrap(), string);
}
}

View file

@ -1,143 +0,0 @@
// Ported from librespot-java. Relicensed under MIT with permission.
// Known scopes:
// ugc-image-upload, playlist-read-collaborative, playlist-modify-private,
// playlist-modify-public, playlist-read-private, user-read-playback-position,
// user-read-recently-played, user-top-read, user-modify-playback-state,
// user-read-currently-playing, user-read-playback-state, user-read-private, user-read-email,
// user-library-modify, user-library-read, user-follow-modify, user-follow-read, streaming,
// app-remote-control
use std::time::{Duration, Instant};
use serde::Deserialize;
use thiserror::Error;
use crate::Error;
component! {
TokenProvider : TokenProviderInner {
tokens: Vec<Token> = vec![],
}
}
#[derive(Debug, Error)]
pub enum TokenError {
#[error("no tokens available")]
Empty,
}
impl From<TokenError> for Error {
fn from(err: TokenError) -> Self {
Error::unavailable(err)
}
}
#[derive(Clone, Debug)]
pub struct Token {
pub access_token: String,
pub expires_in: Duration,
pub token_type: String,
pub scopes: Vec<String>,
pub timestamp: Instant,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct TokenData {
access_token: String,
expires_in: u64,
token_type: String,
scope: Vec<String>,
}
impl TokenProvider {
fn find_token(&self, scopes: Vec<&str>) -> Option<usize> {
self.lock(|inner| {
(0..inner.tokens.len()).find(|&i| inner.tokens[i].in_scopes(scopes.clone()))
})
}
// Not all combinations of scopes and client ID are allowed.
// Depending on the client ID currently used, the function may return an error for specific scopes.
// In this case get_token_with_client_id() can be used, where an appropriate client ID can be provided.
// scopes must be comma-separated
pub async fn get_token(&self, scopes: &str) -> Result<Token, Error> {
let client_id = self.session().client_id();
self.get_token_with_client_id(scopes, &client_id).await
}
pub async fn get_token_with_client_id(
&self,
scopes: &str,
client_id: &str,
) -> Result<Token, Error> {
if client_id.is_empty() {
return Err(Error::invalid_argument("Client ID cannot be empty"));
}
if let Some(index) = self.find_token(scopes.split(',').collect()) {
let cached_token = self.lock(|inner| inner.tokens[index].clone());
if cached_token.is_expired() {
self.lock(|inner| inner.tokens.remove(index));
} else {
return Ok(cached_token);
}
}
trace!(
"Requested token in scopes {scopes:?} unavailable or expired, requesting new token."
);
let query_uri = format!(
"hm://keymaster/token/authenticated?scope={}&client_id={}&device_id={}",
scopes,
client_id,
self.session().device_id(),
);
let request = self.session().mercury().get(query_uri)?;
let response = request.await?;
let data = response.payload.first().ok_or(TokenError::Empty)?.to_vec();
let token = Token::from_json(String::from_utf8(data)?)?;
trace!("Got token: {token:#?}");
self.lock(|inner| inner.tokens.push(token.clone()));
Ok(token)
}
}
impl Token {
const EXPIRY_THRESHOLD: Duration = Duration::from_secs(10);
pub fn from_json(body: String) -> Result<Self, Error> {
let data: TokenData = serde_json::from_slice(body.as_ref())?;
Ok(Self {
access_token: data.access_token,
expires_in: Duration::from_secs(data.expires_in),
token_type: data.token_type,
scopes: data.scope,
timestamp: Instant::now(),
})
}
pub fn is_expired(&self) -> bool {
self.timestamp + (self.expires_in.saturating_sub(Self::EXPIRY_THRESHOLD)) < Instant::now()
}
pub fn in_scope(&self, scope: &str) -> bool {
for s in &self.scopes {
if *s == scope {
return true;
}
}
false
}
pub fn in_scopes(&self, scopes: Vec<&str>) -> bool {
for s in scopes {
if !self.in_scope(s) {
return false;
}
}
true
}
}

View file

@ -1,102 +1,4 @@
use crate::Error;
use byteorder::{BigEndian, ByteOrder};
use futures_core::ready;
use futures_util::{FutureExt, Sink, SinkExt, future};
use hmac::digest::Digest;
use sha1::Sha1;
use std::time::{Duration, Instant};
use std::{
future::Future,
mem,
pin::Pin,
task::{Context, Poll},
};
use tokio::{task::JoinHandle, time::timeout};
/// Returns a future that will flush the sink, even if flushing is temporarily completed.
/// Finishes only if the sink throws an error.
pub(crate) fn keep_flushing<'a, T, S: Sink<T> + Unpin + 'a>(
mut s: S,
) -> impl Future<Output = S::Error> + 'a {
future::poll_fn(move |cx| match s.poll_flush_unpin(cx) {
Poll::Ready(Err(e)) => Poll::Ready(e),
_ => Poll::Pending,
})
}
pub struct CancelOnDrop<T>(pub JoinHandle<T>);
impl<T> Future for CancelOnDrop<T> {
type Output = <JoinHandle<T> as Future>::Output;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.0.poll_unpin(cx)
}
}
impl<T> Drop for CancelOnDrop<T> {
fn drop(&mut self) {
self.0.abort();
}
}
pub struct TimeoutOnDrop<T: Send + 'static> {
handle: Option<JoinHandle<T>>,
timeout: tokio::time::Duration,
}
impl<T: Send + 'static> TimeoutOnDrop<T> {
pub fn new(handle: JoinHandle<T>, timeout: tokio::time::Duration) -> Self {
Self {
handle: Some(handle),
timeout,
}
}
pub fn take(&mut self) -> Option<JoinHandle<T>> {
self.handle.take()
}
}
impl<T: Send + 'static> Future for TimeoutOnDrop<T> {
type Output = <JoinHandle<T> as Future>::Output;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let r = ready!(
self.handle
.as_mut()
.expect("Polled after ready")
.poll_unpin(cx)
);
self.handle = None;
Poll::Ready(r)
}
}
impl<T: Send + 'static> Drop for TimeoutOnDrop<T> {
fn drop(&mut self) {
let mut handle = if let Some(handle) = self.handle.take() {
handle
} else {
return;
};
if (&mut handle).now_or_never().is_some() {
// Already finished
return;
}
match tokio::runtime::Handle::try_current() {
Ok(h) => {
h.spawn(timeout(self.timeout, CancelOnDrop(handle)));
}
Err(_) => {
// Not in tokio context, can't spawn
handle.abort();
}
}
}
}
use std::mem;
pub trait Seq {
fn next(&self) -> Self;
@ -125,51 +27,3 @@ impl<T: Seq> SeqGenerator<T> {
mem::replace(&mut self.0, value)
}
}
pub fn solve_hash_cash(
ctx: &[u8],
prefix: &[u8],
length: i32,
dst: &mut [u8],
) -> Result<Duration, Error> {
// after a certain number of seconds, the challenge expires
const TIMEOUT: u64 = 5; // seconds
let now = Instant::now();
let md = Sha1::digest(ctx);
let mut counter: i64 = 0;
let target: i64 = BigEndian::read_i64(&md[12..20]);
let suffix = loop {
if now.elapsed().as_secs() >= TIMEOUT {
return Err(Error::deadline_exceeded(format!(
"{TIMEOUT} seconds expired"
)));
}
let suffix = [(target + counter).to_be_bytes(), counter.to_be_bytes()].concat();
let mut hasher = Sha1::new();
hasher.update(prefix);
hasher.update(&suffix);
let md = hasher.finalize();
if BigEndian::read_i64(&md[12..20]).trailing_zeros() >= (length as u32) {
break suffix;
}
counter += 1;
};
dst.copy_from_slice(&suffix);
Ok(now.elapsed())
}
pub fn get_next_query_separator(url: &str) -> &'static str {
match url.find('?') {
Some(_) => "&",
None => "?",
}
}

View file

@ -1,53 +1,17 @@
/// Version string of the form "librespot-\<sha\>"
pub const VERSION_STRING: &str = concat!("librespot-", env!("VERGEN_GIT_SHA"));
/// Version string of the form "librespot-<sha>"
pub const VERSION_STRING: &str = concat!("librespot-", env!("VERGEN_SHA_SHORT"));
/// Generate a timestamp string representing the build date (UTC).
pub const BUILD_DATE: &str = env!("VERGEN_BUILD_DATE");
/// Short sha of the latest git commit.
pub const SHA_SHORT: &str = env!("VERGEN_GIT_SHA");
pub const SHA_SHORT: &str = env!("VERGEN_SHA_SHORT");
/// Date of the latest git commit.
pub const COMMIT_DATE: &str = env!("VERGEN_GIT_COMMIT_DATE");
pub const COMMIT_DATE: &str = env!("VERGEN_COMMIT_DATE");
/// Librespot crate version.
pub const SEMVER: &str = env!("CARGO_PKG_VERSION");
/// A random build id.
pub const BUILD_ID: &str = env!("LIBRESPOT_BUILD_ID");
/// The protocol version of the Spotify desktop client.
pub const SPOTIFY_VERSION: u64 = 124200290;
/// The semantic version of the Spotify desktop client.
pub const SPOTIFY_SEMANTIC_VERSION: &str = "1.2.52.442";
/// `property_set_id` related to desktop version 1.2.52.442
pub const SPOTIFY_PROPERTY_SET_ID: &str = "b4c7e4b5835079ed94391b2e65fca0fdba65eb50";
/// The protocol version of the Spotify mobile app.
pub const SPOTIFY_MOBILE_VERSION: &str = "8.9.82.620";
/// `property_set_id` related to mobile version 8.9.82.620
pub const SPOTIFY_MOBILE_PROPERTY_SET_ID: &str =
"5ec87c2cc32e7c509703582cfaaa3c7ad253129d5701127c1f5eab5c9531736c";
/// The general spirc version
pub const SPOTIFY_SPIRC_VERSION: &str = "3.2.6";
/// The user agent to fall back to, if one could not be determined dynamically.
pub const FALLBACK_USER_AGENT: &str = "Spotify/124200290 Linux/0 (librespot)";
pub fn spotify_version() -> String {
match crate::config::OS {
"android" | "ios" => SPOTIFY_MOBILE_VERSION.to_owned(),
_ => SPOTIFY_VERSION.to_string(),
}
}
pub fn spotify_semantic_version() -> String {
match crate::config::OS {
"android" | "ios" => SPOTIFY_MOBILE_VERSION.to_owned(),
_ => SPOTIFY_SEMANTIC_VERSION.to_string(),
}
}

View file

@ -1,19 +1,25 @@
use std::time::Duration;
use tokio::time::timeout;
use librespot_core::authentication::Credentials;
use librespot_core::config::SessionConfig;
use librespot_core::session::Session;
use librespot_core::{authentication::Credentials, config::SessionConfig, session::Session};
use tokio::time::timeout;
#[tokio::test]
async fn test_connection() {
timeout(Duration::from_secs(30), async {
let result = Session::new(SessionConfig::default(), None)
.connect(Credentials::with_password("test", "test"), false)
.await;
let result = Session::connect(
SessionConfig::default(),
Credentials::with_password("test", "test"),
None,
false,
)
.await;
match result {
Ok(_) => panic!("Authentication succeeded despite of bad credentials."),
Err(e) => assert!(!e.to_string().is_empty()), // there should be some error message
Err(e) => assert_eq!(e.to_string(), "Login failed with reason: Bad credentials"),
}
})
.await

View file

@ -1,62 +1,39 @@
[package]
name = "librespot-discovery"
version.workspace = true
rust-version.workspace = true
version = "0.4.2"
authors = ["Paul Lietar <paul@lietar.net>"]
license.workspace = true
description = "The discovery logic for librespot"
repository.workspace = true
edition.workspace = true
[features]
# Refer to the workspace Cargo.toml for the list of features
default = ["with-libmdns", "native-tls"]
# Discovery backends
with-avahi = ["dep:serde", "dep:zbus"]
with-dns-sd = ["dep:dns-sd"]
with-libmdns = ["dep:libmdns"]
# TLS backend propagation
native-tls = ["librespot-core/native-tls"]
rustls-tls-native-roots = ["librespot-core/rustls-tls-native-roots"]
rustls-tls-webpki-roots = ["librespot-core/rustls-tls-webpki-roots"]
license = "MIT"
repository = "https://github.com/librespot-org/librespot"
edition = "2018"
[dependencies]
librespot-core = { version = "0.7.1", path = "../core", default-features = false }
aes = "0.8"
base64 = "0.22"
bytes = "1"
ctr = "0.9"
dns-sd = { version = "0.1", optional = true }
form_urlencoded = "1.2"
aes-ctr = "0.6"
base64 = "0.13"
form_urlencoded = "1.0"
futures-core = "0.3"
futures-util = { version = "0.3", default-features = false, features = ["std"] }
hmac = "0.12"
http-body-util = "0.1"
hyper = { version = "1.6", features = ["http1"] }
hyper-util = { version = "0.1", features = [
"server-auto",
"server-graceful",
"service",
] }
libmdns = { version = "0.10", optional = true }
hmac = "0.11"
hyper = { version = "0.14", features = ["server", "http1", "tcp"] }
libmdns = "0.7"
log = "0.4"
rand = { version = "0.9", default-features = false, features = ["thread_rng"] }
serde = { version = "1", default-features = false, features = [
"derive",
], optional = true }
serde_repr = "0.1"
serde_json = "1.0"
sha1 = "0.10"
thiserror = "2"
tokio = { version = "1", features = ["sync", "rt"] }
zbus = { version = "5", default-features = false, features = [
"tokio",
], optional = true }
rand = "0.8"
serde_json = "1.0.25"
sha-1 = "0.9"
thiserror = "1.0"
tokio = { version = "1.0", features = ["sync", "rt"] }
dns-sd = { version = "0.1.3", optional = true }
[dependencies.librespot-core]
path = "../core"
default_features = false
version = "0.4.2"
[dev-dependencies]
futures = "0.3"
hex = "0.4"
tokio = { version = "1", features = ["macros", "rt"] }
simple_logger = "2.1"
tokio = { version = "1.0", features = ["macros", "rt"] }
[features]
with-dns-sd = ["dns-sd"]

View file

@ -1,21 +1,25 @@
use futures::StreamExt;
use librespot_core::SessionConfig;
use librespot_discovery::DeviceType;
use sha1::{Digest, Sha1};
use simple_logger::SimpleLogger;
#[tokio::main(flavor = "current_thread")]
async fn main() {
SimpleLogger::new()
.with_level(log::LevelFilter::Debug)
.init()
.unwrap();
let name = "Librespot";
let device_id = hex::encode(Sha1::digest(name.as_bytes()));
let mut server =
librespot_discovery::Discovery::builder(device_id, SessionConfig::default().client_id)
.name(name)
.device_type(DeviceType::Computer)
.launch()
.unwrap();
let mut server = librespot_discovery::Discovery::builder(device_id)
.name(name)
.device_type(DeviceType::Computer)
.launch()
.unwrap();
while let Some(x) = server.next().await {
println!("Received {x:?}");
println!("Received {:?}", x);
}
}

View file

@ -1,22 +0,0 @@
use futures::StreamExt;
use librespot_core::SessionConfig;
use librespot_discovery::DeviceType;
use sha1::{Digest, Sha1};
// Example: advertise a librespot speaker *group* on the local network and
// print every set of credentials a Spotify client sends us.
#[tokio::main(flavor = "current_thread")]
async fn main() {
    let name = "Librespot Group";
    // Device id: hex-encoded SHA-1 of the device name.
    let device_id = hex::encode(Sha1::digest(name.as_bytes()));
    // Advertise as a speaker with group status; `launch` starts the discovery
    // HTTP server and registers the service via mDNS/DNS-SD.
    let mut server =
        librespot_discovery::Discovery::builder(device_id, SessionConfig::default().client_id)
            .name(name)
            .device_type(DeviceType::Speaker)
            .is_group(true)
            .launch()
            .unwrap();
    // `Discovery` is a `Stream` of `Credentials`; each item is a login blob
    // decrypted from a connecting Spotify client.
    while let Some(x) = server.next().await {
        println!("Received {x:?}");
    }
}

View file

@ -1,149 +0,0 @@
#![cfg(feature = "with-avahi")]
#[allow(unused)]
pub use server::ServerProxy;
#[allow(unused)]
pub use entry_group::{
EntryGroupProxy, EntryGroupState, StateChangedStream as EntryGroupStateChangedStream,
};
mod server {
    // This is not the full interface, just the methods we need!
    // Avahi also implements a newer version of the interface ("org.freedesktop.Avahi.Server2"), but
    // the additions are not relevant for us, and the older version is not intended to be deprecated.
    // cf. the release notes for 0.8 at https://github.com/avahi/avahi/blob/master/docs/NEWS
    #[zbus::proxy(
        interface = "org.freedesktop.Avahi.Server",
        default_service = "org.freedesktop.Avahi",
        default_path = "/",
        gen_blocking = false
    )]
    pub trait Server {
        /// EntryGroupNew method
        ///
        /// Creates a new entry group on the daemon. The `object` attribute
        /// makes the generated proxy return an `EntryGroupProxy` for the
        /// object path Avahi hands back.
        #[zbus(object = "super::entry_group::EntryGroup")]
        fn entry_group_new(&self);
        /// GetState method
        ///
        /// Returns the daemon's server state as a raw `i32`.
        fn get_state(&self) -> zbus::Result<i32>;
        /// StateChanged signal
        ///
        /// Emitted by the daemon when the server state changes.
        #[zbus(signal)]
        fn state_changed(&self, state: i32, error: &str) -> zbus::Result<()>;
    }
}
mod entry_group {
use serde_repr::Deserialize_repr;
use zbus::zvariant;
#[derive(Clone, Copy, Debug, Deserialize_repr)]
#[repr(i32)]
pub enum EntryGroupState {
// The group has not yet been committed, the user must still call avahi_entry_group_commit()
Uncommited = 0,
// The entries of the group are currently being registered
Registering = 1,
// The entries have successfully been established
Established = 2,
// A name collision for one of the entries in the group has been detected, the entries have been withdrawn
Collision = 3,
// Some kind of failure happened, the entries have been withdrawn
Failure = 4,
}
impl zvariant::Type for EntryGroupState {
const SIGNATURE: &'static zvariant::Signature = &zvariant::Signature::I32;
}
#[zbus::proxy(
interface = "org.freedesktop.Avahi.EntryGroup",
default_service = "org.freedesktop.Avahi",
gen_blocking = false
)]
pub trait EntryGroup {
/// AddAddress method
fn add_address(
&self,
interface: i32,
protocol: i32,
flags: u32,
name: &str,
address: &str,
) -> zbus::Result<()>;
/// AddRecord method
#[allow(clippy::too_many_arguments)]
fn add_record(
&self,
interface: i32,
protocol: i32,
flags: u32,
name: &str,
clazz: u16,
type_: u16,
ttl: u32,
rdata: &[u8],
) -> zbus::Result<()>;
/// AddService method
#[allow(clippy::too_many_arguments)]
fn add_service(
&self,
interface: i32,
protocol: i32,
flags: u32,
name: &str,
type_: &str,
domain: &str,
host: &str,
port: u16,
txt: &[&[u8]],
) -> zbus::Result<()>;
/// AddServiceSubtype method
#[allow(clippy::too_many_arguments)]
fn add_service_subtype(
&self,
interface: i32,
protocol: i32,
flags: u32,
name: &str,
type_: &str,
domain: &str,
subtype: &str,
) -> zbus::Result<()>;
/// Commit method
fn commit(&self) -> zbus::Result<()>;
/// Free method
fn free(&self) -> zbus::Result<()>;
/// GetState method
fn get_state(&self) -> zbus::Result<EntryGroupState>;
/// IsEmpty method
fn is_empty(&self) -> zbus::Result<bool>;
/// Reset method
fn reset(&self) -> zbus::Result<()>;
/// UpdateServiceTxt method
#[allow(clippy::too_many_arguments)]
fn update_service_txt(
&self,
interface: i32,
protocol: i32,
flags: u32,
name: &str,
type_: &str,
domain: &str,
txt: &[&[u8]],
) -> zbus::Result<()>;
/// StateChanged signal
#[zbus(signal)]
fn state_changed(&self, state: EntryGroupState, error: &str) -> zbus::Result<()>;
}
}

View file

@ -7,111 +7,27 @@
//! This library uses mDNS and DNS-SD so that other devices can find it,
//! and spawns an http server to answer requests of Spotify clients.
mod avahi;
#![warn(clippy::all, missing_docs, rust_2018_idioms)]
mod server;
use std::{
borrow::Cow,
error::Error as StdError,
pin::Pin,
task::{Context, Poll},
};
use std::borrow::Cow;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures_core::Stream;
use librespot_core as core;
use thiserror::Error;
use tokio::sync::{mpsc, oneshot};
use self::server::DiscoveryServer;
pub use crate::core::Error;
use librespot_core as core;
/// Credentials to be used in [`librespot`](`librespot_core`).
pub use crate::core::authentication::Credentials;
/// Determining the icon in the list of available devices.
pub use crate::core::config::DeviceType;
pub enum DiscoveryEvent {
Credentials(Credentials),
ServerError(DiscoveryError),
ZeroconfError(DiscoveryError),
}
enum ZeroconfCmd {
Shutdown,
}
pub struct DnsSdHandle {
task_handle: tokio::task::JoinHandle<()>,
shutdown_tx: oneshot::Sender<ZeroconfCmd>,
}
impl DnsSdHandle {
    /// Asks the zeroconf responder task to shut down and waits for it to exit.
    ///
    /// Consumes the handle. If the responder task has already gone away (the
    /// shutdown channel's receiving end was dropped), only a warning is logged
    /// and the task handle is not awaited.
    async fn shutdown(self) {
        log::debug!("Shutting down zeroconf responder");
        let Self {
            task_handle,
            shutdown_tx,
        } = self;
        // A send error means the task already dropped its receiver.
        if shutdown_tx.send(ZeroconfCmd::Shutdown).is_err() {
            log::warn!("Zeroconf responder unexpectedly disappeared");
        } else {
            // Join errors (e.g. a panicked task) are deliberately ignored.
            let _ = task_handle.await;
            log::debug!("Zeroconf responder stopped");
        }
    }
}
pub type DnsSdServiceBuilder = fn(
Cow<'static, str>,
Vec<std::net::IpAddr>,
u16,
mpsc::UnboundedSender<DiscoveryEvent>,
) -> Result<DnsSdHandle, Error>;
// Default goes first: This matches the behaviour when feature flags were exclusive, i.e. when there
// was only `feature = "with-dns-sd"` or `not(feature = "with-dns-sd")`
pub const BACKENDS: &[(
&str,
// If None, the backend is known but wasn't compiled.
Option<DnsSdServiceBuilder>,
)] = &[
#[cfg(feature = "with-avahi")]
("avahi", Some(launch_avahi)),
#[cfg(not(feature = "with-avahi"))]
("avahi", None),
#[cfg(feature = "with-dns-sd")]
("dns-sd", Some(launch_dns_sd)),
#[cfg(not(feature = "with-dns-sd"))]
("dns-sd", None),
#[cfg(feature = "with-libmdns")]
("libmdns", Some(launch_libmdns)),
#[cfg(not(feature = "with-libmdns"))]
("libmdns", None),
];
pub fn find(name: Option<&str>) -> Result<DnsSdServiceBuilder, Error> {
if let Some(ref name) = name {
match BACKENDS.iter().find(|(id, _)| name == id) {
Some((_id, Some(launch_svc))) => Ok(*launch_svc),
Some((_id, None)) => Err(Error::unavailable(format!(
"librespot built without '{name}' support"
))),
None => Err(Error::not_found(format!(
"unknown zeroconf backend '{name}'"
))),
}
} else {
BACKENDS
.iter()
.find_map(|(_, launch_svc)| *launch_svc)
.ok_or(Error::unavailable(
"librespot built without zeroconf backends",
))
}
}
/// Makes this device visible to Spotify clients in the local network.
///
/// `Discovery` implements the [`Stream`] trait. Every time this device
@ -119,328 +35,39 @@ pub fn find(name: Option<&str>) -> Result<DnsSdServiceBuilder, Error> {
pub struct Discovery {
server: DiscoveryServer,
/// An opaque handle to the DNS-SD service. Dropping this will unregister the service.
#[allow(unused)]
svc: DnsSdHandle,
event_rx: mpsc::UnboundedReceiver<DiscoveryEvent>,
#[cfg(not(feature = "with-dns-sd"))]
_svc: libmdns::Service,
#[cfg(feature = "with-dns-sd")]
_svc: dns_sd::DNSService,
}
/// A builder for [`Discovery`].
pub struct Builder {
server_config: server::Config,
port: u16,
zeroconf_ip: Vec<std::net::IpAddr>,
zeroconf_backend: Option<DnsSdServiceBuilder>,
}
/// Errors that can occur while setting up a [`Discovery`] instance.
#[derive(Debug, Error)]
pub enum DiscoveryError {
#[error("Creating SHA1 block cipher failed")]
AesError(#[from] aes::cipher::InvalidLength),
pub enum Error {
/// Setting up service discovery via DNS-SD failed.
#[error("Setting up dns-sd failed: {0}")]
DnsSdError(#[source] Box<dyn StdError + Send + Sync>),
#[error("Creating SHA1 HMAC failed for base key {0:?}")]
HmacError(Vec<u8>),
#[error("Setting up the HTTP server failed: {0}")]
DnsSdError(#[from] io::Error),
/// Setting up the http server failed.
#[error("Setting up the http server failed: {0}")]
HttpServerError(#[from] hyper::Error),
#[error("Missing params for key {0}")]
ParamsError(&'static str),
}
#[cfg(feature = "with-avahi")]
impl From<zbus::Error> for DiscoveryError {
fn from(error: zbus::Error) -> Self {
Self::DnsSdError(Box::new(error))
}
}
impl From<DiscoveryError> for Error {
fn from(err: DiscoveryError) -> Self {
match err {
DiscoveryError::AesError(_) => Error::unavailable(err),
DiscoveryError::DnsSdError(_) => Error::unavailable(err),
DiscoveryError::HmacError(_) => Error::invalid_argument(err),
DiscoveryError::HttpServerError(_) => Error::unavailable(err),
DiscoveryError::ParamsError(_) => Error::invalid_argument(err),
}
}
}
#[allow(unused)]
const DNS_SD_SERVICE_NAME: &str = "_spotify-connect._tcp";
#[allow(unused)]
const TXT_RECORD: [&str; 2] = ["VERSION=1.0", "CPath=/"];
#[cfg(feature = "with-avahi")]
async fn avahi_task(
name: Cow<'static, str>,
port: u16,
entry_group: &mut Option<avahi::EntryGroupProxy<'_>>,
) -> Result<(), DiscoveryError> {
use self::avahi::{EntryGroupState, ServerProxy};
use futures_util::StreamExt;
let conn = zbus::Connection::system().await?;
// Wait for the daemon to show up.
// On error: Failed to listen for NameOwnerChanged signal => Fatal DBus issue
let bus = zbus::fdo::DBusProxy::new(&conn).await?;
let mut stream = bus
.receive_name_owner_changed_with_args(&[(0, "org.freedesktop.Avahi")])
.await?;
loop {
// Wait for Avahi daemon to be started
'wait_avahi: {
while let Poll::Ready(Some(_)) = futures_util::poll!(stream.next()) {
// Drain queued name owner changes, since we're going to connect in a second
}
// Ping after we connected to the signal since it might have shown up in the meantime
if let Ok(avahi_peer) =
zbus::fdo::PeerProxy::new(&conn, "org.freedesktop.Avahi", "/").await
{
if avahi_peer.ping().await.is_ok() {
log::debug!("Pinged Avahi: Available");
break 'wait_avahi;
}
}
log::warn!(
"Failed to connect to Avahi, zeroconf discovery will not work until avahi-daemon is started. Check that it is installed and running"
);
// If it didn't, wait for the signal
match stream.next().await {
Some(_signal) => {
log::debug!("Avahi appeared");
break 'wait_avahi;
}
// The stream ended, but this should never happen
None => {
return Err(zbus::Error::Failure("DBus disappeared".to_owned()).into());
}
}
}
// Connect to Avahi and publish the service
let avahi_server = ServerProxy::new(&conn).await?;
log::trace!("Connected to Avahi");
*entry_group = Some(avahi_server.entry_group_new().await?);
let mut entry_group_state_stream = entry_group
.as_mut()
.unwrap()
.receive_state_changed()
.await?;
entry_group
.as_mut()
.unwrap()
.add_service(
-1, // AVAHI_IF_UNSPEC
-1, // IPv4 and IPv6
0, // flags
&name,
DNS_SD_SERVICE_NAME, // type
"", // domain: let the server choose
"", // host: let the server choose
port,
&TXT_RECORD.map(|s| s.as_bytes()),
)
.await?;
entry_group.as_mut().unwrap().commit().await?;
log::debug!("Commited zeroconf service with name {}", &name);
'monitor_service: loop {
tokio::select! {
Some(state_changed) = entry_group_state_stream.next() => {
let (state, error) = match state_changed.args() {
Ok(sc) => (sc.state, sc.error),
Err(e) => {
log::warn!("Error on receiving EntryGroup state from Avahi: {}", e);
continue 'monitor_service;
}
};
match state {
EntryGroupState::Uncommited | EntryGroupState::Registering => {
// Not yet registered, ignore.
}
EntryGroupState::Established => {
log::info!("Published zeroconf service");
}
EntryGroupState::Collision => {
// This most likely means that librespot has unintentionally been started twice.
// Thus, don't retry with a new name, but abort.
//
// Note that the error would usually already be returned by
// entry_group.add_service above, so this state_changed handler
// won't be hit.
//
// EntryGroup has been withdrawn at this point already!
log::error!("zeroconf collision for name '{}'", &name);
return Err(zbus::Error::Failure(format!("zeroconf collision for name: {name}")).into());
}
EntryGroupState::Failure => {
// TODO: Back off/treat as fatal?
// EntryGroup has been withdrawn at this point already!
// There seems to be no code in Avahi that actually sets this state.
log::error!("zeroconf failure: {}", error);
return Err(zbus::Error::Failure(format!("zeroconf failure: {error}")).into());
}
}
}
_name_owner_change = stream.next() => {
break 'monitor_service;
}
}
}
// Avahi disappeared (or the service was immediately taken over by a
// new daemon) => drop all handles, and reconnect
log::info!("Avahi disappeared, trying to reconnect");
}
}
#[cfg(feature = "with-avahi")]
fn launch_avahi(
name: Cow<'static, str>,
_zeroconf_ip: Vec<std::net::IpAddr>,
port: u16,
status_tx: mpsc::UnboundedSender<DiscoveryEvent>,
) -> Result<DnsSdHandle, Error> {
let (shutdown_tx, shutdown_rx) = oneshot::channel();
let task_handle = tokio::spawn(async move {
let mut entry_group = None;
tokio::select! {
res = avahi_task(name, port, &mut entry_group) => {
if let Err(e) = res {
log::error!("Avahi error: {}", e);
let _ = status_tx.send(DiscoveryEvent::ZeroconfError(e));
}
},
_ = shutdown_rx => {
if let Some(entry_group) = entry_group.as_mut() {
if let Err(e) = entry_group.free().await {
log::warn!("Failed to un-publish zeroconf service: {}", e);
} else {
log::debug!("Un-published zeroconf service");
}
}
},
}
});
Ok(DnsSdHandle {
task_handle,
shutdown_tx,
})
}
#[cfg(feature = "with-dns-sd")]
fn launch_dns_sd(
name: Cow<'static, str>,
_zeroconf_ip: Vec<std::net::IpAddr>,
port: u16,
status_tx: mpsc::UnboundedSender<DiscoveryEvent>,
) -> Result<DnsSdHandle, Error> {
let (shutdown_tx, shutdown_rx) = oneshot::channel();
let task_handle = tokio::task::spawn_blocking(move || {
let inner = move || -> Result<(), DiscoveryError> {
let svc = dns_sd::DNSService::register(
Some(name.as_ref()),
DNS_SD_SERVICE_NAME,
None,
None,
port,
&TXT_RECORD,
)
.map_err(|e| DiscoveryError::DnsSdError(Box::new(e)))?;
let _ = shutdown_rx.blocking_recv();
std::mem::drop(svc);
Ok(())
};
if let Err(e) = inner() {
log::error!("dns_sd error: {}", e);
let _ = status_tx.send(DiscoveryEvent::ZeroconfError(e));
}
});
Ok(DnsSdHandle {
shutdown_tx,
task_handle,
})
}
#[cfg(feature = "with-libmdns")]
fn launch_libmdns(
name: Cow<'static, str>,
zeroconf_ip: Vec<std::net::IpAddr>,
port: u16,
status_tx: mpsc::UnboundedSender<DiscoveryEvent>,
) -> Result<DnsSdHandle, Error> {
let (shutdown_tx, shutdown_rx) = oneshot::channel();
let task_handle = tokio::task::spawn_blocking(move || {
let inner = move || -> Result<(), DiscoveryError> {
let responder = if !zeroconf_ip.is_empty() {
libmdns::Responder::spawn_with_ip_list(
&tokio::runtime::Handle::current(),
zeroconf_ip,
)
} else {
libmdns::Responder::spawn(&tokio::runtime::Handle::current())
}
.map_err(|e| DiscoveryError::DnsSdError(Box::new(e)))?;
let svc = responder.register(&DNS_SD_SERVICE_NAME, &name, port, &TXT_RECORD);
let _ = shutdown_rx.blocking_recv();
std::mem::drop(svc);
Ok(())
};
if let Err(e) = inner() {
log::error!("libmdns error: {e}");
let _ = status_tx.send(DiscoveryEvent::ZeroconfError(e));
}
});
Ok(DnsSdHandle {
shutdown_tx,
task_handle,
})
}
impl Builder {
/// Starts a new builder using the provided device and client IDs.
pub fn new<T: Into<String>>(device_id: T, client_id: T) -> Self {
/// Starts a new builder using the provided device id.
pub fn new(device_id: impl Into<String>) -> Self {
Self {
server_config: server::Config {
name: "Librespot".into(),
device_type: DeviceType::default(),
is_group: false,
device_id: device_id.into(),
client_id: client_id.into(),
},
port: 0,
zeroconf_ip: vec![],
zeroconf_backend: None,
}
}
@ -456,24 +83,6 @@ impl Builder {
self
}
/// Sets whether the device is a group. This affects the icon in Spotify clients. Default is `false`.
pub fn is_group(mut self, is_group: bool) -> Self {
self.server_config.is_group = is_group;
self
}
/// Set the ip addresses on which it should listen to incoming connections. The default is all interfaces.
pub fn zeroconf_ip(mut self, zeroconf_ip: Vec<std::net::IpAddr>) -> Self {
self.zeroconf_ip = zeroconf_ip;
self
}
/// Set the zeroconf (MDNS and DNS-SD) implementation to use.
pub fn zeroconf_backend(mut self, zeroconf_backend: DnsSdServiceBuilder) -> Self {
self.zeroconf_backend = Some(zeroconf_backend);
self
}
/// Sets the port on which it should listen to incoming connections.
/// The default value `0` means any port.
pub fn port(mut self, port: u16) -> Self {
@ -486,37 +95,42 @@ impl Builder {
/// # Errors
/// If setting up the mdns service or creating the server fails, this function returns an error.
pub fn launch(self) -> Result<Discovery, Error> {
let name = self.server_config.name.clone();
let zeroconf_ip = self.zeroconf_ip;
let (event_tx, event_rx) = mpsc::unbounded_channel();
let mut port = self.port;
let server = DiscoveryServer::new(self.server_config, &mut port, event_tx.clone())?;
let name = self.server_config.name.clone().into_owned();
let server = DiscoveryServer::new(self.server_config, &mut port)?;
let launch_svc = self.zeroconf_backend.unwrap_or(find(None)?);
let svc = launch_svc(name, zeroconf_ip, port, event_tx)?;
Ok(Discovery {
server,
svc,
event_rx,
})
#[cfg(feature = "with-dns-sd")]
let svc = dns_sd::DNSService::register(
Some(name.as_ref()),
"_spotify-connect._tcp",
None,
None,
port,
&["VERSION=1.0", "CPath=/"],
)
.map_err(|e| Error::DnsSdError(io::Error::new(io::ErrorKind::Unsupported, e)))?;
#[cfg(not(feature = "with-dns-sd"))]
let svc = libmdns::Responder::spawn(&tokio::runtime::Handle::current())?.register(
"_spotify-connect._tcp".to_owned(),
name,
port,
&["VERSION=1.0", "CPath=/"],
);
Ok(Discovery { server, _svc: svc })
}
}
impl Discovery {
/// Starts a [`Builder`] with the provided device id.
pub fn builder<T: Into<String>>(device_id: T, client_id: T) -> Builder {
Builder::new(device_id, client_id)
pub fn builder(device_id: impl Into<String>) -> Builder {
Builder::new(device_id)
}
/// Create a new instance with the specified device id and default parameters.
pub fn new<T: Into<String>>(device_id: T, client_id: T) -> Result<Self, Error> {
Self::builder(device_id, client_id).launch()
}
pub async fn shutdown(self) {
tokio::join!(self.server.shutdown(), self.svc.shutdown(),);
pub fn new(device_id: impl Into<String>) -> Result<Self, Error> {
Self::builder(device_id).launch()
}
}
@ -524,15 +138,6 @@ impl Stream for Discovery {
type Item = Credentials;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
match Pin::new(&mut self.event_rx).poll_recv(cx) {
// Yields credentials
Poll::Ready(Some(DiscoveryEvent::Credentials(creds))) => Poll::Ready(Some(creds)),
// Also terminate the stream on fatal server or MDNS/DNS-SD errors.
Poll::Ready(Some(
DiscoveryEvent::ServerError(_) | DiscoveryEvent::ZeroconfError(_),
)) => Poll::Ready(None),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
}
Pin::new(&mut self.server).poll_next(cx)
}
}

View file

@ -1,33 +1,26 @@
use std::{
borrow::Cow,
collections::BTreeMap,
net::{Ipv4Addr, Ipv6Addr, SocketAddr, TcpListener},
sync::{Arc, Mutex},
};
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::convert::Infallible;
use std::net::{Ipv4Addr, SocketAddr};
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use aes::cipher::{KeyIvInit, StreamCipher};
use base64::engine::Engine as _;
use base64::engine::general_purpose::STANDARD as BASE64;
use bytes::Bytes;
use futures_util::{FutureExt, TryFutureExt};
use hmac::{Hmac, Mac};
use http_body_util::{BodyExt, Full};
use hyper::{Method, Request, Response, StatusCode, body::Incoming};
use hyper_util::{rt::TokioIo, server::graceful::GracefulShutdown};
use log::{debug, error, warn};
use aes_ctr::cipher::generic_array::GenericArray;
use aes_ctr::cipher::{NewStreamCipher, SyncStreamCipher};
use aes_ctr::Aes128Ctr;
use futures_core::Stream;
use hmac::{Hmac, Mac, NewMac};
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, StatusCode};
use log::{debug, warn};
use serde_json::json;
use sha1::{Digest, Sha1};
use tokio::sync::{mpsc, oneshot};
use super::{DiscoveryError, DiscoveryEvent};
use crate::{
core::config::DeviceType,
core::{Error, authentication::Credentials, diffie_hellman::DhLocalKeys},
};
type Aes128Ctr = ctr::Ctr128BE<aes::Aes128>;
use crate::core::authentication::Credentials;
use crate::core::config::DeviceType;
use crate::core::diffie_hellman::DhLocalKeys;
type Params<'a> = BTreeMap<Cow<'a, str>, Cow<'a, str>>;
@ -35,142 +28,90 @@ pub struct Config {
pub name: Cow<'static, str>,
pub device_type: DeviceType,
pub device_id: String,
pub is_group: bool,
pub client_id: String,
}
struct RequestHandler {
config: Config,
username: Mutex<Option<String>>,
keys: DhLocalKeys,
event_tx: mpsc::UnboundedSender<DiscoveryEvent>,
tx: mpsc::UnboundedSender<Credentials>,
}
impl RequestHandler {
fn new(config: Config, event_tx: mpsc::UnboundedSender<DiscoveryEvent>) -> Self {
Self {
fn new(config: Config) -> (Self, mpsc::UnboundedReceiver<Credentials>) {
let (tx, rx) = mpsc::unbounded_channel();
let discovery = Self {
config,
username: Mutex::new(None),
keys: DhLocalKeys::random(&mut rand::rng()),
event_tx,
}
}
fn active_user(&self) -> String {
if let Ok(maybe_username) = self.username.lock() {
maybe_username.clone().unwrap_or(String::new())
} else {
warn!("username lock corrupted; read failed");
String::from("!")
}
}
fn handle_get_info(&self) -> Response<Full<Bytes>> {
let public_key = BASE64.encode(self.keys.public_key());
let device_type: &str = self.config.device_type.into();
let active_user = self.active_user();
// options based on zeroconf guide, search for `groupStatus` on page
let group_status = if self.config.is_group {
"GROUP"
} else {
"NONE"
keys: DhLocalKeys::random(&mut rand::thread_rng()),
tx,
};
// See: https://developer.spotify.com/documentation/commercial-hardware/implementation/guides/zeroconf/
let body = json!({
"status": 101,
"statusString": "OK",
"spotifyError": 0,
// departing from the Spotify documentation, Google Cast uses "5.0.0"
"version": "2.9.0",
"deviceID": (self.config.device_id),
"deviceType": (device_type),
"remoteName": (self.config.name),
// valid value seen in the wild: "empty"
"publicKey": (public_key),
"brandDisplayName": "librespot",
"modelDisplayName": "librespot",
"libraryVersion": crate::core::version::SEMVER,
"resolverVersion": "1",
// valid values are "GROUP" and "NONE"
"groupStatus": group_status,
// valid value documented & seen in the wild: "accesstoken"
// Using it will cause clients to fail to connect.
"tokenType": "default",
"clientID": (self.config.client_id),
"productID": 0,
// Other known scope: client-authorization-universal
// Comma-separated.
"scope": "streaming",
"availability": "",
"supported_drm_media_formats": [],
// TODO: bitmask but what are the flags?
"supported_capabilities": 1,
// undocumented but should still work
"accountReq": "PREMIUM",
"activeUser": active_user,
// others seen-in-the-wild:
// - "deviceAPI_isGroup": False
})
.to_string();
let body = Bytes::from(body);
Response::new(Full::new(body))
(discovery, rx)
}
fn handle_add_user(&self, params: &Params<'_>) -> Result<Response<Full<Bytes>>, Error> {
let username_key = "userName";
let username = params
.get(username_key)
.ok_or(DiscoveryError::ParamsError(username_key))?
.as_ref();
fn handle_get_info(&self) -> Response<hyper::Body> {
let public_key = base64::encode(&self.keys.public_key());
let device_type: &str = self.config.device_type.into();
let blob_key = "blob";
let encrypted_blob = params
.get(blob_key)
.ok_or(DiscoveryError::ParamsError(blob_key))?;
let body = json!({
"status": 101,
"statusString": "ERROR-OK",
"spotifyError": 0,
"version": "2.7.1",
"deviceID": (self.config.device_id),
"remoteName": (self.config.name),
"activeUser": "",
"publicKey": (public_key),
"deviceType": (device_type),
"libraryVersion": crate::core::version::SEMVER,
"accountReq": "PREMIUM",
"brandDisplayName": "librespot",
"modelDisplayName": "librespot",
"resolverVersion": "0",
"groupStatus": "NONE",
"voiceSupport": "NO",
})
.to_string();
let clientkey_key = "clientKey";
let client_key = params
.get(clientkey_key)
.ok_or(DiscoveryError::ParamsError(clientkey_key))?;
Response::new(Body::from(body))
}
let encrypted_blob = BASE64.decode(encrypted_blob.as_bytes())?;
fn handle_add_user(&self, params: &Params<'_>) -> Response<hyper::Body> {
let username = params.get("userName").unwrap().as_ref();
let encrypted_blob = params.get("blob").unwrap();
let client_key = params.get("clientKey").unwrap();
let client_key = BASE64.decode(client_key.as_bytes())?;
let encrypted_blob = base64::decode(encrypted_blob.as_bytes()).unwrap();
let client_key = base64::decode(client_key.as_bytes()).unwrap();
let shared_key = self.keys.shared_secret(&client_key);
let encrypted_blob_len = encrypted_blob.len();
if encrypted_blob_len < 16 {
return Err(DiscoveryError::HmacError(encrypted_blob.to_vec()).into());
}
let iv = &encrypted_blob[0..16];
let encrypted = &encrypted_blob[16..encrypted_blob_len - 20];
let cksum = &encrypted_blob[encrypted_blob_len - 20..encrypted_blob_len];
let encrypted = &encrypted_blob[16..encrypted_blob.len() - 20];
let cksum = &encrypted_blob[encrypted_blob.len() - 20..encrypted_blob.len()];
let base_key = Sha1::digest(shared_key);
let base_key = Sha1::digest(&shared_key);
let base_key = &base_key[..16];
let checksum_key = {
let mut h = Hmac::<Sha1>::new_from_slice(base_key)
.map_err(|_| DiscoveryError::HmacError(base_key.to_vec()))?;
let mut h =
Hmac::<Sha1>::new_from_slice(base_key).expect("HMAC can take key of any size");
h.update(b"checksum");
h.finalize().into_bytes()
};
let encryption_key = {
let mut h = Hmac::<Sha1>::new_from_slice(base_key)
.map_err(|_| DiscoveryError::HmacError(base_key.to_vec()))?;
let mut h =
Hmac::<Sha1>::new_from_slice(base_key).expect("HMAC can take key of any size");
h.update(b"encryption");
h.finalize().into_bytes()
};
let mut h = Hmac::<Sha1>::new_from_slice(&checksum_key)
.map_err(|_| DiscoveryError::HmacError(base_key.to_vec()))?;
let mut h =
Hmac::<Sha1>::new_from_slice(&checksum_key).expect("HMAC can take key of any size");
h.update(encrypted);
if h.verify_slice(cksum).is_err() {
warn!("Login error for user {username:?}: MAC mismatch");
if h.verify(cksum).is_err() {
warn!("Login error for user {:?}: MAC mismatch", username);
let result = json!({
"status": 102,
"spotifyError": 1,
@ -178,52 +119,40 @@ impl RequestHandler {
});
let body = result.to_string();
let body = Bytes::from(body);
return Ok(Response::new(Full::new(body)));
return Response::new(Body::from(body));
}
let decrypted = {
let mut data = encrypted.to_vec();
let mut cipher = Aes128Ctr::new_from_slices(&encryption_key[0..16], iv)
.map_err(DiscoveryError::AesError)?;
let mut cipher = Aes128Ctr::new(
GenericArray::from_slice(&encryption_key[0..16]),
GenericArray::from_slice(iv),
);
cipher.apply_keystream(&mut data);
data
};
let credentials = Credentials::with_blob(username, decrypted, &self.config.device_id)?;
let credentials = Credentials::with_blob(username, &decrypted, &self.config.device_id);
{
let maybe_username = self.username.lock();
self.event_tx
.send(DiscoveryEvent::Credentials(credentials))?;
if let Ok(mut username_field) = maybe_username {
*username_field = Some(String::from(username));
} else {
warn!("username lock corrupted; write failed");
}
}
self.tx.send(credentials).unwrap();
let result = json!({
"status": 101,
"spotifyError": 0,
"statusString": "OK",
"statusString": "ERROR-OK"
});
let body = result.to_string();
let body = Bytes::from(body);
Ok(Response::new(Full::new(body)))
Response::new(Body::from(body))
}
fn not_found(&self) -> Response<Full<Bytes>> {
fn not_found(&self) -> Response<hyper::Body> {
let mut res = Response::default();
*res.status_mut() = StatusCode::NOT_FOUND;
res
}
async fn handle(
self: Arc<Self>,
request: Request<Incoming>,
) -> Result<hyper::Result<Response<Full<Bytes>>>, Error> {
async fn handle(self: Arc<Self>, request: Request<Body>) -> hyper::Result<Response<Body>> {
let mut params = Params::new();
let (parts, body) = request.into_parts();
@ -237,122 +166,70 @@ impl RequestHandler {
debug!("{:?} {:?} {:?}", parts.method, parts.uri.path(), params);
}
let body = body.collect().await?.to_bytes();
let body = hyper::body::to_bytes(body).await?;
params.extend(form_urlencoded::parse(&body));
let action = params.get("action").map(Cow::as_ref);
Ok(Ok(match (parts.method, action) {
Ok(match (parts.method, action) {
(Method::GET, Some("getInfo")) => self.handle_get_info(),
(Method::POST, Some("addUser")) => self.handle_add_user(&params)?,
(Method::POST, Some("addUser")) => self.handle_add_user(&params),
_ => self.not_found(),
}))
})
}
}
pub(crate) enum DiscoveryServerCmd {
Shutdown,
}
pub struct DiscoveryServer {
close_tx: oneshot::Sender<DiscoveryServerCmd>,
task_handle: tokio::task::JoinHandle<()>,
cred_rx: mpsc::UnboundedReceiver<Credentials>,
_close_tx: oneshot::Sender<Infallible>,
}
impl DiscoveryServer {
pub fn new(
config: Config,
port: &mut u16,
event_tx: mpsc::UnboundedSender<DiscoveryEvent>,
) -> Result<Self, Error> {
let discovery = RequestHandler::new(config, event_tx);
let address = if cfg!(windows) {
SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), *port)
} else {
// this creates a dual stack socket on non-windows systems
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), *port)
};
pub fn new(config: Config, port: &mut u16) -> hyper::Result<Self> {
let (discovery, cred_rx) = RequestHandler::new(config);
let discovery = Arc::new(discovery);
let (close_tx, close_rx) = oneshot::channel();
let listener = match TcpListener::bind(address) {
Ok(listener) => listener,
Err(e) => {
warn!("Discovery server failed to start: {e}");
return Err(e.into());
let address = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), *port);
let make_service = make_service_fn(move |_| {
let discovery = discovery.clone();
async move {
Ok::<_, hyper::Error>(service_fn(move |request| discovery.clone().handle(request)))
}
};
});
listener.set_nonblocking(true)?;
let listener = tokio::net::TcpListener::from_std(listener)?;
let server = hyper::Server::try_bind(&address)?.serve(make_service);
match listener.local_addr() {
Ok(addr) => {
*port = addr.port();
debug!("Zeroconf server listening on 0.0.0.0:{}", *port);
*port = server.local_addr().port();
debug!("Zeroconf server listening on 0.0.0.0:{}", *port);
tokio::spawn(async {
let result = server
.with_graceful_shutdown(async {
close_rx.await.unwrap_err();
debug!("Shutting down discovery server");
})
.await;
if let Err(e) = result {
warn!("Discovery server failed: {}", e);
}
Err(e) => {
warn!("Discovery server failed to start: {e}");
return Err(e.into());
}
}
let task_handle = tokio::spawn(async move {
let discovery = Arc::new(discovery);
let server = hyper::server::conn::http1::Builder::new();
let graceful = GracefulShutdown::new();
let mut close_rx = std::pin::pin!(close_rx);
loop {
tokio::select! {
Ok((stream, _)) = listener.accept() => {
let io = TokioIo::new(stream);
let discovery = discovery.clone();
let svc = hyper::service::service_fn(move |request| {
discovery
.clone()
.handle(request)
.inspect_err(|e| error!("could not handle discovery request: {e}"))
.and_then(|x| async move { Ok(x) })
.map(Result::unwrap) // guaranteed by `and_then` above
});
let conn = server.serve_connection(io, svc);
let fut = graceful.watch(conn);
tokio::spawn(async move {
// Errors are logged in the service_fn
let _ = fut.await;
});
}
_ = &mut close_rx => {
break;
}
}
}
graceful.shutdown().await;
});
Ok(Self {
close_tx,
task_handle,
cred_rx,
_close_tx: close_tx,
})
}
}
pub async fn shutdown(self) {
let Self {
close_tx,
task_handle,
..
} = self;
log::debug!("Shutting down discovery server");
if close_tx.send(DiscoveryServerCmd::Shutdown).is_err() {
log::warn!("Discovery server unexpectedly disappeared");
} else {
let _ = task_handle.await;
log::debug!("Discovery server stopped");
}
impl Stream for DiscoveryServer {
type Item = Credentials;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Credentials>> {
self.cred_rx.poll_recv(cx)
}
}

View file

@ -31,7 +31,7 @@ The client solves a challenge based on these two packets, and sends it back usin
It also computes the shared keys used to encrypt the rest of the communication.
## Login challenge and cipher key computation.
The client starts by computing the DH shared secret using its private key and the server's public key.
The client starts by computing the DH shared secret using its private key and the server's public key.
HMAC-SHA1 is then used to compute the send and receive keys, as well as the login challenge.
```

View file

@ -1,79 +0,0 @@
# Dealer
When talking about the dealer, we are speaking about a websocket that represents the player as
spotify-connect device. The dealer is primarily used to receive updates and not to update the
state.
## Messages and Requests
There are two types of messages that are received via the dealer, Messages and Requests.
Messages are fire-and-forget and don't need a response, while requests expect a reply
indicating whether the request was processed successfully or failed.
Because we publish our device with support for gzip, the message payload might be BASE64 encoded
and gzip compressed. If that is the case, the related headers contain an entry for "Transfer-Encoding"
with the value "gzip".
### Messages
Most messages librespot handles send bytes that can be easily converted into their respective
protobuf definition. Some outliers send json that can usually be mapped to an existing protobuf
definition. We use `protobuf-json-mapping` to convert the json into a similar protobuf definition.
> Note: The json sometimes doesn't map exactly and can provide more fields than the protobuf
> definition expects. For messages, we usually ignore unknown fields.
There are two types of messages, "informational" and "fire and forget commands".
**Informational:**
Informational messages relay any changes made by the current user or by a client where the current user
is logged in. These messages contain, for example, changes to the user's own playlists, additions to the
liked songs, or any other update that a client sends.
**Fire and Forget commands:**
These messages carry requests addressed to the current player and are only sent to
the active player. Volume update requests and the logout request are sent as fire-and-forget commands.
### Requests
The request payload is sent as json. There are almost usable protobuf definitions (see
files named like `es_<command in snakecase>(_request).proto`) for the commands, but they don't
line up with the expected values and are missing some major information we need for handling some
commands. Because of that we have our own model for the specific commands, see
[core/src/dealer/protocol/request.rs](../core/src/dealer/protocol/request.rs).
All requests modify the player state.
## Details
This section covers details and special hiccups regarding handling that isn't completely intuitive.
### UIDs
A Spotify item is identifiable by its uri. The `ContextTrack` and `ProvidedTrack` both have a `uid`
field. When we receive a context via the `context-resolver`, it can return items (`ContextTrack`) that
may have their respective uid set. Some contexts, like the collection and albums, don't provide this
information.
When a `uid` is missing, an official client gets confused when resorting the next tracks and sends
incorrect data via the `set_queue` request. To prevent this behavior we generate a uid for each
track that doesn't have one. Queue items get a "queue-uid", which is just a `q` followed by an
incrementing number.
### Metadata
For some clients (especially mobile), the metadata of a track is very important to display the
context correctly. For example, the "autoplay" metadata is relevant for displaying the correct context
info.
Metadata can also be used to store data like the iteration when repeating a context.
### Repeat
The context repeating implementation is partly mimicked from the official client. The official
client allows skipping into negative iterations; this is currently not supported.
Repeating is realized by filling the next tracks with multiple contexts separated by delimiters.
By that we only have to handle the delimiter when skipping to the next and previous track.

View file

@ -1,36 +0,0 @@
# Examples
This folder contains examples of how to use the `librespot` library for various purposes.
## How to run the examples
In general, to invoke an example, clone down the repo and use `cargo` as follows:
```
cargo run --example [filename]
```
in which `filename` is the file name of the example, for instance `get_token` or `play`.
### Acquiring an access token
Most examples require an access token as the first positional argument. **Note that an access token
gained by the client credentials flow will not work**. `librespot-oauth` provides a utility to
acquire an access token using an OAuth flow, which will be able to run the examples. To invoke this,
run:
```
cargo run --package librespot-oauth --example oauth_sync
```
A browser window will open and prompt you to authorize with Spotify. Once done, take the
`access_token` property from the dumped object response and proceed to use it in examples. You may
find it convenient to save it in a shell variable like `$ACCESS_TOKEN`.
Once you have obtained the token you can proceed to run the example. Check each individual
file to see what arguments are expected. As a demonstration, here is how to invoke the `play`
example to play a song -- the second argument is the URI of the track to play.
```
cargo run --example play "$ACCESS_TOKEN" 2WUy2Uywcj5cP0IXQagO3z
```

View file

@ -1,40 +1,33 @@
use std::env;
use librespot::core::{authentication::Credentials, config::SessionConfig, session::Session};
use librespot::core::authentication::Credentials;
use librespot::core::config::SessionConfig;
use librespot::core::keymaster;
use librespot::core::session::Session;
const SCOPES: &str =
"streaming,user-read-playback-state,user-modify-playback-state,user-read-currently-playing";
#[tokio::main]
async fn main() {
let mut builder = env_logger::Builder::new();
builder.parse_filters("librespot=trace");
builder.init();
let mut session_config = SessionConfig::default();
let session_config = SessionConfig::default();
let args: Vec<_> = env::args().collect();
if args.len() == 3 {
// Only special client IDs have sufficient privileges e.g. Spotify's.
session_config.client_id = args[2].clone()
} else if args.len() != 2 {
eprintln!("Usage: {} ACCESS_TOKEN [CLIENT_ID]", args[0]);
if args.len() != 4 {
eprintln!("Usage: {} USERNAME PASSWORD CLIENT_ID", args[0]);
return;
}
let access_token = &args[1];
// Now create a new session with that token.
let session = Session::new(session_config.clone(), None);
let credentials = Credentials::with_access_token(access_token);
println!("Connecting with token..");
match session.connect(credentials, false).await {
Ok(()) => println!("Session username: {:#?}", session.username()),
Err(e) => {
println!("Error connecting: {e}");
return;
}
};
println!("Connecting..");
let credentials = Credentials::with_password(&args[1], &args[2]);
let (session, _) = Session::connect(session_config, credentials, None, false)
.await
.unwrap();
let token = session.token_provider().get_token(SCOPES).await.unwrap();
println!("Got me a token: {token:#?}");
println!(
"Token: {:#?}",
keymaster::get_token(&session, &args[3], SCOPES)
.await
.unwrap()
);
}

Some files were not shown because too many files have changed in this diff Show more