Compare commits
No commits in common. 'main' and 'gh-pages' have entirely different histories.
415 changed files with 2319 additions and 51100 deletions
@ -1,6 +0,0 @@
@@ -1,6 +0,0 @@
|
||||
# do not add .git, since it is needed to extract the tag |
||||
# do not add /tmp, since it is needed by make dockerhub |
||||
/binaries |
||||
/coverage*.txt |
||||
/apidocs/*.html |
||||
**/hls.min.js |
||||
@ -1,16 +0,0 @@
@@ -1,16 +0,0 @@
|
||||
body: |
||||
|
||||
- type: textarea |
||||
attributes: |
||||
label: Question |
||||
validations: |
||||
required: true |
||||
|
||||
- type: markdown |
||||
attributes: |
||||
value: | |
||||
Note: If you are asking for help because you're having trouble doing something, provide enough informations to replicate the problem. In particular, include in the question: |
||||
|
||||
* the server version you are using |
||||
* precise instructions on how to replicate the problem |
||||
* server logs with setting `logLevel` set to `debug` |
||||
@ -1,72 +0,0 @@
@@ -1,72 +0,0 @@
|
||||
--- |
||||
name: Bug |
||||
about: Report a bug |
||||
title: '' |
||||
labels: '' |
||||
assignees: '' |
||||
|
||||
--- |
||||
|
||||
<!-- |
||||
To increase the chance of your issue getting fixed, open an issue FOR EACH problem. Please do not report multiple problems in a single issue, otherwise they'll probably never get ALL fixed. |
||||
|
||||
Please include all sections of this template into your issue, or it will be automatically closed. |
||||
--> |
||||
|
||||
## Which version are you using? |
||||
|
||||
v0.0.0 |
||||
|
||||
## Which operating system are you using? |
||||
|
||||
<!-- fill checkboxes with a x. Example: [x] Linux --> |
||||
|
||||
- [ ] Linux amd64 standard |
||||
- [ ] Linux amd64 Docker |
||||
- [ ] Linux arm64 standard |
||||
- [ ] Linux arm64 Docker |
||||
- [ ] Linux arm7 standard |
||||
- [ ] Linux arm7 Docker |
||||
- [ ] Linux arm6 standard |
||||
- [ ] Linux arm6 Docker |
||||
- [ ] Windows amd64 standard |
||||
- [ ] Windows amd64 Docker (WSL backend) |
||||
- [ ] macOS amd64 standard |
||||
- [ ] macOS amd64 Docker |
||||
- [ ] Other (please describe) |
||||
|
||||
## Describe the issue |
||||
|
||||
Description |
||||
|
||||
## Describe how to replicate the issue |
||||
|
||||
<!-- |
||||
the maintainers must be able to REPLICATE your issue to solve it - therefore, describe in a very detailed way how to replicate it. |
||||
--> |
||||
|
||||
1. start the server |
||||
2. publish with ... |
||||
3. read with ... |
||||
|
||||
## Did you attach the server logs? |
||||
|
||||
<!-- |
||||
Server logs are sometimes useful to identify the issue. |
||||
If you think this is the case, set the parameter 'logLevel' to 'debug' and attach the server logs. |
||||
--> |
||||
|
||||
yes / no |
||||
|
||||
## Did you attach a network dump? |
||||
|
||||
<!-- |
||||
If the bug arises when using MediaMTX with an external hardware or software, the most helpful content you can provide is a dump of the data exchanged between the server and the target (network dump), that can be generated in this way: |
||||
1) Download wireshark (https://www.wireshark.org/) |
||||
2) Start capturing on the interface used for exchanging packets (if the server and the external hardware or software are both installed on your pc, the interface is probably "loopback", otherwise it's the one of your network card) |
||||
3) Start the server and replicate the issue |
||||
4) Stop capturing, save the result in .pcap format |
||||
5) Attach |
||||
--> |
||||
|
||||
yes / no |
||||
@ -1,6 +0,0 @@
@@ -1,6 +0,0 @@
|
||||
blank_issues_enabled: false |
||||
|
||||
contact_links: |
||||
- name: Question |
||||
url: https://github.com/bluenviron/mediamtx/discussions/new?category=questions |
||||
about: Ask the community for help |
||||
@ -1,16 +0,0 @@
@@ -1,16 +0,0 @@
|
||||
--- |
||||
name: Feature Request |
||||
about: Share ideas for new features |
||||
title: '' |
||||
labels: '' |
||||
assignees: '' |
||||
|
||||
--- |
||||
|
||||
<!-- |
||||
Please create a request FOR EACH feature. Do not report multiple features in a single request, otherwise they'll probably never get ALL implemented. |
||||
--> |
||||
|
||||
## Describe the feature |
||||
|
||||
Description |
||||
@ -1,6 +0,0 @@
@@ -1,6 +0,0 @@
|
||||
version: 2 |
||||
updates: |
||||
- package-ecosystem: "gomod" |
||||
directory: "/" |
||||
schedule: |
||||
interval: "daily" |
||||
@ -1,65 +0,0 @@
@@ -1,65 +0,0 @@
|
||||
name: bump_hls_js |
||||
|
||||
on: |
||||
schedule: |
||||
- cron: '4 5 * * *' |
||||
workflow_dispatch: |
||||
|
||||
jobs: |
||||
bump_hls_js: |
||||
runs-on: ubuntu-20.04 |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
with: |
||||
fetch-depth: 0 |
||||
|
||||
- run: > |
||||
git config user.name mediamtx-bot |
||||
&& git config user.email bot@mediamtx |
||||
&& ((git checkout deps/hlsjs && git rebase ${GITHUB_REF_NAME}) || git checkout -b deps/hlsjs) |
||||
|
||||
- run: > |
||||
VERSION=$(curl -s https://api.github.com/repos/video-dev/hls.js/releases?per_page=1 | grep tag_name | sed 's/\s\+"tag_name": "\(.\+\)",/\1/') |
||||
&& echo $VERSION > internal/servers/hls/hlsjsdownloader/VERSION |
||||
&& echo VERSION=$VERSION >> $GITHUB_ENV |
||||
|
||||
- id: check_repo |
||||
run: > |
||||
echo "clean=$(git status --porcelain)" >> "$GITHUB_OUTPUT" |
||||
|
||||
- if: ${{ steps.check_repo.outputs.clean != '' }} |
||||
run: > |
||||
git reset ${GITHUB_REF_NAME} |
||||
&& git add . |
||||
&& git commit -m "bump hls.js to ${VERSION}" |
||||
&& git push --set-upstream origin deps/hlsjs --force |
||||
|
||||
- if: ${{ steps.check_repo.outputs.clean != '' }} |
||||
uses: actions/github-script@v6 |
||||
with: |
||||
github-token: ${{ secrets.GITHUB_TOKEN }} |
||||
script: | |
||||
const prs = await github.rest.pulls.list({ |
||||
owner: context.repo.owner, |
||||
repo: context.repo.repo, |
||||
head: `${context.repo.owner}:deps/hlsjs`, |
||||
state: 'open', |
||||
}); |
||||
|
||||
if (prs.data.length == 0) { |
||||
await github.rest.pulls.create({ |
||||
owner: context.repo.owner, |
||||
repo: context.repo.repo, |
||||
head: 'deps/hlsjs', |
||||
base: context.ref.slice('refs/heads/'.length), |
||||
title: `bump hls-js to ${process.env.VERSION}`, |
||||
}); |
||||
} else { |
||||
github.rest.pulls.update({ |
||||
owner: context.repo.owner, |
||||
repo: context.repo.repo, |
||||
pull_number: prs.data[0].number, |
||||
title: `bump hls-js to ${process.env.VERSION}`, |
||||
}); |
||||
} |
||||
@ -1,46 +0,0 @@
@@ -1,46 +0,0 @@
|
||||
name: code_lint |
||||
|
||||
on: |
||||
push: |
||||
branches: [ main ] |
||||
pull_request: |
||||
branches: [ main ] |
||||
|
||||
jobs: |
||||
golangci_lint: |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
|
||||
- uses: actions/setup-go@v3 |
||||
with: |
||||
go-version: "1.21" |
||||
|
||||
- run: touch internal/servers/hls/hls.min.js |
||||
|
||||
- uses: golangci/golangci-lint-action@v4 |
||||
with: |
||||
version: v1.55.0 |
||||
|
||||
mod_tidy: |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
|
||||
- uses: actions/setup-go@v2 |
||||
with: |
||||
go-version: "1.21" |
||||
|
||||
- run: | |
||||
go mod tidy |
||||
git diff --exit-code |
||||
|
||||
api_docs: |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
|
||||
- run: make apidocs-lint |
||||
@ -1,40 +0,0 @@
@@ -1,40 +0,0 @@
|
||||
name: code_test |
||||
|
||||
on: |
||||
push: |
||||
branches: [ main ] |
||||
pull_request: |
||||
branches: [ main ] |
||||
|
||||
jobs: |
||||
test_64: |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
|
||||
- run: make test |
||||
|
||||
- uses: codecov/codecov-action@v3 |
||||
with: |
||||
token: ${{ secrets.CODECOV_TOKEN }} |
||||
|
||||
test_32: |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
|
||||
- run: make test32 |
||||
|
||||
test_highlevel: |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
|
||||
- uses: actions/setup-go@v2 |
||||
with: |
||||
go-version: "1.21" |
||||
|
||||
- run: make test-highlevel-nodocker |
||||
@ -1,52 +0,0 @@
@@ -1,52 +0,0 @@
|
||||
name: issue_lint |
||||
|
||||
on: |
||||
issues: |
||||
types: [opened] |
||||
|
||||
jobs: |
||||
issue_lint: |
||||
runs-on: ubuntu-latest |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
|
||||
- uses: actions/github-script@v6 |
||||
with: |
||||
github-token: ${{ secrets.GITHUB_TOKEN }} |
||||
script: | |
||||
const fs = require('fs').promises; |
||||
|
||||
const getTitles = (str) => ( |
||||
[...str.matchAll(/^## (.*)/gm)].map((m) => m[0]) |
||||
); |
||||
|
||||
const titles = getTitles(context.payload.issue.body); |
||||
|
||||
for (let file of await fs.readdir('.github/ISSUE_TEMPLATE')) { |
||||
if (!file.endsWith('.md')) { |
||||
continue; |
||||
} |
||||
|
||||
const template = await fs.readFile(`.github/ISSUE_TEMPLATE/${file}`, 'utf-8'); |
||||
const templateTitles = getTitles(template); |
||||
|
||||
if (templateTitles.every((title) => titles.includes(title))) { |
||||
process.exit(0); |
||||
} |
||||
} |
||||
|
||||
await github.rest.issues.createComment({ |
||||
owner: context.issue.owner, |
||||
repo: context.issue.repo, |
||||
issue_number: context.issue.number, |
||||
body: 'This issue is being automatically closed because it does not follow the issue template.\n' |
||||
+ 'Please reopen the issue and make sure to include all sections of the template.', |
||||
}); |
||||
|
||||
await github.rest.issues.update({ |
||||
owner: context.issue.owner, |
||||
repo: context.issue.repo, |
||||
issue_number: context.issue.number, |
||||
state: 'closed', |
||||
}); |
||||
@ -1,52 +0,0 @@
@@ -1,52 +0,0 @@
|
||||
name: issue_lock |
||||
|
||||
on: |
||||
schedule: |
||||
- cron: '40 15 * * *' |
||||
workflow_dispatch: |
||||
|
||||
jobs: |
||||
issue_lock: |
||||
runs-on: ubuntu-latest |
||||
|
||||
steps: |
||||
- uses: actions/github-script@v6 |
||||
with: |
||||
github-token: ${{ secrets.GITHUB_TOKEN }} |
||||
script: | |
||||
const { repo: { owner, repo } } = context; |
||||
|
||||
const now = new Date(); |
||||
|
||||
for await (const res of github.paginate.iterator( |
||||
github.rest.issues.listForRepo, { |
||||
owner, |
||||
repo, |
||||
state: 'closed', |
||||
})) { |
||||
for (const issue of res.data) { |
||||
if (issue.locked) { |
||||
continue; |
||||
} |
||||
|
||||
if ((now - new Date(issue.updated_at)) < 1000*60*60*24*31*6) { |
||||
continue; |
||||
} |
||||
|
||||
if (!issue.pull_request) { |
||||
await github.rest.issues.createComment({ |
||||
owner, |
||||
repo, |
||||
issue_number: issue.number, |
||||
body: 'This issue is being locked automatically because it has been closed for more than 6 months.\n' |
||||
+ 'Please open a new issue in case you encounter a similar problem.', |
||||
}); |
||||
} |
||||
|
||||
github.rest.issues.lock({ |
||||
owner, |
||||
repo, |
||||
issue_number: issue.number, |
||||
}); |
||||
} |
||||
} |
||||
@ -1,18 +0,0 @@
@@ -1,18 +0,0 @@
|
||||
name: nightly_binaries |
||||
|
||||
on: |
||||
workflow_dispatch: |
||||
|
||||
jobs: |
||||
nightly_binaries: |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
|
||||
- run: make binaries |
||||
|
||||
- uses: actions/upload-artifact@v3 |
||||
with: |
||||
name: binaries |
||||
path: binaries |
||||
@ -1,147 +0,0 @@
@@ -1,147 +0,0 @@
|
||||
name: release |
||||
|
||||
on: |
||||
push: |
||||
tags: |
||||
- 'v*' |
||||
|
||||
jobs: |
||||
binaries: |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
|
||||
- run: make binaries |
||||
|
||||
- uses: actions/upload-artifact@v3 |
||||
with: |
||||
name: binaries |
||||
path: binaries |
||||
|
||||
github_release: |
||||
needs: binaries |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/download-artifact@v3 |
||||
with: |
||||
name: binaries |
||||
path: binaries |
||||
|
||||
- uses: actions/github-script@v6 |
||||
with: |
||||
github-token: ${{ secrets.GITHUB_TOKEN }} |
||||
script: | |
||||
const fs = require('fs').promises; |
||||
const { repo: { owner, repo } } = context; |
||||
|
||||
const currentRelease = context.ref.split('/')[2]; |
||||
|
||||
const res = await github.rest.repos.createRelease({ |
||||
owner, |
||||
repo, |
||||
tag_name: currentRelease, |
||||
name: currentRelease, |
||||
}); |
||||
const release_id = res.data.id; |
||||
|
||||
for (const name of await fs.readdir('./binaries/')) { |
||||
await github.rest.repos.uploadReleaseAsset({ |
||||
owner, |
||||
repo, |
||||
release_id, |
||||
name, |
||||
data: await fs.readFile(`./binaries/${name}`), |
||||
}); |
||||
} |
||||
|
||||
github_notify_issues: |
||||
needs: github_release |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/github-script@v6 |
||||
with: |
||||
github-token: ${{ secrets.GITHUB_TOKEN }} |
||||
script: | |
||||
const { repo: { owner, repo } } = context; |
||||
|
||||
const tags = await github.rest.repos.listTags({ |
||||
owner, |
||||
repo, |
||||
}); |
||||
|
||||
const curTag = tags.data[0]; |
||||
const prevTag = tags.data[1]; |
||||
|
||||
const diff = await github.rest.repos.compareCommitsWithBasehead({ |
||||
owner, |
||||
repo, |
||||
basehead: `${prevTag.commit.sha}...${curTag.commit.sha}`, |
||||
}); |
||||
|
||||
const issues = {}; |
||||
|
||||
for (const commit of diff.data.commits) { |
||||
for (const match of commit.commit.message.matchAll(/(^| |\()#([0-9]+)( |\)|$)/g)) { |
||||
issues[match[2]] = 1; |
||||
} |
||||
} |
||||
|
||||
for (const issue in issues) { |
||||
try { |
||||
await github.rest.issues.createComment({ |
||||
owner, |
||||
repo, |
||||
issue_number: parseInt(issue), |
||||
body: `This issue is mentioned in release ${curTag.name} 🚀\n` |
||||
+ `Check out the entire changelog by [clicking here](https://github.com/${owner}/${repo}/releases/tag/${curTag.name})`, |
||||
}); |
||||
} catch (exc) {} |
||||
} |
||||
|
||||
dockerhub: |
||||
needs: binaries |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
|
||||
- uses: actions/download-artifact@v3 |
||||
with: |
||||
name: binaries |
||||
path: binaries |
||||
|
||||
- run: make dockerhub |
||||
env: |
||||
DOCKER_USER: ${{ secrets.DOCKER_USER }} |
||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} |
||||
|
||||
dockerhub_legacy: |
||||
needs: dockerhub |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
|
||||
- run: make dockerhub-legacy |
||||
env: |
||||
DOCKER_USER_LEGACY: ${{ secrets.DOCKER_USER_LEGACY }} |
||||
DOCKER_PASSWORD_LEGACY: ${{ secrets.DOCKER_PASSWORD_LEGACY }} |
||||
|
||||
api_docs: |
||||
needs: binaries |
||||
runs-on: ubuntu-22.04 |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v3 |
||||
|
||||
- run: make apidocs-gen |
||||
|
||||
- run: mv apidocs/*.html apidocs/index.html |
||||
|
||||
- uses: peaceiris/actions-gh-pages@v3 |
||||
with: |
||||
github_token: ${{ secrets.GITHUB_TOKEN }} |
||||
publish_dir: ./apidocs |
||||
@ -1,5 +0,0 @@
@@ -1,5 +0,0 @@
|
||||
/tmp |
||||
/binaries |
||||
/coverage*.txt |
||||
/apidocs/*.html |
||||
**/hls.min.js |
||||
@ -1,71 +0,0 @@
@@ -1,71 +0,0 @@
|
||||
linters: |
||||
enable: |
||||
- asciicheck |
||||
- bidichk |
||||
- bodyclose |
||||
- dupl |
||||
- errorlint |
||||
- exportloopref |
||||
- gochecknoinits |
||||
- gocritic |
||||
- gofmt |
||||
- gofumpt |
||||
- lll |
||||
- misspell |
||||
- nilerr |
||||
- prealloc |
||||
- revive |
||||
- usestdlibvars |
||||
- unconvert |
||||
- tparallel |
||||
- wastedassign |
||||
- whitespace |
||||
|
||||
issues: |
||||
exclude-use-default: false |
||||
|
||||
linters-settings: |
||||
errcheck: |
||||
exclude-functions: |
||||
- io.Copy |
||||
- (io.Closer).Close |
||||
- (io.Writer).Write |
||||
- (hash.Hash).Write |
||||
- (net.Conn).Close |
||||
- (net.Conn).SetReadDeadline |
||||
- (net.Conn).SetWriteDeadline |
||||
- (*net.TCPConn).SetKeepAlive |
||||
- (*net.TCPConn).SetKeepAlivePeriod |
||||
- (*net.TCPConn).SetNoDelay |
||||
- (net.Listener).Close |
||||
- (net.PacketConn).Close |
||||
- (net.PacketConn).SetReadDeadline |
||||
- (net.PacketConn).SetWriteDeadline |
||||
- (net/http.ResponseWriter).Write |
||||
- (*net/http.Server).Serve |
||||
- (*net/http.Server).ServeTLS |
||||
- (*net/http.Server).Shutdown |
||||
- os.Chdir |
||||
- os.Mkdir |
||||
- os.MkdirAll |
||||
- os.Remove |
||||
- os.RemoveAll |
||||
- os.Setenv |
||||
- os.Unsetenv |
||||
- (*os.File).WriteString |
||||
- (*os.File).Close |
||||
- (github.com/datarhei/gosrt.Conn).Close |
||||
- (github.com/datarhei/gosrt.Conn).SetReadDeadline |
||||
- (github.com/datarhei/gosrt.Conn).SetWriteDeadline |
||||
- (*github.com/bluenviron/gortsplib/v4.Client).Close |
||||
- (*github.com/bluenviron/gortsplib/v4.Server).Close |
||||
- (*github.com/bluenviron/gortsplib/v4.ServerSession).Close |
||||
- (*github.com/bluenviron/gortsplib/v4.ServerStream).Close |
||||
- (*github.com/bluenviron/gortsplib/v4.ServerConn).Close |
||||
|
||||
govet: |
||||
enable-all: true |
||||
disable: |
||||
- fieldalignment |
||||
- reflectvaluecompare |
||||
- shadow |
||||
@ -1,21 +0,0 @@
@@ -1,21 +0,0 @@
|
||||
MIT License |
||||
|
||||
Copyright (c) 2019 aler9 |
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||
of this software and associated documentation files (the "Software"), to deal |
||||
in the Software without restriction, including without limitation the rights |
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||
copies of the Software, and to permit persons to whom the Software is |
||||
furnished to do so, subject to the following conditions: |
||||
|
||||
The above copyright notice and this permission notice shall be included in all |
||||
copies or substantial portions of the Software. |
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
||||
SOFTWARE. |
||||
@ -1,36 +0,0 @@
@@ -1,36 +0,0 @@
|
||||
BASE_IMAGE = golang:1.21-alpine3.19 |
||||
LINT_IMAGE = golangci/golangci-lint:v1.56.2 |
||||
NODE_IMAGE = node:20-alpine3.19 |
||||
ALPINE_IMAGE = alpine:3.19 |
||||
RPI32_IMAGE = balenalib/raspberry-pi:bullseye-run-20230712 |
||||
RPI64_IMAGE = balenalib/raspberrypi3-64:bullseye-run-20230530 |
||||
|
||||
.PHONY: $(shell ls) |
||||
|
||||
help: |
||||
@echo "usage: make [action]" |
||||
@echo "" |
||||
@echo "available actions:" |
||||
@echo "" |
||||
@echo " mod-tidy run go mod tidy" |
||||
@echo " format format source files" |
||||
@echo " test run tests" |
||||
@echo " test32 run tests on a 32-bit system" |
||||
@echo " test-highlevel run high-level tests" |
||||
@echo " lint run linters" |
||||
@echo " bench NAME=n run bench environment" |
||||
@echo " run run app" |
||||
@echo " apidocs-lint run api docs linters" |
||||
@echo " apidocs-gen generate api docs HTML" |
||||
@echo " binaries build binaries for all platforms" |
||||
@echo " dockerhub build and push images to Docker Hub" |
||||
@echo " dockerhub-legacy build and push images to Docker Hub (legacy)" |
||||
@echo "" |
||||
|
||||
blank := |
||||
define NL |
||||
|
||||
$(blank) |
||||
endef |
||||
|
||||
include scripts/*.mk |
||||
@ -1,3 +0,0 @@
@@ -1,3 +0,0 @@
|
||||
# Security Policy |
||||
|
||||
Vulnerabilities can be reported privately by using the [Security Advisory](https://github.com/bluenviron/mediamtx/security/advisories/new) feature of GitHub. |
||||
@ -1,20 +0,0 @@
@@ -1,20 +0,0 @@
|
||||
ARG BASE_IMAGE |
||||
FROM $BASE_IMAGE |
||||
|
||||
RUN apk add --no-cache \ |
||||
ffmpeg |
||||
|
||||
RUN wget -O /video.mkv http://www.larmoire.info/jellyfish/media/jellyfish-10-mbps-hd-h264.mkv |
||||
|
||||
WORKDIR /s |
||||
|
||||
COPY go.mod go.sum ./ |
||||
RUN go mod download |
||||
|
||||
COPY . ./ |
||||
RUN go build -o /mediamtx . |
||||
|
||||
COPY bench/proxy/start.sh / |
||||
RUN chmod +x /start.sh |
||||
|
||||
ENTRYPOINT [ "/start.sh" ] |
||||
@ -1,51 +0,0 @@
@@ -1,51 +0,0 @@
|
||||
#!/bin/sh -e |
||||
|
||||
PROXY_COUNT=50 |
||||
PROXY_PROTOCOL=tcp |
||||
|
||||
##################################################### |
||||
# source |
||||
|
||||
CONF="" |
||||
CONF="${CONF}rtmp: no\n" |
||||
CONF="${CONF}hls: no\n" |
||||
CONF="${CONF}webrtc: no\n" |
||||
CONF="${CONF}srt: no\n" |
||||
CONF="${CONF}rtspAddress: :8555\n" |
||||
CONF="${CONF}rtpAddress: :8002\n" |
||||
CONF="${CONF}rtcpAddress: :8003\n" |
||||
CONF="${CONF}paths:\n" |
||||
CONF="${CONF} all_others:\n" |
||||
echo -e "$CONF" > /source.conf |
||||
|
||||
/mediamtx /source.conf & |
||||
|
||||
sleep 1 |
||||
|
||||
ffmpeg -hide_banner -loglevel error \ |
||||
-re -stream_loop -1 -i /video.mkv -c copy -f rtsp rtsp://localhost:8555/source & |
||||
|
||||
sleep 1 |
||||
|
||||
##################################################### |
||||
# proxy |
||||
|
||||
CONF="" |
||||
CONF="${CONF}rtmp: no\n" |
||||
CONF="${CONF}hls: no\n" |
||||
CONF="${CONF}webrtc: no\n" |
||||
CONF="${CONF}srt: no\n" |
||||
CONF="${CONF}pprof: yes\n" |
||||
CONF="${CONF}paths:\n" |
||||
for i in $(seq 1 $PROXY_COUNT); do |
||||
CONF="${CONF} proxy$i:\n" |
||||
CONF="${CONF} source: rtsp://localhost:8555/source\n" |
||||
CONF="${CONF} rtspTransport: $PROXY_PROTOCOL\n" |
||||
done |
||||
echo -e "$CONF" > /proxy.conf |
||||
|
||||
/mediamtx /proxy.conf & |
||||
|
||||
sleep 5 |
||||
|
||||
go tool pprof -text http://localhost:9999/debug/pprof/profile?seconds=15 |
||||
@ -1,20 +0,0 @@
@@ -1,20 +0,0 @@
|
||||
ARG BASE_IMAGE |
||||
FROM $BASE_IMAGE |
||||
|
||||
RUN apk add --no-cache \ |
||||
ffmpeg |
||||
|
||||
RUN wget -O /video.mkv http://www.larmoire.info/jellyfish/media/jellyfish-10-mbps-hd-h264.mkv |
||||
|
||||
WORKDIR /s |
||||
|
||||
COPY go.mod go.sum ./ |
||||
RUN go mod download |
||||
|
||||
COPY . ./ |
||||
RUN go build -o /mediamtx . |
||||
|
||||
COPY bench/publish/start.sh / |
||||
RUN chmod +x /start.sh |
||||
|
||||
ENTRYPOINT [ "/start.sh" ] |
||||
@ -1,27 +0,0 @@
@@ -1,27 +0,0 @@
|
||||
#!/bin/sh -e |
||||
|
||||
PUBLISHER_COUNT=50 |
||||
PUBLISHER_PROTOCOL=tcp |
||||
|
||||
##################################################### |
||||
# publishers |
||||
|
||||
CONF="" |
||||
CONF="${CONF}pprof: yes\n" |
||||
CONF="${CONF}paths:\n" |
||||
CONF="${CONF} all_others:\n" |
||||
echo -e "$CONF" > /source.conf |
||||
|
||||
/mediamtx /source.conf & |
||||
|
||||
sleep 1 |
||||
|
||||
for i in $(seq 1 $PUBLISHER_COUNT); do |
||||
ffmpeg -hide_banner -loglevel error \ |
||||
-re -stream_loop -1 -i /video.mkv -c copy -f rtsp \ |
||||
-rtsp_transport $PUBLISHER_PROTOCOL rtsp://localhost:8554/source$i & |
||||
done |
||||
|
||||
sleep 5 |
||||
|
||||
go tool pprof -text http://localhost:9999/debug/pprof/profile?seconds=15 |
||||
@ -1,20 +0,0 @@
@@ -1,20 +0,0 @@
|
||||
ARG BASE_IMAGE |
||||
FROM $BASE_IMAGE |
||||
|
||||
RUN apk add --no-cache \ |
||||
ffmpeg |
||||
|
||||
RUN wget -O /video.mkv http://www.larmoire.info/jellyfish/media/jellyfish-10-mbps-hd-h264.mkv |
||||
|
||||
WORKDIR /s |
||||
|
||||
COPY go.mod go.sum ./ |
||||
RUN go mod download |
||||
|
||||
COPY . ./ |
||||
RUN go build -o /mediamtx . |
||||
|
||||
COPY bench/read/start.sh / |
||||
RUN chmod +x /start.sh |
||||
|
||||
ENTRYPOINT [ "/start.sh" ] |
||||
@ -1,34 +0,0 @@
@@ -1,34 +0,0 @@
|
||||
#!/bin/sh -e |
||||
|
||||
READER_COUNT=50 |
||||
READER_PROTOCOL=tcp |
||||
|
||||
##################################################### |
||||
# source |
||||
|
||||
CONF="" |
||||
CONF="${CONF}pprof: yes\n" |
||||
CONF="${CONF}paths:\n" |
||||
CONF="${CONF} all_others:\n" |
||||
echo -e "$CONF" > /source.conf |
||||
|
||||
/mediamtx /source.conf & |
||||
|
||||
sleep 1 |
||||
|
||||
ffmpeg -re -stream_loop -1 -i /video.mkv -c copy -f rtsp rtsp://localhost:8554/source & |
||||
|
||||
sleep 1 |
||||
|
||||
##################################################### |
||||
# readers |
||||
|
||||
for i in $(seq 1 $READER_COUNT); do |
||||
ffmpeg -hide_banner -loglevel error \ |
||||
-rtsp_transport $READER_PROTOCOL \ |
||||
-i rtsp://localhost:8554/source -c copy -f null - & |
||||
done |
||||
|
||||
sleep 5 |
||||
|
||||
go tool pprof -text http://localhost:9999/debug/pprof/profile?seconds=15 |
||||
@ -1,82 +0,0 @@
@@ -1,82 +0,0 @@
|
||||
module github.com/bluenviron/mediamtx |
||||
|
||||
go 1.21 |
||||
|
||||
require ( |
||||
code.cloudfoundry.org/bytefmt v0.0.0 |
||||
github.com/MicahParks/keyfunc/v3 v3.3.2 |
||||
github.com/abema/go-mp4 v1.2.0 |
||||
github.com/alecthomas/kong v0.9.0 |
||||
github.com/bluenviron/gohlslib v1.3.0 |
||||
github.com/bluenviron/gortsplib/v4 v4.8.0 |
||||
github.com/bluenviron/mediacommon v1.9.2 |
||||
github.com/datarhei/gosrt v0.6.0 |
||||
github.com/fsnotify/fsnotify v1.7.0 |
||||
github.com/gin-gonic/gin v1.9.1 |
||||
github.com/golang-jwt/jwt/v5 v5.2.1 |
||||
github.com/google/uuid v1.6.0 |
||||
github.com/gookit/color v1.5.4 |
||||
github.com/gorilla/websocket v1.5.1 |
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 |
||||
github.com/matthewhartstonge/argon2 v1.0.0 |
||||
github.com/pion/ice/v2 v2.3.11 |
||||
github.com/pion/interceptor v0.1.27 |
||||
github.com/pion/logging v0.2.2 |
||||
github.com/pion/rtcp v1.2.14 |
||||
github.com/pion/rtp v1.8.5 |
||||
github.com/pion/sdp/v3 v3.0.9 |
||||
github.com/pion/webrtc/v3 v3.2.22 |
||||
github.com/stretchr/testify v1.9.0 |
||||
golang.org/x/crypto v0.22.0 |
||||
golang.org/x/sys v0.19.0 |
||||
golang.org/x/term v0.19.0 |
||||
gopkg.in/yaml.v2 v2.4.0 |
||||
) |
||||
|
||||
require ( |
||||
github.com/MicahParks/jwkset v0.5.17 // indirect |
||||
github.com/asticode/go-astikit v0.30.0 // indirect |
||||
github.com/asticode/go-astits v1.13.0 // indirect |
||||
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c // indirect |
||||
github.com/bytedance/sonic v1.9.1 // indirect |
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect |
||||
github.com/davecgh/go-spew v1.1.1 // indirect |
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect |
||||
github.com/gin-contrib/sse v0.1.0 // indirect |
||||
github.com/go-playground/locales v0.14.1 // indirect |
||||
github.com/go-playground/universal-translator v0.18.1 // indirect |
||||
github.com/go-playground/validator/v10 v10.14.0 // indirect |
||||
github.com/goccy/go-json v0.10.2 // indirect |
||||
github.com/json-iterator/go v1.1.12 // indirect |
||||
github.com/klauspost/cpuid/v2 v2.2.4 // indirect |
||||
github.com/leodido/go-urn v1.2.4 // indirect |
||||
github.com/mattn/go-isatty v0.0.19 // indirect |
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect |
||||
github.com/modern-go/reflect2 v1.0.2 // indirect |
||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect |
||||
github.com/pion/datachannel v1.5.5 // indirect |
||||
github.com/pion/dtls/v2 v2.2.7 // indirect |
||||
github.com/pion/mdns v0.0.9 // indirect |
||||
github.com/pion/randutil v0.1.0 // indirect |
||||
github.com/pion/sctp v1.8.8 // indirect |
||||
github.com/pion/srtp/v2 v2.0.18 // indirect |
||||
github.com/pion/stun v0.6.1 // indirect |
||||
github.com/pion/transport/v2 v2.2.3 // indirect |
||||
github.com/pion/turn/v2 v2.1.3 // indirect |
||||
github.com/pmezard/go-difflib v1.0.0 // indirect |
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect |
||||
github.com/ugorji/go/codec v1.2.11 // indirect |
||||
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect |
||||
golang.org/x/arch v0.3.0 // indirect |
||||
golang.org/x/net v0.21.0 // indirect |
||||
golang.org/x/text v0.14.0 // indirect |
||||
golang.org/x/time v0.5.0 // indirect |
||||
google.golang.org/protobuf v1.33.0 // indirect |
||||
gopkg.in/yaml.v3 v3.0.1 // indirect |
||||
) |
||||
|
||||
replace code.cloudfoundry.org/bytefmt => github.com/cloudfoundry/bytefmt v0.0.0-20211005130812-5bb3c17173e5 |
||||
|
||||
replace github.com/pion/ice/v2 => github.com/aler9/ice/v2 v2.0.0-20231112223552-32d34dfcf3a1 |
||||
|
||||
replace github.com/pion/webrtc/v3 => github.com/aler9/webrtc/v3 v3.0.0-20231112223655-e402ed2689c6 |
||||
@ -1,338 +0,0 @@
@@ -1,338 +0,0 @@
|
||||
github.com/MicahParks/jwkset v0.5.17 h1:DrcwyKwSP5adD0G2XJTvDulnWXjD6gbjROMgMXDbkKA= |
||||
github.com/MicahParks/jwkset v0.5.17/go.mod h1:q8ptTGn/Z9c4MwbcfeCDssADeVQb3Pk7PnVxrvi+2QY= |
||||
github.com/MicahParks/keyfunc/v3 v3.3.2 h1:YTtwc4dxalBZKFqHhqctBWN6VhbLdGhywmne9u5RQVM= |
||||
github.com/MicahParks/keyfunc/v3 v3.3.2/go.mod h1:GJBeEjnv25OnD9y2OYQa7ELU6gYahEMBNXINZb+qm34= |
||||
github.com/abema/go-mp4 v1.2.0 h1:gi4X8xg/m179N/J15Fn5ugywN9vtI6PLk6iLldHGLAk= |
||||
github.com/abema/go-mp4 v1.2.0/go.mod h1:vPl9t5ZK7K0x68jh12/+ECWBCXoWuIDtNgPtU2f04ws= |
||||
github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU= |
||||
github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= |
||||
github.com/alecthomas/kong v0.9.0 h1:G5diXxc85KvoV2f0ZRVuMsi45IrBgx9zDNGNj165aPA= |
||||
github.com/alecthomas/kong v0.9.0/go.mod h1:Y47y5gKfHp1hDc7CH7OeXgLIpp+Q2m1Ni0L5s3bI8Os= |
||||
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= |
||||
github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= |
||||
github.com/aler9/ice/v2 v2.0.0-20231112223552-32d34dfcf3a1 h1:fD6eZt+3/t8bzFn6ZZA2eP63xBP06v3EPfPJu8DO8ys= |
||||
github.com/aler9/ice/v2 v2.0.0-20231112223552-32d34dfcf3a1/go.mod h1:lT3kv5uUIlHfXHU/ZRD7uKD/ufM202+eTa3C/umgGf4= |
||||
github.com/aler9/webrtc/v3 v3.0.0-20231112223655-e402ed2689c6 h1:wMd3D1mLghoYYh31STig8Kwm2qi8QyQKUy09qUUZrVw= |
||||
github.com/aler9/webrtc/v3 v3.0.0-20231112223655-e402ed2689c6/go.mod h1:1CaT2fcZzZ6VZA+O1i9yK2DU4EOcXVvSbWG9pr5jefs= |
||||
github.com/asticode/go-astikit v0.30.0 h1:DkBkRQRIxYcknlaU7W7ksNfn4gMFsB0tqMJflxkRsZA= |
||||
github.com/asticode/go-astikit v0.30.0/go.mod h1:h4ly7idim1tNhaVkdVBeXQZEE3L0xblP7fCWbgwipF0= |
||||
github.com/asticode/go-astits v1.13.0 h1:XOgkaadfZODnyZRR5Y0/DWkA9vrkLLPLeeOvDwfKZ1c= |
||||
github.com/asticode/go-astits v1.13.0/go.mod h1:QSHmknZ51pf6KJdHKZHJTLlMegIrhega3LPWz3ND/iI= |
||||
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c h1:8XZeJrs4+ZYhJeJ2aZxADI2tGADS15AzIF8MQ8XAhT4= |
||||
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c/go.mod h1:x1vxHcL/9AVzuk5HOloOEPrtJY0MaalYr78afXZ+pWI= |
||||
github.com/bluenviron/gohlslib v1.3.0 h1:I9t1Nba6VJKg5rLoXSzQFPkZZYBUwBqCU2Divp0oU2I= |
||||
github.com/bluenviron/gohlslib v1.3.0/go.mod h1:wD8ysO6HB90d17sxoIQXGHINo2KYj/mZirMnPtKLJZQ= |
||||
github.com/bluenviron/gortsplib/v4 v4.8.0 h1:nvFp6rHALcSep3G9uBFI0uogS9stVZLNq/92TzGZdQg= |
||||
github.com/bluenviron/gortsplib/v4 v4.8.0/go.mod h1:+d+veuyvhvikUNp0GRQkk6fEbd/DtcXNidMRm7FQRaA= |
||||
github.com/bluenviron/mediacommon v1.9.2 h1:EHcvoC5YMXRcFE010bTNf07ZiSlB/e/AdZyG7GsEYN0= |
||||
github.com/bluenviron/mediacommon v1.9.2/go.mod h1:lt8V+wMyPw8C69HAqDWV5tsAwzN9u2Z+ca8B6C//+n0= |
||||
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= |
||||
github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= |
||||
github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= |
||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= |
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= |
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= |
||||
github.com/cloudfoundry/bytefmt v0.0.0-20211005130812-5bb3c17173e5 h1:xB7KkA98BcUdzVcwyZxb5R0FGIHxNPHgZOzkjPEY5gM= |
||||
github.com/cloudfoundry/bytefmt v0.0.0-20211005130812-5bb3c17173e5/go.mod h1:v4VVB6oBMz/c9fRY6vZrwr5xKRWOH5NPDjQZlPk0Gbs= |
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= |
||||
github.com/datarhei/gosrt v0.6.0 h1:HrrXAw90V78ok4WMIhX6se1aTHPCn82Sg2hj+PhdmGc= |
||||
github.com/datarhei/gosrt v0.6.0/go.mod h1:fsOWdLSHUHShHjgi/46h6wjtdQrtnSdAQFnlas8ONxs= |
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= |
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= |
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= |
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= |
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= |
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= |
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= |
||||
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= |
||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= |
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= |
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= |
||||
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= |
||||
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= |
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= |
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= |
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= |
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= |
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= |
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= |
||||
github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= |
||||
github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= |
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= |
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= |
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= |
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= |
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= |
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= |
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= |
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= |
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= |
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= |
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= |
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= |
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= |
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= |
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= |
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= |
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= |
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= |
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= |
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= |
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= |
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= |
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= |
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= |
||||
github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0= |
||||
github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w= |
||||
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= |
||||
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= |
||||
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= |
||||
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= |
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= |
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= |
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= |
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= |
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= |
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= |
||||
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= |
||||
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= |
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= |
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= |
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= |
||||
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= |
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= |
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= |
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= |
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= |
||||
github.com/matthewhartstonge/argon2 v1.0.0 h1:e65fkae6O8Na6YTy2HAccUbXR+GQHOnpQxeWGqWCRIw= |
||||
github.com/matthewhartstonge/argon2 v1.0.0/go.mod h1:Fm4FHZxdxCM6hg21Jkz3YZVKnU7VnTlqDQ3ghS/Myok= |
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= |
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= |
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= |
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= |
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= |
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= |
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= |
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= |
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= |
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= |
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= |
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= |
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= |
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= |
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= |
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= |
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= |
||||
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= |
||||
github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= |
||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= |
||||
github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e h1:s2RNOM/IGdY0Y6qfTeUKhDawdHDpK9RGBdx80qN4Ttw= |
||||
github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e/go.mod h1:nBdnFKj15wFbf94Rwfq4m30eAcyY9V/IyKAGQFtqkW0= |
||||
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= |
||||
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= |
||||
github.com/pion/datachannel v1.5.5 h1:10ef4kwdjije+M9d7Xm9im2Y3O6A6ccQb0zcqZcJew8= |
||||
github.com/pion/datachannel v1.5.5/go.mod h1:iMz+lECmfdCMqFRhXhcA/219B0SQlbpoR2V118yimL0= |
||||
github.com/pion/dtls/v2 v2.2.7 h1:cSUBsETxepsCSFSxC3mc/aDo14qQLMSL+O6IjG28yV8= |
||||
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= |
||||
github.com/pion/interceptor v0.1.25/go.mod h1:wkbPYAak5zKsfpVDYMtEfWEy8D4zL+rpxCxPImLOg3Y= |
||||
github.com/pion/interceptor v0.1.27 h1:mZ01OiGiukwRxezmDGzYjjokCVlDOk4T6BfaL5qrtGo= |
||||
github.com/pion/interceptor v0.1.27/go.mod h1:/vVaqLwDjGv4GRbgmChIKZIT5EXFDijwmj4WmIYy9bI= |
||||
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= |
||||
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= |
||||
github.com/pion/mdns v0.0.7/go.mod h1:4iP2UbeFhLI/vWju/bw6ZfwjJzk0z8DNValjGxR/dD8= |
||||
github.com/pion/mdns v0.0.9 h1:7Ue5KZsqq8EuqStnpPWV33vYYEH0+skdDN5L7EiEsI4= |
||||
github.com/pion/mdns v0.0.9/go.mod h1:2JA5exfxwzXiCihmxpTKgFUpiQws2MnipoPK09vecIc= |
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= |
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= |
||||
github.com/pion/rtcp v1.2.10/go.mod h1:ztfEwXZNLGyF1oQDttz/ZKIBaeeg/oWbRYqzBM9TL1I= |
||||
github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= |
||||
github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= |
||||
github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= |
||||
github.com/pion/rtp v1.8.2/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= |
||||
github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= |
||||
github.com/pion/rtp v1.8.4/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= |
||||
github.com/pion/rtp v1.8.5 h1:uYzINfaK+9yWs7r537z/Rc1SvT8ILjBcmDOpJcTB+OU= |
||||
github.com/pion/rtp v1.8.5/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= |
||||
github.com/pion/sctp v1.8.5/go.mod h1:SUFFfDpViyKejTAdwD1d/HQsCu+V/40cCs2nZIvC3s0= |
||||
github.com/pion/sctp v1.8.8 h1:5EdnnKI4gpyR1a1TwbiS/wxEgcUWBHsc7ILAjARJB+U= |
||||
github.com/pion/sctp v1.8.8/go.mod h1:igF9nZBrjh5AtmKc7U30jXltsFHicFCXSmWA2GWRaWs= |
||||
github.com/pion/sdp/v3 v3.0.6/go.mod h1:iiFWFpQO8Fy3S5ldclBkpXqmWy02ns78NOKoLLL0YQw= |
||||
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= |
||||
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= |
||||
github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo= |
||||
github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= |
||||
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= |
||||
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= |
||||
github.com/pion/transport v0.14.1 h1:XSM6olwW+o8J4SCmOBb/BpwZypkHeyM0PGFCxNQBr40= |
||||
github.com/pion/transport v0.14.1/go.mod h1:4tGmbk00NeYA3rUa9+n+dzCCoKkcy3YlYb99Jn2fNnI= |
||||
github.com/pion/transport/v2 v2.0.0/go.mod h1:HS2MEBJTwD+1ZI2eSXSvHJx/HnzQqRy2/LXxt6eVMHc= |
||||
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= |
||||
github.com/pion/transport/v2 v2.2.3 h1:XcOE3/x41HOSKbl1BfyY1TF1dERx7lVvlMCbXU7kfvA= |
||||
github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= |
||||
github.com/pion/transport/v3 v3.0.1 h1:gDTlPJwROfSfz6QfSi0ZmeCSkFcnWWiiR9ES0ouANiM= |
||||
github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= |
||||
github.com/pion/turn/v2 v2.1.2/go.mod h1:1kjnPkBcex3dhCU2Am+AAmxDcGhLX3WnMfmkNpvSTQU= |
||||
github.com/pion/turn/v2 v2.1.3 h1:pYxTVWG2gpC97opdRc5IGsQ1lJ9O/IlNhkzj7MMrGAA= |
||||
github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= |
||||
github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE= |
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= |
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= |
||||
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= |
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= |
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= |
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= |
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= |
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= |
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= |
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= |
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= |
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= |
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= |
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= |
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= |
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= |
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= |
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= |
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= |
||||
github.com/sunfish-shogi/bufseekio v0.0.0-20210207115823-a4185644b365/go.mod h1:dEzdXgvImkQ3WLI+0KQpmEx8T/C/ma9KeS3AfmU899I= |
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= |
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= |
||||
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= |
||||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= |
||||
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8= |
||||
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= |
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= |
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= |
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= |
||||
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= |
||||
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= |
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= |
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= |
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= |
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= |
||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= |
||||
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= |
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= |
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= |
||||
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= |
||||
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= |
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= |
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= |
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= |
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= |
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= |
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= |
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= |
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= |
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= |
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= |
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= |
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= |
||||
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= |
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= |
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= |
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= |
||||
golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= |
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= |
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= |
||||
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= |
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= |
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= |
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= |
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= |
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= |
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= |
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= |
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= |
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
||||
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= |
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= |
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= |
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= |
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= |
||||
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= |
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= |
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= |
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= |
||||
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= |
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= |
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= |
||||
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= |
||||
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= |
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= |
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= |
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= |
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= |
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= |
||||
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= |
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= |
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= |
||||
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= |
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= |
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= |
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= |
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= |
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= |
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= |
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= |
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= |
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= |
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= |
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= |
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= |
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= |
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= |
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= |
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= |
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= |
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= |
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= |
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= |
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= |
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= |
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= |
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= |
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= |
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= |
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= |
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= |
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= |
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= |
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= |
||||
gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= |
||||
gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= |
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= |
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= |
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= |
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= |
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= |
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= |
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= |
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= |
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= |
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= |
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= |
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= |
||||
File diff suppressed because one or more lines are too long
@@ -1,701 +0,0 @@
|
||||
package api |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/json" |
||||
"io" |
||||
"net/http" |
||||
"net/url" |
||||
"os" |
||||
"path/filepath" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/auth" |
||||
"github.com/bluenviron/mediamtx/internal/conf" |
||||
"github.com/bluenviron/mediamtx/internal/logger" |
||||
"github.com/bluenviron/mediamtx/internal/test" |
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
type testParent struct{} |
||||
|
||||
func (testParent) Log(_ logger.Level, _ string, _ ...interface{}) { |
||||
} |
||||
|
||||
func (testParent) APIConfigSet(_ *conf.Conf) {} |
||||
|
||||
func tempConf(t *testing.T, cnt string) *conf.Conf { |
||||
fi, err := test.CreateTempFile([]byte(cnt)) |
||||
require.NoError(t, err) |
||||
defer os.Remove(fi) |
||||
|
||||
cnf, _, err := conf.Load(fi, nil) |
||||
require.NoError(t, err) |
||||
|
||||
return cnf |
||||
} |
||||
|
||||
func httpRequest(t *testing.T, hc *http.Client, method string, ur string, in interface{}, out interface{}) { |
||||
buf := func() io.Reader { |
||||
if in == nil { |
||||
return nil |
||||
} |
||||
|
||||
byts, err := json.Marshal(in) |
||||
require.NoError(t, err) |
||||
|
||||
return bytes.NewBuffer(byts) |
||||
}() |
||||
|
||||
req, err := http.NewRequest(method, ur, buf) |
||||
require.NoError(t, err) |
||||
|
||||
res, err := hc.Do(req) |
||||
require.NoError(t, err) |
||||
defer res.Body.Close() |
||||
|
||||
if res.StatusCode != http.StatusOK { |
||||
t.Errorf("bad status code: %d", res.StatusCode) |
||||
} |
||||
|
||||
if out == nil { |
||||
return |
||||
} |
||||
|
||||
err = json.NewDecoder(res.Body).Decode(out) |
||||
require.NoError(t, err) |
||||
} |
||||
|
||||
func checkError(t *testing.T, msg string, body io.Reader) { |
||||
var resErr map[string]interface{} |
||||
err := json.NewDecoder(body).Decode(&resErr) |
||||
require.NoError(t, err) |
||||
require.Equal(t, map[string]interface{}{"error": msg}, resErr) |
||||
} |
||||
|
||||
func TestPaginate(t *testing.T) { |
||||
items := make([]int, 5) |
||||
for i := 0; i < 5; i++ { |
||||
items[i] = i |
||||
} |
||||
|
||||
pageCount, err := paginate(&items, "1", "1") |
||||
require.NoError(t, err) |
||||
require.Equal(t, 5, pageCount) |
||||
require.Equal(t, []int{1}, items) |
||||
|
||||
items = make([]int, 5) |
||||
for i := 0; i < 5; i++ { |
||||
items[i] = i |
||||
} |
||||
|
||||
pageCount, err = paginate(&items, "3", "2") |
||||
require.NoError(t, err) |
||||
require.Equal(t, 2, pageCount) |
||||
require.Equal(t, []int{}, items) |
||||
|
||||
items = make([]int, 6) |
||||
for i := 0; i < 6; i++ { |
||||
items[i] = i |
||||
} |
||||
|
||||
pageCount, err = paginate(&items, "4", "1") |
||||
require.NoError(t, err) |
||||
require.Equal(t, 2, pageCount) |
||||
require.Equal(t, []int{4, 5}, items) |
||||
} |
||||
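TestPaginate above pins down the contract of the unexported paginate helper even though its implementation is not part of this diff. The sketch below is a hypothetical reconstruction inferred only from those assertions (the name paginateSketch, the int-only slice, and the exact signature are assumptions, not the real code); the arguments arrive as strings because they come from query parameters, and itemsPerPage is assumed to be positive.

package api

import "strconv"

// paginateSketch trims *items to the requested zero-based page in place and
// returns the total number of pages, mirroring the behaviour asserted above.
// Hypothetical sketch, not the actual implementation.
func paginateSketch(items *[]int, itemsPerPageRaw string, pageRaw string) (int, error) {
	itemsPerPage, err := strconv.Atoi(itemsPerPageRaw)
	if err != nil {
		return 0, err
	}

	page, err := strconv.Atoi(pageRaw)
	if err != nil {
		return 0, err
	}

	// total pages, rounding up
	pageCount := (len(*items) + itemsPerPage - 1) / itemsPerPage

	// clamp the requested window to the slice bounds
	low := page * itemsPerPage
	if low > len(*items) {
		low = len(*items)
	}
	high := low + itemsPerPage
	if high > len(*items) {
		high = len(*items)
	}

	*items = (*items)[low:high]
	return pageCount, nil
}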
|
||||
var authManager = &auth.Manager{ |
||||
Method: conf.AuthMethodInternal, |
||||
InternalUsers: []conf.AuthInternalUser{ |
||||
{ |
||||
User: "myuser", |
||||
Pass: "mypass", |
||||
Permissions: []conf.AuthInternalUserPermission{ |
||||
{ |
||||
Action: conf.AuthActionAPI, |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
RTSPAuthMethods: nil, |
||||
} |
||||
|
||||
func TestConfigGlobalGet(t *testing.T) { |
||||
cnf := tempConf(t, "api: yes\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err := api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
var out map[string]interface{} |
||||
httpRequest(t, hc, http.MethodGet, "http://myuser:mypass@localhost:9997/v3/config/global/get", nil, &out) |
||||
require.Equal(t, true, out["api"]) |
||||
} |
||||
|
||||
func TestConfigGlobalPatch(t *testing.T) { |
||||
cnf := tempConf(t, "api: yes\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err := api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
httpRequest(t, hc, http.MethodPatch, "http://myuser:mypass@localhost:9997/v3/config/global/patch", |
||||
map[string]interface{}{ |
||||
"rtmp": false, |
||||
"readTimeout": "7s", |
||||
"protocols": []string{"tcp"}, |
||||
"readBufferCount": 4096, // test setting a deprecated parameter
|
||||
}, nil) |
||||
|
||||
time.Sleep(500 * time.Millisecond) |
||||
|
||||
var out map[string]interface{} |
||||
httpRequest(t, hc, http.MethodGet, "http://myuser:mypass@localhost:9997/v3/config/global/get", nil, &out) |
||||
require.Equal(t, false, out["rtmp"]) |
||||
require.Equal(t, "7s", out["readTimeout"]) |
||||
require.Equal(t, []interface{}{"tcp"}, out["protocols"]) |
||||
require.Equal(t, float64(4096), out["readBufferCount"]) |
||||
} |
||||
|
||||
func TestAPIConfigGlobalPatchUnknownField(t *testing.T) { //nolint:dupl |
||||
cnf := tempConf(t, "api: yes\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err := api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
b := map[string]interface{}{ |
||||
"test": "asd", |
||||
} |
||||
|
||||
byts, err := json.Marshal(b) |
||||
require.NoError(t, err) |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
req, err := http.NewRequest(http.MethodPatch, "http://myuser:mypass@localhost:9997/v3/config/global/patch", |
||||
bytes.NewReader(byts)) |
||||
require.NoError(t, err) |
||||
|
||||
res, err := hc.Do(req) |
||||
require.NoError(t, err) |
||||
defer res.Body.Close() |
||||
|
||||
require.Equal(t, http.StatusBadRequest, res.StatusCode) |
||||
checkError(t, "json: unknown field \"test\"", res.Body) |
||||
} |
||||
|
||||
func TestAPIConfigPathDefaultsGet(t *testing.T) { |
||||
cnf := tempConf(t, "api: yes\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err := api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
var out map[string]interface{} |
||||
httpRequest(t, hc, http.MethodGet, "http://myuser:mypass@localhost:9997/v3/config/pathdefaults/get", nil, &out) |
||||
require.Equal(t, "publisher", out["source"]) |
||||
} |
||||
|
||||
func TestAPIConfigPathDefaultsPatch(t *testing.T) { |
||||
cnf := tempConf(t, "api: yes\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err := api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
httpRequest(t, hc, http.MethodPatch, "http://myuser:mypass@localhost:9997/v3/config/pathdefaults/patch", |
||||
map[string]interface{}{ |
||||
"readUser": "myuser", |
||||
"readPass": "mypass", |
||||
}, nil) |
||||
|
||||
time.Sleep(500 * time.Millisecond) |
||||
|
||||
var out map[string]interface{} |
||||
httpRequest(t, hc, http.MethodGet, "http://myuser:mypass@localhost:9997/v3/config/pathdefaults/get", nil, &out) |
||||
require.Equal(t, "myuser", out["readUser"]) |
||||
require.Equal(t, "mypass", out["readPass"]) |
||||
} |
||||
|
||||
func TestAPIConfigPathsList(t *testing.T) { |
||||
cnf := tempConf(t, "api: yes\n"+ |
||||
"paths:\n"+ |
||||
" path1:\n"+ |
||||
" readUser: myuser1\n"+ |
||||
" readPass: mypass1\n"+ |
||||
" path2:\n"+ |
||||
" readUser: myuser2\n"+ |
||||
" readPass: mypass2\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err := api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
type pathConfig map[string]interface{} |
||||
|
||||
type listRes struct { |
||||
ItemCount int `json:"itemCount"` |
||||
PageCount int `json:"pageCount"` |
||||
Items []pathConfig `json:"items"` |
||||
} |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
var out listRes |
||||
httpRequest(t, hc, http.MethodGet, "http://myuser:mypass@localhost:9997/v3/config/paths/list", nil, &out) |
||||
require.Equal(t, 2, out.ItemCount) |
||||
require.Equal(t, 1, out.PageCount) |
||||
require.Equal(t, "path1", out.Items[0]["name"]) |
||||
require.Equal(t, "myuser1", out.Items[0]["readUser"]) |
||||
require.Equal(t, "mypass1", out.Items[0]["readPass"]) |
||||
require.Equal(t, "path2", out.Items[1]["name"]) |
||||
require.Equal(t, "myuser2", out.Items[1]["readUser"]) |
||||
require.Equal(t, "mypass2", out.Items[1]["readPass"]) |
||||
} |
||||
|
||||
func TestAPIConfigPathsGet(t *testing.T) { |
||||
cnf := tempConf(t, "api: yes\n"+ |
||||
"paths:\n"+ |
||||
" my/path:\n"+ |
||||
" readUser: myuser\n"+ |
||||
" readPass: mypass\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err := api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
var out map[string]interface{} |
||||
httpRequest(t, hc, http.MethodGet, "http://myuser:mypass@localhost:9997/v3/config/paths/get/my/path", nil, &out) |
||||
require.Equal(t, "my/path", out["name"]) |
||||
require.Equal(t, "myuser", out["readUser"]) |
||||
} |
||||
|
||||
func TestAPIConfigPathsAdd(t *testing.T) { |
||||
cnf := tempConf(t, "api: yes\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err := api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
httpRequest(t, hc, http.MethodPost, "http://myuser:mypass@localhost:9997/v3/config/paths/add/my/path", |
||||
map[string]interface{}{ |
||||
"source": "rtsp://127.0.0.1:9999/mypath", |
||||
"sourceOnDemand": true, |
||||
"disablePublisherOverride": true, // test setting a deprecated parameter
|
||||
"rpiCameraVFlip": true, |
||||
}, nil) |
||||
|
||||
var out map[string]interface{} |
||||
httpRequest(t, hc, http.MethodGet, "http://myuser:mypass@localhost:9997/v3/config/paths/get/my/path", nil, &out) |
||||
require.Equal(t, "rtsp://127.0.0.1:9999/mypath", out["source"]) |
||||
require.Equal(t, true, out["sourceOnDemand"]) |
||||
require.Equal(t, true, out["disablePublisherOverride"]) |
||||
require.Equal(t, true, out["rpiCameraVFlip"]) |
||||
} |
||||
|
||||
func TestAPIConfigPathsAddUnknownField(t *testing.T) { //nolint:dupl |
||||
cnf := tempConf(t, "api: yes\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err := api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
b := map[string]interface{}{ |
||||
"test": "asd", |
||||
} |
||||
|
||||
byts, err := json.Marshal(b) |
||||
require.NoError(t, err) |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
req, err := http.NewRequest(http.MethodPost, |
||||
"http://myuser:mypass@localhost:9997/v3/config/paths/add/my/path", bytes.NewReader(byts)) |
||||
require.NoError(t, err) |
||||
|
||||
res, err := hc.Do(req) |
||||
require.NoError(t, err) |
||||
defer res.Body.Close() |
||||
|
||||
require.Equal(t, http.StatusBadRequest, res.StatusCode) |
||||
checkError(t, "json: unknown field \"test\"", res.Body) |
||||
} |
||||
|
||||
func TestAPIConfigPathsPatch(t *testing.T) { //nolint:dupl |
||||
cnf := tempConf(t, "api: yes\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err := api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
httpRequest(t, hc, http.MethodPost, "http://myuser:mypass@localhost:9997/v3/config/paths/add/my/path", |
||||
map[string]interface{}{ |
||||
"source": "rtsp://127.0.0.1:9999/mypath", |
||||
"sourceOnDemand": true, |
||||
"disablePublisherOverride": true, // test setting a deprecated parameter
|
||||
"rpiCameraVFlip": true, |
||||
}, nil) |
||||
|
||||
httpRequest(t, hc, http.MethodPatch, "http://myuser:mypass@localhost:9997/v3/config/paths/patch/my/path", |
||||
map[string]interface{}{ |
||||
"source": "rtsp://127.0.0.1:9998/mypath", |
||||
"sourceOnDemand": true, |
||||
}, nil) |
||||
|
||||
var out map[string]interface{} |
||||
httpRequest(t, hc, http.MethodGet, "http://myuser:mypass@localhost:9997/v3/config/paths/get/my/path", nil, &out) |
||||
require.Equal(t, "rtsp://127.0.0.1:9998/mypath", out["source"]) |
||||
require.Equal(t, true, out["sourceOnDemand"]) |
||||
require.Equal(t, true, out["disablePublisherOverride"]) |
||||
require.Equal(t, true, out["rpiCameraVFlip"]) |
||||
} |
||||
|
||||
func TestAPIConfigPathsReplace(t *testing.T) { //nolint:dupl |
||||
cnf := tempConf(t, "api: yes\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err := api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
httpRequest(t, hc, http.MethodPost, "http://myuser:mypass@localhost:9997/v3/config/paths/add/my/path", |
||||
map[string]interface{}{ |
||||
"source": "rtsp://127.0.0.1:9999/mypath", |
||||
"sourceOnDemand": true, |
||||
"disablePublisherOverride": true, // test setting a deprecated parameter
|
||||
"rpiCameraVFlip": true, |
||||
}, nil) |
||||
|
||||
httpRequest(t, hc, http.MethodPost, "http://myuser:mypass@localhost:9997/v3/config/paths/replace/my/path", |
||||
map[string]interface{}{ |
||||
"source": "rtsp://127.0.0.1:9998/mypath", |
||||
"sourceOnDemand": true, |
||||
}, nil) |
||||
|
||||
var out map[string]interface{} |
||||
httpRequest(t, hc, http.MethodGet, "http://myuser:mypass@localhost:9997/v3/config/paths/get/my/path", nil, &out) |
||||
require.Equal(t, "rtsp://127.0.0.1:9998/mypath", out["source"]) |
||||
require.Equal(t, true, out["sourceOnDemand"]) |
||||
require.Equal(t, nil, out["disablePublisherOverride"]) |
||||
require.Equal(t, false, out["rpiCameraVFlip"]) |
||||
} |
||||
|
||||
func TestAPIConfigPathsDelete(t *testing.T) { |
||||
cnf := tempConf(t, "api: yes\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err := api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
httpRequest(t, hc, http.MethodPost, "http://myuser:mypass@localhost:9997/v3/config/paths/add/my/path", |
||||
map[string]interface{}{ |
||||
"source": "rtsp://127.0.0.1:9999/mypath", |
||||
"sourceOnDemand": true, |
||||
}, nil) |
||||
|
||||
httpRequest(t, hc, http.MethodDelete, "http://myuser:mypass@localhost:9997/v3/config/paths/delete/my/path", nil, nil) |
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://myuser:mypass@localhost:9997/v3/config/paths/get/my/path", nil) |
||||
require.NoError(t, err) |
||||
|
||||
res, err := hc.Do(req) |
||||
require.NoError(t, err) |
||||
defer res.Body.Close() |
||||
|
||||
require.Equal(t, http.StatusNotFound, res.StatusCode) |
||||
checkError(t, "path configuration not found", res.Body) |
||||
} |
||||
|
||||
func TestRecordingsList(t *testing.T) { |
||||
dir, err := os.MkdirTemp("", "mediamtx-playback") |
||||
require.NoError(t, err) |
||||
defer os.RemoveAll(dir) |
||||
|
||||
cnf := tempConf(t, "pathDefaults:\n"+ |
||||
" recordPath: "+filepath.Join(dir, "%path/%Y-%m-%d_%H-%M-%S-%f")+"\n"+ |
||||
"paths:\n"+ |
||||
" all_others:\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err = api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
err = os.Mkdir(filepath.Join(dir, "mypath1"), 0o755) |
||||
require.NoError(t, err) |
||||
|
||||
err = os.Mkdir(filepath.Join(dir, "mypath2"), 0o755) |
||||
require.NoError(t, err) |
||||
|
||||
err = os.WriteFile(filepath.Join(dir, "mypath1", "2008-11-07_11-22-00-500000.mp4"), []byte(""), 0o644) |
||||
require.NoError(t, err) |
||||
|
||||
err = os.WriteFile(filepath.Join(dir, "mypath1", "2009-11-07_11-22-00-900000.mp4"), []byte(""), 0o644) |
||||
require.NoError(t, err) |
||||
|
||||
err = os.WriteFile(filepath.Join(dir, "mypath2", "2009-11-07_11-22-00-900000.mp4"), []byte(""), 0o644) |
||||
require.NoError(t, err) |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
var out interface{} |
||||
httpRequest(t, hc, http.MethodGet, "http://myuser:mypass@localhost:9997/v3/recordings/list", nil, &out) |
||||
require.Equal(t, map[string]interface{}{ |
||||
"itemCount": float64(2), |
||||
"pageCount": float64(1), |
||||
"items": []interface{}{ |
||||
map[string]interface{}{ |
||||
"name": "mypath1", |
||||
"segments": []interface{}{ |
||||
map[string]interface{}{ |
||||
"start": time.Date(2008, 11, 0o7, 11, 22, 0, 500000000, time.Local).Format(time.RFC3339Nano), |
||||
}, |
||||
map[string]interface{}{ |
||||
"start": time.Date(2009, 11, 0o7, 11, 22, 0, 900000000, time.Local).Format(time.RFC3339Nano), |
||||
}, |
||||
}, |
||||
}, |
||||
map[string]interface{}{ |
||||
"name": "mypath2", |
||||
"segments": []interface{}{ |
||||
map[string]interface{}{ |
||||
"start": time.Date(2009, 11, 0o7, 11, 22, 0, 900000000, time.Local).Format(time.RFC3339Nano), |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
}, out) |
||||
} |
||||
|
||||
func TestRecordingsGet(t *testing.T) { |
||||
dir, err := os.MkdirTemp("", "mediamtx-playback") |
||||
require.NoError(t, err) |
||||
defer os.RemoveAll(dir) |
||||
|
||||
cnf := tempConf(t, "pathDefaults:\n"+ |
||||
" recordPath: "+filepath.Join(dir, "%path/%Y-%m-%d_%H-%M-%S-%f")+"\n"+ |
||||
"paths:\n"+ |
||||
" all_others:\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err = api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
err = os.Mkdir(filepath.Join(dir, "mypath1"), 0o755) |
||||
require.NoError(t, err) |
||||
|
||||
err = os.WriteFile(filepath.Join(dir, "mypath1", "2008-11-07_11-22-00-000000.mp4"), []byte(""), 0o644) |
||||
require.NoError(t, err) |
||||
|
||||
err = os.WriteFile(filepath.Join(dir, "mypath1", "2009-11-07_11-22-00-900000.mp4"), []byte(""), 0o644) |
||||
require.NoError(t, err) |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
var out interface{} |
||||
httpRequest(t, hc, http.MethodGet, "http://myuser:mypass@localhost:9997/v3/recordings/get/mypath1", nil, &out) |
||||
require.Equal(t, map[string]interface{}{ |
||||
"name": "mypath1", |
||||
"segments": []interface{}{ |
||||
map[string]interface{}{ |
||||
"start": time.Date(2008, 11, 0o7, 11, 22, 0, 0, time.Local).Format(time.RFC3339Nano), |
||||
}, |
||||
map[string]interface{}{ |
||||
"start": time.Date(2009, 11, 0o7, 11, 22, 0, 900000000, time.Local).Format(time.RFC3339Nano), |
||||
}, |
||||
}, |
||||
}, out) |
||||
} |
||||
|
||||
func TestRecordingsDeleteSegment(t *testing.T) { |
||||
dir, err := os.MkdirTemp("", "mediamtx-playback") |
||||
require.NoError(t, err) |
||||
defer os.RemoveAll(dir) |
||||
|
||||
cnf := tempConf(t, "pathDefaults:\n"+ |
||||
" recordPath: "+filepath.Join(dir, "%path/%Y-%m-%d_%H-%M-%S-%f")+"\n"+ |
||||
"paths:\n"+ |
||||
" all_others:\n") |
||||
|
||||
api := API{ |
||||
Address: "localhost:9997", |
||||
ReadTimeout: conf.StringDuration(10 * time.Second), |
||||
Conf: cnf, |
||||
AuthManager: authManager, |
||||
Parent: &testParent{}, |
||||
} |
||||
err = api.Initialize() |
||||
require.NoError(t, err) |
||||
defer api.Close() |
||||
|
||||
err = os.Mkdir(filepath.Join(dir, "mypath1"), 0o755) |
||||
require.NoError(t, err) |
||||
|
||||
err = os.WriteFile(filepath.Join(dir, "mypath1", "2008-11-07_11-22-00-900000.mp4"), []byte(""), 0o644) |
||||
require.NoError(t, err) |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
u, err := url.Parse("http://myuser:mypass@localhost:9997/v3/recordings/deletesegment") |
||||
require.NoError(t, err) |
||||
|
||||
v := url.Values{} |
||||
v.Set("path", "mypath1") |
||||
v.Set("start", time.Date(2008, 11, 0o7, 11, 22, 0, 900000000, time.Local).Format(time.RFC3339Nano)) |
||||
u.RawQuery = v.Encode() |
||||
|
||||
req, err := http.NewRequest(http.MethodDelete, u.String(), nil) |
||||
require.NoError(t, err) |
||||
|
||||
res, err := hc.Do(req) |
||||
require.NoError(t, err) |
||||
defer res.Body.Close() |
||||
require.Equal(t, http.StatusOK, res.StatusCode) |
||||
} |
||||
@@ -1,139 +0,0 @@
|
||||
package api |
||||
|
||||
import ( |
||||
"errors" |
||||
"io/fs" |
||||
"path/filepath" |
||||
"sort" |
||||
"strings" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/conf" |
||||
"github.com/bluenviron/mediamtx/internal/defs" |
||||
"github.com/bluenviron/mediamtx/internal/playback" |
||||
"github.com/bluenviron/mediamtx/internal/record" |
||||
) |
||||
|
||||
var errFound = errors.New("found") |
||||
|
||||
func fixedPathHasRecordings(pathConf *conf.Path) bool { |
||||
recordPath := record.PathAddExtension( |
||||
strings.ReplaceAll(pathConf.RecordPath, "%path", pathConf.Name), |
||||
pathConf.RecordFormat, |
||||
) |
||||
|
||||
// we have to convert to absolute paths |
||||
// otherwise, recordPath and fpath inside Walk() won't have common elements |
||||
recordPath, _ = filepath.Abs(recordPath) |
||||
|
||||
commonPath := record.CommonPath(recordPath) |
||||
|
||||
err := filepath.Walk(commonPath, func(fpath string, info fs.FileInfo, err error) error { |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
if !info.IsDir() { |
||||
var pa record.Path |
||||
ok := pa.Decode(recordPath, fpath) |
||||
if ok { |
||||
return errFound |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
}) |
||||
if err != nil && !errors.Is(err, errFound) { |
||||
return false |
||||
} |
||||
|
||||
return errors.Is(err, errFound) |
||||
} |
||||
|
||||
func regexpPathGetRecordings(pathConf *conf.Path) []string { |
||||
recordPath := record.PathAddExtension( |
||||
pathConf.RecordPath, |
||||
pathConf.RecordFormat, |
||||
) |
||||
|
||||
// we have to convert to absolute paths |
||||
// otherwise, recordPath and fpath inside Walk() won't have common elements |
||||
recordPath, _ = filepath.Abs(recordPath) |
||||
|
||||
commonPath := record.CommonPath(recordPath) |
||||
|
||||
var ret []string |
||||
|
||||
filepath.Walk(commonPath, func(fpath string, info fs.FileInfo, err error) error { //nolint:errcheck |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
if !info.IsDir() { |
||||
var pa record.Path |
||||
ok := pa.Decode(recordPath, fpath) |
||||
if ok && pathConf.Regexp.FindStringSubmatch(pa.Path) != nil { |
||||
ret = append(ret, pa.Path) |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
}) |
||||
|
||||
return ret |
||||
} |
||||
|
||||
func removeDuplicatesAndSort(in []string) []string { |
||||
ma := make(map[string]struct{}, len(in)) |
||||
for _, i := range in { |
||||
ma[i] = struct{}{} |
||||
} |
||||
|
||||
out := []string{} |
||||
|
||||
for k := range ma { |
||||
out = append(out, k) |
||||
} |
||||
|
||||
sort.Strings(out) |
||||
|
||||
return out |
||||
} |
||||
|
||||
func getAllPathsWithRecordings(paths map[string]*conf.Path) []string { |
||||
pathNames := []string{} |
||||
|
||||
for _, pathConf := range paths { |
||||
if pathConf.Playback { |
||||
if pathConf.Regexp == nil { |
||||
if fixedPathHasRecordings(pathConf) { |
||||
pathNames = append(pathNames, pathConf.Name) |
||||
} |
||||
} else { |
||||
pathNames = append(pathNames, regexpPathGetRecordings(pathConf)...) |
||||
} |
||||
} |
||||
} |
||||
|
||||
return removeDuplicatesAndSort(pathNames) |
||||
} |
||||
|
||||
func recordingEntry( |
||||
pathConf *conf.Path, |
||||
pathName string, |
||||
) *defs.APIRecording { |
||||
ret := &defs.APIRecording{ |
||||
Name: pathName, |
||||
} |
||||
|
||||
segments, _ := playback.FindSegments(pathConf, pathName) |
||||
|
||||
ret.Segments = make([]*defs.APIRecordingSegment, len(segments)) |
||||
|
||||
for i, seg := range segments { |
||||
ret.Segments[i] = &defs.APIRecordingSegment{ |
||||
Start: seg.Start, |
||||
} |
||||
} |
||||
|
||||
return ret |
||||
} |
||||
@@ -1,76 +0,0 @@
|
||||
// Package asyncwriter contains an asynchronous writer. |
||||
package asyncwriter |
||||
|
||||
import ( |
||||
"fmt" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/ringbuffer" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/logger" |
||||
) |
||||
|
||||
// Writer is an asynchronous writer. |
||||
type Writer struct { |
||||
writeErrLogger logger.Writer |
||||
buffer *ringbuffer.RingBuffer |
||||
|
||||
// out |
||||
err chan error |
||||
} |
||||
|
||||
// New allocates a Writer. |
||||
func New( |
||||
queueSize int, |
||||
parent logger.Writer, |
||||
) *Writer { |
||||
buffer, _ := ringbuffer.New(uint64(queueSize)) |
||||
|
||||
return &Writer{ |
||||
writeErrLogger: logger.NewLimitedLogger(parent), |
||||
buffer: buffer, |
||||
err: make(chan error), |
||||
} |
||||
} |
||||
|
||||
// Start starts the writer routine. |
||||
func (w *Writer) Start() { |
||||
go w.run() |
||||
} |
||||
|
||||
// Stop stops the writer routine. |
||||
func (w *Writer) Stop() { |
||||
w.buffer.Close() |
||||
<-w.err |
||||
} |
||||
|
||||
// Error returns whenever there's an error. |
||||
func (w *Writer) Error() chan error { |
||||
return w.err |
||||
} |
||||
|
||||
func (w *Writer) run() { |
||||
w.err <- w.runInner() |
||||
close(w.err) |
||||
} |
||||
|
||||
func (w *Writer) runInner() error { |
||||
for { |
||||
cb, ok := w.buffer.Pull() |
||||
if !ok { |
||||
return fmt.Errorf("terminated") |
||||
} |
||||
|
||||
err := cb.(func() error)() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Push appends an element to the queue. |
||||
func (w *Writer) Push(cb func() error) { |
||||
ok := w.buffer.Push(cb) |
||||
if !ok { |
||||
w.writeErrLogger.Log(logger.Warn, "write queue is full") |
||||
} |
||||
} |
||||
@@ -1,22 +0,0 @@
|
||||
package asyncwriter |
||||
|
||||
import ( |
||||
"fmt" |
||||
"testing" |
||||
|
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
func TestAsyncWriter(t *testing.T) { |
||||
w := New(512, nil) |
||||
|
||||
w.Start() |
||||
defer w.Stop() |
||||
|
||||
w.Push(func() error { |
||||
return fmt.Errorf("testerror") |
||||
}) |
||||
|
||||
err := <-w.Error() |
||||
require.EqualError(t, err, "testerror") |
||||
} |
||||
@@ -1,327 +0,0 @@
|
||||
// Package auth contains the authentication system. |
||||
package auth |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/json" |
||||
"fmt" |
||||
"io" |
||||
"net" |
||||
"net/http" |
||||
"net/url" |
||||
"regexp" |
||||
"strings" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/MicahParks/keyfunc/v3" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/auth" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/base" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/headers" |
||||
"github.com/bluenviron/mediamtx/internal/conf" |
||||
"github.com/golang-jwt/jwt/v5" |
||||
"github.com/google/uuid" |
||||
) |
||||
|
||||
const ( |
||||
// PauseAfterError is the pause to apply after an authentication failure. |
||||
PauseAfterError = 2 * time.Second |
||||
|
||||
rtspAuthRealm = "IPCAM" |
||||
jwtRefreshPeriod = 60 * 60 * time.Second |
||||
) |
||||
|
||||
// Protocol is a protocol. |
||||
type Protocol string |
||||
|
||||
// protocols. |
||||
const ( |
||||
ProtocolRTSP Protocol = "rtsp" |
||||
ProtocolRTMP Protocol = "rtmp" |
||||
ProtocolHLS Protocol = "hls" |
||||
ProtocolWebRTC Protocol = "webrtc" |
||||
ProtocolSRT Protocol = "srt" |
||||
) |
||||
|
||||
// Request is an authentication request. |
||||
type Request struct { |
||||
User string |
||||
Pass string |
||||
IP net.IP |
||||
Action conf.AuthAction |
||||
|
||||
// only for ActionPublish, ActionRead, ActionPlayback |
||||
Path string |
||||
Protocol Protocol |
||||
ID *uuid.UUID |
||||
Query string |
||||
RTSPRequest *base.Request |
||||
RTSPBaseURL *base.URL |
||||
RTSPNonce string |
||||
} |
||||
|
||||
// Error is an authentication error. |
||||
type Error struct { |
||||
Message string |
||||
} |
||||
|
||||
// Error implements the error interface. |
||||
func (e Error) Error() string { |
||||
return "authentication failed: " + e.Message |
||||
} |
||||
|
||||
func matchesPermission(perms []conf.AuthInternalUserPermission, req *Request) bool { |
||||
for _, perm := range perms { |
||||
if perm.Action == req.Action { |
||||
if perm.Action == conf.AuthActionPublish || |
||||
perm.Action == conf.AuthActionRead || |
||||
perm.Action == conf.AuthActionPlayback { |
||||
switch { |
||||
case perm.Path == "": |
||||
return true |
||||
|
||||
case strings.HasPrefix(perm.Path, "~"): |
||||
regexp, err := regexp.Compile(perm.Path[1:]) |
||||
if err == nil && regexp.MatchString(req.Path) { |
||||
return true |
||||
} |
||||
|
||||
case perm.Path == req.Path: |
||||
return true |
||||
} |
||||
} else { |
||||
return true |
||||
} |
||||
} |
||||
} |
||||
|
||||
return false |
||||
} |
||||
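To make the path rules in matchesPermission concrete, here is a small hypothetical fragment in the same package, reusing the conf types and the Request struct shown in this diff; the permission values themselves are invented for illustration only.

perms := []conf.AuthInternalUserPermission{
	{Action: conf.AuthActionRead, Path: "~^cam[0-9]+$"}, // "~" prefix: the rest is a regular expression
	{Action: conf.AuthActionPublish, Path: "studio"},    // literal path comparison
	{Action: conf.AuthActionAPI},                        // non-path action: the path field is not checked
}

// true: the read permission's regexp accepts "cam3"
okRead := matchesPermission(perms, &Request{Action: conf.AuthActionRead, Path: "cam3"})

// false: no publish permission covers "garden"
okPublish := matchesPermission(perms, &Request{Action: conf.AuthActionPublish, Path: "garden"})

_, _ = okRead, okPublish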
|
||||
type customClaims struct { |
||||
jwt.RegisteredClaims |
||||
MediaMTXPermissions []conf.AuthInternalUserPermission `json:"mediamtx_permissions"` |
||||
} |
||||
|
||||
// Manager is the authentication manager. |
||||
type Manager struct { |
||||
Method conf.AuthMethod |
||||
InternalUsers []conf.AuthInternalUser |
||||
HTTPAddress string |
||||
HTTPExclude []conf.AuthInternalUserPermission |
||||
JWTJWKS string |
||||
ReadTimeout time.Duration |
||||
RTSPAuthMethods []headers.AuthMethod |
||||
|
||||
mutex sync.RWMutex |
||||
jwtHTTPClient *http.Client |
||||
jwtLastRefresh time.Time |
||||
jwtKeyFunc keyfunc.Keyfunc |
||||
} |
||||
|
||||
// ReloadInternalUsers reloads InternalUsers. |
||||
func (m *Manager) ReloadInternalUsers(u []conf.AuthInternalUser) { |
||||
m.mutex.Lock() |
||||
defer m.mutex.Unlock() |
||||
m.InternalUsers = u |
||||
} |
||||
|
||||
// Authenticate authenticates a request. |
||||
func (m *Manager) Authenticate(req *Request) error { |
||||
err := m.authenticateInner(req) |
||||
if err != nil { |
||||
return Error{Message: err.Error()} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *Manager) authenticateInner(req *Request) error { |
||||
// if this is an RTSP request, fill username and password |
||||
var rtspAuthHeader headers.Authorization |
||||
if req.RTSPRequest != nil { |
||||
err := rtspAuthHeader.Unmarshal(req.RTSPRequest.Header["Authorization"]) |
||||
if err == nil { |
||||
switch rtspAuthHeader.Method { |
||||
case headers.AuthBasic: |
||||
req.User = rtspAuthHeader.BasicUser |
||||
req.Pass = rtspAuthHeader.BasicPass |
||||
|
||||
case headers.AuthDigestMD5: |
||||
req.User = rtspAuthHeader.Username |
||||
|
||||
default: |
||||
return fmt.Errorf("unsupported RTSP authentication method") |
||||
} |
||||
} |
||||
} |
||||
|
||||
switch m.Method { |
||||
case conf.AuthMethodInternal: |
||||
return m.authenticateInternal(req, &rtspAuthHeader) |
||||
|
||||
case conf.AuthMethodHTTP: |
||||
return m.authenticateHTTP(req) |
||||
|
||||
default: |
||||
return m.authenticateJWT(req) |
||||
} |
||||
} |
||||
|
||||
func (m *Manager) authenticateInternal(req *Request, rtspAuthHeader *headers.Authorization) error { |
||||
m.mutex.RLock() |
||||
defer m.mutex.RUnlock() |
||||
|
||||
for _, u := range m.InternalUsers { |
||||
if err := m.authenticateWithUser(req, rtspAuthHeader, &u); err == nil { |
||||
return nil |
||||
} |
||||
} |
||||
|
||||
return fmt.Errorf("authentication failed") |
||||
} |
||||
|
||||
func (m *Manager) authenticateWithUser( |
||||
req *Request, |
||||
rtspAuthHeader *headers.Authorization, |
||||
u *conf.AuthInternalUser, |
||||
) error { |
||||
if u.User != "any" && !u.User.Check(req.User) { |
||||
return fmt.Errorf("wrong user") |
||||
} |
||||
|
||||
if len(u.IPs) != 0 && !u.IPs.Contains(req.IP) { |
||||
return fmt.Errorf("IP not allowed") |
||||
} |
||||
|
||||
if !matchesPermission(u.Permissions, req) { |
||||
return fmt.Errorf("user doesn't have permission to perform action") |
||||
} |
||||
|
||||
if u.User != "any" { |
||||
if req.RTSPRequest != nil && rtspAuthHeader.Method == headers.AuthDigestMD5 { |
||||
err := auth.Validate( |
||||
req.RTSPRequest, |
||||
string(u.User), |
||||
string(u.Pass), |
||||
req.RTSPBaseURL, |
||||
m.RTSPAuthMethods, |
||||
rtspAuthRealm, |
||||
req.RTSPNonce) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} else if !u.Pass.Check(req.Pass) { |
||||
return fmt.Errorf("invalid credentials") |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (m *Manager) authenticateHTTP(req *Request) error { |
||||
if matchesPermission(m.HTTPExclude, req) { |
||||
return nil |
||||
} |
||||
|
||||
enc, _ := json.Marshal(struct { |
||||
IP string `json:"ip"` |
||||
User string `json:"user"` |
||||
Password string `json:"password"` |
||||
Action string `json:"action"` |
||||
Path string `json:"path"` |
||||
Protocol string `json:"protocol"` |
||||
ID *uuid.UUID `json:"id"` |
||||
Query string `json:"query"` |
||||
}{ |
||||
IP: req.IP.String(), |
||||
User: req.User, |
||||
Password: req.Pass, |
||||
Action: string(req.Action), |
||||
Path: req.Path, |
||||
Protocol: string(req.Protocol), |
||||
ID: req.ID, |
||||
Query: req.Query, |
||||
}) |
||||
|
||||
res, err := http.Post(m.HTTPAddress, "application/json", bytes.NewReader(enc)) |
||||
if err != nil { |
||||
return fmt.Errorf("HTTP request failed: %w", err) |
||||
} |
||||
defer res.Body.Close() |
||||
|
||||
if res.StatusCode < 200 || res.StatusCode > 299 { |
||||
if resBody, err := io.ReadAll(res.Body); err == nil && len(resBody) != 0 { |
||||
return fmt.Errorf("server replied with code %d: %s", res.StatusCode, string(resBody)) |
||||
} |
||||
|
||||
return fmt.Errorf("server replied with code %d", res.StatusCode) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
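For reference, the JSON body that authenticateHTTP builds above comes out roughly like this; the field names follow the struct tags in the code, while the values are borrowed from the tests later in this diff and are illustrative only.

// exampleAuthHTTPBody is a hypothetical instance of the request POSTed to HTTPAddress.
const exampleAuthHTTPBody = `{
	"ip": "127.0.0.1",
	"user": "testpublisher",
	"password": "testpass",
	"action": "publish",
	"path": "teststream",
	"protocol": "rtsp",
	"id": null,
	"query": "param=value"
}`

Any 2xx reply authorizes the request; any other status code (optionally with a response body) is reported as an authentication failure, as the status-code check above shows.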
|
||||
func (m *Manager) authenticateJWT(req *Request) error { |
||||
keyfunc, err := m.pullJWTJWKS() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
v, err := url.ParseQuery(req.Query) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
if len(v["jwt"]) != 1 { |
||||
return fmt.Errorf("JWT not provided") |
||||
} |
||||
|
||||
var customClaims customClaims |
||||
_, err = jwt.ParseWithClaims(v["jwt"][0], &customClaims, keyfunc) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
if !matchesPermission(customClaims.MediaMTXPermissions, req) { |
||||
return fmt.Errorf("user doesn't have permission to perform action") |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
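A token accepted by authenticateJWT above has to decode into customClaims, i.e. carry a mediamtx_permissions claim, and be signed with a key published at the configured JWKS URL. The following fragment is a rough sketch of such a claims object in the same package; the subject and permission values are hypothetical.

claims := customClaims{
	RegisteredClaims: jwt.RegisteredClaims{
		Subject:   "camera-operator",
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
	},
	MediaMTXPermissions: []conf.AuthInternalUserPermission{
		{Action: conf.AuthActionPublish, Path: "mypath"},
		{Action: conf.AuthActionRead},
	},
}

// The signed token is then passed in the query string ("jwt=<token>"),
// which is where authenticateJWT looks for it.
_ = claims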
|
||||
func (m *Manager) pullJWTJWKS() (jwt.Keyfunc, error) { |
||||
now := time.Now() |
||||
|
||||
m.mutex.Lock() |
||||
defer m.mutex.Unlock() |
||||
|
||||
if now.Sub(m.jwtLastRefresh) >= jwtRefreshPeriod { |
||||
if m.jwtHTTPClient == nil { |
||||
m.jwtHTTPClient = &http.Client{ |
||||
Timeout: (m.ReadTimeout), |
||||
Transport: &http.Transport{}, |
||||
} |
||||
} |
||||
|
||||
res, err := m.jwtHTTPClient.Get(m.JWTJWKS) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer res.Body.Close() |
||||
|
||||
var raw json.RawMessage |
||||
err = json.NewDecoder(res.Body).Decode(&raw) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
tmp, err := keyfunc.NewJWKSetJSON(raw) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
m.jwtKeyFunc = tmp |
||||
m.jwtLastRefresh = now |
||||
} |
||||
|
||||
return m.jwtKeyFunc.Keyfunc, nil |
||||
} |
||||
@@ -1,309 +0,0 @@
|
||||
package auth |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/json" |
||||
"net" |
||||
"net/http" |
||||
"testing" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/auth" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/base" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/headers" |
||||
"github.com/bluenviron/mediamtx/internal/conf" |
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
func mustParseCIDR(v string) net.IPNet { |
||||
_, ne, err := net.ParseCIDR(v) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
if ipv4 := ne.IP.To4(); ipv4 != nil { |
||||
return net.IPNet{IP: ipv4, Mask: ne.Mask[len(ne.Mask)-4 : len(ne.Mask)]} |
||||
} |
||||
return *ne |
||||
} |
||||
|
||||
type testHTTPAuthenticator struct { |
||||
*http.Server |
||||
} |
||||
|
||||
func (ts *testHTTPAuthenticator) initialize(t *testing.T, protocol string, action string) { |
||||
firstReceived := false |
||||
|
||||
ts.Server = &http.Server{ |
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { |
||||
require.Equal(t, http.MethodPost, r.Method) |
||||
require.Equal(t, "/auth", r.URL.Path) |
||||
|
||||
var in struct { |
||||
IP string `json:"ip"` |
||||
User string `json:"user"` |
||||
Password string `json:"password"` |
||||
Path string `json:"path"` |
||||
Protocol string `json:"protocol"` |
||||
ID string `json:"id"` |
||||
Action string `json:"action"` |
||||
Query string `json:"query"` |
||||
} |
||||
err := json.NewDecoder(r.Body).Decode(&in) |
||||
require.NoError(t, err) |
||||
|
||||
var user string |
||||
if action == "publish" { |
||||
user = "testpublisher" |
||||
} else { |
||||
user = "testreader" |
||||
} |
||||
|
||||
if in.IP != "127.0.0.1" || |
||||
in.User != user || |
||||
in.Password != "testpass" || |
||||
in.Path != "teststream" || |
||||
in.Protocol != protocol || |
||||
(firstReceived && in.ID == "") || |
||||
in.Action != action || |
||||
(in.Query != "user=testreader&pass=testpass¶m=value" && |
||||
in.Query != "user=testpublisher&pass=testpass¶m=value" && |
||||
in.Query != "param=value") { |
||||
w.WriteHeader(http.StatusBadRequest) |
||||
return |
||||
} |
||||
|
||||
firstReceived = true |
||||
}), |
||||
} |
||||
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:9120") |
||||
require.NoError(t, err) |
||||
|
||||
go ts.Server.Serve(ln) |
||||
} |
||||
|
||||
func (ts *testHTTPAuthenticator) close() { |
||||
ts.Server.Shutdown(context.Background()) |
||||
} |
||||
|
||||
func TestAuthInternal(t *testing.T) { |
||||
for _, outcome := range []string{ |
||||
"ok", |
||||
"wrong user", |
||||
"wrong pass", |
||||
"wrong ip", |
||||
"wrong action", |
||||
"wrong path", |
||||
} { |
||||
for _, encryption := range []string{ |
||||
"plain", |
||||
"sha256", |
||||
"argon2", |
||||
} { |
||||
t.Run(outcome+" "+encryption, func(t *testing.T) { |
||||
m := Manager{ |
||||
Method: conf.AuthMethodInternal, |
||||
InternalUsers: []conf.AuthInternalUser{ |
||||
{ |
||||
IPs: conf.IPNetworks{mustParseCIDR("127.1.1.1/32")}, |
||||
Permissions: []conf.AuthInternalUserPermission{ |
||||
{ |
||||
Action: conf.AuthActionPublish, |
||||
Path: "mypath", |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
HTTPAddress: "", |
||||
RTSPAuthMethods: nil, |
||||
} |
||||
|
||||
switch encryption { |
||||
case "plain": |
||||
m.InternalUsers[0].User = conf.Credential("testuser") |
||||
m.InternalUsers[0].Pass = conf.Credential("testpass") |
||||
|
||||
case "sha256": |
||||
m.InternalUsers[0].User = conf.Credential("sha256:rl3rgi4NcZkpAEcacZnQ2VuOfJ0FxAqCRaKB/SwdZoQ=") |
||||
m.InternalUsers[0].Pass = conf.Credential("sha256:E9JJ8stBJ7QM+nV4ZoUCeHk/gU3tPFh/5YieiJp6n2w=") |
||||
|
||||
case "argon2": |
||||
m.InternalUsers[0].User = conf.Credential( |
||||
"argon2:$argon2id$v=19$m=4096,t=3,p=1$MTIzNDU2Nzg$Ux/LWeTgJQPyfMMJo1myR64+o8rALHoPmlE1i/TR+58") |
||||
m.InternalUsers[0].Pass = conf.Credential( |
||||
"argon2:$argon2i$v=19$m=4096,t=3,p=1$MTIzNDU2Nzg$/mrZ42TiTv1mcPnpMUera5oi0SFYbbyueAbdx5sUvWo") |
||||
} |
||||
|
||||
switch outcome { |
||||
case "ok": |
||||
err := m.Authenticate(&Request{ |
||||
User: "testuser", |
||||
Pass: "testpass", |
||||
IP: net.ParseIP("127.1.1.1"), |
||||
Action: conf.AuthActionPublish, |
||||
Path: "mypath", |
||||
}) |
||||
require.NoError(t, err) |
||||
|
||||
case "wrong user": |
||||
err := m.Authenticate(&Request{ |
||||
User: "wrong", |
||||
Pass: "testpass", |
||||
IP: net.ParseIP("127.1.1.1"), |
||||
Action: conf.AuthActionPublish, |
||||
Path: "mypath", |
||||
}) |
||||
require.Error(t, err) |
||||
|
||||
case "wrong pass": |
||||
err := m.Authenticate(&Request{ |
||||
User: "testuser", |
||||
Pass: "wrong", |
||||
IP: net.ParseIP("127.1.1.1"), |
||||
Action: conf.AuthActionPublish, |
||||
Path: "mypath", |
||||
}) |
||||
require.Error(t, err) |
||||
|
||||
case "wrong ip": |
||||
err := m.Authenticate(&Request{ |
||||
User: "testuser", |
||||
Pass: "testpass", |
||||
IP: net.ParseIP("127.1.1.2"), |
||||
Action: conf.AuthActionPublish, |
||||
Path: "mypath", |
||||
}) |
||||
require.Error(t, err) |
||||
|
||||
case "wrong action": |
||||
err := m.Authenticate(&Request{ |
||||
User: "testuser", |
||||
Pass: "testpass", |
||||
IP: net.ParseIP("127.1.1.1"), |
||||
Action: conf.AuthActionRead, |
||||
Path: "mypath", |
||||
}) |
||||
require.Error(t, err) |
||||
|
||||
case "wrong path": |
||||
err := m.Authenticate(&Request{ |
||||
User: "testuser", |
||||
Pass: "testpass", |
||||
IP: net.ParseIP("127.1.1.1"), |
||||
Action: conf.AuthActionPublish, |
||||
Path: "wrong", |
||||
}) |
||||
require.Error(t, err) |
||||
} |
||||
}) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func TestAuthInternalRTSPDigest(t *testing.T) { |
||||
m := Manager{ |
||||
Method: conf.AuthMethodInternal, |
||||
InternalUsers: []conf.AuthInternalUser{ |
||||
{ |
||||
User: "myuser", |
||||
Pass: "mypass", |
||||
IPs: conf.IPNetworks{mustParseCIDR("127.1.1.1/32")}, |
||||
Permissions: []conf.AuthInternalUserPermission{ |
||||
{ |
||||
Action: conf.AuthActionPublish, |
||||
Path: "mypath", |
||||
}, |
||||
}, |
||||
}, |
||||
}, |
||||
HTTPAddress: "", |
||||
RTSPAuthMethods: []headers.AuthMethod{headers.AuthDigestMD5}, |
||||
} |
||||
|
||||
u, err := base.ParseURL("rtsp://127.0.0.1:8554/mypath") |
||||
require.NoError(t, err) |
||||
|
||||
s, err := auth.NewSender( |
||||
auth.GenerateWWWAuthenticate([]headers.AuthMethod{headers.AuthDigestMD5}, "IPCAM", "mynonce"), |
||||
"myuser", |
||||
"mypass", |
||||
) |
||||
require.NoError(t, err) |
||||
|
||||
req := &base.Request{ |
||||
Method: "ANNOUNCE", |
||||
URL: u, |
||||
} |
||||
|
||||
s.AddAuthorization(req) |
||||
|
||||
err = m.Authenticate(&Request{ |
||||
IP: net.ParseIP("127.1.1.1"), |
||||
Action: conf.AuthActionPublish, |
||||
Path: "mypath", |
||||
RTSPRequest: req, |
||||
RTSPNonce: "mynonce", |
||||
}) |
||||
require.NoError(t, err) |
||||
} |
||||
|
||||
func TestAuthHTTP(t *testing.T) { |
||||
for _, outcome := range []string{"ok", "fail"} { |
||||
t.Run(outcome, func(t *testing.T) { |
||||
m := Manager{ |
||||
Method: conf.AuthMethodHTTP, |
||||
HTTPAddress: "http://127.0.0.1:9120/auth", |
||||
RTSPAuthMethods: nil, |
||||
} |
||||
|
||||
au := &testHTTPAuthenticator{} |
||||
au.initialize(t, "rtsp", "publish") |
||||
defer au.close() |
||||
|
||||
if outcome == "ok" { |
||||
err := m.Authenticate(&Request{ |
||||
User: "testpublisher", |
||||
Pass: "testpass", |
||||
IP: net.ParseIP("127.0.0.1"), |
||||
Action: conf.AuthActionPublish, |
||||
Path: "teststream", |
||||
Protocol: ProtocolRTSP, |
||||
Query: "param=value", |
||||
}) |
||||
require.NoError(t, err) |
||||
} else { |
||||
err := m.Authenticate(&Request{ |
||||
User: "invalid", |
||||
Pass: "testpass", |
||||
IP: net.ParseIP("127.0.0.1"), |
||||
Action: conf.AuthActionPublish, |
||||
Path: "teststream", |
||||
Protocol: ProtocolRTSP, |
||||
Query: "param=value", |
||||
}) |
||||
require.Error(t, err) |
||||
} |
||||
}) |
||||
} |
||||
} |
||||
|
||||
func TestAuthHTTPExclude(t *testing.T) { |
||||
m := Manager{ |
||||
Method: conf.AuthMethodHTTP, |
||||
HTTPAddress: "http://not-to-be-used:9120/auth", |
||||
HTTPExclude: []conf.AuthInternalUserPermission{{ |
||||
Action: conf.AuthActionPublish, |
||||
}}, |
||||
RTSPAuthMethods: nil, |
||||
} |
||||
|
||||
err := m.Authenticate(&Request{ |
||||
User: "", |
||||
Pass: "", |
||||
IP: net.ParseIP("127.0.0.1"), |
||||
Action: conf.AuthActionPublish, |
||||
Path: "teststream", |
||||
Protocol: ProtocolRTSP, |
||||
Query: "param=value", |
||||
}) |
||||
require.NoError(t, err) |
||||
} |
||||
@@ -1,52 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
) |
||||
|
||||
// AuthAction is an authentication action. |
||||
type AuthAction string |
||||
|
||||
// auth actions |
||||
const ( |
||||
AuthActionPublish AuthAction = "publish" |
||||
AuthActionRead AuthAction = "read" |
||||
AuthActionPlayback AuthAction = "playback" |
||||
AuthActionAPI AuthAction = "api" |
||||
AuthActionMetrics AuthAction = "metrics" |
||||
AuthActionPprof AuthAction = "pprof" |
||||
) |
||||
|
||||
// MarshalJSON implements json.Marshaler. |
||||
func (d AuthAction) MarshalJSON() ([]byte, error) { |
||||
return json.Marshal(string(d)) |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler. |
||||
func (d *AuthAction) UnmarshalJSON(b []byte) error { |
||||
var in string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
switch in { |
||||
case string(AuthActionPublish), |
||||
string(AuthActionRead), |
||||
string(AuthActionPlayback), |
||||
string(AuthActionAPI), |
||||
string(AuthActionMetrics), |
||||
string(AuthActionPprof): |
||||
*d = AuthAction(in) |
||||
|
||||
default: |
||||
return fmt.Errorf("invalid auth action: '%s'", in) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler. |
||||
func (d *AuthAction) UnmarshalEnv(_ string, v string) error { |
||||
return d.UnmarshalJSON([]byte(`"` + v + `"`)) |
||||
} |
||||
@@ -1,15 +0,0 @@
|
||||
package conf |
||||
|
||||
// AuthInternalUserPermission is a permission of a user. |
||||
type AuthInternalUserPermission struct { |
||||
Action AuthAction `json:"action"` |
||||
Path string `json:"path"` |
||||
} |
||||
|
||||
// AuthInternalUser is a user. |
||||
type AuthInternalUser struct { |
||||
User Credential `json:"user"` |
||||
Pass Credential `json:"pass"` |
||||
IPs IPNetworks `json:"ips"` |
||||
Permissions []AuthInternalUserPermission `json:"permissions"` |
||||
} |
||||
@@ -1,63 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
) |
||||
|
||||
// AuthMethod is an authentication method. |
||||
type AuthMethod int |
||||
|
||||
// authentication methods. |
||||
const ( |
||||
AuthMethodInternal AuthMethod = iota |
||||
AuthMethodHTTP |
||||
AuthMethodJWT |
||||
) |
||||
|
||||
// MarshalJSON implements json.Marshaler. |
||||
func (d AuthMethod) MarshalJSON() ([]byte, error) { |
||||
var out string |
||||
|
||||
switch d { |
||||
case AuthMethodInternal: |
||||
out = "internal" |
||||
|
||||
case AuthMethodHTTP: |
||||
out = "http" |
||||
|
||||
default: |
||||
out = "jwt" |
||||
} |
||||
|
||||
return json.Marshal(out) |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler. |
||||
func (d *AuthMethod) UnmarshalJSON(b []byte) error { |
||||
var in string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
switch in { |
||||
case "internal": |
||||
*d = AuthMethodInternal |
||||
|
||||
case "http": |
||||
*d = AuthMethodHTTP |
||||
|
||||
case "jwt": |
||||
*d = AuthMethodJWT |
||||
|
||||
default: |
||||
return fmt.Errorf("invalid authMethod: '%s'", in) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler. |
||||
func (d *AuthMethod) UnmarshalEnv(_ string, v string) error { |
||||
return d.UnmarshalJSON([]byte(`"` + v + `"`)) |
||||
} |
||||
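A quick hypothetical round trip through the methods above (a fragment in the same package, not part of the original file):

var m AuthMethod
_ = json.Unmarshal([]byte(`"jwt"`), &m) // m == AuthMethodJWT

out, _ := json.Marshal(m) // out == []byte(`"jwt"`)

var bad AuthMethod
err := json.Unmarshal([]byte(`"ldap"`), &bad) // err: invalid authMethod: 'ldap'

_, _ = out, err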
@@ -1,734 +0,0 @@
|
||||
// Package conf contains the struct that holds the configuration of the software. |
||||
package conf |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/json" |
||||
"errors" |
||||
"fmt" |
||||
"net" |
||||
"os" |
||||
"reflect" |
||||
"sort" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gohlslib" |
||||
"github.com/bluenviron/gortsplib/v4" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/headers" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/conf/decrypt" |
||||
"github.com/bluenviron/mediamtx/internal/conf/env" |
||||
"github.com/bluenviron/mediamtx/internal/conf/yaml" |
||||
"github.com/bluenviron/mediamtx/internal/logger" |
||||
) |
||||
|
||||
// ErrPathNotFound is returned when a path is not found. |
||||
var ErrPathNotFound = errors.New("path not found") |
||||
|
||||
func sortedKeys(paths map[string]*OptionalPath) []string { |
||||
ret := make([]string, len(paths)) |
||||
i := 0 |
||||
for name := range paths { |
||||
ret[i] = name |
||||
i++ |
||||
} |
||||
sort.Strings(ret) |
||||
return ret |
||||
} |
||||
|
||||
func firstThatExists(paths []string) string { |
||||
for _, pa := range paths { |
||||
_, err := os.Stat(pa) |
||||
if err == nil { |
||||
return pa |
||||
} |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
func contains(list []headers.AuthMethod, item headers.AuthMethod) bool { |
||||
for _, i := range list { |
||||
if i == item { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
func copyStructFields(dest interface{}, source interface{}) { |
||||
rvsource := reflect.ValueOf(source).Elem() |
||||
rvdest := reflect.ValueOf(dest) |
||||
nf := rvsource.NumField() |
||||
var zero reflect.Value |
||||
|
||||
for i := 0; i < nf; i++ { |
||||
fnew := rvsource.Field(i) |
||||
f := rvdest.Elem().FieldByName(rvsource.Type().Field(i).Name) |
||||
if f == zero { |
||||
continue |
||||
} |
||||
|
||||
if fnew.Kind() == reflect.Pointer { |
||||
if !fnew.IsNil() { |
||||
if f.Kind() == reflect.Ptr { |
||||
f.Set(fnew) |
||||
} else { |
||||
f.Set(fnew.Elem()) |
||||
} |
||||
} |
||||
} else { |
||||
f.Set(fnew) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func mustParseCIDR(v string) net.IPNet { |
||||
_, ne, err := net.ParseCIDR(v) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
if ipv4 := ne.IP.To4(); ipv4 != nil { |
||||
return net.IPNet{IP: ipv4, Mask: ne.Mask[len(ne.Mask)-4 : len(ne.Mask)]} |
||||
} |
||||
return *ne |
||||
} |
||||
|
||||
func credentialIsNotEmpty(c *Credential) bool { |
||||
return c != nil && *c != "" |
||||
} |
||||
|
||||
func ipNetworkIsNotEmpty(i *IPNetworks) bool { |
||||
return i != nil && len(*i) != 0 |
||||
} |
||||
|
||||
func anyPathHasDeprecatedCredentials(paths map[string]*OptionalPath) bool { |
||||
for _, pa := range paths { |
||||
if pa != nil { |
||||
rva := reflect.ValueOf(pa.Values).Elem() |
||||
if credentialIsNotEmpty(rva.FieldByName("PublishUser").Interface().(*Credential)) || |
||||
credentialIsNotEmpty(rva.FieldByName("PublishPass").Interface().(*Credential)) || |
||||
ipNetworkIsNotEmpty(rva.FieldByName("PublishIPs").Interface().(*IPNetworks)) || |
||||
credentialIsNotEmpty(rva.FieldByName("ReadUser").Interface().(*Credential)) || |
||||
credentialIsNotEmpty(rva.FieldByName("ReadPass").Interface().(*Credential)) || |
||||
ipNetworkIsNotEmpty(rva.FieldByName("ReadIPs").Interface().(*IPNetworks)) { |
||||
return true |
||||
} |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// Conf is a configuration.
|
||||
type Conf struct { |
||||
// General
|
||||
LogLevel LogLevel `json:"logLevel"` |
||||
LogDestinations LogDestinations `json:"logDestinations"` |
||||
LogFile string `json:"logFile"` |
||||
ReadTimeout StringDuration `json:"readTimeout"` |
||||
WriteTimeout StringDuration `json:"writeTimeout"` |
||||
ReadBufferCount *int `json:"readBufferCount,omitempty"` // deprecated
|
||||
WriteQueueSize int `json:"writeQueueSize"` |
||||
UDPMaxPayloadSize int `json:"udpMaxPayloadSize"` |
||||
Metrics bool `json:"metrics"` |
||||
MetricsAddress string `json:"metricsAddress"` |
||||
PPROF bool `json:"pprof"` |
||||
PPROFAddress string `json:"pprofAddress"` |
||||
RunOnConnect string `json:"runOnConnect"` |
||||
RunOnConnectRestart bool `json:"runOnConnectRestart"` |
||||
RunOnDisconnect string `json:"runOnDisconnect"` |
||||
|
||||
// Authentication
|
||||
AuthMethod AuthMethod `json:"authMethod"` |
||||
AuthInternalUsers []AuthInternalUser `json:"authInternalUsers"` |
||||
AuthHTTPAddress string `json:"authHTTPAddress"` |
||||
ExternalAuthenticationURL *string `json:"externalAuthenticationURL,omitempty"` // deprecated
|
||||
AuthHTTPExclude []AuthInternalUserPermission `json:"authHTTPExclude"` |
||||
AuthJWTJWKS string `json:"authJWTJWKS"` |
||||
|
||||
// API
|
||||
API bool `json:"api"` |
||||
APIAddress string `json:"apiAddress"` |
||||
|
||||
// Playback
|
||||
Playback bool `json:"playback"` |
||||
PlaybackAddress string `json:"playbackAddress"` |
||||
|
||||
// RTSP server
|
||||
RTSP bool `json:"rtsp"` |
||||
RTSPDisable *bool `json:"rtspDisable,omitempty"` // deprecated
|
||||
Protocols Protocols `json:"protocols"` |
||||
Encryption Encryption `json:"encryption"` |
||||
RTSPAddress string `json:"rtspAddress"` |
||||
RTSPSAddress string `json:"rtspsAddress"` |
||||
RTPAddress string `json:"rtpAddress"` |
||||
RTCPAddress string `json:"rtcpAddress"` |
||||
MulticastIPRange string `json:"multicastIPRange"` |
||||
MulticastRTPPort int `json:"multicastRTPPort"` |
||||
MulticastRTCPPort int `json:"multicastRTCPPort"` |
||||
ServerKey string `json:"serverKey"` |
||||
ServerCert string `json:"serverCert"` |
||||
AuthMethods *RTSPAuthMethods `json:"authMethods,omitempty"` // deprecated
|
||||
RTSPAuthMethods RTSPAuthMethods `json:"rtspAuthMethods"` |
||||
|
||||
// RTMP server
|
||||
RTMP bool `json:"rtmp"` |
||||
RTMPDisable *bool `json:"rtmpDisable,omitempty"` // deprecated
|
||||
RTMPAddress string `json:"rtmpAddress"` |
||||
RTMPEncryption Encryption `json:"rtmpEncryption"` |
||||
RTMPSAddress string `json:"rtmpsAddress"` |
||||
RTMPServerKey string `json:"rtmpServerKey"` |
||||
RTMPServerCert string `json:"rtmpServerCert"` |
||||
|
||||
// HLS server
|
||||
HLS bool `json:"hls"` |
||||
HLSDisable *bool `json:"hlsDisable,omitempty"` // deprecated
|
||||
HLSAddress string `json:"hlsAddress"` |
||||
HLSEncryption bool `json:"hlsEncryption"` |
||||
HLSServerKey string `json:"hlsServerKey"` |
||||
HLSServerCert string `json:"hlsServerCert"` |
||||
HLSAlwaysRemux bool `json:"hlsAlwaysRemux"` |
||||
HLSVariant HLSVariant `json:"hlsVariant"` |
||||
HLSSegmentCount int `json:"hlsSegmentCount"` |
||||
HLSSegmentDuration StringDuration `json:"hlsSegmentDuration"` |
||||
HLSPartDuration StringDuration `json:"hlsPartDuration"` |
||||
HLSSegmentMaxSize StringSize `json:"hlsSegmentMaxSize"` |
||||
HLSAllowOrigin string `json:"hlsAllowOrigin"` |
||||
HLSTrustedProxies IPNetworks `json:"hlsTrustedProxies"` |
||||
HLSDirectory string `json:"hlsDirectory"` |
||||
|
||||
// WebRTC server
|
||||
WebRTC bool `json:"webrtc"` |
||||
WebRTCDisable *bool `json:"webrtcDisable,omitempty"` // deprecated
|
||||
WebRTCAddress string `json:"webrtcAddress"` |
||||
WebRTCEncryption bool `json:"webrtcEncryption"` |
||||
WebRTCServerKey string `json:"webrtcServerKey"` |
||||
WebRTCServerCert string `json:"webrtcServerCert"` |
||||
WebRTCAllowOrigin string `json:"webrtcAllowOrigin"` |
||||
WebRTCTrustedProxies IPNetworks `json:"webrtcTrustedProxies"` |
||||
WebRTCLocalUDPAddress string `json:"webrtcLocalUDPAddress"` |
||||
WebRTCLocalTCPAddress string `json:"webrtcLocalTCPAddress"` |
||||
WebRTCIPsFromInterfaces bool `json:"webrtcIPsFromInterfaces"` |
||||
WebRTCIPsFromInterfacesList []string `json:"webrtcIPsFromInterfacesList"` |
||||
WebRTCAdditionalHosts []string `json:"webrtcAdditionalHosts"` |
||||
WebRTCICEServers2 []WebRTCICEServer `json:"webrtcICEServers2"` |
||||
WebRTCICEUDPMuxAddress *string `json:"webrtcICEUDPMuxAddress,omitempty"` // deprecated
|
||||
WebRTCICETCPMuxAddress *string `json:"webrtcICETCPMuxAddress,omitempty"` // deprecated
|
||||
WebRTCICEHostNAT1To1IPs *[]string `json:"webrtcICEHostNAT1To1IPs,omitempty"` // deprecated
|
||||
WebRTCICEServers *[]string `json:"webrtcICEServers,omitempty"` // deprecated
|
||||
|
||||
// SRT server
|
||||
SRT bool `json:"srt"` |
||||
SRTAddress string `json:"srtAddress"` |
||||
|
||||
// Record (deprecated)
|
||||
Record *bool `json:"record,omitempty"` // deprecated
|
||||
RecordPath *string `json:"recordPath,omitempty"` // deprecated
|
||||
RecordFormat *RecordFormat `json:"recordFormat,omitempty"` // deprecated
|
||||
RecordPartDuration *StringDuration `json:"recordPartDuration,omitempty"` // deprecated
|
||||
RecordSegmentDuration *StringDuration `json:"recordSegmentDuration,omitempty"` // deprecated
|
||||
RecordDeleteAfter *StringDuration `json:"recordDeleteAfter,omitempty"` // deprecated
|
||||
|
||||
// Path defaults
|
||||
PathDefaults Path `json:"pathDefaults"` |
||||
|
||||
// Paths
|
||||
OptionalPaths map[string]*OptionalPath `json:"paths"` |
||||
Paths map[string]*Path `json:"-"` // filled by Validate()
|
||||
} |
||||
|
||||
func (conf *Conf) setDefaults() { |
||||
// General
|
||||
conf.LogLevel = LogLevel(logger.Info) |
||||
conf.LogDestinations = LogDestinations{logger.DestinationStdout} |
||||
conf.LogFile = "mediamtx.log" |
||||
conf.ReadTimeout = 10 * StringDuration(time.Second) |
||||
conf.WriteTimeout = 10 * StringDuration(time.Second) |
||||
conf.WriteQueueSize = 512 |
||||
conf.UDPMaxPayloadSize = 1472 |
||||
conf.MetricsAddress = ":9998" |
||||
conf.PPROFAddress = ":9999" |
||||
|
||||
// Authentication
|
||||
conf.AuthInternalUsers = []AuthInternalUser{ |
||||
{ |
||||
User: "any", |
||||
Pass: "", |
||||
Permissions: []AuthInternalUserPermission{ |
||||
{ |
||||
Action: AuthActionPublish, |
||||
}, |
||||
{ |
||||
Action: AuthActionRead, |
||||
}, |
||||
{ |
||||
Action: AuthActionPlayback, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
User: "any", |
||||
Pass: "", |
||||
IPs: IPNetworks{mustParseCIDR("127.0.0.1/32"), mustParseCIDR("::1/128")}, |
||||
Permissions: []AuthInternalUserPermission{ |
||||
{ |
||||
Action: AuthActionAPI, |
||||
}, |
||||
{ |
||||
Action: AuthActionMetrics, |
||||
}, |
||||
{ |
||||
Action: AuthActionPprof, |
||||
}, |
||||
}, |
||||
}, |
||||
} |
||||
conf.AuthHTTPExclude = []AuthInternalUserPermission{ |
||||
{ |
||||
Action: AuthActionAPI, |
||||
}, |
||||
{ |
||||
Action: AuthActionMetrics, |
||||
}, |
||||
{ |
||||
Action: AuthActionPprof, |
||||
}, |
||||
} |
||||
|
||||
// API
|
||||
conf.APIAddress = ":9997" |
||||
|
||||
// Playback server
|
||||
conf.PlaybackAddress = ":9996" |
||||
|
||||
// RTSP server
|
||||
conf.RTSP = true |
||||
conf.Protocols = Protocols{ |
||||
Protocol(gortsplib.TransportUDP): {}, |
||||
Protocol(gortsplib.TransportUDPMulticast): {}, |
||||
Protocol(gortsplib.TransportTCP): {}, |
||||
} |
||||
conf.RTSPAddress = ":8554" |
||||
conf.RTSPSAddress = ":8322" |
||||
conf.RTPAddress = ":8000" |
||||
conf.RTCPAddress = ":8001" |
||||
conf.MulticastIPRange = "224.1.0.0/16" |
||||
conf.MulticastRTPPort = 8002 |
||||
conf.MulticastRTCPPort = 8003 |
||||
conf.ServerKey = "server.key" |
||||
conf.ServerCert = "server.crt" |
||||
conf.RTSPAuthMethods = RTSPAuthMethods{headers.AuthBasic} |
||||
|
||||
// RTMP server
|
||||
conf.RTMP = true |
||||
conf.RTMPAddress = ":1935" |
||||
conf.RTMPSAddress = ":1936" |
||||
conf.RTMPServerKey = "server.key" |
||||
conf.RTMPServerCert = "server.crt" |
||||
|
||||
// HLS
|
||||
conf.HLS = true |
||||
conf.HLSAddress = ":8888" |
||||
conf.HLSServerKey = "server.key" |
||||
conf.HLSServerCert = "server.crt" |
||||
conf.HLSVariant = HLSVariant(gohlslib.MuxerVariantLowLatency) |
||||
conf.HLSSegmentCount = 7 |
||||
conf.HLSSegmentDuration = 1 * StringDuration(time.Second) |
||||
conf.HLSPartDuration = 200 * StringDuration(time.Millisecond) |
||||
conf.HLSSegmentMaxSize = 50 * 1024 * 1024 |
||||
conf.HLSAllowOrigin = "*" |
||||
|
||||
// WebRTC server
|
||||
conf.WebRTC = true |
||||
conf.WebRTCAddress = ":8889" |
||||
conf.WebRTCServerKey = "server.key" |
||||
conf.WebRTCServerCert = "server.crt" |
||||
conf.WebRTCAllowOrigin = "*" |
||||
conf.WebRTCLocalUDPAddress = ":8189" |
||||
conf.WebRTCIPsFromInterfaces = true |
||||
conf.WebRTCIPsFromInterfacesList = []string{} |
||||
conf.WebRTCAdditionalHosts = []string{} |
||||
conf.WebRTCICEServers2 = []WebRTCICEServer{} |
||||
|
||||
// SRT server
|
||||
conf.SRT = true |
||||
conf.SRTAddress = ":8890" |
||||
|
||||
conf.PathDefaults.setDefaults() |
||||
} |
||||
|
||||
// Load loads a Conf.
|
||||
func Load(fpath string, defaultConfPaths []string) (*Conf, string, error) { |
||||
conf := &Conf{} |
||||
|
||||
fpath, err := conf.loadFromFile(fpath, defaultConfPaths) |
||||
if err != nil { |
||||
return nil, "", err |
||||
} |
||||
|
||||
err = env.Load("RTSP", conf) // legacy prefix
|
||||
if err != nil { |
||||
return nil, "", err |
||||
} |
||||
|
||||
err = env.Load("MTX", conf) |
||||
if err != nil { |
||||
return nil, "", err |
||||
} |
||||
|
||||
err = conf.Validate() |
||||
if err != nil { |
||||
return nil, "", err |
||||
} |
||||
|
||||
return conf, fpath, nil |
||||
} |
||||
|
||||
func (conf *Conf) loadFromFile(fpath string, defaultConfPaths []string) (string, error) { |
||||
if fpath == "" { |
||||
fpath = firstThatExists(defaultConfPaths) |
||||
|
||||
// when the configuration file is not explicitly set,
|
||||
// it is optional.
|
||||
if fpath == "" { |
||||
conf.setDefaults() |
||||
return "", nil |
||||
} |
||||
} |
||||
|
||||
byts, err := os.ReadFile(fpath) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
if key, ok := os.LookupEnv("RTSP_CONFKEY"); ok { // legacy format
|
||||
byts, err = decrypt.Decrypt(key, byts) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
} |
||||
|
||||
if key, ok := os.LookupEnv("MTX_CONFKEY"); ok { |
||||
byts, err = decrypt.Decrypt(key, byts) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
} |
||||
|
||||
err = yaml.Load(byts, conf) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
return fpath, nil |
||||
} |
||||
|
||||
// Clone clones the configuration.
|
||||
func (conf Conf) Clone() *Conf { |
||||
enc, err := json.Marshal(conf) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
var dest Conf |
||||
err = json.Unmarshal(enc, &dest) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
return &dest |
||||
} |
||||
|
||||
// Validate checks the configuration for errors.
|
||||
func (conf *Conf) Validate() error { |
||||
// General
|
||||
|
||||
if conf.ReadBufferCount != nil { |
||||
conf.WriteQueueSize = *conf.ReadBufferCount |
||||
} |
||||
if (conf.WriteQueueSize & (conf.WriteQueueSize - 1)) != 0 { |
||||
return fmt.Errorf("'writeQueueSize' must be a power of two") |
||||
} |
||||
if conf.UDPMaxPayloadSize > 1472 { |
||||
return fmt.Errorf("'udpMaxPayloadSize' must be less than 1472") |
||||
} |
||||
|
||||
// Authentication
|
||||
|
||||
if conf.ExternalAuthenticationURL != nil { |
||||
conf.AuthMethod = AuthMethodHTTP |
||||
conf.AuthHTTPAddress = *conf.ExternalAuthenticationURL |
||||
} |
||||
if conf.AuthHTTPAddress != "" && |
||||
!strings.HasPrefix(conf.AuthHTTPAddress, "http://") && |
||||
!strings.HasPrefix(conf.AuthHTTPAddress, "https://") { |
||||
return fmt.Errorf("'externalAuthenticationURL' must be a HTTP URL") |
||||
} |
||||
if conf.AuthJWTJWKS != "" && |
||||
!strings.HasPrefix(conf.AuthJWTJWKS, "http://") && |
||||
!strings.HasPrefix(conf.AuthJWTJWKS, "https://") { |
||||
return fmt.Errorf("'authJWTJWKS' must be a HTTP URL") |
||||
} |
||||
deprecatedCredentialsMode := false |
||||
if credentialIsNotEmpty(conf.PathDefaults.PublishUser) || |
||||
credentialIsNotEmpty(conf.PathDefaults.PublishPass) || |
||||
ipNetworkIsNotEmpty(conf.PathDefaults.PublishIPs) || |
||||
credentialIsNotEmpty(conf.PathDefaults.ReadUser) || |
||||
credentialIsNotEmpty(conf.PathDefaults.ReadPass) || |
||||
ipNetworkIsNotEmpty(conf.PathDefaults.ReadIPs) || |
||||
anyPathHasDeprecatedCredentials(conf.OptionalPaths) { |
||||
conf.AuthInternalUsers = []AuthInternalUser{ |
||||
{ |
||||
User: "any", |
||||
Pass: "", |
||||
Permissions: []AuthInternalUserPermission{ |
||||
{ |
||||
Action: AuthActionPlayback, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
User: "any", |
||||
Pass: "", |
||||
IPs: IPNetworks{mustParseCIDR("127.0.0.1/32"), mustParseCIDR("::1/128")}, |
||||
Permissions: []AuthInternalUserPermission{ |
||||
{ |
||||
Action: AuthActionAPI, |
||||
}, |
||||
{ |
||||
Action: AuthActionMetrics, |
||||
}, |
||||
{ |
||||
Action: AuthActionPprof, |
||||
}, |
||||
}, |
||||
}, |
||||
} |
||||
deprecatedCredentialsMode = true |
||||
} |
||||
switch conf.AuthMethod { |
||||
case AuthMethodHTTP: |
||||
if conf.AuthHTTPAddress == "" { |
||||
return fmt.Errorf("'authHTTPAddress' is empty") |
||||
} |
||||
|
||||
case AuthMethodJWT: |
||||
if conf.AuthJWTJWKS == "" { |
||||
return fmt.Errorf("'authJWTJWKS' is empty") |
||||
} |
||||
} |
||||
|
||||
// RTSP
|
||||
|
||||
if conf.RTSPDisable != nil { |
||||
conf.RTSP = !*conf.RTSPDisable |
||||
} |
||||
if conf.Encryption == EncryptionStrict { |
||||
if _, ok := conf.Protocols[Protocol(gortsplib.TransportUDP)]; ok { |
||||
return fmt.Errorf("strict encryption can't be used with the UDP transport protocol") |
||||
} |
||||
if _, ok := conf.Protocols[Protocol(gortsplib.TransportUDPMulticast)]; ok { |
||||
return fmt.Errorf("strict encryption can't be used with the UDP-multicast transport protocol") |
||||
} |
||||
} |
||||
if conf.AuthMethods != nil { |
||||
conf.RTSPAuthMethods = *conf.AuthMethods |
||||
} |
||||
if contains(conf.RTSPAuthMethods, headers.AuthDigestMD5) { |
||||
if conf.AuthMethod != AuthMethodInternal { |
||||
return fmt.Errorf("when RTSP digest is enabled, the only supported auth method is 'internal'") |
||||
} |
||||
for _, user := range conf.AuthInternalUsers { |
||||
if user.User.IsHashed() || user.Pass.IsHashed() { |
||||
return fmt.Errorf("when RTSP digest is enabled, hashed credentials cannot be used") |
||||
} |
||||
} |
||||
} |
||||
|
||||
// RTMP
|
||||
|
||||
if conf.RTMPDisable != nil { |
||||
conf.RTMP = !*conf.RTMPDisable |
||||
} |
||||
|
||||
// HLS
|
||||
|
||||
if conf.HLSDisable != nil { |
||||
conf.HLS = !*conf.HLSDisable |
||||
} |
||||
|
||||
// WebRTC
|
||||
|
||||
if conf.WebRTCDisable != nil { |
||||
conf.WebRTC = !*conf.WebRTCDisable |
||||
} |
||||
if conf.WebRTCICEUDPMuxAddress != nil { |
||||
conf.WebRTCLocalUDPAddress = *conf.WebRTCICEUDPMuxAddress |
||||
} |
||||
if conf.WebRTCICETCPMuxAddress != nil { |
||||
conf.WebRTCLocalTCPAddress = *conf.WebRTCICETCPMuxAddress |
||||
} |
||||
if conf.WebRTCICEHostNAT1To1IPs != nil { |
||||
conf.WebRTCAdditionalHosts = *conf.WebRTCICEHostNAT1To1IPs |
||||
} |
||||
if conf.WebRTCICEServers != nil { |
||||
for _, server := range *conf.WebRTCICEServers { |
||||
parts := strings.Split(server, ":") |
||||
if len(parts) == 5 { |
||||
conf.WebRTCICEServers2 = append(conf.WebRTCICEServers2, WebRTCICEServer{ |
||||
URL: parts[0] + ":" + parts[3] + ":" + parts[4], |
||||
Username: parts[1], |
||||
Password: parts[2], |
||||
}) |
||||
} else { |
||||
conf.WebRTCICEServers2 = append(conf.WebRTCICEServers2, WebRTCICEServer{ |
||||
URL: server, |
||||
}) |
||||
} |
||||
} |
||||
} |
||||
for _, server := range conf.WebRTCICEServers2 { |
||||
if !strings.HasPrefix(server.URL, "stun:") && |
||||
!strings.HasPrefix(server.URL, "turn:") && |
||||
!strings.HasPrefix(server.URL, "turns:") { |
||||
return fmt.Errorf("invalid ICE server: '%s'", server.URL) |
||||
} |
||||
} |
||||
if conf.WebRTCLocalUDPAddress == "" && |
||||
conf.WebRTCLocalTCPAddress == "" && |
||||
len(conf.WebRTCICEServers2) == 0 { |
||||
return fmt.Errorf("at least one between 'webrtcLocalUDPAddress'," + |
||||
" 'webrtcLocalTCPAddress' or 'webrtcICEServers2' must be filled") |
||||
} |
||||
if conf.WebRTCLocalUDPAddress != "" || conf.WebRTCLocalTCPAddress != "" { |
||||
if !conf.WebRTCIPsFromInterfaces && len(conf.WebRTCAdditionalHosts) == 0 { |
||||
return fmt.Errorf("at least one between 'webrtcIPsFromInterfaces' or 'webrtcAdditionalHosts' must be filled") |
||||
} |
||||
} |
||||
|
||||
// Record (deprecated)
|
||||
if conf.Record != nil { |
||||
conf.PathDefaults.Record = *conf.Record |
||||
} |
||||
if conf.RecordPath != nil { |
||||
conf.PathDefaults.RecordPath = *conf.RecordPath |
||||
} |
||||
if conf.RecordFormat != nil { |
||||
conf.PathDefaults.RecordFormat = *conf.RecordFormat |
||||
} |
||||
if conf.RecordPartDuration != nil { |
||||
conf.PathDefaults.RecordPartDuration = *conf.RecordPartDuration |
||||
} |
||||
if conf.RecordSegmentDuration != nil { |
||||
conf.PathDefaults.RecordSegmentDuration = *conf.RecordSegmentDuration |
||||
} |
||||
if conf.RecordDeleteAfter != nil { |
||||
conf.PathDefaults.RecordDeleteAfter = *conf.RecordDeleteAfter |
||||
} |
||||
|
||||
hasAllOthers := false |
||||
for name := range conf.OptionalPaths { |
||||
if name == "all" || name == "all_others" || name == "~^.*$" { |
||||
if hasAllOthers { |
||||
return fmt.Errorf("all_others, all and '~^.*$' are aliases") |
||||
} |
||||
hasAllOthers = true |
||||
} |
||||
} |
||||
|
||||
conf.Paths = make(map[string]*Path) |
||||
|
||||
for _, name := range sortedKeys(conf.OptionalPaths) { |
||||
optional := conf.OptionalPaths[name] |
||||
if optional == nil { |
||||
optional = &OptionalPath{ |
||||
Values: newOptionalPathValues(), |
||||
} |
||||
} |
||||
|
||||
pconf := newPath(&conf.PathDefaults, optional) |
||||
conf.Paths[name] = pconf |
||||
|
||||
err := pconf.validate(conf, name, deprecatedCredentialsMode) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (conf *Conf) UnmarshalJSON(b []byte) error { |
||||
conf.setDefaults() |
||||
|
||||
type alias Conf |
||||
d := json.NewDecoder(bytes.NewReader(b)) |
||||
d.DisallowUnknownFields() |
||||
return d.Decode((*alias)(conf)) |
||||
} |
||||
|
||||
// Global returns the global part of Conf.
|
||||
func (conf *Conf) Global() *Global { |
||||
g := &Global{ |
||||
Values: newGlobalValues(), |
||||
} |
||||
copyStructFields(g.Values, conf) |
||||
return g |
||||
} |
||||
|
||||
// PatchGlobal patches the global configuration.
|
||||
func (conf *Conf) PatchGlobal(optional *OptionalGlobal) { |
||||
copyStructFields(conf, optional.Values) |
||||
} |
||||
|
||||
// PatchPathDefaults patches path default settings.
|
||||
func (conf *Conf) PatchPathDefaults(optional *OptionalPath) { |
||||
copyStructFields(&conf.PathDefaults, optional.Values) |
||||
} |
||||
|
||||
// AddPath adds a path.
|
||||
func (conf *Conf) AddPath(name string, p *OptionalPath) error { |
||||
if _, ok := conf.OptionalPaths[name]; ok { |
||||
return fmt.Errorf("path already exists") |
||||
} |
||||
|
||||
if conf.OptionalPaths == nil { |
||||
conf.OptionalPaths = make(map[string]*OptionalPath) |
||||
} |
||||
|
||||
conf.OptionalPaths[name] = p |
||||
return nil |
||||
} |
||||
|
||||
// PatchPath patches a path.
|
||||
func (conf *Conf) PatchPath(name string, optional2 *OptionalPath) error { |
||||
optional, ok := conf.OptionalPaths[name] |
||||
if !ok { |
||||
return ErrPathNotFound |
||||
} |
||||
|
||||
copyStructFields(optional.Values, optional2.Values) |
||||
return nil |
||||
} |
||||
|
||||
// ReplacePath replaces a path.
|
||||
func (conf *Conf) ReplacePath(name string, optional2 *OptionalPath) error { |
||||
_, ok := conf.OptionalPaths[name] |
||||
if !ok { |
||||
return ErrPathNotFound |
||||
} |
||||
|
||||
conf.OptionalPaths[name] = optional2 |
||||
return nil |
||||
} |
||||
|
||||
// RemovePath removes a path.
|
||||
func (conf *Conf) RemovePath(name string) error { |
||||
if _, ok := conf.OptionalPaths[name]; !ok { |
||||
return ErrPathNotFound |
||||
} |
||||
|
||||
delete(conf.OptionalPaths, name) |
||||
return nil |
||||
} |
||||
@@ -1,327 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"crypto/rand" |
||||
"encoding/base64" |
||||
"io" |
||||
"os" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4" |
||||
"github.com/stretchr/testify/require" |
||||
"golang.org/x/crypto/nacl/secretbox" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/logger" |
||||
) |
||||
|
||||
func createTempFile(byts []byte) (string, error) { |
||||
tmpf, err := os.CreateTemp(os.TempDir(), "rtsp-") |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
defer tmpf.Close() |
||||
|
||||
_, err = tmpf.Write(byts) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
return tmpf.Name(), nil |
||||
} |
||||
|
||||
func TestConfFromFile(t *testing.T) { |
||||
func() { |
||||
tmpf, err := createTempFile([]byte("logLevel: debug\n" + |
||||
"paths:\n" + |
||||
" cam1:\n" + |
||||
" runOnDemandStartTimeout: 5s\n")) |
||||
require.NoError(t, err) |
||||
defer os.Remove(tmpf) |
||||
|
||||
conf, confPath, err := Load(tmpf, nil) |
||||
require.NoError(t, err) |
||||
require.Equal(t, tmpf, confPath) |
||||
|
||||
require.Equal(t, LogLevel(logger.Debug), conf.LogLevel) |
||||
|
||||
pa, ok := conf.Paths["cam1"] |
||||
require.Equal(t, true, ok) |
||||
require.Equal(t, &Path{ |
||||
Name: "cam1", |
||||
Source: "publisher", |
||||
SourceOnDemandStartTimeout: 10 * StringDuration(time.Second), |
||||
SourceOnDemandCloseAfter: 10 * StringDuration(time.Second), |
||||
Playback: true, |
||||
RecordPath: "./recordings/%path/%Y-%m-%d_%H-%M-%S-%f", |
||||
RecordFormat: RecordFormatFMP4, |
||||
RecordPartDuration: 100000000, |
||||
RecordSegmentDuration: 3600000000000, |
||||
RecordDeleteAfter: 86400000000000, |
||||
OverridePublisher: true, |
||||
RPICameraWidth: 1920, |
||||
RPICameraHeight: 1080, |
||||
RPICameraContrast: 1, |
||||
RPICameraSaturation: 1, |
||||
RPICameraSharpness: 1, |
||||
RPICameraExposure: "normal", |
||||
RPICameraAWB: "auto", |
||||
RPICameraAWBGains: []float64{0, 0}, |
||||
RPICameraDenoise: "off", |
||||
RPICameraMetering: "centre", |
||||
RPICameraFPS: 30, |
||||
RPICameraIDRPeriod: 60, |
||||
RPICameraBitrate: 1000000, |
||||
RPICameraProfile: "main", |
||||
RPICameraLevel: "4.1", |
||||
RPICameraAfMode: "continuous", |
||||
RPICameraAfRange: "normal", |
||||
RPICameraAfSpeed: "normal", |
||||
RPICameraTextOverlay: "%Y-%m-%d %H:%M:%S - MediaMTX", |
||||
RunOnDemandStartTimeout: 5 * StringDuration(time.Second), |
||||
RunOnDemandCloseAfter: 10 * StringDuration(time.Second), |
||||
}, pa) |
||||
}() |
||||
|
||||
func() { |
||||
tmpf, err := createTempFile([]byte(``)) |
||||
require.NoError(t, err) |
||||
defer os.Remove(tmpf) |
||||
|
||||
_, _, err = Load(tmpf, nil) |
||||
require.NoError(t, err) |
||||
}() |
||||
|
||||
func() { |
||||
tmpf, err := createTempFile([]byte(`paths:`)) |
||||
require.NoError(t, err) |
||||
defer os.Remove(tmpf) |
||||
|
||||
_, _, err = Load(tmpf, nil) |
||||
require.NoError(t, err) |
||||
}() |
||||
|
||||
func() { |
||||
tmpf, err := createTempFile([]byte( |
||||
"paths:\n" + |
||||
" mypath:\n")) |
||||
require.NoError(t, err) |
||||
defer os.Remove(tmpf) |
||||
|
||||
_, _, err = Load(tmpf, nil) |
||||
require.NoError(t, err) |
||||
}() |
||||
} |
||||
|
||||
func TestConfFromFileAndEnv(t *testing.T) { |
||||
// global parameter
|
||||
t.Setenv("RTSP_PROTOCOLS", "tcp") |
||||
|
||||
// path parameter
|
||||
t.Setenv("MTX_PATHS_CAM1_SOURCE", "rtsp://testing") |
||||
|
||||
// deprecated global parameter
|
||||
t.Setenv("MTX_RTMPDISABLE", "yes") |
||||
|
||||
// deprecated path parameter
|
||||
t.Setenv("MTX_PATHS_CAM2_DISABLEPUBLISHEROVERRIDE", "yes") |
||||
|
||||
tmpf, err := createTempFile([]byte("{}")) |
||||
require.NoError(t, err) |
||||
defer os.Remove(tmpf) |
||||
|
||||
conf, confPath, err := Load(tmpf, nil) |
||||
require.NoError(t, err) |
||||
require.Equal(t, tmpf, confPath) |
||||
|
||||
require.Equal(t, Protocols{Protocol(gortsplib.TransportTCP): {}}, conf.Protocols) |
||||
require.Equal(t, false, conf.RTMP) |
||||
|
||||
pa, ok := conf.Paths["cam1"] |
||||
require.Equal(t, true, ok) |
||||
require.Equal(t, "rtsp://testing", pa.Source) |
||||
|
||||
pa, ok = conf.Paths["cam2"] |
||||
require.Equal(t, true, ok) |
||||
require.Equal(t, false, pa.OverridePublisher) |
||||
} |
||||
|
||||
func TestConfFromEnvOnly(t *testing.T) { |
||||
t.Setenv("MTX_PATHS_CAM1_SOURCE", "rtsp://testing") |
||||
|
||||
conf, confPath, err := Load("", nil) |
||||
require.NoError(t, err) |
||||
require.Equal(t, "", confPath) |
||||
|
||||
pa, ok := conf.Paths["cam1"] |
||||
require.Equal(t, true, ok) |
||||
require.Equal(t, "rtsp://testing", pa.Source) |
||||
} |
||||
|
||||
func TestConfEncryption(t *testing.T) { |
||||
key := "testing123testin" |
||||
plaintext := "paths:\n" + |
||||
" path1:\n" + |
||||
" path2:\n" |
||||
|
||||
encryptedConf := func() string { |
||||
var secretKey [32]byte |
||||
copy(secretKey[:], key) |
||||
|
||||
var nonce [24]byte |
||||
_, err := io.ReadFull(rand.Reader, nonce[:]) |
||||
require.NoError(t, err) |
||||
|
||||
encrypted := secretbox.Seal(nonce[:], []byte(plaintext), &nonce, &secretKey) |
||||
return base64.StdEncoding.EncodeToString(encrypted) |
||||
}() |
||||
|
||||
t.Setenv("RTSP_CONFKEY", key) |
||||
|
||||
tmpf, err := createTempFile([]byte(encryptedConf)) |
||||
require.NoError(t, err) |
||||
defer os.Remove(tmpf) |
||||
|
||||
conf, confPath, err := Load(tmpf, nil) |
||||
require.NoError(t, err) |
||||
require.Equal(t, tmpf, confPath) |
||||
|
||||
_, ok := conf.Paths["path1"] |
||||
require.Equal(t, true, ok) |
||||
|
||||
_, ok = conf.Paths["path2"] |
||||
require.Equal(t, true, ok) |
||||
} |
||||
|
||||
func TestConfErrors(t *testing.T) { |
||||
for _, ca := range []struct { |
||||
name string |
||||
conf string |
||||
err string |
||||
}{ |
||||
{ |
||||
"non existent parameter 1", |
||||
`invalid: param`, |
||||
"json: unknown field \"invalid\"", |
||||
}, |
||||
{ |
||||
"invalid writeQueueSize", |
||||
"writeQueueSize: 1001\n", |
||||
"'writeQueueSize' must be a power of two", |
||||
}, |
||||
{ |
||||
"invalid udpMaxPayloadSize", |
||||
"udpMaxPayloadSize: 5000\n", |
||||
"'udpMaxPayloadSize' must be less than 1472", |
||||
}, |
||||
{ |
||||
"invalid strict encryption 1", |
||||
"encryption: strict\n" + |
||||
"protocols: [udp]\n", |
||||
"strict encryption can't be used with the UDP transport protocol", |
||||
}, |
||||
{ |
||||
"invalid strict encryption 2", |
||||
"encryption: strict\n" + |
||||
"protocols: [multicast]\n", |
||||
"strict encryption can't be used with the UDP-multicast transport protocol", |
||||
}, |
||||
{ |
||||
"invalid ICE server", |
||||
"webrtcICEServers: [testing]\n", |
||||
"invalid ICE server: 'testing'", |
||||
}, |
||||
{ |
||||
"non existent parameter 2", |
||||
"paths:\n" + |
||||
" mypath:\n" + |
||||
" invalid: parameter\n", |
||||
"json: unknown field \"invalid\"", |
||||
}, |
||||
{ |
||||
"invalid path name", |
||||
"paths:\n" + |
||||
" '':\n" + |
||||
" source: publisher\n", |
||||
"invalid path name '': cannot be empty", |
||||
}, |
||||
{ |
||||
"double raspberry pi camera", |
||||
"paths:\n" + |
||||
" cam1:\n" + |
||||
" source: rpiCamera\n" + |
||||
" cam2:\n" + |
||||
" source: rpiCamera\n", |
||||
"'rpiCamera' with same camera ID 0 is used as source in two paths, 'cam2' and 'cam1'", |
||||
}, |
||||
{ |
||||
"invalid srt publish passphrase", |
||||
"paths:\n" + |
||||
" mypath:\n" + |
||||
" srtPublishPassphrase: a\n", |
||||
`invalid 'srtPublishPassphrase': must be between 10 and 79 characters`, |
||||
}, |
||||
{ |
||||
"invalid srt read passphrase", |
||||
"paths:\n" + |
||||
" mypath:\n" + |
||||
" srtReadPassphrase: a\n", |
||||
`invalid 'readRTPassphrase': must be between 10 and 79 characters`, |
||||
}, |
||||
{ |
||||
"all_others aliases", |
||||
"paths:\n" + |
||||
" all:\n" + |
||||
" all_others:\n", |
||||
`all_others, all and '~^.*$' are aliases`, |
||||
}, |
||||
{ |
||||
"all_others aliases", |
||||
"paths:\n" + |
||||
" all_others:\n" + |
||||
" ~^.*$:\n", |
||||
`all_others, all and '~^.*$' are aliases`, |
||||
}, |
||||
} { |
||||
t.Run(ca.name, func(t *testing.T) { |
||||
tmpf, err := createTempFile([]byte(ca.conf)) |
||||
require.NoError(t, err) |
||||
defer os.Remove(tmpf) |
||||
|
||||
_, _, err = Load(tmpf, nil) |
||||
require.EqualError(t, err, ca.err) |
||||
}) |
||||
} |
||||
} |
||||
|
||||
func TestSampleConfFile(t *testing.T) { |
||||
func() { |
||||
conf1, confPath1, err := Load("../../mediamtx.yml", nil) |
||||
require.NoError(t, err) |
||||
require.Equal(t, "../../mediamtx.yml", confPath1) |
||||
conf1.Paths = make(map[string]*Path) |
||||
conf1.OptionalPaths = nil |
||||
|
||||
conf2, confPath2, err := Load("", nil) |
||||
require.NoError(t, err) |
||||
require.Equal(t, "", confPath2) |
||||
|
||||
require.Equal(t, conf1, conf2) |
||||
}() |
||||
|
||||
func() { |
||||
conf1, confPath1, err := Load("../../mediamtx.yml", nil) |
||||
require.NoError(t, err) |
||||
require.Equal(t, "../../mediamtx.yml", confPath1) |
||||
|
||||
tmpf, err := createTempFile([]byte("paths:\n all_others:")) |
||||
require.NoError(t, err) |
||||
defer os.Remove(tmpf) |
||||
|
||||
conf2, confPath2, err := Load(tmpf, nil) |
||||
require.NoError(t, err) |
||||
require.Equal(t, tmpf, confPath2) |
||||
|
||||
require.Equal(t, conf1.Paths, conf2.Paths) |
||||
}() |
||||
} |
||||
@@ -1,108 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"crypto/sha256" |
||||
"encoding/base64" |
||||
"encoding/json" |
||||
"fmt" |
||||
"regexp" |
||||
"strings" |
||||
|
||||
"github.com/matthewhartstonge/argon2" |
||||
) |
||||
|
||||
var ( |
||||
rePlainCredential = regexp.MustCompile(`^[a-zA-Z0-9!\$\(\)\*\+\.;<=>\[\]\^_\-\{\}@#&]+$`) |
||||
reBase64 = regexp.MustCompile(`^sha256:[a-zA-Z0-9\+/=]+$`) |
||||
) |
||||
|
||||
const plainCredentialSupportedChars = "A-Z,0-9,!,$,(,),*,+,.,;,<,=,>,[,],^,_,-,\",\",@,#,&" |
||||
|
||||
func sha256Base64(in string) string { |
||||
h := sha256.New() |
||||
h.Write([]byte(in)) |
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil)) |
||||
} |
||||
|
||||
// Credential is a parameter that is used as username or password.
|
||||
type Credential string |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (d Credential) MarshalJSON() ([]byte, error) { |
||||
return json.Marshal(string(d)) |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (d *Credential) UnmarshalJSON(b []byte) error { |
||||
var in string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
*d = Credential(in) |
||||
|
||||
return d.validate() |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (d *Credential) UnmarshalEnv(_ string, v string) error { |
||||
return d.UnmarshalJSON([]byte(`"` + v + `"`)) |
||||
} |
||||
|
||||
// IsSha256 returns true if the credential is a sha256 hash.
|
||||
func (d Credential) IsSha256() bool { |
||||
return strings.HasPrefix(string(d), "sha256:") |
||||
} |
||||
|
||||
// IsArgon2 returns true if the credential is an argon2 hash.
|
||||
func (d Credential) IsArgon2() bool { |
||||
return strings.HasPrefix(string(d), "argon2:") |
||||
} |
||||
|
||||
// IsHashed returns true if the credential is a sha256 or argon2 hash.
|
||||
func (d Credential) IsHashed() bool { |
||||
return d.IsSha256() || d.IsArgon2() |
||||
} |
||||
|
||||
// Check returns true if the given value matches the credential.
|
||||
func (d Credential) Check(guess string) bool { |
||||
if d.IsSha256() { |
||||
return string(d)[len("sha256:"):] == sha256Base64(guess) |
||||
} |
||||
|
||||
if d.IsArgon2() { |
||||
// TODO: remove matthewhartstonge/argon2 when this PR gets merged into mainline Go:
|
||||
// https://go-review.googlesource.com/c/crypto/+/502515
|
||||
ok, err := argon2.VerifyEncoded([]byte(guess), []byte(string(d)[len("argon2:"):])) |
||||
return ok && err == nil |
||||
} |
||||
|
||||
if d != "" { |
||||
return string(d) == guess |
||||
} |
||||
|
||||
return true |
||||
} |
||||
|
||||
func (d Credential) validate() error { |
||||
if d != "" { |
||||
switch { |
||||
case d.IsSha256(): |
||||
if !reBase64.MatchString(string(d)) { |
||||
return fmt.Errorf("credential contains unsupported characters, sha256 hash must be base64 encoded") |
||||
} |
||||
case d.IsArgon2(): |
||||
// TODO: remove matthewhartstonge/argon2 when this PR gets merged into mainline Go:
|
||||
// https://go-review.googlesource.com/c/crypto/+/502515
|
||||
_, err := argon2.Decode([]byte(string(d)[len("argon2:"):])) |
||||
if err != nil { |
||||
return fmt.Errorf("invalid argon2 hash: %w", err) |
||||
} |
||||
default: |
||||
if !rePlainCredential.MatchString(string(d)) { |
||||
return fmt.Errorf("credential contains unsupported characters. Supported are: %s", plainCredentialSupportedChars) |
||||
} |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
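Note: a minimal sketch (not part of the repository) of how a credential in the "sha256:<base64>" form checked above can be generated; the program name and argument handling are assumptions.

// hashcredential.go - hypothetical helper, not in the repository.
package main

import (
    "crypto/sha256"
    "encoding/base64"
    "fmt"
    "os"
)

func main() {
    if len(os.Args) != 2 {
        fmt.Fprintln(os.Stderr, "usage: hashcredential <plaintext>")
        os.Exit(1)
    }

    // produce the "sha256:<base64>" form accepted by Credential.Check().
    sum := sha256.Sum256([]byte(os.Args[1]))
    fmt.Println("sha256:" + base64.StdEncoding.EncodeToString(sum[:]))
}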
@@ -1,151 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/stretchr/testify/assert" |
||||
) |
||||
|
||||
func TestCredential(t *testing.T) { |
||||
t.Run("MarshalJSON", func(t *testing.T) { |
||||
cred := Credential("password") |
||||
expectedJSON := []byte(`"password"`) |
||||
actualJSON, err := cred.MarshalJSON() |
||||
assert.NoError(t, err) |
||||
assert.Equal(t, expectedJSON, actualJSON) |
||||
}) |
||||
|
||||
t.Run("UnmarshalJSON", func(t *testing.T) { |
||||
expectedCred := Credential("password") |
||||
jsonData := []byte(`"password"`) |
||||
var actualCred Credential |
||||
err := actualCred.UnmarshalJSON(jsonData) |
||||
assert.NoError(t, err) |
||||
assert.Equal(t, expectedCred, actualCred) |
||||
}) |
||||
|
||||
t.Run("UnmarshalEnv", func(t *testing.T) { |
||||
cred := Credential("") |
||||
err := cred.UnmarshalEnv("", "password") |
||||
assert.NoError(t, err) |
||||
assert.Equal(t, Credential("password"), cred) |
||||
}) |
||||
|
||||
t.Run("IsSha256", func(t *testing.T) { |
||||
cred := Credential("") |
||||
assert.False(t, cred.IsSha256()) |
||||
assert.False(t, cred.IsHashed()) |
||||
|
||||
cred = "sha256:j1tsRqDEw9xvq/D7/9tMx6Jh/jMhk3UfjwIB2f1zgMo=" |
||||
assert.True(t, cred.IsSha256()) |
||||
assert.True(t, cred.IsHashed()) |
||||
|
||||
cred = "argon2:$argon2id$v=19$m=65536,t=1," + |
||||
"p=4$WXJGqwIB2qd+pRmxMOw9Dg$X4gvR0ZB2DtQoN8vOnJPR2SeFdUhH9TyVzfV98sfWeE" |
||||
assert.False(t, cred.IsSha256()) |
||||
assert.True(t, cred.IsHashed()) |
||||
}) |
||||
|
||||
t.Run("IsArgon2", func(t *testing.T) { |
||||
cred := Credential("") |
||||
assert.False(t, cred.IsArgon2()) |
||||
assert.False(t, cred.IsHashed()) |
||||
|
||||
cred = "sha256:j1tsRqDEw9xvq/D7/9tMx6Jh/jMhk3UfjwIB2f1zgMo=" |
||||
assert.False(t, cred.IsArgon2()) |
||||
assert.True(t, cred.IsHashed()) |
||||
|
||||
cred = "argon2:$argon2id$v=19$m=65536,t=1," + |
||||
"p=4$WXJGqwIB2qd+pRmxMOw9Dg$X4gvR0ZB2DtQoN8vOnJPR2SeFdUhH9TyVzfV98sfWeE" |
||||
assert.True(t, cred.IsArgon2()) |
||||
assert.True(t, cred.IsHashed()) |
||||
}) |
||||
|
||||
t.Run("Check-plain", func(t *testing.T) { |
||||
cred := Credential("password") |
||||
assert.True(t, cred.Check("password")) |
||||
assert.False(t, cred.Check("wrongpassword")) |
||||
}) |
||||
|
||||
t.Run("Check-sha256", func(t *testing.T) { |
||||
cred := Credential("password") |
||||
assert.True(t, cred.Check("password")) |
||||
assert.False(t, cred.Check("wrongpassword")) |
||||
}) |
||||
|
||||
t.Run("Check-sha256", func(t *testing.T) { |
||||
cred := Credential("sha256:rl3rgi4NcZkpAEcacZnQ2VuOfJ0FxAqCRaKB/SwdZoQ=") |
||||
assert.True(t, cred.Check("testuser")) |
||||
assert.False(t, cred.Check("notestuser")) |
||||
}) |
||||
|
||||
t.Run("Check-argon2", func(t *testing.T) { |
||||
cred := Credential("argon2:$argon2id$v=19$m=4096,t=3," + |
||||
"p=1$MTIzNDU2Nzg$Ux/LWeTgJQPyfMMJo1myR64+o8rALHoPmlE1i/TR+58") |
||||
assert.True(t, cred.Check("testuser")) |
||||
assert.False(t, cred.Check("notestuser")) |
||||
}) |
||||
|
||||
t.Run("validate", func(t *testing.T) { |
||||
tests := []struct { |
||||
name string |
||||
cred Credential |
||||
wantErr bool |
||||
}{ |
||||
{ |
||||
name: "Empty credential", |
||||
cred: Credential(""), |
||||
wantErr: false, |
||||
}, |
||||
{ |
||||
name: "Valid plain credential", |
||||
cred: Credential("validPlain123"), |
||||
wantErr: false, |
||||
}, |
||||
{ |
||||
name: "Invalid plain credential", |
||||
cred: Credential("invalid/Plain"), |
||||
wantErr: true, |
||||
}, |
||||
{ |
||||
name: "Valid sha256 credential", |
||||
cred: Credential("sha256:validBase64EncodedHash=="), |
||||
wantErr: false, |
||||
}, |
||||
{ |
||||
name: "Invalid sha256 credential", |
||||
cred: Credential("sha256:inval*idBase64"), |
||||
wantErr: true, |
||||
}, |
||||
{ |
||||
name: "Valid Argon2 credential", |
||||
cred: Credential("argon2:$argon2id$v=19$m=4096," + |
||||
"t=3,p=1$MTIzNDU2Nzg$zarsL19s86GzUWlAkvwt4gJBFuU/A9CVuCjNI4fksow"), |
||||
wantErr: false, |
||||
}, |
||||
{ |
||||
name: "Invalid Argon2 credential", |
||||
cred: Credential("argon2:invalid"), |
||||
wantErr: true, |
||||
}, |
||||
{ |
||||
name: "Invalid Argon2 credential", |
||||
// testing argon2d errors, because it's not supported
|
||||
cred: Credential("$argon2d$v=19$m=4096,t=3," + |
||||
"p=1$MTIzNDU2Nzg$Xqyd4R7LzXvvAEHaVU12+Nzf5OkHoYcwIEIIYJUDpz0"), |
||||
wantErr: true, |
||||
}, |
||||
} |
||||
|
||||
for _, tt := range tests { |
||||
t.Run(tt.name, func(t *testing.T) { |
||||
err := tt.cred.validate() |
||||
if tt.wantErr { |
||||
assert.Error(t, err) |
||||
} else { |
||||
assert.NoError(t, err) |
||||
} |
||||
}) |
||||
} |
||||
}) |
||||
} |
||||
@@ -1,29 +0,0 @@
// Package decrypt contains the Decrypt function.
package decrypt

import (
    "encoding/base64"
    "fmt"

    "golang.org/x/crypto/nacl/secretbox"
)

// Decrypt decrypts the configuration with the given key.
func Decrypt(key string, byts []byte) ([]byte, error) {
    enc, err := base64.StdEncoding.DecodeString(string(byts))
    if err != nil {
        return nil, err
    }

    var secretKey [32]byte
    copy(secretKey[:], key)

    var decryptNonce [24]byte
    copy(decryptNonce[:], enc[:24])
    decrypted, ok := secretbox.Open(nil, enc[24:], &decryptNonce, &secretKey)
    if !ok {
        return nil, fmt.Errorf("decryption error")
    }

    return decrypted, nil
}
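Note: the encryption counterpart is not part of this file; below is a minimal sketch, mirroring TestConfEncryption above, that produces the format Decrypt expects (base64 of a 24-byte nonce followed by the secretbox ciphertext) using a key later supplied via MTX_CONFKEY. The file names used here are assumptions.

// encryptconf.go - hypothetical helper, not in the repository.
package main

import (
    "crypto/rand"
    "encoding/base64"
    "fmt"
    "io"
    "os"

    "golang.org/x/crypto/nacl/secretbox"
)

func main() {
    // the same key must later be passed to the server through MTX_CONFKEY.
    var secretKey [32]byte
    copy(secretKey[:], os.Getenv("MTX_CONFKEY"))

    plaintext, err := os.ReadFile("mediamtx.yml")
    if err != nil {
        panic(err)
    }

    // the nonce is stored as the first 24 bytes of the sealed output,
    // which is where Decrypt reads it from.
    var nonce [24]byte
    if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
        panic(err)
    }

    encrypted := secretbox.Seal(nonce[:], plaintext, &nonce, &secretKey)
    fmt.Println(base64.StdEncoding.EncodeToString(encrypted))
}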
@@ -1,63 +0,0 @@
package conf

import (
    "encoding/json"
    "fmt"
)

// Encryption is the encryption parameter.
type Encryption int

// values.
const (
    EncryptionNo Encryption = iota
    EncryptionOptional
    EncryptionStrict
)

// MarshalJSON implements json.Marshaler.
func (d Encryption) MarshalJSON() ([]byte, error) {
    var out string

    switch d {
    case EncryptionNo:
        out = "no"

    case EncryptionOptional:
        out = "optional"

    default:
        out = "strict"
    }

    return json.Marshal(out)
}

// UnmarshalJSON implements json.Unmarshaler.
func (d *Encryption) UnmarshalJSON(b []byte) error {
    var in string
    if err := json.Unmarshal(b, &in); err != nil {
        return err
    }

    switch in {
    case "no", "false":
        *d = EncryptionNo

    case "optional":
        *d = EncryptionOptional

    case "strict", "yes", "true":
        *d = EncryptionStrict

    default:
        return fmt.Errorf("invalid encryption: '%s'", in)
    }

    return nil
}

// UnmarshalEnv implements env.Unmarshaler.
func (d *Encryption) UnmarshalEnv(_ string, v string) error {
    return d.UnmarshalJSON([]byte(`"` + v + `"`))
}
@@ -1,258 +0,0 @@
|
||||
// Package env contains a function to load configuration from environment.
|
||||
package env |
||||
|
||||
import ( |
||||
"fmt" |
||||
"os" |
||||
"reflect" |
||||
"strconv" |
||||
"strings" |
||||
) |
||||
|
||||
// Unmarshaler can be implemented to override the unmarshaling process.
|
||||
type Unmarshaler interface { |
||||
UnmarshalEnv(prefix string, v string) error |
||||
} |
||||
|
||||
func envHasAtLeastAKeyWithPrefix(env map[string]string, prefix string) bool { |
||||
for key := range env { |
||||
if strings.HasPrefix(key, prefix) { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
func loadEnvInternal(env map[string]string, prefix string, prv reflect.Value) error { |
||||
if prv.Kind() != reflect.Pointer { |
||||
return loadEnvInternal(env, prefix, prv.Addr()) |
||||
} |
||||
|
||||
rt := prv.Type().Elem() |
||||
|
||||
if i, ok := prv.Interface().(Unmarshaler); ok { |
||||
if ev, ok := env[prefix]; ok { |
||||
if prv.IsNil() { |
||||
prv.Set(reflect.New(rt)) |
||||
i = prv.Interface().(Unmarshaler) |
||||
} |
||||
err := i.UnmarshalEnv(prefix, ev) |
||||
if err != nil { |
||||
return fmt.Errorf("%s: %w", prefix, err) |
||||
} |
||||
} else if envHasAtLeastAKeyWithPrefix(env, prefix) { |
||||
err := i.UnmarshalEnv(prefix, "") |
||||
if err != nil { |
||||
return fmt.Errorf("%s: %w", prefix, err) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
switch rt { |
||||
case reflect.TypeOf(""): |
||||
if ev, ok := env[prefix]; ok { |
||||
if prv.IsNil() { |
||||
prv.Set(reflect.New(rt)) |
||||
} |
||||
prv.Elem().SetString(ev) |
||||
} |
||||
return nil |
||||
|
||||
case reflect.TypeOf(int(0)): |
||||
if ev, ok := env[prefix]; ok { |
||||
if prv.IsNil() { |
||||
prv.Set(reflect.New(rt)) |
||||
} |
||||
iv, err := strconv.ParseInt(ev, 10, 32) |
||||
if err != nil { |
||||
return fmt.Errorf("%s: %w", prefix, err) |
||||
} |
||||
prv.Elem().SetInt(iv) |
||||
} |
||||
return nil |
||||
|
||||
case reflect.TypeOf(uint64(0)): |
||||
if ev, ok := env[prefix]; ok { |
||||
if prv.IsNil() { |
||||
prv.Set(reflect.New(rt)) |
||||
} |
||||
iv, err := strconv.ParseUint(ev, 10, 32) |
||||
if err != nil { |
||||
return fmt.Errorf("%s: %w", prefix, err) |
||||
} |
||||
prv.Elem().SetUint(iv) |
||||
} |
||||
return nil |
||||
|
||||
case reflect.TypeOf(float64(0)): |
||||
if ev, ok := env[prefix]; ok { |
||||
if prv.IsNil() { |
||||
prv.Set(reflect.New(rt)) |
||||
} |
||||
iv, err := strconv.ParseFloat(ev, 64) |
||||
if err != nil { |
||||
return fmt.Errorf("%s: %w", prefix, err) |
||||
} |
||||
prv.Elem().SetFloat(iv) |
||||
} |
||||
return nil |
||||
|
||||
case reflect.TypeOf(bool(false)): |
||||
if ev, ok := env[prefix]; ok { |
||||
if prv.IsNil() { |
||||
prv.Set(reflect.New(rt)) |
||||
} |
||||
switch strings.ToLower(ev) { |
||||
case "yes", "true": |
||||
prv.Elem().SetBool(true) |
||||
|
||||
case "no", "false": |
||||
prv.Elem().SetBool(false) |
||||
|
||||
default: |
||||
return fmt.Errorf("%s: invalid value '%s'", prefix, ev) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
switch rt.Kind() { |
||||
case reflect.Map: |
||||
for k := range env { |
||||
if !strings.HasPrefix(k, prefix+"_") { |
||||
continue |
||||
} |
||||
|
||||
mapKey := strings.Split(k[len(prefix+"_"):], "_")[0] |
||||
if len(mapKey) == 0 { |
||||
continue |
||||
} |
||||
|
||||
// allow only keys in uppercase
|
||||
if mapKey != strings.ToUpper(mapKey) { |
||||
continue |
||||
} |
||||
|
||||
// initialize only if there's at least one key
|
||||
if prv.Elem().IsNil() { |
||||
prv.Elem().Set(reflect.MakeMap(rt)) |
||||
} |
||||
|
||||
mapKeyLower := strings.ToLower(mapKey) |
||||
nv := prv.Elem().MapIndex(reflect.ValueOf(mapKeyLower)) |
||||
zero := reflect.Value{} |
||||
if nv == zero { |
||||
nv = reflect.New(rt.Elem().Elem()) |
||||
prv.Elem().SetMapIndex(reflect.ValueOf(mapKeyLower), nv) |
||||
} |
||||
|
||||
err := loadEnvInternal(env, prefix+"_"+mapKey, nv.Elem()) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
return nil |
||||
|
||||
case reflect.Struct: |
||||
flen := rt.NumField() |
||||
for i := 0; i < flen; i++ { |
||||
f := rt.Field(i) |
||||
jsonTag := f.Tag.Get("json") |
||||
|
||||
// load only public fields
|
||||
if jsonTag == "-" { |
||||
continue |
||||
} |
||||
|
||||
err := loadEnvInternal(env, prefix+"_"+ |
||||
strings.ToUpper(strings.TrimSuffix(jsonTag, ",omitempty")), prv.Elem().Field(i)) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
return nil |
||||
|
||||
case reflect.Slice: |
||||
switch { |
||||
case rt.Elem() == reflect.TypeOf(""): |
||||
if ev, ok := env[prefix]; ok { |
||||
if ev == "" { |
||||
prv.Elem().Set(reflect.MakeSlice(prv.Elem().Type(), 0, 0)) |
||||
} else { |
||||
if prv.IsNil() { |
||||
prv.Set(reflect.New(rt)) |
||||
} |
||||
prv.Elem().Set(reflect.ValueOf(strings.Split(ev, ","))) |
||||
} |
||||
} |
||||
return nil |
||||
|
||||
case rt.Elem() == reflect.TypeOf(float64(0)): |
||||
if ev, ok := env[prefix]; ok { |
||||
if ev == "" { |
||||
prv.Elem().Set(reflect.MakeSlice(prv.Elem().Type(), 0, 0)) |
||||
} else { |
||||
if prv.IsNil() { |
||||
prv.Set(reflect.New(rt)) |
||||
} |
||||
|
||||
raw := strings.Split(ev, ",") |
||||
vals := make([]float64, len(raw)) |
||||
|
||||
for i, v := range raw { |
||||
tmp, err := strconv.ParseFloat(v, 64) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
vals[i] = tmp |
||||
} |
||||
|
||||
prv.Elem().Set(reflect.ValueOf(vals)) |
||||
} |
||||
} |
||||
return nil |
||||
|
||||
case rt.Elem().Kind() == reflect.Struct: |
||||
if ev, ok := env[prefix]; ok && ev == "" { // special case: empty list
|
||||
prv.Elem().Set(reflect.MakeSlice(prv.Elem().Type(), 0, 0)) |
||||
} else { |
||||
for i := 0; ; i++ { |
||||
itemPrefix := prefix + "_" + strconv.FormatInt(int64(i), 10) |
||||
if !envHasAtLeastAKeyWithPrefix(env, itemPrefix) { |
||||
break |
||||
} |
||||
|
||||
elem := reflect.New(rt.Elem()) |
||||
err := loadEnvInternal(env, itemPrefix, elem.Elem()) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
prv.Elem().Set(reflect.Append(prv.Elem(), elem.Elem())) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
} |
||||
|
||||
return fmt.Errorf("unsupported type: %v", rt) |
||||
} |
||||
|
||||
func loadWithEnv(env map[string]string, prefix string, v interface{}) error { |
||||
return loadEnvInternal(env, prefix, reflect.ValueOf(v).Elem()) |
||||
} |
||||
|
||||
func envToMap() map[string]string { |
||||
env := make(map[string]string) |
||||
for _, kv := range os.Environ() { |
||||
tmp := strings.SplitN(kv, "=", 2) |
||||
env[tmp[0]] = tmp[1] |
||||
} |
||||
return env |
||||
} |
||||
|
||||
// Load loads the configuration from the environment.
|
||||
func Load(prefix string, v interface{}) error { |
||||
return loadWithEnv(envToMap(), prefix, v) |
||||
} |
||||
@@ -1,205 +0,0 @@
|
||||
package env |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
func stringPtr(v string) *string { |
||||
return &v |
||||
} |
||||
|
||||
func intPtr(v int) *int { |
||||
return &v |
||||
} |
||||
|
||||
func uint64Ptr(v uint64) *uint64 { |
||||
return &v |
||||
} |
||||
|
||||
func boolPtr(v bool) *bool { |
||||
return &v |
||||
} |
||||
|
||||
func float64Ptr(v float64) *float64 { |
||||
return &v |
||||
} |
||||
|
||||
func durationPtr(v time.Duration) *time.Duration { |
||||
return &v |
||||
} |
||||
|
||||
type myDuration time.Duration |
||||
|
||||
func (d *myDuration) UnmarshalJSON(b []byte) error { |
||||
var in string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
du, err := time.ParseDuration(in) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
*d = myDuration(du) |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (d *myDuration) UnmarshalEnv(_ string, v string) error { |
||||
return d.UnmarshalJSON([]byte(`"` + v + `"`)) |
||||
} |
||||
|
||||
type subStruct struct { |
||||
MyParam int `json:"myParam"` |
||||
} |
||||
|
||||
type mapEntry struct { |
||||
MyValue string `json:"myValue"` |
||||
MyStruct subStruct `json:"myStruct"` |
||||
} |
||||
|
||||
type mySubStruct struct { |
||||
URL string `json:"url"` |
||||
Username string `json:"username"` |
||||
Password string `json:"password"` |
||||
MyInt2 int `json:"myInt2"` |
||||
} |
||||
|
||||
type testStruct struct { |
||||
MyString string `json:"myString"` |
||||
MyStringOpt *string `json:"myStringOpt"` |
||||
MyInt int `json:"myInt"` |
||||
MyIntOpt *int `json:"myIntOpt"` |
||||
MyUint uint64 `json:"myUint"` |
||||
MyUintOpt *uint64 `json:"myUintOpt"` |
||||
MyFloat float64 `json:"myFloat"` |
||||
MyFloatOpt *float64 `json:"myFloatOpt"` |
||||
MyBool bool `json:"myBool"` |
||||
MyBoolOpt *bool `json:"myBoolOpt"` |
||||
MyDuration myDuration `json:"myDuration"` |
||||
MyDurationOpt *myDuration `json:"myDurationOpt"` |
||||
MyDurationOptUnset *myDuration `json:"myDurationOptUnset"` |
||||
MyMap map[string]*mapEntry `json:"myMap"` |
||||
MySliceFloat []float64 `json:"mySliceFloat"` |
||||
MySliceString []string `json:"mySliceString"` |
||||
MySliceStringEmpty []string `json:"mySliceStringEmpty"` |
||||
MySliceStringOpt *[]string `json:"mySliceStringOpt"` |
||||
MySliceStringOptUnset *[]string `json:"mySliceStringOptUnset"` |
||||
MySliceSubStruct []mySubStruct `json:"mySliceSubStruct"` |
||||
MySliceSubStructEmpty []mySubStruct `json:"mySliceSubStructEmpty"` |
||||
MySliceSubStructOpt *[]mySubStruct `json:"mySliceSubStructOpt"` |
||||
MySliceSubStructOptUnset *[]mySubStruct `json:"mySliceSubStructOptUnset"` |
||||
Unset *bool `json:"unset"` |
||||
} |
||||
|
||||
func TestLoad(t *testing.T) { |
||||
env := map[string]string{ |
||||
"MYPREFIX_MYSTRING": "testcontent", |
||||
"MYPREFIX_MYSTRINGOPT": "testcontent2", |
||||
"MYPREFIX_MYINT": "123", |
||||
"MYPREFIX_MYINTOPT": "456", |
||||
"MYPREFIX_MYUINT": "8910", |
||||
"MYPREFIX_MYUINTOPT": "112313", |
||||
"MYPREFIX_MYFLOAT": "15.2", |
||||
"MYPREFIX_MYFLOATOPT": "16.2", |
||||
"MYPREFIX_MYBOOL": "yes", |
||||
"MYPREFIX_MYBOOLOPT": "false", |
||||
"MYPREFIX_MYDURATION": "22s", |
||||
"MYPREFIX_MYDURATIONOPT": "30s", |
||||
"MYPREFIX_MYMAP_MYKEY": "", |
||||
"MYPREFIX_MYMAP_MYKEY2_MYVALUE": "asd", |
||||
"MYPREFIX_MYMAP_MYKEY2_MYSTRUCT_MYPARAM": "456", |
||||
"MYPREFIX_MYSLICEFLOAT": "0.5,0.5", |
||||
"MYPREFIX_MYSLICESTRING": "val1,val2", |
||||
"MYPREFIX_MYSLICESTRINGEMPTY": "", |
||||
"MYPREFIX_MYSLICESTRINGOPT": "aa", |
||||
"MYPREFIX_MYSLICESUBSTRUCT_0_URL": "url1", |
||||
"MYPREFIX_MYSLICESUBSTRUCT_0_USERNAME": "user1", |
||||
"MYPREFIX_MYSLICESUBSTRUCT_0_PASSWORD": "pass1", |
||||
"MYPREFIX_MYSLICESUBSTRUCT_1_URL": "url2", |
||||
"MYPREFIX_MYSLICESUBSTRUCT_1_PASSWORD": "pass2", |
||||
"MYPREFIX_MYSLICESUBSTRUCTEMPTY": "", |
||||
"MYPREFIX_MYSLICESUBSTRUCTOPT_1_PASSWORD": "pwd", |
||||
} |
||||
|
||||
for key, val := range env { |
||||
t.Setenv(key, val) |
||||
} |
||||
|
||||
var s testStruct |
||||
err := Load("MYPREFIX", &s) |
||||
require.NoError(t, err) |
||||
|
||||
require.Equal(t, testStruct{ |
||||
MyString: "testcontent", |
||||
MyStringOpt: stringPtr("testcontent2"), |
||||
MyInt: 123, |
||||
MyIntOpt: intPtr(456), |
||||
MyUint: 8910, |
||||
MyUintOpt: uint64Ptr(112313), |
||||
MyFloat: 15.2, |
||||
MyFloatOpt: float64Ptr(16.2), |
||||
MyBool: true, |
||||
MyBoolOpt: boolPtr(false), |
||||
MyDuration: 22000000000, |
||||
MyDurationOpt: (*myDuration)(durationPtr(30000000000)), |
||||
MyMap: map[string]*mapEntry{ |
||||
"mykey": { |
||||
MyValue: "", |
||||
MyStruct: subStruct{ |
||||
MyParam: 0, |
||||
}, |
||||
}, |
||||
"mykey2": { |
||||
MyValue: "asd", |
||||
MyStruct: subStruct{ |
||||
MyParam: 456, |
||||
}, |
||||
}, |
||||
}, |
||||
MySliceFloat: []float64{0.5, 0.5}, |
||||
MySliceString: []string{ |
||||
"val1", |
||||
"val2", |
||||
}, |
||||
MySliceStringEmpty: []string{}, |
||||
MySliceStringOpt: &[]string{"aa"}, |
||||
MySliceSubStruct: []mySubStruct{ |
||||
{ |
||||
URL: "url1", |
||||
Username: "user1", |
||||
Password: "pass1", |
||||
}, |
||||
{ |
||||
URL: "url2", |
||||
Username: "", |
||||
Password: "pass2", |
||||
}, |
||||
}, |
||||
MySliceSubStructEmpty: []mySubStruct{}, |
||||
}, s) |
||||
} |
||||
|
||||
func FuzzLoad(f *testing.F) { |
||||
f.Add("MYPREFIX_MYINT", "a") |
||||
f.Add("MYPREFIX_MYUINT", "a") |
||||
f.Add("MYPREFIX_MYFLOAT", "a") |
||||
f.Add("MYPREFIX_MYBOOL", "a") |
||||
f.Add("MYPREFIX_MYSLICESUBSTRUCT_0_MYINT2", "a") |
||||
f.Add("MYPREFIX_MYDURATION", "a") |
||||
f.Add("MYPREFIX_MYDURATION_A", "a") |
||||
|
||||
f.Fuzz(func(_ *testing.T, key string, val string) { |
||||
env := map[string]string{ |
||||
key: val, |
||||
} |
||||
|
||||
var s testStruct |
||||
loadWithEnv(env, "MYPREFIX", &s) //nolint:errcheck
|
||||
}) |
||||
} |
||||
@@ -1,41 +0,0 @@
package conf

import (
    "encoding/json"
    "reflect"
)

var globalValuesType = func() reflect.Type {
    var fields []reflect.StructField
    rt := reflect.TypeOf(Conf{})
    nf := rt.NumField()

    for i := 0; i < nf; i++ {
        f := rt.Field(i)
        j := f.Tag.Get("json")

        if j != "-" && j != "pathDefaults" && j != "paths" {
            fields = append(fields, reflect.StructField{
                Name: f.Name,
                Type: f.Type,
                Tag:  f.Tag,
            })
        }
    }

    return reflect.StructOf(fields)
}()

func newGlobalValues() interface{} {
    return reflect.New(globalValuesType).Interface()
}

// Global is the global part of Conf.
type Global struct {
    Values interface{}
}

// MarshalJSON implements json.Marshaler.
func (p *Global) MarshalJSON() ([]byte, error) {
    return json.Marshal(p.Values)
}
@@ -1,58 +0,0 @@
package conf

import (
    "encoding/json"
    "fmt"

    "github.com/bluenviron/gohlslib"
)

// HLSVariant is the hlsVariant parameter.
type HLSVariant gohlslib.MuxerVariant

// MarshalJSON implements json.Marshaler.
func (d HLSVariant) MarshalJSON() ([]byte, error) {
    var out string

    switch d {
    case HLSVariant(gohlslib.MuxerVariantMPEGTS):
        out = "mpegts"

    case HLSVariant(gohlslib.MuxerVariantFMP4):
        out = "fmp4"

    default:
        out = "lowLatency"
    }

    return json.Marshal(out)
}

// UnmarshalJSON implements json.Unmarshaler.
func (d *HLSVariant) UnmarshalJSON(b []byte) error {
    var in string
    if err := json.Unmarshal(b, &in); err != nil {
        return err
    }

    switch in {
    case "mpegts":
        *d = HLSVariant(gohlslib.MuxerVariantMPEGTS)

    case "fmp4":
        *d = HLSVariant(gohlslib.MuxerVariantFMP4)

    case "lowLatency":
        *d = HLSVariant(gohlslib.MuxerVariantLowLatency)

    default:
        return fmt.Errorf("invalid HLS variant: '%s'", in)
    }

    return nil
}

// UnmarshalEnv implements env.Unmarshaler.
func (d *HLSVariant) UnmarshalEnv(_ string, v string) error {
    return d.UnmarshalJSON([]byte(`"` + v + `"`))
}
@@ -1,84 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"net" |
||||
"sort" |
||||
"strings" |
||||
) |
||||
|
||||
// IPNetworks is a parameter that contains a list of IP networks.
|
||||
type IPNetworks []net.IPNet |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (d IPNetworks) MarshalJSON() ([]byte, error) { |
||||
out := make([]string, len(d)) |
||||
|
||||
for i, v := range d { |
||||
out[i] = v.String() |
||||
} |
||||
|
||||
sort.Strings(out) |
||||
|
||||
return json.Marshal(out) |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (d *IPNetworks) UnmarshalJSON(b []byte) error { |
||||
var in []string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
*d = nil |
||||
|
||||
if len(in) == 0 { |
||||
return nil |
||||
} |
||||
|
||||
for _, t := range in { |
||||
if _, ipnet, err := net.ParseCIDR(t); err == nil { |
||||
if ipv4 := ipnet.IP.To4(); ipv4 != nil { |
||||
*d = append(*d, net.IPNet{IP: ipv4, Mask: ipnet.Mask[len(ipnet.Mask)-4 : len(ipnet.Mask)]}) |
||||
} else { |
||||
*d = append(*d, *ipnet) |
||||
} |
||||
} else if ip := net.ParseIP(t); ip != nil { |
||||
if ipv4 := ip.To4(); ipv4 != nil { |
||||
*d = append(*d, net.IPNet{IP: ipv4, Mask: net.CIDRMask(32, 32)}) |
||||
} else { |
||||
*d = append(*d, net.IPNet{IP: ip, Mask: net.CIDRMask(128, 128)}) |
||||
} |
||||
} else { |
||||
return fmt.Errorf("unable to parse IP/CIDR '%s'", t) |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (d *IPNetworks) UnmarshalEnv(_ string, v string) error { |
||||
byts, _ := json.Marshal(strings.Split(v, ",")) |
||||
return d.UnmarshalJSON(byts) |
||||
} |
||||
|
||||
// ToTrustedProxies converts IPNetworks into a string slice for SetTrustedProxies.
|
||||
func (d *IPNetworks) ToTrustedProxies() []string { |
||||
ret := make([]string, len(*d)) |
||||
for i, entry := range *d { |
||||
ret[i] = entry.String() |
||||
} |
||||
return ret |
||||
} |
||||
|
||||
// Contains checks whether the IP is part of one of the networks.
|
||||
func (d IPNetworks) Contains(ip net.IP) bool { |
||||
for _, network := range d { |
||||
if network.Contains(ip) { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
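A hedged sketch (not part of the diff; the package is internal, so this is illustrative only) of how the parser above treats plain IPs versus CIDR ranges, and how Contains is then used for allow-list checks.

package main

import (
    "fmt"
    "net"

    "github.com/bluenviron/mediamtx/internal/conf"
)

func main() {
    var nets conf.IPNetworks

    // comma-separated value, as it would arrive from an environment variable
    if err := nets.UnmarshalEnv("", "192.168.1.0/24,127.0.0.1"); err != nil {
        panic(err)
    }

    fmt.Println(nets.Contains(net.ParseIP("192.168.1.15"))) // true
    fmt.Println(nets.Contains(net.ParseIP("10.0.0.1")))     // false
}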
@@ -1,88 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"strings" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/logger" |
||||
) |
||||
|
||||
// LogDestinations is the logDestinations parameter.
|
||||
type LogDestinations []logger.Destination |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (d LogDestinations) MarshalJSON() ([]byte, error) { |
||||
out := make([]string, len(d)) |
||||
i := 0 |
||||
|
||||
for _, p := range d { |
||||
var v string |
||||
|
||||
switch p { |
||||
case logger.DestinationStdout: |
||||
v = "stdout" |
||||
|
||||
case logger.DestinationFile: |
||||
v = "file" |
||||
|
||||
default: |
||||
v = "syslog" |
||||
} |
||||
|
||||
out[i] = v |
||||
i++ |
||||
} |
||||
|
||||
return json.Marshal(out) |
||||
} |
||||
|
||||
func (d *LogDestinations) contains(v logger.Destination) bool { |
||||
for _, item := range *d { |
||||
if item == v { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (d *LogDestinations) UnmarshalJSON(b []byte) error { |
||||
var in []string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
*d = nil |
||||
|
||||
for _, dest := range in { |
||||
var v logger.Destination |
||||
switch dest { |
||||
case "stdout": |
||||
v = logger.DestinationStdout |
||||
|
||||
case "file": |
||||
v = logger.DestinationFile |
||||
|
||||
case "syslog": |
||||
v = logger.DestinationSyslog |
||||
|
||||
default: |
||||
return fmt.Errorf("invalid log destination: %s", dest) |
||||
} |
||||
|
||||
if d.contains(v) { |
||||
return fmt.Errorf("log destination set twice") |
||||
} |
||||
|
||||
*d = append(*d, v) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (d *LogDestinations) UnmarshalEnv(_ string, v string) error { |
||||
byts, _ := json.Marshal(strings.Split(v, ",")) |
||||
return d.UnmarshalJSON(byts) |
||||
} |
||||
@@ -1,64 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/logger" |
||||
) |
||||
|
||||
// LogLevel is the logLevel parameter.
|
||||
type LogLevel logger.Level |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (d LogLevel) MarshalJSON() ([]byte, error) { |
||||
var out string |
||||
|
||||
switch d { |
||||
case LogLevel(logger.Error): |
||||
out = "error" |
||||
|
||||
case LogLevel(logger.Warn): |
||||
out = "warn" |
||||
|
||||
case LogLevel(logger.Info): |
||||
out = "info" |
||||
|
||||
default: |
||||
out = "debug" |
||||
} |
||||
|
||||
return json.Marshal(out) |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (d *LogLevel) UnmarshalJSON(b []byte) error { |
||||
var in string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
switch in { |
||||
case "error": |
||||
*d = LogLevel(logger.Error) |
||||
|
||||
case "warn": |
||||
*d = LogLevel(logger.Warn) |
||||
|
||||
case "info": |
||||
*d = LogLevel(logger.Info) |
||||
|
||||
case "debug": |
||||
*d = LogLevel(logger.Debug) |
||||
|
||||
default: |
||||
return fmt.Errorf("invalid log level: '%s'", in) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (d *LogLevel) UnmarshalEnv(_ string, v string) error { |
||||
return d.UnmarshalJSON([]byte(`"` + v + `"`)) |
||||
} |
||||
@@ -1,60 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/json" |
||||
"reflect" |
||||
"strings" |
||||
) |
||||
|
||||
var optionalGlobalValuesType = func() reflect.Type { |
||||
var fields []reflect.StructField |
||||
rt := reflect.TypeOf(Conf{}) |
||||
nf := rt.NumField() |
||||
|
||||
for i := 0; i < nf; i++ { |
||||
f := rt.Field(i) |
||||
j := f.Tag.Get("json") |
||||
|
||||
if j != "-" && j != "pathDefaults" && j != "paths" { |
||||
if !strings.Contains(j, ",omitempty") { |
||||
j += ",omitempty" |
||||
} |
||||
|
||||
typ := f.Type |
||||
if typ.Kind() != reflect.Pointer { |
||||
typ = reflect.PtrTo(typ) |
||||
} |
||||
|
||||
fields = append(fields, reflect.StructField{ |
||||
Name: f.Name, |
||||
Type: typ, |
||||
Tag: reflect.StructTag(`json:"` + j + `"`), |
||||
}) |
||||
} |
||||
} |
||||
|
||||
return reflect.StructOf(fields) |
||||
}() |
||||
|
||||
func newOptionalGlobalValues() interface{} { |
||||
return reflect.New(optionalGlobalValuesType).Interface() |
||||
} |
||||
|
||||
// OptionalGlobal is a Conf whose values can all be optional.
|
||||
type OptionalGlobal struct { |
||||
Values interface{} |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (p *OptionalGlobal) UnmarshalJSON(b []byte) error { |
||||
p.Values = newOptionalGlobalValues() |
||||
d := json.NewDecoder(bytes.NewReader(b)) |
||||
d.DisallowUnknownFields() |
||||
return d.Decode(p.Values) |
||||
} |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (p *OptionalGlobal) MarshalJSON() ([]byte, error) { |
||||
return json.Marshal(p.Values) |
||||
} |
||||
@@ -1,70 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/json" |
||||
"reflect" |
||||
"strings" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/conf/env" |
||||
) |
||||
|
||||
var optionalPathValuesType = func() reflect.Type { |
||||
var fields []reflect.StructField |
||||
rt := reflect.TypeOf(Path{}) |
||||
nf := rt.NumField() |
||||
|
||||
for i := 0; i < nf; i++ { |
||||
f := rt.Field(i) |
||||
j := f.Tag.Get("json") |
||||
|
||||
if j != "-" { |
||||
if !strings.Contains(j, ",omitempty") { |
||||
j += ",omitempty" |
||||
} |
||||
|
||||
typ := f.Type |
||||
if typ.Kind() != reflect.Pointer { |
||||
typ = reflect.PtrTo(typ) |
||||
} |
||||
|
||||
fields = append(fields, reflect.StructField{ |
||||
Name: f.Name, |
||||
Type: typ, |
||||
Tag: reflect.StructTag(`json:"` + j + `"`), |
||||
}) |
||||
} |
||||
} |
||||
|
||||
return reflect.StructOf(fields) |
||||
}() |
||||
|
||||
func newOptionalPathValues() interface{} { |
||||
return reflect.New(optionalPathValuesType).Interface() |
||||
} |
||||
|
||||
// OptionalPath is a Path whose values can all be optional.
|
||||
type OptionalPath struct { |
||||
Values interface{} |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (p *OptionalPath) UnmarshalJSON(b []byte) error { |
||||
p.Values = newOptionalPathValues() |
||||
d := json.NewDecoder(bytes.NewReader(b)) |
||||
d.DisallowUnknownFields() |
||||
return d.Decode(p.Values) |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (p *OptionalPath) UnmarshalEnv(prefix string, _ string) error { |
||||
if p.Values == nil { |
||||
p.Values = newOptionalPathValues() |
||||
} |
||||
return env.Load(prefix, p.Values) |
||||
} |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (p *OptionalPath) MarshalJSON() ([]byte, error) { |
||||
return json.Marshal(p.Values) |
||||
} |
||||
@@ -1,578 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"net" |
||||
gourl "net/url" |
||||
"reflect" |
||||
"regexp" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/base" |
||||
) |
||||
|
||||
var rePathName = regexp.MustCompile(`^[0-9a-zA-Z_\-/\.~]+$`) |
||||
|
||||
func isValidPathName(name string) error { |
||||
if name == "" { |
||||
return fmt.Errorf("cannot be empty") |
||||
} |
||||
|
||||
if name[0] == '/' { |
||||
return fmt.Errorf("can't begin with a slash") |
||||
} |
||||
|
||||
if name[len(name)-1] == '/' { |
||||
return fmt.Errorf("can't end with a slash") |
||||
} |
||||
|
||||
if !rePathName.MatchString(name) { |
||||
return fmt.Errorf("can contain only alphanumeric characters, underscore, dot, tilde, minus or slash") |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func srtCheckPassphrase(passphrase string) error { |
||||
switch { |
||||
case len(passphrase) < 10 || len(passphrase) > 79: |
||||
return fmt.Errorf("must be between 10 and 79 characters") |
||||
|
||||
default: |
||||
return nil |
||||
} |
||||
} |
||||
|
||||
// FindPathConf returns the configuration corresponding to the given path name.
|
||||
func FindPathConf(pathConfs map[string]*Path, name string) (string, *Path, []string, error) { |
||||
err := isValidPathName(name) |
||||
if err != nil { |
||||
return "", nil, nil, fmt.Errorf("invalid path name: %w (%s)", err, name) |
||||
} |
||||
|
||||
// normal path
|
||||
if pathConf, ok := pathConfs[name]; ok { |
||||
return name, pathConf, nil, nil |
||||
} |
||||
|
||||
// regular expression-based path
|
||||
for pathConfName, pathConf := range pathConfs { |
||||
if pathConf.Regexp != nil && pathConfName != "all" && pathConfName != "all_others" { |
||||
m := pathConf.Regexp.FindStringSubmatch(name) |
||||
if m != nil { |
||||
return pathConfName, pathConf, m, nil |
||||
} |
||||
} |
||||
} |
||||
|
||||
// "all" and "all_others"
|
||||
for pathConfName, pathConf := range pathConfs { |
||||
if pathConfName == "all" || pathConfName == "all_others" { |
||||
m := pathConf.Regexp.FindStringSubmatch(name) |
||||
if m != nil { |
||||
return pathConfName, pathConf, m, nil |
||||
} |
||||
} |
||||
} |
||||
|
||||
return "", nil, nil, fmt.Errorf("path '%s' is not configured", name) |
||||
} |
||||
|
||||
// Path is a path configuration.
|
||||
type Path struct { |
||||
Regexp *regexp.Regexp `json:"-"` // filled by Check()
|
||||
Name string `json:"name"` // filled by Check()
|
||||
|
||||
// General
|
||||
Source string `json:"source"` |
||||
SourceFingerprint string `json:"sourceFingerprint"` |
||||
SourceOnDemand bool `json:"sourceOnDemand"` |
||||
SourceOnDemandStartTimeout StringDuration `json:"sourceOnDemandStartTimeout"` |
||||
SourceOnDemandCloseAfter StringDuration `json:"sourceOnDemandCloseAfter"` |
||||
MaxReaders int `json:"maxReaders"` |
||||
SRTReadPassphrase string `json:"srtReadPassphrase"` |
||||
Fallback string `json:"fallback"` |
||||
|
||||
// Record and playback
|
||||
Record bool `json:"record"` |
||||
Playback bool `json:"playback"` |
||||
RecordPath string `json:"recordPath"` |
||||
RecordFormat RecordFormat `json:"recordFormat"` |
||||
RecordPartDuration StringDuration `json:"recordPartDuration"` |
||||
RecordSegmentDuration StringDuration `json:"recordSegmentDuration"` |
||||
RecordDeleteAfter StringDuration `json:"recordDeleteAfter"` |
||||
|
||||
// Authentication (deprecated)
|
||||
PublishUser *Credential `json:"publishUser,omitempty"` // deprecated
|
||||
PublishPass *Credential `json:"publishPass,omitempty"` // deprecated
|
||||
PublishIPs *IPNetworks `json:"publishIPs,omitempty"` // deprecated
|
||||
ReadUser *Credential `json:"readUser,omitempty"` // deprecated
|
||||
ReadPass *Credential `json:"readPass,omitempty"` // deprecated
|
||||
ReadIPs *IPNetworks `json:"readIPs,omitempty"` // deprecated
|
||||
|
||||
// Publisher source
|
||||
OverridePublisher bool `json:"overridePublisher"` |
||||
DisablePublisherOverride *bool `json:"disablePublisherOverride,omitempty"` // deprecated
|
||||
SRTPublishPassphrase string `json:"srtPublishPassphrase"` |
||||
|
||||
// RTSP source
|
||||
RTSPTransport RTSPTransport `json:"rtspTransport"` |
||||
RTSPAnyPort bool `json:"rtspAnyPort"` |
||||
SourceProtocol *RTSPTransport `json:"sourceProtocol,omitempty"` // deprecated
|
||||
SourceAnyPortEnable *bool `json:"sourceAnyPortEnable,omitempty"` // deprecated
|
||||
RTSPRangeType RTSPRangeType `json:"rtspRangeType"` |
||||
RTSPRangeStart string `json:"rtspRangeStart"` |
||||
|
||||
// Redirect source
|
||||
SourceRedirect string `json:"sourceRedirect"` |
||||
|
||||
// Raspberry Pi Camera source
|
||||
RPICameraCamID int `json:"rpiCameraCamID"` |
||||
RPICameraWidth int `json:"rpiCameraWidth"` |
||||
RPICameraHeight int `json:"rpiCameraHeight"` |
||||
RPICameraHFlip bool `json:"rpiCameraHFlip"` |
||||
RPICameraVFlip bool `json:"rpiCameraVFlip"` |
||||
RPICameraBrightness float64 `json:"rpiCameraBrightness"` |
||||
RPICameraContrast float64 `json:"rpiCameraContrast"` |
||||
RPICameraSaturation float64 `json:"rpiCameraSaturation"` |
||||
RPICameraSharpness float64 `json:"rpiCameraSharpness"` |
||||
RPICameraExposure string `json:"rpiCameraExposure"` |
||||
RPICameraAWB string `json:"rpiCameraAWB"` |
||||
RPICameraAWBGains []float64 `json:"rpiCameraAWBGains"` |
||||
RPICameraDenoise string `json:"rpiCameraDenoise"` |
||||
RPICameraShutter int `json:"rpiCameraShutter"` |
||||
RPICameraMetering string `json:"rpiCameraMetering"` |
||||
RPICameraGain float64 `json:"rpiCameraGain"` |
||||
RPICameraEV float64 `json:"rpiCameraEV"` |
||||
RPICameraROI string `json:"rpiCameraROI"` |
||||
RPICameraHDR bool `json:"rpiCameraHDR"` |
||||
RPICameraTuningFile string `json:"rpiCameraTuningFile"` |
||||
RPICameraMode string `json:"rpiCameraMode"` |
||||
RPICameraFPS float64 `json:"rpiCameraFPS"` |
||||
RPICameraIDRPeriod int `json:"rpiCameraIDRPeriod"` |
||||
RPICameraBitrate int `json:"rpiCameraBitrate"` |
||||
RPICameraProfile string `json:"rpiCameraProfile"` |
||||
RPICameraLevel string `json:"rpiCameraLevel"` |
||||
RPICameraAfMode string `json:"rpiCameraAfMode"` |
||||
RPICameraAfRange string `json:"rpiCameraAfRange"` |
||||
RPICameraAfSpeed string `json:"rpiCameraAfSpeed"` |
||||
RPICameraLensPosition float64 `json:"rpiCameraLensPosition"` |
||||
RPICameraAfWindow string `json:"rpiCameraAfWindow"` |
||||
RPICameraTextOverlayEnable bool `json:"rpiCameraTextOverlayEnable"` |
||||
RPICameraTextOverlay string `json:"rpiCameraTextOverlay"` |
||||
|
||||
// Hooks
|
||||
RunOnInit string `json:"runOnInit"` |
||||
RunOnInitRestart bool `json:"runOnInitRestart"` |
||||
RunOnDemand string `json:"runOnDemand"` |
||||
RunOnDemandRestart bool `json:"runOnDemandRestart"` |
||||
RunOnDemandStartTimeout StringDuration `json:"runOnDemandStartTimeout"` |
||||
RunOnDemandCloseAfter StringDuration `json:"runOnDemandCloseAfter"` |
||||
RunOnUnDemand string `json:"runOnUnDemand"` |
||||
RunOnReady string `json:"runOnReady"` |
||||
RunOnReadyRestart bool `json:"runOnReadyRestart"` |
||||
RunOnNotReady string `json:"runOnNotReady"` |
||||
RunOnRead string `json:"runOnRead"` |
||||
RunOnReadRestart bool `json:"runOnReadRestart"` |
||||
RunOnUnread string `json:"runOnUnread"` |
||||
RunOnRecordSegmentCreate string `json:"runOnRecordSegmentCreate"` |
||||
RunOnRecordSegmentComplete string `json:"runOnRecordSegmentComplete"` |
||||
} |
||||
|
||||
func (pconf *Path) setDefaults() { |
||||
// General
|
||||
pconf.Source = "publisher" |
||||
pconf.SourceOnDemandStartTimeout = 10 * StringDuration(time.Second) |
||||
pconf.SourceOnDemandCloseAfter = 10 * StringDuration(time.Second) |
||||
|
||||
// Record and playback
|
||||
pconf.Playback = true |
||||
pconf.RecordPath = "./recordings/%path/%Y-%m-%d_%H-%M-%S-%f" |
||||
pconf.RecordFormat = RecordFormatFMP4 |
||||
pconf.RecordPartDuration = 100 * StringDuration(time.Millisecond) |
||||
pconf.RecordSegmentDuration = 3600 * StringDuration(time.Second) |
||||
pconf.RecordDeleteAfter = 24 * 3600 * StringDuration(time.Second) |
||||
|
||||
// Publisher source
|
||||
pconf.OverridePublisher = true |
||||
|
||||
// Raspberry Pi Camera source
|
||||
pconf.RPICameraWidth = 1920 |
||||
pconf.RPICameraHeight = 1080 |
||||
pconf.RPICameraContrast = 1 |
||||
pconf.RPICameraSaturation = 1 |
||||
pconf.RPICameraSharpness = 1 |
||||
pconf.RPICameraExposure = "normal" |
||||
pconf.RPICameraAWB = "auto" |
||||
pconf.RPICameraAWBGains = []float64{0, 0} |
||||
pconf.RPICameraDenoise = "off" |
||||
pconf.RPICameraMetering = "centre" |
||||
pconf.RPICameraFPS = 30 |
||||
pconf.RPICameraIDRPeriod = 60 |
||||
pconf.RPICameraBitrate = 1000000 |
||||
pconf.RPICameraProfile = "main" |
||||
pconf.RPICameraLevel = "4.1" |
||||
pconf.RPICameraAfMode = "continuous" |
||||
pconf.RPICameraAfRange = "normal" |
||||
pconf.RPICameraAfSpeed = "normal" |
||||
pconf.RPICameraTextOverlay = "%Y-%m-%d %H:%M:%S - MediaMTX" |
||||
|
||||
// Hooks
|
||||
pconf.RunOnDemandStartTimeout = 10 * StringDuration(time.Second) |
||||
pconf.RunOnDemandCloseAfter = 10 * StringDuration(time.Second) |
||||
} |
||||
|
||||
func newPath(defaults *Path, partial *OptionalPath) *Path { |
||||
pconf := &Path{} |
||||
copyStructFields(pconf, defaults) |
||||
copyStructFields(pconf, partial.Values) |
||||
return pconf |
||||
} |
||||
|
||||
// Clone clones the configuration.
|
||||
func (pconf Path) Clone() *Path { |
||||
enc, err := json.Marshal(pconf) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
var dest Path |
||||
err = json.Unmarshal(enc, &dest) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
dest.Regexp = pconf.Regexp |
||||
|
||||
return &dest |
||||
} |
||||
|
||||
func (pconf *Path) validate( |
||||
conf *Conf, |
||||
name string, |
||||
deprecatedCredentialsMode bool, |
||||
) error { |
||||
pconf.Name = name |
||||
|
||||
switch { |
||||
case name == "all_others", name == "all": |
||||
pconf.Regexp = regexp.MustCompile("^.*$") |
||||
|
||||
case name == "" || name[0] != '~': // normal path
|
||||
err := isValidPathName(name) |
||||
if err != nil { |
||||
return fmt.Errorf("invalid path name '%s': %w", name, err) |
||||
} |
||||
|
||||
default: // regular expression-based path
|
||||
regexp, err := regexp.Compile(name[1:]) |
||||
if err != nil { |
||||
return fmt.Errorf("invalid regular expression: %s", name[1:]) |
||||
} |
||||
pconf.Regexp = regexp |
||||
} |
||||
|
||||
// General
|
||||
|
||||
if pconf.Source != "publisher" && pconf.Source != "redirect" && |
||||
pconf.Regexp != nil && !pconf.SourceOnDemand { |
||||
return fmt.Errorf("a path with a regular expression (or path 'all') and a static source" + |
||||
" must have 'sourceOnDemand' set to true") |
||||
} |
||||
switch { |
||||
case pconf.Source == "publisher": |
||||
|
||||
case strings.HasPrefix(pconf.Source, "rtsp://") || |
||||
strings.HasPrefix(pconf.Source, "rtsps://"): |
||||
_, err := base.ParseURL(pconf.Source) |
||||
if err != nil { |
||||
return fmt.Errorf("'%s' is not a valid URL", pconf.Source) |
||||
} |
||||
|
||||
case strings.HasPrefix(pconf.Source, "rtmp://") || |
||||
strings.HasPrefix(pconf.Source, "rtmps://"): |
||||
u, err := gourl.Parse(pconf.Source) |
||||
if err != nil { |
||||
return fmt.Errorf("'%s' is not a valid URL", pconf.Source) |
||||
} |
||||
|
||||
if u.User != nil { |
||||
pass, _ := u.User.Password() |
||||
user := u.User.Username() |
||||
if user != "" && pass == "" || |
||||
user == "" && pass != "" { |
||||
return fmt.Errorf("username and password must be both provided") |
||||
} |
||||
} |
||||
|
||||
case strings.HasPrefix(pconf.Source, "http://") || |
||||
strings.HasPrefix(pconf.Source, "https://"): |
||||
u, err := gourl.Parse(pconf.Source) |
||||
if err != nil { |
||||
return fmt.Errorf("'%s' is not a valid URL", pconf.Source) |
||||
} |
||||
if u.Scheme != "http" && u.Scheme != "https" { |
||||
return fmt.Errorf("'%s' is not a valid URL", pconf.Source) |
||||
} |
||||
|
||||
if u.User != nil { |
||||
pass, _ := u.User.Password() |
||||
user := u.User.Username() |
||||
if user != "" && pass == "" || |
||||
user == "" && pass != "" { |
||||
return fmt.Errorf("username and password must be both provided") |
||||
} |
||||
} |
||||
|
||||
case strings.HasPrefix(pconf.Source, "udp://"): |
||||
_, _, err := net.SplitHostPort(pconf.Source[len("udp://"):]) |
||||
if err != nil { |
||||
return fmt.Errorf("'%s' is not a valid UDP URL", pconf.Source) |
||||
} |
||||
|
||||
case strings.HasPrefix(pconf.Source, "srt://"): |
||||
|
||||
_, err := gourl.Parse(pconf.Source) |
||||
if err != nil { |
||||
return fmt.Errorf("'%s' is not a valid URL", pconf.Source) |
||||
} |
||||
|
||||
case strings.HasPrefix(pconf.Source, "whep://") || |
||||
strings.HasPrefix(pconf.Source, "wheps://"): |
||||
_, err := gourl.Parse(pconf.Source) |
||||
if err != nil { |
||||
return fmt.Errorf("'%s' is not a valid URL", pconf.Source) |
||||
} |
||||
|
||||
case pconf.Source == "redirect": |
||||
|
||||
case pconf.Source == "rpiCamera": |
||||
|
||||
default: |
||||
return fmt.Errorf("invalid source: '%s'", pconf.Source) |
||||
} |
||||
if pconf.SourceOnDemand { |
||||
if pconf.Source == "publisher" { |
||||
return fmt.Errorf("'sourceOnDemand' is useless when source is 'publisher'") |
||||
} |
||||
} |
||||
if pconf.SRTReadPassphrase != "" { |
||||
err := srtCheckPassphrase(pconf.SRTReadPassphrase) |
||||
if err != nil { |
||||
return fmt.Errorf("invalid 'readRTPassphrase': %w", err) |
||||
} |
||||
} |
||||
if pconf.Fallback != "" { |
||||
if strings.HasPrefix(pconf.Fallback, "/") { |
||||
err := isValidPathName(pconf.Fallback[1:]) |
||||
if err != nil { |
||||
return fmt.Errorf("'%s': %w", pconf.Fallback, err) |
||||
} |
||||
} else { |
||||
_, err := base.ParseURL(pconf.Fallback) |
||||
if err != nil { |
||||
return fmt.Errorf("'%s' is not a valid RTSP URL", pconf.Fallback) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Authentication (deprecated)
|
||||
|
||||
if deprecatedCredentialsMode { |
||||
func() { |
||||
var user Credential = "any" |
||||
if credentialIsNotEmpty(pconf.PublishUser) { |
||||
user = *pconf.PublishUser |
||||
} |
||||
|
||||
var pass Credential |
||||
if credentialIsNotEmpty(pconf.PublishPass) { |
||||
pass = *pconf.PublishPass |
||||
} |
||||
|
||||
ips := IPNetworks{mustParseCIDR("0.0.0.0/0")} |
||||
if ipNetworkIsNotEmpty(pconf.PublishIPs) { |
||||
ips = *pconf.PublishIPs |
||||
} |
||||
|
||||
pathName := name |
||||
if name == "all_others" || name == "all" { |
||||
pathName = "~^.*$" |
||||
} |
||||
|
||||
conf.AuthInternalUsers = append(conf.AuthInternalUsers, AuthInternalUser{ |
||||
User: user, |
||||
Pass: pass, |
||||
IPs: ips, |
||||
Permissions: []AuthInternalUserPermission{{ |
||||
Action: AuthActionPublish, |
||||
Path: pathName, |
||||
}}, |
||||
}) |
||||
}() |
||||
|
||||
func() { |
||||
var user Credential = "any" |
||||
if credentialIsNotEmpty(pconf.ReadUser) { |
||||
user = *pconf.ReadUser |
||||
} |
||||
|
||||
var pass Credential |
||||
if credentialIsNotEmpty(pconf.ReadPass) { |
||||
pass = *pconf.ReadPass |
||||
} |
||||
|
||||
ips := IPNetworks{mustParseCIDR("0.0.0.0/0")} |
||||
if ipNetworkIsNotEmpty(pconf.ReadIPs) { |
||||
ips = *pconf.ReadIPs |
||||
} |
||||
|
||||
pathName := name |
||||
if name == "all_others" || name == "all" { |
||||
pathName = "~^.*$" |
||||
} |
||||
|
||||
conf.AuthInternalUsers = append(conf.AuthInternalUsers, AuthInternalUser{ |
||||
User: user, |
||||
Pass: pass, |
||||
IPs: ips, |
||||
Permissions: []AuthInternalUserPermission{{ |
||||
Action: AuthActionRead, |
||||
Path: pathName, |
||||
}}, |
||||
}) |
||||
}() |
||||
} |
||||
|
||||
// Publisher source
|
||||
|
||||
if pconf.DisablePublisherOverride != nil { |
||||
pconf.OverridePublisher = !*pconf.DisablePublisherOverride |
||||
} |
||||
if pconf.SRTPublishPassphrase != "" { |
||||
if pconf.Source != "publisher" { |
||||
return fmt.Errorf("'srtPublishPassphase' can only be used when source is 'publisher'") |
||||
} |
||||
|
||||
err := srtCheckPassphrase(pconf.SRTPublishPassphrase) |
||||
if err != nil { |
||||
return fmt.Errorf("invalid 'srtPublishPassphrase': %w", err) |
||||
} |
||||
} |
||||
|
||||
// RTSP source
|
||||
|
||||
if pconf.SourceProtocol != nil { |
||||
pconf.RTSPTransport = *pconf.SourceProtocol |
||||
} |
||||
if pconf.SourceAnyPortEnable != nil { |
||||
pconf.RTSPAnyPort = *pconf.SourceAnyPortEnable |
||||
} |
||||
|
||||
// Redirect source
|
||||
|
||||
if pconf.Source == "redirect" { |
||||
if pconf.SourceRedirect == "" { |
||||
return fmt.Errorf("source redirect must be filled") |
||||
} |
||||
|
||||
_, err := base.ParseURL(pconf.SourceRedirect) |
||||
if err != nil { |
||||
return fmt.Errorf("'%s' is not a valid RTSP URL", pconf.SourceRedirect) |
||||
} |
||||
} |
||||
|
||||
// Raspberry Pi Camera source
|
||||
|
||||
if pconf.Source == "rpiCamera" { |
||||
for otherName, otherPath := range conf.Paths { |
||||
if otherPath != pconf && otherPath != nil && |
||||
otherPath.Source == "rpiCamera" && otherPath.RPICameraCamID == pconf.RPICameraCamID { |
||||
return fmt.Errorf("'rpiCamera' with same camera ID %d is used as source in two paths, '%s' and '%s'", |
||||
pconf.RPICameraCamID, name, otherName) |
||||
} |
||||
} |
||||
} |
||||
switch pconf.RPICameraExposure { |
||||
case "normal", "short", "long", "custom": |
||||
default: |
||||
return fmt.Errorf("invalid 'rpiCameraExposure' value") |
||||
} |
||||
switch pconf.RPICameraAWB { |
||||
case "auto", "incandescent", "tungsten", "fluorescent", "indoor", "daylight", "cloudy", "custom": |
||||
default: |
||||
return fmt.Errorf("invalid 'rpiCameraAWB' value") |
||||
} |
||||
if len(pconf.RPICameraAWBGains) != 2 { |
||||
return fmt.Errorf("invalid 'rpiCameraAWBGains' value") |
||||
} |
||||
switch pconf.RPICameraDenoise { |
||||
case "off", "cdn_off", "cdn_fast", "cdn_hq": |
||||
default: |
||||
return fmt.Errorf("invalid 'rpiCameraDenoise' value") |
||||
} |
||||
switch pconf.RPICameraMetering { |
||||
case "centre", "spot", "matrix", "custom": |
||||
default: |
||||
return fmt.Errorf("invalid 'rpiCameraMetering' value") |
||||
} |
||||
switch pconf.RPICameraAfMode { |
||||
case "auto", "manual", "continuous": |
||||
default: |
||||
return fmt.Errorf("invalid 'rpiCameraAfMode' value") |
||||
} |
||||
switch pconf.RPICameraAfRange { |
||||
case "normal", "macro", "full": |
||||
default: |
||||
return fmt.Errorf("invalid 'rpiCameraAfRange' value") |
||||
} |
||||
switch pconf.RPICameraAfSpeed { |
||||
case "normal", "fast": |
||||
default: |
||||
return fmt.Errorf("invalid 'rpiCameraAfSpeed' value") |
||||
} |
||||
|
||||
// Hooks
|
||||
|
||||
if pconf.RunOnInit != "" && pconf.Regexp != nil { |
||||
return fmt.Errorf("a path with a regular expression (or path 'all')" + |
||||
" does not support option 'runOnInit'; use another path") |
||||
} |
||||
if (pconf.RunOnDemand != "" || pconf.RunOnUnDemand != "") && pconf.Source != "publisher" { |
||||
return fmt.Errorf("'runOnDemand' and 'runOnUnDemand' can be used only when source is 'publisher'") |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// Equal checks whether two Paths are equal.
|
||||
func (pconf *Path) Equal(other *Path) bool { |
||||
return reflect.DeepEqual(pconf, other) |
||||
} |
||||
|
||||
// HasStaticSource checks whether the path has a static source.
|
||||
func (pconf Path) HasStaticSource() bool { |
||||
return strings.HasPrefix(pconf.Source, "rtsp://") || |
||||
strings.HasPrefix(pconf.Source, "rtsps://") || |
||||
strings.HasPrefix(pconf.Source, "rtmp://") || |
||||
strings.HasPrefix(pconf.Source, "rtmps://") || |
||||
strings.HasPrefix(pconf.Source, "http://") || |
||||
strings.HasPrefix(pconf.Source, "https://") || |
||||
strings.HasPrefix(pconf.Source, "udp://") || |
||||
strings.HasPrefix(pconf.Source, "srt://") || |
||||
strings.HasPrefix(pconf.Source, "whep://") || |
||||
strings.HasPrefix(pconf.Source, "wheps://") || |
||||
pconf.Source == "rpiCamera" |
||||
} |
||||
|
||||
// HasOnDemandStaticSource checks whether the path has an on-demand static source.
|
||||
func (pconf Path) HasOnDemandStaticSource() bool { |
||||
return pconf.HasStaticSource() && pconf.SourceOnDemand |
||||
} |
||||
|
||||
// HasOnDemandPublisher checks whether the path has an on-demand publisher.
|
||||
func (pconf Path) HasOnDemandPublisher() bool { |
||||
return pconf.RunOnDemand != "" |
||||
} |
||||
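A hedged sketch (illustrative only, not part of the diff) of the lookup order implemented by FindPathConf above: exact name first, then regular-expression paths, then the catch-all entries. Regexp is normally filled by validate(); here it is set by hand for brevity.

package main

import (
    "fmt"
    "regexp"

    "github.com/bluenviron/mediamtx/internal/conf"
)

func main() {
    paths := map[string]*conf.Path{
        "cam1":         {},
        "~^cam[0-9]+$": {Regexp: regexp.MustCompile("^cam[0-9]+$")},
        "all_others":   {Regexp: regexp.MustCompile("^.*$")},
    }

    name, _, _, _ := conf.FindPathConf(paths, "cam1") // exact match wins
    fmt.Println(name)                                 // cam1

    name, _, _, _ = conf.FindPathConf(paths, "cam7") // regular-expression match
    fmt.Println(name)                                // ~^cam[0-9]+$

    name, _, _, _ = conf.FindPathConf(paths, "studio") // falls back to all_others
    fmt.Println(name)                                  // all_others
}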
@@ -1,78 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"sort" |
||||
"strings" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4" |
||||
) |
||||
|
||||
// Protocol is an RTSP transport.
|
||||
type Protocol gortsplib.Transport |
||||
|
||||
// Protocols is the protocols parameter.
|
||||
type Protocols map[Protocol]struct{} |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (d Protocols) MarshalJSON() ([]byte, error) { |
||||
out := make([]string, len(d)) |
||||
i := 0 |
||||
|
||||
for p := range d { |
||||
var v string |
||||
|
||||
switch p { |
||||
case Protocol(gortsplib.TransportUDP): |
||||
v = "udp" |
||||
|
||||
case Protocol(gortsplib.TransportUDPMulticast): |
||||
v = "multicast" |
||||
|
||||
default: |
||||
v = "tcp" |
||||
} |
||||
|
||||
out[i] = v |
||||
i++ |
||||
} |
||||
|
||||
sort.Strings(out) |
||||
|
||||
return json.Marshal(out) |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (d *Protocols) UnmarshalJSON(b []byte) error { |
||||
var in []string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
*d = make(Protocols) |
||||
|
||||
for _, proto := range in { |
||||
switch proto { |
||||
case "udp": |
||||
(*d)[Protocol(gortsplib.TransportUDP)] = struct{}{} |
||||
|
||||
case "multicast": |
||||
(*d)[Protocol(gortsplib.TransportUDPMulticast)] = struct{}{} |
||||
|
||||
case "tcp": |
||||
(*d)[Protocol(gortsplib.TransportTCP)] = struct{}{} |
||||
|
||||
default: |
||||
return fmt.Errorf("invalid protocol: %s", proto) |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (d *Protocols) UnmarshalEnv(_ string, v string) error { |
||||
byts, _ := json.Marshal(strings.Split(v, ",")) |
||||
return d.UnmarshalJSON(byts) |
||||
} |
||||
@@ -1,56 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
) |
||||
|
||||
// RecordFormat is the recordFormat parameter.
|
||||
type RecordFormat int |
||||
|
||||
// supported values.
|
||||
const ( |
||||
RecordFormatFMP4 RecordFormat = iota |
||||
RecordFormatMPEGTS |
||||
) |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (d RecordFormat) MarshalJSON() ([]byte, error) { |
||||
var out string |
||||
|
||||
switch d { |
||||
case RecordFormatMPEGTS: |
||||
out = "mpegts" |
||||
|
||||
default: |
||||
out = "fmp4" |
||||
} |
||||
|
||||
return json.Marshal(out) |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (d *RecordFormat) UnmarshalJSON(b []byte) error { |
||||
var in string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
switch in { |
||||
case "mpegts": |
||||
*d = RecordFormatMPEGTS |
||||
|
||||
case "fmp4": |
||||
*d = RecordFormatFMP4 |
||||
|
||||
default: |
||||
return fmt.Errorf("invalid record format '%s'", in) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (d *RecordFormat) UnmarshalEnv(_ string, v string) error { |
||||
return d.UnmarshalJSON([]byte(`"` + v + `"`)) |
||||
} |
||||
@@ -1,63 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"sort" |
||||
"strings" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/headers" |
||||
) |
||||
|
||||
// RTSPAuthMethods is the rtspAuthMethods parameter.
|
||||
type RTSPAuthMethods []headers.AuthMethod |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (d RTSPAuthMethods) MarshalJSON() ([]byte, error) { |
||||
out := make([]string, len(d)) |
||||
|
||||
for i, v := range d { |
||||
switch v { |
||||
case headers.AuthBasic: |
||||
out[i] = "basic" |
||||
|
||||
default: |
||||
out[i] = "digest" |
||||
} |
||||
} |
||||
|
||||
sort.Strings(out) |
||||
|
||||
return json.Marshal(out) |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (d *RTSPAuthMethods) UnmarshalJSON(b []byte) error { |
||||
var in []string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
*d = nil |
||||
|
||||
for _, v := range in { |
||||
switch v { |
||||
case "basic": |
||||
*d = append(*d, headers.AuthBasic) |
||||
|
||||
case "digest": |
||||
*d = append(*d, headers.AuthDigestMD5) |
||||
|
||||
default: |
||||
return fmt.Errorf("invalid authentication method: '%s'", v) |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (d *RTSPAuthMethods) UnmarshalEnv(_ string, v string) error { |
||||
byts, _ := json.Marshal(strings.Split(v, ",")) |
||||
return d.UnmarshalJSON(byts) |
||||
} |
||||
@@ -1,70 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
) |
||||
|
||||
// RTSPRangeType is the type used in the Range header.
|
||||
type RTSPRangeType int |
||||
|
||||
// supported values.
|
||||
const ( |
||||
RTSPRangeTypeUndefined RTSPRangeType = iota |
||||
RTSPRangeTypeClock |
||||
RTSPRangeTypeNPT |
||||
RTSPRangeTypeSMPTE |
||||
) |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (d RTSPRangeType) MarshalJSON() ([]byte, error) { |
||||
var out string |
||||
|
||||
switch d { |
||||
case RTSPRangeTypeClock: |
||||
out = "clock" |
||||
|
||||
case RTSPRangeTypeNPT: |
||||
out = "npt" |
||||
|
||||
case RTSPRangeTypeSMPTE: |
||||
out = "smpte" |
||||
|
||||
default: |
||||
out = "" |
||||
} |
||||
|
||||
return json.Marshal(out) |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (d *RTSPRangeType) UnmarshalJSON(b []byte) error { |
||||
var in string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
switch in { |
||||
case "clock": |
||||
*d = RTSPRangeTypeClock |
||||
|
||||
case "npt": |
||||
*d = RTSPRangeTypeNPT |
||||
|
||||
case "smpte": |
||||
*d = RTSPRangeTypeSMPTE |
||||
|
||||
case "": |
||||
*d = RTSPRangeTypeUndefined |
||||
|
||||
default: |
||||
return fmt.Errorf("invalid rtsp range type: '%s'", in) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (d *RTSPRangeType) UnmarshalEnv(_ string, v string) error { |
||||
return d.UnmarshalJSON([]byte(`"` + v + `"`)) |
||||
} |
||||
@@ -1,70 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4" |
||||
) |
||||
|
||||
// RTSPTransport is the rtspTransport parameter.
|
||||
type RTSPTransport struct { |
||||
*gortsplib.Transport |
||||
} |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (d RTSPTransport) MarshalJSON() ([]byte, error) { |
||||
var out string |
||||
|
||||
if d.Transport == nil { |
||||
out = "automatic" |
||||
} else { |
||||
switch *d.Transport { |
||||
case gortsplib.TransportUDP: |
||||
out = "udp" |
||||
|
||||
case gortsplib.TransportUDPMulticast: |
||||
out = "multicast" |
||||
|
||||
default: |
||||
out = "tcp" |
||||
} |
||||
} |
||||
|
||||
return json.Marshal(out) |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (d *RTSPTransport) UnmarshalJSON(b []byte) error { |
||||
var in string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
switch in { |
||||
case "udp": |
||||
v := gortsplib.TransportUDP |
||||
d.Transport = &v |
||||
|
||||
case "multicast": |
||||
v := gortsplib.TransportUDPMulticast |
||||
d.Transport = &v |
||||
|
||||
case "tcp": |
||||
v := gortsplib.TransportTCP |
||||
d.Transport = &v |
||||
|
||||
case "automatic": |
||||
d.Transport = nil |
||||
|
||||
default: |
||||
return fmt.Errorf("invalid protocol '%s'", in) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (d *RTSPTransport) UnmarshalEnv(_ string, v string) error { |
||||
return d.UnmarshalJSON([]byte(`"` + v + `"`)) |
||||
} |
||||
@@ -1,36 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"time" |
||||
) |
||||
|
||||
// StringDuration is a duration that is unmarshaled from a string.
|
||||
// Durations are normally unmarshaled from numbers.
|
||||
type StringDuration time.Duration |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (d StringDuration) MarshalJSON() ([]byte, error) { |
||||
return json.Marshal(time.Duration(d).String()) |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (d *StringDuration) UnmarshalJSON(b []byte) error { |
||||
var in string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
du, err := time.ParseDuration(in) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
*d = StringDuration(du) |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (d *StringDuration) UnmarshalEnv(_ string, v string) error { |
||||
return d.UnmarshalJSON([]byte(`"` + v + `"`)) |
||||
} |
||||
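A small round-trip sketch (illustrative only) of the behavior defined above: StringDuration marshals to and unmarshals from Go duration strings instead of nanosecond integers.

package main

import (
    "encoding/json"
    "fmt"
    "time"

    "github.com/bluenviron/mediamtx/internal/conf"
)

func main() {
    var d conf.StringDuration
    if err := json.Unmarshal([]byte(`"1m30s"`), &d); err != nil {
        panic(err)
    }
    fmt.Println(time.Duration(d)) // 1m30s

    out, _ := json.Marshal(d)
    fmt.Println(string(out)) // "1m30s"
}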
@@ -1,36 +0,0 @@
|
||||
package conf |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
|
||||
"code.cloudfoundry.org/bytefmt" |
||||
) |
||||
|
||||
// StringSize is a size that is unmarshaled from a string.
|
||||
type StringSize uint64 |
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (s StringSize) MarshalJSON() ([]byte, error) { |
||||
return []byte(`"` + bytefmt.ByteSize(uint64(s)) + `"`), nil |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (s *StringSize) UnmarshalJSON(b []byte) error { |
||||
var in string |
||||
if err := json.Unmarshal(b, &in); err != nil { |
||||
return err |
||||
} |
||||
|
||||
v, err := bytefmt.ToBytes(in) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
*s = StringSize(v) |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// UnmarshalEnv implements env.Unmarshaler.
|
||||
func (s *StringSize) UnmarshalEnv(_ string, v string) error { |
||||
return s.UnmarshalJSON([]byte(`"` + v + `"`)) |
||||
} |
||||
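A similar round-trip sketch for StringSize (illustrative only; the accepted unit strings are whatever bytefmt.ToBytes supports, e.g. "64M" for 64 MiB).

package main

import (
    "encoding/json"
    "fmt"

    "github.com/bluenviron/mediamtx/internal/conf"
)

func main() {
    var s conf.StringSize
    if err := json.Unmarshal([]byte(`"64M"`), &s); err != nil {
        panic(err)
    }
    fmt.Println(uint64(s)) // 67108864

    out, _ := json.Marshal(s)
    fmt.Println(string(out)) // "64M"
}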
@@ -1,9 +0,0 @@
|
||||
package conf |
||||
|
||||
// WebRTCICEServer is a WebRTC ICE Server.
|
||||
type WebRTCICEServer struct { |
||||
URL string `json:"url"` |
||||
Username string `json:"username"` |
||||
Password string `json:"password"` |
||||
ClientOnly bool `json:"clientOnly"` |
||||
} |
||||
@@ -1,67 +0,0 @@
|
||||
// Package yaml contains a yaml loader.
|
||||
package yaml |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
|
||||
"gopkg.in/yaml.v2" |
||||
) |
||||
|
||||
func convertKeys(i interface{}) (interface{}, error) { |
||||
switch x := i.(type) { |
||||
case map[interface{}]interface{}: |
||||
m2 := map[string]interface{}{} |
||||
for k, v := range x { |
||||
ks, ok := k.(string) |
||||
if !ok { |
||||
return nil, fmt.Errorf("integer keys are not supported (%v)", k) |
||||
} |
||||
|
||||
var err error |
||||
m2[ks], err = convertKeys(v) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
return m2, nil |
||||
|
||||
case []interface{}: |
||||
a2 := make([]interface{}, len(x)) |
||||
for i, v := range x { |
||||
var err error |
||||
a2[i], err = convertKeys(v) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
return a2, nil |
||||
} |
||||
|
||||
return i, nil |
||||
} |
||||
|
||||
// Load loads the configuration from Yaml.
|
||||
func Load(buf []byte, dest interface{}) error { |
||||
// load YAML into a generic map
|
||||
var temp interface{} |
||||
err := yaml.Unmarshal(buf, &temp) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
// convert interface{} keys into string keys to avoid JSON errors
|
||||
temp, err = convertKeys(temp) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
// convert the generic map into JSON
|
||||
buf, err = json.Marshal(temp) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
// load JSON into destination
|
||||
return json.Unmarshal(buf, dest) |
||||
} |
||||
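A minimal usage sketch (illustrative only) of the Load helper above, which funnels YAML through JSON so that the destination struct's json tags and custom unmarshalers are reused.

package main

import (
    "fmt"

    "github.com/bluenviron/mediamtx/internal/conf/yaml"
)

type sample struct {
    LogLevel string   `json:"logLevel"`
    Paths    []string `json:"paths"`
}

func main() {
    buf := []byte("logLevel: debug\npaths:\n  - cam1\n  - cam2\n")

    var s sample
    if err := yaml.Load(buf, &s); err != nil {
        panic(err)
    }

    fmt.Printf("%+v\n", s) // {LogLevel:debug Paths:[cam1 cam2]}
}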
@@ -1,123 +0,0 @@
|
||||
// Package confwatcher contains a configuration watcher.
|
||||
package confwatcher |
||||
|
||||
import ( |
||||
"os" |
||||
"path/filepath" |
||||
"time" |
||||
|
||||
"github.com/fsnotify/fsnotify" |
||||
) |
||||
|
||||
const ( |
||||
minInterval = 1 * time.Second |
||||
additionalWait = 10 * time.Millisecond |
||||
) |
||||
|
||||
// ConfWatcher is a configuration file watcher.
|
||||
type ConfWatcher struct { |
||||
inner *fsnotify.Watcher |
||||
watchedPath string |
||||
|
||||
// in
|
||||
terminate chan struct{} |
||||
|
||||
// out
|
||||
signal chan struct{} |
||||
done chan struct{} |
||||
} |
||||
|
||||
// New allocates a ConfWatcher.
|
||||
func New(confPath string) (*ConfWatcher, error) { |
||||
if _, err := os.Stat(confPath); err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
inner, err := fsnotify.NewWatcher() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
// use absolute paths to support Darwin
|
||||
absolutePath, _ := filepath.Abs(confPath) |
||||
parentPath := filepath.Dir(absolutePath) |
||||
|
||||
err = inner.Add(parentPath) |
||||
if err != nil { |
||||
inner.Close() //nolint:errcheck
|
||||
return nil, err |
||||
} |
||||
|
||||
w := &ConfWatcher{ |
||||
inner: inner, |
||||
watchedPath: absolutePath, |
||||
terminate: make(chan struct{}), |
||||
signal: make(chan struct{}), |
||||
done: make(chan struct{}), |
||||
} |
||||
|
||||
go w.run() |
||||
|
||||
return w, nil |
||||
} |
||||
|
||||
// Close closes a ConfWatcher.
|
||||
func (w *ConfWatcher) Close() { |
||||
close(w.terminate) |
||||
<-w.done |
||||
} |
||||
|
||||
func (w *ConfWatcher) run() { |
||||
defer close(w.done) |
||||
|
||||
var lastCalled time.Time |
||||
previousWatchedPath, _ := filepath.EvalSymlinks(w.watchedPath) |
||||
|
||||
outer: |
||||
for { |
||||
select { |
||||
case event := <-w.inner.Events: |
||||
if time.Since(lastCalled) < minInterval { |
||||
continue |
||||
} |
||||
|
||||
currentWatchedPath, _ := filepath.EvalSymlinks(w.watchedPath) |
||||
eventPath, _ := filepath.Abs(event.Name) |
||||
eventPath, _ = filepath.EvalSymlinks(eventPath) |
||||
|
||||
if currentWatchedPath == "" { |
||||
// watched file was removed; wait for write event to trigger reload
|
||||
previousWatchedPath = "" |
||||
} else if currentWatchedPath != previousWatchedPath || |
||||
(eventPath == currentWatchedPath && |
||||
((event.Op&fsnotify.Write) == fsnotify.Write || |
||||
(event.Op&fsnotify.Create) == fsnotify.Create)) { |
||||
// wait some additional time to allow the writer to complete its job
|
||||
time.Sleep(additionalWait) |
||||
previousWatchedPath = currentWatchedPath |
||||
|
||||
lastCalled = time.Now() |
||||
|
||||
select { |
||||
case w.signal <- struct{}{}: |
||||
case <-w.terminate: |
||||
break outer |
||||
} |
||||
} |
||||
|
||||
case <-w.inner.Errors: |
||||
break outer |
||||
|
||||
case <-w.terminate: |
||||
break outer |
||||
} |
||||
} |
||||
|
||||
close(w.signal) |
||||
w.inner.Close() //nolint:errcheck
|
||||
} |
||||
|
||||
// Watch returns a channel that receives a signal whenever the configuration file changes.
|
||||
func (w *ConfWatcher) Watch() chan struct{} { |
||||
return w.signal |
||||
} |
||||
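A hedged usage sketch (not part of the diff) of the watcher above: New watches the file, Watch delivers a signal on every relevant change, Close tears the watcher down.

package main

import (
    "log"

    "github.com/bluenviron/mediamtx/internal/confwatcher"
)

func main() {
    w, err := confwatcher.New("mediamtx.yml")
    if err != nil {
        log.Fatal(err)
    }
    defer w.Close()

    // each signal means the file was written, created or replaced
    for range w.Watch() {
        log.Println("configuration changed, reloading")
    }
}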
@@ -1,141 +0,0 @@
|
||||
package confwatcher |
||||
|
||||
import ( |
||||
"os" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/test" |
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
func TestNoFile(t *testing.T) { |
||||
_, err := New("/nonexistent") |
||||
require.Error(t, err) |
||||
} |
||||
|
||||
func TestWrite(t *testing.T) { |
||||
fpath, err := test.CreateTempFile([]byte("{}")) |
||||
require.NoError(t, err) |
||||
|
||||
w, err := New(fpath) |
||||
require.NoError(t, err) |
||||
defer w.Close() |
||||
|
||||
func() { |
||||
f, err := os.Create(fpath) |
||||
require.NoError(t, err) |
||||
defer f.Close() |
||||
|
||||
_, err = f.Write([]byte("{}")) |
||||
require.NoError(t, err) |
||||
}() |
||||
|
||||
select { |
||||
case <-w.Watch(): |
||||
case <-time.After(500 * time.Millisecond): |
||||
t.Errorf("timed out") |
||||
return |
||||
} |
||||
} |
||||
|
||||
func TestWriteMultipleTimes(t *testing.T) { |
||||
fpath, err := test.CreateTempFile([]byte("{}")) |
||||
require.NoError(t, err) |
||||
|
||||
w, err := New(fpath) |
||||
require.NoError(t, err) |
||||
defer w.Close() |
||||
|
||||
func() { |
||||
f, err := os.Create(fpath) |
||||
require.NoError(t, err) |
||||
defer f.Close() |
||||
|
||||
_, err = f.Write([]byte("{}")) |
||||
require.NoError(t, err) |
||||
}() |
||||
|
||||
time.Sleep(10 * time.Millisecond) |
||||
|
||||
func() { |
||||
f, err := os.Create(fpath) |
||||
require.NoError(t, err) |
||||
defer f.Close() |
||||
|
||||
_, err = f.Write([]byte("{}")) |
||||
require.NoError(t, err) |
||||
}() |
||||
|
||||
select { |
||||
case <-w.Watch(): |
||||
case <-time.After(500 * time.Millisecond): |
||||
t.Errorf("timed out") |
||||
return |
||||
} |
||||
|
||||
select { |
||||
case <-time.After(500 * time.Millisecond): |
||||
case <-w.Watch(): |
||||
t.Errorf("should not happen") |
||||
return |
||||
} |
||||
} |
||||
|
||||
func TestDeleteCreate(t *testing.T) { |
||||
fpath, err := test.CreateTempFile([]byte("{}")) |
||||
require.NoError(t, err) |
||||
|
||||
w, err := New(fpath) |
||||
require.NoError(t, err) |
||||
defer w.Close() |
||||
|
||||
os.Remove(fpath) |
||||
time.Sleep(10 * time.Millisecond) |
||||
|
||||
func() { |
||||
f, err := os.Create(fpath) |
||||
require.NoError(t, err) |
||||
defer f.Close() |
||||
|
||||
_, err = f.Write([]byte("{}")) |
||||
require.NoError(t, err) |
||||
}() |
||||
|
||||
select { |
||||
case <-w.Watch(): |
||||
case <-time.After(500 * time.Millisecond): |
||||
t.Errorf("timed out") |
||||
return |
||||
} |
||||
} |
||||
|
||||
func TestSymlinkDeleteCreate(t *testing.T) { |
||||
fpath, err := test.CreateTempFile([]byte("{}")) |
||||
require.NoError(t, err) |
||||
|
||||
err = os.Symlink(fpath, fpath+"-sym") |
||||
require.NoError(t, err) |
||||
|
||||
w, err := New(fpath + "-sym") |
||||
require.NoError(t, err) |
||||
defer w.Close() |
||||
|
||||
os.Remove(fpath) |
||||
|
||||
func() { |
||||
f, err := os.Create(fpath) |
||||
require.NoError(t, err) |
||||
defer f.Close() |
||||
|
||||
_, err = f.Write([]byte("{}")) |
||||
require.NoError(t, err) |
||||
}() |
||||
|
||||
select { |
||||
case <-w.Watch(): |
||||
case <-time.After(500 * time.Millisecond): |
||||
t.Errorf("timed out") |
||||
return |
||||
} |
||||
} |
||||
@@ -1,981 +0,0 @@
|
||||
// Package core contains the main struct of the software.
|
||||
package core |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"os" |
||||
"os/signal" |
||||
"path/filepath" |
||||
"reflect" |
||||
"sort" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/alecthomas/kong" |
||||
"github.com/bluenviron/gortsplib/v4" |
||||
"github.com/gin-gonic/gin" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/api" |
||||
"github.com/bluenviron/mediamtx/internal/auth" |
||||
"github.com/bluenviron/mediamtx/internal/conf" |
||||
"github.com/bluenviron/mediamtx/internal/confwatcher" |
||||
"github.com/bluenviron/mediamtx/internal/externalcmd" |
||||
"github.com/bluenviron/mediamtx/internal/logger" |
||||
"github.com/bluenviron/mediamtx/internal/metrics" |
||||
"github.com/bluenviron/mediamtx/internal/playback" |
||||
"github.com/bluenviron/mediamtx/internal/pprof" |
||||
"github.com/bluenviron/mediamtx/internal/record" |
||||
"github.com/bluenviron/mediamtx/internal/rlimit" |
||||
"github.com/bluenviron/mediamtx/internal/servers/hls" |
||||
"github.com/bluenviron/mediamtx/internal/servers/rtmp" |
||||
"github.com/bluenviron/mediamtx/internal/servers/rtsp" |
||||
"github.com/bluenviron/mediamtx/internal/servers/srt" |
||||
"github.com/bluenviron/mediamtx/internal/servers/webrtc" |
||||
) |
||||
|
||||
var version = "v0.0.0" |
||||
|
||||
var defaultConfPaths = []string{ |
||||
"rtsp-simple-server.yml", |
||||
"mediamtx.yml", |
||||
"/usr/local/etc/mediamtx.yml", |
||||
"/usr/etc/mediamtx.yml", |
||||
"/etc/mediamtx/mediamtx.yml", |
||||
} |
||||
|
||||
func gatherCleanerEntries(paths map[string]*conf.Path) []record.CleanerEntry { |
||||
out := make(map[record.CleanerEntry]struct{}) |
||||
|
||||
for _, pa := range paths { |
||||
if pa.Record && pa.RecordDeleteAfter != 0 { |
||||
entry := record.CleanerEntry{ |
||||
Path: pa.RecordPath, |
||||
Format: pa.RecordFormat, |
||||
DeleteAfter: time.Duration(pa.RecordDeleteAfter), |
||||
} |
||||
out[entry] = struct{}{} |
||||
} |
||||
} |
||||
|
||||
out2 := make([]record.CleanerEntry, len(out)) |
||||
i := 0 |
||||
|
||||
for v := range out { |
||||
out2[i] = v |
||||
i++ |
||||
} |
||||
|
||||
sort.Slice(out2, func(i, j int) bool { |
||||
if out2[i].Path != out2[j].Path { |
||||
return out2[i].Path < out2[j].Path |
||||
} |
||||
return out2[i].DeleteAfter < out2[j].DeleteAfter |
||||
}) |
||||
|
||||
return out2 |
||||
} |
||||
|
||||
var cli struct { |
||||
Version bool `help:"print version"` |
||||
Confpath string `arg:"" default:""` |
||||
} |
||||
|
||||
// Core is an instance of MediaMTX.
|
||||
type Core struct { |
||||
ctx context.Context |
||||
ctxCancel func() |
||||
confPath string |
||||
conf *conf.Conf |
||||
logger *logger.Logger |
||||
externalCmdPool *externalcmd.Pool |
||||
authManager *auth.Manager |
||||
metrics *metrics.Metrics |
||||
pprof *pprof.PPROF |
||||
recordCleaner *record.Cleaner |
||||
playbackServer *playback.Server |
||||
pathManager *pathManager |
||||
rtspServer *rtsp.Server |
||||
rtspsServer *rtsp.Server |
||||
rtmpServer *rtmp.Server |
||||
rtmpsServer *rtmp.Server |
||||
hlsServer *hls.Server |
||||
webRTCServer *webrtc.Server |
||||
srtServer *srt.Server |
||||
api *api.API |
||||
confWatcher *confwatcher.ConfWatcher |
||||
|
||||
// in
|
||||
chAPIConfigSet chan *conf.Conf |
||||
|
||||
// out
|
||||
done chan struct{} |
||||
} |
||||
|
||||
// New allocates a Core.
|
||||
func New(args []string) (*Core, bool) { |
||||
parser, err := kong.New(&cli, |
||||
kong.Description("MediaMTX "+version), |
||||
kong.UsageOnError(), |
||||
kong.ValueFormatter(func(value *kong.Value) string { |
||||
switch value.Name { |
||||
case "confpath": |
||||
return "path to a config file. The default is mediamtx.yml." |
||||
|
||||
default: |
||||
return kong.DefaultHelpValueFormatter(value) |
||||
} |
||||
})) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
_, err = parser.Parse(args) |
||||
parser.FatalIfErrorf(err) |
||||
|
||||
if cli.Version { |
||||
fmt.Println(version) |
||||
os.Exit(0) |
||||
} |
||||
|
||||
ctx, ctxCancel := context.WithCancel(context.Background()) |
||||
|
||||
p := &Core{ |
||||
ctx: ctx, |
||||
ctxCancel: ctxCancel, |
||||
chAPIConfigSet: make(chan *conf.Conf), |
||||
done: make(chan struct{}), |
||||
} |
||||
|
||||
p.conf, p.confPath, err = conf.Load(cli.Confpath, defaultConfPaths) |
||||
if err != nil { |
||||
fmt.Printf("ERR: %s\n", err) |
||||
return nil, false |
||||
} |
||||
|
||||
err = p.createResources(true) |
||||
if err != nil { |
||||
if p.logger != nil { |
||||
p.Log(logger.Error, "%s", err) |
||||
} else { |
||||
fmt.Printf("ERR: %s\n", err) |
||||
} |
||||
p.closeResources(nil, false) |
||||
return nil, false |
||||
} |
||||
|
||||
go p.run() |
||||
|
||||
return p, true |
||||
} |
||||
|
||||
// Close closes Core and waits for all goroutines to return.
|
||||
func (p *Core) Close() { |
||||
p.ctxCancel() |
||||
<-p.done |
||||
} |
||||
|
||||
// Wait waits for the Core to exit.
|
||||
func (p *Core) Wait() { |
||||
<-p.done |
||||
} |
||||
|
||||
// Log implements logger.Writer.
|
||||
func (p *Core) Log(level logger.Level, format string, args ...interface{}) { |
||||
p.logger.Log(level, format, args...) |
||||
} |
||||
|
||||
func (p *Core) run() { |
||||
defer close(p.done) |
||||
|
||||
confChanged := func() chan struct{} { |
||||
if p.confWatcher != nil { |
||||
return p.confWatcher.Watch() |
||||
} |
||||
return make(chan struct{}) |
||||
}() |
||||
|
||||
interrupt := make(chan os.Signal, 1) |
||||
signal.Notify(interrupt, os.Interrupt) |
||||
|
||||
outer: |
||||
for { |
||||
select { |
||||
case <-confChanged: |
||||
p.Log(logger.Info, "reloading configuration (file changed)") |
||||
|
||||
newConf, _, err := conf.Load(p.confPath, nil) |
||||
if err != nil { |
||||
p.Log(logger.Error, "%s", err) |
||||
break outer |
||||
} |
||||
|
||||
err = p.reloadConf(newConf, false) |
||||
if err != nil { |
||||
p.Log(logger.Error, "%s", err) |
||||
break outer |
||||
} |
||||
|
||||
case newConf := <-p.chAPIConfigSet: |
||||
p.Log(logger.Info, "reloading configuration (API request)") |
||||
|
||||
err := p.reloadConf(newConf, true) |
||||
if err != nil { |
||||
p.Log(logger.Error, "%s", err) |
||||
break outer |
||||
} |
||||
|
||||
case <-interrupt: |
||||
p.Log(logger.Info, "shutting down gracefully") |
||||
break outer |
||||
|
||||
case <-p.ctx.Done(): |
||||
break outer |
||||
} |
||||
} |
||||
|
||||
p.ctxCancel() |
||||
|
||||
p.closeResources(nil, false) |
||||
} |
||||
|
||||
func (p *Core) createResources(initial bool) error { |
||||
var err error |
||||
|
||||
if p.logger == nil { |
||||
p.logger, err = logger.New( |
||||
logger.Level(p.conf.LogLevel), |
||||
p.conf.LogDestinations, |
||||
p.conf.LogFile, |
||||
) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
if initial { |
||||
p.Log(logger.Info, "MediaMTX %s", version) |
||||
|
||||
if p.confPath != "" { |
||||
a, _ := filepath.Abs(p.confPath) |
||||
p.Log(logger.Info, "configuration loaded from %s", a) |
||||
} else { |
||||
list := make([]string, len(defaultConfPaths)) |
||||
for i, pa := range defaultConfPaths { |
||||
a, _ := filepath.Abs(pa) |
||||
list[i] = a |
||||
} |
||||
|
||||
p.Log(logger.Warn, |
||||
"configuration file not found (looked in %s), using an empty configuration", |
||||
strings.Join(list, ", ")) |
||||
} |
||||
|
||||
// on Linux, try to raise the number of file descriptors that can be opened
// to allow the maximum possible number of clients.
rlimit.Raise() //nolint:errcheck
||||
|
||||
gin.SetMode(gin.ReleaseMode) |
||||
|
||||
p.externalCmdPool = externalcmd.NewPool() |
||||
} |
||||
|
||||
if p.authManager == nil { |
||||
p.authManager = &auth.Manager{ |
||||
Method: p.conf.AuthMethod, |
||||
InternalUsers: p.conf.AuthInternalUsers, |
||||
HTTPAddress: p.conf.AuthHTTPAddress, |
||||
HTTPExclude: p.conf.AuthHTTPExclude, |
||||
JWTJWKS: p.conf.AuthJWTJWKS, |
||||
ReadTimeout: time.Duration(p.conf.ReadTimeout), |
||||
RTSPAuthMethods: p.conf.RTSPAuthMethods, |
||||
} |
||||
} |
||||
|
||||
if p.conf.Metrics && |
||||
p.metrics == nil { |
||||
i := &metrics.Metrics{ |
||||
Address: p.conf.MetricsAddress, |
||||
ReadTimeout: p.conf.ReadTimeout, |
||||
AuthManager: p.authManager, |
||||
Parent: p, |
||||
} |
||||
err := i.Initialize() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
p.metrics = i |
||||
} |
||||
|
||||
if p.conf.PPROF && |
||||
p.pprof == nil { |
||||
i := &pprof.PPROF{ |
||||
Address: p.conf.PPROFAddress, |
||||
ReadTimeout: p.conf.ReadTimeout, |
||||
AuthManager: p.authManager, |
||||
Parent: p, |
||||
} |
||||
err := i.Initialize() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
p.pprof = i |
||||
} |
||||
|
||||
cleanerEntries := gatherCleanerEntries(p.conf.Paths) |
||||
if len(cleanerEntries) != 0 && |
||||
p.recordCleaner == nil { |
||||
p.recordCleaner = &record.Cleaner{ |
||||
Entries: cleanerEntries, |
||||
Parent: p, |
||||
} |
||||
p.recordCleaner.Initialize() |
||||
} |
||||
|
||||
if p.conf.Playback && |
||||
p.playbackServer == nil { |
||||
i := &playback.Server{ |
||||
Address: p.conf.PlaybackAddress, |
||||
ReadTimeout: p.conf.ReadTimeout, |
||||
PathConfs: p.conf.Paths, |
||||
AuthManager: p.authManager, |
||||
Parent: p, |
||||
} |
||||
err := i.Initialize() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
p.playbackServer = i |
||||
} |
||||
|
||||
if p.pathManager == nil { |
||||
p.pathManager = &pathManager{ |
||||
logLevel: p.conf.LogLevel, |
||||
authManager: p.authManager, |
||||
rtspAddress: p.conf.RTSPAddress, |
||||
readTimeout: p.conf.ReadTimeout, |
||||
writeTimeout: p.conf.WriteTimeout, |
||||
writeQueueSize: p.conf.WriteQueueSize, |
||||
udpMaxPayloadSize: p.conf.UDPMaxPayloadSize, |
||||
pathConfs: p.conf.Paths, |
||||
externalCmdPool: p.externalCmdPool, |
||||
parent: p, |
||||
} |
||||
p.pathManager.initialize() |
||||
|
||||
if p.metrics != nil { |
||||
p.metrics.SetPathManager(p.pathManager) |
||||
} |
||||
} |
||||
|
||||
if p.conf.RTSP && |
||||
(p.conf.Encryption == conf.EncryptionNo || |
||||
p.conf.Encryption == conf.EncryptionOptional) && |
||||
p.rtspServer == nil { |
||||
_, useUDP := p.conf.Protocols[conf.Protocol(gortsplib.TransportUDP)] |
||||
_, useMulticast := p.conf.Protocols[conf.Protocol(gortsplib.TransportUDPMulticast)] |
||||
|
||||
i := &rtsp.Server{ |
||||
Address: p.conf.RTSPAddress, |
||||
AuthMethods: p.conf.RTSPAuthMethods, |
||||
ReadTimeout: p.conf.ReadTimeout, |
||||
WriteTimeout: p.conf.WriteTimeout, |
||||
WriteQueueSize: p.conf.WriteQueueSize, |
||||
UseUDP: useUDP, |
||||
UseMulticast: useMulticast, |
||||
RTPAddress: p.conf.RTPAddress, |
||||
RTCPAddress: p.conf.RTCPAddress, |
||||
MulticastIPRange: p.conf.MulticastIPRange, |
||||
MulticastRTPPort: p.conf.MulticastRTPPort, |
||||
MulticastRTCPPort: p.conf.MulticastRTCPPort, |
||||
IsTLS: false, |
||||
ServerCert: "", |
||||
ServerKey: "", |
||||
RTSPAddress: p.conf.RTSPAddress, |
||||
Protocols: p.conf.Protocols, |
||||
RunOnConnect: p.conf.RunOnConnect, |
||||
RunOnConnectRestart: p.conf.RunOnConnectRestart, |
||||
RunOnDisconnect: p.conf.RunOnDisconnect, |
||||
ExternalCmdPool: p.externalCmdPool, |
||||
PathManager: p.pathManager, |
||||
Parent: p, |
||||
} |
||||
err := i.Initialize() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
p.rtspServer = i |
||||
|
||||
if p.metrics != nil { |
||||
p.metrics.SetRTSPServer(p.rtspServer) |
||||
} |
||||
} |
||||
|
||||
if p.conf.RTSP && |
||||
(p.conf.Encryption == conf.EncryptionStrict || |
||||
p.conf.Encryption == conf.EncryptionOptional) && |
||||
p.rtspsServer == nil { |
||||
i := &rtsp.Server{ |
||||
Address: p.conf.RTSPSAddress, |
||||
AuthMethods: p.conf.RTSPAuthMethods, |
||||
ReadTimeout: p.conf.ReadTimeout, |
||||
WriteTimeout: p.conf.WriteTimeout, |
||||
WriteQueueSize: p.conf.WriteQueueSize, |
||||
UseUDP: false, |
||||
UseMulticast: false, |
||||
RTPAddress: "", |
||||
RTCPAddress: "", |
||||
MulticastIPRange: "", |
||||
MulticastRTPPort: 0, |
||||
MulticastRTCPPort: 0, |
||||
IsTLS: true, |
||||
ServerCert: p.conf.ServerCert, |
||||
ServerKey: p.conf.ServerKey, |
||||
RTSPAddress: p.conf.RTSPAddress, |
||||
Protocols: p.conf.Protocols, |
||||
RunOnConnect: p.conf.RunOnConnect, |
||||
RunOnConnectRestart: p.conf.RunOnConnectRestart, |
||||
RunOnDisconnect: p.conf.RunOnDisconnect, |
||||
ExternalCmdPool: p.externalCmdPool, |
||||
PathManager: p.pathManager, |
||||
Parent: p, |
||||
} |
||||
err := i.Initialize() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
p.rtspsServer = i |
||||
|
||||
if p.metrics != nil { |
||||
p.metrics.SetRTSPSServer(p.rtspsServer) |
||||
} |
||||
} |
||||
|
||||
if p.conf.RTMP && |
||||
(p.conf.RTMPEncryption == conf.EncryptionNo || |
||||
p.conf.RTMPEncryption == conf.EncryptionOptional) && |
||||
p.rtmpServer == nil { |
||||
i := &rtmp.Server{ |
||||
Address: p.conf.RTMPAddress, |
||||
ReadTimeout: p.conf.ReadTimeout, |
||||
WriteTimeout: p.conf.WriteTimeout, |
||||
WriteQueueSize: p.conf.WriteQueueSize, |
||||
IsTLS: false, |
||||
ServerCert: "", |
||||
ServerKey: "", |
||||
RTSPAddress: p.conf.RTSPAddress, |
||||
RunOnConnect: p.conf.RunOnConnect, |
||||
RunOnConnectRestart: p.conf.RunOnConnectRestart, |
||||
RunOnDisconnect: p.conf.RunOnDisconnect, |
||||
ExternalCmdPool: p.externalCmdPool, |
||||
PathManager: p.pathManager, |
||||
Parent: p, |
||||
} |
||||
err := i.Initialize() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
p.rtmpServer = i |
||||
|
||||
if p.metrics != nil { |
||||
p.metrics.SetRTMPServer(p.rtmpServer) |
||||
} |
||||
} |
||||
|
||||
if p.conf.RTMP && |
||||
(p.conf.RTMPEncryption == conf.EncryptionStrict || |
||||
p.conf.RTMPEncryption == conf.EncryptionOptional) && |
||||
p.rtmpsServer == nil { |
||||
i := &rtmp.Server{ |
||||
Address: p.conf.RTMPSAddress, |
||||
ReadTimeout: p.conf.ReadTimeout, |
||||
WriteTimeout: p.conf.WriteTimeout, |
||||
WriteQueueSize: p.conf.WriteQueueSize, |
||||
IsTLS: true, |
||||
ServerCert: p.conf.RTMPServerCert, |
||||
ServerKey: p.conf.RTMPServerKey, |
||||
RTSPAddress: p.conf.RTSPAddress, |
||||
RunOnConnect: p.conf.RunOnConnect, |
||||
RunOnConnectRestart: p.conf.RunOnConnectRestart, |
||||
RunOnDisconnect: p.conf.RunOnDisconnect, |
||||
ExternalCmdPool: p.externalCmdPool, |
||||
PathManager: p.pathManager, |
||||
Parent: p, |
||||
} |
||||
err := i.Initialize() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
p.rtmpsServer = i |
||||
|
||||
if p.metrics != nil { |
||||
p.metrics.SetRTMPSServer(p.rtmpsServer) |
||||
} |
||||
} |
||||
|
||||
if p.conf.HLS && |
||||
p.hlsServer == nil { |
||||
i := &hls.Server{ |
||||
Address: p.conf.HLSAddress, |
||||
Encryption: p.conf.HLSEncryption, |
||||
ServerKey: p.conf.HLSServerKey, |
||||
ServerCert: p.conf.HLSServerCert, |
||||
AlwaysRemux: p.conf.HLSAlwaysRemux, |
||||
Variant: p.conf.HLSVariant, |
||||
SegmentCount: p.conf.HLSSegmentCount, |
||||
SegmentDuration: p.conf.HLSSegmentDuration, |
||||
PartDuration: p.conf.HLSPartDuration, |
||||
SegmentMaxSize: p.conf.HLSSegmentMaxSize, |
||||
AllowOrigin: p.conf.HLSAllowOrigin, |
||||
TrustedProxies: p.conf.HLSTrustedProxies, |
||||
Directory: p.conf.HLSDirectory, |
||||
ReadTimeout: p.conf.ReadTimeout, |
||||
WriteQueueSize: p.conf.WriteQueueSize, |
||||
PathManager: p.pathManager, |
||||
Parent: p, |
||||
} |
||||
err := i.Initialize() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
p.hlsServer = i |
||||
|
||||
p.pathManager.setHLSServer(p.hlsServer) |
||||
|
||||
if p.metrics != nil { |
||||
p.metrics.SetHLSServer(p.hlsServer) |
||||
} |
||||
} |
||||
|
||||
if p.conf.WebRTC && |
||||
p.webRTCServer == nil { |
||||
i := &webrtc.Server{ |
||||
Address: p.conf.WebRTCAddress, |
||||
Encryption: p.conf.WebRTCEncryption, |
||||
ServerKey: p.conf.WebRTCServerKey, |
||||
ServerCert: p.conf.WebRTCServerCert, |
||||
AllowOrigin: p.conf.WebRTCAllowOrigin, |
||||
TrustedProxies: p.conf.WebRTCTrustedProxies, |
||||
ReadTimeout: p.conf.ReadTimeout, |
||||
WriteQueueSize: p.conf.WriteQueueSize, |
||||
LocalUDPAddress: p.conf.WebRTCLocalUDPAddress, |
||||
LocalTCPAddress: p.conf.WebRTCLocalTCPAddress, |
||||
IPsFromInterfaces: p.conf.WebRTCIPsFromInterfaces, |
||||
IPsFromInterfacesList: p.conf.WebRTCIPsFromInterfacesList, |
||||
AdditionalHosts: p.conf.WebRTCAdditionalHosts, |
||||
ICEServers: p.conf.WebRTCICEServers2, |
||||
ExternalCmdPool: p.externalCmdPool, |
||||
PathManager: p.pathManager, |
||||
Parent: p, |
||||
} |
||||
err := i.Initialize() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
p.webRTCServer = i |
||||
|
||||
if p.metrics != nil { |
||||
p.metrics.SetWebRTCServer(p.webRTCServer) |
||||
} |
||||
} |
||||
|
||||
if p.conf.SRT && |
||||
p.srtServer == nil { |
||||
i := &srt.Server{ |
||||
Address: p.conf.SRTAddress, |
||||
RTSPAddress: p.conf.RTSPAddress, |
||||
ReadTimeout: p.conf.ReadTimeout, |
||||
WriteTimeout: p.conf.WriteTimeout, |
||||
WriteQueueSize: p.conf.WriteQueueSize, |
||||
UDPMaxPayloadSize: p.conf.UDPMaxPayloadSize, |
||||
RunOnConnect: p.conf.RunOnConnect, |
||||
RunOnConnectRestart: p.conf.RunOnConnectRestart, |
||||
RunOnDisconnect: p.conf.RunOnDisconnect, |
||||
ExternalCmdPool: p.externalCmdPool, |
||||
PathManager: p.pathManager, |
||||
Parent: p, |
||||
} |
||||
err := i.Initialize() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
p.srtServer = i |
||||
|
||||
if p.metrics != nil { |
||||
p.metrics.SetSRTServer(p.srtServer) |
||||
} |
||||
} |
||||
|
||||
if p.conf.API && |
||||
p.api == nil { |
||||
i := &api.API{ |
||||
Address: p.conf.APIAddress, |
||||
ReadTimeout: p.conf.ReadTimeout, |
||||
Conf: p.conf, |
||||
AuthManager: p.authManager, |
||||
PathManager: p.pathManager, |
||||
RTSPServer: p.rtspServer, |
||||
RTSPSServer: p.rtspsServer, |
||||
RTMPServer: p.rtmpServer, |
||||
RTMPSServer: p.rtmpsServer, |
||||
HLSServer: p.hlsServer, |
||||
WebRTCServer: p.webRTCServer, |
||||
SRTServer: p.srtServer, |
||||
Parent: p, |
||||
} |
||||
err := i.Initialize() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
p.api = i |
||||
} |
||||
|
||||
if initial && p.confPath != "" { |
||||
p.confWatcher, err = confwatcher.New(p.confPath) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
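// closeResources compares the new configuration (nil means shutdown) with the
// current one and computes, for each resource, whether it must be closed and
// recreated; resources whose relevant parameters are unchanged are kept, and
// some (internal auth users, path configurations) are reloaded in place instead.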
func (p *Core) closeResources(newConf *conf.Conf, calledByAPI bool) { |
||||
closeLogger := newConf == nil || |
||||
newConf.LogLevel != p.conf.LogLevel || |
||||
!reflect.DeepEqual(newConf.LogDestinations, p.conf.LogDestinations) || |
||||
newConf.LogFile != p.conf.LogFile |
||||
|
||||
closeAuthManager := newConf == nil || |
||||
newConf.AuthMethod != p.conf.AuthMethod || |
||||
newConf.AuthHTTPAddress != p.conf.AuthHTTPAddress || |
||||
!reflect.DeepEqual(newConf.AuthHTTPExclude, p.conf.AuthHTTPExclude) || |
||||
newConf.AuthJWTJWKS != p.conf.AuthJWTJWKS || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
!reflect.DeepEqual(newConf.RTSPAuthMethods, p.conf.RTSPAuthMethods) |
||||
if !closeAuthManager && !reflect.DeepEqual(newConf.AuthInternalUsers, p.conf.AuthInternalUsers) { |
||||
p.authManager.ReloadInternalUsers(newConf.AuthInternalUsers) |
||||
} |
||||
|
||||
closeMetrics := newConf == nil || |
||||
newConf.Metrics != p.conf.Metrics || |
||||
newConf.MetricsAddress != p.conf.MetricsAddress || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
closeAuthManager || |
||||
closeLogger |
||||
|
||||
closePPROF := newConf == nil || |
||||
newConf.PPROF != p.conf.PPROF || |
||||
newConf.PPROFAddress != p.conf.PPROFAddress || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
closeAuthManager || |
||||
closeLogger |
||||
|
||||
closeRecorderCleaner := newConf == nil || |
||||
!reflect.DeepEqual(gatherCleanerEntries(newConf.Paths), gatherCleanerEntries(p.conf.Paths)) || |
||||
closeLogger |
||||
|
||||
closePlaybackServer := newConf == nil || |
||||
newConf.Playback != p.conf.Playback || |
||||
newConf.PlaybackAddress != p.conf.PlaybackAddress || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
closeAuthManager || |
||||
closeLogger |
||||
if !closePlaybackServer && p.playbackServer != nil && !reflect.DeepEqual(newConf.Paths, p.conf.Paths) { |
||||
p.playbackServer.ReloadPathConfs(newConf.Paths) |
||||
} |
||||
|
||||
closePathManager := newConf == nil || |
||||
newConf.LogLevel != p.conf.LogLevel || |
||||
newConf.RTSPAddress != p.conf.RTSPAddress || |
||||
!reflect.DeepEqual(newConf.RTSPAuthMethods, p.conf.RTSPAuthMethods) || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
newConf.WriteTimeout != p.conf.WriteTimeout || |
||||
newConf.WriteQueueSize != p.conf.WriteQueueSize || |
||||
newConf.UDPMaxPayloadSize != p.conf.UDPMaxPayloadSize || |
||||
closeMetrics || |
||||
closeAuthManager || |
||||
closeLogger |
||||
if !closePathManager && !reflect.DeepEqual(newConf.Paths, p.conf.Paths) { |
||||
p.pathManager.ReloadPathConfs(newConf.Paths) |
||||
} |
||||
|
||||
closeRTSPServer := newConf == nil || |
||||
newConf.RTSP != p.conf.RTSP || |
||||
newConf.Encryption != p.conf.Encryption || |
||||
newConf.RTSPAddress != p.conf.RTSPAddress || |
||||
!reflect.DeepEqual(newConf.RTSPAuthMethods, p.conf.RTSPAuthMethods) || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
newConf.WriteTimeout != p.conf.WriteTimeout || |
||||
newConf.WriteQueueSize != p.conf.WriteQueueSize || |
||||
!reflect.DeepEqual(newConf.Protocols, p.conf.Protocols) || |
||||
newConf.RTPAddress != p.conf.RTPAddress || |
||||
newConf.RTCPAddress != p.conf.RTCPAddress || |
||||
newConf.MulticastIPRange != p.conf.MulticastIPRange || |
||||
newConf.MulticastRTPPort != p.conf.MulticastRTPPort || |
||||
newConf.MulticastRTCPPort != p.conf.MulticastRTCPPort || |
||||
newConf.RTSPAddress != p.conf.RTSPAddress || |
||||
!reflect.DeepEqual(newConf.Protocols, p.conf.Protocols) || |
||||
newConf.RunOnConnect != p.conf.RunOnConnect || |
||||
newConf.RunOnConnectRestart != p.conf.RunOnConnectRestart || |
||||
newConf.RunOnDisconnect != p.conf.RunOnDisconnect || |
||||
closeMetrics || |
||||
closePathManager || |
||||
closeLogger |
||||
|
||||
closeRTSPSServer := newConf == nil || |
||||
newConf.RTSP != p.conf.RTSP || |
||||
newConf.Encryption != p.conf.Encryption || |
||||
newConf.RTSPSAddress != p.conf.RTSPSAddress || |
||||
!reflect.DeepEqual(newConf.RTSPAuthMethods, p.conf.RTSPAuthMethods) || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
newConf.WriteTimeout != p.conf.WriteTimeout || |
||||
newConf.WriteQueueSize != p.conf.WriteQueueSize || |
||||
newConf.ServerCert != p.conf.ServerCert || |
||||
newConf.ServerKey != p.conf.ServerKey || |
||||
newConf.RTSPAddress != p.conf.RTSPAddress || |
||||
!reflect.DeepEqual(newConf.Protocols, p.conf.Protocols) || |
||||
newConf.RunOnConnect != p.conf.RunOnConnect || |
||||
newConf.RunOnConnectRestart != p.conf.RunOnConnectRestart || |
||||
newConf.RunOnDisconnect != p.conf.RunOnDisconnect || |
||||
closeMetrics || |
||||
closePathManager || |
||||
closeLogger |
||||
|
||||
closeRTMPServer := newConf == nil || |
||||
newConf.RTMP != p.conf.RTMP || |
||||
newConf.RTMPEncryption != p.conf.RTMPEncryption || |
||||
newConf.RTMPAddress != p.conf.RTMPAddress || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
newConf.WriteTimeout != p.conf.WriteTimeout || |
||||
newConf.WriteQueueSize != p.conf.WriteQueueSize || |
||||
newConf.RTSPAddress != p.conf.RTSPAddress || |
||||
newConf.RunOnConnect != p.conf.RunOnConnect || |
||||
newConf.RunOnConnectRestart != p.conf.RunOnConnectRestart || |
||||
newConf.RunOnDisconnect != p.conf.RunOnDisconnect || |
||||
closeMetrics || |
||||
closePathManager || |
||||
closeLogger |
||||
|
||||
closeRTMPSServer := newConf == nil || |
||||
newConf.RTMP != p.conf.RTMP || |
||||
newConf.RTMPEncryption != p.conf.RTMPEncryption || |
||||
newConf.RTMPSAddress != p.conf.RTMPSAddress || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
newConf.WriteTimeout != p.conf.WriteTimeout || |
||||
newConf.WriteQueueSize != p.conf.WriteQueueSize || |
||||
newConf.RTMPServerCert != p.conf.RTMPServerCert || |
||||
newConf.RTMPServerKey != p.conf.RTMPServerKey || |
||||
newConf.RTSPAddress != p.conf.RTSPAddress || |
||||
newConf.RunOnConnect != p.conf.RunOnConnect || |
||||
newConf.RunOnConnectRestart != p.conf.RunOnConnectRestart || |
||||
newConf.RunOnDisconnect != p.conf.RunOnDisconnect || |
||||
closeMetrics || |
||||
closePathManager || |
||||
closeLogger |
||||
|
||||
closeHLSServer := newConf == nil || |
||||
newConf.HLS != p.conf.HLS || |
||||
newConf.HLSAddress != p.conf.HLSAddress || |
||||
newConf.HLSEncryption != p.conf.HLSEncryption || |
||||
newConf.HLSServerKey != p.conf.HLSServerKey || |
||||
newConf.HLSServerCert != p.conf.HLSServerCert || |
||||
newConf.HLSAlwaysRemux != p.conf.HLSAlwaysRemux || |
||||
newConf.HLSVariant != p.conf.HLSVariant || |
||||
newConf.HLSSegmentCount != p.conf.HLSSegmentCount || |
||||
newConf.HLSSegmentDuration != p.conf.HLSSegmentDuration || |
||||
newConf.HLSPartDuration != p.conf.HLSPartDuration || |
||||
newConf.HLSSegmentMaxSize != p.conf.HLSSegmentMaxSize || |
||||
newConf.HLSAllowOrigin != p.conf.HLSAllowOrigin || |
||||
!reflect.DeepEqual(newConf.HLSTrustedProxies, p.conf.HLSTrustedProxies) || |
||||
newConf.HLSDirectory != p.conf.HLSDirectory || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
newConf.WriteQueueSize != p.conf.WriteQueueSize || |
||||
closePathManager || |
||||
closeMetrics || |
||||
closeLogger |
||||
|
||||
closeWebRTCServer := newConf == nil || |
||||
newConf.WebRTC != p.conf.WebRTC || |
||||
newConf.WebRTCAddress != p.conf.WebRTCAddress || |
||||
newConf.WebRTCEncryption != p.conf.WebRTCEncryption || |
||||
newConf.WebRTCServerKey != p.conf.WebRTCServerKey || |
||||
newConf.WebRTCServerCert != p.conf.WebRTCServerCert || |
||||
newConf.WebRTCAllowOrigin != p.conf.WebRTCAllowOrigin || |
||||
!reflect.DeepEqual(newConf.WebRTCTrustedProxies, p.conf.WebRTCTrustedProxies) || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
newConf.WriteQueueSize != p.conf.WriteQueueSize || |
||||
newConf.WebRTCLocalUDPAddress != p.conf.WebRTCLocalUDPAddress || |
||||
newConf.WebRTCLocalTCPAddress != p.conf.WebRTCLocalTCPAddress || |
||||
newConf.WebRTCIPsFromInterfaces != p.conf.WebRTCIPsFromInterfaces || |
||||
!reflect.DeepEqual(newConf.WebRTCIPsFromInterfacesList, p.conf.WebRTCIPsFromInterfacesList) || |
||||
!reflect.DeepEqual(newConf.WebRTCAdditionalHosts, p.conf.WebRTCAdditionalHosts) || |
||||
!reflect.DeepEqual(newConf.WebRTCICEServers2, p.conf.WebRTCICEServers2) || |
||||
closeMetrics || |
||||
closePathManager || |
||||
closeLogger |
||||
|
||||
closeSRTServer := newConf == nil || |
||||
newConf.SRT != p.conf.SRT || |
||||
newConf.SRTAddress != p.conf.SRTAddress || |
||||
newConf.RTSPAddress != p.conf.RTSPAddress || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
newConf.WriteTimeout != p.conf.WriteTimeout || |
||||
newConf.WriteQueueSize != p.conf.WriteQueueSize || |
||||
newConf.UDPMaxPayloadSize != p.conf.UDPMaxPayloadSize || |
||||
newConf.RunOnConnect != p.conf.RunOnConnect || |
||||
newConf.RunOnConnectRestart != p.conf.RunOnConnectRestart || |
||||
newConf.RunOnDisconnect != p.conf.RunOnDisconnect || |
||||
closePathManager || |
||||
closeLogger |
||||
|
||||
closeAPI := newConf == nil || |
||||
newConf.API != p.conf.API || |
||||
newConf.APIAddress != p.conf.APIAddress || |
||||
newConf.ReadTimeout != p.conf.ReadTimeout || |
||||
closeAuthManager || |
||||
closePathManager || |
||||
closeRTSPServer || |
||||
closeRTSPSServer || |
||||
closeRTMPServer || |
||||
closeHLSServer || |
||||
closeWebRTCServer || |
||||
closeSRTServer || |
||||
closeLogger |
||||
|
||||
if newConf == nil && p.confWatcher != nil { |
||||
p.confWatcher.Close() |
||||
p.confWatcher = nil |
||||
} |
||||
|
||||
if p.api != nil { |
||||
if closeAPI { |
||||
p.api.Close() |
||||
p.api = nil |
||||
} else if !calledByAPI { // avoid a loop
||||
p.api.ReloadConf(newConf) |
||||
} |
||||
} |
||||
|
||||
if closeSRTServer && p.srtServer != nil { |
||||
if p.metrics != nil { |
||||
p.metrics.SetSRTServer(nil) |
||||
} |
||||
|
||||
p.srtServer.Close() |
||||
p.srtServer = nil |
||||
} |
||||
|
||||
if closeWebRTCServer && p.webRTCServer != nil { |
||||
if p.metrics != nil { |
||||
p.metrics.SetWebRTCServer(nil) |
||||
} |
||||
|
||||
p.webRTCServer.Close() |
||||
p.webRTCServer = nil |
||||
} |
||||
|
||||
if closeHLSServer && p.hlsServer != nil { |
||||
if p.metrics != nil { |
||||
p.metrics.SetHLSServer(nil) |
||||
} |
||||
|
||||
p.pathManager.setHLSServer(nil) |
||||
|
||||
p.hlsServer.Close() |
||||
p.hlsServer = nil |
||||
} |
||||
|
||||
if closeRTMPSServer && p.rtmpsServer != nil { |
||||
if p.metrics != nil { |
||||
p.metrics.SetRTMPSServer(nil) |
||||
} |
||||
|
||||
p.rtmpsServer.Close() |
||||
p.rtmpsServer = nil |
||||
} |
||||
|
||||
if closeRTMPServer && p.rtmpServer != nil { |
||||
if p.metrics != nil { |
||||
p.metrics.SetRTMPServer(nil) |
||||
} |
||||
|
||||
p.rtmpServer.Close() |
||||
p.rtmpServer = nil |
||||
} |
||||
|
||||
if closeRTSPSServer && p.rtspsServer != nil { |
||||
if p.metrics != nil { |
||||
p.metrics.SetRTSPSServer(nil) |
||||
} |
||||
|
||||
p.rtspsServer.Close() |
||||
p.rtspsServer = nil |
||||
} |
||||
|
||||
if closeRTSPServer && p.rtspServer != nil { |
||||
if p.metrics != nil { |
||||
p.metrics.SetRTSPServer(nil) |
||||
} |
||||
|
||||
p.rtspServer.Close() |
||||
p.rtspServer = nil |
||||
} |
||||
|
||||
if closePathManager && p.pathManager != nil { |
||||
if p.metrics != nil { |
||||
p.metrics.SetPathManager(nil) |
||||
} |
||||
|
||||
p.pathManager.close() |
||||
p.pathManager = nil |
||||
} |
||||
|
||||
if closePlaybackServer && p.playbackServer != nil { |
||||
p.playbackServer.Close() |
||||
p.playbackServer = nil |
||||
} |
||||
|
||||
if closeRecorderCleaner && p.recordCleaner != nil { |
||||
p.recordCleaner.Close() |
||||
p.recordCleaner = nil |
||||
} |
||||
|
||||
if closePPROF && p.pprof != nil { |
||||
p.pprof.Close() |
||||
p.pprof = nil |
||||
} |
||||
|
||||
if closeMetrics && p.metrics != nil { |
||||
p.metrics.Close() |
||||
p.metrics = nil |
||||
} |
||||
|
||||
if closeAuthManager && p.authManager != nil { |
||||
p.authManager = nil |
||||
} |
||||
|
||||
if newConf == nil && p.externalCmdPool != nil { |
||||
p.Log(logger.Info, "waiting for running hooks") |
||||
p.externalCmdPool.Close() |
||||
} |
||||
|
||||
if closeLogger && p.logger != nil { |
||||
p.logger.Close() |
||||
p.logger = nil |
||||
} |
||||
} |
||||
|
||||
func (p *Core) reloadConf(newConf *conf.Conf, calledByAPI bool) error { |
||||
p.closeResources(newConf, calledByAPI) |
||||
p.conf = newConf |
||||
return p.createResources(false) |
||||
} |
||||
|
||||
// APIConfigSet is called by api.
||||
func (p *Core) APIConfigSet(conf *conf.Conf) { |
||||
select { |
||||
case p.chAPIConfigSet <- conf: |
||||
case <-p.ctx.Done(): |
||||
} |
||||
} |
||||
@@ -1,133 +0,0 @@
package core

import (
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/bluenviron/gortsplib/v4"
	"github.com/bluenviron/gortsplib/v4/pkg/description"
	"github.com/bluenviron/mediamtx/internal/test"
	"github.com/stretchr/testify/require"
)

func newInstance(conf string) (*Core, bool) {
	if conf == "" {
		return New([]string{})
	}

	tmpf, err := test.CreateTempFile([]byte(conf))
	if err != nil {
		return nil, false
	}
	defer os.Remove(tmpf)

	return New([]string{tmpf})
}

func TestCoreErrors(t *testing.T) {
	for _, ca := range []struct {
		name string
		conf string
	}{
		{
			"logger",
			"logDestinations: [file]\n" +
				"logFile: /nonexisting/nonexist\n",
		},
		{
			"metrics",
			"metrics: yes\n" +
				"metricsAddress: invalid\n",
		},
		{
			"pprof",
			"pprof: yes\n" +
				"pprofAddress: invalid\n",
		},
		{
			"playback",
			"playback: yes\n" +
				"playbackAddress: invalid\n",
		},
		{
			"rtsp",
			"rtspAddress: invalid\n",
		},
		{
			"rtsps",
			"encryption: strict\n" +
				"rtspAddress: invalid\n",
		},
		{
			"rtmp",
			"rtmpAddress: invalid\n",
		},
		{
			"rtmps",
			"rtmpEncryption: strict\n" +
				"rtmpAddress: invalid\n",
		},
		{
			"hls",
			"hlsAddress: invalid\n",
		},
		{
			"webrtc",
			"webrtcAddress: invalid\n",
		},
		{
			"srt",
			"srtAddress: invalid\n",
		},
		{
			"api",
			"api: yes\n" +
				"apiAddress: invalid\n",
		},
	} {
		t.Run(ca.name, func(t *testing.T) {
			_, ok := newInstance(ca.conf)
			require.Equal(t, false, ok)
		})
	}
}

func TestCoreHotReloading(t *testing.T) {
	confPath := filepath.Join(os.TempDir(), "rtsp-conf")

	err := os.WriteFile(confPath, []byte("paths:\n"+
		"  test1:\n"+
		"    publishUser: myuser\n"+
		"    publishPass: mypass\n"),
		0o644)
	require.NoError(t, err)
	defer os.Remove(confPath)

	p, ok := New([]string{confPath})
	require.Equal(t, true, ok)
	defer p.Close()

	func() {
		c := gortsplib.Client{}
		err = c.StartRecording("rtsp://localhost:8554/test1",
			&description.Session{Medias: []*description.Media{test.UniqueMediaH264()}})
		require.EqualError(t, err, "bad status code: 401 (Unauthorized)")
	}()

	err = os.WriteFile(confPath, []byte("paths:\n"+
		"  test1:\n"),
		0o644)
	require.NoError(t, err)

	time.Sleep(1 * time.Second)

	func() {
		conn := gortsplib.Client{}
		err = conn.StartRecording("rtsp://localhost:8554/test1",
			&description.Session{Medias: []*description.Media{test.UniqueMediaH264()}})
		require.NoError(t, err)
		defer conn.Close()
	}()
}
@@ -1,363 +0,0 @@
|
||||
package core |
||||
|
||||
import ( |
||||
"bufio" |
||||
"context" |
||||
"crypto/tls" |
||||
"io" |
||||
"net" |
||||
"net/http" |
||||
"net/url" |
||||
"os" |
||||
"sync" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/description" |
||||
"github.com/bluenviron/mediacommon/pkg/formats/mpegts" |
||||
srt "github.com/datarhei/gosrt" |
||||
"github.com/pion/rtp" |
||||
"github.com/stretchr/testify/require" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/protocols/rtmp" |
||||
"github.com/bluenviron/mediamtx/internal/protocols/webrtc" |
||||
"github.com/bluenviron/mediamtx/internal/test" |
||||
) |
||||
|
||||
func httpPullFile(t *testing.T, hc *http.Client, u string) []byte { |
||||
res, err := hc.Get(u) |
||||
require.NoError(t, err) |
||||
defer res.Body.Close() |
||||
|
||||
if res.StatusCode != http.StatusOK { |
||||
t.Errorf("bad status code: %v", res.StatusCode) |
||||
} |
||||
|
||||
byts, err := io.ReadAll(res.Body) |
||||
require.NoError(t, err) |
||||
|
||||
return byts |
||||
} |
||||
|
||||
func TestMetrics(t *testing.T) { |
||||
serverCertFpath, err := test.CreateTempFile(test.TLSCertPub) |
||||
require.NoError(t, err) |
||||
defer os.Remove(serverCertFpath) |
||||
|
||||
serverKeyFpath, err := test.CreateTempFile(test.TLSCertKey) |
||||
require.NoError(t, err) |
||||
defer os.Remove(serverKeyFpath) |
||||
|
||||
p, ok := newInstance("api: yes\n" + |
||||
"hlsAlwaysRemux: yes\n" + |
||||
"metrics: yes\n" + |
||||
"webrtcServerCert: " + serverCertFpath + "\n" + |
||||
"webrtcServerKey: " + serverKeyFpath + "\n" + |
||||
"encryption: optional\n" + |
||||
"serverCert: " + serverCertFpath + "\n" + |
||||
"serverKey: " + serverKeyFpath + "\n" + |
||||
"rtmpEncryption: optional\n" + |
||||
"rtmpServerCert: " + serverCertFpath + "\n" + |
||||
"rtmpServerKey: " + serverKeyFpath + "\n" + |
||||
"paths:\n" + |
||||
" all_others:\n") |
||||
require.Equal(t, true, ok) |
||||
defer p.Close() |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
t.Run("initial", func(t *testing.T) { |
||||
bo := httpPullFile(t, hc, "http://localhost:9998/metrics") |
||||
|
||||
require.Equal(t, `paths 0 |
||||
hls_muxers 0 |
||||
hls_muxers_bytes_sent 0 |
||||
rtsp_conns 0 |
||||
rtsp_conns_bytes_received 0 |
||||
rtsp_conns_bytes_sent 0 |
||||
rtsp_sessions 0 |
||||
rtsp_sessions_bytes_received 0 |
||||
rtsp_sessions_bytes_sent 0 |
||||
rtsps_conns 0 |
||||
rtsps_conns_bytes_received 0 |
||||
rtsps_conns_bytes_sent 0 |
||||
rtsps_sessions 0 |
||||
rtsps_sessions_bytes_received 0 |
||||
rtsps_sessions_bytes_sent 0 |
||||
rtmp_conns 0 |
||||
rtmp_conns_bytes_received 0 |
||||
rtmp_conns_bytes_sent 0 |
||||
rtmps_conns 0 |
||||
rtmps_conns_bytes_received 0 |
||||
rtmps_conns_bytes_sent 0 |
||||
srt_conns 0 |
||||
srt_conns_bytes_received 0 |
||||
srt_conns_bytes_sent 0 |
||||
webrtc_sessions 0 |
||||
webrtc_sessions_bytes_received 0 |
||||
webrtc_sessions_bytes_sent 0 |
||||
`, string(bo)) |
||||
}) |
||||
|
||||
t.Run("with data", func(t *testing.T) { |
||||
terminate := make(chan struct{}) |
||||
var wg sync.WaitGroup |
||||
wg.Add(6) |
||||
|
||||
go func() { |
||||
defer wg.Done() |
||||
source := gortsplib.Client{} |
||||
err := source.StartRecording("rtsp://localhost:8554/rtsp_path", |
||||
&description.Session{Medias: []*description.Media{test.UniqueMediaH264()}}) |
||||
require.NoError(t, err) |
||||
defer source.Close() |
||||
<-terminate |
||||
}() |
||||
|
||||
go func() { |
||||
defer wg.Done() |
||||
source2 := gortsplib.Client{TLSConfig: &tls.Config{InsecureSkipVerify: true}} |
||||
err := source2.StartRecording("rtsps://localhost:8322/rtsps_path", |
||||
&description.Session{Medias: []*description.Media{test.UniqueMediaH264()}}) |
||||
require.NoError(t, err) |
||||
defer source2.Close() |
||||
<-terminate |
||||
}() |
||||
|
||||
go func() { |
||||
defer wg.Done() |
||||
u, err := url.Parse("rtmp://localhost:1935/rtmp_path") |
||||
require.NoError(t, err) |
||||
|
||||
nconn, err := net.Dial("tcp", u.Host) |
||||
require.NoError(t, err) |
||||
defer nconn.Close() |
||||
|
||||
conn, err := rtmp.NewClientConn(nconn, u, true) |
||||
require.NoError(t, err) |
||||
|
||||
_, err = rtmp.NewWriter(conn, test.FormatH264, nil) |
||||
require.NoError(t, err) |
||||
<-terminate |
||||
}() |
||||
|
||||
go func() { |
||||
defer wg.Done() |
||||
u, err := url.Parse("rtmp://localhost:1936/rtmps_path") |
||||
require.NoError(t, err) |
||||
|
||||
nconn, err := tls.Dial("tcp", u.Host, &tls.Config{InsecureSkipVerify: true}) |
||||
require.NoError(t, err) |
||||
defer nconn.Close() //nolint:errcheck
|
||||
|
||||
conn, err := rtmp.NewClientConn(nconn, u, true) |
||||
require.NoError(t, err) |
||||
|
||||
_, err = rtmp.NewWriter(conn, test.FormatH264, nil) |
||||
require.NoError(t, err) |
||||
<-terminate |
||||
}() |
||||
|
||||
go func() { |
||||
defer wg.Done() |
||||
|
||||
su, err := url.Parse("http://localhost:8889/webrtc_path/whip") |
||||
require.NoError(t, err) |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
s := &webrtc.WHIPClient{ |
||||
HTTPClient: hc, |
||||
URL: su, |
||||
Log: test.NilLogger{}, |
||||
} |
||||
|
||||
tracks, err := s.Publish(context.Background(), test.MediaH264.Formats[0], nil) |
||||
require.NoError(t, err) |
||||
defer checkClose(t, s.Close) |
||||
|
||||
err = tracks[0].WriteRTP(&rtp.Packet{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 123, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
}, |
||||
Payload: []byte{1}, |
||||
}) |
||||
require.NoError(t, err) |
||||
<-terminate |
||||
}() |
||||
|
||||
go func() { |
||||
defer wg.Done() |
||||
|
||||
srtConf := srt.DefaultConfig() |
||||
address, err := srtConf.UnmarshalURL("srt://localhost:8890?streamid=publish:srt_path") |
||||
require.NoError(t, err) |
||||
|
||||
err = srtConf.Validate() |
||||
require.NoError(t, err) |
||||
|
||||
publisher, err := srt.Dial("srt", address, srtConf) |
||||
require.NoError(t, err) |
||||
defer publisher.Close() |
||||
|
||||
track := &mpegts.Track{ |
||||
Codec: &mpegts.CodecH264{}, |
||||
} |
||||
|
||||
bw := bufio.NewWriter(publisher) |
||||
w := mpegts.NewWriter(bw, []*mpegts.Track{track}) |
||||
require.NoError(t, err) |
||||
|
||||
err = w.WriteH26x(track, 0, 0, true, [][]byte{ |
||||
test.FormatH264.SPS, |
||||
test.FormatH264.PPS, |
||||
{0x05, 1}, // IDR
||||
}) |
||||
require.NoError(t, err) |
||||
|
||||
err = bw.Flush() |
||||
require.NoError(t, err) |
||||
<-terminate |
||||
}() |
||||
|
||||
time.Sleep(500 * time.Millisecond) |
||||
|
||||
bo := httpPullFile(t, hc, "http://localhost:9998/metrics") |
||||
|
||||
require.Regexp(t, |
||||
`^paths\{name=".*?",state="ready"\} 1`+"\n"+ |
||||
`paths_bytes_received\{name=".*?",state="ready"\} [0-9]+`+"\n"+ |
||||
`paths_bytes_sent\{name=".*?",state="ready"\} [0-9]+`+"\n"+ |
||||
`paths\{name=".*?",state="ready"\} 1`+"\n"+ |
||||
`paths_bytes_received\{name=".*?",state="ready"\} [0-9]+`+"\n"+ |
||||
`paths_bytes_sent\{name=".*?",state="ready"\} [0-9]+`+"\n"+ |
||||
`paths\{name=".*?",state="ready"\} 1`+"\n"+ |
||||
`paths_bytes_received\{name=".*?",state="ready"\} [0-9]+`+"\n"+ |
||||
`paths_bytes_sent\{name=".*?",state="ready"\} [0-9]+`+"\n"+ |
||||
`paths\{name=".*?",state="ready"\} 1`+"\n"+ |
||||
`paths_bytes_received\{name=".*?",state="ready"\} [0-9]+`+"\n"+ |
||||
`paths_bytes_sent\{name=".*?",state="ready"\} [0-9]+`+"\n"+ |
||||
`paths\{name=".*?",state="ready"\} 1`+"\n"+ |
||||
`paths_bytes_received\{name=".*?",state="ready"\} [0-9]+`+"\n"+ |
||||
`paths_bytes_sent\{name=".*?",state="ready"\} [0-9]+`+"\n"+ |
||||
`paths\{name=".*?",state="ready"\} 1`+"\n"+ |
||||
`paths_bytes_received\{name=".*?",state="ready"\} [0-9]+`+"\n"+ |
||||
`paths_bytes_sent\{name=".*?",state="ready"\} [0-9]+`+"\n"+ |
||||
`hls_muxers\{name=".*?"\} 1`+"\n"+ |
||||
`hls_muxers_bytes_sent\{name=".*?"\} 0`+"\n"+ |
||||
`hls_muxers\{name=".*?"\} 1`+"\n"+ |
||||
`hls_muxers_bytes_sent\{name=".*?"\} 0`+"\n"+ |
||||
`hls_muxers\{name=".*?"\} 1`+"\n"+ |
||||
`hls_muxers_bytes_sent\{name=".*?"\} 0`+"\n"+ |
||||
`hls_muxers\{name=".*?"\} 1`+"\n"+ |
||||
`hls_muxers_bytes_sent\{name=".*?"\} 0`+"\n"+ |
||||
`hls_muxers\{name=".*?"\} 1`+"\n"+ |
||||
`hls_muxers_bytes_sent\{name=".*?"\} 0`+"\n"+ |
||||
`hls_muxers\{name=".*?"\} 1`+"\n"+ |
||||
`hls_muxers_bytes_sent\{name=".*?"\} 0`+"\n"+ |
||||
`rtsp_conns\{id=".*?"\} 1`+"\n"+ |
||||
`rtsp_conns_bytes_received\{id=".*?"\} [0-9]+`+"\n"+ |
||||
`rtsp_conns_bytes_sent\{id=".*?"\} [0-9]+`+"\n"+ |
||||
`rtsp_sessions\{id=".*?",state="publish"\} 1`+"\n"+ |
||||
`rtsp_sessions_bytes_received\{id=".*?",state="publish"\} 0`+"\n"+ |
||||
`rtsp_sessions_bytes_sent\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`rtsps_conns\{id=".*?"\} 1`+"\n"+ |
||||
`rtsps_conns_bytes_received\{id=".*?"\} [0-9]+`+"\n"+ |
||||
`rtsps_conns_bytes_sent\{id=".*?"\} [0-9]+`+"\n"+ |
||||
`rtsps_sessions\{id=".*?",state="publish"\} 1`+"\n"+ |
||||
`rtsps_sessions_bytes_received\{id=".*?",state="publish"\} 0`+"\n"+ |
||||
`rtsps_sessions_bytes_sent\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`rtmp_conns\{id=".*?",state="publish"\} 1`+"\n"+ |
||||
`rtmp_conns_bytes_received\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`rtmp_conns_bytes_sent\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`rtmps_conns\{id=".*?",state="publish"\} 1`+"\n"+ |
||||
`rtmps_conns_bytes_received\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`rtmps_conns_bytes_sent\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns\{id=".*?",state="publish"\} 1`+"\n"+ |
||||
`srt_conns_packets_sent\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_received\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_sent_unique\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_received_unique\{id=".*?",state="publish"\} 1`+"\n"+ |
||||
`srt_conns_packets_send_loss\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_received_loss\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_retrans\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_received_retrans\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_sent_ack\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_received_ack\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_sent_nak\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_received_nak\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_sent_km\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_received_km\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_us_snd_duration\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_send_drop\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_received_drop\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_received_undecrypt\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_sent\{id=".*?",state="publish"\} 0`+"\n"+ |
||||
`srt_conns_bytes_received\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_sent_unique\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_received_unique\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_received_loss\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_retrans\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_received_retrans\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_send_drop\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_received_drop\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_received_undecrypt\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_us_packets_send_period\{id=".*?",state="publish"\} \d+\.\d+`+"\n"+ |
||||
`srt_conns_packets_flow_window\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_flight_size\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_ms_rtt\{id=".*?",state="publish"\} \d+\.\d+`+"\n"+ |
||||
`srt_conns_mbps_send_rate\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_mbps_receive_rate\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_mbps_link_capacity\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_avail_send_buf\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_avail_receive_buf\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_mbps_max_bw\{id=".*?",state="publish"\} -1`+"\n"+ |
||||
`srt_conns_bytes_mss\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_send_buf\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_send_buf\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_ms_send_buf\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_ms_send_tsb_pd_delay\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_receive_buf\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_bytes_receive_buf\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_ms_receive_buf\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_ms_receive_tsb_pd_delay\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_reorder_tolerance\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_received_avg_belated_time\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_send_loss_rate\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`srt_conns_packets_received_loss_rate\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`webrtc_sessions\{id=".*?",state="publish"\} 1`+"\n"+ |
||||
`webrtc_sessions_bytes_received\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
`webrtc_sessions_bytes_sent\{id=".*?",state="publish"\} [0-9]+`+"\n"+ |
||||
"$", |
||||
string(bo)) |
||||
|
||||
close(terminate) |
||||
wg.Wait() |
||||
}) |
||||
|
||||
t.Run("servers deleted", func(t *testing.T) { |
||||
httpRequest(t, hc, http.MethodPatch, "http://localhost:9997/v3/config/global/patch", map[string]interface{}{ |
||||
"rtsp": false, |
||||
"rtmp": false, |
||||
"srt": false, |
||||
"hls": false, |
||||
"webrtc": false, |
||||
}, nil) |
||||
|
||||
time.Sleep(500 * time.Millisecond) |
||||
|
||||
bo := httpPullFile(t, hc, "http://localhost:9998/metrics") |
||||
|
||||
require.Equal(t, "paths 0\n", string(bo)) |
||||
}) |
||||
} |
||||
@@ -1,540 +0,0 @@
|
||||
package core |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"sort" |
||||
"sync" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/auth" |
||||
"github.com/bluenviron/mediamtx/internal/conf" |
||||
"github.com/bluenviron/mediamtx/internal/defs" |
||||
"github.com/bluenviron/mediamtx/internal/externalcmd" |
||||
"github.com/bluenviron/mediamtx/internal/logger" |
||||
"github.com/bluenviron/mediamtx/internal/stream" |
||||
) |
||||
|
||||
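// pathConfCanBeUpdated reports whether two path configurations differ only in
// fields that can be applied on the fly (recording and the Raspberry Pi camera
// parameters listed below); in that case existing paths are reconfigured
// instead of being closed and recreated.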
func pathConfCanBeUpdated(oldPathConf *conf.Path, newPathConf *conf.Path) bool { |
||||
clone := oldPathConf.Clone() |
||||
|
||||
clone.Record = newPathConf.Record |
||||
|
||||
clone.RPICameraBrightness = newPathConf.RPICameraBrightness |
||||
clone.RPICameraContrast = newPathConf.RPICameraContrast |
||||
clone.RPICameraSaturation = newPathConf.RPICameraSaturation |
||||
clone.RPICameraSharpness = newPathConf.RPICameraSharpness |
||||
clone.RPICameraExposure = newPathConf.RPICameraExposure |
||||
clone.RPICameraAWB = newPathConf.RPICameraAWB |
||||
clone.RPICameraAWBGains = newPathConf.RPICameraAWBGains |
||||
clone.RPICameraDenoise = newPathConf.RPICameraDenoise |
||||
clone.RPICameraShutter = newPathConf.RPICameraShutter |
||||
clone.RPICameraMetering = newPathConf.RPICameraMetering |
||||
clone.RPICameraGain = newPathConf.RPICameraGain |
||||
clone.RPICameraEV = newPathConf.RPICameraEV |
||||
clone.RPICameraFPS = newPathConf.RPICameraFPS |
||||
clone.RPICameraIDRPeriod = newPathConf.RPICameraIDRPeriod |
||||
clone.RPICameraBitrate = newPathConf.RPICameraBitrate |
||||
|
||||
return newPathConf.Equal(clone) |
||||
} |
||||
|
||||
type pathManagerHLSServer interface { |
||||
PathReady(defs.Path) |
||||
PathNotReady(defs.Path) |
||||
} |
||||
|
||||
type pathManagerParent interface { |
||||
logger.Writer |
||||
} |
||||
|
||||
type pathManager struct { |
||||
logLevel conf.LogLevel |
||||
authManager *auth.Manager |
||||
rtspAddress string |
||||
readTimeout conf.StringDuration |
||||
writeTimeout conf.StringDuration |
||||
writeQueueSize int |
||||
udpMaxPayloadSize int |
||||
pathConfs map[string]*conf.Path |
||||
externalCmdPool *externalcmd.Pool |
||||
parent pathManagerParent |
||||
|
||||
ctx context.Context |
||||
ctxCancel func() |
||||
wg sync.WaitGroup |
||||
hlsManager pathManagerHLSServer |
||||
paths map[string]*path |
||||
pathsByConf map[string]map[*path]struct{} |
||||
|
||||
// in
||||
chReloadConf chan map[string]*conf.Path |
||||
chSetHLSServer chan pathManagerHLSServer |
||||
chClosePath chan *path |
||||
chPathReady chan *path |
||||
chPathNotReady chan *path |
||||
chFindPathConf chan defs.PathFindPathConfReq |
||||
chDescribe chan defs.PathDescribeReq |
||||
chAddReader chan defs.PathAddReaderReq |
||||
chAddPublisher chan defs.PathAddPublisherReq |
||||
chAPIPathsList chan pathAPIPathsListReq |
||||
chAPIPathsGet chan pathAPIPathsGetReq |
||||
} |
||||
|
||||
func (pm *pathManager) initialize() { |
||||
ctx, ctxCancel := context.WithCancel(context.Background()) |
||||
|
||||
pm.ctx = ctx |
||||
pm.ctxCancel = ctxCancel |
||||
pm.paths = make(map[string]*path) |
||||
pm.pathsByConf = make(map[string]map[*path]struct{}) |
||||
pm.chReloadConf = make(chan map[string]*conf.Path) |
||||
pm.chSetHLSServer = make(chan pathManagerHLSServer) |
||||
pm.chClosePath = make(chan *path) |
||||
pm.chPathReady = make(chan *path) |
||||
pm.chPathNotReady = make(chan *path) |
||||
pm.chFindPathConf = make(chan defs.PathFindPathConfReq) |
||||
pm.chDescribe = make(chan defs.PathDescribeReq) |
||||
pm.chAddReader = make(chan defs.PathAddReaderReq) |
||||
pm.chAddPublisher = make(chan defs.PathAddPublisherReq) |
||||
pm.chAPIPathsList = make(chan pathAPIPathsListReq) |
||||
pm.chAPIPathsGet = make(chan pathAPIPathsGetReq) |
||||
|
||||
for pathConfName, pathConf := range pm.pathConfs { |
||||
if pathConf.Regexp == nil { |
||||
pm.createPath(pathConfName, pathConf, pathConfName, nil) |
||||
} |
||||
} |
||||
|
||||
pm.Log(logger.Debug, "path manager created") |
||||
|
||||
pm.wg.Add(1) |
||||
go pm.run() |
||||
} |
||||
|
||||
func (pm *pathManager) close() { |
||||
pm.Log(logger.Debug, "path manager is shutting down") |
||||
pm.ctxCancel() |
||||
pm.wg.Wait() |
||||
} |
||||
|
||||
// Log implements logger.Writer.
||||
func (pm *pathManager) Log(level logger.Level, format string, args ...interface{}) { |
||||
pm.parent.Log(level, format, args...) |
||||
} |
||||
|
||||
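// run is the pathManager main loop: all mutable state is owned by this
// goroutine and is only touched in response to messages received on the
// channels above, which removes the need for explicit locking.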
func (pm *pathManager) run() { |
||||
defer pm.wg.Done() |
||||
|
||||
outer: |
||||
for { |
||||
select { |
||||
case newPaths := <-pm.chReloadConf: |
||||
pm.doReloadConf(newPaths) |
||||
|
||||
case m := <-pm.chSetHLSServer: |
||||
pm.doSetHLSServer(m) |
||||
|
||||
case pa := <-pm.chClosePath: |
||||
pm.doClosePath(pa) |
||||
|
||||
case pa := <-pm.chPathReady: |
||||
pm.doPathReady(pa) |
||||
|
||||
case pa := <-pm.chPathNotReady: |
||||
pm.doPathNotReady(pa) |
||||
|
||||
case req := <-pm.chFindPathConf: |
||||
pm.doFindPathConf(req) |
||||
|
||||
case req := <-pm.chDescribe: |
||||
pm.doDescribe(req) |
||||
|
||||
case req := <-pm.chAddReader: |
||||
pm.doAddReader(req) |
||||
|
||||
case req := <-pm.chAddPublisher: |
||||
pm.doAddPublisher(req) |
||||
|
||||
case req := <-pm.chAPIPathsList: |
||||
pm.doAPIPathsList(req) |
||||
|
||||
case req := <-pm.chAPIPathsGet: |
||||
pm.doAPIPathsGet(req) |
||||
|
||||
case <-pm.ctx.Done(): |
||||
break outer |
||||
} |
||||
} |
||||
|
||||
pm.ctxCancel() |
||||
} |
||||
|
||||
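// doReloadConf applies a new set of path configurations: paths whose
// configuration changed are updated in place when possible, otherwise closed
// and recreated; paths whose configuration was removed are closed; static
// (non-regexp) paths added by the new configuration are created.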
func (pm *pathManager) doReloadConf(newPaths map[string]*conf.Path) { |
||||
for confName, pathConf := range pm.pathConfs { |
||||
if newPath, ok := newPaths[confName]; ok { |
||||
// configuration has changed
if !newPath.Equal(pathConf) {
if pathConfCanBeUpdated(pathConf, newPath) { // paths associated with the configuration can be updated
||||
for pa := range pm.pathsByConf[confName] { |
||||
go pa.reloadConf(newPath) |
||||
} |
||||
} else { // paths associated with the configuration must be recreated
for pa := range pm.pathsByConf[confName] {
pm.removePath(pa)
pa.close()
pa.wait() // avoid conflicts between sources
||||
} |
||||
} |
||||
} |
||||
} else { |
||||
// configuration has been deleted, remove associated paths
for pa := range pm.pathsByConf[confName] {
pm.removePath(pa)
pa.close()
pa.wait() // avoid conflicts between sources
||||
} |
||||
} |
||||
} |
||||
|
||||
pm.pathConfs = newPaths |
||||
|
||||
// add new paths
||||
for pathConfName, pathConf := range pm.pathConfs { |
||||
if _, ok := pm.paths[pathConfName]; !ok && pathConf.Regexp == nil { |
||||
pm.createPath(pathConfName, pathConf, pathConfName, nil) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func (pm *pathManager) doSetHLSServer(m pathManagerHLSServer) { |
||||
pm.hlsManager = m |
||||
} |
||||
|
||||
func (pm *pathManager) doClosePath(pa *path) { |
||||
if pmpa, ok := pm.paths[pa.name]; !ok || pmpa != pa { |
||||
return |
||||
} |
||||
pm.removePath(pa) |
||||
} |
||||
|
||||
func (pm *pathManager) doPathReady(pa *path) { |
||||
if pm.hlsManager != nil { |
||||
pm.hlsManager.PathReady(pa) |
||||
} |
||||
} |
||||
|
||||
func (pm *pathManager) doPathNotReady(pa *path) { |
||||
if pm.hlsManager != nil { |
||||
pm.hlsManager.PathNotReady(pa) |
||||
} |
||||
} |
||||
|
||||
func (pm *pathManager) doFindPathConf(req defs.PathFindPathConfReq) { |
||||
_, pathConf, _, err := conf.FindPathConf(pm.pathConfs, req.AccessRequest.Name) |
||||
if err != nil { |
||||
req.Res <- defs.PathFindPathConfRes{Err: err} |
||||
return |
||||
} |
||||
|
||||
err = pm.authManager.Authenticate(req.AccessRequest.ToAuthRequest()) |
||||
if err != nil { |
||||
req.Res <- defs.PathFindPathConfRes{Err: err} |
||||
return |
||||
} |
||||
|
||||
req.Res <- defs.PathFindPathConfRes{Conf: pathConf} |
||||
} |
||||
|
||||
func (pm *pathManager) doDescribe(req defs.PathDescribeReq) { |
||||
pathConfName, pathConf, pathMatches, err := conf.FindPathConf(pm.pathConfs, req.AccessRequest.Name) |
||||
if err != nil { |
||||
req.Res <- defs.PathDescribeRes{Err: err} |
||||
return |
||||
} |
||||
|
||||
err = pm.authManager.Authenticate(req.AccessRequest.ToAuthRequest()) |
||||
if err != nil { |
||||
req.Res <- defs.PathDescribeRes{Err: err} |
||||
return |
||||
} |
||||
|
||||
// create path if it doesn't exist
||||
if _, ok := pm.paths[req.AccessRequest.Name]; !ok { |
||||
pm.createPath(pathConfName, pathConf, req.AccessRequest.Name, pathMatches) |
||||
} |
||||
|
||||
req.Res <- defs.PathDescribeRes{Path: pm.paths[req.AccessRequest.Name]} |
||||
} |
||||
|
||||
func (pm *pathManager) doAddReader(req defs.PathAddReaderReq) { |
||||
pathConfName, pathConf, pathMatches, err := conf.FindPathConf(pm.pathConfs, req.AccessRequest.Name) |
||||
if err != nil { |
||||
req.Res <- defs.PathAddReaderRes{Err: err} |
||||
return |
||||
} |
||||
|
||||
if !req.AccessRequest.SkipAuth { |
||||
err = pm.authManager.Authenticate(req.AccessRequest.ToAuthRequest()) |
||||
if err != nil { |
||||
req.Res <- defs.PathAddReaderRes{Err: err} |
||||
return |
||||
} |
||||
} |
||||
|
||||
// create path if it doesn't exist
||||
if _, ok := pm.paths[req.AccessRequest.Name]; !ok { |
||||
pm.createPath(pathConfName, pathConf, req.AccessRequest.Name, pathMatches) |
||||
} |
||||
|
||||
req.Res <- defs.PathAddReaderRes{Path: pm.paths[req.AccessRequest.Name]} |
||||
} |
||||
|
||||
func (pm *pathManager) doAddPublisher(req defs.PathAddPublisherReq) { |
||||
pathConfName, pathConf, pathMatches, err := conf.FindPathConf(pm.pathConfs, req.AccessRequest.Name) |
||||
if err != nil { |
||||
req.Res <- defs.PathAddPublisherRes{Err: err} |
||||
return |
||||
} |
||||
|
||||
if !req.AccessRequest.SkipAuth { |
||||
err = pm.authManager.Authenticate(req.AccessRequest.ToAuthRequest()) |
||||
if err != nil { |
||||
req.Res <- defs.PathAddPublisherRes{Err: err} |
||||
return |
||||
} |
||||
} |
||||
|
||||
// create path if it doesn't exist
||||
if _, ok := pm.paths[req.AccessRequest.Name]; !ok { |
||||
pm.createPath(pathConfName, pathConf, req.AccessRequest.Name, pathMatches) |
||||
} |
||||
|
||||
req.Res <- defs.PathAddPublisherRes{Path: pm.paths[req.AccessRequest.Name]} |
||||
} |
||||
|
||||
func (pm *pathManager) doAPIPathsList(req pathAPIPathsListReq) { |
||||
paths := make(map[string]*path) |
||||
|
||||
for name, pa := range pm.paths { |
||||
paths[name] = pa |
||||
} |
||||
|
||||
req.res <- pathAPIPathsListRes{paths: paths} |
||||
} |
||||
|
||||
func (pm *pathManager) doAPIPathsGet(req pathAPIPathsGetReq) { |
||||
path, ok := pm.paths[req.name] |
||||
if !ok { |
||||
req.res <- pathAPIPathsGetRes{err: conf.ErrPathNotFound} |
||||
return |
||||
} |
||||
|
||||
req.res <- pathAPIPathsGetRes{path: path} |
||||
} |
||||
|
||||
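// createPath instantiates a path from its configuration and registers it both
// by name and by the configuration entry that generated it, so that a later
// reload can find every path created from the same entry.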
func (pm *pathManager) createPath( |
||||
pathConfName string, |
||||
pathConf *conf.Path, |
||||
name string, |
||||
matches []string, |
||||
) { |
||||
pa := &path{ |
||||
parentCtx: pm.ctx, |
||||
logLevel: pm.logLevel, |
||||
rtspAddress: pm.rtspAddress, |
||||
readTimeout: pm.readTimeout, |
||||
writeTimeout: pm.writeTimeout, |
||||
writeQueueSize: pm.writeQueueSize, |
||||
udpMaxPayloadSize: pm.udpMaxPayloadSize, |
||||
confName: pathConfName, |
||||
conf: pathConf, |
||||
name: name, |
||||
matches: matches, |
||||
wg: &pm.wg, |
||||
externalCmdPool: pm.externalCmdPool, |
||||
parent: pm, |
||||
} |
||||
pa.initialize() |
||||
|
||||
pm.paths[name] = pa |
||||
|
||||
if _, ok := pm.pathsByConf[pathConfName]; !ok { |
||||
pm.pathsByConf[pathConfName] = make(map[*path]struct{}) |
||||
} |
||||
pm.pathsByConf[pathConfName][pa] = struct{}{} |
||||
} |
||||
|
||||
func (pm *pathManager) removePath(pa *path) { |
||||
delete(pm.pathsByConf[pa.confName], pa) |
||||
if len(pm.pathsByConf[pa.confName]) == 0 { |
||||
delete(pm.pathsByConf, pa.confName) |
||||
} |
||||
delete(pm.paths, pa.name) |
||||
} |
||||
|
||||
// ReloadPathConfs is called by core.
||||
func (pm *pathManager) ReloadPathConfs(pathConfs map[string]*conf.Path) { |
||||
select { |
||||
case pm.chReloadConf <- pathConfs: |
||||
case <-pm.ctx.Done(): |
||||
} |
||||
} |
||||
|
||||
// pathReady is called by path.
func (pm *pathManager) pathReady(pa *path) {
select {
case pm.chPathReady <- pa:
case <-pm.ctx.Done():
case <-pa.ctx.Done(): // in case pathManager is blocked by path.wait()
||||
} |
||||
} |
||||
|
||||
// pathNotReady is called by path.
func (pm *pathManager) pathNotReady(pa *path) {
select {
case pm.chPathNotReady <- pa:
case <-pm.ctx.Done():
case <-pa.ctx.Done(): // in case pathManager is blocked by path.wait()
||||
} |
||||
} |
||||
|
||||
// closePath is called by path.
func (pm *pathManager) closePath(pa *path) {
select {
case pm.chClosePath <- pa:
case <-pm.ctx.Done():
case <-pa.ctx.Done(): // in case pathManager is blocked by path.wait()
||||
} |
||||
} |
||||
|
||||
// FindPathConf is called by a reader or publisher.
||||
func (pm *pathManager) FindPathConf(req defs.PathFindPathConfReq) (*conf.Path, error) { |
||||
req.Res = make(chan defs.PathFindPathConfRes) |
||||
select { |
||||
case pm.chFindPathConf <- req: |
||||
res := <-req.Res |
||||
return res.Conf, res.Err |
||||
|
||||
case <-pm.ctx.Done(): |
||||
return nil, fmt.Errorf("terminated") |
||||
} |
||||
} |
||||
|
||||
// Describe is called by a reader or publisher.
||||
func (pm *pathManager) Describe(req defs.PathDescribeReq) defs.PathDescribeRes { |
||||
req.Res = make(chan defs.PathDescribeRes) |
||||
select { |
||||
case pm.chDescribe <- req: |
||||
res1 := <-req.Res |
||||
if res1.Err != nil { |
||||
return res1 |
||||
} |
||||
|
||||
res2 := res1.Path.(*path).describe(req) |
||||
if res2.Err != nil { |
||||
return res2 |
||||
} |
||||
|
||||
res2.Path = res1.Path |
||||
return res2 |
||||
|
||||
case <-pm.ctx.Done(): |
||||
return defs.PathDescribeRes{Err: fmt.Errorf("terminated")} |
||||
} |
||||
} |
||||
|
||||
// AddPublisher is called by a publisher.
||||
func (pm *pathManager) AddPublisher(req defs.PathAddPublisherReq) (defs.Path, error) { |
||||
req.Res = make(chan defs.PathAddPublisherRes) |
||||
select { |
||||
case pm.chAddPublisher <- req: |
||||
res := <-req.Res |
||||
if res.Err != nil { |
||||
return nil, res.Err |
||||
} |
||||
|
||||
return res.Path.(*path).addPublisher(req) |
||||
|
||||
case <-pm.ctx.Done(): |
||||
return nil, fmt.Errorf("terminated") |
||||
} |
||||
} |
||||
|
||||
// AddReader is called by a reader.
||||
func (pm *pathManager) AddReader(req defs.PathAddReaderReq) (defs.Path, *stream.Stream, error) { |
||||
req.Res = make(chan defs.PathAddReaderRes) |
||||
select { |
||||
case pm.chAddReader <- req: |
||||
res := <-req.Res |
||||
if res.Err != nil { |
||||
return nil, nil, res.Err |
||||
} |
||||
|
||||
return res.Path.(*path).addReader(req) |
||||
|
||||
case <-pm.ctx.Done(): |
||||
return nil, nil, fmt.Errorf("terminated") |
||||
} |
||||
} |
||||
|
||||
// setHLSServer is called by hlsManager.
||||
func (pm *pathManager) setHLSServer(s pathManagerHLSServer) { |
||||
select { |
||||
case pm.chSetHLSServer <- s: |
||||
case <-pm.ctx.Done(): |
||||
} |
||||
} |
||||
|
||||
// APIPathsList is called by api.
||||
func (pm *pathManager) APIPathsList() (*defs.APIPathList, error) { |
||||
req := pathAPIPathsListReq{ |
||||
res: make(chan pathAPIPathsListRes), |
||||
} |
||||
|
||||
select { |
||||
case pm.chAPIPathsList <- req: |
||||
res := <-req.res |
||||
|
||||
res.data = &defs.APIPathList{ |
||||
Items: []*defs.APIPath{}, |
||||
} |
||||
|
||||
for _, pa := range res.paths { |
||||
item, err := pa.APIPathsGet(pathAPIPathsGetReq{}) |
||||
if err == nil { |
||||
res.data.Items = append(res.data.Items, item) |
||||
} |
||||
} |
||||
|
||||
sort.Slice(res.data.Items, func(i, j int) bool { |
||||
return res.data.Items[i].Name < res.data.Items[j].Name |
||||
}) |
||||
|
||||
return res.data, nil |
||||
|
||||
case <-pm.ctx.Done(): |
||||
return nil, fmt.Errorf("terminated") |
||||
} |
||||
} |
||||
|
||||
// APIPathsGet is called by api.
|
||||
func (pm *pathManager) APIPathsGet(name string) (*defs.APIPath, error) { |
||||
req := pathAPIPathsGetReq{ |
||||
name: name, |
||||
res: make(chan pathAPIPathsGetRes), |
||||
} |
||||
|
||||
select { |
||||
case pm.chAPIPathsGet <- req: |
||||
res := <-req.res |
||||
if res.err != nil { |
||||
return nil, res.err |
||||
} |
||||
|
||||
data, err := res.path.APIPathsGet(req) |
||||
return data, err |
||||
|
||||
case <-pm.ctx.Done(): |
||||
return nil, fmt.Errorf("terminated") |
||||
} |
||||
} |
||||
@@ -1,84 +0,0 @@
|
||||
package core |
||||
|
||||
import ( |
||||
"bufio" |
||||
"net" |
||||
"testing" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/base" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/headers" |
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
func TestPathAutoDeletion(t *testing.T) { |
||||
for _, ca := range []string{"describe", "setup"} { |
||||
t.Run(ca, func(t *testing.T) { |
||||
p, ok := newInstance("paths:\n" + |
||||
" all_others:\n") |
||||
require.Equal(t, true, ok) |
||||
defer p.Close() |
||||
|
||||
func() { |
||||
conn, err := net.Dial("tcp", "localhost:8554") |
||||
require.NoError(t, err) |
||||
defer conn.Close() |
||||
br := bufio.NewReader(conn) |
||||
|
||||
if ca == "describe" { |
||||
u, err := base.ParseURL("rtsp://localhost:8554/mypath") |
||||
require.NoError(t, err) |
||||
|
||||
byts, _ := base.Request{ |
||||
Method: base.Describe, |
||||
URL: u, |
||||
Header: base.Header{ |
||||
"CSeq": base.HeaderValue{"1"}, |
||||
}, |
||||
}.Marshal() |
||||
_, err = conn.Write(byts) |
||||
require.NoError(t, err) |
||||
|
||||
var res base.Response |
||||
err = res.Unmarshal(br) |
||||
require.NoError(t, err) |
||||
require.Equal(t, base.StatusNotFound, res.StatusCode) |
||||
} else { |
||||
u, err := base.ParseURL("rtsp://localhost:8554/mypath/trackID=0") |
||||
require.NoError(t, err) |
||||
|
||||
byts, _ := base.Request{ |
||||
Method: base.Setup, |
||||
URL: u, |
||||
Header: base.Header{ |
||||
"CSeq": base.HeaderValue{"1"}, |
||||
"Transport": headers.Transport{ |
||||
Mode: func() *headers.TransportMode { |
||||
v := headers.TransportModePlay |
||||
return &v |
||||
}(), |
||||
Delivery: func() *headers.TransportDelivery { |
||||
v := headers.TransportDeliveryUnicast |
||||
return &v |
||||
}(), |
||||
Protocol: headers.TransportProtocolUDP, |
||||
ClientPorts: &[2]int{35466, 35467}, |
||||
}.Marshal(), |
||||
}, |
||||
}.Marshal() |
||||
_, err = conn.Write(byts) |
||||
require.NoError(t, err) |
||||
|
||||
var res base.Response |
||||
err = res.Unmarshal(br) |
||||
require.NoError(t, err) |
||||
require.Equal(t, base.StatusNotFound, res.StatusCode) |
||||
} |
||||
}() |
||||
|
||||
data, err := p.pathManager.APIPathsList() |
||||
require.NoError(t, err) |
||||
|
||||
require.Equal(t, 0, len(data.Items)) |
||||
}) |
||||
} |
||||
} |
||||
@@ -1,797 +0,0 @@
|
||||
package core |
||||
|
||||
import ( |
||||
"bufio" |
||||
"context" |
||||
"fmt" |
||||
"net" |
||||
"net/http" |
||||
"net/url" |
||||
"os" |
||||
"os/exec" |
||||
"path/filepath" |
||||
"strings" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/base" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/description" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/headers" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/sdp" |
||||
srt "github.com/datarhei/gosrt" |
||||
"github.com/pion/rtp" |
||||
"github.com/stretchr/testify/require" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/defs" |
||||
"github.com/bluenviron/mediamtx/internal/protocols/rtmp" |
||||
"github.com/bluenviron/mediamtx/internal/protocols/webrtc" |
||||
"github.com/bluenviron/mediamtx/internal/test" |
||||
) |
||||
|
||||
var runOnDemandSampleScript = ` |
||||
package main |
||||
|
||||
import ( |
||||
"os" |
||||
"os/signal" |
||||
"syscall" |
||||
"github.com/bluenviron/gortsplib/v4" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/description" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
) |
||||
|
||||
func main() { |
||||
if os.Getenv("MTX_PATH") != "ondemand" || |
||||
os.Getenv("MTX_QUERY") != "param=value" || |
||||
os.Getenv("G1") != "on" { |
||||
panic("environment not set") |
||||
} |
||||
|
||||
medi := &description.Media{ |
||||
Type: description.MediaTypeVideo, |
||||
Formats: []format.Format{&format.H264{ |
||||
PayloadTyp: 96, |
||||
SPS: []byte{ |
||||
0x67, 0x42, 0xc0, 0x28, 0xd9, 0x00, 0x78, 0x02, |
||||
0x27, 0xe5, 0x84, 0x00, 0x00, 0x03, 0x00, 0x04, |
||||
0x00, 0x00, 0x03, 0x00, 0xf0, 0x3c, 0x60, 0xc9, 0x20, |
||||
}, |
||||
PPS: []byte{0x01, 0x02, 0x03, 0x04}, |
||||
PacketizationMode: 1, |
||||
}}, |
||||
} |
||||
|
||||
source := gortsplib.Client{} |
||||
|
||||
err := source.StartRecording( |
||||
"rtsp://localhost:" + os.Getenv("RTSP_PORT") + "/" + os.Getenv("MTX_PATH"), |
||||
&description.Session{Medias: []*description.Media{medi}}) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
defer source.Close() |
||||
|
||||
c := make(chan os.Signal, 1) |
||||
signal.Notify(c, syscall.SIGINT) |
||||
<-c |
||||
|
||||
err = os.WriteFile("ON_DEMAND_FILE", []byte(""), 0644) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
} |
||||
` |
||||
|
||||
type testServer struct { |
||||
onDescribe func(*gortsplib.ServerHandlerOnDescribeCtx) (*base.Response, *gortsplib.ServerStream, error) |
||||
onSetup func(*gortsplib.ServerHandlerOnSetupCtx) (*base.Response, *gortsplib.ServerStream, error) |
||||
onPlay func(*gortsplib.ServerHandlerOnPlayCtx) (*base.Response, error) |
||||
} |
||||
|
||||
func (sh *testServer) OnDescribe(ctx *gortsplib.ServerHandlerOnDescribeCtx, |
||||
) (*base.Response, *gortsplib.ServerStream, error) { |
||||
return sh.onDescribe(ctx) |
||||
} |
||||
|
||||
func (sh *testServer) OnSetup(ctx *gortsplib.ServerHandlerOnSetupCtx) (*base.Response, *gortsplib.ServerStream, error) { |
||||
return sh.onSetup(ctx) |
||||
} |
||||
|
||||
func (sh *testServer) OnPlay(ctx *gortsplib.ServerHandlerOnPlayCtx) (*base.Response, error) { |
||||
return sh.onPlay(ctx) |
||||
} |
||||
|
||||
var _ defs.Path = &path{} |
||||
|
||||
func TestPathRunOnDemand(t *testing.T) { |
||||
onDemandFile := filepath.Join(os.TempDir(), "ondemand") |
||||
onUnDemandFile := filepath.Join(os.TempDir(), "onundemand") |
||||
|
||||
srcFile := filepath.Join(os.TempDir(), "ondemand.go") |
||||
err := os.WriteFile(srcFile, |
||||
[]byte(strings.ReplaceAll(runOnDemandSampleScript, "ON_DEMAND_FILE", onDemandFile)), 0o644) |
||||
require.NoError(t, err) |
||||
|
||||
execFile := filepath.Join(os.TempDir(), "ondemand_cmd") |
||||
cmd := exec.Command("go", "build", "-o", execFile, srcFile) |
||||
cmd.Stdout = os.Stdout |
||||
cmd.Stderr = os.Stderr |
||||
err = cmd.Run() |
||||
require.NoError(t, err) |
||||
defer os.Remove(execFile) |
||||
|
||||
os.Remove(srcFile) |
||||
|
||||
for _, ca := range []string{"describe", "setup", "describe and setup"} { |
||||
t.Run(ca, func(t *testing.T) { |
||||
defer os.Remove(onDemandFile) |
||||
defer os.Remove(onUnDemandFile) |
||||
|
||||
p1, ok := newInstance(fmt.Sprintf("rtmp: no\n"+ |
||||
"hls: no\n"+ |
||||
"webrtc: no\n"+ |
||||
"paths:\n"+ |
||||
" '~^(on)demand$':\n"+ |
||||
" runOnDemand: %s\n"+ |
||||
" runOnDemandCloseAfter: 1s\n"+ |
||||
" runOnUnDemand: touch %s\n", execFile, onUnDemandFile)) |
||||
require.Equal(t, true, ok) |
||||
defer p1.Close() |
||||
|
||||
var control string |
||||
|
||||
func() { |
||||
conn, err := net.Dial("tcp", "localhost:8554") |
||||
require.NoError(t, err) |
||||
defer conn.Close() |
||||
br := bufio.NewReader(conn) |
||||
|
||||
if ca == "describe" || ca == "describe and setup" { |
||||
u, err := base.ParseURL("rtsp://localhost:8554/ondemand?param=value") |
||||
require.NoError(t, err) |
||||
|
||||
byts, _ := base.Request{ |
||||
Method: base.Describe, |
||||
URL: u, |
||||
Header: base.Header{ |
||||
"CSeq": base.HeaderValue{"1"}, |
||||
}, |
||||
}.Marshal() |
||||
_, err = conn.Write(byts) |
||||
require.NoError(t, err) |
||||
|
||||
var res base.Response |
||||
err = res.Unmarshal(br) |
||||
require.NoError(t, err) |
||||
require.Equal(t, base.StatusOK, res.StatusCode) |
||||
|
||||
var desc sdp.SessionDescription |
||||
err = desc.Unmarshal(res.Body) |
||||
require.NoError(t, err) |
||||
control, _ = desc.MediaDescriptions[0].Attribute("control") |
||||
} else { |
||||
control = "rtsp://localhost:8554/ondemand?param=value/" |
||||
} |
||||
|
||||
if ca == "setup" || ca == "describe and setup" { |
||||
u, err := base.ParseURL(control) |
||||
require.NoError(t, err) |
||||
|
||||
byts, _ := base.Request{ |
||||
Method: base.Setup, |
||||
URL: u, |
||||
Header: base.Header{ |
||||
"CSeq": base.HeaderValue{"2"}, |
||||
"Transport": headers.Transport{ |
||||
Mode: func() *headers.TransportMode { |
||||
v := headers.TransportModePlay |
||||
return &v |
||||
}(), |
||||
Protocol: headers.TransportProtocolTCP, |
||||
InterleavedIDs: &[2]int{0, 1}, |
||||
}.Marshal(), |
||||
}, |
||||
}.Marshal() |
||||
_, err = conn.Write(byts) |
||||
require.NoError(t, err) |
||||
|
||||
var res base.Response |
||||
err = res.Unmarshal(br) |
||||
require.NoError(t, err) |
||||
require.Equal(t, base.StatusOK, res.StatusCode) |
||||
} |
||||
}() |
||||
|
||||
for { |
||||
_, err := os.Stat(onUnDemandFile) |
||||
if err == nil { |
||||
break |
||||
} |
||||
time.Sleep(100 * time.Millisecond) |
||||
} |
||||
|
||||
_, err := os.Stat(onDemandFile) |
||||
require.NoError(t, err) |
||||
}) |
||||
} |
||||
} |
||||
|
||||
func TestPathRunOnConnect(t *testing.T) { |
||||
for _, ca := range []string{"rtsp", "rtmp", "srt"} { |
||||
t.Run(ca, func(t *testing.T) { |
||||
onConnectFile := filepath.Join(os.TempDir(), "onconnect") |
||||
defer os.Remove(onConnectFile) |
||||
|
||||
onDisconnectFile := filepath.Join(os.TempDir(), "ondisconnect") |
||||
defer os.Remove(onDisconnectFile) |
||||
|
||||
func() { |
||||
p, ok := newInstance(fmt.Sprintf( |
||||
"paths:\n"+ |
||||
" test:\n"+ |
||||
"runOnConnect: touch %s\n"+ |
||||
"runOnDisconnect: touch %s\n", |
||||
onConnectFile, onDisconnectFile)) |
||||
require.Equal(t, true, ok) |
||||
defer p.Close() |
||||
|
||||
switch ca { |
||||
case "rtsp": |
||||
c := gortsplib.Client{} |
||||
|
||||
err := c.StartRecording( |
||||
"rtsp://localhost:8554/test", |
||||
&description.Session{Medias: []*description.Media{test.UniqueMediaH264()}}) |
||||
require.NoError(t, err) |
||||
defer c.Close() |
||||
|
||||
case "rtmp": |
||||
u, err := url.Parse("rtmp://127.0.0.1:1935/test") |
||||
require.NoError(t, err) |
||||
|
||||
nconn, err := net.Dial("tcp", u.Host) |
||||
require.NoError(t, err) |
||||
defer nconn.Close() |
||||
|
||||
_, err = rtmp.NewClientConn(nconn, u, true) |
||||
require.NoError(t, err) |
||||
|
||||
case "srt": |
||||
conf := srt.DefaultConfig() |
||||
address, err := conf.UnmarshalURL("srt://localhost:8890?streamid=publish:test") |
||||
require.NoError(t, err) |
||||
|
||||
err = conf.Validate() |
||||
require.NoError(t, err) |
||||
|
||||
c, err := srt.Dial("srt", address, conf) |
||||
require.NoError(t, err) |
||||
defer c.Close() |
||||
} |
||||
|
||||
time.Sleep(500 * time.Millisecond) |
||||
}() |
||||
|
||||
_, err := os.Stat(onConnectFile) |
||||
require.NoError(t, err) |
||||
|
||||
_, err = os.Stat(onDisconnectFile) |
||||
require.NoError(t, err) |
||||
}) |
||||
} |
||||
} |
||||
|
||||
func TestPathRunOnReady(t *testing.T) { |
||||
onReadyFile := filepath.Join(os.TempDir(), "onready") |
||||
defer os.Remove(onReadyFile) |
||||
|
||||
onNotReadyFile := filepath.Join(os.TempDir(), "onunready") |
||||
defer os.Remove(onNotReadyFile) |
||||
|
||||
func() { |
||||
p, ok := newInstance(fmt.Sprintf("rtmp: no\n"+ |
||||
"hls: no\n"+ |
||||
"webrtc: no\n"+ |
||||
"paths:\n"+ |
||||
" test:\n"+ |
||||
" runOnReady: sh -c 'echo \"$MTX_PATH $MTX_QUERY\" > %s'\n"+ |
||||
" runOnNotReady: sh -c 'echo \"$MTX_PATH $MTX_QUERY\" > %s'\n", |
||||
onReadyFile, onNotReadyFile)) |
||||
require.Equal(t, true, ok) |
||||
defer p.Close() |
||||
|
||||
c := gortsplib.Client{} |
||||
|
||||
err := c.StartRecording( |
||||
"rtsp://localhost:8554/test?query=value", |
||||
&description.Session{Medias: []*description.Media{test.UniqueMediaH264()}}) |
||||
require.NoError(t, err) |
||||
defer c.Close() |
||||
|
||||
time.Sleep(500 * time.Millisecond) |
||||
}() |
||||
|
||||
byts, err := os.ReadFile(onReadyFile) |
||||
require.NoError(t, err) |
||||
require.Equal(t, "test query=value\n", string(byts)) |
||||
|
||||
byts, err = os.ReadFile(onNotReadyFile) |
||||
require.NoError(t, err) |
||||
require.Equal(t, "test query=value\n", string(byts)) |
||||
} |
||||
|
||||
func TestPathRunOnRead(t *testing.T) { |
||||
for _, ca := range []string{"rtsp", "rtmp", "srt", "webrtc"} { |
||||
t.Run(ca, func(t *testing.T) { |
||||
onReadFile := filepath.Join(os.TempDir(), "onread") |
||||
defer os.Remove(onReadFile) |
||||
|
||||
onUnreadFile := filepath.Join(os.TempDir(), "onunread") |
||||
defer os.Remove(onUnreadFile) |
||||
|
||||
func() { |
||||
p, ok := newInstance(fmt.Sprintf( |
||||
"paths:\n"+ |
||||
" test:\n"+ |
||||
" runOnRead: sh -c 'echo \"$MTX_PATH $MTX_QUERY\" > %s'\n"+ |
||||
" runOnUnread: sh -c 'echo \"$MTX_PATH $MTX_QUERY\" > %s'\n", |
||||
onReadFile, onUnreadFile)) |
||||
require.Equal(t, true, ok) |
||||
defer p.Close() |
||||
|
||||
media0 := test.UniqueMediaH264() |
||||
|
||||
source := gortsplib.Client{} |
||||
|
||||
err := source.StartRecording( |
||||
"rtsp://localhost:8554/test", |
||||
&description.Session{Medias: []*description.Media{media0}}) |
||||
require.NoError(t, err) |
||||
defer source.Close() |
||||
|
||||
switch ca { |
||||
case "rtsp": |
||||
reader := gortsplib.Client{} |
||||
|
||||
u, err := base.ParseURL("rtsp://127.0.0.1:8554/test?query=value") |
||||
require.NoError(t, err) |
||||
|
||||
err = reader.Start(u.Scheme, u.Host) |
||||
require.NoError(t, err) |
||||
defer reader.Close() |
||||
|
||||
desc, _, err := reader.Describe(u) |
||||
require.NoError(t, err) |
||||
|
||||
err = reader.SetupAll(desc.BaseURL, desc.Medias) |
||||
require.NoError(t, err) |
||||
|
||||
_, err = reader.Play(nil) |
||||
require.NoError(t, err) |
||||
|
||||
case "rtmp": |
||||
u, err := url.Parse("rtmp://127.0.0.1:1935/test?query=value") |
||||
require.NoError(t, err) |
||||
|
||||
nconn, err := net.Dial("tcp", u.Host) |
||||
require.NoError(t, err) |
||||
defer nconn.Close() |
||||
|
||||
conn, err := rtmp.NewClientConn(nconn, u, false) |
||||
require.NoError(t, err) |
||||
|
||||
_, err = rtmp.NewReader(conn) |
||||
require.NoError(t, err) |
||||
|
||||
case "srt": |
||||
conf := srt.DefaultConfig() |
||||
address, err := conf.UnmarshalURL("srt://localhost:8890?streamid=read:test:query=value") |
||||
require.NoError(t, err) |
||||
|
||||
err = conf.Validate() |
||||
require.NoError(t, err) |
||||
|
||||
reader, err := srt.Dial("srt", address, conf) |
||||
require.NoError(t, err) |
||||
defer reader.Close() |
||||
|
||||
case "webrtc": |
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
u, err := url.Parse("http://localhost:8889/test/whep?query=value") |
||||
require.NoError(t, err) |
||||
|
||||
c := &webrtc.WHIPClient{ |
||||
HTTPClient: hc, |
||||
URL: u, |
||||
Log: test.NilLogger{}, |
||||
} |
||||
|
||||
writerDone := make(chan struct{}) |
||||
defer func() { <-writerDone }() |
||||
|
||||
writerTerminate := make(chan struct{}) |
||||
defer close(writerTerminate) |
||||
|
||||
go func() { |
||||
defer close(writerDone) |
||||
i := uint16(0) |
||||
for { |
||||
select { |
||||
case <-time.After(100 * time.Millisecond): |
||||
case <-writerTerminate: |
||||
return |
||||
} |
||||
err := source.WritePacketRTP(media0, &rtp.Packet{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 123 + i, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
}, |
||||
Payload: []byte{5}, |
||||
}) |
||||
require.NoError(t, err) |
||||
i++ |
||||
} |
||||
}() |
||||
|
||||
_, err = c.Read(context.Background()) |
||||
require.NoError(t, err) |
||||
defer checkClose(t, c.Close) |
||||
} |
||||
|
||||
time.Sleep(500 * time.Millisecond) |
||||
}() |
||||
|
||||
byts, err := os.ReadFile(onReadFile) |
||||
require.NoError(t, err) |
||||
require.Equal(t, "test query=value\n", string(byts)) |
||||
|
||||
byts, err = os.ReadFile(onUnreadFile) |
||||
require.NoError(t, err) |
||||
require.Equal(t, "test query=value\n", string(byts)) |
||||
}) |
||||
} |
||||
} |
||||
|
||||
func TestPathMaxReaders(t *testing.T) { |
||||
p, ok := newInstance("paths:\n" + |
||||
" all_others:\n" + |
||||
" maxReaders: 1\n") |
||||
require.Equal(t, true, ok) |
||||
defer p.Close() |
||||
|
||||
source := gortsplib.Client{} |
||||
|
||||
err := source.StartRecording( |
||||
"rtsp://localhost:8554/mystream", |
||||
&description.Session{Medias: []*description.Media{ |
||||
test.UniqueMediaH264(), |
||||
test.UniqueMediaMPEG4Audio(), |
||||
}}) |
||||
require.NoError(t, err) |
||||
defer source.Close() |
||||
|
||||
for i := 0; i < 2; i++ { |
||||
reader := gortsplib.Client{} |
||||
|
||||
u, err := base.ParseURL("rtsp://127.0.0.1:8554/mystream") |
||||
require.NoError(t, err) |
||||
|
||||
err = reader.Start(u.Scheme, u.Host) |
||||
require.NoError(t, err) |
||||
defer reader.Close() |
||||
|
||||
desc, _, err := reader.Describe(u) |
||||
require.NoError(t, err) |
||||
|
||||
err = reader.SetupAll(desc.BaseURL, desc.Medias) |
||||
if i != 1 { |
||||
require.NoError(t, err) |
||||
} else { |
||||
require.Error(t, err) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func TestPathRecord(t *testing.T) { |
||||
dir, err := os.MkdirTemp("", "rtsp-path-record") |
||||
require.NoError(t, err) |
||||
defer os.RemoveAll(dir) |
||||
|
||||
p, ok := newInstance("api: yes\n" + |
||||
"record: yes\n" + |
||||
"recordPath: " + filepath.Join(dir, "%path/%Y-%m-%d_%H-%M-%S-%f") + "\n" + |
||||
"paths:\n" + |
||||
" all_others:\n" + |
||||
" record: yes\n") |
||||
require.Equal(t, true, ok) |
||||
defer p.Close() |
||||
|
||||
media0 := test.UniqueMediaH264() |
||||
|
||||
source := gortsplib.Client{} |
||||
|
||||
err = source.StartRecording( |
||||
"rtsp://localhost:8554/mystream", |
||||
&description.Session{Medias: []*description.Media{media0}}) |
||||
require.NoError(t, err) |
||||
defer source.Close() |
||||
|
||||
for i := 0; i < 4; i++ { |
||||
err := source.WritePacketRTP(media0, &rtp.Packet{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 1123 + uint16(i), |
||||
Timestamp: 45343 + 90000*uint32(i), |
||||
SSRC: 563423, |
||||
}, |
||||
Payload: []byte{5}, |
||||
}) |
||||
require.NoError(t, err) |
||||
} |
||||
|
||||
time.Sleep(500 * time.Millisecond) |
||||
|
||||
files, err := os.ReadDir(filepath.Join(dir, "mystream")) |
||||
require.NoError(t, err) |
||||
require.Equal(t, 1, len(files)) |
||||
|
||||
tr := &http.Transport{} |
||||
defer tr.CloseIdleConnections() |
||||
hc := &http.Client{Transport: tr} |
||||
|
||||
httpRequest(t, hc, http.MethodPatch, "http://localhost:9997/v3/config/paths/patch/all_others", map[string]interface{}{ |
||||
"record": false, |
||||
}, nil) |
||||
|
||||
time.Sleep(500 * time.Millisecond) |
||||
|
||||
httpRequest(t, hc, http.MethodPatch, "http://localhost:9997/v3/config/paths/patch/all_others", map[string]interface{}{ |
||||
"record": true, |
||||
}, nil) |
||||
|
||||
time.Sleep(500 * time.Millisecond) |
||||
|
||||
for i := 4; i < 8; i++ { |
||||
err := source.WritePacketRTP(media0, &rtp.Packet{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 1123 + uint16(i), |
||||
Timestamp: 45343 + 90000*uint32(i), |
||||
SSRC: 563423, |
||||
}, |
||||
Payload: []byte{5}, |
||||
}) |
||||
require.NoError(t, err) |
||||
} |
||||
|
||||
time.Sleep(500 * time.Millisecond) |
||||
|
||||
files, err = os.ReadDir(filepath.Join(dir, "mystream")) |
||||
require.NoError(t, err) |
||||
require.Equal(t, 2, len(files)) |
||||
} |
||||
|
||||
func TestPathFallback(t *testing.T) { |
||||
for _, ca := range []string{ |
||||
"absolute", |
||||
"relative", |
||||
"source", |
||||
} { |
||||
t.Run(ca, func(t *testing.T) { |
||||
var conf string |
||||
|
||||
switch ca { |
||||
case "absolute": |
||||
conf = "paths:\n" + |
||||
" path1:\n" + |
||||
" fallback: rtsp://localhost:8554/path2\n" + |
||||
" path2:\n" |
||||
|
||||
case "relative": |
||||
conf = "paths:\n" + |
||||
" path1:\n" + |
||||
" fallback: /path2\n" + |
||||
" path2:\n" |
||||
|
||||
case "source": |
||||
conf = "paths:\n" + |
||||
" path1:\n" + |
||||
" fallback: /path2\n" + |
||||
" source: rtsp://localhost:3333/nonexistent\n" + |
||||
" path2:\n" |
||||
} |
||||
|
||||
p1, ok := newInstance(conf) |
||||
require.Equal(t, true, ok) |
||||
defer p1.Close() |
||||
|
||||
source := gortsplib.Client{} |
||||
err := source.StartRecording("rtsp://localhost:8554/path2", |
||||
&description.Session{Medias: []*description.Media{test.UniqueMediaH264()}}) |
||||
require.NoError(t, err) |
||||
defer source.Close() |
||||
|
||||
u, err := base.ParseURL("rtsp://localhost:8554/path1") |
||||
require.NoError(t, err) |
||||
|
||||
dest := gortsplib.Client{} |
||||
err = dest.Start(u.Scheme, u.Host) |
||||
require.NoError(t, err) |
||||
defer dest.Close() |
||||
|
||||
desc, _, err := dest.Describe(u) |
||||
require.NoError(t, err) |
||||
require.Equal(t, 1, len(desc.Medias)) |
||||
}) |
||||
} |
||||
} |
||||
|
||||
func TestPathSourceRegexp(t *testing.T) { |
||||
var stream *gortsplib.ServerStream |
||||
|
||||
s := gortsplib.Server{ |
||||
Handler: &testServer{ |
||||
onDescribe: func(ctx *gortsplib.ServerHandlerOnDescribeCtx, |
||||
) (*base.Response, *gortsplib.ServerStream, error) { |
||||
require.Equal(t, "/a", ctx.Path) |
||||
return &base.Response{ |
||||
StatusCode: base.StatusOK, |
||||
}, stream, nil |
||||
}, |
||||
onSetup: func(_ *gortsplib.ServerHandlerOnSetupCtx) (*base.Response, *gortsplib.ServerStream, error) { |
||||
return &base.Response{ |
||||
StatusCode: base.StatusOK, |
||||
}, stream, nil |
||||
}, |
||||
onPlay: func(_ *gortsplib.ServerHandlerOnPlayCtx) (*base.Response, error) { |
||||
return &base.Response{ |
||||
StatusCode: base.StatusOK, |
||||
}, nil |
||||
}, |
||||
}, |
||||
RTSPAddress: "127.0.0.1:8555", |
||||
} |
||||
|
||||
err := s.Start() |
||||
require.NoError(t, err) |
||||
defer s.Close() |
||||
|
||||
stream = gortsplib.NewServerStream(&s, &description.Session{Medias: []*description.Media{test.MediaH264}}) |
||||
defer stream.Close() |
||||
|
||||
p, ok := newInstance( |
||||
"paths:\n" + |
||||
" '~^test_(.+)$':\n" + |
||||
" source: rtsp://127.0.0.1:8555/$G1\n" + |
||||
" sourceOnDemand: yes\n" + |
||||
" 'all':\n") |
||||
require.Equal(t, true, ok) |
||||
defer p.Close() |
||||
|
||||
reader := gortsplib.Client{} |
||||
|
||||
u, err := base.ParseURL("rtsp://127.0.0.1:8554/test_a") |
||||
require.NoError(t, err) |
||||
|
||||
err = reader.Start(u.Scheme, u.Host) |
||||
require.NoError(t, err) |
||||
defer reader.Close() |
||||
|
||||
_, _, err = reader.Describe(u) |
||||
require.NoError(t, err) |
||||
} |
||||
|
||||
func TestPathOverridePublisher(t *testing.T) { |
||||
for _, ca := range []string{ |
||||
"enabled", |
||||
"disabled", |
||||
} { |
||||
t.Run(ca, func(t *testing.T) { |
||||
conf := "rtmp: no\n" + |
||||
"paths:\n" + |
||||
" all_others:\n" |
||||
|
||||
if ca == "disabled" { |
||||
conf += " overridePublisher: no\n" |
||||
} |
||||
|
||||
p, ok := newInstance(conf) |
||||
require.Equal(t, true, ok) |
||||
defer p.Close() |
||||
|
||||
medi := test.UniqueMediaH264() |
||||
|
||||
s1 := gortsplib.Client{} |
||||
|
||||
err := s1.StartRecording("rtsp://localhost:8554/teststream", |
||||
&description.Session{Medias: []*description.Media{medi}}) |
||||
require.NoError(t, err) |
||||
defer s1.Close() |
||||
|
||||
s2 := gortsplib.Client{} |
||||
|
||||
err = s2.StartRecording("rtsp://localhost:8554/teststream", |
||||
&description.Session{Medias: []*description.Media{medi}}) |
||||
if ca == "enabled" { |
||||
require.NoError(t, err) |
||||
defer s2.Close() |
||||
} else { |
||||
require.Error(t, err) |
||||
} |
||||
|
||||
frameRecv := make(chan struct{}) |
||||
|
||||
c := gortsplib.Client{} |
||||
|
||||
u, err := base.ParseURL("rtsp://localhost:8554/teststream") |
||||
require.NoError(t, err) |
||||
|
||||
err = c.Start(u.Scheme, u.Host) |
||||
require.NoError(t, err) |
||||
defer c.Close() |
||||
|
||||
desc, _, err := c.Describe(u) |
||||
require.NoError(t, err) |
||||
|
||||
err = c.SetupAll(desc.BaseURL, desc.Medias) |
||||
require.NoError(t, err) |
||||
|
||||
c.OnPacketRTP(desc.Medias[0], desc.Medias[0].Formats[0], func(pkt *rtp.Packet) { |
||||
if ca == "enabled" { |
||||
require.Equal(t, []byte{5, 15, 16, 17, 18}, pkt.Payload) |
||||
} else { |
||||
require.Equal(t, []byte{5, 11, 12, 13, 14}, pkt.Payload) |
||||
} |
||||
close(frameRecv) |
||||
}) |
||||
|
||||
_, err = c.Play(nil) |
||||
require.NoError(t, err) |
||||
|
||||
if ca == "enabled" { |
||||
err := s1.Wait() |
||||
require.EqualError(t, err, "EOF") |
||||
|
||||
err = s2.WritePacketRTP(medi, &rtp.Packet{ |
||||
Header: rtp.Header{ |
||||
Version: 0x02, |
||||
PayloadType: 96, |
||||
SequenceNumber: 57899, |
||||
Timestamp: 345234345, |
||||
SSRC: 978651231, |
||||
Marker: true, |
||||
}, |
||||
Payload: []byte{5, 15, 16, 17, 18}, |
||||
}) |
||||
require.NoError(t, err) |
||||
} else { |
||||
err = s1.WritePacketRTP(medi, &rtp.Packet{ |
||||
Header: rtp.Header{ |
||||
Version: 0x02, |
||||
PayloadType: 96, |
||||
SequenceNumber: 57899, |
||||
Timestamp: 345234345, |
||||
SSRC: 978651231, |
||||
Marker: true, |
||||
}, |
||||
Payload: []byte{5, 11, 12, 13, 14}, |
||||
}) |
||||
require.NoError(t, err) |
||||
} |
||||
|
||||
<-frameRecv |
||||
}) |
||||
} |
||||
} |
||||
@@ -1,20 +0,0 @@
package core

import (
	"github.com/bluenviron/mediamtx/internal/defs"
	"github.com/bluenviron/mediamtx/internal/logger"
)

// sourceRedirect is a source that redirects to another one.
type sourceRedirect struct{}

func (*sourceRedirect) Log(logger.Level, string, ...interface{}) {
}

// APISourceDescribe implements source.
func (*sourceRedirect) APISourceDescribe() defs.APIPathSourceOrReader {
	return defs.APIPathSourceOrReader{
		Type: "redirect",
		ID:   "",
	}
}
@@ -1,264 +0,0 @@
|
||||
package core |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/conf" |
||||
"github.com/bluenviron/mediamtx/internal/defs" |
||||
"github.com/bluenviron/mediamtx/internal/logger" |
||||
hlssource "github.com/bluenviron/mediamtx/internal/staticsources/hls" |
||||
rpicamerasource "github.com/bluenviron/mediamtx/internal/staticsources/rpicamera" |
||||
rtmpsource "github.com/bluenviron/mediamtx/internal/staticsources/rtmp" |
||||
rtspsource "github.com/bluenviron/mediamtx/internal/staticsources/rtsp" |
||||
srtsource "github.com/bluenviron/mediamtx/internal/staticsources/srt" |
||||
udpsource "github.com/bluenviron/mediamtx/internal/staticsources/udp" |
||||
webrtcsource "github.com/bluenviron/mediamtx/internal/staticsources/webrtc" |
||||
) |
||||
|
||||
const ( |
||||
staticSourceHandlerRetryPause = 5 * time.Second |
||||
) |
||||
|
||||
type staticSourceHandlerParent interface { |
||||
logger.Writer |
||||
staticSourceHandlerSetReady(context.Context, defs.PathSourceStaticSetReadyReq) |
||||
staticSourceHandlerSetNotReady(context.Context, defs.PathSourceStaticSetNotReadyReq) |
||||
} |
||||
|
||||
// staticSourceHandler is a static source handler.
|
||||
type staticSourceHandler struct { |
||||
conf *conf.Path |
||||
logLevel conf.LogLevel |
||||
readTimeout conf.StringDuration |
||||
writeTimeout conf.StringDuration |
||||
writeQueueSize int |
||||
resolvedSource string |
||||
parent staticSourceHandlerParent |
||||
|
||||
ctx context.Context |
||||
ctxCancel func() |
||||
instance defs.StaticSource |
||||
running bool |
||||
|
||||
// in
|
||||
chReloadConf chan *conf.Path |
||||
chInstanceSetReady chan defs.PathSourceStaticSetReadyReq |
||||
chInstanceSetNotReady chan defs.PathSourceStaticSetNotReadyReq |
||||
|
||||
// out
|
||||
done chan struct{} |
||||
} |
||||
|
||||
func (s *staticSourceHandler) initialize() { |
||||
s.chReloadConf = make(chan *conf.Path) |
||||
s.chInstanceSetReady = make(chan defs.PathSourceStaticSetReadyReq) |
||||
s.chInstanceSetNotReady = make(chan defs.PathSourceStaticSetNotReadyReq) |
||||
|
||||
switch { |
||||
case strings.HasPrefix(s.resolvedSource, "rtsp://") || |
||||
strings.HasPrefix(s.resolvedSource, "rtsps://"): |
||||
s.instance = &rtspsource.Source{ |
||||
ResolvedSource: s.resolvedSource, |
||||
ReadTimeout: s.readTimeout, |
||||
WriteTimeout: s.writeTimeout, |
||||
WriteQueueSize: s.writeQueueSize, |
||||
Parent: s, |
||||
} |
||||
|
||||
case strings.HasPrefix(s.resolvedSource, "rtmp://") || |
||||
strings.HasPrefix(s.resolvedSource, "rtmps://"): |
||||
s.instance = &rtmpsource.Source{ |
||||
ResolvedSource: s.resolvedSource, |
||||
ReadTimeout: s.readTimeout, |
||||
WriteTimeout: s.writeTimeout, |
||||
Parent: s, |
||||
} |
||||
|
||||
case strings.HasPrefix(s.resolvedSource, "http://") || |
||||
strings.HasPrefix(s.resolvedSource, "https://"): |
||||
s.instance = &hlssource.Source{ |
||||
ResolvedSource: s.resolvedSource, |
||||
ReadTimeout: s.readTimeout, |
||||
Parent: s, |
||||
} |
||||
|
||||
case strings.HasPrefix(s.resolvedSource, "udp://"): |
||||
s.instance = &udpsource.Source{ |
||||
ResolvedSource: s.resolvedSource, |
||||
ReadTimeout: s.readTimeout, |
||||
Parent: s, |
||||
} |
||||
|
||||
case strings.HasPrefix(s.resolvedSource, "srt://"): |
||||
s.instance = &srtsource.Source{ |
||||
ResolvedSource: s.resolvedSource, |
||||
ReadTimeout: s.readTimeout, |
||||
Parent: s, |
||||
} |
||||
|
||||
case strings.HasPrefix(s.resolvedSource, "whep://") || |
||||
strings.HasPrefix(s.resolvedSource, "wheps://"): |
||||
s.instance = &webrtcsource.Source{ |
||||
ResolvedSource: s.resolvedSource, |
||||
ReadTimeout: s.readTimeout, |
||||
Parent: s, |
||||
} |
||||
|
||||
case s.resolvedSource == "rpiCamera": |
||||
s.instance = &rpicamerasource.Source{ |
||||
LogLevel: s.logLevel, |
||||
Parent: s, |
||||
} |
||||
} |
||||
} |
||||
|
||||
func (s *staticSourceHandler) close(reason string) { |
||||
s.stop(reason) |
||||
} |
||||
|
||||
func (s *staticSourceHandler) start(onDemand bool) { |
||||
if s.running { |
||||
panic("should not happen") |
||||
} |
||||
|
||||
s.running = true |
||||
s.instance.Log(logger.Info, "started%s", |
||||
func() string { |
||||
if onDemand { |
||||
return " on demand" |
||||
} |
||||
return "" |
||||
}()) |
||||
|
||||
s.ctx, s.ctxCancel = context.WithCancel(context.Background()) |
||||
s.done = make(chan struct{}) |
||||
|
||||
go s.run() |
||||
} |
||||
|
||||
func (s *staticSourceHandler) stop(reason string) { |
||||
if !s.running { |
||||
panic("should not happen") |
||||
} |
||||
|
||||
s.running = false |
||||
s.instance.Log(logger.Info, "stopped: %s", reason) |
||||
|
||||
s.ctxCancel() |
||||
|
||||
// we must wait since s.ctx is not thread safe
|
||||
<-s.done |
||||
} |
||||
|
||||
// Log implements logger.Writer.
|
||||
func (s *staticSourceHandler) Log(level logger.Level, format string, args ...interface{}) { |
||||
s.parent.Log(level, format, args...) |
||||
} |
||||
|
||||
func (s *staticSourceHandler) run() { |
||||
defer close(s.done) |
||||
|
||||
var runCtx context.Context |
||||
var runCtxCancel func() |
||||
runErr := make(chan error) |
||||
runReloadConf := make(chan *conf.Path) |
||||
|
||||
recreate := func() { |
||||
runCtx, runCtxCancel = context.WithCancel(context.Background()) |
||||
go func() { |
||||
runErr <- s.instance.Run(defs.StaticSourceRunParams{ |
||||
Context: runCtx, |
||||
Conf: s.conf, |
||||
ReloadConf: runReloadConf, |
||||
}) |
||||
}() |
||||
} |
||||
|
||||
recreate() |
||||
|
||||
recreating := false |
||||
recreateTimer := emptyTimer() |
||||
|
||||
for { |
||||
select { |
||||
case err := <-runErr: |
||||
runCtxCancel() |
||||
s.instance.Log(logger.Error, err.Error()) |
||||
recreating = true |
||||
recreateTimer = time.NewTimer(staticSourceHandlerRetryPause) |
||||
|
||||
case req := <-s.chInstanceSetReady: |
||||
s.parent.staticSourceHandlerSetReady(s.ctx, req) |
||||
|
||||
case req := <-s.chInstanceSetNotReady: |
||||
s.parent.staticSourceHandlerSetNotReady(s.ctx, req) |
||||
|
||||
case newConf := <-s.chReloadConf: |
||||
s.conf = newConf |
||||
if !recreating { |
||||
cReloadConf := runReloadConf |
||||
cInnerCtx := runCtx |
||||
go func() { |
||||
select { |
||||
case cReloadConf <- newConf: |
||||
case <-cInnerCtx.Done(): |
||||
} |
||||
}() |
||||
} |
||||
|
||||
case <-recreateTimer.C: |
||||
recreate() |
||||
recreating = false |
||||
|
||||
case <-s.ctx.Done(): |
||||
if !recreating { |
||||
runCtxCancel() |
||||
<-runErr |
||||
} |
||||
return |
||||
} |
||||
} |
||||
} |
||||
|
||||
func (s *staticSourceHandler) reloadConf(newConf *conf.Path) { |
||||
select { |
||||
case s.chReloadConf <- newConf: |
||||
case <-s.ctx.Done(): |
||||
} |
||||
} |
||||
|
||||
// APISourceDescribe implements source.
|
||||
func (s *staticSourceHandler) APISourceDescribe() defs.APIPathSourceOrReader { |
||||
return s.instance.APISourceDescribe() |
||||
} |
||||
|
||||
// SetReady is called by a staticSource.
|
||||
func (s *staticSourceHandler) SetReady(req defs.PathSourceStaticSetReadyReq) defs.PathSourceStaticSetReadyRes { |
||||
req.Res = make(chan defs.PathSourceStaticSetReadyRes) |
||||
select { |
||||
case s.chInstanceSetReady <- req: |
||||
res := <-req.Res |
||||
|
||||
if res.Err == nil { |
||||
s.instance.Log(logger.Info, "ready: %s", defs.MediasInfo(req.Desc.Medias)) |
||||
} |
||||
|
||||
return res |
||||
|
||||
case <-s.ctx.Done(): |
||||
return defs.PathSourceStaticSetReadyRes{Err: fmt.Errorf("terminated")} |
||||
} |
||||
} |
||||
|
||||
// SetNotReady is called by a staticSource.
|
||||
func (s *staticSourceHandler) SetNotReady(req defs.PathSourceStaticSetNotReadyReq) { |
||||
req.Res = make(chan struct{}) |
||||
select { |
||||
case s.chInstanceSetNotReady <- req: |
||||
<-req.Res |
||||
case <-s.ctx.Done(): |
||||
} |
||||
} |
||||
@@ -1,334 +0,0 @@
|
||||
package defs |
||||
|
||||
import ( |
||||
"time" |
||||
|
||||
"github.com/google/uuid" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/conf" |
||||
) |
||||
|
||||
// APIError is a generic error.
|
||||
type APIError struct { |
||||
Error string `json:"error"` |
||||
} |
||||
|
||||
// APIPathConfList is a list of path configurations.
|
||||
type APIPathConfList struct { |
||||
ItemCount int `json:"itemCount"` |
||||
PageCount int `json:"pageCount"` |
||||
Items []*conf.Path `json:"items"` |
||||
} |
||||
|
||||
// APIPathSourceOrReader is a source or a reader.
|
||||
type APIPathSourceOrReader struct { |
||||
Type string `json:"type"` |
||||
ID string `json:"id"` |
||||
} |
||||
|
||||
// APIPath is a path.
|
||||
type APIPath struct { |
||||
Name string `json:"name"` |
||||
ConfName string `json:"confName"` |
||||
Source *APIPathSourceOrReader `json:"source"` |
||||
Ready bool `json:"ready"` |
||||
ReadyTime *time.Time `json:"readyTime"` |
||||
Tracks []string `json:"tracks"` |
||||
BytesReceived uint64 `json:"bytesReceived"` |
||||
BytesSent uint64 `json:"bytesSent"` |
||||
Readers []APIPathSourceOrReader `json:"readers"` |
||||
} |
||||
|
||||
// APIPathList is a list of paths.
|
||||
type APIPathList struct { |
||||
ItemCount int `json:"itemCount"` |
||||
PageCount int `json:"pageCount"` |
||||
Items []*APIPath `json:"items"` |
||||
} |
||||
|
||||
// APIHLSMuxer is an HLS muxer.
|
||||
type APIHLSMuxer struct { |
||||
Path string `json:"path"` |
||||
Created time.Time `json:"created"` |
||||
LastRequest time.Time `json:"lastRequest"` |
||||
BytesSent uint64 `json:"bytesSent"` |
||||
} |
||||
|
||||
// APIHLSMuxerList is a list of HLS muxers.
|
||||
type APIHLSMuxerList struct { |
||||
ItemCount int `json:"itemCount"` |
||||
PageCount int `json:"pageCount"` |
||||
Items []*APIHLSMuxer `json:"items"` |
||||
} |
||||
|
||||
// APIRTMPConnState is the state of an RTMP connection.
|
||||
type APIRTMPConnState string |
||||
|
||||
// states.
|
||||
const ( |
||||
APIRTMPConnStateIdle APIRTMPConnState = "idle" |
||||
APIRTMPConnStateRead APIRTMPConnState = "read" |
||||
APIRTMPConnStatePublish APIRTMPConnState = "publish" |
||||
) |
||||
|
||||
// APIRTMPConn is an RTMP connection.
|
||||
type APIRTMPConn struct { |
||||
ID uuid.UUID `json:"id"` |
||||
Created time.Time `json:"created"` |
||||
RemoteAddr string `json:"remoteAddr"` |
||||
State APIRTMPConnState `json:"state"` |
||||
Path string `json:"path"` |
||||
Query string `json:"query"` |
||||
BytesReceived uint64 `json:"bytesReceived"` |
||||
BytesSent uint64 `json:"bytesSent"` |
||||
} |
||||
|
||||
// APIRTMPConnList is a list of RTMP connections.
|
||||
type APIRTMPConnList struct { |
||||
ItemCount int `json:"itemCount"` |
||||
PageCount int `json:"pageCount"` |
||||
Items []*APIRTMPConn `json:"items"` |
||||
} |
||||
|
||||
// APIRTSPConn is an RTSP connection.
|
||||
type APIRTSPConn struct { |
||||
ID uuid.UUID `json:"id"` |
||||
Created time.Time `json:"created"` |
||||
RemoteAddr string `json:"remoteAddr"` |
||||
BytesReceived uint64 `json:"bytesReceived"` |
||||
BytesSent uint64 `json:"bytesSent"` |
||||
} |
||||
|
||||
// APIRTSPConnsList is a list of RTSP connections.
|
||||
type APIRTSPConnsList struct { |
||||
ItemCount int `json:"itemCount"` |
||||
PageCount int `json:"pageCount"` |
||||
Items []*APIRTSPConn `json:"items"` |
||||
} |
||||
|
||||
// APIRTSPSessionState is the state of an RTSP session.
|
||||
type APIRTSPSessionState string |
||||
|
||||
// states.
|
||||
const ( |
||||
APIRTSPSessionStateIdle APIRTSPSessionState = "idle" |
||||
APIRTSPSessionStateRead APIRTSPSessionState = "read" |
||||
APIRTSPSessionStatePublish APIRTSPSessionState = "publish" |
||||
) |
||||
|
||||
// APIRTSPSession is an RTSP session.
|
||||
type APIRTSPSession struct { |
||||
ID uuid.UUID `json:"id"` |
||||
Created time.Time `json:"created"` |
||||
RemoteAddr string `json:"remoteAddr"` |
||||
State APIRTSPSessionState `json:"state"` |
||||
Path string `json:"path"` |
||||
Query string `json:"query"` |
||||
Transport *string `json:"transport"` |
||||
BytesReceived uint64 `json:"bytesReceived"` |
||||
BytesSent uint64 `json:"bytesSent"` |
||||
} |
||||
|
||||
// APIRTSPSessionList is a list of RTSP sessions.
|
||||
type APIRTSPSessionList struct { |
||||
ItemCount int `json:"itemCount"` |
||||
PageCount int `json:"pageCount"` |
||||
Items []*APIRTSPSession `json:"items"` |
||||
} |
||||
|
||||
// APISRTConnState is the state of an SRT connection.
|
||||
type APISRTConnState string |
||||
|
||||
// states.
|
||||
const ( |
||||
APISRTConnStateIdle APISRTConnState = "idle" |
||||
APISRTConnStateRead APISRTConnState = "read" |
||||
APISRTConnStatePublish APISRTConnState = "publish" |
||||
) |
||||
|
||||
// APISRTConn is an SRT connection.
|
||||
type APISRTConn struct { |
||||
ID uuid.UUID `json:"id"` |
||||
Created time.Time `json:"created"` |
||||
RemoteAddr string `json:"remoteAddr"` |
||||
State APISRTConnState `json:"state"` |
||||
Path string `json:"path"` |
||||
Query string `json:"query"` |
||||
|
||||
// The metric names/comments are pulled from GoSRT
|
||||
|
||||
// The total number of sent DATA packets, including retransmitted packets
|
||||
PacketsSent uint64 `json:"packetsSent"` |
||||
// The total number of received DATA packets, including retransmitted packets
|
||||
PacketsReceived uint64 `json:"packetsReceived"` |
||||
// The total number of unique DATA packets sent by the SRT sender
|
||||
PacketsSentUnique uint64 `json:"packetsSentUnique"` |
||||
// The total number of unique original, retransmitted or recovered by the packet filter DATA packets
|
||||
// received in time, decrypted without errors and, as a result, scheduled for delivery to the
|
||||
// upstream application by the SRT receiver.
|
||||
PacketsReceivedUnique uint64 `json:"packetsReceivedUnique"` |
||||
// The total number of data packets considered or reported as lost at the sender side.
|
||||
// Does not correspond to the packets detected as lost at the receiver side.
|
||||
PacketsSendLoss uint64 `json:"packetsSendLoss"` |
||||
// The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side
|
||||
PacketsReceivedLoss uint64 `json:"packetsReceivedLoss"` |
||||
// The total number of retransmitted packets sent by the SRT sender
|
||||
PacketsRetrans uint64 `json:"packetsRetrans"` |
||||
// The total number of retransmitted packets registered at the receiver side
|
||||
PacketsReceivedRetrans uint64 `json:"packetsReceivedRetrans"` |
||||
// The total number of sent ACK (Acknowledgement) control packets
|
||||
PacketsSentACK uint64 `json:"packetsSentACK"` |
||||
// The total number of received ACK (Acknowledgement) control packets
|
||||
PacketsReceivedACK uint64 `json:"packetsReceivedACK"` |
||||
// The total number of sent NAK (Negative Acknowledgement) control packets
|
||||
PacketsSentNAK uint64 `json:"packetsSentNAK"` |
||||
// The total number of received NAK (Negative Acknowledgement) control packets
|
||||
PacketsReceivedNAK uint64 `json:"packetsReceivedNAK"` |
||||
// The total number of sent KM (Key Material) control packets
|
||||
PacketsSentKM uint64 `json:"packetsSentKM"` |
||||
// The total number of received KM (Key Material) control packets
|
||||
PacketsReceivedKM uint64 `json:"packetsReceivedKM"` |
||||
// The total accumulated time in microseconds, during which the SRT sender has some data to transmit,
|
||||
// including packets that have been sent, but not yet acknowledged
|
||||
UsSndDuration uint64 `json:"usSndDuration"` |
||||
// ??
|
||||
PacketsReceivedBelated uint64 `json:"packetsReceivedBelated"` |
||||
// The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time
|
||||
PacketsSendDrop uint64 `json:"packetsSendDrop"` |
||||
// The total number of dropped by the SRT receiver and, as a result,
|
||||
// not delivered to the upstream application DATA packets
|
||||
PacketsReceivedDrop uint64 `json:"packetsReceivedDrop"` |
||||
// The total number of packets that failed to be decrypted at the receiver side
|
||||
PacketsReceivedUndecrypt uint64 `json:"packetsReceivedUndecrypt"` |
||||
|
||||
// Same as packetsReceived, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
BytesReceived uint64 `json:"bytesReceived"` |
||||
// Same as packetsSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
BytesSent uint64 `json:"bytesSent"` |
||||
// Same as packetsSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
BytesSentUnique uint64 `json:"bytesSentUnique"` |
||||
// Same as packetsReceivedUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
BytesReceivedUnique uint64 `json:"bytesReceivedUnique"` |
||||
// Same as packetsReceivedLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT),
|
||||
// bytes for the presently missing (either reordered or lost) packets' payloads are estimated
|
||||
// based on the average packet size
|
||||
BytesReceivedLoss uint64 `json:"bytesReceivedLoss"` |
||||
// Same as packetsRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
BytesRetrans uint64 `json:"bytesRetrans"` |
||||
// Same as packetsReceivedRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
BytesReceivedRetrans uint64 `json:"bytesReceivedRetrans"` |
||||
// Same as PacketsReceivedBelated, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
BytesReceivedBelated uint64 `json:"bytesReceivedBelated"` |
||||
// Same as packetsSendDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
BytesSendDrop uint64 `json:"bytesSendDrop"` |
||||
// Same as packetsReceivedDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
BytesReceivedDrop uint64 `json:"bytesReceivedDrop"` |
||||
// Same as packetsReceivedUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
BytesReceivedUndecrypt uint64 `json:"bytesReceivedUndecrypt"` |
||||
|
||||
// Current minimum time interval between which consecutive packets are sent, in microseconds
|
||||
UsPacketsSendPeriod float64 `json:"usPacketsSendPeriod"` |
||||
// The maximum number of packets that can be "in flight"
|
||||
PacketsFlowWindow uint64 `json:"packetsFlowWindow"` |
||||
// The number of packets in flight
|
||||
PacketsFlightSize uint64 `json:"packetsFlightSize"` |
||||
// Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA)
|
||||
// of an endpoint's RTT samples, in milliseconds
|
||||
MsRTT float64 `json:"msRTT"` |
||||
// Current transmission bandwidth, in Mbps
|
||||
MbpsSendRate float64 `json:"mbpsSendRate"` |
||||
// Current receiving bandwidth, in Mbps
|
||||
MbpsReceiveRate float64 `json:"mbpsReceiveRate"` |
||||
// Estimated capacity of the network link, in Mbps
|
||||
MbpsLinkCapacity float64 `json:"mbpsLinkCapacity"` |
||||
// The available space in the sender's buffer, in bytes
|
||||
BytesAvailSendBuf uint64 `json:"bytesAvailSendBuf"` |
||||
// The available space in the receiver's buffer, in bytes
|
||||
BytesAvailReceiveBuf uint64 `json:"bytesAvailReceiveBuf"` |
||||
// Transmission bandwidth limit, in Mbps
|
||||
MbpsMaxBW float64 `json:"mbpsMaxBW"` |
||||
// Maximum Segment Size (MSS), in bytes
|
||||
ByteMSS uint64 `json:"byteMSS"` |
||||
// The number of packets in the sender's buffer that are already scheduled
|
||||
// for sending or even possibly sent, but not yet acknowledged
|
||||
PacketsSendBuf uint64 `json:"packetsSendBuf"` |
||||
// Instantaneous (current) value of packetsSndBuf, but expressed in bytes,
|
||||
// including payload and all headers (IP, TCP, SRT)
|
||||
BytesSendBuf uint64 `json:"bytesSendBuf"` |
||||
// The timespan (msec) of packets in the sender's buffer (unacknowledged packets)
|
||||
MsSendBuf uint64 `json:"msSendBuf"` |
||||
// Timestamp-based Packet Delivery Delay value of the peer
|
||||
MsSendTsbPdDelay uint64 `json:"msSendTsbPdDelay"` |
||||
// The number of acknowledged packets in receiver's buffer
|
||||
PacketsReceiveBuf uint64 `json:"packetsReceiveBuf"` |
||||
// Instantaneous (current) value of packetsRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT)
|
||||
BytesReceiveBuf uint64 `json:"bytesReceiveBuf"` |
||||
// The timespan (msec) of acknowledged packets in the receiver's buffer
|
||||
MsReceiveBuf uint64 `json:"msReceiveBuf"` |
||||
// Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY
|
||||
MsReceiveTsbPdDelay uint64 `json:"msReceiveTsbPdDelay"` |
||||
// Instant value of the packet reorder tolerance
|
||||
PacketsReorderTolerance uint64 `json:"packetsReorderTolerance"` |
||||
// Accumulated difference between the current time and the time-to-play of a packet that is received late
|
||||
PacketsReceivedAvgBelatedTime uint64 `json:"packetsReceivedAvgBelatedTime"` |
||||
// Percentage of resent data vs. sent data
|
||||
PacketsSendLossRate float64 `json:"packetsSendLossRate"` |
||||
// Percentage of retransmitted data vs. received data
|
||||
PacketsReceivedLossRate float64 `json:"packetsReceivedLossRate"` |
||||
} |
||||
|
||||
// APISRTConnList is a list of SRT connections.
|
||||
type APISRTConnList struct { |
||||
ItemCount int `json:"itemCount"` |
||||
PageCount int `json:"pageCount"` |
||||
Items []*APISRTConn `json:"items"` |
||||
} |
||||
|
||||
// APIWebRTCSessionState is the state of a WebRTC session.
|
||||
type APIWebRTCSessionState string |
||||
|
||||
// states.
|
||||
const ( |
||||
APIWebRTCSessionStateRead APIWebRTCSessionState = "read" |
||||
APIWebRTCSessionStatePublish APIWebRTCSessionState = "publish" |
||||
) |
||||
|
||||
// APIWebRTCSession is a WebRTC session.
|
||||
type APIWebRTCSession struct { |
||||
ID uuid.UUID `json:"id"` |
||||
Created time.Time `json:"created"` |
||||
RemoteAddr string `json:"remoteAddr"` |
||||
PeerConnectionEstablished bool `json:"peerConnectionEstablished"` |
||||
LocalCandidate string `json:"localCandidate"` |
||||
RemoteCandidate string `json:"remoteCandidate"` |
||||
State APIWebRTCSessionState `json:"state"` |
||||
Path string `json:"path"` |
||||
Query string `json:"query"` |
||||
BytesReceived uint64 `json:"bytesReceived"` |
||||
BytesSent uint64 `json:"bytesSent"` |
||||
} |
||||
|
||||
// APIWebRTCSessionList is a list of WebRTC sessions.
|
||||
type APIWebRTCSessionList struct { |
||||
ItemCount int `json:"itemCount"` |
||||
PageCount int `json:"pageCount"` |
||||
Items []*APIWebRTCSession `json:"items"` |
||||
} |
||||
|
||||
// APIRecordingSegment is a recording segment.
|
||||
type APIRecordingSegment struct { |
||||
Start time.Time `json:"start"` |
||||
} |
||||
|
||||
// APIRecording is a recording.
|
||||
type APIRecording struct { |
||||
Name string `json:"name"` |
||||
Segments []*APIRecordingSegment `json:"segments"` |
||||
} |
||||
|
||||
// APIRecordingList is a list of recordings.
|
||||
type APIRecordingList struct { |
||||
ItemCount int `json:"itemCount"` |
||||
PageCount int `json:"pageCount"` |
||||
Items []*APIRecording `json:"items"` |
||||
} |
||||
@@ -1,2 +0,0 @@
// Package defs contains shared definitions.
package defs
@@ -1,179 +0,0 @@
|
||||
package defs |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/base" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/description" |
||||
"github.com/google/uuid" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/auth" |
||||
"github.com/bluenviron/mediamtx/internal/conf" |
||||
"github.com/bluenviron/mediamtx/internal/externalcmd" |
||||
"github.com/bluenviron/mediamtx/internal/stream" |
||||
) |
||||
|
||||
// PathNoOnePublishingError is returned when no one is publishing.
|
||||
type PathNoOnePublishingError struct { |
||||
PathName string |
||||
} |
||||
|
||||
// Error implements the error interface.
|
||||
func (e PathNoOnePublishingError) Error() string { |
||||
return fmt.Sprintf("no one is publishing to path '%s'", e.PathName) |
||||
} |
||||
|
||||
// Path is a path.
|
||||
type Path interface { |
||||
Name() string |
||||
SafeConf() *conf.Path |
||||
ExternalCmdEnv() externalcmd.Environment |
||||
StartPublisher(req PathStartPublisherReq) (*stream.Stream, error) |
||||
StopPublisher(req PathStopPublisherReq) |
||||
RemovePublisher(req PathRemovePublisherReq) |
||||
RemoveReader(req PathRemoveReaderReq) |
||||
} |
||||
|
||||
// PathAccessRequest is an access request.
|
||||
type PathAccessRequest struct { |
||||
Name string |
||||
Query string |
||||
Publish bool |
||||
SkipAuth bool |
||||
|
||||
// only if skipAuth = false
|
||||
IP net.IP |
||||
User string |
||||
Pass string |
||||
Proto auth.Protocol |
||||
ID *uuid.UUID |
||||
RTSPRequest *base.Request |
||||
RTSPBaseURL *base.URL |
||||
RTSPNonce string |
||||
} |
||||
|
||||
// ToAuthRequest converts a path access request into an authentication request.
|
||||
func (r *PathAccessRequest) ToAuthRequest() *auth.Request { |
||||
return &auth.Request{ |
||||
User: r.User, |
||||
Pass: r.Pass, |
||||
IP: r.IP, |
||||
Action: func() conf.AuthAction { |
||||
if r.Publish { |
||||
return conf.AuthActionPublish |
||||
} |
||||
return conf.AuthActionRead |
||||
}(), |
||||
Path: r.Name, |
||||
Protocol: r.Proto, |
||||
ID: r.ID, |
||||
Query: r.Query, |
||||
RTSPRequest: r.RTSPRequest, |
||||
RTSPBaseURL: r.RTSPBaseURL, |
||||
RTSPNonce: r.RTSPNonce, |
||||
} |
||||
} |
||||
|
||||
// PathFindPathConfRes contains the response of FindPathConf().
|
||||
type PathFindPathConfRes struct { |
||||
Conf *conf.Path |
||||
Err error |
||||
} |
||||
|
||||
// PathFindPathConfReq contains arguments of FindPathConf().
|
||||
type PathFindPathConfReq struct { |
||||
AccessRequest PathAccessRequest |
||||
Res chan PathFindPathConfRes |
||||
} |
||||
|
||||
// PathDescribeRes contains the response of Describe().
|
||||
type PathDescribeRes struct { |
||||
Path Path |
||||
Stream *stream.Stream |
||||
Redirect string |
||||
Err error |
||||
} |
||||
|
||||
// PathDescribeReq contains arguments of Describe().
|
||||
type PathDescribeReq struct { |
||||
AccessRequest PathAccessRequest |
||||
Res chan PathDescribeRes |
||||
} |
||||
|
||||
// PathAddPublisherRes contains the response of AddPublisher().
|
||||
type PathAddPublisherRes struct { |
||||
Path Path |
||||
Err error |
||||
} |
||||
|
||||
// PathAddPublisherReq contains arguments of AddPublisher().
|
||||
type PathAddPublisherReq struct { |
||||
Author Publisher |
||||
AccessRequest PathAccessRequest |
||||
Res chan PathAddPublisherRes |
||||
} |
||||
|
||||
// PathRemovePublisherReq contains arguments of RemovePublisher().
|
||||
type PathRemovePublisherReq struct { |
||||
Author Publisher |
||||
Res chan struct{} |
||||
} |
||||
|
||||
// PathStartPublisherRes contains the response of StartPublisher().
|
||||
type PathStartPublisherRes struct { |
||||
Stream *stream.Stream |
||||
Err error |
||||
} |
||||
|
||||
// PathStartPublisherReq contains arguments of StartPublisher().
|
||||
type PathStartPublisherReq struct { |
||||
Author Publisher |
||||
Desc *description.Session |
||||
GenerateRTPPackets bool |
||||
Res chan PathStartPublisherRes |
||||
} |
||||
|
||||
// PathStopPublisherReq contains arguments of StopPublisher().
|
||||
type PathStopPublisherReq struct { |
||||
Author Publisher |
||||
Res chan struct{} |
||||
} |
||||
|
||||
// PathAddReaderRes contains the response of AddReader().
|
||||
type PathAddReaderRes struct { |
||||
Path Path |
||||
Stream *stream.Stream |
||||
Err error |
||||
} |
||||
|
||||
// PathAddReaderReq contains arguments of AddReader().
|
||||
type PathAddReaderReq struct { |
||||
Author Reader |
||||
AccessRequest PathAccessRequest |
||||
Res chan PathAddReaderRes |
||||
} |
||||
|
||||
// PathRemoveReaderReq contains arguments of RemoveReader().
|
||||
type PathRemoveReaderReq struct { |
||||
Author Reader |
||||
Res chan struct{} |
||||
} |
||||
|
||||
// PathSourceStaticSetReadyRes contains the response of SetReady().
|
||||
type PathSourceStaticSetReadyRes struct { |
||||
Stream *stream.Stream |
||||
Err error |
||||
} |
||||
|
||||
// PathSourceStaticSetReadyReq contains arguments of SetReady().
|
||||
type PathSourceStaticSetReadyReq struct { |
||||
Desc *description.Session |
||||
GenerateRTPPackets bool |
||||
Res chan PathSourceStaticSetReadyRes |
||||
} |
||||
|
||||
// PathSourceStaticSetNotReadyReq contains arguments of SetNotReady().
|
||||
type PathSourceStaticSetNotReadyReq struct { |
||||
Res chan struct{} |
||||
} |
||||
@@ -1,7 +0,0 @@
package defs

// Publisher is an entity that can publish a stream.
type Publisher interface {
	Source
	Close()
}
@@ -1,7 +0,0 @@
package defs

// Reader is an entity that can read a stream.
type Reader interface {
	Close()
	APIReaderDescribe() APIPathSourceOrReader
}
@@ -1,63 +0,0 @@
package defs

import (
	"fmt"
	"strings"

	"github.com/bluenviron/gortsplib/v4/pkg/description"
	"github.com/bluenviron/gortsplib/v4/pkg/format"

	"github.com/bluenviron/mediamtx/internal/logger"
)

// Source is an entity that can provide a stream.
// it can be:
// - publisher
// - staticSourceHandler
// - redirectSource
type Source interface {
	logger.Writer
	APISourceDescribe() APIPathSourceOrReader
}

// FormatsToCodecs returns the name of codecs of given formats.
func FormatsToCodecs(formats []format.Format) []string {
	ret := make([]string, len(formats))
	for i, forma := range formats {
		ret[i] = forma.Codec()
	}
	return ret
}

// FormatsInfo returns a description of formats.
func FormatsInfo(formats []format.Format) string {
	return fmt.Sprintf("%d %s (%s)",
		len(formats),
		func() string {
			if len(formats) == 1 {
				return "track"
			}
			return "tracks"
		}(),
		strings.Join(FormatsToCodecs(formats), ", "))
}

// MediasToCodecs returns the name of codecs of given medias.
func MediasToCodecs(medias []*description.Media) []string {
	var formats []format.Format
	for _, media := range medias {
		formats = append(formats, media.Formats...)
	}

	return FormatsToCodecs(formats)
}

// MediasInfo returns a description of medias.
func MediasInfo(medias []*description.Media) string {
	var formats []format.Format
	for _, media := range medias {
		formats = append(formats, media.Formats...)
	}

	return FormatsInfo(formats)
}
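For reference, a minimal sketch (not part of this diff) of what the helpers above produce, assuming a single G.711 track like the one used in the format-processor tests further down. The internal import paths resolve only inside the MediaMTX module and are shown purely for illustration.

package main

import (
	"fmt"

	"github.com/bluenviron/gortsplib/v4/pkg/format"

	"github.com/bluenviron/mediamtx/internal/defs"
)

func main() {
	// one mono G.711 mu-law track
	formats := []format.Format{&format.G711{
		PayloadTyp:   0,
		MULaw:        true,
		SampleRate:   8000,
		ChannelCount: 1,
	}}

	// prints something like "1 track (G711)"
	fmt.Println(defs.FormatsInfo(formats))
}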
@ -1,29 +0,0 @@
@@ -1,29 +0,0 @@
package defs

import (
	"context"

	"github.com/bluenviron/mediamtx/internal/conf"
	"github.com/bluenviron/mediamtx/internal/logger"
)

// StaticSource is a static source.
type StaticSource interface {
	logger.Writer
	Run(StaticSourceRunParams) error
	APISourceDescribe() APIPathSourceOrReader
}

// StaticSourceParent is the parent of a static source.
type StaticSourceParent interface {
	logger.Writer
	SetReady(req PathSourceStaticSetReadyReq) PathSourceStaticSetReadyRes
	SetNotReady(req PathSourceStaticSetNotReadyReq)
}

// StaticSourceRunParams is the set of params passed to Run().
type StaticSourceRunParams struct {
	Context    context.Context
	Conf       *conf.Path
	ReloadConf chan *conf.Path
}
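As an illustration only, a minimal sketch of a type that satisfies StaticSource; the package name, behaviour and the assumed Log() signature are hypothetical and not part of this diff.

package mysource

import (
	"github.com/bluenviron/mediamtx/internal/defs"
	"github.com/bluenviron/mediamtx/internal/logger"
)

// dummySource is a do-nothing StaticSource: it waits until the path is
// closed or the configuration is reloaded.
type dummySource struct {
	parent defs.StaticSourceParent
}

// Log forwards log entries to the parent (assumes logger.Writer exposes
// Log(level, format, args...), as it does elsewhere in MediaMTX).
func (s *dummySource) Log(level logger.Level, format string, args ...interface{}) {
	s.parent.Log(level, format, args...)
}

// APISourceDescribe returns a zero description (details omitted).
func (s *dummySource) APISourceDescribe() defs.APIPathSourceOrReader {
	return defs.APIPathSourceOrReader{}
}

// Run blocks until the context is canceled; a real source would also call
// parent.SetReady()/SetNotReady() around the lifetime of its stream.
func (s *dummySource) Run(params defs.StaticSourceRunParams) error {
	for {
		select {
		case <-params.Context.Done():
			return nil
		case <-params.ReloadConf:
			// a real source would re-read the new *conf.Path here
		}
	}
}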
@ -1,110 +0,0 @@
@@ -1,110 +0,0 @@
// Package externalcmd allows launching external commands.
package externalcmd

import (
	"errors"
	"fmt"
	"os"
	"time"
)

const (
	restartPause = 5 * time.Second
)

var errTerminated = errors.New("terminated")

// OnExitFunc is the prototype of onExit.
type OnExitFunc func(error)

// Environment is a Cmd environment.
type Environment map[string]string

// Cmd is an external command.
type Cmd struct {
	pool    *Pool
	cmdstr  string
	restart bool
	env     Environment
	onExit  func(error)

	// in
	terminate chan struct{}
}

// NewCmd allocates a Cmd.
func NewCmd(
	pool *Pool,
	cmdstr string,
	restart bool,
	env Environment,
	onExit OnExitFunc,
) *Cmd {
	// replace variables in both Linux and Windows, in order to allow using the
	// same commands on both of them.
	cmdstr = os.Expand(cmdstr, func(variable string) string {
		if value, ok := env[variable]; ok {
			return value
		}
		return os.Getenv(variable)
	})

	if onExit == nil {
		onExit = func(_ error) {}
	}

	e := &Cmd{
		pool:      pool,
		cmdstr:    cmdstr,
		restart:   restart,
		env:       env,
		onExit:    onExit,
		terminate: make(chan struct{}),
	}

	pool.wg.Add(1)

	go e.run()

	return e
}

// Close closes the command. It doesn't wait for the command to exit.
func (e *Cmd) Close() {
	close(e.terminate)
}

func (e *Cmd) run() {
	defer e.pool.wg.Done()

	env := append([]string(nil), os.Environ()...)
	for key, val := range e.env {
		env = append(env, key+"="+val)
	}

	for {
		err := e.runOSSpecific(env)
		if errors.Is(err, errTerminated) {
			return
		}

		if !e.restart {
			if err != nil {
				e.onExit(err)
			}
			return
		}

		if err != nil {
			e.onExit(err)
		} else {
			e.onExit(fmt.Errorf("command exited with code 0"))
		}

		select {
		case <-time.After(restartPause):
		case <-e.terminate:
			return
		}
	}
}
@ -1,64 +0,0 @@
@@ -1,64 +0,0 @@
//go:build !windows
// +build !windows

package externalcmd

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
	"syscall"

	"github.com/kballard/go-shellquote"
)

func (e *Cmd) runOSSpecific(env []string) error {
	cmdParts, err := shellquote.Split(e.cmdstr)
	if err != nil {
		return err
	}

	cmd := exec.Command(cmdParts[0], cmdParts[1:]...)

	cmd.Env = env
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	// set process group in order to allow killing subprocesses
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

	err = cmd.Start()
	if err != nil {
		return err
	}

	cmdDone := make(chan int)
	go func() {
		cmdDone <- func() int {
			err := cmd.Wait()
			if err == nil {
				return 0
			}
			var ee *exec.ExitError
			if errors.As(err, &ee) {
				return ee.ExitCode()
			}
			return 0
		}()
	}()

	select {
	case <-e.terminate:
		// the minus is needed to kill all subprocesses
		syscall.Kill(-cmd.Process.Pid, syscall.SIGINT) //nolint:errcheck
		<-cmdDone
		return errTerminated

	case c := <-cmdDone:
		if c != 0 {
			return fmt.Errorf("command exited with code %d", c)
		}
		return nil
	}
}
@ -1,131 +0,0 @@
@@ -1,131 +0,0 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package externalcmd |
||||
|
||||
import ( |
||||
"fmt" |
||||
"os" |
||||
"os/exec" |
||||
"strings" |
||||
"syscall" |
||||
"unsafe" |
||||
|
||||
"github.com/kballard/go-shellquote" |
||||
"golang.org/x/sys/windows" |
||||
) |
||||
|
||||
// taken from
|
||||
// https://gist.github.com/hallazzang/76f3970bfc949831808bbebc8ca15209
|
||||
func createProcessGroup() (windows.Handle, error) { |
||||
h, err := windows.CreateJobObject(nil, nil) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
|
||||
info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{ |
||||
BasicLimitInformation: windows.JOBOBJECT_BASIC_LIMIT_INFORMATION{ |
||||
LimitFlags: windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE, |
||||
}, |
||||
} |
||||
_, err = windows.SetInformationJobObject( |
||||
h, |
||||
windows.JobObjectExtendedLimitInformation, |
||||
uintptr(unsafe.Pointer(&info)), |
||||
uint32(unsafe.Sizeof(info))) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
|
||||
return h, nil |
||||
} |
||||
|
||||
func closeProcessGroup(h windows.Handle) error { |
||||
return windows.CloseHandle(h) |
||||
} |
||||
|
||||
func addProcessToGroup(h windows.Handle, p *os.Process) error { |
||||
type process struct { |
||||
Pid int |
||||
Handle uintptr |
||||
} |
||||
|
||||
return windows.AssignProcessToJobObject(h, |
||||
windows.Handle((*process)(unsafe.Pointer(p)).Handle)) |
||||
} |
||||
|
||||
func (e *Cmd) runOSSpecific(env []string) error { |
||||
var cmd *exec.Cmd |
||||
|
||||
// from Golang documentation:
|
||||
// On Windows, processes receive the whole command line as a single string and do their own parsing.
|
||||
// Command combines and quotes Args into a command line string with an algorithm compatible with
|
||||
// applications using CommandLineToArgvW (which is the most common way). Notable exceptions are
|
||||
// msiexec.exe and cmd.exe (and thus, all batch files), which have a different unquoting algorithm.
|
||||
// In these or other similar cases, you can do the quoting yourself and provide the full command
|
||||
// line in SysProcAttr.CmdLine, leaving Args empty.
|
||||
if strings.HasPrefix(e.cmdstr, "cmd ") || strings.HasPrefix(e.cmdstr, "cmd.exe ") { |
||||
args := strings.TrimPrefix(strings.TrimPrefix(e.cmdstr, "cmd "), "cmd.exe ") |
||||
|
||||
cmd = exec.Command("cmd.exe") |
||||
cmd.SysProcAttr = &syscall.SysProcAttr{ |
||||
CmdLine: args, |
||||
} |
||||
} else { |
||||
cmdParts, err := shellquote.Split(e.cmdstr) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
cmd = exec.Command(cmdParts[0], cmdParts[1:]...) |
||||
} |
||||
|
||||
cmd.Env = env |
||||
cmd.Stdout = os.Stdout |
||||
cmd.Stderr = os.Stderr |
||||
|
||||
// create a process group to kill all subprocesses
|
||||
g, err := createProcessGroup() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
err = cmd.Start() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
err = addProcessToGroup(g, cmd.Process) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
cmdDone := make(chan int) |
||||
go func() { |
||||
cmdDone <- func() int { |
||||
err := cmd.Wait() |
||||
if err == nil { |
||||
return 0 |
||||
} |
||||
ee, ok := err.(*exec.ExitError) |
||||
if !ok { |
||||
return 0 |
||||
} |
||||
return ee.ExitCode() |
||||
}() |
||||
}() |
||||
|
||||
select { |
||||
case <-e.terminate: |
||||
closeProcessGroup(g) |
||||
<-cmdDone |
||||
return errTerminated |
||||
|
||||
case c := <-cmdDone: |
||||
closeProcessGroup(g) |
||||
if c != 0 { |
||||
return fmt.Errorf("command exited with code %d", c) |
||||
} |
||||
return nil |
||||
} |
||||
} |
||||
@ -1,20 +0,0 @@
@@ -1,20 +0,0 @@
package externalcmd

import (
	"sync"
)

// Pool is a pool of external commands.
type Pool struct {
	wg sync.WaitGroup
}

// NewPool allocates a Pool.
func NewPool() *Pool {
	return &Pool{}
}

// Close waits for all external commands to exit.
func (p *Pool) Close() {
	p.wg.Wait()
}
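A usage sketch of the package above. The command line and environment values are placeholders; only the constructors and Close methods shown in this diff are used.

package main

import (
	"log"
	"time"

	"github.com/bluenviron/mediamtx/internal/externalcmd"
)

func main() {
	pool := externalcmd.NewPool()

	// start a command; restart=true relaunches it (after restartPause)
	// whenever it exits on its own
	cmd := externalcmd.NewCmd(
		pool,
		"ffmpeg -version", // placeholder command line
		true,              // restart on exit
		externalcmd.Environment{"MTX_PATH": "mystream"}, // would expand $MTX_PATH in the command line
		func(err error) {
			log.Printf("command exited: %v", err)
		})

	time.Sleep(2 * time.Second) // placeholder for real work

	cmd.Close()  // ask the command to terminate; does not wait
	pool.Close() // wait for every command in the pool to exit
}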
@ -1,123 +0,0 @@
@@ -1,123 +0,0 @@
|
||||
package formatprocessor |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/format/rtpac3" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/rtptime" |
||||
"github.com/pion/rtp" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/unit" |
||||
) |
||||
|
||||
type formatProcessorAC3 struct { |
||||
udpMaxPayloadSize int |
||||
format *format.AC3 |
||||
timeEncoder *rtptime.Encoder |
||||
encoder *rtpac3.Encoder |
||||
decoder *rtpac3.Decoder |
||||
} |
||||
|
||||
func newAC3( |
||||
udpMaxPayloadSize int, |
||||
forma *format.AC3, |
||||
generateRTPPackets bool, |
||||
) (*formatProcessorAC3, error) { |
||||
t := &formatProcessorAC3{ |
||||
udpMaxPayloadSize: udpMaxPayloadSize, |
||||
format: forma, |
||||
} |
||||
|
||||
if generateRTPPackets { |
||||
err := t.createEncoder() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
t.timeEncoder = &rtptime.Encoder{ |
||||
ClockRate: forma.ClockRate(), |
||||
} |
||||
err = t.timeEncoder.Initialize() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
return t, nil |
||||
} |
||||
|
||||
func (t *formatProcessorAC3) createEncoder() error { |
||||
t.encoder = &rtpac3.Encoder{ |
||||
PayloadType: t.format.PayloadTyp, |
||||
} |
||||
return t.encoder.Init() |
||||
} |
||||
|
||||
func (t *formatProcessorAC3) ProcessUnit(uu unit.Unit) error { //nolint:dupl
|
||||
u := uu.(*unit.AC3) |
||||
|
||||
pkts, err := t.encoder.Encode(u.Frames) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
u.RTPPackets = pkts |
||||
|
||||
ts := t.timeEncoder.Encode(u.PTS) |
||||
for _, pkt := range u.RTPPackets { |
||||
pkt.Timestamp += ts |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (t *formatProcessorAC3) ProcessRTPPacket( //nolint:dupl
|
||||
pkt *rtp.Packet, |
||||
ntp time.Time, |
||||
pts time.Duration, |
||||
hasNonRTSPReaders bool, |
||||
) (Unit, error) { |
||||
u := &unit.AC3{ |
||||
Base: unit.Base{ |
||||
RTPPackets: []*rtp.Packet{pkt}, |
||||
NTP: ntp, |
||||
PTS: pts, |
||||
}, |
||||
} |
||||
|
||||
// remove padding
|
||||
pkt.Header.Padding = false |
||||
pkt.PaddingSize = 0 |
||||
|
||||
if pkt.MarshalSize() > t.udpMaxPayloadSize { |
||||
return nil, fmt.Errorf("payload size (%d) is greater than maximum allowed (%d)", |
||||
pkt.MarshalSize(), t.udpMaxPayloadSize) |
||||
} |
||||
|
||||
// decode from RTP
|
||||
if hasNonRTSPReaders || t.decoder != nil { |
||||
if t.decoder == nil { |
||||
var err error |
||||
t.decoder, err = t.format.CreateDecoder() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
frames, err := t.decoder.Decode(pkt) |
||||
if err != nil { |
||||
if errors.Is(err, rtpac3.ErrNonStartingPacketAndNoPrevious) || |
||||
errors.Is(err, rtpac3.ErrMorePacketsNeeded) { |
||||
return u, nil |
||||
} |
||||
return nil, err |
||||
} |
||||
|
||||
u.Frames = frames |
||||
} |
||||
|
||||
// route packet as is
|
||||
return u, nil |
||||
} |
||||
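Each per-codec processor in this package follows the pattern above. A hedged sketch of driving one through the package-level New() constructor used by the tests below; the AC-3 format values and payload bytes are placeholders, and the internal import paths resolve only inside the MediaMTX module.

package main

import (
	"time"

	"github.com/bluenviron/gortsplib/v4/pkg/format"
	"github.com/pion/rtp"

	"github.com/bluenviron/mediamtx/internal/formatprocessor"
)

func main() {
	// placeholder AC-3 track description
	forma := &format.AC3{
		PayloadTyp:   96,
		SampleRate:   48000,
		ChannelCount: 2,
	}

	// 1472 is the UDP maximum payload size used by the tests below;
	// generateRTPPackets=false because packets arrive already encoded
	p, err := formatprocessor.New(1472, forma, false)
	if err != nil {
		panic(err)
	}

	pkt := &rtp.Packet{
		Header: rtp.Header{
			Version:     2,
			PayloadType: 96,
		},
		Payload: []byte{0x0b, 0x77}, // placeholder payload
	}

	// with hasNonRTSPReaders=false the packet is only validated (padding
	// stripped, size checked) and routed as-is inside the returned unit
	u, err := p.ProcessRTPPacket(pkt, time.Now(), 0, false)
	if err != nil {
		panic(err)
	}
	_ = u.GetRTPPackets()
}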
@ -1,124 +0,0 @@
@@ -1,124 +0,0 @@
|
||||
package formatprocessor //nolint:dupl
|
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/format/rtpav1" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/rtptime" |
||||
"github.com/pion/rtp" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/unit" |
||||
) |
||||
|
||||
type formatProcessorAV1 struct { |
||||
udpMaxPayloadSize int |
||||
format *format.AV1 |
||||
timeEncoder *rtptime.Encoder |
||||
encoder *rtpav1.Encoder |
||||
decoder *rtpav1.Decoder |
||||
} |
||||
|
||||
func newAV1( |
||||
udpMaxPayloadSize int, |
||||
forma *format.AV1, |
||||
generateRTPPackets bool, |
||||
) (*formatProcessorAV1, error) { |
||||
t := &formatProcessorAV1{ |
||||
udpMaxPayloadSize: udpMaxPayloadSize, |
||||
format: forma, |
||||
} |
||||
|
||||
if generateRTPPackets { |
||||
err := t.createEncoder() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
t.timeEncoder = &rtptime.Encoder{ |
||||
ClockRate: forma.ClockRate(), |
||||
} |
||||
err = t.timeEncoder.Initialize() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
return t, nil |
||||
} |
||||
|
||||
func (t *formatProcessorAV1) createEncoder() error { |
||||
t.encoder = &rtpav1.Encoder{ |
||||
PayloadMaxSize: t.udpMaxPayloadSize - 12, |
||||
PayloadType: t.format.PayloadTyp, |
||||
} |
||||
return t.encoder.Init() |
||||
} |
||||
|
||||
func (t *formatProcessorAV1) ProcessUnit(uu unit.Unit) error { //nolint:dupl
|
||||
u := uu.(*unit.AV1) |
||||
|
||||
pkts, err := t.encoder.Encode(u.TU) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
u.RTPPackets = pkts |
||||
|
||||
ts := t.timeEncoder.Encode(u.PTS) |
||||
for _, pkt := range u.RTPPackets { |
||||
pkt.Timestamp += ts |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (t *formatProcessorAV1) ProcessRTPPacket( //nolint:dupl
|
||||
pkt *rtp.Packet, |
||||
ntp time.Time, |
||||
pts time.Duration, |
||||
hasNonRTSPReaders bool, |
||||
) (Unit, error) { |
||||
u := &unit.AV1{ |
||||
Base: unit.Base{ |
||||
RTPPackets: []*rtp.Packet{pkt}, |
||||
NTP: ntp, |
||||
PTS: pts, |
||||
}, |
||||
} |
||||
|
||||
// remove padding
|
||||
pkt.Header.Padding = false |
||||
pkt.PaddingSize = 0 |
||||
|
||||
if pkt.MarshalSize() > t.udpMaxPayloadSize { |
||||
return nil, fmt.Errorf("payload size (%d) is greater than maximum allowed (%d)", |
||||
pkt.MarshalSize(), t.udpMaxPayloadSize) |
||||
} |
||||
|
||||
// decode from RTP
|
||||
if hasNonRTSPReaders || t.decoder != nil { |
||||
if t.decoder == nil { |
||||
var err error |
||||
t.decoder, err = t.format.CreateDecoder() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
tu, err := t.decoder.Decode(pkt) |
||||
if err != nil { |
||||
if errors.Is(err, rtpav1.ErrNonStartingPacketAndNoPrevious) || |
||||
errors.Is(err, rtpav1.ErrMorePacketsNeeded) { |
||||
return u, nil |
||||
} |
||||
return nil, err |
||||
} |
||||
|
||||
u.TU = tu |
||||
} |
||||
|
||||
// route packet as is
|
||||
return u, nil |
||||
} |
||||
@ -1,121 +0,0 @@
@@ -1,121 +0,0 @@
|
||||
package formatprocessor //nolint:dupl
|
||||
|
||||
import ( |
||||
"fmt" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/format/rtplpcm" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/rtptime" |
||||
"github.com/pion/rtp" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/unit" |
||||
) |
||||
|
||||
type formatProcessorG711 struct { |
||||
udpMaxPayloadSize int |
||||
format *format.G711 |
||||
timeEncoder *rtptime.Encoder |
||||
encoder *rtplpcm.Encoder |
||||
decoder *rtplpcm.Decoder |
||||
} |
||||
|
||||
func newG711( |
||||
udpMaxPayloadSize int, |
||||
forma *format.G711, |
||||
generateRTPPackets bool, |
||||
) (*formatProcessorG711, error) { |
||||
t := &formatProcessorG711{ |
||||
udpMaxPayloadSize: udpMaxPayloadSize, |
||||
format: forma, |
||||
} |
||||
|
||||
if generateRTPPackets { |
||||
err := t.createEncoder() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
t.timeEncoder = &rtptime.Encoder{ |
||||
ClockRate: forma.ClockRate(), |
||||
} |
||||
err = t.timeEncoder.Initialize() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
return t, nil |
||||
} |
||||
|
||||
func (t *formatProcessorG711) createEncoder() error { |
||||
t.encoder = &rtplpcm.Encoder{ |
||||
PayloadMaxSize: t.udpMaxPayloadSize - 12, |
||||
PayloadType: t.format.PayloadType(), |
||||
BitDepth: 8, |
||||
ChannelCount: t.format.ChannelCount, |
||||
} |
||||
return t.encoder.Init() |
||||
} |
||||
|
||||
func (t *formatProcessorG711) ProcessUnit(uu unit.Unit) error { //nolint:dupl
|
||||
u := uu.(*unit.G711) |
||||
|
||||
pkts, err := t.encoder.Encode(u.Samples) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
u.RTPPackets = pkts |
||||
|
||||
ts := t.timeEncoder.Encode(u.PTS) |
||||
for _, pkt := range u.RTPPackets { |
||||
pkt.Timestamp += ts |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (t *formatProcessorG711) ProcessRTPPacket( //nolint:dupl
|
||||
pkt *rtp.Packet, |
||||
ntp time.Time, |
||||
pts time.Duration, |
||||
hasNonRTSPReaders bool, |
||||
) (Unit, error) { |
||||
u := &unit.G711{ |
||||
Base: unit.Base{ |
||||
RTPPackets: []*rtp.Packet{pkt}, |
||||
NTP: ntp, |
||||
PTS: pts, |
||||
}, |
||||
} |
||||
|
||||
// remove padding
|
||||
pkt.Header.Padding = false |
||||
pkt.PaddingSize = 0 |
||||
|
||||
if pkt.MarshalSize() > t.udpMaxPayloadSize { |
||||
return nil, fmt.Errorf("payload size (%d) is greater than maximum allowed (%d)", |
||||
pkt.MarshalSize(), t.udpMaxPayloadSize) |
||||
} |
||||
|
||||
// decode from RTP
|
||||
if hasNonRTSPReaders || t.decoder != nil { |
||||
if t.decoder == nil { |
||||
var err error |
||||
t.decoder, err = t.format.CreateDecoder() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
samples, err := t.decoder.Decode(pkt) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
u.Samples = samples |
||||
} |
||||
|
||||
// route packet as is
|
||||
return u, nil |
||||
} |
||||
@ -1,70 +0,0 @@
@@ -1,70 +0,0 @@
|
||||
package formatprocessor |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/bluenviron/mediamtx/internal/unit" |
||||
"github.com/pion/rtp" |
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
func TestG711Encode(t *testing.T) {
||||
t.Run("alaw", func(t *testing.T) { |
||||
forma := &format.G711{ |
||||
PayloadTyp: 8, |
||||
MULaw: false, |
||||
SampleRate: 8000, |
||||
ChannelCount: 1, |
||||
} |
||||
|
||||
p, err := New(1472, forma, true) |
||||
require.NoError(t, err) |
||||
|
||||
unit := &unit.G711{ |
||||
Samples: []byte{1, 2, 3, 4}, |
||||
} |
||||
|
||||
err = p.ProcessUnit(unit) |
||||
require.NoError(t, err) |
||||
require.Equal(t, []*rtp.Packet{{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
PayloadType: 8, |
||||
SequenceNumber: unit.RTPPackets[0].SequenceNumber, |
||||
Timestamp: unit.RTPPackets[0].Timestamp, |
||||
SSRC: unit.RTPPackets[0].SSRC, |
||||
}, |
||||
Payload: []byte{1, 2, 3, 4}, |
||||
}}, unit.RTPPackets) |
||||
}) |
||||
|
||||
t.Run("mulaw", func(t *testing.T) { |
||||
forma := &format.G711{ |
||||
PayloadTyp: 0, |
||||
MULaw: true, |
||||
SampleRate: 8000, |
||||
ChannelCount: 1, |
||||
} |
||||
|
||||
p, err := New(1472, forma, true) |
||||
require.NoError(t, err) |
||||
|
||||
unit := &unit.G711{ |
||||
Samples: []byte{1, 2, 3, 4}, |
||||
} |
||||
|
||||
err = p.ProcessUnit(unit) |
||||
require.NoError(t, err) |
||||
require.Equal(t, []*rtp.Packet{{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
PayloadType: 0, |
||||
SequenceNumber: unit.RTPPackets[0].SequenceNumber, |
||||
Timestamp: unit.RTPPackets[0].Timestamp, |
||||
SSRC: unit.RTPPackets[0].SSRC, |
||||
}, |
||||
Payload: []byte{1, 2, 3, 4}, |
||||
}}, unit.RTPPackets) |
||||
}) |
||||
} |
||||
@ -1,59 +0,0 @@
@@ -1,59 +0,0 @@
|
||||
package formatprocessor |
||||
|
||||
import ( |
||||
"fmt" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/pion/rtp" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/unit" |
||||
) |
||||
|
||||
type formatProcessorGeneric struct { |
||||
udpMaxPayloadSize int |
||||
} |
||||
|
||||
func newGeneric( |
||||
udpMaxPayloadSize int, |
||||
forma format.Format, |
||||
generateRTPPackets bool, |
||||
) (*formatProcessorGeneric, error) { |
||||
if generateRTPPackets { |
||||
return nil, fmt.Errorf("we don't know how to generate RTP packets of format %+v", forma) |
||||
} |
||||
|
||||
return &formatProcessorGeneric{ |
||||
udpMaxPayloadSize: udpMaxPayloadSize, |
||||
}, nil |
||||
} |
||||
|
||||
func (t *formatProcessorGeneric) ProcessUnit(_ unit.Unit) error { |
||||
return fmt.Errorf("using a generic unit without RTP is not supported") |
||||
} |
||||
|
||||
func (t *formatProcessorGeneric) ProcessRTPPacket( |
||||
pkt *rtp.Packet, |
||||
ntp time.Time, |
||||
pts time.Duration, |
||||
_ bool, |
||||
) (Unit, error) { |
||||
u := &unit.Generic{ |
||||
Base: unit.Base{ |
||||
RTPPackets: []*rtp.Packet{pkt}, |
||||
NTP: ntp, |
||||
PTS: pts, |
||||
}, |
||||
} |
||||
|
||||
// remove padding
|
||||
pkt.Header.Padding = false |
||||
pkt.PaddingSize = 0 |
||||
|
||||
if pkt.MarshalSize() > t.udpMaxPayloadSize { |
||||
return nil, fmt.Errorf("payload size (%d) is greater than maximum allowed (%d)", |
||||
pkt.MarshalSize(), t.udpMaxPayloadSize) |
||||
} |
||||
|
||||
return u, nil |
||||
} |
||||
@ -1,51 +0,0 @@
@@ -1,51 +0,0 @@
|
||||
package formatprocessor |
||||
|
||||
import ( |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/pion/rtp" |
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
func TestGenericRemovePadding(t *testing.T) { |
||||
forma := &format.Generic{ |
||||
PayloadTyp: 96, |
||||
RTPMa: "private/90000", |
||||
} |
||||
err := forma.Init() |
||||
require.NoError(t, err) |
||||
|
||||
p, err := New(1472, forma, false) |
||||
require.NoError(t, err) |
||||
|
||||
pkt := &rtp.Packet{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 123, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
Padding: true, |
||||
}, |
||||
Payload: []byte{1, 2, 3, 4}, |
||||
PaddingSize: 20, |
||||
} |
||||
|
||||
_, err = p.ProcessRTPPacket(pkt, time.Time{}, 0, false) |
||||
require.NoError(t, err) |
||||
|
||||
require.Equal(t, &rtp.Packet{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 123, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
}, |
||||
Payload: []byte{1, 2, 3, 4}, |
||||
}, pkt) |
||||
} |
||||
@ -1,325 +0,0 @@
@@ -1,325 +0,0 @@
|
||||
package formatprocessor |
||||
|
||||
import ( |
||||
"bytes" |
||||
"errors" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/format/rtph264" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/rtptime" |
||||
"github.com/bluenviron/mediacommon/pkg/codecs/h264" |
||||
"github.com/pion/rtp" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/unit" |
||||
) |
||||
|
||||
// extract SPS and PPS without decoding RTP packets
|
||||
func rtpH264ExtractParams(payload []byte) ([]byte, []byte) { |
||||
if len(payload) < 1 { |
||||
return nil, nil |
||||
} |
||||
|
||||
typ := h264.NALUType(payload[0] & 0x1F) |
||||
|
||||
switch typ { |
||||
case h264.NALUTypeSPS: |
||||
return payload, nil |
||||
|
||||
case h264.NALUTypePPS: |
||||
return nil, payload |
||||
|
||||
case h264.NALUTypeSTAPA: |
||||
payload := payload[1:] |
||||
var sps []byte |
||||
var pps []byte |
||||
|
||||
for len(payload) > 0 { |
||||
if len(payload) < 2 { |
||||
break |
||||
} |
||||
|
||||
size := uint16(payload[0])<<8 | uint16(payload[1]) |
||||
payload = payload[2:] |
||||
|
||||
if size == 0 { |
||||
break |
||||
} |
||||
|
||||
if int(size) > len(payload) { |
||||
return nil, nil |
||||
} |
||||
|
||||
nalu := payload[:size] |
||||
payload = payload[size:] |
||||
|
||||
typ = h264.NALUType(nalu[0] & 0x1F) |
||||
|
||||
switch typ { |
||||
case h264.NALUTypeSPS: |
||||
sps = nalu |
||||
|
||||
case h264.NALUTypePPS: |
||||
pps = nalu |
||||
} |
||||
} |
||||
|
||||
return sps, pps |
||||
|
||||
default: |
||||
return nil, nil |
||||
} |
||||
} |
||||
|
||||
type formatProcessorH264 struct { |
||||
udpMaxPayloadSize int |
||||
format *format.H264 |
||||
timeEncoder *rtptime.Encoder |
||||
encoder *rtph264.Encoder |
||||
decoder *rtph264.Decoder |
||||
} |
||||
|
||||
func newH264( |
||||
udpMaxPayloadSize int, |
||||
forma *format.H264, |
||||
generateRTPPackets bool, |
||||
) (*formatProcessorH264, error) { |
||||
t := &formatProcessorH264{ |
||||
udpMaxPayloadSize: udpMaxPayloadSize, |
||||
format: forma, |
||||
} |
||||
|
||||
if generateRTPPackets { |
||||
err := t.createEncoder(nil, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
t.timeEncoder = &rtptime.Encoder{ |
||||
ClockRate: forma.ClockRate(), |
||||
} |
||||
err = t.timeEncoder.Initialize() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
return t, nil |
||||
} |
||||
|
||||
func (t *formatProcessorH264) createEncoder( |
||||
ssrc *uint32, |
||||
initialSequenceNumber *uint16, |
||||
) error { |
||||
t.encoder = &rtph264.Encoder{ |
||||
PayloadMaxSize: t.udpMaxPayloadSize - 12, |
||||
PayloadType: t.format.PayloadTyp, |
||||
SSRC: ssrc, |
||||
InitialSequenceNumber: initialSequenceNumber, |
||||
PacketizationMode: t.format.PacketizationMode, |
||||
} |
||||
return t.encoder.Init() |
||||
} |
||||
|
||||
func (t *formatProcessorH264) updateTrackParametersFromRTPPacket(payload []byte) { |
||||
sps, pps := rtpH264ExtractParams(payload) |
||||
|
||||
if (sps != nil && !bytes.Equal(sps, t.format.SPS)) || |
||||
(pps != nil && !bytes.Equal(pps, t.format.PPS)) { |
||||
if sps == nil { |
||||
sps = t.format.SPS |
||||
} |
||||
if pps == nil { |
||||
pps = t.format.PPS |
||||
} |
||||
t.format.SafeSetParams(sps, pps) |
||||
} |
||||
} |
||||
|
||||
func (t *formatProcessorH264) updateTrackParametersFromAU(au [][]byte) { |
||||
sps := t.format.SPS |
||||
pps := t.format.PPS |
||||
update := false |
||||
|
||||
for _, nalu := range au { |
||||
typ := h264.NALUType(nalu[0] & 0x1F) |
||||
|
||||
switch typ { |
||||
case h264.NALUTypeSPS: |
||||
if !bytes.Equal(nalu, sps) { |
||||
sps = nalu |
||||
update = true |
||||
} |
||||
|
||||
case h264.NALUTypePPS: |
||||
if !bytes.Equal(nalu, pps) { |
||||
pps = nalu |
||||
update = true |
||||
} |
||||
} |
||||
} |
||||
|
||||
if update { |
||||
t.format.SafeSetParams(sps, pps) |
||||
} |
||||
} |
||||
|
||||
func (t *formatProcessorH264) remuxAccessUnit(au [][]byte) [][]byte { |
||||
isKeyFrame := false |
||||
n := 0 |
||||
|
||||
for _, nalu := range au { |
||||
typ := h264.NALUType(nalu[0] & 0x1F) |
||||
|
||||
switch typ { |
||||
case h264.NALUTypeSPS, h264.NALUTypePPS: // parameters: remove
|
||||
continue |
||||
|
||||
case h264.NALUTypeAccessUnitDelimiter: // AUD: remove
|
||||
continue |
||||
|
||||
case h264.NALUTypeIDR: // key frame
|
||||
if !isKeyFrame { |
||||
isKeyFrame = true |
||||
|
||||
// prepend parameters
|
||||
if t.format.SPS != nil && t.format.PPS != nil { |
||||
n += 2 |
||||
} |
||||
} |
||||
} |
||||
n++ |
||||
} |
||||
|
||||
if n == 0 { |
||||
return nil |
||||
} |
||||
|
||||
filteredNALUs := make([][]byte, n) |
||||
i := 0 |
||||
|
||||
if isKeyFrame && t.format.SPS != nil && t.format.PPS != nil { |
||||
filteredNALUs[0] = t.format.SPS |
||||
filteredNALUs[1] = t.format.PPS |
||||
i = 2 |
||||
} |
||||
|
||||
for _, nalu := range au { |
||||
typ := h264.NALUType(nalu[0] & 0x1F) |
||||
|
||||
switch typ { |
||||
case h264.NALUTypeSPS, h264.NALUTypePPS: |
||||
continue |
||||
|
||||
case h264.NALUTypeAccessUnitDelimiter: |
||||
continue |
||||
} |
||||
|
||||
filteredNALUs[i] = nalu |
||||
i++ |
||||
} |
||||
|
||||
return filteredNALUs |
||||
} |
||||
|
||||
func (t *formatProcessorH264) ProcessUnit(uu unit.Unit) error { |
||||
u := uu.(*unit.H264) |
||||
|
||||
t.updateTrackParametersFromAU(u.AU) |
||||
u.AU = t.remuxAccessUnit(u.AU) |
||||
|
||||
if u.AU != nil { |
||||
pkts, err := t.encoder.Encode(u.AU) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
u.RTPPackets = pkts |
||||
|
||||
ts := t.timeEncoder.Encode(u.PTS) |
||||
for _, pkt := range u.RTPPackets { |
||||
pkt.Timestamp += ts |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (t *formatProcessorH264) ProcessRTPPacket( //nolint:dupl
|
||||
pkt *rtp.Packet, |
||||
ntp time.Time, |
||||
pts time.Duration, |
||||
hasNonRTSPReaders bool, |
||||
) (Unit, error) { |
||||
u := &unit.H264{ |
||||
Base: unit.Base{ |
||||
RTPPackets: []*rtp.Packet{pkt}, |
||||
NTP: ntp, |
||||
PTS: pts, |
||||
}, |
||||
} |
||||
|
||||
t.updateTrackParametersFromRTPPacket(pkt.Payload) |
||||
|
||||
if t.encoder == nil { |
||||
// remove padding
|
||||
pkt.Header.Padding = false |
||||
pkt.PaddingSize = 0 |
||||
|
||||
// RTP packets exceed maximum size: start re-encoding them
|
||||
if pkt.MarshalSize() > t.udpMaxPayloadSize { |
||||
v1 := pkt.SSRC |
||||
v2 := pkt.SequenceNumber |
||||
err := t.createEncoder(&v1, &v2) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
} |
||||
|
||||
// decode from RTP
|
||||
if hasNonRTSPReaders || t.decoder != nil || t.encoder != nil { |
||||
if t.decoder == nil { |
||||
var err error |
||||
t.decoder, err = t.format.CreateDecoder() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
au, err := t.decoder.Decode(pkt) |
||||
|
||||
if t.encoder != nil { |
||||
u.RTPPackets = nil |
||||
} |
||||
|
||||
if err != nil { |
||||
if errors.Is(err, rtph264.ErrNonStartingPacketAndNoPrevious) || |
||||
errors.Is(err, rtph264.ErrMorePacketsNeeded) { |
||||
return u, nil |
||||
} |
||||
return nil, err |
||||
} |
||||
|
||||
u.AU = t.remuxAccessUnit(au) |
||||
} |
||||
|
||||
// route packet as is
|
||||
if t.encoder == nil { |
||||
return u, nil |
||||
} |
||||
|
||||
// encode into RTP
|
||||
if len(u.AU) != 0 { |
||||
pkts, err := t.encoder.Encode(u.AU) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
u.RTPPackets = pkts |
||||
|
||||
for _, newPKT := range u.RTPPackets { |
||||
newPKT.Timestamp = pkt.Timestamp |
||||
} |
||||
} |
||||
|
||||
return u, nil |
||||
} |
||||
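A sketch of the publish-side path through this processor, assuming the same package-level New() used by the tests below; the NALU bytes are placeholders and the internal import paths work only inside the MediaMTX module.

package main

import (
	"github.com/bluenviron/gortsplib/v4/pkg/format"

	"github.com/bluenviron/mediamtx/internal/formatprocessor"
	"github.com/bluenviron/mediamtx/internal/unit"
)

func main() {
	forma := &format.H264{
		PayloadTyp:        96,
		PacketizationMode: 1,
	}

	// generateRTPPackets=true: the processor owns the RTP encoder and the
	// timestamp generator
	p, err := formatprocessor.New(1472, forma, true)
	if err != nil {
		panic(err)
	}

	// an access unit with SPS, PPS and an IDR slice (placeholder bytes);
	// the processor caches the parameters into the format, strips them from
	// the unit and re-adds them in front of every key frame
	u := &unit.H264{
		AU: [][]byte{
			{0x67, 0x42, 0x00, 0x1f}, // SPS
			{0x68, 0xce, 0x3c, 0x80}, // PPS
			{0x65, 0x88, 0x84, 0x00}, // IDR slice
		},
	}

	err = p.ProcessUnit(u)
	if err != nil {
		panic(err)
	}

	_ = u.RTPPackets // packets ready to be routed to readers
}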
@ -1,208 +0,0 @@
@@ -1,208 +0,0 @@
|
||||
package formatprocessor |
||||
|
||||
import ( |
||||
"bytes" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/bluenviron/mediacommon/pkg/codecs/h264" |
||||
"github.com/pion/rtp" |
||||
"github.com/stretchr/testify/require" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/unit" |
||||
) |
||||
|
||||
func TestH264DynamicParams(t *testing.T) { |
||||
for _, ca := range []string{"standard", "aggregated"} { |
||||
t.Run(ca, func(t *testing.T) { |
||||
forma := &format.H264{ |
||||
PayloadTyp: 96, |
||||
PacketizationMode: 1, |
||||
} |
||||
|
||||
p, err := New(1472, forma, false) |
||||
require.NoError(t, err) |
||||
|
||||
enc, err := forma.CreateEncoder() |
||||
require.NoError(t, err) |
||||
|
||||
pkts, err := enc.Encode([][]byte{{byte(h264.NALUTypeIDR)}}) |
||||
require.NoError(t, err) |
||||
|
||||
data, err := p.ProcessRTPPacket(pkts[0], time.Time{}, 0, true) |
||||
require.NoError(t, err) |
||||
|
||||
require.Equal(t, [][]byte{ |
||||
{byte(h264.NALUTypeIDR)}, |
||||
}, data.(*unit.H264).AU) |
||||
|
||||
if ca == "standard" { |
||||
pkts, err = enc.Encode([][]byte{{7, 4, 5, 6}}) // SPS
|
||||
require.NoError(t, err) |
||||
|
||||
_, err = p.ProcessRTPPacket(pkts[0], time.Time{}, 0, false) |
||||
require.NoError(t, err) |
||||
|
||||
pkts, err = enc.Encode([][]byte{{8, 1}}) // PPS
|
||||
require.NoError(t, err) |
||||
|
||||
_, err = p.ProcessRTPPacket(pkts[0], time.Time{}, 0, false) |
||||
require.NoError(t, err) |
||||
} else { |
||||
pkts, err = enc.Encode([][]byte{ |
||||
{7, 4, 5, 6}, // SPS
|
||||
{8, 1}, // PPS
|
||||
}) |
||||
require.NoError(t, err) |
||||
|
||||
_, err = p.ProcessRTPPacket(pkts[0], time.Time{}, 0, false) |
||||
require.NoError(t, err) |
||||
} |
||||
|
||||
require.Equal(t, []byte{7, 4, 5, 6}, forma.SPS) |
||||
require.Equal(t, []byte{8, 1}, forma.PPS) |
||||
|
||||
pkts, err = enc.Encode([][]byte{{byte(h264.NALUTypeIDR)}}) |
||||
require.NoError(t, err) |
||||
|
||||
data, err = p.ProcessRTPPacket(pkts[0], time.Time{}, 0, true) |
||||
require.NoError(t, err) |
||||
|
||||
require.Equal(t, [][]byte{ |
||||
{0x07, 4, 5, 6}, |
||||
{0x08, 1}, |
||||
{byte(h264.NALUTypeIDR)}, |
||||
}, data.(*unit.H264).AU) |
||||
}) |
||||
} |
||||
} |
||||
|
||||
func TestH264OversizedPackets(t *testing.T) { |
||||
forma := &format.H264{ |
||||
PayloadTyp: 96, |
||||
SPS: []byte{0x01, 0x02, 0x03, 0x04}, |
||||
PPS: []byte{0x01, 0x02, 0x03, 0x04}, |
||||
PacketizationMode: 1, |
||||
} |
||||
|
||||
p, err := New(1472, forma, false) |
||||
require.NoError(t, err) |
||||
|
||||
var out []*rtp.Packet |
||||
|
||||
for _, pkt := range []*rtp.Packet{ |
||||
{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 123, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
Padding: true, |
||||
}, |
||||
Payload: []byte{0x01, 0x02, 0x03, 0x04}, |
||||
}, |
||||
{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: false, |
||||
PayloadType: 96, |
||||
SequenceNumber: 124, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
Padding: true, |
||||
}, |
||||
Payload: append([]byte{0x1c, 0b10000000}, bytes.Repeat([]byte{0x01, 0x02, 0x03, 0x04}, 2000/4)...), |
||||
}, |
||||
{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 125, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
Padding: true, |
||||
}, |
||||
Payload: []byte{0x1c, 0b01000000, 0x01, 0x02, 0x03, 0x04}, |
||||
}, |
||||
} { |
||||
data, err := p.ProcessRTPPacket(pkt, time.Time{}, 0, false) |
||||
require.NoError(t, err) |
||||
|
||||
out = append(out, data.GetRTPPackets()...) |
||||
} |
||||
|
||||
require.Equal(t, []*rtp.Packet{ |
||||
{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 123, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
}, |
||||
Payload: []byte{0x01, 0x02, 0x03, 0x04}, |
||||
}, |
||||
{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: false, |
||||
PayloadType: 96, |
||||
SequenceNumber: 124, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
}, |
||||
Payload: append( |
||||
append([]byte{0x1c, 0x80}, bytes.Repeat([]byte{0x01, 0x02, 0x03, 0x04}, 364)...), |
||||
[]byte{0x01, 0x02}..., |
||||
), |
||||
}, |
||||
{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 125, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
}, |
||||
Payload: append( |
||||
[]byte{0x1c, 0x40, 0x03, 0x04}, |
||||
bytes.Repeat([]byte{0x01, 0x02, 0x03, 0x04}, 136)..., |
||||
), |
||||
}, |
||||
}, out) |
||||
} |
||||
|
||||
func TestH264EmptyPacket(t *testing.T) { |
||||
forma := &format.H264{ |
||||
PayloadTyp: 96, |
||||
PacketizationMode: 1, |
||||
} |
||||
|
||||
p, err := New(1472, forma, true) |
||||
require.NoError(t, err) |
||||
|
||||
unit := &unit.H264{ |
||||
AU: [][]byte{ |
||||
{0x07, 0x01, 0x02, 0x03}, // SPS
|
||||
{0x08, 0x01, 0x02}, // PPS
|
||||
}, |
||||
} |
||||
|
||||
err = p.ProcessUnit(unit) |
||||
require.NoError(t, err) |
||||
|
||||
// if all NALUs have been removed, no RTP packets must be generated.
|
||||
require.Equal(t, []*rtp.Packet(nil), unit.RTPPackets) |
||||
} |
||||
|
||||
func FuzzRTPH264ExtractParams(f *testing.F) { |
||||
f.Fuzz(func(_ *testing.T, b []byte) { |
||||
rtpH264ExtractParams(b) |
||||
}) |
||||
} |
||||
@ -1,344 +0,0 @@
@@ -1,344 +0,0 @@
|
||||
package formatprocessor |
||||
|
||||
import ( |
||||
"bytes" |
||||
"errors" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/format/rtph265" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/rtptime" |
||||
"github.com/bluenviron/mediacommon/pkg/codecs/h265" |
||||
"github.com/pion/rtp" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/unit" |
||||
) |
||||
|
||||
// extract VPS, SPS and PPS without decoding RTP packets
|
||||
func rtpH265ExtractParams(payload []byte) ([]byte, []byte, []byte) { |
||||
if len(payload) < 2 { |
||||
return nil, nil, nil |
||||
} |
||||
|
||||
typ := h265.NALUType((payload[0] >> 1) & 0b111111) |
||||
|
||||
switch typ { |
||||
case h265.NALUType_VPS_NUT: |
||||
return payload, nil, nil |
||||
|
||||
case h265.NALUType_SPS_NUT: |
||||
return nil, payload, nil |
||||
|
||||
case h265.NALUType_PPS_NUT: |
||||
return nil, nil, payload |
||||
|
||||
case h265.NALUType_AggregationUnit: |
||||
payload := payload[2:] |
||||
var vps []byte |
||||
var sps []byte |
||||
var pps []byte |
||||
|
||||
for len(payload) > 0 { |
||||
if len(payload) < 2 { |
||||
break |
||||
} |
||||
|
||||
size := uint16(payload[0])<<8 | uint16(payload[1]) |
||||
payload = payload[2:] |
||||
|
||||
if size == 0 { |
||||
break |
||||
} |
||||
|
||||
if int(size) > len(payload) { |
||||
return nil, nil, nil |
||||
} |
||||
|
||||
nalu := payload[:size] |
||||
payload = payload[size:] |
||||
|
||||
typ = h265.NALUType((nalu[0] >> 1) & 0b111111) |
||||
|
||||
switch typ { |
||||
case h265.NALUType_VPS_NUT: |
||||
vps = nalu |
||||
|
||||
case h265.NALUType_SPS_NUT: |
||||
sps = nalu |
||||
|
||||
case h265.NALUType_PPS_NUT: |
||||
pps = nalu |
||||
} |
||||
} |
||||
|
||||
return vps, sps, pps |
||||
|
||||
default: |
||||
return nil, nil, nil |
||||
} |
||||
} |
||||
|
||||
type formatProcessorH265 struct { |
||||
udpMaxPayloadSize int |
||||
format *format.H265 |
||||
timeEncoder *rtptime.Encoder |
||||
encoder *rtph265.Encoder |
||||
decoder *rtph265.Decoder |
||||
} |
||||
|
||||
func newH265( |
||||
udpMaxPayloadSize int, |
||||
forma *format.H265, |
||||
generateRTPPackets bool, |
||||
) (*formatProcessorH265, error) { |
||||
t := &formatProcessorH265{ |
||||
udpMaxPayloadSize: udpMaxPayloadSize, |
||||
format: forma, |
||||
} |
||||
|
||||
if generateRTPPackets { |
||||
err := t.createEncoder(nil, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
t.timeEncoder = &rtptime.Encoder{ |
||||
ClockRate: forma.ClockRate(), |
||||
} |
||||
err = t.timeEncoder.Initialize() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
return t, nil |
||||
} |
||||
|
||||
func (t *formatProcessorH265) createEncoder( |
||||
ssrc *uint32, |
||||
initialSequenceNumber *uint16, |
||||
) error { |
||||
t.encoder = &rtph265.Encoder{ |
||||
PayloadMaxSize: t.udpMaxPayloadSize - 12, |
||||
PayloadType: t.format.PayloadTyp, |
||||
SSRC: ssrc, |
||||
InitialSequenceNumber: initialSequenceNumber, |
||||
MaxDONDiff: t.format.MaxDONDiff, |
||||
} |
||||
return t.encoder.Init() |
||||
} |
||||
|
||||
func (t *formatProcessorH265) updateTrackParametersFromRTPPacket(payload []byte) { |
||||
vps, sps, pps := rtpH265ExtractParams(payload) |
||||
|
||||
if (vps != nil && !bytes.Equal(vps, t.format.VPS)) || |
||||
(sps != nil && !bytes.Equal(sps, t.format.SPS)) || |
||||
(pps != nil && !bytes.Equal(pps, t.format.PPS)) { |
||||
if vps == nil { |
||||
vps = t.format.VPS |
||||
} |
||||
if sps == nil { |
||||
sps = t.format.SPS |
||||
} |
||||
if pps == nil { |
||||
pps = t.format.PPS |
||||
} |
||||
t.format.SafeSetParams(vps, sps, pps) |
||||
} |
||||
} |
||||
|
||||
func (t *formatProcessorH265) updateTrackParametersFromAU(au [][]byte) { |
||||
vps := t.format.VPS |
||||
sps := t.format.SPS |
||||
pps := t.format.PPS |
||||
update := false |
||||
|
||||
for _, nalu := range au { |
||||
typ := h265.NALUType((nalu[0] >> 1) & 0b111111) |
||||
|
||||
switch typ { |
||||
case h265.NALUType_VPS_NUT: |
||||
if !bytes.Equal(nalu, t.format.VPS) { |
||||
vps = nalu |
||||
update = true |
||||
} |
||||
|
||||
case h265.NALUType_SPS_NUT: |
||||
if !bytes.Equal(nalu, t.format.SPS) { |
||||
sps = nalu |
||||
update = true |
||||
} |
||||
|
||||
case h265.NALUType_PPS_NUT: |
||||
if !bytes.Equal(nalu, t.format.PPS) { |
||||
pps = nalu |
||||
update = true |
||||
} |
||||
} |
||||
} |
||||
|
||||
if update { |
||||
t.format.SafeSetParams(vps, sps, pps) |
||||
} |
||||
} |
||||
|
||||
func (t *formatProcessorH265) remuxAccessUnit(au [][]byte) [][]byte { |
||||
isKeyFrame := false |
||||
n := 0 |
||||
|
||||
for _, nalu := range au { |
||||
typ := h265.NALUType((nalu[0] >> 1) & 0b111111) |
||||
|
||||
switch typ { |
||||
case h265.NALUType_VPS_NUT, h265.NALUType_SPS_NUT, h265.NALUType_PPS_NUT: // parameters: remove
|
||||
continue |
||||
|
||||
case h265.NALUType_AUD_NUT: // AUD: remove
|
||||
continue |
||||
|
||||
case h265.NALUType_IDR_W_RADL, h265.NALUType_IDR_N_LP, h265.NALUType_CRA_NUT: // key frame
|
||||
if !isKeyFrame { |
||||
isKeyFrame = true |
||||
|
||||
// prepend parameters
|
||||
if t.format.VPS != nil && t.format.SPS != nil && t.format.PPS != nil { |
||||
n += 3 |
||||
} |
||||
} |
||||
} |
||||
n++ |
||||
} |
||||
|
||||
if n == 0 { |
||||
return nil |
||||
} |
||||
|
||||
filteredNALUs := make([][]byte, n) |
||||
i := 0 |
||||
|
||||
if isKeyFrame && t.format.VPS != nil && t.format.SPS != nil && t.format.PPS != nil { |
||||
filteredNALUs[0] = t.format.VPS |
||||
filteredNALUs[1] = t.format.SPS |
||||
filteredNALUs[2] = t.format.PPS |
||||
i = 3 |
||||
} |
||||
|
||||
for _, nalu := range au { |
||||
typ := h265.NALUType((nalu[0] >> 1) & 0b111111) |
||||
|
||||
switch typ { |
||||
case h265.NALUType_VPS_NUT, h265.NALUType_SPS_NUT, h265.NALUType_PPS_NUT: |
||||
continue |
||||
|
||||
case h265.NALUType_AUD_NUT: |
||||
continue |
||||
} |
||||
|
||||
filteredNALUs[i] = nalu |
||||
i++ |
||||
} |
||||
|
||||
return filteredNALUs |
||||
} |
||||
|
||||
func (t *formatProcessorH265) ProcessUnit(uu unit.Unit) error { //nolint:dupl
|
||||
u := uu.(*unit.H265) |
||||
|
||||
t.updateTrackParametersFromAU(u.AU) |
||||
u.AU = t.remuxAccessUnit(u.AU) |
||||
|
||||
if u.AU != nil { |
||||
pkts, err := t.encoder.Encode(u.AU) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
u.RTPPackets = pkts |
||||
|
||||
ts := t.timeEncoder.Encode(u.PTS) |
||||
for _, pkt := range u.RTPPackets { |
||||
pkt.Timestamp += ts |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (t *formatProcessorH265) ProcessRTPPacket( //nolint:dupl
|
||||
pkt *rtp.Packet, |
||||
ntp time.Time, |
||||
pts time.Duration, |
||||
hasNonRTSPReaders bool, |
||||
) (Unit, error) { |
||||
u := &unit.H265{ |
||||
Base: unit.Base{ |
||||
RTPPackets: []*rtp.Packet{pkt}, |
||||
NTP: ntp, |
||||
PTS: pts, |
||||
}, |
||||
} |
||||
|
||||
t.updateTrackParametersFromRTPPacket(pkt.Payload) |
||||
|
||||
if t.encoder == nil { |
||||
// remove padding
|
||||
pkt.Header.Padding = false |
||||
pkt.PaddingSize = 0 |
||||
|
||||
// RTP packets exceed maximum size: start re-encoding them
|
||||
if pkt.MarshalSize() > t.udpMaxPayloadSize { |
||||
v1 := pkt.SSRC |
||||
v2 := pkt.SequenceNumber |
||||
err := t.createEncoder(&v1, &v2) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
} |
||||
|
||||
// decode from RTP
|
||||
if hasNonRTSPReaders || t.decoder != nil || t.encoder != nil { |
||||
if t.decoder == nil { |
||||
var err error |
||||
t.decoder, err = t.format.CreateDecoder() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
au, err := t.decoder.Decode(pkt) |
||||
|
||||
if t.encoder != nil { |
||||
u.RTPPackets = nil |
||||
} |
||||
|
||||
if err != nil { |
||||
if errors.Is(err, rtph265.ErrNonStartingPacketAndNoPrevious) || |
||||
errors.Is(err, rtph265.ErrMorePacketsNeeded) { |
||||
return u, nil |
||||
} |
||||
return nil, err |
||||
} |
||||
|
||||
u.AU = t.remuxAccessUnit(au) |
||||
} |
||||
|
||||
// route packet as is
|
||||
if t.encoder == nil { |
||||
return u, nil |
||||
} |
||||
|
||||
// encode into RTP
|
||||
if len(u.AU) != 0 { |
||||
pkts, err := t.encoder.Encode(u.AU) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
u.RTPPackets = pkts |
||||
|
||||
for _, newPKT := range u.RTPPackets { |
||||
newPKT.Timestamp = pkt.Timestamp |
||||
} |
||||
} |
||||
|
||||
return u, nil |
||||
} |
||||
@ -1,204 +0,0 @@
@@ -1,204 +0,0 @@
|
||||
package formatprocessor |
||||
|
||||
import ( |
||||
"bytes" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/bluenviron/mediacommon/pkg/codecs/h265" |
||||
"github.com/pion/rtp" |
||||
"github.com/stretchr/testify/require" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/unit" |
||||
) |
||||
|
||||
func TestH265DynamicParams(t *testing.T) { |
||||
for _, ca := range []string{"standard", "aggregated"} { |
||||
t.Run(ca, func(t *testing.T) { |
||||
forma := &format.H265{ |
||||
PayloadTyp: 96, |
||||
} |
||||
|
||||
p, err := New(1472, forma, false) |
||||
require.NoError(t, err) |
||||
|
||||
enc, err := forma.CreateEncoder() |
||||
require.NoError(t, err) |
||||
|
||||
pkts, err := enc.Encode([][]byte{{byte(h265.NALUType_CRA_NUT) << 1, 0}}) |
||||
require.NoError(t, err) |
||||
|
||||
data, err := p.ProcessRTPPacket(pkts[0], time.Time{}, 0, true) |
||||
require.NoError(t, err) |
||||
|
||||
require.Equal(t, [][]byte{ |
||||
{byte(h265.NALUType_CRA_NUT) << 1, 0}, |
||||
}, data.(*unit.H265).AU) |
||||
|
||||
if ca == "standard" { |
||||
pkts, err = enc.Encode([][]byte{{byte(h265.NALUType_VPS_NUT) << 1, 1, 2, 3}}) |
||||
require.NoError(t, err) |
||||
|
||||
_, err = p.ProcessRTPPacket(pkts[0], time.Time{}, 0, false) |
||||
require.NoError(t, err) |
||||
|
||||
pkts, err = enc.Encode([][]byte{{byte(h265.NALUType_SPS_NUT) << 1, 4, 5, 6}}) |
||||
require.NoError(t, err) |
||||
|
||||
_, err = p.ProcessRTPPacket(pkts[0], time.Time{}, 0, false) |
||||
require.NoError(t, err) |
||||
|
||||
pkts, err = enc.Encode([][]byte{{byte(h265.NALUType_PPS_NUT) << 1, 7, 8, 9}}) |
||||
require.NoError(t, err) |
||||
|
||||
_, err = p.ProcessRTPPacket(pkts[0], time.Time{}, 0, false) |
||||
require.NoError(t, err) |
||||
} else { |
||||
pkts, err = enc.Encode([][]byte{ |
||||
{byte(h265.NALUType_VPS_NUT) << 1, 1, 2, 3}, |
||||
{byte(h265.NALUType_SPS_NUT) << 1, 4, 5, 6}, |
||||
{byte(h265.NALUType_PPS_NUT) << 1, 7, 8, 9}, |
||||
}) |
||||
require.NoError(t, err) |
||||
|
||||
_, err = p.ProcessRTPPacket(pkts[0], time.Time{}, 0, false) |
||||
require.NoError(t, err) |
||||
} |
||||
|
||||
require.Equal(t, []byte{byte(h265.NALUType_VPS_NUT) << 1, 1, 2, 3}, forma.VPS) |
||||
require.Equal(t, []byte{byte(h265.NALUType_SPS_NUT) << 1, 4, 5, 6}, forma.SPS) |
||||
require.Equal(t, []byte{byte(h265.NALUType_PPS_NUT) << 1, 7, 8, 9}, forma.PPS) |
||||
|
||||
pkts, err = enc.Encode([][]byte{{byte(h265.NALUType_CRA_NUT) << 1, 0}}) |
||||
require.NoError(t, err) |
||||
|
||||
data, err = p.ProcessRTPPacket(pkts[0], time.Time{}, 0, true) |
||||
require.NoError(t, err) |
||||
|
||||
require.Equal(t, [][]byte{ |
||||
{byte(h265.NALUType_VPS_NUT) << 1, 1, 2, 3}, |
||||
{byte(h265.NALUType_SPS_NUT) << 1, 4, 5, 6}, |
||||
{byte(h265.NALUType_PPS_NUT) << 1, 7, 8, 9}, |
||||
{byte(h265.NALUType_CRA_NUT) << 1, 0}, |
||||
}, data.(*unit.H265).AU) |
||||
}) |
||||
} |
||||
} |
||||
|
||||
func TestH265OversizedPackets(t *testing.T) { |
||||
forma := &format.H265{ |
||||
PayloadTyp: 96, |
||||
VPS: []byte{byte(h265.NALUType_VPS_NUT) << 1, 10, 11, 12}, |
||||
SPS: []byte{byte(h265.NALUType_SPS_NUT) << 1, 13, 14, 15}, |
||||
PPS: []byte{byte(h265.NALUType_PPS_NUT) << 1, 16, 17, 18}, |
||||
} |
||||
|
||||
p, err := New(1472, forma, false) |
||||
require.NoError(t, err) |
||||
|
||||
var out []*rtp.Packet |
||||
|
||||
for _, pkt := range []*rtp.Packet{ |
||||
{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 123, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
Padding: true, |
||||
}, |
||||
Payload: []byte{0x01, 0x02, 0x03, 0x04}, |
||||
}, |
||||
{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 124, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
Padding: true, |
||||
}, |
||||
Payload: bytes.Repeat([]byte{0x01, 0x02, 0x03, 0x04}, 2000/4), |
||||
}, |
||||
} { |
||||
data, err := p.ProcessRTPPacket(pkt, time.Time{}, 0, false) |
||||
require.NoError(t, err) |
||||
|
||||
out = append(out, data.GetRTPPackets()...) |
||||
} |
||||
|
||||
require.Equal(t, []*rtp.Packet{ |
||||
{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 123, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
}, |
||||
Payload: []byte{0x01, 0x02, 0x03, 0x04}, |
||||
}, |
||||
{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: false, |
||||
PayloadType: 96, |
||||
SequenceNumber: 124, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
}, |
||||
Payload: append( |
||||
append([]byte{0x63, 0x02, 0x80, 0x03, 0x04}, bytes.Repeat([]byte{0x01, 0x02, 0x03, 0x04}, 363)...), |
||||
[]byte{0x01, 0x02, 0x03}..., |
||||
), |
||||
}, |
||||
{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
Marker: true, |
||||
PayloadType: 96, |
||||
SequenceNumber: 125, |
||||
Timestamp: 45343, |
||||
SSRC: 563423, |
||||
}, |
||||
Payload: append( |
||||
[]byte{0x63, 0x02, 0x40, 0x04}, |
||||
bytes.Repeat([]byte{0x01, 0x02, 0x03, 0x04}, 135)..., |
||||
), |
||||
}, |
||||
}, out) |
||||
} |
||||
|
||||
func TestH265EmptyPacket(t *testing.T) { |
||||
forma := &format.H265{ |
||||
PayloadTyp: 96, |
||||
} |
||||
|
||||
p, err := New(1472, forma, true) |
||||
require.NoError(t, err) |
||||
|
||||
unit := &unit.H265{ |
||||
AU: [][]byte{ |
||||
{byte(h265.NALUType_VPS_NUT) << 1, 10, 11, 12}, // VPS
|
||||
{byte(h265.NALUType_SPS_NUT) << 1, 13, 14, 15}, // SPS
|
||||
{byte(h265.NALUType_PPS_NUT) << 1, 16, 17, 18}, // PPS
|
||||
}, |
||||
} |
||||
|
||||
err = p.ProcessUnit(unit) |
||||
require.NoError(t, err) |
||||
|
||||
// if all NALUs have been removed, no RTP packets must be generated.
|
||||
require.Equal(t, []*rtp.Packet(nil), unit.RTPPackets) |
||||
} |
||||
|
||||
func FuzzRTPH265ExtractParams(f *testing.F) { |
||||
f.Fuzz(func(_ *testing.T, b []byte) { |
||||
rtpH265ExtractParams(b) |
||||
}) |
||||
} |
||||
@ -1,121 +0,0 @@
@@ -1,121 +0,0 @@
|
||||
package formatprocessor //nolint:dupl
|
||||
|
||||
import ( |
||||
"fmt" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/format/rtplpcm" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/rtptime" |
||||
"github.com/pion/rtp" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/unit" |
||||
) |
||||
|
||||
type formatProcessorLPCM struct { |
||||
udpMaxPayloadSize int |
||||
format *format.LPCM |
||||
timeEncoder *rtptime.Encoder |
||||
encoder *rtplpcm.Encoder |
||||
decoder *rtplpcm.Decoder |
||||
} |
||||
|
||||
func newLPCM( |
||||
udpMaxPayloadSize int, |
||||
forma *format.LPCM, |
||||
generateRTPPackets bool, |
||||
) (*formatProcessorLPCM, error) { |
||||
t := &formatProcessorLPCM{ |
||||
udpMaxPayloadSize: udpMaxPayloadSize, |
||||
format: forma, |
||||
} |
||||
|
||||
if generateRTPPackets { |
||||
err := t.createEncoder() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
t.timeEncoder = &rtptime.Encoder{ |
||||
ClockRate: forma.ClockRate(), |
||||
} |
||||
err = t.timeEncoder.Initialize() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
return t, nil |
||||
} |
||||
|
||||
func (t *formatProcessorLPCM) createEncoder() error { |
||||
t.encoder = &rtplpcm.Encoder{ |
||||
PayloadMaxSize: t.udpMaxPayloadSize - 12, |
||||
PayloadType: t.format.PayloadTyp, |
||||
BitDepth: t.format.BitDepth, |
||||
ChannelCount: t.format.ChannelCount, |
||||
} |
||||
return t.encoder.Init() |
||||
} |
||||
|
||||
func (t *formatProcessorLPCM) ProcessUnit(uu unit.Unit) error { //nolint:dupl
|
||||
u := uu.(*unit.LPCM) |
||||
|
||||
pkts, err := t.encoder.Encode(u.Samples) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
u.RTPPackets = pkts |
||||
|
||||
ts := t.timeEncoder.Encode(u.PTS) |
||||
for _, pkt := range u.RTPPackets { |
||||
pkt.Timestamp += ts |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (t *formatProcessorLPCM) ProcessRTPPacket( //nolint:dupl
|
||||
pkt *rtp.Packet, |
||||
ntp time.Time, |
||||
pts time.Duration, |
||||
hasNonRTSPReaders bool, |
||||
) (Unit, error) { |
||||
u := &unit.LPCM{ |
||||
Base: unit.Base{ |
||||
RTPPackets: []*rtp.Packet{pkt}, |
||||
NTP: ntp, |
||||
PTS: pts, |
||||
}, |
||||
} |
||||
|
||||
// remove padding
|
||||
pkt.Header.Padding = false |
||||
pkt.PaddingSize = 0 |
||||
|
||||
if pkt.MarshalSize() > t.udpMaxPayloadSize { |
||||
return nil, fmt.Errorf("payload size (%d) is greater than maximum allowed (%d)", |
||||
pkt.MarshalSize(), t.udpMaxPayloadSize) |
||||
} |
||||
|
||||
// decode from RTP
|
||||
if hasNonRTSPReaders || t.decoder != nil { |
||||
if t.decoder == nil { |
||||
var err error |
||||
t.decoder, err = t.format.CreateDecoder() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
samples, err := t.decoder.Decode(pkt) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
u.Samples = samples |
||||
} |
||||
|
||||
// route packet as is
|
||||
return u, nil |
||||
} |
||||
@ -1,39 +0,0 @@
@@ -1,39 +0,0 @@
|
||||
package formatprocessor |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/bluenviron/mediamtx/internal/unit" |
||||
"github.com/pion/rtp" |
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
func TestLPCMEncode(t *testing.T) { |
||||
forma := &format.LPCM{ |
||||
PayloadTyp: 96, |
||||
BitDepth: 16, |
||||
SampleRate: 44100, |
||||
ChannelCount: 2, |
||||
} |
||||
|
||||
p, err := New(1472, forma, true) |
||||
require.NoError(t, err) |
||||
|
||||
unit := &unit.LPCM{ |
||||
Samples: []byte{1, 2, 3, 4}, |
||||
} |
||||
|
||||
err = p.ProcessUnit(unit) |
||||
require.NoError(t, err) |
||||
require.Equal(t, []*rtp.Packet{{ |
||||
Header: rtp.Header{ |
||||
Version: 2, |
||||
PayloadType: 96, |
||||
SequenceNumber: unit.RTPPackets[0].SequenceNumber, |
||||
Timestamp: unit.RTPPackets[0].Timestamp, |
||||
SSRC: unit.RTPPackets[0].SSRC, |
||||
}, |
||||
Payload: []byte{1, 2, 3, 4}, |
||||
}}, unit.RTPPackets) |
||||
} |
||||
@ -1,124 +0,0 @@
@@ -1,124 +0,0 @@
|
||||
package formatprocessor //nolint:dupl
|
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"time" |
||||
|
||||
"github.com/bluenviron/gortsplib/v4/pkg/format" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/format/rtpmjpeg" |
||||
"github.com/bluenviron/gortsplib/v4/pkg/rtptime" |
||||
"github.com/pion/rtp" |
||||
|
||||
"github.com/bluenviron/mediamtx/internal/unit" |
||||
) |
||||
|
||||
type formatProcessorMJPEG struct { |
||||
udpMaxPayloadSize int |
||||
format *format.MJPEG |
||||
timeEncoder *rtptime.Encoder |
||||
encoder *rtpmjpeg.Encoder |
||||
decoder *rtpmjpeg.Decoder |
||||
} |
||||
|
||||
func newMJPEG( |
||||
udpMaxPayloadSize int, |
||||
forma *format.MJPEG, |
||||
generateRTPPackets bool, |
||||
) (*formatProcessorMJPEG, error) { |
||||
t := &formatProcessorMJPEG{ |
||||
udpMaxPayloadSize: udpMaxPayloadSize, |
||||
format: forma, |
||||
} |
||||
|
||||
if generateRTPPackets { |
||||
err := t.createEncoder() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
t.timeEncoder = &rtptime.Encoder{ |
||||
ClockRate: forma.ClockRate(), |
||||
} |
||||
err = t.timeEncoder.Initialize() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
return t, nil |
||||
} |
||||
|
||||
func (t *formatProcessorMJPEG) createEncoder() error { |
||||
t.encoder = &rtpmjpeg.Encoder{ |
||||
PayloadMaxSize: t.udpMaxPayloadSize - 12, |
||||
} |
||||
return t.encoder.Init() |
||||
} |
||||
|
||||
func (t *formatProcessorMJPEG) ProcessUnit(uu unit.Unit) error { //nolint:dupl
|
||||
u := uu.(*unit.MJPEG) |
||||
|
||||
// encode into RTP
|
||||
pkts, err := t.encoder.Encode(u.Frame) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
u.RTPPackets = pkts |
||||
|
||||
ts := t.timeEncoder.Encode(u.PTS) |
||||
for _, pkt := range u.RTPPackets { |
||||
pkt.Timestamp += ts |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (t *formatProcessorMJPEG) ProcessRTPPacket( //nolint:dupl
|
||||
pkt *rtp.Packet, |
||||
ntp time.Time, |
||||
pts time.Duration, |
||||
hasNonRTSPReaders bool, |
||||
) (Unit, error) { |
||||
u := &unit.MJPEG{ |
||||
Base: unit.Base{ |
||||
RTPPackets: []*rtp.Packet{pkt}, |
||||
NTP: ntp, |
||||
PTS: pts, |
||||
}, |
||||
} |
||||
|
||||
// remove padding
|
||||
pkt.Header.Padding = false |
||||
pkt.PaddingSize = 0 |
||||
|
||||
if pkt.MarshalSize() > t.udpMaxPayloadSize { |
||||
return nil, fmt.Errorf("payload size (%d) is greater than maximum allowed (%d)", |
||||
pkt.MarshalSize(), t.udpMaxPayloadSize) |
||||
} |
||||
|
||||
// decode from RTP
|
||||
if hasNonRTSPReaders || t.decoder != nil { |
||||
if t.decoder == nil { |
||||
var err error |
||||
t.decoder, err = t.format.CreateDecoder() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
frame, err := t.decoder.Decode(pkt) |
||||
if err != nil { |
||||
if errors.Is(err, rtpmjpeg.ErrNonStartingPacketAndNoPrevious) || |
||||
errors.Is(err, rtpmjpeg.ErrMorePacketsNeeded) { |
||||
return u, nil |
||||
} |
||||
return nil, err |
||||
} |
||||
|
||||
u.Frame = frame |
||||
} |
||||
|
||||
// route packet as is
|
||||
return u, nil |
||||
} |
||||
Some files were not shown because too many files have changed in this diff.