Compare commits

..

4 Commits

SHA1        Author           Date                         Message
4d343db081  David Robertson  2021-11-16 16:37:43 +00:00   Get rid of my home dir, whoops
a1367dcf8c  David Robertson  2021-11-16 16:34:33 +00:00   Require networkx
9e361c8550  David Robertson  2021-11-16 13:52:59 +00:00   Changelog
51fec1a534  David Robertson  2021-11-16 13:51:50 +00:00   Commit hacky script to visualise store inheritance
                                                          (Use e.g. with `scripts-dev/storage_inheritance.py DataStore --show`.)
895 changed files with 53009 additions and 80271 deletions

.ci/complement_package.gotpl

@@ -1,93 +0,0 @@
{{- /*gotype: github.com/haveyoudebuggedit/gotestfmt/parser.Package*/ -}}
{{- /*
This template contains the format for an individual package. GitHub Actions does not currently support nested groups, so
we are creating a stylized header for each package.
This template is based on https://github.com/haveyoudebuggedit/gotestfmt/blob/f179b0e462a9dcf7101515d87eec4e4d7e58b92a/.gotestfmt/github/package.gotpl
which is under the Unlicense licence.
*/ -}}
{{- $settings := .Settings -}}
{{- if and (or (not $settings.HideSuccessfulPackages) (ne .Result "PASS")) (or (not $settings.HideEmptyPackages) (ne .Result "SKIP") (ne (len .TestCases) 0)) -}}
{{- if eq .Result "PASS" -}}
{{ "\033" }}[0;32m
{{- else if eq .Result "SKIP" -}}
{{ "\033" }}[0;33m
{{- else -}}
{{ "\033" }}[0;31m
{{- end -}}
📦 {{ .Name }}{{- "\033" }}[0m
{{- with .Coverage -}}
{{- "\033" -}}[0;37m ({{ . }}% coverage){{- "\033" -}}[0m
{{- end -}}
{{- "\n" -}}
{{- with .Reason -}}
{{- " " -}}🛑 {{ . -}}{{- "\n" -}}
{{- end -}}
{{- with .Output -}}
{{- . -}}{{- "\n" -}}
{{- end -}}
{{- with .TestCases -}}
{{- /* Failing tests are first */ -}}
{{- range . -}}
{{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- /* Then skipped tests are second */ -}}
{{- range . -}}
{{- if eq .Result "SKIP" -}}
::group::{{ "\033" }}[0;33m🚧{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- /* Then passing tests are last */ -}}
{{- range . -}}
{{- if eq .Result "PASS" -}}
::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- "\n" -}}
{{- end -}}
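gotestfmt consumes `go test -json` output on stdin and renders it with templates found under `.gotestfmt/`; a minimal sketch of how the template above gets exercised (the `./...` package selector here is illustrative):

```sh
# Assumes the template has been installed as .gotestfmt/github/package.gotpl,
# as setup_complement_prerequisites.sh does further down in this diff.
go test -json ./... 2>&1 | gotestfmt
```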

.ci/latest_deps_build_failed_issue_template.md

@@ -1,4 +0,0 @@
---
title: CI run against latest deps is failing
---
See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}

.ci/patch_for_twisted_trunk.sh (new executable file, 8 lines)

@@ -0,0 +1,8 @@
#!/bin/sh
# replaces the dependency on Twisted in `python_dependencies` with trunk.
set -e
cd "$(dirname "$0")"/..
sed -i -e 's#"Twisted.*"#"Twisted @ git+https://github.com/twisted/twisted"#' synapse/python_dependencies.py
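For illustration, the sed expression rewrites whatever pinned Twisted requirement it finds. A runnable sketch on a hypothetical dependency line (the `>=18.9.0` bound is an assumption, not the real pin):

```sh
# Prints: "Twisted @ git+https://github.com/twisted/twisted",
echo '"Twisted>=18.9.0",' | sed -e 's#"Twisted.*"#"Twisted @ git+https://github.com/twisted/twisted"#'
```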

.ci/scripts/checkout_complement.sh

@@ -1,25 +0,0 @@
#!/bin/bash
#
# Fetches a version of complement which best matches the current build.
#
# The tarball is unpacked into `./complement`.
set -e
mkdir -p complement
# Pick an appropriate version of complement. Depending on whether this is a PR or release,
# etc. we need to use different fallbacks:
#
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
# for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
# (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("HEAD").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
# Skip empty branch names and merge commits.
if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
continue
fi
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
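A worked example of the fallback order, with hypothetical values for the GitHub-provided variables:

```sh
# Hypothetical environment for a pull request build:
GITHUB_HEAD_REF="anoa/some-feature"   # PR source branch: tried first
GITHUB_BASE_REF="develop"             # PR target branch: tried second
GITHUB_REF="refs/pull/1234/merge"     # merge ref: matches ^refs/pull/ and is skipped
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
    case "$BRANCH_NAME" in ""|refs/pull/*) continue ;; esac
    echo "would try: $BRANCH_NAME"    # anoa/some-feature, develop, HEAD
done
```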

.ci/scripts/setup_complement_prerequisites.sh

@@ -1,36 +0,0 @@
#!/bin/sh
#
# Common commands to set up Complement's prerequisites in a GitHub Actions CI run.
#
# Must be called after Synapse has been checked out to `synapse/`.
#
set -eu
alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'
block Set Go Version
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
endblock
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
endblock
block Install custom gotestfmt template
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl
endblock
block Check out Complement
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fall back to HEAD.
synapse/.ci/scripts/checkout_complement.sh
endblock
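Expanded, the `block`/`endblock` aliases map each section onto a collapsible GitHub Actions log group; `block Set Go Version` behaves roughly like:

```sh
{ set +x; } 2>/dev/null          # stop echoing commands, without echoing `set +x` itself
echo "::group::Set Go Version"   # open a collapsible group in the Actions log
set -x                           # echo the commands that run inside the group
```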

.ci/scripts/test_export_data_command.sh

@@ -2,24 +2,29 @@
# Test for the export-data admin command against sqlite and postgres
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
cd "$(dirname "$0")/../.."
echo "--- Install dependencies"
# Install dependencies for this test.
pip install psycopg2
# Install Synapse itself. This won't update any libraries.
pip install -e .
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Run the export-data command on the sqlite test database
poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data
# Test that the output directory exists and contains the rooms directory
@@ -32,14 +37,14 @@ else
fi
# Create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# Run the export-data command on postgres database
poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data2
# Test that the output directory exists and contains the rooms directory
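The success check elided here presumably takes a form like the following sketch, reconstructed from the `else`/`fi` fragments above (the exact paths and messages are assumptions):

```sh
if [ -d "/tmp/export_data/rooms" ]; then
    echo "Rooms directory found"
else
    echo "Rooms directory not found"
    exit 1
fi
```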

.ci/scripts/test_old_deps.sh

@@ -1,83 +1,16 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `focal` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.
# Prevent tzdata from asking for user input
export DEBIAN_FRONTEND=noninteractive
# this script is run by GitHub Actions in a plain `bionic` container; it installs the
# minimal requirements for tox and hands over to the py3-old tox environment.
set -ex
apt-get update
apt-get install -y \
python3 python3-dev python3-pip python3-venv pipx \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
export LANG="C.UTF-8"
# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1
# TODO: in the future, we could use an implementation of
# https://github.com/python-poetry/poetry/issues/3527
# https://github.com/pypa/pip/issues/8085
# to select the lowest possible versions, rather than resorting to this sed script.
# Patch the project definitions in-place:
# - Replace all lower and tilde bounds with exact bounds
# - Replace all caret bounds---but not the one that defines the supported Python version!
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Use pyopenssl 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Omit systemd: we're not logging to journal here.
# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
# `python = "^3.7"` to `python = "==3.7", which would mean we fail because olddeps
# runs on 3.8 (#12343).
sed -i \
-e "s/[~>]=/==/g" \
-e '/^python = "^/!s/\^/==/g' \
-e "/psycopg2/d" \
-e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
-e '/systemd/d' \
pyproject.toml
# Use poetry to do the installation. This ensures that the versions are all mutually
# compatible (as far as the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.
pip install --user toml
REMOVE_DEV_DEPENDENCIES="
import toml
with open('pyproject.toml', 'r') as f:
data = toml.loads(f.read())
del data['tool']['poetry']['dev-dependencies']
with open('pyproject.toml', 'w') as f:
toml.dump(data, f)
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
pipx install poetry==1.1.12
~/.local/bin/poetry lock
echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"
~/.local/bin/poetry install -E "all test"
~/.local/bin/poetry run trial --jobs=2 tests
exec tox -e py3-old,combine
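To see the bound-pinning sed in isolation, a runnable one-liner on a hypothetical dependency entry:

```sh
# Lower/tilde bounds become exact pins:
echo 'attrs = ">=19.2.0"' | sed -e "s/[~>]=/==/g"
# prints: attrs = "==19.2.0"
```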

.ci/scripts/test_synapse_port_db.sh

@@ -1,37 +1,41 @@
#!/usr/bin/env bash
#
# Test script for 'synapse_port_db'.
# - configures synapse and a postgres server.
# - sets up synapse and deps
# - runs the port script on a prepopulated test sqlite db
# - also runs it against a new sqlite db
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
cd "$(dirname "$0")/../.."
echo "--- Install dependencies"
# Install dependencies for this test.
pip install psycopg2 coverage coverage-enable-subprocess
# Install Synapse itself. This won't update any libraries.
pip install -e .
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# We should be able to run twice against the same database.
echo "+++ Run synapse_port_db a second time"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
#####
@@ -42,12 +46,12 @@ echo "--- Prepare empty SQLite database"
# we do this by deleting the sqlite db, and then doing the same again.
rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py \
.ci/scripts/postgres_exec.py \
"DROP DATABASE synapse" \
"CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
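Per the header comments, a hypothetical local invocation of the poetry-based variant of this script:

```sh
# Assumes a postgres server matching .ci/postgres-config.yaml is running.
poetry install --extras postgres
.ci/scripts/test_synapse_port_db.sh
```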

.dockerignore

@@ -3,9 +3,11 @@
# things to include
!docker
!scripts
!synapse
!MANIFEST.in
!README.rst
!pyproject.toml
!poetry.lock
!setup.py
!synctl
**/__pycache__

.editorconfig

@@ -7,4 +7,3 @@ root = true
[*.py]
indent_style = space
indent_size = 4
max_line_length = 88

.flake8 (11 lines)

@@ -1,11 +0,0 @@
# TODO: incorporate this into pyproject.toml if flake8 supports it in the future.
# See https://github.com/PyCQA/flake8/issues/234
[flake8]
# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
# for error codes. The ones we ignore are:
# W503: line break before binary operator
# W504: line break after binary operator
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
ignore=W503,W504,E203,E731,E501

.git-blame-ignore-revs

@@ -6,6 +6,3 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56
# Update black to 20.8b1 (#9381).
0a00b7ff14890987f09112a2ae696c61001e6cf1
# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
c4268e3da64f1abb5b31deaeb5769adb6510c0a7
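This file feeds git's standard ignore-revs mechanism; to have local `git blame` skip these formatting-only commits:

```sh
# One-time setup per clone (supported since git 2.23):
git config blame.ignoreRevsFile .git-blame-ignore-revs
```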

.github/ISSUE_TEMPLATE/BUG_REPORT.md (new file, 72 lines)

@@ -0,0 +1,72 @@
---
name: Bug report
about: Create a report to help us improve
---
<!--
**THIS IS NOT A SUPPORT CHANNEL!**
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**,
please ask in **#synapse:matrix.org** (using a matrix.org account if necessary)
If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
This is a bug report template. By following the instructions below and
filling out the sections with your information, you will help us to get all
the necessary data to fix your issue.
You can also preview your report before submitting it. You may remove sections
that aren't relevant to your particular case.
Text between <!-- and --> marks will be invisible in the report.
-->
### Description
<!-- Describe here the problem that you are experiencing -->
### Steps to reproduce
- list the steps
- that reproduce the bug
- using hyphens as bullet points
<!--
Describe how what happens differs from what you expected.
If you can identify any relevant log snippets from _homeserver.log_, please include
those (please be careful to remove any personal or private data). Please surround them with
``` (three backticks, on a line on their own), so that they are formatted legibly.
-->
### Version information
<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
<!-- Was this issue identified on matrix.org or another homeserver? -->
- **Homeserver**:
If not matrix.org:
<!--
What version of Synapse is running?
You can find the Synapse version with this command:
$ curl http://localhost:8008/_synapse/admin/v1/server_version
(You may need to replace `localhost:8008` if Synapse is not configured to
listen on that port.)
-->
- **Version**:
- **Install method**:
<!-- examples: package manager/git clone/pip -->
- **Platform**:
<!--
Tell us about the environment in which your homeserver is operating
distro, hardware, if it's running in a vm/container, etc.
-->


@@ -1,103 +0,0 @@
name: Bug report
description: Create a report to help us improve
body:
- type: markdown
attributes:
value: |
**THIS IS NOT A SUPPORT CHANNEL!**
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
This is a bug report form. By following the instructions below and completing the sections with your information, you will help us to get all the necessary data to fix your issue.
You can also preview your report before submitting it.
- type: textarea
id: description
attributes:
label: Description
description: Describe the problem that you are experiencing
validations:
required: true
- type: textarea
id: reproduction_steps
attributes:
label: Steps to reproduce
description: |
Describe the series of steps that leads you to the problem.
Describe how what happens differs from what you expected.
placeholder: Tell us what you see!
value: |
- list the steps
- that reproduce the bug
- using hyphens as bullet points
validations:
required: true
- type: markdown
attributes:
value: |
---
**IMPORTANT**: please answer the following questions, to help us narrow down the problem.
- type: input
id: homeserver
attributes:
label: Homeserver
description: Which homeserver was this issue identified on? (matrix.org, another homeserver, etc)
validations:
required: true
- type: input
id: version
attributes:
label: Synapse Version
description: |
What version of Synapse is this homeserver running?
You can find the Synapse version by visiting https://yourserver.example.com/_matrix/federation/v1/version
or with this command:
```
$ curl http://localhost:8008/_synapse/admin/v1/server_version
```
(You may need to replace `localhost:8008` if Synapse is not configured to listen on that port.)
validations:
required: true
- type: dropdown
id: install_method
attributes:
label: Installation Method
options:
- Docker (matrixdotorg/synapse)
- Debian packages from packages.matrix.org
- pip (from PyPI)
- Other (please mention below)
- type: textarea
id: platform
attributes:
label: Platform
description: |
Tell us about the environment in which your homeserver is operating...
e.g. distro, hardware, if it's running in a vm/container, etc.
validations:
required: true
- type: textarea
id: logs
attributes:
label: Relevant log output
description: |
Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
This will be automatically formatted into code, so there is no need for backticks.
Please be careful to remove any personal or private data.
**Bug reports are usually very difficult to diagnose without logging.**
render: shell
validations:
required: true
- type: textarea
id: anything_else
attributes:
label: Anything else that would be useful to know?


@@ -8,7 +8,6 @@
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry.
* [ ] Pull request includes a [sign off](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#sign-off)
* [ ] [Code style](https://matrix-org.github.io/synapse/latest/code_style.html) is correct
(run the [linters](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))


@@ -5,7 +5,7 @@ name: Build docker images
on:
push:
tags: ["v*"]
branches: [ master, main, develop ]
branches: [ master, main ]
workflow_dispatch:
permissions:
@@ -36,22 +36,37 @@ jobs:
- name: Calculate docker image tag
id: set-tag
uses: docker/metadata-action@master
run: |
case "${GITHUB_REF}" in
refs/heads/master|refs/heads/main)
tag=latest
;;
refs/tags/*)
tag=${GITHUB_REF#refs/tags/}
;;
*)
tag=${GITHUB_SHA}
;;
esac
echo "::set-output name=tag::$tag"
# for release builds, we want to get the amd64 image out asap, so first
# we do an amd64-only build, before following up with a multiarch build.
- name: Build and push amd64
uses: docker/build-push-action@v2
if: "${{ startsWith(github.ref, 'refs/tags/v') }}"
with:
images: matrixdotorg/synapse
flavor: |
latest=false
tags: |
type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }}
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }}
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
type=pep440,pattern={{raw}}
push: true
labels: "gitsha1=${{ github.sha }}"
tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
file: "docker/Dockerfile"
platforms: linux/amd64
- name: Build and push all platforms
uses: docker/build-push-action@v2
with:
push: true
labels: "gitsha1=${{ github.sha }}"
tags: "${{ steps.set-tag.outputs.tags }}"
tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64
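The removed tag-calculation step resolves each ref to a single tag; a worked example of its case statement with a hypothetical release ref:

```sh
GITHUB_REF="refs/tags/v1.2.3"   # hypothetical tag push
case "${GITHUB_REF}" in
    refs/heads/master|refs/heads/main) tag=latest ;;
    refs/tags/*) tag=${GITHUB_REF#refs/tags/} ;;
    *) tag=${GITHUB_SHA} ;;
esac
echo "$tag"   # prints: v1.2.3
```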


@@ -22,7 +22,7 @@ jobs:
- name: Setup mdbook
uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
with:
mdbook-version: '0.4.17'
mdbook-version: '0.4.9'
- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.


@@ -1,159 +0,0 @@
# People who are freshly `pip install`ing from PyPI will pull in the latest versions of
# dependencies which match the broad requirements. Since most CI runs are against
# the locked poetry environment, run specifically against the latest dependencies to
# know if there's an upcoming breaking change.
#
# As an overview this workflow:
# - checks out develop,
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.
name: Latest dependencies
on:
schedule:
- cron: 0 7 * * *
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
# poetry-core versions), so we install with poetry.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.2.0b1"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
# Upgrade all runtime dependencies only. This is intended to mimic a fresh
# `pip install matrix-synapse[all]` as closely as possible.
- run: poetry update --no-dev
- run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
trial:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- database: "sqlite"
- database: "postgres"
postgres-version: "14"
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: actions/setup-python@v2
with:
python-version: "3.x"
- run: pip install .[all,test]
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: python -m twisted.trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
# Logs are most useful when the command fails, so always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
sytest:
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:testing
volumes:
- ${{ github.workspace }}:/src
strategy:
fail-fast: false
matrix:
include:
- sytest-tag: focal
- sytest-tag: focal
postgres: postgres
workers: workers
redis: redis
env:
POSTGRES: ${{ matrix.postgres && 1}}
WORKERS: ${{ matrix.workers && 1 }}
REDIS: ${{ matrix.redis && 1 }}
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
- uses: actions/checkout@v2
- name: Ensure sytest runs `pip install`
# Delete the lockfile so sytest will `pip install` rather than `poetry install`
run: rm /src/poetry.lock
working-directory: /src
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
# TODO: run complement (as with twisted trunk, see #12473).
# open an issue if the build fails, so we know about it.
open-issue:
if: failure()
needs:
# TODO: should mypy be included here? It feels more brittle than the other two.
- mypy
- trial
- sytest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
update_existing: true
filename: .ci/latest_deps_build_failed_issue_template.md


@@ -7,7 +7,7 @@ on:
# of things breaking (but only build one set of debs)
pull_request:
push:
branches: ["develop", "release-*"]
branches: ["develop"]
# we do the full build on tags.
tags: ["v*"]
@@ -31,7 +31,7 @@ jobs:
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
dists='["debian:sid"]'
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
dists=$(scripts-dev/build_debian_packages --show-dists-json)
fi
echo "::set-output name=distros::$dists"
# map the step outputs to job outputs
@@ -74,7 +74,7 @@ jobs:
# see https://github.com/docker/build-push-action/issues/252
# for the cache magic here
run: |
./src/scripts-dev/build_debian_packages.py \
./src/scripts-dev/build_debian_packages \
--docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
--docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
--docker-build-arg=--progress=plain \
@@ -91,7 +91,17 @@ jobs:
build-sdist:
name: "Build pypi distribution files"
uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install wheel
- run: |
python setup.py sdist bdist_wheel
- uses: actions/upload-artifact@v2
with:
name: python-dist
path: dist/*
# if it's a tag, create a release and attach the artifacts to it
attach-assets:
@@ -112,8 +122,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: |
Sdist/*
Wheel/*
python-dist/*
debs.tar.xz
# if it's not already published, keep the release as a draft.
draft: true


@@ -10,27 +10,22 @@ concurrency:
cancel-in-progress: true
jobs:
check-sampleconfig:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install .
- run: scripts-dev/generate_sample_config.sh --check
- run: scripts-dev/config-lint.sh
check-schema-delta:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
lint:
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
with:
typechecking-extras: "all"
runs-on: ubuntu-latest
strategy:
matrix:
toxenv:
- "check-sampleconfig"
- "check_codestyle"
- "check_isort"
- "mypy"
- "packaging"
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install tox
- run: tox -e ${{ matrix.toxenv }}
lint-crlf:
runs-on: ubuntu-latest
@@ -48,15 +43,29 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@v2
- run: "pip install 'towncrier>=18.6.0rc1'"
- run: scripts-dev/check-newsfragment.sh
- run: pip install tox
- run: scripts-dev/check-newsfragment
env:
PULL_REQUEST_NUMBER: ${{ github.event.number }}
lint-sdist:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: "3.x"
- run: pip install wheel
- run: python setup.py sdist bdist_wheel
- uses: actions/upload-artifact@v2
with:
name: Python Distributions
path: dist/*
# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig, check-schema-delta]
needs: [lint, lint-crlf, lint-newsfile, lint-sdist]
runs-on: ubuntu-latest
steps:
- run: "true"
@@ -67,25 +76,25 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.7", "3.8", "3.9", "3.10"]
python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"]
database: ["sqlite"]
extras: ["all"]
toxenv: ["py"]
include:
# Newest Python without optional deps
- python-version: "3.10"
extras: ""
toxenv: "py-noextras"
# Oldest Python with PostgreSQL
- python-version: "3.7"
- python-version: "3.6"
database: "postgres"
postgres-version: "10"
extras: "all"
postgres-version: "9.6"
toxenv: "py"
# Newest Python with newest PostgreSQL
- python-version: "3.10"
database: "postgres"
postgres-version: "14"
extras: "all"
toxenv: "py"
steps:
- uses: actions/checkout@v2
@@ -97,16 +106,17 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
extras: ${{ matrix.extras }}
- run: pip install tox
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: poetry run trial --jobs=2 tests
- run: tox -e ${{ matrix.toxenv }}
env:
TRIAL_FLAGS: "--jobs=2"
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
@@ -125,19 +135,18 @@ jobs:
|| true
trial-olddeps:
# Note: sqlite only; no postgres
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Test with old deps
uses: docker://ubuntu:focal # For old python and sqlite
# Note: focal seems to be using 3.8, but the oldest is 3.7?
# See https://github.com/matrix-org/synapse/issues/12343
uses: docker://ubuntu:bionic # For old python and sqlite
with:
workdir: /github/workspace
entrypoint: .ci/scripts/test_old_deps.sh
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Logs are most useful when the command fails, so always include them.
if: ${{ always() }}
@@ -153,24 +162,23 @@ jobs:
trial-pypy:
# Very slow; only run if the branch name includes 'pypy'
# Note: sqlite only; no postgres. Completely untested since poetry move.
if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.7"]
extras: ["all"]
python-version: ["pypy-3.6"]
steps:
- uses: actions/checkout@v2
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
extras: ${{ matrix.extras }}
- run: poetry run trial --jobs=2 tests
- run: pip install tox
- run: tox -e py
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Logs are most useful when the command fails, so always include them.
if: ${{ always() }}
@@ -205,15 +213,15 @@ jobs:
fail-fast: false
matrix:
include:
- sytest-tag: focal
- sytest-tag: bionic
- sytest-tag: focal
- sytest-tag: bionic
postgres: postgres
- sytest-tag: testing
postgres: postgres
- sytest-tag: focal
- sytest-tag: bionic
postgres: multi-postgres
workers: workers
@@ -269,10 +277,9 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
python-version: "3.9"
- run: .ci/scripts/test_export_data_command.sh
portdb:
@@ -284,8 +291,8 @@ jobs:
strategy:
matrix:
include:
- python-version: "3.7"
postgres-version: "10"
- python-version: "3.6"
postgres-version: "9.6"
- python-version: "3.10"
postgres-version: "14"
@@ -307,29 +314,24 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
complement:
if: "${{ !failure() && !cancelled() }}"
if: ${{ !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
container:
# https://github.com/matrix-org/complement/blob/master/dockerfiles/ComplementCIBuildkite.Dockerfile
image: matrixdotorg/complement:latest
env:
CI: true
ports:
- 8448:8448
volumes:
- /var/run/docker.sock:/var/run/docker.sock
steps:
- name: Run actions/checkout@v2 for synapse
@@ -337,35 +339,79 @@ jobs:
with:
path: synapse
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
- run: |
set -o pipefail
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fall back to master.
- name: Checkout complement
shell: bash
name: Run Complement Tests
run: |
mkdir -p complement
# Attempt to use the version of complement which best matches the current
# build. Depending on whether this is a PR or release, etc. we need to
# use different fallbacks.
#
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
# for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
# (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("master").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "master"; do
# Skip empty branch names and merge commits.
if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
continue
fi
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
# Build initial Synapse image
- run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
working-directory: synapse
# Build a ready-to-run Synapse image based on the initial image above.
# This new image includes a config file, keys for signing and TLS, and
# other settings to make it suitable for testing under Complement.
- run: docker build -t complement-synapse -f Synapse.Dockerfile .
working-directory: complement/dockerfiles
# Run Complement
- run: go test -v -tags synapse_blacklist,msc2403,msc2946,msc3083 ./tests/...
env:
COMPLEMENT_BASE_IMAGE: complement-synapse:latest
working-directory: complement
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}
needs:
- check-sampleconfig
- lint
- lint-crlf
- lint-newsfile
- lint-sdist
- trial
- trial-olddeps
- sytest
- export-data
- portdb
- complement
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
with:
needs: ${{ toJSON(needs) }}
- name: Set build result
env:
NEEDS_CONTEXT: ${{ toJSON(needs) }}
# the `jq` incantation dumps out a series of "<job> <result>" lines.
# we set it to an intermediate variable to avoid a pipe, which makes it
# hard to set $rc.
run: |
rc=0
results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT)
while read job result ; do
# The newsfile lint may be skipped on non PR builds
if [ $result == "skipped" ] && [ $job == "lint-newsfile" ]; then
continue
fi
if [ "$result" != "success" ]; then
echo "::set-failed ::Job $job returned $result"
rc=1
fi
done <<< $results
exit $rc
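A worked example of the `jq` incantation on a hypothetical needs context:

```sh
NEEDS_CONTEXT='{"lint":{"result":"success"},"trial":{"result":"failure"}}'
jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< "$NEEDS_CONTEXT"
# prints:
#   lint success
#   trial failure
```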


@@ -6,27 +6,16 @@ on:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
extras: "all"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
- uses: actions/setup-python@v2
- run: .ci/patch_for_twisted_trunk.sh
- run: pip install tox
- run: tox -e mypy
trial:
runs-on: ubuntu-latest
@@ -34,15 +23,14 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: "3.x"
extras: "all test"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- run: poetry run trial --jobs 2 tests
python-version: 3.6
- run: .ci/patch_for_twisted_trunk.sh
- run: pip install tox
- run: tox -e py
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Logs are most useful when the command fails, so always include them.
@@ -67,23 +55,11 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
# but the sytest-synapse container expects it to be in /venv/.
# We symlink it before running poetry so that poetry actually
# ends up installing to `/venv`.
run: |
ln -s -T /venv /src/.venv
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
run: .ci/patch_for_twisted_trunk.sh
working-directory: /src
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
env:
# Use offline mode to avoid reinstalling the pinned version of
# twisted.
OFFLINE: 1
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
@@ -96,51 +72,6 @@ jobs:
/logs/results.tap
/logs/**/*.log*
complement:
if: "${{ !failure() && !cancelled() }}"
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
# This step is specific to the 'Twisted trunk' test run:
- name: Patch dependencies
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
pipx install poetry==1.1.12
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
# NOT IN 1.1.12 poetry lock --check
working-directory: synapse
- run: |
set -o pipefail
TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
# open an issue if the build fails, so we know about it.
open-issue:
if: failure()
@@ -148,7 +79,6 @@ jobs:
- mypy
- trial
- sytest
- complement
runs-on: ubuntu-latest

.gitignore (10 lines changed)

@@ -15,9 +15,6 @@ _trial_temp*/
.DS_Store
__pycache__/
# We do want the poetry lockfile.
!poetry.lock
# stuff that is likely to exist when you run a server locally
/*.db
/*.log
@@ -33,9 +30,6 @@ __pycache__/
/media_store/
/uploads
# For direnv users
/.envrc
# IDEs
/.idea/
/.ropeproject/
@@ -56,7 +50,3 @@ __pycache__/
# docs
book/
# complement
/complement-*
/master.tar.gz

CHANGES.md (10024 lines changed; diff suppressed because it is too large)

MANIFEST.in (new file, 56 lines)

@@ -0,0 +1,56 @@
include synctl
include LICENSE
include VERSION
include *.rst
include *.md
include demo/README
include demo/demo.tls.dh
include demo/*.py
include demo/*.sh
include synapse/py.typed
recursive-include synapse/storage *.sql
recursive-include synapse/storage *.sql.postgres
recursive-include synapse/storage *.sql.sqlite
recursive-include synapse/storage *.py
recursive-include synapse/storage *.txt
recursive-include synapse/storage *.md
recursive-include docs *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
recursive-include tests *.pem
recursive-include tests *.p8
recursive-include tests *.crt
recursive-include tests *.key
recursive-include synapse/res *
recursive-include synapse/static *.css
recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js
exclude .codecov.yml
exclude .coveragerc
exclude .dockerignore
exclude .editorconfig
exclude Dockerfile
exclude mypy.ini
exclude sytest-blacklist
exclude test_postgresql.sh
include book.toml
include pyproject.toml
recursive-include changelog.d *
prune .circleci
prune .github
prune .ci
prune contrib
prune debian
prune demo/etc
prune docker
prune snap
prune stubs

README.rst

@@ -55,7 +55,7 @@ solutions. The hope is for Matrix to act as the building blocks for a new
generation of fully open and interoperable messaging and VoIP apps for the
internet.
Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
team, written in Python 3/Twisted.
In Matrix, every user runs one or more Matrix clients, which connect through to
@@ -246,7 +246,7 @@ Password reset
==============
Users can reset their password through their client. Alternatively, a server admin
can reset a user's password using the `admin API <docs/admin_api/user_admin_api.md#reset-password>`_
can reset a user's password using the `admin API <docs/admin_api/user_admin_api.rst#reset-password>`_
or by directly editing the database as shown below.
First calculate the hash of the new password::
@@ -293,42 +293,36 @@ directory of your choice::
git clone https://github.com/matrix-org/synapse.git
cd synapse
Synapse has a number of external dependencies. We maintain a fixed development
environment using `Poetry <https://python-poetry.org/>`_. First, install poetry. We recommend::
Synapse has a number of external dependencies that are easiest
to install using pip and a virtualenv::
pip install --user pipx
pipx install poetry
as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`_
for other installation methods.) Then ask poetry to create a virtual environment
from the project and install Synapse's dependencies::
poetry install --extras "all test"
python3 -m venv ./env
source ./env/bin/activate
pip install -e ".[all,dev]"
This will run a process of downloading and installing all the needed
dependencies into a virtual env.
dependencies into a virtual env. If any dependencies fail to install,
try installing the failing modules individually::
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`::
pip install -e "module-name"
poetry run ./demo/start.sh
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
(to stop, you can use ``poetry run ./demo/stop.sh``)
./demo/start.sh
See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
for more information.
(to stop, you can use `./demo/stop.sh`)
If you just want to start a single instance of the app and run it directly::
# Create the homeserver.yaml config once
poetry run synapse_homeserver \
python -m synapse.app.homeserver \
--server-name my.domain.name \
--config-path homeserver.yaml \
--generate-config \
--report-stats=[yes|no]
# Start the app
poetry run synapse_homeserver --config-path homeserver.yaml
python -m synapse.app.homeserver --config-path homeserver.yaml
Running the unit tests
@@ -337,7 +331,7 @@ Running the unit tests
After getting up and running, you may wish to run Synapse's unit tests to
check that everything is installed correctly::
poetry run trial tests
trial tests
This should end with a 'PASSED' result (note that exact numbers will
differ)::

book.toml

@@ -34,14 +34,6 @@ additional-css = [
"docs/website_files/table-of-contents.css",
"docs/website_files/remove-nav-buttons.css",
"docs/website_files/indent-section-headers.css",
"docs/website_files/version-picker.css",
]
additional-js = [
"docs/website_files/table-of-contents.js",
"docs/website_files/version-picker.js",
"docs/website_files/version.js",
]
theme = "docs/website_files/theme"
[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"


@@ -0,0 +1 @@
Add a new version of the delete room admin API `DELETE /_synapse/admin/v2/rooms/<room_id>` to run it in the background. Contributed by @dklimpel.


@@ -0,0 +1 @@
Allow the admin [Delete Room API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#delete-room-api) to block a room without the need to join it.

changelog.d/11230.bugfix (new file)

@@ -0,0 +1,2 @@
Fix a long-standing bug wherein display names or avatar URLs containing null bytes cause an internal server error
when stored in the DB.


@@ -0,0 +1 @@
Support filtering by relation senders & types per [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440).

changelog.d/11242.misc (new file)

@@ -0,0 +1 @@
Split out federated PDU retrieval function into a non-cached version.

changelog.d/11247.misc (new file)

@@ -0,0 +1 @@
Clean up code relating to to-device messages and sending ephemeral events to application services.

changelog.d/11278.misc (new file)

@@ -0,0 +1 @@
Fix a small typo in the error response when a relation type other than 'm.annotation' is passed to `GET /rooms/{room_id}/aggregations/{event_id}`.

changelog.d/11280.misc (new file)

@@ -0,0 +1 @@
Drop unused db tables `room_stats_historical` and `user_stats_historical`.

changelog.d/11281.doc (new file)

@@ -0,0 +1 @@
Suggest users of the Debian packages add configuration to `/etc/matrix-synapse/conf.d/` to prevent, upon upgrade, being asked to choose between their configuration and the maintainer's.

changelog.d/11282.misc (new file)

@@ -0,0 +1 @@
Require all files in synapse/ and tests/ to pass mypy unless specifically excluded.

changelog.d/11285.misc (new file)

@@ -0,0 +1 @@
Require all files in synapse/ and tests/ to pass mypy unless specifically excluded.

changelog.d/11286.doc (new file)

@@ -0,0 +1 @@
Fix typo in the word `available` and fix HTTP method (should be `GET`) for the `username_available` admin API. Contributed by Stanislav Motylkov.

changelog.d/11287.misc (new file)

@@ -0,0 +1 @@
Add missing type hints to `synapse.app`.

changelog.d/11288.bugfix (new file)

@@ -0,0 +1 @@
Fix a long-standing bug where uploading extremely thin images (e.g. 1000x1) would fail. Contributed by @Neeeflix.

changelog.d/11292.misc (new file)

@@ -0,0 +1 @@
Remove unused parameters on `FederationEventHandler._check_event_auth`.

changelog.d/11297.misc (new file)

@@ -0,0 +1 @@
Add type hints to `synapse._scripts`.

changelog.d/11298.doc (new file)

@@ -0,0 +1 @@
Add Single Sign-On, SAML and CAS pages to the documentation.

changelog.d/11303.misc (new file)

@@ -0,0 +1 @@
Fix an issue which prevented the 'remove deleted devices from device_inbox column' background process from running when updating from a recent Synapse version.

changelog.d/11307.misc (new file)

@@ -0,0 +1 @@
Add type hints to storage classes.

changelog.d/11310.misc (new file)

@@ -0,0 +1 @@
Add type hints to storage classes.

changelog.d/11311.misc (new file)

@@ -0,0 +1 @@
Add type hints to storage classes.

changelog.d/11312.misc (new file)

@@ -0,0 +1 @@
Add type hints to storage classes.

changelog.d/11313.misc (new file)

@@ -0,0 +1 @@
Add type hints to storage classes.

changelog.d/11314.misc (new file)

@@ -0,0 +1 @@
Add type hints to storage classes.

changelog.d/11316.misc (new file)

@@ -0,0 +1 @@
Add type hints to storage classes.

changelog.d/11321.misc (new file)

@@ -0,0 +1 @@
Add type hints to `synapse.util`.

changelog.d/11322.misc (new file)

@@ -0,0 +1 @@
Add type hints to storage classes.

changelog.d/11323.misc (new file)

@@ -0,0 +1 @@
Improve type annotations in Synapse's test suite.

changelog.d/11327.misc (new file)

@@ -0,0 +1 @@
Test that room alias deletion works as intended.

changelog.d/11332.misc (new file)

@@ -0,0 +1 @@
Add type hints to storage classes.


@@ -0,0 +1 @@
Support the stable version of [MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778): the `m.login.application_service` login type. Contributed by @tulir.

changelog.d/11339.misc (new file)

@@ -0,0 +1 @@
Add type hints to storage classes.

changelog.d/11342.misc (new file)

@@ -0,0 +1 @@
Add type hints to storage classes.

changelog.d/11357.misc (new file)

@@ -0,0 +1 @@
Add a development script for visualising the storage class inheritance hierarchy.


@@ -16,7 +16,6 @@
""" Starts a synapse client console. """
import argparse
import binascii
import cmd
import getpass
import json
@@ -27,8 +26,9 @@ import urllib
from http import TwistedHttpClient
from typing import Optional
import nacl.encoding
import nacl.signing
import urlparse
from signedjson.key import NACL_ED25519, decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from twisted.internet import defer, reactor, threads
@@ -41,6 +41,7 @@ TRUSTED_ID_SERVERS = ["localhost:8001"]
class SynapseCmd(cmd.Cmd):
"""Basic synapse command-line processor.
This processes commands from the user and calls the relevant HTTP methods.
@@ -419,8 +420,8 @@ class SynapseCmd(cmd.Cmd):
pubKey = None
pubKeyObj = yield self.http_client.do_request("GET", url)
if "public_key" in pubKeyObj:
pubKey = decode_verify_key_bytes(
NACL_ED25519, binascii.unhexlify(pubKeyObj["public_key"])
pubKey = nacl.signing.VerifyKey(
pubKeyObj["public_key"], encoder=nacl.encoding.HexEncoder
)
else:
print("No public key found in pubkey response!")


@@ -14,7 +14,6 @@ services:
# failure
restart: unless-stopped
# See the readme for a full documentation of the environment settings
# NOTE: You must edit homeserver.yaml to use postgres, it defaults to sqlite
environment:
- SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
volumes:


@@ -1,125 +0,0 @@
# Setting up Synapse with Workers using Docker Compose
This directory describes how to deploy and manage Synapse and workers via [Docker Compose](https://docs.docker.com/compose/).
Example worker configuration files can be found [here](workers).
All examples and snippets assume that your Synapse service is called `synapse` in your Docker Compose file.
An example Docker Compose file can be found [here](docker-compose.yaml).
## Worker Service Examples in Docker Compose
In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER`, or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`.
### Generic Worker Example
```yaml
synapse-generic-worker-1:
image: matrixdotorg/synapse:latest
container_name: synapse-generic-worker-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
healthcheck:
test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
start_period: "5s"
interval: "15s"
timeout: "5s"
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.generic_worker
# Expose port if required so your reverse proxy can send requests to this worker
# Port configuration will depend on how the http listener is defined in the worker configuration file
ports:
- 8081:8081
depends_on:
- synapse
```
### Federation Sender Example
Please note: the federation sender does not receive REST API calls, so no exposed ports are required.
```yaml
synapse-federation-sender-1:
image: matrixdotorg/synapse:latest
container_name: synapse-federation-sender-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
healthcheck:
disable: true
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.federation_sender
depends_on:
- synapse
```
## `homeserver.yaml` Configuration
### Enable Redis
Locate the `redis` section of your `homeserver.yaml` and enable and configure it:
```yaml
redis:
enabled: true
host: redis
port: 6379
# password: <secret_password>
```
This assumes that your Redis service is called `redis` in your Docker Compose file.
### Add a replication Listener
Locate the `listeners` section of your `homeserver.yaml` and add the following replication listener:
```yaml
listeners:
# Other listeners
- port: 9093
type: http
resources:
- names: [replication]
```
This listener is used by the workers for replication and is referred to in worker config files using the following settings:
```yaml
worker_replication_host: synapse
worker_replication_http_port: 9093
```
### Add Workers to `instance_map`
Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers:
```yaml
instance_map:
synapse-generic-worker-1: # The worker_name setting in your worker configuration file
host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file
port: 8034 # The port assigned to the replication listener in your worker config file
synapse-federation-sender-1:
host: synapse-federation-sender-1
port: 8034
```
### Configure Federation Senders
This section is applicable if you are using federation senders (`synapse.app.federation_sender`). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
```yaml
# This will disable federation sending on the main Synapse instance
send_federation: false
federation_sender_instances:
- synapse-federation-sender-1 # The worker_name setting in your federation sender worker configuration file
```
## Other Worker Types
Using the concepts shown here, it is possible to create other worker types in Docker Compose, as sketched below. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
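For instance, here is a minimal sketch of a pusher worker following the same pattern (the service name, worker config path, and settings are assumptions for illustration, not files shipped in this directory):
```yaml
synapse-pusher-1:
  image: matrixdotorg/synapse:latest
  container_name: synapse-pusher-1
  restart: unless-stopped
  # Hypothetical worker config file; create it alongside the other worker configs
  entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-pusher-1.yaml"]
  volumes:
    - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
  environment:
    SYNAPSE_WORKER: synapse.app.pusher
  depends_on:
    - synapse
```
Remember to add any such worker to `instance_map` and, where relevant, to the corresponding `*_instances` setting in your `homeserver.yaml`.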

View File

@@ -1,77 +0,0 @@
networks:
backend:
services:
postgres:
image: postgres:latest
restart: unless-stopped
volumes:
- ${VOLUME_PATH}/var/lib/postgresql/data:/var/lib/postgresql/data:rw
networks:
- backend
environment:
POSTGRES_DB: synapse
POSTGRES_USER: synapse_user
POSTGRES_PASSWORD: postgres
POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=C
redis:
image: redis:latest
restart: unless-stopped
networks:
- backend
synapse:
image: matrixdotorg/synapse:latest
container_name: synapse
restart: unless-stopped
volumes:
- ${VOLUME_PATH}/data:/data:rw
ports:
- 8008:8008
networks:
- backend
environment:
SYNAPSE_CONFIG_DIR: /data
SYNAPSE_CONFIG_PATH: /data/homeserver.yaml
depends_on:
- postgres
synapse-generic-worker-1:
image: matrixdotorg/synapse:latest
container_name: synapse-generic-worker-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
healthcheck:
test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
start_period: "5s"
interval: "15s"
timeout: "5s"
networks:
- backend
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.generic_worker
# Expose port if required so your reverse proxy can send requests to this worker
# Port configuration will depend on how the http listener is defined in the worker configuration file
ports:
- 8081:8081
depends_on:
- synapse
synapse-federation-sender-1:
image: matrixdotorg/synapse:latest
container_name: synapse-federation-sender-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
healthcheck:
disable: true
networks:
- backend
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.federation_sender
depends_on:
- synapse

View File

@@ -1,14 +0,0 @@
worker_app: synapse.app.federation_sender
worker_name: synapse-federation-sender-1
# The replication listener on the main synapse process.
worker_replication_host: synapse
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8034
resources:
- names: [replication]
worker_log_config: /data/federation_sender.log.config

View File

@@ -1,19 +0,0 @@
worker_app: synapse.app.generic_worker
worker_name: synapse-generic-worker-1
# The replication listener on the main synapse process.
worker_replication_host: synapse
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8034
resources:
- names: [replication]
- type: http
port: 8081
x_forwarded: true
resources:
- names: [client, federation]
worker_log_config: /data/worker.log.config

View File

@@ -0,0 +1,165 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import curses
from curses.ascii import isprint
from twisted.internet import reactor
class CursesStdIO:
def __init__(self, stdscr, callback=None):
self.statusText = "Synapse test app -"
self.searchText = ""
self.stdscr = stdscr
self.logLine = ""
self.callback = callback
self._setup()
def _setup(self):
self.stdscr.nodelay(1) # Make non blocking
self.rows, self.cols = self.stdscr.getmaxyx()
self.lines = []
curses.use_default_colors()
self.paintStatus(self.statusText)
self.stdscr.refresh()
def set_callback(self, callback):
self.callback = callback
def fileno(self):
"""We want to select on FD 0"""
return 0
def connectionLost(self, reason):
self.close()
def print_line(self, text):
"""add a line to the internal list of lines"""
self.lines.append(text)
self.redraw()
def print_log(self, text):
self.logLine = text
self.redraw()
def redraw(self):
"""method for redisplaying lines based on internal list of lines"""
self.stdscr.clear()
self.paintStatus(self.statusText)
i = 0
index = len(self.lines) - 1
while i < (self.rows - 3) and index >= 0:
self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index], curses.A_NORMAL)
i = i + 1
index = index - 1
self.printLogLine(self.logLine)
self.stdscr.refresh()
def paintStatus(self, text):
if len(text) > self.cols:
raise RuntimeError("TextTooLongError")
self.stdscr.addstr(
self.rows - 2, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
)
def printLogLine(self, text):
self.stdscr.addstr(
0, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
)
def doRead(self):
"""Input is ready!"""
curses.noecho()
c = self.stdscr.getch() # read a character
if c == curses.KEY_BACKSPACE:
self.searchText = self.searchText[:-1]
elif c == curses.KEY_ENTER or c == 10:
text = self.searchText
self.searchText = ""
self.print_line(">> %s" % text)
try:
if self.callback:
self.callback.on_line(text)
except Exception as e:
self.print_line(str(e))
self.stdscr.refresh()
elif isprint(c):
if len(self.searchText) == self.cols - 2:
return
self.searchText = self.searchText + chr(c)
self.stdscr.addstr(
self.rows - 1,
0,
self.searchText + (" " * (self.cols - len(self.searchText) - 2)),
)
self.paintStatus(self.statusText + " %d" % len(self.searchText))
self.stdscr.move(self.rows - 1, len(self.searchText))
self.stdscr.refresh()
def logPrefix(self):
return "CursesStdIO"
def close(self):
"""clean up"""
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
class Callback:
def __init__(self, stdio):
self.stdio = stdio
def on_line(self, text):
self.stdio.print_line(text)
def main(stdscr):
screen = CursesStdIO(stdscr) # create Screen object
callback = Callback(screen)
screen.set_callback(callback)
stdscr.refresh()
reactor.addReader(screen)
reactor.run()
screen.close()
if __name__ == "__main__":
curses.wrapper(main)

View File

@@ -0,0 +1,367 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is an example of using the server to server implementation to do a
basic chat style thing. It accepts commands from stdin and outputs to stdout.
It assumes that ucids are of the form <user>@<domain>, and uses <domain> as
the address of the remote home server to hit.
Usage:
python test_messaging.py <port>
Currently assumes the local address is localhost:<port>
"""
import argparse
import curses
import json
import logging
import os
import re
import cursesio
from twisted.internet import defer, reactor
from twisted.python import log
from synapse.app.homeserver import SynapseHomeServer
from synapse.federation import ReplicationHandler
from synapse.federation.units import Pdu
from synapse.util import origin_from_ucid
# from synapse.logging.utils import log_function
logger = logging.getLogger("example")
def exception_errback(failure):
logging.exception(failure)
class InputOutput:
"""This is responsible for basic I/O so that a user can interact with
the example app.
"""
def __init__(self, screen, user):
self.screen = screen
self.user = user
def set_home_server(self, server):
self.server = server
def on_line(self, line):
"""This is where we process commands."""
try:
m = re.match(r"^join (\S+)$", line)
if m:
# The `sender` wants to join a room.
(room_name,) = m.groups()
self.print_line("%s joining %s" % (self.user, room_name))
self.server.join_room(room_name, self.user, self.user)
# self.print_line("OK.")
return
m = re.match(r"^invite (\S+) (\S+)$", line)
if m:
# `sender` wants to invite someone to a room
room_name, invitee = m.groups()
self.print_line("%s invited to %s" % (invitee, room_name))
self.server.invite_to_room(room_name, self.user, invitee)
# self.print_line("OK.")
return
m = re.match(r"^send (\S+) (.*)$", line)
if m:
# `sender` wants to message a room
room_name, body = m.groups()
self.print_line("%s send to %s" % (self.user, room_name))
self.server.send_message(room_name, self.user, body)
# self.print_line("OK.")
return
m = re.match(r"^backfill (\S+)$", line)
if m:
# we want to backfill a room
(room_name,) = m.groups()
self.print_line("backfill %s" % room_name)
self.server.backfill(room_name)
return
self.print_line("Unrecognized command")
except Exception as e:
logger.exception(e)
def print_line(self, text):
self.screen.print_line(text)
def print_log(self, text):
self.screen.print_log(text)
class IOLoggerHandler(logging.Handler):
def __init__(self, io):
logging.Handler.__init__(self)
self.io = io
def emit(self, record):
if record.levelno < logging.WARN:
return
msg = self.format(record)
self.io.print_log(msg)
class Room:
"""Used to store (in memory) the current membership state of a room, and
which home servers we should send PDUs associated with the room to.
"""
def __init__(self, room_name):
self.room_name = room_name
self.invited = set()
self.participants = set()
self.servers = set()
self.oldest_server = None
self.have_got_metadata = False
def add_participant(self, participant):
"""Someone has joined the room"""
self.participants.add(participant)
self.invited.discard(participant)
server = origin_from_ucid(participant)
self.servers.add(server)
if not self.oldest_server:
self.oldest_server = server
def add_invited(self, invitee):
"""Someone has been invited to the room"""
self.invited.add(invitee)
self.servers.add(origin_from_ucid(invitee))
class HomeServer(ReplicationHandler):
"""A very basic home server implementation that allows people to join a
room and then invite other people.
"""
def __init__(self, server_name, replication_layer, output):
self.server_name = server_name
self.replication_layer = replication_layer
self.replication_layer.set_handler(self)
self.joined_rooms = {}
self.output = output
def on_receive_pdu(self, pdu):
"""We just received a PDU"""
pdu_type = pdu.pdu_type
if pdu_type == "sy.room.message":
self._on_message(pdu)
elif pdu_type == "sy.room.member" and "membership" in pdu.content:
if pdu.content["membership"] == "join":
self._on_join(pdu.context, pdu.state_key)
elif pdu.content["membership"] == "invite":
self._on_invite(pdu.origin, pdu.context, pdu.state_key)
else:
self.output.print_line(
"#%s (unrec) %s = %s"
% (pdu.context, pdu.pdu_type, json.dumps(pdu.content))
)
def _on_message(self, pdu):
"""We received a message"""
self.output.print_line(
"#%s %s %s" % (pdu.context, pdu.content["sender"], pdu.content["body"])
)
def _on_join(self, context, joinee):
"""Someone has joined a room, either a remote user or a local user"""
room = self._get_or_create_room(context)
room.add_participant(joinee)
self.output.print_line("#%s %s %s" % (context, joinee, "*** JOINED"))
def _on_invite(self, origin, context, invitee):
"""Someone has been invited"""
room = self._get_or_create_room(context)
room.add_invited(invitee)
self.output.print_line("#%s %s %s" % (context, invitee, "*** INVITED"))
if not room.have_got_metadata and origin is not self.server_name:
logger.debug("Get room state")
self.replication_layer.get_state_for_context(origin, context)
room.have_got_metadata = True
@defer.inlineCallbacks
def send_message(self, room_name, sender, body):
"""Send a message to a room!"""
destinations = yield self.get_servers_for_context(room_name)
try:
yield self.replication_layer.send_pdu(
Pdu.create_new(
context=room_name,
pdu_type="sy.room.message",
content={"sender": sender, "body": body},
origin=self.server_name,
destinations=destinations,
)
)
except Exception as e:
logger.exception(e)
@defer.inlineCallbacks
def join_room(self, room_name, sender, joinee):
"""Join a room!"""
self._on_join(room_name, joinee)
destinations = yield self.get_servers_for_context(room_name)
try:
pdu = Pdu.create_new(
context=room_name,
pdu_type="sy.room.member",
is_state=True,
state_key=joinee,
content={"membership": "join"},
origin=self.server_name,
destinations=destinations,
)
yield self.replication_layer.send_pdu(pdu)
except Exception as e:
logger.exception(e)
@defer.inlineCallbacks
def invite_to_room(self, room_name, sender, invitee):
"""Invite someone to a room!"""
self._on_invite(self.server_name, room_name, invitee)
destinations = yield self.get_servers_for_context(room_name)
try:
yield self.replication_layer.send_pdu(
Pdu.create_new(
context=room_name,
is_state=True,
pdu_type="sy.room.member",
state_key=invitee,
content={"membership": "invite"},
origin=self.server_name,
destinations=destinations,
)
)
except Exception as e:
logger.exception(e)
def backfill(self, room_name, limit=5):
room = self.joined_rooms.get(room_name)
if not room:
return
dest = room.oldest_server
return self.replication_layer.backfill(dest, room_name, limit)
def _get_room_remote_servers(self, room_name):
return list(self.joined_rooms.setdefault(room_name, Room(room_name)).servers)
def _get_or_create_room(self, room_name):
return self.joined_rooms.setdefault(room_name, Room(room_name))
def get_servers_for_context(self, context):
return defer.succeed(
self.joined_rooms.setdefault(context, Room(context)).servers
)
def main(stdscr):
parser = argparse.ArgumentParser()
parser.add_argument("user", type=str)
parser.add_argument("-v", "--verbose", action="count")
args = parser.parse_args()
user = args.user
server_name = origin_from_ucid(user)
# Set up logging
root_logger = logging.getLogger()
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
)
if not os.path.exists("logs"):
os.makedirs("logs")
fh = logging.FileHandler("logs/%s" % user)
fh.setFormatter(formatter)
root_logger.addHandler(fh)
root_logger.setLevel(logging.DEBUG)
# Hack: The only way to get it to stop logging to sys.stderr :(
log.theLogPublisher.observers = []
observer = log.PythonLoggingObserver()
observer.start()
# Set up synapse server
curses_stdio = cursesio.CursesStdIO(stdscr)
input_output = InputOutput(curses_stdio, user)
curses_stdio.set_callback(input_output)
app_hs = SynapseHomeServer(server_name, db_name="dbs/%s" % user)
replication = app_hs.get_replication_layer()
hs = HomeServer(server_name, replication, curses_stdio)
input_output.set_home_server(hs)
# Add input_output logger
io_logger = IOLoggerHandler(input_output)
io_logger.setFormatter(formatter)
root_logger.addHandler(io_logger)
# Start!
try:
port = int(server_name.split(":")[1])
except Exception:
port = 12345
app_hs.get_http_server().start_listening(port)
reactor.addReader(curses_stdio)
reactor.run()
if __name__ == "__main__":
curses.wrapper(main)

View File

@@ -66,18 +66,6 @@
],
"title": "Dashboards",
"type": "dashboards"
},
{
"asDropdown": false,
"icon": "external link",
"includeVars": false,
"keepTime": false,
"tags": [],
"targetBlank": true,
"title": "Synapse Documentation",
"tooltip": "Open Documentation",
"type": "link",
"url": "https://matrix-org.github.io/synapse/latest/"
}
],
"panels": [
@@ -10901,4 +10889,4 @@
"title": "Synapse",
"uid": "000000012",
"version": 100
}
}

View File

@@ -1,3 +1,11 @@
import argparse
import cgi
import datetime
import json
import pydot
import urllib2
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,25 +20,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import cgi
import datetime
import json
import urllib.request
from typing import List
import pydot
def make_name(pdu_id, origin):
return "%s@%s" % (pdu_id, origin)
def make_name(pdu_id: str, origin: str) -> str:
return f"{pdu_id}@{origin}"
def make_graph(pdus: List[dict], filename_prefix: str) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by querying a homeserver.
"""
def make_graph(pdus, room, filename_prefix):
pdu_map = {}
node_map = {}
@@ -116,10 +111,10 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None:
graph.write_svg("%s.svg" % filename_prefix, prog="dot")
def get_pdus(host: str, room: str) -> List[dict]:
def get_pdus(host, room):
transaction = json.loads(
urllib.request.urlopen(
f"http://{host}/_matrix/federation/v1/context/{room}/"
urllib2.urlopen(
"http://%s/_matrix/federation/v1/context/%s/" % (host, room)
).read()
)
@@ -146,4 +141,4 @@ if __name__ == "__main__":
pdus = get_pdus(host, room)
make_graph(pdus, prefix)
make_graph(pdus, room, prefix)

View File

@@ -14,31 +14,22 @@
import argparse
import cgi
import datetime
import html
import json
import sqlite3
import pydot
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by reading from a Synapse SQLite database.
"""
def make_graph(db_name, room_id, file_prefix, limit):
conn = sqlite3.connect(db_name)
sql = "SELECT room_version FROM rooms WHERE room_id = ?"
c = conn.execute(sql, (room_id,))
room_version = KNOWN_ROOM_VERSIONS[c.fetchone()[0]]
sql = (
"SELECT json, internal_metadata FROM event_json as j "
"SELECT json FROM event_json as j "
"INNER JOIN events as e ON e.event_id = j.event_id "
"WHERE j.room_id = ?"
)
@@ -52,10 +43,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
c = conn.execute(sql, args)
events = [
make_event_from_dict(json.loads(e[0]), room_version, json.loads(e[1]))
for e in c.fetchall()
]
events = [FrozenEvent(json.loads(e[0])) for e in c.fetchall()]
events.sort(key=lambda e: e.depth)
@@ -96,7 +84,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
"name": event.event_id,
"type": event.type,
"state_key": event.get("state_key", None),
"content": html.escape(content, quote=True),
"content": cgi.escape(content, quote=True),
"time": t,
"depth": event.depth,
"state_group": state_group,
@@ -108,11 +96,11 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
graph.add_node(node)
for event in events:
for prev_id in event.prev_event_ids():
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")
end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
node_map[prev_id] = end_node
graph.add_node(end_node)
@@ -124,7 +112,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
if len(event_ids) <= 1:
continue
cluster = pydot.Cluster(str(group), label=f"<State Group: {str(group)}>")
cluster = pydot.Cluster(str(group), label="<State Group: %s>" % (str(group),))
for event_id in event_ids:
cluster.add_node(node_map[event_id])
@@ -138,7 +126,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by talking "
"to the given Synapse SQLite file to get the list of PDUs. \n"
"to the given homeserver to get the list of PDUs. \n"
"Requires pydot."
)
parser.add_argument(

View File

@@ -1,3 +1,13 @@
import argparse
import cgi
import datetime
import pydot
import simplejson as json
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,35 +22,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import html
import json
import pydot
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.util.frozenutils import unfreeze
def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by reading line-delimited JSON from a file.
"""
def make_graph(file_name, room_id, file_prefix, limit):
print("Reading lines")
with open(file_name) as f:
lines = f.readlines()
print("Read lines")
# Figure out the room version; assume the first line is the create event.
room_version = KNOWN_ROOM_VERSIONS[
json.loads(lines[0]).get("content", {}).get("room_version")
]
events = [make_event_from_dict(json.loads(line), room_version) for line in lines]
events = [FrozenEvent(json.loads(line)) for line in lines]
print("Loaded events.")
@@ -76,8 +66,8 @@ def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
content.append(
"<b>%s</b>: %s,"
% (
html.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"),
html.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"),
cgi.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"),
cgi.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"),
)
)
@@ -111,11 +101,11 @@ def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
print("Created Nodes")
for event in events:
for prev_id in event.prev_event_ids():
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")
end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
node_map[prev_id] = end_node
graph.add_node(end_node)
@@ -149,7 +139,8 @@ if __name__ == "__main__":
)
parser.add_argument("-l", "--limit", help="Only retrieve the last N events.")
parser.add_argument("event_file")
parser.add_argument("room")
args = parser.parse_args()
make_graph(args.event_file, args.prefix, args.limit)
make_graph(args.event_file, args.room, args.prefix, args.limit)

View File

@@ -0,0 +1,295 @@
#!/usr/bin/env python
"""
This is an attempt at bridging matrix clients into a Jitsi Meet room via Matrix
video call. It uses hard-coded XML strings over XMPP BOSH. It can display one
of the streams from the Jitsi bridge until the second lot of SDP comes down and
we set the remote SDP, at which point the stream ends. Our video never gets to
the bridge.
Requires:
npm install jquery jsdom
"""
import json
import subprocess
import time
import gevent
import grequests
from BeautifulSoup import BeautifulSoup
ACCESS_TOKEN = ""
MATRIXBASE = "https://matrix.org/_matrix/client/api/v1/"
MYUSERNAME = "@davetest:matrix.org"
HTTPBIND = "https://meet.jit.si/http-bind"
# HTTPBIND = 'https://jitsi.vuc.me/http-bind'
# ROOMNAME = "matrix"
ROOMNAME = "pibble"
HOST = "guest.jit.si"
# HOST="jitsi.vuc.me"
TURNSERVER = "turn.guest.jit.si"
# TURNSERVER="turn.jitsi.vuc.me"
ROOMDOMAIN = "meet.jit.si"
# ROOMDOMAIN="conference.jitsi.vuc.me"
class TrivialMatrixClient:
def __init__(self, access_token):
self.token = None
self.access_token = access_token
def getEvent(self):
while True:
url = (
MATRIXBASE
+ "events?access_token="
+ self.access_token
+ "&timeout=60000"
)
if self.token:
url += "&from=" + self.token
req = grequests.get(url)
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print("incoming from matrix", obj)
if "end" not in obj:
continue
self.token = obj["end"]
if len(obj["chunk"]):
return obj["chunk"][0]
def joinRoom(self, roomId):
url = MATRIXBASE + "rooms/" + roomId + "/join?access_token=" + self.access_token
print(url)
headers = {"Content-Type": "application/json"}
req = grequests.post(url, headers=headers, data="{}")
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print("response: ", obj)
def sendEvent(self, roomId, evType, event):
url = (
MATRIXBASE
+ "rooms/"
+ roomId
+ "/send/"
+ evType
+ "?access_token="
+ self.access_token
)
print(url)
print(json.dumps(event))
headers = {"Content-Type": "application/json"}
req = grequests.post(url, headers=headers, data=json.dumps(event))
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print("response: ", obj)
xmppClients = {}
def matrixLoop():
while True:
ev = matrixCli.getEvent()
print(ev)
if ev["type"] == "m.room.member":
print("membership event")
if ev["membership"] == "invite" and ev["state_key"] == MYUSERNAME:
roomId = ev["room_id"]
print("joining room %s" % (roomId))
matrixCli.joinRoom(roomId)
elif ev["type"] == "m.room.message":
if ev["room_id"] in xmppClients:
print("already have a bridge for that user, ignoring")
continue
print("got message, connecting")
xmppClients[ev["room_id"]] = TrivialXmppClient(ev["room_id"], ev["user_id"])
gevent.spawn(xmppClients[ev["room_id"]].xmppLoop)
elif ev["type"] == "m.call.invite":
print("Incoming call")
# sdp = ev['content']['offer']['sdp']
# print "sdp: %s" % (sdp)
# xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
# gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
elif ev["type"] == "m.call.answer":
print("Call answered")
sdp = ev["content"]["answer"]["sdp"]
if ev["room_id"] not in xmppClients:
print("We didn't have a call for that room")
continue
# should probably check call ID too
xmppCli = xmppClients[ev["room_id"]]
xmppCli.sendAnswer(sdp)
elif ev["type"] == "m.call.hangup":
if ev["room_id"] in xmppClients:
xmppClients[ev["room_id"]].stop()
del xmppClients[ev["room_id"]]
class TrivialXmppClient:
def __init__(self, matrixRoom, userId):
self.rid = 0
self.matrixRoom = matrixRoom
self.userId = userId
self.running = True
def stop(self):
self.running = False
def nextRid(self):
self.rid += 1
return "%d" % (self.rid)
def sendIq(self, xml):
fullXml = (
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s'>%s</body>"
% (self.nextRid(), self.sid, xml)
)
# print "\t>>>%s" % (fullXml)
return self.xmppPoke(fullXml)
def xmppPoke(self, xml):
headers = {"Content-Type": "application/xml"}
req = grequests.post(HTTPBIND, verify=False, headers=headers, data=xml)
resps = grequests.map([req])
obj = BeautifulSoup(resps[0].content)
return obj
def sendAnswer(self, answer):
print("sdp from matrix client", answer)
p = subprocess.Popen(
["node", "unjingle/unjingle.js", "--sdp"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
jingle, out_err = p.communicate(answer)
jingle = jingle % {
"tojid": self.callfrom,
"action": "session-accept",
"initiator": self.callfrom,
"responder": self.jid,
"sid": self.callsid,
}
print("answer jingle from sdp", jingle)
res = self.sendIq(jingle)
print("reply from answer: ", res)
self.ssrcs = {}
jingleSoup = BeautifulSoup(jingle)
for cont in jingleSoup.iq.jingle.findAll("content"):
if cont.description:
self.ssrcs[cont["name"]] = cont.description["ssrc"]
print("my ssrcs:", self.ssrcs)
gevent.joinall([gevent.spawn(self.advertiseSsrcs)])
def advertiseSsrcs(self):
time.sleep(7)
print("SSRC spammer started")
while self.running:
ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % {
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
"nick": self.userId,
"assrc": self.ssrcs["audio"],
"vssrc": self.ssrcs["video"],
}
res = self.sendIq(ssrcMsg)
print("reply from ssrc announce: ", res)
time.sleep(10)
def xmppLoop(self):
self.matrixCallId = time.time()
res = self.xmppPoke(
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' to='%s' xml:lang='en' wait='60' hold='1' content='text/xml; charset=utf-8' ver='1.6' xmpp:version='1.0' xmlns:xmpp='urn:xmpp:xbosh'/>"
% (self.nextRid(), HOST)
)
print(res)
self.sid = res.body["sid"]
print("sid %s" % (self.sid))
res = self.sendIq(
"<auth xmlns='urn:ietf:params:xml:ns:xmpp-sasl' mechanism='ANONYMOUS'/>"
)
res = self.xmppPoke(
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s' to='%s' xml:lang='en' xmpp:restart='true' xmlns:xmpp='urn:xmpp:xbosh'/>"
% (self.nextRid(), self.sid, HOST)
)
res = self.sendIq(
"<iq type='set' id='_bind_auth_2' xmlns='jabber:client'><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'/></iq>"
)
print(res)
self.jid = res.body.iq.bind.jid.string
print("jid: %s" % (self.jid))
self.shortJid = self.jid.split("-")[0]
res = self.sendIq(
"<iq type='set' id='_session_auth_2' xmlns='jabber:client'><session xmlns='urn:ietf:params:xml:ns:xmpp-session'/></iq>"
)
# randomthing = res.body.iq['to']
# whatsitpart = randomthing.split('-')[0]
# print "other random bind thing: %s" % (randomthing)
# advertise presence to the jitsi room, with our nick
res = self.sendIq(
"<iq type='get' to='%s' xmlns='jabber:client' id='1:sendIQ'><services xmlns='urn:xmpp:extdisco:1'><service host='%s'/></services></iq><presence to='%s@%s/d98f6c40' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%s</nick></presence>"
% (HOST, TURNSERVER, ROOMNAME, ROOMDOMAIN, self.userId)
)
self.muc = {"users": []}
for p in res.body.findAll("presence"):
u = {}
u["shortJid"] = p["from"].split("/")[1]
if p.c and p.c.nick:
u["nick"] = p.c.nick.string
self.muc["users"].append(u)
print("muc: ", self.muc)
# wait for stuff
while True:
print("waiting...")
res = self.sendIq("")
print("got from stream: ", res)
if res.body.iq:
jingles = res.body.iq.findAll("jingle")
if len(jingles):
self.callfrom = res.body.iq["from"]
self.handleInvite(jingles[0])
elif "type" in res.body and res.body["type"] == "terminate":
self.running = False
del xmppClients[self.matrixRoom]
return
def handleInvite(self, jingle):
self.initiator = jingle["initiator"]
self.callsid = jingle["sid"]
p = subprocess.Popen(
["node", "unjingle/unjingle.js", "--jingle"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
print("raw jingle invite", str(jingle))
sdp, out_err = p.communicate(str(jingle))
print("transformed remote offer sdp", sdp)
inviteEvent = {
"offer": {"type": "offer", "sdp": sdp},
"call_id": self.matrixCallId,
"version": 0,
"lifetime": 30000,
}
matrixCli.sendEvent(self.matrixRoom, "m.call.invite", inviteEvent)
matrixCli = TrivialMatrixClient(ACCESS_TOKEN) # Undefined name
gevent.joinall([gevent.spawn(matrixLoop)])

View File

@@ -0,0 +1,188 @@
diff --git a/syweb/webclient/app/components/matrix/matrix-call.js b/syweb/webclient/app/components/matrix/matrix-call.js
index 9fbfff0..dc68077 100644
--- a/syweb/webclient/app/components/matrix/matrix-call.js
+++ b/syweb/webclient/app/components/matrix/matrix-call.js
@@ -16,6 +16,45 @@ limitations under the License.
'use strict';
+
+function sendKeyframe(pc) {
+ console.log('sendkeyframe', pc.iceConnectionState);
+ if (pc.iceConnectionState !== 'connected') return; // safe...
+ pc.setRemoteDescription(
+ pc.remoteDescription,
+ function () {
+ pc.createAnswer(
+ function (modifiedAnswer) {
+ pc.setLocalDescription(
+ modifiedAnswer,
+ function () {
+ // noop
+ },
+ function (error) {
+ console.log('triggerKeyframe setLocalDescription failed', error);
+ messageHandler.showError();
+ }
+ );
+ },
+ function (error) {
+ console.log('triggerKeyframe createAnswer failed', error);
+ messageHandler.showError();
+ }
+ );
+ },
+ function (error) {
+ console.log('triggerKeyframe setRemoteDescription failed', error);
+ messageHandler.showError();
+ }
+ );
+}
+
+
+
+
+
+
+
var forAllVideoTracksOnStream = function(s, f) {
var tracks = s.getVideoTracks();
for (var i = 0; i < tracks.length; i++) {
@@ -83,7 +122,7 @@ angular.module('MatrixCall', [])
}
// FIXME: we should prevent any calls from being placed or accepted before this has finished
- MatrixCall.getTurnServer();
+ //MatrixCall.getTurnServer();
MatrixCall.CALL_TIMEOUT = 60000;
MatrixCall.FALLBACK_STUN_SERVER = 'stun:stun.l.google.com:19302';
@@ -132,6 +171,22 @@ angular.module('MatrixCall', [])
pc.onsignalingstatechange = function() { self.onSignallingStateChanged(); };
pc.onicecandidate = function(c) { self.gotLocalIceCandidate(c); };
pc.onaddstream = function(s) { self.onAddStream(s); };
+
+ var datachan = pc.createDataChannel('RTCDataChannel', {
+ reliable: false
+ });
+ console.log("data chan: "+datachan);
+ datachan.onopen = function() {
+ console.log("data channel open");
+ };
+ datachan.onmessage = function() {
+ console.log("data channel message");
+ };
+ pc.ondatachannel = function(event) {
+ console.log("have data channel");
+ event.channel.binaryType = 'blob';
+ };
+
return pc;
}
@@ -200,6 +255,12 @@ angular.module('MatrixCall', [])
}, this.msg.lifetime - event.age);
};
+ MatrixCall.prototype.receivedInvite = function(event) {
+ console.log("Got second invite for call "+this.call_id);
+ this.peerConn.setRemoteDescription(new RTCSessionDescription(this.msg.offer), this.onSetRemoteDescriptionSuccess, this.onSetRemoteDescriptionError);
+ };
+
+
// perverse as it may seem, sometimes we want to instantiate a call with a hangup message
// (because when getting the state of the room on load, events come in reverse order and
// we want to remember that a call has been hung up)
@@ -349,7 +410,7 @@ angular.module('MatrixCall', [])
'mandatory': {
'OfferToReceiveAudio': true,
'OfferToReceiveVideo': this.type == 'video'
- },
+ }
};
this.peerConn.createAnswer(function(d) { self.createdAnswer(d); }, function(e) {}, constraints);
// This can't be in an apply() because it's called by a predecessor call under glare conditions :(
@@ -359,8 +420,20 @@ angular.module('MatrixCall', [])
MatrixCall.prototype.gotLocalIceCandidate = function(event) {
if (event.candidate) {
console.log("Got local ICE "+event.candidate.sdpMid+" candidate: "+event.candidate.candidate);
- this.sendCandidate(event.candidate);
- }
+ //this.sendCandidate(event.candidate);
+ } else {
+ console.log("have all candidates, sending answer");
+ var content = {
+ version: 0,
+ call_id: this.call_id,
+ answer: this.peerConn.localDescription
+ };
+ this.sendEventWithRetry('m.call.answer', content);
+ var self = this;
+ $rootScope.$apply(function() {
+ self.state = 'connecting';
+ });
+ }
}
MatrixCall.prototype.gotRemoteIceCandidate = function(cand) {
@@ -418,15 +491,6 @@ angular.module('MatrixCall', [])
console.log("Created answer: "+description);
var self = this;
this.peerConn.setLocalDescription(description, function() {
- var content = {
- version: 0,
- call_id: self.call_id,
- answer: self.peerConn.localDescription
- };
- self.sendEventWithRetry('m.call.answer', content);
- $rootScope.$apply(function() {
- self.state = 'connecting';
- });
}, function() { console.log("Error setting local description!"); } );
};
@@ -448,6 +512,9 @@ angular.module('MatrixCall', [])
$rootScope.$apply(function() {
self.state = 'connected';
self.didConnect = true;
+ /*$timeout(function() {
+ sendKeyframe(self.peerConn);
+ }, 1000);*/
});
} else if (this.peerConn.iceConnectionState == 'failed') {
this.hangup('ice_failed');
@@ -518,6 +585,7 @@ angular.module('MatrixCall', [])
MatrixCall.prototype.onRemoteStreamEnded = function(event) {
console.log("Remote stream ended");
+ return;
var self = this;
$rootScope.$apply(function() {
self.state = 'ended';
diff --git a/syweb/webclient/app/components/matrix/matrix-phone-service.js b/syweb/webclient/app/components/matrix/matrix-phone-service.js
index 55dbbf5..272fa27 100644
--- a/syweb/webclient/app/components/matrix/matrix-phone-service.js
+++ b/syweb/webclient/app/components/matrix/matrix-phone-service.js
@@ -48,6 +48,13 @@ angular.module('matrixPhoneService', [])
return;
}
+ // do we already have an entry for this call ID?
+ var existingEntry = matrixPhoneService.allCalls[msg.call_id];
+ if (existingEntry) {
+ existingEntry.receivedInvite(msg);
+ return;
+ }
+
var call = undefined;
if (!isLive) {
// if this event wasn't live then this call may already be over
@@ -108,7 +115,7 @@ angular.module('matrixPhoneService', [])
call.hangup();
}
} else {
- $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
+ $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
}
} else if (event.type == 'm.call.answer') {
var call = matrixPhoneService.allCalls[msg.call_id];

View File

@@ -0,0 +1,712 @@
/* jshint -W117 */
// SDP STUFF
function SDP(sdp) {
this.media = sdp.split('\r\nm=');
for (var i = 1; i < this.media.length; i++) {
this.media[i] = 'm=' + this.media[i];
if (i != this.media.length - 1) {
this.media[i] += '\r\n';
}
}
this.session = this.media.shift() + '\r\n';
this.raw = this.session + this.media.join('');
}
exports.SDP = SDP;
var jsdom = require("jsdom");
var window = jsdom.jsdom().parentWindow;
var $ = require('jquery')(window);
var SDPUtil = require('./strophe.jingle.sdp.util.js').SDPUtil;
/**
* Returns map of MediaChannel mapped per channel idx.
*/
SDP.prototype.getMediaSsrcMap = function() {
var self = this;
var media_ssrcs = {};
for (var channelNum = 0; channelNum < self.media.length; channelNum++) {
var modified = true;
var tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc:');
var type = SDPUtil.parse_mid(SDPUtil.find_line(self.media[channelNum], 'a=mid:'));
var channel = new MediaChannel(channelNum, type);
media_ssrcs[channelNum] = channel;
tmp.forEach(function (line) {
var linessrc = line.substring(7).split(' ')[0];
// allocate new ChannelSsrc
if(!channel.ssrcs[linessrc]) {
channel.ssrcs[linessrc] = new ChannelSsrc(linessrc, type);
}
channel.ssrcs[linessrc].lines.push(line);
});
tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc-group:');
tmp.forEach(function(line){
var idx = line.indexOf(' ');
var semantics = line.substr(0, idx).substr(13);
var ssrcs = line.substr(14 + semantics.length).split(' ');
if (ssrcs.length != 0) {
var ssrcGroup = new ChannelSsrcGroup(semantics, ssrcs);
channel.ssrcGroups.push(ssrcGroup);
}
});
}
return media_ssrcs;
};
/**
* Returns <tt>true</tt> if this SDP contains given SSRC.
* @param ssrc the ssrc to check.
* @returns {boolean} <tt>true</tt> if this SDP contains given SSRC.
*/
SDP.prototype.containsSSRC = function(ssrc) {
var channels = this.getMediaSsrcMap();
var contains = false;
Object.keys(channels).forEach(function(chNumber){
var channel = channels[chNumber];
//console.log("Check", channel, ssrc);
if(Object.keys(channel.ssrcs).indexOf(ssrc) != -1){
contains = true;
}
});
return contains;
};
/**
* Returns map of MediaChannel that contains only media not contained in <tt>otherSdp</tt>. Mapped by channel idx.
* @param otherSdp the other SDP to check ssrc with.
*/
SDP.prototype.getNewMedia = function(otherSdp) {
// this could be useful in Array.prototype.
function arrayEquals(array) {
// if the other array is a falsy value, return
if (!array)
return false;
// compare lengths - can save a lot of time
if (this.length != array.length)
return false;
for (var i = 0, l=this.length; i < l; i++) {
// Check if we have nested arrays
if (this[i] instanceof Array && array[i] instanceof Array) {
// recurse into the nested arrays
if (!arrayEquals.apply(this[i], [array[i]]))
return false;
}
else if (this[i] != array[i]) {
// Warning - two different object instances will never be equal: {x:20} != {x:20}
return false;
}
}
return true;
}
var myMedia = this.getMediaSsrcMap();
var othersMedia = otherSdp.getMediaSsrcMap();
var newMedia = {};
Object.keys(othersMedia).forEach(function(channelNum) {
var myChannel = myMedia[channelNum];
var othersChannel = othersMedia[channelNum];
if(!myChannel && othersChannel) {
// Add whole channel
newMedia[channelNum] = othersChannel;
return;
}
// Look for new ssrcs across the channel
Object.keys(othersChannel.ssrcs).forEach(function(ssrc) {
if(Object.keys(myChannel.ssrcs).indexOf(ssrc) === -1) {
// Allocate channel if we've found ssrc that doesn't exist in our channel
if(!newMedia[channelNum]){
newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
}
newMedia[channelNum].ssrcs[ssrc] = othersChannel.ssrcs[ssrc];
}
});
// Look for new ssrc groups across the channels
othersChannel.ssrcGroups.forEach(function(otherSsrcGroup){
// try to match the other ssrc-group with an ssrc-group of ours
var matched = false;
for (var i = 0; i < myChannel.ssrcGroups.length; i++) {
var mySsrcGroup = myChannel.ssrcGroups[i];
if (otherSsrcGroup.semantics == mySsrcGroup.semantics
&& arrayEquals.apply(otherSsrcGroup.ssrcs, [mySsrcGroup.ssrcs])) {
matched = true;
break;
}
}
if (!matched) {
// Allocate channel if we've found an ssrc-group that doesn't
// exist in our channel
if(!newMedia[channelNum]){
newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
}
newMedia[channelNum].ssrcGroups.push(otherSsrcGroup);
}
});
});
return newMedia;
};
// remove iSAC and CN from SDP
SDP.prototype.mangle = function () {
var i, j, mline, lines, rtpmap, newdesc;
for (i = 0; i < this.media.length; i++) {
lines = this.media[i].split('\r\n');
lines.pop(); // remove empty last element
mline = SDPUtil.parse_mline(lines.shift());
if (mline.media != 'audio')
continue;
newdesc = '';
mline.fmt.length = 0;
for (j = 0; j < lines.length; j++) {
if (lines[j].substr(0, 9) == 'a=rtpmap:') {
rtpmap = SDPUtil.parse_rtpmap(lines[j]);
if (rtpmap.name == 'CN' || rtpmap.name == 'ISAC')
continue;
mline.fmt.push(rtpmap.id);
newdesc += lines[j] + '\r\n';
} else {
newdesc += lines[j] + '\r\n';
}
}
this.media[i] = SDPUtil.build_mline(mline) + '\r\n';
this.media[i] += newdesc;
}
this.raw = this.session + this.media.join('');
};
// remove lines matching prefix from session section
SDP.prototype.removeSessionLines = function(prefix) {
var self = this;
var lines = SDPUtil.find_lines(this.session, prefix);
lines.forEach(function(line) {
self.session = self.session.replace(line + '\r\n', '');
});
this.raw = this.session + this.media.join('');
return lines;
}
// remove lines matching prefix from a media section specified by mediaindex
// TODO: non-numeric mediaindex could match mid
SDP.prototype.removeMediaLines = function(mediaindex, prefix) {
var self = this;
var lines = SDPUtil.find_lines(this.media[mediaindex], prefix);
lines.forEach(function(line) {
self.media[mediaindex] = self.media[mediaindex].replace(line + '\r\n', '');
});
this.raw = this.session + this.media.join('');
return lines;
}
// add content's to a jingle element
SDP.prototype.toJingle = function (elem, thecreator) {
var i, j, k, mline, ssrc, rtpmap, tmp, line, lines;
var self = this;
// new bundle plan
if (SDPUtil.find_line(this.session, 'a=group:')) {
lines = SDPUtil.find_lines(this.session, 'a=group:');
for (i = 0; i < lines.length; i++) {
tmp = lines[i].split(' ');
var semantics = tmp.shift().substr(8);
elem.c('group', {xmlns: 'urn:xmpp:jingle:apps:grouping:0', semantics:semantics});
for (j = 0; j < tmp.length; j++) {
elem.c('content', {name: tmp[j]}).up();
}
elem.up();
}
}
// old bundle plan, to be removed
var bundle = [];
if (SDPUtil.find_line(this.session, 'a=group:BUNDLE')) {
bundle = SDPUtil.find_line(this.session, 'a=group:BUNDLE ').split(' ');
bundle.shift();
}
for (i = 0; i < this.media.length; i++) {
mline = SDPUtil.parse_mline(this.media[i].split('\r\n')[0]);
if (!(mline.media === 'audio' ||
mline.media === 'video' ||
mline.media === 'application'))
{
continue;
}
if (SDPUtil.find_line(this.media[i], 'a=ssrc:')) {
ssrc = SDPUtil.find_line(this.media[i], 'a=ssrc:').substring(7).split(' ')[0]; // take the first
} else {
ssrc = false;
}
elem.c('content', {creator: thecreator, name: mline.media});
if (SDPUtil.find_line(this.media[i], 'a=mid:')) {
// prefer identifier from a=mid if present
var mid = SDPUtil.parse_mid(SDPUtil.find_line(this.media[i], 'a=mid:'));
elem.attrs({ name: mid });
// old BUNDLE plan, to be removed
if (bundle.indexOf(mid) !== -1) {
elem.c('bundle', {xmlns: 'http://estos.de/ns/bundle'}).up();
bundle.splice(bundle.indexOf(mid), 1);
}
}
if (SDPUtil.find_line(this.media[i], 'a=rtpmap:').length)
{
elem.c('description',
{xmlns: 'urn:xmpp:jingle:apps:rtp:1',
media: mline.media });
if (ssrc) {
elem.attrs({ssrc: ssrc});
}
for (j = 0; j < mline.fmt.length; j++) {
rtpmap = SDPUtil.find_line(this.media[i], 'a=rtpmap:' + mline.fmt[j]);
elem.c('payload-type', SDPUtil.parse_rtpmap(rtpmap));
// put any 'a=fmtp:' + mline.fmt[j] lines into <param name=foo value=bar/>
if (SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j])) {
tmp = SDPUtil.parse_fmtp(SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j]));
for (k = 0; k < tmp.length; k++) {
elem.c('parameter', tmp[k]).up();
}
}
this.RtcpFbToJingle(i, elem, mline.fmt[j]); // XEP-0293 -- map a=rtcp-fb
elem.up();
}
if (SDPUtil.find_line(this.media[i], 'a=crypto:', this.session)) {
elem.c('encryption', {required: 1});
var crypto = SDPUtil.find_lines(this.media[i], 'a=crypto:', this.session);
crypto.forEach(function(line) {
elem.c('crypto', SDPUtil.parse_crypto(line)).up();
});
elem.up(); // end of encryption
}
if (ssrc) {
// new style mapping
elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
// FIXME: group by ssrc and support multiple different ssrcs
var ssrclines = SDPUtil.find_lines(this.media[i], 'a=ssrc:');
ssrclines.forEach(function(line) {
idx = line.indexOf(' ');
var linessrc = line.substr(0, idx).substr(7);
if (linessrc != ssrc) {
elem.up();
ssrc = linessrc;
elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
}
var kv = line.substr(idx + 1);
elem.c('parameter');
if (kv.indexOf(':') == -1) {
elem.attrs({ name: kv });
} else {
elem.attrs({ name: kv.split(':', 2)[0] });
elem.attrs({ value: kv.split(':', 2)[1] });
}
elem.up();
});
elem.up();
// old proprietary mapping, to be removed at some point
tmp = SDPUtil.parse_ssrc(this.media[i]);
tmp.xmlns = 'http://estos.de/ns/ssrc';
tmp.ssrc = ssrc;
elem.c('ssrc', tmp).up(); // ssrc is part of description
// XEP-0339 handle ssrc-group attributes
var ssrc_group_lines = SDPUtil.find_lines(this.media[i], 'a=ssrc-group:');
ssrc_group_lines.forEach(function(line) {
idx = line.indexOf(' ');
var semantics = line.substr(0, idx).substr(13);
var ssrcs = line.substr(14 + semantics.length).split(' ');
if (ssrcs.length != 0) {
elem.c('ssrc-group', { semantics: semantics, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
ssrcs.forEach(function(ssrc) {
elem.c('source', { ssrc: ssrc })
.up();
});
elem.up();
}
});
}
if (SDPUtil.find_line(this.media[i], 'a=rtcp-mux')) {
elem.c('rtcp-mux').up();
}
// XEP-0293 -- map a=rtcp-fb:*
this.RtcpFbToJingle(i, elem, '*');
// XEP-0294
if (SDPUtil.find_line(this.media[i], 'a=extmap:')) {
lines = SDPUtil.find_lines(this.media[i], 'a=extmap:');
for (j = 0; j < lines.length; j++) {
tmp = SDPUtil.parse_extmap(lines[j]);
elem.c('rtp-hdrext', { xmlns: 'urn:xmpp:jingle:apps:rtp:rtp-hdrext:0',
uri: tmp.uri,
id: tmp.value });
if (tmp.hasOwnProperty('direction')) {
switch (tmp.direction) {
case 'sendonly':
elem.attrs({senders: 'responder'});
break;
case 'recvonly':
elem.attrs({senders: 'initiator'});
break;
case 'sendrecv':
elem.attrs({senders: 'both'});
break;
case 'inactive':
elem.attrs({senders: 'none'});
break;
}
}
// TODO: handle params
elem.up();
}
}
elem.up(); // end of description
}
// map ice-ufrag/pwd, dtls fingerprint, candidates
this.TransportToJingle(i, elem);
if (SDPUtil.find_line(this.media[i], 'a=sendrecv', this.session)) {
elem.attrs({senders: 'both'});
} else if (SDPUtil.find_line(this.media[i], 'a=sendonly', this.session)) {
elem.attrs({senders: 'initiator'});
} else if (SDPUtil.find_line(this.media[i], 'a=recvonly', this.session)) {
elem.attrs({senders: 'responder'});
} else if (SDPUtil.find_line(this.media[i], 'a=inactive', this.session)) {
elem.attrs({senders: 'none'});
}
if (mline.port == '0') {
// estos hack to reject an m-line
elem.attrs({senders: 'rejected'});
}
elem.up(); // end of content
}
elem.up();
return elem;
};
SDP.prototype.TransportToJingle = function (mediaindex, elem) {
var i = mediaindex;
var tmp;
var self = this;
elem.c('transport');
// XEP-0343 DTLS/SCTP
if (SDPUtil.find_line(this.media[mediaindex], 'a=sctpmap:').length)
{
var sctpmap = SDPUtil.find_line(
this.media[i], 'a=sctpmap:', self.session);
if (sctpmap)
{
var sctpAttrs = SDPUtil.parse_sctpmap(sctpmap);
elem.c('sctpmap',
{
xmlns: 'urn:xmpp:jingle:transports:dtls-sctp:1',
number: sctpAttrs[0], /* SCTP port */
protocol: sctpAttrs[1], /* protocol */
});
// Optional stream count attribute
if (sctpAttrs.length > 2)
elem.attrs({ streams: sctpAttrs[2]});
elem.up();
}
}
// XEP-0320
var fingerprints = SDPUtil.find_lines(this.media[mediaindex], 'a=fingerprint:', this.session);
fingerprints.forEach(function(line) {
tmp = SDPUtil.parse_fingerprint(line);
tmp.xmlns = 'urn:xmpp:jingle:apps:dtls:0';
elem.c('fingerprint').t(tmp.fingerprint);
delete tmp.fingerprint;
line = SDPUtil.find_line(self.media[mediaindex], 'a=setup:', self.session);
if (line) {
tmp.setup = line.substr(8);
}
elem.attrs(tmp);
elem.up(); // end of fingerprint
});
tmp = SDPUtil.iceparams(this.media[mediaindex], this.session);
if (tmp) {
tmp.xmlns = 'urn:xmpp:jingle:transports:ice-udp:1';
elem.attrs(tmp);
// XEP-0176
if (SDPUtil.find_line(this.media[mediaindex], 'a=candidate:', this.session)) { // add any a=candidate lines
var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=candidate:', this.session);
lines.forEach(function (line) {
elem.c('candidate', SDPUtil.candidateToJingle(line)).up();
});
}
}
elem.up(); // end of transport
}
SDP.prototype.RtcpFbToJingle = function (mediaindex, elem, payloadtype) { // XEP-0293
var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=rtcp-fb:' + payloadtype);
lines.forEach(function (line) {
var tmp = SDPUtil.parse_rtcpfb(line);
if (tmp.type == 'trr-int') {
elem.c('rtcp-fb-trr-int', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', value: tmp.params[0]});
elem.up();
} else {
elem.c('rtcp-fb', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', type: tmp.type});
if (tmp.params.length > 0) {
elem.attrs({'subtype': tmp.params[0]});
}
elem.up();
}
});
};
SDP.prototype.RtcpFbFromJingle = function (elem, payloadtype) { // XEP-0293
var media = '';
var tmp = elem.find('>rtcp-fb-trr-int[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
if (tmp.length) {
media += 'a=rtcp-fb:' + '*' + ' ' + 'trr-int' + ' ';
if (tmp.attr('value')) {
media += tmp.attr('value');
} else {
media += '0';
}
media += '\r\n';
}
tmp = elem.find('>rtcp-fb[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
tmp.each(function () {
media += 'a=rtcp-fb:' + payloadtype + ' ' + $(this).attr('type');
if ($(this).attr('subtype')) {
media += ' ' + $(this).attr('subtype');
}
media += '\r\n';
});
return media;
};
// construct an SDP from a jingle stanza
SDP.prototype.fromJingle = function (jingle) {
var self = this;
this.raw = 'v=0\r\n' +
'o=- ' + '1923518516' + ' 2 IN IP4 0.0.0.0\r\n' +// FIXME
's=-\r\n' +
't=0 0\r\n';
// http://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-04#section-8
if ($(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').length) {
$(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').each(function (idx, group) {
var contents = $(group).find('>content').map(function (idx, content) {
return content.getAttribute('name');
}).get();
if (contents.length > 0) {
self.raw += 'a=group:' + (group.getAttribute('semantics') || group.getAttribute('type')) + ' ' + contents.join(' ') + '\r\n';
}
});
} else if ($(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').length) {
// temporary namespace, not to be used. to be removed soon.
$(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').each(function (idx, group) {
var contents = $(group).find('>content').map(function (idx, content) {
return content.getAttribute('name');
}).get();
if (group.getAttribute('type') !== null && contents.length > 0) {
self.raw += 'a=group:' + group.getAttribute('type') + ' ' + contents.join(' ') + '\r\n';
}
});
} else {
// for backward compatibility, to be removed soon
// assume all contents are in the same bundle group, can be improved upon later
var bundle = $(jingle).find('>content').filter(function (idx, content) {
//elem.c('bundle', {xmlns:'http://estos.de/ns/bundle'});
return $(content).find('>bundle').length > 0;
}).map(function (idx, content) {
return content.getAttribute('name');
}).get();
if (bundle.length) {
this.raw += 'a=group:BUNDLE ' + bundle.join(' ') + '\r\n';
}
}
this.session = this.raw;
jingle.find('>content').each(function () {
var m = self.jingle2media($(this));
self.media.push(m);
});
// reconstruct msid-semantic -- apparently not necessary
/*
var msid = SDPUtil.parse_ssrc(this.raw);
if (msid.hasOwnProperty('mslabel')) {
this.session += "a=msid-semantic: WMS " + msid.mslabel + "\r\n";
}
*/
this.raw = this.session + this.media.join('');
};
// translate a jingle content element into an an SDP media part
SDP.prototype.jingle2media = function (content) {
var media = '',
desc = content.find('description'),
ssrc = desc.attr('ssrc'),
self = this,
tmp;
var sctp = content.find(
'>transport>sctpmap[xmlns="urn:xmpp:jingle:transports:dtls-sctp:1"]');
tmp = { media: desc.attr('media') };
tmp.port = '1';
if (content.attr('senders') == 'rejected') {
// estos hack to reject an m-line.
tmp.port = '0';
}
if (content.find('>transport>fingerprint').length || desc.find('encryption').length) {
if (sctp.length)
tmp.proto = 'DTLS/SCTP';
else
tmp.proto = 'RTP/SAVPF';
} else {
tmp.proto = 'RTP/AVPF';
}
if (!sctp.length)
{
tmp.fmt = desc.find('payload-type').map(
function () { return this.getAttribute('id'); }).get();
media += SDPUtil.build_mline(tmp) + '\r\n';
}
else
{
media += 'm=application 1 DTLS/SCTP ' + sctp.attr('number') + '\r\n';
media += 'a=sctpmap:' + sctp.attr('number') +
' ' + sctp.attr('protocol');
var streamCount = sctp.attr('streams');
if (streamCount)
media += ' ' + streamCount + '\r\n';
else
media += '\r\n';
}
media += 'c=IN IP4 0.0.0.0\r\n';
if (!sctp.length)
media += 'a=rtcp:1 IN IP4 0.0.0.0\r\n';
//tmp = content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
tmp = content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
//console.log('transports: '+content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
//console.log('bundle.transports: '+content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
//console.log("tmp fingerprint: "+tmp.find('>fingerprint').innerHTML);
if (tmp.length) {
if (tmp.attr('ufrag')) {
media += SDPUtil.build_iceufrag(tmp.attr('ufrag')) + '\r\n';
}
if (tmp.attr('pwd')) {
media += SDPUtil.build_icepwd(tmp.attr('pwd')) + '\r\n';
}
tmp.find('>fingerprint').each(function () {
// FIXME: check namespace at some point
media += 'a=fingerprint:' + this.getAttribute('hash');
media += ' ' + $(this).text();
media += '\r\n';
//console.log("mline "+media);
if (this.getAttribute('setup')) {
media += 'a=setup:' + this.getAttribute('setup') + '\r\n';
}
});
}
switch (content.attr('senders')) {
case 'initiator':
media += 'a=sendonly\r\n';
break;
case 'responder':
media += 'a=recvonly\r\n';
break;
case 'none':
media += 'a=inactive\r\n';
break;
case 'both':
media += 'a=sendrecv\r\n';
break;
}
media += 'a=mid:' + content.attr('name') + '\r\n';
/*if (content.attr('name') == 'video') {
media += 'a=x-google-flag:conference' + '\r\n';
}*/
// <description><rtcp-mux/></description>
// see http://code.google.com/p/libjingle/issues/detail?id=309 -- no spec though
// and http://mail.jabber.org/pipermail/jingle/2011-December/001761.html
if (desc.find('rtcp-mux').length) {
media += 'a=rtcp-mux\r\n';
}
if (desc.find('encryption').length) {
desc.find('encryption>crypto').each(function () {
media += 'a=crypto:' + this.getAttribute('tag');
media += ' ' + this.getAttribute('crypto-suite');
media += ' ' + this.getAttribute('key-params');
if (this.getAttribute('session-params')) {
media += ' ' + this.getAttribute('session-params');
}
media += '\r\n';
});
}
desc.find('payload-type').each(function () {
media += SDPUtil.build_rtpmap(this) + '\r\n';
if ($(this).find('>parameter').length) {
media += 'a=fmtp:' + this.getAttribute('id') + ' ';
media += $(this).find('parameter').map(function () { return (this.getAttribute('name') ? (this.getAttribute('name') + '=') : '') + this.getAttribute('value'); }).get().join('; ');
media += '\r\n';
}
// xep-0293
media += self.RtcpFbFromJingle($(this), this.getAttribute('id'));
});
// xep-0293
media += self.RtcpFbFromJingle(desc, '*');
// xep-0294
tmp = desc.find('>rtp-hdrext[xmlns="urn:xmpp:jingle:apps:rtp:rtp-hdrext:0"]');
tmp.each(function () {
media += 'a=extmap:' + this.getAttribute('id') + ' ' + this.getAttribute('uri') + '\r\n';
});
content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]>candidate').each(function () {
media += SDPUtil.candidateFromJingle(this);
});
// XEP-0339 handle ssrc-group attributes
tmp = content.find('description>ssrc-group[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]').each(function() {
var semantics = this.getAttribute('semantics');
var ssrcs = $(this).find('>source').map(function() {
return this.getAttribute('ssrc');
}).get();
if (ssrcs.length != 0) {
media += 'a=ssrc-group:' + semantics + ' ' + ssrcs.join(' ') + '\r\n';
}
});
tmp = content.find('description>source[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]');
tmp.each(function () {
var ssrc = this.getAttribute('ssrc');
$(this).find('>parameter').each(function () {
media += 'a=ssrc:' + ssrc + ' ' + this.getAttribute('name');
if (this.getAttribute('value') && this.getAttribute('value').length)
media += ':' + this.getAttribute('value');
media += '\r\n';
});
});
if (tmp.length === 0) {
// fallback to proprietary mapping of a=ssrc lines
tmp = content.find('description>ssrc[xmlns="http://estos.de/ns/ssrc"]');
if (tmp.length) {
media += 'a=ssrc:' + ssrc + ' cname:' + tmp.attr('cname') + '\r\n';
media += 'a=ssrc:' + ssrc + ' msid:' + tmp.attr('msid') + '\r\n';
media += 'a=ssrc:' + ssrc + ' mslabel:' + tmp.attr('mslabel') + '\r\n';
media += 'a=ssrc:' + ssrc + ' label:' + tmp.attr('label') + '\r\n';
}
}
return media;
};


@@ -0,0 +1,408 @@
/**
* Contains utility classes used in SDP class.
*
*/
/**
* Class that holds a=ssrc lines and the media type (a=mid).
* @param ssrc synchronization source identifier number (a=ssrc lines from SDP)
* @param type media type, e.g. "audio" or "video" (a=mid from SDP)
* @constructor
*/
function ChannelSsrc(ssrc, type) {
this.ssrc = ssrc;
this.type = type;
this.lines = [];
}
/**
* Class holds a=ssrc-group: lines
* @param semantics
* @param ssrcs
* @constructor
*/
function ChannelSsrcGroup(semantics, ssrcs, line) {
this.semantics = semantics;
this.ssrcs = ssrcs;
}
/**
* Helper class that represents a media channel. It is a container for ChannelSsrc objects and holds the channel index and media type.
* @param channelNumber channel idx in SDP media array.
* @param mediaType media type(a=mid)
* @constructor
*/
function MediaChannel(channelNumber, mediaType) {
/**
* SDP channel number
* @type {*}
*/
this.chNumber = channelNumber;
/**
* Channel media type(a=mid)
* @type {*}
*/
this.mediaType = mediaType;
/**
* The maps of ssrc numbers to ChannelSsrc objects.
*/
this.ssrcs = {};
/**
* The array of ChannelSsrcGroup objects.
* @type {Array}
*/
this.ssrcGroups = [];
}
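// Illustrative sketch (not in the original source): these helpers are plain
// containers, e.g. a video channel at index 1 carrying one synchronization source:
//   var channel = new MediaChannel(1, 'video');
//   channel.ssrcs['1234'] = new ChannelSsrc('1234', 'video');
//   channel.ssrcGroups.push(new ChannelSsrcGroup('FID', ['1234', '5678']));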
SDPUtil = {
iceparams: function (mediadesc, sessiondesc) {
var data = null;
if (SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc) &&
SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc)) {
data = {
ufrag: SDPUtil.parse_iceufrag(SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc)),
pwd: SDPUtil.parse_icepwd(SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc))
};
}
return data;
},
parse_iceufrag: function (line) {
return line.substring(12);
},
build_iceufrag: function (frag) {
return 'a=ice-ufrag:' + frag;
},
parse_icepwd: function (line) {
return line.substring(10);
},
build_icepwd: function (pwd) {
return 'a=ice-pwd:' + pwd;
},
parse_mid: function (line) {
return line.substring(6);
},
parse_mline: function (line) {
var parts = line.substring(2).split(' '),
data = {};
data.media = parts.shift();
data.port = parts.shift();
data.proto = parts.shift();
if (parts[parts.length - 1] === '') { // trailing whitespace
parts.pop();
}
data.fmt = parts;
return data;
},
build_mline: function (mline) {
return 'm=' + mline.media + ' ' + mline.port + ' ' + mline.proto + ' ' + mline.fmt.join(' ');
},
parse_rtpmap: function (line) {
var parts = line.substring(9).split(' '),
data = {};
data.id = parts.shift();
parts = parts[0].split('/');
data.name = parts.shift();
data.clockrate = parts.shift();
data.channels = parts.length ? parts.shift() : '1';
return data;
},
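// Illustrative example (added for clarity), e.g. for a typical Opus payload:
//   parse_rtpmap('a=rtpmap:111 opus/48000/2')
//   // => { id: '111', name: 'opus', clockrate: '48000', channels: '2' }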
/**
* Parses SDP line "a=sctpmap:..." and extracts SCTP port from it.
* @param line eg. "a=sctpmap:5000 webrtc-datachannel"
* @returns [SCTP port number, protocol, streams]
*/
parse_sctpmap: function (line)
{
var parts = line.substring(10).split(' ');
var sctpPort = parts[0];
var protocol = parts[1];
// Stream count is optional
var streamCount = parts.length > 2 ? parts[2] : null;
return [sctpPort, protocol, streamCount];// SCTP port
},
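// Illustrative example (added for clarity), using the sample line from the
// doc comment above plus an optional stream count:
//   parse_sctpmap('a=sctpmap:5000 webrtc-datachannel 1024')
//   // => ['5000', 'webrtc-datachannel', '1024']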
build_rtpmap: function (el) {
var line = 'a=rtpmap:' + el.getAttribute('id') + ' ' + el.getAttribute('name') + '/' + el.getAttribute('clockrate');
if (el.getAttribute('channels') && el.getAttribute('channels') != '1') {
line += '/' + el.getAttribute('channels');
}
return line;
},
parse_crypto: function (line) {
var parts = line.substring(9).split(' '),
data = {};
data.tag = parts.shift();
data['crypto-suite'] = parts.shift();
data['key-params'] = parts.shift();
if (parts.length) {
data['session-params'] = parts.join(' ');
}
return data;
},
parse_fingerprint: function (line) { // RFC 4572
var parts = line.substring(14).split(' '),
data = {};
data.hash = parts.shift();
data.fingerprint = parts.shift();
// TODO assert that fingerprint satisfies 2UHEX *(":" 2UHEX) ?
return data;
},
parse_fmtp: function (line) {
var parts = line.split(' '),
i, key, value,
data = [];
parts.shift();
parts = parts.join(' ').split(';');
for (i = 0; i < parts.length; i++) {
key = parts[i].split('=')[0];
while (key.length && key[0] == ' ') {
key = key.substring(1);
}
value = parts[i].split('=')[1];
if (key && value) {
data.push({name: key, value: value});
} else if (key) {
// rfc 4733 (DTMF) style stuff
data.push({name: '', value: key});
}
}
return data;
},
parse_icecandidate: function (line) {
var candidate = {},
elems = line.split(' ');
candidate.foundation = elems[0].substring(12);
candidate.component = elems[1];
candidate.protocol = elems[2].toLowerCase();
candidate.priority = elems[3];
candidate.ip = elems[4];
candidate.port = elems[5];
// elems[6] => "typ"
candidate.type = elems[7];
candidate.generation = 0; // default value, may be overwritten below
for (var i = 8; i < elems.length; i += 2) {
switch (elems[i]) {
case 'raddr':
candidate['rel-addr'] = elems[i + 1];
break;
case 'rport':
candidate['rel-port'] = elems[i + 1];
break;
case 'generation':
candidate.generation = elems[i + 1];
break;
case 'tcptype':
candidate.tcptype = elems[i + 1];
break;
default: // TODO
console.log('parse_icecandidate not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
}
}
candidate.network = '1';
candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
return candidate;
},
build_icecandidate: function (cand) {
var line = ['a=candidate:' + cand.foundation, cand.component, cand.protocol, cand.priority, cand.ip, cand.port, 'typ', cand.type].join(' ');
line += ' ';
switch (cand.type) {
case 'srflx':
case 'prflx':
case 'relay':
if (cand.hasOwnProperty('rel-addr') && cand.hasOwnProperty('rel-port')) {
line += 'raddr';
line += ' ';
line += cand['rel-addr'];
line += ' ';
line += 'rport';
line += ' ';
line += cand['rel-port'];
line += ' ';
}
break;
}
if (cand.hasOwnProperty('tcptype')) {
line += 'tcptype';
line += ' ';
line += cand.tcptype;
line += ' ';
}
line += 'generation';
line += ' ';
line += cand.hasOwnProperty('generation') ? cand.generation : '0';
return line;
},
parse_ssrc: function (desc) {
// proprietary mapping of a=ssrc lines
// TODO: see "Jingle RTP Source Description" by Juberti and P. Thatcher on google docs
// and parse according to that
var lines = desc.split('\r\n'),
data = {};
for (var i = 0; i < lines.length; i++) {
if (lines[i].substring(0, 7) == 'a=ssrc:') {
var idx = lines[i].indexOf(' ');
data[lines[i].substr(idx + 1).split(':', 2)[0]] = lines[i].substr(idx + 1).split(':', 2)[1];
}
}
return data;
},
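// Illustrative example (added for clarity):
//   parse_ssrc('a=ssrc:1234 cname:foo\r\na=ssrc:1234 msid:bar')
//   // => { cname: 'foo', msid: 'bar' }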
parse_rtcpfb: function (line) {
var parts = line.substr(10).split(' ');
var data = {};
data.pt = parts.shift();
data.type = parts.shift();
data.params = parts;
return data;
},
parse_extmap: function (line) {
var parts = line.substr(9).split(' ');
var data = {};
data.value = parts.shift();
if (data.value.indexOf('/') != -1) {
data.direction = data.value.substr(data.value.indexOf('/') + 1);
data.value = data.value.substr(0, data.value.indexOf('/'));
} else {
data.direction = 'both';
}
data.uri = parts.shift();
data.params = parts;
return data;
},
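// Illustrative example (added for clarity):
//   parse_extmap('a=extmap:2/sendonly urn:ietf:params:rtp-hdrext:ssrc-audio-level')
//   // => { value: '2', direction: 'sendonly',
//   //      uri: 'urn:ietf:params:rtp-hdrext:ssrc-audio-level', params: [] }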
find_line: function (haystack, needle, sessionpart) {
var lines = haystack.split('\r\n');
for (var i = 0; i < lines.length; i++) {
if (lines[i].substring(0, needle.length) == needle) {
return lines[i];
}
}
if (!sessionpart) {
return false;
}
// search session part
lines = sessionpart.split('\r\n');
for (var j = 0; j < lines.length; j++) {
if (lines[j].substring(0, needle.length) == needle) {
return lines[j];
}
}
return false;
},
find_lines: function (haystack, needle, sessionpart) {
var lines = haystack.split('\r\n'),
needles = [];
for (var i = 0; i < lines.length; i++) {
if (lines[i].substring(0, needle.length) == needle)
needles.push(lines[i]);
}
if (needles.length || !sessionpart) {
return needles;
}
// search session part
lines = sessionpart.split('\r\n');
for (var j = 0; j < lines.length; j++) {
if (lines[j].substring(0, needle.length) == needle) {
needles.push(lines[j]);
}
}
return needles;
},
candidateToJingle: function (line) {
// a=candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0
// <candidate component=... foundation=... generation=... id=... ip=... network=... port=... priority=... protocol=... type=.../>
if (line.indexOf('candidate:') === 0) {
line = 'a=' + line;
} else if (line.substring(0, 12) != 'a=candidate:') {
console.log('parseCandidate called with a line that is not a candidate line');
console.log(line);
return null;
}
if (line.substring(line.length - 2) == '\r\n') // chomp it
line = line.substring(0, line.length - 2);
var candidate = {},
elems = line.split(' '),
i;
if (elems[6] != 'typ') {
console.log('did not find typ in the right place');
console.log(line);
return null;
}
candidate.foundation = elems[0].substring(12);
candidate.component = elems[1];
candidate.protocol = elems[2].toLowerCase();
candidate.priority = elems[3];
candidate.ip = elems[4];
candidate.port = elems[5];
// elems[6] => "typ"
candidate.type = elems[7];
candidate.generation = '0'; // default, may be overwritten below
for (i = 8; i < elems.length; i += 2) {
switch (elems[i]) {
case 'raddr':
candidate['rel-addr'] = elems[i + 1];
break;
case 'rport':
candidate['rel-port'] = elems[i + 1];
break;
case 'generation':
candidate.generation = elems[i + 1];
break;
case 'tcptype':
candidate.tcptype = elems[i + 1];
break;
default: // TODO
console.log('not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
}
}
candidate.network = '1';
candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
return candidate;
},
candidateFromJingle: function (cand) {
var line = 'a=candidate:';
line += cand.getAttribute('foundation');
line += ' ';
line += cand.getAttribute('component');
line += ' ';
line += cand.getAttribute('protocol'); //.toUpperCase(); // chrome M23 doesn't like this
line += ' ';
line += cand.getAttribute('priority');
line += ' ';
line += cand.getAttribute('ip');
line += ' ';
line += cand.getAttribute('port');
line += ' ';
line += 'typ';
line += ' ' + cand.getAttribute('type');
line += ' ';
switch (cand.getAttribute('type')) {
case 'srflx':
case 'prflx':
case 'relay':
if (cand.getAttribute('rel-addr') && cand.getAttribute('rel-port')) {
line += 'raddr';
line += ' ';
line += cand.getAttribute('rel-addr');
line += ' ';
line += 'rport';
line += ' ';
line += cand.getAttribute('rel-port');
line += ' ';
}
break;
}
if (cand.getAttribute('protocol').toLowerCase() == 'tcp') {
line += 'tcptype';
line += ' ';
line += cand.getAttribute('tcptype');
line += ' ';
}
line += 'generation';
line += ' ';
line += cand.getAttribute('generation') || '0';
return line + '\r\n';
}
};
exports.SDPUtil = SDPUtil;
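// Illustrative example (added for clarity), using the sample candidate line
// from the comment in candidateToJingle above:
//   SDPUtil.candidateToJingle('a=candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0')
//   // => { foundation: '2979166662', component: '1', protocol: 'udp',
//   //      priority: '2113937151', ip: '192.168.2.100', port: '57698',
//   //      type: 'host', generation: '0', network: '1', id: <random> }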


@@ -0,0 +1,254 @@
/**
* Wrapper for built-in http.js to emulate the browser XMLHttpRequest object.
*
* This can be used with JS designed for browsers to improve reuse of code and
* allow the use of existing libraries.
*
* Usage: include("XMLHttpRequest.js") and use XMLHttpRequest per W3C specs.
*
* @todo SSL Support
* @author Dan DeFelippi <dan@driverdan.com>
* @license MIT
*/
var Url = require("url");
var sys = require("util");
exports.XMLHttpRequest = function() {
/**
* Private variables
*/
var self = this;
var http = require('http');
var https = require('https');
// Holds http.js objects
var client;
var request;
var response;
// Request settings
var settings = {};
// Set some default headers
var defaultHeaders = {
"User-Agent": "node.js",
"Accept": "*/*",
};
var headers = defaultHeaders;
/**
* Constants
*/
this.UNSENT = 0;
this.OPENED = 1;
this.HEADERS_RECEIVED = 2;
this.LOADING = 3;
this.DONE = 4;
/**
* Public vars
*/
// Current state
this.readyState = this.UNSENT;
// default ready state change handler in case one is not set or is set late
this.onreadystatechange = function() {};
// Result & response
this.responseText = "";
this.responseXML = "";
this.status = null;
this.statusText = null;
/**
* Open the connection. Currently supports local server requests.
*
* @param string method Connection method (eg GET, POST)
* @param string url URL for the connection.
* @param boolean async Asynchronous connection. Default is true.
* @param string user Username for basic authentication (optional)
* @param string password Password for basic authentication (optional)
*/
this.open = function(method, url, async, user, password) {
settings = {
"method": method,
"url": url,
"async": async || null,
"user": user || null,
"password": password || null
};
this.abort();
setState(this.OPENED);
};
/**
* Sets a header for the request.
*
* @param string header Header name
* @param string value Header value
*/
this.setRequestHeader = function(header, value) {
headers[header] = value;
};
/**
* Gets a header from the server response.
*
* @param string header Name of header to get.
* @return string Text of the header or null if it doesn't exist.
*/
this.getResponseHeader = function(header) {
if (this.readyState > this.OPENED && response.headers[header]) {
return header + ": " + response.headers[header];
}
return null;
};
/**
* Gets all the response headers.
*
* @return string
*/
this.getAllResponseHeaders = function() {
if (this.readyState < this.HEADERS_RECEIVED) {
throw "INVALID_STATE_ERR: Headers have not been received.";
}
var result = "";
for (var i in response.headers) {
result += i + ": " + response.headers[i] + "\r\n";
}
return result.substr(0, result.length - 2);
};
/**
* Sends the request to the server.
*
* @param string data Optional data to send as request body.
*/
this.send = function(data) {
if (this.readyState != this.OPENED) {
throw "INVALID_STATE_ERR: connection must be opened before send() is called";
}
var ssl = false;
var url = Url.parse(settings.url);
// Determine the server
switch (url.protocol) {
case 'https:':
ssl = true;
// SSL & non-SSL both need host, no break here.
case 'http:':
var host = url.hostname;
break;
case undefined:
case '':
var host = "localhost";
break;
default:
throw "Protocol not supported.";
}
// Default to port 80. If accessing localhost on another port be sure
// to use http://localhost:port/path
var port = url.port || (ssl ? 443 : 80);
// Add query string if one is used
var uri = url.pathname + (url.search ? url.search : '');
// Set the Host header or the server may reject the request
this.setRequestHeader("Host", host);
// Set content length header
if (settings.method == "GET" || settings.method == "HEAD") {
data = null;
} else if (data) {
this.setRequestHeader("Content-Length", Buffer.byteLength(data));
if (!headers["Content-Type"]) {
this.setRequestHeader("Content-Type", "text/plain;charset=UTF-8");
}
}
// Use the proper protocol
var doRequest = ssl ? https.request : http.request;
var options = {
host: host,
port: port,
path: uri,
method: settings.method,
headers: headers,
agent: false
};
var req = doRequest(options, function(res) {
response = res;
response.setEncoding("utf8");
setState(self.HEADERS_RECEIVED);
self.status = response.statusCode;
response.on('data', function(chunk) {
// Make sure there's some data
if (chunk) {
self.responseText += chunk;
}
setState(self.LOADING);
});
response.on('end', function() {
setState(self.DONE);
});
response.on('error', function(error) {
self.handleError(error);
});
}).on('error', function(error) {
self.handleError(error);
});
req.setHeader("Connection", "Close");
// Node 0.4 and later won't accept empty data. Make sure it's needed.
if (data) {
req.write(data);
}
req.end();
};
this.handleError = function(error) {
this.status = 503;
this.statusText = error;
this.responseText = error.stack;
setState(this.DONE);
};
/**
* Aborts a request.
*/
this.abort = function() {
headers = defaultHeaders;
this.readyState = this.UNSENT;
this.responseText = "";
this.responseXML = "";
};
/**
* Changes readyState and calls onreadystatechange.
*
* @param int state New state
*/
var setState = function(state) {
self.readyState = state;
self.onreadystatechange();
}
};
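// Minimal usage sketch (added for clarity; the URL is a placeholder):
//   var XMLHttpRequest = require('./XMLHttpRequest').XMLHttpRequest;
//   var req = new XMLHttpRequest();
//   req.onreadystatechange = function () {
//     if (req.readyState === req.DONE) console.log(req.status, req.responseText);
//   };
//   req.open('GET', 'http://localhost:8080/', true);
//   req.send();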


@@ -0,0 +1,83 @@
// This code was written by Tyler Akins and has been placed in the
// public domain. It would be nice if you left this header intact.
// Base64 code from Tyler Akins -- http://rumkin.com
var Base64 = (function () {
var keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
var obj = {
/**
* Encodes a string in base64
* @param {String} input The string to encode in base64.
*/
encode: function (input) {
var output = "";
var chr1, chr2, chr3;
var enc1, enc2, enc3, enc4;
var i = 0;
do {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2) +
keyStr.charAt(enc3) + keyStr.charAt(enc4);
} while (i < input.length);
return output;
},
/**
* Decodes a base64 string.
* @param {String} input The string to decode.
*/
decode: function (input) {
var output = "";
var chr1, chr2, chr3;
var enc1, enc2, enc3, enc4;
var i = 0;
// remove all characters that are not A-Z, a-z, 0-9, +, /, or =
input = input.replace(/[^A-Za-z0-9\+\/\=]/g, '');
do {
enc1 = keyStr.indexOf(input.charAt(i++));
enc2 = keyStr.indexOf(input.charAt(i++));
enc3 = keyStr.indexOf(input.charAt(i++));
enc4 = keyStr.indexOf(input.charAt(i++));
chr1 = (enc1 << 2) | (enc2 >> 4);
chr2 = ((enc2 & 15) << 4) | (enc3 >> 2);
chr3 = ((enc3 & 3) << 6) | enc4;
output = output + String.fromCharCode(chr1);
if (enc3 != 64) {
output = output + String.fromCharCode(chr2);
}
if (enc4 != 64) {
output = output + String.fromCharCode(chr3);
}
} while (i < input.length);
return output;
}
};
return obj;
})();
// Nodify
exports.Base64 = Base64;
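// Quick sanity check (added for clarity): standard base64 round trip.
//   Base64.encode('abc')  // => 'YWJj'
//   Base64.decode('YWJj') // => 'abc'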


@@ -0,0 +1,279 @@
/*
* A JavaScript implementation of the RSA Data Security, Inc. MD5 Message
* Digest Algorithm, as defined in RFC 1321.
* Version 2.1 Copyright (C) Paul Johnston 1999 - 2002.
* Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
* Distributed under the BSD License
* See http://pajhome.org.uk/crypt/md5 for more info.
*/
var MD5 = (function () {
/*
* Configurable variables. You may need to tweak these to be compatible with
* the server-side, but the defaults work in most cases.
*/
var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */
var b64pad = ""; /* base-64 pad character. "=" for strict RFC compliance */
var chrsz = 8; /* bits per input character. 8 - ASCII; 16 - Unicode */
/*
* Add integers, wrapping at 2^32. This uses 16-bit operations internally
* to work around bugs in some JS interpreters.
*/
var safe_add = function (x, y) {
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xFFFF);
};
/*
* Bitwise rotate a 32-bit number to the left.
*/
var bit_rol = function (num, cnt) {
return (num << cnt) | (num >>> (32 - cnt));
};
/*
* Convert a string to an array of little-endian words
* If chrsz is ASCII, characters >255 have their hi-byte silently ignored.
*/
var str2binl = function (str) {
var bin = [];
var mask = (1 << chrsz) - 1;
for(var i = 0; i < str.length * chrsz; i += chrsz)
{
bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (i%32);
}
return bin;
};
/*
* Convert an array of little-endian words to a string
*/
var binl2str = function (bin) {
var str = "";
var mask = (1 << chrsz) - 1;
for(var i = 0; i < bin.length * 32; i += chrsz)
{
str += String.fromCharCode((bin[i>>5] >>> (i % 32)) & mask);
}
return str;
};
/*
* Convert an array of little-endian words to a hex string.
*/
var binl2hex = function (binarray) {
var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
var str = "";
for(var i = 0; i < binarray.length * 4; i++)
{
str += hex_tab.charAt((binarray[i>>2] >> ((i%4)*8+4)) & 0xF) +
hex_tab.charAt((binarray[i>>2] >> ((i%4)*8 )) & 0xF);
}
return str;
};
/*
* Convert an array of little-endian words to a base-64 string
*/
var binl2b64 = function (binarray) {
var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var str = "";
var triplet, j;
for(var i = 0; i < binarray.length * 4; i += 3)
{
triplet = (((binarray[i >> 2] >> 8 * ( i %4)) & 0xFF) << 16) |
(((binarray[i+1 >> 2] >> 8 * ((i+1)%4)) & 0xFF) << 8 ) |
((binarray[i+2 >> 2] >> 8 * ((i+2)%4)) & 0xFF);
for(j = 0; j < 4; j++)
{
if(i * 8 + j * 6 > binarray.length * 32) { str += b64pad; }
else { str += tab.charAt((triplet >> 6*(3-j)) & 0x3F); }
}
}
return str;
};
/*
* These functions implement the four basic operations the algorithm uses.
*/
var md5_cmn = function (q, a, b, x, s, t) {
return safe_add(bit_rol(safe_add(safe_add(a, q),safe_add(x, t)), s),b);
};
var md5_ff = function (a, b, c, d, x, s, t) {
return md5_cmn((b & c) | ((~b) & d), a, b, x, s, t);
};
var md5_gg = function (a, b, c, d, x, s, t) {
return md5_cmn((b & d) | (c & (~d)), a, b, x, s, t);
};
var md5_hh = function (a, b, c, d, x, s, t) {
return md5_cmn(b ^ c ^ d, a, b, x, s, t);
};
var md5_ii = function (a, b, c, d, x, s, t) {
return md5_cmn(c ^ (b | (~d)), a, b, x, s, t);
};
/*
* Calculate the MD5 of an array of little-endian words, and a bit length
*/
var core_md5 = function (x, len) {
/* append padding */
x[len >> 5] |= 0x80 << ((len) % 32);
x[(((len + 64) >>> 9) << 4) + 14] = len;
var a = 1732584193;
var b = -271733879;
var c = -1732584194;
var d = 271733878;
var olda, oldb, oldc, oldd;
for (var i = 0; i < x.length; i += 16)
{
olda = a;
oldb = b;
oldc = c;
oldd = d;
a = md5_ff(a, b, c, d, x[i+ 0], 7 , -680876936);
d = md5_ff(d, a, b, c, x[i+ 1], 12, -389564586);
c = md5_ff(c, d, a, b, x[i+ 2], 17, 606105819);
b = md5_ff(b, c, d, a, x[i+ 3], 22, -1044525330);
a = md5_ff(a, b, c, d, x[i+ 4], 7 , -176418897);
d = md5_ff(d, a, b, c, x[i+ 5], 12, 1200080426);
c = md5_ff(c, d, a, b, x[i+ 6], 17, -1473231341);
b = md5_ff(b, c, d, a, x[i+ 7], 22, -45705983);
a = md5_ff(a, b, c, d, x[i+ 8], 7 , 1770035416);
d = md5_ff(d, a, b, c, x[i+ 9], 12, -1958414417);
c = md5_ff(c, d, a, b, x[i+10], 17, -42063);
b = md5_ff(b, c, d, a, x[i+11], 22, -1990404162);
a = md5_ff(a, b, c, d, x[i+12], 7 , 1804603682);
d = md5_ff(d, a, b, c, x[i+13], 12, -40341101);
c = md5_ff(c, d, a, b, x[i+14], 17, -1502002290);
b = md5_ff(b, c, d, a, x[i+15], 22, 1236535329);
a = md5_gg(a, b, c, d, x[i+ 1], 5 , -165796510);
d = md5_gg(d, a, b, c, x[i+ 6], 9 , -1069501632);
c = md5_gg(c, d, a, b, x[i+11], 14, 643717713);
b = md5_gg(b, c, d, a, x[i+ 0], 20, -373897302);
a = md5_gg(a, b, c, d, x[i+ 5], 5 , -701558691);
d = md5_gg(d, a, b, c, x[i+10], 9 , 38016083);
c = md5_gg(c, d, a, b, x[i+15], 14, -660478335);
b = md5_gg(b, c, d, a, x[i+ 4], 20, -405537848);
a = md5_gg(a, b, c, d, x[i+ 9], 5 , 568446438);
d = md5_gg(d, a, b, c, x[i+14], 9 , -1019803690);
c = md5_gg(c, d, a, b, x[i+ 3], 14, -187363961);
b = md5_gg(b, c, d, a, x[i+ 8], 20, 1163531501);
a = md5_gg(a, b, c, d, x[i+13], 5 , -1444681467);
d = md5_gg(d, a, b, c, x[i+ 2], 9 , -51403784);
c = md5_gg(c, d, a, b, x[i+ 7], 14, 1735328473);
b = md5_gg(b, c, d, a, x[i+12], 20, -1926607734);
a = md5_hh(a, b, c, d, x[i+ 5], 4 , -378558);
d = md5_hh(d, a, b, c, x[i+ 8], 11, -2022574463);
c = md5_hh(c, d, a, b, x[i+11], 16, 1839030562);
b = md5_hh(b, c, d, a, x[i+14], 23, -35309556);
a = md5_hh(a, b, c, d, x[i+ 1], 4 , -1530992060);
d = md5_hh(d, a, b, c, x[i+ 4], 11, 1272893353);
c = md5_hh(c, d, a, b, x[i+ 7], 16, -155497632);
b = md5_hh(b, c, d, a, x[i+10], 23, -1094730640);
a = md5_hh(a, b, c, d, x[i+13], 4 , 681279174);
d = md5_hh(d, a, b, c, x[i+ 0], 11, -358537222);
c = md5_hh(c, d, a, b, x[i+ 3], 16, -722521979);
b = md5_hh(b, c, d, a, x[i+ 6], 23, 76029189);
a = md5_hh(a, b, c, d, x[i+ 9], 4 , -640364487);
d = md5_hh(d, a, b, c, x[i+12], 11, -421815835);
c = md5_hh(c, d, a, b, x[i+15], 16, 530742520);
b = md5_hh(b, c, d, a, x[i+ 2], 23, -995338651);
a = md5_ii(a, b, c, d, x[i+ 0], 6 , -198630844);
d = md5_ii(d, a, b, c, x[i+ 7], 10, 1126891415);
c = md5_ii(c, d, a, b, x[i+14], 15, -1416354905);
b = md5_ii(b, c, d, a, x[i+ 5], 21, -57434055);
a = md5_ii(a, b, c, d, x[i+12], 6 , 1700485571);
d = md5_ii(d, a, b, c, x[i+ 3], 10, -1894986606);
c = md5_ii(c, d, a, b, x[i+10], 15, -1051523);
b = md5_ii(b, c, d, a, x[i+ 1], 21, -2054922799);
a = md5_ii(a, b, c, d, x[i+ 8], 6 , 1873313359);
d = md5_ii(d, a, b, c, x[i+15], 10, -30611744);
c = md5_ii(c, d, a, b, x[i+ 6], 15, -1560198380);
b = md5_ii(b, c, d, a, x[i+13], 21, 1309151649);
a = md5_ii(a, b, c, d, x[i+ 4], 6 , -145523070);
d = md5_ii(d, a, b, c, x[i+11], 10, -1120210379);
c = md5_ii(c, d, a, b, x[i+ 2], 15, 718787259);
b = md5_ii(b, c, d, a, x[i+ 9], 21, -343485551);
a = safe_add(a, olda);
b = safe_add(b, oldb);
c = safe_add(c, oldc);
d = safe_add(d, oldd);
}
return [a, b, c, d];
};
/*
* Calculate the HMAC-MD5, of a key and some data
*/
var core_hmac_md5 = function (key, data) {
var bkey = str2binl(key);
if(bkey.length > 16) { bkey = core_md5(bkey, key.length * chrsz); }
var ipad = new Array(16), opad = new Array(16);
for(var i = 0; i < 16; i++)
{
ipad[i] = bkey[i] ^ 0x36363636;
opad[i] = bkey[i] ^ 0x5C5C5C5C;
}
var hash = core_md5(ipad.concat(str2binl(data)), 512 + data.length * chrsz);
return core_md5(opad.concat(hash), 512 + 128);
};
var obj = {
/*
* These are the functions you'll usually want to call.
* They take string arguments and return either hex or base-64 encoded
* strings.
*/
hexdigest: function (s) {
return binl2hex(core_md5(str2binl(s), s.length * chrsz));
},
b64digest: function (s) {
return binl2b64(core_md5(str2binl(s), s.length * chrsz));
},
hash: function (s) {
return binl2str(core_md5(str2binl(s), s.length * chrsz));
},
hmac_hexdigest: function (key, data) {
return binl2hex(core_hmac_md5(key, data));
},
hmac_b64digest: function (key, data) {
return binl2b64(core_hmac_md5(key, data));
},
hmac_hash: function (key, data) {
return binl2str(core_hmac_md5(key, data));
},
/*
* Perform a simple self-test to see if the VM is working
*/
test: function () {
return MD5.hexdigest("abc") === "900150983cd24fb0d6963f7d28e17f72";
}
};
return obj;
})();
// Nodify
exports.MD5 = MD5;
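// Usage sketch (added for clarity; the expected digest matches the self-test above):
//   MD5.hexdigest('abc') // => '900150983cd24fb0d6963f7d28e17f72'
//   MD5.test()           // => true if the VM computes MD5 correctly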

File diff suppressed because it is too large.


@@ -0,0 +1,48 @@
var strophe = require("./strophe/strophe.js").Strophe;
var Strophe = strophe.Strophe;
var $iq = strophe.$iq;
var $msg = strophe.$msg;
var $build = strophe.$build;
var $pres = strophe.$pres;
var jsdom = require("jsdom");
var window = jsdom.jsdom().parentWindow;
var $ = require('jquery')(window);
var stropheJingle = require("./strophe.jingle.sdp.js");
var input = '';
process.stdin.on('readable', function() {
var chunk = process.stdin.read();
if (chunk !== null) {
input += chunk;
}
});
process.stdin.on('end', function() {
if (process.argv[2] == '--jingle') {
var elem = $(input);
// app does:
// sess.setRemoteDescription($(iq).find('>jingle'), 'offer');
//console.log(elem.find('>content'));
var sdp = new stropheJingle.SDP('');
sdp.fromJingle(elem);
console.log(sdp.raw);
} else if (process.argv[2] == '--sdp') {
var sdp = new stropheJingle.SDP(input);
var accept = $iq({to: '%(tojid)s',
type: 'set'})
.c('jingle', {xmlns: 'urn:xmpp:jingle:1',
//action: 'session-accept',
action: '%(action)s',
initiator: '%(initiator)s',
responder: '%(responder)s',
sid: '%(sid)s' });
sdp.toJingle(accept, 'responder');
console.log(Strophe.serialize(accept));
}
});


@@ -92,6 +92,22 @@ new PromConsole.Graph({
})
</script>
<h3>Pending calls per tick</h3>
<div id="reactor_pending_calls"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_pending_calls"),
expr: "rate(python_twisted_reactor_pending_calls_sum[30s]) / rate(python_twisted_reactor_pending_calls_count[30s])",
name: "[[job]]-[[index]]",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yTitle: "Pending Calls"
})
</script>
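<!-- Note (added for clarity): dividing rate(..._sum) by rate(..._count) yields the
     average number of pending calls per reactor tick over the 30s window. -->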
<h1>Storage</h1>
<h3>Queries</h3>

contrib/scripts/kick_users.py (executable file, 88 lines)

@@ -0,0 +1,88 @@
#!/usr/bin/env python
import json
import sys
from urllib.parse import quote
from argparse import ArgumentParser
import requests
def _mkurl(template, kws):
for key in kws:
template = template.replace(key, kws[key])
return template
def main(hs, room_id, access_token, user_id_prefix, why):
if not why:
why = "Automated kick."
print(
"Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
)
room_state_url = _mkurl(
"$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
{"$HS": hs, "$ROOM": room_id, "$TOKEN": access_token},
)
print("Getting room state => %s" % room_state_url)
res = requests.get(room_state_url)
print("HTTP %s" % res.status_code)
state_events = res.json()
if "error" in state_events:
print("FATAL")
print(state_events)
return
kick_list = []
room_name = room_id
for event in state_events:
if not event["type"] == "m.room.member":
if event["type"] == "m.room.name":
room_name = event["content"].get("name")
continue
if not event["content"].get("membership") == "join":
continue
if event["state_key"].startswith(user_id_prefix):
kick_list.append(event["state_key"])
if len(kick_list) == 0:
print("No user IDs match the prefix '%s'" % user_id_prefix)
return
print("The following user IDs will be kicked from %s" % room_name)
for uid in kick_list:
print(uid)
doit = input("Continue? [Y]es\n")
if len(doit) > 0 and doit.lower() == "y":
print("Kicking members...")
# encode them all
kick_list = [quote(uid) for uid in kick_list]
for uid in kick_list:
kick_url = _mkurl(
"$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN",
{"$HS": hs, "$UID": uid, "$ROOM": room_id, "$TOKEN": access_token},
)
kick_body = {"membership": "leave", "reason": why}
print("Kicking %s" % uid)
res = requests.put(kick_url, data=json.dumps(kick_body))
if res.status_code != 200:
print("ERROR: HTTP %s" % res.status_code)
if res.json().get("error"):
print("ERROR: JSON %s" % res.json())
if __name__ == "__main__":
parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")
parser.add_argument("-u", "--user-id", help="The user ID prefix e.g. '@irc_'")
parser.add_argument("-t", "--token", help="Your access_token")
parser.add_argument("-r", "--room", help="The room ID to kick members in")
parser.add_argument(
"-s", "--homeserver", help="The base HS url e.g. http://matrix.org"
)
parser.add_argument("-w", "--why", help="Reason for the kick. Optional.")
args = parser.parse_args()
if not args.room or not args.token or not args.user_id or not args.homeserver:
parser.print_help()
sys.exit(1)
else:
main(args.homeserver, args.room, args.token, args.user_id, args.why)
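# Example invocation (illustrative; all values are placeholders):
#   ./kick_users.py -s https://matrix.example.org -t <access_token> \
#       -r '!someroom:example.org' -u '@irc_' -w 'Cleaning up bridge users'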


@@ -1,31 +0,0 @@
# Creating multiple workers with a bash script
Setting up multiple worker configuration files manually can be time-consuming.
You can alternatively create multiple worker configuration files with a simple `bash` script. For example:
```sh
#!/bin/bash
for i in {1..5}
do
cat << EOF >> generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 808$i
resources:
- names: [client, federation]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
EOF
done
```
This would create five generic workers with a unique `worker_name` field in each file and listening on ports 8081-8085.
Customise the script to your needs.


@@ -30,23 +30,9 @@ case $(dpkg-architecture -q DEB_HOST_ARCH) in
;;
esac
# Manually install Poetry and export a pip-compatible `requirements.txt`
# We need a Poetry pre-release as the export command is buggy in < 1.2
TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.2.0b1
poetry export \
--extras all \
--extras test \
--extras systemd \
-o exported_requirements.txt
deactivate
rm -rf "$TEMP_VENV"
# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
# than the 2/3 compatible `virtualenv`.
# Use --no-deps to only install pinned versions in exported_requirements.txt,
# and to avoid https://github.com/pypa/pip/issues/9644
dh_virtualenv \
--install-suffix "matrix-synapse" \
--builtin-venv \
@@ -55,11 +41,9 @@ dh_virtualenv \
--preinstall="lxml" \
--preinstall="mock" \
--preinstall="wheel" \
--extra-pip-arg="--no-deps" \
--extra-pip-arg="--no-cache-dir" \
--extra-pip-arg="--compile" \
--extras="all,systemd,test" \
--requirements="exported_requirements.txt"
--extras="all,systemd,test"
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"

debian/changelog (vendored, 327 lines)

@@ -1,330 +1,3 @@
matrix-synapse-py3 (1.63.1) stable; urgency=medium
* New Synapse release 1.63.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 20 Jul 2022 13:36:52 +0100
matrix-synapse-py3 (1.63.0) stable; urgency=medium
* Clarify that homeserver server names are included in the data reported
by opt-in server stats reporting (`report_stats` homeserver config option).
* New Synapse release 1.63.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 19 Jul 2022 14:42:24 +0200
matrix-synapse-py3 (1.63.0~rc1) stable; urgency=medium
* New Synapse release 1.63.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 12 Jul 2022 11:26:02 +0100
matrix-synapse-py3 (1.62.0) stable; urgency=medium
* New Synapse release 1.62.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 05 Jul 2022 11:14:15 +0100
matrix-synapse-py3 (1.62.0~rc3) stable; urgency=medium
* New Synapse release 1.62.0rc3.
-- Synapse Packaging team <packages@matrix.org> Mon, 04 Jul 2022 16:07:01 +0100
matrix-synapse-py3 (1.62.0~rc2) stable; urgency=medium
* New Synapse release 1.62.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 01 Jul 2022 11:42:41 +0100
matrix-synapse-py3 (1.62.0~rc1) stable; urgency=medium
* New Synapse release 1.62.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Jun 2022 16:34:57 +0100
matrix-synapse-py3 (1.61.1) stable; urgency=medium
* New Synapse release 1.61.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Jun 2022 14:33:46 +0100
matrix-synapse-py3 (1.61.0) stable; urgency=medium
* New Synapse release 1.61.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Jun 2022 11:44:19 +0100
matrix-synapse-py3 (1.61.0~rc1) stable; urgency=medium
* Remove unused `jitsimeetbridge` experiment from `contrib` directory.
* New Synapse release 1.61.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Jun 2022 12:42:31 +0100
matrix-synapse-py3 (1.60.0) stable; urgency=medium
* New Synapse release 1.60.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 31 May 2022 13:41:22 +0100
matrix-synapse-py3 (1.60.0~rc2) stable; urgency=medium
* New Synapse release 1.60.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 27 May 2022 11:04:55 +0100
matrix-synapse-py3 (1.60.0~rc1) stable; urgency=medium
* New Synapse release 1.60.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 24 May 2022 12:05:01 +0100
matrix-synapse-py3 (1.59.1) stable; urgency=medium
* New Synapse release 1.59.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 18 May 2022 11:41:46 +0100
matrix-synapse-py3 (1.59.0) stable; urgency=medium
* New Synapse release 1.59.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 17 May 2022 10:26:50 +0100
matrix-synapse-py3 (1.59.0~rc2) stable; urgency=medium
* New Synapse release 1.59.0rc2.
-- Synapse Packaging team <packages@matrix.org> Mon, 16 May 2022 12:52:15 +0100
matrix-synapse-py3 (1.59.0~rc1) stable; urgency=medium
* Adjust how the `exported-requirements.txt` file is generated as part of
the process of building these packages. This affects the package
maintainers only; end-users are unaffected.
* New Synapse release 1.59.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 10 May 2022 10:45:08 +0100
matrix-synapse-py3 (1.58.1) stable; urgency=medium
* Include python dependencies from the `systemd` and `cache_memory` extras package groups, which
were incorrectly omitted from the 1.58.0 package.
* New Synapse release 1.58.1.
-- Synapse Packaging team <packages@matrix.org> Thu, 05 May 2022 14:58:23 +0100
matrix-synapse-py3 (1.58.0) stable; urgency=medium
* New Synapse release 1.58.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 03 May 2022 10:52:58 +0100
matrix-synapse-py3 (1.58.0~rc2) stable; urgency=medium
* New Synapse release 1.58.0rc2.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Apr 2022 17:14:56 +0100
matrix-synapse-py3 (1.58.0~rc1) stable; urgency=medium
* Use poetry to manage the bundled virtualenv included with this package.
* New Synapse release 1.58.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Apr 2022 11:15:20 +0100
matrix-synapse-py3 (1.57.1) stable; urgency=medium
* New synapse release 1.57.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 20 Apr 2022 15:27:21 +0100
matrix-synapse-py3 (1.57.0) stable; urgency=medium
* New synapse release 1.57.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 19 Apr 2022 10:58:42 +0100
matrix-synapse-py3 (1.57.0~rc1) stable; urgency=medium
* New synapse release 1.57.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 12 Apr 2022 13:36:25 +0100
matrix-synapse-py3 (1.56.0) stable; urgency=medium
* New synapse release 1.56.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 05 Apr 2022 12:38:39 +0100
matrix-synapse-py3 (1.56.0~rc1) stable; urgency=medium
* New synapse release 1.56.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 29 Mar 2022 10:40:50 +0100
matrix-synapse-py3 (1.55.2) stable; urgency=medium
* New synapse release 1.55.2.
-- Synapse Packaging team <packages@matrix.org> Thu, 24 Mar 2022 19:07:11 +0000
matrix-synapse-py3 (1.55.1) stable; urgency=medium
* New synapse release 1.55.1.
-- Synapse Packaging team <packages@matrix.org> Thu, 24 Mar 2022 17:44:23 +0000
matrix-synapse-py3 (1.55.0) stable; urgency=medium
* New synapse release 1.55.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 22 Mar 2022 13:59:26 +0000
matrix-synapse-py3 (1.55.0~rc1) stable; urgency=medium
* New synapse release 1.55.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 15 Mar 2022 10:59:31 +0000
matrix-synapse-py3 (1.54.0) stable; urgency=medium
* New synapse release 1.54.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 08 Mar 2022 10:54:52 +0000
matrix-synapse-py3 (1.54.0~rc1) stable; urgency=medium
* New synapse release 1.54.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 02 Mar 2022 10:43:22 +0000
matrix-synapse-py3 (1.53.0) stable; urgency=medium
* New synapse release 1.53.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 22 Feb 2022 11:32:06 +0000
matrix-synapse-py3 (1.53.0~rc1) stable; urgency=medium
* New synapse release 1.53.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 15 Feb 2022 10:40:50 +0000
matrix-synapse-py3 (1.52.0) stable; urgency=medium
* New synapse release 1.52.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 08 Feb 2022 11:34:54 +0000
matrix-synapse-py3 (1.52.0~rc1) stable; urgency=medium
* New synapse release 1.52.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 01 Feb 2022 11:04:09 +0000
matrix-synapse-py3 (1.51.0) stable; urgency=medium
* New synapse release 1.51.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 25 Jan 2022 11:28:51 +0000
matrix-synapse-py3 (1.51.0~rc2) stable; urgency=medium
* New synapse release 1.51.0~rc2.
-- Synapse Packaging team <packages@matrix.org> Mon, 24 Jan 2022 12:25:00 +0000
matrix-synapse-py3 (1.51.0~rc1) stable; urgency=medium
* New synapse release 1.51.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Fri, 21 Jan 2022 10:46:02 +0000
matrix-synapse-py3 (1.50.2) stable; urgency=medium
* New synapse release 1.50.2.
-- Synapse Packaging team <packages@matrix.org> Mon, 24 Jan 2022 13:37:11 +0000
matrix-synapse-py3 (1.50.1) stable; urgency=medium
* New synapse release 1.50.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Jan 2022 16:06:26 +0000
matrix-synapse-py3 (1.50.0) stable; urgency=medium
* New synapse release 1.50.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Jan 2022 10:40:38 +0000
matrix-synapse-py3 (1.50.0~rc2) stable; urgency=medium
* New synapse release 1.50.0~rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 14 Jan 2022 11:18:06 +0000
matrix-synapse-py3 (1.50.0~rc1) stable; urgency=medium
* New synapse release 1.50.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 05 Jan 2022 12:36:17 +0000
matrix-synapse-py3 (1.49.2) stable; urgency=medium
* New synapse release 1.49.2.
-- Synapse Packaging team <packages@matrix.org> Tue, 21 Dec 2021 17:31:03 +0000
matrix-synapse-py3 (1.49.1) stable; urgency=medium
* New synapse release 1.49.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 21 Dec 2021 11:07:30 +0000
matrix-synapse-py3 (1.49.0) stable; urgency=medium
* New synapse release 1.49.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Dec 2021 12:39:46 +0000
matrix-synapse-py3 (1.49.0~rc1) stable; urgency=medium
* New synapse release 1.49.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Dec 2021 13:52:21 +0000
matrix-synapse-py3 (1.48.0) stable; urgency=medium
* New synapse release 1.48.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 30 Nov 2021 11:24:15 +0000
matrix-synapse-py3 (1.48.0~rc1) stable; urgency=medium
* New synapse release 1.48.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Thu, 25 Nov 2021 15:56:03 +0000
matrix-synapse-py3 (1.47.1) stable; urgency=medium
* New synapse release 1.47.1.
-- Synapse Packaging team <packages@matrix.org> Fri, 19 Nov 2021 13:44:32 +0000
matrix-synapse-py3 (1.47.0) stable; urgency=medium
* New synapse release 1.47.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 17 Nov 2021 13:09:43 +0000
matrix-synapse-py3 (1.47.0~rc3) stable; urgency=medium
* New synapse release 1.47.0~rc3.
-- Synapse Packaging team <packages@matrix.org> Tue, 16 Nov 2021 14:32:47 +0000
matrix-synapse-py3 (1.47.0~rc2) stable; urgency=medium
[ Dan Callahan ]

debian/clean (vendored, 1 line)

@@ -1 +0,0 @@
exported_requirements.txt

debian/copyright (vendored, 23 lines)

@@ -22,6 +22,29 @@ Files: synapse/config/repository.py
Copyright: 2014-2015, matrix.org
License: Apache-2.0
Files: contrib/jitsimeetbridge/unjingle/strophe/base64.js
Copyright: Public Domain (Tyler Akins http://rumkin.com)
License: public-domain
This code was written by Tyler Akins and has been placed in the
public domain. It would be nice if you left this header intact.
Base64 code from Tyler Akins -- http://rumkin.com
Files: contrib/jitsimeetbridge/unjingle/strophe/md5.js
Copyright: 1999-2002, Paul Johnston & Contributors
License: BSD-3-clause
Files: contrib/jitsimeetbridge/unjingle/strophe/strophe.js
Copyright: 2006-2008, OGG, LLC
License: Expat
Files: contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js
Copyright: 2010 passive.ly LLC
License: Expat
Files: contrib/jitsimeetbridge/unjingle/*.js
Copyright: 2014 Jitsi
License: Apache-2.0
Files: debian/*
Copyright: 2016-2017, Erik Johnston <erik@matrix.org>
2017, Rahul De <rahulde@swecha.net>


@@ -31,7 +31,7 @@ EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.
# Whether to report homeserver usage statistics.
# Whether to report anonymized homeserver usage statistics.
report_stats: false
EOF
fi


@@ -37,7 +37,7 @@ msgstr ""
#. Type: boolean
#. Description
#: ../templates:2001
msgid "Report homeserver usage statistics?"
msgid "Report anonymous statistics?"
msgstr ""
#. Type: boolean
@@ -45,11 +45,11 @@ msgstr ""
#: ../templates:2001
msgid ""
"Developers of Matrix and Synapse really appreciate helping the project out "
"by reporting homeserver usage statistics from this homeserver. Your "
"homeserver's server name, along with very basic aggregate data (e.g. "
"number of users) will be reported. But it helps track the growth of the "
"Matrix community, and helps in making Matrix a success, as well as to "
"convince other networks that they should peer with Matrix."
"by reporting anonymized usage statistics from this homeserver. Only very "
"basic aggregate data (e.g. number of users) will be reported, but it helps "
"track the growth of the Matrix community, and helps in making Matrix a "
"success, as well as to convince other networks that they should peer with "
"Matrix."
msgstr ""
#. Type: boolean

debian/templates (vendored, 13 lines)

@@ -10,13 +10,12 @@ _Description: Name of the server:
Template: matrix-synapse/report-stats
Type: boolean
Default: false
_Description: Report homeserver usage statistics?
_Description: Report anonymous statistics?
Developers of Matrix and Synapse really appreciate helping the
project out by reporting homeserver usage statistics from this
homeserver. Your homeserver's server name, along with very basic
aggregate data (e.g. number of users) will be reported. But it
helps track the growth of the Matrix community, and helps in
making Matrix a success, as well as to convince other networks
that they should peer with Matrix.
project out by reporting anonymized usage statistics from this
homeserver. Only very basic aggregate data (e.g. number of users)
will be reported, but it helps track the growth of the Matrix
community, and helps in making Matrix a success, as well as to
convince other networks that they should peer with Matrix.
.
Thank you.

demo/.gitignore (vendored, 11 lines)

@@ -1,4 +1,7 @@
# Ignore all the temporary files from the demo servers.
8080/
8081/
8082/
*.db
*.log
*.log.*
*.pid
/media_store.*
/etc

demo/README (new file, 26 lines)

@@ -0,0 +1,26 @@
DO NOT USE THESE DEMO SERVERS IN PRODUCTION
Requires you to have done:
python setup.py develop
The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.
To enable the servers to communicate, untrusted SSL certs are used. To make this work, the servers do not check the
certs and are configured in a highly insecure way. Do not use these configuration files in production.
stop.sh will stop the synapse servers and the webclient.
clean.sh will delete the databases and log files.
To start a completely new set of servers, run:
./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
Logs and SQLite databases will be stored in demo/808{0,1,2}.{log,db}.
Also note that when joining a public room on a different HS via "#foo:bar.net", you are (in the current implementation) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name.


@@ -4,9 +4,6 @@ set -e
DIR="$( cd "$( dirname "$0" )" && pwd )"
# Ensure that the servers are stopped.
$DIR/stop.sh
PID_FILE="$DIR/servers.pid"
if [ -f "$PID_FILE" ]; then


@@ -6,14 +6,14 @@ CWD=$(pwd)
cd "$DIR/.." || exit
# Do not override PYTHONPATH if we are in a virtual env
if [ "$VIRTUAL_ENV" = "" ]; then
PYTHONPATH=$(readlink -f "$(pwd)")
export PYTHONPATH
echo "$PYTHONPATH"
fi
mkdir -p demo/etc
PYTHONPATH=$(readlink -f "$(pwd)")
export PYTHONPATH
echo "$PYTHONPATH"
# Create servers which listen on HTTP at 808x and HTTPS at 848x.
for port in 8080 8081 8082; do
echo "Starting server on port $port... "
@@ -21,29 +21,22 @@ for port in 8080 8081 8082; do
mkdir -p demo/$port
pushd demo/$port || exit
# Generate the configuration for the homeserver at localhost:848x; note that
# the homeserver name needs to match the HTTPS listening port for federation
# to work properly.
#rm $DIR/etc/$port.config
python3 -m synapse.app.homeserver \
--generate-config \
--server-name "localhost:$https_port" \
--config-path "$port.config" \
-H "localhost:$https_port" \
--config-path "$DIR/etc/$port.config" \
--report-stats no
if ! grep -F "Customisation made by demo/start.sh" -q "$port.config"; then
# Generate TLS keys.
openssl req -x509 -newkey rsa:4096 \
-keyout "localhost:$port.tls.key" \
-out "localhost:$port.tls.crt" \
-days 365 -nodes -subj "/O=matrix"
if ! grep -F "Customisation made by demo/start.sh" -q "$DIR/etc/$port.config"; then
# Generate tls keys
openssl req -x509 -newkey rsa:4096 -keyout "$DIR/etc/localhost:$https_port.tls.key" -out "$DIR/etc/localhost:$https_port.tls.crt" -days 365 -nodes -subj "/O=matrix"
# Add customisations to the configuration.
# Regenerate configuration
{
printf '\n\n# Customisation made by demo/start.sh\n\n'
printf '\n\n# Customisation made by demo/start.sh\n'
echo "public_baseurl: http://localhost:$port/"
echo 'enable_registration: true'
echo 'enable_registration_without_verification: true'
echo ''
# Warning, this heredoc depends on the interaction of tabs and spaces.
# Please don't accidentally bork me with your fancy settings.
@@ -70,34 +63,38 @@ for port in 8080 8081 8082; do
echo "${listeners}"
# Disable TLS for the servers
printf '\n\n# Disable TLS for the servers.'
# Disable tls for the servers
printf '\n\n# Disable tls on the servers.'
echo '# DO NOT USE IN PRODUCTION'
echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true'
echo 'federation_verify_certificates: false'
# Set paths for the TLS certificates.
echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""
# Set tls paths
echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\""
echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\""
# Ignore keys from the trusted keys server
echo '# Ignore keys from the trusted keys server'
echo 'trusted_key_servers:'
echo ' - server_name: "matrix.org"'
echo ' accept_keys_insecurely: true'
echo ''
# Allow the servers to communicate over localhost.
allow_list=$(cat <<-ALLOW_LIST
# Allow the servers to communicate over localhost.
ip_range_whitelist:
- '127.0.0.1/8'
- '::1/128'
ALLOW_LIST
# Reduce the blacklist
blacklist=$(cat <<-BLACK
# Set the blacklist so that it doesn't include 127.0.0.1, ::1
federation_ip_range_blacklist:
- '10.0.0.0/8'
- '172.16.0.0/12'
- '192.168.0.0/16'
- '100.64.0.0/10'
- '169.254.0.0/16'
- 'fe80::/64'
- 'fc00::/7'
BLACK
)
echo "${allow_list}"
} >> "$port.config"
echo "${blacklist}"
} >> "$DIR/etc/$port.config"
fi
# Check script parameters
@@ -144,18 +141,19 @@ for port in 8080 8081 8082; do
burst_count: 1000
RC
)
echo "${ratelimiting}" >> "$port.config"
echo "${ratelimiting}" >> "$DIR/etc/$port.config"
fi
fi
# Always disable reporting of stats if the option is not there.
if ! grep -F "report_stats" -q "$port.config" ; then
echo "report_stats: false" >> "$port.config"
if ! grep -F "full_twisted_stacktraces" -q "$DIR/etc/$port.config"; then
echo "full_twisted_stacktraces: true" >> "$DIR/etc/$port.config"
fi
if ! grep -F "report_stats" -q "$DIR/etc/$port.config" ; then
echo "report_stats: false" >> "$DIR/etc/$port.config"
fi
# Run the homeserver in the background.
python3 -m synapse.app.homeserver \
--config-path "$port.config" \
--config-path "$DIR/etc/$port.config" \
-D \
popd || exit


@@ -1,85 +1,25 @@
# syntax=docker/dockerfile:1
# Dockerfile to build the matrixdotorg/synapse docker images.
#
# Note that it uses features which are only available in BuildKit - see
# https://docs.docker.com/go/buildkit/ for more information.
#
# To build the image, run `docker build` command from the root of the
# synapse repository:
#
# DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile .
# docker build -f docker/Dockerfile .
#
# There is an optional PYTHON_VERSION build argument which sets the
# version of python to build against: for example:
#
# DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.10 .
# docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.6 .
#
# Irritatingly, there is no blessed guide on how to distribute an application with its
# poetry-managed environment in a docker image. We have opted for
# `poetry export | pip install -r /dev/stdin`, but there are known bugs in
# `poetry export` whose fixes (scheduled for poetry 1.2) have yet to be released.
# In case we get bitten by those bugs in the future, the recommendations here might
# be useful:
# https://github.com/python-poetry/poetry/discussions/1879#discussioncomment-216865
# https://stackoverflow.com/questions/53835198/integrating-python-poetry-with-docker?answertab=scoredesc
ARG PYTHON_VERSION=3.9
ARG PYTHON_VERSION=3.8
###
### Stage 0: generate requirements.txt
###
FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
# Here we use it to set up a cache for apt (and below for pip), to improve
# rebuild speeds on slow connections.
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq git \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
# We use a specific commit from poetry's master branch instead of our usual 1.1.12,
# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
# https://github.com/python-poetry/poetry/pull/5156 and
# https://github.com/python-poetry/poetry/issues/5141 ;
# without it, we generate a requirements.txt with incorrect environment markers,
# which causes necessary packages to be omitted when we `pip install`.
#
# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5"
WORKDIR /synapse
# Copy just what we need to run `poetry export`...
COPY pyproject.toml poetry.lock /synapse/
# If specified, we won't verify the hashes of dependencies.
# This is only needed if the hashes of dependencies cannot be checked for some
# reason, such as when a git repository is used directly as a dependency.
ARG TEST_ONLY_SKIP_DEP_HASH_VERIFICATION
RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}
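# The `${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}` expansion
# above works as follows:
#   build arg unset or empty -> expands to nothing, hashes are verified
#   build arg set (e.g. to 1) -> expands to `--without-hashes`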
###
### Stage 1: builder
### Stage 0: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
# install the OS build deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
RUN apt-get update && apt-get install -y \
build-essential \
libffi-dev \
libjpeg-dev \
@@ -91,28 +31,32 @@ RUN \
openssl \
rustc \
zlib1g-dev \
git \
&& rm -rf /var/lib/apt/lists/*
# Copy just what we need to pip install
COPY scripts /synapse/scripts/
COPY MANIFEST.in README.rst setup.py synctl /synapse/
COPY synapse/__init__.py /synapse/synapse/__init__.py
COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
# the whole synapse project so that we this layer in the Docker cache can be
# used while you develop on the source
#
# This is aiming at installing the `[tool.poetry.dependencies]` from pyproject.toml.
COPY --from=requirements /synapse/requirements.txt /synapse/
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --prefix="/install" --no-deps --no-warn-script-location -r /synapse/requirements.txt
# This is aiming at installing the `install_requires` and `extras_require` from `setup.py`
RUN pip install --prefix="/install" --no-warn-script-location \
/synapse[all]
# Copy over the rest of the synapse source code.
# Copy over the rest of the project
COPY synapse /synapse/synapse/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst /synapse/
# Install the synapse package itself.
# Install the synapse package itself and all of its children packages.
#
# This is aiming at installing only the `packages=find_packages(...)` from `setup.py`
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
###
### Stage 2: runtime
### Stage 1: runtime
###
FROM docker.io/python:${PYTHON_VERSION}-slim
@@ -122,10 +66,7 @@ LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/syna
LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git'
LABEL org.opencontainers.image.licenses='Apache-2.0'
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
RUN apt-get update && apt-get install -y \
curl \
gosu \
libjpeg62-turbo \
@@ -141,6 +82,8 @@ COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py
COPY ./docker/conf /conf
VOLUME ["/data"]
EXPOSE 8008/tcp 8009/tcp 8448/tcp
ENTRYPOINT ["/start.py"]


@@ -16,7 +16,7 @@ ARG distro=""
### Stage 0: build a dh-virtualenv
###
# This is only really needed on focal, since other distributions we
# This is only really needed on bionic and focal, since other distributions we
# care about have a recent version of dh-virtualenv by default. Unfortunately,
# it looks like focal is going to be with us for a while.
#
@@ -36,8 +36,9 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
wget
# fetch and unpack the package
# TODO: Upgrade to 1.2.2 once bionic is dropped (1.2.2 requires debhelper 12; bionic has only 11)
RUN mkdir /dh-virtualenv
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/refs/tags/1.2.2.tar.gz
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
# install its build deps. We do another apt-cache-update here, because we might
@@ -85,12 +86,12 @@ RUN apt-get update -qq -o Acquire::Languages=none \
libpq-dev \
xmlsec1
COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /
COPY --from=builder /dh-virtualenv_1.2~dev-1_all.deb /
# install dhvirtualenv. Update the apt cache again first, in case we got a
# cached cache from docker the first time.
RUN apt-get update -qq -o Acquire::Languages=none \
&& apt-get install -yq /dh-virtualenv_1.2.2-1_all.deb
&& apt-get install -yq /dh-virtualenv_1.2~dev-1_all.deb
WORKDIR /synapse/source
ENTRYPOINT ["bash","/synapse/source/docker/build_debian.sh"]

docker/Dockerfile-pgtests (new file)

@@ -0,0 +1,30 @@
# Use the Sytest image that comes with a lot of the build dependencies
# pre-installed
FROM matrixdotorg/sytest:bionic
# The Sytest image doesn't come with python, so install that
RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
# We need tox to run the tests in run_pg_tests.sh
RUN python3 -m pip install tox
# Initialise the db
RUN su -c '/usr/lib/postgresql/10/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="C.UTF-8" --lc-ctype="C.UTF-8" --username=postgres' postgres
# Add a user with our UID and GID so that files get created on the host owned
# by us, not root.
ARG UID
ARG GID
RUN groupadd --gid $GID user
RUN useradd --uid $UID --gid $GID --groups sudo --no-create-home user
# Ensure we can start postgres by sudo-ing as the postgres user.
RUN apt-get update && apt-get -qq install -y sudo
RUN echo "user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
ADD run_pg_tests.sh /run_pg_tests.sh
# Use the "exec form" of ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint)
# so that we can `docker run` this container and pass arguments to run_pg_tests.sh
ENTRYPOINT ["/run_pg_tests.sh"]
USER user
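# A hedged usage sketch (the image tag is illustrative): pass your own UID and
# GID at build time so files created on the mounted host volume are owned by
# you rather than root:
#   docker build -f docker/Dockerfile-pgtests -t synapse-pgtests \
#     --build-arg UID=$(id -u) --build-arg GID=$(id -g) .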


@@ -1,38 +1,23 @@
# Inherit from the official Synapse docker image
ARG SYNAPSE_VERSION=latest
FROM matrixdotorg/synapse:$SYNAPSE_VERSION
FROM matrixdotorg/synapse
# Install deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && \
DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
redis-server nginx-light
RUN apt-get update
RUN apt-get install -y supervisor redis nginx
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install supervisor~=4.2
# Disable the default nginx sites
# Remove the default nginx sites
RUN rm /etc/nginx/sites-enabled/default
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
# Copy a script to prefix log lines with the supervisor program name
COPY ./docker/prefix-log /usr/local/bin/
# Expose nginx listener port
EXPOSE 8080/tcp
# Volume for user-editable config files, logs etc.
VOLUME ["/data"]
# A script to read environment variables and create the necessary
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]
# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD /bin/sh /healthcheck.sh
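# /healthcheck.sh is generated at startup by configure_workers_and_start.py;
# a hedged sketch of its likely shape, assuming each configured worker exposes
# Synapse's /health endpoint on its own port (port numbers are illustrative):
#   #!/bin/sh
#   set -e
#   curl -fSs http://localhost:8080/health
#   curl -fSs http://localhost:18008/health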


@@ -8,19 +8,13 @@ docker images that can be run inside Complement for testing purposes.
Note that running Synapse's unit tests from within the docker image is not supported.
## Using the Complement launch script
## Testing with SQLite and single-process Synapse
`scripts-dev/complement.sh` is a script that will automatically build
and run Synapse against Complement.
Consult the [contributing guide][guideComplementSh] for instructions on how to use it.
> Note that `scripts-dev/complement.sh` is a script that will automatically build
> and run an SQLite-based, single-process instance of Synapse against Complement.
[guideComplementSh]: https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-integration-tests-complement
## Building and running the images manually
Under some circumstances, you may wish to build the images manually.
The instructions below will lead you to doing that.
The instructions below will set up Complement testing for a single-process,
SQLite-based Synapse deployment.
Start by building the base Synapse docker image. If you wish to run tests with the latest
release of Synapse, instead of your current checkout, you can skip this step. From the
@@ -30,17 +24,58 @@ root of the repository:
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```
Next, build the workerised Synapse docker image, which is a layer over the base
image.
This will build an image with the tag `matrixdotorg/synapse`.
Next, build the Synapse image for Complement. You will need a local checkout
of Complement. Change to the root of your Complement checkout and run:
```sh
docker build -t complement-synapse -f "dockerfiles/Synapse.Dockerfile" dockerfiles
```
This will build an image with the tag `complement-synapse`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
## Testing with PostgreSQL and single or multi-process Synapse
The above docker image only supports running Synapse with SQLite and in a
single-process topology. The following instructions are used to build a Synapse image for
Complement that supports either single or multi-process topology with a PostgreSQL
database backend.
As with the single-process image, build the base Synapse docker image. If you wish to run
tests with the latest release of Synapse, instead of your current checkout, you can skip
this step. From the root of the repository:
```sh
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```
This will build an image with the tag `matrixdotorg/synapse`.
Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
Again, from the root of the repository:
```sh
docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
```
Finally, build the multi-purpose image for Complement, which is a layer over the workers image.
This will build an image with the tag `matrixdotorg/synapse-workers`.
It's worth noting at this point that this image is fully functional, and
can be used for local testing. See instructions for using the container
under
[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
below.
Finally, build the Synapse image for Complement, which is based on
`matrixdotorg/synapse-workers`. You will need a local checkout of Complement. Change to
the root of your Complement checkout and run:
```sh
docker build -t complement-synapse -f docker/complement/Dockerfile docker/complement
docker build -t matrixdotorg/complement-synapse-workers -f dockerfiles/SynapseWorkers.Dockerfile dockerfiles
```
This will build an image with the tag `complement-synapse`, which can be handed to
@@ -48,10 +83,6 @@ Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Ref
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
See [the Complement image README](./complement/README.md) for information about the
expected environment variables.
## Running the Dockerfile-worker image standalone
For manual testing of a multi-process Synapse instance in Docker,
@@ -60,10 +91,10 @@ bundling all necessary components together for a workerised homeserver instance.
This includes any desired Synapse worker processes, a nginx to route traffic accordingly,
a redis for worker communication and a supervisord instance to start up and monitor all
processes. You will need to provide your own postgres container to connect to, and TLS
is not handled by the container.
Once you've built the image using the above instructions, you can run it. Be sure
you've set up a volume according to the [usual Synapse docker instructions](README.md).
Then run something along the lines of:
@@ -81,12 +112,9 @@ docker run -d --name synapse \
matrixdotorg/synapse-workers
```
...substituting `POSTGRES*` variables for those that match a postgres host you have
available (usually a running postgres docker container).
### Workers
The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
use when running the container. All possible worker names are defined by the keys of the
`WORKERS_CONFIG` variable in [this script](configure_workers_and_start.py), which the
@@ -99,35 +127,14 @@ type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES`
(e.g. `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`).
Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers
(leaving only the main process).
The container will only be configured to use Redis-based worker mode if there are
workers enabled.
(leaving only the main process). The container is configured to use redis-based worker
mode.
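For example, the following hedged sketch (other required options as in the run
example above) starts one sync worker and one federation sender:

```sh
docker run -d --name synapse \
  -e SYNAPSE_WORKER_TYPES=synchrotron,federation_sender \
  matrixdotorg/synapse-workers
```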
### Logging
Logs for workers and the main process are logged to stdout and can be viewed with
standard `docker logs` tooling. Worker logs contain their worker name
after the timestamp.
Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be written to
`<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at
00:00, according to the container's clock. Logging for the main process must still be
configured by modifying the homeserver's log config in your Synapse data volume.
### Application Services
Setting the `SYNAPSE_AS_REGISTRATION_DIR` environment variable to the path of
a directory (within the container) will cause the configuration script to scan
that directory for `.yaml`/`.yml` registration files.
Synapse will be configured to load these configuration files.
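For instance, a hedged sketch (the host path and mount point are illustrative;
other options as in the run example above):

```sh
docker run -d --name synapse \
  -v /path/to/appservice/registrations:/as-registrations \
  -e SYNAPSE_AS_REGISTRATION_DIR=/as-registrations \
  matrixdotorg/synapse-workers
```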
### TLS Termination
Nginx is present in the image to route requests to the appropriate workers,
but it does not serve TLS by default.
You can configure `SYNAPSE_TLS_CERT` and `SYNAPSE_TLS_KEY` to point to a
TLS certificate and key (respectively), both in PEM (textual) format.
In this case, Nginx will additionally serve using HTTPS on port 8448.
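For instance, a hedged sketch (certificate paths are illustrative; other
options as in the run example above):

```sh
docker run -d --name synapse \
  -v /path/to/tls:/tls \
  -e SYNAPSE_TLS_CERT=/tls/fullchain.pem \
  -e SYNAPSE_TLS_KEY=/tls/privkey.pem \
  -p 8448:8448 \
  matrixdotorg/synapse-workers
```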

Some files were not shown because too many files have changed in this diff.