Compare commits


7 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| David Robertson | 0953cad3e4 | docstringggggg | 2022-05-18 10:19:30 +01:00 |
| David Robertson | ad6a6675bf | go away flake8 | 2022-05-17 14:14:40 +01:00 |
| David Robertson | 21d1347f2c | Changelog | 2022-05-17 14:00:56 +01:00 |
| David Robertson | a9fe3350f8 | Drive-by typo fix I spotted while debugging | 2022-05-17 13:55:58 +01:00 |
| David Robertson | a1adede444 | discard strings with nulls before user dir update | 2022-05-17 13:55:58 +01:00 |
| David Robertson | 79f1cef5e4 | Move non_null_str_or_none to s.utils.stringutils | 2022-05-17 13:55:58 +01:00 |
| David Robertson | 8c977edec8 | Reproduce #12755 | 2022-05-17 13:55:44 +01:00 |
460 changed files with 23458 additions and 13047 deletions


@@ -1,93 +0,0 @@
{{- /*gotype: github.com/haveyoudebuggedit/gotestfmt/parser.Package*/ -}}
{{- /*
This template contains the format for an individual package. GitHub actions does not currently support nested groups so
we are creating a stylized header for each package.
This template is based on https://github.com/haveyoudebuggedit/gotestfmt/blob/f179b0e462a9dcf7101515d87eec4e4d7e58b92a/.gotestfmt/github/package.gotpl
which is under the Unlicense licence.
*/ -}}
{{- $settings := .Settings -}}
{{- if and (or (not $settings.HideSuccessfulPackages) (ne .Result "PASS")) (or (not $settings.HideEmptyPackages) (ne .Result "SKIP") (ne (len .TestCases) 0)) -}}
{{- if eq .Result "PASS" -}}
{{ "\033" }}[0;32m
{{- else if eq .Result "SKIP" -}}
{{ "\033" }}[0;33m
{{- else -}}
{{ "\033" }}[0;31m
{{- end -}}
📦 {{ .Name }}{{- "\033" }}[0m
{{- with .Coverage -}}
{{- "\033" -}}[0;37m ({{ . }}% coverage){{- "\033" -}}[0m
{{- end -}}
{{- "\n" -}}
{{- with .Reason -}}
{{- " " -}}🛑 {{ . -}}{{- "\n" -}}
{{- end -}}
{{- with .Output -}}
{{- . -}}{{- "\n" -}}
{{- end -}}
{{- with .TestCases -}}
{{- /* Failing tests are first */ -}}
{{- range . -}}
{{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- /* Then skipped tests are second */ -}}
{{- range . -}}
{{- if eq .Result "SKIP" -}}
::group::{{ "\033" }}[0;33m🚧{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- /* Then passing tests are last */ -}}
{{- range . -}}
{{- if eq .Result "PASS" -}}
::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- "\n" -}}
{{- end -}}
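
The template above leans on two output conventions: GitHub Actions' `::group::` / `::endgroup::` workflow commands, which collapse each test's output into an expandable section of the log, and raw ANSI escapes (`\033[0;32m` green, `\033[0;33m` yellow, `\033[0;31m` red, `\033[0m` reset) for colour-coding results. A minimal bash sketch of the same pattern (the test name and duration are invented):

```bash
#!/bin/bash
# One collapsible, colour-coded section, as the template emits per test case.
green=$'\033[0;32m'; grey=$'\033[0;37m'; reset=$'\033[0m'
echo "::group::${green}✅ TestExample${grey} (1.23s)${reset}"
echo "test output goes here; hidden until the group is expanded"
echo "::endgroup::"
```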


@@ -1,25 +0,0 @@
#!/bin/bash
#
# Fetches a version of complement which best matches the current build.
#
# The tarball is unpacked into `./complement`.
set -e
mkdir -p complement
# Pick an appropriate version of complement. Depending on whether this is a PR or release,
# etc. we need to use different fallbacks:
#
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
# for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
# (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("HEAD").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
# Skip empty branch names and merge commits.
if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
continue
fi
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
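
In CI, `GITHUB_HEAD_REF`, `GITHUB_BASE_REF` and `GITHUB_REF` are populated by the runner; to exercise the fallback chain by hand you can set them yourself. A hypothetical local dry run (the branch name is illustrative):

```bash
# Pretend to be a pull request whose source branch is "develop".
GITHUB_HEAD_REF="develop" GITHUB_BASE_REF="" GITHUB_REF="" \
    synapse/.ci/scripts/checkout_complement.sh
ls complement  # the matching Complement tree is unpacked here
```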


@@ -27,10 +27,9 @@ export VIRTUALENV_NO_DOWNLOAD=1
# Patch the project definitions in-place:
# - Replace all lower and tilde bounds with exact bounds
# - Replace all caret bounds---but not the one that defines the supported Python version!
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Use pyopenssl 17.0, which is the oldest version that works with
# - Make the pyopenssl 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Omit systemd: we're not logging to journal here.
# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
@@ -41,7 +40,6 @@ export VIRTUALENV_NO_DOWNLOAD=1
sed -i \
-e "s/[~>]=/==/g" \
-e '/^python = "^/!s/\^/==/g' \
-e "/psycopg2/d" \
-e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
-e '/systemd/d' \
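
The effect of those `sed` expressions is easiest to see on a sample dependency block. An illustration using an invented pyproject fragment:

```bash
printf 'attrs = ">=19.2.0"\npython = "^3.7"\nfrozendict = "~=1.2"\n' |
  sed -e "s/[~>]=/==/g" \
      -e '/^python = "^/!s/\^/==/g'
# attrs = "==19.2.0"      (lower bound pinned exactly)
# python = "^3.7"         (the Python caret bound is left alone)
# frozendict = "==1.2"    (tilde bound pinned exactly)
```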


@@ -6,6 +6,3 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56
# Update black to 20.8b1 (#9381).
0a00b7ff14890987f09112a2ae696c61001e6cf1
# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
c4268e3da64f1abb5b31deaeb5769adb6510c0a7

.github/ISSUE_TEMPLATE/BUG_REPORT.md

@@ -0,0 +1,72 @@
---
name: Bug report
about: Create a report to help us improve
---
<!--
**THIS IS NOT A SUPPORT CHANNEL!**
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**,
please ask in **#synapse:matrix.org** (using a matrix.org account if necessary)
If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
This is a bug report template. By following the instructions below and
filling out the sections with your information, you will help us to get all
the necessary data to fix your issue.
You can also preview your report before submitting it. You may remove sections
that aren't relevant to your particular case.
Text between <!-- and --> marks will be invisible in the report.
-->
### Description
<!-- Describe here the problem that you are experiencing -->
### Steps to reproduce
- list the steps
- that reproduce the bug
- using hyphens as bullet points
<!--
Describe how what happens differs from what you expected.
If you can identify any relevant log snippets from _homeserver.log_, please include
those (please be careful to remove any personal or private data). Please surround them with
``` (three backticks, on a line on their own), so that they are formatted legibly.
-->
### Version information
<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
<!-- Was this issue identified on matrix.org or another homeserver? -->
- **Homeserver**:
If not matrix.org:
<!--
What version of Synapse is running?
You can find the Synapse version with this command:
$ curl http://localhost:8008/_synapse/admin/v1/server_version
(You may need to replace `localhost:8008` if Synapse is not configured to
listen on that port.)
-->
- **Version**:
- **Install method**:
<!-- examples: package manager/git clone/pip -->
- **Platform**:
<!--
Tell us about the environment in which your homeserver is operating
distro, hardware, if it's running in a vm/container, etc.
-->


@@ -1,103 +0,0 @@
name: Bug report
description: Create a report to help us improve
body:
- type: markdown
attributes:
value: |
**THIS IS NOT A SUPPORT CHANNEL!**
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
This is a bug report form. By following the instructions below and completing the sections with your information, you will help us to get all the necessary data to fix your issue.
You can also preview your report before submitting it.
- type: textarea
id: description
attributes:
label: Description
description: Describe the problem that you are experiencing
validations:
required: true
- type: textarea
id: reproduction_steps
attributes:
label: Steps to reproduce
description: |
Describe the series of steps that leads you to the problem.
Describe how what happens differs from what you expected.
placeholder: Tell us what you see!
value: |
- list the steps
- that reproduce the bug
- using hyphens as bullet points
validations:
required: true
- type: markdown
attributes:
value: |
---
**IMPORTANT**: please answer the following questions, to help us narrow down the problem.
- type: input
id: homeserver
attributes:
label: Homeserver
description: Which homeserver was this issue identified on? (matrix.org, another homeserver, etc)
validations:
required: true
- type: input
id: version
attributes:
label: Synapse Version
description: |
What version of Synapse is this homeserver running?
You can find the Synapse version by visiting https://yourserver.example.com/_matrix/federation/v1/version
or with this command:
```
$ curl http://localhost:8008/_synapse/admin/v1/server_version
```
(You may need to replace `localhost:8008` if Synapse is not configured to listen on that port.)
validations:
required: true
- type: dropdown
id: install_method
attributes:
label: Installation Method
options:
- Docker (matrixdotorg/synapse)
- Debian packages from packages.matrix.org
- pip (from PyPI)
- Other (please mention below)
- type: textarea
id: platform
attributes:
label: Platform
description: |
Tell us about the environment in which your homeserver is operating...
e.g. distro, hardware, if it's running in a vm/container, etc.
validations:
required: true
- type: textarea
id: logs
attributes:
label: Relevant log output
description: |
Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
This will be automatically formatted into code, so there is no need for backticks.
Please be careful to remove any personal or private data.
**Bug reports are usually very difficult to diagnose without logging.**
render: shell
validations:
required: true
- type: textarea
id: anything_else
attributes:
label: Anything else that would be useful to know?


@@ -19,14 +19,6 @@ jobs:
- run: scripts-dev/generate_sample_config.sh --check
- run: scripts-dev/config-lint.sh
check-schema-delta:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
lint:
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
with:
@@ -56,7 +48,7 @@ jobs:
# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig, check-schema-delta]
needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig]
runs-on: ubuntu-latest
steps:
- run: "true"
@@ -314,20 +306,10 @@ jobs:
- run: .ci/scripts/test_synapse_port_db.sh
complement:
if: "${{ !failure() && !cancelled() }}"
if: ${{ !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
steps:
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
@@ -348,65 +330,33 @@ jobs:
with:
path: synapse
- name: "Install custom gotestfmt template"
run: |
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fallback to HEAD.
- name: Checkout complement
run: synapse/.ci/scripts/checkout_complement.sh
- run: |
set -o pipefail
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
# We only run the workers tests on `develop` for now, because they're too slow to wait for on PRs.
# Sadly, you can't have an `if` condition on the value of a matrix, so this is a temporary, separate job for now.
# GitHub Actions doesn't support YAML anchors, so it's full-on duplication for now.
complement-developonly:
if: "${{ !failure() && !cancelled() && (github.ref == 'refs/heads/develop') }}"
needs: linting-done
runs-on: ubuntu-latest
name: "Complement Workers (develop only)"
steps:
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
- name: "Set Go Version"
run: |
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
mkdir -p complement
# Attempt to use the version of complement which best matches the current
# build. Depending on whether this is a PR or release, etc. we need to
# use different fallbacks.
#
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
# for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
# (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("HEAD").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
# Skip empty branch names and merge commits.
if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
continue
fi
- name: "Install Complement Dependencies"
run: |
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse
- name: "Install custom gotestfmt template"
run: |
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fallback to HEAD.
- name: Checkout complement
run: synapse/.ci/scripts/checkout_complement.sh
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
- run: |
set -o pipefail
WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
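
The steps above can be reproduced on a developer machine: install `gotestfmt`, fetch a matching Complement checkout, then pipe the JSON test stream through the formatter. A rough local equivalent, assuming Go is installed and the same `synapse/` checkout layout as the workflow:

```bash
set -o pipefail
# The workflow uses `go get`; `go install` is the modern spelling of the same thing.
go install github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
export PATH="$HOME/go/bin:$PATH"
# Fetch a matching Complement checkout, then run the tests.
synapse/.ci/scripts/checkout_complement.sh
COMPLEMENT_DIR="$(pwd)/complement" synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
# Prefix with POSTGRES=1 or WORKERS=1 to mirror the workflow's matrix variants.
```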


@@ -1,400 +1,3 @@
Synapse 1.62.0 (2022-07-05)
===========================
No significant changes since 1.62.0rc3.
Authors of spam-checker plugins should consult the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.62/docs/upgrade.md#upgrading-to-v1620) to learn about the enriched signatures for spam checker callbacks, which are supported with this release of Synapse.
Synapse 1.62.0rc3 (2022-07-04)
==============================
Bugfixes
--------
- Update the version of the [ldap3 plugin](https://github.com/matrix-org/matrix-synapse-ldap3/) included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on `packages.matrix.org` to 0.2.1. This fixes [a bug](https://github.com/matrix-org/matrix-synapse-ldap3/pull/163) with usernames containing uppercase characters. ([\#13156](https://github.com/matrix-org/synapse/issues/13156))
- Fix a bug introduced in Synapse 1.62.0rc1 affecting unread counts for users on small servers. ([\#13168](https://github.com/matrix-org/synapse/issues/13168))
Synapse 1.62.0rc2 (2022-07-01)
==============================
Bugfixes
--------
- Fix unread counts for users on large servers. Introduced in v1.62.0rc1. ([\#13140](https://github.com/matrix-org/synapse/issues/13140))
- Fix DB performance when deleting old push notifications. Introduced in v1.62.0rc1. ([\#13141](https://github.com/matrix-org/synapse/issues/13141))
Synapse 1.62.0rc1 (2022-06-28)
==============================
Features
--------
- Port the spam-checker API callbacks to a new, richer API. This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. ([\#12857](https://github.com/matrix-org/synapse/issues/12857), [\#13047](https://github.com/matrix-org/synapse/issues/13047))
- Allow server admins to customise the response of the `/.well-known/matrix/client` endpoint. ([\#13035](https://github.com/matrix-org/synapse/issues/13035))
- Add metrics measuring the CPU and DB time spent in state resolution. ([\#13036](https://github.com/matrix-org/synapse/issues/13036))
- Speed up fetching of device list changes in `/sync` and `/keys/changes`. ([\#13045](https://github.com/matrix-org/synapse/issues/13045), [\#13098](https://github.com/matrix-org/synapse/issues/13098))
- Improve URL previews for sites which only provide Twitter Card metadata, e.g. LWN.net. ([\#13056](https://github.com/matrix-org/synapse/issues/13056))
Bugfixes
--------
- Update [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786) implementation to check `state_key`. ([\#12939](https://github.com/matrix-org/synapse/issues/12939))
- Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. ([\#12973](https://github.com/matrix-org/synapse/issues/12973))
- Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. ([\#12979](https://github.com/matrix-org/synapse/issues/12979))
- Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced
in Synapse v1.41.0. ([\#12991](https://github.com/matrix-org/synapse/issues/12991))
- Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases. ([\#13018](https://github.com/matrix-org/synapse/issues/13018))
- Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would cause an internal error. Synapse now returns 400 Bad Request in this situation. ([\#13041](https://github.com/matrix-org/synapse/issues/13041))
- Fix some inconsistencies in the event authentication code. ([\#13087](https://github.com/matrix-org/synapse/issues/13087), [\#13088](https://github.com/matrix-org/synapse/issues/13088))
- Fix a long-standing bug where room directory requests would cause an internal server error if given a malformed room alias. ([\#13106](https://github.com/matrix-org/synapse/issues/13106))
Improved Documentation
----------------------
- Add documentation for how to configure Synapse with Workers using Docker Compose. Includes example worker config and docker-compose.yaml. Contributed by @Thumbscrew. ([\#12737](https://github.com/matrix-org/synapse/issues/12737))
- Ensure the [Poetry cheat sheet](https://matrix-org.github.io/synapse/develop/development/dependencies.html) is available in the online documentation. ([\#13022](https://github.com/matrix-org/synapse/issues/13022))
- Mention removed community/group worker endpoints in upgrade.md. Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023))
- Add instructions for running Complement with `gotestfmt`-formatted output locally. ([\#13073](https://github.com/matrix-org/synapse/issues/13073))
- Update OpenTracing docs to reference the configuration manual rather than the configuration file. ([\#13076](https://github.com/matrix-org/synapse/issues/13076))
- Update information on downstream Debian packages. ([\#13095](https://github.com/matrix-org/synapse/issues/13095))
- Remove documentation for the Delete Group Admin API which no longer exists. ([\#13112](https://github.com/matrix-org/synapse/issues/13112))
Deprecations and Removals
-------------------------
- Remove the unspecced `DELETE /directory/list/room/{roomId}` endpoint, which hid rooms from the [public room directory](https://spec.matrix.org/v1.3/client-server-api/#listing-rooms). Instead, `PUT` to the same URL with a visibility of `"private"`. ([\#13123](https://github.com/matrix-org/synapse/issues/13123))
Internal Changes
----------------
- Add tests for cancellation of `GET /rooms/$room_id/members` and `GET /rooms/$room_id/state` requests. ([\#12674](https://github.com/matrix-org/synapse/issues/12674))
- Report login failures due to unknown third party identifiers in the same way as failures due to invalid passwords. This prevents an attacker from using the error response to determine if the identifier exists. Contributed by Daniel Aloni. ([\#12738](https://github.com/matrix-org/synapse/issues/12738))
- Merge the Complement testing Docker images into a single, multi-purpose image. ([\#12881](https://github.com/matrix-org/synapse/issues/12881), [\#13075](https://github.com/matrix-org/synapse/issues/13075))
- Simplify the database schema for `event_edges`. ([\#12893](https://github.com/matrix-org/synapse/issues/12893))
- Clean up the test code for client disconnection. ([\#12929](https://github.com/matrix-org/synapse/issues/12929))
- Remove code generating comments in configuration. ([\#12941](https://github.com/matrix-org/synapse/issues/12941))
- Add `Cross-Origin-Resource-Policy: cross-origin` header to content repository's thumbnail and download endpoints. ([\#12944](https://github.com/matrix-org/synapse/issues/12944))
- Replace noop background updates with `DELETE` delta. ([\#12954](https://github.com/matrix-org/synapse/issues/12954), [\#13050](https://github.com/matrix-org/synapse/issues/13050))
- Use lower isolation level when inserting read receipts to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12957](https://github.com/matrix-org/synapse/issues/12957))
- Reduce the amount of state we pull from the DB. ([\#12963](https://github.com/matrix-org/synapse/issues/12963))
- Enable testing against PostgreSQL databases in Complement CI. ([\#12965](https://github.com/matrix-org/synapse/issues/12965), [\#13034](https://github.com/matrix-org/synapse/issues/13034))
- Fix an inaccurate comment. ([\#12969](https://github.com/matrix-org/synapse/issues/12969))
- Remove the `delete_device` method and always call `delete_devices`. ([\#12970](https://github.com/matrix-org/synapse/issues/12970))
- Use a GitHub form for issues rather than a hard-to-read, easy-to-ignore template. ([\#12982](https://github.com/matrix-org/synapse/issues/12982))
- Move [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) behind an experimental config flag. ([\#12984](https://github.com/matrix-org/synapse/issues/12984))
- Add type hints to tests. ([\#12985](https://github.com/matrix-org/synapse/issues/12985), [\#13099](https://github.com/matrix-org/synapse/issues/13099))
- Refactor macaroon tokens generation and move the unsubscribe link in notification emails to `/_synapse/client/unsubscribe`. ([\#12986](https://github.com/matrix-org/synapse/issues/12986))
- Fix documentation for running complement tests. ([\#12990](https://github.com/matrix-org/synapse/issues/12990))
- Faster joins: add issue links to the TODO comments in the code. ([\#13004](https://github.com/matrix-org/synapse/issues/13004))
- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13005](https://github.com/matrix-org/synapse/issues/13005), [\#13096](https://github.com/matrix-org/synapse/issues/13096), [\#13118](https://github.com/matrix-org/synapse/issues/13118))
- Replaced usage of PyJWT with methods from Authlib in `org.matrix.login.jwt`. Contributed by Hannes Lerchl. ([\#13011](https://github.com/matrix-org/synapse/issues/13011))
- Modernize the `contrib/graph/` scripts. ([\#13013](https://github.com/matrix-org/synapse/issues/13013))
- Remove redundant `room_version` parameters from event auth functions. ([\#13017](https://github.com/matrix-org/synapse/issues/13017))
- Decouple `synapse.api.auth_blocking.AuthBlocking` from `synapse.api.auth.Auth`. ([\#13021](https://github.com/matrix-org/synapse/issues/13021))
- Add type annotations to `synapse.storage.databases.main.devices`. ([\#13025](https://github.com/matrix-org/synapse/issues/13025))
- Set default `sync_response_cache_duration` to two minutes. ([\#13042](https://github.com/matrix-org/synapse/issues/13042))
- Rename CI test runs. ([\#13046](https://github.com/matrix-org/synapse/issues/13046))
- Increase timeout of complement CI test runs. ([\#13048](https://github.com/matrix-org/synapse/issues/13048))
- Refactor entry points so that they all have a `main` function. ([\#13052](https://github.com/matrix-org/synapse/issues/13052))
- Refactor the Dockerfile-workers configuration script to use Jinja2 templates in Synapse workers' Supervisord blocks. ([\#13054](https://github.com/matrix-org/synapse/issues/13054))
- Add headers to individual options in config documentation to allow for linking. ([\#13055](https://github.com/matrix-org/synapse/issues/13055))
- Make Complement CI logs easier to read. ([\#13057](https://github.com/matrix-org/synapse/issues/13057), [\#13058](https://github.com/matrix-org/synapse/issues/13058), [\#13069](https://github.com/matrix-org/synapse/issues/13069))
- Don't instantiate modules with keyword arguments. ([\#13060](https://github.com/matrix-org/synapse/issues/13060))
- Fix type checking errors against Twisted trunk. ([\#13061](https://github.com/matrix-org/synapse/issues/13061))
- Allow MSC3030 `timestamp_to_event` calls from anyone on world-readable rooms. ([\#13062](https://github.com/matrix-org/synapse/issues/13062))
- Add a CI job to check that schema deltas are in the correct folder. ([\#13063](https://github.com/matrix-org/synapse/issues/13063))
- Avoid rechecking event auth rules which are independent of room state. ([\#13065](https://github.com/matrix-org/synapse/issues/13065))
- Reduce the duplication of code that invokes the rate limiter. ([\#13070](https://github.com/matrix-org/synapse/issues/13070))
- Add a Subject Alternative Name to the certificate generated for Complement tests. ([\#13071](https://github.com/matrix-org/synapse/issues/13071))
- Add more tests for room upgrades. ([\#13074](https://github.com/matrix-org/synapse/issues/13074))
- Pin dependencies maintained by matrix.org to [semantic version](https://semver.org/) bounds. ([\#13082](https://github.com/matrix-org/synapse/issues/13082))
- Correctly report prometheus DB stats for `get_earliest_token_for_stats`. ([\#13085](https://github.com/matrix-org/synapse/issues/13085))
- Fix a long-standing bug where a finished logging context would be re-started when Synapse failed to persist an event from federation. ([\#13089](https://github.com/matrix-org/synapse/issues/13089))
- Simplify the alias deletion logic as an application service. ([\#13093](https://github.com/matrix-org/synapse/issues/13093))
- Add type annotations to `tests.test_server`. ([\#13124](https://github.com/matrix-org/synapse/issues/13124))
Synapse 1.61.1 (2022-06-28)
===========================
This patch release fixes a security issue regarding URL previews, affecting all prior versions of Synapse. Server administrators are encouraged to update Synapse as soon as possible. We are not aware of these vulnerabilities being exploited in the wild.
Server administrators who are unable to update Synapse may use the workarounds described in the linked GitHub Security Advisory below.
## Security advisory
The following issue is fixed in 1.61.1.
* [GHSA-22p3-qrh9-cx32](https://github.com/matrix-org/synapse/security/advisories/GHSA-22p3-qrh9-cx32) / [CVE-2022-31052](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31052)
Synapse instances with the [`url_preview_enabled`](https://matrix-org.github.io/synapse/v1.61/usage/configuration/config_documentation.html#media-store) homeserver config option set to `true` are affected. URL previews of some web pages can lead to unbounded recursion, causing the request to either fail, or in some cases crash the running Synapse process.
Requesting URL previews requires authentication. Nevertheless, it is possible to exploit this maliciously, either by malicious users on the homeserver, or by remote users sending URLs that a local user's client may automatically request a URL preview for.
Homeservers with the `url_preview_enabled` configuration option set to `false` (the default) are unaffected. Instances with the `enable_media_repo` configuration option set to `false` are also unaffected, as this also disables URL preview functionality.
Fixed by [fa1308061802ac7b7d20e954ba7372c5ac292333](https://github.com/matrix-org/synapse/commit/fa1308061802ac7b7d20e954ba7372c5ac292333).
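
For administrators who cannot upgrade immediately, the exposure hinges on the two config options named above. A quick check, assuming a conventional config location:

```bash
# Inspect the relevant settings (config path assumed; adjust for your deployment):
grep -E 'url_preview_enabled|enable_media_repo' /etc/synapse/homeserver.yaml
# url_preview_enabled: false  (the default) means the instance is unaffected;
# enable_media_repo: false    also disables URL previews entirely.
```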
Synapse 1.61.0 (2022-06-14)
===========================
This release removes support for the non-standard feature known both as 'groups' and as 'communities', which have been superseded by *Spaces*.
See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610)
for more details.
Improved Documentation
----------------------
- Mention removed community/group worker endpoints in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610). Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023))
Synapse 1.61.0rc1 (2022-06-07)
==============================
Features
--------
- Add new `media_retention` options to the homeserver config for routinely cleaning up non-recently accessed media. ([\#12732](https://github.com/matrix-org/synapse/issues/12732), [\#12972](https://github.com/matrix-org/synapse/issues/12972), [\#12977](https://github.com/matrix-org/synapse/issues/12977))
- Experimental support for [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772): Push rule for mutually related events. ([\#12740](https://github.com/matrix-org/synapse/issues/12740), [\#12859](https://github.com/matrix-org/synapse/issues/12859))
- Update to the `check_event_for_spam` module callback: Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808))
- Add storage and module API methods to get monthly active users (and their corresponding appservices) within an optionally specified time range. ([\#12838](https://github.com/matrix-org/synapse/issues/12838), [\#12917](https://github.com/matrix-org/synapse/issues/12917))
- Support the new error code `ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED` from [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#12845](https://github.com/matrix-org/synapse/issues/12845), [\#12923](https://github.com/matrix-org/synapse/issues/12923))
- Add a configurable background job to delete stale devices. ([\#12855](https://github.com/matrix-org/synapse/issues/12855))
- Improve URL previews for pages with empty elements. ([\#12951](https://github.com/matrix-org/synapse/issues/12951))
- Allow updating a user's password using the admin API without logging out their devices. Contributed by @jcgruenhage. ([\#12952](https://github.com/matrix-org/synapse/issues/12952))
Bugfixes
--------
- Always send an `access_token` in `/thirdparty/` requests to appservices, as required by the [Application Service API specification](https://spec.matrix.org/v1.1/application-service-api/#third-party-networks). ([\#12746](https://github.com/matrix-org/synapse/issues/12746))
- Implement [MSC3816](https://github.com/matrix-org/matrix-spec-proposals/pull/3816): sending the root event in a thread should count as having 'participated' in it. ([\#12766](https://github.com/matrix-org/synapse/issues/12766))
- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12784](https://github.com/matrix-org/synapse/issues/12784))
- Fix a bug where we did not correctly handle invalid device list updates over federation. Contributed by Carl Bordum Hansen. ([\#12829](https://github.com/matrix-org/synapse/issues/12829))
- Fix a bug which allowed multiple async operations to access database locks concurrently. Contributed by @sumnerevans @ Beeper. ([\#12832](https://github.com/matrix-org/synapse/issues/12832))
- Fix an issue introduced in Synapse 0.34 where the `/notifications` endpoint would only return notifications if a user registered at least one pusher. Contributed by Famedly. ([\#12840](https://github.com/matrix-org/synapse/issues/12840))
- Fix a bug where servers using a Postgres database would fail to backfill from an insertion event when MSC2716 is enabled (`experimental_features.msc2716_enabled`). ([\#12843](https://github.com/matrix-org/synapse/issues/12843))
- Fix [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) rooms being omitted from room directory, room summary and space hierarchy responses. ([\#12858](https://github.com/matrix-org/synapse/issues/12858))
- Fix a bug introduced in Synapse 1.54.0 which could sometimes cause exceptions when handling federated traffic. ([\#12877](https://github.com/matrix-org/synapse/issues/12877))
- Fix a bug introduced in Synapse 1.59.0 which caused room deletion to fail with a foreign key violation error. ([\#12889](https://github.com/matrix-org/synapse/issues/12889))
- Fix a long-standing bug which caused the `/messages` endpoint to return an incorrect `end` attribute when there were no more events. Contributed by @Vetchu. ([\#12903](https://github.com/matrix-org/synapse/issues/12903))
- Fix a bug introduced in Synapse 1.58.0 where `/sync` would fail if the most recent event in a room was a redaction of an event that has since been purged. ([\#12905](https://github.com/matrix-org/synapse/issues/12905))
- Fix a potential memory leak when generating thumbnails. ([\#12932](https://github.com/matrix-org/synapse/issues/12932))
- Fix a long-standing bug where a URL preview would break if the image failed to download. ([\#12950](https://github.com/matrix-org/synapse/issues/12950))
Improved Documentation
----------------------
- Fix typographical errors in documentation. ([\#12863](https://github.com/matrix-org/synapse/issues/12863))
- Fix documentation incorrectly stating the `sendToDevice` endpoint can be directed at generic workers. Contributed by Nick @ Beeper. ([\#12867](https://github.com/matrix-org/synapse/issues/12867))
Deprecations and Removals
-------------------------
- Remove support for the non-standard groups/communities feature from Synapse. ([\#12553](https://github.com/matrix-org/synapse/issues/12553), [\#12558](https://github.com/matrix-org/synapse/issues/12558), [\#12563](https://github.com/matrix-org/synapse/issues/12563), [\#12895](https://github.com/matrix-org/synapse/issues/12895), [\#12897](https://github.com/matrix-org/synapse/issues/12897), [\#12899](https://github.com/matrix-org/synapse/issues/12899), [\#12900](https://github.com/matrix-org/synapse/issues/12900), [\#12936](https://github.com/matrix-org/synapse/issues/12936), [\#12966](https://github.com/matrix-org/synapse/issues/12966))
- Remove contributed `kick_users.py` script. This is broken under Python 3, and is not added to the environment when `pip install`ing Synapse. ([\#12908](https://github.com/matrix-org/synapse/issues/12908))
- Remove `contrib/jitsimeetbridge`. This was an unused experiment that hasn't been meaningfully changed since 2014. ([\#12909](https://github.com/matrix-org/synapse/issues/12909))
- Remove unused `contrib/experiements/cursesio.py` script, which fails to run under Python 3. ([\#12910](https://github.com/matrix-org/synapse/issues/12910))
- Remove unused `contrib/experiements/test_messaging.py` script. This fails to run on Python 3. ([\#12911](https://github.com/matrix-org/synapse/issues/12911))
Internal Changes
----------------
- Test Synapse against Complement with workers. ([\#12810](https://github.com/matrix-org/synapse/issues/12810), [\#12933](https://github.com/matrix-org/synapse/issues/12933))
- Reduce the amount of state we pull from the DB. ([\#12811](https://github.com/matrix-org/synapse/issues/12811), [\#12964](https://github.com/matrix-org/synapse/issues/12964))
- Try other homeservers when re-syncing state for rooms with partial state. ([\#12812](https://github.com/matrix-org/synapse/issues/12812))
- Resume state re-syncing for rooms with partial state after a Synapse restart. ([\#12813](https://github.com/matrix-org/synapse/issues/12813))
- Remove Mutual Rooms' ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) endpoint dependency on the User Directory. ([\#12836](https://github.com/matrix-org/synapse/issues/12836))
- Experimental: expand `check_event_for_spam` with ability to return additional fields. This enables spam-checker implementations to experiment with mechanisms to give users more information about why they are blocked and whether any action is needed from them to be unblocked. ([\#12846](https://github.com/matrix-org/synapse/issues/12846))
- Remove `dont_notify` from the `.m.rule.room.server_acl` rule. ([\#12849](https://github.com/matrix-org/synapse/issues/12849))
- Remove the unstable `/hierarchy` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12851](https://github.com/matrix-org/synapse/issues/12851))
- Pull out less state when handling gaps in room DAG. ([\#12852](https://github.com/matrix-org/synapse/issues/12852), [\#12904](https://github.com/matrix-org/synapse/issues/12904))
- Clean-up the push rules datastore. ([\#12856](https://github.com/matrix-org/synapse/issues/12856))
- Correct a type annotation in the URL preview source code. ([\#12860](https://github.com/matrix-org/synapse/issues/12860))
- Update `pyjwt` dependency to [2.4.0](https://github.com/jpadilla/pyjwt/releases/tag/2.4.0). ([\#12865](https://github.com/matrix-org/synapse/issues/12865))
- Enable the `/account/whoami` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12866](https://github.com/matrix-org/synapse/issues/12866))
- Enable the `batch_send` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12868](https://github.com/matrix-org/synapse/issues/12868))
- Don't generate empty AS transactions when the AS is flagged as down. Contributed by Nick @ Beeper. ([\#12869](https://github.com/matrix-org/synapse/issues/12869))
- Fix up the variable `state_store` naming. ([\#12871](https://github.com/matrix-org/synapse/issues/12871))
- Faster room joins: when querying the current state of the room, wait for state to be populated. ([\#12872](https://github.com/matrix-org/synapse/issues/12872))
- Avoid running queries which will never result in deletions. ([\#12879](https://github.com/matrix-org/synapse/issues/12879))
- Use constants for EDU types. ([\#12884](https://github.com/matrix-org/synapse/issues/12884))
- Reduce database load of `/sync` when presence is enabled. ([\#12885](https://github.com/matrix-org/synapse/issues/12885))
- Refactor `have_seen_events` to reduce memory consumed when processing federation traffic. ([\#12886](https://github.com/matrix-org/synapse/issues/12886))
- Refactor receipt linearization code. ([\#12888](https://github.com/matrix-org/synapse/issues/12888))
- Add type annotations to `synapse.logging.opentracing`. ([\#12894](https://github.com/matrix-org/synapse/issues/12894))
- Remove PyNaCl occurrences directly used in Synapse code. ([\#12902](https://github.com/matrix-org/synapse/issues/12902))
- Bump types-jsonschema from 4.4.1 to 4.4.6. ([\#12912](https://github.com/matrix-org/synapse/issues/12912))
- Rename storage classes. ([\#12913](https://github.com/matrix-org/synapse/issues/12913))
- Preparation for database schema simplifications: stop reading from `event_edges.room_id`. ([\#12914](https://github.com/matrix-org/synapse/issues/12914))
- Check if we are in a virtual environment before overriding the `PYTHONPATH` environment variable in the demo script. ([\#12916](https://github.com/matrix-org/synapse/issues/12916))
- Improve the logging when signature checks on events fail. ([\#12925](https://github.com/matrix-org/synapse/issues/12925))
Synapse 1.60.0 (2022-05-31)
===========================
This release of Synapse adds a unique index to the `state_group_edges` table, in
order to prevent accidentally introducing duplicate information (for example,
because a database backup was restored multiple times). If your Synapse database
already has duplicate rows in this table, this could fail with an error and
require manual remediation.
Additionally, the signature of the `check_event_for_spam` module callback has changed.
The previous signature has been deprecated and remains working for now. Module authors
should update their modules to use the new signature where possible.
See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1600)
for more details.
Bugfixes
--------
- Fix a bug introduced in Synapse 1.60.0rc1 that would break some imports from `synapse.module_api`. ([\#12918](https://github.com/matrix-org/synapse/issues/12918))
Synapse 1.60.0rc2 (2022-05-27)
==============================
Features
--------
- Add an option allowing users to use their password to reauthenticate for privileged actions even though password login is disabled. ([\#12883](https://github.com/matrix-org/synapse/issues/12883))
Bugfixes
--------
- Explicitly close `ijson` coroutines once we are done with them, instead of leaving the garbage collector to close them. ([\#12875](https://github.com/matrix-org/synapse/issues/12875))
Internal Changes
----------------
- Improve URL previews by not including the content of media tags in the generated description. ([\#12887](https://github.com/matrix-org/synapse/issues/12887))
Synapse 1.60.0rc1 (2022-05-24)
==============================
Features
--------
- Measure the time taken in spam-checking callbacks and expose those measurements as metrics. ([\#12513](https://github.com/matrix-org/synapse/issues/12513))
- Add a `default_power_level_content_override` config option to set default room power levels per room preset. ([\#12618](https://github.com/matrix-org/synapse/issues/12618))
- Add support for [MSC3787: Allowing knocks to restricted rooms](https://github.com/matrix-org/matrix-spec-proposals/pull/3787). ([\#12623](https://github.com/matrix-org/synapse/issues/12623))
- Send `USER_IP` commands on a different Redis channel, in order to reduce traffic to workers that do not process these commands. ([\#12672](https://github.com/matrix-org/synapse/issues/12672), [\#12809](https://github.com/matrix-org/synapse/issues/12809))
- Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal. ([\#12673](https://github.com/matrix-org/synapse/issues/12673))
- Add a config option to allow for auto-tuning of caches. ([\#12701](https://github.com/matrix-org/synapse/issues/12701))
- Update [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to process marker events from the current state to avoid markers being lost in timeline gaps for federated servers which would cause the imported history to be undiscovered. ([\#12718](https://github.com/matrix-org/synapse/issues/12718))
- Add a `drop_federated_event` callback to `SpamChecker` to disregard inbound federated events before they take up much processing power, in an emergency. ([\#12744](https://github.com/matrix-org/synapse/issues/12744))
- Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818). ([\#12786](https://github.com/matrix-org/synapse/issues/12786), [\#12792](https://github.com/matrix-org/synapse/issues/12792))
- Update to the `check_event_for_spam` module callback. Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808))
Bugfixes
--------
- Fix a bug introduced in Synapse 1.7.0 that would prevent events from being sent to clients if there's a retention policy in the room when the support for retention policies is disabled. ([\#12611](https://github.com/matrix-org/synapse/issues/12611))
- Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room. ([\#12683](https://github.com/matrix-org/synapse/issues/12683))
- Add a unique index to `state_group_edges` to prevent duplicates being accidentally introduced and the consequential impact to performance. ([\#12687](https://github.com/matrix-org/synapse/issues/12687))
- Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room. ([\#12696](https://github.com/matrix-org/synapse/issues/12696))
- Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set. ([\#12713](https://github.com/matrix-org/synapse/issues/12713))
- Fix push to dismiss notifications when read on another client. Contributed by @SpiritCroc @ Beeper. ([\#12721](https://github.com/matrix-org/synapse/issues/12721))
- Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers. ([\#12747](https://github.com/matrix-org/synapse/issues/12747))
- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))
- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12770](https://github.com/matrix-org/synapse/issues/12770))
- Give a meaningful error message when a client tries to create a room with an invalid alias localpart. ([\#12779](https://github.com/matrix-org/synapse/issues/12779))
- Fix a bug introduced in 1.43.0 where a file (`providers.json`) was never closed. Contributed by @arkamar. ([\#12794](https://github.com/matrix-org/synapse/issues/12794))
- Fix a long-standing bug where finished log contexts would be re-started when failing to contact remote homeservers. ([\#12803](https://github.com/matrix-org/synapse/issues/12803))
- Fix a bug, introduced in Synapse 1.21.0, that led to media thumbnails being unusable before the index has been added in the background. ([\#12823](https://github.com/matrix-org/synapse/issues/12823))
Updates to the Docker image
---------------------------
- Fix the docker file after a dependency update. ([\#12853](https://github.com/matrix-org/synapse/issues/12853))
Improved Documentation
----------------------
- Fix a typo in the Media Admin API documentation. ([\#12715](https://github.com/matrix-org/synapse/issues/12715))
- Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh. ([\#12727](https://github.com/matrix-org/synapse/issues/12727))
- Fix typo in server listener documentation. ([\#12742](https://github.com/matrix-org/synapse/issues/12742))
- Link to the configuration manual from the welcome page of the documentation. ([\#12748](https://github.com/matrix-org/synapse/issues/12748))
- Fix typo in `run_background_tasks_on` option name in configuration manual documentation. ([\#12749](https://github.com/matrix-org/synapse/issues/12749))
- Add information regarding the `rc_invites` ratelimiting option to the configuration docs. ([\#12759](https://github.com/matrix-org/synapse/issues/12759))
- Add documentation for cancellation of request processing. ([\#12761](https://github.com/matrix-org/synapse/issues/12761))
- Recommend using docker to run tests against postgres. ([\#12765](https://github.com/matrix-org/synapse/issues/12765))
- Add missing user directory endpoint from the generic worker documentation. Contributed by @olmari. ([\#12773](https://github.com/matrix-org/synapse/issues/12773))
- Add additional info to documentation of config option `cache_autotuning`. ([\#12776](https://github.com/matrix-org/synapse/issues/12776))
- Update configuration manual documentation to document size-related suffixes. ([\#12777](https://github.com/matrix-org/synapse/issues/12777))
- Fix invalid YAML syntax in the example documentation for the `url_preview_accept_language` config option. ([\#12785](https://github.com/matrix-org/synapse/issues/12785))
Deprecations and Removals
-------------------------
- Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts. ([\#12709](https://github.com/matrix-org/synapse/issues/12709))
Internal Changes
----------------
- Improve event caching mechanism to avoid having multiple copies of an event in memory at a time. ([\#10533](https://github.com/matrix-org/synapse/issues/10533))
- Preparation for faster-room-join work: return subsets of room state which we already have, immediately. ([\#12498](https://github.com/matrix-org/synapse/issues/12498))
- Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect. ([\#12586](https://github.com/matrix-org/synapse/issues/12586), [\#12588](https://github.com/matrix-org/synapse/issues/12588), [\#12630](https://github.com/matrix-org/synapse/issues/12630), [\#12694](https://github.com/matrix-org/synapse/issues/12694), [\#12698](https://github.com/matrix-org/synapse/issues/12698), [\#12699](https://github.com/matrix-org/synapse/issues/12699), [\#12700](https://github.com/matrix-org/synapse/issues/12700), [\#12705](https://github.com/matrix-org/synapse/issues/12705))
- Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests. ([\#12708](https://github.com/matrix-org/synapse/issues/12708))
- Improve documentation of the `synapse.push` module. ([\#12676](https://github.com/matrix-org/synapse/issues/12676))
- Refactor functions onto `PushRuleEvaluatorForEvent`. ([\#12677](https://github.com/matrix-org/synapse/issues/12677))
- Preparation for database schema simplifications: stop writing to `event_reference_hashes`. ([\#12679](https://github.com/matrix-org/synapse/issues/12679))
- Remove code which updates unused database column `application_services_state.last_txn`. ([\#12680](https://github.com/matrix-org/synapse/issues/12680))
- Refactor `EventContext` class. ([\#12689](https://github.com/matrix-org/synapse/issues/12689))
- Remove an unneeded class in the push code. ([\#12691](https://github.com/matrix-org/synapse/issues/12691))
- Consolidate parsing of relation information from events. ([\#12693](https://github.com/matrix-org/synapse/issues/12693))
- Convert namespace class `Codes` into a string enum. ([\#12703](https://github.com/matrix-org/synapse/issues/12703))
- Optimize private read receipt filtering. ([\#12711](https://github.com/matrix-org/synapse/issues/12711))
- Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG. ([\#12720](https://github.com/matrix-org/synapse/issues/12720))
- Downgrade some OIDC errors to warnings in the logs, to reduce the noise of Sentry reports. ([\#12723](https://github.com/matrix-org/synapse/issues/12723))
- Update configs used by Complement to allow more invites/3PID validations during tests. ([\#12731](https://github.com/matrix-org/synapse/issues/12731))
- Tweak the mypy plugin so that `@cached` can accept `on_invalidate=None`. ([\#12769](https://github.com/matrix-org/synapse/issues/12769))
- Move methods that call `add_push_rule` to the `PushRuleStore` class. ([\#12772](https://github.com/matrix-org/synapse/issues/12772))
- Make handling of federation Authorization header (more) compliant with RFC7230. ([\#12774](https://github.com/matrix-org/synapse/issues/12774))
- Refactor `resolve_state_groups_for_events` to not pull out full state when no state resolution happens. ([\#12775](https://github.com/matrix-org/synapse/issues/12775))
- Do not keep going if there are 5 back-to-back background update failures. ([\#12781](https://github.com/matrix-org/synapse/issues/12781))
- Fix federation when using the demo scripts. ([\#12783](https://github.com/matrix-org/synapse/issues/12783))
- The `hash_password` script now fails when it is called without specifying a config file. Contributed by @jae1911. ([\#12789](https://github.com/matrix-org/synapse/issues/12789))
- Improve and fix type hints. ([\#12567](https://github.com/matrix-org/synapse/issues/12567), [\#12477](https://github.com/matrix-org/synapse/issues/12477), [\#12717](https://github.com/matrix-org/synapse/issues/12717), [\#12753](https://github.com/matrix-org/synapse/issues/12753), [\#12695](https://github.com/matrix-org/synapse/issues/12695), [\#12734](https://github.com/matrix-org/synapse/issues/12734), [\#12716](https://github.com/matrix-org/synapse/issues/12716), [\#12726](https://github.com/matrix-org/synapse/issues/12726), [\#12790](https://github.com/matrix-org/synapse/issues/12790), [\#12833](https://github.com/matrix-org/synapse/issues/12833))
- Update EventContext `get_current_event_ids` and `get_prev_event_ids` to accept state filters and update calls where possible. ([\#12791](https://github.com/matrix-org/synapse/issues/12791))
- Remove Caddy from the Synapse workers image used in Complement. ([\#12818](https://github.com/matrix-org/synapse/issues/12818))
- Add Complement's shared registration secret to the Complement worker image. This fixes tests that depend on it. ([\#12819](https://github.com/matrix-org/synapse/issues/12819))
- Support registering Application Services when running with workers under Complement. ([\#12826](https://github.com/matrix-org/synapse/issues/12826))
- Disable 'faster room join' Complement tests when testing against Synapse with workers. ([\#12842](https://github.com/matrix-org/synapse/issues/12842))
Synapse 1.59.1 (2022-05-18)
===========================
This release fixes a long-standing issue which could prevent Synapse's user directory from updating properly.
Bugfixes
--------
- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. Contributed by Nick @ Beeper. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))
Synapse 1.59.0 (2022-05-17)
===========================
@@ -475,7 +78,7 @@ Deprecations and Removals
-------------------------
- Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). ([\#12596](https://github.com/matrix-org/synapse/issues/12596))
- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from
[MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
- Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. ([\#12613](https://github.com/matrix-org/synapse/issues/12613))


@@ -34,14 +34,6 @@ additional-css = [
"docs/website_files/table-of-contents.css",
"docs/website_files/remove-nav-buttons.css",
"docs/website_files/indent-section-headers.css",
"docs/website_files/version-picker.css",
]
additional-js = [
"docs/website_files/table-of-contents.js",
"docs/website_files/version-picker.js",
"docs/website_files/version.js",
]
theme = "docs/website_files/theme"
[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"
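
With this `book.toml`, the documentation site is built with `mdbook`, which runs the configured `schema_versions.py` preprocessor as part of the build. A hypothetical local build:

```bash
# Requires mdbook on PATH; output lands in ./book by default.
mdbook build
# Or rebuild and serve with live reload while editing:
mdbook serve
```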

1
changelog.d/10533.misc Normal file
View File

@@ -0,0 +1 @@
Improve event caching mechanism to avoid having multiple copies of an event in memory at a time.

1
changelog.d/12477.misc Normal file
View File

@@ -0,0 +1 @@
Add some type hints to datastore.

View File

@@ -0,0 +1 @@
Measure the time taken in spam-checking callbacks and expose those measurements as metrics.

1
changelog.d/12567.misc Normal file
View File

@@ -0,0 +1 @@
Replace string literal instances of stream key types with typed constants.

1
changelog.d/12586.misc Normal file
View File

@@ -0,0 +1 @@
Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect.

1
changelog.d/12588.misc Normal file
View File

@@ -0,0 +1 @@
Add ability to cancel disconnected requests to `SynapseRequest`.

View File

@@ -0,0 +1 @@
Add a `default_power_level_content_override` config option to set default room power levels per room preset.

1
changelog.d/12630.misc Normal file
View File

@@ -0,0 +1 @@
Add a helper class for testing request cancellation.

View File

@@ -0,0 +1 @@
Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal.

1
changelog.d/12676.misc Normal file
View File

@@ -0,0 +1 @@
Improve documentation of the `synapse.push` module.

1
changelog.d/12677.misc Normal file
View File

@@ -0,0 +1 @@
Refactor functions on `PushRuleEvaluatorForEvent`.

1
changelog.d/12679.misc Normal file
View File

@@ -0,0 +1 @@
Preparation for database schema simplifications: stop writing to `event_reference_hashes`.

1
changelog.d/12683.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room.

1
changelog.d/12689.misc Normal file
View File

@@ -0,0 +1 @@
Refactor `EventContext` class.

1
changelog.d/12691.misc Normal file
View File

@@ -0,0 +1 @@
Remove an unneeded class in the push code.

1
changelog.d/12693.misc Normal file
View File

@@ -0,0 +1 @@
Consolidate parsing of relation information from events.

1
changelog.d/12694.misc Normal file
View File

@@ -0,0 +1 @@
Capture the `Deferred` for request cancellation in `_AsyncResource`.

1
changelog.d/12695.misc Normal file
View File

@@ -0,0 +1 @@
Fix an incorrect type hint for `Filter._check_event_relations`.

1
changelog.d/12696.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room.

1
changelog.d/12698.misc Normal file
View File

@@ -0,0 +1 @@
Respect the `@cancellable` flag for `DirectServe{Html,Json}Resource`s.

1
changelog.d/12699.misc Normal file
View File

@@ -0,0 +1 @@
Respect the `@cancellable` flag for `RestServlet`s and `BaseFederationServlet`s.

1
changelog.d/12700.misc Normal file
View File

@@ -0,0 +1 @@
Respect the `@cancellable` flag for `ReplicationEndpoint`s.

View File

@@ -0,0 +1 @@
Add config options to allow for auto-tuning of caches.

1
changelog.d/12705.misc Normal file
View File

@@ -0,0 +1 @@
Complain if a federation endpoint has the `@cancellable` flag, since some of the wrapper code may not handle cancellation correctly yet.

1
changelog.d/12708.misc Normal file
View File

@@ -0,0 +1 @@
Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests.

View File

@@ -0,0 +1 @@
Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts.

1
changelog.d/12711.misc Normal file
View File

@@ -0,0 +1 @@
Optimize private read receipt filtering.

1
changelog.d/12713.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set.

1
changelog.d/12715.doc Normal file
View File

@@ -0,0 +1 @@
Fix a typo in the Media Admin API documentation.

1
changelog.d/12716.misc Normal file
View File

@@ -0,0 +1 @@
Add type annotations to increase the number of modules passing `disallow-untyped-defs`.

1
changelog.d/12720.misc Normal file
View File

@@ -0,0 +1 @@
Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG.

1
changelog.d/12726.misc Normal file
View File

@@ -0,0 +1 @@
Add type annotations to increase the number of modules passing `disallow-untyped-defs`.

1
changelog.d/12727.doc Normal file
View File

@@ -0,0 +1 @@
Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh.

1
changelog.d/12731.misc Normal file
View File

@@ -0,0 +1 @@
Update configs used by Complement to allow more invites/3PID validations during tests.

1
changelog.d/12734.misc Normal file
View File

@@ -0,0 +1 @@
Tidy up and type-hint the database engine modules.

1
changelog.d/12742.doc Normal file
View File

@@ -0,0 +1 @@
Fix typo in server listener documentation.

1
changelog.d/12747.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers.

1
changelog.d/12749.doc Normal file
View File

@@ -0,0 +1 @@
Fix typo in 'run_background_tasks_on' option name in configuration manual documentation.

1
changelog.d/12762.misc Normal file
View File

@@ -0,0 +1 @@
Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar.

View File

@@ -16,7 +16,6 @@
""" Starts a synapse client console. """
import argparse
import binascii
import cmd
import getpass
import json
@@ -27,8 +26,9 @@ import urllib
from http import TwistedHttpClient
from typing import Optional
import nacl.encoding
import nacl.signing
import urlparse
from signedjson.key import NACL_ED25519, decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from twisted.internet import defer, reactor, threads
@@ -41,6 +41,7 @@ TRUSTED_ID_SERVERS = ["localhost:8001"]
class SynapseCmd(cmd.Cmd):
"""Basic synapse command-line processor.
This processes commands from the user and calls the relevant HTTP methods.
@@ -419,8 +420,8 @@ class SynapseCmd(cmd.Cmd):
pubKey = None
pubKeyObj = yield self.http_client.do_request("GET", url)
if "public_key" in pubKeyObj:
pubKey = decode_verify_key_bytes(
NACL_ED25519, binascii.unhexlify(pubKeyObj["public_key"])
pubKey = nacl.signing.VerifyKey(
pubKeyObj["public_key"], encoder=nacl.encoding.HexEncoder
)
else:
print("No public key found in pubkey response!")

View File

@@ -1,125 +0,0 @@
# Setting up Synapse with Workers using Docker Compose
This directory describes how to deploy and manage Synapse and workers via [Docker Compose](https://docs.docker.com/compose/).
Example worker configuration files can be found [here](workers).
All examples and snippets assume that your Synapse service is called `synapse` in your Docker Compose file.
An example Docker Compose file can be found [here](docker-compose.yaml).
## Worker Service Examples in Docker Compose
In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER`, or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` (after `"/start.py", "run"`).
### Generic Worker Example
```yaml
synapse-generic-worker-1:
image: matrixdotorg/synapse:latest
container_name: synapse-generic-worker-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
healthcheck:
test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
start_period: "5s"
interval: "15s"
timeout: "5s"
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.generic_worker
# Expose port if required so your reverse proxy can send requests to this worker
# Port configuration will depend on how the http listener is defined in the worker configuration file
ports:
- 8081:8081
depends_on:
- synapse
```
### Federation Sender Example
Please note: the federation sender does not receive REST API calls, so no exposed ports are required.
```yaml
synapse-federation-sender-1:
image: matrixdotorg/synapse:latest
container_name: synapse-federation-sender-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
healthcheck:
disable: true
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.federation_sender
depends_on:
- synapse
```
## `homeserver.yaml` Configuration
### Enable Redis
Locate the `redis` section of your `homeserver.yaml` and enable and configure it:
```yaml
redis:
enabled: true
host: redis
port: 6379
# password: <secret_password>
```
This assumes that your Redis service is called `redis` in your Docker Compose file.
### Add a Replication Listener
Locate the `listeners` section of your `homeserver.yaml` and add the following replication listener:
```yaml
listeners:
# Other listeners
- port: 9093
type: http
resources:
- names: [replication]
```
This listener is used by the workers for replication and is referred to in worker config files using the following settings:
```yaml
worker_replication_host: synapse
worker_replication_http_port: 9093
```
### Add Workers to `instance_map`
Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers:
```yaml
instance_map:
synapse-generic-worker-1: # The worker_name setting in your worker configuration file
host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file
port: 8034 # The port assigned to the replication listener in your worker config file
synapse-federation-sender-1:
host: synapse-federation-sender-1
port: 8034
```
### Configure Federation Senders
This section is applicable if you are using federation senders (`synapse.app.federation_sender`). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
```yaml
# This will disable federation sending on the main Synapse instance
send_federation: false
federation_sender_instances:
- synapse-federation-sender-1 # The worker_name setting in your federation sender worker configuration file
```
## Other Worker Types
Using the concepts shown here, it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
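For example, a push notification worker (`synapse.app.pusher`) can follow the same pattern as the federation sender above. This is a sketch only: the service name and worker config path are placeholders for this example, and, like the federation sender, the pusher serves no REST API so no ports need exposing.
```yaml
synapse-pusher-1:
  image: matrixdotorg/synapse:latest
  container_name: synapse-pusher-1
  restart: unless-stopped
  entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-pusher-1.yaml"]
  healthcheck:
    disable: true
  volumes:
    - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
  environment:
    SYNAPSE_WORKER: synapse.app.pusher
  depends_on:
    - synapse
```
You would also hand push-sending off to the worker in `homeserver.yaml`, analogous to the federation sender settings above (`start_pushers: false` together with `pusher_instances`, as described in the Synapse workers documentation).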

View File

@@ -1,77 +0,0 @@
networks:
backend:
services:
postgres:
image: postgres:latest
restart: unless-stopped
volumes:
- ${VOLUME_PATH}/var/lib/postgresql/data:/var/lib/postgresql/data:rw
networks:
- backend
environment:
POSTGRES_DB: synapse
POSTGRES_USER: synapse_user
POSTGRES_PASSWORD: postgres
POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=C
redis:
image: redis:latest
restart: unless-stopped
networks:
- backend
synapse:
image: matrixdotorg/synapse:latest
container_name: synapse
restart: unless-stopped
volumes:
- ${VOLUME_PATH}/data:/data:rw
ports:
- 8008:8008
networks:
- backend
environment:
SYNAPSE_CONFIG_DIR: /data
SYNAPSE_CONFIG_PATH: /data/homeserver.yaml
depends_on:
- postgres
synapse-generic-worker-1:
image: matrixdotorg/synapse:latest
container_name: synapse-generic-worker-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
healthcheck:
test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
start_period: "5s"
interval: "15s"
timeout: "5s"
networks:
- backend
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.generic_worker
# Expose port if required so your reverse proxy can send requests to this worker
# Port configuration will depend on how the http listener is defined in the worker configuration file
ports:
- 8081:8081
depends_on:
- synapse
synapse-federation-sender-1:
image: matrixdotorg/synapse:latest
container_name: synapse-federation-sender-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
healthcheck:
disable: true
networks:
- backend
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.federation_sender
depends_on:
- synapse

View File

@@ -1,14 +0,0 @@
worker_app: synapse.app.federation_sender
worker_name: synapse-federation-sender-1
# The replication listener on the main synapse process.
worker_replication_host: synapse
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8034
resources:
- names: [replication]
worker_log_config: /data/federation_sender.log.config

View File

@@ -1,19 +0,0 @@
worker_app: synapse.app.generic_worker
worker_name: synapse-generic-worker-1
# The replication listener on the main synapse process.
worker_replication_host: synapse
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8034
resources:
- names: [replication]
- type: http
port: 8081
x_forwarded: true
resources:
- names: [client, federation]
worker_log_config: /data/worker.log.config

View File

@@ -0,0 +1,165 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import curses
from curses.ascii import isprint
from twisted.internet import reactor
class CursesStdIO:
def __init__(self, stdscr, callback=None):
self.statusText = "Synapse test app -"
self.searchText = ""
self.stdscr = stdscr
self.logLine = ""
self.callback = callback
self._setup()
def _setup(self):
self.stdscr.nodelay(1) # Make non blocking
self.rows, self.cols = self.stdscr.getmaxyx()
self.lines = []
curses.use_default_colors()
self.paintStatus(self.statusText)
self.stdscr.refresh()
def set_callback(self, callback):
self.callback = callback
def fileno(self):
"""We want to select on FD 0"""
return 0
def connectionLost(self, reason):
self.close()
def print_line(self, text):
"""add a line to the internal list of lines"""
self.lines.append(text)
self.redraw()
def print_log(self, text):
self.logLine = text
self.redraw()
def redraw(self):
"""method for redisplaying lines based on internal list of lines"""
self.stdscr.clear()
self.paintStatus(self.statusText)
i = 0
index = len(self.lines) - 1
while i < (self.rows - 3) and index >= 0:
self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index], curses.A_NORMAL)
i = i + 1
index = index - 1
self.printLogLine(self.logLine)
self.stdscr.refresh()
def paintStatus(self, text):
if len(text) > self.cols:
raise RuntimeError("TextTooLongError")
self.stdscr.addstr(
self.rows - 2, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
)
def printLogLine(self, text):
self.stdscr.addstr(
0, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
)
def doRead(self):
"""Input is ready!"""
curses.noecho()
c = self.stdscr.getch() # read a character
if c == curses.KEY_BACKSPACE:
self.searchText = self.searchText[:-1]
elif c == curses.KEY_ENTER or c == 10:
text = self.searchText
self.searchText = ""
self.print_line(">> %s" % text)
try:
if self.callback:
self.callback.on_line(text)
except Exception as e:
self.print_line(str(e))
self.stdscr.refresh()
elif isprint(c):
if len(self.searchText) == self.cols - 2:
return
self.searchText = self.searchText + chr(c)
self.stdscr.addstr(
self.rows - 1,
0,
self.searchText + (" " * (self.cols - len(self.searchText) - 2)),
)
self.paintStatus(self.statusText + " %d" % len(self.searchText))
self.stdscr.move(self.rows - 1, len(self.searchText))
self.stdscr.refresh()
def logPrefix(self):
return "CursesStdIO"
def close(self):
"""clean up"""
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
class Callback:
def __init__(self, stdio):
self.stdio = stdio
def on_line(self, text):
self.stdio.print_line(text)
def main(stdscr):
screen = CursesStdIO(stdscr) # create Screen object
callback = Callback(screen)
screen.set_callback(callback)
stdscr.refresh()
reactor.addReader(screen)
reactor.run()
screen.close()
if __name__ == "__main__":
curses.wrapper(main)

View File

@@ -0,0 +1,367 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is an example of using the server to server implementation to do a
basic chat style thing. It accepts commands from stdin and outputs to stdout.
It assumes that ucids are of the form <user>@<domain>, and uses <domain> as
the address of the remote home server to hit.
Usage:
python test_messaging.py <port>
Currently assumes the local address is localhost:<port>
"""
import argparse
import curses
import json
import logging
import os
import re
import cursesio
from twisted.internet import defer, reactor
from twisted.python import log
from synapse.app.homeserver import SynapseHomeServer
from synapse.federation import ReplicationHandler
from synapse.federation.units import Pdu
from synapse.util import origin_from_ucid
# from synapse.logging.utils import log_function
logger = logging.getLogger("example")
def exception_errback(failure):
logging.exception(failure)
class InputOutput:
"""This is responsible for basic I/O so that a user can interact with
the example app.
"""
def __init__(self, screen, user):
self.screen = screen
self.user = user
def set_home_server(self, server):
self.server = server
def on_line(self, line):
"""This is where we process commands."""
try:
m = re.match(r"^join (\S+)$", line)
if m:
# The `sender` wants to join a room.
(room_name,) = m.groups()
self.print_line("%s joining %s" % (self.user, room_name))
self.server.join_room(room_name, self.user, self.user)
# self.print_line("OK.")
return
m = re.match(r"^invite (\S+) (\S+)$", line)
if m:
# `sender` wants to invite someone to a room
room_name, invitee = m.groups()
self.print_line("%s invited to %s" % (invitee, room_name))
self.server.invite_to_room(room_name, self.user, invitee)
# self.print_line("OK.")
return
m = re.match(r"^send (\S+) (.*)$", line)
if m:
# `sender` wants to message a room
room_name, body = m.groups()
self.print_line("%s send to %s" % (self.user, room_name))
self.server.send_message(room_name, self.user, body)
# self.print_line("OK.")
return
m = re.match(r"^backfill (\S+)$", line)
if m:
# we want to backfill a room
(room_name,) = m.groups()
self.print_line("backfill %s" % room_name)
self.server.backfill(room_name)
return
self.print_line("Unrecognized command")
except Exception as e:
logger.exception(e)
def print_line(self, text):
self.screen.print_line(text)
def print_log(self, text):
self.screen.print_log(text)
class IOLoggerHandler(logging.Handler):
def __init__(self, io):
logging.Handler.__init__(self)
self.io = io
def emit(self, record):
if record.levelno < logging.WARN:
return
msg = self.format(record)
self.io.print_log(msg)
class Room:
"""Used to store (in memory) the current membership state of a room, and
which home servers we should send PDUs associated with the room to.
"""
def __init__(self, room_name):
self.room_name = room_name
self.invited = set()
self.participants = set()
self.servers = set()
self.oldest_server = None
self.have_got_metadata = False
def add_participant(self, participant):
"""Someone has joined the room"""
self.participants.add(participant)
self.invited.discard(participant)
server = origin_from_ucid(participant)
self.servers.add(server)
if not self.oldest_server:
self.oldest_server = server
def add_invited(self, invitee):
"""Someone has been invited to the room"""
self.invited.add(invitee)
self.servers.add(origin_from_ucid(invitee))
class HomeServer(ReplicationHandler):
"""A very basic home server implentation that allows people to join a
room and then invite other people.
"""
def __init__(self, server_name, replication_layer, output):
self.server_name = server_name
self.replication_layer = replication_layer
self.replication_layer.set_handler(self)
self.joined_rooms = {}
self.output = output
def on_receive_pdu(self, pdu):
"""We just received a PDU"""
pdu_type = pdu.pdu_type
if pdu_type == "sy.room.message":
self._on_message(pdu)
elif pdu_type == "sy.room.member" and "membership" in pdu.content:
if pdu.content["membership"] == "join":
self._on_join(pdu.context, pdu.state_key)
elif pdu.content["membership"] == "invite":
self._on_invite(pdu.origin, pdu.context, pdu.state_key)
else:
self.output.print_line(
"#%s (unrec) %s = %s"
% (pdu.context, pdu.pdu_type, json.dumps(pdu.content))
)
def _on_message(self, pdu):
"""We received a message"""
self.output.print_line(
"#%s %s %s" % (pdu.context, pdu.content["sender"], pdu.content["body"])
)
def _on_join(self, context, joinee):
"""Someone has joined a room, either a remote user or a local user"""
room = self._get_or_create_room(context)
room.add_participant(joinee)
self.output.print_line("#%s %s %s" % (context, joinee, "*** JOINED"))
def _on_invite(self, origin, context, invitee):
"""Someone has been invited"""
room = self._get_or_create_room(context)
room.add_invited(invitee)
self.output.print_line("#%s %s %s" % (context, invitee, "*** INVITED"))
if not room.have_got_metadata and origin is not self.server_name:
logger.debug("Get room state")
self.replication_layer.get_state_for_context(origin, context)
room.have_got_metadata = True
@defer.inlineCallbacks
def send_message(self, room_name, sender, body):
"""Send a message to a room!"""
destinations = yield self.get_servers_for_context(room_name)
try:
yield self.replication_layer.send_pdu(
Pdu.create_new(
context=room_name,
pdu_type="sy.room.message",
content={"sender": sender, "body": body},
origin=self.server_name,
destinations=destinations,
)
)
except Exception as e:
logger.exception(e)
@defer.inlineCallbacks
def join_room(self, room_name, sender, joinee):
"""Join a room!"""
self._on_join(room_name, joinee)
destinations = yield self.get_servers_for_context(room_name)
try:
pdu = Pdu.create_new(
context=room_name,
pdu_type="sy.room.member",
is_state=True,
state_key=joinee,
content={"membership": "join"},
origin=self.server_name,
destinations=destinations,
)
yield self.replication_layer.send_pdu(pdu)
except Exception as e:
logger.exception(e)
@defer.inlineCallbacks
def invite_to_room(self, room_name, sender, invitee):
"""Invite someone to a room!"""
self._on_invite(self.server_name, room_name, invitee)
destinations = yield self.get_servers_for_context(room_name)
try:
yield self.replication_layer.send_pdu(
Pdu.create_new(
context=room_name,
is_state=True,
pdu_type="sy.room.member",
state_key=invitee,
content={"membership": "invite"},
origin=self.server_name,
destinations=destinations,
)
)
except Exception as e:
logger.exception(e)
def backfill(self, room_name, limit=5):
room = self.joined_rooms.get(room_name)
if not room:
return
dest = room.oldest_server
return self.replication_layer.backfill(dest, room_name, limit)
def _get_room_remote_servers(self, room_name):
# Use a Room default so an unknown room doesn't store None via setdefault
# and then fail with an AttributeError on `.servers`.
return list(self.joined_rooms.setdefault(room_name, Room(room_name)).servers)
def _get_or_create_room(self, room_name):
return self.joined_rooms.setdefault(room_name, Room(room_name))
def get_servers_for_context(self, context):
return defer.succeed(
self.joined_rooms.setdefault(context, Room(context)).servers
)
def main(stdscr):
parser = argparse.ArgumentParser()
parser.add_argument("user", type=str)
parser.add_argument("-v", "--verbose", action="count")
args = parser.parse_args()
user = args.user
server_name = origin_from_ucid(user)
# Set up logging
root_logger = logging.getLogger()
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
)
if not os.path.exists("logs"):
os.makedirs("logs")
fh = logging.FileHandler("logs/%s" % user)
fh.setFormatter(formatter)
root_logger.addHandler(fh)
root_logger.setLevel(logging.DEBUG)
# Hack: The only way to get it to stop logging to sys.stderr :(
log.theLogPublisher.observers = []
observer = log.PythonLoggingObserver()
observer.start()
# Set up synapse server
curses_stdio = cursesio.CursesStdIO(stdscr)
input_output = InputOutput(curses_stdio, user)
curses_stdio.set_callback(input_output)
app_hs = SynapseHomeServer(server_name, db_name="dbs/%s" % user)
replication = app_hs.get_replication_layer()
hs = HomeServer(server_name, replication, curses_stdio)
input_output.set_home_server(hs)
# Add input_output logger
io_logger = IOLoggerHandler(input_output)
io_logger.setFormatter(formatter)
root_logger.addHandler(io_logger)
# Start!
try:
port = int(server_name.split(":")[1])
except Exception:
port = 12345
app_hs.get_http_server().start_listening(port)
reactor.addReader(curses_stdio)
reactor.run()
if __name__ == "__main__":
curses.wrapper(main)

View File

@@ -1,3 +1,11 @@
import argparse
import cgi
import datetime
import json
import pydot
import urllib2
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,25 +20,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import cgi
import datetime
import json
import urllib.request
from typing import List
import pydot
def make_name(pdu_id, origin):
return "%s@%s" % (pdu_id, origin)
def make_name(pdu_id: str, origin: str) -> str:
return f"{pdu_id}@{origin}"
def make_graph(pdus: List[dict], filename_prefix: str) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by querying a homeserver.
"""
def make_graph(pdus, room, filename_prefix):
pdu_map = {}
node_map = {}
@@ -116,10 +111,10 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None:
graph.write_svg("%s.svg" % filename_prefix, prog="dot")
def get_pdus(host: str, room: str) -> List[dict]:
def get_pdus(host, room):
transaction = json.loads(
urllib.request.urlopen(
f"http://{host}/_matrix/federation/v1/context/{room}/"
urllib2.urlopen(
"http://%s/_matrix/federation/v1/context/%s/" % (host, room)
).read()
)
@@ -146,4 +141,4 @@ if __name__ == "__main__":
pdus = get_pdus(host, room)
make_graph(pdus, prefix)
make_graph(pdus, room, prefix)

View File

@@ -14,31 +14,22 @@
import argparse
import cgi
import datetime
import html
import json
import sqlite3
import pydot
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by reading from a Synapse SQLite database.
"""
def make_graph(db_name, room_id, file_prefix, limit):
conn = sqlite3.connect(db_name)
sql = "SELECT room_version FROM rooms WHERE room_id = ?"
c = conn.execute(sql, (room_id,))
room_version = KNOWN_ROOM_VERSIONS[c.fetchone()[0]]
sql = (
"SELECT json, internal_metadata FROM event_json as j "
"SELECT json FROM event_json as j "
"INNER JOIN events as e ON e.event_id = j.event_id "
"WHERE j.room_id = ?"
)
@@ -52,10 +43,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
c = conn.execute(sql, args)
events = [
make_event_from_dict(json.loads(e[0]), room_version, json.loads(e[1]))
for e in c.fetchall()
]
events = [FrozenEvent(json.loads(e[0])) for e in c.fetchall()]
events.sort(key=lambda e: e.depth)
@@ -96,7 +84,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
"name": event.event_id,
"type": event.type,
"state_key": event.get("state_key", None),
"content": html.escape(content, quote=True),
"content": cgi.escape(content, quote=True),
"time": t,
"depth": event.depth,
"state_group": state_group,
@@ -108,11 +96,11 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
graph.add_node(node)
for event in events:
for prev_id in event.prev_event_ids():
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")
end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
node_map[prev_id] = end_node
graph.add_node(end_node)
@@ -124,7 +112,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
if len(event_ids) <= 1:
continue
cluster = pydot.Cluster(str(group), label=f"<State Group: {str(group)}>")
cluster = pydot.Cluster(str(group), label="<State Group: %s>" % (str(group),))
for event_id in event_ids:
cluster.add_node(node_map[event_id])
@@ -138,7 +126,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by talking "
"to the given Synapse SQLite file to get the list of PDUs. \n"
"to the given homeserver to get the list of PDUs. \n"
"Requires pydot."
)
parser.add_argument(

View File

@@ -1,3 +1,13 @@
import argparse
import cgi
import datetime
import pydot
import simplejson as json
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,35 +22,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import html
import json
import pydot
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.util.frozenutils import unfreeze
def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by reading line-delimited JSON from a file.
"""
def make_graph(file_name, room_id, file_prefix, limit):
print("Reading lines")
with open(file_name) as f:
lines = f.readlines()
print("Read lines")
# Figure out the room version, assume the first line is the create event.
room_version = KNOWN_ROOM_VERSIONS[
json.loads(lines[0]).get("content", {}).get("room_version")
]
events = [make_event_from_dict(json.loads(line), room_version) for line in lines]
events = [FrozenEvent(json.loads(line)) for line in lines]
print("Loaded events.")
@@ -76,8 +66,8 @@ def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
content.append(
"<b>%s</b>: %s,"
% (
html.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"),
html.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"),
cgi.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"),
cgi.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"),
)
)
@@ -111,11 +101,11 @@ def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
print("Created Nodes")
for event in events:
for prev_id in event.prev_event_ids():
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")
end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
node_map[prev_id] = end_node
graph.add_node(end_node)
@@ -149,7 +139,8 @@ if __name__ == "__main__":
)
parser.add_argument("-l", "--limit", help="Only retrieve the last N events.")
parser.add_argument("event_file")
parser.add_argument("room")
args = parser.parse_args()
make_graph(args.event_file, args.prefix, args.limit)
make_graph(args.event_file, args.room, args.prefix, args.limit)

View File

@@ -0,0 +1,298 @@
#!/usr/bin/env python
"""
This is an attempt at bridging matrix clients into a Jitsi Meet room via Matrix
video call. It uses hard-coded XML strings over XMPP BOSH. It can display one
of the streams from the Jitsi bridge until the second lot of SDP comes down and
we set the remote SDP at which point the stream ends. Our video never gets to
the bridge.
Requires:
npm install jquery jsdom
"""
import json
import subprocess
import time
import gevent
import grequests
from BeautifulSoup import BeautifulSoup
ACCESS_TOKEN = ""
MATRIXBASE = "https://matrix.org/_matrix/client/api/v1/"
MYUSERNAME = "@davetest:matrix.org"
HTTPBIND = "https://meet.jit.si/http-bind"
# HTTPBIND = 'https://jitsi.vuc.me/http-bind'
# ROOMNAME = "matrix"
ROOMNAME = "pibble"
HOST = "guest.jit.si"
# HOST="jitsi.vuc.me"
TURNSERVER = "turn.guest.jit.si"
# TURNSERVER="turn.jitsi.vuc.me"
ROOMDOMAIN = "meet.jit.si"
# ROOMDOMAIN="conference.jitsi.vuc.me"
class TrivialMatrixClient:
def __init__(self, access_token):
self.token = None
self.access_token = access_token
def getEvent(self):
while True:
url = (
MATRIXBASE
+ "events?access_token="
+ self.access_token
+ "&timeout=60000"
)
if self.token:
url += "&from=" + self.token
req = grequests.get(url)
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print("incoming from matrix", obj)
if "end" not in obj:
continue
self.token = obj["end"]
if len(obj["chunk"]):
return obj["chunk"][0]
def joinRoom(self, roomId):
url = MATRIXBASE + "rooms/" + roomId + "/join?access_token=" + self.access_token
print(url)
headers = {"Content-Type": "application/json"}
req = grequests.post(url, headers=headers, data="{}")
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print("response: ", obj)
def sendEvent(self, roomId, evType, event):
url = (
MATRIXBASE
+ "rooms/"
+ roomId
+ "/send/"
+ evType
+ "?access_token="
+ self.access_token
)
print(url)
print(json.dumps(event))
headers = {"Content-Type": "application/json"}
req = grequests.post(url, headers=headers, data=json.dumps(event))
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print("response: ", obj)
xmppClients = {}
def matrixLoop():
while True:
ev = matrixCli.getEvent()
print(ev)
if ev["type"] == "m.room.member":
print("membership event")
if ev["membership"] == "invite" and ev["state_key"] == MYUSERNAME:
roomId = ev["room_id"]
print("joining room %s" % (roomId))
matrixCli.joinRoom(roomId)
elif ev["type"] == "m.room.message":
if ev["room_id"] in xmppClients:
print("already have a bridge for that user, ignoring")
continue
print("got message, connecting")
xmppClients[ev["room_id"]] = TrivialXmppClient(ev["room_id"], ev["user_id"])
gevent.spawn(xmppClients[ev["room_id"]].xmppLoop)
elif ev["type"] == "m.call.invite":
print("Incoming call")
# sdp = ev['content']['offer']['sdp']
# print "sdp: %s" % (sdp)
# xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
# gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
elif ev["type"] == "m.call.answer":
print("Call answered")
sdp = ev["content"]["answer"]["sdp"]
if ev["room_id"] not in xmppClients:
print("We didn't have a call for that room")
continue
# should probably check call ID too
xmppCli = xmppClients[ev["room_id"]]
xmppCli.sendAnswer(sdp)
elif ev["type"] == "m.call.hangup":
if ev["room_id"] in xmppClients:
xmppClients[ev["room_id"]].stop()
del xmppClients[ev["room_id"]]
class TrivialXmppClient:
def __init__(self, matrixRoom, userId):
self.rid = 0
self.matrixRoom = matrixRoom
self.userId = userId
self.running = True
def stop(self):
self.running = False
def nextRid(self):
self.rid += 1
return "%d" % (self.rid)
def sendIq(self, xml):
fullXml = (
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s'>%s</body>"
% (self.nextRid(), self.sid, xml)
)
# print "\t>>>%s" % (fullXml)
return self.xmppPoke(fullXml)
def xmppPoke(self, xml):
headers = {"Content-Type": "application/xml"}
req = grequests.post(HTTPBIND, verify=False, headers=headers, data=xml)
resps = grequests.map([req])
obj = BeautifulSoup(resps[0].content)
return obj
def sendAnswer(self, answer):
print("sdp from matrix client", answer)
p = subprocess.Popen(
["node", "unjingle/unjingle.js", "--sdp"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
jingle, out_err = p.communicate(answer)
jingle = jingle % {
"tojid": self.callfrom,
"action": "session-accept",
"initiator": self.callfrom,
"responder": self.jid,
"sid": self.callsid,
}
print("answer jingle from sdp", jingle)
res = self.sendIq(jingle)
print("reply from answer: ", res)
self.ssrcs = {}
jingleSoup = BeautifulSoup(jingle)
for cont in jingleSoup.iq.jingle.findAll("content"):
if cont.description:
self.ssrcs[cont["name"]] = cont.description["ssrc"]
print("my ssrcs:", self.ssrcs)
gevent.joinall([gevent.spawn(self.advertiseSsrcs)])
def advertiseSsrcs(self):
time.sleep(7)
print("SSRC spammer started")
while self.running:
ssrcMsg = (
"<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
% {
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
"nick": self.userId,
"assrc": self.ssrcs["audio"],
"vssrc": self.ssrcs["video"],
}
)
res = self.sendIq(ssrcMsg)
print("reply from ssrc announce: ", res)
time.sleep(10)
def xmppLoop(self):
self.matrixCallId = time.time()
res = self.xmppPoke(
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' to='%s' xml:lang='en' wait='60' hold='1' content='text/xml; charset=utf-8' ver='1.6' xmpp:version='1.0' xmlns:xmpp='urn:xmpp:xbosh'/>"
% (self.nextRid(), HOST)
)
print(res)
self.sid = res.body["sid"]
print("sid %s" % (self.sid))
res = self.sendIq(
"<auth xmlns='urn:ietf:params:xml:ns:xmpp-sasl' mechanism='ANONYMOUS'/>"
)
res = self.xmppPoke(
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s' to='%s' xml:lang='en' xmpp:restart='true' xmlns:xmpp='urn:xmpp:xbosh'/>"
% (self.nextRid(), self.sid, HOST)
)
res = self.sendIq(
"<iq type='set' id='_bind_auth_2' xmlns='jabber:client'><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'/></iq>"
)
print(res)
self.jid = res.body.iq.bind.jid.string
print("jid: %s" % (self.jid))
self.shortJid = self.jid.split("-")[0]
res = self.sendIq(
"<iq type='set' id='_session_auth_2' xmlns='jabber:client'><session xmlns='urn:ietf:params:xml:ns:xmpp-session'/></iq>"
)
# randomthing = res.body.iq['to']
# whatsitpart = randomthing.split('-')[0]
# print "other random bind thing: %s" % (randomthing)
# advertise presence to the jitsi room, with our nick
res = self.sendIq(
"<iq type='get' to='%s' xmlns='jabber:client' id='1:sendIQ'><services xmlns='urn:xmpp:extdisco:1'><service host='%s'/></services></iq><presence to='%s@%s/d98f6c40' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%s</nick></presence>"
% (HOST, TURNSERVER, ROOMNAME, ROOMDOMAIN, self.userId)
)
self.muc = {"users": []}
for p in res.body.findAll("presence"):
u = {}
u["shortJid"] = p["from"].split("/")[1]
if p.c and p.c.nick:
u["nick"] = p.c.nick.string
self.muc["users"].append(u)
print("muc: ", self.muc)
# wait for stuff
while True:
print("waiting...")
res = self.sendIq("")
print("got from stream: ", res)
if res.body.iq:
jingles = res.body.iq.findAll("jingle")
if len(jingles):
self.callfrom = res.body.iq["from"]
self.handleInvite(jingles[0])
elif "type" in res.body and res.body["type"] == "terminate":
self.running = False
del xmppClients[self.matrixRoom]
return
def handleInvite(self, jingle):
self.initiator = jingle["initiator"]
self.callsid = jingle["sid"]
p = subprocess.Popen(
["node", "unjingle/unjingle.js", "--jingle"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
print("raw jingle invite", str(jingle))
sdp, out_err = p.communicate(str(jingle))
print("transformed remote offer sdp", sdp)
inviteEvent = {
"offer": {"type": "offer", "sdp": sdp},
"call_id": self.matrixCallId,
"version": 0,
"lifetime": 30000,
}
matrixCli.sendEvent(self.matrixRoom, "m.call.invite", inviteEvent)
matrixCli = TrivialMatrixClient(ACCESS_TOKEN) # Undefined name
gevent.joinall([gevent.spawn(matrixLoop)])

View File

@@ -0,0 +1,188 @@
diff --git a/syweb/webclient/app/components/matrix/matrix-call.js b/syweb/webclient/app/components/matrix/matrix-call.js
index 9fbfff0..dc68077 100644
--- a/syweb/webclient/app/components/matrix/matrix-call.js
+++ b/syweb/webclient/app/components/matrix/matrix-call.js
@@ -16,6 +16,45 @@ limitations under the License.
'use strict';
+
+function sendKeyframe(pc) {
+ console.log('sendkeyframe', pc.iceConnectionState);
+ if (pc.iceConnectionState !== 'connected') return; // safe...
+ pc.setRemoteDescription(
+ pc.remoteDescription,
+ function () {
+ pc.createAnswer(
+ function (modifiedAnswer) {
+ pc.setLocalDescription(
+ modifiedAnswer,
+ function () {
+ // noop
+ },
+ function (error) {
+ console.log('triggerKeyframe setLocalDescription failed', error);
+ messageHandler.showError();
+ }
+ );
+ },
+ function (error) {
+ console.log('triggerKeyframe createAnswer failed', error);
+ messageHandler.showError();
+ }
+ );
+ },
+ function (error) {
+ console.log('triggerKeyframe setRemoteDescription failed', error);
+ messageHandler.showError();
+ }
+ );
+}
+
+
+
+
+
+
+
var forAllVideoTracksOnStream = function(s, f) {
var tracks = s.getVideoTracks();
for (var i = 0; i < tracks.length; i++) {
@@ -83,7 +122,7 @@ angular.module('MatrixCall', [])
}
// FIXME: we should prevent any calls from being placed or accepted before this has finished
- MatrixCall.getTurnServer();
+ //MatrixCall.getTurnServer();
MatrixCall.CALL_TIMEOUT = 60000;
MatrixCall.FALLBACK_STUN_SERVER = 'stun:stun.l.google.com:19302';
@@ -132,6 +171,22 @@ angular.module('MatrixCall', [])
pc.onsignalingstatechange = function() { self.onSignallingStateChanged(); };
pc.onicecandidate = function(c) { self.gotLocalIceCandidate(c); };
pc.onaddstream = function(s) { self.onAddStream(s); };
+
+ var datachan = pc.createDataChannel('RTCDataChannel', {
+ reliable: false
+ });
+ console.log("data chan: "+datachan);
+ datachan.onopen = function() {
+ console.log("data channel open");
+ };
+ datachan.onmessage = function() {
+ console.log("data channel message");
+ };
+ pc.ondatachannel = function(event) {
+ console.log("have data channel");
+ event.channel.binaryType = 'blob';
+ };
+
return pc;
}
@@ -200,6 +255,12 @@ angular.module('MatrixCall', [])
}, this.msg.lifetime - event.age);
};
+ MatrixCall.prototype.receivedInvite = function(event) {
+ console.log("Got second invite for call "+this.call_id);
+ this.peerConn.setRemoteDescription(new RTCSessionDescription(this.msg.offer), this.onSetRemoteDescriptionSuccess, this.onSetRemoteDescriptionError);
+ };
+
+
// perverse as it may seem, sometimes we want to instantiate a call with a hangup message
// (because when getting the state of the room on load, events come in reverse order and
// we want to remember that a call has been hung up)
@@ -349,7 +410,7 @@ angular.module('MatrixCall', [])
'mandatory': {
'OfferToReceiveAudio': true,
'OfferToReceiveVideo': this.type == 'video'
- },
+ }
};
this.peerConn.createAnswer(function(d) { self.createdAnswer(d); }, function(e) {}, constraints);
// This can't be in an apply() because it's called by a predecessor call under glare conditions :(
@@ -359,8 +420,20 @@ angular.module('MatrixCall', [])
MatrixCall.prototype.gotLocalIceCandidate = function(event) {
if (event.candidate) {
console.log("Got local ICE "+event.candidate.sdpMid+" candidate: "+event.candidate.candidate);
- this.sendCandidate(event.candidate);
- }
+ //this.sendCandidate(event.candidate);
+ } else {
+ console.log("have all candidates, sending answer");
+ var content = {
+ version: 0,
+ call_id: this.call_id,
+ answer: this.peerConn.localDescription
+ };
+ this.sendEventWithRetry('m.call.answer', content);
+ var self = this;
+ $rootScope.$apply(function() {
+ self.state = 'connecting';
+ });
+ }
}
MatrixCall.prototype.gotRemoteIceCandidate = function(cand) {
@@ -418,15 +491,6 @@ angular.module('MatrixCall', [])
console.log("Created answer: "+description);
var self = this;
this.peerConn.setLocalDescription(description, function() {
- var content = {
- version: 0,
- call_id: self.call_id,
- answer: self.peerConn.localDescription
- };
- self.sendEventWithRetry('m.call.answer', content);
- $rootScope.$apply(function() {
- self.state = 'connecting';
- });
}, function() { console.log("Error setting local description!"); } );
};
@@ -448,6 +512,9 @@ angular.module('MatrixCall', [])
$rootScope.$apply(function() {
self.state = 'connected';
self.didConnect = true;
+ /*$timeout(function() {
+ sendKeyframe(self.peerConn);
+ }, 1000);*/
});
} else if (this.peerConn.iceConnectionState == 'failed') {
this.hangup('ice_failed');
@@ -518,6 +585,7 @@ angular.module('MatrixCall', [])
MatrixCall.prototype.onRemoteStreamEnded = function(event) {
console.log("Remote stream ended");
+ return;
var self = this;
$rootScope.$apply(function() {
self.state = 'ended';
diff --git a/syweb/webclient/app/components/matrix/matrix-phone-service.js b/syweb/webclient/app/components/matrix/matrix-phone-service.js
index 55dbbf5..272fa27 100644
--- a/syweb/webclient/app/components/matrix/matrix-phone-service.js
+++ b/syweb/webclient/app/components/matrix/matrix-phone-service.js
@@ -48,6 +48,13 @@ angular.module('matrixPhoneService', [])
return;
}
+ // do we already have an entry for this call ID?
+ var existingEntry = matrixPhoneService.allCalls[msg.call_id];
+ if (existingEntry) {
+ existingEntry.receivedInvite(msg);
+ return;
+ }
+
var call = undefined;
if (!isLive) {
// if this event wasn't live then this call may already be over
@@ -108,7 +115,7 @@ angular.module('matrixPhoneService', [])
call.hangup();
}
} else {
- $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
+ $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
}
} else if (event.type == 'm.call.answer') {
var call = matrixPhoneService.allCalls[msg.call_id];

View File

@@ -0,0 +1,712 @@
/* jshint -W117 */
// SDP STUFF
function SDP(sdp) {
this.media = sdp.split('\r\nm=');
for (var i = 1; i < this.media.length; i++) {
this.media[i] = 'm=' + this.media[i];
if (i != this.media.length - 1) {
this.media[i] += '\r\n';
}
}
this.session = this.media.shift() + '\r\n';
this.raw = this.session + this.media.join('');
}
exports.SDP = SDP;
var jsdom = require("jsdom");
var window = jsdom.jsdom().parentWindow;
var $ = require('jquery')(window);
var SDPUtil = require('./strophe.jingle.sdp.util.js').SDPUtil;
/**
* Returns map of MediaChannel mapped per channel idx.
*/
SDP.prototype.getMediaSsrcMap = function() {
var self = this;
var media_ssrcs = {};
for (var channelNum = 0; channelNum < self.media.length; channelNum++) {
var tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc:');
var type = SDPUtil.parse_mid(SDPUtil.find_line(self.media[channelNum], 'a=mid:'));
var channel = new MediaChannel(channelNum, type);
media_ssrcs[channelNum] = channel;
tmp.forEach(function (line) {
var linessrc = line.substring(7).split(' ')[0];
// allocate new ChannelSsrc
if(!channel.ssrcs[linessrc]) {
channel.ssrcs[linessrc] = new ChannelSsrc(linessrc, type);
}
channel.ssrcs[linessrc].lines.push(line);
});
tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc-group:');
tmp.forEach(function(line){
var idx = line.indexOf(' ');
var semantics = line.substr(0, idx).substr(13);
var ssrcs = line.substr(14 + semantics.length).split(' ');
if (ssrcs.length != 0) {
var ssrcGroup = new ChannelSsrcGroup(semantics, ssrcs);
channel.ssrcGroups.push(ssrcGroup);
}
});
}
return media_ssrcs;
};
/**
* Returns <tt>true</tt> if this SDP contains given SSRC.
* @param ssrc the ssrc to check.
* @returns {boolean} <tt>true</tt> if this SDP contains given SSRC.
*/
SDP.prototype.containsSSRC = function(ssrc) {
var channels = this.getMediaSsrcMap();
var contains = false;
Object.keys(channels).forEach(function(chNumber){
var channel = channels[chNumber];
//console.log("Check", channel, ssrc);
if(Object.keys(channel.ssrcs).indexOf(ssrc) != -1){
contains = true;
}
});
return contains;
};
/**
* Returns map of MediaChannel that contains only media not contained in <tt>otherSdp</tt>. Mapped by channel idx.
* @param otherSdp the other SDP to check ssrc with.
*/
SDP.prototype.getNewMedia = function(otherSdp) {
// this could be useful in Array.prototype.
function arrayEquals(array) {
// if the other array is a falsy value, return
if (!array)
return false;
// compare lengths - can save a lot of time
if (this.length != array.length)
return false;
for (var i = 0, l=this.length; i < l; i++) {
// Check if we have nested arrays
if (this[i] instanceof Array && array[i] instanceof Array) {
// recurse into the nested arrays
if (!arrayEquals.apply(this[i], [array[i]]))
return false;
}
else if (this[i] != array[i]) {
// Warning - two different object instances will never be equal: {x:20} != {x:20}
return false;
}
}
return true;
}
var myMedia = this.getMediaSsrcMap();
var othersMedia = otherSdp.getMediaSsrcMap();
var newMedia = {};
Object.keys(othersMedia).forEach(function(channelNum) {
var myChannel = myMedia[channelNum];
var othersChannel = othersMedia[channelNum];
if(!myChannel && othersChannel) {
// Add whole channel
newMedia[channelNum] = othersChannel;
return;
}
// Look for new ssrcs across the channel
Object.keys(othersChannel.ssrcs).forEach(function(ssrc) {
if(Object.keys(myChannel.ssrcs).indexOf(ssrc) === -1) {
// Allocate channel if we've found ssrc that doesn't exist in our channel
if(!newMedia[channelNum]){
newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
}
newMedia[channelNum].ssrcs[ssrc] = othersChannel.ssrcs[ssrc];
}
});
// Look for new ssrc groups across the channels
othersChannel.ssrcGroups.forEach(function(otherSsrcGroup){
// try to match the other ssrc-group with an ssrc-group of ours
var matched = false;
for (var i = 0; i < myChannel.ssrcGroups.length; i++) {
var mySsrcGroup = myChannel.ssrcGroups[i];
if (otherSsrcGroup.semantics == mySsrcGroup.semantics
&& arrayEquals.apply(otherSsrcGroup.ssrcs, [mySsrcGroup.ssrcs])) {
matched = true;
break;
}
}
if (!matched) {
// Allocate channel if we've found an ssrc-group that doesn't
// exist in our channel
if(!newMedia[channelNum]){
newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
}
newMedia[channelNum].ssrcGroups.push(otherSsrcGroup);
}
});
});
return newMedia;
};
// remove iSAC and CN from SDP
SDP.prototype.mangle = function () {
var i, j, mline, lines, rtpmap, newdesc;
for (i = 0; i < this.media.length; i++) {
lines = this.media[i].split('\r\n');
lines.pop(); // remove empty last element
mline = SDPUtil.parse_mline(lines.shift());
if (mline.media != 'audio')
continue;
newdesc = '';
mline.fmt.length = 0;
for (j = 0; j < lines.length; j++) {
if (lines[j].substr(0, 9) == 'a=rtpmap:') {
rtpmap = SDPUtil.parse_rtpmap(lines[j]);
if (rtpmap.name == 'CN' || rtpmap.name == 'ISAC')
continue;
mline.fmt.push(rtpmap.id);
newdesc += lines[j] + '\r\n';
} else {
newdesc += lines[j] + '\r\n';
}
}
this.media[i] = SDPUtil.build_mline(mline) + '\r\n';
this.media[i] += newdesc;
}
this.raw = this.session + this.media.join('');
};
// remove lines matching prefix from session section
SDP.prototype.removeSessionLines = function(prefix) {
var self = this;
var lines = SDPUtil.find_lines(this.session, prefix);
lines.forEach(function(line) {
self.session = self.session.replace(line + '\r\n', '');
});
this.raw = this.session + this.media.join('');
return lines;
}
// remove lines matching prefix from a media section specified by mediaindex
// TODO: non-numeric mediaindex could match mid
SDP.prototype.removeMediaLines = function(mediaindex, prefix) {
var self = this;
var lines = SDPUtil.find_lines(this.media[mediaindex], prefix);
lines.forEach(function(line) {
self.media[mediaindex] = self.media[mediaindex].replace(line + '\r\n', '');
});
this.raw = this.session + this.media.join('');
return lines;
}
// add content's to a jingle element
SDP.prototype.toJingle = function (elem, thecreator) {
var i, j, k, mline, ssrc, rtpmap, tmp, line, lines;
var self = this;
// new bundle plan
if (SDPUtil.find_line(this.session, 'a=group:')) {
lines = SDPUtil.find_lines(this.session, 'a=group:');
for (i = 0; i < lines.length; i++) {
tmp = lines[i].split(' ');
var semantics = tmp.shift().substr(8);
elem.c('group', {xmlns: 'urn:xmpp:jingle:apps:grouping:0', semantics:semantics});
for (j = 0; j < tmp.length; j++) {
elem.c('content', {name: tmp[j]}).up();
}
elem.up();
}
}
// old bundle plan, to be removed
var bundle = [];
if (SDPUtil.find_line(this.session, 'a=group:BUNDLE')) {
bundle = SDPUtil.find_line(this.session, 'a=group:BUNDLE ').split(' ');
bundle.shift();
}
for (i = 0; i < this.media.length; i++) {
mline = SDPUtil.parse_mline(this.media[i].split('\r\n')[0]);
if (!(mline.media === 'audio' ||
mline.media === 'video' ||
mline.media === 'application'))
{
continue;
}
if (SDPUtil.find_line(this.media[i], 'a=ssrc:')) {
ssrc = SDPUtil.find_line(this.media[i], 'a=ssrc:').substring(7).split(' ')[0]; // take the first
} else {
ssrc = false;
}
elem.c('content', {creator: thecreator, name: mline.media});
if (SDPUtil.find_line(this.media[i], 'a=mid:')) {
// prefer identifier from a=mid if present
var mid = SDPUtil.parse_mid(SDPUtil.find_line(this.media[i], 'a=mid:'));
elem.attrs({ name: mid });
// old BUNDLE plan, to be removed
if (bundle.indexOf(mid) !== -1) {
elem.c('bundle', {xmlns: 'http://estos.de/ns/bundle'}).up();
bundle.splice(bundle.indexOf(mid), 1);
}
}
if (SDPUtil.find_line(this.media[i], 'a=rtpmap:').length)
{
elem.c('description',
{xmlns: 'urn:xmpp:jingle:apps:rtp:1',
media: mline.media });
if (ssrc) {
elem.attrs({ssrc: ssrc});
}
for (j = 0; j < mline.fmt.length; j++) {
rtpmap = SDPUtil.find_line(this.media[i], 'a=rtpmap:' + mline.fmt[j]);
elem.c('payload-type', SDPUtil.parse_rtpmap(rtpmap));
// put any 'a=fmtp:' + mline.fmt[j] lines into <param name=foo value=bar/>
if (SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j])) {
tmp = SDPUtil.parse_fmtp(SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j]));
for (k = 0; k < tmp.length; k++) {
elem.c('parameter', tmp[k]).up();
}
}
this.RtcpFbToJingle(i, elem, mline.fmt[j]); // XEP-0293 -- map a=rtcp-fb
elem.up();
}
if (SDPUtil.find_line(this.media[i], 'a=crypto:', this.session)) {
elem.c('encryption', {required: 1});
var crypto = SDPUtil.find_lines(this.media[i], 'a=crypto:', this.session);
crypto.forEach(function(line) {
elem.c('crypto', SDPUtil.parse_crypto(line)).up();
});
elem.up(); // end of encryption
}
if (ssrc) {
// new style mapping
elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
// FIXME: group by ssrc and support multiple different ssrcs
var ssrclines = SDPUtil.find_lines(this.media[i], 'a=ssrc:');
ssrclines.forEach(function(line) {
var idx = line.indexOf(' ');
var linessrc = line.substr(0, idx).substr(7);
if (linessrc != ssrc) {
elem.up();
ssrc = linessrc;
elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
}
var kv = line.substr(idx + 1);
elem.c('parameter');
if (kv.indexOf(':') == -1) {
elem.attrs({ name: kv });
} else {
elem.attrs({ name: kv.split(':', 2)[0] });
elem.attrs({ value: kv.split(':', 2)[1] });
}
elem.up();
});
elem.up();
// old proprietary mapping, to be removed at some point
tmp = SDPUtil.parse_ssrc(this.media[i]);
tmp.xmlns = 'http://estos.de/ns/ssrc';
tmp.ssrc = ssrc;
elem.c('ssrc', tmp).up(); // ssrc is part of description
// XEP-0339 handle ssrc-group attributes
var ssrc_group_lines = SDPUtil.find_lines(this.media[i], 'a=ssrc-group:');
ssrc_group_lines.forEach(function(line) {
                var idx = line.indexOf(' ');
var semantics = line.substr(0, idx).substr(13);
var ssrcs = line.substr(14 + semantics.length).split(' ');
if (ssrcs.length != 0) {
elem.c('ssrc-group', { semantics: semantics, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
ssrcs.forEach(function(ssrc) {
elem.c('source', { ssrc: ssrc })
.up();
});
elem.up();
}
});
}
if (SDPUtil.find_line(this.media[i], 'a=rtcp-mux')) {
elem.c('rtcp-mux').up();
}
// XEP-0293 -- map a=rtcp-fb:*
this.RtcpFbToJingle(i, elem, '*');
// XEP-0294
if (SDPUtil.find_line(this.media[i], 'a=extmap:')) {
lines = SDPUtil.find_lines(this.media[i], 'a=extmap:');
for (j = 0; j < lines.length; j++) {
tmp = SDPUtil.parse_extmap(lines[j]);
elem.c('rtp-hdrext', { xmlns: 'urn:xmpp:jingle:apps:rtp:rtp-hdrext:0',
uri: tmp.uri,
id: tmp.value });
if (tmp.hasOwnProperty('direction')) {
switch (tmp.direction) {
case 'sendonly':
elem.attrs({senders: 'responder'});
break;
case 'recvonly':
elem.attrs({senders: 'initiator'});
break;
case 'sendrecv':
elem.attrs({senders: 'both'});
break;
case 'inactive':
elem.attrs({senders: 'none'});
break;
}
}
// TODO: handle params
elem.up();
}
}
elem.up(); // end of description
}
// map ice-ufrag/pwd, dtls fingerprint, candidates
this.TransportToJingle(i, elem);
if (SDPUtil.find_line(this.media[i], 'a=sendrecv', this.session)) {
elem.attrs({senders: 'both'});
} else if (SDPUtil.find_line(this.media[i], 'a=sendonly', this.session)) {
elem.attrs({senders: 'initiator'});
} else if (SDPUtil.find_line(this.media[i], 'a=recvonly', this.session)) {
elem.attrs({senders: 'responder'});
} else if (SDPUtil.find_line(this.media[i], 'a=inactive', this.session)) {
elem.attrs({senders: 'none'});
}
if (mline.port == '0') {
// estos hack to reject an m-line
elem.attrs({senders: 'rejected'});
}
elem.up(); // end of content
}
elem.up();
return elem;
};
SDP.prototype.TransportToJingle = function (mediaindex, elem) {
var i = mediaindex;
var tmp;
var self = this;
elem.c('transport');
// XEP-0343 DTLS/SCTP
if (SDPUtil.find_line(this.media[mediaindex], 'a=sctpmap:').length)
{
var sctpmap = SDPUtil.find_line(
this.media[i], 'a=sctpmap:', self.session);
if (sctpmap)
{
var sctpAttrs = SDPUtil.parse_sctpmap(sctpmap);
elem.c('sctpmap',
{
xmlns: 'urn:xmpp:jingle:transports:dtls-sctp:1',
number: sctpAttrs[0], /* SCTP port */
protocol: sctpAttrs[1], /* protocol */
});
// Optional stream count attribute
if (sctpAttrs.length > 2)
elem.attrs({ streams: sctpAttrs[2]});
elem.up();
}
}
// XEP-0320
var fingerprints = SDPUtil.find_lines(this.media[mediaindex], 'a=fingerprint:', this.session);
fingerprints.forEach(function(line) {
tmp = SDPUtil.parse_fingerprint(line);
tmp.xmlns = 'urn:xmpp:jingle:apps:dtls:0';
elem.c('fingerprint').t(tmp.fingerprint);
delete tmp.fingerprint;
line = SDPUtil.find_line(self.media[mediaindex], 'a=setup:', self.session);
if (line) {
tmp.setup = line.substr(8);
}
elem.attrs(tmp);
elem.up(); // end of fingerprint
});
tmp = SDPUtil.iceparams(this.media[mediaindex], this.session);
if (tmp) {
tmp.xmlns = 'urn:xmpp:jingle:transports:ice-udp:1';
elem.attrs(tmp);
// XEP-0176
if (SDPUtil.find_line(this.media[mediaindex], 'a=candidate:', this.session)) { // add any a=candidate lines
var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=candidate:', this.session);
lines.forEach(function (line) {
elem.c('candidate', SDPUtil.candidateToJingle(line)).up();
});
}
}
elem.up(); // end of transport
};
SDP.prototype.RtcpFbToJingle = function (mediaindex, elem, payloadtype) { // XEP-0293
var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=rtcp-fb:' + payloadtype);
lines.forEach(function (line) {
var tmp = SDPUtil.parse_rtcpfb(line);
if (tmp.type == 'trr-int') {
elem.c('rtcp-fb-trr-int', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', value: tmp.params[0]});
elem.up();
} else {
elem.c('rtcp-fb', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', type: tmp.type});
if (tmp.params.length > 0) {
elem.attrs({'subtype': tmp.params[0]});
}
elem.up();
}
});
};
SDP.prototype.RtcpFbFromJingle = function (elem, payloadtype) { // XEP-0293
var media = '';
var tmp = elem.find('>rtcp-fb-trr-int[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
if (tmp.length) {
media += 'a=rtcp-fb:' + '*' + ' ' + 'trr-int' + ' ';
if (tmp.attr('value')) {
media += tmp.attr('value');
} else {
media += '0';
}
media += '\r\n';
}
tmp = elem.find('>rtcp-fb[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
tmp.each(function () {
media += 'a=rtcp-fb:' + payloadtype + ' ' + $(this).attr('type');
if ($(this).attr('subtype')) {
media += ' ' + $(this).attr('subtype');
}
media += '\r\n';
});
return media;
};
// construct an SDP from a jingle stanza
SDP.prototype.fromJingle = function (jingle) {
var self = this;
this.raw = 'v=0\r\n' +
        'o=- ' + '1923518516' + ' 2 IN IP4 0.0.0.0\r\n' + // FIXME
's=-\r\n' +
't=0 0\r\n';
// http://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-04#section-8
if ($(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').length) {
$(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').each(function (idx, group) {
var contents = $(group).find('>content').map(function (idx, content) {
return content.getAttribute('name');
}).get();
if (contents.length > 0) {
self.raw += 'a=group:' + (group.getAttribute('semantics') || group.getAttribute('type')) + ' ' + contents.join(' ') + '\r\n';
}
});
} else if ($(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').length) {
        // temporary namespace, not to be used; to be removed soon.
$(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').each(function (idx, group) {
var contents = $(group).find('>content').map(function (idx, content) {
return content.getAttribute('name');
}).get();
if (group.getAttribute('type') !== null && contents.length > 0) {
self.raw += 'a=group:' + group.getAttribute('type') + ' ' + contents.join(' ') + '\r\n';
}
});
} else {
        // for backward compatibility, to be removed soon
// assume all contents are in the same bundle group, can be improved upon later
var bundle = $(jingle).find('>content').filter(function (idx, content) {
//elem.c('bundle', {xmlns:'http://estos.de/ns/bundle'});
return $(content).find('>bundle').length > 0;
}).map(function (idx, content) {
return content.getAttribute('name');
}).get();
if (bundle.length) {
this.raw += 'a=group:BUNDLE ' + bundle.join(' ') + '\r\n';
}
}
this.session = this.raw;
jingle.find('>content').each(function () {
var m = self.jingle2media($(this));
self.media.push(m);
});
// reconstruct msid-semantic -- apparently not necessary
/*
var msid = SDPUtil.parse_ssrc(this.raw);
if (msid.hasOwnProperty('mslabel')) {
this.session += "a=msid-semantic: WMS " + msid.mslabel + "\r\n";
}
*/
this.raw = this.session + this.media.join('');
};
// translate a jingle content element into an SDP media part
SDP.prototype.jingle2media = function (content) {
var media = '',
desc = content.find('description'),
ssrc = desc.attr('ssrc'),
self = this,
tmp;
var sctp = content.find(
'>transport>sctpmap[xmlns="urn:xmpp:jingle:transports:dtls-sctp:1"]');
tmp = { media: desc.attr('media') };
tmp.port = '1';
if (content.attr('senders') == 'rejected') {
// estos hack to reject an m-line.
tmp.port = '0';
}
if (content.find('>transport>fingerprint').length || desc.find('encryption').length) {
if (sctp.length)
tmp.proto = 'DTLS/SCTP';
else
tmp.proto = 'RTP/SAVPF';
} else {
tmp.proto = 'RTP/AVPF';
}
if (!sctp.length)
{
tmp.fmt = desc.find('payload-type').map(
function () { return this.getAttribute('id'); }).get();
media += SDPUtil.build_mline(tmp) + '\r\n';
}
else
{
media += 'm=application 1 DTLS/SCTP ' + sctp.attr('number') + '\r\n';
media += 'a=sctpmap:' + sctp.attr('number') +
' ' + sctp.attr('protocol');
var streamCount = sctp.attr('streams');
if (streamCount)
media += ' ' + streamCount + '\r\n';
else
media += '\r\n';
}
media += 'c=IN IP4 0.0.0.0\r\n';
if (!sctp.length)
media += 'a=rtcp:1 IN IP4 0.0.0.0\r\n';
//tmp = content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
tmp = content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
//console.log('transports: '+content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
//console.log('bundle.transports: '+content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
//console.log("tmp fingerprint: "+tmp.find('>fingerprint').innerHTML);
if (tmp.length) {
if (tmp.attr('ufrag')) {
media += SDPUtil.build_iceufrag(tmp.attr('ufrag')) + '\r\n';
}
if (tmp.attr('pwd')) {
media += SDPUtil.build_icepwd(tmp.attr('pwd')) + '\r\n';
}
tmp.find('>fingerprint').each(function () {
// FIXME: check namespace at some point
media += 'a=fingerprint:' + this.getAttribute('hash');
media += ' ' + $(this).text();
media += '\r\n';
//console.log("mline "+media);
if (this.getAttribute('setup')) {
media += 'a=setup:' + this.getAttribute('setup') + '\r\n';
}
});
}
switch (content.attr('senders')) {
case 'initiator':
media += 'a=sendonly\r\n';
break;
case 'responder':
media += 'a=recvonly\r\n';
break;
case 'none':
media += 'a=inactive\r\n';
break;
case 'both':
media += 'a=sendrecv\r\n';
break;
}
media += 'a=mid:' + content.attr('name') + '\r\n';
/*if (content.attr('name') == 'video') {
media += 'a=x-google-flag:conference' + '\r\n';
}*/
// <description><rtcp-mux/></description>
// see http://code.google.com/p/libjingle/issues/detail?id=309 -- no spec though
// and http://mail.jabber.org/pipermail/jingle/2011-December/001761.html
if (desc.find('rtcp-mux').length) {
media += 'a=rtcp-mux\r\n';
}
if (desc.find('encryption').length) {
desc.find('encryption>crypto').each(function () {
media += 'a=crypto:' + this.getAttribute('tag');
media += ' ' + this.getAttribute('crypto-suite');
media += ' ' + this.getAttribute('key-params');
if (this.getAttribute('session-params')) {
media += ' ' + this.getAttribute('session-params');
}
media += '\r\n';
});
}
desc.find('payload-type').each(function () {
media += SDPUtil.build_rtpmap(this) + '\r\n';
if ($(this).find('>parameter').length) {
media += 'a=fmtp:' + this.getAttribute('id') + ' ';
media += $(this).find('parameter').map(function () { return (this.getAttribute('name') ? (this.getAttribute('name') + '=') : '') + this.getAttribute('value'); }).get().join('; ');
media += '\r\n';
}
// xep-0293
media += self.RtcpFbFromJingle($(this), this.getAttribute('id'));
});
// xep-0293
media += self.RtcpFbFromJingle(desc, '*');
// xep-0294
tmp = desc.find('>rtp-hdrext[xmlns="urn:xmpp:jingle:apps:rtp:rtp-hdrext:0"]');
tmp.each(function () {
media += 'a=extmap:' + this.getAttribute('id') + ' ' + this.getAttribute('uri') + '\r\n';
});
content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]>candidate').each(function () {
media += SDPUtil.candidateFromJingle(this);
});
// XEP-0339 handle ssrc-group attributes
tmp = content.find('description>ssrc-group[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]').each(function() {
var semantics = this.getAttribute('semantics');
var ssrcs = $(this).find('>source').map(function() {
return this.getAttribute('ssrc');
}).get();
if (ssrcs.length != 0) {
media += 'a=ssrc-group:' + semantics + ' ' + ssrcs.join(' ') + '\r\n';
}
});
tmp = content.find('description>source[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]');
tmp.each(function () {
var ssrc = this.getAttribute('ssrc');
$(this).find('>parameter').each(function () {
media += 'a=ssrc:' + ssrc + ' ' + this.getAttribute('name');
if (this.getAttribute('value') && this.getAttribute('value').length)
media += ':' + this.getAttribute('value');
media += '\r\n';
});
});
if (tmp.length === 0) {
// fallback to proprietary mapping of a=ssrc lines
tmp = content.find('description>ssrc[xmlns="http://estos.de/ns/ssrc"]');
if (tmp.length) {
media += 'a=ssrc:' + ssrc + ' cname:' + tmp.attr('cname') + '\r\n';
media += 'a=ssrc:' + ssrc + ' msid:' + tmp.attr('msid') + '\r\n';
media += 'a=ssrc:' + ssrc + ' mslabel:' + tmp.attr('mslabel') + '\r\n';
media += 'a=ssrc:' + ssrc + ' label:' + tmp.attr('label') + '\r\n';
}
}
return media;
};
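Taken together, `toJingle` and `fromJingle` round-trip between raw SDP text and Jingle XML. The sketch below shows how a caller might drive them; it assumes the `SDP` constructor defined earlier in this file is in scope, and `rawSdp`, `peerJid`, `myJid`, `sid` and `jingleElem` (a jQuery-wrapped `<jingle/>` element) are hypothetical inputs:

```js
// Sketch only: SDP -> Jingle for an outgoing session-initiate.
var sdp = new SDP(rawSdp);
var init = $iq({to: peerJid, type: 'set'})
    .c('jingle', {xmlns: 'urn:xmpp:jingle:1',
                  action: 'session-initiate',
                  initiator: myJid,
                  sid: sid});
sdp.toJingle(init, 'initiator'); // appends <content/> elements to the stanza

// Jingle -> SDP for an incoming stanza.
var remote = new SDP('');
remote.fromJingle(jingleElem);
console.log(remote.raw); // reassembled session + media sections
```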


@@ -0,0 +1,408 @@
/**
 * Contains utility classes used by the SDP class.
*
*/
/**
* Class holds a=ssrc lines and media type a=mid
 * @param ssrc synchronization source identifier number (a=ssrc lines from SDP)
 * @param type media type, e.g. "audio" or "video" (a=mid from SDP)
* @constructor
*/
function ChannelSsrc(ssrc, type) {
this.ssrc = ssrc;
this.type = type;
this.lines = [];
}
/**
* Class holds a=ssrc-group: lines
 * @param semantics the ssrc-group semantics (e.g. "FID")
 * @param ssrcs the ssrc numbers that form the group
 * @param line the original a=ssrc-group line (currently unused)
* @constructor
*/
function ChannelSsrcGroup(semantics, ssrcs, line) {
this.semantics = semantics;
this.ssrcs = ssrcs;
}
/**
 * Helper class that represents a media channel. It is a container for ChannelSsrc objects and holds the channel index and media type.
* @param channelNumber channel idx in SDP media array.
* @param mediaType media type(a=mid)
* @constructor
*/
function MediaChannel(channelNumber, mediaType) {
/**
* SDP channel number
* @type {*}
*/
this.chNumber = channelNumber;
/**
* Channel media type(a=mid)
* @type {*}
*/
this.mediaType = mediaType;
/**
     * The map of ssrc numbers to ChannelSsrc objects.
*/
this.ssrcs = {};
/**
* The array of ChannelSsrcGroup objects.
* @type {Array}
*/
this.ssrcGroups = [];
}
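To make the relationship between these helpers concrete, a minimal sketch of how they might be wired together (the ssrc values are made up):

```js
// Sketch only: a MediaChannel aggregating one ssrc and one ssrc-group.
var channel = new MediaChannel(0, 'audio');

var ssrc = new ChannelSsrc('2979166662', 'audio');
ssrc.lines.push('a=ssrc:2979166662 cname:mixed'); // raw a=ssrc line kept for later
channel.ssrcs[ssrc.ssrc] = ssrc;

// Group two hypothetical ssrcs under FID semantics.
channel.ssrcGroups.push(new ChannelSsrcGroup('FID', ['2979166662', '123456']));
```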
SDPUtil = {
iceparams: function (mediadesc, sessiondesc) {
var data = null;
if (SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc) &&
SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc)) {
data = {
ufrag: SDPUtil.parse_iceufrag(SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc)),
pwd: SDPUtil.parse_icepwd(SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc))
};
}
return data;
},
parse_iceufrag: function (line) {
return line.substring(12);
},
build_iceufrag: function (frag) {
return 'a=ice-ufrag:' + frag;
},
parse_icepwd: function (line) {
return line.substring(10);
},
build_icepwd: function (pwd) {
return 'a=ice-pwd:' + pwd;
},
parse_mid: function (line) {
return line.substring(6);
},
parse_mline: function (line) {
var parts = line.substring(2).split(' '),
data = {};
data.media = parts.shift();
data.port = parts.shift();
data.proto = parts.shift();
if (parts[parts.length - 1] === '') { // trailing whitespace
parts.pop();
}
data.fmt = parts;
return data;
},
build_mline: function (mline) {
return 'm=' + mline.media + ' ' + mline.port + ' ' + mline.proto + ' ' + mline.fmt.join(' ');
},
parse_rtpmap: function (line) {
var parts = line.substring(9).split(' '),
data = {};
data.id = parts.shift();
parts = parts[0].split('/');
data.name = parts.shift();
data.clockrate = parts.shift();
data.channels = parts.length ? parts.shift() : '1';
return data;
},
/**
* Parses SDP line "a=sctpmap:..." and extracts SCTP port from it.
     * @param line e.g. "a=sctpmap:5000 webrtc-datachannel"
* @returns [SCTP port number, protocol, streams]
*/
parse_sctpmap: function (line)
{
var parts = line.substring(10).split(' ');
var sctpPort = parts[0];
var protocol = parts[1];
// Stream count is optional
var streamCount = parts.length > 2 ? parts[2] : null;
        return [sctpPort, protocol, streamCount];
},
build_rtpmap: function (el) {
var line = 'a=rtpmap:' + el.getAttribute('id') + ' ' + el.getAttribute('name') + '/' + el.getAttribute('clockrate');
if (el.getAttribute('channels') && el.getAttribute('channels') != '1') {
line += '/' + el.getAttribute('channels');
}
return line;
},
parse_crypto: function (line) {
var parts = line.substring(9).split(' '),
data = {};
data.tag = parts.shift();
data['crypto-suite'] = parts.shift();
data['key-params'] = parts.shift();
if (parts.length) {
data['session-params'] = parts.join(' ');
}
return data;
},
parse_fingerprint: function (line) { // RFC 4572
var parts = line.substring(14).split(' '),
data = {};
data.hash = parts.shift();
data.fingerprint = parts.shift();
// TODO assert that fingerprint satisfies 2UHEX *(":" 2UHEX) ?
return data;
},
parse_fmtp: function (line) {
var parts = line.split(' '),
i, key, value,
data = [];
parts.shift();
parts = parts.join(' ').split(';');
for (i = 0; i < parts.length; i++) {
key = parts[i].split('=')[0];
while (key.length && key[0] == ' ') {
key = key.substring(1);
}
value = parts[i].split('=')[1];
if (key && value) {
data.push({name: key, value: value});
} else if (key) {
// rfc 4733 (DTMF) style stuff
data.push({name: '', value: key});
}
}
return data;
},
parse_icecandidate: function (line) {
var candidate = {},
elems = line.split(' ');
candidate.foundation = elems[0].substring(12);
candidate.component = elems[1];
candidate.protocol = elems[2].toLowerCase();
candidate.priority = elems[3];
candidate.ip = elems[4];
candidate.port = elems[5];
// elems[6] => "typ"
candidate.type = elems[7];
candidate.generation = 0; // default value, may be overwritten below
for (var i = 8; i < elems.length; i += 2) {
switch (elems[i]) {
case 'raddr':
candidate['rel-addr'] = elems[i + 1];
break;
case 'rport':
candidate['rel-port'] = elems[i + 1];
break;
case 'generation':
candidate.generation = elems[i + 1];
break;
case 'tcptype':
candidate.tcptype = elems[i + 1];
break;
default: // TODO
console.log('parse_icecandidate not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
}
}
candidate.network = '1';
candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
return candidate;
},
build_icecandidate: function (cand) {
var line = ['a=candidate:' + cand.foundation, cand.component, cand.protocol, cand.priority, cand.ip, cand.port, 'typ', cand.type].join(' ');
line += ' ';
switch (cand.type) {
case 'srflx':
case 'prflx':
case 'relay':
            if (cand.hasOwnProperty('rel-addr') && cand.hasOwnProperty('rel-port')) {
line += 'raddr';
line += ' ';
line += cand['rel-addr'];
line += ' ';
line += 'rport';
line += ' ';
line += cand['rel-port'];
line += ' ';
}
break;
}
        if (cand.hasOwnProperty('tcptype')) {
line += 'tcptype';
line += ' ';
line += cand.tcptype;
line += ' ';
}
line += 'generation';
line += ' ';
        line += cand.hasOwnProperty('generation') ? cand.generation : '0';
return line;
},
parse_ssrc: function (desc) {
// proprietary mapping of a=ssrc lines
// TODO: see "Jingle RTP Source Description" by Juberti and P. Thatcher on google docs
// and parse according to that
var lines = desc.split('\r\n'),
data = {};
for (var i = 0; i < lines.length; i++) {
if (lines[i].substring(0, 7) == 'a=ssrc:') {
var idx = lines[i].indexOf(' ');
data[lines[i].substr(idx + 1).split(':', 2)[0]] = lines[i].substr(idx + 1).split(':', 2)[1];
}
}
return data;
},
parse_rtcpfb: function (line) {
var parts = line.substr(10).split(' ');
var data = {};
data.pt = parts.shift();
data.type = parts.shift();
data.params = parts;
return data;
},
parse_extmap: function (line) {
var parts = line.substr(9).split(' ');
var data = {};
data.value = parts.shift();
if (data.value.indexOf('/') != -1) {
data.direction = data.value.substr(data.value.indexOf('/') + 1);
data.value = data.value.substr(0, data.value.indexOf('/'));
} else {
data.direction = 'both';
}
data.uri = parts.shift();
data.params = parts;
return data;
},
find_line: function (haystack, needle, sessionpart) {
var lines = haystack.split('\r\n');
for (var i = 0; i < lines.length; i++) {
if (lines[i].substring(0, needle.length) == needle) {
return lines[i];
}
}
if (!sessionpart) {
return false;
}
// search session part
lines = sessionpart.split('\r\n');
for (var j = 0; j < lines.length; j++) {
if (lines[j].substring(0, needle.length) == needle) {
return lines[j];
}
}
return false;
},
find_lines: function (haystack, needle, sessionpart) {
var lines = haystack.split('\r\n'),
needles = [];
for (var i = 0; i < lines.length; i++) {
if (lines[i].substring(0, needle.length) == needle)
needles.push(lines[i]);
}
if (needles.length || !sessionpart) {
return needles;
}
// search session part
lines = sessionpart.split('\r\n');
for (var j = 0; j < lines.length; j++) {
if (lines[j].substring(0, needle.length) == needle) {
needles.push(lines[j]);
}
}
return needles;
},
candidateToJingle: function (line) {
// a=candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0
// <candidate component=... foundation=... generation=... id=... ip=... network=... port=... priority=... protocol=... type=.../>
if (line.indexOf('candidate:') === 0) {
line = 'a=' + line;
} else if (line.substring(0, 12) != 'a=candidate:') {
            console.log('candidateToJingle called with a line that is not a candidate line');
console.log(line);
return null;
}
if (line.substring(line.length - 2) == '\r\n') // chomp it
line = line.substring(0, line.length - 2);
var candidate = {},
elems = line.split(' '),
i;
if (elems[6] != 'typ') {
console.log('did not find typ in the right place');
console.log(line);
return null;
}
candidate.foundation = elems[0].substring(12);
candidate.component = elems[1];
candidate.protocol = elems[2].toLowerCase();
candidate.priority = elems[3];
candidate.ip = elems[4];
candidate.port = elems[5];
// elems[6] => "typ"
candidate.type = elems[7];
candidate.generation = '0'; // default, may be overwritten below
for (i = 8; i < elems.length; i += 2) {
switch (elems[i]) {
case 'raddr':
candidate['rel-addr'] = elems[i + 1];
break;
case 'rport':
candidate['rel-port'] = elems[i + 1];
break;
case 'generation':
candidate.generation = elems[i + 1];
break;
case 'tcptype':
candidate.tcptype = elems[i + 1];
break;
default: // TODO
console.log('not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
}
}
candidate.network = '1';
candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
return candidate;
},
candidateFromJingle: function (cand) {
var line = 'a=candidate:';
line += cand.getAttribute('foundation');
line += ' ';
line += cand.getAttribute('component');
line += ' ';
line += cand.getAttribute('protocol'); //.toUpperCase(); // chrome M23 doesn't like this
line += ' ';
line += cand.getAttribute('priority');
line += ' ';
line += cand.getAttribute('ip');
line += ' ';
line += cand.getAttribute('port');
line += ' ';
line += 'typ';
line += ' ' + cand.getAttribute('type');
line += ' ';
switch (cand.getAttribute('type')) {
case 'srflx':
case 'prflx':
case 'relay':
if (cand.getAttribute('rel-addr') && cand.getAttribute('rel-port')) {
line += 'raddr';
line += ' ';
line += cand.getAttribute('rel-addr');
line += ' ';
line += 'rport';
line += ' ';
line += cand.getAttribute('rel-port');
line += ' ';
}
break;
}
if (cand.getAttribute('protocol').toLowerCase() == 'tcp') {
line += 'tcptype';
line += ' ';
line += cand.getAttribute('tcptype');
line += ' ';
}
line += 'generation';
line += ' ';
line += cand.getAttribute('generation') || '0';
return line + '\r\n';
}
};
exports.SDPUtil = SDPUtil;
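For orientation, a few of the parsers above applied to made-up inputs; the `require` path is an assumption:

```js
// Sketch only: illustrative inputs, not taken from a real session.
var SDPUtil = require('./strophe.jingle.sdp.util.js').SDPUtil; // path is an assumption

SDPUtil.parse_mline('m=audio 54321 RTP/SAVPF 111 103');
// => { media: 'audio', port: '54321', proto: 'RTP/SAVPF', fmt: ['111', '103'] }

SDPUtil.parse_rtpmap('a=rtpmap:111 opus/48000/2');
// => { id: '111', name: 'opus', clockrate: '48000', channels: '2' }

SDPUtil.candidateToJingle(
    'a=candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0');
// => { foundation: '2979166662', component: '1', protocol: 'udp', ..., type: 'host' }
```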


@@ -0,0 +1,254 @@
/**
* Wrapper for built-in http.js to emulate the browser XMLHttpRequest object.
*
* This can be used with JS designed for browsers to improve reuse of code and
* allow the use of existing libraries.
*
* Usage: include("XMLHttpRequest.js") and use XMLHttpRequest per W3C specs.
*
* @todo SSL Support
* @author Dan DeFelippi <dan@driverdan.com>
* @license MIT
*/
var Url = require("url")
,sys = require("util");
exports.XMLHttpRequest = function() {
/**
* Private variables
*/
var self = this;
var http = require('http');
var https = require('https');
// Holds http.js objects
var client;
var request;
var response;
// Request settings
var settings = {};
// Set some default headers
var defaultHeaders = {
"User-Agent": "node.js",
"Accept": "*/*",
};
var headers = defaultHeaders;
/**
* Constants
*/
this.UNSENT = 0;
this.OPENED = 1;
this.HEADERS_RECEIVED = 2;
this.LOADING = 3;
this.DONE = 4;
/**
* Public vars
*/
// Current state
this.readyState = this.UNSENT;
// default ready state change handler in case one is not set or is set late
this.onreadystatechange = function() {};
// Result & response
this.responseText = "";
this.responseXML = "";
this.status = null;
this.statusText = null;
/**
* Open the connection. Currently supports local server requests.
*
     * @param string method Connection method (e.g. GET, POST)
* @param string url URL for the connection.
* @param boolean async Asynchronous connection. Default is true.
* @param string user Username for basic authentication (optional)
* @param string password Password for basic authentication (optional)
*/
this.open = function(method, url, async, user, password) {
settings = {
"method": method,
"url": url,
"async": async || null,
"user": user || null,
"password": password || null
};
this.abort();
setState(this.OPENED);
};
/**
* Sets a header for the request.
*
* @param string header Header name
* @param string value Header value
*/
this.setRequestHeader = function(header, value) {
headers[header] = value;
};
/**
* Gets a header from the server response.
*
* @param string header Name of header to get.
* @return string Text of the header or null if it doesn't exist.
*/
this.getResponseHeader = function(header) {
if (this.readyState > this.OPENED && response.headers[header]) {
return header + ": " + response.headers[header];
}
return null;
};
/**
* Gets all the response headers.
*
* @return string
*/
this.getAllResponseHeaders = function() {
if (this.readyState < this.HEADERS_RECEIVED) {
throw "INVALID_STATE_ERR: Headers have not been received.";
}
var result = "";
for (var i in response.headers) {
result += i + ": " + response.headers[i] + "\r\n";
}
return result.substr(0, result.length - 2);
};
/**
* Sends the request to the server.
*
* @param string data Optional data to send as request body.
*/
this.send = function(data) {
if (this.readyState != this.OPENED) {
throw "INVALID_STATE_ERR: connection must be opened before send() is called";
}
var ssl = false;
var url = Url.parse(settings.url);
// Determine the server
switch (url.protocol) {
case 'https:':
ssl = true;
// SSL & non-SSL both need host, no break here.
case 'http:':
var host = url.hostname;
break;
case undefined:
case '':
var host = "localhost";
break;
default:
throw "Protocol not supported.";
}
// Default to port 80. If accessing localhost on another port be sure
// to use http://localhost:port/path
var port = url.port || (ssl ? 443 : 80);
// Add query string if one is used
var uri = url.pathname + (url.search ? url.search : '');
// Set the Host header or the server may reject the request
this.setRequestHeader("Host", host);
// Set content length header
if (settings.method == "GET" || settings.method == "HEAD") {
data = null;
} else if (data) {
this.setRequestHeader("Content-Length", Buffer.byteLength(data));
if (!headers["Content-Type"]) {
this.setRequestHeader("Content-Type", "text/plain;charset=UTF-8");
}
}
// Use the proper protocol
var doRequest = ssl ? https.request : http.request;
var options = {
host: host,
port: port,
path: uri,
method: settings.method,
headers: headers,
agent: false
};
var req = doRequest(options, function(res) {
response = res;
response.setEncoding("utf8");
setState(self.HEADERS_RECEIVED);
self.status = response.statusCode;
response.on('data', function(chunk) {
// Make sure there's some data
if (chunk) {
self.responseText += chunk;
}
setState(self.LOADING);
});
response.on('end', function() {
setState(self.DONE);
});
            response.on('error', function(error) {
self.handleError(error);
});
}).on('error', function(error) {
self.handleError(error);
});
req.setHeader("Connection", "Close");
// Node 0.4 and later won't accept empty data. Make sure it's needed.
if (data) {
req.write(data);
}
req.end();
};
this.handleError = function(error) {
this.status = 503;
this.statusText = error;
this.responseText = error.stack;
setState(this.DONE);
};
/**
* Aborts a request.
*/
this.abort = function() {
headers = defaultHeaders;
this.readyState = this.UNSENT;
this.responseText = "";
this.responseXML = "";
};
/**
* Changes readyState and calls onreadystatechange.
*
* @param int state New state
*/
var setState = function(state) {
self.readyState = state;
self.onreadystatechange();
    };
};
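A minimal sketch of driving this wrapper, assuming a hypothetical local endpoint and `require` path:

```js
// Sketch only: a simple GET against a hypothetical local server.
var XMLHttpRequest = require('./XMLHttpRequest.js').XMLHttpRequest;

var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8080/status', true);
xhr.onreadystatechange = function() {
    if (xhr.readyState === xhr.DONE) {
        console.log(xhr.status, xhr.responseText);
    }
};
xhr.send(null);
```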


@@ -0,0 +1,83 @@
// This code was written by Tyler Akins and has been placed in the
// public domain. It would be nice if you left this header intact.
// Base64 code from Tyler Akins -- http://rumkin.com
var Base64 = (function () {
var keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
var obj = {
/**
* Encodes a string in base64
* @param {String} input The string to encode in base64.
*/
encode: function (input) {
var output = "";
var chr1, chr2, chr3;
var enc1, enc2, enc3, enc4;
var i = 0;
do {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2) +
keyStr.charAt(enc3) + keyStr.charAt(enc4);
} while (i < input.length);
return output;
},
/**
* Decodes a base64 string.
* @param {String} input The string to decode.
*/
decode: function (input) {
var output = "";
var chr1, chr2, chr3;
var enc1, enc2, enc3, enc4;
var i = 0;
// remove all characters that are not A-Z, a-z, 0-9, +, /, or =
input = input.replace(/[^A-Za-z0-9\+\/\=]/g, '');
do {
enc1 = keyStr.indexOf(input.charAt(i++));
enc2 = keyStr.indexOf(input.charAt(i++));
enc3 = keyStr.indexOf(input.charAt(i++));
enc4 = keyStr.indexOf(input.charAt(i++));
chr1 = (enc1 << 2) | (enc2 >> 4);
chr2 = ((enc2 & 15) << 4) | (enc3 >> 2);
chr3 = ((enc3 & 3) << 6) | enc4;
output = output + String.fromCharCode(chr1);
if (enc3 != 64) {
output = output + String.fromCharCode(chr2);
}
if (enc4 != 64) {
output = output + String.fromCharCode(chr3);
}
} while (i < input.length);
return output;
}
};
return obj;
})();
// Nodify
exports.Base64 = Base64;
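Usage is the usual encode/decode pair; the `require` path is an assumption:

```js
// Sketch only.
var Base64 = require('./base64.js').Base64; // path is an assumption

Base64.encode('Hello');    // => 'SGVsbG8='
Base64.decode('SGVsbG8='); // => 'Hello'
```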


@@ -0,0 +1,279 @@
/*
* A JavaScript implementation of the RSA Data Security, Inc. MD5 Message
* Digest Algorithm, as defined in RFC 1321.
* Version 2.1 Copyright (C) Paul Johnston 1999 - 2002.
* Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
* Distributed under the BSD License
* See http://pajhome.org.uk/crypt/md5 for more info.
*/
var MD5 = (function () {
/*
* Configurable variables. You may need to tweak these to be compatible with
* the server-side, but the defaults work in most cases.
*/
var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */
var b64pad = ""; /* base-64 pad character. "=" for strict RFC compliance */
var chrsz = 8; /* bits per input character. 8 - ASCII; 16 - Unicode */
/*
* Add integers, wrapping at 2^32. This uses 16-bit operations internally
* to work around bugs in some JS interpreters.
*/
var safe_add = function (x, y) {
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xFFFF);
};
/*
* Bitwise rotate a 32-bit number to the left.
*/
var bit_rol = function (num, cnt) {
return (num << cnt) | (num >>> (32 - cnt));
};
/*
* Convert a string to an array of little-endian words
* If chrsz is ASCII, characters >255 have their hi-byte silently ignored.
*/
var str2binl = function (str) {
var bin = [];
var mask = (1 << chrsz) - 1;
for(var i = 0; i < str.length * chrsz; i += chrsz)
{
bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (i%32);
}
return bin;
};
/*
* Convert an array of little-endian words to a string
*/
var binl2str = function (bin) {
var str = "";
var mask = (1 << chrsz) - 1;
for(var i = 0; i < bin.length * 32; i += chrsz)
{
str += String.fromCharCode((bin[i>>5] >>> (i % 32)) & mask);
}
return str;
};
/*
* Convert an array of little-endian words to a hex string.
*/
var binl2hex = function (binarray) {
var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
var str = "";
for(var i = 0; i < binarray.length * 4; i++)
{
str += hex_tab.charAt((binarray[i>>2] >> ((i%4)*8+4)) & 0xF) +
hex_tab.charAt((binarray[i>>2] >> ((i%4)*8 )) & 0xF);
}
return str;
};
/*
* Convert an array of little-endian words to a base-64 string
*/
var binl2b64 = function (binarray) {
var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var str = "";
var triplet, j;
for(var i = 0; i < binarray.length * 4; i += 3)
{
triplet = (((binarray[i >> 2] >> 8 * ( i %4)) & 0xFF) << 16) |
(((binarray[i+1 >> 2] >> 8 * ((i+1)%4)) & 0xFF) << 8 ) |
((binarray[i+2 >> 2] >> 8 * ((i+2)%4)) & 0xFF);
for(j = 0; j < 4; j++)
{
if(i * 8 + j * 6 > binarray.length * 32) { str += b64pad; }
else { str += tab.charAt((triplet >> 6*(3-j)) & 0x3F); }
}
}
return str;
};
/*
* These functions implement the four basic operations the algorithm uses.
*/
var md5_cmn = function (q, a, b, x, s, t) {
return safe_add(bit_rol(safe_add(safe_add(a, q),safe_add(x, t)), s),b);
};
var md5_ff = function (a, b, c, d, x, s, t) {
return md5_cmn((b & c) | ((~b) & d), a, b, x, s, t);
};
var md5_gg = function (a, b, c, d, x, s, t) {
return md5_cmn((b & d) | (c & (~d)), a, b, x, s, t);
};
var md5_hh = function (a, b, c, d, x, s, t) {
return md5_cmn(b ^ c ^ d, a, b, x, s, t);
};
var md5_ii = function (a, b, c, d, x, s, t) {
return md5_cmn(c ^ (b | (~d)), a, b, x, s, t);
};
/*
* Calculate the MD5 of an array of little-endian words, and a bit length
*/
var core_md5 = function (x, len) {
/* append padding */
x[len >> 5] |= 0x80 << ((len) % 32);
x[(((len + 64) >>> 9) << 4) + 14] = len;
var a = 1732584193;
var b = -271733879;
var c = -1732584194;
var d = 271733878;
var olda, oldb, oldc, oldd;
for (var i = 0; i < x.length; i += 16)
{
olda = a;
oldb = b;
oldc = c;
oldd = d;
a = md5_ff(a, b, c, d, x[i+ 0], 7 , -680876936);
d = md5_ff(d, a, b, c, x[i+ 1], 12, -389564586);
c = md5_ff(c, d, a, b, x[i+ 2], 17, 606105819);
b = md5_ff(b, c, d, a, x[i+ 3], 22, -1044525330);
a = md5_ff(a, b, c, d, x[i+ 4], 7 , -176418897);
d = md5_ff(d, a, b, c, x[i+ 5], 12, 1200080426);
c = md5_ff(c, d, a, b, x[i+ 6], 17, -1473231341);
b = md5_ff(b, c, d, a, x[i+ 7], 22, -45705983);
a = md5_ff(a, b, c, d, x[i+ 8], 7 , 1770035416);
d = md5_ff(d, a, b, c, x[i+ 9], 12, -1958414417);
c = md5_ff(c, d, a, b, x[i+10], 17, -42063);
b = md5_ff(b, c, d, a, x[i+11], 22, -1990404162);
a = md5_ff(a, b, c, d, x[i+12], 7 , 1804603682);
d = md5_ff(d, a, b, c, x[i+13], 12, -40341101);
c = md5_ff(c, d, a, b, x[i+14], 17, -1502002290);
b = md5_ff(b, c, d, a, x[i+15], 22, 1236535329);
a = md5_gg(a, b, c, d, x[i+ 1], 5 , -165796510);
d = md5_gg(d, a, b, c, x[i+ 6], 9 , -1069501632);
c = md5_gg(c, d, a, b, x[i+11], 14, 643717713);
b = md5_gg(b, c, d, a, x[i+ 0], 20, -373897302);
a = md5_gg(a, b, c, d, x[i+ 5], 5 , -701558691);
d = md5_gg(d, a, b, c, x[i+10], 9 , 38016083);
c = md5_gg(c, d, a, b, x[i+15], 14, -660478335);
b = md5_gg(b, c, d, a, x[i+ 4], 20, -405537848);
a = md5_gg(a, b, c, d, x[i+ 9], 5 , 568446438);
d = md5_gg(d, a, b, c, x[i+14], 9 , -1019803690);
c = md5_gg(c, d, a, b, x[i+ 3], 14, -187363961);
b = md5_gg(b, c, d, a, x[i+ 8], 20, 1163531501);
a = md5_gg(a, b, c, d, x[i+13], 5 , -1444681467);
d = md5_gg(d, a, b, c, x[i+ 2], 9 , -51403784);
c = md5_gg(c, d, a, b, x[i+ 7], 14, 1735328473);
b = md5_gg(b, c, d, a, x[i+12], 20, -1926607734);
a = md5_hh(a, b, c, d, x[i+ 5], 4 , -378558);
d = md5_hh(d, a, b, c, x[i+ 8], 11, -2022574463);
c = md5_hh(c, d, a, b, x[i+11], 16, 1839030562);
b = md5_hh(b, c, d, a, x[i+14], 23, -35309556);
a = md5_hh(a, b, c, d, x[i+ 1], 4 , -1530992060);
d = md5_hh(d, a, b, c, x[i+ 4], 11, 1272893353);
c = md5_hh(c, d, a, b, x[i+ 7], 16, -155497632);
b = md5_hh(b, c, d, a, x[i+10], 23, -1094730640);
a = md5_hh(a, b, c, d, x[i+13], 4 , 681279174);
d = md5_hh(d, a, b, c, x[i+ 0], 11, -358537222);
c = md5_hh(c, d, a, b, x[i+ 3], 16, -722521979);
b = md5_hh(b, c, d, a, x[i+ 6], 23, 76029189);
a = md5_hh(a, b, c, d, x[i+ 9], 4 , -640364487);
d = md5_hh(d, a, b, c, x[i+12], 11, -421815835);
c = md5_hh(c, d, a, b, x[i+15], 16, 530742520);
b = md5_hh(b, c, d, a, x[i+ 2], 23, -995338651);
a = md5_ii(a, b, c, d, x[i+ 0], 6 , -198630844);
d = md5_ii(d, a, b, c, x[i+ 7], 10, 1126891415);
c = md5_ii(c, d, a, b, x[i+14], 15, -1416354905);
b = md5_ii(b, c, d, a, x[i+ 5], 21, -57434055);
a = md5_ii(a, b, c, d, x[i+12], 6 , 1700485571);
d = md5_ii(d, a, b, c, x[i+ 3], 10, -1894986606);
c = md5_ii(c, d, a, b, x[i+10], 15, -1051523);
b = md5_ii(b, c, d, a, x[i+ 1], 21, -2054922799);
a = md5_ii(a, b, c, d, x[i+ 8], 6 , 1873313359);
d = md5_ii(d, a, b, c, x[i+15], 10, -30611744);
c = md5_ii(c, d, a, b, x[i+ 6], 15, -1560198380);
b = md5_ii(b, c, d, a, x[i+13], 21, 1309151649);
a = md5_ii(a, b, c, d, x[i+ 4], 6 , -145523070);
d = md5_ii(d, a, b, c, x[i+11], 10, -1120210379);
c = md5_ii(c, d, a, b, x[i+ 2], 15, 718787259);
b = md5_ii(b, c, d, a, x[i+ 9], 21, -343485551);
a = safe_add(a, olda);
b = safe_add(b, oldb);
c = safe_add(c, oldc);
d = safe_add(d, oldd);
}
return [a, b, c, d];
};
/*
* Calculate the HMAC-MD5, of a key and some data
*/
var core_hmac_md5 = function (key, data) {
var bkey = str2binl(key);
if(bkey.length > 16) { bkey = core_md5(bkey, key.length * chrsz); }
var ipad = new Array(16), opad = new Array(16);
for(var i = 0; i < 16; i++)
{
ipad[i] = bkey[i] ^ 0x36363636;
opad[i] = bkey[i] ^ 0x5C5C5C5C;
}
var hash = core_md5(ipad.concat(str2binl(data)), 512 + data.length * chrsz);
return core_md5(opad.concat(hash), 512 + 128);
};
var obj = {
/*
* These are the functions you'll usually want to call.
* They take string arguments and return either hex or base-64 encoded
* strings.
*/
hexdigest: function (s) {
return binl2hex(core_md5(str2binl(s), s.length * chrsz));
},
b64digest: function (s) {
return binl2b64(core_md5(str2binl(s), s.length * chrsz));
},
hash: function (s) {
return binl2str(core_md5(str2binl(s), s.length * chrsz));
},
hmac_hexdigest: function (key, data) {
return binl2hex(core_hmac_md5(key, data));
},
hmac_b64digest: function (key, data) {
return binl2b64(core_hmac_md5(key, data));
},
hmac_hash: function (key, data) {
return binl2str(core_hmac_md5(key, data));
},
/*
* Perform a simple self-test to see if the VM is working
*/
test: function () {
return MD5.hexdigest("abc") === "900150983cd24fb0d6963f7d28e17f72";
}
};
return obj;
})();
// Nodify
exports.MD5 = MD5;
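The exported helpers take plain strings; a short sketch, with the `require` path as an assumption:

```js
// Sketch only.
var MD5 = require('./md5.js').MD5; // path is an assumption

MD5.hexdigest('abc'); // => '900150983cd24fb0d6963f7d28e17f72' (the RFC 1321 test vector)
MD5.hmac_hexdigest('key', 'data'); // HMAC-MD5 of 'data' under 'key'
MD5.test(); // => true when the environment computes MD5 correctly
```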

File diff suppressed because it is too large


@@ -0,0 +1,48 @@
var strophe = require("./strophe/strophe.js").Strophe;
var Strophe = strophe.Strophe;
var $iq = strophe.$iq;
var $msg = strophe.$msg;
var $build = strophe.$build;
var $pres = strophe.$pres;
var jsdom = require("jsdom");
var window = jsdom.jsdom().parentWindow;
var $ = require('jquery')(window);
var stropheJingle = require("./strophe.jingle.sdp.js");
var input = '';
process.stdin.on('readable', function() {
var chunk = process.stdin.read();
if (chunk !== null) {
input += chunk;
}
});
process.stdin.on('end', function() {
if (process.argv[2] == '--jingle') {
var elem = $(input);
// app does:
// sess.setRemoteDescription($(iq).find('>jingle'), 'offer');
//console.log(elem.find('>content'));
var sdp = new stropheJingle.SDP('');
sdp.fromJingle(elem);
console.log(sdp.raw);
} else if (process.argv[2] == '--sdp') {
var sdp = new stropheJingle.SDP(input);
var accept = $iq({to: '%(tojid)s',
type: 'set'})
.c('jingle', {xmlns: 'urn:xmpp:jingle:1',
//action: 'session-accept',
action: '%(action)s',
initiator: '%(initiator)s',
responder: '%(responder)s',
sid: '%(sid)s' });
sdp.toJingle(accept, 'responder');
console.log(Strophe.serialize(accept));
}
});

contrib/scripts/kick_users.py (new executable file)

@@ -0,0 +1,88 @@
#!/usr/bin/env python
import json
import sys
import urllib.parse
from argparse import ArgumentParser
import requests
def _mkurl(template, kws):
for key in kws:
template = template.replace(key, kws[key])
return template
def main(hs, room_id, access_token, user_id_prefix, why):
if not why:
why = "Automated kick."
print(
"Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
)
room_state_url = _mkurl(
"$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
{"$HS": hs, "$ROOM": room_id, "$TOKEN": access_token},
)
print("Getting room state => %s" % room_state_url)
res = requests.get(room_state_url)
print("HTTP %s" % res.status_code)
state_events = res.json()
if "error" in state_events:
print("FATAL")
print(state_events)
return
kick_list = []
room_name = room_id
for event in state_events:
if not event["type"] == "m.room.member":
if event["type"] == "m.room.name":
room_name = event["content"].get("name")
continue
if not event["content"].get("membership") == "join":
continue
if event["state_key"].startswith(user_id_prefix):
kick_list.append(event["state_key"])
if len(kick_list) == 0:
print("No user IDs match the prefix '%s'" % user_id_prefix)
return
print("The following user IDs will be kicked from %s" % room_name)
for uid in kick_list:
print(uid)
doit = input("Continue? [Y]es\n")
if len(doit) > 0 and doit.lower() == "y":
print("Kicking members...")
# encode them all
        kick_list = [urllib.parse.quote(uid) for uid in kick_list]
for uid in kick_list:
kick_url = _mkurl(
"$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN",
{"$HS": hs, "$UID": uid, "$ROOM": room_id, "$TOKEN": access_token},
)
kick_body = {"membership": "leave", "reason": why}
print("Kicking %s" % uid)
res = requests.put(kick_url, data=json.dumps(kick_body))
if res.status_code != 200:
print("ERROR: HTTP %s" % res.status_code)
if res.json().get("error"):
print("ERROR: JSON %s" % res.json())
if __name__ == "__main__":
    parser = ArgumentParser(description="Kick members in a room matching a certain user ID prefix.")
parser.add_argument("-u", "--user-id", help="The user ID prefix e.g. '@irc_'")
parser.add_argument("-t", "--token", help="Your access_token")
parser.add_argument("-r", "--room", help="The room ID to kick members in")
parser.add_argument(
"-s", "--homeserver", help="The base HS url e.g. http://matrix.org"
)
parser.add_argument("-w", "--why", help="Reason for the kick. Optional.")
args = parser.parse_args()
if not args.room or not args.token or not args.user_id or not args.homeserver:
parser.print_help()
sys.exit(1)
else:
main(args.homeserver, args.room, args.token, args.user_id, args.why)

debian/changelog

@@ -1,70 +1,3 @@
matrix-synapse-py3 (1.62.0) stable; urgency=medium
* New Synapse release 1.62.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 05 Jul 2022 11:14:15 +0100
matrix-synapse-py3 (1.62.0~rc3) stable; urgency=medium
* New Synapse release 1.62.0rc3.
-- Synapse Packaging team <packages@matrix.org> Mon, 04 Jul 2022 16:07:01 +0100
matrix-synapse-py3 (1.62.0~rc2) stable; urgency=medium
* New Synapse release 1.62.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 01 Jul 2022 11:42:41 +0100
matrix-synapse-py3 (1.62.0~rc1) stable; urgency=medium
* New Synapse release 1.62.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Jun 2022 16:34:57 +0100
matrix-synapse-py3 (1.61.1) stable; urgency=medium
* New Synapse release 1.61.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Jun 2022 14:33:46 +0100
matrix-synapse-py3 (1.61.0) stable; urgency=medium
* New Synapse release 1.61.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Jun 2022 11:44:19 +0100
matrix-synapse-py3 (1.61.0~rc1) stable; urgency=medium
* Remove unused `jitsimeetbridge` experiment from `contrib` directory.
* New Synapse release 1.61.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Jun 2022 12:42:31 +0100
matrix-synapse-py3 (1.60.0) stable; urgency=medium
* New Synapse release 1.60.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 31 May 2022 13:41:22 +0100
matrix-synapse-py3 (1.60.0~rc2) stable; urgency=medium
* New Synapse release 1.60.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 27 May 2022 11:04:55 +0100
matrix-synapse-py3 (1.60.0~rc1) stable; urgency=medium
* New Synapse release 1.60.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 24 May 2022 12:05:01 +0100
matrix-synapse-py3 (1.59.1) stable; urgency=medium
* New Synapse release 1.59.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 18 May 2022 11:41:46 +0100
matrix-synapse-py3 (1.59.0) stable; urgency=medium
* New Synapse release 1.59.0.

debian/copyright

@@ -22,6 +22,29 @@ Files: synapse/config/repository.py
Copyright: 2014-2015, matrix.org
License: Apache-2.0
Files: contrib/jitsimeetbridge/unjingle/strophe/base64.js
Copyright: Public Domain (Tyler Akins http://rumkin.com)
License: public-domain
This code was written by Tyler Akins and has been placed in the
public domain. It would be nice if you left this header intact.
Base64 code from Tyler Akins -- http://rumkin.com
Files: contrib/jitsimeetbridge/unjingle/strophe/md5.js
Copyright: 1999-2002, Paul Johnston & Contributors
License: BSD-3-clause
Files: contrib/jitsimeetbridge/unjingle/strophe/strophe.js
Copyright: 2006-2008, OGG, LLC
License: Expat
Files: contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js
Copyright: 2010 passive.ly LLC
License: Expat
Files: contrib/jitsimeetbridge/unjingle/*.js
Copyright: 2014 Jitsi
License: Apache-2.0
Files: debian/*
Copyright: 2016-2017, Erik Johnston <erik@matrix.org>
2017, Rahul De <rahulde@swecha.net>


@@ -6,14 +6,12 @@ CWD=$(pwd)
cd "$DIR/.." || exit
# Do not override PYTHONPATH if we are in a virtual env
if [ "$VIRTUAL_ENV" = "" ]; then
PYTHONPATH=$(readlink -f "$(pwd)")
export PYTHONPATH
echo "$PYTHONPATH"
fi
PYTHONPATH=$(readlink -f "$(pwd)")
export PYTHONPATH
echo "$PYTHONPATH"
# Create servers which listen on HTTP at 808x and HTTPS at 848x.
for port in 8080 8081 8082; do
echo "Starting server on port $port... "
@@ -21,12 +19,10 @@ for port in 8080 8081 8082; do
mkdir -p demo/$port
pushd demo/$port || exit
# Generate the configuration for the homeserver at localhost:848x, note that
# the homeserver name needs to match the HTTPS listening port for federation
# to properly work.
# Generate the configuration for the homeserver at localhost:848x.
python3 -m synapse.app.homeserver \
--generate-config \
--server-name "localhost:$https_port" \
--server-name "localhost:$port" \
--config-path "$port.config" \
--report-stats no


@@ -40,7 +40,7 @@ FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq git \
apt-get update && apt-get install -y git \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
@@ -55,7 +55,7 @@ RUN \
# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5"
pip install --user git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5
WORKDIR /synapse
@@ -73,7 +73,7 @@ FROM docker.io/python:${PYTHON_VERSION}-slim as builder
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
apt-get update && apt-get install -y \
build-essential \
libffi-dev \
libjpeg-dev \
@@ -118,7 +118,7 @@ LABEL org.opencontainers.image.licenses='Apache-2.0'
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
apt-get update && apt-get install -y \
curl \
gosu \
libjpeg62-turbo \


@@ -1,13 +1,12 @@
# Inherit from the official Synapse docker image
ARG SYNAPSE_VERSION=latest
FROM matrixdotorg/synapse:$SYNAPSE_VERSION
FROM matrixdotorg/synapse
# Install deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && \
DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
redis-server nginx-light
# Install supervisord with pip instead of apt, to avoid installing a second


@@ -8,19 +8,13 @@ docker images that can be run inside Complement for testing purposes.
Note that running Synapse's unit tests from within the docker image is not supported.
## Using the Complement launch script
## Testing with SQLite and single-process Synapse
`scripts-dev/complement.sh` is a script that will automatically build
and run Synapse against Complement.
Consult the [contributing guide][guideComplementSh] for instructions on how to use it.
> Note that `scripts-dev/complement.sh` is a script that will automatically build
> and run an SQLite-based, single-process Synapse against Complement.
[guideComplementSh]: https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-integration-tests-complement
## Building and running the images manually
Under some circumstances, you may wish to build the images manually.
The instructions below will lead you to doing that.
The instructions below will set up Complement testing for a single-process,
SQLite-based Synapse deployment.
Start by building the base Synapse docker image. If you wish to run tests with the latest
release of Synapse, instead of your current checkout, you can skip this step. From the
@@ -30,17 +24,12 @@ root of the repository:
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```
Next, build the workerised Synapse docker image, which is a layer over the base
image.
This will build an image with the tag `matrixdotorg/synapse`.
Next, build the Synapse image for Complement.
```sh
docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
```
Finally, build the multi-purpose image for Complement, which is a layer over the workers image.
```sh
docker build -t complement-synapse -f docker/complement/Dockerfile docker/complement
docker build -t complement-synapse -f "docker/complement/Dockerfile" docker/complement
```
This will build an image with the tag `complement-synapse`, which can be handed to
@@ -48,9 +37,49 @@ Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Ref
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
See [the Complement image README](./complement/README.md) for information about the
expected environment variables.
## Testing with PostgreSQL and single or multi-process Synapse
The above docker image only supports running Synapse with SQLite and in a
single-process topology. The following instructions are used to build a Synapse image for
Complement that supports either single or multi-process topology with a PostgreSQL
database backend.
As with the single-process image, build the base Synapse docker image. If you wish to run
tests with the latest release of Synapse, instead of your current checkout, you can skip
this step. From the root of the repository:
```sh
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```
This will build an image with the tag `matrixdotorg/synapse`.
Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
Again, from the root of the repository:
```sh
docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
```
This will build an image with the tag `matrixdotorg/synapse-workers`.
It's worth noting at this point that this image is fully functional, and
can be used for local testing. See instructions for using the container
under
[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
below.
Finally, build the Synapse image for Complement, which is based on
`matrixdotorg/synapse-workers`.
```sh
docker build -t matrixdotorg/complement-synapse-workers -f docker/complement/SynapseWorkers.Dockerfile docker/complement
```
This will build an image with the tag `matrixdotorg/complement-synapse-workers`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
## Running the Dockerfile-worker image standalone
@@ -84,9 +113,6 @@ docker run -d --name synapse \
...substituting `POSTGRES*` variables for those that match a postgres host you have
available (usually a running postgres docker container).
### Workers
The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
use when running the container. All possible worker names are defined by the keys of the
`WORKERS_CONFIG` variable in [this script](configure_workers_and_start.py), which the
@@ -99,11 +125,8 @@ type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES`
(e.g `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`).
Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers
(leaving only the main process).
The container will only be configured to use Redis-based worker mode if there are
workers enabled.
### Logging
(leaving only the main process). The container is configured to use redis-based worker
mode.
Logs for workers and the main process are logged to stdout and can be viewed with
standard `docker logs` tooling. Worker logs contain their worker name
@@ -113,21 +136,3 @@ Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be writ
`<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at
00:00, according to the container's clock. Logging for the main process must still be
configured by modifying the homeserver's log config in your Synapse data volume.
### Application Services
Setting the `SYNAPSE_AS_REGISTRATION_DIR` environment variable to the path of
a directory (within the container) will cause the configuration script to scan
that directory for `.yaml`/`.yml` registration files.
Synapse will be configured to load these configuration files.
### TLS Termination
Nginx is present in the image to route requests to the appropriate workers,
but it does not serve TLS by default.
You can configure `SYNAPSE_TLS_CERT` and `SYNAPSE_TLS_KEY` to point to a
TLS certificate and key (respectively), both in PEM (textual) format.
In this case, Nginx will additionally serve using HTTPS on port 8448.


@@ -1,45 +1,22 @@
# This dockerfile builds on top of 'docker/Dockerfile-workers' in matrix-org/synapse
# by including a built-in postgres instance, as well as setting up the homeserver so
# that it is ready for testing via Complement.
#
# Instructions for building this image from those it depends on is detailed in this guide:
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
# A dockerfile which builds an image suitable for testing Synapse under
# complement.
ARG SYNAPSE_VERSION=latest
FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
# Install postgresql
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -yqq postgresql-13
FROM matrixdotorg/synapse:${SYNAPSE_VERSION}
# Configure a user and create a database for Synapse
RUN pg_ctlcluster 13 main start && su postgres -c "echo \
\"ALTER USER postgres PASSWORD 'somesecret'; \
CREATE DATABASE synapse \
ENCODING 'UTF8' \
LC_COLLATE='C' \
LC_CTYPE='C' \
template=template0;\" | psql" && pg_ctlcluster 13 main stop
ENV SERVER_NAME=localhost
# Extend the shared homeserver config to disable rate-limiting,
# set Complement's static shared secret, enable registration, amongst other
# tweaks to get Synapse ready for testing.
# To do this, we copy the old template out of the way and then include it
# with Jinja2.
RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2
COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2
COPY conf/* /conf/
# generate a signing key
RUN generate_signing_key -o /conf/server.signing.key
WORKDIR /data
COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
# Copy the entrypoint
COPY conf/start_for_complement.sh /
# Expose nginx's listener ports
EXPOSE 8008 8448
ENTRYPOINT ["/start_for_complement.sh"]
ENTRYPOINT ["/conf/start.sh"]
# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD /bin/sh /healthcheck.sh
CMD curl -fSs http://localhost:8008/health || exit 1


@@ -1,32 +1 @@
# Unified Complement image for Synapse
This is an image for testing Synapse with [the *Complement* integration test suite][complement].
It contains some insecure defaults that are only suitable for testing purposes,
so **please don't use this image for a production server**.
This multi-purpose image is built on top of `Dockerfile-workers` in the parent directory
and can be switched using environment variables between the following configurations:
- Monolithic Synapse with SQLite (default, or `SYNAPSE_COMPLEMENT_DATABASE=sqlite`)
- Monolithic Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres`)
- Workerised Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres` and `SYNAPSE_COMPLEMENT_USE_WORKERS=true`)
The image is self-contained; it contains an integrated Postgres, Redis and Nginx.
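For orientation only, building the image might look like the following sketch, run from the repository root (the tag is an assumption; in practice `scripts-dev/complement.sh` builds and runs it for you):
```sh
# Sketch: build the unified Complement image on top of the workers image.
docker build -t complement-synapse \
  -f docker/complement/Dockerfile docker/complement
```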
## How to get Complement to pass the environment variables through
To pass these environment variables, use [Complement's `COMPLEMENT_SHARE_ENV_PREFIX`][complementEnv]
variable to configure an environment prefix to pass through, then prefix the above options
with that prefix.
Example:
```
COMPLEMENT_SHARE_ENV_PREFIX=PASS_ PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
```
Consult `scripts-dev/complement.sh` in the repository root for a real example.
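Concretely, a full invocation might look like this sketch (`go test ./tests/...` is Complement's usual entry point; the exact flags and any further Complement configuration are assumptions):
```sh
# Sketch: forward the Synapse-specific options through Complement.
COMPLEMENT_SHARE_ENV_PREFIX=PASS_ \
PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres \
PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true \
go test -v ./tests/...
```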
[complement]: https://github.com/matrix-org/complement
[complementEnv]: https://github.com/matrix-org/complement/pull/382
Stuff for building the docker image used for testing under complement.

@@ -0,0 +1,50 @@
# This dockerfile builds on top of 'docker/Dockerfile-worker' in matrix-org/synapse
# by including a built-in postgres instance, as well as setting up the homeserver so
# that it is ready for testing via Complement.
#
# Instructions for building this image from those it depends on are detailed in this guide:
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
FROM matrixdotorg/synapse-workers
# Download a caddy server to stand in front of nginx and terminate TLS using Complement's
# custom CA.
# We include this near the top of the file in order to cache the result.
RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.3.0/caddy_2.3.0_linux_amd64.tar.gz" && \
tar xzf caddy_2.3.0_linux_amd64.tar.gz && rm caddy_2.3.0_linux_amd64.tar.gz && mv caddy /root
# Install postgresql
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
# Configure a user and create a database for Synapse
RUN pg_ctlcluster 13 main start && su postgres -c "echo \
\"ALTER USER postgres PASSWORD 'somesecret'; \
CREATE DATABASE synapse \
ENCODING 'UTF8' \
LC_COLLATE='C' \
LC_CTYPE='C' \
template=template0;\" | psql" && pg_ctlcluster 13 main stop
# Modify the shared homeserver config with postgres support, certificate setup
# and the disabling of rate-limiting
COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml
WORKDIR /data
# Copy the caddy config
COPY conf-workers/caddy.complement.json /root/caddy.json
COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
COPY conf-workers/caddy.supervisord.conf /etc/supervisor/conf.d/caddy.conf
# Copy the entrypoint
COPY conf-workers/start-complement-synapse-workers.sh /
# Expose caddy's listener ports
EXPOSE 8008 8448
ENTRYPOINT ["/start-complement-synapse-workers.sh"]
# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD /bin/sh /healthcheck.sh

@@ -0,0 +1,72 @@
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":8448"
],
"routes": [
{
"match": [
{
"host": [
"{{ server_name }}"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "reverse_proxy",
"upstreams": [
{
"dial": "localhost:8008"
}
]
}
]
}
]
}
],
"terminal": true
}
]
}
}
},
"tls": {
"automation": {
"policies": [
{
"subjects": [
"{{ server_name }}"
],
"issuers": [
{
"module": "internal"
}
],
"on_demand": true
}
]
}
},
"pki": {
"certificate_authorities": {
"local": {
"name": "Complement CA",
"root": {
"certificate": "/complement/ca/ca.crt",
"private_key": "/complement/ca/ca.key"
}
}
}
}
}
}

@@ -0,0 +1,7 @@
[program:caddy]
command=/usr/local/bin/prefix-log /root/caddy run --config /root/caddy.json
autorestart=unexpected
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

@@ -1,9 +1,6 @@
[program:postgres]
command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground
# Only start if START_POSTGRES=1
autostart=%(ENV_START_POSTGRES)s
# Lower priority number = starts first
priority=1

@@ -0,0 +1,44 @@
#!/bin/bash
#
# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
set -e
function log {
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
echo "$d $@"
}
# Replace the server name in the caddy config
sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json
# Set the server name of the homeserver
export SYNAPSE_SERVER_NAME=${SERVER_NAME}
# No need to report stats here
export SYNAPSE_REPORT_STATS=no
# Set postgres authentication details which will be placed in the homeserver config file
export POSTGRES_PASSWORD=somesecret
export POSTGRES_USER=postgres
export POSTGRES_HOST=localhost
# Specify the workers to test with
export SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
frontend_proxy, \
event_creator, \
user_dir, \
media_repository, \
federation_inbound, \
federation_reader, \
federation_sender, \
synchrotron, \
appservice, \
pusher"
# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
exec /configure_workers_and_start.py

@@ -0,0 +1,84 @@
## Server ##
report_stats: False
trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4
## Federation ##
# trust certs signed by Complement's CA
federation_custom_ca_list:
- /complement/ca/ca.crt
# unblacklist RFC1918 addresses
federation_ip_range_blacklist: []
# Disable server rate-limiting
rc_federation:
window_size: 1000
sleep_limit: 10
sleep_delay: 500
reject_limit: 99999
concurrent: 3
rc_message:
per_second: 9999
burst_count: 9999
rc_registration:
per_second: 9999
burst_count: 9999
rc_login:
address:
per_second: 9999
burst_count: 9999
account:
per_second: 9999
burst_count: 9999
failed_attempts:
per_second: 9999
burst_count: 9999
rc_admin_redaction:
per_second: 9999
burst_count: 9999
rc_joins:
local:
per_second: 9999
burst_count: 9999
remote:
per_second: 9999
burst_count: 9999
rc_3pid_validation:
per_second: 1000
burst_count: 1000
rc_invites:
per_room:
per_second: 1000
burst_count: 1000
per_user:
per_second: 1000
burst_count: 1000
federation_rr_transactions_per_room_per_second: 9999
## Experimental Features ##
experimental_features:
# Enable history backfilling support
msc2716_enabled: true
# Enable spaces support
spaces_enabled: true
# Enable jump to date endpoint
msc3030_enabled: true
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"
system_mxid_avatar_url: ""
room_name: "Server Alert"

@@ -1,32 +1,52 @@
{#
This file extends the default 'shared' configuration file (from the 'synapse-workers'
docker image) with Complement-specific tweaks.
The base configuration is moved out of the default path to `shared-orig.yaml.j2`
in the Complement Dockerfile and below we include that original file.
#}
## Server ##
server_name: SERVER_NAME
log_config: /conf/log_config.yaml
report_stats: False
signing_key_path: /conf/server.signing.key
trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
## Listeners ##
tls_certificate_path: /conf/server.tls.crt
tls_private_key_path: /conf/server.tls.key
bcrypt_rounds: 4
## Registration ##
# Needed by Complement to register admin users
# DO NOT USE in a production configuration! This should be a random secret.
registration_shared_secret: complement
listeners:
- port: 8448
bind_addresses: ['::']
type: http
tls: true
resources:
- names: [federation]
- port: 8008
bind_addresses: ['::']
type: http
resources:
- names: [client]
## Database ##
database:
name: "sqlite3"
args:
# We avoid /data, as it is a volume and is not transferred when the container is committed,
# which is a fundamental necessity in complement.
database: "/conf/homeserver.db"
## Federation ##
# trust certs signed by Complement's CA
# trust certs signed by the complement CA
federation_custom_ca_list:
- /complement/ca/ca.crt
# unblacklist RFC1918 addresses
federation_ip_range_blacklist: []
ip_range_blacklist: []
# Disable server rate-limiting
rc_federation:
@@ -81,6 +101,13 @@ rc_invites:
federation_rr_transactions_per_room_per_second: 9999
## API Configuration ##
# A list of application service config files to use
#
app_service_config_files:
AS_REGISTRATION_FILES
## Experimental Features ##
experimental_features:
@@ -90,10 +117,8 @@ experimental_features:
msc2716_enabled: true
# server-side support for partial state in /send_join responses
msc3706_enabled: true
{% if not workers_in_use %}
# client-side support for partial state in /send_join responses
faster_joins: true
{% endif %}
# Enable jump to date endpoint
msc3030_enabled: true
@@ -102,11 +127,3 @@ server_notices:
system_mxid_display_name: "Server Alert"
system_mxid_avatar_url: ""
room_name: "Server Alert"
# Disable sync cache so that initial `/sync` requests are up-to-date.
caches:
sync_response_cache_duration: 0
{% include "shared-orig.yaml.j2" %}

@@ -0,0 +1,24 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
# log to stdout, for easier use with 'docker logs'
stream: 'ext://sys.stdout'
root:
level: INFO
handlers: [console]
disable_existing_loggers: false

docker/complement/conf/start.sh (new executable file)
@@ -0,0 +1,30 @@
#!/bin/sh
set -e
sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml
# Add the application service registration files to the homeserver.yaml config
for filename in /complement/appservice/*.yaml; do
[ -f "$filename" ] || break
as_id=$(basename "$filename" .yaml)
# Insert the path to the registration file, followed by the AS_REGISTRATION_FILES
# marker, so we can add the next application service in the next iteration of this loop
sed -i "s/AS_REGISTRATION_FILES/ - \/complement\/appservice\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml
done
# Remove the AS_REGISTRATION_FILES entry
sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml
# generate an ssl key and cert for the server, signed by the complement CA
openssl genrsa -out /conf/server.tls.key 2048
openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
-subj "/CN=${SERVER_NAME}"
openssl x509 -req -in /conf/server.tls.csr \
-CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-out /conf/server.tls.crt
exec python -m synapse.app.homeserver -c /conf/homeserver.yaml "$@"

@@ -1,106 +0,0 @@
#!/bin/bash
#
# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
set -e
echo "Complement Synapse launcher"
echo " Args: $@"
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS"
function log {
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
echo "$d $@"
}
# Set the server name of the homeserver
export SYNAPSE_SERVER_NAME=${SERVER_NAME}
# No need to report stats here
export SYNAPSE_REPORT_STATS=no
case "$SYNAPSE_COMPLEMENT_DATABASE" in
postgres)
# Set postgres authentication details which will be placed in the homeserver config file
export POSTGRES_PASSWORD=somesecret
export POSTGRES_USER=postgres
export POSTGRES_HOST=localhost
# configure supervisord to start postgres
export START_POSTGRES=true
;;
sqlite|"")
# Configure supervisord not to start Postgres, as we don't need it
export START_POSTGRES=false
;;
*)
echo "Unknown Synapse database: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE" >&2
exit 1
;;
esac
if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
# Specify the workers to test with
export SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
frontend_proxy, \
event_creator, \
user_dir, \
media_repository, \
federation_inbound, \
federation_reader, \
federation_sender, \
synchrotron, \
appservice, \
pusher"
else
# Empty string here means 'main process only'
export SYNAPSE_WORKER_TYPES=""
fi
# Add Complement's appservice registration directory, if there is one
# (It can be absent when there are no application services in this test!)
if [ -d /complement/appservice ]; then
export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice
fi
# Generate a TLS key, then generate a certificate by having Complement's CA sign it
# Note that both the key and certificate are in PEM format (not DER).
# First generate a configuration file to set up a Subject Alternative Name.
cat > /conf/server.tls.conf <<EOF
.include /etc/ssl/openssl.cnf
[SAN]
subjectAltName=DNS:${SERVER_NAME}
EOF
# Generate an RSA key
openssl genrsa -out /conf/server.tls.key 2048
# Generate a certificate signing request
openssl req -new -config /conf/server.tls.conf -key /conf/server.tls.key -out /conf/server.tls.csr \
-subj "/CN=${SERVER_NAME}" -reqexts SAN
# Make the Complement Certificate Authority sign and generate a certificate.
openssl x509 -req -in /conf/server.tls.csr \
-CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-out /conf/server.tls.crt -extfile /conf/server.tls.conf -extensions SAN
# Assert that we have a Subject Alternative Name in the certificate.
# (grep will exit with 1 here if there isn't a SAN in the certificate.)
openssl x509 -in /conf/server.tls.crt -noout -text | grep DNS:
export SYNAPSE_TLS_CERT=/conf/server.tls.crt
export SYNAPSE_TLS_KEY=/conf/server.tls.key
# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
exec /configure_workers_and_start.py

@@ -9,22 +9,6 @@ server {
listen 8008;
listen [::]:8008;
{% if tls_cert_path is not none and tls_key_path is not none %}
listen 8448 ssl;
listen [::]:8448 ssl;
ssl_certificate {{ tls_cert_path }};
ssl_certificate_key {{ tls_key_path }};
# Some directives from cipherlist.eu (fka cipherli.st):
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
ssl_ecdh_curve secp384r1; # Requires nginx >= 1.1.0
ssl_session_cache shared:SSL:10m;
ssl_session_tickets off; # Requires nginx >= 1.5.9
{% endif %}
server_name localhost;
# Nginx by default only allows file uploads up to 1M in size

@@ -3,18 +3,7 @@
# configure_workers_and_start.py uses and amends to this file depending on the workers
# that have been selected.
{% if enable_redis %}
redis:
enabled: true
{% endif %}
{% if appservice_registrations is not none %}
## Application Services ##
# A list of application service config files to use.
app_service_config_files:
{%- for path in appservice_registrations %}
- "{{ path }}"
{%- endfor %}
{%- endif %}
{{ shared_worker_config }}
{{ shared_worker_config }}

@@ -28,6 +28,17 @@ stderr_logfile_maxbytes=0
username=redis
autorestart=true
# Redis can be disabled if the image is being used without workers
autostart={{ enable_redis }}
[program:synapse_main]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
priority=10
# Log startup failures to supervisord's stdout/err
# Regular synapse logs will still go in the configured data directory
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=unexpected
exitcodes=0
# Additional process blocks
{{ worker_config }}

@@ -1,30 +0,0 @@
[program:synapse_main]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver
--config-path="{{ main_config_path }}"
--config-path=/conf/workers/shared.yaml
priority=10
# Log startup failures to supervisord's stdout/err
# Regular synapse logs will still go in the configured data directory
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=unexpected
exitcodes=0
{% for worker in workers %}
[program:synapse_{{ worker.name }}]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m {{ worker.app }}
--config-path="{{ main_config_path }}"
--config-path=/conf/workers/shared.yaml
--config-path=/conf/workers/{{ worker.name }}.yaml
autorestart=unexpected
priority=500
exitcodes=0
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
{% endfor %}

@@ -21,11 +21,6 @@
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
# below. Leave empty for no workers, or set to '*' for all possible workers.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
# will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
# * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified,
# Nginx will be configured to serve TLS on port 8448.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should
@@ -34,11 +29,10 @@
import os
import subprocess
import sys
from pathlib import Path
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set
import jinja2
import yaml
from jinja2 import Environment, FileSystemLoader
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
@@ -52,12 +46,12 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"worker_extra_conf": "",
},
"user_dir": {
"app": "synapse.app.generic_worker",
"app": "synapse.app.user_dir",
"listener_resources": ["client"],
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
],
"shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"},
"shared_extra_conf": {"update_user_directory": False},
"worker_extra_conf": "",
},
"media_repository": {
@@ -78,7 +72,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
"shared_extra_conf": {"notify_appservices_from_worker": "appservice1"},
"shared_extra_conf": {"notify_appservices_from_worker": "appservice"},
"worker_extra_conf": "",
},
"federation_sender": {
@@ -158,7 +152,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
"^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -176,6 +169,21 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
}
# Templates for sections that may be inserted multiple times in config files
SUPERVISORD_PROCESS_CONFIG_BLOCK = """
[program:synapse_{name}]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m {app} \
--config-path="{config_path}" \
--config-path=/conf/workers/shared.yaml \
--config-path=/conf/workers/{name}.yaml
autorestart=unexpected
priority=500
exitcodes=0
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
"""
NGINX_LOCATION_CONFIG_BLOCK = """
location ~* {endpoint} {{
proxy_pass {upstream};
@@ -221,13 +229,12 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
template_vars: The arguments to replace placeholder variables in the template with.
"""
# Read the template file
# We disable autoescape to prevent template variables from being escaped,
# as we're not using HTML.
env = Environment(loader=FileSystemLoader(os.path.dirname(src)), autoescape=False)
template = env.get_template(os.path.basename(src))
with open(src) as infile:
template = infile.read()
# Generate a string from the template.
rendered = template.render(**template_vars)
# Generate a string from the template. We disable autoescape to prevent template
# variables from being escaped.
rendered = jinja2.Template(template, autoescape=False).render(**template_vars)
# Write the generated contents to a file
#
@@ -338,10 +345,13 @@ def generate_worker_files(
# This config file will be passed to all workers, included Synapse's main process.
shared_config: Dict[str, Any] = {"listeners": listeners}
# List of dicts that describe workers.
# We pass this to the Supervisor template later to generate the appropriate
# program blocks.
worker_descriptors: List[Dict[str, Any]] = []
# The supervisord config, the contents of which will be inserted into the
# base supervisord jinja2 template.
#
# Supervisord will be in charge of running everything, from redis to nginx to Synapse
# and all of its worker processes. Load the config template, which defines a few
# services that are necessary to run.
supervisord_config = ""
# Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
# ports of each worker. For example:
@@ -361,8 +371,8 @@ def generate_worker_files(
nginx_locations = {}
# Read the desired worker configuration from the environment
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
if not worker_types_env:
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES")
if worker_types_env is None:
# No workers, just the main process
worker_types = []
else:
@@ -419,7 +429,7 @@ def generate_worker_files(
)
# Enable the worker in supervisord
worker_descriptors.append(worker_config)
supervisord_config += SUPERVISORD_PROCESS_CONFIG_BLOCK.format_map(worker_config)
# Add nginx location blocks for this worker's endpoints (if any are defined)
for pattern in worker_config["endpoint_patterns"]:
@@ -478,27 +488,11 @@ def generate_worker_files(
master_log_config = generate_worker_log_config(environ, "master", data_dir)
shared_config["log_config"] = master_log_config
# Find application service registrations
appservice_registrations = None
appservice_registration_dir = os.environ.get("SYNAPSE_AS_REGISTRATION_DIR")
if appservice_registration_dir:
# Scan for all YAML files that should be application service registrations.
appservice_registrations = [
str(reg_path.resolve())
for reg_path in Path(appservice_registration_dir).iterdir()
if reg_path.suffix.lower() in (".yaml", ".yml")
]
workers_in_use = len(worker_types) > 0
# Shared homeserver config
convert(
"/conf/shared.yaml.j2",
"/conf/workers/shared.yaml",
shared_worker_config=yaml.dump(shared_config),
appservice_registrations=appservice_registrations,
enable_redis=workers_in_use,
workers_in_use=workers_in_use,
)
# Nginx config
@@ -507,8 +501,6 @@ def generate_worker_files(
"/etc/nginx/conf.d/matrix-synapse.conf",
worker_locations=nginx_location_config,
upstream_directives=nginx_upstream_config,
tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
)
# Supervisord config
@@ -517,14 +509,7 @@ def generate_worker_files(
"/conf/supervisord.conf.j2",
"/etc/supervisor/supervisord.conf",
main_config_path=config_path,
enable_redis=workers_in_use,
)
convert(
"/conf/synapse.supervisord.conf.j2",
"/etc/supervisor/conf.d/synapse.conf",
workers=worker_descriptors,
main_config_path=config_path,
worker_config=supervisord_config,
)
# healthcheck config

@@ -1,12 +1,26 @@
# This file is maintained as an up-to-date snapshot of the default
# homeserver.yaml configuration generated by Synapse. You can find a
# complete accounting of possible configuration options at
# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html
# homeserver.yaml configuration generated by Synapse.
#
# It is intended to act as a reference for the default configuration,
# helping admins keep track of new options and other changes, and compare
# their configs with the current default. As such, many of the actual
# config values shown are placeholders.
#
# It is *not* intended to be copied and used as the basis for a real
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in
# https://matrix-org.github.io/synapse/latest/setup/installation.html.
#
# Configuration options that take a time period can be set using a number
# followed by a letter. Letters have the following meanings:
# s = second
# m = minute
# h = hour
# d = day
# w = week
# y = year
# For example, setting redaction_retention_period: 5m would remove redacted
# messages from the database after 5 minutes, rather than 5 months.
################################################################################

@@ -55,6 +55,7 @@
- [Admin API](usage/administration/admin_api/README.md)
- [Account Validity](admin_api/account_validity.md)
- [Background Updates](usage/administration/admin_api/background_updates.md)
- [Delete Group](admin_api/delete_group.md)
- [Event Reports](admin_api/event_reports.md)
- [Media](admin_api/media_admin_api.md)
- [Purge History](admin_api/purge_history_api.md)
@@ -87,9 +88,7 @@
- [OpenTracing](opentracing.md)
- [Database Schemas](development/database_schema.md)
- [Experimental features](development/experimental_features.md)
- [Dependency management](development/dependencies.md)
- [Synapse Architecture]()
- [Cancellation](development/synapse_architecture/cancellation.md)
- [Log Contexts](log_contexts.md)
- [Replication](replication.md)
- [TCP Replication](tcp_replication.md)

@@ -0,0 +1,14 @@
# Delete a local group
This API lets a server admin delete a local group. Doing so will kick all
users out of the group so that their clients will correctly handle the group
being deleted.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:
```
POST /_synapse/admin/v1/delete_group/<group_id>
```
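For example (the group ID and access token are placeholders; group IDs begin with `+` and must be URL-encoded in the path):
```sh
# Sketch: delete a local group as a server admin.
curl -X POST \
  -H "Authorization: Bearer <admin_access_token>" \
  "http://localhost:8008/_synapse/admin/v1/delete_group/%2Bgroup%3Aexample.org"
```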

@@ -115,9 +115,7 @@ URL parameters:
Body parameters:
- `password` - string, optional. If provided, the user's password is updated and all
devices are logged out, unless `logout_devices` is set to `false`.
- `logout_devices` - bool, optional, defaults to `true`. If set to false, devices aren't
logged out even when `password` is provided.
devices are logged out.
- `displayname` - string, optional, defaults to the value of `user_id`.
- `threepids` - array, optional, allows setting the third-party IDs (email, msisdn)
- `medium` - string. Kind of third-party ID, either `email` or `msisdn`.

@@ -206,32 +206,7 @@ This means that we need to run our unit tests against PostgreSQL too. Our CI doe
this automatically for pull requests and release candidates, but it's sometimes
useful to reproduce this locally.
#### Using Docker
The easiest way to do so is to run Postgres via a docker container. In one
terminal:
```shell
docker run --rm -e POSTGRES_PASSWORD=mysecretpassword -e POSTGRES_USER=postgres -e POSTGRES_DB=postgres -p 5432:5432 postgres:14
```
If you see an error like
```
docker: Error response from daemon: driver failed programming external connectivity on endpoint nice_ride (b57bbe2e251b70015518d00c9981e8cb8346b5c785250341a6c53e3c899875f1): Error starting userland proxy: listen tcp4 0.0.0.0:5432: bind: address already in use.
```
then something is already bound to port 5432. You're probably already running postgres locally.
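If in doubt, one way to check what is holding the port (exact tooling varies by platform):
```sh
# Sketch: identify the process listening on 5432 before retrying.
ss -ltnp 'sport = :5432'   # Linux; on macOS, try: lsof -i :5432
```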
Once you have a postgres server running, invoke `trial` in a second terminal:
```shell
SYNAPSE_POSTGRES=1 SYNAPSE_POSTGRES_HOST=127.0.0.1 SYNAPSE_POSTGRES_USER=postgres SYNAPSE_POSTGRES_PASSWORD=mysecretpassword poetry run trial tests
```
#### Using an existing Postgres installation
If you have postgres already installed on your system, you can run `trial` with the
To do so, [configure Postgres](../postgres.md) and run `trial` with the
following environment variables matching your configuration:
- `SYNAPSE_POSTGRES` to anything nonempty
@@ -254,8 +229,8 @@ You don't need to specify the host, user, port or password if your Postgres
server is set to authenticate you over the UNIX socket (i.e. if the `psql` command
works without further arguments).
Your Postgres account needs to be able to create databases; see the postgres
docs for [`ALTER ROLE`](https://www.postgresql.org/docs/current/sql-alterrole.html).
Your Postgres account needs to be able to create databases.
## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).
@@ -304,25 +279,6 @@ To run a specific test, you can specify the whole name structure:
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order
```
The above will run a monolithic (single-process) Synapse with SQLite as the database. For other configurations, try:
- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
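For instance, combining these with a single test (reusing the test name from the example above):
```sh
# Sketch: workerised Synapse on Postgres, running one Complement test.
WORKERS=1 COMPLEMENT_DIR=../complement \
  ./scripts-dev/complement.sh -run TestImportHistoricalMessages
```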
### Prettier formatting with `gotestfmt`
If you want to format the output of the tests the same way as it looks in CI,
install [gotestfmt](https://github.com/haveyoudebuggedit/gotestfmt).
You can then use this incantation to format the tests appropriately:
```sh
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -json | gotestfmt -hide successful-tests
```
(Remove `-hide successful-tests` if you don't want to hide successful tests.)
### Access database for homeserver after Complement test runs.
@@ -441,8 +397,8 @@ same lightweight approach that the Linux Kernel
[submitting patches process](
https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin),
[Docker](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO ([Developer Certificate of Origin](http://developercertificate.org/)).
This is a simple declaration that you wrote
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix:
```

@@ -5,7 +5,7 @@
Requires you to have a [Synapse development environment setup](https://matrix-org.github.io/synapse/develop/development/contributing_guide.html#4-install-the-dependencies).
The demo setup allows running three federation Synapse servers, with server
names `localhost:8480`, `localhost:8481`, and `localhost:8482`.
names `localhost:8080`, `localhost:8081`, and `localhost:8082`.
You can access them via any Matrix client over HTTP at `localhost:8080`,
`localhost:8081`, and `localhost:8082` or over HTTPS at `localhost:8480`,
@@ -20,10 +20,9 @@ and the servers are configured in a highly insecure way, including:
The servers are configured to store their data under `demo/8080`, `demo/8081`, and
`demo/8082`. This includes configuration, logs, SQLite databases, and media.
Note that when joining a public room on a different homeserver via "#foo:bar.net",
then you are (in the current implementation) joining a room with room_id "foo".
This means that it won't work if your homeserver already has a room with that
name.
Note that when joining a public room on a different HS via "#foo:bar.net", then
you are (in the current impl) joining a room with room_id "foo". This means that
it won't work if your HS already has a room with that name.
## Using the demo scripts

@@ -1,392 +0,0 @@
# Cancellation
Sometimes, requests take a long time to service and clients disconnect
before Synapse produces a response. To avoid wasting resources, Synapse
can cancel request processing for select endpoints marked with the
`@cancellable` decorator.
Synapse makes use of Twisted's `Deferred.cancel()` feature to make
cancellation work. The `@cancellable` decorator does nothing by itself
and merely acts as a flag, signalling to developers and other code alike
that a method can be cancelled.
## Enabling cancellation for an endpoint
1. Check that the endpoint method, and any `async` functions in its call
tree handle cancellation correctly. See
[Handling cancellation correctly](#handling-cancellation-correctly)
for a list of things to look out for.
2. Add the `@cancellable` decorator to the `on_GET/POST/PUT/DELETE`
method. It's not recommended to make non-`GET` methods cancellable,
since cancellation midway through some database updates is less
likely to be handled correctly.
## Mechanics
There are two stages to cancellation: downward propagation of a
`cancel()` call, followed by upwards propagation of a `CancelledError`
out of a blocked `await`.
Both Twisted and asyncio have a cancellation mechanism.
| | Method | Exception | Exception inherits from |
|---------------|---------------------|-----------------------------------------|-------------------------|
| Twisted | `Deferred.cancel()` | `twisted.internet.defer.CancelledError` | `Exception` (!) |
| asyncio | `Task.cancel()` | `asyncio.CancelledError` | `BaseException` |
### Deferred.cancel()
When Synapse starts handling a request, it runs the async method
responsible for handling it using `defer.ensureDeferred`, which returns
a `Deferred`. For example:
```python
def do_something() -> Deferred[None]:
...
@cancellable
async def on_GET() -> Tuple[int, JsonDict]:
d = make_deferred_yieldable(do_something())
await d
return 200, {}
request = defer.ensureDeferred(on_GET())
```
When a client disconnects early, Synapse checks for the presence of the
`@cancellable` decorator on `on_GET`. Since `on_GET` is cancellable,
`Deferred.cancel()` is called on the `Deferred` from
`defer.ensureDeferred`, ie. `request`. Twisted knows which `Deferred`
`request` is waiting on and passes the `cancel()` call on to `d`.
The `Deferred` being waited on, `d`, may have its own handling for
`cancel()` and pass the call on to other `Deferred`s.
Eventually, a `Deferred` handles the `cancel()` call by resolving itself
with a `CancelledError`.
### CancelledError
The `CancelledError` gets raised out of the `await` and bubbles up, as
per normal Python exception handling.
## Handling cancellation correctly
In general, when writing code that might be subject to cancellation, two
things must be considered:
* The effect of `CancelledError`s raised out of `await`s.
* The effect of `Deferred`s being `cancel()`ed.
Examples of code that handles cancellation incorrectly include:
* `try-except` blocks which swallow `CancelledError`s.
* Code that shares the same `Deferred`, which may be cancelled, between
multiple requests.
* Code that starts some processing that's exempt from cancellation, but
uses a logging context from cancellable code. The logging context
will be finished upon cancellation, while the uncancelled processing
is still using it.
Some common patterns are listed below in more detail.
### `async` function calls
Most functions in Synapse are relatively straightforward from a
cancellation standpoint: they don't do anything with `Deferred`s and
purely call and `await` other `async` functions.
An `async` function handles cancellation correctly if its own code
handles cancellation correctly and all the async functions it calls
handle cancellation correctly. For example:
```python
async def do_two_things() -> None:
check_something()
await do_something()
await do_something_else()
```
`do_two_things` handles cancellation correctly if `do_something` and
`do_something_else` handle cancellation correctly.
That is, when checking whether a function handles cancellation
correctly, its implementation and all its `async` function calls need to
be checked, recursively.
As `check_something` is not `async`, it does not need to be checked.
### CancelledErrors
Because Twisted's `CancelledError`s are `Exception`s, it's easy to
accidentally catch and suppress them. Care must be taken to ensure that
`CancelledError`s are allowed to propagate upwards.
<table width="100%">
<tr>
<td width="50%" valign="top">
**Bad**:
```python
try:
await do_something()
except Exception:
# `CancelledError` gets swallowed here.
logger.info(...)
```
</td>
<td width="50%" valign="top">
**Good**:
```python
try:
await do_something()
except CancelledError:
raise
except Exception:
logger.info(...)
```
</td>
</tr>
<tr>
<td width="50%" valign="top">
**OK**:
```python
try:
check_something()
# A `CancelledError` won't ever be raised here.
except Exception:
logger.info(...)
```
</td>
<td width="50%" valign="top">
**Good**:
```python
try:
await do_something()
except ValueError:
logger.info(...)
```
</td>
</tr>
</table>
#### defer.gatherResults
`defer.gatherResults` produces a `Deferred` which:
* broadcasts `cancel()` calls to every `Deferred` being waited on.
* wraps the first exception it sees in a `FirstError`.
Together, this means that `CancelledError`s will be wrapped in
a `FirstError` unless unwrapped. Such `FirstError`s are liable to be
swallowed, so they must be unwrapped.
<table width="100%">
<tr>
<td width="50%" valign="top">
**Bad**:
```python
async def do_something() -> None:
await make_deferred_yieldable(
defer.gatherResults([...], consumeErrors=True)
)
try:
await do_something()
except CancelledError:
raise
except Exception:
# `FirstError(CancelledError)` gets swallowed here.
logger.info(...)
```
</td>
<td width="50%" valign="top">
**Good**:
```python
async def do_something() -> None:
await make_deferred_yieldable(
defer.gatherResults([...], consumeErrors=True)
).addErrback(unwrapFirstError)
try:
await do_something()
except CancelledError:
raise
except Exception:
logger.info(...)
```
</td>
</tr>
</table>
### Creation of `Deferred`s
If a function creates a `Deferred`, the effect of cancelling it must be considered. `Deferred`s that get shared are likely to have unintended behaviour when cancelled.
<table width="100%">
<tr>
<td width="50%" valign="top">
**Bad**:
```python
cache: Dict[str, Deferred[None]] = {}
def wait_for_room(room_id: str) -> Deferred[None]:
deferred = cache.get(room_id)
if deferred is None:
deferred = Deferred()
cache[room_id] = deferred
# `deferred` can have multiple waiters.
# All of them will observe a `CancelledError`
# if any one of them is cancelled.
return make_deferred_yieldable(deferred)
# Request 1
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
# Request 2
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
```
</td>
<td width="50%" valign="top">
**Good**:
```python
cache: Dict[str, Deferred[None]] = {}
def wait_for_room(room_id: str) -> Deferred[None]:
deferred = cache.get(room_id)
if deferred is None:
deferred = Deferred()
cache[room_id] = deferred
# `deferred` will never be cancelled now.
# A `CancelledError` will still come out of
# the `await`.
# `delay_cancellation` may also be used.
return make_deferred_yieldable(stop_cancellation(deferred))
# Request 1
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
# Request 2
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
```
</td>
</tr>
<tr>
<td width="50%" valign="top">
</td>
<td width="50%" valign="top">
**Good**:
```python
cache: Dict[str, List[Deferred[None]]] = {}
def wait_for_room(room_id: str) -> Deferred[None]:
if room_id not in cache:
cache[room_id] = []
# Each request gets its own `Deferred` to wait on.
deferred = Deferred()
cache[room_id].append(deferred)
return make_deferred_yieldable(deferred)
# Request 1
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
# Request 2
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
```
</td>
</table>
### Uncancelled processing
Some `async` functions may kick off some `async` processing which is
intentionally protected from cancellation, by `stop_cancellation` or
other means. If the `async` processing inherits the logcontext of the
request which initiated it, care must be taken to ensure that the
logcontext is not finished before the `async` processing completes.
<table width="100%">
<tr>
<td width="50%" valign="top">
**Bad**:
```python
cache: Optional[ObservableDeferred[None]] = None
async def do_something_else(
to_resolve: Deferred[None]
) -> None:
await ...
logger.info("done!")
to_resolve.callback(None)
async def do_something() -> None:
if not cache:
to_resolve = Deferred()
cache = ObservableDeferred(to_resolve)
# `do_something_else` will never be cancelled and
# can outlive the `request-1` logging context.
run_in_background(do_something_else, to_resolve)
await make_deferred_yieldable(cache.observe())
with LoggingContext("request-1"):
await do_something()
```
</td>
<td width="50%" valign="top">
**Good**:
```python
cache: Optional[ObservableDeferred[None]] = None
async def do_something_else(
to_resolve: Deferred[None]
) -> None:
await ...
logger.info("done!")
to_resolve.callback(None)
async def do_something() -> None:
if not cache:
to_resolve = Deferred()
cache = ObservableDeferred(to_resolve)
run_in_background(do_something_else, to_resolve)
# We'll wait until `do_something_else` is
# done before raising a `CancelledError`.
await make_deferred_yieldable(
delay_cancellation(cache.observe())
)
else:
await make_deferred_yieldable(cache.observe())
with LoggingContext("request-1"):
await do_something()
```
</td>
</tr>
<tr>
<td width="50%">
**OK**:
```python
cache: Optional[ObservableDeferred[None]] = None
async def do_something_else(
to_resolve: Deferred[None]
) -> None:
await ...
logger.info("done!")
to_resolve.callback(None)
async def do_something() -> None:
if not cache:
to_resolve = Deferred()
cache = ObservableDeferred(to_resolve)
# `do_something_else` will get its own independent
# logging context. `request-1` will not count any
# metrics from `do_something_else`.
run_as_background_process(
"do_something_else",
do_something_else,
to_resolve,
)
await make_deferred_yieldable(cache.observe())
with LoggingContext("request-1"):
await do_something()
```
</td>
<td width="50%">
</td>
</tr>
</table>

@@ -37,19 +37,19 @@ As with other login types, there are additional fields (e.g. `device_id` and
## Preparing Synapse
The JSON Web Token integration in Synapse uses the
[`Authlib`](https://docs.authlib.org/en/latest/index.html) library, which must be installed
[`PyJWT`](https://pypi.org/project/pyjwt/) library, which must be installed
as follows:
* The relevant libraries are included in the Docker images and Debian packages
provided by `matrix.org` so no further action is needed.
* The relevant libraries are included in the Docker images and Debian packages
provided by `matrix.org` so no further action is needed.
* If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip
install synapse[jwt]` to install the necessary dependencies.
* If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip
install synapse[pyjwt]` to install the necessary dependencies.
* For other installation mechanisms, see the documentation provided by the
maintainer.
* For other installation mechanisms, see the documentation provided by the
maintainer.
To enable the JSON web token integration, you should then add a `jwt_config` section
To enable the JSON web token integration, you should then add an `jwt_config` section
to your configuration file (or uncomment the `enabled: true` line in the
existing section). See [sample_config.yaml](./sample_config.yaml) for some
sample settings.
@@ -57,7 +57,7 @@ sample settings.
## How to test JWT as a developer
Although JSON Web Tokens are typically generated from an external server, the
example below uses a locally generated JWT.
examples below use [PyJWT](https://pyjwt.readthedocs.io/en/latest/) directly.
1. Configure Synapse with JWT logins, note that this example uses a pre-shared
secret and an algorithm of HS256:
@@ -70,21 +70,10 @@ example below uses a locally generated JWT.
```
2. Generate a JSON web token:
You can use the following short Python snippet to generate a JWT
protected by an HMAC.
Take care that the `secret` and the algorithm given in the `header` match
the entries from `jwt_config` above.
```python
from authlib.jose import jwt
header = {"alg": "HS256"}
payload = {"sub": "user1", "aud": ["audience"]}
secret = "my-secret-token"
result = jwt.encode(header, payload, secret)
print(result.decode("ascii"))
```
```bash
$ pyjwt --key=my-secret-token --alg=HS256 encode sub=test-user
eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0LXVzZXIifQ.Ag71GT8v01UO3w80aqRPTeuVPBIBZkYhNTJJ-_-zQIc
```
3. Query for the login types and ensure `org.matrix.login.jwt` is there:
```bash

@@ -117,7 +117,7 @@ In this example, we define three jobs:
Note that this example is tailored to show different configurations and
features slightly more jobs than is probably necessary (in practice, a
server admin would probably consider it better to replace the two last
jobs with one that runs once a day and handles rooms which
jobs with one that runs once a day and handles rooms which which
policy's `max_lifetime` is greater than 3 days).
Keep in mind, when configuring these jobs, that a purge job can become

Some files were not shown because too many files have changed in this diff.