Compare commits


6 Commits

Author · SHA1 · Message · Date

Travis Ralston · e70810934f · Fix group server for older synapse · 2020-08-24 11:48:50 -06:00
Travis Ralston · b8a05b5a3f · Add rudimentary API for promoting/demoting other people in a group (for https://github.com/matrix-org/synapse/issues/2855, initial) · 2020-08-21 19:35:36 -06:00
Ben Banfield-Zanin · 2e9f389fd2 · Merge remote-tracking branch 'origin/release-v1.15.1' into bbz/info-mainline-1.15 · 2020-06-25 08:33:23 +01:00
Ben Banfield-Zanin · 80c66c4bca · Merge remote-tracking branch 'origin/babolivier/info_mainline' into bbz/info-mainline2 · 2020-04-30 08:24:15 +01:00
Brendan Abolivier · 09cb7dec5f · Improve error message · 2020-04-23 17:22:47 +02:00
Brendan Abolivier · 456fa172cf · Implement use of internal-info · 2020-04-23 13:22:02 +02:00
1036 changed files with 31877 additions and 68361 deletions

View File

@@ -15,7 +15,6 @@
# limitations under the License.
import logging
from synapse.storage.engines import create_engine
logger = logging.getLogger("create_postgres_db")

View File

@@ -6,6 +6,8 @@
set -ex
apt-get update
apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev tox
export LANG="C.UTF-8"
exec tox -e py35-old,combine

Binary file not shown.

View File

@@ -1,10 +1,41 @@
# This file serves as a blacklist for SyTest tests that we expect will fail in
# Synapse when run under worker mode. For more details, see sytest-blacklist.
Message history can be paginated
Can re-join room if re-invited
The only membership state included in an initial sync is for all the senders in the timeline
Local device key changes get to remote servers
If remote user leaves room we no longer receive device updates
Forgotten room messages cannot be paginated
Inbound federation can get public room list
Members from the gap are included in gappy incr LL sync
Leaves are present in non-gapped incremental syncs
Old leaves are present in gapped incremental syncs
User sees updates to presence from other users in the incremental sync.
Gapped incremental syncs include all state changes
Old members are included in gappy incr LL sync if they start speaking
# new failures as of https://github.com/matrix-org/sytest/pull/732
Device list doesn't change if remote server is down
Remote servers cannot set power levels in rooms without existing powerlevels
Remote servers should reject attempts by non-creators to set the power levels
# https://buildkite.com/matrix-dot-org/synapse/builds/6134#6f67bf47-e234-474d-80e8-c6e1868b15c5
Server correctly handles incoming m.device_list_update
# this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536
Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state
Can get rooms/{roomId}/members at a given point

View File

@@ -1,35 +1,24 @@
version: 2.1
version: 2
jobs:
dockerhubuploadrelease:
docker:
- image: docker:git
machine: true
steps:
- checkout
- docker_prepare
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
# for release builds, we want to get the amd64 image out asap, so first
# we do an amd64-only build, before following up with a multiarch build.
- docker_build:
tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
platforms: linux/amd64
- docker_build:
tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
platforms: linux/amd64,linux/arm/v7,linux/arm64
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
dockerhubuploadlatest:
docker:
- image: docker:git
machine: true
steps:
- checkout
- docker_prepare
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
# for `latest`, we don't want the arm images to disappear, so don't update the tag
# until all of the platforms are built.
- docker_build:
tag: -t matrixdotorg/synapse:latest
platforms: linux/amd64,linux/arm/v7,linux/arm64
- run: docker push matrixdotorg/synapse:latest
- run: docker push matrixdotorg/synapse:latest-py3
workflows:
version: 2
build:
jobs:
- dockerhubuploadrelease:
@@ -42,37 +31,3 @@ workflows:
filters:
branches:
only: master
commands:
docker_prepare:
description: Sets up a remote docker server, downloads the buildx cli plugin, and enables multiarch images
parameters:
buildx_version:
type: string
default: "v0.4.1"
steps:
- setup_remote_docker:
# 19.03.13 was the most recent available on circleci at the time of
# writing.
version: 19.03.13
- run: apk add --no-cache curl
- run: mkdir -vp ~/.docker/cli-plugins/ ~/dockercache
- run: curl --silent -L "https://github.com/docker/buildx/releases/download/<< parameters.buildx_version >>/buildx-<< parameters.buildx_version >>.linux-amd64" > ~/.docker/cli-plugins/docker-buildx
- run: chmod a+x ~/.docker/cli-plugins/docker-buildx
# install qemu links in /proc/sys/fs/binfmt_misc on the docker instance running the circleci job
- run: docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
# create a context named `builder` for the builds
- run: docker context create builder
# create a buildx builder using the new context, and set it as the default
- run: docker buildx create builder --use
docker_build:
description: Builds and pushed images to dockerhub using buildx
parameters:
platforms:
type: string
default: linux/amd64
tag:
type: string
steps:
- run: docker buildx build -f docker/Dockerfile --push --platform << parameters.platforms >> --label gitsha1=${CIRCLE_SHA1} << parameters.tag >> --progress=plain .
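# For illustration, a hand-run sketch of roughly what the docker_prepare/docker_build
# commands above amount to (buildx plugin assumed to be installed; the platform list and
# tag are examples taken from the config above):
#   docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
#   docker context create builder
#   docker buildx create builder --use
#   docker buildx build -f docker/Dockerfile --push --platform linux/amd64,linux/arm/v7,linux/arm64 --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} .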

View File

@@ -4,12 +4,12 @@ about: Create a report to help us improve
---
<!--
**THIS IS NOT A SUPPORT CHANNEL!**
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**,
please ask in **#synapse:matrix.org** (using a matrix.org account if necessary)
<!--
If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
This is a bug report template. By following the instructions below and

.gitignore (vendored, 3 changed lines)
View File

@@ -12,18 +12,15 @@
_trial_temp/
_trial_temp*/
/out
.DS_Store
# stuff that is likely to exist when you run a server locally
/*.db
/*.log
/*.log.*
/*.log.config
/*.pid
/.python-version
/*.signing.key
/env/
/.venv*/
/homeserver*.yaml
/logs
/media_store/

CHANGES.md (1691 changed lines)

File diff suppressed because it is too large.

View File

@@ -17,9 +17,9 @@ https://help.github.com/articles/using-pull-requests/) to ask us to pull your
changes into our repo.
Some other points to follow:
* Please base your changes on the `develop` branch.
* Please follow the [code style requirements](#code-style).
* Please include a [changelog entry](#changelog) with each PR.
@@ -46,7 +46,7 @@ locally. You'll need python 3.6 or later, and to install a number of tools:
```
# Install the dependencies
pip install -e ".[lint,mypy]"
pip install -U black flake8 flake8-comprehensions isort
# Run the linter script
./scripts-dev/lint.sh
@@ -63,10 +63,6 @@ run-time:
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
```
You can also provide the `-d` option, which will lint the files that have been
changed since the last git commit. This will often be significantly faster than
linting the whole codebase.
Before pushing new changes, ensure they don't produce linting errors. Commit any
files that were corrected.
@@ -156,24 +152,6 @@ directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)
## Documentation
There is a growing amount of documentation located in the [docs](docs)
directory. This documentation is intended primarily for sysadmins running their
own Synapse instance, as well as developers interacting externally with
Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
regarding Synapse's Admin API, which is used mostly by sysadmins and external
service developers.
New files added to both folders should be written in [Github-Flavoured
Markdown](https://guides.github.com/features/mastering-markdown/), and attempts
should be made to migrate existing documents to markdown where possible.
Some documentation also exists in [Synapse's Github
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.
## Sign off
In order to have a concrete record that your contribution is intentional

View File

@@ -1,44 +1,17 @@
# Installation Instructions
- [Choosing your server name](#choosing-your-server-name)
- [Installing Synapse](#installing-synapse)
- [Installing from source](#installing-from-source)
- [Platform-Specific Instructions](#platform-specific-instructions)
- [Prebuilt packages](#prebuilt-packages)
- [Setting up Synapse](#setting-up-synapse)
- [TLS certificates](#tls-certificates)
- [Email](#email)
- [Registering a user](#registering-a-user)
- [Setting up a TURN server](#setting-up-a-turn-server)
- [URL previews](#url-previews)
- [Troubleshooting Installation](#troubleshooting-installation)
There are 3 steps to follow under **Installation Instructions**.
- [Installation Instructions](#installation-instructions)
- [Choosing your server name](#choosing-your-server-name)
- [Installing Synapse](#installing-synapse)
- [Installing from source](#installing-from-source)
- [Platform-Specific Instructions](#platform-specific-instructions)
- [Debian/Ubuntu/Raspbian](#debianubunturaspbian)
- [ArchLinux](#archlinux)
- [CentOS/Fedora](#centosfedora)
- [macOS](#macos)
- [OpenSUSE](#opensuse)
- [OpenBSD](#openbsd)
- [Windows](#windows)
- [Prebuilt packages](#prebuilt-packages)
- [Docker images and Ansible playbooks](#docker-images-and-ansible-playbooks)
- [Debian/Ubuntu](#debianubuntu)
- [Matrix.org packages](#matrixorg-packages)
- [Downstream Debian packages](#downstream-debian-packages)
- [Downstream Ubuntu packages](#downstream-ubuntu-packages)
- [Fedora](#fedora)
- [OpenSUSE](#opensuse-1)
- [SUSE Linux Enterprise Server](#suse-linux-enterprise-server)
- [ArchLinux](#archlinux-1)
- [Void Linux](#void-linux)
- [FreeBSD](#freebsd)
- [OpenBSD](#openbsd-1)
- [NixOS](#nixos)
- [Setting up Synapse](#setting-up-synapse)
- [Using PostgreSQL](#using-postgresql)
- [TLS certificates](#tls-certificates)
- [Client Well-Known URI](#client-well-known-uri)
- [Email](#email)
- [Registering a user](#registering-a-user)
- [Setting up a TURN server](#setting-up-a-turn-server)
- [URL previews](#url-previews)
- [Troubleshooting Installation](#troubleshooting-installation)
## Choosing your server name
# Choosing your server name
It is important to choose the name for your server before you install Synapse,
because it cannot be changed later.
@@ -54,16 +27,16 @@ that your email address is probably `user@example.com` rather than
`user@email.example.com`) - but doing so may require more advanced setup: see
[Setting up Federation](docs/federate.md).
## Installing Synapse
# Installing Synapse
### Installing from source
## Installing from source
(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
- Python 3.5.2 or later, up to Python 3.9.
- Python 3.5.2 or later, up to Python 3.8.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
Synapse is written in Python but some of the libraries it uses are written in
@@ -74,7 +47,7 @@ these on various platforms.
To install the Synapse homeserver run:
```sh
```
mkdir -p ~/synapse
virtualenv -p python3 ~/synapse/env
source ~/synapse/env/bin/activate
@@ -91,7 +64,7 @@ prefer.
This Synapse installation can then be later upgraded by using pip again with the
update flag:
```sh
```
source ~/synapse/env/bin/activate
pip install -U matrix-synapse
```
@@ -99,7 +72,7 @@ pip install -U matrix-synapse
Before you can start Synapse, you will need to generate a configuration
file. To do this, run (in your virtualenv, as before):
```sh
```
cd ~/synapse
python -m synapse.app.homeserver \
--server-name my.domain.name \
@@ -117,43 +90,45 @@ wise to back them up somewhere safe. (If, for whatever reason, you do need to
change your homeserver's keys, you may find that other homeserver have the
old key cached. If you update the signing key, you should change the name of the
key in the `<server name>.signing.key` file (the second word) to something
different. See the [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys) for more information on key management).
different. See the
[spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys)
for more information on key management).
To actually run your new homeserver, pick a working directory for Synapse to
run (e.g. `~/synapse`), and:
```sh
```
cd ~/synapse
source env/bin/activate
synctl start
```
#### Platform-Specific Instructions
### Platform-Specific Instructions
##### Debian/Ubuntu/Raspbian
#### Debian/Ubuntu/Raspbian
Installing prerequisites on Ubuntu or Debian:
```sh
sudo apt install build-essential python3-dev libffi-dev \
```
sudo apt-get install build-essential python3-dev libffi-dev \
python3-pip python3-setuptools sqlite3 \
libssl-dev virtualenv libjpeg-dev libxslt1-dev
```
##### ArchLinux
#### ArchLinux
Installing prerequisites on ArchLinux:
```sh
```
sudo pacman -S base-devel python python-pip \
python-setuptools python-virtualenv sqlite3
```
##### CentOS/Fedora
#### CentOS/Fedora
Installing prerequisites on CentOS 8 or Fedora>26:
```sh
```
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
libwebp-devel tk-devel redhat-rpm-config \
python3-virtualenv libffi-devel openssl-devel
@@ -162,7 +137,7 @@ sudo dnf groupinstall "Development Tools"
Installing prerequisites on CentOS 7 or Fedora<=25:
```sh
```
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
python3-virtualenv libffi-devel openssl-devel
@@ -174,11 +149,11 @@ uses SQLite 3.7. You may be able to work around this by installing a more
recent SQLite version, but it is recommended that you instead use a Postgres
database: see [docs/postgres.md](docs/postgres.md).
##### macOS
#### macOS
Installing prerequisites on macOS:
```sh
```
xcode-select --install
sudo easy_install pip
sudo pip install virtualenv
@@ -188,23 +163,22 @@ brew install pkg-config libffi
On macOS Catalina (10.15) you may need to explicitly install OpenSSL
via brew and inform `pip` about it so that `psycopg2` builds:
```sh
```
brew install openssl@1.1
export LDFLAGS="-L/usr/local/opt/openssl/lib"
export CPPFLAGS="-I/usr/local/opt/openssl/include"
export LDFLAGS=-L/usr/local/Cellar/openssl\@1.1/1.1.1d/lib/
```
##### OpenSUSE
#### OpenSUSE
Installing prerequisites on openSUSE:
```sh
```
sudo zypper in -t pattern devel_basis
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
python-devel libffi-devel libopenssl-devel libjpeg62-devel
```
##### OpenBSD
#### OpenBSD
A port of Synapse is available under `net/synapse`. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
@@ -218,72 +192,72 @@ mounted with `wxallowed` (cf. `mount(8)`).
Creating a `WRKOBJDIR` for building python under `/usr/local` (which on a
default OpenBSD installation is mounted with `wxallowed`):
```sh
```
doas mkdir /usr/local/pobj_wxallowed
```
Assuming `PORTS_PRIVSEP=Yes` (cf. `bsd.port.mk(5)`) and `SUDO=doas` are
configured in `/etc/mk.conf`:
```sh
```
doas chown _pbuild:_pbuild /usr/local/pobj_wxallowed
```
Setting the `WRKOBJDIR` for building python:
```sh
```
echo WRKOBJDIR_lang/python/3.7=/usr/local/pobj_wxallowed \\nWRKOBJDIR_lang/python/2.7=/usr/local/pobj_wxallowed >> /etc/mk.conf
```
Building Synapse:
```sh
```
cd /usr/ports/net/synapse
make install
```
##### Windows
#### Windows
If you wish to run or develop Synapse on Windows, the Windows Subsystem For
Linux provides a Linux environment on Windows 10 which is capable of using the
Debian, Fedora, or source installation methods. More information about WSL can
be found at <https://docs.microsoft.com/en-us/windows/wsl/install-win10> for
Windows 10 and <https://docs.microsoft.com/en-us/windows/wsl/install-on-server>
be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
for Windows Server.
### Prebuilt packages
## Prebuilt packages
As an alternative to installing from source, prebuilt packages are available
for a number of platforms.
#### Docker images and Ansible playbooks
### Docker images and Ansible playbooks
There is an official synapse image available at
<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
the docker-compose file available at [contrib/docker](contrib/docker). Further
information on this including configuration options is available in the README
on hub.docker.com.
There is an offical synapse image available at
https://hub.docker.com/r/matrixdotorg/synapse which can be used with
the docker-compose file available at [contrib/docker](contrib/docker). Further information on
this including configuration options is available in the README on
hub.docker.com.
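
For illustration, a minimal sketch of running the image directly rather than via
docker-compose (the `SYNAPSE_SERVER_NAME`/`SYNAPSE_REPORT_STATS` variables and paths are
assumptions; check the image's README on hub.docker.com for the current options):

```sh
# generate a config into ./data, then start the server on port 8008
docker run -it --rm -v "$(pwd)/data:/data" \
    -e SYNAPSE_SERVER_NAME=my.domain.name -e SYNAPSE_REPORT_STATS=yes \
    matrixdotorg/synapse:latest generate
docker run -d --name synapse -v "$(pwd)/data:/data" -p 8008:8008 matrixdotorg/synapse:latest
```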
Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
<https://hub.docker.com/r/avhost/docker-matrix/tags/>
https://hub.docker.com/r/avhost/docker-matrix/tags/
Slavi Pantaleev has created an Ansible playbook,
which installs the official Docker image of Matrix Synapse
along with many other Matrix-related services (Postgres database, Element, coturn,
ma1sd, SSL support, etc.).
along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
For more details, see
<https://github.com/spantaleev/matrix-docker-ansible-deploy>
https://github.com/spantaleev/matrix-docker-ansible-deploy
#### Debian/Ubuntu
##### Matrix.org packages
### Debian/Ubuntu
#### Matrix.org packages
Matrix.org provides Debian/Ubuntu packages of the latest stable version of
Synapse via <https://packages.matrix.org/debian/>. They are available for Debian
Synapse via https://packages.matrix.org/debian/. They are available for Debian
9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them:
```sh
```
sudo apt install -y lsb-release wget apt-transport-https
sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
@@ -303,61 +277,56 @@ The fingerprint of the repository signing key (as shown by `gpg
/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.
##### Downstream Debian packages
#### Downstream Debian/Ubuntu packages
We do not recommend using the packages from the default Debian `buster`
repository at this time, as they are old and suffer from known security
vulnerabilities. You can install the latest version of Synapse from
[our repository](#matrixorg-packages) or from `buster-backports`. Please
see the [Debian documentation](https://backports.debian.org/Instructions/)
for information on how to use backports.
For `buster` and `sid`, Synapse is available in the Debian repositories and
it should be possible to install it with simply:
If you are using Debian `sid` or testing, Synapse is available in the default
repositories and it should be possible to install it simply with:
```sh
```
sudo apt install matrix-synapse
```
##### Downstream Ubuntu packages
There is also a version of `matrix-synapse` in `stretch-backports`. Please see
the [Debian documentation on
backports](https://backports.debian.org/Instructions/) for information on how
to use them.
We do not recommend using the packages in the default Ubuntu repository
at this time, as they are old and suffer from known security vulnerabilities.
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
We do not recommend using the packages in downstream Ubuntu at this time, as
they are old and suffer from known security vulnerabilities.
#### Fedora
### Fedora
Synapse is in the Fedora repositories as `matrix-synapse`:
```sh
```
sudo dnf install matrix-synapse
```
Oleg Girko provides Fedora RPMs at
<https://obs.infoserver.lv/project/monitor/matrix-synapse>
https://obs.infoserver.lv/project/monitor/matrix-synapse
#### OpenSUSE
### OpenSUSE
Synapse is in the OpenSUSE repositories as `matrix-synapse`:
```sh
```
sudo zypper install matrix-synapse
```
#### SUSE Linux Enterprise Server
### SUSE Linux Enterprise Server
Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
<https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/>
https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/
#### ArchLinux
### ArchLinux
The quickest way to get up and running with ArchLinux is probably with the community package
<https://www.archlinux.org/packages/community/any/matrix-synapse/>, which should pull in most of
https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in most of
the necessary dependencies.
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 ):
```sh
```
sudo pip install --upgrade pip
```
@@ -366,28 +335,28 @@ ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
compile it under the right architecture. (This should not be needed if
installing under virtualenv):
```sh
```
sudo pip uninstall py-bcrypt
sudo pip install py-bcrypt
```
#### Void Linux
### Void Linux
Synapse can be found in the void repositories as 'synapse':
```sh
```
xbps-install -Su
xbps-install -S synapse
```
#### FreeBSD
### FreeBSD
Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
- Packages: `pkg install py37-matrix-synapse`
- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
- Packages: `pkg install py37-matrix-synapse`
#### OpenBSD
### OpenBSD
As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
@@ -396,35 +365,20 @@ and mounting it to `/var/synapse` should be taken into consideration.
Installing Synapse:
```sh
```
doas pkg_add synapse
```
#### NixOS
### NixOS
Robin Lambertz has packaged Synapse for NixOS at:
<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix
## Setting up Synapse
# Setting up Synapse
Once you have installed synapse as above, you will need to configure it.
### Using PostgreSQL
By default Synapse uses [SQLite](https://sqlite.org/) and in doing so trades performance for convenience.
SQLite is only recommended in Synapse for testing purposes or for servers with
very light workloads.
Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org). Advantages include:
- significant performance improvements due to the superior threading and
caching model, smarter query optimiser
- allowing the DB to be run on separate hardware
For information on how to install and use PostgreSQL in Synapse, please see
[docs/postgres.md](docs/postgres.md)
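
For illustration, a hedged sketch of creating a suitable database on the same host (the
user name, database name, and locale flags are assumptions; see
[docs/postgres.md](docs/postgres.md) for the authoritative steps):

```sh
sudo -u postgres createuser --pwprompt synapse_user
sudo -u postgres createdb --encoding=UTF8 --lc-collate=C --lc-ctype=C \
    --template=template0 --owner=synapse_user synapse
```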
### TLS certificates
## TLS certificates
The default configuration exposes a single HTTP port on the local
interface: `http://localhost:8008`. It is suitable for local testing,
@@ -438,24 +392,26 @@ The recommended way to do so is to set up a reverse proxy on port
Alternatively, you can configure Synapse to expose an HTTPS port. To do
so, you will need to edit `homeserver.yaml`, as follows:
- First, under the `listeners` section, uncomment the configuration for the
* First, under the `listeners` section, uncomment the configuration for the
TLS-enabled listener. (Remove the hash sign (`#`) at the start of
each line). The relevant lines are like this:
```yaml
- port: 8448
type: http
tls: true
resources:
- names: [client, federation]
```
- port: 8448
type: http
tls: true
resources:
- names: [client, federation]
```
- You will also need to uncomment the `tls_certificate_path` and
`tls_private_key_path` lines under the `TLS` section. You will need to manage
provisioning of these certificates yourself — Synapse had built-in ACME
support, but the ACMEv1 protocol Synapse implements is deprecated, not
allowed by LetsEncrypt for new sites, and will break for existing sites in
late 2020. See [ACME.md](docs/ACME.md).
* You will also need to uncomment the `tls_certificate_path` and
`tls_private_key_path` lines under the `TLS` section. You can either
point these settings at an existing certificate and key, or you can
enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
for having Synapse automatically provision and renew federation
certificates through ACME can be found at [ACME.md](docs/ACME.md).
Note that, as pointed out in that document, this feature will not
work with installs set up after November 2019.
If you are using your own certificate, be sure to use a `.pem` file that
includes the full certificate chain including any intermediate certificates
@@ -465,63 +421,8 @@ so, you will need to edit `homeserver.yaml`, as follows:
For a more detailed guide to configuring your server for federation, see
[federate.md](docs/federate.md).
### Client Well-Known URI
Setting up the client Well-Known URI is optional but if you set it up, it will
allow users to enter their full username (e.g. `@user:<server_name>`) into clients
which support well-known lookup to automatically configure the homeserver and
identity server URLs. This is useful so that users don't have to memorize or think
about the actual homeserver URL you are using.
The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
the following format.
```json
{
"m.homeserver": {
"base_url": "https://<matrix.example.com>"
}
}
```
It can optionally contain identity server information as well.
```json
{
"m.homeserver": {
"base_url": "https://<matrix.example.com>"
},
"m.identity_server": {
"base_url": "https://<identity.example.com>"
}
}
```
To work in browser based clients, the file must be served with the appropriate
Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
`Access-Control-Allow-Origin: *` which would allow all browser based clients to
view it.
In nginx this would be something like:
```nginx
location /.well-known/matrix/client {
return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
default_type application/json;
add_header Access-Control-Allow-Origin *;
}
```
You should also ensure the `public_baseurl` option in `homeserver.yaml` is set
correctly. `public_baseurl` should be set to the URL that clients will use to
connect to your server. This is the same URL you put for the `m.homeserver`
`base_url` above.
```yaml
public_baseurl: "https://<matrix.example.com>"
```
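
For illustration, a quick way to sanity-check the file and its CORS header once it is in
place (the hostname is an example):

```sh
curl -i https://example.com/.well-known/matrix/client
# expect HTTP 200, the JSON body shown above, and an
# `Access-Control-Allow-Origin: *` response header
```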
### Email
## Email
It is desirable for Synapse to have the capability to send email. This allows
Synapse to send password reset emails, send verifications when an email address
@@ -536,15 +437,15 @@ and `notif_from` fields filled out. You may also need to set `smtp_user`,
If email is not configured, password reset, registration and notifications via
email will be disabled.
### Registering a user
## Registering a user
The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
The easiest way to create a new user is to do so from a client like [Riot](https://riot.im).
Alternatively you can do so from the command line if you have installed via pip.
This can be done as follows:
```sh
```
$ source ~/synapse/env/bin/activate
$ synctl start # if not already running
$ register_new_matrix_user -c homeserver.yaml http://localhost:8008
@@ -562,12 +463,12 @@ value is generated by `--generate-config`), but it should be kept secret, as
anyone with knowledge of it can register users, including admin accounts,
on your server even if `enable_registration` is `false`.
### Setting up a TURN server
## Setting up a TURN server
For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details.
### URL previews
## URL previews
Synapse includes support for previewing URLs, which is disabled by default. To
turn it on you must enable the `url_preview_enabled: True` config parameter
@@ -577,18 +478,19 @@ This is critical from a security perspective to stop arbitrary Matrix users
spidering 'internal' URLs on your network. At the very least we recommend that
your loopback and RFC1918 IP addresses are blacklisted.
This also requires the optional `lxml` python dependency to be installed. This
in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
means `apt-get install libxml2-dev`, or equivalent for your OS.
This also requires the optional `lxml` and `netaddr` python dependencies to be
installed. This in turn requires the `libxml2` library to be available - on
Debian/Ubuntu this means `apt-get install libxml2-dev`, or equivalent for
your OS.
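
For illustration, a minimal sketch of installing those dependencies into the virtualenv on
Debian/Ubuntu (paths follow the earlier steps in this guide):

```sh
sudo apt-get install libxml2-dev
~/synapse/env/bin/pip install lxml netaddr
# then set `url_preview_enabled: true` (and an IP range blacklist) in homeserver.yaml
# and restart Synapse
```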
### Troubleshooting Installation
# Troubleshooting Installation
`pip` seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.:
```sh
```
pip install twisted
```

View File

@@ -1,6 +1,10 @@
=========================================================
Synapse |support| |development| |license| |pypi| |python|
=========================================================
================
Synapse |shield|
================
.. |shield| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
:alt: (get support on #synapse:matrix.org)
:target: https://matrix.to/#/#synapse:matrix.org
.. contents::
@@ -41,7 +45,7 @@ which handle:
- Eventually-consistent cryptographically secure synchronisation of room
state across a global open network of federated servers and services
- Sending and receiving extensible messages in a room with (optional)
end-to-end encryption
end-to-end encryption[1]
- Inviting, joining, leaving, kicking, banning room members
- Managing user accounts (registration, login, logout)
- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
@@ -78,6 +82,9 @@ at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
Thanks for using Matrix!
[1] End-to-end encryption is currently in beta: `blog post <https://matrix.org/blog/2016/11/21/matrixs-olm-end-to-end-encryption-security-assessment-released-and-implemented-cross-platform-on-riot-at-last>`_.
Support
=======
@@ -108,11 +115,12 @@ Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see `<INSTALL.md#tls-certificates>`_.
An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
An easy way to get started is to login or register via Riot at
https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
(Leave the identity server as the default - see `Identity servers`_.)
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
@@ -129,7 +137,7 @@ it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.md>`_.)
Once ``enable_registration`` is set to ``true``, it is possible to register a
user via a Matrix client.
user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
@@ -175,6 +183,30 @@ versions of synapse.
.. _UPGRADE.rst: UPGRADE.rst
Using PostgreSQL
================
Synapse offers two database engines:
* `SQLite <https://sqlite.org/>`_
* `PostgreSQL <https://www.postgresql.org>`_
By default Synapse uses SQLite and in doing so trades performance for convenience.
SQLite is only recommended in Synapse for testing purposes or for servers with
light workloads.
Almost all installations should opt to use PostgreSQL. Advantages include:
* significant performance improvements due to the superior threading and
caching model, smarter query optimiser
* allowing the DB to be run on separate hardware
* allowing basic active/backup high-availability with a "hot spare" synapse
pointing at the same DB master, as well as enabling DB replication in
synapse itself.
For information on how to install and use PostgreSQL, please see
`docs/postgres.md <docs/postgres.md>`_.
.. _reverse-proxy:
Using a reverse proxy with Synapse
@@ -183,7 +215,7 @@ Using a reverse proxy with Synapse
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_ or
`Caddy <https://caddyserver.com/docs/proxy>`_ or
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
@@ -223,9 +255,10 @@ email address.
Password reset
==============
Users can reset their password through their client. Alternatively, a server admin
can reset a users password using the `admin API <docs/admin_api/user_admin_api.rst#reset-password>`_
or by directly editing the database as shown below.
If a user has registered an email address to their account using an identity
server, they can request a password-reset token via clients such as Riot.
A manual password reset can be done via direct database access as follows.
First calculate the hash of the new password::
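    # a hedged sketch; the hash_password script and its path are assumptions
    ~/synapse/env/bin/hash_password -c homeserver.yaml
    # prints a bcrypt hash of the password you type; use it in the UPDATE below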
@@ -243,8 +276,6 @@ Then update the ``users`` table in the database::
Synapse Development
===================
Join our developer community on Matrix: `#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_
Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
`Installing from source <INSTALL.md#installing-from-source>`_.
@@ -258,48 +289,23 @@ directory of your choice::
Synapse has a number of external dependencies, that are easiest
to install using pip and a virtualenv::
python3 -m venv ./env
source ./env/bin/activate
pip install -e ".[all,test]"
virtualenv -p python3 env
source env/bin/activate
python -m pip install --no-use-pep517 -e ".[all]"
This will run a process of downloading and installing all the needed
dependencies into a virtual env. If any dependencies fail to install,
try installing the failing modules individually::
dependencies into a virtual env.
pip install -e "module-name"
Once this is done, you may wish to run Synapse's unit tests to
check that everything is installed correctly::
Once this is done, you may wish to run Synapse's unit tests, to
check that everything is installed as it should be::
python -m twisted.trial tests
This should end with a 'PASSED' result (note that exact numbers will
differ)::
Ran 1337 tests in 716.064s
PASSED (skips=15, successes=1322)
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
./demo/start.sh
(to stop, you can use `./demo/stop.sh`)
If you just want to start a single instance of the app and run it directly::
# Create the homeserver.yaml config once
python -m synapse.app.homeserver \
--server-name my.domain.name \
--config-path homeserver.yaml \
--generate-config \
--report-stats=[yes|no]
# Start the app
python -m synapse.app.homeserver --config-path homeserver.yaml
This should end with a 'PASSED' result::
Ran 143 tests in 0.601s
PASSED (successes=143)
Running the Integration Tests
=============================
@@ -313,6 +319,19 @@ Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `installation instructions
<https://github.com/matrix-org/sytest#installing>`_ for details.
Building Internal API Documentation
===================================
Before building internal API documentation install sphinx and
sphinxcontrib-napoleon::
pip install sphinx
pip install sphinxcontrib-napoleon
Building internal API documentation::
python setup.py build_sphinx
Troubleshooting
===============
@@ -397,23 +416,3 @@ something like the following in their logs::
This is normally caused by a misconfiguration in your reverse-proxy. See
`<docs/reverse_proxy.md>`_ and double-check that your settings are correct.
.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
:alt: (get support on #synapse:matrix.org)
:target: https://matrix.to/#/#synapse:matrix.org
.. |development| image:: https://img.shields.io/matrix/synapse-dev:matrix.org?label=development&logo=matrix
:alt: (discuss development on #synapse-dev:matrix.org)
:target: https://matrix.to/#/#synapse-dev:matrix.org
.. |license| image:: https://img.shields.io/github/license/matrix-org/synapse
:alt: (check license in LICENSE file)
:target: LICENSE
.. |pypi| image:: https://img.shields.io/pypi/v/matrix-synapse
:alt: (latest version released on PyPi)
:target: https://pypi.org/project/matrix-synapse
.. |python| image:: https://img.shields.io/pypi/pyversions/matrix-synapse
:alt: (supported python versions)
:target: https://pypi.org/project/matrix-synapse

View File

@@ -5,16 +5,6 @@ Before upgrading check if any special steps are required to upgrade from the
version you currently have installed to the current version of Synapse. The extra
instructions that may be required are listed later in this document.
* Check that your versions of Python and PostgreSQL are still supported.
Synapse follows upstream lifecycles for `Python`_ and `PostgreSQL`_, and
removes support for versions which are no longer maintained.
The website https://endoflife.date also offers convenient summaries.
.. _Python: https://devguide.python.org/devcycle/#end-of-life-branches
.. _PostgreSQL: https://www.postgresql.org/support/versioning/
* If Synapse was installed using `prebuilt packages
<INSTALL.md#prebuilt-packages>`_, you will need to follow the normal process
for upgrading those packages.
@@ -85,245 +75,6 @@ for example:
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
Upgrading to v1.26.0
====================
Rolling back to v1.25.0 after a failed upgrade
----------------------------------------------
v1.26.0 includes a lot of large changes. If something problematic occurs, you
may want to roll-back to a previous version of Synapse. Because v1.26.0 also
includes a new database schema version, reverting that version is also required
alongside the generic rollback instructions mentioned above. In short, to roll
back to v1.25.0 you need to:
1. Stop the server
2. Decrease the schema version in the database:
.. code:: sql
UPDATE schema_version SET version = 58;
3. Delete the ignored users & chain cover data:
.. code:: sql
DROP TABLE IF EXISTS ignored_users;
UPDATE rooms SET has_auth_chain_index = false;
For PostgreSQL run:
.. code:: sql
TRUNCATE event_auth_chain_links;
TRUNCATE event_auth_chains;
For SQLite run:
.. code:: sql
DELETE FROM event_auth_chain_links;
DELETE FROM event_auth_chains;
4. Mark the deltas as not run (so they will re-run on upgrade).
.. code:: sql
DELETE FROM applied_schema_deltas WHERE version = 59 AND file = "59/01ignored_user.py";
DELETE FROM applied_schema_deltas WHERE version = 59 AND file = "59/06chain_cover_index.sql";
5. Downgrade Synapse by following the instructions for your installation method
in the "Rolling back to older versions" section above.
Upgrading to v1.25.0
====================
Last release supporting Python 3.5
----------------------------------
This is the last release of Synapse which guarantees support with Python 3.5,
which passed its upstream End of Life date several months ago.
We will attempt to maintain support through March 2021, but without guarantees.
In the future, Synapse will follow upstream schedules for ending support of
older versions of Python and PostgreSQL. Please upgrade to at least Python 3.6
and PostgreSQL 9.6 as soon as possible.
Blacklisting IP ranges
----------------------
Synapse v1.25.0 includes new settings, ``ip_range_blacklist`` and
``ip_range_whitelist``, for controlling outgoing requests from Synapse for federation,
identity servers, push, and for checking key validity for third-party invite events.
The previous setting, ``federation_ip_range_blacklist``, is deprecated. The new
``ip_range_blacklist`` defaults to private IP ranges if it is not defined.
If you have never customised ``federation_ip_range_blacklist`` it is recommended
that you remove that setting.
If you have customised ``federation_ip_range_blacklist`` you should update the
setting name to ``ip_range_blacklist``.
If you have a custom push server that is reached via private IP space you may
need to customise ``ip_range_blacklist`` or ``ip_range_whitelist``.
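For illustration, a hedged one-liner for renaming a customised setting in place (review the
result by hand before restarting Synapse)::

    sed -i 's/^federation_ip_range_blacklist:/ip_range_blacklist:/' homeserver.yaml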
Upgrading to v1.24.0
====================
Custom OpenID Connect mapping provider breaking change
------------------------------------------------------
This release allows the OpenID Connect mapping provider to perform normalisation
of the localpart of the Matrix ID. This allows for the mapping provider to
specify different algorithms, instead of the [default way](https://matrix.org/docs/spec/appendices#mapping-from-other-character-sets).
If your Synapse configuration uses a custom mapping provider
(`oidc_config.user_mapping_provider.module` is specified and not equal to
`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`) then you *must* ensure
that `map_user_attributes` of the mapping provider performs some normalisation
of the `localpart` returned. To match previous behaviour you can use the
`map_username_to_mxid_localpart` function provided by Synapse. An example is
shown below:
.. code-block:: python

    from synapse.types import map_username_to_mxid_localpart

    class MyMappingProvider:
        def map_user_attributes(self, userinfo, token):
            # ... your custom logic ...
            sso_user_id = ...
            localpart = map_username_to_mxid_localpart(sso_user_id)
            return {"localpart": localpart}
Removal of historical Synapse Admin API
------------------------------------
Historically, the Synapse Admin API has been accessible under:
* ``/_matrix/client/api/v1/admin``
* ``/_matrix/client/unstable/admin``
* ``/_matrix/client/r0/admin``
* ``/_synapse/admin/v1``
The endpoints with ``/_matrix/client/*`` prefixes have been removed as of v1.24.0.
The Admin API is now only accessible under:
* ``/_synapse/admin/v1``
The only exception is the `/admin/whois` endpoint, which is
`also available via the client-server API <https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_.
The deprecation of the old endpoints was announced with Synapse 1.20.0 (released
on 2020-09-22) and makes it easier for homeserver admins to lock down external
access to the Admin API endpoints.
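For illustration, a hedged sketch of calling the ``whois`` endpoint under the remaining
prefix (the hostname, user ID, and ``$ADMIN_TOKEN`` variable are assumptions)::

    curl -H "Authorization: Bearer $ADMIN_TOKEN" \
        "https://matrix.example.com/_synapse/admin/v1/whois/@alice:example.com"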
Upgrading to v1.23.0
====================
Structured logging configuration breaking changes
-------------------------------------------------
This release deprecates use of the ``structured: true`` logging configuration for
structured logging. If your logging configuration contains ``structured: true``
then it should be modified based on the `structured logging documentation
<https://github.com/matrix-org/synapse/blob/master/docs/structured_logging.md>`_.
The ``structured`` and ``drains`` logging options are now deprecated and should
be replaced by standard logging configuration of ``handlers`` and ``formatters``.
A future release of Synapse will make using ``structured: true`` an error.
Upgrading to v1.22.0
====================
ThirdPartyEventRules breaking changes
-------------------------------------
This release introduces a backwards-incompatible change to modules making use of
``ThirdPartyEventRules`` in Synapse. If you make use of a module defined under the
``third_party_event_rules`` config option, please make sure it is updated to handle
the below change:
The ``http_client`` argument is no longer passed to modules as they are initialised. Instead,
modules are expected to make use of the ``http_client`` property on the ``ModuleApi`` class.
Modules are now passed a ``module_api`` argument during initialisation, which is an instance of
``ModuleApi``. ``ModuleApi`` instances have a ``http_client`` property which acts the same as
the ``http_client`` argument previously passed to ``ThirdPartyEventRules`` modules.
Upgrading to v1.21.0
====================
Forwarding ``/_synapse/client`` through your reverse proxy
----------------------------------------------------------
The `reverse proxy documentation
<https://github.com/matrix-org/synapse/blob/develop/docs/reverse_proxy.md>`_ has been updated
to include reverse proxy directives for ``/_synapse/client/*`` endpoints. As the user password
reset flow now uses endpoints under this prefix, **you must update your reverse proxy
configurations for user password reset to work**.
Additionally, note that the `Synapse worker documentation
<https://github.com/matrix-org/synapse/blob/develop/docs/workers.md>`_ has been updated to
state that the ``/_synapse/client/password_reset/email/submit_token`` endpoint can be handled
by all workers. If you make use of Synapse's worker feature, please update your reverse proxy
configuration to reflect this change.
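For illustration, a hedged way to check that the proxy forwards the new prefix (the
hostname is an example; a Synapse-generated error response rather than a proxy 404
suggests the route is in place)::

    curl -i https://matrix.example.com/_synapse/client/password_reset/email/submit_token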
New HTML templates
------------------
A new HTML template,
`password_reset_confirmation.html <https://github.com/matrix-org/synapse/blob/develop/synapse/res/templates/password_reset_confirmation.html>`_,
has been added to the ``synapse/res/templates`` directory. If you are using a
custom template directory, you may want to copy the template over and modify it.
Note that as of v1.20.0, templates do not need to be included in custom template
directories for Synapse to start. The default templates will be used if a custom
template cannot be found.
This page will appear to the user after clicking a password reset link that has
been emailed to them.
To complete password reset, the page must include a way to make a `POST`
request to
``/_synapse/client/password_reset/{medium}/submit_token``
with the query parameters from the original link, presented as a URL-encoded form. See the file
itself for more details.
Updated Single Sign-on HTML Templates
-------------------------------------
The ``saml_error.html`` template was removed from Synapse and replaced with the
``sso_error.html`` template. If your Synapse is configured to use SAML and a
custom ``sso_redirect_confirm_template_dir`` configuration then any customisations
of the ``saml_error.html`` template will need to be merged into the ``sso_error.html``
template. These templates are similar, but the parameters are slightly different:
* The ``msg`` parameter should be renamed to ``error_description``.
* There is no longer a ``code`` parameter for the response code.
* A string ``error`` parameter is available that includes a short hint of why a
user is seeing the error page.
Upgrading to v1.18.0
====================
Docker `-py3` suffix will be removed in future versions
-------------------------------------------------------
From 10th August 2020, we will no longer publish Docker images with the `-py3` tag suffix. The images tagged with the `-py3` suffix have been identical to the non-suffixed tags since release 0.99.0, and the suffix is obsolete.
On 10th August, we will remove the `latest-py3` tag. Existing per-release tags (such as `v1.18.0-py3`) will not be removed, but no new `-py3` tags will be added.
Scripts relying on the `-py3` suffix will need to be updated.
Redis replication is now recommended in lieu of TCP replication
---------------------------------------------------------------
When setting up worker processes, we now recommend the use of a Redis server for replication. **The old direct TCP connection method is deprecated and will be removed in a future release.**
See `docs/workers.md <docs/workers.md>`_ for more details.
Upgrading to v1.14.0
====================

View File

@@ -1 +0,0 @@
Add tests to `test_user.UsersListTestCase` for List Users Admin API.

View File

@@ -1 +0,0 @@
Various improvements to the federation client.

View File

@@ -1 +0,0 @@
Add link to Matrix VoIP tester for turn-howto.

View File

@@ -1 +0,0 @@
Fix a long-standing bug where Synapse would return a 500 error when a thumbnail did not exist (and auto-generation of thumbnails was not enabled).

View File

@@ -1 +0,0 @@
Speed up chain cover calculation when persisting a batch of state events at once.

View File

@@ -1 +0,0 @@
Add a `long_description_type` to the package metadata.

View File

@@ -1 +0,0 @@
Speed up batch insertion when using PostgreSQL.

View File

@@ -1 +0,0 @@
Emit an error at startup if different Identity Providers are configured with the same `idp_id`.

View File

@@ -1 +0,0 @@
Speed up batch insertion when using PostgreSQL.

View File

@@ -1 +0,0 @@
Add an `oidc-` prefix to any `idp_id`s which are given in the `oidc_providers` configuration.

View File

@@ -1 +0,0 @@
Improve performance of concurrent use of `StreamIDGenerators`.

View File

@@ -1 +0,0 @@
Add some missing source directories to the automatic linting script.

View File

@@ -1 +0,0 @@
Fix receipts or account data not being sent down sync. Introduced in v1.26.0rc1.

View File

@@ -1 +0,0 @@
Fix receipts or account data not being sent down sync. Introduced in v1.26.0rc1.

View File

@@ -15,6 +15,11 @@
# limitations under the License.
""" Starts a synapse client console. """
from __future__ import print_function
from twisted.internet import reactor, defer, threads
from http import TwistedHttpClient
import argparse
import cmd
import getpass
@@ -23,14 +28,12 @@ import shlex
import sys
import time
import urllib
from http import TwistedHttpClient
import nacl.encoding
import nacl.signing
import urlparse
from signedjson.sign import SignatureVerifyException, verify_signed_json
from twisted.internet import defer, reactor, threads
import nacl.signing
import nacl.encoding
from signedjson.sign import verify_signed_json, SignatureVerifyException
CONFIG_JSON = "cmdclient_config.json"
@@ -490,7 +493,7 @@ class SynapseCmd(cmd.Cmd):
"list messages <roomid> from=END&to=START&limit=3"
"""
args = self._parse(line, ["type", "roomid", "qp"])
if "type" not in args or "roomid" not in args:
if not "type" in args or not "roomid" in args:
print("Must specify type and room ID.")
return
if args["type"] not in ["members", "messages"]:
@@ -505,7 +508,7 @@ class SynapseCmd(cmd.Cmd):
try:
key_value = key_value_str.split("=")
qp[key_value[0]] = key_value[1]
except Exception:
except:
print("Bad query param: %s" % key_value)
return
@@ -582,7 +585,7 @@ class SynapseCmd(cmd.Cmd):
parsed_url = urlparse.urlparse(args["path"])
qp.update(urlparse.parse_qs(parsed_url.query))
args["path"] = parsed_url.path
except Exception:
except:
pass
reactor.callFromThread(
@@ -607,15 +610,13 @@ class SynapseCmd(cmd.Cmd):
@defer.inlineCallbacks
def _do_event_stream(self, timeout):
res = yield defer.ensureDeferred(
self.http_client.get_json(
self._url() + "/events",
{
"access_token": self._tok(),
"timeout": str(timeout),
"from": self.event_stream_token,
},
)
res = yield self.http_client.get_json(
self._url() + "/events",
{
"access_token": self._tok(),
"timeout": str(timeout),
"from": self.event_stream_token,
},
)
print(json.dumps(res, indent=4))
@@ -771,10 +772,10 @@ def main(server_url, identity_server_url, username, token, config_path):
syn_cmd.config = json.load(config)
try:
http_client.verbose = "on" == syn_cmd.config["verbose"]
except Exception:
except:
pass
print("Loaded config from %s" % config_path)
except Exception:
except:
pass
# Twisted-specific: Runs the command processor in Twisted's event loop

View File

@@ -13,16 +13,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urllib
from pprint import pformat
from twisted.internet import defer, reactor
from __future__ import print_function
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers
from twisted.internet import defer, reactor
from pprint import pformat
import json
import urllib
class HttpClient:
class HttpClient(object):
""" Interface for talking json over http
"""
@@ -167,7 +169,7 @@ class TwistedHttpClient(HttpClient):
return d
class _RawProducer:
class _RawProducer(object):
def __init__(self, data):
self.data = data
self.body = data
@@ -184,7 +186,7 @@ class _RawProducer:
pass
class _JsonProducer:
class _JsonProducer(object):
""" Used by the twisted http client to create the HTTP body from json
"""

View File

@@ -50,7 +50,7 @@ services:
- traefik.http.routers.https-synapse.tls.certResolver=le-ssl
db:
image: docker.io/postgres:12-alpine
image: docker.io/postgres:10-alpine
# Change that password, of course!
environment:
- POSTGRES_USER=synapse

View File

@@ -141,7 +141,7 @@ class CursesStdIO:
curses.endwin()
class Callback:
class Callback(object):
def __init__(self, stdio):
self.stdio = stdio

View File

@@ -28,24 +28,27 @@ Currently assumes the local address is localhost:<port>
"""
from synapse.federation import ReplicationHandler
from synapse.federation.units import Pdu
from synapse.util import origin_from_ucid
from synapse.app.homeserver import SynapseHomeServer
# from synapse.logging.utils import log_function
from twisted.internet import reactor, defer
from twisted.python import log
import argparse
import curses.wrapper
import json
import logging
import os
import re
import cursesio
from twisted.internet import defer, reactor
from twisted.python import log
from synapse.app.homeserver import SynapseHomeServer
from synapse.federation import ReplicationHandler
from synapse.federation.units import Pdu
from synapse.util import origin_from_ucid
# from synapse.logging.utils import log_function
import curses.wrapper
logger = logging.getLogger("example")
@@ -55,7 +58,7 @@ def excpetion_errback(failure):
logging.exception(failure)
class InputOutput:
class InputOutput(object):
""" This is responsible for basic I/O so that a user can interact with
the example app.
"""
@@ -72,7 +75,7 @@ class InputOutput:
"""
try:
m = re.match(r"^join (\S+)$", line)
m = re.match("^join (\S+)$", line)
if m:
# The `sender` wants to join a room.
(room_name,) = m.groups()
@@ -81,7 +84,7 @@ class InputOutput:
# self.print_line("OK.")
return
m = re.match(r"^invite (\S+) (\S+)$", line)
m = re.match("^invite (\S+) (\S+)$", line)
if m:
# `sender` wants to invite someone to a room
room_name, invitee = m.groups()
@@ -90,7 +93,7 @@ class InputOutput:
# self.print_line("OK.")
return
m = re.match(r"^send (\S+) (.*)$", line)
m = re.match("^send (\S+) (.*)$", line)
if m:
# `sender` wants to message a room
room_name, body = m.groups()
@@ -99,7 +102,7 @@ class InputOutput:
# self.print_line("OK.")
return
m = re.match(r"^backfill (\S+)$", line)
m = re.match("^backfill (\S+)$", line)
if m:
# we want to backfill a room
(room_name,) = m.groups()
@@ -132,7 +135,7 @@ class IOLoggerHandler(logging.Handler):
self.io.print_log(msg)
class Room:
class Room(object):
""" Used to store (in memory) the current membership state of a room, and
which home servers we should send PDUs associated with the room to.
"""
@@ -198,6 +201,16 @@ class HomeServer(ReplicationHandler):
% (pdu.context, pdu.pdu_type, json.dumps(pdu.content))
)
# def on_state_change(self, pdu):
##self.output.print_line("#%s (state) %s *** %s" %
##(pdu.context, pdu.state_key, pdu.pdu_type)
##)
# if "joinee" in pdu.content:
# self._on_join(pdu.context, pdu.content["joinee"])
# elif "invitee" in pdu.content:
# self._on_invite(pdu.origin, pdu.context, pdu.content["invitee"])
def _on_message(self, pdu):
""" We received a message
"""
@@ -301,7 +314,7 @@ class HomeServer(ReplicationHandler):
return self.replication_layer.backfill(dest, room_name, limit)
def _get_room_remote_servers(self, room_name):
return list(self.joined_rooms.setdefault(room_name).servers)
return [i for i in self.joined_rooms.setdefault(room_name).servers]
def _get_or_create_room(self, room_name):
return self.joined_rooms.setdefault(room_name, Room(room_name))
@@ -321,7 +334,7 @@ def main(stdscr):
user = args.user
server_name = origin_from_ucid(user)
# Set up logging
## Set up logging ##
root_logger = logging.getLogger()
@@ -341,7 +354,7 @@ def main(stdscr):
observer = log.PythonLoggingObserver()
observer.start()
# Set up synapse server
## Set up synapse server
curses_stdio = cursesio.CursesStdIO(stdscr)
input_output = InputOutput(curses_stdio, user)
@@ -355,16 +368,16 @@ def main(stdscr):
input_output.set_home_server(hs)
# Add input_output logger
## Add input_output logger
io_logger = IOLoggerHandler(input_output)
io_logger.setFormatter(formatter)
root_logger.addHandler(io_logger)
# Start!
## Start! ##
try:
port = int(server_name.split(":")[1])
except Exception:
except:
port = 12345
app_hs.get_http_server().start_listening(port)


@@ -3,4 +3,4 @@
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up required recording rules. https://github.com/matrix-org/synapse/tree/master/contrib/prometheus
3. Set up additional recording rules


@@ -1,44 +1,7 @@
{
"__inputs": [
{
"name": "DS_PROMETHEUS",
"label": "Prometheus",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "6.7.4"
},
{
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
},
{
"type": "panel",
"id": "heatmap",
"name": "Heatmap",
"version": ""
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
}
],
"annotations": {
"list": [
{
"$$hashKey": "object:76",
"builtIn": 1,
"datasource": "$datasource",
"enable": false,
@@ -54,8 +17,8 @@
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1594646317221,
"id": 1,
"iteration": 1591098104645,
"links": [
{
"asDropdown": true,
@@ -71,7 +34,7 @@
"panels": [
{
"collapsed": false,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -306,6 +269,7 @@
"show": false
},
"links": [],
"options": {},
"reverseYBuckets": false,
"targets": [
{
@@ -595,7 +559,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -1459,7 +1423,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -1831,7 +1795,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -2567,7 +2531,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -2859,7 +2823,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -2880,7 +2844,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 6
"y": 33
},
"hiddenSeries": false,
"id": 79,
@@ -2976,7 +2940,7 @@
"h": 9,
"w": 12,
"x": 12,
"y": 6
"y": 33
},
"hiddenSeries": false,
"id": 83,
@@ -3074,7 +3038,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 15
"y": 42
},
"hiddenSeries": false,
"id": 109,
@@ -3173,7 +3137,7 @@
"h": 9,
"w": 12,
"x": 12,
"y": 15
"y": 42
},
"hiddenSeries": false,
"id": 111,
@@ -3259,14 +3223,14 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"description": "Number of events queued up on the master process for processing by the federation sender",
"description": "",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 24
"y": 51
},
"hiddenSeries": false,
"id": 140,
@@ -3390,103 +3354,6 @@
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"description": "The number of events in the in-memory queues ",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 24
},
"hiddenSeries": false,
"id": 142,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "synapse_federation_transaction_queue_pending_pdus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"interval": "",
"legendFormat": "pending PDUs {{job}}-{{index}}",
"refId": "A"
},
{
"expr": "synapse_federation_transaction_queue_pending_edus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"interval": "",
"legendFormat": "pending EDUs {{job}}-{{index}}",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "In-memory federation transmission queues",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:317",
"format": "short",
"label": "events",
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"$$hashKey": "object:318",
"format": "short",
"label": "",
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"title": "Federation",
@@ -3494,7 +3361,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -3700,7 +3567,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -3721,7 +3588,7 @@
"h": 7,
"w": 12,
"x": 0,
"y": 79
"y": 52
},
"hiddenSeries": false,
"id": 48,
@@ -3815,7 +3682,7 @@
"h": 7,
"w": 12,
"x": 12,
"y": 79
"y": 52
},
"hiddenSeries": false,
"id": 104,
@@ -3935,7 +3802,7 @@
"h": 7,
"w": 12,
"x": 0,
"y": 86
"y": 59
},
"hiddenSeries": false,
"id": 10,
@@ -4031,7 +3898,7 @@
"h": 7,
"w": 12,
"x": 12,
"y": 86
"y": 59
},
"hiddenSeries": false,
"id": 11,
@@ -4120,7 +3987,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -4144,7 +4011,7 @@
"h": 13,
"w": 12,
"x": 0,
"y": 80
"y": 67
},
"hiddenSeries": false,
"id": 12,
@@ -4239,7 +4106,7 @@
"h": 13,
"w": 12,
"x": 12,
"y": 80
"y": 67
},
"hiddenSeries": false,
"id": 26,
@@ -4334,7 +4201,7 @@
"h": 13,
"w": 12,
"x": 0,
"y": 93
"y": 80
},
"hiddenSeries": false,
"id": 13,
@@ -4430,7 +4297,7 @@
"h": 13,
"w": 12,
"x": 12,
"y": 93
"y": 80
},
"hiddenSeries": false,
"id": 27,
@@ -4525,7 +4392,7 @@
"h": 13,
"w": 12,
"x": 0,
"y": 106
"y": 93
},
"hiddenSeries": false,
"id": 28,
@@ -4619,7 +4486,7 @@
"h": 13,
"w": 12,
"x": 12,
"y": 106
"y": 93
},
"hiddenSeries": false,
"id": 25,
@@ -4705,7 +4572,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -5195,7 +5062,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -5216,7 +5083,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 121
"y": 66
},
"hiddenSeries": false,
"id": 91,
@@ -5312,7 +5179,7 @@
"h": 9,
"w": 12,
"x": 12,
"y": 121
"y": 66
},
"hiddenSeries": false,
"id": 21,
@@ -5404,7 +5271,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 130
"y": 75
},
"hiddenSeries": false,
"id": 89,
@@ -5502,7 +5369,7 @@
"h": 9,
"w": 12,
"x": 12,
"y": 130
"y": 75
},
"hiddenSeries": false,
"id": 93,
@@ -5592,7 +5459,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 139
"y": 84
},
"hiddenSeries": false,
"id": 95,
@@ -5685,12 +5552,12 @@
"mode": "spectrum"
},
"dataFormat": "tsbuckets",
"datasource": "${DS_PROMETHEUS}",
"datasource": "Prometheus",
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 139
"y": 84
},
"heatmap": {},
"hideZeroBuckets": true,
@@ -5700,6 +5567,7 @@
"show": true
},
"links": [],
"options": {},
"reverseYBuckets": false,
"targets": [
{
@@ -5741,7 +5609,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -5762,7 +5630,7 @@
"h": 7,
"w": 12,
"x": 0,
"y": 66
"y": 39
},
"hiddenSeries": false,
"id": 2,
@@ -5886,7 +5754,7 @@
"h": 7,
"w": 12,
"x": 12,
"y": 66
"y": 39
},
"hiddenSeries": false,
"id": 41,
@@ -5979,7 +5847,7 @@
"h": 7,
"w": 12,
"x": 0,
"y": 73
"y": 46
},
"hiddenSeries": false,
"id": 42,
@@ -6071,7 +5939,7 @@
"h": 7,
"w": 12,
"x": 12,
"y": 73
"y": 46
},
"hiddenSeries": false,
"id": 43,
@@ -6163,7 +6031,7 @@
"h": 7,
"w": 12,
"x": 0,
"y": 80
"y": 53
},
"hiddenSeries": false,
"id": 113,
@@ -6261,7 +6129,7 @@
"h": 7,
"w": 12,
"x": 12,
"y": 80
"y": 53
},
"hiddenSeries": false,
"id": 115,
@@ -6347,7 +6215,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -6368,7 +6236,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 40
"y": 58
},
"hiddenSeries": false,
"id": 67,
@@ -6399,7 +6267,7 @@
"steppedLine": false,
"targets": [
{
"expr": "max(synapse_event_persisted_position{instance=\"$instance\"}) - ignoring(instance,index, job, name) group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"expr": " synapse_event_persisted_position{instance=\"$instance\",job=\"synapse\"} - ignoring(index, job, name) group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -6460,7 +6328,7 @@
"h": 9,
"w": 12,
"x": 12,
"y": 40
"y": 58
},
"hiddenSeries": false,
"id": 71,
@@ -6494,7 +6362,6 @@
"expr": "time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} {{name}}",
"refId": "B"
@@ -6553,7 +6420,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 49
"y": 67
},
"hiddenSeries": false,
"id": 121,
@@ -6642,7 +6509,7 @@
},
{
"collapsed": true,
"datasource": "${DS_PROMETHEUS}",
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -6672,7 +6539,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 86
"y": 41
},
"heatmap": {},
"hideZeroBuckets": true,
@@ -6682,6 +6549,7 @@
"show": true
},
"links": [],
"options": {},
"reverseYBuckets": false,
"targets": [
{
@@ -6731,7 +6599,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 86
"y": 41
},
"hiddenSeries": false,
"id": 124,
@@ -6832,7 +6700,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 94
"y": 49
},
"heatmap": {},
"hideZeroBuckets": true,
@@ -6842,6 +6710,7 @@
"show": true
},
"links": [],
"options": {},
"reverseYBuckets": false,
"targets": [
{
@@ -6891,7 +6760,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 94
"y": 49
},
"hiddenSeries": false,
"id": 128,
@@ -7010,7 +6879,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 102
"y": 57
},
"heatmap": {},
"hideZeroBuckets": true,
@@ -7020,6 +6889,7 @@
"show": true
},
"links": [],
"options": {},
"reverseYBuckets": false,
"targets": [
{
@@ -7069,7 +6939,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 102
"y": 57
},
"hiddenSeries": false,
"id": 130,
@@ -7188,7 +7058,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 110
"y": 65
},
"heatmap": {},
"hideZeroBuckets": true,
@@ -7198,12 +7068,12 @@
"show": true
},
"links": [],
"options": {},
"reverseYBuckets": false,
"targets": [
{
"expr": "rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"expr": "rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)",
"format": "heatmap",
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{le}}",
"refId": "A"
@@ -7248,7 +7118,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 110
"y": 65
},
"hiddenSeries": false,
"id": 132,
@@ -7279,33 +7149,29 @@
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.5, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"expr": "histogram_quantile(0.5, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)) ",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "50%",
"refId": "A"
},
{
"expr": "histogram_quantile(0.75, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"expr": "histogram_quantile(0.75, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "75%",
"refId": "B"
},
{
"expr": "histogram_quantile(0.90, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"expr": "histogram_quantile(0.90, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "90%",
"refId": "C"
},
{
"expr": "histogram_quantile(0.99, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"expr": "histogram_quantile(0.99, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "99%",
"refId": "D"
@@ -7315,7 +7181,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Number of state resolutions performed, by number of state groups involved (quantiles)",
"title": "Number of state resolution performed, by number of state groups involved (quantiles)",
"tooltip": {
"shared": true,
"sort": 0,
@@ -7367,7 +7233,6 @@
"list": [
{
"current": {
"selected": false,
"text": "Prometheus",
"value": "Prometheus"
},
@@ -7444,12 +7309,14 @@
},
{
"allValue": null,
"current": {},
"current": {
"text": "matrix.org",
"value": "matrix.org"
},
"datasource": "$datasource",
"definition": "",
"hide": 0,
"includeAll": false,
"index": -1,
"label": null,
"multi": false,
"name": "instance",
@@ -7468,13 +7335,17 @@
{
"allFormat": "regex wildcard",
"allValue": "",
"current": {},
"current": {
"text": "synapse",
"value": [
"synapse"
]
},
"datasource": "$datasource",
"definition": "",
"hide": 0,
"hideLabel": false,
"includeAll": true,
"index": -1,
"label": "Job",
"multi": true,
"multiFormat": "regex values",
@@ -7495,13 +7366,16 @@
{
"allFormat": "regex wildcard",
"allValue": ".*",
"current": {},
"current": {
"selected": false,
"text": "All",
"value": "$__all"
},
"datasource": "$datasource",
"definition": "",
"hide": 0,
"hideLabel": false,
"includeAll": true,
"index": -1,
"label": "",
"multi": true,
"multiFormat": "regex values",
@@ -7554,8 +7428,5 @@
"timezone": "",
"title": "Synapse",
"uid": "000000012",
"variables": {
"list": []
},
"version": 32
"version": 29
}


@@ -1,10 +1,4 @@
import argparse
import cgi
import datetime
import json
import pydot
import urllib2
from __future__ import print_function
# Copyright 2014-2016 OpenMarket Ltd
#
@@ -21,6 +15,15 @@ import urllib2
# limitations under the License.
import sqlite3
import pydot
import cgi
import json
import datetime
import argparse
import urllib2
def make_name(pdu_id, origin):
return "%s@%s" % (pdu_id, origin)
@@ -30,7 +33,7 @@ def make_graph(pdus, room, filename_prefix):
node_map = {}
origins = set()
colors = {"red", "green", "blue", "yellow", "purple"}
colors = set(("red", "green", "blue", "yellow", "purple"))
for pdu in pdus:
origins.add(pdu.get("origin"))
@@ -46,7 +49,7 @@ def make_graph(pdus, room, filename_prefix):
try:
c = colors.pop()
color_map[o] = c
except Exception:
except:
print("Run out of colours!")
color_map[o] = "black"


@@ -13,13 +13,12 @@
# limitations under the License.
import argparse
import cgi
import datetime
import json
import sqlite3
import pydot
import cgi
import json
import datetime
import argparse
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
@@ -99,7 +98,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
except:
end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
node_map[prev_id] = end_node


@@ -1,12 +1,4 @@
import argparse
import cgi
import datetime
import pydot
import simplejson as json
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
from __future__ import print_function
# Copyright 2016 OpenMarket Ltd
#
@@ -23,6 +15,18 @@ from synapse.util.frozenutils import unfreeze
# limitations under the License.
import pydot
import cgi
import simplejson as json
import datetime
import argparse
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
from six import string_types
def make_graph(file_name, room_id, file_prefix, limit):
print("Reading lines")
with open(file_name) as f:
@@ -58,7 +62,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
for key, value in unfreeze(event.get_dict()["content"]).items():
if value is None:
value = "<null>"
elif isinstance(value, str):
elif isinstance(value, string_types):
pass
else:
value = json.dumps(value)
@@ -104,7 +108,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
except:
end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
node_map[prev_id] = end_node


@@ -10,15 +10,17 @@ the bridge.
Requires:
npm install jquery jsdom
"""
import json
import subprocess
import time
from __future__ import print_function
import gevent
import grequests
from BeautifulSoup import BeautifulSoup
import json
import urllib
import subprocess
import time
ACCESS_TOKEN = ""
# ACCESS_TOKEN="" #
MATRIXBASE = "https://matrix.org/_matrix/client/api/v1/"
MYUSERNAME = "@davetest:matrix.org"


@@ -20,7 +20,6 @@ Add a new job to the main prometheus.conf file:
```
### for Prometheus v2
Add a new job to the main prometheus.yml file:
```yaml
@@ -30,17 +29,14 @@ Add a new job to the main prometheus.yml file:
scheme: "https"
static_configs:
- targets: ["my.server.here:port"]
- targets: ['SERVER.LOCATION:PORT']
```
An example of a Prometheus configuration with workers can be found in
[metrics-howto.md](https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md).
To use `synapse.rules` add
```yaml
rule_files:
- "/PATH/TO/synapse-v2.rules"
rule_files:
- "/PATH/TO/synapse-v2.rules"
```
Metrics are disabled by default when running synapse; they must be enabled


@@ -9,7 +9,7 @@
new PromConsole.Graph({
node: document.querySelector("#process_resource_utime"),
expr: "rate(process_cpu_seconds_total[2m]) * 100",
name: "[[job]]-[[index]]",
name: "[[job]]",
min: 0,
max: 100,
renderer: "line",
@@ -22,12 +22,12 @@ new PromConsole.Graph({
</script>
<h3>Memory</h3>
<div id="process_resident_memory_bytes"></div>
<div id="process_resource_maxrss"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#process_resident_memory_bytes"),
expr: "process_resident_memory_bytes",
name: "[[job]]-[[index]]",
node: document.querySelector("#process_resource_maxrss"),
expr: "process_psutil_rss:max",
name: "Maxrss",
min: 0,
renderer: "line",
height: 150,
@@ -43,8 +43,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#process_fds"),
expr: "process_open_fds",
name: "[[job]]-[[index]]",
expr: "process_open_fds{job='synapse'}",
name: "FDs",
min: 0,
renderer: "line",
height: 150,
@@ -62,8 +62,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_total_time"),
expr: "rate(python_twisted_reactor_tick_time_sum[2m])",
name: "[[job]]-[[index]]",
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
name: "time",
max: 1,
min: 0,
renderer: "area",
@@ -80,8 +80,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_average_time"),
expr: "rate(python_twisted_reactor_tick_time_sum[2m]) / rate(python_twisted_reactor_tick_time_count[2m])",
name: "[[job]]-[[index]]",
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
name: "time",
min: 0,
renderer: "line",
height: 150,
@@ -97,14 +97,14 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_pending_calls"),
expr: "rate(python_twisted_reactor_pending_calls_sum[30s]) / rate(python_twisted_reactor_pending_calls_count[30s])",
name: "[[job]]-[[index]]",
expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
name: "calls",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yTitle: "Pending Calls"
yTitle: "Pending Cals"
})
</script>
@@ -115,7 +115,7 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_query_time"),
expr: "sum(rate(synapse_storage_query_time_count[2m])) by (verb)",
expr: "rate(synapse_storage_query_time:count[2m])",
name: "[[verb]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
@@ -129,8 +129,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_transaction_time"),
expr: "topk(10, rate(synapse_storage_transaction_time_count[2m]))",
name: "[[job]]-[[index]] [[desc]]",
expr: "rate(synapse_storage_transaction_time:count[2m])",
name: "[[desc]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
@@ -140,12 +140,12 @@ new PromConsole.Graph({
</script>
<h3>Transaction execution time</h3>
<div id="synapse_storage_transactions_time_sec"></div>
<div id="synapse_storage_transactions_time_msec"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_transactions_time_sec"),
expr: "rate(synapse_storage_transaction_time_sum[2m])",
name: "[[job]]-[[index]] [[desc]]",
node: document.querySelector("#synapse_storage_transactions_time_msec"),
expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
name: "[[desc]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -154,33 +154,34 @@ new PromConsole.Graph({
})
</script>
<h3>Average time waiting for database connection</h3>
<div id="synapse_storage_avg_waiting_time"></div>
<h3>Database scheduling latency</h3>
<div id="synapse_storage_schedule_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_avg_waiting_time"),
expr: "rate(synapse_storage_schedule_time_sum[2m]) / rate(synapse_storage_schedule_time_count[2m])",
name: "[[job]]-[[index]]",
node: document.querySelector("#synapse_storage_schedule_time"),
expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
name: "Total latency",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s",
yTitle: "Time"
yUnits: "s/s",
yTitle: "Usage"
})
</script>
<h3>Cache request rate</h3>
<div id="synapse_cache_request_rate"></div>
<h3>Cache hit ratio</h3>
<div id="synapse_cache_ratio"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_cache_request_rate"),
expr: "rate(synapse_util_caches_cache:total[2m])",
name: "[[job]]-[[index]] [[name]]",
node: document.querySelector("#synapse_cache_ratio"),
expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
name: "[[name]]",
min: 0,
max: 100,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "rps",
yTitle: "Cache request rate"
yUnits: "%",
yTitle: "Percentage"
})
</script>
@@ -190,7 +191,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_cache_size"),
expr: "synapse_util_caches_cache:size",
name: "[[job]]-[[index]] [[name]]",
name: "[[name]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "",
@@ -205,8 +206,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_request_count_servlet"),
expr: "rate(synapse_http_server_in_flight_requests_count[2m])",
name: "[[job]]-[[index]] [[method]] [[servlet]]",
expr: "rate(synapse_http_server_request_count:servlet[2m])",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
@@ -218,8 +219,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
expr: "rate(synapse_http_server_in_flight_requests_count{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
name: "[[job]]-[[index]] [[method]] [[servlet]]",
expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
@@ -232,8 +233,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_time_avg"),
expr: "rate(synapse_http_server_response_time_seconds_sum[2m]) / rate(synapse_http_server_response_count[2m])",
name: "[[job]]-[[index]] [[servlet]]",
expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/req",
@@ -276,7 +277,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_ru_utime"),
expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
name: "[[job]]-[[index]] [[servlet]]",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
@@ -291,7 +292,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
name: "[[job]]-[[index]] [[servlet]]",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
@@ -305,8 +306,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_send_time_avg"),
expr: "rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m])",
name: "[[job]]-[[index]] [[servlet]]",
expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/req",
@@ -322,7 +323,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_client_sent"),
expr: "rate(synapse_federation_client_sent[2m])",
name: "[[job]]-[[index]] [[type]]",
name: "[[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
@@ -336,7 +337,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_server_received"),
expr: "rate(synapse_federation_server_received[2m])",
name: "[[job]]-[[index]] [[type]]",
name: "[[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
@@ -366,7 +367,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_notifier_listeners"),
expr: "synapse_notifier_listeners",
name: "[[job]]-[[index]]",
name: "listeners",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
@@ -381,7 +382,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_notifier_notified_events"),
expr: "rate(synapse_notifier_notified_events[2m])",
name: "[[job]]-[[index]]",
name: "events",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "events/s",


@@ -58,21 +58,3 @@ groups:
labels:
type: "PDU"
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"})
labels:
type: remote
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"})
labels:
type: local
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"})
labels:
type: bridges
- record: synapse_storage_events_persisted_by_event_type
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep)
- record: synapse_storage_events_persisted_by_origin
expr: sum without(type) (synapse_storage_events_persisted_events_sep)


@@ -1,11 +1,15 @@
#!/usr/bin/env python
from __future__ import print_function
from argparse import ArgumentParser
import json
import requests
import sys
import urllib
from argparse import ArgumentParser
import requests
try:
raw_input
except NameError: # Python 3
raw_input = input
def _mkurl(template, kws):
@@ -52,7 +56,7 @@ def main(hs, room_id, access_token, user_id_prefix, why):
print("The following user IDs will be kicked from %s" % room_name)
for uid in kick_list:
print(uid)
doit = input("Continue? [Y]es\n")
doit = raw_input("Continue? [Y]es\n")
if len(doit) > 0 and doit.lower() == "y":
print("Kicking members...")
# encode them all


@@ -42,7 +42,7 @@ dh_virtualenv \
--preinstall="mock" \
--extra-pip-arg="--no-cache-dir" \
--extra-pip-arg="--compile" \
--extras="all,systemd,test"
--extras="all,systemd"
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"

debian/changelog

@@ -1,153 +1,3 @@
matrix-synapse-py3 (1.25.0ubuntu1) UNRELEASED; urgency=medium
* Remove dependency on `python3-distutils`.
-- Richard van der Hoff <richard@matrix.org> Fri, 15 Jan 2021 12:44:19 +0000
matrix-synapse-py3 (1.25.0) stable; urgency=medium
[ Dan Callahan ]
* Update dependencies to account for the removal of the transitional
dh-systemd package from Debian Bullseye.
[ Synapse Packaging team ]
* New synapse release 1.25.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 13 Jan 2021 10:14:55 +0000
matrix-synapse-py3 (1.24.0) stable; urgency=medium
* New synapse release 1.24.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 09 Dec 2020 10:14:30 +0000
matrix-synapse-py3 (1.23.1) stable; urgency=medium
* New synapse release 1.23.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 09 Dec 2020 10:40:39 +0000
matrix-synapse-py3 (1.23.0) stable; urgency=medium
* New synapse release 1.23.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 18 Nov 2020 11:41:28 +0000
matrix-synapse-py3 (1.22.1) stable; urgency=medium
* New synapse release 1.22.1.
-- Synapse Packaging team <packages@matrix.org> Fri, 30 Oct 2020 15:25:37 +0000
matrix-synapse-py3 (1.22.0) stable; urgency=medium
* New synapse release 1.22.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 27 Oct 2020 12:07:12 +0000
matrix-synapse-py3 (1.21.2) stable; urgency=medium
[ Synapse Packaging team ]
* New synapse release 1.21.2.
-- Synapse Packaging team <packages@matrix.org> Thu, 15 Oct 2020 09:23:27 -0400
matrix-synapse-py3 (1.21.1) stable; urgency=medium
[ Synapse Packaging team ]
* New synapse release 1.21.1.
[ Andrew Morgan ]
* Explicitly install "test" python dependencies.
-- Synapse Packaging team <packages@matrix.org> Tue, 13 Oct 2020 10:24:13 +0100
matrix-synapse-py3 (1.21.0) stable; urgency=medium
* New synapse release 1.21.0.
-- Synapse Packaging team <packages@matrix.org> Mon, 12 Oct 2020 15:47:44 +0100
matrix-synapse-py3 (1.20.1) stable; urgency=medium
* New synapse release 1.20.1.
-- Synapse Packaging team <packages@matrix.org> Thu, 24 Sep 2020 16:25:22 +0100
matrix-synapse-py3 (1.20.0) stable; urgency=medium
[ Synapse Packaging team ]
* New synapse release 1.20.0.
[ Dexter Chua ]
* Use Type=notify in systemd service
-- Synapse Packaging team <packages@matrix.org> Tue, 22 Sep 2020 15:19:32 +0100
matrix-synapse-py3 (1.19.3) stable; urgency=medium
* New synapse release 1.19.3.
-- Synapse Packaging team <packages@matrix.org> Fri, 18 Sep 2020 14:59:30 +0100
matrix-synapse-py3 (1.19.2) stable; urgency=medium
* New synapse release 1.19.2.
-- Synapse Packaging team <packages@matrix.org> Wed, 16 Sep 2020 12:50:30 +0100
matrix-synapse-py3 (1.19.1) stable; urgency=medium
* New synapse release 1.19.1.
-- Synapse Packaging team <packages@matrix.org> Thu, 27 Aug 2020 10:50:19 +0100
matrix-synapse-py3 (1.19.0) stable; urgency=medium
[ Synapse Packaging team ]
* New synapse release 1.19.0.
[ Aaron Raimist ]
* Fix outdated documentation for SYNAPSE_CACHE_FACTOR
-- Synapse Packaging team <packages@matrix.org> Mon, 17 Aug 2020 14:06:42 +0100
matrix-synapse-py3 (1.18.0) stable; urgency=medium
* New synapse release 1.18.0.
-- Synapse Packaging team <packages@matrix.org> Thu, 30 Jul 2020 10:55:53 +0100
matrix-synapse-py3 (1.17.0) stable; urgency=medium
* New synapse release 1.17.0.
-- Synapse Packaging team <packages@matrix.org> Mon, 13 Jul 2020 10:20:31 +0100
matrix-synapse-py3 (1.16.1) stable; urgency=medium
* New synapse release 1.16.1.
-- Synapse Packaging team <packages@matrix.org> Fri, 10 Jul 2020 12:09:24 +0100
matrix-synapse-py3 (1.17.0rc1) stable; urgency=medium
* New synapse release 1.17.0rc1.
-- Synapse Packaging team <packages@matrix.org> Thu, 09 Jul 2020 16:53:12 +0100
matrix-synapse-py3 (1.16.0) stable; urgency=medium
* New synapse release 1.16.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 08 Jul 2020 11:03:48 +0100
matrix-synapse-py3 (1.15.2) stable; urgency=medium
* New synapse release 1.15.2.
-- Synapse Packaging team <packages@matrix.org> Thu, 02 Jul 2020 10:34:00 -0400
matrix-synapse-py3 (1.15.1) stable; urgency=medium
* New synapse release 1.15.1.

debian/control

@@ -3,11 +3,9 @@ Section: contrib/python
Priority: extra
Maintainer: Synapse Packaging team <packages@matrix.org>
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
# TODO: Remove the dependency on dh-systemd after dropping support for Ubuntu xenial
# On all other supported releases, it's merely a transitional package which
# does nothing but depends on debhelper (> 9.20160709)
Build-Depends:
debhelper (>= 9.20160709) | dh-systemd,
debhelper (>= 9),
dh-systemd,
dh-virtualenv (>= 1.1),
libsystemd-dev,
libpq-dev,
@@ -31,6 +29,7 @@ Pre-Depends: dpkg (>= 1.16.1)
Depends:
adduser,
debconf,
python3-distutils|libpython3-stdlib (<< 3.6),
${misc:Depends},
${shlibs:Depends},
${synapse:pydepends},


@@ -1,2 +1,2 @@
# Specify environment variables used when running Synapse
# SYNAPSE_CACHE_FACTOR=0.5 (default)
# SYNAPSE_CACHE_FACTOR=1 (default)


@@ -2,7 +2,7 @@
Description=Synapse Matrix homeserver
[Service]
Type=notify
Type=simple
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=/etc/default/matrix-synapse

debian/synctl.ronn

@@ -46,20 +46,19 @@ Configuration file may be generated as follows:
## ENVIRONMENT
* `SYNAPSE_CACHE_FACTOR`:
Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in the future, but for now the easiest
way to either reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory constrained environments, or increased if performance starts to
degrade.
However, degraded performance due to a low cache factor, common on
machines with slow disks, often leads to explosions in memory use due
backlogged requests. In this case, reducing the cache factor will make
things worse. Instead, try increasing it drastically. 2.0 is a good
starting value.
Synapse's architecture is quite RAM hungry currently - a lot of
recent room data and metadata is deliberately cached in RAM in
order to speed up common requests. This will be improved in
future, but for now the easiest way to either reduce the RAM usage
(at the risk of slowing things down) is to set the
SYNAPSE_CACHE_FACTOR environment variable. Roughly speaking, a
SYNAPSE_CACHE_FACTOR of 1.0 will max out at around 3-4GB of
resident memory - this is what we currently run the matrix.org
on. The default setting is currently 0.1, which is probably around
a ~700MB footprint. You can dial it down further to 0.02 if
desired, which targets roughly ~512MB. Conversely you can dial it
up if you need performance for lots of users and have a box with a
lot of RAM.
## COPYRIGHT


@@ -30,8 +30,6 @@ for port in 8080 8081 8082; do
if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then
printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config
echo "public_baseurl: http://localhost:$port/" >> $DIR/etc/$port.config
echo 'enable_registration: true' >> $DIR/etc/$port.config
# Warning, this heredoc depends on the interaction of tabs and spaces. Please don't

demo/webserver.py

@@ -0,0 +1,59 @@
import argparse
import BaseHTTPServer
import os
import SimpleHTTPServer
import cgi, logging
from daemonize import Daemonize
class SimpleHTTPRequestHandlerWithPOST(SimpleHTTPServer.SimpleHTTPRequestHandler):
UPLOAD_PATH = "upload"
"""
Accept all post request as file upload
"""
def do_POST(self):
path = os.path.join(self.UPLOAD_PATH, os.path.basename(self.path))
length = self.headers["content-length"]
data = self.rfile.read(int(length))
with open(path, "wb") as fh:
fh.write(data)
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.end_headers()
# Return the absolute path of the uploaded file
self.wfile.write('{"url":"/%s"}' % path)
def setup():
parser = argparse.ArgumentParser()
parser.add_argument("directory")
parser.add_argument("-p", "--port", dest="port", type=int, default=8080)
parser.add_argument("-P", "--pid-file", dest="pid", default="web.pid")
args = parser.parse_args()
# Get absolute path to directory to serve, as daemonize changes to '/'
os.chdir(args.directory)
dr = os.getcwd()
httpd = BaseHTTPServer.HTTPServer(("", args.port), SimpleHTTPRequestHandlerWithPOST)
def run():
os.chdir(dr)
httpd.serve_forever()
daemon = Daemonize(
app="synapse-webclient", pid=args.pid, action=run, auto_close_fds=False
)
daemon.start()
if __name__ == "__main__":
setup()


@@ -11,42 +11,39 @@
# docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.6 .
#
ARG PYTHON_VERSION=3.8
ARG PYTHON_VERSION=3.7
###
### Stage 0: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
FROM docker.io/python:${PYTHON_VERSION}-alpine3.11 as builder
# install the OS build deps
RUN apt-get update && apt-get install -y \
build-essential \
libffi-dev \
libjpeg-dev \
libpq-dev \
libssl-dev \
libwebp-dev \
libxml++2.6-dev \
libxslt1-dev \
zlib1g-dev \
&& rm -rf /var/lib/apt/lists/*
# Build dependencies that are not available as wheels, to speed up rebuilds
RUN apk add \
build-base \
libffi-dev \
libjpeg-turbo-dev \
libressl-dev \
libxslt-dev \
linux-headers \
postgresql-dev \
zlib-dev
# build things which have slow build steps, before we copy synapse, so that
# the layer can be cached.
#
# (we really just care about caching a wheel here, as the "pip install" below
# will install them again.)
RUN pip install --prefix="/install" --no-warn-script-location \
frozendict \
jaeger-client \
opentracing \
# Match the version constraints of Synapse
"prometheus_client>=0.4.0" \
psycopg2 \
pycparser \
pyrsistent \
pyyaml \
simplejson \
threadloop \
thrift
cryptography \
msgpack-python \
pillow \
pynacl
# now install synapse and all of the python deps to /install.
COPY synapse /synapse/synapse/
COPY scripts /synapse/scripts/
COPY MANIFEST.in README.rst setup.py synctl /synapse/
@@ -58,16 +55,19 @@ RUN pip install --prefix="/install" --no-warn-script-location \
### Stage 1: runtime
###
FROM docker.io/python:${PYTHON_VERSION}-slim
FROM docker.io/python:${PYTHON_VERSION}-alpine3.11
RUN apt-get update && apt-get install -y \
curl \
gosu \
libjpeg62-turbo \
libpq5 \
libwebp6 \
xmlsec1 \
&& rm -rf /var/lib/apt/lists/*
# xmlsec is required for saml support
RUN apk add --no-cache --virtual .runtime_deps \
libffi \
libjpeg-turbo \
libressl \
libxslt \
libpq \
zlib \
su-exec \
tzdata \
xmlsec
COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py
@@ -78,6 +78,3 @@ VOLUME ["/data"]
EXPOSE 8008/tcp 8009/tcp 8448/tcp
ENTRYPOINT ["/start.py"]
HEALTHCHECK --interval=1m --timeout=5s \
CMD curl -fSs http://localhost:8008/health || exit 1


@@ -50,22 +50,17 @@ FROM ${distro}
ARG distro=""
ENV distro ${distro}
# Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
# http://bugs.python.org/issue19846
ENV LANG C.UTF-8
# Install the build dependencies
#
# NB: keep this list in sync with the list of build-deps in debian/control
# TODO: it would be nice to do that automatically.
# TODO: Remove the dh-systemd stanza after dropping support for Ubuntu xenial
# it's a transitional package on all other, more recent releases
RUN apt-get update -qq -o Acquire::Languages=none \
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
build-essential \
debhelper \
devscripts \
dh-systemd \
libsystemd-dev \
lsb-release \
pkg-config \
@@ -74,11 +69,7 @@ RUN apt-get update -qq -o Acquire::Languages=none \
python3-setuptools \
python3-venv \
sqlite3 \
libpq-dev \
xmlsec1 \
&& ( env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
dh-systemd || true )
libpq-dev
COPY --from=builder /dh-virtualenv_1.2~dev-1_all.deb /


@@ -83,7 +83,7 @@ docker logs synapse
If all is well, you should now be able to connect to http://localhost:8008 and
see a confirmation message.
The following environment variables are supported in `run` mode:
The following environment variables are supported in run mode:
* `SYNAPSE_CONFIG_DIR`: where additional config files are stored. Defaults to
`/data`.
@@ -94,35 +94,6 @@ The following environment variables are supported in `run` mode:
* `UID`, `GID`: the user and group id to run Synapse as. Defaults to `991`, `991`.
* `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`.
For more complex setups (e.g. for workers) you can also pass your args directly to synapse using `run` mode. For example like this:
```
docker run -d --name synapse \
--mount type=volume,src=synapse-data,dst=/data \
-p 8008:8008 \
matrixdotorg/synapse:latest run \
-m synapse.app.generic_worker \
--config-path=/data/homeserver.yaml \
--config-path=/data/generic_worker.yaml
```
If you do not provide `-m`, the value of the `SYNAPSE_WORKER` environment variable is used. If you do not provide at least one `--config-path` or `-c`, the value of the `SYNAPSE_CONFIG_PATH` environment variable is used instead.
## Generating an (admin) user
After synapse is running, you may wish to create a user via `register_new_matrix_user`.
This requires a `registration_shared_secret` to be set in your config file. Synapse
must be restarted to pick up this change.
You can then call the script:
```
docker exec -it synapse register_new_matrix_user http://localhost:8008 -c /data/homeserver.yaml --help
```
Remember to remove the `registration_shared_secret` and restart if you no longer need it.
## TLS support
The default configuration exposes a single HTTP port: http://localhost:8008. It
@@ -176,32 +147,3 @@ docker build -t matrixdotorg/synapse -f docker/Dockerfile .
You can choose to build a different docker image by changing the value of the `-f` flag to
point to another Dockerfile.
## Disabling the healthcheck
If you are using a non-standard port or tls inside docker you can disable the healthcheck
whilst running the above `docker run` commands.
```
--no-healthcheck
```
## Setting custom healthcheck on docker run
If you wish to point the healthcheck at a different port with docker command, add the following
```
--health-cmd 'curl -fSs http://localhost:1234/health'
```
## Setting the healthcheck in docker-compose file
You can add the following to set a custom healthcheck in a docker compose file.
You will need version >2.1 for this to work.
```
healthcheck:
test: ["CMD", "curl", "-fSs", "http://localhost:8008/health"]
interval: 1m
timeout: 10s
retries: 3
```


@@ -90,7 +90,7 @@ federation_rc_concurrent: 3
media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "50M" }}"
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}"
max_image_pixels: "32M"
dynamic_thumbnails: false
@@ -198,10 +198,12 @@ old_signing_keys: {}
key_refresh_interval: "1d" # 1 Day.
# The trusted servers to download signing keys from.
trusted_key_servers:
- server_name: matrix.org
verify_keys:
"ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
perspectives:
servers:
"matrix.org":
verify_keys:
"ed25519:auto":
key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
password_config:
enabled: true


@@ -4,10 +4,16 @@ formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse.storage.SQL:


@@ -120,7 +120,7 @@ def generate_config_from_template(config_dir, config_path, environ, ownership):
if ownership is not None:
subprocess.check_output(["chown", "-R", ownership, "/data"])
args = ["gosu", ownership] + args
args = ["su-exec", ownership] + args
subprocess.check_output(args)
@@ -172,14 +172,14 @@ def run_generate_config(environ, ownership):
# make sure that synapse has perms to write to the data dir.
subprocess.check_output(["chown", ownership, data_dir])
args = ["gosu", ownership] + args
os.execv("/usr/sbin/gosu", args)
args = ["su-exec", ownership] + args
os.execv("/sbin/su-exec", args)
else:
os.execv("/usr/local/bin/python", args)
def main(args, environ):
mode = args[1] if len(args) > 1 else "run"
mode = args[1] if len(args) > 1 else None
desired_uid = int(environ.get("UID", "991"))
desired_gid = int(environ.get("GID", "991"))
synapse_worker = environ.get("SYNAPSE_WORKER", "synapse.app.homeserver")
@@ -189,7 +189,7 @@ def main(args, environ):
ownership = "{}:{}".format(desired_uid, desired_gid)
if ownership is None:
log("Will not perform chmod/gosu as UserID already matches request")
log("Will not perform chmod/su-exec as UserID already matches request")
# In generate mode, generate a configuration and missing keys, then exit
if mode == "generate":
@@ -205,50 +205,39 @@ def main(args, environ):
config_dir, config_path, environ, ownership
)
if mode != "run":
if mode is not None:
error("Unknown execution mode '%s'" % (mode,))
args = args[2:]
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
if "-m" not in args:
args = ["-m", synapse_worker] + args
# if there are no config files passed to synapse, try adding the default file
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get(
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
)
if not os.path.exists(config_path):
if "SYNAPSE_SERVER_NAME" in environ:
error(
"""\
if not os.path.exists(config_path):
if "SYNAPSE_SERVER_NAME" in environ:
error(
"""\
Config file '%s' does not exist.
The synapse docker image no longer supports generating a config file on-the-fly
based on environment variables. You can migrate to a static config file by
running with 'migrate_config'. See the README for more details.
"""
% (config_path,)
)
error(
"Config file '%s' does not exist. You should either create a new "
"config file by running with the `generate` argument (and then edit "
"the resulting file before restarting) or specify the path to an "
"existing config file with the SYNAPSE_CONFIG_PATH variable."
% (config_path,)
)
args += ["--config-path", config_path]
error(
"Config file '%s' does not exist. You should either create a new "
"config file by running with the `generate` argument (and then edit "
"the resulting file before restarting) or specify the path to an "
"existing config file with the SYNAPSE_CONFIG_PATH variable."
% (config_path,)
)
log("Starting synapse with args " + " ".join(args))
log("Starting synapse with config file " + config_path)
args = ["python"] + args
args = ["python", "-m", synapse_worker, "--config-path", config_path]
if ownership is not None:
args = ["gosu", ownership] + args
os.execv("/usr/sbin/gosu", args)
args = ["su-exec", ownership] + args
os.execv("/sbin/su-exec", args)
else:
os.execv("/usr/local/bin/python", args)


@@ -10,16 +10,5 @@
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in INSTALL.md.
# Configuration options that take a time period can be set using a number
# followed by a letter. Letters have the following meanings:
# s = second
# m = minute
# h = hour
# d = day
# w = week
# y = year
# For example, setting redaction_retention_period: 5m would remove redacted
# messages from the database after 5 minutes, rather than 5 months.
################################################################################


@@ -12,14 +12,13 @@ introduced support for automatically provisioning certificates through
In [March 2019](https://community.letsencrypt.org/t/end-of-life-plan-for-acmev1/88430),
Let's Encrypt announced that they were deprecating version 1 of the ACME
protocol, with the plan to disable the use of it for new accounts in
November 2019, for new domains in June 2020, and for existing accounts and
domains in June 2021.
November 2019, and for existing accounts in June 2020.
Synapse doesn't currently support version 2 of the ACME protocol, which
means that:
* for existing installs, Synapse's built-in ACME support will continue
to work until June 2021.
to work until June 2020.
* for new installs, this feature will not work at all.
Either way, it is recommended to move from Synapse's ACME support


@@ -1,172 +0,0 @@
# Show reported events
This API returns information about reported events.
The api is:
```
GET /_synapse/admin/v1/event_reports?from=0&limit=10
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).
It returns a JSON body like the following:
```json
{
"event_reports": [
{
"event_id": "$bNUFCwGzWca1meCGkjp-zwslF-GfVcXukvRLI1_FaVY",
"id": 2,
"reason": "foo",
"score": -100,
"received_ts": 1570897107409,
"canonical_alias": "#alias1:matrix.org",
"room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org",
"name": "Matrix HQ",
"sender": "@foobar:matrix.org",
"user_id": "@foo:matrix.org"
},
{
"event_id": "$3IcdZsDaN_En-S1DF4EMCy3v4gNRKeOJs8W5qTOKj4I",
"id": 3,
"reason": "bar",
"score": -100,
"received_ts": 1598889612059,
"canonical_alias": "#alias2:matrix.org",
"room_id": "!eGvUQuTCkHGVwNMOjv:matrix.org",
"name": "Your room name here",
"sender": "@foobar:matrix.org",
"user_id": "@bar:matrix.org"
}
],
"next_token": 2,
"total": 4
}
```
To paginate, check for `next_token` and if present, call the endpoint again with `from`
set to the value of `next_token`. This will return a new page.
If the endpoint does not return a `next_token` then there are no more reports to
paginate through.
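
As an illustration of the pagination loop described above, here is a minimal Python sketch (not part of the admin API docs themselves). The homeserver base URL and the admin access token are placeholder assumptions; substitute your own values.

```python
import requests

BASE_URL = "https://matrix.example.com"  # assumed homeserver base URL
ADMIN_TOKEN = "ADMIN_ACCESS_TOKEN"       # assumed server admin access token


def fetch_all_event_reports(limit=100):
    """Collect every event report by following `next_token` until it is absent."""
    headers = {"Authorization": "Bearer %s" % ADMIN_TOKEN}
    params = {"from": 0, "limit": limit}
    reports = []
    while True:
        resp = requests.get(
            BASE_URL + "/_synapse/admin/v1/event_reports",
            headers=headers,
            params=params,
        )
        resp.raise_for_status()
        body = resp.json()
        reports.extend(body.get("event_reports", []))
        # No next_token in the response means there are no more pages.
        if "next_token" not in body:
            return reports
        params["from"] = body["next_token"]


if __name__ == "__main__":
    print("Fetched %d reports" % len(fetch_all_event_reports()))
```
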
**URL parameters:**
* `limit`: integer - Is optional but is used for pagination, denoting the maximum number
of items to return in this call. Defaults to `100`.
* `from`: integer - Is optional but used for pagination, denoting the offset in the
returned results. This should be treated as an opaque value and not explicitly set to
anything other than the return value of `next_token` from a previous call. Defaults to `0`.
* `dir`: string - Direction of event report order. Whether to fetch the most recent
first (`b`) or the oldest first (`f`). Defaults to `b`.
* `user_id`: string - Is optional and filters to only return users with user IDs that
contain this value. This is the user who reported the event and wrote the reason.
* `room_id`: string - Is optional and filters to only return rooms with room IDs that
contain this value.
**Response**
The following fields are returned in the JSON response body:
* `id`: integer - ID of event report.
* `received_ts`: integer - The timestamp (in milliseconds since the unix epoch) when this
report was sent.
* `room_id`: string - The ID of the room in which the event being reported is located.
* `name`: string - The name of the room.
* `event_id`: string - The ID of the reported event.
* `user_id`: string - This is the user who reported the event and wrote the reason.
* `reason`: string - Comment made by the `user_id` in this report. May be blank.
* `score`: integer - Content is reported based upon a negative score, where -100 is
"most offensive" and 0 is "inoffensive".
* `sender`: string - This is the ID of the user who sent the original message/event that
was reported.
* `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
have a canonical alias set.
* `next_token`: integer - Indication for pagination. See above.
* `total`: integer - Total number of event reports related to the query
(`user_id` and `room_id`).
# Show details of a specific event report
This API returns information about a specific event report.
The api is:
```
GET /_synapse/admin/v1/event_reports/<report_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).
It returns a JSON body like the following:
```jsonc
{
"event_id": "$bNUFCwGzWca1meCGkjp-zwslF-GfVcXukvRLI1_FaVY",
"event_json": {
"auth_events": [
"$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M",
"$oggsNXxzPFRE3y53SUNd7nsj69-QzKv03a1RucHu-ws"
],
"content": {
"body": "matrix.org: This Week in Matrix",
"format": "org.matrix.custom.html",
"formatted_body": "<strong>matrix.org</strong>:<br><a href=\"https://matrix.org/blog/\"><strong>This Week in Matrix</strong></a>",
"msgtype": "m.notice"
},
"depth": 546,
"hashes": {
"sha256": "xK1//xnmvHJIOvbgXlkI8eEqdvoMmihVDJ9J4SNlsAw"
},
"origin": "matrix.org",
"origin_server_ts": 1592291711430,
"prev_events": [
"$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M"
],
"prev_state": [],
"room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org",
"sender": "@foobar:matrix.org",
"signatures": {
"matrix.org": {
"ed25519:a_JaEG": "cs+OUKW/iHx5pEidbWxh0UiNNHwe46Ai9LwNz+Ah16aWDNszVIe2gaAcVZfvNsBhakQTew51tlKmL2kspXk/Dg"
}
},
"type": "m.room.message",
"unsigned": {
"age_ts": 1592291711430,
}
},
"id": <report_id>,
"reason": "foo",
"score": -100,
"received_ts": 1570897107409,
"canonical_alias": "#alias1:matrix.org",
"room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org",
"name": "Matrix HQ",
"sender": "@foobar:matrix.org",
"user_id": "@foo:matrix.org"
}
```
**URL parameters:**
* `report_id`: string - The ID of the event report.
**Response**
The following fields are returned in the JSON response body:
* `id`: integer - ID of event report.
* `received_ts`: integer - The timestamp (in milliseconds since the unix epoch) when this
report was sent.
* `room_id`: string - The ID of the room in which the event being reported is located.
* `name`: string - The name of the room.
* `event_id`: string - The ID of the reported event.
* `user_id`: string - This is the user who reported the event and wrote the reason.
* `reason`: string - Comment made by the `user_id` in this report. May be blank.
* `score`: integer - Content is reported based upon a negative score, where -100 is
"most offensive" and 0 is "inoffensive".
* `sender`: string - This is the ID of the user who sent the original message/event that
was reported.
* `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
have a canonical alias set.
* `event_json`: object - Details of the original event that was reported.

View File

@@ -1,19 +1,6 @@
# Contents
- [List all media in a room](#list-all-media-in-a-room)
- [Quarantine media](#quarantine-media)
* [Quarantining media by ID](#quarantining-media-by-id)
* [Quarantining media in a room](#quarantining-media-in-a-room)
* [Quarantining all media of a user](#quarantining-all-media-of-a-user)
* [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
- [Delete local media](#delete-local-media)
* [Delete a specific local media](#delete-a-specific-local-media)
* [Delete local media by date or size](#delete-local-media-by-date-or-size)
- [Purge Remote Media API](#purge-remote-media-api)
# List all media in a room
This API gets a list of known media in a room.
However, it only shows media from unencrypted events or rooms.
The API is:
```
@@ -23,16 +10,16 @@ To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).
The API returns a JSON body like the following:
```json
```
{
"local": [
"mxc://localhost/xwvutsrqponmlkjihgfedcba",
"mxc://localhost/abcdefghijklmnopqrstuvwx"
],
"remote": [
"mxc://matrix.org/xwvutsrqponmlkjihgfedcba",
"mxc://matrix.org/abcdefghijklmnopqrstuvwx"
]
"local": [
"mxc://localhost/xwvutsrqponmlkjihgfedcba",
"mxc://localhost/abcdefghijklmnopqrstuvwx"
],
"remote": [
"mxc://matrix.org/xwvutsrqponmlkjihgfedcba",
"mxc://matrix.org/abcdefghijklmnopqrstuvwx"
]
}
```
@@ -60,7 +47,7 @@ form of `abcdefg12345...`.
Response:
```json
```
{}
```
@@ -80,18 +67,14 @@ Where `room_id` is in the form of `!roomid12345:example.org`.
Response:
```json
```
{
"num_quarantined": 10
"num_quarantined": 10 # The number of media items successfully quarantined
}
```
The following fields are returned in the JSON response body:
* `num_quarantined`: integer - The number of media items successfully quarantined
Note that there is a legacy endpoint, `POST
/_synapse/admin/v1/quarantine_media/<room_id>`, that operates the same.
/_synapse/admin/v1/quarantine_media/<room_id >`, that operates the same.
However, it is deprecated and may be removed in a future release.
## Quarantining all media of a user
@@ -108,155 +91,12 @@ POST /_synapse/admin/v1/user/<user_id>/media/quarantine
{}
```
URL Parameters
* `user_id`: string - User ID in the form of `@bob:example.org`
Where `user_id` is in the form of `@bob:example.org`.
Response:
```json
```
{
"num_quarantined": 10
"num_quarantined": 10 # The number of media items successfully quarantined
}
```
The following fields are returned in the JSON response body:
* `num_quarantined`: integer - The number of media items successfully quarantined
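As a usage sketch, all media uploaded by a hypothetical user could be quarantined with `curl` (the user ID, access token and server address are placeholders):

```bash
curl --request POST --header "Authorization: Bearer <admin_access_token>" --data '{}' \
    "http://localhost:8008/_synapse/admin/v1/user/@bob:example.org/media/quarantine"
```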
## Protecting media from being quarantined
This API protects a single piece of local media from being quarantined using the
above APIs. This is useful for sticker packs and other shared media which you do
not want to get quarantined, especially when
[quarantining media in a room](#quarantining-media-in-a-room).
Request:
```
POST /_synapse/admin/v1/media/protect/<media_id>
{}
```
Where `media_id` is in the form of `abcdefg12345...`.
Response:
```json
{}
```
# Delete local media
This API deletes the *local* media from the disk of your own server.
This includes any local thumbnails and copies of media downloaded from
remote homeservers.
This API will not affect media that has been uploaded to external
media repositories (e.g https://github.com/turt2live/matrix-media-repo/).
See also [Purge Remote Media API](#purge-remote-media-api).
## Delete a specific local media
Delete a specific `media_id`.
Request:
```
DELETE /_synapse/admin/v1/media/<server_name>/<media_id>
{}
```
URL Parameters
* `server_name`: string - The name of your local server (e.g `matrix.org`)
* `media_id`: string - The ID of the media (e.g `abcdefghijklmnopqrstuvwx`)
Response:
```json
{
"deleted_media": [
"abcdefghijklmnopqrstuvwx"
],
"total": 1
}
```
The following fields are returned in the JSON response body:
* `deleted_media`: an array of strings - List of deleted `media_id`
* `total`: integer - Total number of deleted `media_id`
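For example, a single media item could be deleted with `curl` (the server name, media ID, access token and server address are placeholders):

```bash
curl --request DELETE --header "Authorization: Bearer <admin_access_token>" --data '{}' \
    "http://localhost:8008/_synapse/admin/v1/media/example.org/abcdefghijklmnopqrstuvwx"
```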
## Delete local media by date or size
Request:
```
POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
{}
```
URL Parameters
* `server_name`: string - The name of your local server (e.g `matrix.org`).
* `before_ts`: string representing a positive integer - Unix timestamp in ms.
Files that were last used before this timestamp will be deleted. It is the timestamp of
last access and not the timestamp creation.
* `size_gt`: Optional - string representing a positive integer - Size of the media in bytes.
Files that are larger will be deleted. Defaults to `0`.
* `keep_profiles`: Optional - string representing a boolean - Switch to also delete files
that are still used in image data (e.g user profile, room avatar).
If `false` these files will be deleted. Defaults to `true`.
Response:
```json
{
"deleted_media": [
"abcdefghijklmnopqrstuvwx",
"abcdefghijklmnopqrstuvwz"
],
"total": 2
}
```
The following fields are returned in the JSON response body:
* `deleted_media`: an array of strings - List of deleted `media_id`
* `total`: integer - Total number of deleted `media_id`
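As an illustration, the following `curl` call would delete local media last accessed before a given timestamp and larger than 100000 bytes, while keeping profile and avatar images (all values are placeholders):

```bash
curl --request POST --header "Authorization: Bearer <admin_access_token>" --data '{}' \
    "http://localhost:8008/_synapse/admin/v1/media/example.org/delete?before_ts=1609459200000&size_gt=100000&keep_profiles=true"
```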
# Purge Remote Media API
The purge remote media API allows server admins to purge old cached remote media.
The API is:
```
POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>
{}
```
URL Parameters
* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in ms.
All cached media that was last accessed before this timestamp will be removed.
Response:
```json
{
"deleted": 10
}
```
The following fields are returned in the JSON response body:
* `deleted`: integer - The number of media items successfully deleted
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).
If the user re-requests purged remote media, synapse will re-request the media
from the originating server.

View File

@@ -0,0 +1,20 @@
Purge Remote Media API
======================
The purge remote media API allows server admins to purge old cached remote
media.
The API is::
POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>
{}
\... which will remove all cached media that was last accessed before
``<unix_timestamp_in_ms>``.
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
If the user re-requests purged remote media, synapse will re-request the media
from the originating server.

View File

@@ -1,8 +1,5 @@
Deprecated: Purge room API
==========================
**The old Purge room API is deprecated and will be removed in a future release.
See the new [Delete Room API](rooms.md#delete-room-api) for more details.**
Purge room API
==============
This API will remove all trace of a room from your database.

View File

@@ -18,8 +18,7 @@ To fetch the nonce, you need to request one from the API::
Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and a HMAC digest of the content. Also you can
set the displayname (optional, ``username`` by default).
(optional, False by default), and a HMAC digest of the content.
As an example::
@@ -27,7 +26,6 @@ As an example::
> {
"nonce": "thisisanonce",
"username": "pepper_roni",
"displayname": "Pepper Roni",
"password": "pizza",
"admin": true,
"mac": "mac_digest_here"

View File

@@ -1,15 +1,3 @@
# Contents
- [List Room API](#list-room-api)
* [Parameters](#parameters)
* [Usage](#usage)
- [Room Details API](#room-details-api)
- [Room Members API](#room-members-api)
- [Delete Room API](#delete-room-api)
* [Parameters](#parameters-1)
* [Response](#response)
* [Undoing room shutdowns](#undoing-room-shutdowns)
- [Make Room Admin API](#make-room-admin-api)
# List Room API
The List Room admin API allows server admins to get a list of rooms on their
@@ -88,7 +76,7 @@ GET /_synapse/admin/v1/rooms
Response:
```jsonc
```
{
"rooms": [
{
@@ -140,7 +128,7 @@ GET /_synapse/admin/v1/rooms?search_term=TWIM
Response:
```json
```
{
"rooms": [
{
@@ -175,7 +163,7 @@ GET /_synapse/admin/v1/rooms?order_by=size
Response:
```jsonc
```
{
"rooms": [
{
@@ -231,14 +219,14 @@ GET /_synapse/admin/v1/rooms?order_by=size&from=100
Response:
```jsonc
```
{
"rooms": [
{
"room_id": "!mscvqgqpHYjBGDxNym:matrix.org",
"name": "Music Theory",
"canonical_alias": "#musictheory:matrix.org",
"joined_members": 127,
"joined_members": 127
"joined_local_members": 2,
"version": "1",
"creator": "@foo:matrix.org",
@@ -255,7 +243,7 @@ Response:
"room_id": "!twcBhHVdZlQWuuxBhN:termina.org.uk",
"name": "weechat-matrix",
"canonical_alias": "#weechat-matrix:termina.org.uk",
"joined_members": 137,
"joined_members": 137
"joined_local_members": 20,
"version": "4",
"creator": "@foo:termina.org.uk",
@@ -277,20 +265,19 @@ Response:
Once the `next_token` parameter is no longer present, we know we've reached the
end of the list.
# Room Details API
# DRAFT: Room Details API
The Room Details admin API allows server admins to get all details of a room.
This API is still a draft and details might change!
The following fields are possible in the JSON response body:
* `room_id` - The ID of the room.
* `name` - The name of the room.
* `topic` - The topic of the room.
* `avatar` - The `mxc` URI to the avatar of the room.
* `canonical_alias` - The canonical (main) alias address of the room.
* `joined_members` - How many users are currently in the room.
* `joined_local_members` - How many local users are currently in the room.
* `joined_local_devices` - How many local devices are currently in the room.
* `version` - The version of the room as a string.
* `creator` - The `user_id` of the room creator.
* `encryption` - Algorithm of end-to-end encryption of messages. Is `null` if encryption is not active.
@@ -313,16 +300,13 @@ GET /_synapse/admin/v1/rooms/<room_id>
Response:
```json
```
{
"room_id": "!mscvqgqpHYjBGDxNym:matrix.org",
"name": "Music Theory",
"avatar": "mxc://matrix.org/AQDaVFlbkQoErdOgqWRgiGSV",
"topic": "Theory, Composition, Notation, Analysis",
"canonical_alias": "#musictheory:matrix.org",
"joined_members": 127,
"joined_members": 127
"joined_local_members": 2,
"joined_local_devices": 2,
"version": "1",
"creator": "@foo:matrix.org",
"encryption": null,
@@ -334,180 +318,3 @@ Response:
"state_events": 93534
}
```
# Room Members API
The Room Members admin API allows server admins to get a list of all members of a room.
The response includes the following fields:
* `members` - A list of all the members that are present in the room, represented by their ids.
* `total` - Total number of members in the room.
## Usage
A standard request:
```
GET /_synapse/admin/v1/rooms/<room_id>/members
{}
```
Response:
```json
{
"members": [
"@foo:matrix.org",
"@bar:matrix.org",
"@foobar:matrix.org"
],
"total": 3
}
```
# Delete Room API
The Delete Room admin API allows server admins to remove rooms from server
and block these rooms.
Shuts down a room. Moves all local users and room aliases automatically to a
new room if `new_room_user_id` is set. Otherwise local users are simply made to
leave the room without any further information.
The new room will be created with the user specified by the `new_room_user_id` parameter
as room administrator and will contain a message explaining what happened. Users invited
to the new room will have power level `-10` by default, and thus be unable to speak.
If `block` is `True` it prevents new joins to the old room.
If `purge` is `true` (the default), all traces of the old room will
be removed from your database after removing all local users. If you do not want
this to happen, set `purge` to `false`.
Depending on the amount of history being purged a call to the API may take
several minutes or longer.
The local server will only have the power to move local users and room aliases to
the new room. Users on other servers will be unaffected.
The API is:
```
POST /_synapse/admin/v1/rooms/<room_id>/delete
```
with a body of:
```json
{
"new_room_user_id": "@someuser:example.com",
"room_name": "Content Violation Notification",
"message": "Bad Room has been shutdown due to content violations on this server. Please review our Terms of Service.",
"block": true,
"purge": true
}
```
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see [README.rst](README.rst).
A response body like the following is returned:
```json
{
"kicked_users": [
"@foobar:example.com"
],
"failed_to_kick_users": [],
"local_aliases": [
"#badroom:example.com",
"#evilsaloon:example.com"
],
"new_room_id": "!newroomid:example.com"
}
```
## Parameters
The following parameters should be set in the URL:
* `room_id` - The ID of the room.
The following JSON body parameters are available:
* `new_room_user_id` - Optional. If set, a new room will be created with this user ID
as the creator and admin, and all users in the old room will be moved into that
room. If not set, no new room will be created and the users will just be removed
from the old room. The user ID must be on the local server, but does not necessarily
have to belong to a registered user.
* `room_name` - Optional. A string representing the name of the room that new users will be
invited to. Defaults to `Content Violation Notification`
* `message` - Optional. A string containing the first message that will be sent as
`new_room_user_id` in the new room. Ideally this will clearly convey why the
original room was shut down. Defaults to `Sharing illegal content on this server
is not permitted and rooms in violation will be blocked.`
* `block` - Optional. If set to `true`, this room will be added to a blocking list, preventing
future attempts to join the room. Defaults to `false`.
* `purge` - Optional. If set to `true`, it will remove all traces of the room from your database.
Defaults to `true`.
* `force_purge` - Optional, and ignored unless `purge` is `true`. If set to `true`, it
will force a purge to go ahead even if there are local users still in the room. Do not
use this unless a regular `purge` operation fails, as it could leave those users'
clients in a confused state.
The JSON body must not be empty. The body must be at least `{}`.
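Putting this together, a shutdown request could be issued with `curl` as follows (the room ID, access token and server address are placeholders):

```bash
curl --request POST --header "Authorization: Bearer <admin_access_token>" \
    --data '{"new_room_user_id": "@someuser:example.com", "block": true, "purge": true}' \
    'http://localhost:8008/_synapse/admin/v1/rooms/!badroom:example.com/delete'
```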
## Response
The following fields are returned in the JSON response body:
* `kicked_users` - An array of users (`user_id`) that were kicked.
* `failed_to_kick_users` - An array of users (`user_id`) that were not kicked.
* `local_aliases` - An array of strings representing the local aliases that were migrated from
the old room to the new.
* `new_room_id` - A string representing the room ID of the new room.
## Undoing room shutdowns
*Note*: This guide may be outdated by the time you read it. By nature of room shutdowns being performed at the database level,
the structure can and does change without notice.
First, it's important to understand that a room shutdown is very destructive. Undoing a shutdown is not as simple as pretending it
never happened - work has to be done to move forward instead of resetting the past. In fact, in some cases it might not be possible
to recover at all:
* If the room was invite-only, your users will need to be re-invited.
* If the room no longer has any members at all, it'll be impossible to rejoin.
* The first user to rejoin will have to do so via an alias on a different server.
With all that being said, if you still want to try and recover the room:
1. For safety reasons, shut down Synapse.
2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
* For caution: it's recommended to run this in a transaction: `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;`.
* The room ID is the same one supplied to the shutdown room API, not the Content Violation room.
3. Restart Synapse.
You will have to manually handle, if you so choose, the following:
* Aliases that would have been redirected to the Content Violation room.
* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
* Removal of the Content Violation room if desired.
# Make Room Admin API
Grants another user the highest power available to a local user who is in the room.
If the user is not in the room, and it is not publicly joinable, then invite the user.
By default the server admin (the caller) is granted power, but another user can
optionally be specified, e.g.:
```
POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
{
"user_id": "@foo:example.com"
}
```
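If a room alias is used instead of a room ID, remember that the `#` must be percent-encoded in the URL; a sketch with `curl` (the alias, user ID, access token and server address are placeholders):

```bash
curl --request POST --header "Authorization: Bearer <admin_access_token>" \
    --data '{"user_id": "@foo:example.com"}' \
    "http://localhost:8008/_synapse/admin/v1/rooms/%23badroom%3Aexample.com/make_room_admin"
```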

View File

@@ -1,7 +1,4 @@
# Deprecated: Shutdown room API
**The old Shutdown room API is deprecated and will be removed in a future release.
See the new [Delete Room API](rooms.md#delete-room-api) for more details.**
# Shutdown room API
Shuts down a room, preventing new joins and moves local users and room aliases automatically
to a new room. The new room will be created with the user specified by the
@@ -34,7 +31,7 @@ You will need to authenticate with an access token for an admin user.
* `message` - Optional. A string containing the first message that will be sent as
`new_room_user_id` in the new room. Ideally this will clearly convey why the
original room was shut down.
If not specified, the default value of `room_name` is "Content Violation
Notification". The default value of `message` is "Sharing illegal content on
this server is not permitted and rooms in violation will be blocked."
@@ -73,30 +70,3 @@ Response:
"new_room_id": "!newroomid:example.com",
},
```
## Undoing room shutdowns
*Note*: This guide may be outdated by the time you read it. By nature of room shutdowns being performed at the database level,
the structure can and does change without notice.
First, it's important to understand that a room shutdown is very destructive. Undoing a shutdown is not as simple as pretending it
never happened - work has to be done to move forward instead of resetting the past. In fact, in some cases it might not be possible
to recover at all:
* If the room was invite-only, your users will need to be re-invited.
* If the room no longer has any members at all, it'll be impossible to rejoin.
* The first user to rejoin will have to do so via an alias on a different server.
With all that being said, if you still want to try and recover the room:
1. For safety reasons, shut down Synapse.
2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
* For caution: it's recommended to run this in a transaction: `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;`.
* The room ID is the same one supplied to the shutdown room API, not the Content Violation room.
3. Restart Synapse.
You will have to manually handle, if you so choose, the following:
* Aliases that would have been redirected to the Content Violation room.
* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
* Removal of the Content Violation room if desired.

View File

@@ -1,83 +0,0 @@
# Users' media usage statistics
Returns information about all local media usage of users. Gives the
possibility to filter them by time and user.
The API is:
```
GET /_synapse/admin/v1/statistics/users/media
```
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [README.rst](README.rst).
A response body like the following is returned:
```json
{
"users": [
{
"displayname": "foo_user_0",
"media_count": 2,
"media_length": 134,
"user_id": "@foo_user_0:test"
},
{
"displayname": "foo_user_1",
"media_count": 2,
"media_length": 134,
"user_id": "@foo_user_1:test"
}
],
"next_token": 3,
"total": 10
}
```
To paginate, check for `next_token` and if present, call the endpoint
again with `from` set to the value of `next_token`. This will return a new page.
If the endpoint does not return a `next_token` then there are no more
reports to paginate through.
**Parameters**
The following parameters should be set in the URL:
* `limit`: string representing a positive integer - Is optional but is
used for pagination, denoting the maximum number of items to return
in this call. Defaults to `100`.
* `from`: string representing a positive integer - Is optional but used for pagination,
denoting the offset in the returned results. This should be treated as an opaque value
and not explicitly set to anything other than the return value of `next_token` from a
previous call. Defaults to `0`.
* `order_by` - string - The method in which to sort the returned list of users. Valid values are:
- `user_id` - Users are ordered alphabetically by `user_id`. This is the default.
- `displayname` - Users are ordered alphabetically by `displayname`.
- `media_length` - Users are ordered by the total size of uploaded media in bytes.
Smallest to largest.
- `media_count` - Users are ordered by number of uploaded media. Smallest to largest.
* `from_ts` - string representing a positive integer - Considers only
files created at this timestamp or later. Unix timestamp in ms.
* `until_ts` - string representing a positive integer - Considers only
files created at this timestamp or earlier. Unix timestamp in ms.
* `search_term` - string - Filter users by their user ID localpart **or** displayname.
The search term can be found in any part of the string.
Defaults to no filtering.
* `dir` - string - Direction of order. Either `f` for forwards or `b` for backwards.
Setting this value to `b` will reverse the above sort order. Defaults to `f`.
**Response**
The following fields are returned in the JSON response body:
* `users` - An array of objects, each containing information
about the user and their local media. Objects contain the following fields:
- `displayname` - string - Displayname of this user.
- `media_count` - integer - Number of uploaded media by this user.
- `media_length` - integer - Size of uploaded media in bytes by this user.
- `user_id` - string - Fully-qualified user ID (ex. `@user:server.com`).
* `next_token` - integer - Opaque value used for pagination. See above.
* `total` - integer - Total number of users after filtering.
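For instance, the ten users with the largest total upload size since a given timestamp could be listed with `curl` (all values are placeholders):

```bash
curl --header "Authorization: Bearer <admin_access_token>" \
    "http://localhost:8008/_synapse/admin/v1/statistics/users/media?order_by=media_length&dir=b&limit=10&from_ts=1609459200000"
```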

View File

@@ -30,12 +30,7 @@ It returns a JSON body like the following:
],
"avatar_url": "<avatar_url>",
"admin": false,
"deactivated": false,
"password_hash": "$2b$12$p9B4GkqYdRTPGD",
"creation_ts": 1560432506,
"appservice_id": null,
"consent_server_notice_sent": null,
"consent_version": null
"deactivated": false
}
URL parameters:
@@ -96,16 +91,10 @@ Body parameters:
- ``admin``, optional, defaults to ``false``.
- ``deactivated``, optional. If unspecified, deactivation state will be left
unchanged on existing accounts and set to ``false`` for new accounts.
A user cannot be erased by deactivating with this API. For details on deactivating users see
`Deactivate Account <#deactivate-account>`_.
- ``deactivated``, optional, defaults to ``false``.
If the user already exists then optional parameters default to the current value.
In order to re-activate an account ``deactivated`` must be set to ``false``. If
users do not login via single-sign-on, a new ``password`` must be provided.
List Accounts
=============
@@ -115,7 +104,7 @@ The api is::
GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
To use it, you will need to authenticate by providing an ``access_token`` for a
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see `README.rst <README.rst>`_.
The parameter ``from`` is optional but used for pagination, denoting the
@@ -126,11 +115,8 @@ from a previous call.
The parameter ``limit`` is optional but is used for pagination, denoting the
maximum number of items to return in this call. Defaults to ``100``.
The parameter ``user_id`` is optional and filters to only return users with user IDs
that contain this value. This parameter is ignored when using the ``name`` parameter.
The parameter ``name`` is optional and filters to only return users with user ID localparts
**or** displaynames that contain this value.
The parameter ``user_id`` is optional and filters to only users with user IDs
that contain this value.
The parameter ``guests`` is optional and if ``false`` will **exclude** guest users.
Defaults to ``true`` to include guest users.
@@ -146,6 +132,7 @@ A JSON body is returned with the following shape:
"users": [
{
"name": "<user_id1>",
"password_hash": "<password_hash1>",
"is_guest": 0,
"admin": 0,
"user_type": null,
@@ -154,6 +141,7 @@ A JSON body is returned with the following shape:
"avatar_url": null
}, {
"name": "<user_id2>",
"password_hash": "<password_hash2>",
"is_guest": 0,
"admin": 1,
"user_type": null,
@@ -181,13 +169,6 @@ The api is::
GET /_synapse/admin/v1/whois/<user_id>
and::
GET /_matrix/client/r0/admin/whois/<userId>
See also: `Client Server API Whois
<https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
@@ -226,11 +207,9 @@ Deactivate Account
This API deactivates an account. It removes active access tokens, resets the
password, and deletes third-party IDs (to prevent the user requesting a
password reset).
It can also mark the user as GDPR-erased. This means messages sent by the
user will still be visible by anyone that was in the room when these messages
were sent, but hidden from users joining the room afterwards.
password reset). It can also mark the user as GDPR-erased (stopping their data
from distributed further, and deleting it entirely if there are no other
references to it).
The api is::
@@ -250,25 +229,6 @@ server admin: see `README.rst <README.rst>`_.
The erase parameter is optional and defaults to ``false``.
An empty body may be passed for backwards compatibility.
The following actions are performed when deactivating an user:
- Try to unbind 3PIDs from the identity server
- Remove all 3PIDs from the homeserver
- Delete all devices and E2EE keys
- Delete all access tokens
- Delete the password hash
- Removal from all rooms the user is a member of
- Remove the user from the user directory
- Reject all pending invites
- Remove all account validity information related to the user
The following additional actions are performed during deactivation if ``erase``
is set to ``true``:
- Remove the user's display name
- Remove the user's avatar URL
- Mark the user as erased
Reset password
==============
@@ -285,7 +245,7 @@ with a body of:
{
"new_password": "<secret>",
"logout_devices": true
"logout_devices": true,
}
To use it, you will need to authenticate by providing an ``access_token`` for a
@@ -335,165 +295,6 @@ To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
List room memberships of a user
================================
Gets a list of all ``room_id`` that a specific ``user_id`` is a member of.
The API is::
GET /_synapse/admin/v1/users/<user_id>/joined_rooms
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
A response body like the following is returned:
.. code:: json
{
"joined_rooms": [
"!DuGcnbhHGaSZQoNQR:matrix.org",
"!ZtSaPCawyWtxfWiIy:matrix.org"
],
"total": 2
}
The server returns the list of rooms of which the user and the server
are members. If the user is local, all the rooms of which the user is
a member are returned.
**Parameters**
The following parameters should be set in the URL:
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
**Response**
The following fields are returned in the JSON response body:
- ``joined_rooms`` - An array of ``room_id``.
- ``total`` - Number of rooms.
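For example, with ``curl`` (the user ID, access token and server address are placeholders)::

    curl --header "Authorization: Bearer <admin_access_token>" \
        "http://localhost:8008/_synapse/admin/v1/users/@user:server.com/joined_rooms"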
List media of a user
================================
Gets a list of all local media that a specific ``user_id`` has created.
The response is ordered by creation date descending and media ID descending.
The newest media is on top.
The API is::
GET /_synapse/admin/v1/users/<user_id>/media
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
A response body like the following is returned:
.. code:: json
{
"media": [
{
"created_ts": 100400,
"last_access_ts": null,
"media_id": "qXhyRzulkwLsNHTbpHreuEgo",
"media_length": 67,
"media_type": "image/png",
"quarantined_by": null,
"safe_from_quarantine": false,
"upload_name": "test1.png"
},
{
"created_ts": 200400,
"last_access_ts": null,
"media_id": "FHfiSnzoINDatrXHQIXBtahw",
"media_length": 67,
"media_type": "image/png",
"quarantined_by": null,
"safe_from_quarantine": false,
"upload_name": "test2.png"
}
],
"next_token": 3,
"total": 2
}
To paginate, check for ``next_token`` and if present, call the endpoint again
with ``from`` set to the value of ``next_token``. This will return a new page.
If the endpoint does not return a ``next_token`` then there are no more
reports to paginate through.
**Parameters**
The following parameters should be set in the URL:
- ``user_id`` - string - fully qualified: for example, ``@user:server.com``.
- ``limit``: string representing a positive integer - Is optional but is used for pagination,
denoting the maximum number of items to return in this call. Defaults to ``100``.
- ``from``: string representing a positive integer - Is optional but used for pagination,
denoting the offset in the returned results. This should be treated as an opaque value and
not explicitly set to anything other than the return value of ``next_token`` from a previous call.
Defaults to ``0``.
**Response**
The following fields are returned in the JSON response body:
- ``media`` - An array of objects, each containing information about a media.
Media objects contain the following fields:
- ``created_ts`` - integer - Timestamp when the content was uploaded in ms.
- ``last_access_ts`` - integer - Timestamp when the content was last accessed in ms.
- ``media_id`` - string - The id used to refer to the media.
- ``media_length`` - integer - Length of the media in bytes.
- ``media_type`` - string - The MIME-type of the media.
- ``quarantined_by`` - string - The user ID that initiated the quarantine request
for this media.
- ``safe_from_quarantine`` - bool - Status if this media is safe from quarantining.
- ``upload_name`` - string - The name the media was uploaded with.
- ``next_token``: integer - Indication for pagination. See above.
- ``total`` - integer - Total number of media.
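As a usage sketch, the second page of a user's media could be fetched like this (the user ID, access token and server address are placeholders)::

    curl --header "Authorization: Bearer <admin_access_token>" \
        "http://localhost:8008/_synapse/admin/v1/users/@user:server.com/media?from=100&limit=100"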
Login as a user
===============
Get an access token that can be used to authenticate as that user. Useful for
when admins wish to do actions on behalf of a user.
The API is::
POST /_synapse/admin/v1/users/<user_id>/login
{}
An optional ``valid_until_ms`` field can be specified in the request body as an
integer timestamp that specifies when the token should expire. By default tokens
do not expire.
A response body like the following is returned:
.. code:: json
{
"access_token": "<opaque_access_token_string>"
}
This API does *not* generate a new device for the user, and so will not appear
in their ``/devices`` list; in general the target user should not be able to
tell that they have been logged in as.
To expire the token call the standard ``/logout`` API with the token.
Note: The token will expire if the *admin* user calls ``/logout/all`` from any
of their devices, but the token will *not* expire if the target user does the
same.
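For example, a token expiring at a specific time could be requested with ``curl`` (the timestamp, user ID, access token and server address are placeholders)::

    curl --request POST --header "Authorization: Bearer <admin_access_token>" \
        --data '{"valid_until_ms": 1625097600000}' \
        "http://localhost:8008/_synapse/admin/v1/users/@user:server.com/login"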
User devices
============
@@ -528,8 +329,7 @@ A response body like the following is returned:
"last_seen_ts": 1474491775025,
"user_id": "<user_id>"
}
],
"total": 2
]
}
**Parameters**
@@ -554,8 +354,6 @@ The following fields are returned in the JSON response body:
devices was last seen. (May be a few minutes out of date, for efficiency reasons).
- ``user_id`` - Owner of device.
- ``total`` - Total number of user's devices.
Delete multiple devices
-----------------------
Deletes the given devices for a specific ``user_id``, and invalidates
@@ -681,82 +479,3 @@ The following parameters should be set in the URL:
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
- ``device_id`` - The device to delete.
List all pushers
================
Gets information about all pushers for a specific ``user_id``.
The API is::
GET /_synapse/admin/v1/users/<user_id>/pushers
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
A response body like the following is returned:
.. code:: json
{
"pushers": [
{
"app_display_name":"HTTP Push Notifications",
"app_id":"m.http",
"data": {
"url":"example.com"
},
"device_display_name":"pushy push",
"kind":"http",
"lang":"None",
"profile_tag":"",
"pushkey":"a@example.com"
}
],
"total": 1
}
**Parameters**
The following parameters should be set in the URL:
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
**Response**
The following fields are returned in the JSON response body:
- ``pushers`` - An array containing the current pushers for the user
- ``app_display_name`` - string - A string that will allow the user to identify
what application owns this pusher.
- ``app_id`` - string - This is a reverse-DNS style identifier for the application.
Max length, 64 chars.
- ``data`` - A dictionary of information for the pusher implementation itself.
- ``url`` - string - Required if ``kind`` is ``http``. The URL to use to send
notifications to.
- ``format`` - string - The format to use when sending notifications to the
Push Gateway.
- ``device_display_name`` - string - A string that will allow the user to identify
what device owns this pusher.
- ``profile_tag`` - string - This string determines which set of device specific rules
this pusher executes.
- ``kind`` - string - The kind of pusher. "http" is a pusher that sends HTTP pokes.
- ``lang`` - string - The preferred language for receiving notifications
(e.g. 'en' or 'en-US')
- ``pushkey`` - string - This is a unique identifier for this pusher.
Max length, 512 bytes.
- ``total`` - integer - Number of pushers.
See also `Client-Server API Spec <https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers>`_

View File

@@ -1,32 +0,0 @@
digraph auth {
nodesep=0.5;
rankdir="RL";
C [label="Create (1,1)"];
BJ [label="Bob's Join (2,1)", color=red];
BJ2 [label="Bob's Join (2,2)", color=red];
BJ2 -> BJ [color=red, dir=none];
subgraph cluster_foo {
A1 [label="Alice's invite (4,1)", color=blue];
A2 [label="Alice's Join (4,2)", color=blue];
A3 [label="Alice's Join (4,3)", color=blue];
A3 -> A2 -> A1 [color=blue, dir=none];
color=none;
}
PL1 [label="Power Level (3,1)", color=darkgreen];
PL2 [label="Power Level (3,2)", color=darkgreen];
PL2 -> PL1 [color=darkgreen, dir=none];
{rank = same; C; BJ; PL1; A1;}
A1 -> C [color=grey];
A1 -> BJ [color=grey];
PL1 -> C [color=grey];
BJ2 -> PL1 [penwidth=2];
A3 -> PL2 [penwidth=2];
A1 -> PL1 -> BJ -> C [penwidth=2];
}

Binary file not shown.


View File

@@ -1,108 +0,0 @@
# Auth Chain Difference Algorithm
The auth chain difference algorithm is used by V2 state resolution, where a
naive implementation can be a significant source of CPU and DB usage.
### Definitions
A *state set* is a set of state events; e.g. the input of a state resolution
algorithm is a collection of state sets.
The *auth chain* of a set of events are all the events' auth events and *their*
auth events, recursively (i.e. the events reachable by walking the graph induced
by an event's auth events links).
The *auth chain difference* of a collection of state sets is the union minus the
intersection of the sets of auth chains corresponding to the state sets, i.e an
event is in the auth chain difference if it is reachable by walking the auth
event graph from at least one of the state sets but not from *all* of the state
sets.
## Breadth First Walk Algorithm
A way of calculating the auth chain difference without calculating the full auth
chains for each state set is to do a parallel breadth first walk (ordered by
depth) of each state set's auth chain. By tracking which events are reachable
from each state set we can finish early if every pending event is reachable from
every state set.
This can work well for state sets that have a small auth chain difference, but
can be very inefficient for larger differences. However, this algorithm is still
used if we don't have a chain cover index for the room (e.g. because we're in
the process of indexing it).
## Chain Cover Index
Synapse computes auth chain differences by pre-computing a "chain cover" index
for the auth chain in a room, allowing efficient reachability queries like "is
event A in the auth chain of event B". This is done by assigning every event a
*chain ID* and *sequence number* (e.g. `(5,3)`), and having a map of *links*
between chains (e.g. `(5,3) -> (2,4)`) such that A is reachable by B (i.e. `A`
is in the auth chain of `B`) if and only if either:
1. A and B have the same chain ID and `A`'s sequence number is less than `B`'s
sequence number; or
2. there is a link `L` between `B`'s chain ID and `A`'s chain ID such that
`L.start_seq_no` <= `B.seq_no` and `A.seq_no` <= `L.end_seq_no`.
There are actually two potential implementations, one where we store links from
each chain to every other reachable chain (the transitive closure of the links
graph), and one where we remove redundant links (the transitive reduction of the
links graph) e.g. if we have chains `C3 -> C2 -> C1` then the link `C3 -> C1`
would not be stored. Synapse uses the former implementation so that it doesn't
need to recurse to test reachability between chains.
### Example
An example auth graph would look like the following, where chains have been
formed based on type/state_key and are denoted by colour and are labelled with
`(chain ID, sequence number)`. Links are denoted by the arrows (links in grey
are those that would be removed in the second implementation described above).
![Example](auth_chain_diff.dot.png)
Note that we don't include all links between events and their auth events, as
most of those links would be redundant. For example, all events point to the
create event, but each chain only needs the one link from its base to the
create event.
## Using the Index
This index can be used to calculate the auth chain difference of the state sets
by looking at the chain ID and sequence numbers reachable from each state set:
1. For every state set lookup the chain ID/sequence numbers of each state event
2. Use the index to find all chains and the maximum sequence number reachable
from each state set.
3. The auth chain difference is then all events in each chain that have sequence
numbers between the maximum sequence number reachable from *any* state set and
the minimum reachable by *all* state sets (if any).
Note that step 2 is effectively calculating the auth chain for each state set
(in terms of chain IDs and sequence numbers), and step 3 is calculating the
difference between the union and intersection of the auth chains.
### Worked Example
For example, given the above graph, we can calculate the difference between
state sets consisting of:
1. `S1`: Alice's invite `(4,1)` and Bob's second join `(2,2)`; and
2. `S2`: Alice's second join `(4,3)` and Bob's first join `(2,1)`.
Using the index we see that the following auth chains are reachable from each
state set:
1. `S1`: `(1,1)`, `(2,2)`, `(3,1)` & `(4,1)`
2. `S2`: `(1,1)`, `(2,1)`, `(3,2)` & `(4,3)`
And so, for each the ranges that are in the auth chain difference:
1. Chain 1: None (since everything can reach the create event).
2. Chain 2: The range `(1, 2]` (i.e. just `2`), as `1` is reachable by all state
sets and the maximum reachable is `2` (corresponding to Bob's second join).
3. Chain 3: Similarly the range `(1, 2]` (corresponding to the second power
level).
4. Chain 4: The range `(1, 3]` (corresponding to both of Alice's joins).
So the final result is: Bob's second join `(2,2)`, the second power level
`(3,2)` and both of Alice's joins `(4,2)` & `(4,3)`.

View File

@@ -64,6 +64,8 @@ save as it takes a while and is very resource intensive.
- Use underscores for functions and variables.
- **Docstrings**: should follow the [google code
style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings).
This is so that we can generate documentation with
[sphinx](http://sphinxcontrib-napoleon.readthedocs.org/en/latest/).
See the
[examples](http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
in the sphinx documentation.

View File

@@ -31,7 +31,7 @@ easy to run CAS implementation built on top of Django.
You should now have a Django project configured to serve CAS authentication with
a single user created.
## Configure Synapse (and Element) to use CAS
## Configure Synapse (and Riot) to use CAS
1. Modify your `homeserver.yaml` to enable CAS and point it to your locally
running Django test server:
@@ -51,9 +51,9 @@ and that the CAS server is on port 8000, both on localhost.
## Testing the configuration
Then in Element:
Then in Riot:
1. Visit the login page with a Element pointing at your homeserver.
1. Visit the login page with a Riot pointing at your homeserver.
2. Click the Single Sign-On button.
3. Login using the credentials created with `createsuperuser`.
4. You should be logged in.

View File

@@ -47,18 +47,6 @@ you invite them to. This can be caused by an incorrectly-configured reverse
proxy: see [reverse_proxy.md](<reverse_proxy.md>) for instructions on how to correctly
configure a reverse proxy.
### Known issues
**HTTP `308 Permanent Redirect` redirects are not followed**: Due to missing features
in the HTTP library used by Synapse, 308 redirects are currently not followed by
federating servers, which can cause `M_UNKNOWN` or `401 Unauthorized` errors. This
may affect users who are redirecting apex-to-www (e.g. `example.com` -> `www.example.com`),
and especially users of the Kubernetes *Nginx Ingress* module, which uses 308 redirect
codes by default. For those Kubernetes users, [this Stackoverflow post](https://stackoverflow.com/a/52617528/5096871)
might be helpful. For other users, switching to a `301 Moved Permanently` code may be
an option. 308 redirect codes will be supported properly in a future
release of Synapse.
## Running a demo federation of Synapses
If you want to get up and running quickly with a trio of homeservers in a

View File

@@ -1,97 +0,0 @@
# JWT Login Type
Synapse comes with a non-standard login type to support
[JSON Web Tokens](https://en.wikipedia.org/wiki/JSON_Web_Token). In general the
documentation for
[the login endpoint](https://matrix.org/docs/spec/client_server/r0.6.1#login)
is still valid (and the mechanism works similarly to the
[token based login](https://matrix.org/docs/spec/client_server/r0.6.1#token-based)).
To log in using a JSON Web Token, clients should submit a `/login` request as
follows:
```json
{
"type": "org.matrix.login.jwt",
"token": "<jwt>"
}
```
Note that the login type of `m.login.jwt` is supported, but is deprecated. This
will be removed in a future version of Synapse.
The `token` field should include the JSON web token with the following claims:
* The `sub` (subject) claim is required and should encode the local part of the
user ID.
* The expiration time (`exp`), not before time (`nbf`), and issued at (`iat`)
claims are optional, but validated if present.
* The issuer (`iss`) claim is optional, but required and validated if configured.
* The audience (`aud`) claim is optional, but required and validated if configured.
Providing the audience claim when not configured will cause validation to fail.
In the case that the token is not valid, the homeserver must respond with
`403 Forbidden` and an error code of `M_FORBIDDEN`.
As with other login types, there are additional fields (e.g. `device_id` and
`initial_device_display_name`) which can be included in the above request.
## Preparing Synapse
The JSON Web Token integration in Synapse uses the
[`PyJWT`](https://pypi.org/project/pyjwt/) library, which must be installed
as follows:
* The relevant libraries are included in the Docker images and Debian packages
provided by `matrix.org` so no further action is needed.
* If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip
install synapse[pyjwt]` to install the necessary dependencies.
* For other installation mechanisms, see the documentation provided by the
maintainer.
To enable the JSON web token integration, you should then add an `jwt_config` section
to your configuration file (or uncomment the `enabled: true` line in the
existing section). See [sample_config.yaml](./sample_config.yaml) for some
sample settings.
## How to test JWT as a developer
Although JSON Web Tokens are typically generated from an external server, the
examples below use [PyJWT](https://pyjwt.readthedocs.io/en/latest/) directly.
1. Configure Synapse with JWT logins, note that this example uses a pre-shared
secret and an algorithm of HS256:
```yaml
jwt_config:
enabled: true
secret: "my-secret-token"
algorithm: "HS256"
```
2. Generate a JSON web token:
```bash
$ pyjwt --key=my-secret-token --alg=HS256 encode sub=test-user
eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0LXVzZXIifQ.Ag71GT8v01UO3w80aqRPTeuVPBIBZkYhNTJJ-_-zQIc
```
3. Query for the login types and ensure `org.matrix.login.jwt` is there:
```bash
curl http://localhost:8080/_matrix/client/r0/login
```
4. Log in using the generated JSON web token from above:
```bash
$ curl http://localhost:8082/_matrix/client/r0/login -X POST \
--data '{"type":"org.matrix.login.jwt","token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0LXVzZXIifQ.Ag71GT8v01UO3w80aqRPTeuVPBIBZkYhNTJJ-_-zQIc"}'
{
"access_token": "<access token>",
"device_id": "ACBDEFGHI",
"home_server": "localhost:8080",
"user_id": "@test-user:localhost:8480"
}
```
You should now be able to use the returned access token to query the client API.

View File

@@ -5,45 +5,8 @@ The "manhole" allows server administrators to access a Python shell on a running
Synapse installation. This is a very powerful mechanism for administration and
debugging.
**_Security Warning_**
Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.
***
To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`. The configuration is slightly different if you're using docker.
#### Docker config
If you are using Docker, set `bind_addresses` to `['0.0.0.0']` as shown:
```yaml
listeners:
- port: 9000
bind_addresses: ['0.0.0.0']
type: manhole
```
When using `docker run` to start the server, you will then need to change the command to the following to include the
`manhole` port forwarding. The `-p 127.0.0.1:9000:9000` below is important: it
ensures that access to the `manhole` is only possible for local users.
```bash
docker run -d --name synapse \
--mount type=volume,src=synapse-data,dst=/data \
-p 8008:8008 \
-p 127.0.0.1:9000:9000 \
matrixdotorg/synapse:latest
```
#### Native config
If you are not using docker, set `bind_addresses` to `['::1', '127.0.0.1']` as shown.
The `bind_addresses` in the example below is important: it ensures that access to the
`manhole` is only possible for local users.
`homeserver.yaml`:
```yaml
listeners:
@@ -52,7 +15,12 @@ listeners:
type: manhole
```
#### Accessing synapse manhole
(`bind_addresses` in the above is important: it ensures that access to the
manhole is only possible for local users).
Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.
Then restart synapse, and point an ssh client at port 9000 on localhost, using
the username `matrix`:
@@ -67,12 +35,9 @@ This gives a Python REPL in which `hs` gives access to the
`synapse.server.HomeServer` object - which in turn gives access to many other
parts of the process.
Note that any call which returns a coroutine will need to be wrapped in `ensureDeferred`.
As a simple example, retrieving an event from the database:
```pycon
>>> from twisted.internet import defer
>>> defer.ensureDeferred(hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org'))
```
>>> hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org')
<Deferred at 0x7ff253fc6998 current result: <FrozenEvent event_id='$1416420717069yeQaw:matrix.org', type='m.room.create', state_key=''>>
```

View File

@@ -136,34 +136,24 @@ the server's database.
### Lifetime limits
Server admins can set limits on the values of `max_lifetime` to use when
purging old events in a room. These limits can be defined as such in the
`retention` section of the configuration file:
**Note: this feature is mainly useful within a closed federation or on
servers that don't federate, because there currently is no way to
enforce these limits in an open federation.**
Server admins can restrict the values their local users are allowed to
use for both `min_lifetime` and `max_lifetime`. These limits can be
defined as such in the `retention` section of the configuration file:
```yaml
allowed_lifetime_min: 1d
allowed_lifetime_max: 1y
```
The limits are considered when running purge jobs. If necessary, the
effective value of `max_lifetime` will be brought between
`allowed_lifetime_min` and `allowed_lifetime_max` (inclusive).
This means that, if the value of `max_lifetime` defined in the room's state
is lower than `allowed_lifetime_min`, the value of `allowed_lifetime_min`
will be used instead. Likewise, if the value of `max_lifetime` is higher
than `allowed_lifetime_max`, the value of `allowed_lifetime_max` will be
used instead.
In the example above, we ensure Synapse never deletes events that are less
than one day old, and that it always deletes events that are over a year
old.
If a default policy is set, and its `max_lifetime` value is lower than
`allowed_lifetime_min` or higher than `allowed_lifetime_max`, the same
process applies.
Both parameters are optional; if one is omitted Synapse won't use it to
adjust the effective value of `max_lifetime`.
Here, `allowed_lifetime_min` is the lowest value a local user can set
for both `min_lifetime` and `max_lifetime`, and `allowed_lifetime_max`
is the highest value. Both parameters are optional (e.g. setting
`allowed_lifetime_min` but not `allowed_lifetime_max` only enforces a
minimum and no maximum).
Like other settings in this section, these parameters can be expressed
either as a duration or as a number of milliseconds.

View File

@@ -13,12 +13,10 @@
can be enabled by adding the \"metrics\" resource to the existing
listener as such:
```yaml
resources:
- names:
- client
- metrics
```
resources:
- names:
- client
- metrics
This provides a simple way of adding metrics to your Synapse
installation, and serves under `/_synapse/metrics`. If you do not
@@ -29,17 +27,15 @@
different thread to Synapse. This can make it more resilient to
heavy load meaning metrics cannot be retrieved, and can be exposed
to just internal networks easier. The served metrics are available
over HTTP only, and will be available at `/_synapse/metrics`.
over HTTP only, and will be available at `/`.
Add a new listener to homeserver.yaml:
```yaml
listeners:
- type: metrics
port: 9000
bind_addresses:
- '0.0.0.0'
```
listeners:
- type: metrics
port: 9000
bind_addresses:
- '0.0.0.0'
For both options, you will need to ensure that `enable_metrics` is
set to `True`.
@@ -51,13 +47,10 @@
It needs to set the `metrics_path` to a non-default value (under
`scrape_configs`):
```yaml
- job_name: "synapse"
scrape_interval: 15s
metrics_path: "/_synapse/metrics"
static_configs:
- targets: ["my.server.here:port"]
```
- job_name: "synapse"
metrics_path: "/_synapse/metrics"
static_configs:
- targets: ["my.server.here:port"]
where `my.server.here` is the IP address of Synapse, and `port` is
the listener port configured with the `metrics` resource.
@@ -67,9 +60,6 @@
1. Restart Prometheus.
1. Consider using the [grafana dashboard](https://github.com/matrix-org/synapse/tree/master/contrib/grafana/)
and required [recording rules](https://github.com/matrix-org/synapse/tree/master/contrib/prometheus/)
## Monitoring workers
To monitor a Synapse installation using
@@ -84,9 +74,9 @@ To allow collecting metrics from a worker, you need to add a
under `worker_listeners`:
```yaml
- type: metrics
bind_address: ''
port: 9101
- type: metrics
bind_address: ''
port: 9101
```
The `bind_address` and `port` parameters should be set so that
@@ -95,38 +85,6 @@ don't clash with an existing worker.
With this example, the worker's metrics would then be available
on `http://127.0.0.1:9101`.
Example Prometheus target for Synapse with workers:
```yaml
- job_name: "synapse"
scrape_interval: 15s
metrics_path: "/_synapse/metrics"
static_configs:
- targets: ["my.server.here:port"]
labels:
instance: "my.server"
job: "master"
index: 1
- targets: ["my.workerserver.here:port"]
labels:
instance: "my.server"
job: "generic_worker"
index: 1
- targets: ["my.workerserver.here:port"]
labels:
instance: "my.server"
job: "generic_worker"
index: 2
- targets: ["my.workerserver.here:port"]
labels:
instance: "my.server"
job: "media_repository"
index: 1
```
Labels (`instance`, `job`, `index`) can be defined as anything.
The labels are used to group graphs in Grafana.
## Renaming of metrics & deprecation of old names in 1.2
Synapse 1.2 updates the Prometheus metrics to match the naming

View File

@@ -23,7 +23,6 @@ such as [Github][github-idp].
[auth0]: https://auth0.com/
[okta]: https://www.okta.com/
[dex-idp]: https://github.com/dexidp/dex
[keycloak-idp]: https://www.keycloak.org/docs/latest/server_admin/#sso-protocols
[hydra]: https://www.ory.sh/docs/hydra/
[github-idp]: https://developer.github.com/apps/building-oauth-apps/authorizing-oauth-apps
@@ -37,54 +36,29 @@ as follows:
provided by `matrix.org` so no further action is needed.
* If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip
install matrix-synapse[oidc]` to install the necessary dependencies.
install synapse[oidc]` to install the necessary dependencies.
* For other installation mechanisms, see the documentation provided by the
maintainer.
To enable the OpenID integration, you should then add a section to the `oidc_providers`
setting in your configuration file (or uncomment one of the existing examples).
See [sample_config.yaml](./sample_config.yaml) for some sample settings, as well as
the text below for example configurations for specific providers.
To enable the OpenID integration, you should then add an `oidc_config` section
to your configuration file (or uncomment the `enabled: true` line in the
existing section). See [sample_config.yaml](./sample_config.yaml) for some
sample settings, as well as the text below for example configurations for
specific providers.
## Sample configs
Here are a few configs for providers that should work with Synapse.
### Microsoft Azure Active Directory
Azure AD can act as an OpenID Connect Provider. Register a new application under
*App registrations* in the Azure AD management console. The RedirectURI for your
application should point to your matrix server: `[synapse public baseurl]/_synapse/oidc/callback`
Go to *Certificates & secrets* and register a new client secret. Make note of your
Directory (tenant) ID as it will be used in the Azure links.
Edit your Synapse config file and change the `oidc_config` section:
```yaml
oidc_providers:
- idp_id: microsoft
idp_name: Microsoft
issuer: "https://login.microsoftonline.com/<tenant id>/v2.0"
client_id: "<client id>"
client_secret: "<client secret>"
scopes: ["openid", "profile"]
authorization_endpoint: "https://login.microsoftonline.com/<tenant id>/oauth2/v2.0/authorize"
token_endpoint: "https://login.microsoftonline.com/<tenant id>/oauth2/v2.0/token"
userinfo_endpoint: "https://graph.microsoft.com/oidc/userinfo"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username.split('@')[0] }}"
display_name_template: "{{ user.name }}"
```
### [Dex][dex-idp]
[Dex][dex-idp] is a simple, open-source, certified OpenID Connect Provider.
Although it is designed to help building a full-blown provider with an
external database, it can be configured with static passwords in a config file.
Follow the [Getting Started guide](https://dexidp.io/docs/getting-started/)
Follow the [Getting Started
guide](https://github.com/dexidp/dex/blob/master/Documentation/getting-started.md)
to install Dex.
Edit `examples/config-dev.yaml` config file from the Dex repo to add a client:
@@ -98,73 +72,24 @@ staticClients:
name: 'Synapse'
```
Run with `dex serve examples/config-dev.yaml`.
Run with `dex serve examples/config-dex.yaml`.
Synapse config:
```yaml
oidc_providers:
- idp_id: dex
idp_name: "My Dex server"
skip_verification: true # This is needed as Dex is served on an insecure endpoint
issuer: "http://127.0.0.1:5556/dex"
client_id: "synapse"
client_secret: "secret"
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.name }}"
display_name_template: "{{ user.name|capitalize }}"
oidc_config:
enabled: true
skip_verification: true # This is needed as Dex is served on an insecure endpoint
issuer: "http://127.0.0.1:5556/dex"
client_id: "synapse"
client_secret: "secret"
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.name }}"
display_name_template: "{{ user.name|capitalize }}"
```
### [Keycloak][keycloak-idp]
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.
1. Click `Clients` in the sidebar and click `Create`
2. Fill in the fields as below:
| Field | Value |
|-----------|-----------|
| Client ID | `synapse` |
| Client Protocol | `openid-connect` |
3. Click `Save`
4. Fill in the fields as below:
| Field | Value |
|-----------|-----------|
| Client ID | `synapse` |
| Enabled | `On` |
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/oidc/callback` |
5. Click `Save`
6. On the Credentials tab, update the fields:
| Field | Value |
|-------|-------|
| Client Authenticator | `Client ID and Secret` |
7. Click `Regenerate Secret`
8. Copy Secret
```yaml
oidc_providers:
- idp_id: keycloak
idp_name: "My KeyCloak server"
issuer: "https://127.0.0.1:8443/auth/realms/{realm_name}"
client_id: "synapse"
client_secret: "copy secret generated from above"
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
### [Auth0][auth0]
1. Create a regular web application for Synapse
@@ -193,17 +118,16 @@ oidc_providers:
Synapse config:
```yaml
oidc_providers:
- idp_id: auth0
idp_name: Auth0
issuer: "https://your-tier.eu.auth0.com/" # TO BE FILLED
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
oidc_config:
enabled: true
issuer: "https://your-tier.eu.auth0.com/" # TO BE FILLED
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
### GitHub
@@ -212,7 +136,7 @@ GitHub is a bit special as it is not an OpenID Connect compliant provider, but
just a regular OAuth2 provider.
The [`/user` API endpoint](https://developer.github.com/v3/users/#get-the-authenticated-user)
can be used to retrieve information on the authenticated user. As the Synapse
can be used to retrieve information on the authenticated user. As the Synaspse
login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.
@@ -222,22 +146,21 @@ does not return a `sub` property, an alternative `subject_claim` has to be set.
Synapse config:
```yaml
oidc_providers:
- idp_id: github
idp_name: Github
discover: false
issuer: "https://github.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
authorization_endpoint: "https://github.com/login/oauth/authorize"
token_endpoint: "https://github.com/login/oauth/access_token"
userinfo_endpoint: "https://api.github.com/user"
scopes: ["read:user"]
user_mapping_provider:
config:
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.name }}"
oidc_config:
enabled: true
discover: false
issuer: "https://github.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
authorization_endpoint: "https://github.com/login/oauth/authorize"
token_endpoint: "https://github.com/login/oauth/access_token"
userinfo_endpoint: "https://api.github.com/user"
scopes: ["read:user"]
user_mapping_provider:
config:
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.name }}"
```
### [Google][google-idp]
@@ -247,17 +170,16 @@ oidc_providers:
2. add an "OAuth Client ID" for a Web Application under "Credentials".
3. Copy the Client ID and Client Secret, and add the following to your synapse config:
```yaml
oidc_providers:
- idp_id: google
idp_name: Google
issuer: "https://accounts.google.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.given_name|lower }}"
display_name_template: "{{ user.name }}"
oidc_config:
enabled: true
issuer: "https://accounts.google.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.given_name|lower }}"
display_name_template: "{{ user.name }}"
```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
public baseurl]/_synapse/oidc/callback`.
@@ -271,39 +193,14 @@ oidc_providers:
Synapse config:
```yaml
oidc_providers:
- idp_id: twitch
idp_name: Twitch
issuer: "https://id.twitch.tv/oauth2/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: "client_secret_post"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
### GitLab
1. Create a [new application](https://gitlab.com/profile/applications).
2. Add the `read_user` and `openid` scopes.
3. Add this Callback URL: `[synapse public baseurl]/_synapse/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: gitlab
idp_name: Gitlab
issuer: "https://gitlab.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: "client_secret_post"
scopes: ["openid", "read_user"]
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
localpart_template: '{{ user.nickname }}'
display_name_template: '{{ user.name }}'
oidc_config:
enabled: true
issuer: "https://id.twitch.tv/oauth2/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: "client_secret_post"
user_mapping_provider:
config:
localpart_template: '{{ user.preferred_username }}'
display_name_template: '{{ user.name }}'
```

View File

@@ -14,110 +14,107 @@ password auth provider module implementations:
* [matrix-synapse-ldap3](https://github.com/matrix-org/matrix-synapse-ldap3/)
* [matrix-synapse-shared-secret-auth](https://github.com/devture/matrix-synapse-shared-secret-auth)
* [matrix-synapse-rest-password-provider](https://github.com/ma1uta/matrix-synapse-rest-password-provider)
## Required methods
Password auth provider classes must provide the following methods:
* `parse_config(config)`
This method is passed the `config` object for this module from the
homeserver configuration file.
*class* `SomeProvider.parse_config`(*config*)
It should perform any appropriate sanity checks on the provided
configuration, and return an object which is then passed into
`__init__`.
> This method is passed the `config` object for this module from the
> homeserver configuration file.
>
> It should perform any appropriate sanity checks on the provided
> configuration, and return an object which is then passed into
> `__init__`.
This method should have the `@staticmethod` decoration.
*class* `SomeProvider`(*config*, *account_handler*)
* `__init__(self, config, account_handler)`
The constructor is passed the config object returned by
`parse_config`, and a `synapse.module_api.ModuleApi` object which
allows the password provider to check if accounts exist and/or create
new ones.
> The constructor is passed the config object returned by
> `parse_config`, and a `synapse.module_api.ModuleApi` object which
> allows the password provider to check if accounts exist and/or create
> new ones.
## Optional methods
Password auth provider classes may optionally provide the following methods:
Password auth provider classes may optionally provide the following
methods.
* `get_db_schema_files(self)`
*class* `SomeProvider.get_db_schema_files`()
This method, if implemented, should return an Iterable of
`(name, stream)` pairs of database schema files. Each file is applied
in turn at initialisation, and a record is then made in the database
so that it is not re-applied on the next start.
> This method, if implemented, should return an Iterable of
> `(name, stream)` pairs of database schema files. Each file is applied
> in turn at initialisation, and a record is then made in the database
> so that it is not re-applied on the next start.
* `get_supported_login_types(self)`
`someprovider.get_supported_login_types`()
This method, if implemented, should return a `dict` mapping from a
login type identifier (such as `m.login.password`) to an iterable
giving the fields which must be provided by the user in the submission
to [the `/login` API](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-login).
These fields are passed in the `login_dict` dictionary to `check_auth`.
> This method, if implemented, should return a `dict` mapping from a
> login type identifier (such as `m.login.password`) to an iterable
> giving the fields which must be provided by the user in the submission
> to the `/login` api. These fields are passed in the `login_dict`
> dictionary to `check_auth`.
>
> For example, if a password auth provider wants to implement a custom
> login type of `com.example.custom_login`, where the client is expected
> to pass the fields `secret1` and `secret2`, the provider should
> implement this method and return the following dict:
>
> {"com.example.custom_login": ("secret1", "secret2")}
For example, if a password auth provider wants to implement a custom
login type of `com.example.custom_login`, where the client is expected
to pass the fields `secret1` and `secret2`, the provider should
implement this method and return the following dict:
`someprovider.check_auth`(*username*, *login_type*, *login_dict*)
```python
{"com.example.custom_login": ("secret1", "secret2")}
```
> This method is the one that does the real work. If implemented, it
> will be called for each login attempt where the login type matches one
> of the keys returned by `get_supported_login_types`.
>
> It is passed the (possibly UNqualified) `user` provided by the client,
> the login type, and a dictionary of login secrets passed by the
> client.
>
> The method should return a Twisted `Deferred` object, which resolves
> to the canonical `@localpart:domain` user id if authentication is
> successful, and `None` if not.
>
> Alternatively, the `Deferred` can resolve to a `(str, func)` tuple, in
> which case the second field is a callback which will be called with
> the result from the `/login` call (including `access_token`,
> `device_id`, etc.)
* `check_auth(self, username, login_type, login_dict)`
`someprovider.check_3pid_auth`(*medium*, *address*, *password*)
This method does the real work. If implemented, it
will be called for each login attempt where the login type matches one
of the keys returned by `get_supported_login_types`.
> This method, if implemented, is called when a user attempts to
> register or log in with a third party identifier, such as email. It is
> passed the medium (ex. "email"), an address (ex.
> "<jdoe@example.com>") and the user's password.
>
> The method should return a Twisted `Deferred` object, which resolves
> to a `str` containing the user's (canonical) User ID if
> authentication was successful, and `None` if not.
>
> As with `check_auth`, the `Deferred` may alternatively resolve to a
> `(user_id, callback)` tuple.
It is passed the (possibly unqualified) `user` field provided by the client,
the login type, and a dictionary of login secrets passed by the
client.
`someprovider.check_password`(*user_id*, *password*)
The method should return an `Awaitable` object, which resolves
to the canonical `@localpart:domain` user ID if authentication is
successful, and `None` if not.
> This method provides a simpler interface than
> `get_supported_login_types` and `check_auth` for password auth
> providers that just want to provide a mechanism for validating
> `m.login.password` logins.
>
> Iif implemented, it will be called to check logins with an
> `m.login.password` login type. It is passed a qualified
> `@localpart:domain` user id, and the password provided by the user.
>
> The method should return a Twisted `Deferred` object, which resolves
> to `True` if authentication is successful, and `False` if not.
Alternatively, the `Awaitable` can resolve to a `(str, func)` tuple, in
which case the second field is a callback which will be called with
the result from the `/login` call (including `access_token`,
`device_id`, etc.)
`someprovider.on_logged_out`(*user_id*, *device_id*, *access_token*)
* `check_3pid_auth(self, medium, address, password)`
This method, if implemented, is called when a user attempts to
register or log in with a third party identifier, such as email. It is
passed the medium (ex. "email"), an address (ex.
"<jdoe@example.com>") and the user's password.
The method should return an `Awaitable` object, which resolves
to a `str` containing the user's (canonical) User id if
authentication was successful, and `None` if not.
As with `check_auth`, the `Awaitable` may alternatively resolve to a
`(user_id, callback)` tuple.
* `check_password(self, user_id, password)`
This method provides a simpler interface than
`get_supported_login_types` and `check_auth` for password auth
providers that just want to provide a mechanism for validating
`m.login.password` logins.
If implemented, it will be called to check logins with an
`m.login.password` login type. It is passed a qualified
`@localpart:domain` user id, and the password provided by the user.
The method should return an `Awaitable` object, which resolves
to `True` if authentication is successful, and `False` if not.
* `on_logged_out(self, user_id, device_id, access_token)`
This method, if implemented, is called when a user logs out. It is
passed the qualified user ID, the ID of the deactivated device (if
any: access tokens are occasionally created without an associated
device ID), and the (now deactivated) access token.
It may return an `Awaitable` object; the logout request will
wait for the `Awaitable` to complete, but the result is ignored.
> This method, if implemented, is called when a user logs out. It is
> passed the qualified user ID, the ID of the deactivated device (if
> any: access tokens are occasionally created without an associated
> device ID), and the (now deactivated) access token.
>
> It may return a Twisted `Deferred` object; the logout request will
> wait for the deferred to complete but the result is ignored.
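Taken together, a minimal, hypothetical skeleton of a password auth provider might look like the sketch below. The class name, the `shared_secret` option and the example logic are invented for illustration; only the method names and signatures come from the documentation above.

```python
class ExamplePasswordProvider:
    """Illustrative sketch only; not a real module."""

    @staticmethod
    def parse_config(config):
        # Sanity-check the options supplied in homeserver.yaml and return
        # whatever object __init__ should receive.
        if "shared_secret" not in config:
            raise Exception("shared_secret is required")
        return config

    def __init__(self, config, account_handler):
        # account_handler is a synapse.module_api.ModuleApi instance.
        self.config = config
        self.account_handler = account_handler

    def get_supported_login_types(self):
        # Advertise ordinary password logins; the client must supply a
        # "password" field in the login dict.
        return {"m.login.password": ("password",)}

    async def check_auth(self, username, login_type, login_dict):
        # Accept the login only if the supplied password matches the
        # configured shared secret (purely for illustration).
        if login_dict.get("password") != self.config["shared_secret"]:
            return None
        # Qualify the user ID if the client sent a bare localpart.
        return self.account_handler.get_qualified_user_id(username)
```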

View File

@@ -18,7 +18,7 @@ connect to a postgres database.
virtualenv](../INSTALL.md#installing-from-source), you can install
the library with:
~/synapse/env/bin/pip install "matrix-synapse[postgres]"
~/synapse/env/bin/pip install matrix-synapse[postgres]
(substituting the path to your virtualenv for `~/synapse/env`, if
you used a different path). You will require the postgres
@@ -106,17 +106,6 @@ Note that the above may fail with an error about duplicate rows if corruption
has already occurred, and such duplicate rows will need to be manually removed.
## Fixing inconsistent sequences error
Synapse uses Postgres sequences to generate IDs for various tables. A sequence
and associated table can get out of sync if, for example, Synapse has been
downgraded and then upgraded again.
To fix the issue shut down Synapse (including any and all workers) and run the
SQL command included in the error message. Once done Synapse should start
successfully.
## Tuning Postgres
The default settings should be fine for most deployments. For larger
@@ -199,9 +188,6 @@ to do step 2.
It is safe to at any time kill the port script and restart it.
Note that the database may take up significantly more (25% - 100% more)
space on disk after porting to Postgres.
### Using the port script
Firstly, shut down the currently running synapse server and copy its

View File

@@ -3,7 +3,7 @@
It is recommended to put a reverse proxy such as
[nginx](https://nginx.org/en/docs/http/ngx_http_proxy_module.html),
[Apache](https://httpd.apache.org/docs/current/mod/mod_proxy_http.html),
[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy) or
[Caddy](https://caddyserver.com/docs/proxy) or
[HAProxy](https://www.haproxy.org/) in front of Synapse. One advantage
of doing so is that it means that you can expose the default https port
(443) to Matrix clients without needing to run Synapse with root
@@ -11,7 +11,7 @@ privileges.
**NOTE**: Your reverse proxy must not `canonicalise` or `normalise`
the requested URI in any way (for example, by decoding `%xx` escapes).
Beware that Apache *will* canonicalise URIs unless you specify
Beware that Apache *will* canonicalise URIs unless you specifify
`nocanon`.
When setting up a reverse proxy, remember that Matrix clients and other
@@ -23,10 +23,6 @@ specification](https://matrix.org/docs/spec/server_server/latest#resolving-serve
for more details of the algorithm used for federation connections, and
[delegate.md](<delegate.md>) for instructions on setting up delegation.
Endpoints that are part of the standardised Matrix specification are
located under `/_matrix`, whereas endpoints specific to Synapse are
located under `/_synapse/client`.
Let's assume that we expect clients to connect to our server at
`https://matrix.example.com`, and other servers to connect at
`https://example.com:8448`. The following sections detail the configuration of
@@ -42,19 +38,25 @@ the reverse proxy and the homeserver.
server {
listen 443 ssl;
listen [::]:443 ssl;
# For the federation port
listen 8448 ssl default_server;
listen [::]:8448 ssl default_server;
server_name matrix.example.com;
location ~* ^(\/_matrix|\/_synapse\/client) {
location /_matrix {
proxy_pass http://localhost:8008;
proxy_set_header X-Forwarded-For $remote_addr;
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 50M;
client_max_body_size 10M;
}
}
server {
listen 8448 ssl default_server;
listen [::]:8448 ssl default_server;
server_name example.com;
location / {
proxy_pass http://localhost:8008;
proxy_set_header X-Forwarded-For $remote_addr;
}
}
```
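As the comment in the example notes, `client_max_body_size` should be kept in step with Synapse's own upload limit; a hypothetical matching fragment of `homeserver.yaml` would be:

```yaml
# Keep this in line with client_max_body_size in the reverse proxy config.
max_upload_size: 50M
```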
@@ -69,10 +71,6 @@ matrix.example.com {
proxy /_matrix http://localhost:8008 {
transparent
}
proxy /_synapse/client http://localhost:8008 {
transparent
}
}
example.com:8448 {
@@ -87,7 +85,6 @@ example.com:8448 {
```
matrix.example.com {
reverse_proxy /_matrix/* http://localhost:8008
reverse_proxy /_synapse/client/* http://localhost:8008
}
example.com:8448 {
@@ -105,8 +102,6 @@ example.com:8448 {
AllowEncodedSlashes NoDecode
ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
ProxyPass /_synapse/client http://127.0.0.1:8008/_synapse/client nocanon
ProxyPassReverse /_synapse/client http://127.0.0.1:8008/_synapse/client
</VirtualHost>
<VirtualHost *:8448>
@@ -121,14 +116,6 @@ example.com:8448 {
**NOTE**: ensure the `nocanon` options are included.
**NOTE 2**: It appears that Synapse is currently incompatible with the ModSecurity module for Apache (`mod_security2`). If you need it enabled for other services on your web server, you can disable it for Synapse's two VirtualHosts by including the following lines before each of the two `</VirtualHost>` above:
```
<IfModule security2_module>
SecRuleEngine off
</IfModule>
```
### HAProxy
```
@@ -138,7 +125,6 @@ frontend https
# Matrix client traffic
acl matrix-host hdr(host) -i matrix.example.com
acl matrix-path path_beg /_matrix
acl matrix-path path_beg /_synapse/client
use_backend matrix if matrix-host matrix-path
@@ -159,17 +145,3 @@ client IP addresses are recorded correctly.
Having done so, you can then use `https://matrix.example.com` (instead
of `https://matrix.example.com:8448`) as the "Custom server" when
connecting to Synapse from a client.
## Health check endpoint
Synapse exposes a health check endpoint for use by reverse proxies.
Each configured HTTP listener has a `/health` endpoint which always returns
200 OK (and doesn't get logged).
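As a quick sanity check from the machine running Synapse, querying the endpoint on an HTTP listener (here assuming the default client port 8008) should return a 200 response:

```
curl -i http://localhost:8008/health
```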
## Synapse administration endpoints
Endpoints for administering your Synapse instance are placed under
`/_synapse/admin`. These require authentication through an access token of an
admin user. However as access to these endpoints grants the caller a lot of power,
we do not recommend exposing them to the public internet without good reason.

File diff suppressed because it is too large

View File

@@ -3,11 +3,7 @@
# This is a YAML file containing a standard Python logging configuration
# dictionary. See [1] for details on the valid settings.
#
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
# [2]: https://github.com/matrix-org/synapse/blob/master/docs/structured_logging.md
version: 1
@@ -15,33 +11,24 @@ formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
file:
class: logging.handlers.TimedRotatingFileHandler
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: /var/log/matrix-synapse/homeserver.log
when: midnight
backupCount: 3 # Does not include the current log file.
maxBytes: 104857600
backupCount: 10
filters: [context]
encoding: utf8
# Default to buffering writes to log file for efficiency. This means that
# there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
# logs will still be flushed immediately.
buffer:
class: logging.handlers.MemoryHandler
target: file
# The capacity is the number of log lines that are buffered before
# being written to disk. Increasing this will lead to better
# performance, at the expense of it taking longer for log lines to
# be written to disk.
capacity: 10
flushLevel: 30 # Flush for WARNING logs as well
# A handler that writes logs to stderr. Unused by default, but can be used
# instead of "buffer" and "file" in the logger handlers.
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse.storage.SQL:
@@ -49,23 +36,8 @@ loggers:
# information such as access tokens.
level: INFO
twisted:
# We send the twisted logging directly to the file handler,
# to work around https://github.com/matrix-org/synapse/issues/3471
# when using "buffer" logger. Use "console" to log to stderr instead.
handlers: [file]
propagate: false
root:
level: INFO
# Write logs to the `buffer` handler, which will buffer them together in memory,
# then write them to a file.
#
# Replace "buffer" with "console" to log to stderr instead. (Note that you'll
# also need to update the configuration for the `twisted` logger above, in
# this case.)
#
handlers: [buffer]
handlers: [file, console]
disable_existing_loggers: false

View File

@@ -11,7 +11,7 @@ able to be imported by the running Synapse.
The Python class is instantiated with two objects:
* Any configuration (see below).
* An instance of `synapse.module_api.ModuleApi`.
* An instance of `synapse.spam_checker_api.SpamCheckerApi`.
It then implements methods which return a boolean to alter behavior in Synapse.
@@ -22,45 +22,41 @@ well as some specific methods:
* `user_may_create_room`
* `user_may_create_room_alias`
* `user_may_publish_room`
* `check_username_for_spam`
* `check_registration_for_spam`
The details of the each of these methods (as well as their inputs and outputs)
are documented in the `synapse.events.spamcheck.SpamChecker` class.
The `ModuleApi` class provides a way for the custom spam checker class to
call back into the homeserver internals.
The `SpamCheckerApi` class provides a way for the custom spam checker class to
call back into the homeserver internals. It currently implements the following
methods:
* `get_state_events_in_room`
### Example
```python
from synapse.spam_checker_api import RegistrationBehaviour
class ExampleSpamChecker:
def __init__(self, config, api):
self.config = config
self.api = api
async def check_event_for_spam(self, foo):
def check_event_for_spam(self, foo):
return False # allow all events
async def user_may_invite(self, inviter_userid, invitee_userid, room_id):
def user_may_invite(self, inviter_userid, invitee_userid, room_id):
return True # allow all invites
async def user_may_create_room(self, userid):
def user_may_create_room(self, userid):
return True # allow all room creations
async def user_may_create_room_alias(self, userid, room_alias):
def user_may_create_room_alias(self, userid, room_alias):
return True # allow all room aliases
async def user_may_publish_room(self, userid, room_id):
def user_may_publish_room(self, userid, room_id):
return True # allow publishing of all rooms
async def check_username_for_spam(self, user_profile):
def check_username_for_spam(self, user_profile):
return False # allow all usernames
async def check_registration_for_spam(self, email_threepid, username, request_info):
return RegistrationBehaviour.ALLOW # allow all registrations
```
## Configuration

docs/sphinx/README.rst Normal file
View File

@@ -0,0 +1 @@
TODO: how (if at all) is this actually maintained?

docs/sphinx/conf.py Normal file
View File

@@ -0,0 +1,271 @@
# -*- coding: utf-8 -*-
#
# Synapse documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 10 17:31:02 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.ifconfig",
"sphinxcontrib.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Synapse"
copyright = (
"Copyright 2014-2017 OpenMarket Ltd, 2017 Vector Creations Ltd, 2017 New Vector Ltd"
)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Synapsedoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [("index", "Synapse.tex", "Synapse Documentation", "TNG", "manual")]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "synapse", "Synapse Documentation", ["TNG"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"Synapse",
"Synapse Documentation",
"TNG",
"Synapse",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"http://docs.python.org/": None}
napoleon_include_special_with_doc = True
napoleon_use_ivar = True

docs/sphinx/index.rst Normal file
View File

@@ -0,0 +1,20 @@
.. Synapse documentation master file, created by
sphinx-quickstart on Tue Jun 10 17:31:02 2014.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to Synapse's documentation!
===================================
Contents:
.. toctree::
synapse
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

docs/sphinx/modules.rst Normal file
View File

@@ -0,0 +1,7 @@
synapse
=======
.. toctree::
:maxdepth: 4
synapse

View File

@@ -0,0 +1,7 @@
synapse.api.auth module
=======================
.. automodule:: synapse.api.auth
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,7 @@
synapse.api.constants module
============================
.. automodule:: synapse.api.constants
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,7 @@
synapse.api.dbobjects module
============================
.. automodule:: synapse.api.dbobjects
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,7 @@
synapse.api.errors module
=========================
.. automodule:: synapse.api.errors
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,7 @@
synapse.api.event_stream module
===============================
.. automodule:: synapse.api.event_stream
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,7 @@
synapse.api.events.factory module
=================================
.. automodule:: synapse.api.events.factory
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,7 @@
synapse.api.events.room module
==============================
.. automodule:: synapse.api.events.room
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,18 @@
synapse.api.events package
==========================
Submodules
----------
.. toctree::
synapse.api.events.factory
synapse.api.events.room
Module contents
---------------
.. automodule:: synapse.api.events
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,7 @@
synapse.api.handlers.events module
==================================
.. automodule:: synapse.api.handlers.events
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,7 @@
synapse.api.handlers.factory module
===================================
.. automodule:: synapse.api.handlers.factory
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,7 @@
synapse.api.handlers.federation module
======================================
.. automodule:: synapse.api.handlers.federation
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,7 @@
synapse.api.handlers.register module
====================================
.. automodule:: synapse.api.handlers.register
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,7 @@
synapse.api.handlers.room module
================================
.. automodule:: synapse.api.handlers.room
:members:
:undoc-members:
:show-inheritance:

Some files were not shown because too many files have changed in this diff