Compare commits


44 Commits

Author SHA1 Message Date
Richard van der Hoff
f56db48f67 Merge branch 'develop' into matrix-org-hotfixes 2018-04-13 11:27:29 +01:00
Erik Johnston
dd7f1b10ce Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-04-12 13:35:56 +01:00
Neil Johnson
661a4d4603 Merge branch 'develop' of https://github.com/matrix-org/synapse into matrix-org-hotfixes 2018-04-10 15:10:57 +01:00
Erik Johnston
a401750ef4 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-04-10 13:32:49 +01:00
Erik Johnston
72c673407f Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-04-09 16:11:43 +01:00
Richard van der Hoff
488a2696b6 Merge branch 'develop' into matrix-org-hotfixes 2018-04-06 09:29:15 +01:00
Richard van der Hoff
052df50e49 Merge branch 'develop' into matrix-org-hotfixes 2018-04-06 09:14:40 +01:00
Richard van der Hoff
3633ee8c63 Fix earlier logging patch
`@cached` doesn't work on decorated functions, because it uses inspection on
the target to calculate the number of arguments.
2018-04-04 23:20:30 +01:00
Richard van der Hoff
912c456d60 Logging for get_users_in_room 2018-04-04 17:30:02 +01:00
Richard van der Hoff
d05575a574 Increase member limiter to 20
Let's see if this makes the bridges go faster, or if it kills the synapse
master.
2018-04-04 16:14:02 +01:00
Richard van der Hoff
2fb6d46042 fix PEP8 2018-04-04 16:13:40 +01:00
Erik Johnston
c7fa6672d9 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-03-28 10:02:11 +01:00
Neil Johnson
36c43eef17 Merge branch 'develop' of https://github.com/matrix-org/synapse into matrix-org-hotfixes 2018-03-26 11:29:10 +01:00
Richard van der Hoff
411c437fc6 more simplejson 2018-03-15 23:54:03 +00:00
Erik Johnston
6783bc0ded Replace some ujson with simplejson to make it work 2018-03-15 23:40:23 +00:00
Erik Johnston
295973e3a9 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-03-14 14:43:28 +00:00
Richard van der Hoff
3b78452c3a Merge branch 'rav/request_logging' into matrix-org-hotfixes 2018-03-09 18:06:53 +00:00
Erik Johnston
1f732e05d0 Merge branch 'erikj/read_marker_caches' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-03-01 17:56:40 +00:00
Erik Johnston
9ed48cecbc Merge branch 'erikj/read_marker_caches' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-03-01 17:11:01 +00:00
Erik Johnston
cc9d4a4d3e Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-03-01 12:31:36 +00:00
hera
889803a78e increase sync cache to 2 minutes
to give synchrotrons being hammered by repeated initial /syncs more of a chance to actually complete, and to avoid a DoS
2018-02-28 00:35:03 +00:00
Erik Johnston
22245fb8a7 Actually set cache factors in workers 2018-02-21 21:10:48 +00:00
Erik Johnston
78c5eca141 Add hacky cache factor override system 2018-02-21 21:00:24 +00:00
Richard van der Hoff
55bef59cc7 (Really) fix tablescan of event_push_actions on purge
commit 278d21b5 added new code to avoid the tablescan, but didn't remove the
old :/
2018-02-20 10:37:44 +00:00
Erik Johnston
ad683cd203 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-02-15 14:10:11 +00:00
Erik Johnston
d66afef01e Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-02-15 13:03:05 +00:00
Richard van der Hoff
b07a33f024 Avoid doing presence updates on replication reconnect
Presence is supposed to be disabled on matrix.org, so we shouldn't send a load
of USER_SYNC commands every time the synchrotron reconnects to the master.
2018-02-15 09:51:09 +00:00
hera
74539aa25b Disable auth on room_members for now
because the moznet bridge is broken (https://github.com/matrix-org/matrix-appservice-irc/issues/506)
2018-02-15 09:51:09 +00:00
Erik Johnston
8f25cd6627 Bump LAST_SEEN_GRANULARITY in client_ips 2018-02-15 09:51:09 +00:00
Erik Johnston
dc1299a4b0 Prefill client_ip_last_seen in replication 2018-02-15 09:51:09 +00:00
Erik Johnston
2c72d66cda Move event sending to end in shutdown room admin api 2018-02-15 09:51:09 +00:00
Erik Johnston
cde90a89ed Add dummy presence REST handler to frontend proxy
The handler no-ops all requests as presence is disabled.
2018-02-15 09:51:09 +00:00
Erik Johnston
5aec53ad95 Don't intern type/state_keys in state store 2018-02-15 09:51:09 +00:00
Erik Johnston
d10d19f0ad Increase store._state_group_cache cache size 2018-02-15 09:51:09 +00:00
Erik Johnston
daec1d77be Make _get_joined_hosts_cache cache non-iterable 2018-02-15 09:51:09 +00:00
Erik Johnston
61885f7849 Increase MAX_EVENTS_BEHIND for replication clients 2018-02-15 09:51:09 +00:00
Erik Johnston
ba30d489d9 Disable presence in txn queue 2018-02-15 09:51:09 +00:00
Erik Johnston
5e2d0650df Handle exceptions in get_hosts_for_room when sending events over federation 2018-02-15 09:51:09 +00:00
Erik Johnston
841bcbcafa Limit concurrent AS joins 2018-02-15 09:51:09 +00:00
Erik Johnston
08a6b88e3d Disable presence
This reverts commit 0ebd376a53 and
disables presence a bit more
2018-02-15 09:51:09 +00:00
Erik Johnston
aece8e73b1 Make push actions rotation configurable 2018-02-15 09:51:09 +00:00
Mark Haines
328bd35e00 Deleting from event_push_actions needs to use an index 2018-02-15 09:51:09 +00:00
Erik Johnston
7de9a28b8e Disable auto search for prefixes in event search 2018-02-15 09:51:09 +00:00
Erik Johnston
2a9c3aea89 Add timeout to ResponseCache of /public_rooms 2018-02-15 09:51:09 +00:00
424 changed files with 10723 additions and 23758 deletions


@@ -1,5 +0,0 @@
Dockerfile
.travis.yml
.gitignore
demo/etc
tox.ini


@@ -27,9 +27,8 @@ Describe here the problem that you are experiencing, or the feature you are requ
Describe how what happens differs from what you expected.
<!-- If you can identify any relevant log snippets from _homeserver.log_, please include
those (please be careful to remove any personal or private data). Please surround them with
``` (three backticks, on a line on their own), so that they are formatted legibly. -->
If you can identify any relevant log snippets from _homeserver.log_, please include
those here (please be careful to remove any personal or private data):
### Version information

.gitignore

@@ -1,6 +1,5 @@
*.pyc
.*.swp
*~
.DS_Store
_trial_temp/
@@ -14,7 +13,6 @@ docs/build/
cmdclient_config.json
homeserver*.db
homeserver*.log
homeserver*.log.*
homeserver*.pid
homeserver*.yaml
@@ -34,7 +32,6 @@ demo/media_store.*
demo/etc
uploads
cache
.idea/
media_store/
@@ -42,8 +39,6 @@ media_store/
*.tac
build/
venv/
venv*/
localhost-800*/
static/client/register/register_config.js
@@ -53,4 +48,3 @@ env/
*.config
.vscode/
.ropeproject/


@@ -1,33 +1,14 @@
sudo: false
language: python
python: 2.7

# tell travis to cache ~/.cache/pip
cache: pip

before_script:
  - git remote set-branches --add origin develop
  - git fetch origin develop

matrix:
  fast_finish: true
  include:
    - python: 2.7
      env: TOX_ENV=packaging
    - python: 2.7
      env: TOX_ENV=pep8
    - python: 2.7
      env: TOX_ENV=py27
    - python: 3.6
      env: TOX_ENV=py36
    - python: 3.6
      env: TOX_ENV=check_isort
    - python: 3.6
      env: TOX_ENV=check-newsfragment

env:
  - TOX_ENV=packaging
  - TOX_ENV=pep8
  - TOX_ENV=py27

install:
  - pip install tox


@@ -60,9 +60,3 @@ Niklas Riekenbrauck <nikriek at gmail dot.com>
Christoph Witzany <christoph at web.crofting.com>
* Add LDAP support for authentication
Pierre Jaury <pierre at jaury.eu>
* Docker packaging
Serban Constantin <serban.constantin at gmail dot com>
* Small bug fix

CHANGES.md

File diff suppressed because it is too large.

CHANGES.rst

File diff suppressed because it is too large.


@@ -48,26 +48,6 @@ Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.
Changelog
~~~~~~~~~
All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by Towncrier
(https://github.com/hawkowl/towncrier).
To create a changelog entry, make a new file in the ``changelog.d``
directory named in the format of ``issuenumberOrPR.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes). The content of
the file is your changelog entry, which can contain RestructuredText
formatting. A note of contributors is welcomed in changelogs for
non-misc changes (the content of misc changes is not displayed).
For example, a fix for a bug reported in #1234 would have its
changelog entry in ``changelog.d/1234.bugfix``, and contain content
like "The security levels of Florbs are now validated when
recieved over federation. Contributed by Jane Matrix".
Attribution
~~~~~~~~~~~
@@ -130,15 +110,11 @@ If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment::
Signed-off-by: Your Name <your@email.example.org>
We accept contributions under a legally identifiable name, such as
your name on government documentation or common-law names (names
claimed by legitimate usage or repute). Unfortunately, we cannot
accept anonymous contributions at this time.
Git allows you to add this signoff automatically when using the ``-s``
flag to ``git commit``, which uses the name and email set in your
``user.name`` and ``user.email`` git configs.
...using your real name; unfortunately pseudonyms and anonymous contributions
can't be accepted. Git makes this trivial - just use the -s flag when you do
``git commit``, having first set ``user.name`` and ``user.email`` git configs
(which you should have done anyway :)
Conclusion
~~~~~~~~~~


@@ -2,7 +2,6 @@ include synctl
include LICENSE
include VERSION
include *.rst
include *.md
include demo/README
include demo/demo.tls.dh
include demo/*.py
@@ -26,13 +25,7 @@ recursive-include synapse/static *.js
exclude jenkins.sh
exclude jenkins*.sh
exclude jenkins*
exclude Dockerfile
exclude .dockerignore
recursive-exclude jenkins *.sh
include pyproject.toml
recursive-include changelog.d *
prune .github
prune demo/etc
prune docker


@@ -71,7 +71,7 @@ We'd like to invite you to join #matrix:matrix.org (via
https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
<https://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
<http://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
Thanks for using Matrix!
@@ -157,19 +157,11 @@ if you prefer.
In case of problems, please see the _`Troubleshooting` section below.
There is an official synapse image available at
https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
the docker-compose file available at `contrib/docker <contrib/docker>`_. Further information on
this including configuration options is available in the README on
hub.docker.com.
Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
https://hub.docker.com/r/avhost/docker-matrix/tags/
Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.
Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
tested with VirtualBox/AWS/DigitalOcean - see
https://github.com/EMnify/matrix-synapse-auto-deploy
tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
for details.
Configuring synapse
@@ -290,7 +282,7 @@ Connecting to Synapse from a client
The easiest way to try out your new Synapse installation is by connecting to it
from a web client. The easiest option is probably the one at
https://riot.im/app. You will need to specify a "Custom server" when you log on
http://riot.im/app. You will need to specify a "Custom server" when you log on
or register: set this to ``https://domain.tld`` if you setup a reverse proxy
following the recommended setup, or ``https://localhost:8448`` - remember to specify the
port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
@@ -336,7 +328,7 @@ Security Note
=============
Matrix serves raw user generated data in some APIs - specifically the `content
repository endpoints <https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
repository endpoints <http://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
Whilst we have tried to mitigate against possible XSS attacks (e.g.
https://github.com/matrix-org/synapse/pull/1021) we recommend running
@@ -355,7 +347,7 @@ Platform-Specific Instructions
Debian
------
Matrix provides official Debian packages via apt from https://matrix.org/packages/debian/.
Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
Note that these packages do not include a client - choose one from
https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)
@@ -369,19 +361,6 @@ Synapse is in the Fedora repositories as ``matrix-synapse``::
Oleg Girko provides Fedora RPMs at
https://obs.infoserver.lv/project/monitor/matrix-synapse
OpenSUSE
--------
Synapse is in the OpenSUSE repositories as ``matrix-synapse``::
sudo zypper install matrix-synapse
SUSE Linux Enterprise Server
----------------------------
Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/
ArchLinux
---------
@@ -544,7 +523,7 @@ Troubleshooting Running
-----------------------
If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
to manually upgrade PyNaCL, as synapse uses NaCl (https://nacl.cr.yp.to/) for
to manually upgrade PyNaCL, as synapse uses NaCl (http://nacl.cr.yp.to/) for
encryption and digital signatures.
Unfortunately PyNACL currently has a few issues
(https://github.com/pyca/pynacl/issues/53) and
@@ -635,9 +614,6 @@ should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
$ dig -t srv _matrix._tcp.example.com
_matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.
Note that the server hostname cannot be an alias (CNAME record): it has to point
directly to the server hosting the synapse instance.
You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
its user-ids, by setting ``server_name``::
@@ -692,8 +668,8 @@ useful just for development purposes. See `<demo/README>`_.
Using PostgreSQL
================
As of Synapse 0.9, `PostgreSQL <https://www.postgresql.org>`_ is supported as an
alternative to the `SQLite <https://sqlite.org/>`_ database that Synapse has
As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
traditionally used for convenience and simplicity.
The advantages of Postgres include:
@@ -717,7 +693,7 @@ Using a reverse proxy with Synapse
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.


@@ -1 +0,0 @@
!.gitignore


@@ -1,10 +0,0 @@
Community Contributions
=======================
Everything in this directory is a project submitted by the community that may be useful
to others. As such, the project maintainers cannot guarantee support, stability
or backwards compatibility of these projects.
Files in this directory should *not* be relied on directly, as they may not
continue to work or exist in future. If you wish to use any of these files then
they should be copied to avoid them breaking from underneath you.


@@ -1,41 +0,0 @@
# Synapse Docker
### Automated configuration
It is recommended that you use Docker Compose to run your containers, including
this image and a Postgres server. A sample ``docker-compose.yml`` is provided,
including example labels for reverse proxying and other artifacts.
Read the section about environment variables and set at least mandatory variables,
then run the server:
```
docker-compose up -d
```
If secrets are not specified in the environment variables, they will be generated
as part of the startup. Please ensure these secrets are kept between launches of the
Docker container, as their loss may require users to log in again.
### Manual configuration
A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts. The docker-compose file is an example,
please comment/uncomment sections that are not suitable for your use case.
Specify a ``SYNAPSE_CONFIG_PATH``, preferably to a persistent path,
to use manual configuration. To generate a fresh ``homeserver.yaml``, simply run:
```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host synapse generate
```
Then, customize your configuration and run the server:
```
docker-compose up -d
```
### More information
For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)


@@ -1,50 +0,0 @@
# This compose file is compatible with Compose itself, it might need some
# adjustments to run properly with stack.
version: '3'

services:

  synapse:
    build: ../..
    image: docker.io/matrixdotorg/synapse:latest
    # Since synapse does not retry to connect to the database, restart upon
    # failure
    restart: unless-stopped
    # See the readme for a full documentation of the environment settings
    environment:
      - SYNAPSE_SERVER_NAME=my.matrix.host
      - SYNAPSE_REPORT_STATS=no
      - SYNAPSE_ENABLE_REGISTRATION=yes
      - SYNAPSE_LOG_LEVEL=INFO
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may either store all the files in a local folder
      - ./files:/data
      # .. or you may split this between different storage points
      # - ./files:/data
      # - /path/to/ssd:/data/uploads
      # - /path/to/large_hdd:/data/media
    depends_on:
      - db
    # In order to expose Synapse, remove one of the following, you might for
    # instance expose the TLS port directly:
    ports:
      - 8448:8448/tcp
    # ... or use a reverse proxy, here is an example for traefik:
    labels:
      - traefik.enable=true
      - traefik.frontend.rule=Host:my.matrix.Host
      - traefik.port=8448

  db:
    image: docker.io/postgres:10-alpine
    # Change that password, of course!
    environment:
      - POSTGRES_USER=synapse
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may store the database tables in a local folder..
      - ./schemas:/var/lib/postgresql/data
      # .. or store them on some high performance storage for better results
      # - /path/to/ssd/storage:/var/lib/postgresql/data


@@ -1,6 +0,0 @@
# Using the Synapse Grafana dashboard
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up additional recording rules

File diff suppressed because it is too large.


@@ -2,9 +2,6 @@
# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
# rather than in a user home directory or similar under virtualenv.
# **NOTE:** This is an example service file that may change in the future. If you
# wish to use this please copy rather than symlink it.
[Unit]
Description=Synapse Matrix homeserver
@@ -15,7 +12,6 @@ Group=synapse
WorkingDirectory=/var/lib/synapse
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml
# EnvironmentFile=-/etc/sysconfig/synapse # Can be used to e.g. set SYNAPSE_CACHE_FACTOR
[Install]
WantedBy=multi-user.target


@@ -1,35 +0,0 @@
FROM docker.io/python:2-alpine3.7

RUN apk add --no-cache --virtual .nacl_deps \
        build-base \
        libffi-dev \
        libjpeg-turbo-dev \
        libressl-dev \
        libxslt-dev \
        linux-headers \
        postgresql-dev \
        su-exec \
        zlib-dev

COPY . /synapse

# A wheel cache may be provided in ./cache for faster build
RUN cd /synapse \
 && pip install --upgrade \
        lxml \
        pip \
        psycopg2 \
        setuptools \
 && mkdir -p /synapse/cache \
 && pip install -f /synapse/cache --upgrade --process-dependency-links . \
 && mv /synapse/docker/start.py /synapse/docker/conf / \
 && rm -rf \
        setup.cfg \
        setup.py \
        synapse

VOLUME ["/data"]

EXPOSE 8008/tcp 8448/tcp

ENTRYPOINT ["/start.py"]


@@ -1,124 +0,0 @@
# Synapse Docker
This Docker image will run Synapse as a single process. It does not provide a database
server or a TURN server; you should run these separately.
## Run
We do not currently offer a `latest` image, as this has somewhat undefined semantics.
We instead release only tagged versions so upgrading between releases is entirely
within your control.
### Using docker-compose (easier)
This image is designed to run either with an automatically generated configuration
file or with a custom configuration that requires manual editing.
An easy way to make use of this image is via docker-compose. See the
[contrib/docker](../contrib/docker)
section of the synapse project for examples.
### Without Compose (harder)
If you do not wish to use Compose, you may still run this image using plain
Docker commands. Note that the following is just a guideline and you may need
to add parameters to the docker run command to account for the network situation
with your postgres database.
```
docker run \
    -d \
    --name synapse \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    docker.io/matrixdotorg/synapse:latest
```
## Volumes
The image expects a single volume, located at ``/data``, that will hold:
* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.
You are free to use separate volumes depending on storage endpoints at your
disposal. For instance, ``/data/media`` could be stored on a large but low
performance hdd storage while other files could be stored on high performance
endpoints.
In order to set up an application service, simply create an ``appservices``
directory in the data volume and write the application service Yaml
configuration file there. Multiple application services are supported.
## Environment
Unless you specify a custom path for the configuration file, a very generic
file will be generated, based on the following environment settings.
These are a good starting point for setting up your own deployment.
Global settings:
* ``UID``, the user id Synapse will run as [default 991]
* ``GID``, the group id Synapse will run as [default 991]
* ``SYNAPSE_CONFIG_PATH``, path to a custom config file
If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
then customize it manually. No other environment variable is required.
Otherwise, a dynamic configuration file will be used. The following environment
variables are available for configuration:
* ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname.
* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous
statistics reporting back to the Matrix project which helps us to get funding.
* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
you run your own TLS-capable reverse proxy).
* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
the Synapse instance.
* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server.
* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
key in order to enable recaptcha upon registration.
* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
key in order to enable recaptcha upon registration.
* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
uris to enable TURN for this homeserver.
* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
Shared secrets, that will be initialized to random values if not set:
* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
registration is disabled.
* ``SYNAPSE_MACAROON_SECRET_KEY`` secret for signing access tokens
to the server.
Database specific values (will use SQLite if not set):
* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]
Mail server specific values (will not send emails if not set):
* ``SYNAPSE_SMTP_HOST``, hostname to the mail server.
* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if any.
* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server if any.
## Build
Build the docker image with the `docker build` command from the root of the synapse repository.
```
docker build -t docker.io/matrixdotorg/synapse . -f docker/Dockerfile
```
The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.
You may have a local Python wheel cache available, in which case copy the relevant
packages in the ``cache/`` directory at the root of the project.


@@ -1,219 +0,0 @@
# vim:ft=yaml

## TLS ##

tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh"
no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }}
tls_fingerprints: []

## Server ##

server_name: "{{ SYNAPSE_SERVER_NAME }}"
pid_file: /homeserver.pid
web_client: False
soft_file_limit: 0

## Ports ##

listeners:
{% if not SYNAPSE_NO_TLS %}
  -
    port: 8448
    bind_addresses: ['0.0.0.0']
    type: http
    tls: true
    x_forwarded: false
    resources:
      - names: [client]
        compress: true
      - names: [federation] # Federation APIs
        compress: false
{% endif %}

  - port: 8008
    tls: false
    bind_addresses: ['0.0.0.0']
    type: http
    x_forwarded: false
    resources:
      - names: [client]
        compress: true
      - names: [federation]
        compress: false

## Database ##

{% if POSTGRES_PASSWORD %}
database:
  name: "psycopg2"
  args:
    user: "{{ POSTGRES_USER or "synapse" }}"
    password: "{{ POSTGRES_PASSWORD }}"
    database: "{{ POSTGRES_DB or "synapse" }}"
    host: "{{ POSTGRES_HOST or "db" }}"
    port: "{{ POSTGRES_PORT or "5432" }}"
    cp_min: 5
    cp_max: 10
{% else %}
database:
  name: "sqlite3"
  args:
    database: "/data/homeserver.db"
{% endif %}

## Performance ##

event_cache_size: "{{ SYNAPSE_EVENT_CACHE_SIZE or "10K" }}"
verbose: 0
log_file: "/data/homeserver.log"
log_config: "/compiled/log.config"

## Ratelimiting ##

rc_messages_per_second: 0.2
rc_message_burst_count: 10.0
federation_rc_window_size: 1000
federation_rc_sleep_limit: 10
federation_rc_sleep_delay: 500
federation_rc_reject_limit: 50
federation_rc_concurrent: 3

## Files ##

media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "10M"
max_image_pixels: "32M"
dynamic_thumbnails: false

# List of thumbnails to precalculate when an image is uploaded.
thumbnail_sizes:
  - width: 32
    height: 32
    method: crop
  - width: 96
    height: 96
    method: crop
  - width: 320
    height: 240
    method: scale
  - width: 640
    height: 480
    method: scale
  - width: 800
    height: 600
    method: scale

url_preview_enabled: False
max_spider_size: "10M"

## Captcha ##

{% if SYNAPSE_RECAPTCHA_PUBLIC_KEY %}
recaptcha_public_key: "{{ SYNAPSE_RECAPTCHA_PUBLIC_KEY }}"
recaptcha_private_key: "{{ SYNAPSE_RECAPTCHA_PRIVATE_KEY }}"
enable_registration_captcha: True
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% else %}
recaptcha_public_key: "YOUR_PUBLIC_KEY"
recaptcha_private_key: "YOUR_PRIVATE_KEY"
enable_registration_captcha: False
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% endif %}

## Turn ##

{% if SYNAPSE_TURN_URIS %}
turn_uris:
{% for uri in SYNAPSE_TURN_URIS.split(',') %}  - "{{ uri }}"
{% endfor %}
turn_shared_secret: "{{ SYNAPSE_TURN_SECRET }}"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% else %}
turn_uris: []
turn_shared_secret: "YOUR_SHARED_SECRET"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% endif %}

## Registration ##

enable_registration: {{ "True" if SYNAPSE_ENABLE_REGISTRATION else "False" }}
registration_shared_secret: "{{ SYNAPSE_REGISTRATION_SHARED_SECRET }}"
bcrypt_rounds: 12
allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }}
enable_group_creation: true

# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
  - matrix.org
  - vector.im
  - riot.im

## Metrics ###

{% if SYNAPSE_REPORT_STATS.lower() == "yes" %}
enable_metrics: True
report_stats: True
{% else %}
enable_metrics: False
report_stats: False
{% endif %}

## API Configuration ##

room_invite_state_types:
  - "m.room.join_rules"
  - "m.room.canonical_alias"
  - "m.room.avatar"
  - "m.room.name"

{% if SYNAPSE_APPSERVICES %}
app_service_config_files:
{% for appservice in SYNAPSE_APPSERVICES %}  - "{{ appservice }}"
{% endfor %}
{% else %}
app_service_config_files: []
{% endif %}

macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
expire_access_token: False

## Signing Keys ##

signing_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.signing.key"
old_signing_keys: {}
key_refresh_interval: "1d" # 1 Day.

# The trusted servers to download signing keys from.
perspectives:
  servers:
    "matrix.org":
      verify_keys:
        "ed25519:auto":
          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"

password_config:
  enabled: true

{% if SYNAPSE_SMTP_HOST %}
email:
  enable_notifs: false
  smtp_host: "{{ SYNAPSE_SMTP_HOST }}"
  smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }}
  smtp_user: "{{ SYNAPSE_SMTP_USER }}"
  smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}"
  require_transport_security: False
  notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
  app_name: Matrix
  template_dir: res/templates
  notif_template_html: notif_mail.html
  notif_template_text: notif_mail.txt
  notif_for_new_users: True
  riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}"
{% endif %}


@@ -1,29 +0,0 @@
version: 1

formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]

loggers:
  synapse:
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

root:
  level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}
  handlers: [console]
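
The rendered result of this template is a standard Python `logging` dictConfig (`version: 1`). As a minimal illustrative sketch — assuming the template has already been rendered to `/compiled/log.config`, as done by the `start.py` below — it could be applied like this:

```python
import logging.config

import yaml

# Assumed path: where the rendered template is written (see start.py below).
with open("/compiled/log.config") as f:
    config = yaml.safe_load(f)

# Stdlib dictConfig wires up the formatters/filters/handlers/loggers above.
logging.config.dictConfig(config)
logging.getLogger("synapse").warning("logging configured")
```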


@@ -1,66 +0,0 @@
#!/usr/local/bin/python

import jinja2
import os
import sys
import subprocess
import glob

# Utility functions
convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ))

def check_arguments(environ, args):
    for argument in args:
        if argument not in environ:
            print("Environment variable %s is mandatory, exiting." % argument)
            sys.exit(2)

def generate_secrets(environ, secrets):
    for name, secret in secrets.items():
        if secret not in environ:
            filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name)
            if os.path.exists(filename):
                with open(filename) as handle: value = handle.read()
            else:
                print("Generating a random secret for {}".format(name))
                value = os.urandom(32).encode("hex")
                with open(filename, "w") as handle: handle.write(value)
            environ[secret] = value

# Prepare the configuration
mode = sys.argv[1] if len(sys.argv) > 1 else None
environ = os.environ.copy()
ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991))
args = ["python", "-m", "synapse.app.homeserver"]

# In generate mode, generate a configuration, missing keys, then exit
if mode == "generate":
    check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS", "SYNAPSE_CONFIG_PATH"))
    args += [
        "--server-name", environ["SYNAPSE_SERVER_NAME"],
        "--report-stats", environ["SYNAPSE_REPORT_STATS"],
        "--config-path", environ["SYNAPSE_CONFIG_PATH"],
        "--generate-config"
    ]
    os.execv("/usr/local/bin/python", args)

# In normal mode, generate missing keys if any, then run synapse
else:
    # Parse the configuration file
    if "SYNAPSE_CONFIG_PATH" in environ:
        args += ["--config-path", environ["SYNAPSE_CONFIG_PATH"]]
    else:
        check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"))
        generate_secrets(environ, {
            "registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
            "macaroon": "SYNAPSE_MACAROON_SECRET_KEY"
        })
        environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml")
        if not os.path.exists("/compiled"): os.mkdir("/compiled")
        convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ)
        convert("/conf/log.config", "/compiled/log.config", environ)
        subprocess.check_output(["chown", "-R", ownership, "/data"])
        args += ["--config-path", "/compiled/homeserver.yaml"]

    # Generate missing keys and start synapse
    subprocess.check_output(args + ["--generate-keys"])
    os.execv("/sbin/su-exec", ["su-exec", ownership] + args)


@@ -1,63 +0,0 @@
Shared-Secret Registration
==========================
This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.
To authenticate yourself to the server, you will need both the shared secret
(``registration_shared_secret`` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.
To fetch the nonce, you need to request one from the API::
  > GET /_matrix/client/r0/admin/register

  < {"nonce": "thisisanonce"}
Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and a HMAC digest of the content.
As an example::
  > POST /_matrix/client/r0/admin/register
  > {
      "nonce": "thisisanonce",
      "username": "pepper_roni",
      "password": "pizza",
      "admin": true,
      "mac": "mac_digest_here"
    }

  < {
      "access_token": "token_here",
      "user_id": "@pepper_roni:test",
      "home_server": "test",
      "device_id": "device_id_here"
    }
The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, and either
the string "admin" or "notadmin", each separated by NULs. For an example of
generation in Python::
  import hmac, hashlib

  def generate_mac(nonce, user, password, admin=False):
      mac = hmac.new(
          key=shared_secret,
          digestmod=hashlib.sha1,
      )

      mac.update(nonce.encode('utf8'))
      mac.update(b"\x00")
      mac.update(user.encode('utf8'))
      mac.update(b"\x00")
      mac.update(password.encode('utf8'))
      mac.update(b"\x00")
      mac.update(b"admin" if admin else b"notadmin")

      return mac.hexdigest()
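
For illustration, here is a minimal end-to-end sketch of the flow above using
the third-party ``requests`` library; the homeserver URL, username, password,
and shared secret are placeholder values, not defaults::

  import hmac, hashlib, requests

  BASE = "https://localhost:8448"                      # placeholder homeserver URL
  SHARED_SECRET = b"your_registration_shared_secret"   # placeholder

  # 1. Fetch a one-time nonce.
  nonce = requests.get(BASE + "/_matrix/client/r0/admin/register").json()["nonce"]

  # 2. HMAC-SHA1 over nonce, user, password and the admin flag,
  #    separated by NULs, keyed with the shared secret.
  mac = hmac.new(key=SHARED_SECRET, digestmod=hashlib.sha1)
  mac.update(nonce.encode('utf8'))
  mac.update(b"\x00")
  mac.update(b"pepper_roni")
  mac.update(b"\x00")
  mac.update(b"pizza")
  mac.update(b"\x00")
  mac.update(b"admin")

  # 3. Register the user (add verify=False only for a self-signed test cert).
  resp = requests.post(BASE + "/_matrix/client/r0/admin/register", json={
      "nonce": nonce,
      "username": "pepper_roni",
      "password": "pizza",
      "admin": True,
      "mac": mac.hexdigest(),
  })
  print(resp.json())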


@@ -44,26 +44,13 @@ Deactivate Account
This API deactivates an account. It removes active access tokens, resets the
password, and deletes third-party IDs (to prevent the user requesting a
password reset). It can also mark the user as GDPR-erased (stopping their data
from being distributed further, and deleting it entirely if there are no other
references to it).
password reset).
The api is::
POST /_matrix/client/r0/admin/deactivate/<user_id>
with a body of:
.. code:: json
{
"erase": true
}
including an ``access_token`` of a server admin.
The erase parameter is optional and defaults to 'false'.
An empty body may be passed for backwards compatibility.
including an ``access_token`` of a server admin, and an empty request body.
Reset password


@@ -16,7 +16,7 @@
print("I am a fish %s" %
"moo")
and this::
and this::
print(
"I am a fish %s" %


@@ -1,160 +0,0 @@
Support in Synapse for tracking agreement to server terms and conditions
========================================================================
Synapse 0.30 introduces support for tracking whether users have agreed to the
terms and conditions set by the administrator of a server - and blocking access
to the server until they have.
There are several parts to this functionality; each requires some specific
configuration in `homeserver.yaml` to be enabled.
Note that various parts of the configuration and this document refer to the
"privacy policy": agreement with a privacy policy is one particular use of this
feature, but of course administrators can specify other terms and conditions
unrelated to "privacy" per se.
Collecting policy agreement from a user
---------------------------------------
Synapse can be configured to serve the user a simple policy form with an
"accept" button. Clicking "Accept" records the user's acceptance in the
database and shows a success page.
To enable this, first create templates for the policy and success pages.
These should be stored on the local filesystem.
These templates use the [Jinja2](http://jinja.pocoo.org) templating language,
and [docs/privacy_policy_templates](privacy_policy_templates) gives
examples of the sort of thing that can be done.
Note that the templates must be stored under a name giving the language of the
template - currently this must always be `en` (for "English");
internationalisation support is intended for the future.
The template for the policy itself should be versioned and named according to
the version: for example `1.0.html`. The version of the policy which the user
has agreed to is stored in the database.
Once the templates are in place, make the following changes to `homeserver.yaml`:
1. Add a `user_consent` section, which should look like:
```yaml
user_consent:
  template_dir: privacy_policy_templates
  version: 1.0
```
`template_dir` points to the directory containing the policy
templates. `version` defines the version of the policy which will be served
to the user. In the example above, Synapse will serve
`privacy_policy_templates/en/1.0.html`.
2. Add a `form_secret` setting at the top level:
```yaml
form_secret: "<unique secret>"
```
This should be set to an arbitrary secret string (try `pwgen -y 30` to
generate suitable secrets).
More on what this is used for below.
3. Add `consent` wherever the `client` resource is currently enabled in the
`listeners` configuration. For example:
```yaml
listeners:
  - port: 8008
    resources:
      - names:
          - client
          - consent
```
Finally, ensure that `jinja2` is installed. If you are using a virtualenv, this
should be a matter of `pip install Jinja2`. On debian, try `apt-get install
python-jinja2`.
Once this is complete, and the server has been restarted, try visiting
`https://<server>/_matrix/consent`. If correctly configured, this should give
an error "Missing string query parameter 'u'". It is now possible to manually
construct URIs where users can give their consent.
### Constructing the consent URI
It may be useful to manually construct the "consent URI" for a given user - for
instance, in order to send them an email asking them to consent. To do this,
take the base `https://<server>/_matrix/consent` URL and add the following
query parameters:
* `u`: the user id of the user. This can either be a full MXID
(`@user:server.com`) or just the localpart (`user`).
* `h`: hex-encoded HMAC-SHA256 of `u` using the `form_secret` as a key. It is
possible to calculate this on the commandline with something like:
```bash
echo -n '<user>' | openssl sha256 -hmac '<form_secret>'
```
This should result in a URI which looks something like:
`https://<server>/_matrix/consent?u=<user>&h=68a152465a4d...`.
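
For illustration, a minimal Python sketch of the same computation (the `form_secret` and user id below are placeholders; use exactly the value you pass as `u`, whether full MXID or localpart):

```python
import hmac
import hashlib
from urllib.parse import quote

form_secret = "<form_secret>"  # placeholder: must match form_secret in homeserver.yaml
user = "@user:server.com"      # placeholder: exactly the value passed as `u`

h = hmac.new(form_secret.encode("utf8"), user.encode("utf8"), hashlib.sha256).hexdigest()
print("https://<server>/_matrix/consent?u=%s&h=%s" % (quote(user), h))
```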
Sending users a server notice asking them to agree to the policy
----------------------------------------------------------------
It is possible to configure Synapse to send a [server
notice](server_notices.md) to anybody who has not yet agreed to the current
version of the policy. To do so:
* ensure that the consent resource is configured, as in the previous section
* ensure that server notices are configured, as in [server_notices.md](server_notices.md).
* Add `server_notice_content` under `user_consent` in `homeserver.yaml`. For
example:
```yaml
user_consent:
  server_notice_content:
    msgtype: m.text
    body: >-
      Please give your consent to the privacy policy at %(consent_uri)s.
```
Synapse automatically replaces the placeholder `%(consent_uri)s` with the
consent uri for that user.
* ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
URI that clients use to connect to the server. (It is used to construct
`consent_uri` in the server notice.)
Blocking users from using the server until they agree to the policy
-------------------------------------------------------------------
Synapse can be configured to block any attempts to join rooms or send messages
until the user has given their agreement to the policy. (Joining the server
notices room is exempted from this).
To enable this, add `block_events_error` under `user_consent`. For example:
```yaml
user_consent:
  block_events_error: >-
    You can't send any messages until you consent to the privacy policy at
    %(consent_uri)s.
```
Synapse automatically replaces the placeholder `%(consent_uri)s` with the
consent uri for that user.
ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
URI that clients use to connect to the server. (It is used to construct
`consent_uri` in the error.)


@@ -1,43 +0,0 @@
Using the synapse manhole
=========================
The "manhole" allows server administrators to access a Python shell on a running
Synapse installation. This is a very powerful mechanism for administration and
debugging.
To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`:
```yaml
listeners:
  - port: 9000
    bind_addresses: ['::1', '127.0.0.1']
    type: manhole
```
(`bind_addresses` in the above is important: it ensures that access to the
manhole is only possible for local users).
Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.
Then restart synapse, and point an ssh client at port 9000 on localhost, using
the username `matrix`:
```bash
ssh -p9000 matrix@localhost
```
The password is `rabbithole`.
This gives a Python REPL in which `hs` gives access to the
`synapse.server.HomeServer` object - which in turn gives access to many other
parts of the process.
As a simple example, retrieving an event from the database:
```
>>> hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org')
<Deferred at 0x7ff253fc6998 current result: <FrozenEvent event_id='$1416420717069yeQaw:matrix.org', type='m.room.create', state_key=''>>
```


@@ -1,47 +1,25 @@
How to monitor Synapse metrics using Prometheus
===============================================
1. Install Prometheus:
1. Install prometheus:
Follow instructions at http://prometheus.io/docs/introduction/install/
2. Enable Synapse metrics:
2. Enable synapse metrics:
There are two methods of enabling metrics in Synapse.
Simply setting a (local) port number will enable it. Pick a port.
prometheus itself defaults to 9090, so starting just above that for
locally monitored services seems reasonable. E.g. 9092:
The first serves the metrics as a part of the usual web server and can be
enabled by adding the "metrics" resource to the existing listener as such::
Add to homeserver.yaml::
  resources:
    - names:
        - client
        - metrics
metrics_port: 9092
This provides a simple way of adding metrics to your Synapse installation,
and serves under ``/_synapse/metrics``. If you do not wish your metrics to be
publicly exposed, you will need to either filter them out at your load
balancer, or use the second method.
Also ensure that ``enable_metrics`` is set to ``True``.
The second method runs the metrics server on a different port, in a
different thread to Synapse. This can make it more resilient when heavy load
would otherwise mean metrics cannot be retrieved, and makes it easier to
expose the metrics to internal networks only. The served metrics are
available over HTTP only, and will be available at ``/``.
Restart synapse.
Add a new listener to homeserver.yaml::
listeners:
- type: metrics
port: 9000
bind_addresses:
- '0.0.0.0'
For both options, you will need to ensure that ``enable_metrics`` is set to
``True``.
Restart Synapse.
3. Add a Prometheus target for Synapse.
3. Add a prometheus target for synapse.
It needs to set the ``metrics_path`` to a non-default value (under ``scrape_configs``)::
@@ -53,50 +31,7 @@ How to monitor Synapse metrics using Prometheus
If your prometheus is older than 1.5.2, you will need to replace
``static_configs`` in the above with ``target_groups``.
Restart Prometheus.
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------
The duplicated metrics deprecated in Synapse 0.27.0 have been removed.
All time duration-based metrics have been changed to be seconds. This affects:
+----------------------------------+
| msec -> sec metrics |
+==================================+
| python_gc_time |
+----------------------------------+
| python_twisted_reactor_tick_time |
+----------------------------------+
| synapse_storage_query_time |
+----------------------------------+
| synapse_storage_schedule_time |
+----------------------------------+
| synapse_storage_transaction_time |
+----------------------------------+
Several metrics have been changed to be histograms, which sort entries into
buckets and allow better analysis. The following metrics are now histograms:
+-------------------------------------------+
| Altered metrics |
+===========================================+
| python_gc_time |
+-------------------------------------------+
| python_twisted_reactor_pending_calls |
+-------------------------------------------+
| python_twisted_reactor_tick_time |
+-------------------------------------------+
| synapse_http_server_response_time_seconds |
+-------------------------------------------+
| synapse_storage_query_time |
+-------------------------------------------+
| synapse_storage_schedule_time |
+-------------------------------------------+
| synapse_storage_transaction_time |
+-------------------------------------------+
Restart prometheus.
Block and response metrics renamed for 0.27.0


@@ -6,22 +6,16 @@ Postgres version 9.4 or later is known to work.
Set up database
===============
Assuming your PostgreSQL database user is called ``postgres``, create a user
``synapse_user`` with::
su - postgres
createuser --pwprompt synapse_user
The PostgreSQL database used *must* have the correct encoding set, otherwise it
The PostgreSQL database used *must* have the correct encoding set, otherwise
would not be able to store UTF8 strings. To create a database with the correct
encoding use, e.g.::
CREATE DATABASE synapse
ENCODING 'UTF8'
LC_COLLATE='C'
LC_CTYPE='C'
template=template0
OWNER synapse_user;
CREATE DATABASE synapse
ENCODING 'UTF8'
LC_COLLATE='C'
LC_CTYPE='C'
template=template0
OWNER synapse_user;
This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).
@@ -52,8 +46,8 @@ As with Debian/Ubuntu, postgres support depends on the postgres python connector
Synapse config
==============
When you are ready to start using PostgreSQL, edit the ``database`` section in
your config file to match the following lines::
When you are ready to start using PostgreSQL, add the following line to your
config file::
database:
name: psycopg2
@@ -102,12 +96,9 @@ complete, restart synapse. For instance::
cp homeserver.db homeserver.db.snapshot
./synctl start
Copy the old config file into a new config file::
cp homeserver.yaml homeserver-postgres.yaml
Edit the database section as described in the section *Synapse config* above
and with the SQLite snapshot located at ``homeserver.db.snapshot`` simply run::
Assuming your new config file (as described in the section *Synapse config*)
is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at
``homeserver.db.snapshot`` then simply run::
synapse_port_db --sqlite-database homeserver.db.snapshot \
--postgres-config homeserver-postgres.yaml
@@ -126,11 +117,6 @@ run::
--postgres-config homeserver-postgres.yaml
Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file ``homeserver-postgres.yaml``::
./synctl stop
mv homeserver.yaml homeserver-old-sqlite.yaml
mv homeserver-postgres.yaml homeserver.yaml
./synctl start
Synapse should now be running against PostgreSQL.
database configuration file ``homeserver-postgres.yaml`` (i.e. rename it to
``homeserver.yaml``) and restart synapse. Synapse should now be running against
PostgreSQL.


@@ -1,23 +0,0 @@
<!doctype html>
<html lang="en">
  <head>
    <title>Matrix.org Privacy policy</title>
  </head>
  <body>
    {% if has_consented %}
      <p>
        Your base already belong to us.
      </p>
    {% else %}
      <p>
        All your base are belong to us.
      </p>
      <form method="post" action="consent">
        <input type="hidden" name="v" value="{{version}}"/>
        <input type="hidden" name="u" value="{{user}}"/>
        <input type="hidden" name="h" value="{{userhmac}}"/>
        <input type="submit" value="Sure thing!"/>
      </form>
    {% endif %}
  </body>
</html>


@@ -1,11 +0,0 @@
<!doctype html>
<html lang="en">
  <head>
    <title>Matrix.org Privacy policy</title>
  </head>
  <body>
    <p>
      Sweet.
    </p>
  </body>
</html>


@@ -1,74 +0,0 @@
Server Notices
==============
'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
channel whereby server administrators can send messages to users on the server.
They are used as part of communication of the server policies (see
[consent_tracking.md](consent_tracking.md)), however the intention is that
they may also find a use for features such as "Message of the day".
This is a feature specific to Synapse, but it uses standard Matrix
communication mechanisms, so should work with any Matrix client.
User experience
---------------
When the user is first sent a server notice, they will get an invitation to a
room (typically called 'Server Notices', though this is configurable in
`homeserver.yaml`). They will be **unable to reject** this invitation -
attempts to do so will receive an error.
Once they accept the invitation, they will see the notice message in the room
history; it will appear to have come from the 'server notices user' (see
below).
The user is prevented from sending any messages in this room by the power
levels.
Having joined the room, the user can leave the room if they want. Subsequent
server notices will then cause a new room to be created.
Synapse configuration
---------------------
Server notices come from a specific user id on the server. Server
administrators are free to choose the user id - something like `server` is
suggested, meaning the notices will come from
`@server:<your_server_name>`. Once the Server Notices user is configured, that
user id becomes a special, privileged user, so administrators should ensure
that **it is not already allocated**.
In order to support server notices, it is necessary to add some configuration
to the `homeserver.yaml` file. In particular, you should add a `server_notices`
section, which should look like this:
```yaml
server_notices:
  system_mxid_localpart: server
  system_mxid_display_name: "Server Notices"
  system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
  room_name: "Server Notices"
```
The only compulsory setting is `system_mxid_localpart`, which defines the user
id of the Server Notices user, as above. `room_name` defines the name of the
room which will be created.
`system_mxid_display_name` and `system_mxid_avatar_url` can be used to set the
displayname and avatar of the Server Notices user.
Sending notices
---------------
As of the current version of synapse, there is no convenient interface for
sending notices (other than the automated ones sent as part of consent
tracking).
In the meantime, it is possible to test this feature using the manhole. Having
gone into the manhole as described in [manhole.md](manhole.md), a notice can be
sent with something like:
```
>>> hs.get_server_notices_manager().send_notice('@user:server.com', {'msgtype':'m.text', 'body':'foo'})
```


@@ -206,10 +206,6 @@ Handles client API endpoints. It can handle REST endpoints matching the
following regular expressions::
^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
``synapse.app.user_dir``
~~~~~~~~~~~~~~~~~~~~~~~~


@@ -1,7 +1,5 @@
#! /bin/bash
set -eux
cd "`dirname $0`/.."
TOX_DIR=$WORKSPACE/.tox
@@ -16,20 +14,7 @@ fi
tox -e py27 --notest -v
TOX_BIN=$TOX_DIR/py27/bin
# cryptography 2.2 requires setuptools >= 18.5.
#
# older versions of virtualenv (?) give us a virtualenv with the same version
# of setuptools as is installed on the system python (and tox runs virtualenv
# under python3, so we get the version of setuptools that is installed on that).
#
# anyway, make sure that we have a recent enough setuptools.
$TOX_BIN/pip install 'setuptools>=18.5'
# we also need a semi-recent version of pip, because old ones fail to install
# the "enum34" dependency of cryptography.
$TOX_BIN/pip install 'pip>=10'
$TOX_BIN/pip install setuptools
{ python synapse/python_dependencies.py
echo lxml psycopg2
} | xargs $TOX_BIN/pip install


@@ -1,30 +0,0 @@
[tool.towncrier]
package = "synapse"
filename = "CHANGES.md"
directory = "changelog.d"
issue_format = "[\\#{issue}](https://github.com/matrix-org/synapse/issues/{issue})"
[[tool.towncrier.type]]
directory = "feature"
name = "Features"
showcontent = true
[[tool.towncrier.type]]
directory = "bugfix"
name = "Bugfixes"
showcontent = true
[[tool.towncrier.type]]
directory = "doc"
name = "Improved Documentation"
showcontent = true
[[tool.towncrier.type]]
directory = "removal"
name = "Deprecations and Removals"
showcontent = true
[[tool.towncrier.type]]
directory = "misc"
name = "Internal Changes"
showcontent = true


@@ -18,22 +18,14 @@
from __future__ import print_function
import argparse
from urlparse import urlparse, urlunparse
import nacl.signing
import json
import base64
import requests
import sys
from requests.adapters import HTTPAdapter
import srvlookup
import yaml
# uncomment the following to enable debug logging of http requests
#from httplib import HTTPConnection
#HTTPConnection.debuglevel = 1
def encode_base64(input_bytes):
    """Encode bytes as a base64 string without any padding."""
@@ -121,6 +113,17 @@ def read_signing_keys(stream):
    return keys


def lookup(destination, path):
    if ":" in destination:
        return "https://%s%s" % (destination, path)
    else:
        try:
            srv = srvlookup.lookup("matrix", "tcp", destination)[0]
            return "https://%s:%d%s" % (srv.host, srv.port, path)
        except:
            return "https://%s:%d%s" % (destination, 8448, path)
def request_json(method, origin_name, origin_key, destination, path, content):
    if method is None:
        if content is None:
@@ -149,19 +152,13 @@ def request_json(method, origin_name, origin_key, destination, path, content):
        authorization_headers.append(bytes(header))
        print ("Authorization: %s" % header, file=sys.stderr)

    dest = "matrix://%s%s" % (destination, path)
    dest = lookup(destination, path)
    print ("Requesting %s" % dest, file=sys.stderr)

    s = requests.Session()
    s.mount("matrix://", MatrixConnectionAdapter())

    result = s.request(
    result = requests.request(
        method=method,
        url=dest,
        headers={
            "Host": destination,
            "Authorization": authorization_headers[0]
        },
        headers={"Authorization": authorization_headers[0]},
        verify=False,
        data=content,
    )
@@ -245,39 +242,5 @@ def read_args_from_config(args):
args.signing_key_path = config['signing_key_path']
class MatrixConnectionAdapter(HTTPAdapter):
@staticmethod
def lookup(s):
if s[-1] == ']':
# ipv6 literal (with no port)
return s, 8448
if ":" in s:
out = s.rsplit(":",1)
try:
port = int(out[1])
except ValueError:
raise ValueError("Invalid host:port '%s'" % s)
return out[0], port
try:
srv = srvlookup.lookup("matrix", "tcp", s)[0]
return srv.host, srv.port
except:
return s, 8448
def get_connection(self, url, proxies=None):
parsed = urlparse(url)
(host, port) = self.lookup(parsed.netloc)
netloc = "%s:%d" % (host, port)
print("Connecting to %s" % (netloc,), file=sys.stderr)
url = urlunparse((
"https", netloc, parsed.path, parsed.params, parsed.query,
parsed.fragment,
))
return super(MatrixConnectionAdapter, self).get_connection(url, proxies)
if __name__ == "__main__":
main()
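The destination-resolution logic above (an explicit port wins outright; otherwise try an SRV record for `_matrix._tcp`, falling back to port 8448) can be exercised on its own. A minimal sketch assuming the same `srvlookup` package the script imports (the IPv6-literal case handled in `MatrixConnectionAdapter.lookup` is omitted):
```
import srvlookup

def resolve_matrix_host(server_name):
    # an explicit host:port wins outright
    if ":" in server_name:
        host, port = server_name.rsplit(":", 1)
        return host, int(port)
    # otherwise consult the _matrix._tcp SRV record, defaulting to 8448
    try:
        srv = srvlookup.lookup("matrix", "tcp", server_name)[0]
        return srv.host, srv.port
    except Exception:
        return server_name, 8448
```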

View File

@@ -6,19 +6,9 @@
## Do not run it lightly.
set -e
if [ "$1" == "-h" ] || [ "$1" == "" ]; then
echo "Call with ROOM_ID as first option and then pipe it into the database. So for instance you might run"
echo " nuke-room-from-db.sh <room_id> | sqlite3 homeserver.db"
echo "or"
echo " nuke-room-from-db.sh <room_id> | psql --dbname=synapse"
exit
fi
ROOMID="$1"
cat <<EOF
sqlite3 homeserver.db <<EOF
DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
DELETE FROM event_edges WHERE room_id = '$ROOMID';
@@ -39,7 +29,7 @@ DELETE FROM state_groups WHERE room_id = '$ROOMID';
DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
DELETE FROM event_search WHERE room_id = '$ROOMID';
DELETE FROM event_search_content WHERE c1room_id = '$ROOMID';
DELETE FROM guest_access WHERE room_id = '$ROOMID';
DELETE FROM history_visibility WHERE room_id = '$ROOMID';
DELETE FROM room_tags WHERE room_id = '$ROOMID';

View File

@@ -26,37 +26,11 @@ import yaml
def request_registration(user, password, server_location, shared_secret, admin=False):
req = urllib2.Request(
"%s/_matrix/client/r0/admin/register" % (server_location,),
headers={'Content-Type': 'application/json'}
)
try:
if sys.version_info[:3] >= (2, 7, 9):
# As of version 2.7.9, urllib2 now checks SSL certs
import ssl
f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
else:
f = urllib2.urlopen(req)
body = f.read()
f.close()
nonce = json.loads(body)["nonce"]
except urllib2.HTTPError as e:
print "ERROR! Received %d %s" % (e.code, e.reason,)
if 400 <= e.code < 500:
if e.info().type == "application/json":
resp = json.load(e)
if "error" in resp:
print resp["error"]
sys.exit(1)
mac = hmac.new(
key=shared_secret,
digestmod=hashlib.sha1,
)
mac.update(nonce)
mac.update("\x00")
mac.update(user)
mac.update("\x00")
mac.update(password)
@@ -66,10 +40,10 @@ def request_registration(user, password, server_location, shared_secret, admin=F
mac = mac.hexdigest()
data = {
"nonce": nonce,
"username": user,
"user": user,
"password": password,
"mac": mac,
"type": "org.matrix.login.shared_secret",
"admin": admin,
}
@@ -78,7 +52,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F
print "Sending registration request..."
req = urllib2.Request(
"%s/_matrix/client/r0/admin/register" % (server_location,),
"%s/_matrix/client/api/v1/register" % (server_location,),
data=json.dumps(data),
headers={'Content-Type': 'application/json'}
)

View File

@@ -14,26 +14,7 @@ ignore =
pylint.cfg
tox.ini
[pep8]
max-line-length = 90
# W503 requires that binary operators be at the end, not start, of lines. Erik
# doesn't like it. E203 is contrary to PEP8.
ignore = W503,E203
[flake8]
# note that flake8 inherits the "ignore" settings from "pep8" (because it uses
# pep8 to do those checks), but not the "max-line-length" setting
max-line-length = 90
[isort]
line_length = 89
not_skip = __init__.py
sections=FUTURE,STDLIB,COMPAT,THIRDPARTY,TWISTED,FIRSTPARTY,TESTS,LOCALFOLDER
default_section=THIRDPARTY
known_first_party = synapse
known_tests=tests
known_compat = mock,six
known_twisted=twisted,OpenSSL
multi_line_output=3
include_trailing_comma=true
combine_as_imports=true
# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
ignore = W503

View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
__version__ = "0.33.2"
__version__ = "0.27.3-rc2"

View File

@@ -15,19 +15,15 @@
import logging
from six import itervalues
import pymacaroons
from netaddr import IPAddress
from twisted.internet import defer
import synapse.types
from synapse import event_auth
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, Codes
from synapse.types import UserID
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
from synapse.util.caches import register_cache, CACHE_SIZE_FACTOR
from synapse.util.caches.lrucache import LruCache
from synapse.util.metrics import Measure
@@ -61,17 +57,16 @@ class Auth(object):
self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
register_cache("cache", "token_cache", self.token_cache)
register_cache("token_cache", self.token_cache)
@defer.inlineCallbacks
def check_from_context(self, event, context, do_sig_check=True):
prev_state_ids = yield context.get_prev_state_ids(self.store)
auth_events_ids = yield self.compute_auth_events(
event, prev_state_ids, for_verification=True,
event, context.prev_state_ids, for_verification=True,
)
auth_events = yield self.store.get_events(auth_events_ids)
auth_events = {
(e.type, e.state_key): e for e in itervalues(auth_events)
(e.type, e.state_key): e for e in auth_events.values()
}
self.check(event, auth_events=auth_events, do_sig_check=do_sig_check)
@@ -194,7 +189,7 @@ class Auth(object):
synapse.types.create_requester(user_id, app_service=app_service)
)
access_token = self.get_access_token_from_request(
access_token = get_access_token_from_request(
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
)
@@ -240,22 +235,17 @@ class Auth(object):
@defer.inlineCallbacks
def _get_appservice_user_id(self, request):
app_service = self.store.get_app_service_by_token(
self.get_access_token_from_request(
get_access_token_from_request(
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
)
)
if app_service is None:
defer.returnValue((None, None))
if app_service.ip_range_whitelist:
ip_address = IPAddress(self.hs.get_ip_from_request(request))
if ip_address not in app_service.ip_range_whitelist:
defer.returnValue((None, None))
if b"user_id" not in request.args:
if "user_id" not in request.args:
defer.returnValue((app_service.sender, app_service))
user_id = request.args[b"user_id"][0].decode('utf8')
user_id = request.args["user_id"][0]
if app_service.sender == user_id:
defer.returnValue((app_service.sender, app_service))
@@ -496,7 +486,7 @@ class Auth(object):
def _look_up_user_by_access_token(self, token):
ret = yield self.store.get_user_by_access_token(token)
if not ret:
logger.warn("Unrecognised access token - not in store.")
logger.warn("Unrecognised access token - not in store: %s" % (token,))
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN
@@ -514,12 +504,12 @@ class Auth(object):
def get_appservice_by_req(self, request):
try:
token = self.get_access_token_from_request(
token = get_access_token_from_request(
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
)
service = self.store.get_app_service_by_token(token)
if not service:
logger.warn("Unrecognised appservice access token.")
logger.warn("Unrecognised appservice access token: %s" % (token,))
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Unrecognised access token.",
@@ -545,8 +535,7 @@ class Auth(object):
@defer.inlineCallbacks
def add_auth_events(self, builder, context):
prev_state_ids = yield context.get_prev_state_ids(self.store)
auth_ids = yield self.compute_auth_events(builder, prev_state_ids)
auth_ids = yield self.compute_auth_events(builder, context.prev_state_ids)
auth_events_entries = yield self.store.add_event_hashes(
auth_ids
@@ -664,7 +653,7 @@ class Auth(object):
auth_events[(EventTypes.PowerLevels, "")] = power_level_event
send_level = event_auth.get_send_level(
EventTypes.Aliases, "", power_level_event,
EventTypes.Aliases, "", auth_events
)
user_level = event_auth.get_user_power_level(user_id, auth_events)
@@ -675,101 +664,67 @@ class Auth(object):
" edit its room list entry"
)
@staticmethod
def has_access_token(request):
"""Checks if the request has an access_token.
Returns:
bool: False if no access_token was given, True otherwise.
"""
query_params = request.args.get("access_token")
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
return bool(query_params) or bool(auth_headers)
def has_access_token(request):
"""Checks if the request has an access_token.
@staticmethod
def get_access_token_from_request(request, token_not_found_http_status=401):
"""Extracts the access_token from the request.
Returns:
bool: False if no access_token was given, True otherwise.
"""
query_params = request.args.get("access_token")
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
return bool(query_params) or bool(auth_headers)
Args:
request: The http request.
token_not_found_http_status(int): The HTTP status code to set in the
AuthError if the token isn't found. This is used in some of the
legacy APIs to change the status code to 403 from the default of
401 since some of the old clients depended on auth errors returning
403.
Returns:
str: The access_token
Raises:
AuthError: If there isn't an access_token in the request.
"""
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
query_params = request.args.get(b"access_token")
if auth_headers:
# Try to get the access_token from an "Authorization: Bearer"
# header
if query_params is not None:
raise AuthError(
token_not_found_http_status,
"Mixing Authorization headers and access_token query parameters.",
errcode=Codes.MISSING_TOKEN,
)
if len(auth_headers) > 1:
raise AuthError(
token_not_found_http_status,
"Too many Authorization headers.",
errcode=Codes.MISSING_TOKEN,
)
parts = auth_headers[0].split(" ")
if parts[0] == "Bearer" and len(parts) == 2:
return parts[1]
else:
raise AuthError(
token_not_found_http_status,
"Invalid Authorization header.",
errcode=Codes.MISSING_TOKEN,
)
else:
# Try to get the access_token from the query params.
if not query_params:
raise AuthError(
token_not_found_http_status,
"Missing access token.",
errcode=Codes.MISSING_TOKEN
)
def get_access_token_from_request(request, token_not_found_http_status=401):
"""Extracts the access_token from the request.
return query_params[0]
Args:
request: The http request.
token_not_found_http_status(int): The HTTP status code to set in the
AuthError if the token isn't found. This is used in some of the
legacy APIs to change the status code to 403 from the default of
401 since some of the old clients depended on auth errors returning
403.
Returns:
str: The access_token
Raises:
AuthError: If there isn't an access_token in the request.
"""
@defer.inlineCallbacks
def check_in_room_or_world_readable(self, room_id, user_id):
"""Checks that the user is or was in the room or the room is world
readable. If it isn't then an exception is raised.
Returns:
Deferred[tuple[str, str|None]]: Resolves to the current membership of
the user in the room and the membership event ID of the user. If
the user is not in the room and never has been, then
`(Membership.JOIN, None)` is returned.
"""
try:
# check_user_was_in_room will return the most recent membership
# event for the user if:
# * The user is a non-guest user, and was ever in the room
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = yield self.check_user_was_in_room(room_id, user_id)
defer.returnValue((member_event.membership, member_event.event_id))
except AuthError:
visibility = yield self.state.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
)
if (
visibility and
visibility.content["history_visibility"] == "world_readable"
):
defer.returnValue((Membership.JOIN, None))
return
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
query_params = request.args.get(b"access_token")
if auth_headers:
# Try to get the access_token from an "Authorization: Bearer"
# header
if query_params is not None:
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
token_not_found_http_status,
"Mixing Authorization headers and access_token query parameters.",
errcode=Codes.MISSING_TOKEN,
)
if len(auth_headers) > 1:
raise AuthError(
token_not_found_http_status,
"Too many Authorization headers.",
errcode=Codes.MISSING_TOKEN,
)
parts = auth_headers[0].split(" ")
if parts[0] == "Bearer" and len(parts) == 2:
return parts[1]
else:
raise AuthError(
token_not_found_http_status,
"Invalid Authorization header.",
errcode=Codes.MISSING_TOKEN,
)
else:
# Try to get the access_token from the query params.
if not query_params:
raise AuthError(
token_not_found_http_status,
"Missing access token.",
errcode=Codes.MISSING_TOKEN
)
return query_params[0]
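Both copies of `get_access_token_from_request` shown above implement the same decision table: exactly one `Authorization: Bearer` header, or else a single `access_token` query parameter, and never both. A minimal standalone sketch of that rule (plain lists stand in for the request object):
```
def extract_access_token(auth_headers, query_params):
    # auth_headers: list of raw Authorization header values, or None
    # query_params: list of access_token query values, or None
    if auth_headers:
        if query_params is not None:
            raise ValueError("Mixing Authorization headers and access_token query parameters.")
        if len(auth_headers) > 1:
            raise ValueError("Too many Authorization headers.")
        parts = auth_headers[0].split(" ")
        if parts[0] == "Bearer" and len(parts) == 2:
            return parts[1]
        raise ValueError("Invalid Authorization header.")
    if not query_params:
        raise ValueError("Missing access token.")
    return query_params[0]
```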

View File

@@ -16,9 +16,6 @@
"""Contains constants from the specification."""
# the "depth" field on events is limited to 2**63 - 1
MAX_DEPTH = 2**63 - 1
class Membership(object):
@@ -76,8 +73,6 @@ class EventTypes(object):
Topic = "m.room.topic"
Name = "m.room.name"
ServerACL = "m.room.server_acl"
class RejectedReason(object):
AUTH_ERROR = "auth_error"

View File

@@ -17,10 +17,7 @@
import logging
from six import iteritems
from six.moves import http_client
from canonicaljson import json
import simplejson as json
logger = logging.getLogger(__name__)
@@ -53,9 +50,6 @@ class Codes(object):
THREEPID_DENIED = "M_THREEPID_DENIED"
INVALID_USERNAME = "M_INVALID_USERNAME"
SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"
CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM"
MAU_LIMIT_EXCEEDED = "M_MAU_LIMIT_EXCEEDED"
class CodeMessageException(RuntimeError):
@@ -70,6 +64,20 @@ class CodeMessageException(RuntimeError):
self.code = code
self.msg = msg
def error_dict(self):
return cs_error(self.msg)
class MatrixCodeMessageException(CodeMessageException):
"""An error from a general matrix endpoint, eg. from a proxied Matrix API call.
Attributes:
errcode (str): Matrix error code e.g 'M_FORBIDDEN'
"""
def __init__(self, code, msg, errcode=Codes.UNKNOWN):
super(MatrixCodeMessageException, self).__init__(code, msg)
self.errcode = errcode
class SynapseError(CodeMessageException):
"""A base exception type for matrix errors which have an errcode and error
@@ -95,54 +103,38 @@ class SynapseError(CodeMessageException):
self.errcode,
)
@classmethod
def from_http_response_exception(cls, err):
"""Make a SynapseError based on an HTTPResponseException
class ProxiedRequestError(SynapseError):
"""An error from a general matrix endpoint, eg. from a proxied Matrix API call.
This is useful when a proxied request has failed, and we need to
decide how to map the failure onto a matrix error to send back to the
client.
Attributes:
errcode (str): Matrix error code e.g 'M_FORBIDDEN'
"""
def __init__(self, code, msg, errcode=Codes.UNKNOWN, additional_fields=None):
super(ProxiedRequestError, self).__init__(
code, msg, errcode
)
if additional_fields is None:
self._additional_fields = {}
else:
self._additional_fields = dict(additional_fields)
An attempt is made to parse the body of the http response as a matrix
error. If that succeeds, the errcode and error message from the body
are used as the errcode and error message in the new synapse error.
def error_dict(self):
return cs_error(
self.msg,
self.errcode,
**self._additional_fields
)
class ConsentNotGivenError(SynapseError):
"""The error returned to the client when the user has not consented to the
privacy policy.
"""
def __init__(self, msg, consent_uri):
"""Constructs a ConsentNotGivenError
Otherwise, the errcode is set to M_UNKNOWN, and the error message is
set to the reason code from the HTTP response.
Args:
msg (str): The human-readable error message
consent_url (str): The URL where the user can give their consent
"""
super(ConsentNotGivenError, self).__init__(
code=http_client.FORBIDDEN,
msg=msg,
errcode=Codes.CONSENT_NOT_GIVEN
)
self._consent_uri = consent_uri
err (HttpResponseException):
def error_dict(self):
return cs_error(
self.msg,
self.errcode,
consent_uri=self._consent_uri
)
Returns:
SynapseError:
"""
# try to parse the body as json, to get better errcode/msg, but
# default to M_UNKNOWN with the HTTP status as the error text
try:
j = json.loads(err.response)
except ValueError:
j = {}
errcode = j.get('errcode', Codes.UNKNOWN)
errmsg = j.get('error', err.msg)
res = SynapseError(err.code, errmsg, errcode)
return res
class RegistrationError(SynapseError):
@@ -285,19 +277,27 @@ class LimitExceededError(SynapseError):
)
def cs_exception(exception):
if isinstance(exception, CodeMessageException):
return exception.error_dict()
else:
logger.error("Unknown exception type: %s", type(exception))
return {}
def cs_error(msg, code=Codes.UNKNOWN, **kwargs):
""" Utility method for constructing an error response for client-server
interactions.
Args:
msg (str): The error message.
code (str): The error code.
code (int): The error code.
kwargs : Additional keys to add to the response.
Returns:
A dict representing the error response JSON.
"""
err = {"error": msg, "errcode": code}
for key, value in iteritems(kwargs):
for key, value in kwargs.iteritems():
err[key] = value
return err
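For reference, `cs_error` simply merges the message, errcode and any extra keys into one response dict; a hypothetical doctest-style illustration (key order may vary):
```
>>> cs_error("User has not consented", code="M_CONSENT_NOT_GIVEN", consent_uri="https://server.example/_matrix/consent?u=...")
{'error': 'User has not consented', 'errcode': 'M_CONSENT_NOT_GIVEN', 'consent_uri': 'https://server.example/_matrix/consent?u=...'}
```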
@@ -341,7 +341,7 @@ class HttpResponseException(CodeMessageException):
Represents an HTTP-level failure of an outbound request
Attributes:
response (bytes): body of response
response (str): body of response
"""
def __init__(self, code, msg, response):
"""
@@ -349,39 +349,7 @@ class HttpResponseException(CodeMessageException):
Args:
code (int): HTTP status code
msg (str): reason phrase from HTTP response status line
response (bytes): body of response
response (str): body of response
"""
super(HttpResponseException, self).__init__(code, msg)
self.response = response
def to_synapse_error(self):
"""Make a SynapseError based on an HTTPResponseException
This is useful when a proxied request has failed, and we need to
decide how to map the failure onto a matrix error to send back to the
client.
An attempt is made to parse the body of the http response as a matrix
error. If that succeeds, the errcode and error message from the body
are used as the errcode and error message in the new synapse error.
Otherwise, the errcode is set to M_UNKNOWN, and the error message is
set to the reason code from the HTTP response.
Returns:
SynapseError:
"""
# try to parse the body as json, to get better errcode/msg, but
# default to M_UNKNOWN with the HTTP status as the error text
try:
j = json.loads(self.response)
except ValueError:
j = {}
if not isinstance(j, dict):
j = {}
errcode = j.pop('errcode', Codes.UNKNOWN)
errmsg = j.pop('error', self.msg)
return ProxiedRequestError(self.code, errmsg, errcode, j)

View File

@@ -12,15 +12,14 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
from canonicaljson import json
from jsonschema import FormatChecker
from twisted.internet import defer
from synapse.api.errors import SynapseError
from synapse.storage.presence import UserPresenceState
from synapse.types import RoomID, UserID
from synapse.types import UserID, RoomID
from twisted.internet import defer
import simplejson as json
import jsonschema
from jsonschema import FormatChecker
FILTER_SCHEMA = {
"additionalProperties": False,
@@ -113,13 +112,7 @@ ROOM_EVENT_FILTER_SCHEMA = {
},
"contains_url": {
"type": "boolean"
},
"lazy_load_members": {
"type": "boolean"
},
"include_redundant_members": {
"type": "boolean"
},
}
}
}
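A room event filter exercising these fields might look like the following sketch (field names are taken from the schema above; values are illustrative):
```
room_event_filter = {
    "limit": 10,
    "contains_url": True,
    "lazy_load_members": True,
    "include_redundant_members": False,
}
```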
@@ -267,12 +260,6 @@ class FilterCollection(object):
def ephemeral_limit(self):
return self._room_ephemeral_filter.limit()
def lazy_load_members(self):
return self._room_state_filter.lazy_load_members()
def include_redundant_members(self):
return self._room_state_filter.include_redundant_members()
def filter_presence(self, events):
return self._presence_filter.filter(events)
@@ -424,17 +411,11 @@ class Filter(object):
return room_ids
def filter(self, events):
return list(filter(self.check, events))
return filter(self.check, events)
def limit(self):
return self.filter_json.get("limit", 10)
def lazy_load_members(self):
return self.filter_json.get("lazy_load_members", False)
def include_redundant_members(self):
return self.filter_json.get("include_redundant_members", False)
def _matches_wildcard(actual_value, filter_value):
if filter_value.endswith("*"):

View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,12 +14,6 @@
# limitations under the License.
"""Contains the URL paths to prefix various aspects of the server with. """
import hmac
from hashlib import sha256
from six.moves.urllib.parse import urlencode
from synapse.config import ConfigError
CLIENT_PREFIX = "/_matrix/client/api/v1"
CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
@@ -32,46 +25,3 @@ SERVER_KEY_PREFIX = "/_matrix/key/v1"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/r0"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
class ConsentURIBuilder(object):
def __init__(self, hs_config):
"""
Args:
hs_config (synapse.config.homeserver.HomeServerConfig):
"""
if hs_config.form_secret is None:
raise ConfigError(
"form_secret not set in config",
)
if hs_config.public_baseurl is None:
raise ConfigError(
"public_baseurl not set in config",
)
self._hmac_secret = hs_config.form_secret.encode("utf-8")
self._public_baseurl = hs_config.public_baseurl
def build_user_consent_uri(self, user_id):
"""Build a URI which we can give to the user to do their privacy
policy consent
Args:
user_id (str): mxid or username of user
Returns
(str) the URI where the user can do consent
"""
mac = hmac.new(
key=self._hmac_secret,
msg=user_id,
digestmod=sha256,
).hexdigest()
consent_uri = "%s_matrix/consent?%s" % (
self._public_baseurl,
urlencode({
"u": user_id,
"h": mac
}),
)
return consent_uri
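Given a configured homeserver, the builder above is used roughly as follows; a hypothetical sketch assuming `hs.config` carries `form_secret` and `public_baseurl` as the constructor requires:
```
builder = ConsentURIBuilder(hs.config)
uri = builder.build_user_consent_uri("@alice:server.com")
# -> <public_baseurl>_matrix/consent?u=%40alice%3Aserver.com&h=<hex hmac>
```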

View File

@@ -14,11 +14,9 @@
# limitations under the License.
import sys
from synapse import python_dependencies # noqa: E402
sys.dont_write_bytecode = True
from synapse import python_dependencies # noqa: E402
try:
python_dependencies.check_requirements()

View File

@@ -17,18 +17,15 @@ import gc
import logging
import sys
from daemonize import Daemonize
from twisted.internet import error, reactor
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit
try:
import affinity
except Exception:
affinity = None
from daemonize import Daemonize
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit
from twisted.internet import error, reactor
logger = logging.getLogger(__name__)
@@ -127,19 +124,6 @@ def quit_with_error(error_string):
sys.exit(1)
def listen_metrics(bind_addresses, port):
"""
Start Prometheus metrics server.
"""
from synapse.metrics import RegistryProxy
from prometheus_client import start_http_server
for host in bind_addresses:
reactor.callInThread(start_http_server, int(port),
addr=host, registry=RegistryProxy)
logger.info("Metrics now reporting on %s:%d", host, port)
def listen_tcp(bind_addresses, port, factory, backlog=50):
"""
Create a TCP socket for a port and several addresses

View File

@@ -16,9 +16,6 @@
import logging
import sys
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
@@ -26,7 +23,6 @@ from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.directory import DirectoryStore
@@ -36,9 +32,11 @@ from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.appservice")
@@ -64,7 +62,7 @@ class AppserviceServer(HomeServer):
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
resources[METRICS_PREFIX] = MetricsResource(self)
root_resource = create_resource_tree(resources, NoResource())
@@ -76,7 +74,6 @@ class AppserviceServer(HomeServer):
site_tag,
listener_config,
root_resource,
self.version_string,
)
)
@@ -96,13 +93,6 @@ class AppserviceServer(HomeServer):
globals={"hs": self},
)
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warn(("Metrics listener configured, but "
"enable_metrics is not True!"))
else:
_base.listen_metrics(listener["bind_addresses"],
listener["port"])
else:
logger.warn("Unrecognized listener type: %s", listener["type"])
@@ -122,14 +112,9 @@ class ASReplicationHandler(ReplicationClientHandler):
if stream_name == "events":
max_stream_id = self.store.get_room_max_stream_ordering()
run_in_background(self._notify_app_services, max_stream_id)
@defer.inlineCallbacks
def _notify_app_services(self, room_stream_id):
try:
yield self.appservice_handler.notify_interested_services(room_stream_id)
except Exception:
logger.exception("Error notifying application services of event")
preserve_fn(
self.appservice_handler.notify_interested_services
)(max_stream_id)
def start(config_options):

View File

@@ -16,9 +16,6 @@
import logging
import sys
from twisted.internet import reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
@@ -28,10 +25,8 @@ from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.directory import DirectoryStore
@@ -41,25 +36,20 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.room import (
JoinedRoomMemberListRestServlet,
PublicRoomListRestServlet,
RoomEventContextServlet,
RoomMemberListRestServlet,
RoomStateRestServlet,
)
from synapse.rest.client.v1.room import PublicRoomListRestServlet
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.client_reader")
class ClientReaderSlavedStore(
SlavedAccountDataStore,
SlavedEventStore,
SlavedKeyStore,
RoomStore,
@@ -87,16 +77,10 @@ class ClientReaderServer(HomeServer):
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
resources[METRICS_PREFIX] = MetricsResource(self)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
PublicRoomListRestServlet(self).register(resource)
RoomMemberListRestServlet(self).register(resource)
JoinedRoomMemberListRestServlet(self).register(resource)
RoomStateRestServlet(self).register(resource)
RoomEventContextServlet(self).register(resource)
resources.update({
"/_matrix/client/r0": resource,
"/_matrix/client/unstable": resource,
@@ -114,7 +98,6 @@ class ClientReaderServer(HomeServer):
site_tag,
listener_config,
root_resource,
self.version_string,
)
)
@@ -134,13 +117,7 @@ class ClientReaderServer(HomeServer):
globals={"hs": self},
)
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warn(("Metrics listener configured, but "
"enable_metrics is not True!"))
else:
_base.listen_metrics(listener["bind_addresses"],
listener["port"])
else:
logger.warn("Unrecognized listener type: %s", listener["type"])

View File

@@ -16,9 +16,6 @@
import logging
import sys
from twisted.internet import reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
@@ -28,7 +25,6 @@ from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
@@ -46,10 +42,8 @@ from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.room import (
RoomSendEventRestServlet, RoomMembershipRestServlet, RoomStateEventRestServlet,
JoinRoomAliasServlet,
RoomMembershipRestServlet,
RoomSendEventRestServlet,
RoomStateEventRestServlet,
)
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
@@ -57,6 +51,8 @@ from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.event_creator")
@@ -94,7 +90,7 @@ class EventCreatorServer(HomeServer):
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
resources[METRICS_PREFIX] = MetricsResource(self)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
RoomSendEventRestServlet(self).register(resource)
@@ -118,7 +114,6 @@ class EventCreatorServer(HomeServer):
site_tag,
listener_config,
root_resource,
self.version_string,
)
)
@@ -138,13 +133,6 @@ class EventCreatorServer(HomeServer):
globals={"hs": self},
)
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warn(("Metrics listener configured, but "
"enable_metrics is not True!"))
else:
_base.listen_metrics(listener["bind_addresses"],
listener["port"])
else:
logger.warn("Unrecognized listener type: %s", listener["type"])

View File

@@ -16,9 +16,6 @@
import logging
import sys
from twisted.internet import reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.api.urls import FEDERATION_PREFIX
@@ -29,7 +26,6 @@ from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.directory import DirectoryStore
@@ -44,6 +40,8 @@ from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.federation_reader")
@@ -73,7 +71,7 @@ class FederationReaderServer(HomeServer):
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
resources[METRICS_PREFIX] = MetricsResource(self)
elif name == "federation":
resources.update({
FEDERATION_PREFIX: TransportLayerServer(self),
@@ -89,7 +87,6 @@ class FederationReaderServer(HomeServer):
site_tag,
listener_config,
root_resource,
self.version_string,
)
)
@@ -109,13 +106,6 @@ class FederationReaderServer(HomeServer):
globals={"hs": self},
)
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warn(("Metrics listener configured, but "
"enable_metrics is not True!"))
else:
_base.listen_metrics(listener["bind_addresses"],
listener["port"])
else:
logger.warn("Unrecognized listener type: %s", listener["type"])

View File

@@ -16,9 +16,6 @@
import logging
import sys
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
@@ -28,7 +25,6 @@ from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.federation import send_queue
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
@@ -42,9 +38,11 @@ from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.async import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.federation_sender")
@@ -91,7 +89,7 @@ class FederationSenderServer(HomeServer):
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
resources[METRICS_PREFIX] = MetricsResource(self)
root_resource = create_resource_tree(resources, NoResource())
@@ -103,7 +101,6 @@ class FederationSenderServer(HomeServer):
site_tag,
listener_config,
root_resource,
self.version_string,
)
)
@@ -123,13 +120,6 @@ class FederationSenderServer(HomeServer):
globals={"hs": self},
)
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warn(("Metrics listener configured, but "
"enable_metrics is not True!"))
else:
_base.listen_metrics(listener["bind_addresses"],
listener["port"])
else:
logger.warn("Unrecognized listener type: %s", listener["type"])
@@ -239,7 +229,7 @@ class FederationSenderHandler(object):
# presence, typing, etc.
if stream_name == "federation":
send_queue.process_rows_for_federation(self.federation_sender, rows)
run_in_background(self.update_token, token)
preserve_fn(self.update_token)(token)
# We also need to poke the federation sender when new events happen
elif stream_name == "events":
@@ -247,22 +237,19 @@ class FederationSenderHandler(object):
@defer.inlineCallbacks
def update_token(self, token):
try:
self.federation_position = token
self.federation_position = token
# We linearize here to ensure we don't have races updating the token
with (yield self._fed_position_linearizer.queue(None)):
if self._last_ack < self.federation_position:
yield self.store.update_federation_out_pos(
"federation", self.federation_position
)
# We linearize here to ensure we don't have races updating the token
with (yield self._fed_position_linearizer.queue(None)):
if self._last_ack < self.federation_position:
yield self.store.update_federation_out_pos(
"federation", self.federation_position
)
# We ACK this token over replication so that the master can drop
# its in memory queues
self.replication_client.send_federation_ack(self.federation_position)
self._last_ack = self.federation_position
except Exception:
logger.exception("Error updating federation stream position")
# We ACK this token over replication so that the master can drop
# its in memory queues
self.replication_client.send_federation_ack(self.federation_position)
self._last_ack = self.federation_position
if __name__ == '__main__':
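The `Linearizer` guard in `update_token` is what stops concurrent replication callbacks racing on the stored position; the general pattern, using the same `synapse.util.async.Linearizer` this file imports, looks like the following sketch (`store` is assumed to be a datastore exposing `update_federation_out_pos`):
```
from twisted.internet import defer
from synapse.util.async import Linearizer

linearizer = Linearizer(name="fed_position")

@defer.inlineCallbacks
def update_position(store, pos):
    # only one body runs at a time; later callers queue behind it
    with (yield linearizer.queue(None)):
        yield store.update_federation_out_pos("federation", pos)
```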

View File

@@ -16,9 +16,6 @@
import logging
import sys
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.api.errors import SynapseError
@@ -28,9 +25,10 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.servlet import (
RestServlet, parse_json_object_from_request,
)
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
@@ -38,6 +36,7 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns
from synapse.rest.client.v2_alpha._base import client_v2_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
@@ -45,10 +44,41 @@ from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.frontend_proxy")
class PresenceStatusStubServlet(ClientV1RestServlet):
PATTERNS = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")
def __init__(self, hs):
super(PresenceStatusStubServlet, self).__init__(hs)
self.http_client = hs.get_simple_http_client()
self.auth = hs.get_auth()
self.main_uri = hs.config.worker_main_http_uri
@defer.inlineCallbacks
def on_GET(self, request, user_id):
# Pass through the auth headers, if any, in case the access token
# is there.
auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
headers = {
"Authorization": auth_headers,
}
result = yield self.http_client.get_json(
self.main_uri + request.uri,
headers=headers,
)
defer.returnValue((200, result))
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
yield self.auth.get_user_by_req(request)
defer.returnValue((200, {}))
class KeyUploadServlet(RestServlet):
PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
@@ -131,10 +161,11 @@ class FrontendProxyServer(HomeServer):
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
resources[METRICS_PREFIX] = MetricsResource(self)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
KeyUploadServlet(self).register(resource)
PresenceStatusStubServlet(self).register(resource)
resources.update({
"/_matrix/client/r0": resource,
"/_matrix/client/unstable": resource,
@@ -152,7 +183,6 @@ class FrontendProxyServer(HomeServer):
site_tag,
listener_config,
root_resource,
self.version_string,
)
)
@@ -172,13 +202,6 @@ class FrontendProxyServer(HomeServer):
globals={"hs": self},
)
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warn(("Metrics listener configured, but "
"enable_metrics is not True!"))
else:
_base.listen_metrics(listener["bind_addresses"],
listener["port"])
else:
logger.warn("Unrecognized listener type: %s", listener["type"])

View File

@@ -18,44 +18,27 @@ import logging
import os
import sys
from six import iteritems
from prometheus_client import Gauge
from twisted.application import service
from twisted.internet import defer, reactor
from twisted.web.resource import EncodingResourceWrapper, NoResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File
import synapse
import synapse.config.logger
from synapse import events
from synapse.api.urls import (
CONTENT_REPO_PREFIX,
FEDERATION_PREFIX,
LEGACY_MEDIA_PREFIX,
MEDIA_PREFIX,
SERVER_KEY_PREFIX,
SERVER_KEY_V2_PREFIX,
STATIC_PREFIX,
WEB_CLIENT_PREFIX,
)
from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \
LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \
STATIC_PREFIX, WEB_CLIENT_PREFIX
from synapse.app import _base
from synapse.app._base import listen_ssl, listen_tcp, quit_with_error
from synapse.app._base import quit_with_error, listen_ssl, listen_tcp
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.federation.transport.server import TransportLayerServer
from synapse.module_api import ModuleApi
from synapse.http.additional_resource import AdditionalResource
from synapse.http.server import RootRedirect
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics import register_memory_metrics
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.module_api import ModuleApi
from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirements
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \
check_requirements
from synapse.replication.http import ReplicationRestResource, REPLICATION_PREFIX
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.key.v1.server_key_resource import LocalKey
@@ -72,6 +55,11 @@ from synapse.util.manhole import manhole
from synapse.util.module_loader import load_module
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from twisted.application import service
from twisted.internet import defer, reactor
from twisted.web.resource import EncodingResourceWrapper, NoResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File
logger = logging.getLogger("synapse.app.homeserver")
@@ -152,7 +140,6 @@ class SynapseHomeServer(HomeServer):
site_tag,
listener_config,
root_resource,
self.version_string,
),
self.tls_server_context_factory,
)
@@ -166,7 +153,6 @@ class SynapseHomeServer(HomeServer):
site_tag,
listener_config,
root_resource,
self.version_string,
)
)
logger.info("Synapse now listening on port %d", port)
@@ -196,15 +182,6 @@ class SynapseHomeServer(HomeServer):
"/_matrix/client/versions": client_resource,
})
if name == "consent":
from synapse.rest.consent.consent_resource import ConsentResource
consent_resource = ConsentResource(self)
if compress:
consent_resource = gz_wrap(consent_resource)
resources.update({
"/_matrix/consent": consent_resource,
})
if name == "federation":
resources.update({
FEDERATION_PREFIX: TransportLayerServer(self),
@@ -242,7 +219,7 @@ class SynapseHomeServer(HomeServer):
resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
if name == "metrics" and self.get_config().enable_metrics:
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
resources[METRICS_PREFIX] = MetricsResource(self)
if name == "replication":
resources[REPLICATION_PREFIX] = ReplicationRestResource(self)
@@ -275,13 +252,6 @@ class SynapseHomeServer(HomeServer):
reactor.addSystemEventTrigger(
"before", "shutdown", server_listener.stopListening,
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warn(("Metrics listener configured, but "
"enable_metrics is not True!"))
else:
_base.listen_metrics(listener["bind_addresses"],
listener["port"])
else:
logger.warn("Unrecognized listener type: %s", listener["type"])
@@ -302,11 +272,6 @@ class SynapseHomeServer(HomeServer):
quit_with_error(e.message)
# Gauges to expose monthly active user control metrics
current_mau_gauge = Gauge("synapse_admin_current_mau", "Current MAU")
max_mau_value_gauge = Gauge("synapse_admin_max_mau_value", "MAU Limit")
def setup(config_options):
"""
Args:
@@ -335,6 +300,11 @@ def setup(config_options):
# check any extra requirements we have now we have a config
check_requirements(config)
version_string = "Synapse/" + get_version_string(synapse)
logger.info("Server hostname: %s", config.server_name)
logger.info("Server version: %s", version_string)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
tls_server_context_factory = context_factory.ServerContextFactory(config)
@@ -347,7 +317,7 @@ def setup(config_options):
db_config=config.database_config,
tls_server_context_factory=tls_server_context_factory,
config=config,
version_string="Synapse/" + get_version_string(synapse),
version_string=version_string,
database_engine=database_engine,
)
@@ -381,6 +351,8 @@ def setup(config_options):
hs.get_datastore().start_doing_background_updates()
hs.get_federation_client().start_get_pdu_cache()
register_memory_metrics(hs)
reactor.callWhenRunning(start)
return hs
@@ -435,9 +407,6 @@ def run(hs):
# currently either 0 or 1
stats_process = []
def start_phone_stats_home():
return run_as_background_process("phone_stats_home", phone_stats_home)
@defer.inlineCallbacks
def phone_stats_home():
logger.info("Gathering stats for reporting")
@@ -454,10 +423,6 @@ def run(hs):
total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
stats["total_nonbridged_users"] = total_nonbridged_users
daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
for name, count in iteritems(daily_user_type_results):
stats["daily_user_type_" + name] = count
room_count = yield hs.get_datastore().get_room_count()
stats["total_room_count"] = room_count
@@ -466,7 +431,7 @@ def run(hs):
stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
r30_results = yield hs.get_datastore().count_r30_users()
for name, count in iteritems(r30_results):
for name, count in r30_results.iteritems():
stats["r30_users_" + name] = count
daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
@@ -508,32 +473,9 @@ def run(hs):
" changes across releases."
)
def generate_user_daily_visit_stats():
return run_as_background_process(
"generate_user_daily_visits",
hs.get_datastore().generate_user_daily_visits,
)
# Rather than updating on a per-session basis, batch up the requests.
# If you increase the loop period, the accuracy of the user_daily_visits
# table will decrease.
clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)
@defer.inlineCallbacks
def generate_monthly_active_users():
count = 0
if hs.config.limit_usage_by_mau:
count = yield hs.get_datastore().count_monthly_users()
current_mau_gauge.set(float(count))
max_mau_value_gauge.set(float(hs.config.max_mau_value))
generate_monthly_active_users()
if hs.config.limit_usage_by_mau:
clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
if hs.config.report_stats:
logger.info("Scheduling stats reporting for 3 hour intervals")
clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000)
clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)
# We need to defer this init for the cases that we daemonize
# otherwise the process ID we get is that of the non-daemon process
@@ -541,7 +483,7 @@ def run(hs):
# We wait 5 minutes to send the first set of stats as the server can
# be quite busy the first few minutes
clock.call_later(5 * 60, start_phone_stats_home)
clock.call_later(5 * 60, phone_stats_home)
if hs.config.daemonize and hs.config.print_pidfile:
print (hs.config.pid_file)
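The scheduling pattern above — wrap the coroutine in `run_as_background_process`, then drive it with `clock.looping_call` — generalises to any periodic job; a sketch assuming the `hs` and `clock` objects in scope in `run` (the job itself is hypothetical):
```
from twisted.internet import defer

from synapse.metrics.background_process_metrics import run_as_background_process

@defer.inlineCallbacks
def my_stats_job():
    # hypothetical periodic work
    yield hs.get_datastore().get_room_count()

def start_my_stats_job():
    return run_as_background_process("my_stats_job", my_stats_job)

# fire every five minutes, like generate_user_daily_visit_stats above
clock.looping_call(start_my_stats_job, 5 * 60 * 1000)
```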

View File

@@ -16,19 +16,17 @@
import logging
import sys
from twisted.internet import reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.api.urls import CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
from synapse.api.urls import (
CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
)
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
@@ -44,6 +42,8 @@ from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.media_repository")
@@ -73,7 +73,7 @@ class MediaRepositoryServer(HomeServer):
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
resources[METRICS_PREFIX] = MetricsResource(self)
elif name == "media":
media_repo = self.get_media_repository_resource()
resources.update({
@@ -94,7 +94,6 @@ class MediaRepositoryServer(HomeServer):
site_tag,
listener_config,
root_resource,
self.version_string,
)
)
@@ -114,13 +113,6 @@ class MediaRepositoryServer(HomeServer):
globals={"hs": self},
)
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warn(("Metrics listener configured, but "
"enable_metrics is not True!"))
else:
_base.listen_metrics(listener["bind_addresses"],
listener["port"])
else:
logger.warn("Unrecognized listener type: %s", listener["type"])

View File

@@ -16,9 +16,6 @@
import logging
import sys
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
@@ -26,7 +23,6 @@ from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.events import SlavedEventStore
@@ -37,9 +33,11 @@ from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.pusher")
@@ -94,7 +92,7 @@ class PusherServer(HomeServer):
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
resources[METRICS_PREFIX] = MetricsResource(self)
root_resource = create_resource_tree(resources, NoResource())
@@ -106,7 +104,6 @@ class PusherServer(HomeServer):
site_tag,
listener_config,
root_resource,
self.version_string,
)
)
@@ -126,13 +123,6 @@ class PusherServer(HomeServer):
globals={"hs": self},
)
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warn(("Metrics listener configured, but "
"enable_metrics is not True!"))
else:
_base.listen_metrics(listener["bind_addresses"],
listener["port"])
else:
logger.warn("Unrecognized listener type: %s", listener["type"])
@@ -150,27 +140,24 @@ class PusherReplicationHandler(ReplicationClientHandler):
def on_rdata(self, stream_name, token, rows):
super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
run_in_background(self.poke_pushers, stream_name, token, rows)
preserve_fn(self.poke_pushers)(stream_name, token, rows)
@defer.inlineCallbacks
def poke_pushers(self, stream_name, token, rows):
try:
if stream_name == "pushers":
for row in rows:
if row.deleted:
yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
else:
yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
elif stream_name == "events":
yield self.pusher_pool.on_new_notifications(
token, token,
)
elif stream_name == "receipts":
yield self.pusher_pool.on_new_receipts(
token, token, set(row.room_id for row in rows)
)
except Exception:
logger.exception("Error poking pushers")
if stream_name == "pushers":
for row in rows:
if row.deleted:
yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
else:
yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
elif stream_name == "events":
yield self.pusher_pool.on_new_notifications(
token, token,
)
elif stream_name == "receipts":
yield self.pusher_pool.on_new_receipts(
token, token, set(row.room_id for row in rows)
)
def stop_pusher(self, user_id, app_id, pushkey):
key = "%s:%s" % (app_id, pushkey)

View File

@@ -17,11 +17,6 @@ import contextlib
import logging
import sys
from six import iteritems
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse.api.constants import EventTypes
from synapse.app import _base
@@ -31,7 +26,6 @@ from synapse.config.logger import setup_logging
from synapse.handlers.presence import PresenceHandler, get_interested_parties
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
@@ -41,12 +35,12 @@ from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1 import events
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
@@ -55,11 +49,14 @@ from synapse.rest.client.v2_alpha import sync
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.presence import UserPresenceState
from synapse.storage.roommember import RoomMemberStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.synchrotron")
@@ -80,7 +77,9 @@ class SynchrotronSlavedStore(
RoomStore,
BaseSlavedStore,
):
pass
did_forget = (
RoomMemberStore.__dict__["did_forget"]
)
UPDATE_SYNCING_USERS_MS = 10 * 1000
@@ -114,6 +113,7 @@ class SynchrotronPresence(object):
logger.info("Presence process_id is %r", self.process_id)
def send_user_sync(self, user_id, is_syncing, last_sync_ms):
return
self.hs.get_tcp_replication().send_user_sync(user_id, is_syncing, last_sync_ms)
def mark_as_coming_online(self, user_id):
@@ -211,8 +211,10 @@ class SynchrotronPresence(object):
yield self.notify_from_replication(states, stream_id)
def get_currently_syncing_users(self):
# presence is disabled on matrix.org, so we return the empty set
return set()
return [
user_id for user_id, count in iteritems(self.user_to_num_current_syncs)
user_id for user_id, count in self.user_to_num_current_syncs.iteritems()
if count > 0
]
@@ -256,7 +258,7 @@ class SynchrotronServer(HomeServer):
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
resources[METRICS_PREFIX] = MetricsResource(self)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
sync.register_servlets(self, resource)
@@ -280,7 +282,6 @@ class SynchrotronServer(HomeServer):
site_tag,
listener_config,
root_resource,
self.version_string,
)
)
@@ -300,13 +301,6 @@ class SynchrotronServer(HomeServer):
globals={"hs": self},
)
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warn(("Metrics listener configured, but "
"enable_metrics is not True!"))
else:
_base.listen_metrics(listener["bind_addresses"],
listener["port"])
else:
logger.warn("Unrecognized listener type: %s", listener["type"])
@@ -334,7 +328,8 @@ class SyncReplicationHandler(ReplicationClientHandler):
def on_rdata(self, stream_name, token, rows):
super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
run_in_background(self.process_and_notify, stream_name, token, rows)
preserve_fn(self.process_and_notify)(stream_name, token, rows)
def get_streams_to_replicate(self):
args = super(SyncReplicationHandler, self).get_streams_to_replicate()
@@ -346,58 +341,55 @@ class SyncReplicationHandler(ReplicationClientHandler):
@defer.inlineCallbacks
def process_and_notify(self, stream_name, token, rows):
try:
if stream_name == "events":
# We shouldn't get multiple rows per token for events stream, so
# we don't need to optimise this for multiple rows.
for row in rows:
event = yield self.store.get_event(row.event_id)
extra_users = ()
if event.type == EventTypes.Member:
extra_users = (event.state_key,)
max_token = self.store.get_room_max_stream_ordering()
self.notifier.on_new_room_event(
event, token, max_token, extra_users
)
elif stream_name == "push_rules":
self.notifier.on_new_event(
"push_rules_key", token, users=[row.user_id for row in rows],
if stream_name == "events":
# We shouldn't get multiple rows per token for events stream, so
# we don't need to optimise this for multiple rows.
for row in rows:
event = yield self.store.get_event(row.event_id)
extra_users = ()
if event.type == EventTypes.Member:
extra_users = (event.state_key,)
max_token = self.store.get_room_max_stream_ordering()
self.notifier.on_new_room_event(
event, token, max_token, extra_users
)
elif stream_name in ("account_data", "tag_account_data",):
elif stream_name == "push_rules":
self.notifier.on_new_event(
"push_rules_key", token, users=[row.user_id for row in rows],
)
elif stream_name in ("account_data", "tag_account_data",):
self.notifier.on_new_event(
"account_data_key", token, users=[row.user_id for row in rows],
)
elif stream_name == "receipts":
self.notifier.on_new_event(
"receipt_key", token, rooms=[row.room_id for row in rows],
)
elif stream_name == "typing":
self.typing_handler.process_replication_rows(token, rows)
self.notifier.on_new_event(
"typing_key", token, rooms=[row.room_id for row in rows],
)
elif stream_name == "to_device":
entities = [row.entity for row in rows if row.entity.startswith("@")]
if entities:
self.notifier.on_new_event(
"account_data_key", token, users=[row.user_id for row in rows],
"to_device_key", token, users=entities,
)
elif stream_name == "receipts":
self.notifier.on_new_event(
"receipt_key", token, rooms=[row.room_id for row in rows],
)
elif stream_name == "typing":
self.typing_handler.process_replication_rows(token, rows)
self.notifier.on_new_event(
"typing_key", token, rooms=[row.room_id for row in rows],
)
elif stream_name == "to_device":
entities = [row.entity for row in rows if row.entity.startswith("@")]
if entities:
self.notifier.on_new_event(
"to_device_key", token, users=entities,
)
elif stream_name == "device_lists":
all_room_ids = set()
for row in rows:
room_ids = yield self.store.get_rooms_for_user(row.user_id)
all_room_ids.update(room_ids)
self.notifier.on_new_event(
"device_list_key", token, rooms=all_room_ids,
)
elif stream_name == "presence":
yield self.presence_handler.process_replication_rows(token, rows)
elif stream_name == "receipts":
self.notifier.on_new_event(
"groups_key", token, users=[row.user_id for row in rows],
)
except Exception:
logger.exception("Error processing replication")
elif stream_name == "device_lists":
all_room_ids = set()
for row in rows:
room_ids = yield self.store.get_rooms_for_user(row.user_id)
all_room_ids.update(room_ids)
self.notifier.on_new_event(
"device_list_key", token, rooms=all_room_ids,
)
elif stream_name == "presence":
yield self.presence_handler.process_replication_rows(token, rows)
elif stream_name == "receipts":
self.notifier.on_new_event(
"groups_key", token, users=[row.user_id for row in rows],
)
def start(config_options):

View File
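
The synchrotron hunks carry the matrix.org presence hotfix: send_user_sync returns before anything reaches the replication connection, and get_currently_syncing_users reports nobody. A sketch of the disabled-presence shape (method bodies are illustrative):

class SynchrotronPresence(object):
    # Presence is disabled on this deployment, so both entry points
    # are short-circuited.

    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
        # drop the update instead of forwarding it to the master
        return

    def get_currently_syncing_users(self):
        # presence is disabled on matrix.org, so we return the empty set
        return set()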

@@ -16,18 +16,15 @@
import argparse
import collections
import errno
import glob
import os
import os.path
import signal
import subprocess
import sys
import time
from six import iteritems
import yaml
import errno
import time
SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
@@ -111,7 +108,7 @@ def stop(pidfile, app):
Worker = collections.namedtuple("Worker", [
"app", "configfile", "pidfile", "cache_factor"
"app", "configfile", "pidfile", "cache_factor", "cache_factors",
])
@@ -175,7 +172,7 @@ def main():
os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
cache_factors = config.get("synctl_cache_factors", {})
for cache_name, factor in iteritems(cache_factors):
for cache_name, factor in cache_factors.iteritems():
os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
worker_configfiles = []
@@ -218,6 +215,10 @@ def main():
or pidfile
)
worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor
worker_cache_factors = (
worker_config.get("synctl_cache_factors")
or cache_factors
)
daemonize = worker_config.get("daemonize") or config.get("daemonize")
assert daemonize, "Main process must have daemonize set to true"
@@ -233,8 +234,10 @@ def main():
assert worker_daemonize, "In config %r: expected '%s' to be True" % (
worker_configfile, "worker_daemonize")
worker_cache_factor = worker_config.get("synctl_cache_factor")
worker_cache_factors = worker_config.get("synctl_cache_factors", {})
workers.append(Worker(
worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
worker_cache_factors,
))
action = options.action
@@ -269,15 +272,19 @@ def main():
start(configfile)
for worker in workers:
env = os.environ.copy()
if worker.cache_factor:
os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)
for cache_name, factor in worker.cache_factors.iteritems():
os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
start_worker(worker.app, configfile, worker.configfile)
if cache_factor:
os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
else:
os.environ.pop("SYNAPSE_CACHE_FACTOR", None)
# Reset env back to the original
os.environ.clear()
os.environ.update(env)
if __name__ == "__main__":

View File
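
The synctl changes add per-cache overrides: a synctl_cache_factors mapping from the config is exported as one environment variable per cache before each worker is spawned, and the environment is restored afterwards. A sketch of that flow (the cache names and factors are illustrative):

import os

cache_factors = {"get_users_in_room": 10.0, "state_cache": 2.0}  # illustrative

env = os.environ.copy()
try:
    for cache_name, factor in cache_factors.items():
        # e.g. SYNAPSE_CACHE_FACTOR_STATE_CACHE=2.0
        os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
    # ... start the worker here; it inherits these variables ...
finally:
    # Reset env back to the original, as the hunk above does.
    os.environ.clear()
    os.environ.update(env)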

@@ -17,9 +17,6 @@
import logging
import sys
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
@@ -29,7 +26,6 @@ from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
@@ -43,9 +39,11 @@ from synapse.storage.engines import create_engine
from synapse.storage.user_directory import UserDirectoryStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.user_dir")
@@ -107,7 +105,7 @@ class UserDirectoryServer(HomeServer):
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
resources[METRICS_PREFIX] = MetricsResource(self)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
user_directory.register_servlets(self, resource)
@@ -128,7 +126,6 @@ class UserDirectoryServer(HomeServer):
site_tag,
listener_config,
root_resource,
self.version_string,
)
)
@@ -148,13 +145,6 @@ class UserDirectoryServer(HomeServer):
globals={"hs": self},
)
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warn(("Metrics listener configured, but "
"enable_metrics is not True!"))
else:
_base.listen_metrics(listener["bind_addresses"],
listener["port"])
else:
logger.warn("Unrecognized listener type: %s", listener["type"])
@@ -174,14 +164,7 @@ class UserDirectoryReplicationHandler(ReplicationClientHandler):
stream_name, token, rows
)
if stream_name == "current_state_deltas":
run_in_background(self._notify_directory)
@defer.inlineCallbacks
def _notify_directory(self):
try:
yield self.user_directory.notify_new_event()
except Exception:
logger.exception("Error notifiying user directory of state update")
preserve_fn(self.user_directory.notify_new_event)()
def start(config_options):

View File
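
The user-directory hunk shows the shape background work settles into across these files: wrap the body in an inlineCallbacks function with its own try/except, then kick it off with run_in_background. A self-contained sketch (the user_directory object and its notify_new_event method are assumed from context):

import logging

from twisted.internet import defer

from synapse.util.logcontext import run_in_background

logger = logging.getLogger(__name__)


@defer.inlineCallbacks
def _notify_directory(user_directory):
    # Nothing awaits this Deferred, so an uncaught failure would only
    # surface as an unhandled error in a Deferred -- catch and log it.
    try:
        yield user_directory.notify_new_event()
    except Exception:
        logger.exception("Error notifying user directory of state update")


def on_rdata(stream_name, user_directory):
    if stream_name == "current_state_deltas":
        run_in_background(_notify_directory, user_directory)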

@@ -12,17 +12,17 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.constants import EventTypes
from synapse.util.caches.descriptors import cachedInlineCallbacks
from synapse.types import GroupID, get_domain_from_id
from twisted.internet import defer
import logging
import re
from six import string_types
from twisted.internet import defer
from synapse.api.constants import EventTypes
from synapse.types import GroupID, get_domain_from_id
from synapse.util.caches.descriptors import cachedInlineCallbacks
logger = logging.getLogger(__name__)
@@ -85,8 +85,7 @@ class ApplicationService(object):
NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]
def __init__(self, token, hostname, url=None, namespaces=None, hs_token=None,
sender=None, id=None, protocols=None, rate_limited=True,
ip_range_whitelist=None):
sender=None, id=None, protocols=None, rate_limited=True):
self.token = token
self.url = url
self.hs_token = hs_token
@@ -94,7 +93,6 @@ class ApplicationService(object):
self.server_name = hostname
self.namespaces = self._check_namespaces(namespaces)
self.id = id
self.ip_range_whitelist = ip_range_whitelist
if "|" in self.id:
raise Exception("application service ID cannot contain '|' character")
@@ -294,8 +292,4 @@ class ApplicationService(object):
return self.rate_limited
def __str__(self):
# copy dictionary and redact token fields so they don't get logged
dict_copy = self.__dict__.copy()
dict_copy["token"] = "<redacted>"
dict_copy["hs_token"] = "<redacted>"
return "ApplicationService: %s" % (dict_copy,)
return "ApplicationService: %s" % (self.__dict__,)

View File
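
The __str__ hunk is a small logging-hygiene pattern worth noting: copy the instance dict and redact the secrets before formatting, so stringifying the service never leaks tokens into logs. A standalone sketch (Service is an illustrative stand-in):

class Service(object):
    def __init__(self, token, hs_token):
        self.token = token
        self.hs_token = hs_token

    def __str__(self):
        # copy the dict and redact token fields so they don't get logged
        dict_copy = self.__dict__.copy()
        dict_copy["token"] = "<redacted>"
        dict_copy["hs_token"] = "<redacted>"
        return "Service: %s" % (dict_copy,)


print(Service("secret-as-token", "secret-hs-token"))
# -> Service: {'token': '<redacted>', 'hs_token': '<redacted>'}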

@@ -12,39 +12,21 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib
from prometheus_client import Counter
from twisted.internet import defer
from synapse.api.constants import ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.events.utils import serialize_event
from synapse.http.client import SimpleHttpClient
from synapse.types import ThirdPartyInstanceID
from synapse.events.utils import serialize_event
from synapse.util.logcontext import preserve_fn, make_deferred_yieldable
from synapse.util.caches.response_cache import ResponseCache
from synapse.types import ThirdPartyInstanceID
import logging
import urllib
logger = logging.getLogger(__name__)
sent_transactions_counter = Counter(
"synapse_appservice_api_sent_transactions",
"Number of /transactions/ requests sent",
["service"]
)
failed_transactions_counter = Counter(
"synapse_appservice_api_failed_transactions",
"Number of /transactions/ requests that failed to send",
["service"]
)
sent_events_counter = Counter(
"synapse_appservice_api_sent_events",
"Number of events sent to the AS",
["service"]
)
HOUR_IN_MS = 60 * 60 * 1000
@@ -212,7 +194,12 @@ class ApplicationServiceApi(SimpleHttpClient):
defer.returnValue(None)
key = (service.id, protocol)
return self.protocol_meta_cache.wrap(key, _get)
result = self.protocol_meta_cache.get(key)
if not result:
result = self.protocol_meta_cache.set(
key, preserve_fn(_get)()
)
return make_deferred_yieldable(result)
@defer.inlineCallbacks
def push_bulk(self, service, events, txn_id=None):
@@ -238,15 +225,12 @@ class ApplicationServiceApi(SimpleHttpClient):
args={
"access_token": service.hs_token
})
sent_transactions_counter.labels(service.id).inc()
sent_events_counter.labels(service.id).inc(len(events))
defer.returnValue(True)
return
except CodeMessageException as e:
logger.warning("push_bulk to %s received %s", uri, e.code)
except Exception as ex:
logger.warning("push_bulk to %s threw exception %s", uri, ex)
failed_transactions_counter.labels(service.id).inc()
defer.returnValue(False)
def _serialize(self, events):

View File
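
The appservice API hunk introduces labelled Prometheus counters. With prometheus_client, declaring a Counter with a label name yields one child counter per label value, so each application service gets its own transaction and event counts. A sketch (the service ID is illustrative):

from prometheus_client import Counter

sent_transactions_counter = Counter(
    "synapse_appservice_api_sent_transactions",
    "Number of /transactions/ requests sent",
    ["service"],
)
failed_transactions_counter = Counter(
    "synapse_appservice_api_failed_transactions",
    "Number of /transactions/ requests that failed to send",
    ["service"],
)

# one child counter per application service ID
sent_transactions_counter.labels("irc_bridge").inc()
failed_transactions_counter.labels("irc_bridge").inc()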

@@ -48,14 +48,14 @@ UP & quit +---------- YES SUCCESS
This is all tied together by the AppServiceScheduler which DIs the required
components.
"""
import logging
from twisted.internet import defer
from synapse.appservice import ApplicationServiceState
from synapse.util.logcontext import run_in_background
from synapse.util.logcontext import preserve_fn
from synapse.util.metrics import Measure
import logging
logger = logging.getLogger(__name__)
@@ -106,7 +106,7 @@ class _ServiceQueuer(object):
def enqueue(self, service, event):
# if this service isn't being sent something
self.queued_events.setdefault(service.id, []).append(event)
run_in_background(self._send_request, service)
preserve_fn(self._send_request)(service)
@defer.inlineCallbacks
def _send_request(self, service):
@@ -152,10 +152,10 @@ class _TransactionController(object):
if sent:
yield txn.complete(self.store)
else:
run_in_background(self._start_recoverer, service)
except Exception:
logger.exception("Error creating appservice transaction")
run_in_background(self._start_recoverer, service)
preserve_fn(self._start_recoverer)(service)
except Exception as e:
logger.exception(e)
preserve_fn(self._start_recoverer)(service)
@defer.inlineCallbacks
def on_recovered(self, recoverer):
@@ -176,20 +176,17 @@ class _TransactionController(object):
@defer.inlineCallbacks
def _start_recoverer(self, service):
try:
yield self.store.set_appservice_state(
service,
ApplicationServiceState.DOWN
)
logger.info(
"Application service falling behind. Starting recoverer. AS ID %s",
service.id
)
recoverer = self.recoverer_fn(service, self.on_recovered)
self.add_recoverers([recoverer])
recoverer.recover()
except Exception:
logger.exception("Error starting AS recoverer")
yield self.store.set_appservice_state(
service,
ApplicationServiceState.DOWN
)
logger.info(
"Application service falling behind. Starting recoverer. AS ID %s",
service.id
)
recoverer = self.recoverer_fn(service, self.on_recovered)
self.add_recoverers([recoverer])
recoverer.recover()
@defer.inlineCallbacks
def _is_service_up(self, service):

View File

@@ -12,9 +12,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import ConfigError
# export ConfigError if somebody does import *
# this is largely a fudge to stop PEP8 moaning about the import
__all__ = ["ConfigError"]

View File

@@ -16,12 +16,11 @@
import argparse
import errno
import os
import yaml
from textwrap import dedent
from six import integer_types
import yaml
class ConfigError(Exception):
pass
@@ -282,15 +281,15 @@ class Config(object):
)
if not cls.path_exists(config_dir_path):
os.makedirs(config_dir_path)
with open(config_path, "w") as config_file:
config_str, config = obj.generate_config(
with open(config_path, "wb") as config_file:
config_bytes, config = obj.generate_config(
config_dir_path=config_dir_path,
server_name=server_name,
report_stats=(config_args.report_stats == "yes"),
is_generating_file=True
)
obj.invoke_all("generate_files", config)
config_file.write(config_str)
config_file.write(config_bytes)
print((
"A config file has been generated in %r for server name"
" %r with corresponding SSL keys and self-signed"

View File
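
The "w"/"wb" churn in this file (and in the TLS config further down) is Python 2/3 porting: on Python 3 the file mode has to match what is written, str in text mode and bytes in binary mode. In short:

# Python 3 enforces mode/type agreement:
with open("homeserver.yaml", "w") as f:
    f.write("server_name: example.com\n")      # str -> text mode

with open("server.key", "wb") as f:
    f.write(b"-----BEGIN PRIVATE KEY-----\n")  # bytes -> binary mode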

@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.constants import EventTypes
from ._base import Config
from synapse.api.constants import EventTypes
class ApiConfig(Config):

View File

@@ -12,18 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from six import string_types
from six.moves.urllib import parse as urlparse
import yaml
from netaddr import IPSet
from ._base import Config, ConfigError
from synapse.appservice import ApplicationService
from synapse.types import UserID
from ._base import Config, ConfigError
import urllib
import yaml
import logging
from six import string_types
logger = logging.getLogger(__name__)
@@ -107,7 +105,7 @@ def _load_appservice(hostname, as_info, config_filename):
)
localpart = as_info["sender_localpart"]
if urlparse.quote(localpart) != localpart:
if urllib.quote(localpart) != localpart:
raise ValueError(
"sender_localpart needs characters which are not URL encoded."
)
@@ -156,13 +154,6 @@ def _load_appservice(hostname, as_info, config_filename):
" will not receive events or queries.",
config_filename,
)
ip_range_whitelist = None
if as_info.get('ip_range_whitelist'):
ip_range_whitelist = IPSet(
as_info.get('ip_range_whitelist')
)
return ApplicationService(
token=as_info["as_token"],
hostname=hostname,
@@ -172,6 +163,5 @@ def _load_appservice(hostname, as_info, config_filename):
sender=user_id,
id=as_info["id"],
protocols=protocols,
rate_limited=rate_limited,
ip_range_whitelist=ip_range_whitelist,
rate_limited=rate_limited
)

View File
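
The appservice-config hunk threads an optional ip_range_whitelist through to the ApplicationService. netaddr's IPSet turns the configured CIDR list into a structure with cheap containment checks; a sketch with illustrative ranges:

from netaddr import IPAddress, IPSet

as_info = {"ip_range_whitelist": ["192.168.0.0/16", "10.0.0.0/8"]}  # illustrative

ip_range_whitelist = None
if as_info.get("ip_range_whitelist"):
    ip_range_whitelist = IPSet(as_info.get("ip_range_whitelist"))

assert IPAddress("192.168.1.1") in ip_range_whitelist
assert IPAddress("8.8.8.8") not in ip_range_whitelist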

@@ -1,88 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
DEFAULT_CONFIG = """\
# User Consent configuration
#
# for detailed instructions, see
# https://github.com/matrix-org/synapse/blob/master/docs/consent_tracking.md
#
# Parts of this section are required if enabling the 'consent' resource under
# 'listeners', in particular 'template_dir' and 'version'.
#
# 'template_dir' gives the location of the templates for the HTML forms.
# This directory should contain one subdirectory per language (eg, 'en', 'fr'),
# and each language directory should contain the policy document (named as
# '<version>.html') and a success page (success.html).
#
# 'version' specifies the 'current' version of the policy document. It defines
# the version to be served by the consent resource if there is no 'v'
# parameter.
#
# 'server_notice_content', if enabled, will send a user a "Server Notice"
# asking them to consent to the privacy policy. The 'server_notices' section
# must also be configured for this to work. Notices will *not* be sent to
# guest users unless 'send_server_notice_to_guests' is set to true.
#
# 'block_events_error', if set, will block any attempts to send events
# until the user consents to the privacy policy. The value of the setting is
# used as the text of the error.
#
# user_consent:
# template_dir: res/templates/privacy
# version: 1.0
# server_notice_content:
# msgtype: m.text
# body: >-
# To continue using this homeserver you must review and agree to the
# terms and conditions at %(consent_uri)s
# send_server_notice_to_guests: True
# block_events_error: >-
# To continue using this homeserver you must review and agree to the
# terms and conditions at %(consent_uri)s
#
"""
class ConsentConfig(Config):
def __init__(self):
super(ConsentConfig, self).__init__()
self.user_consent_version = None
self.user_consent_template_dir = None
self.user_consent_server_notice_content = None
self.user_consent_server_notice_to_guests = False
self.block_events_without_consent_error = None
def read_config(self, config):
consent_config = config.get("user_consent")
if consent_config is None:
return
self.user_consent_version = str(consent_config["version"])
self.user_consent_template_dir = consent_config["template_dir"]
self.user_consent_server_notice_content = consent_config.get(
"server_notice_content",
)
self.block_events_without_consent_error = consent_config.get(
"block_events_error",
)
self.user_consent_server_notice_to_guests = bool(consent_config.get(
"send_server_notice_to_guests", False,
))
def default_config(self, **kwargs):
return DEFAULT_CONFIG

View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,32 +12,31 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tls import TlsConfig
from .server import ServerConfig
from .logger import LoggingConfig
from .database import DatabaseConfig
from .ratelimiting import RatelimitConfig
from .repository import ContentRepositoryConfig
from .captcha import CaptchaConfig
from .voip import VoipConfig
from .registration import RegistrationConfig
from .metrics import MetricsConfig
from .api import ApiConfig
from .appservice import AppServiceConfig
from .captcha import CaptchaConfig
from .cas import CasConfig
from .consent_config import ConsentConfig
from .database import DatabaseConfig
from .emailconfig import EmailConfig
from .groups import GroupsConfig
from .jwt import JWTConfig
from .key import KeyConfig
from .logger import LoggingConfig
from .metrics import MetricsConfig
from .password import PasswordConfig
from .password_auth_providers import PasswordAuthProviderConfig
from .push import PushConfig
from .ratelimiting import RatelimitConfig
from .registration import RegistrationConfig
from .repository import ContentRepositoryConfig
from .saml2 import SAML2Config
from .server import ServerConfig
from .server_notices_config import ServerNoticesConfig
from .spam_checker import SpamCheckerConfig
from .tls import TlsConfig
from .user_directory import UserDirectoryConfig
from .voip import VoipConfig
from .cas import CasConfig
from .password import PasswordConfig
from .jwt import JWTConfig
from .password_auth_providers import PasswordAuthProviderConfig
from .emailconfig import EmailConfig
from .workers import WorkerConfig
from .push import PushConfig
from .spam_checker import SpamCheckerConfig
from .groups import GroupsConfig
from .user_directory import UserDirectoryConfig
class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
@@ -47,15 +45,12 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
JWTConfig, PasswordConfig, EmailConfig,
WorkerConfig, PasswordAuthProviderConfig, PushConfig,
SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,
ConsentConfig,
ServerNoticesConfig,
):
SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,):
pass
if __name__ == '__main__':
import sys
sys.stdout.write(
HomeServerConfig().generate_config(sys.argv[1], sys.argv[2], True)[0]
HomeServerConfig().generate_config(sys.argv[1], sys.argv[2])[0]
)

View File

@@ -15,6 +15,7 @@
from ._base import Config, ConfigError
MISSING_JWT = (
"""Missing jwt library. This is required for jwt login.

View File

@@ -13,24 +13,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import os
from ._base import Config, ConfigError
from synapse.util.stringutils import random_string
from signedjson.key import (
NACL_ED25519,
decode_signing_key_base64,
decode_verify_key_bytes,
generate_signing_key,
is_signing_algorithm_supported,
read_signing_keys,
write_signing_keys,
generate_signing_key, is_signing_algorithm_supported,
decode_signing_key_base64, decode_verify_key_bytes,
read_signing_keys, write_signing_keys, NACL_ED25519
)
from unpaddedbase64 import decode_base64
from synapse.util.stringutils import random_string_with_symbols
from synapse.util.stringutils import random_string, random_string_with_symbols
import os
import hashlib
import logging
from ._base import Config, ConfigError
logger = logging.getLogger(__name__)
@@ -62,20 +59,14 @@ class KeyConfig(Config):
self.expire_access_token = config.get("expire_access_token", False)
# a secret which is used to calculate HMACs for form values, to stop
# falsification of values
self.form_secret = config.get("form_secret", None)
def default_config(self, config_dir_path, server_name, is_generating_file=False,
**kwargs):
base_key_name = os.path.join(config_dir_path, server_name)
if is_generating_file:
macaroon_secret_key = random_string_with_symbols(50)
form_secret = '"%s"' % random_string_with_symbols(50)
else:
macaroon_secret_key = None
form_secret = 'null'
return """\
macaroon_secret_key: "%(macaroon_secret_key)s"
@@ -83,10 +74,6 @@ class KeyConfig(Config):
# Used to enable access token expiration.
expire_access_token: False
# a secret which is used to calculate HMACs for form values, to stop
# falsification of values
form_secret: %(form_secret)s
## Signing Keys ##
# Path to the signing key to sign messages with

View File

@@ -12,22 +12,17 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import logging.config
import os
import signal
import sys
from string import Template
import yaml
from twisted.logger import STDLibLogObserver, globalLogBeginner
import synapse
from synapse.util.logcontext import LoggingContextFilter
from synapse.util.versionstring import get_version_string
from ._base import Config
from synapse.util.logcontext import LoggingContextFilter
from twisted.logger import globalLogBeginner, STDLibLogObserver
import logging
import logging.config
import yaml
from string import Template
import os
import signal
DEFAULT_LOG_CONFIG = Template("""
version: 1
@@ -122,7 +117,7 @@ class LoggingConfig(Config):
log_config = config.get("log_config")
if log_config and not os.path.exists(log_config):
log_file = self.abspath("homeserver.log")
with open(log_config, "w") as log_config_file:
with open(log_config, "wb") as log_config_file:
log_config_file.write(
DEFAULT_LOG_CONFIG.substitute(log_file=log_file)
)
@@ -207,15 +202,6 @@ def setup_logging(config, use_worker_options=False):
if getattr(signal, "SIGHUP"):
signal.signal(signal.SIGHUP, sighup)
# make sure that the first thing we log is a thing we can grep backwards
# for
logging.warn("***** STARTING SERVER *****")
logging.warn(
"Server %s version %s",
sys.argv[0], get_version_string(synapse),
)
logging.info("Server hostname: %s", config.server_name)
# It's critical to point twisted's internal logging somewhere, otherwise it
# stacks up and leaks up to 64K objects;
# see: https://twistedmatrix.com/trac/ticket/8164

View File

@@ -13,10 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.util.module_loader import load_module
from ._base import Config
from synapse.util.module_loader import load_module
LDAP_PROVIDER = 'ldap_auth_provider.LdapAuthProvider'

View File

@@ -13,11 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.util import strtobool
from ._base import Config
from synapse.util.stringutils import random_string_with_symbols
from ._base import Config
from distutils.util import strtobool
class RegistrationConfig(Config):

View File

@@ -13,11 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config, ConfigError
from collections import namedtuple
from synapse.util.module_loader import load_module
from ._base import Config, ConfigError
MISSING_NETADDR = (
"Missing netaddr library. This is required for URL preview API."
@@ -250,9 +250,6 @@ class ContentRepositoryConfig(Config):
# - '192.168.0.0/16'
# - '100.64.0.0/10'
# - '169.254.0.0/16'
# - '::1/128'
# - 'fe80::/64'
# - 'fc00::/7'
#
# List of IP address CIDR ranges that the URL preview spider is allowed
# to access even if they are specified in url_preview_ip_range_blacklist.

View File

@@ -14,25 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from synapse.http.endpoint import parse_and_validate_server_name
from ._base import Config, ConfigError
logger = logging.Logger(__name__)
class ServerConfig(Config):
def read_config(self, config):
self.server_name = config["server_name"]
try:
parse_and_validate_server_name(self.server_name)
except ValueError as e:
raise ConfigError(str(e))
self.pid_file = self.abspath(config.get("pid_file"))
self.web_client = config["web_client"]
self.web_client_location = config.get("web_client_location", None)
@@ -67,14 +55,6 @@ class ServerConfig(Config):
"block_non_admin_invites", False,
)
# Options to control access by tracking MAU
self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
if self.limit_usage_by_mau:
self.max_mau_value = config.get(
"max_mau_value", 0,
)
else:
self.max_mau_value = 0
# FIXME: federation_domain_whitelist needs sytests
self.federation_domain_whitelist = None
federation_domain_whitelist = config.get(
@@ -158,12 +138,6 @@ class ServerConfig(Config):
metrics_port = config.get("metrics_port")
if metrics_port:
logger.warn(
("The metrics_port configuration option is deprecated in Synapse 0.31 "
"in favour of a listener. Please see "
"http://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst"
" on how to configure the new listener."))
self.listeners.append({
"port": metrics_port,
"bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
@@ -178,8 +152,8 @@ class ServerConfig(Config):
})
def default_config(self, server_name, **kwargs):
_, bind_port = parse_and_validate_server_name(server_name)
if bind_port is not None:
if ":" in server_name:
bind_port = int(server_name.split(":")[1])
unsecure_port = bind_port - 400
else:
bind_port = 8448
@@ -217,8 +191,6 @@ class ServerConfig(Config):
# different cores. See
# https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
#
# This setting requires the affinity package to be installed!
#
# cpu_affinity: 0xFFFFFFFF
# Whether to serve a web client from the HTTP/HTTPS root resource.

View File
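
The server-config hunk replaces a naive split on ":" with parse_and_validate_server_name, which validates the name and copes with IPv6 literals. A sketch of the default-port derivation, matching the (host, port-or-None) return shape the hunk relies on:

from synapse.http.endpoint import parse_and_validate_server_name

host, bind_port = parse_and_validate_server_name("example.com:8448")
if bind_port is not None:
    unsecure_port = bind_port - 400
else:
    bind_port = 8448
    unsecure_port = 8008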

@@ -1,87 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.types import UserID
from ._base import Config
DEFAULT_CONFIG = """\
# Server Notices room configuration
#
# Uncomment this section to enable a room which can be used to send notices
# from the server to users. It is a special room which cannot be left; notices
# come from a special "notices" user id.
#
# If you uncomment this section, you *must* define the system_mxid_localpart
# setting, which defines the id of the user which will be used to send the
# notices.
#
# It's also possible to override the room name, the display name of the
# "notices" user, and the avatar for the user.
#
# server_notices:
# system_mxid_localpart: notices
# system_mxid_display_name: "Server Notices"
# system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
# room_name: "Server Notices"
"""
class ServerNoticesConfig(Config):
"""Configuration for the server notices room.
Attributes:
server_notices_mxid (str|None):
The MXID to use for server notices.
None if server notices are not enabled.
server_notices_mxid_display_name (str|None):
The display name to use for the server notices user.
None if server notices are not enabled.
server_notices_mxid_avatar_url (str|None):
The avatar URL to use for the server notices user.
None if server notices are not enabled.
server_notices_room_name (str|None):
The name to use for the server notices room.
None if server notices are not enabled.
"""
def __init__(self):
super(ServerNoticesConfig, self).__init__()
self.server_notices_mxid = None
self.server_notices_mxid_display_name = None
self.server_notices_mxid_avatar_url = None
self.server_notices_room_name = None
def read_config(self, config):
c = config.get("server_notices")
if c is None:
return
mxid_localpart = c['system_mxid_localpart']
self.server_notices_mxid = UserID(
mxid_localpart, self.server_name,
).to_string()
self.server_notices_mxid_display_name = c.get(
'system_mxid_display_name', None,
)
self.server_notices_mxid_avatar_url = c.get(
'system_mxid_avatar_url', None,
)
# todo: i18n
self.server_notices_room_name = c.get('room_name', "Server Notices")
def default_config(self, **kwargs):
return DEFAULT_CONFIG

View File
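
The server notices MXID is assembled from the configured localpart and the homeserver's own domain. A sketch of the UserID construction with an illustrative server name:

from synapse.types import UserID

server_notices_mxid = UserID("notices", "example.com").to_string()
assert server_notices_mxid == "@notices:example.com"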

@@ -13,15 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from hashlib import sha256
from unpaddedbase64 import encode_base64
from ._base import Config
from OpenSSL import crypto
import subprocess
import os
from ._base import Config
from hashlib import sha256
from unpaddedbase64 import encode_base64
GENERATE_DH_PARAMS = False
@@ -134,7 +133,7 @@ class TlsConfig(Config):
tls_dh_params_path = config["tls_dh_params_path"]
if not self.path_exists(tls_private_key_path):
with open(tls_private_key_path, "wb") as private_key_file:
with open(tls_private_key_path, "w") as private_key_file:
tls_private_key = crypto.PKey()
tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
private_key_pem = crypto.dump_privatekey(
@@ -149,7 +148,7 @@ class TlsConfig(Config):
)
if not self.path_exists(tls_certificate_path):
with open(tls_certificate_path, "wb") as certificate_file:
with open(tls_certificate_path, "w") as certificate_file:
cert = crypto.X509()
subject = cert.get_subject()
subject.CN = config["server_name"]

View File

@@ -30,10 +30,10 @@ class VoipConfig(Config):
## Turn ##
# The public URIs of the TURN server to give to clients
#turn_uris: []
turn_uris: []
# The shared secret used to compute passwords for the TURN server
#turn_shared_secret: "YOUR_SHARED_SECRET"
turn_shared_secret: "YOUR_SHARED_SECRET"
# The Username and password if the TURN server needs them and
# does not use a token

View File

@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from OpenSSL import SSL, crypto
from twisted.internet import ssl
from twisted.internet._sslverify import _defaultCurveName
from OpenSSL import SSL
from twisted.internet._sslverify import _OpenSSLECCurve, _defaultCurveName
import logging
logger = logging.getLogger(__name__)
@@ -32,9 +32,8 @@ class ServerContextFactory(ssl.ContextFactory):
@staticmethod
def configure_context(context, config):
try:
_ecCurve = crypto.get_elliptic_curve(_defaultCurveName)
context.set_tmp_ecdh(_ecCurve)
_ecCurve = _OpenSSLECCurve(_defaultCurveName)
_ecCurve.addECKeyToContext(context)
except Exception:
logger.exception("Failed to enable elliptic curve for TLS")
context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)

View File
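
The context-factory hunk tracks a Twisted API removal: _OpenSSLECCurve is gone in newer Twisted, so the curve is fetched from pyOpenSSL directly and handed to the context. A sketch of the newer spelling, assuming a reasonably recent pyOpenSSL:

import logging

from OpenSSL import SSL, crypto
from twisted.internet._sslverify import _defaultCurveName

logger = logging.getLogger(__name__)


def configure_context(context):
    try:
        _ec_curve = crypto.get_elliptic_curve(_defaultCurveName)
        context.set_tmp_ecdh(_ec_curve)
    except Exception:
        logger.exception("Failed to enable elliptic curve for TLS")
    context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)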

@@ -15,15 +15,15 @@
# limitations under the License.
import hashlib
import logging
from synapse.api.errors import SynapseError, Codes
from synapse.events.utils import prune_event
from canonicaljson import encode_canonical_json
from unpaddedbase64 import encode_base64, decode_base64
from signedjson.sign import sign_json
from unpaddedbase64 import decode_base64, encode_base64
from synapse.api.errors import Codes, SynapseError
from synapse.events.utils import prune_event
import hashlib
import logging
logger = logging.getLogger(__name__)

View File

@@ -13,16 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.util import logcontext
from twisted.web.http import HTTPClient
from twisted.internet.protocol import Factory
from twisted.internet import defer, reactor
from synapse.http.endpoint import matrix_federation_endpoint
import simplejson as json
import logging
from canonicaljson import json
from twisted.internet import defer, reactor
from twisted.internet.protocol import Factory
from twisted.web.http import HTTPClient
from synapse.http.endpoint import matrix_federation_endpoint
from synapse.util import logcontext
logger = logging.getLogger(__name__)

View File

@@ -14,37 +14,32 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import urllib
from collections import namedtuple
from signedjson.key import (
decode_verify_key_bytes,
encode_verify_key_base64,
is_signing_algorithm_supported,
from synapse.crypto.keyclient import fetch_server_key
from synapse.api.errors import SynapseError, Codes
from synapse.util import unwrapFirstError, logcontext
from synapse.util.logcontext import (
PreserveLoggingContext,
preserve_fn
)
from synapse.util.metrics import Measure
from twisted.internet import defer
from signedjson.sign import (
SignatureVerifyException,
encode_canonical_json,
sign_json,
signature_ids,
verify_signed_json,
verify_signed_json, signature_ids, sign_json, encode_canonical_json
)
from signedjson.key import (
is_signing_algorithm_supported, decode_verify_key_bytes
)
from unpaddedbase64 import decode_base64, encode_base64
from OpenSSL import crypto
from twisted.internet import defer
from synapse.api.errors import Codes, SynapseError
from synapse.crypto.keyclient import fetch_server_key
from synapse.util import logcontext, unwrapFirstError
from synapse.util.logcontext import (
PreserveLoggingContext,
preserve_fn,
run_in_background,
)
from synapse.util.metrics import Measure
from collections import namedtuple
import urllib
import hashlib
import logging
logger = logging.getLogger(__name__)
@@ -60,7 +55,7 @@ Attributes:
key_ids(set(str)): The set of key_ids that could be used to verify the
JSON object
json_object(dict): The JSON object to verify.
deferred(Deferred[str, str, nacl.signing.VerifyKey]):
deferred(twisted.internet.defer.Deferred):
A deferred (server_name, key_id, verify_key) tuple that resolves when
a verify key has been fetched. The deferreds' callbacks are run with no
logcontext.
@@ -132,7 +127,7 @@ class Keyring(object):
verify_requests.append(verify_request)
run_in_background(self._start_key_lookups, verify_requests)
preserve_fn(self._start_key_lookups)(verify_requests)
# Pass those keys to handle_key_deferred so that the json object
# signatures can be verified
@@ -151,56 +146,53 @@ class Keyring(object):
verify_requests (List[VerifyKeyRequest]):
"""
try:
# create a deferred for each server we're going to look up the keys
# for; we'll resolve them once we have completed our lookups.
# These will be passed into wait_for_previous_lookups to block
# any other lookups until we have finished.
# The deferreds are called with no logcontext.
server_to_deferred = {
rq.server_name: defer.Deferred()
for rq in verify_requests
}
# create a deferred for each server we're going to look up the keys
# for; we'll resolve them once we have completed our lookups.
# These will be passed into wait_for_previous_lookups to block
# any other lookups until we have finished.
# The deferreds are called with no logcontext.
server_to_deferred = {
rq.server_name: defer.Deferred()
for rq in verify_requests
}
# We want to wait for any previous lookups to complete before
# proceeding.
yield self.wait_for_previous_lookups(
[rq.server_name for rq in verify_requests],
server_to_deferred,
# We want to wait for any previous lookups to complete before
# proceeding.
yield self.wait_for_previous_lookups(
[rq.server_name for rq in verify_requests],
server_to_deferred,
)
# Actually start fetching keys.
self._get_server_verify_keys(verify_requests)
# When we've finished fetching all the keys for a given server_name,
# resolve the deferred passed to `wait_for_previous_lookups` so that
# any lookups waiting will proceed.
#
# map from server name to a set of request ids
server_to_request_ids = {}
for verify_request in verify_requests:
server_name = verify_request.server_name
request_id = id(verify_request)
server_to_request_ids.setdefault(server_name, set()).add(request_id)
def remove_deferreds(res, verify_request):
server_name = verify_request.server_name
request_id = id(verify_request)
server_to_request_ids[server_name].discard(request_id)
if not server_to_request_ids[server_name]:
d = server_to_deferred.pop(server_name, None)
if d:
d.callback(None)
return res
for verify_request in verify_requests:
verify_request.deferred.addBoth(
remove_deferreds, verify_request,
)
# Actually start fetching keys.
self._get_server_verify_keys(verify_requests)
# When we've finished fetching all the keys for a given server_name,
# resolve the deferred passed to `wait_for_previous_lookups` so that
# any lookups waiting will proceed.
#
# map from server name to a set of request ids
server_to_request_ids = {}
for verify_request in verify_requests:
server_name = verify_request.server_name
request_id = id(verify_request)
server_to_request_ids.setdefault(server_name, set()).add(request_id)
def remove_deferreds(res, verify_request):
server_name = verify_request.server_name
request_id = id(verify_request)
server_to_request_ids[server_name].discard(request_id)
if not server_to_request_ids[server_name]:
d = server_to_deferred.pop(server_name, None)
if d:
d.callback(None)
return res
for verify_request in verify_requests:
verify_request.deferred.addBoth(
remove_deferreds, verify_request,
)
except Exception:
logger.exception("Error starting key lookups")
@defer.inlineCallbacks
def wait_for_previous_lookups(self, server_names, server_to_deferred):
"""Waits for any previous key lookups for the given servers to finish.
@@ -321,7 +313,7 @@ class Keyring(object):
if not verify_request.deferred.called:
verify_request.deferred.errback(err)
run_in_background(do_iterations).addErrback(on_err)
preserve_fn(do_iterations)().addErrback(on_err)
@defer.inlineCallbacks
def get_keys_from_store(self, server_name_and_key_ids):
@@ -337,9 +329,8 @@ class Keyring(object):
"""
res = yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
run_in_background(
self.store.get_server_verify_keys,
server_name, key_ids,
preserve_fn(self.store.get_server_verify_keys)(
server_name, key_ids
).addCallback(lambda ks, server: (server, ks), server_name)
for server_name, key_ids in server_name_and_key_ids
],
@@ -361,13 +352,13 @@ class Keyring(object):
logger.exception(
"Unable to get key from %r: %s %s",
perspective_name,
type(e).__name__, str(e),
type(e).__name__, str(e.message),
)
defer.returnValue({})
results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
run_in_background(get_key, p_name, p_keys)
preserve_fn(get_key)(p_name, p_keys)
for p_name, p_keys in self.perspective_servers.items()
],
consumeErrors=True,
@@ -393,7 +384,7 @@ class Keyring(object):
logger.info(
"Unable to get key %r for %r directly: %s %s",
key_ids, server_name,
type(e).__name__, str(e),
type(e).__name__, str(e.message),
)
if not keys:
@@ -407,7 +398,7 @@ class Keyring(object):
results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
run_in_background(get_key, server_name, key_ids)
preserve_fn(get_key)(server_name, key_ids)
for server_name, key_ids in server_name_and_key_ids
],
consumeErrors=True,
@@ -490,8 +481,7 @@ class Keyring(object):
yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
run_in_background(
self.store_keys,
preserve_fn(self.store_keys)(
server_name=server_name,
from_server=perspective_name,
verify_keys=response_keys,
@@ -549,8 +539,7 @@ class Keyring(object):
yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
run_in_background(
self.store_keys,
preserve_fn(self.store_keys)(
server_name=key_server_name,
from_server=server_name,
verify_keys=verify_keys,
@@ -626,8 +615,7 @@ class Keyring(object):
yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
run_in_background(
self.store.store_server_keys_json,
preserve_fn(self.store.store_server_keys_json)(
server_name=server_name,
key_id=key_id,
from_server=server_name,
@@ -728,8 +716,7 @@ class Keyring(object):
# TODO(markjh): Store whether the keys have expired.
return logcontext.make_deferred_yieldable(defer.gatherResults(
[
run_in_background(
self.store.store_server_verify_key,
preserve_fn(self.store.store_server_verify_key)(
server_name, server_name, key.time_added, key
)
for key_id, key in verify_keys.items()
@@ -740,17 +727,6 @@ class Keyring(object):
@defer.inlineCallbacks
def _handle_key_deferred(verify_request):
"""Waits for the key to become available, and then performs a verification
Args:
verify_request (VerifyKeyRequest):
Returns:
Deferred[None]
Raises:
SynapseError if there was a problem performing the verification
"""
server_name = verify_request.server_name
try:
with PreserveLoggingContext():
@@ -758,7 +734,7 @@ def _handle_key_deferred(verify_request):
except IOError as e:
logger.warn(
"Got IOError when downloading keys for %s: %s %s",
server_name, type(e).__name__, str(e),
server_name, type(e).__name__, str(e.message),
)
raise SynapseError(
502,
@@ -768,7 +744,7 @@ def _handle_key_deferred(verify_request):
except Exception as e:
logger.exception(
"Got Exception when downloading keys for %s: %s %s",
server_name, type(e).__name__, str(e),
server_name, type(e).__name__, str(e.message),
)
raise SynapseError(
401,
@@ -783,17 +759,11 @@ def _handle_key_deferred(verify_request):
))
try:
verify_signed_json(json_object, server_name, verify_key)
except SignatureVerifyException as e:
logger.debug(
"Error verifying signature for %s:%s:%s with key %s: %s",
server_name, verify_key.alg, verify_key.version,
encode_verify_key_base64(verify_key),
str(e),
)
except Exception:
raise SynapseError(
401,
"Invalid signature for server %s with key %s:%s: %s" % (
server_name, verify_key.alg, verify_key.version, str(e),
"Invalid signature for server %s with key %s:%s" % (
server_name, verify_key.alg, verify_key.version
),
Codes.UNAUTHORIZED,
)

View File
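
Most of the keyring churn is one pattern applied many times: start one lookup per server with run_in_background (or preserve_fn), gather the Deferreds with consumeErrors=True, and wait from the calling logcontext via make_deferred_yieldable. Distilled into a sketch (fetch_one is an assumed callable returning a Deferred):

from twisted.internet import defer

from synapse.util import logcontext
from synapse.util.logcontext import run_in_background


@defer.inlineCallbacks
def fetch_all(fetch_one, items):
    # run the fetches concurrently, then wait for all of them
    results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
        [run_in_background(fetch_one, item) for item in items],
        consumeErrors=True,
    ))
    defer.returnValue(results)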

@@ -17,11 +17,11 @@ import logging
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from signedjson.sign import verify_signed_json, SignatureVerifyException
from unpaddedbase64 import decode_base64
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, EventSizeError, SynapseError
from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, SynapseError, EventSizeError
from synapse.types import UserID, get_domain_from_id
logger = logging.getLogger(__name__)
@@ -34,11 +34,9 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
event: the event being checked.
auth_events (dict: event-key -> event): the existing room state.
Raises:
AuthError if the checks fail
Returns:
if the auth checks pass.
True if the auth checks pass.
"""
if do_size_check:
_check_size_limits(event)
@@ -73,10 +71,9 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
# Oh, we don't know what the state of the room was, so we
# are trusting that this is allowed (at least for now)
logger.warn("Trusting event: %s", event.event_id)
return
return True
if event.type == EventTypes.Create:
sender_domain = get_domain_from_id(event.sender)
room_id_domain = get_domain_from_id(event.room_id)
if room_id_domain != sender_domain:
raise AuthError(
@@ -84,8 +81,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
"Creation event's room_id domain does not match sender's"
)
# FIXME
logger.debug("Allowing! %s", event)
return
return True
creation_event = auth_events.get((EventTypes.Create, ""), None)
@@ -122,8 +118,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
403,
"Alias event's state_key does not match sender's domain"
)
logger.debug("Allowing! %s", event)
return
return True
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
@@ -132,9 +127,14 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
)
if event.type == EventTypes.Member:
_is_membership_change_allowed(event, auth_events)
logger.debug("Allowing! %s", event)
return
allowed = _is_membership_change_allowed(
event, auth_events
)
if allowed:
logger.debug("Allowing! %s", event)
else:
logger.debug("Denying! %s", event)
return allowed
_check_event_sender_in_room(event, auth_events)
@@ -153,8 +153,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
)
)
else:
logger.debug("Allowing! %s", event)
return
return True
_can_send_event(event, auth_events)
@@ -201,7 +200,7 @@ def _is_membership_change_allowed(event, auth_events):
create = auth_events.get(key)
if create and event.prev_events[0][0] == create.event_id:
if create.content["creator"] == event.state_key:
return
return True
target_user_id = event.state_key
@@ -266,13 +265,13 @@ def _is_membership_change_allowed(event, auth_events):
raise AuthError(
403, "%s is banned from the room" % (target_user_id,)
)
return
return True
if Membership.JOIN != membership:
if (caller_invited
and Membership.LEAVE == membership
and target_user_id == event.user_id):
return
return True
if not caller_in_room: # caller isn't joined
raise AuthError(
@@ -335,6 +334,8 @@ def _is_membership_change_allowed(event, auth_events):
else:
raise AuthError(500, "Unknown membership %s" % membership)
return True
def _check_event_sender_in_room(event, auth_events):
key = (EventTypes.Member, event.user_id, )
@@ -354,46 +355,35 @@ def _check_joined_room(member, user_id, room_id):
))
def get_send_level(etype, state_key, power_levels_event):
"""Get the power level required to send an event of a given type
def get_send_level(etype, state_key, auth_events):
key = (EventTypes.PowerLevels, "", )
send_level_event = auth_events.get(key)
send_level = None
if send_level_event:
send_level = send_level_event.content.get("events", {}).get(
etype
)
if send_level is None:
if state_key is not None:
send_level = send_level_event.content.get(
"state_default", 50
)
else:
send_level = send_level_event.content.get(
"events_default", 0
)
The federation spec [1] refers to this as "Required Power Level".
https://matrix.org/docs/spec/server_server/unstable.html#definitions
Args:
etype (str): type of event
state_key (str|None): state_key of state event, or None if it is not
a state event.
power_levels_event (synapse.events.EventBase|None): power levels event
in force at this point in the room
Returns:
int: power level required to send this event.
"""
if power_levels_event:
power_levels_content = power_levels_event.content
if send_level:
send_level = int(send_level)
else:
power_levels_content = {}
send_level = 0
# see if we have a custom level for this event type
send_level = power_levels_content.get("events", {}).get(etype)
# otherwise, fall back to the state_default/events_default.
if send_level is None:
if state_key is not None:
send_level = power_levels_content.get("state_default", 50)
else:
send_level = power_levels_content.get("events_default", 0)
return int(send_level)
return send_level
def _can_send_event(event, auth_events):
power_levels_event = _get_power_level_event(auth_events)
send_level = get_send_level(
event.type, event.get("state_key"), power_levels_event,
event.type, event.get("state_key", None), auth_events
)
user_level = get_user_power_level(event.user_id, auth_events)
@@ -481,14 +471,14 @@ def _check_power_levels(event, auth_events):
]
old_list = current_state.content.get("users", {})
for user in set(list(old_list) + list(user_list)):
for user in set(old_list.keys() + user_list.keys()):
levels_to_check.append(
(user, "users")
)
old_list = current_state.content.get("events", {})
new_list = event.content.get("events", {})
for ev_id in set(list(old_list) + list(new_list)):
for ev_id in set(old_list.keys() + new_list.keys()):
levels_to_check.append(
(ev_id, "events")
)
@@ -525,11 +515,7 @@ def _check_power_levels(event, auth_events):
"to your own"
)
# Check if the old and new levels are greater than the user level
# (if defined)
old_level_too_big = old_level is not None and old_level > user_level
new_level_too_big = new_level is not None and new_level > user_level
if old_level_too_big or new_level_too_big:
if old_level > user_level or new_level > user_level:
raise AuthError(
403,
"You don't have permission to add ops level greater "
@@ -538,22 +524,13 @@ def _check_power_levels(event, auth_events):
def _get_power_level_event(auth_events):
return auth_events.get((EventTypes.PowerLevels, ""))
key = (EventTypes.PowerLevels, "", )
return auth_events.get(key)
def get_user_power_level(user_id, auth_events):
"""Get a user's power level
Args:
user_id (str): user's id to look up in power_levels
auth_events (dict[(str, str), synapse.events.EventBase]):
state in force at this point in the room (or rather, a subset of
it, including at least the create event and power levels event).
Returns:
int: the user's power level in this room.
"""
power_level_event = _get_power_level_event(auth_events)
if power_level_event:
level = power_level_event.content.get("users", {}).get(user_id)
if not level:
@@ -564,11 +541,6 @@ def get_user_power_level(user_id, auth_events):
else:
return int(level)
else:
# if there is no power levels event, the creator gets 100 and everyone
# else gets 0.
# some things which call this don't pass the create event: hack around
# that.
key = (EventTypes.Create, "", )
create_event = auth_events.get(key)
if (create_event is not None and


@@ -13,8 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.util.caches import intern_dict
from synapse.util.frozenutils import freeze
from synapse.util.caches import intern_dict
# Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
# bugs where we accidentally share e.g. signature dicts. However, converting
@@ -46,26 +47,14 @@ class _EventInternalMetadata(object):
def _event_dict_property(key):
# We want to be able to use hasattr with the event dict properties.
# However, (on python3) hasattr expects AttributeError to be raised. Hence,
# we need to transform the KeyError into an AttributeError
def getter(self):
try:
return self._event_dict[key]
except KeyError:
raise AttributeError(key)
return self._event_dict[key]
def setter(self, v):
try:
self._event_dict[key] = v
except KeyError:
raise AttributeError(key)
self._event_dict[key] = v
def delete(self):
try:
del self._event_dict[key]
except KeyError:
raise AttributeError(key)
del self._event_dict[key]
return property(
getter,
@@ -145,7 +134,7 @@ class EventBase(object):
return field in self._event_dict
def items(self):
return list(self._event_dict.items())
return self._event_dict.items()
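The KeyError-to-AttributeError translation in _event_dict_property matters because hasattr() on Python 3 only swallows AttributeError. A standalone sketch of the same pattern:

    class EventLike(object):
        def __init__(self, event_dict):
            self._event_dict = event_dict

        @property
        def sender(self):
            try:
                return self._event_dict["sender"]
            except KeyError:
                # hasattr() expects AttributeError, not KeyError
                raise AttributeError("sender")

    ev = EventLike({})
    assert not hasattr(ev, "sender")  # would raise KeyError without the translation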
class FrozenEvent(EventBase):


@@ -13,12 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from . import EventBase, FrozenEvent, _event_dict_property
from synapse.types import EventID
from synapse.util.stringutils import random_string
from . import EventBase, FrozenEvent, _event_dict_property
import copy
class EventBuilder(EventBase):


@@ -13,18 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from six import iteritems
from frozendict import frozendict
from twisted.internet import defer
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from frozendict import frozendict
class EventContext(object):
"""
Attributes:
current_state_ids (dict[(str, str), str]):
The current state map including the current event.
(type, state_key) -> event_id
prev_state_ids (dict[(str, str), str]):
The current state map excluding the current event.
(type, state_key) -> event_id
state_group (int|None): state group id, if the state has been stored
as a state group. This is usually only None if e.g. the event is
an outlier.
@@ -41,77 +45,38 @@ class EventContext(object):
prev_state_events (?): XXX: is this ever set to anything other than
the empty list?
_current_state_ids (dict[(str, str), str]|None):
The current state map including the current event. None if outlier
or we haven't fetched the state from DB yet.
(type, state_key) -> event_id
_prev_state_ids (dict[(str, str), str]|None):
The current state map excluding the current event. None if outlier
or we haven't fetched the state from DB yet.
(type, state_key) -> event_id
_fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
been calculated. None if we haven't started calculating yet
_event_type (str): The type of the event the context is associated with.
Only set when state has not been fetched yet.
_event_state_key (str|None): The state_key of the event the context is
associated with. Only set when state has not been fetched yet.
_prev_state_id (str|None): If the event associated with the context is
a state event, then `_prev_state_id` is the event_id of the state
that was replaced.
Only set when state has not been fetched yet.
"""
__slots__ = [
"current_state_ids",
"prev_state_ids",
"state_group",
"rejected",
"prev_group",
"delta_ids",
"prev_state_events",
"app_service",
"_current_state_ids",
"_prev_state_ids",
"_prev_state_id",
"_event_type",
"_event_state_key",
"_fetching_state_deferred",
]
def __init__(self):
self.prev_state_events = []
self.rejected = False
self.app_service = None
@staticmethod
def with_state(state_group, current_state_ids, prev_state_ids,
prev_group=None, delta_ids=None):
context = EventContext()
# The current state including the current event
context._current_state_ids = current_state_ids
self.current_state_ids = None
# The current state excluding the current event
context._prev_state_ids = prev_state_ids
context.state_group = state_group
self.prev_state_ids = None
self.state_group = None
context._prev_state_id = None
context._event_type = None
context._event_state_key = None
context._fetching_state_deferred = defer.succeed(None)
self.rejected = False
# A previously persisted state group and a delta between that
# and this state.
context.prev_group = prev_group
context.delta_ids = delta_ids
self.prev_group = None
self.delta_ids = None
return context
self.prev_state_events = None
@defer.inlineCallbacks
def serialize(self, event, store):
self.app_service = None
def serialize(self, event):
"""Converts self to a type that can be serialized as JSON, and then
deserialized by `deserialize`
@@ -127,12 +92,11 @@ class EventContext(object):
# the prev_state_ids, so if we're a state event we include the event
# id that we replaced in the state.
if event.is_state():
prev_state_ids = yield self.get_prev_state_ids(store)
prev_state_id = prev_state_ids.get((event.type, event.state_key))
prev_state_id = self.prev_state_ids.get((event.type, event.state_key))
else:
prev_state_id = None
defer.returnValue({
return {
"prev_state_id": prev_state_id,
"event_type": event.type,
"event_state_key": event.state_key if event.is_state() else None,
@@ -142,9 +106,10 @@ class EventContext(object):
"delta_ids": _encode_state_dict(self.delta_ids),
"prev_state_events": self.prev_state_events,
"app_service_id": self.app_service.id if self.app_service else None
})
}
@staticmethod
@defer.inlineCallbacks
def deserialize(store, input):
"""Converts a dict that was produced by `serialize` back into a
EventContext.
@@ -157,115 +122,32 @@ class EventContext(object):
EventContext
"""
context = EventContext()
context.state_group = input["state_group"]
context.rejected = input["rejected"]
context.prev_group = input["prev_group"]
context.delta_ids = _decode_state_dict(input["delta_ids"])
context.prev_state_events = input["prev_state_events"]
# We use the state_group and prev_state_id stuff to pull the
# current_state_ids out of the DB and construct prev_state_ids.
context._prev_state_id = input["prev_state_id"]
context._event_type = input["event_type"]
context._event_state_key = input["event_state_key"]
prev_state_id = input["prev_state_id"]
event_type = input["event_type"]
event_state_key = input["event_state_key"]
context._current_state_ids = None
context._prev_state_ids = None
context._fetching_state_deferred = None
context.state_group = input["state_group"]
context.prev_group = input["prev_group"]
context.delta_ids = _decode_state_dict(input["delta_ids"])
context.rejected = input["rejected"]
context.prev_state_events = input["prev_state_events"]
context.current_state_ids = yield store.get_state_ids_for_group(
context.state_group,
)
if prev_state_id and event_state_key:
context.prev_state_ids = dict(context.current_state_ids)
context.prev_state_ids[(event_type, event_state_key)] = prev_state_id
else:
context.prev_state_ids = context.current_state_ids
app_service_id = input["app_service_id"]
if app_service_id:
context.app_service = store.get_app_service_by_id(app_service_id)
return context
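Both versions of serialize/deserialize share one contract: only JSON-safe scalars cross the replication wire, and the state maps are rebuilt from the store on the receiving side. A toy round trip under that contract (FakeStore and the field set are illustrative, not Synapse's API):

    import json

    class FakeStore(object):
        def get_state_ids_for_group(self, state_group):
            # stands in for the DB lookup the real deserialize performs
            return {("m.room.member", "@u:hs"): "$event_a"}

    def serialize_ctx(state_group, rejected):
        # only JSON-safe scalars go over the wire
        return json.dumps({"state_group": state_group, "rejected": rejected})

    def deserialize_ctx(store, wire):
        ctx = json.loads(wire)
        # the state map is re-fetched from the store, not deserialized
        ctx["current_state_ids"] = store.get_state_ids_for_group(ctx["state_group"])
        return ctx

    ctx = deserialize_ctx(FakeStore(), serialize_ctx(42, False))
    assert ctx["current_state_ids"]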
@defer.inlineCallbacks
def get_current_state_ids(self, store):
"""Gets the current state IDs
Returns:
Deferred[dict[(str, str), str]|None]: Returns None if state_group
is None, which happens when the associated event is an outlier.
"""
if not self._fetching_state_deferred:
self._fetching_state_deferred = run_in_background(
self._fill_out_state, store,
)
yield make_deferred_yieldable(self._fetching_state_deferred)
defer.returnValue(self._current_state_ids)
@defer.inlineCallbacks
def get_prev_state_ids(self, store):
"""Gets the prev state IDs
Returns:
Deferred[dict[(str, str), str]|None]: Returns None if state_group
is None, which happens when the associated event is an outlier.
"""
if not self._fetching_state_deferred:
self._fetching_state_deferred = run_in_background(
self._fill_out_state, store,
)
yield make_deferred_yieldable(self._fetching_state_deferred)
defer.returnValue(self._prev_state_ids)
def get_cached_current_state_ids(self):
"""Gets the current state IDs if we have them already cached.
Returns:
dict[(str, str), str]|None: Returns None if we haven't cached the
state or if state_group is None, which happens when the associated
event is an outlier.
"""
return self._current_state_ids
@defer.inlineCallbacks
def _fill_out_state(self, store):
"""Called to populate the _current_state_ids and _prev_state_ids
attributes by loading from the database.
"""
if self.state_group is None:
return
self._current_state_ids = yield store.get_state_ids_for_group(
self.state_group,
)
if self._prev_state_id and self._event_state_key is not None:
self._prev_state_ids = dict(self._current_state_ids)
key = (self._event_type, self._event_state_key)
self._prev_state_ids[key] = self._prev_state_id
else:
self._prev_state_ids = self._current_state_ids
@defer.inlineCallbacks
def update_state(self, state_group, prev_state_ids, current_state_ids,
prev_group, delta_ids):
"""Replace the state in the context
"""
# We need to make sure we wait for any ongoing fetching of state
# to complete so that the updated state doesn't get clobbered
if self._fetching_state_deferred:
yield make_deferred_yieldable(self._fetching_state_deferred)
self.state_group = state_group
self._prev_state_ids = prev_state_ids
self.prev_group = prev_group
self._current_state_ids = current_state_ids
self.delta_ids = delta_ids
# We need to ensure that that we've marked as having fetched the state
self._fetching_state_deferred = defer.succeed(None)
defer.returnValue(context)
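On the newer side, both get_current_state_ids and get_prev_state_ids funnel through a single _fetching_state_deferred, so concurrent callers share one database fetch. A synchronous sketch of that memoization (Deferreds elided; names illustrative):

    class LazyStateContext(object):
        def __init__(self, state_group):
            self.state_group = state_group
            self._current_state_ids = None
            self._filled = False  # plays the role of _fetching_state_deferred

        def get_current_state_ids(self, store):
            if not self._filled:
                # the first caller triggers the fetch; later callers reuse it
                self._current_state_ids = store.get_state_ids_for_group(
                    self.state_group,
                )
                self._filled = True
            return self._current_state_ids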
def _encode_state_dict(state_dict):
@@ -277,7 +159,7 @@ def _encode_state_dict(state_dict):
return [
(etype, state_key, v)
for (etype, state_key), v in iteritems(state_dict)
for (etype, state_key), v in state_dict.iteritems()
]
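_encode_state_dict exists because JSON objects cannot have tuple keys, so the (type, state_key) pairs are flattened into triples; _decode_state_dict (not shown in this hunk) is assumed to be its inverse. A round-trip sketch:

    def encode_state_dict(state_dict):
        # flatten {(etype, state_key): event_id} into JSON-safe triples
        return [(etype, state_key, v) for (etype, state_key), v in state_dict.items()]

    def decode_state_dict(triples):
        return {(etype, state_key): v for etype, state_key, v in triples}

    d = {("m.room.member", "@u:hs"): "$ev"}
    assert decode_state_dict(encode_state_dict(d)) == d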


@@ -13,15 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from six import string_types
from synapse.api.constants import EventTypes
from . import EventBase
from frozendict import frozendict
from synapse.api.constants import EventTypes
from . import EventBase
import re
# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
# (?<!stuff) matches if the current position in the string is not preceded
@@ -280,7 +277,7 @@ def serialize_event(e, time_now_ms, as_client_event=True,
if only_event_fields:
if (not isinstance(only_event_fields, list) or
not all(isinstance(f, string_types) for f in only_event_fields)):
not all(isinstance(f, basestring) for f in only_event_fields)):
raise TypeError("only_event_fields must be a list of strings")
d = only_fields(d, only_event_fields)
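The six.string_types change makes the same isinstance check work on both Python 2 (str and unicode) and Python 3 (str only), where basestring no longer exists. For instance:

    from six import string_types

    def validate_fields(only_event_fields):
        if (not isinstance(only_event_fields, list) or
                not all(isinstance(f, string_types) for f in only_event_fields)):
            raise TypeError("only_event_fields must be a list of strings")

    validate_fields(["content.body", "type"])  # ok on py2 and py3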


@@ -13,11 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from six import string_types
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.types import EventID, RoomID, UserID
from synapse.api.errors import SynapseError
from synapse.api.constants import EventTypes, Membership
class EventValidator(object):
@@ -51,7 +49,7 @@ class EventValidator(object):
strings.append("state_key")
for s in strings:
if not isinstance(getattr(event, s), string_types):
if not isinstance(getattr(event, s), basestring):
raise SynapseError(400, "'%s' is not a string type" % (s,))
if event.type == EventTypes.Member:
@@ -90,5 +88,5 @@ class EventValidator(object):
for s in keys:
if s not in d:
raise SynapseError(400, "'%s' not in content" % (s,))
if not isinstance(d[s], string_types):
if not isinstance(d[s], basestring):
raise SynapseError(400, "'%s' is not a string type" % (s,))


@@ -14,17 +14,13 @@
# limitations under the License.
import logging
import six
from twisted.internet import defer
from synapse.api.constants import MAX_DEPTH
from synapse.api.errors import Codes, SynapseError
from synapse.api.errors import SynapseError
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import FrozenEvent
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict
from synapse.util import logcontext, unwrapFirstError
from synapse.http.servlet import assert_params_in_request
from synapse.util import unwrapFirstError, logcontext
from twisted.internet import defer
logger = logging.getLogger(__name__)
@@ -194,23 +190,11 @@ def event_from_pdu_json(pdu_json, outlier=False):
FrozenEvent
Raises:
SynapseError: if the pdu is missing required fields or is otherwise
not a valid matrix event
SynapseError: if the pdu is missing required fields
"""
# we could probably enforce a bunch of other fields here (room_id, sender,
# origin, etc etc)
assert_params_in_dict(pdu_json, ('event_id', 'type', 'depth'))
depth = pdu_json['depth']
if not isinstance(depth, six.integer_types):
raise SynapseError(400, "Depth %r not an integer" % (depth, ),
Codes.BAD_JSON)
if depth < 0:
raise SynapseError(400, "Depth too small", Codes.BAD_JSON)
elif depth > MAX_DEPTH:
raise SynapseError(400, "Depth too large", Codes.BAD_JSON)
assert_params_in_request(pdu_json, ('event_id', 'type'))
event = FrozenEvent(
pdu_json
)
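The stricter version of event_from_pdu_json (left-hand side) rejects a PDU whose depth is missing, non-integral, negative, or above MAX_DEPTH. A standalone sketch of that bound check (the MAX_DEPTH value here is illustrative):

    import six

    MAX_DEPTH = 2 ** 63 - 1  # illustrative; synapse defines its own constant

    def check_depth(pdu_json):
        depth = pdu_json.get("depth")
        if not isinstance(depth, six.integer_types):
            raise ValueError("Depth %r not an integer" % (depth,))
        if depth < 0:
            raise ValueError("Depth too small")
        if depth > MAX_DEPTH:
            raise ValueError("Depth too large")

    check_depth({"depth": 5})  # passes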


@@ -19,42 +19,36 @@ import itertools
import logging
import random
from six.moves import range
from prometheus_client import Counter
from twisted.internet import defer
from synapse.api.constants import Membership
from synapse.api.errors import (
CodeMessageException,
FederationDeniedError,
HttpResponseException,
SynapseError,
CodeMessageException, HttpResponseException, SynapseError, FederationDeniedError
)
from synapse.events import builder
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.federation_base import (
FederationBase,
event_from_pdu_json,
)
import synapse.metrics
from synapse.util import logcontext, unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
from synapse.util.logutils import log_function
from synapse.util.retryutils import NotRetryingDestination
logger = logging.getLogger(__name__)
sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["type"])
# synapse.federation.federation_client is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.client")
sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])
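The metrics migration visible here swaps Synapse's homegrown registry (register_counter(..., labels=[...]) plus .inc(type)) for prometheus_client, where the label value is bound with .labels(...) before incrementing. A minimal sketch:

    from prometheus_client import Counter

    sent_queries = Counter("example_sent_queries", "queries sent", ["type"])

    # old style: counter.inc(query_type)
    # new style: bind the label value first, then increment
    sent_queries.labels("profile").inc()
    sent_queries.labels("directory").inc(2)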
PDU_RETRY_TIME_MS = 1 * 60 * 1000
class InvalidResponseError(RuntimeError):
"""Helper for _try_destination_list: indicates that the server returned a response
we couldn't parse
"""
pass
class FederationClient(FederationBase):
def __init__(self, hs):
super(FederationClient, self).__init__(hs)
@@ -112,7 +106,7 @@ class FederationClient(FederationBase):
a Deferred which will eventually yield a JSON object from the
response
"""
sent_queries_counter.labels(query_type).inc()
sent_queries_counter.inc(query_type)
return self.transport_layer.make_query(
destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail,
@@ -131,7 +125,7 @@ class FederationClient(FederationBase):
a Deferred which will eventually yield a JSON object from the
response
"""
sent_queries_counter.labels("client_device_keys").inc()
sent_queries_counter.inc("client_device_keys")
return self.transport_layer.query_client_keys(
destination, content, timeout
)
@@ -141,7 +135,7 @@ class FederationClient(FederationBase):
"""Query the device keys for a list of user ids hosted on a remote
server.
"""
sent_queries_counter.labels("user_devices").inc()
sent_queries_counter.inc("user_devices")
return self.transport_layer.query_user_devices(
destination, user_id, timeout
)
@@ -158,7 +152,7 @@ class FederationClient(FederationBase):
a Deferred which will eventually yield a JSON object from the
response
"""
sent_queries_counter.labels("client_one_time_keys").inc()
sent_queries_counter.inc("client_one_time_keys")
return self.transport_layer.claim_client_keys(
destination, content, timeout
)
@@ -398,9 +392,9 @@ class FederationClient(FederationBase):
"""
if return_local:
seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
signed_events = list(seen_events.values())
signed_events = seen_events.values()
else:
seen_events = yield self.store.have_seen_events(event_ids)
seen_events = yield self.store.have_events(event_ids)
signed_events = []
failed_to_fetch = set()
@@ -419,12 +413,11 @@ class FederationClient(FederationBase):
batch_size = 20
missing_events = list(missing_events)
for i in range(0, len(missing_events), batch_size):
for i in xrange(0, len(missing_events), batch_size):
batch = set(missing_events[i:i + batch_size])
deferreds = [
run_in_background(
self.get_pdu,
preserve_fn(self.get_pdu)(
destinations=random_server_list(),
event_id=e_id,
)
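Aside from the range/xrange spelling, this is a standard slice-based batching loop. A self-contained sketch of the shape:

    from six.moves import range

    def batches(items, batch_size=20):
        items = list(items)
        for i in range(0, len(items), batch_size):
            yield items[i:i + batch_size]

    assert [len(b) for b in batches(range(45), 20)] == [20, 20, 5]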
@@ -465,61 +458,6 @@ class FederationClient(FederationBase):
defer.returnValue(signed_auth)
@defer.inlineCallbacks
def _try_destination_list(self, description, destinations, callback):
"""Try an operation on a series of servers, until it succeeds
Args:
description (unicode): description of the operation we're doing, for logging
destinations (Iterable[unicode]): list of server_names to try
callback (callable): Function to run for each server. Passed a single
argument: the server_name to try. May return a deferred.
If the callback raises a CodeMessageException with a 300/400 code,
attempts to perform the operation stop immediately and the exception is
reraised.
Otherwise, if the callback raises an Exception the error is logged and the
next server tried. Normally the stacktrace is logged but this is
suppressed if the exception is an InvalidResponseError.
Returns:
The [Deferred] result of callback, if it succeeds
Raises:
SynapseError if the chosen remote server returns a 300/400 code.
RuntimeError if no servers were reachable.
"""
for destination in destinations:
if destination == self.server_name:
continue
try:
res = yield callback(destination)
defer.returnValue(res)
except InvalidResponseError as e:
logger.warn(
"Failed to %s via %s: %s",
description, destination, e,
)
except HttpResponseException as e:
if not 500 <= e.code < 600:
raise e.to_synapse_error()
else:
logger.warn(
"Failed to %s via %s: %i %s",
description, destination, e.code, e.message,
)
except Exception:
logger.warn(
"Failed to %s via %s",
description, destination, exc_info=1,
)
raise RuntimeError("Failed to %s via any server" % (description,))
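Stripped of Deferreds and the 4xx fast-fail, the helper is a plain retry-over-servers loop; a synchronous sketch:

    def try_destination_list(description, destinations, callback, local_name):
        # try each remote server in turn until one succeeds
        for destination in destinations:
            if destination == local_name:
                continue  # never loop back to ourselves
            try:
                return callback(destination)
            except Exception as e:
                print("Failed to %s via %s: %s" % (description, destination, e))
        raise RuntimeError("Failed to %s via any server" % (description,))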
def make_membership_event(self, destinations, room_id, user_id, membership,
content={},):
"""
@@ -543,7 +481,7 @@ class FederationClient(FederationBase):
Deferred: resolves to a tuple of (origin (str), event (object))
where origin is the remote homeserver which generated the event.
Fails with a ``SynapseError`` if the chosen remote server
Fails with a ``CodeMessageException`` if the chosen remote server
returns a 300/400 code.
Fails with a ``RuntimeError`` if no servers were reachable.
@@ -554,35 +492,50 @@ class FederationClient(FederationBase):
"make_membership_event called with membership='%s', must be one of %s" %
(membership, ",".join(valid_memberships))
)
for destination in destinations:
if destination == self.server_name:
continue
@defer.inlineCallbacks
def send_request(destination):
ret = yield self.transport_layer.make_membership_event(
destination, room_id, user_id, membership
)
try:
ret = yield self.transport_layer.make_membership_event(
destination, room_id, user_id, membership
)
pdu_dict = ret["event"]
pdu_dict = ret["event"]
logger.debug("Got response to make_%s: %s", membership, pdu_dict)
logger.debug("Got response to make_%s: %s", membership, pdu_dict)
pdu_dict["content"].update(content)
pdu_dict["content"].update(content)
# The protoevent received over the JSON wire may not have all
# the required fields. Let's just gloss over that, because
# there are some we never care about
if "prev_state" not in pdu_dict:
pdu_dict["prev_state"] = []
# The protoevent received over the JSON wire may not have all
# the required fields. Let's just gloss over that, because
# there are some we never care about
if "prev_state" not in pdu_dict:
pdu_dict["prev_state"] = []
ev = builder.EventBuilder(pdu_dict)
ev = builder.EventBuilder(pdu_dict)
defer.returnValue(
(destination, ev)
)
defer.returnValue(
(destination, ev)
)
break
except CodeMessageException as e:
if not 500 <= e.code < 600:
raise
else:
logger.warn(
"Failed to make_%s via %s: %s",
membership, destination, e.message
)
except Exception as e:
logger.warn(
"Failed to make_%s via %s: %s",
membership, destination, e.message
)
return self._try_destination_list(
"make_" + membership, destinations, send_request,
)
raise RuntimeError("Failed to send to any server.")
@defer.inlineCallbacks
def send_join(self, destinations, pdu):
"""Sends a join event to one of a list of homeservers.
@@ -599,91 +552,103 @@ class FederationClient(FederationBase):
giving the server the event was sent to, ``state`` (?) and
``auth_chain``.
Fails with a ``SynapseError`` if the chosen remote server
Fails with a ``CodeMessageException`` if the chosen remote server
returns a 300/400 code.
Fails with a ``RuntimeError`` if no servers were reachable.
"""
@defer.inlineCallbacks
def send_request(destination):
time_now = self._clock.time_msec()
_, content = yield self.transport_layer.send_join(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)
for destination in destinations:
if destination == self.server_name:
continue
logger.debug("Got content: %s", content)
try:
time_now = self._clock.time_msec()
_, content = yield self.transport_layer.send_join(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)
state = [
event_from_pdu_json(p, outlier=True)
for p in content.get("state", [])
]
logger.debug("Got content: %s", content)
auth_chain = [
event_from_pdu_json(p, outlier=True)
for p in content.get("auth_chain", [])
]
state = [
event_from_pdu_json(p, outlier=True)
for p in content.get("state", [])
]
pdus = {
p.event_id: p
for p in itertools.chain(state, auth_chain)
}
auth_chain = [
event_from_pdu_json(p, outlier=True)
for p in content.get("auth_chain", [])
]
valid_pdus = yield self._check_sigs_and_hash_and_fetch(
destination, list(pdus.values()),
outlier=True,
)
pdus = {
p.event_id: p
for p in itertools.chain(state, auth_chain)
}
valid_pdus_map = {
p.event_id: p
for p in valid_pdus
}
valid_pdus = yield self._check_sigs_and_hash_and_fetch(
destination, pdus.values(),
outlier=True,
)
# NB: We *need* to copy to ensure that we don't have multiple
# references being passed on, as that causes... issues.
signed_state = [
copy.copy(valid_pdus_map[p.event_id])
for p in state
if p.event_id in valid_pdus_map
]
valid_pdus_map = {
p.event_id: p
for p in valid_pdus
}
signed_auth = [
valid_pdus_map[p.event_id]
for p in auth_chain
if p.event_id in valid_pdus_map
]
# NB: We *need* to copy to ensure that we don't have multiple
# references being passed on, as that causes... issues.
signed_state = [
copy.copy(valid_pdus_map[p.event_id])
for p in state
if p.event_id in valid_pdus_map
]
# NB: We *need* to copy to ensure that we don't have multiple
# references being passed on, as that causes... issues.
for s in signed_state:
s.internal_metadata = copy.deepcopy(s.internal_metadata)
signed_auth = [
valid_pdus_map[p.event_id]
for p in auth_chain
if p.event_id in valid_pdus_map
]
auth_chain.sort(key=lambda e: e.depth)
# NB: We *need* to copy to ensure that we don't have multiple
# references being passed on, as that causes... issues.
for s in signed_state:
s.internal_metadata = copy.deepcopy(s.internal_metadata)
defer.returnValue({
"state": signed_state,
"auth_chain": signed_auth,
"origin": destination,
})
return self._try_destination_list("send_join", destinations, send_request)
auth_chain.sort(key=lambda e: e.depth)
defer.returnValue({
"state": signed_state,
"auth_chain": signed_auth,
"origin": destination,
})
except CodeMessageException as e:
if not 500 <= e.code < 600:
raise
else:
logger.exception(
"Failed to send_join via %s: %s",
destination, e.message
)
except Exception as e:
logger.exception(
"Failed to send_join via %s: %s",
destination, e.message
)
raise RuntimeError("Failed to send to any server.")
@defer.inlineCallbacks
def send_invite(self, destination, room_id, event_id, pdu):
time_now = self._clock.time_msec()
try:
code, content = yield self.transport_layer.send_invite(
destination=destination,
room_id=room_id,
event_id=event_id,
content=pdu.get_pdu_json(time_now),
)
except HttpResponseException as e:
if e.code == 403:
raise e.to_synapse_error()
raise
code, content = yield self.transport_layer.send_invite(
destination=destination,
room_id=room_id,
event_id=event_id,
content=pdu.get_pdu_json(time_now),
)
pdu_dict = content["event"]
@@ -698,6 +663,7 @@ class FederationClient(FederationBase):
defer.returnValue(pdu)
@defer.inlineCallbacks
def send_leave(self, destinations, pdu):
"""Sends a leave event to one of a list of homeservers.
@@ -714,25 +680,35 @@ class FederationClient(FederationBase):
Return:
Deferred: resolves to None.
Fails with a ``SynapseError`` if the chosen remote server
returns a 300/400 code.
Fails with a ``CodeMessageException`` if the chosen remote server
returns a non-200 code.
Fails with a ``RuntimeError`` if no servers were reachable.
"""
@defer.inlineCallbacks
def send_request(destination):
time_now = self._clock.time_msec()
_, content = yield self.transport_layer.send_leave(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)
for destination in destinations:
if destination == self.server_name:
continue
logger.debug("Got content: %s", content)
defer.returnValue(None)
try:
time_now = self._clock.time_msec()
_, content = yield self.transport_layer.send_leave(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)
return self._try_destination_list("send_leave", destinations, send_request)
logger.debug("Got content: %s", content)
defer.returnValue(None)
except CodeMessageException:
raise
except Exception as e:
logger.exception(
"Failed to send_leave via %s: %s",
destination, e.message
)
raise RuntimeError("Failed to send to any server.")
def get_public_rooms(self, destination, limit=None, since_token=None,
search_filter=None, include_all_networks=False,


@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,28 +13,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import six
from six import iteritems
from canonicaljson import json
from prometheus_client import Counter
import simplejson as json
from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
from synapse.api.constants import EventTypes
from synapse.api.errors import AuthError, FederationError, NotFoundError, SynapseError
from synapse.api.errors import AuthError, FederationError, SynapseError, NotFoundError
from synapse.crypto.event_signing import compute_event_signature
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.federation_base import (
FederationBase,
event_from_pdu_json,
)
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.endpoint import parse_server_name
import synapse.metrics
from synapse.types import get_domain_from_id
from synapse.util import async
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
from synapse.util.logutils import log_function
# when processing incoming transactions, we try to handle multiple rooms in
@@ -44,17 +39,17 @@ TRANSACTION_CONCURRENCY_LIMIT = 10
logger = logging.getLogger(__name__)
received_pdus_counter = Counter("synapse_federation_server_received_pdus", "")
# synapse.federation.federation_server is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.server")
received_edus_counter = Counter("synapse_federation_server_received_edus", "")
received_pdus_counter = metrics.register_counter("received_pdus")
received_queries_counter = Counter(
"synapse_federation_server_received_queries", "", ["type"]
)
received_edus_counter = metrics.register_counter("received_edus")
received_queries_counter = metrics.register_counter("received_queries", labels=["type"])
class FederationServer(FederationBase):
def __init__(self, hs):
super(FederationServer, self).__init__(hs)
@@ -76,9 +71,6 @@ class FederationServer(FederationBase):
@log_function
def on_backfill_request(self, origin, room_id, versions, limit):
with (yield self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, room_id)
pdus = yield self.handler.on_backfill_request(
origin, room_id, versions, limit
)
@@ -137,9 +129,7 @@ class FederationServer(FederationBase):
logger.debug("[%s] Transaction is new", transaction.transaction_id)
received_pdus_counter.inc(len(transaction.pdus))
origin_host, _ = parse_server_name(transaction.origin)
received_pdus_counter.inc_by(len(transaction.pdus))
pdus_by_room = {}
@@ -161,21 +151,9 @@ class FederationServer(FederationBase):
# we can process different rooms in parallel (which is useful if they
# require callouts to other servers to fetch missing events), but
# impose a limit to avoid going too crazy with ram/cpu.
@defer.inlineCallbacks
def process_pdus_for_room(room_id):
logger.debug("Processing PDUs for %s", room_id)
try:
yield self.check_server_matches_acl(origin_host, room_id)
except AuthError as e:
logger.warn(
"Ignoring PDUs for room %s from banned server", room_id,
)
for pdu in pdus_by_room[room_id]:
event_id = pdu.event_id
pdu_results[event_id] = e.error_dict()
return
for pdu in pdus_by_room[room_id]:
event_id = pdu.event_id
try:
@@ -187,12 +165,8 @@ class FederationServer(FederationBase):
logger.warn("Error handling PDU %s: %s", event_id, e)
pdu_results[event_id] = {"error": str(e)}
except Exception as e:
f = failure.Failure()
pdu_results[event_id] = {"error": str(e)}
logger.error(
"Failed to handle PDU %s: %s",
event_id, f.getTraceback().rstrip(),
)
logger.exception("Failed to handle PDU %s", event_id)
yield async.concurrently_execute(
process_pdus_for_room, pdus_by_room.keys(),
@@ -207,6 +181,10 @@ class FederationServer(FederationBase):
edu.content
)
pdu_failures = getattr(transaction, "pdu_failures", [])
for failure in pdu_failures:
logger.info("Got failure %r", failure)
response = {
"pdus": pdu_results,
}
@@ -230,24 +208,20 @@ class FederationServer(FederationBase):
if not event_id:
raise NotImplementedError("Specify an event")
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, room_id)
in_room = yield self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
# we grab the linearizer to protect ourselves from servers which hammer
# us. In theory we might already have the response to this query
# in the cache so we could return it without waiting for the linearizer
# - but that's non-trivial to get right, and anyway somewhat defeats
# the point of the linearizer.
with (yield self._server_linearizer.queue((origin, room_id))):
resp = yield self._state_resp_cache.wrap(
(room_id, event_id),
self._on_context_state_request_compute,
room_id, event_id,
)
result = self._state_resp_cache.get((room_id, event_id))
if not result:
with (yield self._server_linearizer.queue((origin, room_id))):
d = self._state_resp_cache.set(
(room_id, event_id),
preserve_fn(self._on_context_state_request_compute)(room_id, event_id)
)
resp = yield make_deferred_yieldable(d)
else:
resp = yield make_deferred_yieldable(result)
defer.returnValue((200, resp))
@@ -256,9 +230,6 @@ class FederationServer(FederationBase):
if not event_id:
raise NotImplementedError("Specify an event")
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, room_id)
in_room = yield self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
@@ -302,7 +273,7 @@ class FederationServer(FederationBase):
@defer.inlineCallbacks
@log_function
def on_pdu_request(self, origin, event_id):
pdu = yield self.handler.get_persisted_pdu(origin, event_id)
pdu = yield self._get_persisted_pdu(origin, event_id)
if pdu:
defer.returnValue(
@@ -318,14 +289,12 @@ class FederationServer(FederationBase):
@defer.inlineCallbacks
def on_query_request(self, query_type, args):
received_queries_counter.labels(query_type).inc()
received_queries_counter.inc(query_type)
resp = yield self.registry.on_query(query_type, args)
defer.returnValue((200, resp))
@defer.inlineCallbacks
def on_make_join_request(self, origin, room_id, user_id):
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, room_id)
def on_make_join_request(self, room_id, user_id):
pdu = yield self.handler.on_make_join_request(room_id, user_id)
time_now = self._clock.time_msec()
defer.returnValue({"event": pdu.get_pdu_json(time_now)})
@@ -333,8 +302,6 @@ class FederationServer(FederationBase):
@defer.inlineCallbacks
def on_invite_request(self, origin, content):
pdu = event_from_pdu_json(content)
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, pdu.room_id)
ret_pdu = yield self.handler.on_invite_request(origin, pdu)
time_now = self._clock.time_msec()
defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))
@@ -343,10 +310,6 @@ class FederationServer(FederationBase):
def on_send_join_request(self, origin, content):
logger.debug("on_send_join_request: content: %s", content)
pdu = event_from_pdu_json(content)
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, pdu.room_id)
logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
res_pdus = yield self.handler.on_send_join_request(origin, pdu)
time_now = self._clock.time_msec()
@@ -358,9 +321,7 @@ class FederationServer(FederationBase):
}))
@defer.inlineCallbacks
def on_make_leave_request(self, origin, room_id, user_id):
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, room_id)
def on_make_leave_request(self, room_id, user_id):
pdu = yield self.handler.on_make_leave_request(room_id, user_id)
time_now = self._clock.time_msec()
defer.returnValue({"event": pdu.get_pdu_json(time_now)})
@@ -369,10 +330,6 @@ class FederationServer(FederationBase):
def on_send_leave_request(self, origin, content):
logger.debug("on_send_leave_request: content: %s", content)
pdu = event_from_pdu_json(content)
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, pdu.room_id)
logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
yield self.handler.on_send_leave_request(origin, pdu)
defer.returnValue((200, {}))
@@ -380,9 +337,6 @@ class FederationServer(FederationBase):
@defer.inlineCallbacks
def on_event_auth(self, origin, room_id, event_id):
with (yield self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, room_id)
time_now = self._clock.time_msec()
auth_pdus = yield self.handler.on_event_auth(event_id)
res = {
@@ -411,9 +365,6 @@ class FederationServer(FederationBase):
Deferred: Results in `dict` with the same format as `content`
"""
with (yield self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, room_id)
auth_chain = [
event_from_pdu_json(e)
for e in content["auth_chain"]
@@ -426,7 +377,6 @@ class FederationServer(FederationBase):
ret = yield self.handler.on_query_auth(
origin,
event_id,
room_id,
signed_auth,
content.get("rejects", []),
content.get("missing", []),
@@ -475,9 +425,9 @@ class FederationServer(FederationBase):
"Claimed one-time-keys: %s",
",".join((
"%s for %s:%s" % (key_id, user_id, device_id)
for user_id, user_keys in iteritems(json_result)
for device_id, device_keys in iteritems(user_keys)
for key_id, _ in iteritems(device_keys)
for user_id, user_keys in json_result.iteritems()
for device_id, device_keys in user_keys.iteritems()
for key_id, _ in device_keys.iteritems()
)),
)
@@ -488,9 +438,6 @@ class FederationServer(FederationBase):
def on_get_missing_events(self, origin, room_id, earliest_events,
latest_events, limit, min_depth):
with (yield self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, room_id)
logger.info(
"on_get_missing_events: earliest_events: %r, latest_events: %r,"
" limit: %d, min_depth: %d",
@@ -519,6 +466,17 @@ class FederationServer(FederationBase):
ts_now_ms = self._clock.time_msec()
return self.store.get_user_id_for_open_id_token(token, ts_now_ms)
@log_function
def _get_persisted_pdu(self, origin, event_id, do_auth=True):
""" Get a PDU from the database with given origin and id.
Returns:
Deferred: Results in a `Pdu`.
"""
return self.handler.get_persisted_pdu(
origin, event_id, do_auth=do_auth
)
def _transaction_from_pdus(self, pdu_list):
"""Returns a new Transaction containing the given PDUs suitable for
transmission.
@@ -536,33 +494,13 @@ class FederationServer(FederationBase):
def _handle_received_pdu(self, origin, pdu):
""" Process a PDU received in a federation /send/ transaction.
If the event is invalid, then this method throws a FederationError.
(The error will then be logged and sent back to the sender (which
probably won't do anything with it), and other events in the
transaction will be processed as normal).
It is likely that we'll then receive other events which refer to
this rejected_event in their prev_events, etc. When that happens,
we'll attempt to fetch the rejected event again, which will presumably
fail, so those second-generation events will also get rejected.
Eventually, we get to the point where there are more than 10 events
between any new events and the original rejected event. Since we
only try to backfill 10 events deep on received pdu, we then accept the
new event, possibly introducing a discontinuity in the DAG, with new
forward extremities, so normal service is approximately returned,
until we try to backfill across the discontinuity.
Args:
origin (str): server which sent the pdu
pdu (FrozenEvent): received pdu
Returns (Deferred): completes with None
Raises: FederationError if the signatures / hash do not match, or
if the event was unacceptable for any other reason (eg, too large,
too many prev_events, couldn't find the prev_events)
"""
Raises: FederationError if the signatures / hash do not match
"""
# check that it's actually being sent from a valid destination to
# workaround bug #1753 in 0.18.5 and 0.18.6
if origin != get_domain_from_id(pdu.event_id):
@@ -598,9 +536,7 @@ class FederationServer(FederationBase):
affected=pdu.event_id,
)
yield self.handler.on_receive_pdu(
origin, pdu, get_missing=True, sent_to_us_directly=True,
)
yield self.handler.on_receive_pdu(origin, pdu, get_missing=True)
def __str__(self):
return "<ReplicationLayer(%s)>" % self.server_name
@@ -628,101 +564,6 @@ class FederationServer(FederationBase):
)
defer.returnValue(ret)
@defer.inlineCallbacks
def check_server_matches_acl(self, server_name, room_id):
"""Check if the given server is allowed by the server ACLs in the room
Args:
server_name (str): name of server, *without any port part*
room_id (str): ID of the room to check
Raises:
AuthError if the server does not match the ACL
"""
state_ids = yield self.store.get_current_state_ids(room_id)
acl_event_id = state_ids.get((EventTypes.ServerACL, ""))
if not acl_event_id:
return
acl_event = yield self.store.get_event(acl_event_id)
if server_matches_acl_event(server_name, acl_event):
return
raise AuthError(code=403, msg="Server is banned from room")
def server_matches_acl_event(server_name, acl_event):
"""Check if the given server is allowed by the ACL event
Args:
server_name (str): name of server, without any port part
acl_event (EventBase): m.room.server_acl event
Returns:
bool: True if this server is allowed by the ACLs
"""
logger.debug("Checking %s against acl %s", server_name, acl_event.content)
# first of all, check if literal IPs are blocked, and if so, whether the
# server name is a literal IP
allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
if not isinstance(allow_ip_literals, bool):
logger.warn("Ignoring non-bool allow_ip_literals flag")
allow_ip_literals = True
if not allow_ip_literals:
# check for ipv6 literals. These start with '['.
if server_name[0] == '[':
return False
# check for ipv4 literals. We can just lift the routine from twisted.
if isIPAddress(server_name):
return False
# next, check the deny list
deny = acl_event.content.get("deny", [])
if not isinstance(deny, (list, tuple)):
logger.warn("Ignoring non-list deny ACL %s", deny)
deny = []
for e in deny:
if _acl_entry_matches(server_name, e):
# logger.info("%s matched deny rule %s", server_name, e)
return False
# then the allow list.
allow = acl_event.content.get("allow", [])
if not isinstance(allow, (list, tuple)):
logger.warn("Ignoring non-list allow ACL %s", allow)
allow = []
for e in allow:
if _acl_entry_matches(server_name, e):
# logger.info("%s matched allow rule %s", server_name, e)
return True
# everything else should be rejected.
# logger.info("%s fell through", server_name)
return False
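Putting the rules together: IP literals are (optionally) rejected first, then the deny list is consulted, then the allow list, and anything unmatched falls through to rejection. A worked example against a typical m.room.server_acl content:

    acl_content = {
        "allow_ip_literals": False,
        "deny": ["*.evil.example"],
        "allow": ["*"],
    }
    # "1.2.3.4"           -> rejected: IP literal
    # "spam.evil.example" -> rejected: matches a deny entry
    # "matrix.org"        -> allowed: matches an allow entry
    # with an empty "allow" list, every server would be rejected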
def _acl_entry_matches(server_name, acl_entry):
if not isinstance(acl_entry, six.string_types):
logger.warn("Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry))
return False
regex = _glob_to_regex(acl_entry)
return regex.match(server_name)
def _glob_to_regex(glob):
res = ''
for c in glob:
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
else:
res = res + re.escape(c)
return re.compile(res + "\\Z", re.IGNORECASE)
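_glob_to_regex supports only * and ?; everything else is escaped literally, and the match is anchored at the end with \Z and made case-insensitive. For example:

    import re

    def glob_to_regex(glob):
        res = ''
        for c in glob:
            if c == '*':
                res = res + '.*'
            elif c == '?':
                res = res + '.'
            else:
                res = res + re.escape(c)
        return re.compile(res + "\\Z", re.IGNORECASE)

    assert glob_to_regex("*.example.com").match("Host.Example.COM")
    assert not glob_to_regex("*.example.com").match("example.org")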
class FederationHandlerRegistry(object):
"""Allows classes to register themselves as handlers for a given EDU or


@@ -19,12 +19,13 @@ package.
These actions are mostly only used by the :py:mod:`.replication` module.
"""
import logging
from twisted.internet import defer
from synapse.util.logutils import log_function
import logging
logger = logging.getLogger(__name__)


@@ -29,22 +29,23 @@ dead worker doesn't cause the queues to grow limitlessly.
Events are replicated via a separate events stream.
"""
import logging
from collections import namedtuple
from six import iteritems, itervalues
from sortedcontainers import SortedDict
from synapse.metrics import LaterGauge
from synapse.storage.presence import UserPresenceState
from synapse.util.metrics import Measure
from .units import Edu
from synapse.storage.presence import UserPresenceState
from synapse.util.metrics import Measure
import synapse.metrics
from blist import sorteddict
from collections import namedtuple
import logging
logger = logging.getLogger(__name__)
metrics = synapse.metrics.get_metrics_for(__name__)
class FederationRemoteSendQueue(object):
"""A drop in replacement for TransactionQueue"""
@@ -55,29 +56,33 @@ class FederationRemoteSendQueue(object):
self.is_mine_id = hs.is_mine_id
self.presence_map = {} # Pending presence map user_id -> UserPresenceState
self.presence_changed = SortedDict() # Stream position -> user_id
self.presence_changed = sorteddict() # Stream position -> user_id
self.keyed_edu = {} # (destination, key) -> EDU
self.keyed_edu_changed = SortedDict() # stream position -> (destination, key)
self.keyed_edu_changed = sorteddict() # stream position -> (destination, key)
self.edus = SortedDict() # stream position -> Edu
self.edus = sorteddict() # stream position -> Edu
self.device_messages = SortedDict() # stream position -> destination
self.failures = sorteddict() # stream position -> (destination, Failure)
self.device_messages = sorteddict() # stream position -> destination
self.pos = 1
self.pos_time = SortedDict()
self.pos_time = sorteddict()
# EVERYTHING IS SAD. In particular, python only makes new scopes when
# we make a new function, so we need to make a new function so the inner
# lambda binds to the queue rather than to the name of the queue which
# changes. ARGH.
def register(name, queue):
LaterGauge("synapse_federation_send_queue_%s_size" % (name,),
"", [], lambda: len(queue))
metrics.register_callback(
queue_name + "_size",
lambda: len(queue),
)
for queue_name in [
"presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed",
"edus", "device_messages", "pos_time",
"edus", "failures", "device_messages", "pos_time",
]:
register(queue_name, getattr(self, queue_name))
@@ -96,7 +101,7 @@ class FederationRemoteSendQueue(object):
now = self.clock.time_msec()
keys = self.pos_time.keys()
time = self.pos_time.bisect_left(now - FIVE_MINUTES_AGO)
time = keys.bisect_left(now - FIVE_MINUTES_AGO)
if not keys[:time]:
return
@@ -111,13 +116,13 @@ class FederationRemoteSendQueue(object):
with Measure(self.clock, "send_queue._clear"):
# Delete things out of presence maps
keys = self.presence_changed.keys()
i = self.presence_changed.bisect_left(position_to_delete)
i = keys.bisect_left(position_to_delete)
for key in keys[:i]:
del self.presence_changed[key]
user_ids = set(
user_id
for uids in itervalues(self.presence_changed)
for uids in self.presence_changed.itervalues()
for user_id in uids
)
@@ -129,7 +134,7 @@ class FederationRemoteSendQueue(object):
# Delete things out of keyed edus
keys = self.keyed_edu_changed.keys()
i = self.keyed_edu_changed.bisect_left(position_to_delete)
i = keys.bisect_left(position_to_delete)
for key in keys[:i]:
del self.keyed_edu_changed[key]
@@ -143,13 +148,19 @@ class FederationRemoteSendQueue(object):
# Delete things out of edu map
keys = self.edus.keys()
i = self.edus.bisect_left(position_to_delete)
i = keys.bisect_left(position_to_delete)
for key in keys[:i]:
del self.edus[key]
# Delete things out of failure map
keys = self.failures.keys()
i = keys.bisect_left(position_to_delete)
for key in keys[:i]:
del self.failures[key]
# Delete things out of device map
keys = self.device_messages.keys()
i = self.device_messages.bisect_left(position_to_delete)
i = keys.bisect_left(position_to_delete)
for key in keys[:i]:
del self.device_messages[key]
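On the sortedcontainers side of the diff, bisect_left is called on the SortedDict itself to count how many stream positions fall below the cut-off, and exactly those keys are dropped. A standalone sketch:

    from sortedcontainers import SortedDict

    edus = SortedDict({1: "a", 3: "b", 7: "c"})

    def clear_before(position_to_delete):
        keys = list(edus.keys())
        i = edus.bisect_left(position_to_delete)
        for key in keys[:i]:
            del edus[key]

    clear_before(4)
    assert list(edus.keys()) == [7]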
@@ -189,13 +200,20 @@ class FederationRemoteSendQueue(object):
# We only want to send presence for our own users, so lets always just
# filter here just in case.
local_states = list(filter(lambda s: self.is_mine_id(s.user_id), states))
local_states = filter(lambda s: self.is_mine_id(s.user_id), states)
self.presence_map.update({state.user_id: state for state in local_states})
self.presence_changed[pos] = [state.user_id for state in local_states]
self.notifier.on_new_replication_data()
def send_failure(self, failure, destination):
"""As per TransactionQueue"""
pos = self._next_pos()
self.failures[pos] = (destination, str(failure))
self.notifier.on_new_replication_data()
def send_device_messages(self, destination):
"""As per TransactionQueue"""
pos = self._next_pos()
@@ -235,12 +253,13 @@ class FederationRemoteSendQueue(object):
self._clear_queue_before_pos(federation_ack)
# Fetch changed presence
i = self.presence_changed.bisect_right(from_token)
j = self.presence_changed.bisect_right(to_token) + 1
keys = self.presence_changed.keys()
i = keys.bisect_right(from_token)
j = keys.bisect_right(to_token) + 1
dest_user_ids = [
(pos, user_id)
for pos, user_id_list in self.presence_changed.items()[i:j]
for user_id in user_id_list
for pos in keys[i:j]
for user_id in self.presence_changed[pos]
]
for (key, user_id) in dest_user_ids:
@@ -249,33 +268,48 @@ class FederationRemoteSendQueue(object):
)))
# Fetch changes keyed edus
i = self.keyed_edu_changed.bisect_right(from_token)
j = self.keyed_edu_changed.bisect_right(to_token) + 1
keys = self.keyed_edu_changed.keys()
i = keys.bisect_right(from_token)
j = keys.bisect_right(to_token) + 1
# We purposefully clobber based on the key here, python dict comprehensions
# always use the last value, so this will correctly point to the last
# stream position.
keyed_edus = {v: k for k, v in self.keyed_edu_changed.items()[i:j]}
keyed_edus = {self.keyed_edu_changed[k]: k for k in keys[i:j]}
for ((destination, edu_key), pos) in iteritems(keyed_edus):
for ((destination, edu_key), pos) in keyed_edus.iteritems():
rows.append((pos, KeyedEduRow(
key=edu_key,
edu=self.keyed_edu[(destination, edu_key)],
)))
# Fetch changed edus
i = self.edus.bisect_right(from_token)
j = self.edus.bisect_right(to_token) + 1
edus = self.edus.items()[i:j]
keys = self.edus.keys()
i = keys.bisect_right(from_token)
j = keys.bisect_right(to_token) + 1
edus = ((k, self.edus[k]) for k in keys[i:j])
for (pos, edu) in edus:
rows.append((pos, EduRow(edu)))
# Fetch changed device messages
i = self.device_messages.bisect_right(from_token)
j = self.device_messages.bisect_right(to_token) + 1
device_messages = {v: k for k, v in self.device_messages.items()[i:j]}
# Fetch changed failures
keys = self.failures.keys()
i = keys.bisect_right(from_token)
j = keys.bisect_right(to_token) + 1
failures = ((k, self.failures[k]) for k in keys[i:j])
for (destination, pos) in iteritems(device_messages):
for (pos, (destination, failure)) in failures:
rows.append((pos, FailureRow(
destination=destination,
failure=failure,
)))
# Fetch changed device messages
keys = self.device_messages.keys()
i = keys.bisect_right(from_token)
j = keys.bisect_right(to_token) + 1
device_messages = {self.device_messages[k]: k for k in keys[i:j]}
for (destination, pos) in device_messages.iteritems():
rows.append((pos, DeviceRow(
destination=destination,
)))
@@ -391,6 +425,34 @@ class EduRow(BaseFederationRow, namedtuple("EduRow", (
buff.edus.setdefault(self.edu.destination, []).append(self.edu)
class FailureRow(BaseFederationRow, namedtuple("FailureRow", (
"destination", # str
"failure",
))):
"""Streams failures to a remote server. Failures are issued when there was
something wrong with a transaction the remote sent us, e.g. it included
an event that was invalid.
"""
TypeId = "f"
@staticmethod
def from_data(data):
return FailureRow(
destination=data["destination"],
failure=data["failure"],
)
def to_data(self):
return {
"destination": self.destination,
"failure": self.failure,
}
def add_to_buffer(self, buff):
buff.failures.setdefault(self.destination, []).append(self.failure)
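Like the other stream rows, FailureRow is a namedtuple with symmetric from_data/to_data; a self-contained round trip (the BaseFederationRow mix-in is elided here):

    from collections import namedtuple

    class FailureRow(namedtuple("FailureRow", ("destination", "failure"))):
        TypeId = "f"

        @staticmethod
        def from_data(data):
            return FailureRow(destination=data["destination"],
                              failure=data["failure"])

        def to_data(self):
            return {"destination": self.destination, "failure": self.failure}

    row = FailureRow.from_data({"destination": "remote.example",
                                "failure": "bad event"})
    assert FailureRow.from_data(row.to_data()) == row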
class DeviceRow(BaseFederationRow, namedtuple("DeviceRow", (
"destination", # str
))):
@@ -417,6 +479,7 @@ TypeToRow = {
PresenceRow,
KeyedEduRow,
EduRow,
FailureRow,
DeviceRow,
)
}
@@ -426,6 +489,7 @@ ParsedFederationStreamData = namedtuple("ParsedFederationStreamData", (
"presence", # list(UserPresenceState)
"keyed_edus", # dict of destination -> { key -> Edu }
"edus", # dict of destination -> [Edu]
"failures", # dict of destination -> [failures]
"device_destinations", # set of destinations
))
@@ -447,6 +511,7 @@ def process_rows_for_federation(transaction_queue, rows):
presence=[],
keyed_edus={},
edus={},
failures={},
device_destinations=set(),
)
@@ -463,17 +528,21 @@ def process_rows_for_federation(transaction_queue, rows):
if buff.presence:
transaction_queue.send_presence(buff.presence)
for destination, edu_map in iteritems(buff.keyed_edus):
for destination, edu_map in buff.keyed_edus.iteritems():
for key, edu in edu_map.items():
transaction_queue.send_edu(
edu.destination, edu.edu_type, edu.content, key=key,
)
for destination, edu_list in iteritems(buff.edus):
for destination, edu_list in buff.edus.iteritems():
for edu in edu_list:
transaction_queue.send_edu(
edu.destination, edu.edu_type, edu.content, key=None,
)
for destination, failure_list in buff.failures.iteritems():
for failure in failure_list:
transaction_queue.send_failure(destination, failure)
for destination in buff.device_destinations:
transaction_queue.send_device_messages(destination)


@@ -13,39 +13,36 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from six import itervalues
from prometheus_client import Counter
from twisted.internet import defer
import synapse.metrics
from synapse.api.errors import FederationDeniedError, HttpResponseException
from synapse.handlers.presence import format_user_presence_state, get_interested_remotes
from synapse.metrics import (
LaterGauge,
events_processed_counter,
sent_edus_counter,
sent_transactions_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util import logcontext
from synapse.util.metrics import measure_func
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
from .persistence import TransactionActions
from .units import Edu, Transaction
from .units import Transaction, Edu
from synapse.api.errors import HttpResponseException, FederationDeniedError
from synapse.util import logcontext, PreserveLoggingContext
from synapse.util.async import run_on_reactor
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
from synapse.util.metrics import measure_func
from synapse.handlers.presence import format_user_presence_state, get_interested_remotes
import synapse.metrics
import logging
logger = logging.getLogger(__name__)
sent_pdus_destination_dist_count = Counter(
"synapse_federation_client_sent_pdu_destinations:count", ""
)
sent_pdus_destination_dist_total = Counter(
"synapse_federation_client_sent_pdu_destinations:total", ""
metrics = synapse.metrics.get_metrics_for(__name__)
client_metrics = synapse.metrics.get_metrics_for("synapse.federation.client")
sent_pdus_destination_dist = client_metrics.register_distribution(
"sent_pdu_destinations"
)
sent_edus_counter = client_metrics.register_counter("sent_edus")
sent_transactions_counter = client_metrics.register_counter("sent_transactions")
events_processed_counter = client_metrics.register_counter("events_processed")
class TransactionQueue(object):
@@ -72,10 +69,8 @@ class TransactionQueue(object):
# done
self.pending_transactions = {}
LaterGauge(
"synapse_federation_transaction_queue_pending_destinations",
"",
[],
metrics.register_callback(
"pending_destinations",
lambda: len(self.pending_transactions),
)
@@ -99,16 +94,12 @@ class TransactionQueue(object):
# Map of destination -> (edu_type, key) -> Edu
self.pending_edus_keyed_by_dest = edus_keyed = {}
LaterGauge(
"synapse_federation_transaction_queue_pending_pdus",
"",
[],
metrics.register_callback(
"pending_pdus",
lambda: sum(map(len, pdus.values())),
)
LaterGauge(
"synapse_federation_transaction_queue_pending_edus",
"",
[],
metrics.register_callback(
"pending_edus",
lambda: (
sum(map(len, edus.values()))
+ sum(map(len, presence.values()))
@@ -116,6 +107,9 @@ class TransactionQueue(object):
),
)
# destination -> list of tuple(failure, deferred)
self.pending_failures_by_dest = {}
# destination -> stream_id of last successfully sent to-device message.
# NB: may be a long or an int.
self.last_device_stream_id_by_dest = {}
@@ -163,11 +157,10 @@ class TransactionQueue(object):
if self._is_processing:
return
# fire off a processing loop in the background
run_as_background_process(
"process_event_queue_for_federation",
self._process_event_queue_loop,
)
# fire off a processing loop in the background. It's likely it will
# outlast the current request, so run it in the sentinel logcontext.
with PreserveLoggingContext():
self._process_event_queue_loop()
@defer.inlineCallbacks
def _process_event_queue_loop(self):
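
However the loop is started, the _is_processing flag (visible in later hunks) ensures only one drain loop runs at a time. A stripped-down sketch of that single-flight pattern, with a stand-in for the real federation work:

    from twisted.internet import defer

    class EventQueue(object):
        def __init__(self):
            self._is_processing = False
            self.pending = []

        def notify_new_events(self, event):
            self.pending.append(event)
            if self._is_processing:
                return  # the running loop will pick this event up
            self._process_event_queue_loop()  # fire and forget

        @defer.inlineCallbacks
        def _process_event_queue_loop(self):
            self._is_processing = True
            try:
                while self.pending:
                    batch, self.pending = self.pending, []
                    yield defer.succeed(batch)  # stand-in for handling the batch
            finally:
                self._is_processing = False
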
@@ -235,7 +228,7 @@ class TransactionQueue(object):
yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
logcontext.run_in_background(handle_room_events, evs)
for evs in itervalues(events_by_room)
for evs in events_by_room.itervalues()
],
consumeErrors=True
))
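
The two spellings differ only in Python-version support: six.itervalues(d) works on both Python 2 and 3, while d.itervalues() exists only on Python 2. A quick illustration:

    from six import itervalues

    events_by_room = {"!a:example.com": ["ev1"], "!b:example.com": ["ev2", "ev3"]}

    for evs in itervalues(events_by_room):   # py2 + py3 (six)
        assert isinstance(evs, list)
    # events_by_room.itervalues()            -- py2 only
    # events_by_room.values()                -- py3 (returns a view, not a list)
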
@@ -248,15 +241,18 @@ class TransactionQueue(object):
now = self.clock.time_msec()
ts = yield self.store.get_received_ts(events[-1].event_id)
synapse.metrics.event_processing_lag.labels(
"federation_sender").set(now - ts)
synapse.metrics.event_processing_last_ts.labels(
"federation_sender").set(ts)
synapse.metrics.event_processing_lag.set(
now - ts, "federation_sender",
)
synapse.metrics.event_processing_last_ts.set(
ts, "federation_sender",
)
events_processed_counter.inc(len(events))
events_processed_counter.inc_by(len(events))
synapse.metrics.event_processing_positions.labels(
"federation_sender").set(next_token)
synapse.metrics.event_processing_positions.set(
next_token, "federation_sender",
)
finally:
self._is_processing = False
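
The same migration shows up here for labelled metrics: prometheus_client selects a per-label child with .labels(...) and then calls .set(...), whereas the older synapse registry passed the label value as an extra argument to set(). A hedged sketch; the metric and label names follow the hunk but are not verified against either branch:

    from prometheus_client import Gauge

    event_processing_lag = Gauge(
        "synapse_event_processing_lag",
        "age of the last processed event (ms)",
        ["name"],
    )

    now, ts = 1523600000123, 1523600000000
    event_processing_lag.labels("federation_sender").set(now - ts)
    # older synapse API for the same write:
    #   event_processing_lag.set(now - ts, "federation_sender")
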
@@ -279,8 +275,7 @@ class TransactionQueue(object):
if not destinations:
return
sent_pdus_destination_dist_total.inc(len(destinations))
sent_pdus_destination_dist_count.inc()
sent_pdus_destination_dist.inc_by(len(destinations))
for destination in destinations:
self.pending_pdus_by_dest.setdefault(destination, []).append(
@@ -300,6 +295,7 @@ class TransactionQueue(object):
Args:
states (list(UserPresenceState))
"""
return
# First we queue up the new presence by user ID, so multiple presence
# updates in quick successtion are correctly handled
@@ -327,9 +323,7 @@ class TransactionQueue(object):
if not states_map:
break
yield self._process_presence_inner(list(states_map.values()))
except Exception:
logger.exception("Error sending presence states to servers")
yield self._process_presence_inner(states_map.values())
finally:
self._processing_pending_presence = False
@@ -379,6 +373,19 @@ class TransactionQueue(object):
self._attempt_new_transaction(destination)
def send_failure(self, failure, destination):
if destination == self.server_name or destination == "localhost":
return
if not self.can_send_to(destination):
return
self.pending_failures_by_dest.setdefault(
destination, []
).append(failure)
self._attempt_new_transaction(destination)
def send_device_messages(self, destination):
if destination == self.server_name or destination == "localhost":
return
@@ -418,11 +425,14 @@ class TransactionQueue(object):
logger.debug("TX [%s] Starting transaction loop", destination)
run_as_background_process(
"federation_transaction_transmission_loop",
self._transaction_transmission_loop,
destination,
)
# Drop the logcontext before starting the transaction. It doesn't
# really make sense to log all the outbound transactions against
# whatever path led us to this point: that's pretty arbitrary really.
#
# (this also means we can fire off _perform_transaction without
# yielding)
with logcontext.PreserveLoggingContext():
self._transaction_transmission_loop(destination)
@defer.inlineCallbacks
def _transaction_transmission_loop(self, destination):
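
The comment above carries the logcontext rationale; the parenthetical about firing without yielding works because calling an inlineCallbacks function starts it running immediately and merely returns a Deferred. A tiny demonstration:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def transmission_loop(destination):
        print("loop started for", destination)  # runs as soon as it is called
        yield defer.succeed(None)
        print("loop finished for", destination)

    def attempt_new_transaction(destination):
        transmission_loop(destination)  # deliberately not yielded: fire and forget

    attempt_new_transaction("remote.example.com")
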
@@ -435,6 +445,9 @@ class TransactionQueue(object):
# hence why we throw the result away.
yield get_retry_limiter(destination, self.clock, self.store)
# XXX: what's this for?
yield run_on_reactor()
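# (run_on_reactor -- since removed from synapse -- appears to do no more
# than bounce execution to the next reactor iteration, roughly
# task.deferLater(reactor, 0, lambda: None), so nothing below it runs in
# the caller's synchronous call stack.)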
pending_pdus = []
while True:
device_message_edus, device_stream_id, dev_list_id = (
@@ -453,6 +466,7 @@ class TransactionQueue(object):
pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
pending_edus = self.pending_edus_by_dest.pop(destination, [])
pending_presence = self.pending_presence_by_dest.pop(destination, {})
pending_failures = self.pending_failures_by_dest.pop(destination, [])
pending_edus.extend(
self.pending_edus_keyed_by_dest.pop(destination, {}).values()
@@ -480,7 +494,7 @@ class TransactionQueue(object):
logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
destination, len(pending_pdus))
if not pending_pdus and not pending_edus:
if not pending_pdus and not pending_edus and not pending_failures:
logger.debug("TX [%s] Nothing to send", destination)
self.last_device_stream_id_by_dest[destination] = (
device_stream_id
@@ -490,7 +504,7 @@ class TransactionQueue(object):
# END CRITICAL SECTION
success = yield self._send_new_transaction(
destination, pending_pdus, pending_edus,
destination, pending_pdus, pending_edus, pending_failures,
)
if success:
sent_transactions_counter.inc()
@@ -567,12 +581,14 @@ class TransactionQueue(object):
@measure_func("_send_new_transaction")
@defer.inlineCallbacks
def _send_new_transaction(self, destination, pending_pdus, pending_edus):
def _send_new_transaction(self, destination, pending_pdus, pending_edus,
pending_failures):
# Sort based on the order field
pending_pdus.sort(key=lambda t: t[1])
pdus = [x[0] for x in pending_pdus]
edus = pending_edus
failures = [x.get_dict() for x in pending_failures]
success = True
@@ -582,10 +598,11 @@ class TransactionQueue(object):
logger.debug(
"TX [%s] {%s} Attempting new transaction"
" (pdus: %d, edus: %d)",
" (pdus: %d, edus: %d, failures: %d)",
destination, txn_id,
len(pdus),
len(edus),
len(failures)
)
logger.debug("TX [%s] Persisting transaction...", destination)
@@ -597,6 +614,7 @@ class TransactionQueue(object):
destination=destination,
pdus=pdus,
edus=edus,
pdu_failures=failures,
)
self._next_txn_id += 1
@@ -606,11 +624,12 @@ class TransactionQueue(object):
logger.debug("TX [%s] Persisted transaction", destination)
logger.info(
"TX [%s] {%s} Sending transaction [%s],"
" (PDUs: %d, EDUs: %d)",
" (PDUs: %d, EDUs: %d, failures: %d)",
destination, txn_id,
transaction.transaction_id,
len(pdus),
len(edus),
len(failures),
)
# Actually send the transaction


@@ -14,14 +14,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.api.constants import Membership
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.util.logutils import log_function
import logging
import urllib
from twisted.internet import defer
from synapse.api.constants import Membership
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.util.logutils import log_function
logger = logging.getLogger(__name__)


@@ -14,27 +14,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.api.errors import Codes, SynapseError, FederationDeniedError
from synapse.http.server import JsonResource
from synapse.http.servlet import (
parse_json_object_from_request, parse_integer_from_args, parse_string_from_args,
parse_boolean_from_args,
)
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.versionstring import get_version_string
from synapse.util.logcontext import preserve_fn
from synapse.types import ThirdPartyInstanceID, get_domain_from_id
import functools
import logging
import re
from twisted.internet import defer
import synapse
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.http.endpoint import parse_and_validate_server_name
from synapse.http.server import JsonResource
from synapse.http.servlet import (
parse_boolean_from_args,
parse_integer_from_args,
parse_json_object_from_request,
parse_string_from_args,
)
from synapse.types import ThirdPartyInstanceID, get_domain_from_id
from synapse.util.logcontext import run_in_background
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
@@ -96,11 +94,37 @@ class Authenticator(object):
"signatures": {},
}
if (
self.federation_domain_whitelist is not None and
self.server_name not in self.federation_domain_whitelist
):
raise FederationDeniedError(self.server_name)
if content is not None:
json_request["content"] = content
origin = None
def parse_auth_header(header_str):
try:
params = auth.split(" ")[1].split(",")
param_dict = dict(kv.split("=") for kv in params)
def strip_quotes(value):
if value.startswith("\""):
return value[1:-1]
else:
return value
origin = strip_quotes(param_dict["origin"])
key = strip_quotes(param_dict["key"])
sig = strip_quotes(param_dict["sig"])
return (origin, key, sig)
except Exception:
raise AuthenticationError(
400, "Malformed Authorization header", Codes.UNAUTHORIZED
)
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
if not auth_headers:
@@ -109,17 +133,11 @@ class Authenticator(object):
)
for auth in auth_headers:
if auth.startswith(b"X-Matrix"):
(origin, key, sig) = _parse_auth_header(auth)
if auth.startswith("X-Matrix"):
(origin, key, sig) = parse_auth_header(auth)
json_request["origin"] = origin
json_request["signatures"].setdefault(origin, {})[key] = sig
if (
self.federation_domain_whitelist is not None and
origin not in self.federation_domain_whitelist
):
raise FederationDeniedError(origin)
if not json_request["signatures"]:
raise NoAuthenticationError(
401, "Missing Authorization headers", Codes.UNAUTHORIZED,
@@ -134,60 +152,11 @@ class Authenticator(object):
# alive
retry_timings = yield self.store.get_destination_retry_timings(origin)
if retry_timings and retry_timings["retry_last_ts"]:
run_in_background(self._reset_retry_timings, origin)
logger.info("Marking origin %r as up", origin)
preserve_fn(self.store.set_destination_retry_timings)(origin, 0, 0)
defer.returnValue(origin)
@defer.inlineCallbacks
def _reset_retry_timings(self, origin):
try:
logger.info("Marking origin %r as up", origin)
yield self.store.set_destination_retry_timings(origin, 0, 0)
except Exception:
logger.exception("Error resetting retry timings on %s", origin)
def _parse_auth_header(header_bytes):
"""Parse an X-Matrix auth header
Args:
header_bytes (bytes): header value
Returns:
Tuple[str, str, str]: origin, key id, signature.
Raises:
AuthenticationError if the header could not be parsed
"""
try:
header_str = header_bytes.decode('utf-8')
params = header_str.split(" ")[1].split(",")
param_dict = dict(kv.split("=") for kv in params)
def strip_quotes(value):
if value.startswith("\""):
return value[1:-1]
else:
return value
origin = strip_quotes(param_dict["origin"])
# ensure that the origin is a valid server name
parse_and_validate_server_name(origin)
key = strip_quotes(param_dict["key"])
sig = strip_quotes(param_dict["sig"])
return origin, key, sig
except Exception as e:
logger.warn(
"Error parsing auth header '%s': %s",
header_bytes.decode('ascii', 'replace'),
e,
)
raise AuthenticationError(
400, "Malformed Authorization header", Codes.UNAUTHORIZED,
)
class BaseFederationServlet(object):
REQUIRE_AUTH = True
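
For reference, the parser from the hunk above pulled out as a standalone function; the sample header is illustrative, and the newer variant in the diff additionally validates the origin with parse_and_validate_server_name and logs before raising. Note the maxsplit=1 guard: the code in the hunk splits on every "=", which is safe only because Matrix signatures are unpadded base64:

    def parse_auth_header(header_bytes):
        """Parse an X-Matrix Authorization header into (origin, key_id, sig)."""
        header_str = header_bytes.decode("utf-8")
        params = header_str.split(" ")[1].split(",")
        param_dict = dict(kv.split("=", 1) for kv in params)

        def strip_quotes(value):
            return value[1:-1] if value.startswith('"') else value

        origin = strip_quotes(param_dict["origin"])
        key = strip_quotes(param_dict["key"])
        sig = strip_quotes(param_dict["sig"])
        return origin, key, sig

    header = b'X-Matrix origin=a.example.com,key="ed25519:1",sig="dGVzdA"'
    assert parse_auth_header(header) == ("a.example.com", "ed25519:1", "dGVzdA")
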
@@ -283,10 +252,11 @@ class FederationSendServlet(BaseFederationServlet):
)
logger.info(
"Received txn %s from %s. (PDUs: %d, EDUs: %d)",
"Received txn %s from %s. (PDUs: %d, EDUs: %d, failures: %d)",
transaction_id, origin,
len(transaction_data.get("pdus", [])),
len(transaction_data.get("edus", [])),
len(transaction_data.get("failures", [])),
)
# We should ideally be getting this from the security layer.
@@ -385,9 +355,7 @@ class FederationMakeJoinServlet(BaseFederationServlet):
@defer.inlineCallbacks
def on_GET(self, origin, content, query, context, user_id):
content = yield self.handler.on_make_join_request(
origin, context, user_id,
)
content = yield self.handler.on_make_join_request(context, user_id)
defer.returnValue((200, content))
@@ -396,17 +364,15 @@ class FederationMakeLeaveServlet(BaseFederationServlet):
@defer.inlineCallbacks
def on_GET(self, origin, content, query, context, user_id):
content = yield self.handler.on_make_leave_request(
origin, context, user_id,
)
content = yield self.handler.on_make_leave_request(context, user_id)
defer.returnValue((200, content))
class FederationSendLeaveServlet(BaseFederationServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<txid>[^/]*)"
@defer.inlineCallbacks
def on_PUT(self, origin, content, query, room_id, event_id):
def on_PUT(self, origin, content, query, room_id, txid):
content = yield self.handler.on_send_leave_request(origin, content)
defer.returnValue((200, content))


@@ -17,9 +17,10 @@
server protocol.
"""
from synapse.util.jsonobject import JsonEncodedObject
import logging
from synapse.util.jsonobject import JsonEncodedObject
logger = logging.getLogger(__name__)
@@ -73,6 +74,9 @@ class Transaction(JsonEncodedObject):
"previous_ids",
"pdus",
"edus",
"transaction_id",
"destination",
"pdu_failures",
]
internal_keys = [
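
With pdu_failures in valid_keys, a transaction on this branch serialises to roughly the following JSON shape (keys taken from the hunk, values illustrative):

    transaction = {
        "transaction_id": "1523600000123",
        "destination": "b.example.com",
        "pdus": [],           # persistent events
        "edus": [],           # ephemeral events: presence, typing, receipts
        "pdu_failures": [],   # failure reports; absent on the other side of this diff
    }
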


@@ -23,9 +23,9 @@ If a user leaves (or gets kicked out of) a group, either side can still use
their attestation to "prove" their membership, until the attestation expires.
Therefore attestations shouldn't be relied on to prove membership in important
cases, but can for less important situations, e.g. showing a user's membership
of groups on their profile, showing flairs, etc.
of groups on their profile, showing flairs, etc.abs
An attestation is a signed blob of json that looks like:
An attestsation is a signed blob of json that looks like:
{
"user_id": "@foo:a.example.com",
@@ -38,14 +38,14 @@ An attestation is a signed blob of json that looks like:
import logging
import random
from signedjson.sign import sign_json
from twisted.internet import defer
from synapse.api.errors import SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util.logcontext import run_in_background
from synapse.util.logcontext import preserve_fn
from signedjson.sign import sign_json
logger = logging.getLogger(__name__)
@@ -130,7 +130,7 @@ class GroupAttestionRenewer(object):
self.attestations = hs.get_groups_attestation_signing()
self._renew_attestations_loop = self.clock.looping_call(
self._start_renew_attestations, 30 * 60 * 1000,
self._renew_attestations, 30 * 60 * 1000,
)
@defer.inlineCallbacks
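
The 30 * 60 * 1000 interval above is in milliseconds because synapse's Clock.looping_call takes milliseconds; the plain Twisted equivalent takes seconds. A minimal sketch of the same periodic wiring:

    from twisted.internet import task

    def renew_attestations():
        pass  # stand-in: re-sign any attestations nearing expiry

    renew_loop = task.LoopingCall(renew_attestations)
    renew_loop.start(30 * 60)  # every 30 minutes; Twisted intervals are seconds
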
@@ -152,9 +152,6 @@ class GroupAttestionRenewer(object):
defer.returnValue({})
def _start_renew_attestations(self):
return run_as_background_process("renew_attestations", self._renew_attestations)
@defer.inlineCallbacks
def _renew_attestations(self):
"""Called periodically to check if we need to update any of our attestations
@@ -168,35 +165,31 @@ class GroupAttestionRenewer(object):
@defer.inlineCallbacks
def _renew_attestation(group_id, user_id):
try:
if not self.is_mine_id(group_id):
destination = get_domain_from_id(group_id)
elif not self.is_mine_id(user_id):
destination = get_domain_from_id(user_id)
else:
logger.warn(
"Incorrectly trying to do attestations for user: %r in %r",
user_id, group_id,
)
yield self.store.remove_attestation_renewal(group_id, user_id)
return
attestation = self.attestations.create_attestation(group_id, user_id)
yield self.transport_client.renew_group_attestation(
destination, group_id, user_id,
content={"attestation": attestation},
if not self.is_mine_id(group_id):
destination = get_domain_from_id(group_id)
elif not self.is_mine_id(user_id):
destination = get_domain_from_id(user_id)
else:
logger.warn(
"Incorrectly trying to do attestations for user: %r in %r",
user_id, group_id,
)
yield self.store.remove_attestation_renewal(group_id, user_id)
return
yield self.store.update_attestation_renewal(
group_id, user_id, attestation
)
except Exception:
logger.exception("Error renewing attestation of %r in %r",
user_id, group_id)
attestation = self.attestations.create_attestation(group_id, user_id)
yield self.transport_client.renew_group_attestation(
destination, group_id, user_id,
content={"attestation": attestation},
)
yield self.store.update_attestation_renewal(
group_id, user_id, attestation
)
for row in rows:
group_id = row["group_id"]
user_id = row["user_id"]
run_in_background(_renew_attestation, group_id, user_id)
preserve_fn(_renew_attestation)(group_id, user_id)
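
One behavioural difference buried in this hunk: the variant with try/except Exception around the renewal keeps a single failing group/user pair from breaking the others, and from surfacing as an unhandled Deferred error. A pared-down sketch:

    import logging

    from twisted.internet import defer

    logger = logging.getLogger(__name__)

    @defer.inlineCallbacks
    def _renew_attestation(group_id, user_id):
        try:
            yield defer.succeed(None)  # stand-in: create, send and store the attestation
        except Exception:
            # swallow and log, so the loop over rows carries on
            logger.exception("Error renewing attestation of %r in %r",
                             user_id, group_id)
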


@@ -16,12 +16,9 @@
import logging
from six import string_types
from twisted.internet import defer
from synapse.api.errors import SynapseError
from synapse.types import GroupID, RoomID, UserID, get_domain_from_id
from twisted.internet import defer
logger = logging.getLogger(__name__)
@@ -434,7 +431,7 @@ class GroupsServerHandler(object):
"long_description"):
if keyname in content:
value = content[keyname]
if not isinstance(value, string_types):
if not isinstance(value, basestring):
raise SynapseError(400, "%r value is not a string" % (keyname,))
profile[keyname] = value
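
basestring exists only on Python 2; six.string_types is (str, unicode) there and (str,) on Python 3, so the check behaves identically on both. A sketch, raising ValueError where the hunk raises SynapseError(400, ...):

    from six import string_types

    def check_profile_field(keyname, value):
        if not isinstance(value, string_types):
            raise ValueError("%r value is not a string" % (keyname,))

    check_profile_field("name", u"My Group")
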


@@ -13,11 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from .admin import AdminHandler
from .directory import DirectoryHandler
from .federation import FederationHandler
from .identity import IdentityHandler
from .register import RegistrationHandler
from .room import (
RoomCreationHandler, RoomContextHandler,
)
from .message import MessageHandler
from .federation import FederationHandler
from .directory import DirectoryHandler
from .admin import AdminHandler
from .identity import IdentityHandler
from .search import SearchHandler
@@ -42,8 +46,11 @@ class Handlers(object):
def __init__(self, hs):
self.registration_handler = RegistrationHandler(hs)
self.message_handler = MessageHandler(hs)
self.room_creation_handler = RoomCreationHandler(hs)
self.federation_handler = FederationHandler(hs)
self.directory_handler = DirectoryHandler(hs)
self.admin_handler = AdminHandler(hs)
self.identity_handler = IdentityHandler(hs)
self.search_handler = SearchHandler(hs)
self.room_context_handler = RoomContextHandler(hs)

Some files were not shown because too many files have changed in this diff.