Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-09 01:30:18 +00:00)

Compare commits: v0.23.1 ... erikj/noti (7 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 10a67f0d69 | |
| | 9bc36b7f31 | |
| | c59e904839 | |
| | e70d484e1c | |
| | 6844bb8a6f | |
| | 30b53812de | |
| | bddacb6dd1 | |
.github/ISSUE_TEMPLATE.md (vendored): 47 lines removed

@@ -1,47 +0,0 @@
-<!--
-
-**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
-You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)
-
-
-This is a bug report template. By following the instructions below and
-filling out the sections with your information, you will help the us to get all
-the necessary data to fix your issue.
-
-You can also preview your report before submitting it. You may remove sections
-that aren't relevant to your particular case.
-
-Text between <!-- and --> marks will be invisible in the report.
-
--->
-
-### Description
-
-Describe here the problem that you are experiencing, or the feature you are requesting.
-
-### Steps to reproduce
-
-- For bugs, list the steps
-- that reproduce the bug
-- using hyphens as bullet points
-
-Describe how what happens differs from what you expected.
-
-If you can identify any relevant log snippets from _homeserver.log_, please include
-those here (please be careful to remove any personal or private data):
-
-### Version information
-
-<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
-
-- **Homeserver**: Was this issue identified on matrix.org or another homeserver?
-
-If not matrix.org:
-- **Version**: What version of Synapse is running? <!--
-    You can find the Synapse version by inspecting the server headers (replace matrix.org with
-    your own homeserver domain):
-    $ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
-    -->
-- **Install method**: package manager/git clone/pip
-- **Platform**: Tell us about the environment in which your homeserver is operating
-  - distro, hardware, if it's running in a vm/container, etc.
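The removed template above shows a curl command for reading the Server header. The same check can be done from Python; a minimal sketch using only the standard library, with the matrix.org URL as a placeholder for your own homeserver:

```python
import urllib.request

# Read the Server header from the client/versions endpoint to find the
# running Synapse version (replace the URL with your own homeserver).
resp = urllib.request.urlopen("https://matrix.org/_matrix/client/versions")
print(resp.headers.get("Server"))
```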
.gitignore (vendored): 12 lines changed

@@ -24,10 +24,10 @@ homeserver*.yaml
 .coverage
 htmlcov
 
-demo/*/*.db
-demo/*/*.log
-demo/*/*.log.*
-demo/*/*.pid
+demo/*.db
+demo/*.log
+demo/*.log.*
+demo/*.pid
 demo/media_store.*
 demo/etc
 
@@ -42,7 +42,3 @@ build/
 
 localhost-800*/
 static/client/register/register_config.js
-.tox
-
-env/
-*.config
.travis.yml: 17 lines removed

@@ -1,17 +0,0 @@
-sudo: false
-language: python
-python: 2.7
-
-# tell travis to cache ~/.cache/pip
-cache: pip
-
-env:
- - TOX_ENV=packaging
- - TOX_ENV=pep8
- - TOX_ENV=py27
-
-install:
- - pip install tox
-
-script:
- - tox -e $TOX_ENV
AUTHORS.rst: 24 lines changed

@@ -29,7 +29,7 @@ Matthew Hodgson <matthew at matrix.org>
 
 Emmanuel Rohee <manu at matrix.org>
  * Supporting iOS clients (testability and fallback registration)
 
 Turned to Dust <dwinslow86 at gmail.com>
  * ArchLinux installation instructions
 
@@ -38,25 +38,3 @@ Brabo <brabo at riseup.net>
 
 Ivan Shapovalov <intelfx100 at gmail.com>
  * contrib/systemd: a sample systemd unit file and a logger configuration
-
-Eric Myhre <hash at exultant.us>
- * Fix bug where ``media_store_path`` config option was ignored by v0 content
-   repository API.
-
-Muthu Subramanian <muthu.subramanian.karunanidhi at ericsson.com>
- * Add SAML2 support for registration and login.
-
-Steven Hammerton <steven.hammerton at openmarket.com>
- * Add CAS support for registration and login.
-
-Mads Robin Christensen <mads at v42 dot dk>
- * CentOS 7 installation instructions.
-
-Florent Violleau <floviolleau at gmail dot com>
- * Add Raspberry Pi installation instructions and general troubleshooting items
-
-Niklas Riekenbrauck <nikriek at gmail dot.com>
- * Add JWT support for registration and login
-
-Christoph Witzany <christoph at web.crofting.com>
- * Add LDAP support for authentication
CHANGES.rst: 1596 lines changed (diff suppressed because it is too large)
MANIFEST.in: 22 lines changed

@@ -3,29 +3,13 @@ include LICENSE
 include VERSION
 include *.rst
 include demo/README
-include demo/demo.tls.dh
-include demo/*.py
-include demo/*.sh
 
 recursive-include synapse/storage/schema *.sql
 recursive-include synapse/storage/schema *.py
 
+recursive-include demo *.dh
+recursive-include demo *.py
+recursive-include demo *.sh
 recursive-include docs *
-recursive-include res *
 recursive-include scripts *
-recursive-include scripts-dev *
-recursive-include synapse *.pyi
 recursive-include tests *.py
-
-recursive-include synapse/static *.css
-recursive-include synapse/static *.gif
-recursive-include synapse/static *.html
-recursive-include synapse/static *.js
-
-exclude jenkins.sh
-exclude jenkins*.sh
-exclude jenkins*
-recursive-exclude jenkins *.sh
-
-prune .github
-prune demo/etc
README.rst: 954 lines changed (diff suppressed because it is too large)
UPGRADE.rst: 96 lines changed

@@ -1,75 +1,3 @@
-Upgrading Synapse
-=================
-
-Before upgrading check if any special steps are required to upgrade from the
-what you currently have installed to current version of synapse. The extra
-instructions that may be required are listed later in this document.
-
-1. If synapse was installed in a virtualenv then active that virtualenv before
-   upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then
-   run:
-
-   .. code:: bash
-
-       source ~/.synapse/bin/activate
-
-2. If synapse was installed using pip then upgrade to the latest version by
-   running:
-
-   .. code:: bash
-
-       pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
-
-       # restart synapse
-       synctl restart
-
-
-   If synapse was installed using git then upgrade to the latest version by
-   running:
-
-   .. code:: bash
-
-       # Pull the latest version of the master branch.
-       git pull
-       # Update the versions of synapse's python dependencies.
-       python synapse/python_dependencies.py | xargs pip install --upgrade
-
-       # restart synapse
-       ./synctl restart
-
-
-To check whether your update was sucessful, you can check the Server header
-returned by the Client-Server API:
-
-.. code:: bash
-
-    # replace <host.name> with the hostname of your synapse homeserver.
-    # You may need to specify a port (eg, :8448) if your server is not
-    # configured on port 443.
-    curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
-
-Upgrading to v0.15.0
-====================
-
-If you want to use the new URL previewing API (/_matrix/media/r0/preview_url)
-then you have to explicitly enable it in the config and update your dependencies
-dependencies. See README.rst for details.
-
-
-Upgrading to v0.11.0
-====================
-
-This release includes the option to send anonymous usage stats to matrix.org,
-and requires that administrators explictly opt in or out by setting the
-``report_stats`` option to either ``true`` or ``false``.
-
-We would really appreciate it if you could help our project out by reporting
-anonymized usage statistics from your homeserver. Only very basic aggregate
-data (e.g. number of users) will be reported, but it helps us to track the
-growth of the Matrix community, and helps us to make Matrix a success, as well
-as to convince other networks that they should peer with us.
-
-
 Upgrading to v0.9.0
 ===================
 
@@ -86,7 +14,7 @@ It has been replaced by specifying a list of application service registrations in
 ``homeserver.yaml``::
 
   app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]
 
 Where ``registration-01.yaml`` looks like::
 
  url: <String>  # e.g. "https://my.application.service.com"
@@ -175,7 +103,7 @@ This release completely changes the database schema and so requires upgrading
 it before starting the new version of the homeserver.
 
 The script "database-prepare-for-0.5.0.sh" should be used to upgrade the
 database. This will save all user information, such as logins and profiles,
 but will otherwise purge the database. This includes messages, which
 rooms the home server was a member of and room alias mappings.
 
@@ -184,18 +112,18 @@ file and ask for help in #matrix:matrix.org. The upgrade process is,
 unfortunately, non trivial and requires human intervention to resolve any
 resulting conflicts during the upgrade process.
 
 Before running the command the homeserver should be first completely
 shutdown. To run it, simply specify the location of the database, e.g.:
 
     ./scripts/database-prepare-for-0.5.0.sh "homeserver.db"
 
 Once this has successfully completed it will be safe to restart the
 homeserver. You may notice that the homeserver takes a few seconds longer to
 restart than usual as it reinitializes the database.
 
 On startup of the new version, users can either rejoin remote rooms using room
 aliases or by being reinvited. Alternatively, if any other homeserver sends a
 message to a room that the homeserver was previously in the local HS will
 automatically rejoin the room.
 
 Upgrading to v0.4.0
@@ -254,7 +182,7 @@ automatically generate default config use::
     --config-path homeserver.config \
     --generate-config
 
 This config can be edited if desired, for example to specify a different SSL
 certificate to use. Once done you can run the home server using::
 
     $ python synapse/app/homeserver.py --config-path homeserver.config
@@ -275,20 +203,20 @@ This release completely changes the database schema and so requires upgrading
 it before starting the new version of the homeserver.
 
 The script "database-prepare-for-0.0.1.sh" should be used to upgrade the
 database. This will save all user information, such as logins and profiles,
 but will otherwise purge the database. This includes messages, which
 rooms the home server was a member of and room alias mappings.
 
 Before running the command the homeserver should be first completely
 shutdown. To run it, simply specify the location of the database, e.g.:
 
     ./scripts/database-prepare-for-0.0.1.sh "homeserver.db"
 
 Once this has successfully completed it will be safe to restart the
 homeserver. You may notice that the homeserver takes a few seconds longer to
 restart than usual as it reinitializes the database.
 
 On startup of the new version, users can either rejoin remote rooms using room
 aliases or by being reinvited. Alternatively, if any other homeserver sends a
 message to a room that the homeserver was previously in the local HS will
 automatically rejoin the room.
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -32,7 +32,7 @@ import urlparse
 import nacl.signing
 import nacl.encoding
 
-from signedjson.sign import verify_signed_json, SignatureVerifyException
+from syutil.crypto.jsonsign import verify_signed_json, SignatureVerifyException
 
 CONFIG_JSON = "cmdclient_config.json"
 
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -36,13 +36,15 @@ class HttpClient(object):
             the request body. This will be encoded as JSON.
 
         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
+            Deferred: Succeeds when we get *any* HTTP response.
+
+            The result of the deferred is a tuple of `(code, response)`,
+            where `response` is a dict representing the decoded JSON body.
         """
         pass
 
     def get_json(self, url, args=None):
-        """ Gets some json from the given host homeserver and path
+        """ Get's some json from the given host homeserver and path
 
         Args:
             url (str): The URL to GET data from.
@@ -52,8 +54,10 @@ class HttpClient(object):
             and *not* a string.
 
         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
+            Deferred: Succeeds when we get *any* HTTP response.
+
+            The result of the deferred is a tuple of `(code, response)`,
+            where `response` is a dict representing the decoded JSON body.
         """
         pass
 
@@ -210,4 +214,4 @@ class _JsonProducer(object):
         pass
 
     def stopProducing(self):
         pass
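The docstring change above says callers now receive a `(code, response)` tuple from the deferred rather than just the decoded JSON body. A minimal, hypothetical usage sketch under that assumption (Twisted style; the function and URL are invented for illustration):

```python
from twisted.internet import defer

@defer.inlineCallbacks
def fetch_versions(client):
    # `client` is assumed to implement the HttpClient interface described
    # above; the URL is just an example.
    code, response = yield client.get_json(
        "http://localhost:8008/_matrix/client/versions"
    )
    if code // 100 != 2:
        raise RuntimeError("request failed with HTTP %d" % code)
    defer.returnValue(response)
```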
@@ -1,50 +0,0 @@
-# Example log_config file for synapse. To enable, point `log_config` to it in
-# `homeserver.yaml`, and restart synapse.
-#
-# This configuration will produce similar results to the defaults within
-# synapse, but can be edited to give more flexibility.
-
-version: 1
-
-formatters:
-    fmt:
-        format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
-
-filters:
-    context:
-        (): synapse.util.logcontext.LoggingContextFilter
-        request: ""
-
-handlers:
-    # example output to console
-    console:
-        class: logging.StreamHandler
-        filters: [context]
-
-    # example output to file - to enable, edit 'root' config below.
-    file:
-        class: logging.handlers.RotatingFileHandler
-        formatter: fmt
-        filename: /var/log/synapse/homeserver.log
-        maxBytes: 100000000
-        backupCount: 3
-        filters: [context]
-
-
-root:
-    level: INFO
-    handlers: [console] # to use file handler instead, switch to [file]
-
-loggers:
-    synapse:
-        level: INFO
-
-    synapse.storage.SQL:
-        # beware: increasing this to DEBUG will make synapse log sensitive
-        # information such as access tokens.
-        level: INFO
-
-    # example of enabling debugging for a component:
-    #
-    # synapse.federation.transport.server:
-    #    level: DEBUG
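A log config like the removed example above can be sanity-checked outside of synapse with the standard library. A minimal sketch, assuming PyYAML is installed and the file is saved as log_config.yaml:

```python
import logging.config
import yaml

# Load the YAML log config and apply it; assumes the file path below exists
# and that any custom filter classes it references are importable.
with open("log_config.yaml") as f:
    config = yaml.safe_load(f)

logging.config.dictConfig(config)
logging.getLogger("synapse").info("log config loaded")
```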
@@ -1,4 +1,4 @@
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,151 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import pydot
-import cgi
-import simplejson as json
-import datetime
-import argparse
-
-from synapse.events import FrozenEvent
-from synapse.util.frozenutils import unfreeze
-
-
-def make_graph(file_name, room_id, file_prefix, limit):
-    print "Reading lines"
-    with open(file_name) as f:
-        lines = f.readlines()
-
-    print "Read lines"
-
-    events = [FrozenEvent(json.loads(line)) for line in lines]
-
-    print "Loaded events."
-
-    events.sort(key=lambda e: e.depth)
-
-    print "Sorted events"
-
-    if limit:
-        events = events[-int(limit):]
-
-    node_map = {}
-
-    graph = pydot.Dot(graph_name="Test")
-
-    for event in events:
-        t = datetime.datetime.fromtimestamp(
-            float(event.origin_server_ts) / 1000
-        ).strftime('%Y-%m-%d %H:%M:%S,%f')
-
-        content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
-        content = content.replace("\n", "<br/>\n")
-
-        print content
-        content = []
-        for key, value in unfreeze(event.get_dict()["content"]).items():
-            if value is None:
-                value = "<null>"
-            elif isinstance(value, basestring):
-                pass
-            else:
-                value = json.dumps(value)
-
-            content.append(
-                "<b>%s</b>: %s," % (
-                    cgi.escape(key, quote=True).encode("ascii", 'xmlcharrefreplace'),
-                    cgi.escape(value, quote=True).encode("ascii", 'xmlcharrefreplace'),
-                )
-            )
-
-        content = "<br/>\n".join(content)
-
-        print content
-
-        label = (
-            "<"
-            "<b>%(name)s </b><br/>"
-            "Type: <b>%(type)s </b><br/>"
-            "State key: <b>%(state_key)s </b><br/>"
-            "Content: <b>%(content)s </b><br/>"
-            "Time: <b>%(time)s </b><br/>"
-            "Depth: <b>%(depth)s </b><br/>"
-            ">"
-        ) % {
-            "name": event.event_id,
-            "type": event.type,
-            "state_key": event.get("state_key", None),
-            "content": content,
-            "time": t,
-            "depth": event.depth,
-        }
-
-        node = pydot.Node(
-            name=event.event_id,
-            label=label,
-        )
-
-        node_map[event.event_id] = node
-        graph.add_node(node)
-
-    print "Created Nodes"
-
-    for event in events:
-        for prev_id, _ in event.prev_events:
-            try:
-                end_node = node_map[prev_id]
-            except:
-                end_node = pydot.Node(
-                    name=prev_id,
-                    label="<<b>%s</b>>" % (prev_id,),
-                )
-
-                node_map[prev_id] = end_node
-                graph.add_node(end_node)
-
-            edge = pydot.Edge(node_map[event.event_id], end_node)
-            graph.add_edge(edge)
-
-    print "Created edges"
-
-    graph.write('%s.dot' % file_prefix, format='raw', prog='dot')
-
-    print "Created Dot"
-
-    graph.write_svg("%s.svg" % file_prefix, prog='dot')
-
-    print "Created svg"
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(
-        description="Generate a PDU graph for a given room by reading "
-                    "from a file with line deliminated events. \n"
-                    "Requires pydot."
-    )
-    parser.add_argument(
-        "-p", "--prefix", dest="prefix",
-        help="String to prefix output files with",
-        default="graph_output"
-    )
-    parser.add_argument(
-        "-l", "--limit",
-        help="Only retrieve the last N events.",
-    )
-    parser.add_argument('event_file')
-    parser.add_argument('room')
-
-    args = parser.parse_args()
-
-    make_graph(args.event_file, args.room, args.prefix, args.limit)
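The removed script above expects an input file with one JSON event per line. A minimal, hypothetical sketch of producing such a file from a list of event dicts; the `events` list here is a placeholder, not real homeserver data:

```python
import json

# Hypothetical Matrix event dicts; in practice these would be exported from
# the homeserver database or an API.
events = [
    {"event_id": "$abc:example.com", "type": "m.room.message", "depth": 1,
     "origin_server_ts": 1417222374433, "content": {"body": "hello"},
     "prev_events": []},
]

# Write one JSON object per line, the format make_graph() reads back.
with open("events.json", "w") as f:
    for event in events:
        f.write(json.dumps(event) + "\n")
```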
@@ -1,20 +0,0 @@
-This directory contains some sample monitoring config for using the
-'Prometheus' monitoring server against synapse.
-
-To use it, first install prometheus by following the instructions at
-
-  http://prometheus.io/
-
-Then add a new job to the main prometheus.conf file:
-
-  job: {
-    name: "synapse"
-
-    target_group: {
-      target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
-    }
-  }
-
-Metrics are disabled by default when running synapse; they must be enabled
-with the 'enable-metrics' option, either in the synapse config file or as a
-command-line option.
@@ -1,395 +0,0 @@
-{{ template "head" . }}
-
-{{ template "prom_content_head" . }}
-<h1>System Resources</h1>
-
-<h3>CPU</h3>
-<div id="process_resource_utime"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#process_resource_utime"),
-  expr: "rate(process_cpu_seconds_total[2m]) * 100",
-  name: "[[job]]",
-  min: 0,
-  max: 100,
-  renderer: "line",
-  height: 150,
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "%",
-  yTitle: "CPU Usage"
-})
-</script>
-
-<h3>Memory</h3>
-<div id="process_resource_maxrss"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#process_resource_maxrss"),
-  expr: "process_psutil_rss:max",
-  name: "Maxrss",
-  min: 0,
-  renderer: "line",
-  height: 150,
-  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yUnits: "bytes",
-  yTitle: "Usage"
-})
-</script>
-
-<h3>File descriptors</h3>
-<div id="process_fds"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#process_fds"),
-  expr: "process_open_fds{job='synapse'}",
-  name: "FDs",
-  min: 0,
-  renderer: "line",
-  height: 150,
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "",
-  yTitle: "Descriptors"
-})
-</script>
-
-<h1>Reactor</h1>
-
-<h3>Total reactor time</h3>
-<div id="reactor_total_time"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#reactor_total_time"),
-  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
-  name: "time",
-  max: 1,
-  min: 0,
-  renderer: "area",
-  height: 150,
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "s/s",
-  yTitle: "Usage"
-})
-</script>
-
-<h3>Average reactor tick time</h3>
-<div id="reactor_average_time"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#reactor_average_time"),
-  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
-  name: "time",
-  min: 0,
-  renderer: "line",
-  height: 150,
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "s",
-  yTitle: "Time"
-})
-</script>
-
-<h3>Pending calls per tick</h3>
-<div id="reactor_pending_calls"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#reactor_pending_calls"),
-  expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
-  name: "calls",
-  min: 0,
-  renderer: "line",
-  height: 150,
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yTitle: "Pending Cals"
-})
-</script>
-
-<h1>Storage</h1>
-
-<h3>Queries</h3>
-<div id="synapse_storage_query_time"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_storage_query_time"),
-  expr: "rate(synapse_storage_query_time:count[2m])",
-  name: "[[verb]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yUnits: "queries/s",
-  yTitle: "Queries"
-})
-</script>
-
-<h3>Transactions</h3>
-<div id="synapse_storage_transaction_time"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_storage_transaction_time"),
-  expr: "rate(synapse_storage_transaction_time:count[2m])",
-  name: "[[desc]]",
-  min: 0,
-  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yUnits: "txn/s",
-  yTitle: "Transactions"
-})
-</script>
-
-<h3>Transaction execution time</h3>
-<div id="synapse_storage_transactions_time_msec"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_storage_transactions_time_msec"),
-  expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
-  name: "[[desc]]",
-  min: 0,
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "s/s",
-  yTitle: "Usage"
-})
-</script>
-
-<h3>Database scheduling latency</h3>
-<div id="synapse_storage_schedule_time"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_storage_schedule_time"),
-  expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
-  name: "Total latency",
-  min: 0,
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "s/s",
-  yTitle: "Usage"
-})
-</script>
-
-<h3>Cache hit ratio</h3>
-<div id="synapse_cache_ratio"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_cache_ratio"),
-  expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
-  name: "[[name]]",
-  min: 0,
-  max: 100,
-  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yUnits: "%",
-  yTitle: "Percentage"
-})
-</script>
-
-<h3>Cache size</h3>
-<div id="synapse_cache_size"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_cache_size"),
-  expr: "synapse_util_caches_cache:size",
-  name: "[[name]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yUnits: "",
-  yTitle: "Items"
-})
-</script>
-
-<h1>Requests</h1>
-
-<h3>Requests by Servlet</h3>
-<div id="synapse_http_server_requests_servlet"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_http_server_requests_servlet"),
-  expr: "rate(synapse_http_server_requests:servlet[2m])",
-  name: "[[servlet]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "req/s",
-  yTitle: "Requests"
-})
-</script>
-<h4> (without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
-<div id="synapse_http_server_requests_servlet_minus_events"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_http_server_requests_servlet_minus_events"),
-  expr: "rate(synapse_http_server_requests:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
-  name: "[[servlet]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "req/s",
-  yTitle: "Requests"
-})
-</script>
-
-<h3>Average response times</h3>
-<div id="synapse_http_server_response_time_avg"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_http_server_response_time_avg"),
-  expr: "rate(synapse_http_server_response_time:total[2m]) / rate(synapse_http_server_response_time:count[2m]) / 1000",
-  name: "[[servlet]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "s/req",
-  yTitle: "Response time"
-})
-</script>
-
-<h3>All responses by code</h3>
-<div id="synapse_http_server_responses"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_http_server_responses"),
-  expr: "rate(synapse_http_server_responses[2m])",
-  name: "[[method]] / [[code]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "req/s",
-  yTitle: "Requests"
-})
-</script>
-
-<h3>Error responses by code</h3>
-<div id="synapse_http_server_responses_err"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_http_server_responses_err"),
-  expr: "rate(synapse_http_server_responses{code=~\"[45]..\"}[2m])",
-  name: "[[method]] / [[code]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "req/s",
-  yTitle: "Requests"
-})
-</script>
-
-
-<h3>CPU Usage</h3>
-<div id="synapse_http_server_response_ru_utime"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_http_server_response_ru_utime"),
-  expr: "rate(synapse_http_server_response_ru_utime:total[2m])",
-  name: "[[servlet]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "s/s",
-  yTitle: "CPU Usage"
-})
-</script>
-
-
-<h3>DB Usage</h3>
-<div id="synapse_http_server_response_db_txn_duration"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
-  expr: "rate(synapse_http_server_response_db_txn_duration:total[2m])",
-  name: "[[servlet]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "s/s",
-  yTitle: "DB Usage"
-})
-</script>
-
-
-<h3>Average event send times</h3>
-<div id="synapse_http_server_send_time_avg"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_http_server_send_time_avg"),
-  expr: "rate(synapse_http_server_response_time:total{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_time:count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
-  name: "[[servlet]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "s/req",
-  yTitle: "Response time"
-})
-</script>
-
-<h1>Federation</h1>
-
-<h3>Sent Messages</h3>
-<div id="synapse_federation_client_sent"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_federation_client_sent"),
-  expr: "rate(synapse_federation_client_sent[2m])",
-  name: "[[type]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "req/s",
-  yTitle: "Requests"
-})
-</script>
-
-<h3>Received Messages</h3>
-<div id="synapse_federation_server_received"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_federation_server_received"),
-  expr: "rate(synapse_federation_server_received[2m])",
-  name: "[[type]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "req/s",
-  yTitle: "Requests"
-})
-</script>
-
-<h3>Pending</h3>
-<div id="synapse_federation_transaction_queue_pending"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_federation_transaction_queue_pending"),
-  expr: "synapse_federation_transaction_queue_pending",
-  name: "[[type]]",
-  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yUnits: "",
-  yTitle: "Units"
-})
-</script>
-
-<h1>Clients</h1>
-
-<h3>Notifiers</h3>
-<div id="synapse_notifier_listeners"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_notifier_listeners"),
-  expr: "synapse_notifier_listeners",
-  name: "listeners",
-  min: 0,
-  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yUnits: "",
-  yTitle: "Listeners"
-})
-</script>
-
-<h3>Notified Events</h3>
-<div id="synapse_notifier_notified_events"></div>
-<script>
-new PromConsole.Graph({
-  node: document.querySelector("#synapse_notifier_notified_events"),
-  expr: "rate(synapse_notifier_notified_events[2m])",
-  name: "events",
-  yAxisFormatter: PromConsole.NumberFormatter.humanize,
-  yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "events/s",
-  yTitle: "Event rate"
-})
-</script>
-
-{{ template "prom_content_tail" . }}
-
-{{ template "tail" }}
@@ -1,21 +0,0 @@
-synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
-synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
-
-synapse_http_server_requests:method{servlet=""} = sum(synapse_http_server_requests) by (method)
-synapse_http_server_requests:servlet{method=""} = sum(synapse_http_server_requests) by (servlet)
-
-synapse_http_server_requests:total{servlet=""} = sum(synapse_http_server_requests:by_method) by (servlet)
-
-synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
-synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
-
-synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
-synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
-synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)
-
-synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
-synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
-synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)
-
-synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
-synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0
@@ -1,5 +1,5 @@
 # This assumes that Synapse has been installed as a system package
-# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
+# (e.g. https://aur.archlinux.org/packages/matrix-synapse/ for ArchLinux)
 # rather than in a user home directory or similar under virtualenv.
 
 [Unit]
@@ -10,9 +10,7 @@ Type=simple
 User=synapse
 Group=synapse
 WorkingDirectory=/var/lib/synapse
-ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
-ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml
-
+ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml
 [Install]
 WantedBy=multi-user.target
 
@@ -126,26 +126,12 @@ sub on_unknown_event
         if (!$bridgestate->{$room_id}->{gathered_candidates}) {
             $bridgestate->{$room_id}->{gathered_candidates} = 1;
             my $offer = $bridgestate->{$room_id}->{offer};
-            my $candidate_block = {
-                audio => '',
-                video => '',
-            };
+            my $candidate_block = "";
             foreach (@{$event->{content}->{candidates}}) {
-                if ($_->{sdpMid}) {
-                    $candidate_block->{$_->{sdpMid}} .= "a=" . $_->{candidate} . "\r\n";
-                }
-                else {
-                    $candidate_block->{audio} .= "a=" . $_->{candidate} . "\r\n";
-                    $candidate_block->{video} .= "a=" . $_->{candidate} . "\r\n";
-                }
+                $candidate_block .= "a=" . $_->{candidate} . "\r\n";
             }
+            # XXX: collate using the right m= line - for now assume audio call
+            $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/;
-            # XXX: assumes audio comes first
-            #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{audio}/;
-            #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{video}/;
-
-            $offer =~ s/(m=video)/$candidate_block->{audio}$1/;
-            $offer =~ s/(.$)/$1\n$candidate_block->{video}$1/;
-
             my $f = send_verto_json_request("verto.invite", {
                 "sdp" => $offer,
@@ -186,18 +172,22 @@ sub on_room_message
     warn "[Matrix] in $room_id: $from: " . $content->{body} . "\n";
 }
 
+my $verto_connecting = $loop->new_future;
+$bot_verto->connect(
+    %{ $CONFIG{"verto-bot"} },
+    on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
+    on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
+)->then( sub {
+    warn("[Verto] connected to websocket");
+    $verto_connecting->done($bot_verto) if not $verto_connecting->is_done;
+});
+
 Future->needs_all(
     $bot_matrix->login( %{ $CONFIG{"matrix-bot"} } )->then( sub {
         $bot_matrix->start;
     }),
 
-    $bot_verto->connect(
-        %{ $CONFIG{"verto-bot"} },
-        on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
-        on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
-    )->on_done( sub {
-        warn("[Verto] connected to websocket");
-    }),
+    $verto_connecting,
 )->get;
 
 $loop->attach_signal(
@@ -11,4 +11,7 @@ requires 'YAML', 0;
 requires 'JSON', 0;
 requires 'Getopt::Long', 0;
 
+on 'test' => sub {
+    requires 'Test::More', '>= 0.98';
+};
 
@@ -11,9 +11,7 @@ if [ -f $PID_FILE ]; then
     exit 1
 fi
 
-for port in 8080 8081 8082; do
-    rm -rf $DIR/$port
-    rm -rf $DIR/media_store.$port
-done
+find "$DIR" -name "*.log" -delete
+find "$DIR" -name "*.db" -delete
 
 rm -rf $DIR/etc
@@ -8,6 +8,14 @@ cd "$DIR/.."
 
 mkdir -p demo/etc
 
+# Check the --no-rate-limit param
+PARAMS=""
+if [ $# -eq 1 ]; then
+    if [ $1 = "--no-rate-limit" ]; then
+        PARAMS="--rc-messages-per-second 1000 --rc-message-burst-count 1000"
+    fi
+fi
+
 export PYTHONPATH=$(readlink -f $(pwd))
 
 
@@ -23,27 +31,9 @@ for port in 8080 8081 8082; do
     #rm $DIR/etc/$port.config
     python -m synapse.app.homeserver \
         --generate-config \
+        --enable_registration \
        -H "localhost:$https_port" \
        --config-path "$DIR/etc/$port.config" \
-        --report-stats no
-
-    # Check script parameters
-    if [ $# -eq 1 ]; then
-        if [ $1 = "--no-rate-limit" ]; then
-            # Set high limits in config file to disable rate limiting
-            perl -p -i -e 's/rc_messages_per_second.*/rc_messages_per_second: 1000/g' $DIR/etc/$port.config
-            perl -p -i -e 's/rc_message_burst_count.*/rc_message_burst_count: 1000/g' $DIR/etc/$port.config
-        fi
-    fi
-
-    perl -p -i -e 's/^enable_registration:.*/enable_registration: true/g' $DIR/etc/$port.config
-
-    if ! grep -F "full_twisted_stacktraces" -q $DIR/etc/$port.config; then
-        echo "full_twisted_stacktraces: true" >> $DIR/etc/$port.config
-    fi
-    if ! grep -F "report_stats" -q $DIR/etc/$port.config ; then
-        echo "report_stats: false" >> $DIR/etc/$port.config
-    fi
 
     python -m synapse.app.homeserver \
         --config-path "$DIR/etc/$port.config" \
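The removed block above rewrote each generated config with perl one-liners to disable rate limiting. The same rewrite can be expressed in Python; a rough sketch, with the config path as a placeholder:

```python
import re

config_path = "demo/etc/8080.config"  # placeholder path to a generated config

with open(config_path) as f:
    text = f.read()

# Mirror the perl substitutions: set very high limits so rate limiting is
# effectively disabled.
text = re.sub(r"rc_messages_per_second.*", "rc_messages_per_second: 1000", text)
text = re.sub(r"rc_message_burst_count.*", "rc_message_burst_count: 1000", text)

with open(config_path, "w") as f:
    f.write(text)
```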
@@ -10,13 +10,13 @@ https://developers.google.com/recaptcha/
 
 Setting ReCaptcha Keys
 ----------------------
 The keys are a config option on the home server config. If they are not
-visible, you can generate them via --generate-config. Set the following value::
+visible, you can generate them via --generate-config. Set the following value:
 
   recaptcha_public_key: YOUR_PUBLIC_KEY
   recaptcha_private_key: YOUR_PRIVATE_KEY
 
-In addition, you MUST enable captchas via::
+In addition, you MUST enable captchas via:
 
   enable_registration_captcha: true
 
@@ -25,5 +25,7 @@ Configuring IP used for auth
 The ReCaptcha API requires that the IP address of the user who solved the
 captcha is sent. If the client is connecting through a proxy or load balancer,
 it may be required to use the X-Forwarded-For (XFF) header instead of the origin
-IP address. This can be configured using the x_forwarded directive in the
-listeners section of the homeserver.yaml configuration file.
+IP address. This can be configured as an option on the home server like so:
+
+  captcha_ip_origin_is_x_forwarded: true
+
@@ -1,12 +0,0 @@
-Admin APIs
-==========
-
-This directory includes documentation for the various synapse specific admin
-APIs available.
-
-Only users that are server admins can use these APIs. A user can be marked as a
-server admin by updating the database directly, e.g.:
-
-``UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'``
-
-Restarting may be required for the changes to register.
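The UPDATE statement quoted above can also be run from Python; a minimal sketch assuming the default SQLite database file homeserver.db and a placeholder user ID:

```python
import sqlite3

# Mark a user as a server admin, mirroring the UPDATE statement above.
conn = sqlite3.connect("homeserver.db")
conn.execute("UPDATE users SET admin = 1 WHERE name = ?", ("@foo:bar.com",))
conn.commit()
conn.close()
```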
@@ -1,15 +0,0 @@
-Purge History API
-=================
-
-The purge history API allows server admins to purge historic events from their
-database, reclaiming disk space.
-
-Depending on the amount of history being purged a call to the API may take
-several minutes or longer. During this period users will not be able to
-paginate further back in the room from the point being purged from.
-
-The API is simply:
-
-``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``
-
-including an ``access_token`` of a server admin.
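For illustration, the purge endpoint described above could be called like this from Python; a hedged sketch assuming the requests library is available, with the homeserver URL, room ID, event ID and admin access token all placeholders:

```python
import requests
from urllib.parse import quote

base = "https://localhost:8448"      # placeholder homeserver URL
room_id = "!room:example.com"        # placeholder room ID
event_id = "$event:example.com"      # placeholder event ID
token = "ADMIN_ACCESS_TOKEN"         # placeholder admin access token

# POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>
path = f"/_matrix/client/r0/admin/purge_history/{quote(room_id)}/{quote(event_id)}"
resp = requests.post(base + path, params={"access_token": token}, json={})
print(resp.status_code, resp.text)
```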
@@ -1,17 +0,0 @@
-Purge Remote Media API
-======================
-
-The purge remote media API allows server admins to purge old cached remote
-media.
-
-The API is::
-
-    POST /_matrix/client/r0/admin/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>
-
-    {}
-
-Which will remove all cached media that was last accessed before
-``<unix_timestamp_in_ms>``.
-
-If the user re-requests purged remote media, synapse will re-request the media
-from the originating server.
@@ -1,73 +0,0 @@

Query Account
=============

This API returns information about a specific user account.

The api is::

    GET /_matrix/client/r0/admin/whois/<user_id>

including an ``access_token`` of a server admin.

It returns a JSON body like the following:

.. code:: json

    {
        "user_id": "<user_id>",
        "devices": {
            "": {
                "sessions": [
                    {
                        "connections": [
                            {
                                "ip": "1.2.3.4",
                                "last_seen": 1417222374433,
                                "user_agent": "Mozilla/5.0 ..."
                            },
                            {
                                "ip": "1.2.3.10",
                                "last_seen": 1417222374500,
                                "user_agent": "Dalvik/2.1.0 ..."
                            }
                        ]
                    }
                ]
            }
        }
    }

``last_seen`` is measured in milliseconds since the Unix epoch.

Deactivate Account
==================

This API deactivates an account. It removes active access tokens, resets the
password, and deletes third-party IDs (to prevent the user requesting a
password reset).

The api is::

    POST /_matrix/client/r0/admin/deactivate/<user_id>

including an ``access_token`` of a server admin, and an empty request body.


Reset password
==============

Changes the password of another user.

The api is::

    POST /_matrix/client/r0/admin/reset_password/<user_id>

with a body of:

.. code:: json

    {
        "new_password": "<secret>"
    }

including an ``access_token`` of a server admin.
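For illustration only (not part of the original document), the reset password call could
be made from Python with the third-party ``requests`` package; the homeserver URL, user
ID and token are placeholders:

.. code:: python

    from urllib.parse import quote

    import requests  # third-party HTTP client, assumed to be installed

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ADMIN_TOKEN = "MDAxexampletoken"           # placeholder
    USER_ID = "@alice:example.com"             # placeholder

    resp = requests.post(
        "%s/_matrix/client/r0/admin/reset_password/%s"
        % (HOMESERVER, quote(USER_ID, safe="")),
        params={"access_token": ADMIN_TOKEN},
        json={"new_password": "correct horse battery staple"},
    )
    resp.raise_for_status()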
@@ -32,4 +32,5 @@ The format of the AS configuration file is as follows:

See the spec_ for further details on how application services work.

.. _spec: https://matrix.org/docs/spec/application_service/unstable.html
|||||||
@@ -43,10 +43,7 @@ Basically, PEP8
|
|||||||
together, or want to deliberately extend or preserve vertical/horizontal
|
together, or want to deliberately extend or preserve vertical/horizontal
|
||||||
space)
|
space)
|
||||||
|
|
||||||
Comments should follow the `google code style <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
|
Comments should follow the google code style. This is so that we can generate
|
||||||
This is so that we can generate documentation with
|
documentation with sphinx (http://sphinxcontrib-napoleon.readthedocs.org/en/latest/)
|
||||||
`sphinx <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
|
|
||||||
`examples <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
|
|
||||||
in the sphinx documentation.
|
|
||||||
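For illustration (this example is not part of the original style guide), here is a
hypothetical helper documented in the Google style described above, which
sphinx-napoleon can render:

.. code:: python

    _ROOM_NAMES = {"!abc:example.com": "Synapse Admins"}  # toy data for the example

    def get_room_name(room_id, default=None):
        """Look up the display name of a room.

        Args:
            room_id (str): The room ID to look up, e.g. "!abc:example.com".
            default (str|None): Value to return if the room has no name set.

        Returns:
            str|None: The room's display name, or ``default`` if it has none.
        """
        return _ROOM_NAMES.get(room_id, default)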

Code should pass pep8 --max-line-length=100 without any warnings.
@@ -1,446 +0,0 @@

Log contexts
============

.. contents::

To help track the processing of individual requests, synapse uses a
'log context' to track which request it is handling at any given moment. This
is done via a thread-local variable; a ``logging.Filter`` is then used to fish
the information back out of the thread-local variable and add it to each log
record.

Logcontexts are also used for CPU and database accounting, so that we can track
which requests were responsible for high CPU use or database activity.

The ``synapse.util.logcontext`` module provides facilities for managing the
current log context (as well as providing the ``LoggingContextFilter`` class).

Deferreds make the whole thing complicated, so this document describes how it
all works, and how to write code which follows the rules.

Logcontexts without Deferreds
-----------------------------

In the absence of any Deferred voodoo, things are simple enough. As with any
code of this nature, the rule is that our function should leave things as it
found them:

.. code:: python

    from synapse.util import logcontext   # omitted from future snippets

    def handle_request(request_id):
        request_context = logcontext.LoggingContext()

        calling_context = logcontext.LoggingContext.current_context()
        logcontext.LoggingContext.set_current_context(request_context)
        try:
            request_context.request = request_id
            do_request_handling()
            logger.debug("finished")
        finally:
            logcontext.LoggingContext.set_current_context(calling_context)

    def do_request_handling():
        logger.debug("phew")  # this will be logged against request_id


LoggingContext implements the context management methods, so the above can be
written much more succinctly as:

.. code:: python

    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            do_request_handling()
            logger.debug("finished")

    def do_request_handling():
        logger.debug("phew")


Using logcontexts with Deferreds
--------------------------------

Deferreds --- and in particular, ``defer.inlineCallbacks`` --- break
the linear flow of code so that there is no longer a single entry point where
we should set the logcontext and a single exit point where we should remove it.

Consider the example above, where ``do_request_handling`` needs to do some
blocking operation, and returns a deferred:

.. code:: python

    @defer.inlineCallbacks
    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            yield do_request_handling()
            logger.debug("finished")


In the above flow:

* The logcontext is set
* ``do_request_handling`` is called, and returns a deferred
* ``handle_request`` yields the deferred
* The ``inlineCallbacks`` wrapper of ``handle_request`` returns a deferred

So we have stopped processing the request (and will probably go on to start
processing the next), without clearing the logcontext.

To circumvent this problem, synapse code assumes that, wherever you have a
deferred, you will want to yield on it. To that end, wherever functions return
a deferred, we adopt the following conventions:

**Rules for functions returning deferreds:**

  * If the deferred is already complete, the function returns with the same
    logcontext it started with.
  * If the deferred is incomplete, the function clears the logcontext before
    returning; when the deferred completes, it restores the logcontext before
    running any callbacks.

That sounds complicated, but actually it means a lot of code (including the
example above) "just works". There are two cases:

* If ``do_request_handling`` returns a completed deferred, then the logcontext
  will still be in place. In this case, execution will continue immediately
  after the ``yield``; the "finished" line will be logged against the right
  context, and the ``with`` block restores the original context before we
  return to the caller.

* If the returned deferred is incomplete, ``do_request_handling`` clears the
  logcontext before returning. The logcontext is therefore clear when
  ``handle_request`` yields the deferred. At that point, the ``inlineCallbacks``
  wrapper adds a callback to the deferred, and returns another (incomplete)
  deferred to the caller, and it is safe to begin processing the next request.

  Once ``do_request_handling``'s deferred completes, it will reinstate the
  logcontext, before running the callback added by the ``inlineCallbacks``
  wrapper. That callback runs the second half of ``handle_request``, so again
  the "finished" line will be logged against the right context, and the
  ``with`` block restores the original context.

As an aside, it's worth noting that ``handle_request`` follows our rules -
though that only matters if the caller has its own logcontext which it cares
about.

The following sections describe pitfalls and helpful patterns when implementing
these rules.

Always yield your deferreds
---------------------------

Whenever you get a deferred back from a function, you should ``yield`` on it
as soon as possible. (Returning it directly to your caller is ok too, if you're
not doing ``inlineCallbacks``.) Do not pass go; do not do any logging; do not
call any other functions.

.. code:: python

    @defer.inlineCallbacks
    def fun():
        logger.debug("starting")
        yield do_some_stuff()       # just like this

        d = more_stuff()
        result = yield d            # also fine, of course

        defer.returnValue(result)

    def nonInlineCallbacksFun():
        logger.debug("just a wrapper really")
        return do_some_stuff()      # this is ok too - the caller will yield on
                                    # it anyway.

Provided this pattern is followed all the way back up the callchain to where
the logcontext was set, this will make things work out ok: provided
``do_some_stuff`` and ``more_stuff`` follow the rules above, then so will
``fun`` (as wrapped by ``inlineCallbacks``) and ``nonInlineCallbacksFun``.

It's all too easy to forget to ``yield``: for instance if we forgot that
``do_some_stuff`` returned a deferred, we might plough on regardless. This
leads to a mess; it will probably work itself out eventually, but not before
a load of stuff has been logged against the wrong context. (Normally, other
things will break, more obviously, if you forget to ``yield``, so this tends
not to be a major problem in practice.)

Of course sometimes you need to do something a bit fancier with your Deferreds
- not all code follows the linear A-then-B-then-C pattern. Notes on
implementing more complex patterns are in later sections.

Where you create a new Deferred, make it follow the rules
----------------------------------------------------------

Most of the time, a Deferred comes from another synapse function. Sometimes,
though, we need to make up a new Deferred, or we get a Deferred back from
external code. We need to make it follow our rules.

The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
which returns a deferred which will run its callbacks after a given number of
seconds. That might look like:

.. code:: python

    # not a logcontext-rules-compliant function
    def get_sleep_deferred(seconds):
        d = defer.Deferred()
        reactor.callLater(seconds, d.callback, None)
        return d

That doesn't follow the rules, but we can fix it by wrapping it with
``PreserveLoggingContext`` and ``yield``-ing on it:

.. code:: python

    @defer.inlineCallbacks
    def sleep(seconds):
        with PreserveLoggingContext():
            yield get_sleep_deferred(seconds)

This technique works equally for external functions which return deferreds,
or deferreds we have made ourselves.

You can also use ``logcontext.make_deferred_yieldable``, which just does the
boilerplate for you, so the above could be written:

.. code:: python

    def sleep(seconds):
        return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))


Fire-and-forget
---------------

Sometimes you want to fire off a chain of execution, but not wait for its
result. That might look a bit like this:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        # *don't* do this
        background_operation()

        logger.debug("Request handling complete")

    @defer.inlineCallbacks
    def background_operation():
        yield first_background_step()
        logger.debug("Completed first step")
        yield second_background_step()
        logger.debug("Completed second step")

The above code does a couple of steps in the background after
``do_request_handling`` has finished. The log lines are still logged against
the ``request_context`` logcontext, which may or may not be desirable. There
are two big problems with the above, however. The first problem is that, if
``background_operation`` returns an incomplete Deferred, it will expect its
caller to ``yield`` immediately, so will have cleared the logcontext. In this
example, that means that 'Request handling complete' will be logged without any
context.

The second problem, which is potentially even worse, is that when the Deferred
returned by ``background_operation`` completes, it will restore the original
logcontext. There is nothing waiting on that Deferred, so the logcontext will
leak into the reactor and possibly get attached to some arbitrary future
operation.

There are two potential solutions to this.

One option is to surround the call to ``background_operation`` with a
``PreserveLoggingContext`` call. That will reset the logcontext before
starting ``background_operation`` (so the context restored when the deferred
completes will be the empty logcontext), and will restore the current
logcontext before continuing the foreground process:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        # start background_operation off in the empty logcontext, to
        # avoid leaking the current context into the reactor.
        with PreserveLoggingContext():
            background_operation()

        # this will now be logged against the request context
        logger.debug("Request handling complete")

Obviously that option means that the operations done in
``background_operation`` would not be logged against a logcontext (though
that might be fixed by setting a different logcontext via a ``with
LoggingContext(...)`` in ``background_operation``).

The second option is to use ``logcontext.preserve_fn``, which wraps a function
so that it doesn't reset the logcontext even when it returns an incomplete
deferred, and adds a callback to the returned deferred to reset the
logcontext. In other words, it turns a function that follows the Synapse rules
about logcontexts and Deferreds into one which behaves more like an external
function --- the opposite operation to that described in the previous section.
It can be used like this:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        logcontext.preserve_fn(background_operation)()

        # this will now be logged against the request context
        logger.debug("Request handling complete")

XXX: I think ``preserve_context_over_fn`` is supposed to do the first option,
but the fact that it does ``preserve_context_over_deferred`` on its results
means that its use is fraught with difficulty.

Passing synapse deferreds into third-party functions
-----------------------------------------------------

A typical example of this is where we want to collect together two or more
deferreds via ``defer.gatherResults``:

.. code:: python

    d1 = operation1()
    d2 = operation2()
    d3 = defer.gatherResults([d1, d2])

This is really a variation of the fire-and-forget problem above, in that we are
firing off ``d1`` and ``d2`` without yielding on them. The difference
is that we now have third-party code attached to their callbacks. Anyway either
technique given in the `Fire-and-forget`_ section will work.

Of course, the new Deferred returned by ``gatherResults`` needs to be wrapped
in order to make it follow the logcontext rules before we can yield it, as
described in `Where you create a new Deferred, make it follow the rules`_.

So, option one: reset the logcontext before starting the operations to be
gathered:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        with PreserveLoggingContext():
            d1 = operation1()
            d2 = operation2()
            result = yield defer.gatherResults([d1, d2])

In this case particularly, though, option two, of using
``logcontext.preserve_fn``, almost certainly makes more sense, so that
``operation1`` and ``operation2`` are both logged against the original
logcontext. This looks like:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        d1 = logcontext.preserve_fn(operation1)()
        d2 = logcontext.preserve_fn(operation2)()

        with PreserveLoggingContext():
            result = yield defer.gatherResults([d1, d2])


Was all this really necessary?
------------------------------

The conventions used work fine for a linear flow where everything happens in
series via ``defer.inlineCallbacks`` and ``yield``, but are certainly tricky to
follow for any more exotic flows. It's hard not to wonder if we could have done
something else.

We're not going to rewrite Synapse now, so the following is entirely of
academic interest, but I'd like to record some thoughts on an alternative
approach.

I briefly prototyped some code following an alternative set of rules. I think
it would work, but I certainly didn't get as far as thinking how it would
interact with concepts as complicated as the cache descriptors.

My alternative rules were:

* functions always preserve the logcontext of their caller, whether or not they
  are returning a Deferred.

* Deferreds returned by synapse functions run their callbacks in the same
  context as the function was originally called in.

The main point of this scheme is that everywhere that sets the logcontext is
responsible for clearing it before returning control to the reactor.

So, for example, if you were the function which started a ``with
LoggingContext`` block, you wouldn't ``yield`` within it --- instead you'd start
off the background process, and then leave the ``with`` block to wait for it:

.. code:: python

    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            d = do_request_handling()

        def cb(r):
            logger.debug("finished")

        d.addCallback(cb)
        return d

(in general, mixing ``with LoggingContext`` blocks and
``defer.inlineCallbacks`` in the same function leads to slightly
counter-intuitive code, under this scheme).

Because we leave the original ``with`` block as soon as the Deferred is
returned (as opposed to waiting for it to be resolved, as we do today), the
logcontext is cleared before control passes back to the reactor; so if there is
some code within ``do_request_handling`` which needs to wait for a Deferred to
complete, there is no need for it to worry about clearing the logcontext before
doing so:

.. code:: python

    def handle_request():
        r = do_some_stuff()
        r.addCallback(do_some_more_stuff)
        return r

--- and provided ``do_some_stuff`` follows the rules of returning a Deferred which
runs its callbacks in the original logcontext, all is happy.

The business of a Deferred which runs its callbacks in the original logcontext
isn't hard to achieve --- we have it today, in the shape of
``logcontext._PreservingContextDeferred``:

.. code:: python

    def do_some_stuff():
        deferred = do_some_io()
        pcd = _PreservingContextDeferred(LoggingContext.current_context())
        deferred.chainDeferred(pcd)
        return pcd

It turns out that, thanks to the way that Deferreds chain together, we
automatically get the property of a context-preserving deferred with
``defer.inlineCallbacks``, provided the final Deferred the function ``yields``
on has that property. So we can just write:

.. code:: python

    @defer.inlineCallbacks
    def handle_request():
        yield do_some_stuff()
        yield do_some_more_stuff()

To conclude: I think this scheme would have worked equally well, with less
danger of messing it up, and probably made some more esoteric code easier to
write. But again --- changing the conventions of the entire Synapse codebase is
not a sensible option for the marginal improvement offered.
@@ -1,68 +1,50 @@

How to monitor Synapse metrics using Prometheus
===============================================

1. Install prometheus:

   Follow instructions at http://prometheus.io/docs/introduction/install/

2. Enable synapse metrics:

   Simply setting a (local) port number will enable it. Pick a port.
   prometheus itself defaults to 9090, so starting just above that for
   locally monitored services seems reasonable. E.g. 9092:

   Add to homeserver.yaml::

     metrics_port: 9092

   Also ensure that ``enable_metrics`` is set to ``True``.

   Restart synapse.

3. Add a prometheus target for synapse.

   It needs to set the ``metrics_path`` to a non-default value (under ``scrape_configs``)::

    - job_name: "synapse"
      metrics_path: "/_synapse/metrics"
      static_configs:
        - targets: ["my.server.here:9092"]

   If your prometheus is older than 1.5.2, you will need to replace
   ``static_configs`` in the above with ``target_groups``.

   Restart prometheus.
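As a quick sanity check before relying on the scrape (this snippet is not part of the
original howto, and assumes the metrics listener from step 2 is on localhost port
9092), you can fetch the metrics page directly:

.. code:: python

    from urllib.request import urlopen  # Python 3 standard library

    # Fetch the raw prometheus text exposition from the metrics listener.
    body = urlopen("http://localhost:9092/_synapse/metrics").read().decode("utf-8")

    # Print a few process-level metrics to confirm scraping will work.
    for line in body.splitlines():
        if line.startswith("process_cpu") or line.startswith("process_open_fds"):
            print(line)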
Standard Metric Names
---------------------

As of synapse version 0.18.2, the format of the process-wide metrics has been
changed to fit prometheus standard naming conventions. Additionally the units
have been changed to seconds, from milliseconds.

================================== =============================
New name                           Old name
---------------------------------- -----------------------------
process_cpu_user_seconds_total     process_resource_utime / 1000
process_cpu_system_seconds_total   process_resource_stime / 1000
process_open_fds (no 'type' label) process_fds
================================== =============================

The python-specific counts of garbage collector performance have been renamed.

=========================== ======================
New name                    Old name
--------------------------- ----------------------
python_gc_time              reactor_gc_time
python_gc_unreachable_total reactor_gc_unreachable
python_gc_counts            reactor_gc_counts
=========================== ======================

The twisted-specific reactor metrics have been renamed.

==================================== =====================
New name                             Old name
------------------------------------ ---------------------
python_twisted_reactor_pending_calls reactor_pending_calls
python_twisted_reactor_tick_time     reactor_tick_time
==================================== =====================
@@ -1,8 +1,6 @@

Using Postgres
--------------

Postgres version 9.4 or later is known to work.

Set up database
===============

@@ -20,8 +18,8 @@ encoding use, e.g.::

This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).

Set up client in Debian/Ubuntu
==============================

Postgres support depends on the postgres python connector ``psycopg2``. In the
virtual env::

@@ -29,19 +27,6 @@ virtual env::

    sudo apt-get install libpq-dev
    pip install psycopg2

Set up client in RHEL/CentOS 7
==============================

Make sure you have the appropriate version of postgres-devel installed. For a
postgres 9.4, use the postgres 9.4 packages from
`here <https://wiki.postgresql.org/wiki/YUM_Installation>`_.

As with Debian/Ubuntu, postgres support depends on the postgres python connector
``psycopg2``. In the virtual env::

    sudo yum install postgresql-devel libpqxx-devel.x86_64
    export PATH=/usr/pgsql-9.4/bin/:$PATH
    pip install psycopg2
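On either platform, a short connection test is a convenient way to confirm that
``psycopg2`` and the database are usable from the virtualenv; a minimal sketch, assuming
the ``synapse`` database and ``synapse_user`` user from the *Set up database* section
and a placeholder password:

.. code:: python

    import psycopg2  # the postgres connector installed above

    # Placeholder credentials: adjust to match the database and user you created.
    conn = psycopg2.connect(
        dbname="synapse",
        user="synapse_user",
        password="CHANGEME",
        host="localhost",
    )
    cur = conn.cursor()
    cur.execute("SELECT version()")
    print(cur.fetchone()[0])
    conn.close()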
Synapse config
==============

@@ -70,8 +55,9 @@ Porting from SQLite

Overview
~~~~~~~~

The script ``synapse_port_db`` allows porting an existing synapse server
backed by SQLite to using PostgreSQL. This is done as a two-phase process:

1. Copy the existing SQLite database to a separate location (while the server
   is down) and run the port script against that offline database.

@@ -100,7 +86,8 @@ Assuming your new config file (as described in the section *Synapse config*)

is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at
``homeserver.db.snapshot`` then simply run::

    synapse_port_db --sqlite-database homeserver.db.snapshot \
        --postgres-config homeserver-postgres.yaml

The flag ``--curses`` displays a coloured curses progress UI.

@@ -113,10 +100,11 @@ To complete the conversion shut down the synapse server and run the port

script one last time, e.g. if the SQLite database is at ``homeserver.db``
run::

    synapse_port_db --sqlite-database homeserver.db \
        --postgres-config homeserver-postgres.yaml

Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file ``homeserver-postgres.yaml`` (i.e. rename it to
``homeserver.yaml``) and restart synapse. Synapse should now be running against
PostgreSQL.
@@ -1,40 +0,0 @@

Replication Architecture
========================

Motivation
----------

We'd like to be able to split some of the work that synapse does into multiple
python processes. In theory multiple synapse processes could share a single
postgresql database and we'd scale up by running more synapse processes.
However much of synapse assumes that only one process is interacting with the
database, both for assigning unique identifiers when inserting into tables,
notifying components about new updates, and for invalidating its caches.

So running multiple copies of the current code isn't an option. One way to
run multiple processes would be to have a single writer process and multiple
reader processes connected to the same database. In order to do this we'd need
a way for the reader process to invalidate its in-memory caches when an update
happens on the writer. One way to do this is for the writer to present an
append-only log of updates which the readers can consume to invalidate their
caches and to push updates to listening clients or pushers.

Synapse already stores much of its data as an append-only log so that it can
correctly respond to /sync requests, so the amount of code changes needed to
expose the append-only log to the readers should be fairly minimal.

Architecture
------------

The Replication Protocol
~~~~~~~~~~~~~~~~~~~~~~~~

See ``tcp_replication.rst``


The Slaved DataStore
~~~~~~~~~~~~~~~~~~~~

There are read-only versions of the synapse storage layer in
``synapse/replication/slave/storage`` that use the response of the replication
API to invalidate their caches.
@@ -1,223 +0,0 @@

TCP Replication
===============

Motivation
----------

Previously the workers used an HTTP long poll mechanism to get updates from the
master, which had the problem of causing a lot of duplicate work on the server.
This TCP protocol replaces those APIs with the aim of increased efficiency.


Overview
--------

The protocol is based on fire-and-forget, line-based commands. An example flow
would be (where '>' indicates master to worker and '<' worker to master flows)::

    > SERVER example.com
    < REPLICATE events 53
    > RDATA events 54 ["$foo1:bar.com", ...]
    > RDATA events 55 ["$foo4:bar.com", ...]

The example shows the server accepting a new connection and sending its identity
with the ``SERVER`` command, followed by the client asking to subscribe to the
``events`` stream from the token ``53``. The server then periodically sends ``RDATA``
commands which have the format ``RDATA <stream_name> <token> <row>``, where the
format of ``<row>`` is defined by the individual streams.

Error reporting happens by either the client or server sending an `ERROR`
command, and usually the connection will be closed.

Since the protocol is a simple line-based one, it's possible to manually connect to
the server using a tool like netcat. A few things should be noted when manually
using the protocol:

* When subscribing to a stream using ``REPLICATE``, the special token ``NOW`` can
  be used to get all future updates. The special stream name ``ALL`` can be used
  with ``NOW`` to subscribe to all available streams.
* The federation stream is only available if federation sending has been
  disabled on the main process.
* The server will only time out connections that have sent a ``PING`` command.
  If a ping is sent then the connection will be closed if no further commands
  are received within 15s. Both the client and server protocol implementations
  will send an initial PING on connection and ensure at least one command every
  5s is sent (not necessarily ``PING``).
* ``RDATA`` commands *usually* include a numeric token, however if the stream
  has multiple rows to replicate per token the server will send multiple
  ``RDATA`` commands, with all but the last having a token of ``batch``. See
  the documentation on ``commands.RdataCommand`` for further details.


Architecture
------------

The basic structure of the protocol is line based, where the initial word of
each line specifies the command. The rest of the line is parsed based on the
command. For example, the `RDATA` command is defined as::

    RDATA <stream_name> <token> <row_json>

(Note that `<row_json>` may contain spaces, but cannot contain newlines.)

Blank lines are ignored.


Keep alives
~~~~~~~~~~~

Both sides are expected to send at least one command every 5s or so, and
should send a ``PING`` command if necessary. If either side does not receive a
command within e.g. 15s then the connection should be closed.

Because the server may be connected to manually using e.g. netcat, the timeouts
aren't enabled until an initial ``PING`` command is seen. Both the client and
server implementations below send a ``PING`` command immediately on connection to
ensure the timeouts are enabled.

This ensures that both sides can quickly realize if the tcp connection has gone
and handle the situation appropriately.


Start up
~~~~~~~~

When a new connection is made, the server:

* Sends a ``SERVER`` command, which includes the identity of the server, allowing
  the client to detect if it's connected to the expected server
* Sends a ``PING`` command as above, to enable the client to time out connections
  promptly.

The client:

* Sends a ``NAME`` command, allowing the server to associate a human friendly
  name with the connection. This is optional.
* Sends a ``PING`` as above
* For each stream the client wishes to subscribe to it sends a ``REPLICATE``
  with the stream_name and token it wants to subscribe from.
* On receipt of a ``SERVER`` command, checks that the server name matches the
  expected server name.


Error handling
~~~~~~~~~~~~~~

If either side detects an error it can send an ``ERROR`` command and close the
connection.

If the client side loses the connection to the server it should reconnect,
following the steps above.


Congestion
~~~~~~~~~~

If the server sends messages faster than the client can consume them the server
will first buffer a (fairly large) number of commands and then disconnect the
client. This ensures that we don't queue up an unbounded number of commands in
memory and gives us a potential opportunity to squawk loudly. When/if the client
recovers it can reconnect to the server and ask for missed messages.


Reliability
~~~~~~~~~~~

In general the replication stream should be considered an unreliable transport
since e.g. commands are not resent if the connection disappears.

The exception to that are the replication streams, i.e. RDATA commands, since
these include tokens which can be used to restart the stream on connection
errors.

The client should keep track of the token in the last RDATA command received
for each stream so that on reconnection it can start streaming from the correct
place. Note: not all RDATA have valid tokens due to batching. See
``RdataCommand`` for more details.


Example
~~~~~~~

An example interaction is shown below. Each line is prefixed with '>' or '<' to
indicate which side is sending, these are *not* included on the wire::

    * connection established *
    > SERVER localhost:8823
    > PING 1490197665618
    < NAME synapse.app.appservice
    < PING 1490197665618
    < REPLICATE events 1
    < REPLICATE backfill 1
    < REPLICATE caches 1
    > POSITION events 1
    > POSITION backfill 1
    > POSITION caches 1
    > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
    > RDATA events 14 ["$149019767112vOHxz:localhost:8823",
        "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
    < PING 1490197675618
    > ERROR server stopping
    * connection closed by server *

The ``POSITION`` command sent by the server is used to set the client's position
without needing to send data with the ``RDATA`` command.


An example of a batched set of ``RDATA`` is::

    > RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
    > RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]

In this case the client shouldn't advance their caches token until it sees the
last ``RDATA``.


List of commands
~~~~~~~~~~~~~~~~

The list of valid commands, with which side can send it: server (S) or client (C):

SERVER (S)
   Sent at the start to identify which server the client is talking to

RDATA (S)
   A single update in a stream

POSITION (S)
   The position of the stream has been updated

ERROR (S, C)
   There was an error

PING (S, C)
   Sent periodically to ensure the connection is still alive

NAME (C)
   Sent at the start by client to inform the server who they are

REPLICATE (C)
   Asks the server to replicate a given stream

USER_SYNC (C)
   A user has started or stopped syncing

FEDERATION_ACK (C)
   Acknowledge receipt of some federation data

REMOVE_PUSHER (C)
   Inform the server a pusher should be removed

INVALIDATE_CACHE (C)
   Inform the server a cache should be invalidated

SYNC (S, C)
   Used exclusively in tests


See ``synapse/replication/tcp/commands.py`` for a detailed description and the
format of each command.
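To make the line format concrete, here is a small illustrative sketch (not from the
original document) of splitting a single replication line into its command name and
payload; the real parsing lives in ``synapse/replication/tcp/commands.py``:

.. code:: python

    import json

    def parse_line(line):
        # Split one replication line into (command name, remainder of line).
        cmd, _, rest = line.strip().partition(" ")
        return cmd, rest

    # An RDATA line: stream name, token, then a JSON row (which may contain spaces).
    cmd, rest = parse_line('RDATA events 54 ["$foo1:bar.com", null]')
    stream_name, token, row_json = rest.split(" ", 2)
    print(cmd, stream_name, token, json.loads(row_json))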
@@ -9,35 +9,31 @@ the Home Server to generate credentials that are valid for use on the TURN

server through the use of a secret shared between the Home Server and the
TURN server.

This document describes how to install coturn
(https://github.com/coturn/coturn) which also supports the TURN REST API,
and integrate it with synapse.

coturn Setup
============

You may be able to set up coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process.

1. Check out coturn::

    git clone https://github.com/coturn/coturn.git coturn
    cd coturn

2. Configure it::

    ./configure

   You may need to install ``libevent2``: if so, you should do so
   in the way recommended by your operating system.
   You can ignore warnings about lack of database support: a
   database is unnecessary for this purpose.

3. Build and install it::

    make
    make install

4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant
   lines, with example values, are::

    lt-cred-mech

@@ -45,43 +41,19 @@ You may be able to setup coturn via your package manager, or set it up manually

    static-auth-secret=[your secret key here]
    realm=turn.myserver.org

   See turnserver.conf for explanations of the options.
   One way to generate the static-auth-secret is with pwgen::

    pwgen -s 64 1

5. Consider your security settings. TURN lets users request a relay
   which will connect to arbitrary IP addresses and ports. At the least
   we recommend::

       # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
       no-tcp-relay

       # don't let the relay ever try to connect to private IP address ranges within your network (if any)
       # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
       denied-peer-ip=10.0.0.0-10.255.255.255
       denied-peer-ip=192.168.0.0-192.168.255.255
       denied-peer-ip=172.16.0.0-172.31.255.255

       # special case the turn server itself so that client->TURN->TURN->client flows work
       allowed-peer-ip=10.0.0.1

       # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
       user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
       total-quota=1200

   Ideally coturn should refuse to relay traffic which isn't SRTP;
   see https://github.com/matrix-org/synapse/issues/2009

6. Ensure your firewall allows traffic into the TURN server on
   the ports you've configured it to listen on (remember to allow
   both TCP and UDP TURN traffic).

7. If you've configured coturn to support TLS/DTLS, generate or
   import your private key and certificate.

8. Start the turn server::

    bin/turnserver -o


@@ -106,19 +78,12 @@ Your home server configuration file needs the following extra keys:

   to refresh credentials. The TURN REST API specification recommends
   one day (86400000).

4. "turn_allow_guests": Whether to allow guest users to use the TURN
   server. This is enabled by default, as otherwise VoIP will not
   work reliably for guests. However, it does introduce a security risk
   as it lets guests connect to arbitrary endpoints without having gone
   through a CAPTCHA or similar to register a real account.

As an example, here is the relevant section of the config file for
matrix.org::

   turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
   turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
   turn_user_lifetime: 86400000
   turn_allow_guests: True
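For reference, the short-lived credentials that the home server hands to clients are
derived from ``turn_shared_secret`` along the lines of the TURN REST API scheme
sketched below (the username encodes an expiry time and the password is a
base64-encoded HMAC-SHA1 of it); treat this as an illustration of the scheme rather
than as synapse's exact code:

.. code:: python

    import base64
    import hashlib
    import hmac
    import time

    def make_turn_credentials(shared_secret, user_lifetime_ms=86400000):
        # The username is an expiry timestamp (seconds since the epoch);
        # coturn rejects credentials whose timestamp has passed.
        username = str(int(time.time()) + user_lifetime_ms // 1000)
        mac = hmac.new(
            shared_secret.encode("utf-8"), username.encode("utf-8"), hashlib.sha1
        )
        password = base64.b64encode(mac.digest()).decode("ascii")
        return username, password

    print(make_turn_credentials("n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons"))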

Now, restart synapse::
@@ -1,74 +0,0 @@

URL Previews
============

Design notes on a URL previewing service for Matrix:

Options are:

1. Have an AS which listens for URLs, downloads them, and inserts an event that describes their metadata.

   * Pros:

     * Decouples the implementation entirely from Synapse.
     * Uses existing Matrix events & content repo to store the metadata.

   * Cons:

     * Which AS should provide this service for a room, and why should you trust it?
     * Doesn't work well with E2E; you'd have to cut the AS into every room
     * the AS would end up subscribing to every room anyway.

2. Have a generic preview API (nothing to do with Matrix) that provides a previewing service:

   * Pros:

     * Simple and flexible; can be used by any clients at any point

   * Cons:

     * If each HS provides one of these independently, all the HSes in a room may needlessly DoS the target URI
     * We need somewhere to store the URL metadata rather than just using Matrix itself
     * We can't piggyback on matrix to distribute the metadata between HSes.

3. Make the synapse of the sending user responsible for spidering the URL and inserting an event asynchronously which describes the metadata.

   * Pros:

     * Works transparently for all clients
     * Piggy-backs nicely on using Matrix for distributing the metadata.
     * No confusion as to which AS

   * Cons:

     * Doesn't work with E2E
     * We might want to decouple the implementation of the spider from the HS, given spider behaviour can be quite complicated and evolve much more rapidly than the HS. It's more like a bot than a core part of the server.

4. Make the sending client use the preview API and insert the event itself when successful.

   * Pros:

     * Works well with E2E
     * No custom server functionality
     * Lets the client customise the preview that they send (like on FB)

   * Cons:

     * Entirely specific to the sending client, whereas it'd be nice if /any/ URL was correctly previewed if clients support it.

5. Have the option of specifying a shared (centralised) previewing service used by a room, to avoid all the different HSes in the room DoSing the target.

Best solution is probably a combination of both 2 and 4.

* Sending clients do their best to create and send a preview at the point of sending the message, perhaps delaying the message until the preview is computed? (This also lets the user validate the preview before sending)
* Receiving clients have the option of going and creating their own preview if one doesn't arrive soon enough (or if the original sender didn't create one)

This is a bit magical though in that the preview could come from two entirely different sources - the sending HS or your local one. However, this can always be exposed to users: "Generate your own URL previews if none are available?"

This is tantamount also to senders calculating their own thumbnails for sending in advance of the main content - we are trusting the sender not to lie about the content in the thumbnail. Whereas currently thumbnails are calculated by the receiving homeserver to avoid this attack.

However, this kind of phishing attack does exist whether we let senders pick their thumbnails or not, in that a malicious sender can send normal text messages around the attachment claiming it to be legitimate. We could rely on (future) reputation/abuse management to punish users who phish (be it with bogus metadata or bogus descriptions). Bogus metadata is particularly bad though, especially if it's avoidable.

As a first cut, let's do #2 and have the receiver hit the API to calculate its own previews (as it does currently for image thumbnails). We can then extend/optimise this to option 4 as a special extra if needed.

API
---

::

    GET /_matrix/media/r0/preview_url?url=http://wherever.com
    200 OK
    {
        "og:type"        : "article"
        "og:url"         : "https://twitter.com/matrixdotorg/status/684074366691356672"
        "og:title"       : "Matrix on Twitter"
        "og:image"       : "https://pbs.twimg.com/profile_images/500400952029888512/yI0qtFi7_400x400.png"
        "og:description" : "“Synapse 0.12 is out! Lots of polishing, performance &amp; bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”"
        "og:site_name"   : "Twitter"
    }

* Downloads the URL
* If HTML, just stores it in RAM and parses it for OG meta tags

  * Download any media OG meta tags to the media repo, and refer to them in the OG via mxc:// URIs.
  * If a media filetype we know we can thumbnail: store it on disk, and hand it to the thumbnailer. Generate OG meta tags from the thumbnailer contents.

* Otherwise, don't bother downloading further.
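As a small illustration of a client using option 2's API (this snippet is not part of
the original notes; on most servers the endpoint also requires an ``access_token``, and
the homeserver URL and token here are placeholders):

.. code:: python

    import requests  # third-party HTTP client, assumed to be installed

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ACCESS_TOKEN = "MDAxexampletoken"          # placeholder

    resp = requests.get(
        HOMESERVER + "/_matrix/media/r0/preview_url",
        params={
            "url": "https://twitter.com/matrixdotorg/status/684074366691356672",
            "access_token": ACCESS_TOKEN,
        },
    )
    resp.raise_for_status()
    print(resp.json().get("og:title"))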
@@ -1,94 +0,0 @@
Scaling synapse via workers
---------------------------

Synapse has experimental support for splitting out functionality into
multiple separate python processes, helping greatly with scalability. These
processes are called 'workers', and are (eventually) intended to scale
horizontally independently.

All processes continue to share the same database instance, and as such, workers
only work with postgres-based synapse deployments (sharing a single sqlite
across multiple processes is a recipe for disaster, plus you should be using
postgres anyway if you care about scalability).

The workers communicate with the master synapse process via a synapse-specific
TCP protocol called 'replication' - analogous to MySQL- or Postgres-style
database replication; it feeds a stream of relevant data to the workers so they
can be kept in sync with the main synapse process and database state.

To enable workers, you need to add a replication listener to the master synapse, e.g.::

    listeners:
      - port: 9092
        bind_address: '127.0.0.1'
        type: replication

Under **no circumstances** should this replication API listener be exposed to the
public internet; it currently implements no authentication whatsoever and is
unencrypted.

You then create a set of configs for the various worker processes. These worker
configuration files should be stored in a dedicated subdirectory, to allow
synctl to manipulate them.

The currently available worker applications are:
 * synapse.app.pusher - handles sending push notifications to sygnal and email
 * synapse.app.synchrotron - handles /sync endpoints; can scale horizontally through multiple instances.
 * synapse.app.appservice - handles output traffic to Application Services
 * synapse.app.federation_reader - handles receiving federation traffic (including the public_rooms API)
 * synapse.app.media_repository - handles the media repository.
 * synapse.app.client_reader - handles client API endpoints like /publicRooms

Each worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that worker,
e.g. the HTTP listener that it provides (if any), logging configuration, etc.
You should minimise the number of overrides though to maintain a usable config.

You must specify the type of worker application (worker_app) and the replication
endpoint that it talks to on the main synapse process (worker_replication_host
and worker_replication_port).

For instance::

    worker_app: synapse.app.synchrotron

    # The replication listener on the synapse to talk to.
    worker_replication_host: 127.0.0.1
    worker_replication_port: 9092

    worker_listeners:
     - type: http
       port: 8083
       resources:
         - names:
           - client

    worker_daemonize: True
    worker_pid_file: /home/matrix/synapse/synchrotron.pid
    worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml

...is a full configuration for a synchrotron worker instance, which will expose a
plain HTTP /sync endpoint on port 8083, separately from the /sync endpoint provided
by the main synapse.

Obviously you should configure your loadbalancer to route the /sync endpoint to
the synchrotron instance(s) in this case.

Finally, to actually run your worker-based synapse, you must pass synctl the -a
commandline option to tell it to operate on all the worker configurations found
in the given directory, e.g.::

    synctl -a $CONFIG/workers start

Currently one should always restart all workers when restarting or upgrading
synapse, unless you explicitly know it's safe not to. For instance, restarting
synapse without restarting all the synchrotrons may result in broken typing
notifications. A restart sequence is sketched below.

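For instance, a conservative upgrade might stop everything, upgrade, and bring the
master back before the workers, so that the replication listener is available when
they reconnect. This is an illustrative sketch only: it assumes the main synapse is
driven by the same synctl from its own directory, and that the worker configs live
under $CONFIG/workers as in the example above::

    synctl -a $CONFIG/workers stop   # stop all workers
    synctl stop                      # stop the main synapse

    # ... upgrade synapse here ...

    synctl start                     # start the main synapse first
    synctl -a $CONFIG/workers start  # then restart all the workers
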
To manipulate a specific worker, you pass the -w option to synctl::

    synctl -w $CONFIG/workers/synchrotron.yaml restart

All of the above is highly experimental and subject to change as Synapse evolves,
but it is documented here to help folks needing highly scalable Synapses similar
to the one running matrix.org!
@@ -1,23 +0,0 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
./dendron/jenkins/build_dendron.sh
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
    --dendron $WORKSPACE/dendron/bin/dendron \
    --haproxy \
@@ -1,20 +0,0 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
./dendron/jenkins/build_dendron.sh
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
    --dendron $WORKSPACE/dendron/bin/dendron \
@@ -1,22 +0,0 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"

# Output flake8 violations to violations.flake8.log
export PEP8SUFFIX="--output-file=violations.flake8.log"

rm .coverage* || echo "No coverage files to remove"

tox -e packaging -e pep8
@@ -1,18 +0,0 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git

./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
@@ -1,16 +0,0 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git

./sytest/jenkins/install_and_run.sh \
    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
@@ -1,30 +0,0 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"

# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decide whether to
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"

rm .coverage* || echo "No coverage files to remove"

tox --notest -e py27
TOX_BIN=$WORKSPACE/.tox/py27/bin
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
$TOX_BIN/pip install lxml

tox -e py27
@@ -1,44 +0,0 @@
#! /bin/bash

# This clones a project from github into a named subdirectory
# If the project has a branch with the same name as this branch
# then it will checkout that branch after cloning.
# Otherwise it will checkout "origin/develop."
# The first argument is the name of the directory to checkout
# the branch into.
# The second argument is the URL of the remote repository to checkout.
# Usually something like https://github.com/matrix-org/sytest.git

set -eux

NAME=$1
PROJECT=$2
BASE=".$NAME-base"

# Update our mirror.
if [ ! -d ".$NAME-base" ]; then
    # Create a local mirror of the source repository.
    # This saves us from having to download the entire repository
    # when this script is next run.
    git clone "$PROJECT" "$BASE" --mirror
else
    # Fetch any updates from the source repository.
    (cd "$BASE"; git fetch -p)
fi

# Remove the existing repository so that we have a clean copy
rm -rf "$NAME"
# Cloning with --shared means that we will share portions of the
# .git directory with our local mirror.
git clone "$BASE" "$NAME" --shared

# Jenkins may have supplied us with the name of the branch in the
# environment. Otherwise we will have to guess based on the current
# commit.
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
cd "$NAME"
# check out the relevant branch
git checkout "${GIT_BRANCH}" || (
    echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop"
    git checkout "origin/develop"
)
@@ -1,20 +0,0 @@
#! /bin/bash

cd "`dirname $0`/.."

TOX_DIR=$WORKSPACE/.tox

mkdir -p $TOX_DIR

if ! [ $TOX_DIR -ef .tox ]; then
    ln -s "$TOX_DIR" .tox
fi

# set up the virtualenv
tox -e py27 --notest -v

TOX_BIN=$TOX_DIR/py27/bin
$TOX_BIN/pip install setuptools
{ python synapse/python_dependencies.py
  echo lxml psycopg2
} | xargs $TOX_BIN/pip install
@@ -1,7 +0,0 @@
.header {
    border-bottom: 4px solid #e4f7ed ! important;
}

.notif_link a, .footer a {
    color: #76CFA6 ! important;
}
@@ -1,156 +0,0 @@
|
|||||||
body {
|
|
||||||
margin: 0px;
|
|
||||||
}
|
|
||||||
|
|
||||||
pre, code {
|
|
||||||
word-break: break-word;
|
|
||||||
white-space: pre-wrap;
|
|
||||||
}
|
|
||||||
|
|
||||||
#page {
|
|
||||||
font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
|
|
||||||
font-color: #454545;
|
|
||||||
font-size: 12pt;
|
|
||||||
width: 100%;
|
|
||||||
padding: 20px;
|
|
||||||
}
|
|
||||||
|
|
||||||
#inner {
|
|
||||||
width: 640px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.header {
|
|
||||||
width: 100%;
|
|
||||||
height: 87px;
|
|
||||||
color: #454545;
|
|
||||||
border-bottom: 4px solid #e5e5e5;
|
|
||||||
}
|
|
||||||
|
|
||||||
.logo {
|
|
||||||
text-align: right;
|
|
||||||
margin-left: 20px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.salutation {
|
|
||||||
padding-top: 10px;
|
|
||||||
font-weight: bold;
|
|
||||||
}
|
|
||||||
|
|
||||||
.summarytext {
|
|
||||||
}
|
|
||||||
|
|
||||||
.room {
|
|
||||||
width: 100%;
|
|
||||||
color: #454545;
|
|
||||||
border-bottom: 1px solid #e5e5e5;
|
|
||||||
}
|
|
||||||
|
|
||||||
.room_header td {
|
|
||||||
padding-top: 38px;
|
|
||||||
padding-bottom: 10px;
|
|
||||||
border-bottom: 1px solid #e5e5e5;
|
|
||||||
}
|
|
||||||
|
|
||||||
.room_name {
|
|
||||||
vertical-align: middle;
|
|
||||||
font-size: 18px;
|
|
||||||
font-weight: bold;
|
|
||||||
}
|
|
||||||
|
|
||||||
.room_header h2 {
|
|
||||||
margin-top: 0px;
|
|
||||||
margin-left: 75px;
|
|
||||||
font-size: 20px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.room_avatar {
|
|
||||||
width: 56px;
|
|
||||||
line-height: 0px;
|
|
||||||
text-align: center;
|
|
||||||
vertical-align: middle;
|
|
||||||
}
|
|
||||||
|
|
||||||
.room_avatar img {
|
|
||||||
width: 48px;
|
|
||||||
height: 48px;
|
|
||||||
object-fit: cover;
|
|
||||||
border-radius: 24px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.notif {
|
|
||||||
border-bottom: 1px solid #e5e5e5;
|
|
||||||
margin-top: 16px;
|
|
||||||
padding-bottom: 16px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.historical_message .sender_avatar {
|
|
||||||
opacity: 0.3;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* spell out opacity and historical_message class names for Outlook aka Word */
|
|
||||||
.historical_message .sender_name {
|
|
||||||
color: #e3e3e3;
|
|
||||||
}
|
|
||||||
|
|
||||||
.historical_message .message_time {
|
|
||||||
color: #e3e3e3;
|
|
||||||
}
|
|
||||||
|
|
||||||
.historical_message .message_body {
|
|
||||||
color: #c7c7c7;
|
|
||||||
}
|
|
||||||
|
|
||||||
.historical_message td,
|
|
||||||
.message td {
|
|
||||||
padding-top: 10px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.sender_avatar {
|
|
||||||
width: 56px;
|
|
||||||
text-align: center;
|
|
||||||
vertical-align: top;
|
|
||||||
}
|
|
||||||
|
|
||||||
.sender_avatar img {
|
|
||||||
margin-top: -2px;
|
|
||||||
width: 32px;
|
|
||||||
height: 32px;
|
|
||||||
border-radius: 16px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.sender_name {
|
|
||||||
display: inline;
|
|
||||||
font-size: 13px;
|
|
||||||
color: #a2a2a2;
|
|
||||||
}
|
|
||||||
|
|
||||||
.message_time {
|
|
||||||
text-align: right;
|
|
||||||
width: 100px;
|
|
||||||
font-size: 11px;
|
|
||||||
color: #a2a2a2;
|
|
||||||
}
|
|
||||||
|
|
||||||
.message_body {
|
|
||||||
}
|
|
||||||
|
|
||||||
.notif_link td {
|
|
||||||
padding-top: 10px;
|
|
||||||
padding-bottom: 10px;
|
|
||||||
font-weight: bold;
|
|
||||||
}
|
|
||||||
|
|
||||||
.notif_link a, .footer a {
|
|
||||||
color: #454545;
|
|
||||||
text-decoration: none;
|
|
||||||
}
|
|
||||||
|
|
||||||
.debug {
|
|
||||||
font-size: 10px;
|
|
||||||
color: #888;
|
|
||||||
}
|
|
||||||
|
|
||||||
.footer {
|
|
||||||
margin-top: 20px;
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
@@ -1,45 +0,0 @@
|
|||||||
{% for message in notif.messages %}
|
|
||||||
<tr class="{{ "historical_message" if message.is_historical else "message" }}">
|
|
||||||
<td class="sender_avatar">
|
|
||||||
{% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
|
|
||||||
{% if message.sender_avatar_url %}
|
|
||||||
<img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
|
|
||||||
{% else %}
|
|
||||||
{% if message.sender_hash % 3 == 0 %}
|
|
||||||
<img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
|
|
||||||
{% elif message.sender_hash % 3 == 1 %}
|
|
||||||
<img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
|
|
||||||
{% else %}
|
|
||||||
<img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
|
|
||||||
{% endif %}
|
|
||||||
{% endif %}
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
<td class="message_contents">
|
|
||||||
{% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
|
|
||||||
<div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
|
|
||||||
{% endif %}
|
|
||||||
<div class="message_body">
|
|
||||||
{% if message.msgtype == "m.text" %}
|
|
||||||
{{ message.body_text_html }}
|
|
||||||
{% elif message.msgtype == "m.emote" %}
|
|
||||||
{{ message.body_text_html }}
|
|
||||||
{% elif message.msgtype == "m.notice" %}
|
|
||||||
{{ message.body_text_html }}
|
|
||||||
{% elif message.msgtype == "m.image" %}
|
|
||||||
<img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
|
|
||||||
{% elif message.msgtype == "m.file" %}
|
|
||||||
<span class="filename">{{ message.body_text_plain }}</span>
|
|
||||||
{% endif %}
|
|
||||||
</div>
|
|
||||||
</td>
|
|
||||||
<td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
|
|
||||||
</tr>
|
|
||||||
{% endfor %}
|
|
||||||
<tr class="notif_link">
|
|
||||||
<td></td>
|
|
||||||
<td>
|
|
||||||
<a href="{{ notif.link }}">View {{ room.title }}</a>
|
|
||||||
</td>
|
|
||||||
<td></td>
|
|
||||||
</tr>
|
|
||||||
@@ -1,16 +0,0 @@
{% for message in notif.messages %}
{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
{% if message.msgtype == "m.text" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.emote" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.notice" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.image" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.file" %}
{{ message.body_text_plain }}
{% endif %}
{% endfor %}

View {{ room.title }} at {{ notif.link }}
@@ -1,55 +0,0 @@
|
|||||||
<!doctype html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<style type="text/css">
|
|
||||||
{% include 'mail.css' without context %}
|
|
||||||
{% include "mail-%s.css" % app_name ignore missing without context %}
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<table id="page">
|
|
||||||
<tr>
|
|
||||||
<td> </td>
|
|
||||||
<td id="inner">
|
|
||||||
<table class="header">
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<div class="salutation">Hi {{ user_display_name }},</div>
|
|
||||||
<div class="summarytext">{{ summary_text }}</div>
|
|
||||||
</td>
|
|
||||||
<td class="logo">
|
|
||||||
{% if app_name == "Riot" %}
|
|
||||||
<img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
|
|
||||||
{% elif app_name == "Vector" %}
|
|
||||||
<img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
|
|
||||||
{% else %}
|
|
||||||
<img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
{% for room in rooms %}
|
|
||||||
{% include 'room.html' with context %}
|
|
||||||
{% endfor %}
|
|
||||||
<div class="footer">
|
|
||||||
<a href="{{ unsubscribe_link }}">Unsubscribe</a>
|
|
||||||
<br/>
|
|
||||||
<br/>
|
|
||||||
<div class="debug">
|
|
||||||
Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
|
|
||||||
an event was received at {{ reason.received_at|format_ts("%c") }}
|
|
||||||
which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
|
|
||||||
{% if reason.last_sent_ts %}
|
|
||||||
and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
|
|
||||||
which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
|
|
||||||
{% else %}
|
|
||||||
and we don't have a last time we sent a mail for this room.
|
|
||||||
{% endif %}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</td>
|
|
||||||
<td> </td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
@@ -1,10 +0,0 @@
Hi {{ user_display_name }},

{{ summary_text }}

{% for room in rooms %}
{% include 'room.txt' with context %}
{% endfor %}

You can disable these notifications at {{ unsubscribe_link }}

@@ -1,33 +0,0 @@
|
|||||||
<table class="room">
|
|
||||||
<tr class="room_header">
|
|
||||||
<td class="room_avatar">
|
|
||||||
{% if room.avatar_url %}
|
|
||||||
<img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
|
|
||||||
{% else %}
|
|
||||||
{% if room.hash % 3 == 0 %}
|
|
||||||
<img alt="" src="https://vector.im/beta/img/76cfa6.png" />
|
|
||||||
{% elif room.hash % 3 == 1 %}
|
|
||||||
<img alt="" src="https://vector.im/beta/img/50e2c2.png" />
|
|
||||||
{% else %}
|
|
||||||
<img alt="" src="https://vector.im/beta/img/f4c371.png" />
|
|
||||||
{% endif %}
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
<td class="room_name" colspan="2">
|
|
||||||
{{ room.title }}
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
{% if room.invite %}
|
|
||||||
<tr>
|
|
||||||
<td></td>
|
|
||||||
<td>
|
|
||||||
<a href="{{ room.link }}">Join the conversation.</a>
|
|
||||||
</td>
|
|
||||||
<td></td>
|
|
||||||
</tr>
|
|
||||||
{% else %}
|
|
||||||
{% for notif in room.notifs %}
|
|
||||||
{% include 'notif.html' with context %}
|
|
||||||
{% endfor %}
|
|
||||||
{% endif %}
|
|
||||||
</table>
|
|
||||||
@@ -1,9 +0,0 @@
{{ room.title }}

{% if room.invite %}
You've been invited, join at {{ room.link }}
{% else %}
{% for notif in room.notifs %}
{% include 'notif.txt' with context %}
{% endfor %}
{% endif %}
@@ -56,9 +56,10 @@ if __name__ == '__main__':
 
     js = json.load(args.json)
 
+
     auth = Auth(Mock())
     check_auth(
         auth,
         [FrozenEvent(d) for d in js["auth_chain"]],
-        [FrozenEvent(d) for d in js.get("pdus", [])],
+        [FrozenEvent(d) for d in js["pdus"]],
     )
@@ -1,5 +1,5 @@
 from synapse.crypto.event_signing import *
-from unpaddedbase64 import encode_base64
+from syutil.base64util import encode_base64
 
 import argparse
 import hashlib
@@ -1,7 +1,9 @@
 
-from signedjson.sign import verify_signed_json
-from signedjson.key import decode_verify_key_bytes, write_signing_keys
-from unpaddedbase64 import decode_base64
+from syutil.crypto.jsonsign import verify_signed_json
+from syutil.crypto.signing_key import (
+    decode_verify_key_bytes, write_signing_keys
+)
+from syutil.base64util import decode_base64
 
 import urllib2
 import json
@@ -4,10 +4,10 @@ import sys
 import json
 import time
 import hashlib
-from unpaddedbase64 import encode_base64
-from signedjson.key import read_signing_keys
-from signedjson.sign import sign_json
-from canonicaljson import encode_canonical_json
+from syutil.base64util import encode_base64
+from syutil.crypto.signing_key import read_signing_keys
+from syutil.crypto.jsonsign import sign_json
+from syutil.jsonutil import encode_canonical_json
 
 
 def select_v1_keys(connection):
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/perl -pi
|
#!/usr/bin/perl -pi
|
||||||
# Copyright 2014-2016 OpenMarket Ltd
|
# Copyright 2015 OpenMarket Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@@ -14,7 +14,7 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
$copyright = <<EOT;
|
$copyright = <<EOT;
|
||||||
/* Copyright 2016 OpenMarket Ltd
|
/* Copyright 2015 OpenMarket Ltd
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
* you may not use this file except in compliance with the License.
|
* you may not use this file except in compliance with the License.
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/perl -pi
|
#!/usr/bin/perl -pi
|
||||||
# Copyright 2014-2016 OpenMarket Ltd
|
# Copyright 2014 OpenMarket Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@@ -14,7 +14,7 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
$copyright = <<EOT;
|
$copyright = <<EOT;
|
||||||
# Copyright 2016 OpenMarket Ltd
|
# Copyright 2015 OpenMarket Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
|||||||
@@ -1,196 +0,0 @@
|
|||||||
#! /usr/bin/python
|
|
||||||
|
|
||||||
import ast
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
class DefinitionVisitor(ast.NodeVisitor):
|
|
||||||
def __init__(self):
|
|
||||||
super(DefinitionVisitor, self).__init__()
|
|
||||||
self.functions = {}
|
|
||||||
self.classes = {}
|
|
||||||
self.names = {}
|
|
||||||
self.attrs = set()
|
|
||||||
self.definitions = {
|
|
||||||
'def': self.functions,
|
|
||||||
'class': self.classes,
|
|
||||||
'names': self.names,
|
|
||||||
'attrs': self.attrs,
|
|
||||||
}
|
|
||||||
|
|
||||||
def visit_Name(self, node):
|
|
||||||
self.names.setdefault(type(node.ctx).__name__, set()).add(node.id)
|
|
||||||
|
|
||||||
def visit_Attribute(self, node):
|
|
||||||
self.attrs.add(node.attr)
|
|
||||||
for child in ast.iter_child_nodes(node):
|
|
||||||
self.visit(child)
|
|
||||||
|
|
||||||
def visit_ClassDef(self, node):
|
|
||||||
visitor = DefinitionVisitor()
|
|
||||||
self.classes[node.name] = visitor.definitions
|
|
||||||
for child in ast.iter_child_nodes(node):
|
|
||||||
visitor.visit(child)
|
|
||||||
|
|
||||||
def visit_FunctionDef(self, node):
|
|
||||||
visitor = DefinitionVisitor()
|
|
||||||
self.functions[node.name] = visitor.definitions
|
|
||||||
for child in ast.iter_child_nodes(node):
|
|
||||||
visitor.visit(child)
|
|
||||||
|
|
||||||
|
|
||||||
def non_empty(defs):
|
|
||||||
functions = {name: non_empty(f) for name, f in defs['def'].items()}
|
|
||||||
classes = {name: non_empty(f) for name, f in defs['class'].items()}
|
|
||||||
result = {}
|
|
||||||
if functions: result['def'] = functions
|
|
||||||
if classes: result['class'] = classes
|
|
||||||
names = defs['names']
|
|
||||||
uses = []
|
|
||||||
for name in names.get('Load', ()):
|
|
||||||
if name not in names.get('Param', ()) and name not in names.get('Store', ()):
|
|
||||||
uses.append(name)
|
|
||||||
uses.extend(defs['attrs'])
|
|
||||||
if uses: result['uses'] = uses
|
|
||||||
result['names'] = names
|
|
||||||
result['attrs'] = defs['attrs']
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def definitions_in_code(input_code):
|
|
||||||
input_ast = ast.parse(input_code)
|
|
||||||
visitor = DefinitionVisitor()
|
|
||||||
visitor.visit(input_ast)
|
|
||||||
definitions = non_empty(visitor.definitions)
|
|
||||||
return definitions
|
|
||||||
|
|
||||||
|
|
||||||
def definitions_in_file(filepath):
|
|
||||||
with open(filepath) as f:
|
|
||||||
return definitions_in_code(f.read())
|
|
||||||
|
|
||||||
|
|
||||||
def defined_names(prefix, defs, names):
|
|
||||||
for name, funcs in defs.get('def', {}).items():
|
|
||||||
names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
|
|
||||||
defined_names(prefix + name + ".", funcs, names)
|
|
||||||
|
|
||||||
for name, funcs in defs.get('class', {}).items():
|
|
||||||
names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
|
|
||||||
defined_names(prefix + name + ".", funcs, names)
|
|
||||||
|
|
||||||
|
|
||||||
def used_names(prefix, item, defs, names):
|
|
||||||
for name, funcs in defs.get('def', {}).items():
|
|
||||||
used_names(prefix + name + ".", name, funcs, names)
|
|
||||||
|
|
||||||
for name, funcs in defs.get('class', {}).items():
|
|
||||||
used_names(prefix + name + ".", name, funcs, names)
|
|
||||||
|
|
||||||
path = prefix.rstrip('.')
|
|
||||||
for used in defs.get('uses', ()):
|
|
||||||
if used in names:
|
|
||||||
if item:
|
|
||||||
names[item].setdefault('uses', []).append(used)
|
|
||||||
names[used].setdefault('used', {}).setdefault(item, []).append(path)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
import sys, os, argparse, re
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(description='Find definitions.')
|
|
||||||
parser.add_argument(
|
|
||||||
"--unused", action="store_true", help="Only list unused definitions"
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--pattern", action="append", metavar="REGEXP",
|
|
||||||
help="Search for a pattern"
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"directories", nargs='+', metavar="DIR",
|
|
||||||
help="Directories to search for definitions"
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--referrers", default=0, type=int,
|
|
||||||
help="Include referrers up to the given depth"
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--referred", default=0, type=int,
|
|
||||||
help="Include referred down to the given depth"
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--format", default="yaml",
|
|
||||||
help="Output format, one of 'yaml' or 'dot'"
|
|
||||||
)
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
definitions = {}
|
|
||||||
for directory in args.directories:
|
|
||||||
for root, dirs, files in os.walk(directory):
|
|
||||||
for filename in files:
|
|
||||||
if filename.endswith(".py"):
|
|
||||||
filepath = os.path.join(root, filename)
|
|
||||||
definitions[filepath] = definitions_in_file(filepath)
|
|
||||||
|
|
||||||
names = {}
|
|
||||||
for filepath, defs in definitions.items():
|
|
||||||
defined_names(filepath + ":", defs, names)
|
|
||||||
|
|
||||||
for filepath, defs in definitions.items():
|
|
||||||
used_names(filepath + ":", None, defs, names)
|
|
||||||
|
|
||||||
patterns = [re.compile(pattern) for pattern in args.pattern or ()]
|
|
||||||
ignore = [re.compile(pattern) for pattern in args.ignore or ()]
|
|
||||||
|
|
||||||
result = {}
|
|
||||||
for name, definition in names.items():
|
|
||||||
if patterns and not any(pattern.match(name) for pattern in patterns):
|
|
||||||
continue
|
|
||||||
if ignore and any(pattern.match(name) for pattern in ignore):
|
|
||||||
continue
|
|
||||||
if args.unused and definition.get('used'):
|
|
||||||
continue
|
|
||||||
result[name] = definition
|
|
||||||
|
|
||||||
referrer_depth = args.referrers
|
|
||||||
referrers = set()
|
|
||||||
while referrer_depth:
|
|
||||||
referrer_depth -= 1
|
|
||||||
for entry in result.values():
|
|
||||||
for used_by in entry.get("used", ()):
|
|
||||||
referrers.add(used_by)
|
|
||||||
for name, definition in names.items():
|
|
||||||
if not name in referrers:
|
|
||||||
continue
|
|
||||||
if ignore and any(pattern.match(name) for pattern in ignore):
|
|
||||||
continue
|
|
||||||
result[name] = definition
|
|
||||||
|
|
||||||
referred_depth = args.referred
|
|
||||||
referred = set()
|
|
||||||
while referred_depth:
|
|
||||||
referred_depth -= 1
|
|
||||||
for entry in result.values():
|
|
||||||
for uses in entry.get("uses", ()):
|
|
||||||
referred.add(uses)
|
|
||||||
for name, definition in names.items():
|
|
||||||
if not name in referred:
|
|
||||||
continue
|
|
||||||
if ignore and any(pattern.match(name) for pattern in ignore):
|
|
||||||
continue
|
|
||||||
result[name] = definition
|
|
||||||
|
|
||||||
if args.format == 'yaml':
|
|
||||||
yaml.dump(result, sys.stdout, default_flow_style=False)
|
|
||||||
elif args.format == 'dot':
|
|
||||||
print "digraph {"
|
|
||||||
for name, entry in result.items():
|
|
||||||
print name
|
|
||||||
for used_by in entry.get("used", ()):
|
|
||||||
if used_by in result:
|
|
||||||
print used_by, "->", name
|
|
||||||
print "}"
|
|
||||||
else:
|
|
||||||
raise ValueError("Unknown format %r" % (args.format))
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
#!/usr/bin/env python2
|
|
||||||
|
|
||||||
import pymacaroons
|
|
||||||
import sys
|
|
||||||
|
|
||||||
if len(sys.argv) == 1:
|
|
||||||
sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],))
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
macaroon_string = sys.argv[1]
|
|
||||||
key = sys.argv[2] if len(sys.argv) > 2 else None
|
|
||||||
|
|
||||||
macaroon = pymacaroons.Macaroon.deserialize(macaroon_string)
|
|
||||||
print macaroon.inspect()
|
|
||||||
|
|
||||||
print ""
|
|
||||||
|
|
||||||
verifier = pymacaroons.Verifier()
|
|
||||||
verifier.satisfy_general(lambda c: True)
|
|
||||||
try:
|
|
||||||
verifier.verify(macaroon, key)
|
|
||||||
print "Signature is correct"
|
|
||||||
except Exception as e:
|
|
||||||
print e.message
|
|
||||||
94
scripts-dev/federation_client.py
Executable file → Normal file
@@ -1,30 +1,10 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
#
|
|
||||||
# Copyright 2015, 2016 OpenMarket Ltd
|
|
||||||
# Copyright 2017 New Vector Ltd
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import nacl.signing
|
import nacl.signing
|
||||||
import json
|
import json
|
||||||
import base64
|
import base64
|
||||||
import requests
|
import requests
|
||||||
import sys
|
import sys
|
||||||
import srvlookup
|
import srvlookup
|
||||||
import yaml
|
|
||||||
|
|
||||||
def encode_base64(input_bytes):
|
def encode_base64(input_bytes):
|
||||||
"""Encode bytes as a base64 string without any padding."""
|
"""Encode bytes as a base64 string without any padding."""
|
||||||
@@ -136,85 +116,31 @@ def get_json(origin_name, origin_key, destination, path):
|
|||||||
authorization_headers = []
|
authorization_headers = []
|
||||||
|
|
||||||
for key, sig in signed_json["signatures"][origin_name].items():
|
for key, sig in signed_json["signatures"][origin_name].items():
|
||||||
header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
|
authorization_headers.append(bytes(
|
||||||
origin_name, key, sig,
|
"X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
|
||||||
)
|
origin_name, key, sig,
|
||||||
authorization_headers.append(bytes(header))
|
)
|
||||||
print ("Authorization: %s" % header, file=sys.stderr)
|
))
|
||||||
|
|
||||||
dest = lookup(destination, path)
|
|
||||||
print ("Requesting %s" % dest, file=sys.stderr)
|
|
||||||
|
|
||||||
result = requests.get(
|
result = requests.get(
|
||||||
dest,
|
lookup(destination, path),
|
||||||
headers={"Authorization": authorization_headers[0]},
|
headers={"Authorization": authorization_headers[0]},
|
||||||
verify=False,
|
verify=False,
|
||||||
)
|
)
|
||||||
sys.stderr.write("Status Code: %d\n" % (result.status_code,))
|
|
||||||
return result.json()
|
return result.json()
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = argparse.ArgumentParser(
|
origin_name, keyfile, destination, path = sys.argv[1:]
|
||||||
description=
|
|
||||||
"Signs and sends a federation request to a matrix homeserver",
|
|
||||||
)
|
|
||||||
|
|
||||||
parser.add_argument(
|
with open(keyfile) as f:
|
||||||
"-N", "--server-name",
|
|
||||||
help="Name to give as the local homeserver. If unspecified, will be "
|
|
||||||
"read from the config file.",
|
|
||||||
)
|
|
||||||
|
|
||||||
parser.add_argument(
|
|
||||||
"-k", "--signing-key-path",
|
|
||||||
help="Path to the file containing the private ed25519 key to sign the "
|
|
||||||
"request with.",
|
|
||||||
)
|
|
||||||
|
|
||||||
parser.add_argument(
|
|
||||||
"-c", "--config",
|
|
||||||
default="homeserver.yaml",
|
|
||||||
help="Path to server config file. Ignored if --server-name and "
|
|
||||||
"--signing-key-path are both given.",
|
|
||||||
)
|
|
||||||
|
|
||||||
parser.add_argument(
|
|
||||||
"-d", "--destination",
|
|
||||||
default="matrix.org",
|
|
||||||
help="name of the remote homeserver. We will do SRV lookups and "
|
|
||||||
"connect appropriately.",
|
|
||||||
)
|
|
||||||
|
|
||||||
parser.add_argument(
|
|
||||||
"path",
|
|
||||||
help="request path. We will add '/_matrix/federation/v1/' to this."
|
|
||||||
)
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
if not args.server_name or not args.signing_key_path:
|
|
||||||
read_args_from_config(args)
|
|
||||||
|
|
||||||
with open(args.signing_key_path) as f:
|
|
||||||
key = read_signing_keys(f)[0]
|
key = read_signing_keys(f)[0]
|
||||||
|
|
||||||
result = get_json(
|
result = get_json(
|
||||||
args.server_name, key, args.destination, "/_matrix/federation/v1/" + args.path
|
origin_name, key, destination, "/_matrix/federation/v1/" + path
|
||||||
)
|
)
|
||||||
|
|
||||||
json.dump(result, sys.stdout)
|
json.dump(result, sys.stdout)
|
||||||
print ("")
|
|
||||||
|
|
||||||
|
|
||||||
def read_args_from_config(args):
|
|
||||||
with open(args.config, 'r') as fh:
|
|
||||||
config = yaml.safe_load(fh)
|
|
||||||
if not args.server_name:
|
|
||||||
args.server_name = config['server_name']
|
|
||||||
if not args.signing_key_path:
|
|
||||||
args.signing_key_path = config['signing_key_path']
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
|
|||||||
@@ -6,8 +6,8 @@ from synapse.crypto.event_signing import (
     add_event_pdu_content_hash, compute_pdu_event_reference_hash
 )
 from synapse.api.events.utils import prune_pdu
-from unpaddedbase64 import encode_base64, decode_base64
-from canonicaljson import encode_canonical_json
+from syutil.base64util import encode_base64, decode_base64
+from syutil.jsonutil import encode_canonical_json
 import sqlite3
 import sys
 
|
|||||||
@@ -1,62 +0,0 @@
|
|||||||
#! /usr/bin/python
|
|
||||||
|
|
||||||
import ast
|
|
||||||
import argparse
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
PATTERNS_V1 = []
|
|
||||||
PATTERNS_V2 = []
|
|
||||||
|
|
||||||
RESULT = {
|
|
||||||
"v1": PATTERNS_V1,
|
|
||||||
"v2": PATTERNS_V2,
|
|
||||||
}
|
|
||||||
|
|
||||||
class CallVisitor(ast.NodeVisitor):
|
|
||||||
def visit_Call(self, node):
|
|
||||||
if isinstance(node.func, ast.Name):
|
|
||||||
name = node.func.id
|
|
||||||
else:
|
|
||||||
return
|
|
||||||
|
|
||||||
|
|
||||||
if name == "client_path_patterns":
|
|
||||||
PATTERNS_V1.append(node.args[0].s)
|
|
||||||
elif name == "client_v2_patterns":
|
|
||||||
PATTERNS_V2.append(node.args[0].s)
|
|
||||||
|
|
||||||
|
|
||||||
def find_patterns_in_code(input_code):
|
|
||||||
input_ast = ast.parse(input_code)
|
|
||||||
visitor = CallVisitor()
|
|
||||||
visitor.visit(input_ast)
|
|
||||||
|
|
||||||
|
|
||||||
def find_patterns_in_file(filepath):
|
|
||||||
with open(filepath) as f:
|
|
||||||
find_patterns_in_code(f.read())
|
|
||||||
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(description='Find url patterns.')
|
|
||||||
|
|
||||||
parser.add_argument(
|
|
||||||
"directories", nargs='+', metavar="DIR",
|
|
||||||
help="Directories to search for definitions"
|
|
||||||
)
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
|
|
||||||
for directory in args.directories:
|
|
||||||
for root, dirs, files in os.walk(directory):
|
|
||||||
for filename in files:
|
|
||||||
if filename.endswith(".py"):
|
|
||||||
filepath = os.path.join(root, filename)
|
|
||||||
find_patterns_in_file(filepath)
|
|
||||||
|
|
||||||
PATTERNS_V1.sort()
|
|
||||||
PATTERNS_V2.sort()
|
|
||||||
|
|
||||||
yaml.dump(RESULT, sys.stdout, default_flow_style=False)
|
|
||||||
@@ -9,39 +9,16 @@
|
|||||||
ROOMID="$1"
|
ROOMID="$1"
|
||||||
|
|
||||||
sqlite3 homeserver.db <<EOF
|
sqlite3 homeserver.db <<EOF
|
||||||
DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
|
DELETE FROM context_depth WHERE context = '$ROOMID';
|
||||||
DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
|
DELETE FROM current_state WHERE context = '$ROOMID';
|
||||||
DELETE FROM event_edges WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM room_depth WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM state_forward_extremities WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM events WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM event_json WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM state_events WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM current_state_events WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM room_memberships WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM feedback WHERE room_id = '$ROOMID';
|
DELETE FROM feedback WHERE room_id = '$ROOMID';
|
||||||
DELETE FROM topics WHERE room_id = '$ROOMID';
|
DELETE FROM messages WHERE room_id = '$ROOMID';
|
||||||
DELETE FROM room_names WHERE room_id = '$ROOMID';
|
DELETE FROM pdu_backward_extremities WHERE context = '$ROOMID';
|
||||||
|
DELETE FROM pdu_edges WHERE context = '$ROOMID';
|
||||||
|
DELETE FROM pdu_forward_extremities WHERE context = '$ROOMID';
|
||||||
|
DELETE FROM pdus WHERE context = '$ROOMID';
|
||||||
|
DELETE FROM room_data WHERE room_id = '$ROOMID';
|
||||||
|
DELETE FROM room_memberships WHERE room_id = '$ROOMID';
|
||||||
DELETE FROM rooms WHERE room_id = '$ROOMID';
|
DELETE FROM rooms WHERE room_id = '$ROOMID';
|
||||||
DELETE FROM room_hosts WHERE room_id = '$ROOMID';
|
DELETE FROM state_pdus WHERE context = '$ROOMID';
|
||||||
DELETE FROM room_aliases WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM state_groups WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM event_search_content WHERE c1room_id = '$ROOMID';
|
|
||||||
DELETE FROM guest_access WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM history_visibility WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM room_tags WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM room_tags_revisions WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM room_account_data WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM event_push_actions WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM local_invites WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM pusher_throttle WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM event_reports WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM public_room_list_stream WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM stream_ordering_to_exterm WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM event_auth WHERE room_id = '$ROOMID';
|
|
||||||
DELETE FROM appservice_room_list WHERE room_id = '$ROOMID';
|
|
||||||
VACUUM;
|
|
||||||
EOF
|
EOF
|
||||||
|
|||||||
@@ -1,67 +0,0 @@
|
|||||||
import requests
|
|
||||||
import collections
|
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
import json
|
|
||||||
|
|
||||||
Entry = collections.namedtuple("Entry", "name position rows")
|
|
||||||
|
|
||||||
ROW_TYPES = {}
|
|
||||||
|
|
||||||
|
|
||||||
def row_type_for_columns(name, column_names):
|
|
||||||
column_names = tuple(column_names)
|
|
||||||
row_type = ROW_TYPES.get((name, column_names))
|
|
||||||
if row_type is None:
|
|
||||||
row_type = collections.namedtuple(name, column_names)
|
|
||||||
ROW_TYPES[(name, column_names)] = row_type
|
|
||||||
return row_type
|
|
||||||
|
|
||||||
|
|
||||||
def parse_response(content):
|
|
||||||
streams = json.loads(content)
|
|
||||||
result = {}
|
|
||||||
for name, value in streams.items():
|
|
||||||
row_type = row_type_for_columns(name, value["field_names"])
|
|
||||||
position = value["position"]
|
|
||||||
rows = [row_type(*row) for row in value["rows"]]
|
|
||||||
result[name] = Entry(name, position, rows)
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def replicate(server, streams):
|
|
||||||
return parse_response(requests.get(
|
|
||||||
server + "/_synapse/replication",
|
|
||||||
verify=False,
|
|
||||||
params=streams
|
|
||||||
).content)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
server = sys.argv[1]
|
|
||||||
|
|
||||||
streams = None
|
|
||||||
while not streams:
|
|
||||||
try:
|
|
||||||
streams = {
|
|
||||||
row.name: row.position
|
|
||||||
for row in replicate(server, {"streams":"-1"})["streams"].rows
|
|
||||||
}
|
|
||||||
except requests.exceptions.ConnectionError as e:
|
|
||||||
time.sleep(0.1)
|
|
||||||
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
results = replicate(server, streams)
|
|
||||||
except:
|
|
||||||
sys.stdout.write("connection_lost("+ repr(streams) + ")\n")
|
|
||||||
break
|
|
||||||
for update in results.values():
|
|
||||||
for row in update.rows:
|
|
||||||
sys.stdout.write(repr(row) + "\n")
|
|
||||||
streams[update.name] = update.position
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if __name__=='__main__':
|
|
||||||
main()
|
|
||||||
21
scripts/database-prepare-for-0.0.1.sh
Executable file
@@ -0,0 +1,21 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# This will prepare a synapse database for running with v0.0.1 of synapse.
|
||||||
|
# It will store all the user information, but will *delete* all messages and
|
||||||
|
# room data.
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
cp "$1" "$1.bak"
|
||||||
|
|
||||||
|
DUMP=$(sqlite3 "$1" << 'EOF'
|
||||||
|
.dump users
|
||||||
|
.dump access_tokens
|
||||||
|
.dump presence
|
||||||
|
.dump profiles
|
||||||
|
EOF
|
||||||
|
)
|
||||||
|
|
||||||
|
rm "$1"
|
||||||
|
|
||||||
|
sqlite3 "$1" <<< "$DUMP"
|
||||||
21
scripts/database-prepare-for-0.5.0.sh
Executable file
@@ -0,0 +1,21 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# This will prepare a synapse database for running with v0.5.0 of synapse.
|
||||||
|
# It will store all the user information, but will *delete* all messages and
|
||||||
|
# room data.
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
cp "$1" "$1.bak"
|
||||||
|
|
||||||
|
DUMP=$(sqlite3 "$1" << 'EOF'
|
||||||
|
.dump users
|
||||||
|
.dump access_tokens
|
||||||
|
.dump presence
|
||||||
|
.dump profiles
|
||||||
|
EOF
|
||||||
|
)
|
||||||
|
|
||||||
|
rm "$1"
|
||||||
|
|
||||||
|
sqlite3 "$1" <<< "$DUMP"
|
||||||
@@ -1,55 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import bcrypt
|
|
||||||
import getpass
|
|
||||||
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
bcrypt_rounds=12
|
|
||||||
password_pepper = ""
|
|
||||||
|
|
||||||
def prompt_for_pass():
|
|
||||||
password = getpass.getpass("Password: ")
|
|
||||||
|
|
||||||
if not password:
|
|
||||||
raise Exception("Password cannot be blank.")
|
|
||||||
|
|
||||||
confirm_password = getpass.getpass("Confirm password: ")
|
|
||||||
|
|
||||||
if password != confirm_password:
|
|
||||||
raise Exception("Passwords do not match.")
|
|
||||||
|
|
||||||
return password
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
parser = argparse.ArgumentParser(
|
|
||||||
description="Calculate the hash of a new password, so that passwords"
|
|
||||||
" can be reset")
|
|
||||||
parser.add_argument(
|
|
||||||
"-p", "--password",
|
|
||||||
default=None,
|
|
||||||
help="New password for user. Will prompt if omitted.",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"-c", "--config",
|
|
||||||
type=argparse.FileType('r'),
|
|
||||||
help="Path to server config file. Used to read in bcrypt_rounds and password_pepper.",
|
|
||||||
)
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
if "config" in args and args.config:
|
|
||||||
config = yaml.safe_load(args.config)
|
|
||||||
bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
|
|
||||||
password_config = config.get("password_config", {})
|
|
||||||
password_pepper = password_config.get("pepper", password_pepper)
|
|
||||||
password = args.password
|
|
||||||
|
|
||||||
if not password:
|
|
||||||
password = prompt_for_pass()
|
|
||||||
|
|
||||||
print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))
|
|
||||||
|
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2015, 2016 OpenMarket Ltd
|
# Copyright 2015 OpenMarket Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@@ -19,7 +19,6 @@ from twisted.enterprise import adbapi
|
|||||||
|
|
||||||
from synapse.storage._base import LoggingTransaction, SQLBaseStore
|
from synapse.storage._base import LoggingTransaction, SQLBaseStore
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.storage.prepare_database import prepare_database
|
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
import curses
|
import curses
|
||||||
@@ -30,18 +29,14 @@ import traceback
|
|||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger("synapse_port_db")
|
logger = logging.getLogger("port_from_sqlite_to_postgres")
|
||||||
|
|
||||||
|
|
||||||
BOOLEAN_COLUMNS = {
|
BOOLEAN_COLUMNS = {
|
||||||
"events": ["processed", "outlier", "contains_url"],
|
"events": ["processed", "outlier"],
|
||||||
"rooms": ["is_public"],
|
"rooms": ["is_public"],
|
||||||
"event_edges": ["is_state"],
|
"event_edges": ["is_state"],
|
||||||
"presence_list": ["accepted"],
|
"presence_list": ["accepted"],
|
||||||
"presence_stream": ["currently_active"],
|
|
||||||
"public_room_list_stream": ["visibility"],
|
|
||||||
"device_lists_outbound_pokes": ["sent"],
|
|
||||||
"users_who_share_rooms": ["share_private"],
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -73,15 +68,6 @@ APPEND_ONLY_TABLES = [
|
|||||||
"state_groups_state",
|
"state_groups_state",
|
||||||
"event_to_state_groups",
|
"event_to_state_groups",
|
||||||
"rejections",
|
"rejections",
|
||||||
"event_search",
|
|
||||||
"presence_stream",
|
|
||||||
"push_rules_stream",
|
|
||||||
"current_state_resets",
|
|
||||||
"ex_outlier_stream",
|
|
||||||
"cache_invalidation_stream",
|
|
||||||
"public_room_list_stream",
|
|
||||||
"state_group_edges",
|
|
||||||
"stream_ordering_to_exterm",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
@@ -103,16 +89,14 @@ class Store(object):
|
|||||||
|
|
||||||
_simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
|
_simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
|
||||||
_simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
|
_simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
|
||||||
_simple_select_one = SQLBaseStore.__dict__["_simple_select_one"]
|
|
||||||
_simple_select_one_txn = SQLBaseStore.__dict__["_simple_select_one_txn"]
|
|
||||||
_simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
|
_simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
|
||||||
_simple_select_one_onecol_txn = SQLBaseStore.__dict__[
|
_simple_select_one_onecol_txn = SQLBaseStore.__dict__["_simple_select_one_onecol_txn"]
|
||||||
"_simple_select_one_onecol_txn"
|
|
||||||
]
|
|
||||||
|
|
||||||
_simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
|
_simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
|
||||||
_simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
|
_simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
|
||||||
|
|
||||||
|
_execute_and_decode = SQLBaseStore.__dict__["_execute_and_decode"]
|
||||||
|
|
||||||
def runInteraction(self, desc, func, *args, **kwargs):
|
def runInteraction(self, desc, func, *args, **kwargs):
|
||||||
def r(conn):
|
def r(conn):
|
||||||
try:
|
try:
|
||||||
@@ -122,7 +106,7 @@ class Store(object):
|
|||||||
try:
|
try:
|
||||||
txn = conn.cursor()
|
txn = conn.cursor()
|
||||||
return func(
|
return func(
|
||||||
LoggingTransaction(txn, desc, self.database_engine, [], []),
|
LoggingTransaction(txn, desc, self.database_engine, []),
|
||||||
*args, **kwargs
|
*args, **kwargs
|
||||||
)
|
)
|
||||||
except self.database_engine.module.DatabaseError as e:
|
except self.database_engine.module.DatabaseError as e:
|
||||||
@@ -173,40 +157,31 @@ class Porter(object):
|
|||||||
def setup_table(self, table):
|
def setup_table(self, table):
|
||||||
if table in APPEND_ONLY_TABLES:
|
if table in APPEND_ONLY_TABLES:
|
||||||
# It's safe to just carry on inserting.
|
# It's safe to just carry on inserting.
|
||||||
row = yield self.postgres_store._simple_select_one(
|
next_chunk = yield self.postgres_store._simple_select_one_onecol(
|
||||||
table="port_from_sqlite3",
|
table="port_from_sqlite3",
|
||||||
keyvalues={"table_name": table},
|
keyvalues={"table_name": table},
|
||||||
retcols=("forward_rowid", "backward_rowid"),
|
retcol="rowid",
|
||||||
allow_none=True,
|
allow_none=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
total_to_port = None
|
total_to_port = None
|
||||||
if row is None:
|
if next_chunk is None:
|
||||||
if table == "sent_transactions":
|
if table == "sent_transactions":
|
||||||
forward_chunk, already_ported, total_to_port = (
|
next_chunk, already_ported, total_to_port = (
|
||||||
yield self._setup_sent_transactions()
|
yield self._setup_sent_transactions()
|
||||||
)
|
)
|
||||||
backward_chunk = 0
|
|
||||||
else:
|
else:
|
||||||
yield self.postgres_store._simple_insert(
|
yield self.postgres_store._simple_insert(
|
||||||
table="port_from_sqlite3",
|
table="port_from_sqlite3",
|
||||||
values={
|
values={"table_name": table, "rowid": 1}
|
||||||
"table_name": table,
|
|
||||||
"forward_rowid": 1,
|
|
||||||
"backward_rowid": 0,
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
forward_chunk = 1
|
next_chunk = 1
|
||||||
backward_chunk = 0
|
|
||||||
already_ported = 0
|
already_ported = 0
|
||||||
else:
|
|
||||||
forward_chunk = row["forward_rowid"]
|
|
||||||
backward_chunk = row["backward_rowid"]
|
|
||||||
|
|
||||||
if total_to_port is None:
|
if total_to_port is None:
|
||||||
already_ported, total_to_port = yield self._get_total_count_to_port(
|
already_ported, total_to_port = yield self._get_total_count_to_port(
|
||||||
table, forward_chunk, backward_chunk
|
table, next_chunk
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
def delete_all(txn):
|
def delete_all(txn):
|
||||||
@@ -220,104 +195,42 @@ class Porter(object):
|
|||||||
|
|
||||||
yield self.postgres_store._simple_insert(
|
yield self.postgres_store._simple_insert(
|
||||||
table="port_from_sqlite3",
|
table="port_from_sqlite3",
|
||||||
values={
|
values={"table_name": table, "rowid": 0}
|
||||||
"table_name": table,
|
|
||||||
"forward_rowid": 1,
|
|
||||||
"backward_rowid": 0,
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
forward_chunk = 1
|
next_chunk = 1
|
||||||
backward_chunk = 0
|
|
||||||
|
|
||||||
already_ported, total_to_port = yield self._get_total_count_to_port(
|
already_ported, total_to_port = yield self._get_total_count_to_port(
|
||||||
table, forward_chunk, backward_chunk
|
table, next_chunk
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(
|
defer.returnValue((table, already_ported, total_to_port, next_chunk))
|
||||||
(table, already_ported, total_to_port, forward_chunk, backward_chunk)
|
|
||||||
)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def handle_table(self, table, postgres_size, table_size, forward_chunk,
|
def handle_table(self, table, postgres_size, table_size, next_chunk):
|
||||||
backward_chunk):
|
|
||||||
if not table_size:
|
if not table_size:
|
||||||
return
|
return
|
||||||
|
|
||||||
self.progress.add_table(table, postgres_size, table_size)
|
self.progress.add_table(table, postgres_size, table_size)
|
||||||
|
|
||||||
if table == "event_search":
|
select = (
|
||||||
yield self.handle_search_table(
|
|
||||||
postgres_size, table_size, forward_chunk, backward_chunk
|
|
||||||
)
|
|
||||||
return
|
|
||||||
|
|
||||||
if table in (
|
|
||||||
"user_directory", "user_directory_search", "users_who_share_rooms",
|
|
||||||
"users_in_pubic_room",
|
|
||||||
):
|
|
||||||
# We don't port these tables, as they're a faff and we can regenreate
|
|
||||||
# them anyway.
|
|
||||||
self.progress.update(table, table_size) # Mark table as done
|
|
||||||
return
|
|
||||||
|
|
||||||
if table == "user_directory_stream_pos":
|
|
||||||
# We need to make sure there is a single row, `(X, null), as that is
|
|
||||||
# what synapse expects to be there.
|
|
||||||
yield self.postgres_store._simple_insert(
|
|
||||||
table=table,
|
|
||||||
values={"stream_id": None},
|
|
||||||
)
|
|
||||||
self.progress.update(table, table_size) # Mark table as done
|
|
||||||
return
|
|
||||||
|
|
||||||
forward_select = (
|
|
||||||
"SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
|
"SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
|
||||||
% (table,)
|
% (table,)
|
||||||
)
|
)
|
||||||
|
|
||||||
backward_select = (
|
|
||||||
"SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?"
|
|
||||||
% (table,)
|
|
||||||
)
|
|
||||||
|
|
||||||
do_forward = [True]
|
|
||||||
do_backward = [True]
|
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
def r(txn):
|
def r(txn):
|
||||||
forward_rows = []
|
txn.execute(select, (next_chunk, self.batch_size,))
|
||||||
backward_rows = []
|
rows = txn.fetchall()
|
||||||
if do_forward[0]:
|
headers = [column[0] for column in txn.description]
|
||||||
txn.execute(forward_select, (forward_chunk, self.batch_size,))
|
|
||||||
forward_rows = txn.fetchall()
|
|
||||||
if not forward_rows:
|
|
||||||
do_forward[0] = False
|
|
||||||
|
|
||||||
if do_backward[0]:
|
return headers, rows
|
||||||
txn.execute(backward_select, (backward_chunk, self.batch_size,))
|
|
||||||
backward_rows = txn.fetchall()
|
|
||||||
if not backward_rows:
|
|
||||||
do_backward[0] = False
|
|
||||||
|
|
||||||
if forward_rows or backward_rows:
|
headers, rows = yield self.sqlite_store.runInteraction("select", r)
|
||||||
headers = [column[0] for column in txn.description]
|
|
||||||
else:
|
|
||||||
headers = None
|
|
||||||
|
|
||||||
return headers, forward_rows, backward_rows
|
if rows:
|
||||||
|
next_chunk = rows[-1][0] + 1
|
||||||
|
|
||||||
headers, frows, brows = yield self.sqlite_store.runInteraction(
|
|
||||||
"select", r
|
|
||||||
)
|
|
||||||
|
|
||||||
if frows or brows:
|
|
||||||
if frows:
|
|
||||||
forward_chunk = max(row[0] for row in frows) + 1
|
|
||||||
if brows:
|
|
||||||
backward_chunk = min(row[0] for row in brows) - 1
|
|
||||||
|
|
||||||
rows = frows + brows
|
|
||||||
self._convert_rows(table, headers, rows)
|
self._convert_rows(table, headers, rows)
|
||||||
|
|
||||||
def insert(txn):
|
def insert(txn):
|
||||||
@@ -329,10 +242,7 @@ class Porter(object):
|
|||||||
txn,
|
txn,
|
||||||
table="port_from_sqlite3",
|
table="port_from_sqlite3",
|
||||||
keyvalues={"table_name": table},
|
keyvalues={"table_name": table},
|
||||||
updatevalues={
|
updatevalues={"rowid": next_chunk},
|
||||||
"forward_rowid": forward_chunk,
|
|
||||||
"backward_rowid": backward_chunk,
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
|
|
||||||
yield self.postgres_store.execute(insert)
|
yield self.postgres_store.execute(insert)
|
||||||
@@ -343,76 +253,6 @@ class Porter(object):
|
|||||||
else:
|
else:
|
||||||
return
|
return
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def handle_search_table(self, postgres_size, table_size, forward_chunk,
|
|
||||||
backward_chunk):
|
|
||||||
select = (
|
|
||||||
"SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
|
|
||||||
" FROM event_search as es"
|
|
||||||
" INNER JOIN events AS e USING (event_id, room_id)"
|
|
||||||
" WHERE es.rowid >= ?"
|
|
||||||
" ORDER BY es.rowid LIMIT ?"
|
|
||||||
)
|
|
||||||
|
|
||||||
while True:
|
|
||||||
def r(txn):
|
|
||||||
txn.execute(select, (forward_chunk, self.batch_size,))
|
|
||||||
rows = txn.fetchall()
|
|
||||||
headers = [column[0] for column in txn.description]
|
|
||||||
|
|
||||||
return headers, rows
|
|
||||||
|
|
||||||
headers, rows = yield self.sqlite_store.runInteraction("select", r)
|
|
||||||
|
|
||||||
if rows:
|
|
||||||
forward_chunk = rows[-1][0] + 1
|
|
||||||
|
|
||||||
# We have to treat event_search differently since it has a
|
|
||||||
# different structure in the two different databases.
|
|
||||||
def insert(txn):
|
|
||||||
sql = (
|
|
||||||
"INSERT INTO event_search (event_id, room_id, key,"
|
|
||||||
" sender, vector, origin_server_ts, stream_ordering)"
|
|
||||||
" VALUES (?,?,?,?,to_tsvector('english', ?),?,?)"
|
|
||||||
)
|
|
||||||
|
|
||||||
rows_dict = [
|
|
||||||
dict(zip(headers, row))
|
|
||||||
for row in rows
|
|
||||||
]
|
|
||||||
|
|
||||||
txn.executemany(sql, [
|
|
||||||
(
|
|
||||||
row["event_id"],
|
|
||||||
row["room_id"],
|
|
||||||
row["key"],
|
|
||||||
row["sender"],
|
|
||||||
row["value"],
|
|
||||||
row["origin_server_ts"],
|
|
||||||
row["stream_ordering"],
|
|
||||||
)
|
|
||||||
for row in rows_dict
|
|
||||||
])
|
|
||||||
|
|
||||||
self.postgres_store._simple_update_one_txn(
|
|
||||||
txn,
|
|
||||||
table="port_from_sqlite3",
|
|
||||||
keyvalues={"table_name": "event_search"},
|
|
||||||
updatevalues={
|
|
||||||
"forward_rowid": forward_chunk,
|
|
||||||
"backward_rowid": backward_chunk,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
yield self.postgres_store.execute(insert)
|
|
||||||
|
|
||||||
postgres_size += len(rows)
|
|
||||||
|
|
||||||
self.progress.update("event_search", postgres_size)
|
|
||||||
|
|
||||||
else:
|
|
||||||
return
|
|
||||||
|
|
||||||
def setup_db(self, db_config, database_engine):
|
def setup_db(self, db_config, database_engine):
|
||||||
db_conn = database_engine.module.connect(
|
db_conn = database_engine.module.connect(
|
||||||
**{
|
**{
|
||||||
@@ -421,7 +261,7 @@ class Porter(object):
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
prepare_database(db_conn, database_engine, config=None)
|
database_engine.prepare_database(db_conn)
|
||||||
|
|
||||||
db_conn.commit()
|
db_conn.commit()
|
||||||
|
|
||||||
@@ -438,8 +278,8 @@ class Porter(object):
|
|||||||
**self.postgres_config["args"]
|
**self.postgres_config["args"]
|
||||||
)
|
)
|
||||||
|
|
||||||
sqlite_engine = create_engine(sqlite_config)
|
sqlite_engine = create_engine("sqlite3")
|
||||||
postgres_engine = create_engine(postgres_config)
|
postgres_engine = create_engine("psycopg2")
|
||||||
|
|
||||||
self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
|
self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
|
||||||
self.postgres_store = Store(postgres_db_pool, postgres_engine)
|
self.postgres_store = Store(postgres_db_pool, postgres_engine)
|
||||||
@@ -467,7 +307,9 @@ class Porter(object):
|
|||||||
|
|
||||||
postgres_tables = yield self.postgres_store._simple_select_onecol(
|
postgres_tables = yield self.postgres_store._simple_select_onecol(
|
||||||
table="information_schema.tables",
|
table="information_schema.tables",
|
||||||
keyvalues={},
|
keyvalues={
|
||||||
|
"table_schema": "public",
|
||||||
|
},
|
||||||
retcol="distinct table_name",
|
retcol="distinct table_name",
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -481,32 +323,10 @@ class Porter(object):
|
|||||||
txn.execute(
|
txn.execute(
|
||||||
"CREATE TABLE port_from_sqlite3 ("
|
"CREATE TABLE port_from_sqlite3 ("
|
||||||
" table_name varchar(100) NOT NULL UNIQUE,"
|
" table_name varchar(100) NOT NULL UNIQUE,"
|
||||||
" forward_rowid bigint NOT NULL,"
|
" rowid bigint NOT NULL"
|
||||||
" backward_rowid bigint NOT NULL"
|
|
||||||
")"
|
")"
|
||||||
)
|
)
|
||||||
|
|
||||||
# The old port script created a table with just a "rowid" column.
|
|
||||||
# We want people to be able to rerun this script from an old port
|
|
||||||
# so that they can pick up any missing events that were not
|
|
||||||
# ported across.
|
|
||||||
def alter_table(txn):
|
|
||||||
txn.execute(
|
|
||||||
"ALTER TABLE IF EXISTS port_from_sqlite3"
|
|
||||||
" RENAME rowid TO forward_rowid"
|
|
||||||
)
|
|
||||||
txn.execute(
|
|
||||||
"ALTER TABLE IF EXISTS port_from_sqlite3"
|
|
||||||
" ADD backward_rowid bigint NOT NULL DEFAULT 0"
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
yield self.postgres_store.runInteraction(
|
|
||||||
"alter_table", alter_table
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
logger.info("Failed to create port table: %s", e)
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
yield self.postgres_store.runInteraction(
|
yield self.postgres_store.runInteraction(
|
||||||
"create_port_table", create_port_table
|
"create_port_table", create_port_table
|
||||||
@@ -566,7 +386,7 @@ class Porter(object):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _setup_sent_transactions(self):
|
def _setup_sent_transactions(self):
|
||||||
# Only save things from the last day
|
# Only save things from the last day
|
||||||
yesterday = int(time.time() * 1000) - 86400000
|
yesterday = int(time.time()*1000) - 86400000
|
||||||
|
|
||||||
# And save the max transaction id from each destination
|
# And save the max transaction id from each destination
|
||||||
select = (
|
select = (
|
||||||
@@ -592,17 +412,14 @@ class Porter(object):
|
|||||||
self._convert_rows("sent_transactions", headers, rows)
|
self._convert_rows("sent_transactions", headers, rows)
|
||||||
|
|
||||||
inserted_rows = len(rows)
|
inserted_rows = len(rows)
|
||||||
if inserted_rows:
|
max_inserted_rowid = max(r[0] for r in rows)
|
||||||
max_inserted_rowid = max(r[0] for r in rows)
|
|
||||||
|
|
||||||
def insert(txn):
|
def insert(txn):
|
||||||
self.postgres_store.insert_many_txn(
|
self.postgres_store.insert_many_txn(
|
||||||
txn, "sent_transactions", headers[1:], rows
|
txn, "sent_transactions", headers[1:], rows
|
||||||
)
|
)
|
||||||
|
|
||||||
yield self.postgres_store.execute(insert)
|
yield self.postgres_store.execute(insert)
|
||||||
else:
|
|
||||||
max_inserted_rowid = 0
|
|
||||||
|
|
||||||
def get_start_id(txn):
|
def get_start_id(txn):
|
||||||
txn.execute(
|
txn.execute(
|
||||||
@@ -622,11 +439,7 @@ class Porter(object):
|
|||||||
|
|
||||||
yield self.postgres_store._simple_insert(
|
yield self.postgres_store._simple_insert(
|
||||||
table="port_from_sqlite3",
|
table="port_from_sqlite3",
|
||||||
values={
|
values={"table_name": "sent_transactions", "rowid": next_chunk}
|
||||||
"table_name": "sent_transactions",
|
|
||||||
"forward_rowid": next_chunk,
|
|
||||||
"backward_rowid": 0,
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_sent_table_size(txn):
|
def get_sent_table_size(txn):
|
||||||
@@ -647,18 +460,13 @@ class Porter(object):
|
|||||||
defer.returnValue((next_chunk, inserted_rows, total_count))
|
defer.returnValue((next_chunk, inserted_rows, total_count))
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
|
def _get_remaining_count_to_port(self, table, next_chunk):
|
||||||
frows = yield self.sqlite_store.execute_sql(
|
rows = yield self.sqlite_store.execute_sql(
|
||||||
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
|
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
|
||||||
forward_chunk,
|
next_chunk,
|
||||||
)
|
)
|
||||||
|
|
||||||
brows = yield self.sqlite_store.execute_sql(
|
defer.returnValue(rows[0][0])
|
||||||
"SELECT count(*) FROM %s WHERE rowid <= ?" % (table,),
|
|
||||||
backward_chunk,
|
|
||||||
)
|
|
||||||
|
|
||||||
defer.returnValue(frows[0][0] + brows[0][0])
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _get_already_ported_count(self, table):
|
def _get_already_ported_count(self, table):
|
||||||
@@ -669,10 +477,10 @@ class Porter(object):
|
|||||||
defer.returnValue(rows[0][0])
|
defer.returnValue(rows[0][0])
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
|
def _get_total_count_to_port(self, table, next_chunk):
|
||||||
remaining, done = yield defer.gatherResults(
|
remaining, done = yield defer.gatherResults(
|
||||||
[
|
[
|
||||||
self._get_remaining_count_to_port(table, forward_chunk, backward_chunk),
|
self._get_remaining_count_to_port(table, next_chunk),
|
||||||
self._get_already_ported_count(table),
|
self._get_already_ported_count(table),
|
||||||
],
|
],
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
@@ -803,7 +611,7 @@ class CursesProgress(Progress):
|
|||||||
color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)
|
color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)
|
||||||
|
|
||||||
self.stdscr.addstr(
|
self.stdscr.addstr(
|
||||||
i + 2, left_margin + max_len - len(table),
|
i+2, left_margin + max_len - len(table),
|
||||||
table,
|
table,
|
||||||
curses.A_BOLD | color,
|
curses.A_BOLD | color,
|
||||||
)
|
)
|
||||||
@@ -811,18 +619,18 @@ class CursesProgress(Progress):
|
|||||||
size = 20
|
size = 20
|
||||||
|
|
||||||
progress = "[%s%s]" % (
|
progress = "[%s%s]" % (
|
||||||
"#" * int(perc * size / 100),
|
"#" * int(perc*size/100),
|
||||||
" " * (size - int(perc * size / 100)),
|
" " * (size - int(perc*size/100)),
|
||||||
)
|
)
|
||||||
|
|
||||||
self.stdscr.addstr(
|
self.stdscr.addstr(
|
||||||
i + 2, left_margin + max_len + middle_space,
|
i+2, left_margin + max_len + middle_space,
|
||||||
"%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
|
"%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
|
||||||
)
|
)
|
||||||
|
|
||||||
if self.finished:
|
if self.finished:
|
||||||
self.stdscr.addstr(
|
self.stdscr.addstr(
|
||||||
rows - 1, 0,
|
rows-1, 0,
|
||||||
"Press any key to exit...",
|
"Press any key to exit...",
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2015, 2016 OpenMarket Ltd
|
# Copyright 2015 OpenMarket Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@@ -25,26 +25,18 @@ import urllib2
|
|||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
|
|
||||||
def request_registration(user, password, server_location, shared_secret, admin=False):
|
def request_registration(user, password, server_location, shared_secret):
|
||||||
mac = hmac.new(
|
mac = hmac.new(
|
||||||
key=shared_secret,
|
key=shared_secret,
|
||||||
|
msg=user,
|
||||||
digestmod=hashlib.sha1,
|
digestmod=hashlib.sha1,
|
||||||
)
|
).hexdigest()
|
||||||
|
|
||||||
mac.update(user)
|
|
||||||
mac.update("\x00")
|
|
||||||
mac.update(password)
|
|
||||||
mac.update("\x00")
|
|
||||||
mac.update("admin" if admin else "notadmin")
|
|
||||||
|
|
||||||
mac = mac.hexdigest()
|
|
||||||
|
|
||||||
data = {
|
data = {
|
||||||
"user": user,
|
"user": user,
|
||||||
"password": password,
|
"password": password,
|
||||||
"mac": mac,
|
"mac": mac,
|
||||||
"type": "org.matrix.login.shared_secret",
|
"type": "org.matrix.login.shared_secret",
|
||||||
"admin": admin,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
server_location = server_location.rstrip("/")
|
server_location = server_location.rstrip("/")
|
||||||
@@ -76,7 +68,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
def register_new_user(user, password, server_location, shared_secret, admin):
|
def register_new_user(user, password, server_location, shared_secret):
|
||||||
if not user:
|
if not user:
|
||||||
try:
|
try:
|
||||||
default_user = getpass.getuser()
|
default_user = getpass.getuser()
|
||||||
@@ -107,14 +99,7 @@ def register_new_user(user, password, server_location, shared_secret, admin):
|
|||||||
print "Passwords do not match"
|
print "Passwords do not match"
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
if not admin:
|
request_registration(user, password, server_location, shared_secret)
|
||||||
admin = raw_input("Make admin [no]: ")
|
|
||||||
if admin in ("y", "yes", "true"):
|
|
||||||
admin = True
|
|
||||||
else:
|
|
||||||
admin = False
|
|
||||||
|
|
||||||
request_registration(user, password, server_location, shared_secret, bool(admin))
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
@@ -134,11 +119,6 @@ if __name__ == "__main__":
|
|||||||
default=None,
|
default=None,
|
||||||
help="New password for user. Will prompt if omitted.",
|
help="New password for user. Will prompt if omitted.",
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
|
||||||
"-a", "--admin",
|
|
||||||
action="store_true",
|
|
||||||
help="Register new user as an admin. Will prompt if omitted.",
|
|
||||||
)
|
|
||||||
|
|
||||||
group = parser.add_mutually_exclusive_group(required=True)
|
group = parser.add_mutually_exclusive_group(required=True)
|
||||||
group.add_argument(
|
group.add_argument(
|
||||||
@@ -171,4 +151,4 @@ if __name__ == "__main__":
|
|||||||
else:
|
else:
|
||||||
secret = args.shared_secret
|
secret = args.shared_secret
|
||||||
|
|
||||||
register_new_user(args.user, args.password, args.server_url, secret, args.admin)
|
register_new_user(args.user, args.password, args.server_url, secret)
|
||||||
|
|||||||
331
scripts/upgrade_db_to_v0.6.0.py
Executable file
331
scripts/upgrade_db_to_v0.6.0.py
Executable file
@@ -0,0 +1,331 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
from synapse.storage import SCHEMA_VERSION, read_schema
|
||||||
|
from synapse.storage._base import SQLBaseStore
|
||||||
|
from synapse.storage.signatures import SignatureStore
|
||||||
|
from synapse.storage.event_federation import EventFederationStore
|
||||||
|
|
||||||
|
from syutil.base64util import encode_base64, decode_base64
|
||||||
|
|
||||||
|
from synapse.crypto.event_signing import compute_event_signature
|
||||||
|
|
||||||
|
from synapse.events.builder import EventBuilder
|
||||||
|
from synapse.events.utils import prune_event
|
||||||
|
|
||||||
|
from synapse.crypto.event_signing import check_event_content_hash
|
||||||
|
|
||||||
|
from syutil.crypto.jsonsign import (
|
||||||
|
verify_signed_json, SignatureVerifyException,
|
||||||
|
)
|
||||||
|
from syutil.crypto.signing_key import decode_verify_key_bytes
|
||||||
|
|
||||||
|
from syutil.jsonutil import encode_canonical_json
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
# import dns.resolver
|
||||||
|
import hashlib
|
||||||
|
import httplib
|
||||||
|
import json
|
||||||
|
import sqlite3
|
||||||
|
import syutil
|
||||||
|
import urllib2
|
||||||
|
|
||||||
|
|
||||||
|
delta_sql = """
|
||||||
|
CREATE TABLE IF NOT EXISTS event_json(
|
||||||
|
event_id TEXT NOT NULL,
|
||||||
|
room_id TEXT NOT NULL,
|
||||||
|
internal_metadata NOT NULL,
|
||||||
|
json BLOB NOT NULL,
|
||||||
|
CONSTRAINT ev_j_uniq UNIQUE (event_id)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS event_json_id ON event_json(event_id);
|
||||||
|
CREATE INDEX IF NOT EXISTS event_json_room_id ON event_json(room_id);
|
||||||
|
|
||||||
|
PRAGMA user_version = 10;
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
class Store(object):
|
||||||
|
_get_event_signatures_txn = SignatureStore.__dict__["_get_event_signatures_txn"]
|
||||||
|
_get_event_content_hashes_txn = SignatureStore.__dict__["_get_event_content_hashes_txn"]
|
||||||
|
_get_event_reference_hashes_txn = SignatureStore.__dict__["_get_event_reference_hashes_txn"]
|
||||||
|
_get_prev_event_hashes_txn = SignatureStore.__dict__["_get_prev_event_hashes_txn"]
|
||||||
|
_get_prev_events_and_state = EventFederationStore.__dict__["_get_prev_events_and_state"]
|
||||||
|
_get_auth_events = EventFederationStore.__dict__["_get_auth_events"]
|
||||||
|
cursor_to_dict = SQLBaseStore.__dict__["cursor_to_dict"]
|
||||||
|
_simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
|
||||||
|
_simple_select_list_txn = SQLBaseStore.__dict__["_simple_select_list_txn"]
|
||||||
|
_simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
|
||||||
|
|
||||||
|
def _generate_event_json(self, txn, rows):
|
||||||
|
events = []
|
||||||
|
for row in rows:
|
||||||
|
d = dict(row)
|
||||||
|
|
||||||
|
d.pop("stream_ordering", None)
|
||||||
|
d.pop("topological_ordering", None)
|
||||||
|
d.pop("processed", None)
|
||||||
|
|
||||||
|
if "origin_server_ts" not in d:
|
||||||
|
d["origin_server_ts"] = d.pop("ts", 0)
|
||||||
|
else:
|
||||||
|
d.pop("ts", 0)
|
||||||
|
|
||||||
|
d.pop("prev_state", None)
|
||||||
|
d.update(json.loads(d.pop("unrecognized_keys")))
|
||||||
|
|
||||||
|
d["sender"] = d.pop("user_id")
|
||||||
|
|
||||||
|
d["content"] = json.loads(d["content"])
|
||||||
|
|
||||||
|
if "age_ts" not in d:
|
||||||
|
# For compatibility
|
||||||
|
d["age_ts"] = d.get("origin_server_ts", 0)
|
||||||
|
|
||||||
|
d.setdefault("unsigned", {})["age_ts"] = d.pop("age_ts")
|
||||||
|
|
||||||
|
outlier = d.pop("outlier", False)
|
||||||
|
|
||||||
|
# d.pop("membership", None)
|
||||||
|
|
||||||
|
d.pop("state_hash", None)
|
||||||
|
|
||||||
|
d.pop("replaces_state", None)
|
||||||
|
|
||||||
|
b = EventBuilder(d)
|
||||||
|
b.internal_metadata.outlier = outlier
|
||||||
|
|
||||||
|
events.append(b)
|
||||||
|
|
||||||
|
for i, ev in enumerate(events):
|
||||||
|
signatures = self._get_event_signatures_txn(
|
||||||
|
txn, ev.event_id,
|
||||||
|
)
|
||||||
|
|
||||||
|
ev.signatures = {
|
||||||
|
n: {
|
||||||
|
k: encode_base64(v) for k, v in s.items()
|
||||||
|
}
|
||||||
|
for n, s in signatures.items()
|
||||||
|
}
|
||||||
|
|
||||||
|
hashes = self._get_event_content_hashes_txn(
|
||||||
|
txn, ev.event_id,
|
||||||
|
)
|
||||||
|
|
||||||
|
ev.hashes = {
|
||||||
|
k: encode_base64(v) for k, v in hashes.items()
|
||||||
|
}
|
||||||
|
|
||||||
|
prevs = self._get_prev_events_and_state(txn, ev.event_id)
|
||||||
|
|
||||||
|
ev.prev_events = [
|
||||||
|
(e_id, h)
|
||||||
|
for e_id, h, is_state in prevs
|
||||||
|
if is_state == 0
|
||||||
|
]
|
||||||
|
|
||||||
|
# ev.auth_events = self._get_auth_events(txn, ev.event_id)
|
||||||
|
|
||||||
|
hashes = dict(ev.auth_events)
|
||||||
|
|
||||||
|
for e_id, hash in ev.prev_events:
|
||||||
|
if e_id in hashes and not hash:
|
||||||
|
hash.update(hashes[e_id])
|
||||||
|
#
|
||||||
|
# if hasattr(ev, "state_key"):
|
||||||
|
# ev.prev_state = [
|
||||||
|
# (e_id, h)
|
||||||
|
# for e_id, h, is_state in prevs
|
||||||
|
# if is_state == 1
|
||||||
|
# ]
|
||||||
|
|
||||||
|
return [e.build() for e in events]
|
||||||
|
|
||||||
|
|
||||||
|
store = Store()
|
||||||
|
|
||||||
|
|
||||||
|
# def get_key(server_name):
|
||||||
|
# print "Getting keys for: %s" % (server_name,)
|
||||||
|
# targets = []
|
||||||
|
# if ":" in server_name:
|
||||||
|
# target, port = server_name.split(":")
|
||||||
|
# targets.append((target, int(port)))
|
||||||
|
# try:
|
||||||
|
# answers = dns.resolver.query("_matrix._tcp." + server_name, "SRV")
|
||||||
|
# for srv in answers:
|
||||||
|
# targets.append((srv.target, srv.port))
|
||||||
|
# except dns.resolver.NXDOMAIN:
|
||||||
|
# targets.append((server_name, 8448))
|
||||||
|
# except:
|
||||||
|
# print "Failed to lookup keys for %s" % (server_name,)
|
||||||
|
# return {}
|
||||||
|
#
|
||||||
|
# for target, port in targets:
|
||||||
|
# url = "https://%s:%i/_matrix/key/v1" % (target, port)
|
||||||
|
# try:
|
||||||
|
# keys = json.load(urllib2.urlopen(url, timeout=2))
|
||||||
|
# verify_keys = {}
|
||||||
|
# for key_id, key_base64 in keys["verify_keys"].items():
|
||||||
|
# verify_key = decode_verify_key_bytes(
|
||||||
|
# key_id, decode_base64(key_base64)
|
||||||
|
# )
|
||||||
|
# verify_signed_json(keys, server_name, verify_key)
|
||||||
|
# verify_keys[key_id] = verify_key
|
||||||
|
# print "Got keys for: %s" % (server_name,)
|
||||||
|
# return verify_keys
|
||||||
|
# except urllib2.URLError:
|
||||||
|
# pass
|
||||||
|
# except urllib2.HTTPError:
|
||||||
|
# pass
|
||||||
|
# except httplib.HTTPException:
|
||||||
|
# pass
|
||||||
|
#
|
||||||
|
# print "Failed to get keys for %s" % (server_name,)
|
||||||
|
# return {}
|
||||||
|
|
||||||
|
|
||||||
|
def reinsert_events(cursor, server_name, signing_key):
|
||||||
|
print "Running delta: v10"
|
||||||
|
|
||||||
|
cursor.executescript(delta_sql)
|
||||||
|
|
||||||
|
cursor.execute(
|
||||||
|
"SELECT * FROM events ORDER BY rowid ASC"
|
||||||
|
)
|
||||||
|
|
||||||
|
print "Getting events..."
|
||||||
|
|
||||||
|
rows = store.cursor_to_dict(cursor)
|
||||||
|
|
||||||
|
events = store._generate_event_json(cursor, rows)
|
||||||
|
|
||||||
|
print "Got events from DB."
|
||||||
|
|
||||||
|
algorithms = {
|
||||||
|
"sha256": hashlib.sha256,
|
||||||
|
}
|
||||||
|
|
||||||
|
key_id = "%s:%s" % (signing_key.alg, signing_key.version)
|
||||||
|
verify_key = signing_key.verify_key
|
||||||
|
verify_key.alg = signing_key.alg
|
||||||
|
verify_key.version = signing_key.version
|
||||||
|
|
||||||
|
server_keys = {
|
||||||
|
server_name: {
|
||||||
|
key_id: verify_key
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
i = 0
|
||||||
|
N = len(events)
|
||||||
|
|
||||||
|
for event in events:
|
||||||
|
if i % 100 == 0:
|
||||||
|
print "Processed: %d/%d events" % (i,N,)
|
||||||
|
i += 1
|
||||||
|
|
||||||
|
# for alg_name in event.hashes:
|
||||||
|
# if check_event_content_hash(event, algorithms[alg_name]):
|
||||||
|
# pass
|
||||||
|
# else:
|
||||||
|
# pass
|
||||||
|
# print "FAIL content hash %s %s" % (alg_name, event.event_id, )
|
||||||
|
|
||||||
|
have_own_correctly_signed = False
|
||||||
|
for host, sigs in event.signatures.items():
|
||||||
|
pruned = prune_event(event)
|
||||||
|
|
||||||
|
for key_id in sigs:
|
||||||
|
if host not in server_keys:
|
||||||
|
server_keys[host] = {} # get_key(host)
|
||||||
|
if key_id in server_keys[host]:
|
||||||
|
try:
|
||||||
|
verify_signed_json(
|
||||||
|
pruned.get_pdu_json(),
|
||||||
|
host,
|
||||||
|
server_keys[host][key_id]
|
||||||
|
)
|
||||||
|
|
||||||
|
if host == server_name:
|
||||||
|
have_own_correctly_signed = True
|
||||||
|
except SignatureVerifyException:
|
||||||
|
print "FAIL signature check %s %s" % (
|
||||||
|
key_id, event.event_id
|
||||||
|
)
|
||||||
|
|
||||||
|
# TODO: Re sign with our own server key
|
||||||
|
if not have_own_correctly_signed:
|
||||||
|
sigs = compute_event_signature(event, server_name, signing_key)
|
||||||
|
event.signatures.update(sigs)
|
||||||
|
|
||||||
|
pruned = prune_event(event)
|
||||||
|
|
||||||
|
for key_id in event.signatures[server_name]:
|
||||||
|
verify_signed_json(
|
||||||
|
pruned.get_pdu_json(),
|
||||||
|
server_name,
|
||||||
|
server_keys[server_name][key_id]
|
||||||
|
)
|
||||||
|
|
||||||
|
event_json = encode_canonical_json(
|
||||||
|
event.get_dict()
|
||||||
|
).decode("UTF-8")
|
||||||
|
|
||||||
|
metadata_json = encode_canonical_json(
|
||||||
|
event.internal_metadata.get_dict()
|
||||||
|
).decode("UTF-8")
|
||||||
|
|
||||||
|
store._simple_insert_txn(
|
||||||
|
cursor,
|
||||||
|
table="event_json",
|
||||||
|
values={
|
||||||
|
"event_id": event.event_id,
|
||||||
|
"room_id": event.room_id,
|
||||||
|
"internal_metadata": metadata_json,
|
||||||
|
"json": event_json,
|
||||||
|
},
|
||||||
|
or_replace=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def main(database, server_name, signing_key):
|
||||||
|
conn = sqlite3.connect(database)
|
||||||
|
cursor = conn.cursor()
|
||||||
|
|
||||||
|
# Do other deltas:
|
||||||
|
cursor.execute("PRAGMA user_version")
|
||||||
|
row = cursor.fetchone()
|
||||||
|
|
||||||
|
if row and row[0]:
|
||||||
|
user_version = row[0]
|
||||||
|
# Run every version since after the current version.
|
||||||
|
for v in range(user_version + 1, 10):
|
||||||
|
print "Running delta: %d" % (v,)
|
||||||
|
sql_script = read_schema("delta/v%d" % (v,))
|
||||||
|
cursor.executescript(sql_script)
|
||||||
|
|
||||||
|
reinsert_events(cursor, server_name, signing_key)
|
||||||
|
|
||||||
|
conn.commit()
|
||||||
|
|
||||||
|
print "Success!"
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
|
||||||
|
parser.add_argument("database")
|
||||||
|
parser.add_argument("server_name")
|
||||||
|
parser.add_argument(
|
||||||
|
"signing_key", type=argparse.FileType('r'),
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
signing_key = syutil.crypto.signing_key.read_signing_keys(
|
||||||
|
args.signing_key
|
||||||
|
)
|
||||||
|
|
||||||
|
main(args.database, args.server_name, signing_key[0])
|
||||||
@@ -3,6 +3,9 @@ source-dir = docs/sphinx
|
|||||||
build-dir = docs/build
|
build-dir = docs/build
|
||||||
all_files = 1
|
all_files = 1
|
||||||
|
|
||||||
|
[aliases]
|
||||||
|
test = trial
|
||||||
|
|
||||||
[trial]
|
[trial]
|
||||||
test_suite = tests
|
test_suite = tests
|
||||||
|
|
||||||
@@ -13,8 +16,3 @@ ignore =
|
|||||||
docs/*
|
docs/*
|
||||||
pylint.cfg
|
pylint.cfg
|
||||||
tox.ini
|
tox.ini
|
||||||
|
|
||||||
[flake8]
|
|
||||||
max-line-length = 90
|
|
||||||
# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
|
|
||||||
ignore = W503
|
|
||||||
|
|||||||
53
setup.py
53
setup.py
@@ -1,6 +1,6 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
|
|
||||||
# Copyright 2014-2016 OpenMarket Ltd
|
# Copyright 2014 OpenMarket Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@@ -16,52 +16,12 @@
|
|||||||
|
|
||||||
import glob
|
import glob
|
||||||
import os
|
import os
|
||||||
from setuptools import setup, find_packages, Command
|
from setuptools import setup, find_packages
|
||||||
import sys
|
|
||||||
|
|
||||||
|
|
||||||
here = os.path.abspath(os.path.dirname(__file__))
|
here = os.path.abspath(os.path.dirname(__file__))
|
||||||
|
|
||||||
|
|
||||||
# Some notes on `setup.py test`:
|
|
||||||
#
|
|
||||||
# Once upon a time we used to try to make `setup.py test` run `tox` to run the
|
|
||||||
# tests. That's a bad idea for three reasons:
|
|
||||||
#
|
|
||||||
# 1: `setup.py test` is supposed to find out whether the tests work in the
|
|
||||||
# *current* environmentt, not whatever tox sets up.
|
|
||||||
# 2: Empirically, trying to install tox during the test run wasn't working ("No
|
|
||||||
# module named virtualenv").
|
|
||||||
# 3: The tox documentation advises against it[1].
|
|
||||||
#
|
|
||||||
# Even further back in time, we used to use setuptools_trial [2]. That has its
|
|
||||||
# own set of issues: for instance, it requires installation of Twisted to build
|
|
||||||
# an sdist (because the recommended mode of usage is to add it to
|
|
||||||
# `setup_requires`). That in turn means that in order to successfully run tox
|
|
||||||
# you have to have the python header files installed for whichever version of
|
|
||||||
# python tox uses (which is python3 on recent ubuntus, for example).
|
|
||||||
#
|
|
||||||
# So, for now at least, we stick with what appears to be the convention among
|
|
||||||
# Twisted projects, and don't attempt to do anything when someone runs
|
|
||||||
# `setup.py test`; instead we direct people to run `trial` directly if they
|
|
||||||
# care.
|
|
||||||
#
|
|
||||||
# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command
|
|
||||||
# [2]: https://pypi.python.org/pypi/setuptools_trial
|
|
||||||
class TestCommand(Command):
|
|
||||||
user_options = []
|
|
||||||
|
|
||||||
def initialize_options(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def finalize_options(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
print ("""Synapse's tests cannot be run via setup.py. To run them, try:
|
|
||||||
PYTHONPATH="." trial tests
|
|
||||||
""")
|
|
||||||
|
|
||||||
def read_file(path_segments):
|
def read_file(path_segments):
|
||||||
"""Read a file from the package. Takes a list of strings to join to
|
"""Read a file from the package. Takes a list of strings to join to
|
||||||
make the path"""
|
make the path"""
|
||||||
@@ -77,7 +37,6 @@ def exec_file(path_segments):
|
|||||||
exec(code, result)
|
exec(code, result)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
version = exec_file(("synapse", "__init__.py"))["__version__"]
|
version = exec_file(("synapse", "__init__.py"))["__version__"]
|
||||||
dependencies = exec_file(("synapse", "python_dependencies.py"))
|
dependencies = exec_file(("synapse", "python_dependencies.py"))
|
||||||
long_description = read_file(("README.rst",))
|
long_description = read_file(("README.rst",))
|
||||||
@@ -88,10 +47,14 @@ setup(
|
|||||||
packages=find_packages(exclude=["tests", "tests.*"]),
|
packages=find_packages(exclude=["tests", "tests.*"]),
|
||||||
description="Reference Synapse Home Server",
|
description="Reference Synapse Home Server",
|
||||||
install_requires=dependencies['requirements'](include_conditional=True).keys(),
|
install_requires=dependencies['requirements'](include_conditional=True).keys(),
|
||||||
dependency_links=dependencies["DEPENDENCY_LINKS"].values(),
|
setup_requires=[
|
||||||
|
"Twisted==14.0.2", # Here to override setuptools_trial's dependency on Twisted>=2.4.0
|
||||||
|
"setuptools_trial",
|
||||||
|
"mock"
|
||||||
|
],
|
||||||
|
dependency_links=dependencies["DEPENDENCY_LINKS"],
|
||||||
include_package_data=True,
|
include_package_data=True,
|
||||||
zip_safe=False,
|
zip_safe=False,
|
||||||
long_description=long_description,
|
long_description=long_description,
|
||||||
scripts=["synctl"] + glob.glob("scripts/*"),
|
scripts=["synctl"] + glob.glob("scripts/*"),
|
||||||
cmdclass={'test': TestCommand},
|
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2014-2016 OpenMarket Ltd
|
# Copyright 2014, 2015 OpenMarket Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@@ -16,4 +16,4 @@
|
|||||||
""" This is a reference implementation of a Matrix home server.
|
""" This is a reference implementation of a Matrix home server.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
__version__ = "0.23.1"
|
__version__ = "0.9.2-r2"
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2014-2016 OpenMarket Ltd
|
# Copyright 2014, 2015 OpenMarket Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,5 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2014-2016 OpenMarket Ltd
|
# Copyright 2014, 2015 OpenMarket Ltd
|
||||||
# Copyright 2017 Vector Creations Ltd
|
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@@ -28,11 +27,22 @@ class Membership(object):
|
|||||||
LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)
|
LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)
|
||||||
|
|
||||||
|
|
||||||
|
class Feedback(object):
|
||||||
|
|
||||||
|
"""Represents the types of feedback a user can send in response to a
|
||||||
|
message."""
|
||||||
|
|
||||||
|
DELIVERED = u"delivered"
|
||||||
|
READ = u"read"
|
||||||
|
LIST = (DELIVERED, READ)
|
||||||
|
|
||||||
|
|
||||||
class PresenceState(object):
|
class PresenceState(object):
|
||||||
"""Represents the presence state of a user."""
|
"""Represents the presence state of a user."""
|
||||||
OFFLINE = u"offline"
|
OFFLINE = u"offline"
|
||||||
UNAVAILABLE = u"unavailable"
|
UNAVAILABLE = u"unavailable"
|
||||||
ONLINE = u"online"
|
ONLINE = u"online"
|
||||||
|
FREE_FOR_CHAT = u"free_for_chat"
|
||||||
|
|
||||||
|
|
||||||
class JoinRules(object):
|
class JoinRules(object):
|
||||||
@@ -44,8 +54,10 @@ class JoinRules(object):
|
|||||||
|
|
||||||
class LoginType(object):
|
class LoginType(object):
|
||||||
PASSWORD = u"m.login.password"
|
PASSWORD = u"m.login.password"
|
||||||
|
OAUTH = u"m.login.oauth2"
|
||||||
|
EMAIL_CODE = u"m.login.email.code"
|
||||||
|
EMAIL_URL = u"m.login.email.url"
|
||||||
EMAIL_IDENTITY = u"m.login.email.identity"
|
EMAIL_IDENTITY = u"m.login.email.identity"
|
||||||
MSISDN = u"m.login.msisdn"
|
|
||||||
RECAPTCHA = u"m.login.recaptcha"
|
RECAPTCHA = u"m.login.recaptcha"
|
||||||
DUMMY = u"m.login.dummy"
|
DUMMY = u"m.login.dummy"
|
||||||
|
|
||||||
@@ -61,12 +73,7 @@ class EventTypes(object):
|
|||||||
PowerLevels = "m.room.power_levels"
|
PowerLevels = "m.room.power_levels"
|
||||||
Aliases = "m.room.aliases"
|
Aliases = "m.room.aliases"
|
||||||
Redaction = "m.room.redaction"
|
Redaction = "m.room.redaction"
|
||||||
ThirdPartyInvite = "m.room.third_party_invite"
|
Feedback = "m.room.message.feedback"
|
||||||
|
|
||||||
RoomHistoryVisibility = "m.room.history_visibility"
|
|
||||||
CanonicalAlias = "m.room.canonical_alias"
|
|
||||||
RoomAvatar = "m.room.avatar"
|
|
||||||
GuestAccess = "m.room.guest_access"
|
|
||||||
|
|
||||||
# These are used for validation
|
# These are used for validation
|
||||||
Message = "m.room.message"
|
Message = "m.room.message"
|
||||||
@@ -78,14 +85,3 @@ class RejectedReason(object):
|
|||||||
AUTH_ERROR = "auth_error"
|
AUTH_ERROR = "auth_error"
|
||||||
REPLACED = "replaced"
|
REPLACED = "replaced"
|
||||||
NOT_ANCESTOR = "not_ancestor"
|
NOT_ANCESTOR = "not_ancestor"
|
||||||
|
|
||||||
|
|
||||||
class RoomCreationPreset(object):
|
|
||||||
PRIVATE_CHAT = "private_chat"
|
|
||||||
PUBLIC_CHAT = "public_chat"
|
|
||||||
TRUSTED_PRIVATE_CHAT = "trusted_private_chat"
|
|
||||||
|
|
||||||
|
|
||||||
class ThirdPartyEntityKind(object):
|
|
||||||
USER = "user"
|
|
||||||
LOCATION = "location"
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2014-2016 OpenMarket Ltd
|
# Copyright 2014, 2015 OpenMarket Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@@ -15,7 +15,6 @@
|
|||||||
|
|
||||||
"""Contains exceptions and error codes."""
|
"""Contains exceptions and error codes."""
|
||||||
|
|
||||||
import json
|
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -30,67 +29,42 @@ class Codes(object):
|
|||||||
USER_IN_USE = "M_USER_IN_USE"
|
USER_IN_USE = "M_USER_IN_USE"
|
||||||
ROOM_IN_USE = "M_ROOM_IN_USE"
|
ROOM_IN_USE = "M_ROOM_IN_USE"
|
||||||
BAD_PAGINATION = "M_BAD_PAGINATION"
|
BAD_PAGINATION = "M_BAD_PAGINATION"
|
||||||
BAD_STATE = "M_BAD_STATE"
|
|
||||||
UNKNOWN = "M_UNKNOWN"
|
UNKNOWN = "M_UNKNOWN"
|
||||||
NOT_FOUND = "M_NOT_FOUND"
|
NOT_FOUND = "M_NOT_FOUND"
|
||||||
MISSING_TOKEN = "M_MISSING_TOKEN"
|
MISSING_TOKEN = "M_MISSING_TOKEN"
|
||||||
UNKNOWN_TOKEN = "M_UNKNOWN_TOKEN"
|
UNKNOWN_TOKEN = "M_UNKNOWN_TOKEN"
|
||||||
GUEST_ACCESS_FORBIDDEN = "M_GUEST_ACCESS_FORBIDDEN"
|
|
||||||
LIMIT_EXCEEDED = "M_LIMIT_EXCEEDED"
|
LIMIT_EXCEEDED = "M_LIMIT_EXCEEDED"
|
||||||
CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
|
CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
|
||||||
CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
|
CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
|
||||||
MISSING_PARAM = "M_MISSING_PARAM"
|
MISSING_PARAM = "M_MISSING_PARAM"
|
||||||
INVALID_PARAM = "M_INVALID_PARAM"
|
|
||||||
TOO_LARGE = "M_TOO_LARGE"
|
TOO_LARGE = "M_TOO_LARGE"
|
||||||
EXCLUSIVE = "M_EXCLUSIVE"
|
EXCLUSIVE = "M_EXCLUSIVE"
|
||||||
THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
|
THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
|
||||||
THREEPID_IN_USE = "M_THREEPID_IN_USE"
|
|
||||||
THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
|
|
||||||
INVALID_USERNAME = "M_INVALID_USERNAME"
|
|
||||||
SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
|
|
||||||
|
|
||||||
|
|
||||||
class CodeMessageException(RuntimeError):
|
class CodeMessageException(RuntimeError):
|
||||||
"""An exception with integer code and message string attributes.
|
"""An exception with integer code and message string attributes."""
|
||||||
|
|
||||||
Attributes:
|
|
||||||
code (int): HTTP error code
|
|
||||||
msg (str): string describing the error
|
|
||||||
"""
|
|
||||||
def __init__(self, code, msg):
|
def __init__(self, code, msg):
|
||||||
|
logger.info("%s: %s, %s", type(self).__name__, code, msg)
|
||||||
super(CodeMessageException, self).__init__("%d: %s" % (code, msg))
|
super(CodeMessageException, self).__init__("%d: %s" % (code, msg))
|
||||||
self.code = code
|
self.code = code
|
||||||
self.msg = msg
|
self.msg = msg
|
||||||
|
self.response_code_message = None
|
||||||
|
|
||||||
def error_dict(self):
|
def error_dict(self):
|
||||||
return cs_error(self.msg)
|
return cs_error(self.msg)
|
||||||
|
|
||||||
|
|
||||||
class MatrixCodeMessageException(CodeMessageException):
|
|
||||||
"""An error from a general matrix endpoint, eg. from a proxied Matrix API call.
|
|
||||||
|
|
||||||
Attributes:
|
|
||||||
errcode (str): Matrix error code e.g 'M_FORBIDDEN'
|
|
||||||
"""
|
|
||||||
def __init__(self, code, msg, errcode=Codes.UNKNOWN):
|
|
||||||
super(MatrixCodeMessageException, self).__init__(code, msg)
|
|
||||||
self.errcode = errcode
|
|
||||||
|
|
||||||
|
|
||||||
class SynapseError(CodeMessageException):
|
class SynapseError(CodeMessageException):
|
||||||
"""A base exception type for matrix errors which have an errcode and error
|
"""A base error which can be caught for all synapse events."""
|
||||||
message (as well as an HTTP status code).
|
|
||||||
|
|
||||||
Attributes:
|
|
||||||
errcode (str): Matrix error code e.g 'M_FORBIDDEN'
|
|
||||||
"""
|
|
||||||
def __init__(self, code, msg, errcode=Codes.UNKNOWN):
|
def __init__(self, code, msg, errcode=Codes.UNKNOWN):
|
||||||
"""Constructs a synapse error.
|
"""Constructs a synapse error.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
code (int): The integer error code (an HTTP response code)
|
code (int): The integer error code (an HTTP response code)
|
||||||
msg (str): The human-readable error message.
|
msg (str): The human-readable error message.
|
||||||
errcode (str): The matrix error code e.g 'M_FORBIDDEN'
|
err (str): The error code e.g 'M_FORBIDDEN'
|
||||||
"""
|
"""
|
||||||
super(SynapseError, self).__init__(code, msg)
|
super(SynapseError, self).__init__(code, msg)
|
||||||
self.errcode = errcode
|
self.errcode = errcode
|
||||||
@@ -101,38 +75,10 @@ class SynapseError(CodeMessageException):
|
|||||||
self.errcode,
|
self.errcode,
|
||||||
)
|
)
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def from_http_response_exception(cls, err):
|
|
||||||
"""Make a SynapseError based on an HTTPResponseException
|
|
||||||
|
|
||||||
This is useful when a proxied request has failed, and we need to
|
class RoomError(SynapseError):
|
||||||
decide how to map the failure onto a matrix error to send back to the
|
"""An error raised when a room event fails."""
|
||||||
client.
|
pass
|
||||||
|
|
||||||
An attempt is made to parse the body of the http response as a matrix
|
|
||||||
error. If that succeeds, the errcode and error message from the body
|
|
||||||
are used as the errcode and error message in the new synapse error.
|
|
||||||
|
|
||||||
Otherwise, the errcode is set to M_UNKNOWN, and the error message is
|
|
||||||
set to the reason code from the HTTP response.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
err (HttpResponseException):
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
SynapseError:
|
|
||||||
"""
|
|
||||||
# try to parse the body as json, to get better errcode/msg, but
|
|
||||||
# default to M_UNKNOWN with the HTTP status as the error text
|
|
||||||
try:
|
|
||||||
j = json.loads(err.response)
|
|
||||||
except ValueError:
|
|
||||||
j = {}
|
|
||||||
errcode = j.get('errcode', Codes.UNKNOWN)
|
|
||||||
errmsg = j.get('error', err.msg)
|
|
||||||
|
|
||||||
res = SynapseError(err.code, errmsg, errcode)
|
|
||||||
return res
|
|
||||||
|
|
||||||
|
|
||||||
class RegistrationError(SynapseError):
|
class RegistrationError(SynapseError):
|
||||||
@@ -159,11 +105,13 @@ class UnrecognizedRequestError(SynapseError):
|
|||||||
|
|
||||||
class NotFoundError(SynapseError):
|
class NotFoundError(SynapseError):
|
||||||
"""An error indicating we can't find the thing you asked for"""
|
"""An error indicating we can't find the thing you asked for"""
|
||||||
def __init__(self, msg="Not found", errcode=Codes.NOT_FOUND):
|
def __init__(self, *args, **kwargs):
|
||||||
|
if "errcode" not in kwargs:
|
||||||
|
kwargs["errcode"] = Codes.NOT_FOUND
|
||||||
super(NotFoundError, self).__init__(
|
super(NotFoundError, self).__init__(
|
||||||
404,
|
404,
|
||||||
msg,
|
"Not found",
|
||||||
errcode=errcode
|
**kwargs
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -176,15 +124,6 @@ class AuthError(SynapseError):
|
|||||||
super(AuthError, self).__init__(*args, **kwargs)
|
super(AuthError, self).__init__(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
class EventSizeError(SynapseError):
|
|
||||||
"""An error raised when an event is too big."""
|
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
if "errcode" not in kwargs:
|
|
||||||
kwargs["errcode"] = Codes.TOO_LARGE
|
|
||||||
super(EventSizeError, self).__init__(413, *args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
class EventStreamError(SynapseError):
|
class EventStreamError(SynapseError):
|
||||||
"""An error raised when there a problem with the event stream."""
|
"""An error raised when there a problem with the event stream."""
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
@@ -224,6 +163,7 @@ class LimitExceededError(SynapseError):
|
|||||||
errcode=Codes.LIMIT_EXCEEDED):
|
errcode=Codes.LIMIT_EXCEEDED):
|
||||||
super(LimitExceededError, self).__init__(code, msg, errcode)
|
super(LimitExceededError, self).__init__(code, msg, errcode)
|
||||||
self.retry_after_ms = retry_after_ms
|
self.retry_after_ms = retry_after_ms
|
||||||
|
self.response_code_message = "Too Many Requests"
|
||||||
|
|
||||||
def error_dict(self):
|
def error_dict(self):
|
||||||
return cs_error(
|
return cs_error(
|
||||||
@@ -293,19 +233,6 @@ class FederationError(RuntimeError):
|
|||||||
|
|
||||||
|
|
||||||
class HttpResponseException(CodeMessageException):
|
class HttpResponseException(CodeMessageException):
|
||||||
"""
|
|
||||||
Represents an HTTP-level failure of an outbound request
|
|
||||||
|
|
||||||
Attributes:
|
|
||||||
response (str): body of response
|
|
||||||
"""
|
|
||||||
def __init__(self, code, msg, response):
|
def __init__(self, code, msg, response):
|
||||||
"""
|
|
||||||
|
|
||||||
Args:
|
|
||||||
code (int): HTTP status code
|
|
||||||
msg (str): reason phrase from HTTP response status line
|
|
||||||
response (str): body of response
|
|
||||||
"""
|
|
||||||
super(HttpResponseException, self).__init__(code, msg)
|
|
||||||
self.response = response
|
self.response = response
|
||||||
|
super(HttpResponseException, self).__init__(code, msg)
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2015, 2016 OpenMarket Ltd
|
# Copyright 2015 OpenMarket Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@@ -13,174 +13,7 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
from synapse.api.errors import SynapseError
|
from synapse.api.errors import SynapseError
|
||||||
from synapse.storage.presence import UserPresenceState
|
|
||||||
from synapse.types import UserID, RoomID
|
from synapse.types import UserID, RoomID
|
||||||
from twisted.internet import defer
|
|
||||||
|
|
||||||
import ujson as json
|
|
||||||
import jsonschema
|
|
||||||
from jsonschema import FormatChecker
|
|
||||||
|
|
||||||
FILTER_SCHEMA = {
|
|
||||||
"additionalProperties": False,
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"limit": {
|
|
||||||
"type": "number"
|
|
||||||
},
|
|
||||||
"senders": {
|
|
||||||
"$ref": "#/definitions/user_id_array"
|
|
||||||
},
|
|
||||||
"not_senders": {
|
|
||||||
"$ref": "#/definitions/user_id_array"
|
|
||||||
},
|
|
||||||
# TODO: We don't limit event type values but we probably should...
|
|
||||||
# check types are valid event types
|
|
||||||
"types": {
|
|
||||||
"type": "array",
|
|
||||||
"items": {
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"not_types": {
|
|
||||||
"type": "array",
|
|
||||||
"items": {
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
ROOM_FILTER_SCHEMA = {
    "additionalProperties": False,
    "type": "object",
    "properties": {
        "not_rooms": {
            "$ref": "#/definitions/room_id_array"
        },
        "rooms": {
            "$ref": "#/definitions/room_id_array"
        },
        "ephemeral": {
            "$ref": "#/definitions/room_event_filter"
        },
        "include_leave": {
            "type": "boolean"
        },
        "state": {
            "$ref": "#/definitions/room_event_filter"
        },
        "timeline": {
            "$ref": "#/definitions/room_event_filter"
        },
        "account_data": {
            "$ref": "#/definitions/room_event_filter"
        },
    }
}

ROOM_EVENT_FILTER_SCHEMA = {
    "additionalProperties": False,
    "type": "object",
    "properties": {
        "limit": {
            "type": "number"
        },
        "senders": {
            "$ref": "#/definitions/user_id_array"
        },
        "not_senders": {
            "$ref": "#/definitions/user_id_array"
        },
        "types": {
            "type": "array",
            "items": {
                "type": "string"
            }
        },
        "not_types": {
            "type": "array",
            "items": {
                "type": "string"
            }
        },
        "rooms": {
            "$ref": "#/definitions/room_id_array"
        },
        "not_rooms": {
            "$ref": "#/definitions/room_id_array"
        },
        "contains_url": {
            "type": "boolean"
        }
    }
}

USER_ID_ARRAY_SCHEMA = {
    "type": "array",
    "items": {
        "type": "string",
        "format": "matrix_user_id"
    }
}

ROOM_ID_ARRAY_SCHEMA = {
    "type": "array",
    "items": {
        "type": "string",
        "format": "matrix_room_id"
    }
}

USER_FILTER_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "schema for a Sync filter",
    "type": "object",
    "definitions": {
        "room_id_array": ROOM_ID_ARRAY_SCHEMA,
        "user_id_array": USER_ID_ARRAY_SCHEMA,
        "filter": FILTER_SCHEMA,
        "room_filter": ROOM_FILTER_SCHEMA,
        "room_event_filter": ROOM_EVENT_FILTER_SCHEMA
    },
    "properties": {
        "presence": {
            "$ref": "#/definitions/filter"
        },
        "account_data": {
            "$ref": "#/definitions/filter"
        },
        "room": {
            "$ref": "#/definitions/room_filter"
        },
        "event_format": {
            "type": "string",
            "enum": ["client", "federation"]
        },
        "event_fields": {
            "type": "array",
            "items": {
                "type": "string",
                # Don't allow '\\' in event field filters. This makes matching
                # events a lot easier as we can then use a negative lookbehind
                # assertion to split '\.' If we allowed \\ then it would
                # incorrectly split '\\.' See synapse.events.utils.serialize_event
                "pattern": "^((?!\\\).)*$"
            }
        }
    },
    "additionalProperties": False
}


@FormatChecker.cls_checks('matrix_room_id')
def matrix_room_id_validator(room_id_str):
    return RoomID.from_string(room_id_str)


@FormatChecker.cls_checks('matrix_user_id')
def matrix_user_id_validator(user_id_str):
    return UserID.from_string(user_id_str)

@@ -189,20 +22,20 @@ class Filtering(object):
        super(Filtering, self).__init__()
        self.store = hs.get_datastore()

-    @defer.inlineCallbacks
     def get_user_filter(self, user_localpart, filter_id):
-        result = yield self.store.get_user_filter(user_localpart, filter_id)
-        defer.returnValue(FilterCollection(result))
+        result = self.store.get_user_filter(user_localpart, filter_id)
+        result.addCallback(Filter)
+        return result

     def add_user_filter(self, user_localpart, user_filter):
-        self.check_valid_filter(user_filter)
+        self._check_valid_filter(user_filter)
         return self.store.add_user_filter(user_localpart, user_filter)

     # TODO(paul): surely we should probably add a delete_user_filter or
     # replace_user_filter at some point? There's no REST API specified for
     # them however

-    def check_valid_filter(self, user_filter_json):
+    def _check_valid_filter(self, user_filter_json):
         """Check if the provided filter is valid.

         This inspects all definitions contained within the filter.
@@ -215,214 +48,182 @@ class Filtering(object):
         # NB: Filters are the complete json blobs. "Definitions" are an
         # individual top-level key e.g. public_user_data. Filters are made of
         # many definitions.
-        try:
-            jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA,
-                                format_checker=FormatChecker())
-        except jsonschema.ValidationError as e:
-            raise SynapseError(400, e.message)
-
-
-class FilterCollection(object):
-    def __init__(self, filter_json):
-        self._filter_json = filter_json
-
-        room_filter_json = self._filter_json.get("room", {})
-
-        self._room_filter = Filter({
-            k: v for k, v in room_filter_json.items()
-            if k in ("rooms", "not_rooms")
-        })
-
-        self._room_timeline_filter = Filter(room_filter_json.get("timeline", {}))
-        self._room_state_filter = Filter(room_filter_json.get("state", {}))
-        self._room_ephemeral_filter = Filter(room_filter_json.get("ephemeral", {}))
-        self._room_account_data = Filter(room_filter_json.get("account_data", {}))
-        self._presence_filter = Filter(filter_json.get("presence", {}))
-        self._account_data = Filter(filter_json.get("account_data", {}))
-
-        self.include_leave = filter_json.get("room", {}).get(
-            "include_leave", False
-        )
-        self.event_fields = filter_json.get("event_fields", [])
-
-    def __repr__(self):
-        return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
-
-    def get_filter_json(self):
-        return self._filter_json
-
-    def timeline_limit(self):
-        return self._room_timeline_filter.limit()
-
-    def presence_limit(self):
-        return self._presence_filter.limit()
-
-    def ephemeral_limit(self):
-        return self._room_ephemeral_filter.limit()
-
-    def filter_presence(self, events):
-        return self._presence_filter.filter(events)
-
-    def filter_account_data(self, events):
-        return self._account_data.filter(events)
-
-    def filter_room_state(self, events):
-        return self._room_state_filter.filter(self._room_filter.filter(events))
-
-    def filter_room_timeline(self, events):
-        return self._room_timeline_filter.filter(self._room_filter.filter(events))
-
-    def filter_room_ephemeral(self, events):
-        return self._room_ephemeral_filter.filter(self._room_filter.filter(events))
-
-    def filter_room_account_data(self, events):
-        return self._room_account_data.filter(self._room_filter.filter(events))
-
-    def blocks_all_presence(self):
-        return (
-            self._presence_filter.filters_all_types() or
-            self._presence_filter.filters_all_senders()
-        )
-
-    def blocks_all_room_ephemeral(self):
-        return (
-            self._room_ephemeral_filter.filters_all_types() or
-            self._room_ephemeral_filter.filters_all_senders() or
-            self._room_ephemeral_filter.filters_all_rooms()
-        )
-
-    def blocks_all_room_timeline(self):
-        return (
-            self._room_timeline_filter.filters_all_types() or
-            self._room_timeline_filter.filters_all_senders() or
-            self._room_timeline_filter.filters_all_rooms()
-        )
+        top_level_definitions = [
+            "public_user_data", "private_user_data", "server_data"
+        ]
+
+        room_level_definitions = [
+            "state", "events", "ephemeral"
+        ]
+
+        for key in top_level_definitions:
+            if key in user_filter_json:
+                self._check_definition(user_filter_json[key])
+
+        if "room" in user_filter_json:
+            for key in room_level_definitions:
+                if key in user_filter_json["room"]:
+                    self._check_definition(user_filter_json["room"][key])
+
+    def _check_definition(self, definition):
+        """Check if the provided definition is valid.
+
+        This inspects not only the types but also the values to make sure they
+        make sense.
+
+        Args:
+            definition(dict): The filter definition
+        Raises:
+            SynapseError: If there was a problem with this definition.
+        """
+        # NB: Filters are the complete json blobs. "Definitions" are an
+        # individual top-level key e.g. public_user_data. Filters are made of
+        # many definitions.
+        if type(definition) != dict:
+            raise SynapseError(
+                400, "Expected JSON object, not %s" % (definition,)
+            )
+
+        # check rooms are valid room IDs
+        room_id_keys = ["rooms", "not_rooms"]
+        for key in room_id_keys:
+            if key in definition:
+                if type(definition[key]) != list:
+                    raise SynapseError(400, "Expected %s to be a list." % key)
+                for room_id in definition[key]:
+                    RoomID.from_string(room_id)
+
+        # check senders are valid user IDs
+        user_id_keys = ["senders", "not_senders"]
+        for key in user_id_keys:
+            if key in definition:
+                if type(definition[key]) != list:
+                    raise SynapseError(400, "Expected %s to be a list." % key)
+                for user_id in definition[key]:
+                    UserID.from_string(user_id)
+
+        # TODO: We don't limit event type values but we probably should...
+        # check types are valid event types
+        event_keys = ["types", "not_types"]
+        for key in event_keys:
+            if key in definition:
+                if type(definition[key]) != list:
+                    raise SynapseError(400, "Expected %s to be a list." % key)
+                for event_type in definition[key]:
+                    if not isinstance(event_type, basestring):
+                        raise SynapseError(400, "Event type should be a string")
+
+        if "format" in definition:
+            event_format = definition["format"]
+            if event_format not in ["federation", "events"]:
+                raise SynapseError(400, "Invalid format: %s" % (event_format,))
+
+        if "select" in definition:
+            event_select_list = definition["select"]
+            for select_key in event_select_list:
+                if select_key not in ["event_id", "origin_server_ts",
+                                      "thread_id", "content", "content.body"]:
+                    raise SynapseError(400, "Bad select: %s" % (select_key,))
+
+        if ("bundle_updates" in definition and
+                type(definition["bundle_updates"]) != bool):
+            raise SynapseError(400, "Bad bundle_updates: expected bool.")


 class Filter(object):
     def __init__(self, filter_json):
         self.filter_json = filter_json

-        self.types = self.filter_json.get("types", None)
-        self.not_types = self.filter_json.get("not_types", [])
-
-        self.rooms = self.filter_json.get("rooms", None)
-        self.not_rooms = self.filter_json.get("not_rooms", [])
-
-        self.senders = self.filter_json.get("senders", None)
-        self.not_senders = self.filter_json.get("not_senders", [])
-
-        self.contains_url = self.filter_json.get("contains_url", None)
-
-    def filters_all_types(self):
-        return "*" in self.not_types
-
-    def filters_all_senders(self):
-        return "*" in self.not_senders
-
-    def filters_all_rooms(self):
-        return "*" in self.not_rooms
-
-    def check(self, event):
-        """Checks whether the filter matches the given event.
-
-        Returns:
-            bool: True if the event matches
-        """
-        # We usually get the full "events" as dictionaries coming through,
-        # except for presence which actually gets passed around as its own
-        # namedtuple type.
-        if isinstance(event, UserPresenceState):
-            sender = event.user_id
-            room_id = None
-            ev_type = "m.presence"
-            is_url = False
-        else:
-            sender = event.get("sender", None)
-            if not sender:
-                # Presence events had their 'sender' in content.user_id, but are
-                # now handled above. We don't know if anything else uses this
-                # form. TODO: Check this and probably remove it.
-                content = event.get("content")
-                # account_data has been allowed to have non-dict content, so
-                # check type first
-                if isinstance(content, dict):
-                    sender = content.get("user_id")
-
-            room_id = event.get("room_id", None)
-            ev_type = event.get("type", None)
-            is_url = "url" in event.get("content", {})
-
-        return self.check_fields(
-            room_id,
-            sender,
-            ev_type,
-            is_url,
-        )
-
-    def check_fields(self, room_id, sender, event_type, contains_url):
-        """Checks whether the filter matches the given event fields.
-
-        Returns:
-            bool: True if the event fields match
-        """
-        literal_keys = {
-            "rooms": lambda v: room_id == v,
-            "senders": lambda v: sender == v,
-            "types": lambda v: _matches_wildcard(event_type, v)
-        }
-
-        for name, match_func in literal_keys.items():
-            not_name = "not_%s" % (name,)
-            disallowed_values = getattr(self, not_name)
-            if any(map(match_func, disallowed_values)):
-                return False
-
-            allowed_values = getattr(self, name)
-            if allowed_values is not None:
-                if not any(map(match_func, allowed_values)):
-                    return False
-
-        contains_url_filter = self.filter_json.get("contains_url")
-        if contains_url_filter is not None:
-            if contains_url_filter != contains_url:
-                return False
-
-        return True
-
-    def filter_rooms(self, room_ids):
-        """Apply the 'rooms' filter to a given list of rooms.
-
-        Args:
-            room_ids (list): A list of room_ids.
-
-        Returns:
-            list: A list of room_ids that match the filter
-        """
-        room_ids = set(room_ids)
-
-        disallowed_rooms = set(self.filter_json.get("not_rooms", []))
-        room_ids -= disallowed_rooms
-
-        allowed_rooms = self.filter_json.get("rooms", None)
-        if allowed_rooms is not None:
-            room_ids &= set(allowed_rooms)
-
-        return room_ids
-
-    def filter(self, events):
-        return filter(self.check, events)
-
-    def limit(self):
-        return self.filter_json.get("limit", 10)
-
-
-def _matches_wildcard(actual_value, filter_value):
-    if filter_value.endswith("*"):
-        type_prefix = filter_value[:-1]
-        return actual_value.startswith(type_prefix)
-    else:
-        return actual_value == filter_value
-
-
-DEFAULT_FILTER_COLLECTION = FilterCollection({})
+    def filter_public_user_data(self, events):
+        return self._filter_on_key(events, ["public_user_data"])
+
+    def filter_private_user_data(self, events):
+        return self._filter_on_key(events, ["private_user_data"])
+
+    def filter_room_state(self, events):
+        return self._filter_on_key(events, ["room", "state"])
+
+    def filter_room_events(self, events):
+        return self._filter_on_key(events, ["room", "events"])
+
+    def filter_room_ephemeral(self, events):
+        return self._filter_on_key(events, ["room", "ephemeral"])
+
+    def _filter_on_key(self, events, keys):
+        filter_json = self.filter_json
+        if not filter_json:
+            return events
+
+        try:
+            # extract the right definition from the filter
+            definition = filter_json
+            for key in keys:
+                definition = definition[key]
+            return self._filter_with_definition(events, definition)
+        except KeyError:
+            # return all events if definition isn't specified.
+            return events
+
+    def _filter_with_definition(self, events, definition):
+        return [e for e in events if self._passes_definition(definition, e)]
+
+    def _passes_definition(self, definition, event):
+        """Check if the event passes through the given definition.
+
+        Args:
+            definition(dict): The definition to check against.
+            event(Event): The event to check.
+        Returns:
+            True if the event passes through the filter.
+        """
+        # Algorithm notes:
+        # For each key in the definition, check the event meets the criteria:
+        #   * For types: Literal match or prefix match (if ends with wildcard)
+        #   * For senders/rooms: Literal match only
+        #   * "not_" checks take presedence (e.g. if "m.*" is in both 'types'
+        #     and 'not_types' then it is treated as only being in 'not_types')
+
+        # room checks
+        if hasattr(event, "room_id"):
+            room_id = event.room_id
+            allow_rooms = definition.get("rooms", None)
+            reject_rooms = definition.get("not_rooms", None)
+            if reject_rooms and room_id in reject_rooms:
+                return False
+            if allow_rooms and room_id not in allow_rooms:
+                return False
+
+        # sender checks
+        if hasattr(event, "sender"):
+            # Should we be including event.state_key for some event types?
+            sender = event.sender
+            allow_senders = definition.get("senders", None)
+            reject_senders = definition.get("not_senders", None)
+            if reject_senders and sender in reject_senders:
+                return False
+            if allow_senders and sender not in allow_senders:
+                return False
+
+        # type checks
+        if "not_types" in definition:
+            for def_type in definition["not_types"]:
+                if self._event_matches_type(event, def_type):
+                    return False
+        if "types" in definition:
+            included = False
+            for def_type in definition["types"]:
+                if self._event_matches_type(event, def_type):
+                    included = True
+                    break
+            if not included:
+                return False
+
+        return True
+
+    def _event_matches_type(self, event, def_type):
+        if def_type.endswith("*"):
+            type_prefix = def_type[:-1]
+            return event.type.startswith(type_prefix)
+        else:
+            return event.type == def_type
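A minimal sketch of the kind of sync filter that the jsonschema-based `check_valid_filter` path above validates. The filter values (room ID, limit, event type) are made up for illustration; the schema names, the `jsonschema.validate` call and the `FormatChecker` usage mirror the code in the diff, and the sketch assumes the module-level schema definitions above are in scope.

```python
import jsonschema
from jsonschema import FormatChecker

# Hypothetical sync filter: only recent m.room.message events from one room.
example_filter = {
    "room": {
        "rooms": ["!abcdefgh:example.com"],  # illustrative room ID
        "timeline": {"limit": 10, "types": ["m.room.message"]},
    },
    "event_format": "client",
}

# Mirrors the validation performed by Filtering.check_valid_filter() above;
# raises jsonschema.ValidationError if the filter does not match the schema.
jsonschema.validate(example_filter, USER_FILTER_SCHEMA,
                    format_checker=FormatChecker())
```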
synapse/api/ratelimiter.py

@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014, 2015 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ class Ratelimiter(object):
     def __init__(self):
         self.message_counts = collections.OrderedDict()

-    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count, update=True):
+    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count):
         """Can the user send a message?
         Args:
             user_id: The user sending a message.
@@ -32,15 +32,12 @@ class Ratelimiter(object):
                 second.
             burst_count: How many messages the user can send before being
                 limited.
-            update (bool): Whether to update the message rates or not. This is
-                useful to check if a message would be allowed to be sent before
-                its ready to be actually sent.
         Returns:
             A pair of a bool indicating if they can send a message now and a
             time in seconds of when they can next send a message.
         """
         self.prune_message_counts(time_now_s)
-        message_count, time_start, _ignored = self.message_counts.get(
+        message_count, time_start, _ignored = self.message_counts.pop(
             user_id, (0., time_now_s, None),
         )
         time_delta = time_now_s - time_start
@@ -55,10 +52,9 @@ class Ratelimiter(object):
         allowed = True
         message_count += 1

-        if update:
-            self.message_counts[user_id] = (
-                message_count, time_start, msg_rate_hz
-            )
+        self.message_counts[user_id] = (
+            message_count, time_start, msg_rate_hz
+        )

         if msg_rate_hz > 0:
             time_allowed = (
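A short usage sketch of the `send_message` API described in the docstring above, assuming the newer signature with the `update` keyword. The user ID, rate and burst numbers are arbitrary illustrations, not values taken from the diff.

```python
import time

from synapse.api.ratelimiter import Ratelimiter

limiter = Ratelimiter()

# send_message returns (allowed, time_allowed): whether the message may be
# sent now, and the earliest time in seconds at which the next one is allowed.
allowed, time_allowed = limiter.send_message(
    user_id="@alice:example.com",  # illustrative user ID
    time_now_s=time.time(),
    msg_rate_hz=0.2,               # arbitrary: roughly one message per 5 seconds
    burst_count=10.0,              # arbitrary burst allowance
)
if not allowed:
    wait_s = time_allowed - time.time()
```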
synapse/api/urls.py

@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014, 2015 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,5 +23,5 @@ WEB_CLIENT_PREFIX = "/_matrix/client"
 CONTENT_REPO_PREFIX = "/_matrix/content"
 SERVER_KEY_PREFIX = "/_matrix/key/v1"
 SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
-MEDIA_PREFIX = "/_matrix/media/r0"
-LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
+MEDIA_PREFIX = "/_matrix/media/v1"
+APP_SERVICE_PREFIX = "/_matrix/appservice/v1"
synapse/app/__init__.py

@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014, 2015 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,20 +12,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-import sys
-sys.dont_write_bytecode = True
-
-from synapse import python_dependencies  # noqa: E402
-
-try:
-    python_dependencies.check_requirements()
-except python_dependencies.MissingRequirementError as e:
-    message = "\n".join([
-        "Missing Requirement: %s" % (e.message,),
-        "To install run:",
-        "    pip install --upgrade --force \"%s\"" % (e.dependency,),
-        "",
-    ])
-    sys.stderr.writelines(message)
-    sys.exit(1)
synapse/app/_base.py

@@ -1,122 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import logging
import sys

try:
    import affinity
except:
    affinity = None

from daemonize import Daemonize
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit
from twisted.internet import reactor


def start_worker_reactor(appname, config):
    """ Run the reactor in the main process

    Daemonizes if necessary, and then configures some resources, before starting
    the reactor. Pulls configuration from the 'worker' settings in 'config'.

    Args:
        appname (str): application name which will be sent to syslog
        config (synapse.config.Config): config object
    """

    logger = logging.getLogger(config.worker_app)

    start_reactor(
        appname,
        config.soft_file_limit,
        config.gc_thresholds,
        config.worker_pid_file,
        config.worker_daemonize,
        config.worker_cpu_affinity,
        logger,
    )


def start_reactor(
        appname,
        soft_file_limit,
        gc_thresholds,
        pid_file,
        daemonize,
        cpu_affinity,
        logger,
):
    """ Run the reactor in the main process

    Daemonizes if necessary, and then configures some resources, before starting
    the reactor

    Args:
        appname (str): application name which will be sent to syslog
        soft_file_limit (int):
        gc_thresholds:
        pid_file (str): name of pid file to write to if daemonize is True
        daemonize (bool): true to run the reactor in a background process
        cpu_affinity (int|None): cpu affinity mask
        logger (logging.Logger): logger instance to pass to Daemonize
    """

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            if cpu_affinity is not None:
                if not affinity:
                    quit_with_error(
                        "Missing package 'affinity' required for cpu_affinity\n"
                        "option\n\n"
                        "Install by running:\n\n"
                        "   pip install affinity\n\n"
                    )
                logger.info("Setting CPU affinity to %s" % cpu_affinity)
                affinity.set_process_affinity_mask(0, cpu_affinity)
            change_resource_limit(soft_file_limit)
            if gc_thresholds:
                gc.set_threshold(*gc_thresholds)
            reactor.run()

    if daemonize:
        daemon = Daemonize(
            app=appname,
            pid=pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


def quit_with_error(error_string):
    message_lines = error_string.split("\n")
    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
    sys.stderr.write("*" * line_length + '\n')
    for line in message_lines:
        sys.stderr.write(" %s\n" % (line.rstrip(),))
    sys.stderr.write("*" * line_length + '\n')
    sys.exit(1)
synapse/app/appservice.py

@@ -1,188 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource

logger = logging.getLogger("synapse.app.appservice")


class AppserviceSlaveStore(
    DirectoryStore, SlavedEventStore, SlavedApplicationServiceStore,
    SlavedRegistrationStore,
):
    pass


class AppserviceServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse appservice now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ASReplicationHandler(self)


class ASReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(ASReplicationHandler, self).__init__(hs.get_datastore())
        self.appservice_handler = hs.get_application_service_handler()

    def on_rdata(self, stream_name, token, rows):
        super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)

        if stream_name == "events":
            max_stream_id = self.store.get_room_max_stream_ordering()
            preserve_fn(
                self.appservice_handler.notify_interested_services
            )(max_stream_id)


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse appservice", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.appservice"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    if config.notify_appservices:
        sys.stderr.write(
            "\nThe appservices must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``notify_appservices: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the pushers to start since they will be disabled in the main config
    config.notify_appservices = True

    ps = AppserviceServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def start():
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-appservice", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
synapse/app/client_reader.py

@@ -1,189 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.room import PublicRoomListRestServlet
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource

logger = logging.getLogger("synapse.app.client_reader")


class ClientReaderSlavedStore(
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    TransactionStore,
    SlavedClientIpStore,
    BaseSlavedStore,
):
    pass


class ClientReaderServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    PublicRoomListRestServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse client reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse client reader", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.client_reader"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = ClientReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-client-reader", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
synapse/app/federation_reader.py

@@ -1,178 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.api.urls import FEDERATION_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource

logger = logging.getLogger("synapse.app.federation_reader")


class FederationReaderSlavedStore(
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    TransactionStore,
    BaseSlavedStore,
):
    pass


class FederationReaderServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "federation":
                    resources.update({
                        FEDERATION_PREFIX: TransportLayerServer(self),
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse federation reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation reader", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.federation_reader"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = FederationReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-federation-reader", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
@@ -1,274 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright 2016 OpenMarket Ltd
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
import logging
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import synapse
|
|
||||||
from synapse import events
|
|
||||||
from synapse.app import _base
|
|
||||||
from synapse.config._base import ConfigError
|
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
|
||||||
from synapse.config.logger import setup_logging
|
|
||||||
from synapse.crypto import context_factory
|
|
||||||
from synapse.federation import send_queue
|
|
||||||
from synapse.http.site import SynapseSite
|
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
|
||||||
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
|
|
||||||
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
|
||||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
|
||||||
from synapse.replication.slave.storage.presence import SlavedPresenceStore
|
|
||||||
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
|
||||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
|
||||||
from synapse.replication.slave.storage.transactions import TransactionStore
|
|
||||||
from synapse.replication.tcp.client import ReplicationClientHandler
|
|
||||||
from synapse.server import HomeServer
|
|
||||||
from synapse.storage.engines import create_engine
|
|
||||||
from synapse.util.async import Linearizer
|
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
|
||||||
from synapse.util.logcontext import LoggingContext, preserve_fn
|
|
||||||
from synapse.util.manhole import manhole
|
|
||||||
from synapse.util.versionstring import get_version_string
|
|
||||||
from twisted.internet import defer, reactor
|
|
||||||
from twisted.web.resource import Resource
|
|
||||||
|
|
||||||
logger = logging.getLogger("synapse.app.federation_sender")
|
|
||||||
|
|
||||||
|
|
||||||
class FederationSenderSlaveStore(
|
|
||||||
SlavedDeviceInboxStore, TransactionStore, SlavedReceiptsStore, SlavedEventStore,
|
|
||||||
SlavedRegistrationStore, SlavedDeviceStore, SlavedPresenceStore,
|
|
||||||
):
|
|
||||||
def __init__(self, db_conn, hs):
|
|
||||||
super(FederationSenderSlaveStore, self).__init__(db_conn, hs)
|
|
||||||
|
|
||||||
# We pull out the current federation stream position now so that we
|
|
||||||
# always have a known value for the federation position in memory so
|
|
||||||
# that we don't have to bounce via a deferred once when we start the
|
|
||||||
# replication streams.
|
|
||||||
self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)
|
|
||||||
|
|
||||||
def _get_federation_out_pos(self, db_conn):
|
|
||||||
sql = (
|
|
||||||
"SELECT stream_id FROM federation_stream_position"
|
|
||||||
" WHERE type = ?"
|
|
||||||
)
|
|
||||||
sql = self.database_engine.convert_param_style(sql)
|
|
||||||
|
|
||||||
txn = db_conn.cursor()
|
|
||||||
txn.execute(sql, ("federation",))
|
|
||||||
rows = txn.fetchall()
|
|
||||||
txn.close()
|
|
||||||
|
|
||||||
return rows[0][0] if rows else -1
|
|
||||||
|
|
||||||
|
|
||||||
class FederationSenderServer(HomeServer):
|
|
||||||
def get_db_conn(self, run_new_connection=True):
|
|
||||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
|
||||||
# not be passed to the database engine.
|
|
||||||
db_params = {
|
|
||||||
k: v for k, v in self.db_config.get("args", {}).items()
|
|
||||||
if not k.startswith("cp_")
|
|
||||||
}
|
|
||||||
db_conn = self.database_engine.module.connect(**db_params)
|
|
||||||
|
|
||||||
if run_new_connection:
|
|
||||||
self.database_engine.on_new_connection(db_conn)
|
|
||||||
return db_conn
|
|
||||||
|
|
||||||
def setup(self):
|
|
||||||
logger.info("Setting up.")
|
|
||||||
self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
|
|
||||||
logger.info("Finished setting up.")
|
|
||||||
|
|
||||||
def _listen_http(self, listener_config):
|
|
||||||
port = listener_config["port"]
|
|
||||||
bind_addresses = listener_config["bind_addresses"]
|
|
||||||
site_tag = listener_config.get("tag", port)
|
|
||||||
resources = {}
|
|
||||||
for res in listener_config["resources"]:
|
|
||||||
for name in res["names"]:
|
|
||||||
if name == "metrics":
|
|
||||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
|
||||||
|
|
||||||
root_resource = create_resource_tree(resources, Resource())
|
|
||||||
|
|
||||||
for address in bind_addresses:
|
|
||||||
reactor.listenTCP(
|
|
||||||
port,
|
|
||||||
SynapseSite(
|
|
||||||
"synapse.access.http.%s" % (site_tag,),
|
|
||||||
site_tag,
|
|
||||||
listener_config,
|
|
||||||
root_resource,
|
|
||||||
),
|
|
||||||
interface=address
|
|
||||||
)
|
|
||||||
|
|
||||||
logger.info("Synapse federation_sender now listening on port %d", port)
|
|
||||||
|
|
||||||
def start_listening(self, listeners):
|
|
||||||
for listener in listeners:
|
|
||||||
if listener["type"] == "http":
|
|
||||||
self._listen_http(listener)
|
|
||||||
elif listener["type"] == "manhole":
|
|
||||||
bind_addresses = listener["bind_addresses"]
|
|
||||||
|
|
||||||
for address in bind_addresses:
|
|
||||||
reactor.listenTCP(
|
|
||||||
listener["port"],
|
|
||||||
manhole(
|
|
||||||
username="matrix",
|
|
||||||
password="rabbithole",
|
|
||||||
globals={"hs": self},
|
|
||||||
),
|
|
||||||
interface=address
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
|
||||||
|
|
||||||
self.get_tcp_replication().start_replication(self)
|
|
||||||
|
|
||||||
def build_tcp_replication(self):
|
|
||||||
return FederationSenderReplicationHandler(self)
|
|
||||||
|
|
||||||
|
|
||||||
class FederationSenderReplicationHandler(ReplicationClientHandler):
|
|
||||||
def __init__(self, hs):
|
|
||||||
super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
|
|
||||||
self.send_handler = FederationSenderHandler(hs, self)
|
|
||||||
|
|
||||||
def on_rdata(self, stream_name, token, rows):
|
|
||||||
super(FederationSenderReplicationHandler, self).on_rdata(
|
|
||||||
stream_name, token, rows
|
|
||||||
)
|
|
||||||
self.send_handler.process_replication_rows(stream_name, token, rows)
|
|
||||||
|
|
||||||
def get_streams_to_replicate(self):
|
|
||||||
args = super(FederationSenderReplicationHandler, self).get_streams_to_replicate()
|
|
||||||
args.update(self.send_handler.stream_positions())
|
|
||||||
return args
|
|
||||||
|
|
||||||
|
|
||||||
def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation sender", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.federation_sender"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    if config.send_federation:
        sys.stderr.write(
            "\nsend_federation must be disabled in the main synapse process"
            "\nbefore the federation sender can be run in a separate worker."
            "\nPlease add ``send_federation: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the federation sender to start, since it is disabled in the main
    # process's config.
    config.send_federation = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ps = FederationSenderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def start():
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-federation-sender", config)
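A minimal sketch, with assumed values, of the two configuration fragments the start-up check above cares about: the main process must have send_federation disabled, while the worker declares itself as the federation sender.

# Hypothetical config fragments; the keys mirror what start() reads above,
# but the port and address are invented for the example.
main_process_config = {
    "send_federation": False,
}
federation_sender_worker = {
    "worker_app": "synapse.app.federation_sender",
    "worker_listeners": [
        {"type": "http", "port": 9102, "bind_addresses": ["127.0.0.1"],
         "resources": [{"names": ["metrics"]}]},
    ],
}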
class FederationSenderHandler(object):
    """Processes the replication stream and forwards the appropriate entries
    to the federation sender.
    """
    def __init__(self, hs, replication_client):
        self.store = hs.get_datastore()
        self.federation_sender = hs.get_federation_sender()
        self.replication_client = replication_client

        self.federation_position = self.store.federation_out_pos_startup
        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")

        self._last_ack = self.federation_position

        self._room_serials = {}
        self._room_typing = {}

    def on_start(self):
        # There may be some events that are persisted but haven't been sent,
        # so send them now.
        self.federation_sender.notify_new_events(
            self.store.get_room_max_stream_ordering()
        )

    def stream_positions(self):
        return {"federation": self.federation_position}

    def process_replication_rows(self, stream_name, token, rows):
        # The federation stream contains things that we want to send out, e.g.
        # presence, typing, etc.
        if stream_name == "federation":
            send_queue.process_rows_for_federation(self.federation_sender, rows)
            preserve_fn(self.update_token)(token)

        # We also need to poke the federation sender when new events happen
        elif stream_name == "events":
            self.federation_sender.notify_new_events(token)

    @defer.inlineCallbacks
    def update_token(self, token):
        self.federation_position = token

        # We linearize here to ensure we don't have races updating the token
        with (yield self._fed_position_linearizer.queue(None)):
            if self._last_ack < self.federation_position:
                yield self.store.update_federation_out_pos(
                    "federation", self.federation_position
                )

                # We ACK this token over replication so that the master can drop
                # its in memory queues
                self.replication_client.send_federation_ack(self.federation_position)
                self._last_ack = self.federation_position


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
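A self-contained, synchronous sketch of the "only persist and ACK when the position has moved past the last ACK" behaviour implemented by update_token above; the class and values are invented for illustration only.

class ToyPositionTracker(object):
    """Stand-in for FederationSenderHandler's position/ack bookkeeping."""
    def __init__(self, start):
        self.position = start
        self.last_ack = start
        self.persisted = []

    def update(self, token):
        self.position = token
        if self.last_ack < self.position:
            self.persisted.append(self.position)  # stands in for the DB write
            self.last_ack = self.position          # stands in for send_federation_ack


tracker = ToyPositionTracker(10)
for token in (12, 11, 15):
    tracker.update(token)
print(tracker.persisted)  # [12, 15] -- the stale token 11 is not re-persisted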
@@ -1,239 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.api.errors import SynapseError
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.servlet import (
    RestServlet, parse_json_object_from_request,
)
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v2_alpha._base import client_v2_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource

logger = logging.getLogger("synapse.app.frontend_proxy")


class KeyUploadServlet(RestServlet):
    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$",
                                  releases=())

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super(KeyUploadServlet, self).__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker_main_http_uri

    @defer.inlineCallbacks
    def on_POST(self, request, device_id):
        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # passing the device_id here is deprecated; however, we allow it
            # for now for compatibility with older clients.
            if (requester.device_id is not None and
                    device_id != requester.device_id):
                logger.warning("Client uploading keys for a different device "
                               "(logged in as %s, uploading for %s)",
                               requester.device_id, device_id)
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400,
                "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # They're actually trying to upload something, proxy to main synapse.
            result = yield self.http_client.post_json_get_json(
                self.main_uri + request.uri,
                body,
            )

            defer.returnValue((200, result))
        else:
            # Just interested in counts.
            result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
            defer.returnValue((200, {"one_time_key_counts": result}))
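For illustration, hypothetical request and response bodies for the /keys/upload endpoint served by KeyUploadServlet above; the key material and counts are made up. An empty JSON body takes the "just interested in counts" branch, anything else is proxied to the main process at worker_main_http_uri.

import json

# Invented upload body: uploading one one-time key (would be proxied onward).
upload_body = {
    "one_time_keys": {
        "signed_curve25519:AAAAAA": {"key": "invented+base64+key"},
    },
}
# Invented response for the empty-body case: only the key counts come back.
count_only_response = {"one_time_key_counts": {"signed_curve25519": 4}}

print(json.dumps(upload_body))
print(json.dumps(count_only_response))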
class FrontendProxySlavedStore(
    SlavedDeviceStore,
    SlavedClientIpStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    BaseSlavedStore,
):
    pass


class FrontendProxyServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    KeyUploadServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse frontend proxy now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())
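A minimal sketch, with assumed values, of the worker configuration that start() below expects for this app: worker_app must name the frontend proxy, worker_main_http_uri must point at the main process, and the listener exposes the "client" resource handled by _listen_http above.

# Hypothetical values only; none of these are taken from the source.
frontend_proxy_worker = {
    "worker_app": "synapse.app.frontend_proxy",
    "worker_main_http_uri": "http://127.0.0.1:8008",
    "worker_listeners": [
        {"type": "http", "port": 8083, "bind_addresses": ["0.0.0.0"],
         "resources": [{"names": ["client"]}]},
    ],
}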
def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse frontend proxy", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.frontend_proxy"

    assert config.worker_main_http_uri is not None

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = FrontendProxyServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-frontend-proxy", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])

@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014, 2015 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,179 +13,207 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
import gc
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import synapse
|
import sys
|
||||||
import synapse.config.logger
|
sys.dont_write_bytecode = True
|
||||||
from synapse import events
|
from synapse.python_dependencies import check_requirements
|
||||||
from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \
|
|
||||||
LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \
|
if __name__ == '__main__':
|
||||||
STATIC_PREFIX, WEB_CLIENT_PREFIX
|
check_requirements()
|
||||||
from synapse.app import _base
|
|
||||||
from synapse.app._base import quit_with_error
|
from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
|
||||||
from synapse.config._base import ConfigError
|
from synapse.storage import (
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
are_all_users_on_domain, UpgradeDatabaseException,
|
||||||
from synapse.crypto import context_factory
|
)
|
||||||
from synapse.federation.transport.server import TransportLayerServer
|
|
||||||
from synapse.http.server import RootRedirect
|
from synapse.server import HomeServer
|
||||||
from synapse.http.site import SynapseSite
|
|
||||||
from synapse.metrics import register_memory_metrics
|
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from twisted.internet import reactor
|
||||||
from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \
|
from twisted.application import service
|
||||||
check_requirements
|
from twisted.enterprise import adbapi
|
||||||
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
|
from twisted.web.resource import Resource, EncodingResourceWrapper
|
||||||
from synapse.rest import ClientRestResource
|
from twisted.web.static import File
|
||||||
from synapse.rest.key.v1.server_key_resource import LocalKey
|
from twisted.web.server import Site, GzipEncoderFactory, Request
|
||||||
from synapse.rest.key.v2 import KeyApiV2Resource
|
from twisted.web.http import proxiedLogFormatter, combinedLogFormatter
|
||||||
|
from synapse.http.server import JsonResource, RootRedirect
|
||||||
from synapse.rest.media.v0.content_repository import ContentRepoResource
|
from synapse.rest.media.v0.content_repository import ContentRepoResource
|
||||||
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
|
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
|
||||||
from synapse.server import HomeServer
|
from synapse.rest.key.v1.server_key_resource import LocalKey
|
||||||
from synapse.storage import are_all_users_on_domain
|
from synapse.rest.key.v2 import KeyApiV2Resource
|
||||||
from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
|
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
|
||||||
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
|
from synapse.api.urls import (
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
CLIENT_PREFIX, FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
|
||||||
|
SERVER_KEY_PREFIX, MEDIA_PREFIX, CLIENT_V2_ALPHA_PREFIX, STATIC_PREFIX,
|
||||||
|
SERVER_KEY_V2_PREFIX,
|
||||||
|
)
|
||||||
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
|
from synapse.crypto import context_factory
|
||||||
from synapse.util.logcontext import LoggingContext
|
from synapse.util.logcontext import LoggingContext
|
||||||
from synapse.util.manhole import manhole
|
from synapse.rest.client.v1 import ClientV1RestResource
|
||||||
from synapse.util.rlimit import change_resource_limit
|
from synapse.rest.client.v2_alpha import ClientV2AlphaRestResource
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||||
from twisted.application import service
|
|
||||||
from twisted.internet import defer, reactor
|
from synapse import events
|
||||||
from twisted.web.resource import EncodingResourceWrapper, Resource
|
|
||||||
from twisted.web.server import GzipEncoderFactory
|
from daemonize import Daemonize
|
||||||
from twisted.web.static import File
|
import twisted.manhole.telnet
|
||||||
|
|
||||||
|
import synapse
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import resource
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger("synapse.app.homeserver")
|
logger = logging.getLogger("synapse.app.homeserver")
|
||||||
|
|
||||||
|
|
||||||
|
class GzipFile(File):
|
||||||
|
def getChild(self, path, request):
|
||||||
|
child = File.getChild(self, path, request)
|
||||||
|
return EncodingResourceWrapper(child, [GzipEncoderFactory()])
|
||||||
|
|
||||||
|
|
||||||
def gz_wrap(r):
|
def gz_wrap(r):
|
||||||
return EncodingResourceWrapper(r, [GzipEncoderFactory()])
|
return EncodingResourceWrapper(r, [GzipEncoderFactory()])
|
||||||
|
|
||||||
|
|
||||||
def build_resource_for_web_client(hs):
|
class SynapseHomeServer(HomeServer):
|
||||||
webclient_path = hs.get_config().web_client_location
|
|
||||||
if not webclient_path:
|
def build_http_client(self):
|
||||||
try:
|
return MatrixFederationHttpClient(self)
|
||||||
import syweb
|
|
||||||
except ImportError:
|
def build_resource_for_client(self):
|
||||||
quit_with_error(
|
return ClientV1RestResource(self)
|
||||||
"Could not find a webclient.\n\n"
|
|
||||||
"Please either install the matrix-angular-sdk or configure\n"
|
def build_resource_for_client_v2_alpha(self):
|
||||||
"the location of the source to serve via the configuration\n"
|
return ClientV2AlphaRestResource(self)
|
||||||
"option `web_client_location`\n\n"
|
|
||||||
"To install the `matrix-angular-sdk` via pip, run:\n\n"
|
def build_resource_for_federation(self):
|
||||||
" pip install '%(dep)s'\n"
|
return JsonResource(self)
|
||||||
"\n"
|
|
||||||
"You can also disable hosting of the webclient via the\n"
|
def build_resource_for_web_client(self):
|
||||||
"configuration option `web_client`\n"
|
import syweb
|
||||||
% {"dep": CONDITIONAL_REQUIREMENTS["web_client"].keys()[0]}
|
|
||||||
)
|
|
||||||
syweb_path = os.path.dirname(syweb.__file__)
|
syweb_path = os.path.dirname(syweb.__file__)
|
||||||
webclient_path = os.path.join(syweb_path, "webclient")
|
webclient_path = os.path.join(syweb_path, "webclient")
|
||||||
# GZip is disabled here due to
|
# GZip is disabled here due to
|
||||||
# https://twistedmatrix.com/trac/ticket/7678
|
# https://twistedmatrix.com/trac/ticket/7678
|
||||||
# (It can stay enabled for the API resources: they call
|
# (It can stay enabled for the API resources: they call
|
||||||
# write() with the whole body and then finish() straight
|
# write() with the whole body and then finish() straight
|
||||||
# after and so do not trigger the bug.
|
# after and so do not trigger the bug.
|
||||||
# GzipFile was removed in commit 184ba09
|
# return GzipFile(webclient_path) # TODO configurable?
|
||||||
# return GzipFile(webclient_path) # TODO configurable?
|
return File(webclient_path) # TODO configurable?
|
||||||
return File(webclient_path) # TODO configurable?
|
|
||||||
|
|
||||||
|
def build_resource_for_static_content(self):
|
||||||
|
# This is old and should go away: not going to bother adding gzip
|
||||||
|
return File("static")
|
||||||
|
|
||||||
|
def build_resource_for_content_repo(self):
|
||||||
|
return ContentRepoResource(
|
||||||
|
self, self.upload_dir, self.auth, self.content_addr
|
||||||
|
)
|
||||||
|
|
||||||
|
def build_resource_for_media_repository(self):
|
||||||
|
return MediaRepositoryResource(self)
|
||||||
|
|
||||||
|
def build_resource_for_server_key(self):
|
||||||
|
return LocalKey(self)
|
||||||
|
|
||||||
|
def build_resource_for_server_key_v2(self):
|
||||||
|
return KeyApiV2Resource(self)
|
||||||
|
|
||||||
|
def build_resource_for_metrics(self):
|
||||||
|
if self.get_config().enable_metrics:
|
||||||
|
return MetricsResource(self)
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
def build_db_pool(self):
|
||||||
|
name = self.db_config["name"]
|
||||||
|
|
||||||
|
return adbapi.ConnectionPool(
|
||||||
|
name,
|
||||||
|
**self.db_config.get("args", {})
|
||||||
|
)
|
||||||
|
|
||||||
class SynapseHomeServer(HomeServer):
|
|
||||||
def _listener_http(self, config, listener_config):
|
def _listener_http(self, config, listener_config):
|
||||||
port = listener_config["port"]
|
port = listener_config["port"]
|
||||||
bind_addresses = listener_config["bind_addresses"]
|
bind_address = listener_config.get("bind_address", "")
|
||||||
tls = listener_config.get("tls", False)
|
tls = listener_config.get("tls", False)
|
||||||
site_tag = listener_config.get("tag", port)
|
|
||||||
|
|
||||||
if tls and config.no_tls:
|
if tls and config.no_tls:
|
||||||
return
|
return
|
||||||
|
|
||||||
|
metrics_resource = self.get_resource_for_metrics()
|
||||||
|
|
||||||
resources = {}
|
resources = {}
|
||||||
for res in listener_config["resources"]:
|
for res in listener_config["resources"]:
|
||||||
for name in res["names"]:
|
for name in res["names"]:
|
||||||
if name == "client":
|
if name == "client":
|
||||||
client_resource = ClientRestResource(self)
|
|
||||||
if res["compress"]:
|
if res["compress"]:
|
||||||
client_resource = gz_wrap(client_resource)
|
client_v1 = gz_wrap(self.get_resource_for_client())
|
||||||
|
client_v2 = gz_wrap(self.get_resource_for_client_v2_alpha())
|
||||||
|
else:
|
||||||
|
client_v1 = self.get_resource_for_client()
|
||||||
|
client_v2 = self.get_resource_for_client_v2_alpha()
|
||||||
|
|
||||||
resources.update({
|
resources.update({
|
||||||
"/_matrix/client/api/v1": client_resource,
|
CLIENT_PREFIX: client_v1,
|
||||||
"/_matrix/client/r0": client_resource,
|
CLIENT_V2_ALPHA_PREFIX: client_v2,
|
||||||
"/_matrix/client/unstable": client_resource,
|
|
||||||
"/_matrix/client/v2_alpha": client_resource,
|
|
||||||
"/_matrix/client/versions": client_resource,
|
|
||||||
})
|
})
|
||||||
|
|
||||||
if name == "federation":
|
if name == "federation":
|
||||||
resources.update({
|
resources.update({
|
||||||
FEDERATION_PREFIX: TransportLayerServer(self),
|
FEDERATION_PREFIX: self.get_resource_for_federation(),
|
||||||
})
|
})
|
||||||
|
|
||||||
if name in ["static", "client"]:
|
if name in ["static", "client"]:
|
||||||
resources.update({
|
resources.update({
|
||||||
STATIC_PREFIX: File(
|
STATIC_PREFIX: self.get_resource_for_static_content(),
|
||||||
os.path.join(os.path.dirname(synapse.__file__), "static")
|
|
||||||
),
|
|
||||||
})
|
})
|
||||||
|
|
||||||
if name in ["media", "federation", "client"]:
|
if name in ["media", "federation", "client"]:
|
||||||
media_repo = MediaRepositoryResource(self)
|
|
||||||
resources.update({
|
resources.update({
|
||||||
MEDIA_PREFIX: media_repo,
|
MEDIA_PREFIX: self.get_resource_for_media_repository(),
|
||||||
LEGACY_MEDIA_PREFIX: media_repo,
|
CONTENT_REPO_PREFIX: self.get_resource_for_content_repo(),
|
||||||
CONTENT_REPO_PREFIX: ContentRepoResource(
|
|
||||||
self, self.config.uploads_path
|
|
||||||
),
|
|
||||||
})
|
})
|
||||||
|
|
||||||
if name in ["keys", "federation"]:
|
if name in ["keys", "federation"]:
|
||||||
resources.update({
|
resources.update({
|
||||||
SERVER_KEY_PREFIX: LocalKey(self),
|
SERVER_KEY_PREFIX: self.get_resource_for_server_key(),
|
||||||
SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
|
SERVER_KEY_V2_PREFIX: self.get_resource_for_server_key_v2(),
|
||||||
})
|
})
|
||||||
|
|
||||||
if name == "webclient":
|
if name == "webclient":
|
||||||
resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
|
resources[WEB_CLIENT_PREFIX] = self.get_resource_for_web_client()
|
||||||
|
|
||||||
if name == "metrics" and self.get_config().enable_metrics:
|
if name == "metrics" and metrics_resource:
|
||||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
resources[METRICS_PREFIX] = metrics_resource
|
||||||
|
|
||||||
if WEB_CLIENT_PREFIX in resources:
|
|
||||||
root_resource = RootRedirect(WEB_CLIENT_PREFIX)
|
|
||||||
else:
|
|
||||||
root_resource = Resource()
|
|
||||||
|
|
||||||
root_resource = create_resource_tree(resources, root_resource)
|
|
||||||
|
|
||||||
|
root_resource = create_resource_tree(resources)
|
||||||
if tls:
|
if tls:
|
||||||
for address in bind_addresses:
|
reactor.listenSSL(
|
||||||
reactor.listenSSL(
|
port,
|
||||||
port,
|
SynapseSite(
|
||||||
SynapseSite(
|
"synapse.access.https",
|
||||||
"synapse.access.https.%s" % (site_tag,),
|
listener_config,
|
||||||
site_tag,
|
root_resource,
|
||||||
listener_config,
|
),
|
||||||
root_resource,
|
self.tls_context_factory,
|
||||||
),
|
interface=bind_address
|
||||||
self.tls_server_context_factory,
|
)
|
||||||
interface=address
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
for address in bind_addresses:
|
reactor.listenTCP(
|
||||||
reactor.listenTCP(
|
port,
|
||||||
port,
|
SynapseSite(
|
||||||
SynapseSite(
|
"synapse.access.https",
|
||||||
"synapse.access.http.%s" % (site_tag,),
|
listener_config,
|
||||||
site_tag,
|
root_resource,
|
||||||
listener_config,
|
),
|
||||||
root_resource,
|
interface=bind_address
|
||||||
),
|
)
|
||||||
interface=address
|
|
||||||
)
|
|
||||||
logger.info("Synapse now listening on port %d", port)
|
logger.info("Synapse now listening on port %d", port)
|
||||||
|
|
||||||
def start_listening(self):
|
def start_listening(self):
|
||||||
@@ -195,28 +223,15 @@ class SynapseHomeServer(HomeServer):
             if listener["type"] == "http":
                 self._listener_http(config, listener)
             elif listener["type"] == "manhole":
-                bind_addresses = listener["bind_addresses"]
-
-                for address in bind_addresses:
-                    reactor.listenTCP(
-                        listener["port"],
-                        manhole(
-                            username="matrix",
-                            password="rabbithole",
-                            globals={"hs": self},
-                        ),
-                        interface=address
-                    )
-            elif listener["type"] == "replication":
-                bind_addresses = listener["bind_addresses"]
-                for address in bind_addresses:
-                    factory = ReplicationStreamProtocolFactory(self)
-                    server_listener = reactor.listenTCP(
-                        listener["port"], factory, interface=address
-                    )
-                    reactor.addSystemEventTrigger(
-                        "before", "shutdown", server_listener.stopListening,
-                    )
+                f = twisted.manhole.telnet.ShellFactory()
+                f.username = "matrix"
+                f.password = "rabbithole"
+                f.namespace['hs'] = self
+                reactor.listenTCP(
+                    listener["port"],
+                    f,
+                    interface=listener.get("bind_address", '127.0.0.1')
+                )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -236,18 +251,93 @@ class SynapseHomeServer(HomeServer):
|
|||||||
except IncorrectDatabaseSetup as e:
|
except IncorrectDatabaseSetup as e:
|
||||||
quit_with_error(e.message)
|
quit_with_error(e.message)
|
||||||
|
|
||||||
def get_db_conn(self, run_new_connection=True):
|
|
||||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
|
||||||
# not be passed to the database engine.
|
|
||||||
db_params = {
|
|
||||||
k: v for k, v in self.db_config.get("args", {}).items()
|
|
||||||
if not k.startswith("cp_")
|
|
||||||
}
|
|
||||||
db_conn = self.database_engine.module.connect(**db_params)
|
|
||||||
|
|
||||||
if run_new_connection:
|
def quit_with_error(error_string):
|
||||||
self.database_engine.on_new_connection(db_conn)
|
message_lines = error_string.split("\n")
|
||||||
return db_conn
|
line_length = max([len(l) for l in message_lines]) + 2
|
||||||
|
sys.stderr.write("*" * line_length + '\n')
|
||||||
|
for line in message_lines:
|
||||||
|
if line.strip():
|
||||||
|
sys.stderr.write(" %s\n" % (line.strip(),))
|
||||||
|
sys.stderr.write("*" * line_length + '\n')
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def get_version_string():
|
||||||
|
try:
|
||||||
|
null = open(os.devnull, 'w')
|
||||||
|
cwd = os.path.dirname(os.path.abspath(__file__))
|
||||||
|
try:
|
||||||
|
git_branch = subprocess.check_output(
|
||||||
|
['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
|
||||||
|
stderr=null,
|
||||||
|
cwd=cwd,
|
||||||
|
).strip()
|
||||||
|
git_branch = "b=" + git_branch
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
|
git_branch = ""
|
||||||
|
|
||||||
|
try:
|
||||||
|
git_tag = subprocess.check_output(
|
||||||
|
['git', 'describe', '--exact-match'],
|
||||||
|
stderr=null,
|
||||||
|
cwd=cwd,
|
||||||
|
).strip()
|
||||||
|
git_tag = "t=" + git_tag
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
|
git_tag = ""
|
||||||
|
|
||||||
|
try:
|
||||||
|
git_commit = subprocess.check_output(
|
||||||
|
['git', 'rev-parse', '--short', 'HEAD'],
|
||||||
|
stderr=null,
|
||||||
|
cwd=cwd,
|
||||||
|
).strip()
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
|
git_commit = ""
|
||||||
|
|
||||||
|
try:
|
||||||
|
dirty_string = "-this_is_a_dirty_checkout"
|
||||||
|
is_dirty = subprocess.check_output(
|
||||||
|
['git', 'describe', '--dirty=' + dirty_string],
|
||||||
|
stderr=null,
|
||||||
|
cwd=cwd,
|
||||||
|
).strip().endswith(dirty_string)
|
||||||
|
|
||||||
|
git_dirty = "dirty" if is_dirty else ""
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
|
git_dirty = ""
|
||||||
|
|
||||||
|
if git_branch or git_tag or git_commit or git_dirty:
|
||||||
|
git_version = ",".join(
|
||||||
|
s for s in
|
||||||
|
(git_branch, git_tag, git_commit, git_dirty,)
|
||||||
|
if s
|
||||||
|
)
|
||||||
|
|
||||||
|
return (
|
||||||
|
"Synapse/%s (%s)" % (
|
||||||
|
synapse.__version__, git_version,
|
||||||
|
)
|
||||||
|
).encode("ascii")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warn("Failed to check for git repository: %s", e)
|
||||||
|
|
||||||
|
return ("Synapse/%s" % (synapse.__version__,)).encode("ascii")
|
||||||
|
|
||||||
|
|
||||||
|
def change_resource_limit(soft_file_no):
|
||||||
|
try:
|
||||||
|
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
|
||||||
|
|
||||||
|
if not soft_file_no:
|
||||||
|
soft_file_no = hard
|
||||||
|
|
||||||
|
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))
|
||||||
|
|
||||||
|
logger.info("Set file limit to: %d", soft_file_no)
|
||||||
|
except (ValueError, resource.error) as e:
|
||||||
|
logger.warn("Failed to set file limit: %s", e)
|
||||||
|
|
||||||
|
|
||||||
def setup(config_options):
|
def setup(config_options):
|
||||||
@@ -255,57 +345,56 @@ def setup(config_options):
|
|||||||
Args:
|
Args:
|
||||||
config_options_options: The options passed to Synapse. Usually
|
config_options_options: The options passed to Synapse. Usually
|
||||||
`sys.argv[1:]`.
|
`sys.argv[1:]`.
|
||||||
|
should_run (bool): Whether to start the reactor.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
HomeServer
|
HomeServer
|
||||||
"""
|
"""
|
||||||
try:
|
config = HomeServerConfig.load_config(
|
||||||
config = HomeServerConfig.load_or_generate_config(
|
"Synapse Homeserver",
|
||||||
"Synapse Homeserver",
|
config_options,
|
||||||
config_options,
|
generate_section="Homeserver"
|
||||||
)
|
)
|
||||||
except ConfigError as e:
|
|
||||||
sys.stderr.write("\n" + e.message + "\n")
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
if not config:
|
config.setup_logging()
|
||||||
# If a config isn't returned, and an exception isn't raised, we're just
|
|
||||||
# generating config files and shouldn't try to continue.
|
|
||||||
sys.exit(0)
|
|
||||||
|
|
||||||
synapse.config.logger.setup_logging(config, use_worker_options=False)
|
|
||||||
|
|
||||||
# check any extra requirements we have now we have a config
|
# check any extra requirements we have now we have a config
|
||||||
check_requirements(config)
|
check_requirements(config)
|
||||||
|
|
||||||
version_string = "Synapse/" + get_version_string(synapse)
|
version_string = get_version_string()
|
||||||
|
|
||||||
logger.info("Server hostname: %s", config.server_name)
|
logger.info("Server hostname: %s", config.server_name)
|
||||||
logger.info("Server version: %s", version_string)
|
logger.info("Server version: %s", version_string)
|
||||||
|
|
||||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||||
|
|
||||||
tls_server_context_factory = context_factory.ServerContextFactory(config)
|
tls_context_factory = context_factory.ServerContextFactory(config)
|
||||||
|
|
||||||
database_engine = create_engine(config.database_config)
|
database_engine = create_engine(config.database_config["name"])
|
||||||
config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
|
config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
|
||||||
|
|
||||||
hs = SynapseHomeServer(
|
hs = SynapseHomeServer(
|
||||||
config.server_name,
|
config.server_name,
|
||||||
|
upload_dir=os.path.abspath("uploads"),
|
||||||
db_config=config.database_config,
|
db_config=config.database_config,
|
||||||
tls_server_context_factory=tls_server_context_factory,
|
tls_context_factory=tls_context_factory,
|
||||||
config=config,
|
config=config,
|
||||||
|
content_addr=config.content_addr,
|
||||||
version_string=version_string,
|
version_string=version_string,
|
||||||
database_engine=database_engine,
|
database_engine=database_engine,
|
||||||
)
|
)
|
||||||
|
|
||||||
logger.info("Preparing database: %s...", config.database_config['name'])
|
logger.info("Preparing database: %r...", config.database_config)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
db_conn = hs.get_db_conn(run_new_connection=False)
|
db_conn = database_engine.module.connect(
|
||||||
prepare_database(db_conn, database_engine, config=config)
|
**{
|
||||||
database_engine.on_new_connection(db_conn)
|
k: v for k, v in config.database_config.get("args", {}).items()
|
||||||
|
if not k.startswith("cp_")
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
database_engine.prepare_database(db_conn)
|
||||||
hs.run_startup_checks(db_conn, database_engine)
|
hs.run_startup_checks(db_conn, database_engine)
|
||||||
|
|
||||||
db_conn.commit()
|
db_conn.commit()
|
||||||
@@ -317,21 +406,14 @@ def setup(config_options):
|
|||||||
)
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
logger.info("Database prepared in %s.", config.database_config['name'])
|
logger.info("Database prepared in %r.", config.database_config)
|
||||||
|
|
||||||
hs.setup()
|
|
||||||
hs.start_listening()
|
hs.start_listening()
|
||||||
|
|
||||||
def start():
|
hs.get_pusherpool().start()
|
||||||
hs.get_pusherpool().start()
|
hs.get_state_handler().start_caching()
|
||||||
hs.get_state_handler().start_caching()
|
hs.get_datastore().start_profiling()
|
||||||
hs.get_datastore().start_profiling()
|
hs.get_replication_layer().start_get_pdu_cache()
|
||||||
hs.get_datastore().start_doing_background_updates()
|
|
||||||
hs.get_replication_layer().start_get_pdu_cache()
|
|
||||||
|
|
||||||
register_memory_metrics(hs)
|
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
|
||||||
|
|
||||||
return hs
|
return hs
|
||||||
|
|
||||||
@@ -346,13 +428,133 @@ class SynapseService(service.Service):
|
|||||||
def startService(self):
|
def startService(self):
|
||||||
hs = setup(self.config)
|
hs = setup(self.config)
|
||||||
change_resource_limit(hs.config.soft_file_limit)
|
change_resource_limit(hs.config.soft_file_limit)
|
||||||
if hs.config.gc_thresholds:
|
|
||||||
gc.set_threshold(*hs.config.gc_thresholds)
|
|
||||||
|
|
||||||
def stopService(self):
|
def stopService(self):
|
||||||
return self._port.stopListening()
|
return self._port.stopListening()
|
||||||
|
|
||||||
|
|
||||||
|
class XForwardedForRequest(Request):
|
||||||
|
def __init__(self, *args, **kw):
|
||||||
|
Request.__init__(self, *args, **kw)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Add a layer on top of another request that only uses the value of an
|
||||||
|
X-Forwarded-For header as the result of C{getClientIP}.
|
||||||
|
"""
|
||||||
|
def getClientIP(self):
|
||||||
|
"""
|
||||||
|
@return: The client address (the first address) in the value of the
|
||||||
|
I{X-Forwarded-For header}. If the header is not present, return
|
||||||
|
C{b"-"}.
|
||||||
|
"""
|
||||||
|
return self.requestHeaders.getRawHeaders(
|
||||||
|
b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip()
|
||||||
|
|
||||||
|
|
||||||
|
def XForwardedFactory(*args, **kwargs):
|
||||||
|
return XForwardedForRequest(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class SynapseSite(Site):
|
||||||
|
"""
|
||||||
|
Subclass of a twisted http Site that does access logging with python's
|
||||||
|
standard logging
|
||||||
|
"""
|
||||||
|
def __init__(self, logger_name, config, resource, *args, **kwargs):
|
||||||
|
Site.__init__(self, resource, *args, **kwargs)
|
||||||
|
if config.get("x_forwarded", False):
|
||||||
|
self.requestFactory = XForwardedFactory
|
||||||
|
self._log_formatter = proxiedLogFormatter
|
||||||
|
else:
|
||||||
|
self._log_formatter = combinedLogFormatter
|
||||||
|
self.access_logger = logging.getLogger(logger_name)
|
||||||
|
|
||||||
|
def log(self, request):
|
||||||
|
line = self._log_formatter(self._logDateTime, request)
|
||||||
|
self.access_logger.info(line)
|
||||||
|
|
||||||
|
|
||||||
|
def create_resource_tree(desired_tree, redirect_root_to_web_client=True):
|
||||||
|
"""Create the resource tree for this Home Server.
|
||||||
|
|
||||||
|
This in unduly complicated because Twisted does not support putting
|
||||||
|
child resources more than 1 level deep at a time.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
web_client (bool): True to enable the web client.
|
||||||
|
redirect_root_to_web_client (bool): True to redirect '/' to the
|
||||||
|
location of the web client. This does nothing if web_client is not
|
||||||
|
True.
|
||||||
|
"""
|
||||||
|
if redirect_root_to_web_client and WEB_CLIENT_PREFIX in desired_tree:
|
||||||
|
root_resource = RootRedirect(WEB_CLIENT_PREFIX)
|
||||||
|
else:
|
||||||
|
root_resource = Resource()
|
||||||
|
|
||||||
|
# ideally we'd just use getChild and putChild but getChild doesn't work
|
||||||
|
# unless you give it a Request object IN ADDITION to the name :/ So
|
||||||
|
# instead, we'll store a copy of this mapping so we can actually add
|
||||||
|
# extra resources to existing nodes. See self._resource_id for the key.
|
||||||
|
resource_mappings = {}
|
||||||
|
for full_path, res in desired_tree.items():
|
||||||
|
logger.info("Attaching %s to path %s", res, full_path)
|
||||||
|
last_resource = root_resource
|
||||||
|
for path_seg in full_path.split('/')[1:-1]:
|
||||||
|
if path_seg not in last_resource.listNames():
|
||||||
|
# resource doesn't exist, so make a "dummy resource"
|
||||||
|
child_resource = Resource()
|
||||||
|
last_resource.putChild(path_seg, child_resource)
|
||||||
|
res_id = _resource_id(last_resource, path_seg)
|
||||||
|
resource_mappings[res_id] = child_resource
|
||||||
|
last_resource = child_resource
|
||||||
|
else:
|
||||||
|
# we have an existing Resource, use that instead.
|
||||||
|
res_id = _resource_id(last_resource, path_seg)
|
||||||
|
last_resource = resource_mappings[res_id]
|
||||||
|
|
||||||
|
# ===========================
|
||||||
|
# now attach the actual desired resource
|
||||||
|
last_path_seg = full_path.split('/')[-1]
|
||||||
|
|
||||||
|
# if there is already a resource here, thieve its children and
|
||||||
|
# replace it
|
||||||
|
res_id = _resource_id(last_resource, last_path_seg)
|
||||||
|
if res_id in resource_mappings:
|
||||||
|
# there is a dummy resource at this path already, which needs
|
||||||
|
# to be replaced with the desired resource.
|
||||||
|
existing_dummy_resource = resource_mappings[res_id]
|
||||||
|
for child_name in existing_dummy_resource.listNames():
|
||||||
|
child_res_id = _resource_id(
|
||||||
|
existing_dummy_resource, child_name
|
||||||
|
)
|
||||||
|
child_resource = resource_mappings[child_res_id]
|
||||||
|
# steal the children
|
||||||
|
res.putChild(child_name, child_resource)
|
||||||
|
|
||||||
|
# finally, insert the desired resource in the right place
|
||||||
|
last_resource.putChild(last_path_seg, res)
|
||||||
|
res_id = _resource_id(last_resource, last_path_seg)
|
||||||
|
resource_mappings[res_id] = res
|
||||||
|
|
||||||
|
return root_resource
|
||||||
|
|
||||||
|
|
||||||
|
def _resource_id(resource, path_seg):
|
||||||
|
"""Construct an arbitrary resource ID so you can retrieve the mapping
|
||||||
|
later.
|
||||||
|
|
||||||
|
If you want to represent resource A putChild resource B with path C,
|
||||||
|
the mapping should looks like _resource_id(A,C) = B.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
resource (Resource): The *parent* Resource
|
||||||
|
path_seg (str): The name of the child Resource to be attached.
|
||||||
|
Returns:
|
||||||
|
str: A unique string which can be a key to the child Resource.
|
||||||
|
"""
|
||||||
|
return "%s-%s" % (resource, path_seg)
|
||||||
|
|
||||||
|
|
||||||
def run(hs):
|
def run(hs):
|
||||||
PROFILE_SYNAPSE = False
|
PROFILE_SYNAPSE = False
|
||||||
if PROFILE_SYNAPSE:
|
if PROFILE_SYNAPSE:
|
||||||
@@ -376,66 +578,27 @@ def run(hs):
|
|||||||
ThreadPool._worker = profile(ThreadPool._worker)
|
ThreadPool._worker = profile(ThreadPool._worker)
|
||||||
reactor.run = profile(reactor.run)
|
reactor.run = profile(reactor.run)
|
||||||
|
|
||||||
clock = hs.get_clock()
|
def in_thread():
|
||||||
start_time = clock.time()
|
with LoggingContext("run"):
|
||||||
|
change_resource_limit(hs.config.soft_file_limit)
|
||||||
|
reactor.run()
|
||||||
|
|
||||||
stats = {}
|
if hs.config.daemonize:
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
print hs.config.pid_file
|
||||||
def phone_stats_home():
|
|
||||||
logger.info("Gathering stats for reporting")
|
|
||||||
now = int(hs.get_clock().time())
|
|
||||||
uptime = int(now - start_time)
|
|
||||||
if uptime < 0:
|
|
||||||
uptime = 0
|
|
||||||
|
|
||||||
stats["homeserver"] = hs.config.server_name
|
daemon = Daemonize(
|
||||||
stats["timestamp"] = now
|
app="synapse-homeserver",
|
||||||
stats["uptime_seconds"] = uptime
|
pid=hs.config.pid_file,
|
||||||
stats["total_users"] = yield hs.get_datastore().count_all_users()
|
action=lambda: in_thread(),
|
||||||
|
auto_close_fds=False,
|
||||||
|
verbose=True,
|
||||||
|
logger=logger,
|
||||||
|
)
|
||||||
|
|
||||||
total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
|
daemon.start()
|
||||||
stats["total_nonbridged_users"] = total_nonbridged_users
|
else:
|
||||||
|
in_thread()
|
||||||
room_count = yield hs.get_datastore().get_room_count()
|
|
||||||
stats["total_room_count"] = room_count
|
|
||||||
|
|
||||||
stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
|
|
||||||
stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
|
|
||||||
stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
|
|
||||||
|
|
||||||
daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
|
|
||||||
stats["daily_sent_messages"] = daily_sent_messages
|
|
||||||
|
|
||||||
logger.info("Reporting stats to matrix.org: %s" % (stats,))
|
|
||||||
try:
|
|
||||||
yield hs.get_simple_http_client().put_json(
|
|
||||||
"https://matrix.org/report-usage-stats/push",
|
|
||||||
stats
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
logger.warn("Error reporting stats: %s", e)
|
|
||||||
|
|
||||||
if hs.config.report_stats:
|
|
||||||
logger.info("Scheduling stats reporting for 3 hour intervals")
|
|
||||||
clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)
|
|
||||||
|
|
||||||
# We wait 5 minutes to send the first set of stats as the server can
|
|
||||||
# be quite busy the first few minutes
|
|
||||||
clock.call_later(5 * 60, phone_stats_home)
|
|
||||||
|
|
||||||
if hs.config.daemonize and hs.config.print_pidfile:
|
|
||||||
print (hs.config.pid_file)
|
|
||||||
|
|
||||||
_base.start_reactor(
|
|
||||||
"synapse-homeserver",
|
|
||||||
hs.config.soft_file_limit,
|
|
||||||
hs.config.gc_thresholds,
|
|
||||||
hs.config.pid_file,
|
|
||||||
hs.config.daemonize,
|
|
||||||
hs.config.cpu_affinity,
|
|
||||||
logger,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
|||||||
@@ -1,186 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.api.urls import (
    CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
)
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.media_repository import MediaRepositoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource

logger = logging.getLogger("synapse.app.media_repository")


class MediaRepositorySlavedStore(
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedClientIpStore,
    TransactionStore,
    BaseSlavedStore,
    MediaRepositoryStore,
):
    pass


class MediaRepositoryServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "media":
                    media_repo = MediaRepositoryResource(self)
                    resources.update({
                        MEDIA_PREFIX: media_repo,
                        LEGACY_MEDIA_PREFIX: media_repo,
                        CONTENT_REPO_PREFIX: ContentRepoResource(
                            self, self.config.uploads_path
                        ),
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse media repository now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())
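A small, self-contained illustration of the cp_-prefix filtering done in get_db_conn above; the argument names and values are invented for the example.

# cp_min/cp_max stand in for adbapi connection-pool settings, which must not
# be forwarded to the database driver itself.
db_args = {"database": "homeserver.db", "cp_min": 5, "cp_max": 10}
db_params = {k: v for k, v in db_args.items() if not k.startswith("cp_")}
print(db_params)  # {'database': 'homeserver.db'}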
def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse media repository", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.media_repository"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = MediaRepositoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-media-repository", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
@@ -1,251 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright 2016 OpenMarket Ltd
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
import logging
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import synapse
|
|
||||||
from synapse import events
|
|
||||||
from synapse.app import _base
|
|
||||||
from synapse.config._base import ConfigError
|
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
|
||||||
from synapse.config.logger import setup_logging
|
|
||||||
from synapse.http.site import SynapseSite
|
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
|
||||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
|
||||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
|
||||||
from synapse.replication.slave.storage.pushers import SlavedPusherStore
|
|
||||||
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
|
||||||
from synapse.replication.tcp.client import ReplicationClientHandler
|
|
||||||
from synapse.server import HomeServer
|
|
||||||
from synapse.storage import DataStore
|
|
||||||
from synapse.storage.engines import create_engine
|
|
||||||
from synapse.storage.roommember import RoomMemberStore
|
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
|
||||||
from synapse.util.logcontext import LoggingContext, preserve_fn
|
|
||||||
from synapse.util.manhole import manhole
|
|
||||||
from synapse.util.versionstring import get_version_string
|
|
||||||
from twisted.internet import defer, reactor
|
|
||||||
from twisted.web.resource import Resource
|
|
||||||
|
|
||||||
logger = logging.getLogger("synapse.app.pusher")
|
|
||||||
|
|
||||||
|
|
||||||
class PusherSlaveStore(
    SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore,
    SlavedAccountDataStore
):
    update_pusher_last_stream_ordering_and_success = (
        DataStore.update_pusher_last_stream_ordering_and_success.__func__
    )

    update_pusher_failing_since = (
        DataStore.update_pusher_failing_since.__func__
    )

    update_pusher_last_stream_ordering = (
        DataStore.update_pusher_last_stream_ordering.__func__
    )

    get_throttle_params_by_room = (
        DataStore.get_throttle_params_by_room.__func__
    )

    set_throttle_params = (
        DataStore.set_throttle_params.__func__
    )

    get_time_of_last_push_action_before = (
        DataStore.get_time_of_last_push_action_before.__func__
    )

    get_profile_displayname = (
        DataStore.get_profile_displayname.__func__
    )

    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
    )

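# The worker's HomeServer subclass: it wires up the PusherSlaveStore, exposes
# only an optional "metrics" HTTP resource plus the debug manhole, and joins
# the main process's replication streams via the TCP replication client.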
class PusherServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = PusherSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def remove_pusher(self, app_id, push_key, user_id):
        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse pusher now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return PusherReplicationHandler(self)

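# The replication handler reacts to rows arriving from the main process:
# "pushers" rows start or stop individual pushers, while "events" and
# "receipts" rows poke the pusher pool so pending notifications and read
# receipts are processed.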
class PusherReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(PusherReplicationHandler, self).__init__(hs.get_datastore())

        self.pusher_pool = hs.get_pusherpool()

    def on_rdata(self, stream_name, token, rows):
        super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
        preserve_fn(self.poke_pushers)(stream_name, token, rows)

    @defer.inlineCallbacks
    def poke_pushers(self, stream_name, token, rows):
        if stream_name == "pushers":
            for row in rows:
                if row.deleted:
                    yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
                else:
                    yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
        elif stream_name == "events":
            yield self.pusher_pool.on_new_notifications(
                token, token,
            )
        elif stream_name == "receipts":
            yield self.pusher_pool.on_new_receipts(
                token, token, set(row.room_id for row in rows)
            )

    def stop_pusher(self, user_id, app_id, pushkey):
        key = "%s:%s" % (app_id, pushkey)
        pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
        pusher = pushers_for_user.pop(key, None)
        if pusher is None:
            return
        logger.info("Stopping pusher %r / %r", user_id, key)
        pusher.on_stop()

    def start_pusher(self, user_id, app_id, pushkey):
        key = "%s:%s" % (app_id, pushkey)
        logger.info("Starting pusher %r / %r", user_id, key)
        return self.pusher_pool._refresh_pusher(app_id, pushkey, user_id)

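# Worker entry point: load the worker config, refuse to run while the main
# process still has start_pushers enabled, then force pushers on locally and
# hand control to the worker reactor.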
def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse pusher", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.pusher"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    if config.start_pushers:
        sys.stderr.write(
            "\nThe pushers must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``start_pushers: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the pushers to start since they will be disabled in the main config
    config.start_pushers = True

    database_engine = create_engine(config.database_config)

    ps = PusherServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def start():
        ps.get_pusherpool().start()
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-pusher", config)

if __name__ == '__main__':
    with LoggingContext("main"):
        ps = start(sys.argv[1:])
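For context: a pusher worker of this era was started as its own process, pointing
at the main homeserver config plus a worker config whose worker_app is
"synapse.app.pusher". Something along these lines (the file names are illustrative,
not taken from this diff):

    $ python -m synapse.app.pusher -c homeserver.yaml -c workers/pusher.yaml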