Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-19 02:20:44 +00:00)

Comparing commits: v0.33.6 ... initial_sy (1 commit, SHA1 a8dbb624b3)
.circleci/config.yml (deleted)
@@ -1,165 +0,0 @@

version: 2
jobs:
  dockerhubuploadrelease:
    machine: true
    steps:
      - checkout
      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG} .
      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
  dockerhubuploadlatest:
    machine: true
    steps:
      - checkout
      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1} .
      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1}-py3 --build-arg PYTHON_VERSION=3.6 .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1} matrixdotorg/synapse:latest
      - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1}-py3 matrixdotorg/synapse:latest-py3
      - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}
      - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}-py3
      - run: docker push matrixdotorg/synapse:latest
      - run: docker push matrixdotorg/synapse:latest-py3
  sytestpy2:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy2postgres:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy2merged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs

  sytestpy2postgresmerged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs

  sytestpy3:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3postgres:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3merged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3postgresmerged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs

workflows:
  version: 2
  build:
    jobs:
      - sytestpy2:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy2postgres:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy3:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy3postgres:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy2merged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy2postgresmerged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy3merged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy3postgresmerged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - dockerhubuploadrelease:
          filters:
            tags:
              only: /v[0-9].[0-9]+.[0-9]+.*/
            branches:
              ignore: /.*/
      - dockerhubuploadlatest:
          filters:
            branches:
              only: master
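
Note: the two dockerhubupload jobs above run ordinary docker commands, so the release build can be reproduced outside CI. A minimal local sketch, assuming Docker is installed, the repository is checked out at its root, and the illustrative tag v0.33.6 stands in for CircleCI's ${CIRCLE_TAG}:

    # Reproduce the image builds from the dockerhubuploadrelease job.
    # TAG is an illustrative stand-in for ${CIRCLE_TAG}, which CI derives from the git tag.
    TAG=v0.33.6
    docker build -f docker/Dockerfile -t matrixdotorg/synapse:${TAG} .
    docker build -f docker/Dockerfile -t matrixdotorg/synapse:${TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
    # Pushing would additionally require docker login with valid Docker Hub credentials.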
.circleci/merge_base_branch.sh (deleted)
@@ -1,34 +0,0 @@

#!/usr/bin/env bash

set -e

# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV
source $BASH_ENV

if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
    echo "Can't figure out what the PR number is! Assuming merge target is develop."

    # It probably hasn't had a PR opened yet. Since all PRs land on develop, we
    # can probably assume it's based on it and will be merged into it.
    GITBASE="develop"
else
    # Get the reference, using the GitHub API
    GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
fi

# Show what we are before
git show -s

# Set up username so it can do a merge
git config --global user.email bot@matrix.org
git config --global user.name "A robot"

# Fetch and merge. If it doesn't work, it will raise due to set -e.
git fetch -u origin $GITBASE
git merge --no-edit origin/$GITBASE

# Show what we are after.
git show -s
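
The "~shell magic~" in the script above is plain bash suffix stripping: ${CIRCLE_PULL_REQUEST##*/} deletes the longest prefix matching */, i.e. everything up to and including the last slash, leaving just the PR number from the URL. A standalone illustration (the URL and PR number are hypothetical):

    # ${VAR##*/} removes everything up to the final "/", keeping the trailing segment.
    CIRCLE_PULL_REQUEST="https://github.com/matrix-org/synapse/pull/3911"
    echo "${CIRCLE_PULL_REQUEST##*/}"   # prints: 3911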
.dockerignore (deleted)
@@ -1,7 +0,0 @@

Dockerfile
.travis.yml
.gitignore
demo/etc
tox.ini
.git/*
.tox/*
.github/ISSUE_TEMPLATE.md (48 lines, vendored, deleted)
@@ -1,48 +0,0 @@

<!--

**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)


This is a bug report template. By following the instructions below and
filling out the sections with your information, you will help us to get all
the necessary data to fix your issue.

You can also preview your report before submitting it. You may remove sections
that aren't relevant to your particular case.

Text between <!-- and --> marks will be invisible in the report.

-->

### Description

Describe here the problem that you are experiencing, or the feature you are requesting.

### Steps to reproduce

- For bugs, list the steps
- that reproduce the bug
- using hyphens as bullet points

Describe how what happens differs from what you expected.

<!-- If you can identify any relevant log snippets from _homeserver.log_, please include
those (please be careful to remove any personal or private data). Please surround them with
``` (three backticks, on a line on their own), so that they are formatted legibly. -->

### Version information

<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->

- **Homeserver**: Was this issue identified on matrix.org or another homeserver?

If not matrix.org:
- **Version**: What version of Synapse is running? <!--
You can find the Synapse version by inspecting the server headers (replace matrix.org with
your own homeserver domain):
$ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
-->
- **Install method**: package manager/git clone/pip
- **Platform**: Tell us about the environment in which your homeserver is operating
  - distro, hardware, if it's running in a vm/container, etc.
.gitignore (23 lines changed, vendored)
@@ -1,11 +1,8 @@
 *.pyc
 .*.swp
-*~
-*.lock
 
 .DS_Store
 _trial_temp/
-_trial_temp*/
 logs/
 dbs/
 *.egg
@@ -16,7 +13,6 @@ docs/build/
 cmdclient_config.json
 homeserver*.db
 homeserver*.log
-homeserver*.log.*
 homeserver*.pid
 homeserver*.yaml
 
@@ -28,15 +24,14 @@ homeserver*.yaml
 .coverage
 htmlcov
 
-demo/*/*.db
-demo/*/*.log
-demo/*/*.log.*
-demo/*/*.pid
+demo/*.db
+demo/*.log
+demo/*.log.*
+demo/*.pid
 demo/media_store.*
 demo/etc
 
 uploads
-cache
 
 .idea/
 media_store/
@@ -44,16 +39,6 @@ media_store/
 *.tac
 
 build/
-venv/
-venv*/
-*venv/
 
 localhost-800*/
 static/client/register/register_config.js
-.tox
-
-env/
-*.config
-
-.vscode/
-.ropeproject/
.travis.yml (52 lines, deleted)
@@ -1,52 +0,0 @@

sudo: false
language: python

# tell travis to cache ~/.cache/pip
cache: pip

before_script:
  - git remote set-branches --add origin develop
  - git fetch origin develop

matrix:
  fast_finish: true
  include:
    - python: 2.7
      env: TOX_ENV=packaging

    - python: 2.7
      env: TOX_ENV=pep8

    - python: 2.7
      env: TOX_ENV=py27

    - python: 2.7
      env: TOX_ENV=py27-old

    - python: 2.7
      env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
      services:
        - postgresql

    - python: 3.5
      env: TOX_ENV=py35

    - python: 3.6
      env: TOX_ENV=py36

    - python: 3.6
      env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4"
      services:
        - postgresql

    - python: 3.6
      env: TOX_ENV=check_isort

    - python: 3.6
      env: TOX_ENV=check-newsfragment

install:
  - pip install tox

script:
  - tox -e $TOX_ENV
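
Each matrix entry above only selects a Python version and a tox environment; the install and script stanzas are shared by every entry. Reproducing one entry locally is therefore just a matter of exporting the same variable, a sketch assuming the matching Python interpreter and pip are available:

    # Local equivalent of the "python: 2.7 / env: TOX_ENV=py27" matrix entry.
    pip install tox
    TOX_ENV=py27
    tox -e $TOX_ENV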
AUTHORS.rst (68 lines, deleted)
@@ -1,68 +0,0 @@

Erik Johnston <erik at matrix.org>
 * HS core
 * Federation API impl

Mark Haines <mark at matrix.org>
 * HS core
 * Crypto
 * Content repository
 * CS v2 API impl

Kegan Dougal <kegan at matrix.org>
 * HS core
 * CS v1 API impl
 * AS API impl

Paul "LeoNerd" Evans <paul at matrix.org>
 * HS core
 * Presence
 * Typing Notifications
 * Performance metrics and caching layer

Dave Baker <dave at matrix.org>
 * Push notifications
 * Auth CS v2 impl

Matthew Hodgson <matthew at matrix.org>
 * General doc & housekeeping
 * Vertobot/vertobridge matrix<->verto PoC

Emmanuel Rohee <manu at matrix.org>
 * Supporting iOS clients (testability and fallback registration)

Turned to Dust <dwinslow86 at gmail.com>
 * ArchLinux installation instructions

Brabo <brabo at riseup.net>
 * Installation instruction fixes

Ivan Shapovalov <intelfx100 at gmail.com>
 * contrib/systemd: a sample systemd unit file and a logger configuration

Eric Myhre <hash at exultant.us>
 * Fix bug where ``media_store_path`` config option was ignored by v0 content
   repository API.

Muthu Subramanian <muthu.subramanian.karunanidhi at ericsson.com>
 * Add SAML2 support for registration and login.

Steven Hammerton <steven.hammerton at openmarket.com>
 * Add CAS support for registration and login.

Mads Robin Christensen <mads at v42 dot dk>
 * CentOS 7 installation instructions.

Florent Violleau <floviolleau at gmail dot com>
 * Add Raspberry Pi installation instructions and general troubleshooting items

Niklas Riekenbrauck <nikriek at gmail dot.com>
 * Add JWT support for registration and login

Christoph Witzany <christoph at web.crofting.com>
 * Add LDAP support for authentication

Pierre Jaury <pierre at jaury.eu>
 * Docker packaging

Serban Constantin <serban.constantin at gmail dot com>
 * Small bug fix
CHANGES.md (2862 lines changed): file diff suppressed because it is too large.
CHANGES.rst (449 lines, new file)
@@ -0,0 +1,449 @@

Changes in synapse v0.8.0 (2015-03-06)
======================================

General:

* Add support for registration fallback. This is a page hosted on the server
  which allows a user to register for an account, regardless of what client
  they are using (e.g. mobile devices).

* Added new default push rules and made them configurable by clients:

  * Suppress all notice messages.
  * Notify when invited to a new room.
  * Notify for messages that don't match any rule.
  * Notify on incoming call.

Federation:

* Added per host server side rate-limiting of incoming federation requests.
* Added a ``/get_missing_events/`` API to federation to reduce number of
  ``/events/`` requests.

Configuration:

* Added configuration option to disable registration:
  ``disable_registration``.
* Added configuration option to change soft limit of number of open file
  descriptors: ``soft_file_limit``.
* Make ``tls_private_key_path`` optional when running with ``no_tls``.

Application services:

* Application services can now poll on the CS API ``/events`` for their events,
  by providing their application service ``access_token``.
* Added exclusive namespace support to application services API.


Changes in synapse v0.7.1 (2015-02-19)
======================================

* Initial alpha implementation of parts of the Application Services API.
  Including:

  - AS Registration / Unregistration
  - User Query API
  - Room Alias Query API
  - Push transport for receiving events.
  - User/Alias namespace admin control

* Add cache when fetching events from remote servers to stop repeatedly
  fetching events with bad signatures.
* Respect the per remote server retry scheme when fetching both events and
  server keys to reduce the number of times we send requests to dead servers.
* Inform remote servers when the local server fails to handle a received event.
* Turn off python bytecode generation due to problems experienced when
  upgrading from previous versions.

Changes in synapse v0.7.0 (2015-02-12)
======================================

* Add initial implementation of the query auth federation API, allowing
  servers to agree on whether an event should be allowed or rejected.
* Persist events we have rejected from federation, fixing the bug where
  servers would keep requesting the same events.
* Various federation performance improvements, including:

  - Add in memory caches on queries such as:

    * Computing the state of a room at a point in time, used for
      authorization on federation requests.
    * Fetching events from the database.
    * User's room membership, used for authorizing presence updates.

  - Upgraded JSON library to improve parsing and serialisation speeds.

* Add default avatars to new user accounts using pydenticon library.
* Correctly time out federation requests.
* Retry federation requests against different servers.
* Add support for push and push rules.
* Add alpha versions of proposed new CSv2 APIs, including ``/sync`` API.

Changes in synapse 0.6.1 (2015-01-07)
=====================================

* Major optimizations to improve performance of initial sync and event sending
  in large rooms (by up to 10x)
* Media repository now includes a Content-Length header on media downloads.
* Improve quality of thumbnails by changing resizing algorithm.

Changes in synapse 0.6.0 (2014-12-16)
=====================================

* Add new API for media upload and download that supports thumbnailing.
* Replicate media uploads over multiple homeservers so media is always served
  to clients from their local homeserver. This obsoletes the
  --content-addr parameter and confusion over accessing content directly
  from remote homeservers.
* Implement exponential backoff when retrying federation requests when
  sending to remote homeservers which are offline.
* Implement typing notifications.
* Fix bugs where we sent events with invalid signatures due to bugs where
  we incorrectly persisted events.
* Improve performance of database queries involving retrieving events.

Changes in synapse 0.5.4a (2014-12-13)
======================================

* Fix bug while generating the error message when a file path specified in
  the config doesn't exist.

Changes in synapse 0.5.4 (2014-12-03)
=====================================

* Fix presence bug where some rooms did not display presence updates for
  remote users.
* Do not log SQL timing log lines when started with "-v"
* Fix potential memory leak.

Changes in synapse 0.5.3c (2014-12-02)
======================================

* Change the default value for the `content_addr` option to use the HTTP
  listener, as by default the HTTPS listener will be using a self-signed
  certificate.

Changes in synapse 0.5.3 (2014-11-27)
=====================================

* Fix bug that caused joining a remote room to fail if a single event was not
  signed correctly.
* Fix bug which caused servers to continuously try and fetch events from other
  servers.

Changes in synapse 0.5.2 (2014-11-26)
=====================================

Fix major bug that caused rooms to disappear from people's initial sync.

Changes in synapse 0.5.1 (2014-11-26)
=====================================
See UPGRADES.rst for specific instructions on how to upgrade.

* Fix bug where we served up an Event that did not match its signatures.
* Fix regression where we no longer correctly handled the case where a
  homeserver receives an event for a room it doesn't recognise (but is in.)

Changes in synapse 0.5.0 (2014-11-19)
=====================================
This release includes changes to the federation protocol and client-server API
that are not backwards compatible.

This release also changes the internal database schemas and so requires servers to
drop their current history. See UPGRADES.rst for details.

Homeserver:
 * Add authentication and authorization to the federation protocol. Events are
   now signed by their originating homeservers.
 * Implement the new authorization model for rooms.
 * Split out web client into a separate repository: matrix-angular-sdk.
 * Change the structure of PDUs.
 * Fix bug where user could not join rooms via an alias containing 4-byte
   UTF-8 characters.
 * Merge concept of PDUs and Events internally.
 * Improve logging by adding request ids to log lines.
 * Implement a very basic room initial sync API.
 * Implement the new invite/join federation APIs.

Webclient:
 * The webclient has been moved to a separate repository.

Changes in synapse 0.4.2 (2014-10-31)
=====================================

Homeserver:
 * Fix bugs where we did not notify users of correct presence updates.
 * Fix bug where we did not handle sub second event stream timeouts.

Webclient:
 * Add ability to click on messages to see JSON.
 * Add ability to redact messages.
 * Add ability to view and edit all room state JSON.
 * Handle incoming redactions.
 * Improve feedback on errors.
 * Fix bugs in mobile CSS.
 * Fix bugs with desktop notifications.

Changes in synapse 0.4.1 (2014-10-17)
=====================================
Webclient:
 * Fix bug with display of timestamps.

Changes in synapse 0.4.0 (2014-10-17)
=====================================
This release includes changes to the federation protocol and client-server API
that are not backwards compatible.

The Matrix specification has been moved to a separate git repository:
http://github.com/matrix-org/matrix-doc

You will also need an updated syutil and config. See UPGRADES.rst.

Homeserver:
 * Sign federation transactions to assert strong identity over federation.
 * Rename timestamp keys in PDUs and events from 'ts' and 'hsob_ts' to 'origin_server_ts'.


Changes in synapse 0.3.4 (2014-09-25)
=====================================
This version adds support for using a TURN server. See docs/turn-howto.rst on
how to set one up.

Homeserver:
 * Add support for redaction of messages.
 * Fix bug where inviting a user on a remote home server could take up to
   20-30s.
 * Implement a get current room state API.
 * Add support specifying and retrieving turn server configuration.

Webclient:
 * Add button to send messages to users from the home page.
 * Add support for using TURN for VoIP calls.
 * Show display name change messages.
 * Fix bug where the client didn't get the state of a newly joined room
   until after it has been refreshed.
 * Fix bugs with tab complete.
 * Fix bug where holding down the down arrow caused chrome to chew 100% CPU.
 * Fix bug where desktop notifications occasionally used "Undefined" as the
   display name.
 * Fix more places where we sometimes saw room IDs incorrectly.
 * Fix bug which caused lag when entering text in the text box.

Changes in synapse 0.3.3 (2014-09-22)
=====================================

Homeserver:
 * Fix bug where you continued to get events for rooms you had left.

Webclient:
 * Add support for video calls with basic UI.
 * Fix bug where one to one chats were named after your display name rather
   than the other person's.
 * Fix bug which caused lag when typing in the textarea.
 * Refuse to run on browsers we know won't work.
 * Trigger pagination when joining new rooms.
 * Fix bug where we sometimes didn't display invitations in recents.
 * Automatically join room when accepting a VoIP call.
 * Disable outgoing and reject incoming calls on browsers we don't support
   VoIP in.
 * Don't display desktop notifications for messages in the room you are
   non-idle and speaking in.

Changes in synapse 0.3.2 (2014-09-18)
=====================================

Webclient:
 * Fix bug where an empty "bing words" list in old accounts didn't send
   notifications when it should have done.

Changes in synapse 0.3.1 (2014-09-18)
=====================================
This is a release to hotfix v0.3.0 to fix two regressions.

Webclient:
 * Fix a regression where we sometimes displayed duplicate events.
 * Fix a regression where we didn't immediately remove rooms you were
   banned in from the recents list.

Changes in synapse 0.3.0 (2014-09-18)
=====================================
See UPGRADE for information about changes to the client server API, including
breaking backwards compatibility with VoIP calls and registration API.

Homeserver:
 * When a user changes their displayname or avatar the server will now update
   all their join states to reflect this.
 * The server now adds "age" key to events to indicate how old they are. This
   is clock independent, so at no point does any server or webclient have to
   assume their clock is in sync with everyone else.
 * Fix bug where we didn't correctly pull in missing PDUs.
 * Fix bug where prev_content key wasn't always returned.
 * Add support for password resets.

Webclient:
 * Improve page content loading.
 * Join/parts now trigger desktop notifications.
 * Always show room aliases in the UI if one is present.
 * No longer show user-count in the recents side panel.
 * Add up & down arrow support to the text box for message sending to step
   through your sent history.
 * Don't display notifications for our own messages.
 * Emotes are now formatted correctly in desktop notifications.
 * The recents list now differentiates between public & private rooms.
 * Fix bug where when switching between rooms the pagination flickered before
   the view jumped to the bottom of the screen.
 * Add bing word support.

Registration API:
 * The registration API has been overhauled to function like the login API. In
   practice, this means registration requests must now include the following:
   'type':'m.login.password'. See UPGRADE for more information on this.
 * The 'user_id' key has been renamed to 'user' to better match the login API.
 * There is an additional login type: 'm.login.email.identity'.
 * The command client and web client have been updated to reflect these changes.

Changes in synapse 0.2.3 (2014-09-12)
=====================================

Homeserver:
 * Fix bug where we stopped sending events to remote home servers if a
   user from that home server left, even if there were some still in the
   room.
 * Fix bugs in the state conflict resolution where it was incorrectly
   rejecting events.

Webclient:
 * Display room names and topics.
 * Allow setting/editing of room names and topics.
 * Display information about rooms on the main page.
 * Handle ban and kick events in real time.
 * VoIP UI and reliability improvements.
 * Add glare support for VoIP.
 * Improvements to initial startup speed.
 * Don't display duplicate join events.
 * Local echo of messages.
 * Differentiate sending and sent of local echo.
 * Various minor bug fixes.

Changes in synapse 0.2.2 (2014-09-06)
=====================================

Homeserver:
 * When the server returns state events it now also includes the previous
   content.
 * Add support for inviting people when creating a new room.
 * Make the homeserver inform the room via `m.room.aliases` when a new alias
   is added for a room.
 * Validate `m.room.power_level` events.

Webclient:
 * Add support for captchas on registration.
 * Handle `m.room.aliases` events.
 * Asynchronously send messages and show a local echo.
 * Inform the UI when a message failed to send.
 * Only autoscroll on receiving a new message if the user was already at the
   bottom of the screen.
 * Add support for ban/kick reasons.

Changes in synapse 0.2.1 (2014-09-03)
=====================================

Homeserver:
 * Added support for signing up with a third party id.
 * Add synctl scripts.
 * Added rate limiting.
 * Add option to change the external address the content repo uses.
 * Presence bug fixes.

Webclient:
 * Added support for signing up with a third party id.
 * Added support for banning and kicking users.
 * Added support for displaying and setting ops.
 * Added support for room names.
 * Fix bugs with room membership event display.

Changes in synapse 0.2.0 (2014-09-02)
=====================================
This update changes many configuration options, updates the
database schema and mandates SSL for server-server connections.

Homeserver:
 * Require SSL for server-server connections.
 * Add SSL listener for client-server connections.
 * Add ability to use config files.
 * Add support for kicking/banning and power levels.
 * Allow setting of room names and topics on creation.
 * Change presence to include last seen time of the user.
 * Change url path prefix to /_matrix/...
 * Bug fixes to presence.

Webclient:
 * Reskin the CSS for registration and login.
 * Various improvements to rooms CSS.
 * Support changes in client-server API.
 * Bug fixes to VOIP UI.
 * Various bug fixes to handling of changes to room member list.

Changes in synapse 0.1.2 (2014-08-29)
=====================================

Webclient:
 * Add basic call state UI for VoIP calls.

Changes in synapse 0.1.1 (2014-08-29)
=====================================

Homeserver:
 * Fix bug that caused the event stream to not notify some clients about
   changes.

Changes in synapse 0.1.0 (2014-08-29)
=====================================
Presence has been reenabled in this release.

Homeserver:
 * Update client to server API, including:
   - Use a more consistent url scheme.
   - Provide more useful information in the initial sync api.
 * Change the presence handling to be much more efficient.
 * Change the presence server to server API to not require explicit polling of
   all users who share a room with a user.
 * Fix races in the event streaming logic.

Webclient:
 * Update to use new client to server API.
 * Add basic VOIP support.
 * Add idle timers that change your status to away.
 * Add recent rooms column when viewing a room.
 * Various network efficiency improvements.
 * Add basic mobile browser support.
 * Add a settings page.

Changes in synapse 0.0.1 (2014-08-22)
=====================================
Presence has been disabled in this release due to a bug that caused the
homeserver to spam other remote homeservers.

Homeserver:
 * Completely change the database schema to support generic event types.
 * Improve presence reliability.
 * Improve reliability of joining remote rooms.
 * Fix bug where room join events were duplicated.
 * Improve initial sync API to return more information to the client.
 * Stop generating fake messages for room membership events.

Webclient:
 * Add tab completion of names.
 * Add ability to upload and send images.
 * Add profile pages.
 * Improve CSS layout of room.
 * Disambiguate identical display names.
 * Don't get remote users display names and avatars individually.
 * Use the new initial sync API to reduce number of round trips to the homeserver.
 * Change url scheme to use room aliases instead of room ids where known.
 * Increase longpoll timeout.

Changes in synapse 0.0.0 (2014-08-13)
=====================================

* Initial alpha release
CONTRIBUTING.rst (169 lines, deleted)
@@ -1,169 +0,0 @@

Contributing code to Matrix
===========================

Everyone is welcome to contribute code to Matrix
(https://github.com/matrix-org), provided that they are willing to license
their contributions under the same license as the project itself. We follow a
simple 'inbound=outbound' model for contributions: the act of submitting an
'inbound' contribution means that the contributor agrees to license the code
under the same terms as the project's overall 'outbound' license - in our
case, this is almost always Apache Software License v2 (see LICENSE).

How to contribute
~~~~~~~~~~~~~~~~~

The preferred and easiest way to contribute changes to Matrix is to fork the
relevant project on github, and then create a pull request to ask us to pull
your changes into our repo
(https://help.github.com/articles/using-pull-requests/)

**The single biggest thing you need to know is: please base your changes on
the develop branch - /not/ master.**

We use the master branch to track the most recent release, so that folks who
blindly clone the repo and automatically check out master get something that
works. Develop is the unstable branch where all the development actually
happens: the workflow is that contributors should fork the develop branch to
make a 'feature' branch for a particular contribution, and then make a pull
request to merge this back into the matrix.org 'official' develop branch. We
use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.

We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
pull requests to synapse get automatically tested by Travis and CircleCI.
If your change breaks the build, this will be shown in GitHub, so please
keep an eye on the pull request for feedback.

To run unit tests in a local development environment, you can use:

- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for
  SQLite-backed Synapse on Python 2.7.
- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5.
- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7
  (requires a running local PostgreSQL with access to create databases).
- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7
  (requires Docker). Entirely self-contained, recommended if you don't want to
  set up PostgreSQL yourself.
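
For example, a minimal end-to-end run of the SQLite-backed Python 2.7 suite from a fresh checkout, assuming only a Python 2.7 interpreter and pip are installed, might look like::

    # Install tox once; it builds and manages the test virtualenvs itself.
    pip install tox
    # Run the py27 test environment defined in tox.ini (SQLite-backed Synapse).
    tox -e py27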

Docker images are available for running the integration tests (SyTest) locally,
see the `documentation in the SyTest repo
<https://github.com/matrix-org/sytest/blob/develop/docker/README.md>`_ for more
information.

Code style
~~~~~~~~~~

All Matrix projects have a well-defined code-style - and sometimes we've even
got as far as documenting it... For instance, synapse's code style doc lives
at https://github.com/matrix-org/synapse/tree/master/docs/code_style.rst.

Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.

Changelog
~~~~~~~~~

All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by Towncrier
(https://github.com/hawkowl/towncrier).

To create a changelog entry, make a new file in the ``changelog.d``
directory named in the format of ``PRnumber.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes). The content of
the file is your changelog entry, which can contain Markdown
formatting. Adding credits to the changelog is encouraged, we value
your contributions and would like to have you shouted out in the
release notes!

For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when received over federation. Contributed by Jane
Matrix".

Attribution
~~~~~~~~~~~

Everyone who contributes anything to Matrix is welcome to be listed in the
AUTHORS.rst file for the project in question. Please feel free to include a
change to AUTHORS.rst in your pull request to list yourself and a short
description of the area(s) you've worked on. Also, we sometimes have swag to
give away to contributors - if you feel that Matrix-branded apparel is missing
from your life, please mail us your shipping address to matrix at matrix.org and
we'll try to fix it :)

Sign off
~~~~~~~~

In order to have a concrete record that your contribution is intentional
and you agree to license it under the same terms as the project's license, we've adopted the
same lightweight approach that the Linux Kernel
(https://www.kernel.org/doc/Documentation/SubmittingPatches), Docker
(https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix::

    Developer Certificate of Origin
    Version 1.1

    Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
    660 York Street, Suite 102,
    San Francisco, CA 94110 USA

    Everyone is permitted to copy and distribute verbatim copies of this
    license document, but changing it is not allowed.

    Developer's Certificate of Origin 1.1

    By making a contribution to this project, I certify that:

    (a) The contribution was created in whole or in part by me and I
        have the right to submit it under the open source license
        indicated in the file; or

    (b) The contribution is based upon previous work that, to the best
        of my knowledge, is covered under an appropriate open source
        license and I have the right under that license to submit that
        work with modifications, whether created in whole or in part
        by me, under the same open source license (unless I am
        permitted to submit under a different license), as indicated
        in the file; or

    (c) The contribution was provided directly to me by some other
        person who certified (a), (b) or (c) and I have not modified
        it.

    (d) I understand and agree that this project and the contribution
        are public and that a record of the contribution (including all
        personal information I submit with it, including my sign-off) is
        maintained indefinitely and may be redistributed consistent with
        this project or the open source license(s) involved.

If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment::

    Signed-off-by: Your Name <your@email.example.org>

We accept contributions under a legally identifiable name, such as
your name on government documentation or common-law names (names
claimed by legitimate usage or repute). Unfortunately, we cannot
accept anonymous contributions at this time.

Git allows you to add this signoff automatically when using the ``-s``
flag to ``git commit``, which uses the name and email set in your
``user.name`` and ``user.email`` git configs.
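
For example (the name and email below are placeholders)::

    # Configure the identity used for the sign-off line, then commit with -s.
    git config user.name "Your Name"
    git config user.email "your@email.example.org"
    git commit -s -m "Fix a hypothetical bug"
    # The resulting commit message ends with:
    #   Signed-off-by: Your Name <your@email.example.org>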

Conclusion
~~~~~~~~~~

That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully
matrix together all the fragmented communication technologies out there we are
reliant on contributions and collaboration from the community to do so. So
please get involved - and we hope you have as much fun hacking on Matrix as we
do!
MANIFEST.in (31 lines changed)
@@ -2,38 +2,13 @@ include synctl
 include LICENSE
 include VERSION
 include *.rst
-include *.md
 include demo/README
-include demo/demo.tls.dh
-include demo/*.py
-include demo/*.sh
 
 recursive-include synapse/storage/schema *.sql
-recursive-include synapse/storage/schema *.py
 
+recursive-include demo *.dh
+recursive-include demo *.py
+recursive-include demo *.sh
 recursive-include docs *
-recursive-include res *
 recursive-include scripts *
-recursive-include scripts-dev *
-recursive-include synapse *.pyi
 recursive-include tests *.py
-
-recursive-include synapse/static *.css
-recursive-include synapse/static *.gif
-recursive-include synapse/static *.html
-recursive-include synapse/static *.js
-
-exclude Dockerfile
-exclude .dockerignore
-exclude test_postgresql.sh
-
-include pyproject.toml
-recursive-include changelog.d *
-
-prune .github
-prune demo/etc
-prune docker
-prune .circleci
-
-exclude jenkins*
-recursive-exclude jenkins *.sh
MAP.rst (35 lines, new file)
@@ -0,0 +1,35 @@

Directory Structure
===================

Warning: this may be a bit stale...

::

    .
    ├── cmdclient           Basic CLI python Matrix client
    ├── demo                Scripts for running standalone Matrix demos
    ├── docs                All doc, including the draft Matrix API spec
    │   ├── client-server   The client-server Matrix API spec
    │   ├── model           Domain-specific elements of the Matrix API spec
    │   ├── server-server   The server-server model of the Matrix API spec
    │   └── sphinx          The internal API doc of the Synapse homeserver
    ├── experiments         Early experiments of using Synapse's internal APIs
    ├── graph               Visualisation of Matrix's distributed message store
    ├── synapse             The reference Matrix homeserver implementation
    │   ├── api             Common building blocks for the APIs
    │   │   ├── events      Definition of state representation Events
    │   │   └── streams     Definition of streamable Event objects
    │   ├── app             The __main__ entry point for the homeserver
    │   ├── crypto          The PKI client/server used for secure federation
    │   │   └── resource    PKI helper objects (e.g. keys)
    │   ├── federation      Server-server state replication logic
    │   ├── handlers        The main business logic of the homeserver
    │   ├── http            Wrappers around Twisted's HTTP server & client
    │   ├── rest            Servlet-style RESTful API
    │   ├── storage         Persistence subsystem (currently only sqlite3)
    │   │   └── schema      sqlite persistence schema
    │   └── util            Synapse-specific utilities
    ├── tests               Unit tests for the Synapse homeserver
    └── webclient           Basic AngularJS Matrix web client
README.rst (1109 lines changed): file diff suppressed because it is too large.
UPGRADE.rst (140 lines changed)
@@ -1,121 +1,3 @@
-Upgrading Synapse
-=================
-
-Before upgrading, check if any special steps are required to upgrade from
-what you currently have installed to the current version of synapse. The extra
-instructions that may be required are listed later in this document.
-
-1. If synapse was installed in a virtualenv then activate that virtualenv before
-   upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then
-   run:
-
-   .. code:: bash
-
-       source ~/.synapse/bin/activate
-
-2. If synapse was installed using pip then upgrade to the latest version by
-   running:
-
-   .. code:: bash
-
-       pip install --upgrade --process-dependency-links matrix-synapse
-
-       # restart synapse
-       synctl restart
-
-   If synapse was installed using git then upgrade to the latest version by
-   running:
-
-   .. code:: bash
-
-       # Pull the latest version of the master branch.
-       git pull
-       # Update the versions of synapse's python dependencies.
-       python synapse/python_dependencies.py | xargs pip install --upgrade
-
-       # restart synapse
-       ./synctl restart
-
-To check whether your update was successful, you can check the Server header
-returned by the Client-Server API:
-
-.. code:: bash
-
-    # replace <host.name> with the hostname of your synapse homeserver.
-    # You may need to specify a port (eg, :8448) if your server is not
-    # configured on port 443.
-    curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
-
-Upgrading to v0.27.3
-====================
-
-This release expands the anonymous usage stats sent if the opt-in
-``report_stats`` configuration is set to ``true``. We now capture RSS memory
-and cpu use at a very coarse level. This requires administrators to install
-the optional ``psutil`` python module.
-
-We would appreciate it if you could assist by ensuring this module is available
-and ``report_stats`` is enabled. This will let us see if performance changes to
-synapse are having an impact on the general community.
-
-Upgrading to v0.15.0
-====================
-
-If you want to use the new URL previewing API (/_matrix/media/r0/preview_url)
-then you have to explicitly enable it in the config and update your
-dependencies. See README.rst for details.
-
-Upgrading to v0.11.0
-====================
-
-This release includes the option to send anonymous usage stats to matrix.org,
-and requires that administrators explicitly opt in or out by setting the
-``report_stats`` option to either ``true`` or ``false``.
-
-We would really appreciate it if you could help our project out by reporting
-anonymized usage statistics from your homeserver. Only very basic aggregate
-data (e.g. number of users) will be reported, but it helps us to track the
-growth of the Matrix community, and helps us to make Matrix a success, as well
-as to convince other networks that they should peer with us.
-
-Upgrading to v0.9.0
-===================
-
-Application services have had a breaking API change in this version.
-
-They can no longer register themselves with a home server using the AS HTTP API. This
-decision was made because a compromised application service with free rein to register
-any regex in effect grants full read/write access to the home server if a regex of ``.*``
-is used. An attack where a compromised AS re-registers itself with ``.*`` was deemed too
-big of a security risk to ignore, and so the ability to register with the HS remotely has
-been removed.
-
-It has been replaced by specifying a list of application service registrations in
-``homeserver.yaml``::
-
-    app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]
-
-Where ``registration-01.yaml`` looks like::
-
-    url: <String>  # e.g. "https://my.application.service.com"
-    as_token: <String>
-    hs_token: <String>
-    sender_localpart: <String>  # This is a new field which denotes the user_id localpart when using the AS token
-    namespaces:
-      users:
-        - exclusive: <Boolean>
-          regex: <String>  # e.g. "@prefix_.*"
-      aliases:
-        - exclusive: <Boolean>
-          regex: <String>
-      rooms:
-        - exclusive: <Boolean>
-          regex: <String>
|
|
||||||
|
|
||||||
Upgrading to v0.8.0
|
Upgrading to v0.8.0
|
||||||
===================
|
===================
|
||||||
|
|
||||||
@@ -187,7 +69,7 @@ This release completely changes the database schema and so requires upgrading
 it before starting the new version of the homeserver.

 The script "database-prepare-for-0.5.0.sh" should be used to upgrade the
 database. This will save all user information, such as logins and profiles,
 but will otherwise purge the database. This includes messages, which
 rooms the home server was a member of and room alias mappings.

@@ -196,18 +78,18 @@ file and ask for help in #matrix:matrix.org. The upgrade process is,
 unfortunately, non trivial and requires human intervention to resolve any
 resulting conflicts during the upgrade process.

 Before running the command the homeserver should be first completely
 shutdown. To run it, simply specify the location of the database, e.g.:

     ./scripts/database-prepare-for-0.5.0.sh "homeserver.db"

 Once this has successfully completed it will be safe to restart the
 homeserver. You may notice that the homeserver takes a few seconds longer to
 restart than usual as it reinitializes the database.

 On startup of the new version, users can either rejoin remote rooms using room
 aliases or by being reinvited. Alternatively, if any other homeserver sends a
 message to a room that the homeserver was previously in the local HS will
 automatically rejoin the room.

 Upgrading to v0.4.0
@@ -266,7 +148,7 @@ automatically generate default config use::
     --config-path homeserver.config \
     --generate-config

 This config can be edited if desired, for example to specify a different SSL
 certificate to use. Once done you can run the home server using::

     $ python synapse/app/homeserver.py --config-path homeserver.config
@@ -287,20 +169,20 @@ This release completely changes the database schema and so requires upgrading
 it before starting the new version of the homeserver.

 The script "database-prepare-for-0.0.1.sh" should be used to upgrade the
 database. This will save all user information, such as logins and profiles,
 but will otherwise purge the database. This includes messages, which
 rooms the home server was a member of and room alias mappings.

 Before running the command the homeserver should be first completely
 shutdown. To run it, simply specify the location of the database, e.g.:

     ./scripts/database-prepare-for-0.0.1.sh "homeserver.db"

 Once this has successfully completed it will be safe to restart the
 homeserver. You may notice that the homeserver takes a few seconds longer to
 restart than usual as it reinitializes the database.

 On startup of the new version, users can either rejoin remote rooms using room
 aliases or by being reinvited. Alternatively, if any other homeserver sends a
 message to a room that the homeserver was previously in the local HS will
 automatically rejoin the room.
changelog.d/.gitignore (vendored)
@@ -1 +0,0 @@
!.gitignore
@@ -1,10 +0,0 @@
Community Contributions
=======================

Everything in this directory is a project submitted by the community that may
be useful to others. As such, the project maintainers cannot guarantee
support, stability or backwards compatibility of these projects.

Files in this directory should *not* be relied on directly, as they may not
continue to work or exist in future. If you wish to use any of these files then
they should be copied to avoid them breaking from underneath you.
@@ -1,6 +1,6 @@
 #!/usr/bin/env python

-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -32,7 +32,7 @@ import urlparse
 import nacl.signing
 import nacl.encoding

-from signedjson.sign import verify_signed_json, SignatureVerifyException
+from syutil.crypto.jsonsign import verify_signed_json, SignatureVerifyException

 CONFIG_JSON = "cmdclient_config.json"

@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -36,13 +36,15 @@ class HttpClient(object):
             the request body. This will be encoded as JSON.

         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
+            Deferred: Succeeds when we get *any* HTTP response.
+
+            The result of the deferred is a tuple of `(code, response)`,
+            where `response` is a dict representing the decoded JSON body.
         """
         pass

     def get_json(self, url, args=None):
-        """ Gets some json from the given host homeserver and path
+        """ Get's some json from the given host homeserver and path

         Args:
             url (str): The URL to GET data from.
@@ -52,8 +54,10 @@ class HttpClient(object):
             and *not* a string.

         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
+            Deferred: Succeeds when we get *any* HTTP response.
+
+            The result of the deferred is a tuple of `(code, response)`,
+            where `response` is a dict representing the decoded JSON body.
         """
         pass

@@ -210,4 +214,4 @@ class _JsonProducer(object):
         pass

     def stopProducing(self):
         pass
@@ -1,41 +0,0 @@
# Synapse Docker

### Automated configuration

It is recommended that you use Docker Compose to run your containers, including
this image and a Postgres server. A sample ``docker-compose.yml`` is provided,
including example labels for reverse proxying and other artifacts.

Read the section about environment variables and set at least mandatory variables,
then run the server:

```
docker-compose up -d
```

If secrets are not specified in the environment variables, they will be generated
as part of the startup. Please ensure these secrets are kept between launches of the
Docker container, as their loss may require users to log in again.

### Manual configuration

A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts. The docker-compose file is an example;
please comment/uncomment sections that are not suitable for your use case.

Specify a ``SYNAPSE_CONFIG_PATH``, preferably to a persistent path,
to use manual configuration. To generate a fresh ``homeserver.yaml``, simply run:

```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host synapse generate
```

Then, customize your configuration and run the server:

```
docker-compose up -d
```

### More information

For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)
@@ -1,50 +0,0 @@
# This compose file is compatible with Compose itself, it might need some
# adjustments to run properly with stack.

version: '3'

services:

  synapse:
    build: ../..
    image: docker.io/matrixdotorg/synapse:latest
    # Since synapse does not retry to connect to the database, restart upon
    # failure
    restart: unless-stopped
    # See the readme for a full documentation of the environment settings
    environment:
      - SYNAPSE_SERVER_NAME=my.matrix.host
      - SYNAPSE_REPORT_STATS=no
      - SYNAPSE_ENABLE_REGISTRATION=yes
      - SYNAPSE_LOG_LEVEL=INFO
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may either store all the files in a local folder
      - ./files:/data
      # .. or you may split this between different storage points
      # - ./files:/data
      # - /path/to/ssd:/data/uploads
      # - /path/to/large_hdd:/data/media
    depends_on:
      - db
    # In order to expose Synapse, remove one of the following, you might for
    # instance expose the TLS port directly:
    ports:
      - 8448:8448/tcp
    # ... or use a reverse proxy, here is an example for traefik:
    labels:
      - traefik.enable=true
      - traefik.frontend.rule=Host:my.matrix.Host
      - traefik.port=8448

  db:
    image: docker.io/postgres:10-alpine
    # Change that password, of course!
    environment:
      - POSTGRES_USER=synapse
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may store the database tables in a local folder..
      - ./schemas:/var/lib/postgresql/data
      # .. or store them on some high performance storage for better results
      # - /path/to/ssd/storage:/var/lib/postgresql/data
@@ -1,50 +0,0 @@
# Example log_config file for synapse. To enable, point `log_config` to it in
# `homeserver.yaml`, and restart synapse.
#
# This configuration will produce similar results to the defaults within
# synapse, but can be edited to give more flexibility.

version: 1

formatters:
  fmt:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  # example output to console
  console:
    class: logging.StreamHandler
    filters: [context]

  # example output to file - to enable, edit 'root' config below.
  file:
    class: logging.handlers.RotatingFileHandler
    formatter: fmt
    filename: /var/log/synapse/homeserver.log
    maxBytes: 100000000
    backupCount: 3
    filters: [context]


root:
  level: INFO
  handlers: [console]  # to use file handler instead, switch to [file]

loggers:
  synapse:
    level: INFO

  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: INFO

  # example of enabling debugging for a component:
  #
  # synapse.federation.transport.server:
  #   level: DEBUG
@@ -1,4 +1,4 @@
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,6 +0,0 @@
# Using the Synapse Grafana dashboard

0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up additional recording rules

[File diff suppressed because it is too large]
@@ -1,4 +1,4 @@
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,153 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import pydot
import cgi
import simplejson as json
import datetime
import argparse

from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze

from six import string_types


def make_graph(file_name, room_id, file_prefix, limit):
    print "Reading lines"
    with open(file_name) as f:
        lines = f.readlines()

    print "Read lines"

    events = [FrozenEvent(json.loads(line)) for line in lines]

    print "Loaded events."

    events.sort(key=lambda e: e.depth)

    print "Sorted events"

    if limit:
        events = events[-int(limit):]

    node_map = {}

    graph = pydot.Dot(graph_name="Test")

    for event in events:
        t = datetime.datetime.fromtimestamp(
            float(event.origin_server_ts) / 1000
        ).strftime('%Y-%m-%d %H:%M:%S,%f')

        content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
        content = content.replace("\n", "<br/>\n")

        print content
        content = []
        for key, value in unfreeze(event.get_dict()["content"]).items():
            if value is None:
                value = "<null>"
            elif isinstance(value, string_types):
                pass
            else:
                value = json.dumps(value)

            content.append(
                "<b>%s</b>: %s," % (
                    cgi.escape(key, quote=True).encode("ascii", 'xmlcharrefreplace'),
                    cgi.escape(value, quote=True).encode("ascii", 'xmlcharrefreplace'),
                )
            )

        content = "<br/>\n".join(content)

        print content

        label = (
            "<"
            "<b>%(name)s </b><br/>"
            "Type: <b>%(type)s </b><br/>"
            "State key: <b>%(state_key)s </b><br/>"
            "Content: <b>%(content)s </b><br/>"
            "Time: <b>%(time)s </b><br/>"
            "Depth: <b>%(depth)s </b><br/>"
            ">"
        ) % {
            "name": event.event_id,
            "type": event.type,
            "state_key": event.get("state_key", None),
            "content": content,
            "time": t,
            "depth": event.depth,
        }

        node = pydot.Node(
            name=event.event_id,
            label=label,
        )

        node_map[event.event_id] = node
        graph.add_node(node)

    print "Created Nodes"

    for event in events:
        for prev_id, _ in event.prev_events:
            try:
                end_node = node_map[prev_id]
            except:
                end_node = pydot.Node(
                    name=prev_id,
                    label="<<b>%s</b>>" % (prev_id,),
                )

                node_map[prev_id] = end_node
                graph.add_node(end_node)

            edge = pydot.Edge(node_map[event.event_id], end_node)
            graph.add_edge(edge)

    print "Created edges"

    graph.write('%s.dot' % file_prefix, format='raw', prog='dot')

    print "Created Dot"

    graph.write_svg("%s.svg" % file_prefix, prog='dot')

    print "Created svg"

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate a PDU graph for a given room by reading "
                    "from a file with line delimited events. \n"
                    "Requires pydot."
    )
    parser.add_argument(
        "-p", "--prefix", dest="prefix",
        help="String to prefix output files with",
        default="graph_output"
    )
    parser.add_argument(
        "-l", "--limit",
        help="Only retrieve the last N events.",
    )
    parser.add_argument('event_file')
    parser.add_argument('room')

    args = parser.parse_args()

    make_graph(args.event_file, args.room, args.prefix, args.limit)
@@ -1,37 +0,0 @@
This directory contains some sample monitoring config for using the
'Prometheus' monitoring server against synapse.

To use it, first install prometheus by following the instructions at

    http://prometheus.io/

### for Prometheus v1
Add a new job to the main prometheus.conf file:

    job: {
      name: "synapse"

      target_group: {
        target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
      }
    }

### for Prometheus v2
Add a new job to the main prometheus.yml file:

    - job_name: "synapse"
      metrics_path: "/_synapse/metrics"
      # when endpoint uses https:
      scheme: "https"

      static_configs:
        - targets: ['SERVER.LOCATION:PORT']

To use `synapse.rules` add

    rule_files:
      - "/PATH/TO/synapse-v2.rules"

Metrics are disabled by default when running synapse; they must be enabled
with the 'enable-metrics' option, either in the synapse config file or as a
command-line option.
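As a sketch, the config-file form is a one-line setting in homeserver.yaml
(the YAML spelling `enable_metrics` is an assumption based on later synapse
releases, so verify it against the config documentation for your release):

    # hypothetical fragment - verify the option name for your release
    enable_metrics: true
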
@@ -1,395 +0,0 @@
{{ template "head" . }}

{{ template "prom_content_head" . }}
<h1>System Resources</h1>

<h3>CPU</h3>
<div id="process_resource_utime"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_resource_utime"),
  expr: "rate(process_cpu_seconds_total[2m]) * 100",
  name: "[[job]]",
  min: 0,
  max: 100,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "%",
  yTitle: "CPU Usage"
})
</script>

<h3>Memory</h3>
<div id="process_resource_maxrss"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_resource_maxrss"),
  expr: "process_psutil_rss:max",
  name: "Maxrss",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "bytes",
  yTitle: "Usage"
})
</script>

<h3>File descriptors</h3>
<div id="process_fds"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_fds"),
  expr: "process_open_fds{job='synapse'}",
  name: "FDs",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "",
  yTitle: "Descriptors"
})
</script>

<h1>Reactor</h1>

<h3>Total reactor time</h3>
<div id="reactor_total_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_total_time"),
  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
  name: "time",
  max: 1,
  min: 0,
  renderer: "area",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Average reactor tick time</h3>
<div id="reactor_average_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_average_time"),
  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
  name: "time",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s",
  yTitle: "Time"
})
</script>

<h3>Pending calls per tick</h3>
<div id="reactor_pending_calls"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_pending_calls"),
  expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
  name: "calls",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yTitle: "Pending Calls"
})
</script>

<h1>Storage</h1>

<h3>Queries</h3>
<div id="synapse_storage_query_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_query_time"),
  expr: "rate(synapse_storage_query_time:count[2m])",
  name: "[[verb]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "queries/s",
  yTitle: "Queries"
})
</script>

<h3>Transactions</h3>
<div id="synapse_storage_transaction_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_transaction_time"),
  expr: "rate(synapse_storage_transaction_time:count[2m])",
  name: "[[desc]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "txn/s",
  yTitle: "Transactions"
})
</script>

<h3>Transaction execution time</h3>
<div id="synapse_storage_transactions_time_msec"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_transactions_time_msec"),
  expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
  name: "[[desc]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Database scheduling latency</h3>
<div id="synapse_storage_schedule_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_schedule_time"),
  expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
  name: "Total latency",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Cache hit ratio</h3>
<div id="synapse_cache_ratio"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_cache_ratio"),
  expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
  name: "[[name]]",
  min: 0,
  max: 100,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "%",
  yTitle: "Percentage"
})
</script>

<h3>Cache size</h3>
<div id="synapse_cache_size"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_cache_size"),
  expr: "synapse_util_caches_cache:size",
  name: "[[name]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Items"
})
</script>

<h1>Requests</h1>

<h3>Requests by Servlet</h3>
<div id="synapse_http_server_request_count_servlet"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_request_count_servlet"),
  expr: "rate(synapse_http_server_request_count:servlet[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>
<h4> (without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
<div id="synapse_http_server_request_count_servlet_minus_events"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
  expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Average response times</h3>
<div id="synapse_http_server_response_time_avg"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_time_avg"),
  expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/req",
  yTitle: "Response time"
})
</script>

<h3>All responses by code</h3>
<div id="synapse_http_server_responses"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_responses"),
  expr: "rate(synapse_http_server_responses[2m])",
  name: "[[method]] / [[code]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Error responses by code</h3>
<div id="synapse_http_server_responses_err"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_responses_err"),
  expr: "rate(synapse_http_server_responses{code=~\"[45]..\"}[2m])",
  name: "[[method]] / [[code]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>


<h3>CPU Usage</h3>
<div id="synapse_http_server_response_ru_utime"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_ru_utime"),
  expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "CPU Usage"
})
</script>


<h3>DB Usage</h3>
<div id="synapse_http_server_response_db_txn_duration"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
  expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "DB Usage"
})
</script>


<h3>Average event send times</h3>
<div id="synapse_http_server_send_time_avg"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_send_time_avg"),
  expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/req",
  yTitle: "Response time"
})
</script>

<h1>Federation</h1>

<h3>Sent Messages</h3>
<div id="synapse_federation_client_sent"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_client_sent"),
  expr: "rate(synapse_federation_client_sent[2m])",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Received Messages</h3>
<div id="synapse_federation_server_received"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_server_received"),
  expr: "rate(synapse_federation_server_received[2m])",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Pending</h3>
<div id="synapse_federation_transaction_queue_pending"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_transaction_queue_pending"),
  expr: "synapse_federation_transaction_queue_pending",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Units"
})
</script>

<h1>Clients</h1>

<h3>Notifiers</h3>
<div id="synapse_notifier_listeners"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_notifier_listeners"),
  expr: "synapse_notifier_listeners",
  name: "listeners",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Listeners"
})
</script>

<h3>Notified Events</h3>
<div id="synapse_notifier_notified_events"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_notifier_notified_events"),
  expr: "rate(synapse_notifier_notified_events[2m])",
  name: "events",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "events/s",
  yTitle: "Event rate"
})
</script>

{{ template "prom_content_tail" . }}

{{ template "tail" }}
@@ -1,21 +0,0 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)

synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)

synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)

synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])

synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)

synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)

synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0
@@ -1,60 +0,0 @@
groups:
- name: synapse
  rules:
  - record: "synapse_federation_transaction_queue_pendingEdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
  - record: "synapse_federation_transaction_queue_pendingPdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
  - record: 'synapse_http_server_request_count:method'
    labels:
      servlet: ""
    expr: "sum(synapse_http_server_request_count) by (method)"
  - record: 'synapse_http_server_request_count:servlet'
    labels:
      method: ""
    expr: 'sum(synapse_http_server_request_count) by (servlet)'

  - record: 'synapse_http_server_request_count:total'
    labels:
      servlet: ""
    expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'

  - record: 'synapse_cache:hit_ratio_5m'
    expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
  - record: 'synapse_cache:hit_ratio_30s'
    expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'

  - record: 'synapse_federation_client_sent'
    labels:
      type: "EDU"
    expr: 'synapse_federation_client_sent_edus + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "PDU"
    expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_client_sent_queries) by (job)'

  - record: 'synapse_federation_server_received'
    labels:
      type: "EDU"
    expr: 'synapse_federation_server_received_edus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "PDU"
    expr: 'synapse_federation_server_received_pdus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_server_received_queries) by (job)'

  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "EDU"
    expr: 'synapse_federation_transaction_queue_pending_edus + 0'
  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "PDU"
    expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
@@ -1,93 +0,0 @@
#!/usr/bin/env python
from argparse import ArgumentParser
import json
import requests
import sys
import urllib

def _mkurl(template, kws):
    for key in kws:
        template = template.replace(key, kws[key])
    return template

def main(hs, room_id, access_token, user_id_prefix, why):
    if not why:
        why = "Automated kick."
    print "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
    room_state_url = _mkurl(
        "$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
        {
            "$HS": hs,
            "$ROOM": room_id,
            "$TOKEN": access_token
        }
    )
    print "Getting room state => %s" % room_state_url
    res = requests.get(room_state_url)
    print "HTTP %s" % res.status_code
    state_events = res.json()
    if "error" in state_events:
        print "FATAL"
        print state_events
        return

    kick_list = []
    room_name = room_id
    for event in state_events:
        if not event["type"] == "m.room.member":
            if event["type"] == "m.room.name":
                room_name = event["content"].get("name")
            continue
        if not event["content"].get("membership") == "join":
            continue
        if event["state_key"].startswith(user_id_prefix):
            kick_list.append(event["state_key"])

    if len(kick_list) == 0:
        print "No user IDs match the prefix '%s'" % user_id_prefix
        return

    print "The following user IDs will be kicked from %s" % room_name
    for uid in kick_list:
        print uid
    doit = raw_input("Continue? [Y]es\n")
    if len(doit) > 0 and doit.lower() == 'y':
        print "Kicking members..."
        # encode them all
        kick_list = [urllib.quote(uid) for uid in kick_list]
        for uid in kick_list:
            kick_url = _mkurl(
                "$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN",
                {
                    "$HS": hs,
                    "$UID": uid,
                    "$ROOM": room_id,
                    "$TOKEN": access_token
                }
            )
            kick_body = {
                "membership": "leave",
                "reason": why
            }
            print "Kicking %s" % uid
            res = requests.put(kick_url, data=json.dumps(kick_body))
            if res.status_code != 200:
                print "ERROR: HTTP %s" % res.status_code
            if res.json().get("error"):
                print "ERROR: JSON %s" % res.json()



if __name__ == "__main__":
    parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")
    parser.add_argument("-u","--user-id",help="The user ID prefix e.g. '@irc_'")
    parser.add_argument("-t","--token",help="Your access_token")
    parser.add_argument("-r","--room",help="The room ID to kick members in")
    parser.add_argument("-s","--homeserver",help="The base HS url e.g. http://matrix.org")
    parser.add_argument("-w","--why",help="Reason for the kick. Optional.")
    args = parser.parse_args()
    if not args.room or not args.token or not args.user_id or not args.homeserver:
        parser.print_help()
        sys.exit(1)
    else:
        main(args.homeserver, args.room, args.token, args.user_id, args.why)
@@ -1,25 +0,0 @@
version: 1

# In systemd's journal, loglevel is implicitly stored, so let's omit it
# from the message text.
formatters:
    journal_fmt:
        format: '%(name)s: [%(request)s] %(message)s'

filters:
    context:
        (): synapse.util.logcontext.LoggingContextFilter
        request: ""

handlers:
    journal:
        class: systemd.journal.JournalHandler
        formatter: journal_fmt
        filters: [context]
        SYSLOG_IDENTIFIER: synapse

root:
    level: INFO
    handlers: [journal]

disable_existing_loggers: False
@@ -1,22 +0,0 @@
# This assumes that Synapse has been installed as a system package
# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
# rather than in a user home directory or similar under virtualenv.

# **NOTE:** This is an example service file that may change in the future. If you
# wish to use this please copy rather than symlink it.

[Unit]
Description=Synapse Matrix homeserver

[Service]
Type=simple
User=synapse
Group=synapse
WorkingDirectory=/var/lib/synapse
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml
# EnvironmentFile=-/etc/sysconfig/synapse # Can be used to e.g. set SYNAPSE_CACHE_FACTOR

[Install]
WantedBy=multi-user.target
@@ -126,26 +126,12 @@ sub on_unknown_event
         if (!$bridgestate->{$room_id}->{gathered_candidates}) {
             $bridgestate->{$room_id}->{gathered_candidates} = 1;
             my $offer = $bridgestate->{$room_id}->{offer};
-            my $candidate_block = {
-                audio => '',
-                video => '',
-            };
+            my $candidate_block = "";
             foreach (@{$event->{content}->{candidates}}) {
-                if ($_->{sdpMid}) {
-                    $candidate_block->{$_->{sdpMid}} .= "a=" . $_->{candidate} . "\r\n";
-                }
-                else {
-                    $candidate_block->{audio} .= "a=" . $_->{candidate} . "\r\n";
-                    $candidate_block->{video} .= "a=" . $_->{candidate} . "\r\n";
-                }
+                $candidate_block .= "a=" . $_->{candidate} . "\r\n";
             }
-            # XXX: assumes audio comes first
-            #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{audio}/;
-            #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{video}/;
-
-            $offer =~ s/(m=video)/$candidate_block->{audio}$1/;
-            $offer =~ s/(.$)/$1\n$candidate_block->{video}$1/;
+            # XXX: collate using the right m= line - for now assume audio call
+            $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/;

             my $f = send_verto_json_request("verto.invite", {
                 "sdp" => $offer,
@@ -186,18 +172,23 @@ sub on_room_message
     warn "[Matrix] in $room_id: $from: " . $content->{body} . "\n";
 }

+my $verto_connecting = $loop->new_future;
+$bot_verto->connect(
+    %{ $CONFIG{"verto-bot"} },
+    on_connected => sub {
+        warn("[Verto] connected to websocket");
+        $verto_connecting->done($bot_verto) if not $verto_connecting->is_done;
+    },
+    on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
+    on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
+);
+
 Future->needs_all(
     $bot_matrix->login( %{ $CONFIG{"matrix-bot"} } )->then( sub {
         $bot_matrix->start;
     }),
-    $bot_verto->connect(
-        %{ $CONFIG{"verto-bot"} },
-        on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
-        on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
-    )->on_done( sub {
-        warn("[Verto] connected to websocket");
-    }),
+    $verto_connecting,
 )->get;

 $loop->attach_signal(
@@ -86,7 +86,7 @@ sub create_virtual_user
         "user": "$localpart"
     }
 EOT
     )->get;
     warn $response->as_string if ($response->code != 200);
 }

@@ -266,21 +266,17 @@ my $as_url = $CONFIG{"matrix-bot"}->{as_url};

 Future->needs_all(
     $http->do_request(
         method => "POST",
         uri => URI->new( $CONFIG{"matrix"}->{server}."/_matrix/appservice/v1/register" ),
         content_type => "application/json",
         content => <<EOT
 {
     "as_token": "$as_token",
     "url": "$as_url",
-    "namespaces": { "users": [ { "regex": "\@\\\\+.*", "exclusive": false } ] }
+    "namespaces": { "users": ["\@\\\\+.*"] }
 }
 EOT
-    )->then( sub{
-        my ($response) = (@_);
-        warn $response->as_string if ($response->code != 200);
-        return Future->done;
-    }),
+    ),
     $verto_connecting,
 )->get;
@@ -7,9 +7,6 @@ matrix:
 matrix-bot:
     user_id: '@vertobot:matrix.org'
     password: ''
-    domain: 'matrix.org"
-    as_url: 'http://localhost:8009'
-    as_token: 'vertobot123'

 verto-bot:
     host: webrtc.freeswitch.org
@@ -11,4 +11,7 @@ requires 'YAML', 0;
 requires 'JSON', 0;
 requires 'Getopt::Long', 0;

+on 'test' => sub {
+    requires 'Test::More', '>= 0.98';
+};
@@ -11,9 +11,7 @@ if [ -f $PID_FILE ]; then
     exit 1
 fi

-for port in 8080 8081 8082; do
-    rm -rf $DIR/$port
-    rm -rf $DIR/media_store.$port
-done
+find "$DIR" -name "*.log" -delete
+find "$DIR" -name "*.db" -delete

 rm -rf $DIR/etc
@@ -8,49 +8,37 @@ cd "$DIR/.."

 mkdir -p demo/etc

-export PYTHONPATH=$(readlink -f $(pwd))
-
-echo $PYTHONPATH
+# Check the --no-rate-limit param
+PARAMS=""
+if [ $# -eq 1 ]; then
+    if [ $1 = "--no-rate-limit" ]; then
+        PARAMS="--rc-messages-per-second 1000 --rc-message-burst-count 1000"
+    fi
+fi

 for port in 8080 8081 8082; do
     echo "Starting server on port $port... "

     https_port=$((port + 400))
-    mkdir -p demo/$port
-    pushd demo/$port
-
-    #rm $DIR/etc/$port.config
     python -m synapse.app.homeserver \
         --generate-config \
+        --config-path "demo/etc/$port.config" \
+        -p "$https_port" \
+        --unsecure-port "$port" \
         -H "localhost:$https_port" \
-        --config-path "$DIR/etc/$port.config" \
-        --report-stats no
-
-    # Check script parameters
-    if [ $# -eq 1 ]; then
-        if [ $1 = "--no-rate-limit" ]; then
-            # Set high limits in config file to disable rate limiting
-            perl -p -i -e 's/rc_messages_per_second.*/rc_messages_per_second: 1000/g' $DIR/etc/$port.config
-            perl -p -i -e 's/rc_message_burst_count.*/rc_message_burst_count: 1000/g' $DIR/etc/$port.config
-        fi
-    fi
-
-    perl -p -i -e 's/^enable_registration:.*/enable_registration: true/g' $DIR/etc/$port.config
-
-    if ! grep -F "full_twisted_stacktraces" -q $DIR/etc/$port.config; then
-        echo "full_twisted_stacktraces: true" >> $DIR/etc/$port.config
-    fi
-    if ! grep -F "report_stats" -q $DIR/etc/$port.config ; then
-        echo "report_stats: false" >> $DIR/etc/$port.config
-    fi
+        -f "$DIR/$port.log" \
+        -d "$DIR/$port.db" \
+        -D --pid-file "$DIR/$port.pid" \
+        --manhole $((port + 1000)) \
+        --tls-dh-params-path "demo/demo.tls.dh" \
+        --media-store-path "demo/media_store.$port" \
+        $PARAMS $SYNAPSE_PARAMS \

     python -m synapse.app.homeserver \
-        --config-path "$DIR/etc/$port.config" \
-        -D \
+        --config-path "demo/etc/$port.config" \
         -vv \

-    popd
 done

 cd "$CWD"
@@ -1,63 +0,0 @@
ARG PYTHON_VERSION=2

###
### Stage 0: builder
###
FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder

# install the OS build deps

RUN apk add \
        build-base \
        libffi-dev \
        libjpeg-turbo-dev \
        libressl-dev \
        libxslt-dev \
        linux-headers \
        postgresql-dev \
        zlib-dev

# build things which have slow build steps, before we copy synapse, so that
# the layer can be cached.
#
# (we really just care about caching a wheel here, as the "pip install" below
# will install them again.)

RUN pip install --prefix="/install" --no-warn-script-location \
        cryptography \
        msgpack-python \
        pillow \
        pynacl

# now install synapse and all of the python deps to /install.

COPY . /synapse
RUN pip install --prefix="/install" --no-warn-script-location \
        lxml \
        psycopg2 \
        /synapse

###
### Stage 1: runtime
###

FROM docker.io/python:${PYTHON_VERSION}-alpine3.8

RUN apk add --no-cache --virtual .runtime_deps \
        libffi \
        libjpeg-turbo \
        libressl \
        libxslt \
        libpq \
        zlib \
        su-exec

COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py
COPY ./docker/conf /conf

VOLUME ["/data"]

EXPOSE 8008/tcp 8448/tcp

ENTRYPOINT ["/start.py"]
@@ -1,12 +0,0 @@
# Use the Sytest image that comes with a lot of the build dependencies
# pre-installed
FROM matrixdotorg/sytest:latest

# The Sytest image doesn't come with python, so install that
RUN apt-get -qq install -y python python-dev python-pip

# We need tox to run the tests in run_pg_tests.sh
RUN pip install tox

ADD run_pg_tests.sh /pg_tests.sh
ENTRYPOINT /pg_tests.sh
docker/README.md
@@ -1,125 +0,0 @@
# Synapse Docker

This Docker image will run Synapse as a single process. It does not provide a database
server or a TURN server, you should run these separately.

## Run

We do not currently offer a `latest` image, as this has somewhat undefined semantics.
We instead release only tagged versions so upgrading between releases is entirely
within your control.

### Using docker-compose (easier)

This image is designed to run either with an automatically generated configuration
file or with a custom configuration that requires manual editing.

An easy way to make use of this image is via docker-compose. See the
[contrib/docker](../contrib/docker) section of the synapse project for examples.

### Without Compose (harder)

If you do not wish to use Compose, you may still run this image using plain
Docker commands. Note that the following is just a guideline and you may need
to add parameters to the docker run command to account for the network situation
with your postgres database.

```
docker run \
    -d \
    --name synapse \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    docker.io/matrixdotorg/synapse:latest
```

## Volumes

The image expects a single volume, located at ``/data``, that will hold:

* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.

You are free to use separate volumes depending on storage endpoints at your
disposal. For instance, ``/data/media`` could be stored on a large but low
performance hdd storage while other files could be stored on high performance
endpoints.

In order to set up an application service, simply create an ``appservices``
directory in the data volume and write the application service Yaml
configuration file there. Multiple application services are supported.

## Environment

Unless you specify a custom path for the configuration file, a very generic
file will be generated, based on the following environment settings.
These are a good starting point for setting up your own deployment.

Global settings:

* ``UID``, the user id Synapse will run as [default 991]
* ``GID``, the group id Synapse will run as [default 991]
* ``SYNAPSE_CONFIG_PATH``, path to a custom config file

If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
then customize it manually. No other environment variable is required.

Otherwise, a dynamic configuration file will be used. The following environment
variables are available for configuration:

* ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname.
* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous
  statistics reporting back to the Matrix project which helps us to get funding.
* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
  you run your own TLS-capable reverse proxy).
* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
  the Synapse instance.
* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server.
* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
  uris to enable TURN for this homeserver.
* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
* ``SYNAPSE_MAX_UPLOAD_SIZE``, set this variable to change the max upload size [default `10M`].

Shared secrets, that will be initialized to random values if not set:

* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
  registration is disabled.
* ``SYNAPSE_MACAROON_SECRET_KEY`` secret for signing access tokens
  to the server.

Database specific values (will use SQLite if not set):

* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]

Mail server specific values (will not send emails if not set):

* ``SYNAPSE_SMTP_HOST``, hostname to the mail server.
* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if any.
* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server if any.

## Build

Build the docker image with the `docker build` command from the root of the synapse repository.

```
docker build -t docker.io/matrixdotorg/synapse . -f docker/Dockerfile
```

The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.

You may have a local Python wheel cache available, in which case copy the relevant
packages in the ``cache/`` directory at the root of the project.
@@ -1,219 +0,0 @@
# vim:ft=yaml

## TLS ##

tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh"
no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }}
tls_fingerprints: []

## Server ##

server_name: "{{ SYNAPSE_SERVER_NAME }}"
pid_file: /homeserver.pid
web_client: False
soft_file_limit: 0

## Ports ##

listeners:
{% if not SYNAPSE_NO_TLS %}
  -
    port: 8448
    bind_addresses: ['0.0.0.0']
    type: http
    tls: true
    x_forwarded: false
    resources:
      - names: [client]
        compress: true
      - names: [federation]  # Federation APIs
        compress: false
{% endif %}

  - port: 8008
    tls: false
    bind_addresses: ['0.0.0.0']
    type: http
    x_forwarded: false

    resources:
      - names: [client]
        compress: true
      - names: [federation]
        compress: false

## Database ##

{% if POSTGRES_PASSWORD %}
database:
  name: "psycopg2"
  args:
    user: "{{ POSTGRES_USER or "synapse" }}"
    password: "{{ POSTGRES_PASSWORD }}"
    database: "{{ POSTGRES_DB or "synapse" }}"
    host: "{{ POSTGRES_HOST or "db" }}"
    port: "{{ POSTGRES_PORT or "5432" }}"
    cp_min: 5
    cp_max: 10
{% else %}
database:
  name: "sqlite3"
  args:
    database: "/data/homeserver.db"
{% endif %}

## Performance ##

event_cache_size: "{{ SYNAPSE_EVENT_CACHE_SIZE or "10K" }}"
verbose: 0
log_file: "/data/homeserver.log"
log_config: "/compiled/log.config"

## Ratelimiting ##

rc_messages_per_second: 0.2
rc_message_burst_count: 10.0
federation_rc_window_size: 1000
federation_rc_sleep_limit: 10
federation_rc_sleep_delay: 500
federation_rc_reject_limit: 50
federation_rc_concurrent: 3

## Files ##

media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}"
max_image_pixels: "32M"
dynamic_thumbnails: false

# List of thumbnails to precalculate when an image is uploaded.
thumbnail_sizes:
- width: 32
  height: 32
  method: crop
- width: 96
  height: 96
  method: crop
- width: 320
  height: 240
  method: scale
- width: 640
  height: 480
  method: scale
- width: 800
  height: 600
  method: scale

url_preview_enabled: False
max_spider_size: "10M"

## Captcha ##

{% if SYNAPSE_RECAPTCHA_PUBLIC_KEY %}
recaptcha_public_key: "{{ SYNAPSE_RECAPTCHA_PUBLIC_KEY }}"
recaptcha_private_key: "{{ SYNAPSE_RECAPTCHA_PRIVATE_KEY }}"
enable_registration_captcha: True
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% else %}
recaptcha_public_key: "YOUR_PUBLIC_KEY"
recaptcha_private_key: "YOUR_PRIVATE_KEY"
enable_registration_captcha: False
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% endif %}

## Turn ##

{% if SYNAPSE_TURN_URIS %}
turn_uris:
{% for uri in SYNAPSE_TURN_URIS.split(',') %}    - "{{ uri }}"
{% endfor %}
turn_shared_secret: "{{ SYNAPSE_TURN_SECRET }}"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% else %}
turn_uris: []
turn_shared_secret: "YOUR_SHARED_SECRET"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% endif %}

## Registration ##

enable_registration: {{ "True" if SYNAPSE_ENABLE_REGISTRATION else "False" }}
registration_shared_secret: "{{ SYNAPSE_REGISTRATION_SHARED_SECRET }}"
bcrypt_rounds: 12
allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }}
enable_group_creation: true

# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
    - matrix.org
    - vector.im
    - riot.im

## Metrics ###

{% if SYNAPSE_REPORT_STATS.lower() == "yes" %}
enable_metrics: True
report_stats: True
{% else %}
enable_metrics: False
report_stats: False
{% endif %}

## API Configuration ##

room_invite_state_types:
    - "m.room.join_rules"
    - "m.room.canonical_alias"
    - "m.room.avatar"
    - "m.room.name"

{% if SYNAPSE_APPSERVICES %}
app_service_config_files:
{% for appservice in SYNAPSE_APPSERVICES %}    - "{{ appservice }}"
{% endfor %}
{% else %}
app_service_config_files: []
{% endif %}

macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
expire_access_token: False

## Signing Keys ##

signing_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.signing.key"
old_signing_keys: {}
key_refresh_interval: "1d" # 1 Day.

# The trusted servers to download signing keys from.
perspectives:
  servers:
    "matrix.org":
      verify_keys:
        "ed25519:auto":
          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"

password_config:
  enabled: true

{% if SYNAPSE_SMTP_HOST %}
email:
  enable_notifs: false
  smtp_host: "{{ SYNAPSE_SMTP_HOST }}"
  smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }}
  smtp_user: "{{ SYNAPSE_SMTP_USER }}"
  smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}"
  require_transport_security: False
  notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
  app_name: Matrix
  template_dir: res/templates
  notif_template_html: notif_mail.html
  notif_template_text: notif_mail.txt
  notif_for_new_users: True
  riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}"
{% endif %}
@@ -1,29 +0,0 @@
version: 1

formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]

loggers:
    synapse:
        level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

    synapse.storage.SQL:
        # beware: increasing this to DEBUG will make synapse log sensitive
        # information such as access tokens.
        level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

root:
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}
    handlers: [console]
@@ -1,20 +0,0 @@
#!/bin/bash

# This script runs the PostgreSQL tests inside a Docker container. It expects
# the relevant source files to be mounted into /src (done automatically by the
# caller script). It will set up the database, run it, and then use the tox
# configuration to run the tests.

set -e

# Set PGUSER so Synapse's tests know what user to connect to the database with
export PGUSER=postgres

# Initialise & start the database
su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres
su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres

# Run the tests
cd /src
export TRIAL_FLAGS="-j 4"
tox --workdir=/tmp -e py27-postgres
@@ -1,67 +0,0 @@
#!/usr/local/bin/python

import jinja2
import os
import sys
import subprocess
import glob
import codecs

# Utility functions
convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ))

def check_arguments(environ, args):
    for argument in args:
        if argument not in environ:
            print("Environment variable %s is mandatory, exiting." % argument)
            sys.exit(2)

def generate_secrets(environ, secrets):
    for name, secret in secrets.items():
        if secret not in environ:
            filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name)
            if os.path.exists(filename):
                with open(filename) as handle: value = handle.read()
            else:
                print("Generating a random secret for {}".format(name))
                value = codecs.encode(os.urandom(32), "hex").decode()
                with open(filename, "w") as handle: handle.write(value)
            environ[secret] = value

# Prepare the configuration
mode = sys.argv[1] if len(sys.argv) > 1 else None
environ = os.environ.copy()
ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991))
args = ["python", "-m", "synapse.app.homeserver"]

# In generate mode, generate a configuration, missing keys, then exit
if mode == "generate":
    check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS", "SYNAPSE_CONFIG_PATH"))
    args += [
        "--server-name", environ["SYNAPSE_SERVER_NAME"],
        "--report-stats", environ["SYNAPSE_REPORT_STATS"],
        "--config-path", environ["SYNAPSE_CONFIG_PATH"],
        "--generate-config"
    ]
    os.execv("/usr/local/bin/python", args)

# In normal mode, generate missing keys if any, then run synapse
else:
    # Parse the configuration file
    if "SYNAPSE_CONFIG_PATH" in environ:
        args += ["--config-path", environ["SYNAPSE_CONFIG_PATH"]]
    else:
        check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"))
        generate_secrets(environ, {
            "registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
            "macaroon": "SYNAPSE_MACAROON_SECRET_KEY"
        })
        environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml")
        if not os.path.exists("/compiled"): os.mkdir("/compiled")
        convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ)
        convert("/conf/log.config", "/compiled/log.config", environ)
        subprocess.check_output(["chown", "-R", ownership, "/data"])
        args += ["--config-path", "/compiled/homeserver.yaml"]
    # Generate missing keys and start synapse
    subprocess.check_output(args + ["--generate-keys"])
    os.execv("/sbin/su-exec", ["su-exec", ownership] + args)
@@ -1,29 +0,0 @@
Captcha can be enabled for this home server. This file explains how to do that.
The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google.

Getting keys
------------
Requires a public/private key pair from:

https://developers.google.com/recaptcha/


Setting ReCaptcha Keys
----------------------
The keys are a config option on the home server config. If they are not
visible, you can generate them via --generate-config. Set the following value::

  recaptcha_public_key: YOUR_PUBLIC_KEY
  recaptcha_private_key: YOUR_PRIVATE_KEY

In addition, you MUST enable captchas via::

  enable_registration_captcha: true

Configuring IP used for auth
----------------------------
The ReCaptcha API requires that the IP address of the user who solved the
captcha is sent. If the client is connecting through a proxy or load balancer,
it may be required to use the X-Forwarded-For (XFF) header instead of the origin
IP address. This can be configured using the x_forwarded directive in the
listeners section of the homeserver.yaml configuration file.
@@ -1,12 +0,0 @@
Admin APIs
==========

This directory includes documentation for the various synapse specific admin
APIs available.

Only users that are server admins can use these APIs. A user can be marked as a
server admin by updating the database directly, e.g.:

``UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'``

Restarting may be required for the changes to register.
@@ -1,23 +0,0 @@
# List all media in a room

This API gets a list of known media in a room.

The API is:
```
GET /_matrix/client/r0/admin/room/<room_id>/media
```
including an `access_token` of a server admin.

It returns a JSON body like the following:
```
{
    "local": [
        "mxc://localhost/xwvutsrqponmlkjihgfedcba",
        "mxc://localhost/abcdefghijklmnopqrstuvwx"
    ],
    "remote": [
        "mxc://matrix.org/xwvutsrqponmlkjihgfedcba",
        "mxc://matrix.org/abcdefghijklmnopqrstuvwx"
    ]
}
```
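As a rough illustration, this endpoint can be driven from Python with the standard library alone; the homeserver URL, room id and token below are placeholders, not values from this document:

```python
import json
from urllib.parse import quote
from urllib.request import urlopen

# Placeholder values - substitute your own homeserver, room and admin token.
HOMESERVER = "https://matrix.example.com"
ROOM_ID = "!roomid:example.com"
ACCESS_TOKEN = "secret_admin_token"

# Room IDs contain '!' and ':' so they must be percent-encoded in the path.
url = "%s/_matrix/client/r0/admin/room/%s/media?access_token=%s" % (
    HOMESERVER, quote(ROOM_ID, safe=""), ACCESS_TOKEN,
)
with urlopen(url) as resp:
    media = json.load(resp)

print("local media:", media["local"])
print("remote media:", media["remote"])
```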
@@ -1,63 +0,0 @@
Purge History API
=================

The purge history API allows server admins to purge historic events from their
database, reclaiming disk space.

Depending on the amount of history being purged a call to the API may take
several minutes or longer. During this period users will not be able to
paginate further back in the room from the point being purged from.

The API is:

``POST /_matrix/client/r0/admin/purge_history/<room_id>[/<event_id>]``

including an ``access_token`` of a server admin.

By default, events sent by local users are not deleted, as they may represent
the only copies of this content in existence. (Events sent by remote users are
deleted.)

Room state data (such as joins, leaves, topic) is always preserved.

To delete local message events as well, set ``delete_local_events`` in the body:

.. code:: json

   {
      "delete_local_events": true
   }

The caller must specify the point in the room to purge up to. This can be
specified by including an event_id in the URI, or by setting a
``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
id is given, that event (and others at the same graph depth) will be retained.
If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
in milliseconds.

The API starts the purge running, and returns immediately with a JSON body with
a purge id:

.. code:: json

    {
        "purge_id": "<opaque id>"
    }

Purge status query
------------------

It is possible to poll for updates on recent purges with a second API;

``GET /_matrix/client/r0/admin/purge_history_status/<purge_id>``

(again, with a suitable ``access_token``). This API returns a JSON body like
the following:

.. code:: json

    {
        "status": "active"
    }

The status will be one of ``active``, ``complete``, or ``failed``.
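To make the two-endpoint flow concrete, here is a minimal Python sketch (standard library only) that starts a purge and then polls its status; the homeserver URL, room id, token and timestamp are placeholders, not values from this document:

.. code:: python

    import json
    import time
    from urllib.parse import quote
    from urllib.request import Request, urlopen

    # Placeholder values - substitute your own homeserver, room and admin token.
    HOMESERVER = "https://matrix.example.com"
    ROOM_ID = "!roomid:example.com"
    ACCESS_TOKEN = "secret_admin_token"

    # Kick off a purge up to a given timestamp (ms since the unix epoch).
    purge_url = "%s/_matrix/client/r0/admin/purge_history/%s?access_token=%s" % (
        HOMESERVER, quote(ROOM_ID, safe=""), ACCESS_TOKEN,
    )
    body = json.dumps({"purge_up_to_ts": 1500000000000}).encode("utf8")
    req = Request(purge_url, data=body, headers={"Content-Type": "application/json"})
    purge_id = json.load(urlopen(req))["purge_id"]

    # Poll the status endpoint until the purge finishes.
    status_url = "%s/_matrix/client/r0/admin/purge_history_status/%s?access_token=%s" % (
        HOMESERVER, purge_id, ACCESS_TOKEN,
    )
    while True:
        status = json.load(urlopen(status_url))["status"]
        if status != "active":
            print("purge finished with status:", status)
            break
        time.sleep(5)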
@@ -1,17 +0,0 @@
Purge Remote Media API
======================

The purge remote media API allows server admins to purge old cached remote
media.

The API is::

    POST /_matrix/client/r0/admin/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>

    {}

Which will remove all cached media that was last accessed before
``<unix_timestamp_in_ms>``.

If the user re-requests purged remote media, synapse will re-request the media
from the originating server.
@@ -1,63 +0,0 @@
Shared-Secret Registration
==========================

This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.

To authenticate yourself to the server, you will need both the shared secret
(``registration_shared_secret`` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.

To fetch the nonce, you need to request one from the API::

  > GET /_matrix/client/r0/admin/register

  < {"nonce": "thisisanonce"}

Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and an HMAC digest of the content.

As an example::

  > POST /_matrix/client/r0/admin/register
  > {
     "nonce": "thisisanonce",
     "username": "pepper_roni",
     "password": "pizza",
     "admin": true,
     "mac": "mac_digest_here"
    }

  < {
     "access_token": "token_here",
     "user_id": "@pepper_roni:localhost",
     "home_server": "test",
     "device_id": "device_id_here"
    }

The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, and either
the string "admin" or "notadmin", each separated by NULs. For an example of
generation in Python::

  import hmac, hashlib

  def generate_mac(nonce, user, password, admin=False):

      mac = hmac.new(
        key=shared_secret,
        digestmod=hashlib.sha1,
      )

      mac.update(nonce.encode('utf8'))
      mac.update(b"\x00")
      mac.update(user.encode('utf8'))
      mac.update(b"\x00")
      mac.update(password.encode('utf8'))
      mac.update(b"\x00")
      mac.update(b"admin" if admin else b"notadmin")

      return mac.hexdigest()
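Building on the above, a minimal end-to-end sketch in Python tying the nonce fetch, the MAC computation and the registration ``POST`` together; the homeserver URL, shared secret and credentials are placeholders, not values from this document:

.. code:: python

    import hashlib
    import hmac
    import json
    from urllib.request import Request, urlopen

    # Placeholder values - substitute your homeserver and its configured secret.
    HOMESERVER = "https://matrix.example.com"
    SHARED_SECRET = b"registration_shared_secret_from_homeserver.yaml"

    REGISTER_URL = HOMESERVER + "/_matrix/client/r0/admin/register"

    # 1. Fetch a one-time nonce.
    nonce = json.load(urlopen(REGISTER_URL))["nonce"]

    # 2. Compute the HMAC-SHA1 over nonce, user, password and the admin flag,
    #    NUL-separated, keyed with the shared secret (as described above).
    mac = hmac.new(key=SHARED_SECRET, digestmod=hashlib.sha1)
    mac.update(nonce.encode("utf8"))
    mac.update(b"\x00")
    mac.update(b"pepper_roni")
    mac.update(b"\x00")
    mac.update(b"pizza")
    mac.update(b"\x00")
    mac.update(b"admin")

    # 3. POST the registration request.
    body = json.dumps({
        "nonce": nonce,
        "username": "pepper_roni",
        "password": "pizza",
        "admin": True,
        "mac": mac.hexdigest(),
    }).encode("utf8")
    req = Request(REGISTER_URL, data=body, headers={"Content-Type": "application/json"})
    print(json.load(urlopen(req)))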
@@ -1,86 +0,0 @@
Query Account
=============

This API returns information about a specific user account.

The api is::

    GET /_matrix/client/r0/admin/whois/<user_id>

including an ``access_token`` of a server admin.

It returns a JSON body like the following:

.. code:: json

    {
        "user_id": "<user_id>",
        "devices": {
            "": {
                "sessions": [
                    {
                        "connections": [
                            {
                                "ip": "1.2.3.4",
                                "last_seen": 1417222374433,
                                "user_agent": "Mozilla/5.0 ..."
                            },
                            {
                                "ip": "1.2.3.10",
                                "last_seen": 1417222374500,
                                "user_agent": "Dalvik/2.1.0 ..."
                            }
                        ]
                    }
                ]
            }
        }
    }

``last_seen`` is measured in milliseconds since the Unix epoch.

Deactivate Account
==================

This API deactivates an account. It removes active access tokens, resets the
password, and deletes third-party IDs (to prevent the user requesting a
password reset). It can also mark the user as GDPR-erased (stopping their data
from being distributed further, and deleting it entirely if there are no other
references to it).

The api is::

    POST /_matrix/client/r0/admin/deactivate/<user_id>

with a body of:

.. code:: json

    {
        "erase": true
    }

including an ``access_token`` of a server admin.

The erase parameter is optional and defaults to ``false``.
An empty body may be passed for backwards compatibility.


Reset password
==============

Changes the password of another user.

The api is::

    POST /_matrix/client/r0/admin/reset_password/<user_id>

with a body of:

.. code:: json

    {
        "new_password": "<secret>"
    }

including an ``access_token`` of a server admin.
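As a rough illustration, the reset-password call can be driven from Python with the standard library alone; the homeserver URL, user id, token and new password below are placeholders, not values from this document:

.. code:: python

    import json
    from urllib.parse import quote
    from urllib.request import Request, urlopen

    # Placeholder values - substitute your own homeserver, user and admin token.
    HOMESERVER = "https://matrix.example.com"
    USER_ID = "@alice:example.com"
    ACCESS_TOKEN = "secret_admin_token"

    # User IDs contain '@' and ':' so they must be percent-encoded in the path.
    url = "%s/_matrix/client/r0/admin/reset_password/%s?access_token=%s" % (
        HOMESERVER, quote(USER_ID, safe=""), ACCESS_TOKEN,
    )
    body = json.dumps({"new_password": "correct horse battery staple"}).encode("utf8")
    req = Request(url, data=body, headers={"Content-Type": "application/json"})
    urlopen(req)  # raises on a non-2xx response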
@@ -1,35 +0,0 @@
Registering an Application Service
==================================

The registration of new application services depends on the homeserver used.
In synapse, you need to create a new configuration file for your AS and add it
to the list specified under the ``app_service_config_files`` config
option in your synapse config.

For example:

.. code-block:: yaml

  app_service_config_files:
  - /home/matrix/.synapse/<your-AS>.yaml


The format of the AS configuration file is as follows:

.. code-block:: yaml

  url: <base url of AS>
  as_token: <token AS will add to requests to HS>
  hs_token: <token HS will add to requests to AS>
  sender_localpart: <localpart of AS user>
  namespaces:
    users:  # List of users we're interested in
      - exclusive: <bool>
        regex: <regex>
      - ...
    aliases: []  # List of aliases we're interested in
    rooms: []  # List of room ids we're interested in

See the spec_ for further details on how application services work.

.. _spec: https://matrix.org/docs/spec/application_service/unstable.html
@@ -1,119 +1,49 @@
-- Everything should comply with PEP8. Code should pass
-  ``pep8 --max-line-length=100`` without any warnings.
-
-- **Indenting**:
-
-  - NEVER tabs. 4 spaces to indent.
-
-  - follow PEP8; either hanging indent or multiline-visual indent depending
-    on the size and shape of the arguments and what makes more sense to the
-    author. In other words, both this::
-
-        print("I am a fish %s" % "moo")
-
-    and this::
-
-        print("I am a fish %s" %
-              "moo")
-
-    and this::
-
-        print(
-            "I am a fish %s" %
-            "moo",
-        )
-
-    ...are valid, although given each one takes up 2x more vertical space than
-    the previous, it's up to the author's discretion as to which layout makes
-    most sense for their function invocation. (e.g. if they want to add
-    comments per-argument, or put expressions in the arguments, or group
-    related arguments together, or want to deliberately extend or preserve
-    vertical/horizontal space)
-
-- **Line length**:
-
-  Max line length is 79 chars (with flexibility to overflow by a "few chars" if
+Basically, PEP8
+
+- NEVER tabs. 4 spaces to indent.
+- Max line width: 79 chars (with flexibility to overflow by a "few chars" if
   the overflowing content is not semantically significant and avoids an
   explosion of vertical whitespace).
-
-  Use parentheses instead of ``\`` for line continuation where ever possible
-  (which is pretty much everywhere).
-
-- **Naming**:
-
-  - Use camel case for class and type names
-  - Use underscores for functions and variables.
-
-  - Use double quotes ``"foo"`` rather than single quotes ``'foo'``.
-
-- **Blank lines**:
-
-  - There should be max a single new line between:
+- Use camel case for class and type names
+- Use underscores for functions and variables.
+- Use double quotes.
+- Use parentheses instead of '\\' for line continuation where ever possible
+  (which is pretty much everywhere)
+- There should be max a single new line between:
   - statements
   - functions in a class
-
-  - There should be two new lines between:
+- There should be two new lines between:
   - definitions in a module (e.g., between different classes)
-
-- **Whitespace**:
-
-  There should be spaces where spaces should be and not where there shouldn't
-  be:
-
-  - a single space after a comma
-  - a single space before and after for '=' when used as assignment
-  - no spaces before and after for '=' for default values and keyword arguments.
-
-- **Comments**: should follow the `google code style
-  <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
-  This is so that we can generate documentation with `sphinx
-  <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
-  `examples
-  <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
-  in the sphinx documentation.
-
-- **Imports**:
-
-  - Prefer to import classes and functions than packages or modules.
-
-    Example::
-
-      from synapse.types import UserID
-      ...
-      user_id = UserID(local, server)
-
-    is preferred over::
-
-      from synapse import types
-      ...
-      user_id = types.UserID(local, server)
-
-    (or any other variant).
-
-    This goes against the advice in the Google style guide, but it means that
-    errors in the name are caught early (at import time).
-
-  - Multiple imports from the same package can be combined onto one line::
-
-      from synapse.types import GroupID, RoomID, UserID
-
-    An effort should be made to keep the individual imports in alphabetical
-    order.
-
-    If the list becomes long, wrap it with parentheses and split it over
-    multiple lines.
-
-  - As per `PEP-8 <https://www.python.org/dev/peps/pep-0008/#imports>`_,
-    imports should be grouped in the following order, with a blank line between
-    each group:
-
-    1. standard library imports
-    2. related third party imports
-    3. local application/library specific imports
-
-  - Imports within each group should be sorted alphabetically by module name.
-
-  - Avoid wildcard imports (``from synapse.types import *``) and relative
-    imports (``from .types import UserID``).
+- There should be spaces where spaces should be and not where there shouldn't be:
+  - a single space after a comma
+  - a single space before and after for '=' when used as assignment
+  - no spaces before and after for '=' for default values and keyword arguments.
+- Indenting must follow PEP8; either hanging indent or multiline-visual indent
+  depending on the size and shape of the arguments and what makes more sense to
+  the author. In other words, both this::
+
+    print("I am a fish %s" % "moo")
+
+  and this::
+
+    print("I am a fish %s" %
+          "moo")
+
+  and this::
+
+    print(
+        "I am a fish %s" %
+        "moo"
+    )
+
+  ...are valid, although given each one takes up 2x more vertical space than
+  the previous, it's up to the author's discretion as to which layout makes most
+  sense for their function invocation. (e.g. if they want to add comments
+  per-argument, or put expressions in the arguments, or group related arguments
+  together, or want to deliberately extend or preserve vertical/horizontal
+  space)
+
+Comments should follow the google code style. This is so that we can generate
+documentation with sphinx (http://sphinxcontrib-napoleon.readthedocs.org/en/latest/)
+
+Code should pass pep8 --max-line-length=100 without any warnings.
@@ -1,160 +0,0 @@
Support in Synapse for tracking agreement to server terms and conditions
========================================================================

Synapse 0.30 introduces support for tracking whether users have agreed to the
terms and conditions set by the administrator of a server - and blocking access
to the server until they have.

There are several parts to this functionality; each requires some specific
configuration in `homeserver.yaml` to be enabled.

Note that various parts of the configuration and this document refer to the
"privacy policy": agreement with a privacy policy is one particular use of this
feature, but of course administrators can specify other terms and conditions
unrelated to "privacy" per se.

Collecting policy agreement from a user
---------------------------------------

Synapse can be configured to serve the user a simple policy form with an
"accept" button. Clicking "Accept" records the user's acceptance in the
database and shows a success page.

To enable this, first create templates for the policy and success pages.
These should be stored on the local filesystem.

These templates use the [Jinja2](http://jinja.pocoo.org) templating language,
and [docs/privacy_policy_templates](privacy_policy_templates) gives
examples of the sort of thing that can be done.

Note that the templates must be stored under a name giving the language of the
template - currently this must always be `en` (for "English");
internationalisation support is intended for the future.

The template for the policy itself should be versioned and named according to
the version: for example `1.0.html`. The version of the policy which the user
has agreed to is stored in the database.

Once the templates are in place, make the following changes to `homeserver.yaml`:

1. Add a `user_consent` section, which should look like:

   ```yaml
   user_consent:
     template_dir: privacy_policy_templates
     version: 1.0
   ```

   `template_dir` points to the directory containing the policy
   templates. `version` defines the version of the policy which will be served
   to the user. In the example above, Synapse will serve
   `privacy_policy_templates/en/1.0.html`.

2. Add a `form_secret` setting at the top level:

   ```yaml
   form_secret: "<unique secret>"
   ```

   This should be set to an arbitrary secret string (try `pwgen -y 30` to
   generate suitable secrets).

   More on what this is used for below.

3. Add `consent` wherever the `client` resource is currently enabled in the
   `listeners` configuration. For example:

   ```yaml
   listeners:
     - port: 8008
       resources:
         - names:
           - client
           - consent
   ```

Finally, ensure that `jinja2` is installed. If you are using a virtualenv, this
should be a matter of `pip install Jinja2`. On debian, try `apt-get install
python-jinja2`.

Once this is complete, and the server has been restarted, try visiting
`https://<server>/_matrix/consent`. If correctly configured, this should give
an error "Missing string query parameter 'u'". It is now possible to manually
construct URIs where users can give their consent.

### Constructing the consent URI

It may be useful to manually construct the "consent URI" for a given user - for
instance, in order to send them an email asking them to consent. To do this,
take the base `https://<server>/_matrix/consent` URL and add the following
query parameters:

* `u`: the user id of the user. This can either be a full MXID
  (`@user:server.com`) or just the localpart (`user`).

* `h`: hex-encoded HMAC-SHA256 of `u` using the `form_secret` as a key. It is
  possible to calculate this on the commandline with something like:

  ```bash
  echo -n '<user>' | openssl sha256 -hmac '<form_secret>'
  ```

This should result in a URI which looks something like:
`https://<server>/_matrix/consent?u=<user>&h=68a152465a4d...`.
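The same digest can be computed in Python with the standard library; the user and secret values here are placeholders:

```python
import hashlib
import hmac

# Placeholder values - use the real localpart/MXID and your form_secret.
user = "@user:server.com"
form_secret = "<form_secret>"

# Hex-encoded HMAC-SHA256 of the user id, keyed with form_secret.
h = hmac.new(
    key=form_secret.encode("utf8"),
    msg=user.encode("utf8"),
    digestmod=hashlib.sha256,
).hexdigest()

print("https://<server>/_matrix/consent?u=%s&h=%s" % (user, h))
```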
Sending users a server notice asking them to agree to the policy
----------------------------------------------------------------

It is possible to configure Synapse to send a [server
notice](server_notices.md) to anybody who has not yet agreed to the current
version of the policy. To do so:

* ensure that the consent resource is configured, as in the previous section

* ensure that server notices are configured, as in [server_notices.md](server_notices.md).

* Add `server_notice_content` under `user_consent` in `homeserver.yaml`. For
  example:

  ```yaml
  user_consent:
    server_notice_content:
      msgtype: m.text
      body: >-
        Please give your consent to the privacy policy at %(consent_uri)s.
  ```

  Synapse automatically replaces the placeholder `%(consent_uri)s` with the
  consent uri for that user.

* ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
  URI that clients use to connect to the server. (It is used to construct
  `consent_uri` in the server notice.)


Blocking users from using the server until they agree to the policy
-------------------------------------------------------------------

Synapse can be configured to block any attempts to join rooms or send messages
until the user has given their agreement to the policy. (Joining the server
notices room is exempted from this.)

To enable this, add `block_events_error` under `user_consent`. For example:

```yaml
user_consent:
  block_events_error: >-
    You can't send any messages until you consent to the privacy policy at
    %(consent_uri)s.
```

Synapse automatically replaces the placeholder `%(consent_uri)s` with the
consent uri for that user.

Ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
URI that clients use to connect to the server. (It is used to construct
`consent_uri` in the error.)
@@ -1,442 +0,0 @@
|
|||||||
Log contexts
|
|
||||||
============
|
|
||||||
|
|
||||||
.. contents::
|
|
||||||
|
|
||||||
To help track the processing of individual requests, synapse uses a
|
|
||||||
'log context' to track which request it is handling at any given moment. This
|
|
||||||
is done via a thread-local variable; a ``logging.Filter`` is then used to fish
|
|
||||||
the information back out of the thread-local variable and add it to each log
|
|
||||||
record.
|
|
||||||
|
|
||||||
Logcontexts are also used for CPU and database accounting, so that we can track
|
|
||||||
which requests were responsible for high CPU use or database activity.
|
|
||||||
|
|
||||||
The ``synapse.util.logcontext`` module provides a facilities for managing the
|
|
||||||
current log context (as well as providing the ``LoggingContextFilter`` class).
|
|
||||||
|
|
||||||
Deferreds make the whole thing complicated, so this document describes how it
|
|
||||||
all works, and how to write code which follows the rules.
|
|
||||||
|
|
||||||
Logcontexts without Deferreds
|
|
||||||
-----------------------------
|
|
||||||
|
|
||||||
In the absence of any Deferred voodoo, things are simple enough. As with any
|
|
||||||
code of this nature, the rule is that our function should leave things as it
|
|
||||||
found them:
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
from synapse.util import logcontext # omitted from future snippets
|
|
||||||
|
|
||||||
def handle_request(request_id):
|
|
||||||
request_context = logcontext.LoggingContext()
|
|
||||||
|
|
||||||
calling_context = logcontext.LoggingContext.current_context()
|
|
||||||
logcontext.LoggingContext.set_current_context(request_context)
|
|
||||||
try:
|
|
||||||
request_context.request = request_id
|
|
||||||
do_request_handling()
|
|
||||||
logger.debug("finished")
|
|
||||||
finally:
|
|
||||||
logcontext.LoggingContext.set_current_context(calling_context)
|
|
||||||
|
|
||||||
def do_request_handling():
|
|
||||||
logger.debug("phew") # this will be logged against request_id
|
|
||||||
|
|
||||||
|
|
||||||
LoggingContext implements the context management methods, so the above can be
|
|
||||||
written much more succinctly as:
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
def handle_request(request_id):
|
|
||||||
with logcontext.LoggingContext() as request_context:
|
|
||||||
request_context.request = request_id
|
|
||||||
do_request_handling()
|
|
||||||
logger.debug("finished")
|
|
||||||
|
|
||||||
def do_request_handling():
|
|
||||||
logger.debug("phew")
|
|
||||||
|
|
||||||
|
|
||||||
Using logcontexts with Deferreds
|
|
||||||
--------------------------------
|
|
||||||
|
|
||||||
Deferreds — and in particular, ``defer.inlineCallbacks`` — break
|
|
||||||
the linear flow of code so that there is no longer a single entry point where
|
|
||||||
we should set the logcontext and a single exit point where we should remove it.
|
|
||||||
|
|
||||||
Consider the example above, where ``do_request_handling`` needs to do some
|
|
||||||
blocking operation, and returns a deferred:
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def handle_request(request_id):
|
|
||||||
with logcontext.LoggingContext() as request_context:
|
|
||||||
request_context.request = request_id
|
|
||||||
yield do_request_handling()
|
|
||||||
logger.debug("finished")
|
|
||||||
|
|
||||||
|
|
||||||
In the above flow:
|
|
||||||
|
|
||||||
* The logcontext is set
|
|
||||||
* ``do_request_handling`` is called, and returns a deferred
|
|
||||||
* ``handle_request`` yields the deferred
|
|
||||||
* The ``inlineCallbacks`` wrapper of ``handle_request`` returns a deferred
|
|
||||||
|
|
||||||
So we have stopped processing the request (and will probably go on to start
|
|
||||||
processing the next), without clearing the logcontext.
|
|
||||||
|
|
||||||
To circumvent this problem, synapse code assumes that, wherever you have a
|
|
||||||
deferred, you will want to yield on it. To that end, whereever functions return
|
|
||||||
a deferred, we adopt the following conventions:
|
|
||||||
|
|
||||||
**Rules for functions returning deferreds:**
|
|
||||||
|
|
||||||
* If the deferred is already complete, the function returns with the same
|
|
||||||
logcontext it started with.
|
|
||||||
* If the deferred is incomplete, the function clears the logcontext before
|
|
||||||
returning; when the deferred completes, it restores the logcontext before
|
|
||||||
running any callbacks.
|
|
||||||
|
|
||||||
That sounds complicated, but actually it means a lot of code (including the
|
|
||||||
example above) "just works". There are two cases:
|
|
||||||
|
|
||||||
* If ``do_request_handling`` returns a completed deferred, then the logcontext
|
|
||||||
will still be in place. In this case, execution will continue immediately
|
|
||||||
after the ``yield``; the "finished" line will be logged against the right
|
|
||||||
context, and the ``with`` block restores the original context before we
|
|
||||||
return to the caller.
|
|
||||||
|
|
||||||
* If the returned deferred is incomplete, ``do_request_handling`` clears the
|
|
||||||
logcontext before returning. The logcontext is therefore clear when
|
|
||||||
``handle_request`` yields the deferred. At that point, the ``inlineCallbacks``
|
|
||||||
wrapper adds a callback to the deferred, and returns another (incomplete)
|
|
||||||
deferred to the caller, and it is safe to begin processing the next request.
|
|
||||||
|
|
||||||
Once ``do_request_handling``'s deferred completes, it will reinstate the
|
|
||||||
logcontext, before running the callback added by the ``inlineCallbacks``
|
|
||||||
wrapper. That callback runs the second half of ``handle_request``, so again
|
|
||||||
the "finished" line will be logged against the right
|
|
||||||
context, and the ``with`` block restores the original context.
|
|
||||||
|
|
||||||
As an aside, it's worth noting that ``handle_request`` follows our rules -
|
|
||||||
though that only matters if the caller has its own logcontext which it cares
|
|
||||||
about.
|
|
||||||
|
|
||||||
The following sections describe pitfalls and helpful patterns when implementing
|
|
||||||
these rules.
|
|
||||||
|
|
||||||
Always yield your deferreds
|
|
||||||
---------------------------
|
|
||||||
|
|
||||||
Whenever you get a deferred back from a function, you should ``yield`` on it
|
|
||||||
as soon as possible. (Returning it directly to your caller is ok too, if you're
|
|
||||||
not doing ``inlineCallbacks``.) Do not pass go; do not do any logging; do not
|
|
||||||
call any other functions.
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def fun():
|
|
||||||
logger.debug("starting")
|
|
||||||
yield do_some_stuff() # just like this
|
|
||||||
|
|
||||||
d = more_stuff()
|
|
||||||
result = yield d # also fine, of course
|
|
||||||
|
|
||||||
defer.returnValue(result)
|
|
||||||
|
|
||||||
def nonInlineCallbacksFun():
|
|
||||||
logger.debug("just a wrapper really")
|
|
||||||
return do_some_stuff() # this is ok too - the caller will yield on
|
|
||||||
# it anyway.
|
|
||||||
|
|
||||||
Provided this pattern is followed all the way back up to the callchain to where
|
|
||||||
the logcontext was set, this will make things work out ok: provided
|
|
||||||
``do_some_stuff`` and ``more_stuff`` follow the rules above, then so will
|
|
||||||
``fun`` (as wrapped by ``inlineCallbacks``) and ``nonInlineCallbacksFun``.
|
|
||||||
|
|
||||||
It's all too easy to forget to ``yield``: for instance if we forgot that
|
|
||||||
``do_some_stuff`` returned a deferred, we might plough on regardless. This
|
|
||||||
leads to a mess; it will probably work itself out eventually, but not before
|
|
||||||
a load of stuff has been logged against the wrong content. (Normally, other
|
|
||||||
things will break, more obviously, if you forget to ``yield``, so this tends
|
|
||||||
not to be a major problem in practice.)
|
|
||||||

Of course sometimes you need to do something a bit fancier with your Deferreds
- not all code follows the linear A-then-B-then-C pattern. Notes on
implementing more complex patterns are in later sections.

Where you create a new Deferred, make it follow the rules
---------------------------------------------------------

Most of the time, a Deferred comes from another synapse function. Sometimes,
though, we need to make up a new Deferred, or we get a Deferred back from
external code. We need to make it follow our rules.

The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
which returns a deferred which will run its callbacks after a given number of
seconds. That might look like:

.. code:: python

    # not a logcontext-rules-compliant function
    def get_sleep_deferred(seconds):
        d = defer.Deferred()
        reactor.callLater(seconds, d.callback, None)
        return d

That doesn't follow the rules, but we can fix it by wrapping it with
``PreserveLoggingContext`` and ``yield``-ing on it:

.. code:: python

    @defer.inlineCallbacks
    def sleep(seconds):
        with PreserveLoggingContext():
            yield get_sleep_deferred(seconds)

This technique works equally for external functions which return deferreds,
or deferreds we have made ourselves.

You can also use ``logcontext.make_deferred_yieldable``, which just does the
boilerplate for you, so the above could be written:

.. code:: python

    def sleep(seconds):
        return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))

Fire-and-forget
---------------

Sometimes you want to fire off a chain of execution, but not wait for its
result. That might look a bit like this:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        # *don't* do this
        background_operation()

        logger.debug("Request handling complete")

    @defer.inlineCallbacks
    def background_operation():
        yield first_background_step()
        logger.debug("Completed first step")
        yield second_background_step()
        logger.debug("Completed second step")

The above code does a couple of steps in the background after
``do_request_handling`` has finished. The log lines are still logged against
the ``request_context`` logcontext, which may or may not be desirable. There
are two big problems with the above, however. The first problem is that, if
``background_operation`` returns an incomplete Deferred, it will expect its
caller to ``yield`` immediately, so will have cleared the logcontext. In this
example, that means that 'Request handling complete' will be logged without any
context.

The second problem, which is potentially even worse, is that when the Deferred
returned by ``background_operation`` completes, it will restore the original
logcontext. There is nothing waiting on that Deferred, so the logcontext will
leak into the reactor and possibly get attached to some arbitrary future
operation.

There are two potential solutions to this.

One option is to surround the call to ``background_operation`` with a
``PreserveLoggingContext`` call. That will reset the logcontext before
starting ``background_operation`` (so the context restored when the deferred
completes will be the empty logcontext), and will restore the current
logcontext before continuing the foreground process:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        # start background_operation off in the empty logcontext, to
        # avoid leaking the current context into the reactor.
        with PreserveLoggingContext():
            background_operation()

        # this will now be logged against the request context
        logger.debug("Request handling complete")

Obviously that option means that the operations done in
``background_operation`` would not be logged against a logcontext (though
that might be fixed by setting a different logcontext via a ``with
LoggingContext(...)`` in ``background_operation``).

The second option is to use ``logcontext.run_in_background``, which wraps a
function so that it doesn't reset the logcontext even when it returns an
incomplete deferred, and adds a callback to the returned deferred to reset the
logcontext. In other words, it turns a function that follows the Synapse rules
about logcontexts and Deferreds into one which behaves more like an external
function — the opposite operation to that described in the previous section.
It can be used like this:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        logcontext.run_in_background(background_operation)

        # this will now be logged against the request context
        logger.debug("Request handling complete")

Passing synapse deferreds into third-party functions
----------------------------------------------------

A typical example of this is where we want to collect together two or more
deferreds via ``defer.gatherResults``:

.. code:: python

    d1 = operation1()
    d2 = operation2()
    d3 = defer.gatherResults([d1, d2])

This is really a variation of the fire-and-forget problem above, in that we are
firing off ``d1`` and ``d2`` without yielding on them. The difference
is that we now have third-party code attached to their callbacks. Either way,
either technique given in the `Fire-and-forget`_ section will work.

Of course, the new Deferred returned by ``gatherResults`` needs to be wrapped
in order to make it follow the logcontext rules before we can yield it, as
described in `Where you create a new Deferred, make it follow the rules`_.

So, option one: reset the logcontext before starting the operations to be
gathered:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        with PreserveLoggingContext():
            d1 = operation1()
            d2 = operation2()
            result = yield defer.gatherResults([d1, d2])

In this case particularly, though, option two, of using
``logcontext.preserve_fn``, almost certainly makes more sense, so that
``operation1`` and ``operation2`` are both logged against the original
logcontext. This looks like:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        d1 = logcontext.preserve_fn(operation1)()
        d2 = logcontext.preserve_fn(operation2)()

        with PreserveLoggingContext():
            result = yield defer.gatherResults([d1, d2])

Was all this really necessary?
------------------------------

The conventions used work fine for a linear flow where everything happens in
series via ``defer.inlineCallbacks`` and ``yield``, but are certainly tricky to
follow for any more exotic flows. It's hard not to wonder if we could have done
something else.

We're not going to rewrite Synapse now, so the following is entirely of
academic interest, but I'd like to record some thoughts on an alternative
approach.

I briefly prototyped some code following an alternative set of rules. I think
it would work, but I certainly didn't get as far as thinking how it would
interact with concepts as complicated as the cache descriptors.

My alternative rules were:

* functions always preserve the logcontext of their caller, whether or not they
  are returning a Deferred.

* Deferreds returned by synapse functions run their callbacks in the same
  context as the function was originally called in.

The main point of this scheme is that everywhere that sets the logcontext is
responsible for clearing it before returning control to the reactor.

So, for example, if you were the function which started a ``with
LoggingContext`` block, you wouldn't ``yield`` within it — instead you'd start
off the background process, and then leave the ``with`` block to wait for it:

.. code:: python

    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            d = do_request_handling()

        def cb(r):
            logger.debug("finished")

        d.addCallback(cb)
        return d

(in general, mixing ``with LoggingContext`` blocks and
``defer.inlineCallbacks`` in the same function leads to slightly
counter-intuitive code, under this scheme).

Because we leave the original ``with`` block as soon as the Deferred is
returned (as opposed to waiting for it to be resolved, as we do today), the
logcontext is cleared before control passes back to the reactor; so if there is
some code within ``do_request_handling`` which needs to wait for a Deferred to
complete, there is no need for it to worry about clearing the logcontext before
doing so:

.. code:: python

    def handle_request():
        r = do_some_stuff()
        r.addCallback(do_some_more_stuff)
        return r

— and provided ``do_some_stuff`` follows the rules of returning a Deferred which
runs its callbacks in the original logcontext, all is happy.

The business of a Deferred which runs its callbacks in the original logcontext
isn't hard to achieve — we have it today, in the shape of
``logcontext._PreservingContextDeferred``:

.. code:: python

    def do_some_stuff():
        deferred = do_some_io()
        pcd = _PreservingContextDeferred(LoggingContext.current_context())
        deferred.chainDeferred(pcd)
        return pcd

It turns out that, thanks to the way that Deferreds chain together, we
automatically get the property of a context-preserving deferred with
``defer.inlineCallbacks``, provided the final Deferred the function ``yields``
on has that property. So we can just write:

.. code:: python

    @defer.inlineCallbacks
    def handle_request():
        yield do_some_stuff()
        yield do_some_more_stuff()

To conclude: I think this scheme would have worked equally well, with less
danger of messing it up, and probably made some more esoteric code easier to
write. But again — changing the conventions of the entire Synapse codebase is
not a sensible option for the marginal improvement offered.
@@ -1,43 +0,0 @@

Using the synapse manhole
=========================

The "manhole" allows server administrators to access a Python shell on a running
Synapse installation. This is a very powerful mechanism for administration and
debugging.

To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`:

```yaml
listeners:
  - port: 9000
    bind_addresses: ['::1', '127.0.0.1']
    type: manhole
```

(`bind_addresses` in the above is important: it ensures that access to the
manhole is only possible for local users).

Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.

Then restart synapse, and point an ssh client at port 9000 on localhost, using
the username `matrix`:

```bash
ssh -p9000 matrix@localhost
```

The password is `rabbithole`.

This gives a Python REPL in which `hs` gives access to the
`synapse.server.HomeServer` object - which in turn gives access to many other
parts of the process.

As a simple example, retrieving an event from the database:

```
>>> hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org')
<Deferred at 0x7ff253fc6998 current result: <FrozenEvent event_id='$1416420717069yeQaw:matrix.org', type='m.room.create', state_key=''>>
```
@@ -1,180 +0,0 @@

How to monitor Synapse metrics using Prometheus
===============================================

1. Install Prometheus:

   Follow instructions at http://prometheus.io/docs/introduction/install/

2. Enable Synapse metrics:

   There are two methods of enabling metrics in Synapse.

   The first serves the metrics as a part of the usual web server and can be
   enabled by adding the "metrics" resource to the existing listener as such::

     resources:
       - names:
         - client
         - metrics

   This provides a simple way of adding metrics to your Synapse installation,
   and serves under ``/_synapse/metrics``. If you do not wish your metrics to
   be publicly exposed, you will need to either filter it out at your load
   balancer, or use the second method.

   The second method runs the metrics server on a different port, in a
   different thread to Synapse. This can make it more resilient when heavy
   load would otherwise prevent the metrics from being retrieved, and makes
   it easier to expose the metrics to internal networks only. The served
   metrics are available over HTTP only, and will be available at ``/``.

   Add a new listener to homeserver.yaml::

     listeners:
       - type: metrics
         port: 9000
         bind_addresses:
           - '0.0.0.0'

   For both options, you will need to ensure that ``enable_metrics`` is set to
   ``True``.

   Restart Synapse.

3. Add a Prometheus target for Synapse.

   It needs to set the ``metrics_path`` to a non-default value (under ``scrape_configs``)::

     - job_name: "synapse"
       metrics_path: "/_synapse/metrics"
       static_configs:
         - targets: ["my.server.here:9092"]

   If your prometheus is older than 1.5.2, you will need to replace
   ``static_configs`` in the above with ``target_groups``.
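
   If you chose the second method instead (the dedicated ``metrics``
   listener), the metrics are served at the root path. A sketch of the
   corresponding target, assuming the example listener above on port 9000,
   would be::

     - job_name: "synapse"
       metrics_path: "/"
       static_configs:
         - targets: ["my.server.here:9000"]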

   Restart Prometheus.


Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------

The duplicated metrics deprecated in Synapse 0.27.0 have been removed.

All time duration-based metrics have been changed to be seconds. This affects:

+----------------------------------+
| msec -> sec metrics              |
+==================================+
| python_gc_time                   |
+----------------------------------+
| python_twisted_reactor_tick_time |
+----------------------------------+
| synapse_storage_query_time       |
+----------------------------------+
| synapse_storage_schedule_time    |
+----------------------------------+
| synapse_storage_transaction_time |
+----------------------------------+

Several metrics have been changed to be histograms, which sort entries into
buckets and allow better analysis. The following metrics are now histograms:

+-------------------------------------------+
| Altered metrics                           |
+===========================================+
| python_gc_time                            |
+-------------------------------------------+
| python_twisted_reactor_pending_calls      |
+-------------------------------------------+
| python_twisted_reactor_tick_time          |
+-------------------------------------------+
| synapse_http_server_response_time_seconds |
+-------------------------------------------+
| synapse_storage_query_time                |
+-------------------------------------------+
| synapse_storage_schedule_time             |
+-------------------------------------------+
| synapse_storage_transaction_time          |
+-------------------------------------------+


Block and response metrics renamed for 0.27.0
---------------------------------------------

Synapse 0.27.0 begins the process of rationalising the duplicate ``*:count``
metrics reported for the resource tracking for code blocks and HTTP requests.

At the same time, the corresponding ``*:total`` metrics are being renamed, as
the ``:total`` suffix no longer makes sense in the absence of a corresponding
``:count`` metric.

To enable a graceful migration path, this release just adds new names for the
metrics being renamed. A future release will remove the old ones.

The following table shows the new metrics, and the old metrics which they are
replacing.

==================================================== ===================================================
New name                                             Old name
==================================================== ===================================================
synapse_util_metrics_block_count                     synapse_util_metrics_block_timer:count
synapse_util_metrics_block_count                     synapse_util_metrics_block_ru_utime:count
synapse_util_metrics_block_count                     synapse_util_metrics_block_ru_stime:count
synapse_util_metrics_block_count                     synapse_util_metrics_block_db_txn_count:count
synapse_util_metrics_block_count                     synapse_util_metrics_block_db_txn_duration:count

synapse_util_metrics_block_time_seconds              synapse_util_metrics_block_timer:total
synapse_util_metrics_block_ru_utime_seconds          synapse_util_metrics_block_ru_utime:total
synapse_util_metrics_block_ru_stime_seconds          synapse_util_metrics_block_ru_stime:total
synapse_util_metrics_block_db_txn_count              synapse_util_metrics_block_db_txn_count:total
synapse_util_metrics_block_db_txn_duration_seconds   synapse_util_metrics_block_db_txn_duration:total

synapse_http_server_response_count                   synapse_http_server_requests
synapse_http_server_response_count                   synapse_http_server_response_time:count
synapse_http_server_response_count                   synapse_http_server_response_ru_utime:count
synapse_http_server_response_count                   synapse_http_server_response_ru_stime:count
synapse_http_server_response_count                   synapse_http_server_response_db_txn_count:count
synapse_http_server_response_count                   synapse_http_server_response_db_txn_duration:count

synapse_http_server_response_time_seconds            synapse_http_server_response_time:total
synapse_http_server_response_ru_utime_seconds        synapse_http_server_response_ru_utime:total
synapse_http_server_response_ru_stime_seconds        synapse_http_server_response_ru_stime:total
synapse_http_server_response_db_txn_count            synapse_http_server_response_db_txn_count:total
synapse_http_server_response_db_txn_duration_seconds synapse_http_server_response_db_txn_duration:total
==================================================== ===================================================


Standard Metric Names
---------------------

As of synapse version 0.18.2, the format of the process-wide metrics has been
changed to fit prometheus standard naming conventions. Additionally the units
have been changed to seconds, from milliseconds.

================================== =============================
New name                           Old name
================================== =============================
process_cpu_user_seconds_total     process_resource_utime / 1000
process_cpu_system_seconds_total   process_resource_stime / 1000
process_open_fds (no 'type' label) process_fds
================================== =============================

The python-specific counts of garbage collector performance have been renamed.

=========================== ======================
New name                    Old name
=========================== ======================
python_gc_time              reactor_gc_time
python_gc_unreachable_total reactor_gc_unreachable
python_gc_counts            reactor_gc_counts
=========================== ======================

The twisted-specific reactor metrics have been renamed.

==================================== =====================
New name                             Old name
==================================== =====================
python_twisted_reactor_pending_calls reactor_pending_calls
python_twisted_reactor_tick_time     reactor_tick_time
==================================== =====================
@@ -1,99 +0,0 @@

Password auth provider modules
==============================

Password auth providers offer a way for server administrators to integrate
their Synapse installation with an existing authentication system.

A password auth provider is a Python class which is dynamically loaded into
Synapse, and provides a number of methods by which it can integrate with the
authentication system.

This document serves as a reference for those looking to implement their own
password auth providers.

Required methods
----------------

Password auth provider classes must provide the following methods:

*class* ``SomeProvider.parse_config``\(*config*)

  This method is passed the ``config`` object for this module from the
  homeserver configuration file.

  It should perform any appropriate sanity checks on the provided
  configuration, and return an object which is then passed into ``__init__``.

*class* ``SomeProvider``\(*config*, *account_handler*)

  The constructor is passed the config object returned by ``parse_config``,
  and a ``synapse.module_api.ModuleApi`` object which allows the
  password provider to check if accounts exist and/or create new ones.

Optional methods
----------------

Password auth provider classes may optionally provide the following methods.

*class* ``SomeProvider.get_db_schema_files``\()

  This method, if implemented, should return an Iterable of ``(name,
  stream)`` pairs of database schema files. Each file is applied in turn at
  initialisation, and a record is then made in the database so that it is
  not re-applied on the next start.

``someprovider.get_supported_login_types``\()

  This method, if implemented, should return a ``dict`` mapping from a login
  type identifier (such as ``m.login.password``) to an iterable giving the
  fields which must be provided by the user in the submission to the
  ``/login`` api. These fields are passed in the ``login_dict`` dictionary
  to ``check_auth``.

  For example, if a password auth provider wants to implement a custom login
  type of ``com.example.custom_login``, where the client is expected to pass
  the fields ``secret1`` and ``secret2``, the provider should implement this
  method and return the following dict::

      {"com.example.custom_login": ("secret1", "secret2")}

``someprovider.check_auth``\(*username*, *login_type*, *login_dict*)

  This method is the one that does the real work. If implemented, it will be
  called for each login attempt where the login type matches one of the keys
  returned by ``get_supported_login_types``.

  It is passed the (possibly UNqualified) ``user`` provided by the client,
  the login type, and a dictionary of login secrets passed by the client.

  The method should return a Twisted ``Deferred`` object, which resolves to
  the canonical ``@localpart:domain`` user id if authentication is successful,
  and ``None`` if not.

  Alternatively, the ``Deferred`` can resolve to a ``(str, func)`` tuple, in
  which case the second field is a callback which will be called with the
  result from the ``/login`` call (including ``access_token``, ``device_id``,
  etc.)

``someprovider.check_password``\(*user_id*, *password*)

  This method provides a simpler interface than ``get_supported_login_types``
  and ``check_auth`` for password auth providers that just want to provide a
  mechanism for validating ``m.login.password`` logins.

  If implemented, it will be called to check logins with an
  ``m.login.password`` login type. It is passed a qualified
  ``@localpart:domain`` user id, and the password provided by the user.

  The method should return a Twisted ``Deferred`` object, which resolves to
  ``True`` if authentication is successful, and ``False`` if not.

``someprovider.on_logged_out``\(*user_id*, *device_id*, *access_token*)

  This method, if implemented, is called when a user logs out. It is passed
  the qualified user ID, the ID of the deactivated device (if any: access
  tokens are occasionally created without an associated device ID), and the
  (now deactivated) access token.

  It may return a Twisted ``Deferred`` object; the logout request will wait
  for the deferred to complete but the result is ignored.
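
As an illustration, here is a minimal (and entirely hypothetical) provider
which validates logins against a single shared password from its config,
using the ``ModuleApi``'s ``check_user_exists`` helper::

    from twisted.internet import defer

    class SharedPasswordProvider(object):
        def __init__(self, config, account_handler):
            self.account_handler = account_handler
            self.shared_password = config

        @staticmethod
        def parse_config(config):
            # sanity-check the config and return the object passed to __init__
            return config["shared_password"]

        @defer.inlineCallbacks
        def check_password(self, user_id, password):
            # accept the login iff the password matches and the account exists
            if password != self.shared_password:
                defer.returnValue(False)
            canonical_id = yield self.account_handler.check_user_exists(user_id)
            defer.returnValue(canonical_id is not None)
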
@@ -1,136 +0,0 @@

Using Postgres
--------------

Postgres version 9.4 or later is known to work.

Set up database
===============

Assuming your PostgreSQL database user is called ``postgres``, create a user
``synapse_user`` with::

    su - postgres
    createuser --pwprompt synapse_user

The PostgreSQL database used *must* have the correct encoding set, otherwise it
would not be able to store UTF8 strings. To create a database with the correct
encoding use, e.g.::

    CREATE DATABASE synapse
     ENCODING 'UTF8'
     LC_COLLATE='C'
     LC_CTYPE='C'
     template=template0
     OWNER synapse_user;

This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).

Set up client in Debian/Ubuntu
==============================

Postgres support depends on the postgres python connector ``psycopg2``. In the
virtual env::

    sudo apt-get install libpq-dev
    pip install psycopg2

Set up client in RHEL/CentOS 7
==============================

Make sure you have the appropriate version of postgres-devel installed. For a
postgres 9.4, use the postgres 9.4 packages from
https://wiki.postgresql.org/wiki/YUM_Installation.

As with Debian/Ubuntu, postgres support depends on the postgres python connector
``psycopg2``. In the virtual env::

    sudo yum install postgresql-devel libpqxx-devel.x86_64
    export PATH=/usr/pgsql-9.4/bin/:$PATH
    pip install psycopg2

Synapse config
==============

When you are ready to start using PostgreSQL, edit the ``database`` section in
your config file to match the following lines::

    database:
        name: psycopg2
        args:
            user: <user>
            password: <pass>
            database: <db>
            host: <host>
            cp_min: 5
            cp_max: 10

All keys and values in ``args`` are passed to the ``psycopg2.connect(..)``
function, except keys beginning with ``cp_``, which are consumed by the twisted
adbapi connection pool.
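
As a quick sanity check that the ``args`` are valid, they can be passed
straight to ``psycopg2.connect`` yourself; a sketch (substitute your real
values)::

    import psycopg2

    # the cp_* keys are for the twisted connection pool, not for psycopg2
    conn = psycopg2.connect(user="synapse_user", password="secret",
                            database="synapse", host="localhost")
    cur = conn.cursor()
    cur.execute("SHOW SERVER_ENCODING")  # should report UTF8
    print(cur.fetchone())
    conn.close()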


Porting from SQLite
===================

Overview
~~~~~~~~

The script ``synapse_port_db`` allows porting an existing synapse server
backed by SQLite to using PostgreSQL. This is done as a two-phase process:

1. Copy the existing SQLite database to a separate location (while the server
   is down) and run the port script against that offline database.
2. Shut down the server. Rerun the port script to port any data that has come
   in since taking the first snapshot. Restart the server against the
   PostgreSQL database.

The port script is designed to be run repeatedly against newer snapshots of the
SQLite database file. This makes it safe to repeat step 1 if there was a delay
between taking the previous snapshot and being ready to do step 2.

It is safe to kill the port script at any time and restart it.

Using the port script
~~~~~~~~~~~~~~~~~~~~~

Firstly, shut down the currently running synapse server and copy its database
file (typically ``homeserver.db``) to another location. Once the copy is
complete, restart synapse. For instance::

    ./synctl stop
    cp homeserver.db homeserver.db.snapshot
    ./synctl start

Copy the old config file into a new config file::

    cp homeserver.yaml homeserver-postgres.yaml

Edit the database section as described in the section *Synapse config* above
and with the SQLite snapshot located at ``homeserver.db.snapshot`` simply run::

    synapse_port_db --sqlite-database homeserver.db.snapshot \
        --postgres-config homeserver-postgres.yaml

The flag ``--curses`` displays a coloured curses progress UI.

If the script took a long time to complete, or time has otherwise passed since
the original snapshot was taken, repeat the previous steps with a newer
snapshot.

To complete the conversion shut down the synapse server and run the port
script one last time, e.g. if the SQLite database is at ``homeserver.db``
run::

    synapse_port_db --sqlite-database homeserver.db \
        --postgres-config homeserver-postgres.yaml

Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file ``homeserver-postgres.yaml``::

    ./synctl stop
    mv homeserver.yaml homeserver-old-sqlite.yaml
    mv homeserver-postgres.yaml homeserver.yaml
    ./synctl start

Synapse should now be running against PostgreSQL.
@@ -1,23 +0,0 @@

<!doctype html>
<html lang="en">
  <head>
    <title>Matrix.org Privacy policy</title>
  </head>
  <body>
    {% if has_consented %}
    <p>
      Your base already belong to us.
    </p>
    {% else %}
    <p>
      All your base are belong to us.
    </p>
    <form method="post" action="consent">
      <input type="hidden" name="v" value="{{version}}"/>
      <input type="hidden" name="u" value="{{user}}"/>
      <input type="hidden" name="h" value="{{userhmac}}"/>
      <input type="submit" value="Sure thing!"/>
    </form>
    {% endif %}
  </body>
</html>
@@ -1,11 +0,0 @@

<!doctype html>
<html lang="en">
  <head>
    <title>Matrix.org Privacy policy</title>
  </head>
  <body>
    <p>
      Sweet.
    </p>
  </body>
</html>
@@ -1,40 +0,0 @@

Replication Architecture
========================

Motivation
----------

We'd like to be able to split some of the work that synapse does into multiple
python processes. In theory multiple synapse processes could share a single
postgresql database and we'd scale up by running more synapse processes.
However much of synapse assumes that only one process is interacting with the
database: for assigning unique identifiers when inserting into tables, for
notifying components about new updates, and for invalidating its caches.

So running multiple copies of the current code isn't an option. One way to
run multiple processes would be to have a single writer process and multiple
reader processes connected to the same database. In order to do this we'd need
a way for the reader process to invalidate its in-memory caches when an update
happens on the writer. One way to do this is for the writer to present an
append-only log of updates which the readers can consume to invalidate their
caches and to push updates to listening clients or pushers.
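
As a sketch of the reader side of that idea (the names here are hypothetical,
not Synapse's actual API): consume an append-only stream of
``(token, cache_name, keys)`` updates and invalidate the matching in-memory
cache entries::

    def process_updates(updates, caches, current_token):
        for token, cache_name, keys in updates:
            if token <= current_token:
                continue                      # already processed this update
            caches[cache_name].invalidate(tuple(keys))
            current_token = token
        return current_token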

Synapse already stores much of its data as an append-only log so that it can
correctly respond to /sync requests so the amount of code changes needed to
expose the append-only log to the readers should be fairly minimal.

Architecture
------------

The Replication Protocol
~~~~~~~~~~~~~~~~~~~~~~~~

See ``tcp_replication.rst``


The Slaved DataStore
~~~~~~~~~~~~~~~~~~~~

There are read-only versions of the synapse storage layer in
``synapse/replication/slave/storage`` that use the response of the replication
API to invalidate their caches.
@@ -1,74 +0,0 @@

Server Notices
==============

'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
channel whereby server administrators can send messages to users on the server.

They are used as part of communication of the server policies (see
[consent_tracking.md](consent_tracking.md)), however the intention is that
they may also find a use for features such as "Message of the day".

This is a feature specific to Synapse, but it uses standard Matrix
communication mechanisms, so should work with any Matrix client.

User experience
---------------

When the user is first sent a server notice, they will get an invitation to a
room (typically called 'Server Notices', though this is configurable in
`homeserver.yaml`). They will be **unable to reject** this invitation -
attempts to do so will receive an error.

Once they accept the invitation, they will see the notice message in the room
history; it will appear to have come from the 'server notices user' (see
below).

The user is prevented from sending any messages in this room by the power
levels.

Having joined the room, the user can leave the room if they want. Subsequent
server notices will then cause a new room to be created.

Synapse configuration
---------------------

Server notices come from a specific user id on the server. Server
administrators are free to choose the user id - something like `server` is
suggested, meaning the notices will come from
`@server:<your_server_name>`. Once the Server Notices user is configured, that
user id becomes a special, privileged user, so administrators should ensure
that **it is not already allocated**.

In order to support server notices, it is necessary to add some configuration
to the `homeserver.yaml` file. In particular, you should add a `server_notices`
section, which should look like this:

```yaml
server_notices:
   system_mxid_localpart: server
   system_mxid_display_name: "Server Notices"
   system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
   room_name: "Server Notices"
```

The only compulsory setting is `system_mxid_localpart`, which defines the user
id of the Server Notices user, as above. `room_name` defines the name of the
room which will be created.

`system_mxid_display_name` and `system_mxid_avatar_url` can be used to set the
displayname and avatar of the Server Notices user.

Sending notices
---------------

As of the current version of synapse, there is no convenient interface for
sending notices (other than the automated ones sent as part of consent
tracking).

In the meantime, it is possible to test this feature using the manhole. Having
gone into the manhole as described in [manhole.md](manhole.md), a notice can be
sent with something like:

```
>>> hs.get_server_notices_manager().send_notice('@user:server.com', {'msgtype':'m.text', 'body':'foo'})
```
@@ -50,7 +50,7 @@ master_doc = 'index'

# General information about the project.
project = u'Synapse'
copyright = u'Copyright 2014-2017 OpenMarket Ltd, 2017 Vector Creations Ltd, 2017 New Vector Ltd'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -1,223 +0,0 @@

TCP Replication
===============

Motivation
----------

Previously the workers used an HTTP long poll mechanism to get updates from the
master, which had the problem of causing a lot of duplicate work on the server.
This TCP protocol replaces those APIs with the aim of increased efficiency.


Overview
--------

The protocol is based on fire-and-forget, line-based commands. An example flow
would be (where '>' indicates master to worker and '<' worker to master flows)::

    > SERVER example.com
    < REPLICATE events 53
    > RDATA events 54 ["$foo1:bar.com", ...]
    > RDATA events 55 ["$foo4:bar.com", ...]

The example shows the server accepting a new connection and sending its identity
with the ``SERVER`` command, followed by the client asking to subscribe to the
``events`` stream from the token ``53``. The server then periodically sends ``RDATA``
commands which have the format ``RDATA <stream_name> <token> <row>``, where the
format of ``<row>`` is defined by the individual streams.

Error reporting happens by either the client or server sending an `ERROR`
command, and usually the connection will be closed.


Since the protocol is a simple line-based one, it's possible to manually connect
to the server using a tool like netcat. A few things should be noted when
manually using the protocol:

* When subscribing to a stream using ``REPLICATE``, the special token ``NOW`` can
  be used to get all future updates. The special stream name ``ALL`` can be used
  with ``NOW`` to subscribe to all available streams.
* The federation stream is only available if federation sending has been
  disabled on the main process.
* The server will only time connections out that have sent a ``PING`` command.
  If a ping is sent then the connection will be closed if no further commands
  are received within 15s. Both the client and server protocol implementations
  will send an initial PING on connection and ensure at least one command every
  5s is sent (not necessarily ``PING``).
* ``RDATA`` commands *usually* include a numeric token, however if the stream
  has multiple rows to replicate per token the server will send multiple
  ``RDATA`` commands, with all but the last having a token of ``batch``. See
  the documentation on ``commands.RdataCommand`` for further details.


Architecture
------------

The basic structure of the protocol is line based, where the initial word of
each line specifies the command. The rest of the line is parsed based on the
command. For example, the `RDATA` command is defined as::

    RDATA <stream_name> <token> <row_json>

(Note that `<row_json>` may contain spaces, but cannot contain newlines.)

Blank lines are ignored.
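
As an illustration, here is a minimal sketch of a line parser following those
rules (this is not Synapse's actual parser)::

    import json

    def parse_line(line):
        if not line.strip():
            return None                      # blank lines are ignored
        command, _, rest = line.partition(" ")
        if command == "RDATA":
            # the token may be numeric, or the special value "batch"
            stream_name, token, row_json = rest.split(" ", 2)
            return (command, stream_name, token, json.loads(row_json))
        return (command, rest)

    parse_line('RDATA events 54 ["$foo1:bar.com", null]')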


Keep alives
~~~~~~~~~~~

Both sides are expected to send at least one command every 5s or so, and
should send a ``PING`` command if necessary. If either side does not receive a
command within e.g. 15s then the connection should be closed.

Because the server may be connected to manually using e.g. netcat, the timeouts
aren't enabled until an initial ``PING`` command is seen. Both the client and
server implementations below send a ``PING`` command immediately on connection to
ensure the timeouts are enabled.

This ensures that both sides can quickly realize if the tcp connection has gone
and handle the situation appropriately.
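
In pseudocode, the keep-alive rule for each side amounts to something like the
following sketch (the connection helpers here are hypothetical)::

    import time

    SEND_EVERY = 5    # send at least one command every 5s
    TIMEOUT = 15      # close if nothing is received for 15s

    def on_tick(conn):
        now = time.time()
        if now - conn.last_sent_ts > SEND_EVERY:
            conn.send_ping()              # any command counts; PING if idle
        if conn.received_ping and now - conn.last_received_ts > TIMEOUT:
            conn.close()                  # timeouts apply only once a PING
                                          # has been seen on the connection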


Start up
~~~~~~~~

When a new connection is made, the server:

* Sends a ``SERVER`` command, which includes the identity of the server, allowing
  the client to detect if it's connected to the expected server
* Sends a ``PING`` command as above, to enable the client to time out connections
  promptly.

The client:

* Sends a ``NAME`` command, allowing the server to associate a human friendly
  name with the connection. This is optional.
* Sends a ``PING`` as above
* For each stream the client wishes to subscribe to, it sends a ``REPLICATE``
  with the stream_name and token it wants to subscribe from.
* On receipt of a ``SERVER`` command, checks that the server name matches the
  expected server name.


Error handling
~~~~~~~~~~~~~~

If either side detects an error it can send an ``ERROR`` command and close the
connection.

If the client side loses the connection to the server it should reconnect,
following the steps above.


Congestion
~~~~~~~~~~

If the server sends messages faster than the client can consume them the server
will first buffer a (fairly large) number of commands and then disconnect the
client. This ensures that we don't queue up an unbounded number of commands in
memory and gives us a potential opportunity to squawk loudly. When/if the client
recovers it can reconnect to the server and ask for missed messages.


Reliability
~~~~~~~~~~~

In general the replication stream should be considered an unreliable transport
since e.g. commands are not resent if the connection disappears.

The exception to that is the replication streams, i.e. RDATA commands, since
these include tokens which can be used to restart the stream on connection
errors.

The client should keep track of the token in the last RDATA command received
for each stream so that on reconnection it can start streaming from the correct
place. Note: not all RDATA have valid tokens due to batching. See
``RdataCommand`` for more details.


Example
~~~~~~~

An example interaction is shown below. Each line is prefixed with '>' or '<' to
indicate which side is sending, these are *not* included on the wire::

    * connection established *
    > SERVER localhost:8823
    > PING 1490197665618
    < NAME synapse.app.appservice
    < PING 1490197665618
    < REPLICATE events 1
    < REPLICATE backfill 1
    < REPLICATE caches 1
    > POSITION events 1
    > POSITION backfill 1
    > POSITION caches 1
    > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
    > RDATA events 14 ["$149019767112vOHxz:localhost:8823",
        "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
    < PING 1490197675618
    > ERROR server stopping
    * connection closed by server *

The ``POSITION`` command sent by the server is used to set the client's position
without needing to send data with the ``RDATA`` command.


An example of a batched set of ``RDATA`` is::

    > RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
    > RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]

In this case the client shouldn't advance their caches token until it sees
the last ``RDATA``.


List of commands
~~~~~~~~~~~~~~~~

The list of valid commands, with which side can send it: server (S) or client (C):

SERVER (S)
  Sent at the start to identify which server the client is talking to

RDATA (S)
  A single update in a stream

POSITION (S)
  The position of the stream has been updated

ERROR (S, C)
  There was an error

PING (S, C)
  Sent periodically to ensure the connection is still alive

NAME (C)
  Sent at the start by client to inform the server who they are

REPLICATE (C)
  Asks the server to replicate a given stream

USER_SYNC (C)
  A user has started or stopped syncing

FEDERATION_ACK (C)
  Acknowledge receipt of some federation data

REMOVE_PUSHER (C)
  Inform the server a pusher should be removed

INVALIDATE_CACHE (C)
  Inform the server a cache should be invalidated

SYNC (S, C)
  Used exclusively in tests


See ``synapse/replication/tcp/commands.py`` for a detailed description and the
format of each command.
@@ -9,35 +9,31 @@ the Home Server to generate credentials that are valid for use on the TURN
server through the use of a secret shared between the Home Server and the
TURN server.

This document describes how to install coturn
(https://github.com/coturn/coturn) which also supports the TURN REST API,
and integrate it with synapse.

coturn Setup
============

You may be able to setup coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process.

1. Check out coturn::

    git clone https://github.com/coturn/coturn.git coturn
    cd coturn

2. Configure it::

    ./configure

   You may need to install ``libevent2``: if so, you should do so
   in the way recommended by your operating system.
   You can ignore warnings about lack of database support: a
   database is unnecessary for this purpose.

3. Build and install it::

    make
    make install

4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant
   lines, with example values, are::

    lt-cred-mech
@@ -45,43 +41,19 @@ You may be able to setup coturn via your package manager, or set it up manually
    static-auth-secret=[your secret key here]
    realm=turn.myserver.org

   See turnserver.conf for explanations of the options.
   One way to generate the static-auth-secret is with pwgen::

    pwgen -s 64 1

5. Consider your security settings. TURN lets users request a relay
   which will connect to arbitrary IP addresses and ports. At the least
   we recommend:

    # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
    no-tcp-relay

    # don't let the relay ever try to connect to private IP address ranges within your network (if any)
    # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
    denied-peer-ip=10.0.0.0-10.255.255.255
    denied-peer-ip=192.168.0.0-192.168.255.255
    denied-peer-ip=172.16.0.0-172.31.255.255

    # special case the turn server itself so that client->TURN->TURN->client flows work
    allowed-peer-ip=10.0.0.1

    # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
    user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
    total-quota=1200

   Ideally coturn should refuse to relay traffic which isn't SRTP;
   see https://github.com/matrix-org/synapse/issues/2009

6. Ensure your firewall allows traffic into the TURN server on
   the ports you've configured it to listen on (remember to allow
   both TCP and UDP TURN traffic)

7. If you've configured coturn to support TLS/DTLS, generate or
   import your private key and certificate.

8. Start the turn server::

    bin/turnserver -o

@@ -106,19 +78,12 @@ Your home server configuration file needs the following extra keys:
   to refresh credentials. The TURN REST API specification recommends
   one day (86400000).

4. "turn_allow_guests": Whether to allow guest users to use the TURN
   server. This is enabled by default, as otherwise VoIP will not
   work reliably for guests. However, it does introduce a security risk
   as it lets guests connect to arbitrary endpoints without having gone
   through a CAPTCHA or similar to register a real account.

As an example, here is the relevant section of the config file for
matrix.org::

    turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
    turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
    turn_user_lifetime: 86400000
    turn_allow_guests: True

Now, restart synapse::

|
|||||||
@@ -1,76 +0,0 @@

URL Previews
============

Design notes on a URL previewing service for Matrix:

Options are:

1. Have an AS which listens for URLs, downloads them, and inserts an event that describes their metadata.
   * Pros:
     * Decouples the implementation entirely from Synapse.
     * Uses existing Matrix events & content repo to store the metadata.
   * Cons:
     * Which AS should provide this service for a room, and why should you trust it?
     * Doesn't work well with E2E; you'd have to cut the AS into every room
     * the AS would end up subscribing to every room anyway.

2. Have a generic preview API (nothing to do with Matrix) that provides a previewing service:
   * Pros:
     * Simple and flexible; can be used by any clients at any point
   * Cons:
     * If each HS provides one of these independently, all the HSes in a room may needlessly DoS the target URI
     * We need somewhere to store the URL metadata rather than just using Matrix itself
     * We can't piggyback on matrix to distribute the metadata between HSes.

3. Make the synapse of the sending user responsible for spidering the URL and inserting an event asynchronously which describes the metadata.
   * Pros:
     * Works transparently for all clients
     * Piggy-backs nicely on using Matrix for distributing the metadata.
     * No confusion as to which AS is responsible.
   * Cons:
     * Doesn't work with E2E
     * We might want to decouple the implementation of the spider from the HS, given spider behaviour can be quite complicated and evolve much more rapidly than the HS. It's more like a bot than a core part of the server.

4. Make the sending client use the preview API and insert the event itself when successful.
   * Pros:
     * Works well with E2E
     * No custom server functionality
     * Lets the client customise the preview that they send (like on FB)
   * Cons:
     * Entirely specific to the sending client, whereas it'd be nice if /any/ URL was correctly previewed if clients support it.

5. Have the option of specifying a shared (centralised) previewing service used by a room, to avoid all the different HSes in the room DoSing the target.

The best solution is probably a combination of 2 and 4:

* Sending clients do their best to create and send a preview at the point of sending the message, perhaps delaying the message until the preview is computed? (This also lets the user validate the preview before sending.)
* Receiving clients have the option of going and creating their own preview if one doesn't arrive soon enough (or if the original sender didn't create one).

This is a bit magical though in that the preview could come from two entirely different sources - the sending HS or your local one. However, this can always be exposed to users: "Generate your own URL previews if none are available?"

This is tantamount also to senders calculating their own thumbnails for sending in advance of the main content - we are trusting the sender not to lie about the content in the thumbnail. Whereas currently thumbnails are calculated by the receiving homeserver to avoid this attack.

However, this kind of phishing attack does exist whether we let senders pick their thumbnails or not, in that a malicious sender can send normal text messages around the attachment claiming it to be legitimate. We could rely on (future) reputation/abuse management to punish users who phish (be it with bogus metadata or bogus descriptions). Bogus metadata is particularly bad though, especially if it's avoidable.

As a first cut, let's do #2 and have the receiver hit the API to calculate its own previews (as it does currently for image thumbnails). We can then extend/optimise this to option 4 as a special extra if needed.

API
---

```
GET /_matrix/media/r0/preview_url?url=http://wherever.com
200 OK
{
    "og:type"        : "article",
    "og:url"         : "https://twitter.com/matrixdotorg/status/684074366691356672",
    "og:title"       : "Matrix on Twitter",
    "og:image"       : "https://pbs.twimg.com/profile_images/500400952029888512/yI0qtFi7_400x400.png",
    "og:description" : "“Synapse 0.12 is out! Lots of polishing, performance &amp; bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”",
    "og:site_name"   : "Twitter"
}
```

* Downloads the URL
* If HTML, just stores it in RAM and parses it for OG meta tags
  * Download any media OG meta tags to the media repo, and refer to them in the OG via mxc:// URIs.
  * If a media filetype we know we can thumbnail: store it on disk, and hand it to the thumbnailer. Generate OG meta tags from the thumbnailer contents.
  * Otherwise, don't bother downloading further.
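A hedged sketch of exercising the API above from the receiving side; the homeserver URL is a placeholder, and depending on deployment the endpoint may also require an access token:

```
import requests

resp = requests.get(
    "https://matrix.example.com/_matrix/media/r0/preview_url",
    params={"url": "https://twitter.com/matrixdotorg/status/684074366691356672"},
)
og = resp.json()
# OG metadata comes back as a flat dict of og:* keys, as shown above.
print(og.get("og:title"), og.get("og:image"))
```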
@@ -1,17 +0,0 @@

User Directory API Implementation
=================================

The user directory is currently maintained based on the 'visible' users
on this particular server - i.e. ones which your account shares a room with, or
who are present in a publicly viewable room present on the server.

The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
quickest solution to fix it is:

```
UPDATE user_directory_stream_pos SET stream_id = NULL;
```

and restart synapse, which should then start a background task to
flush the current tables and regenerate the directory.
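A minimal sketch of applying the same reset from a script, assuming a postgres-backed deployment (psycopg2 is already used elsewhere in this repo; the connection parameters are placeholders):

```
import psycopg2

conn = psycopg2.connect(dbname="synapse", user="synapse", host="localhost")
with conn, conn.cursor() as cur:
    # NULLing the stream position makes synapse rebuild the directory
    # tables via a background task on the next restart.
    cur.execute("UPDATE user_directory_stream_pos SET stream_id = NULL;")
```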
docs/workers.rst
@@ -1,271 +0,0 @@

Scaling synapse via workers
===========================

Synapse has experimental support for splitting out functionality into
multiple separate python processes, helping greatly with scalability. These
processes are called 'workers', and are (eventually) intended to scale
horizontally independently.

All of the below is highly experimental and subject to change as Synapse evolves,
but documenting it here to help folks needing highly scalable Synapses similar
to the one running matrix.org!

All processes continue to share the same database instance, and as such, workers
only work with postgres based synapse deployments (sharing a single sqlite
across multiple processes is a recipe for disaster, plus you should be using
postgres anyway if you care about scalability).

The workers communicate with the master synapse process via a synapse-specific
TCP protocol called 'replication' - analogous to MySQL or Postgres style
database replication; feeding a stream of relevant data to the workers so they
can be kept in sync with the main synapse process and database state.

Configuration
-------------

To make effective use of the workers, you will need to configure an HTTP
reverse-proxy such as nginx or haproxy, which will direct incoming requests to
the correct worker, or to the main synapse instance. Note that this includes
requests made to the federation port. The caveats regarding running a
reverse-proxy on the federation port still apply (see
https://github.com/matrix-org/synapse/blob/master/README.rst#reverse-proxying-the-federation-port).

To enable workers, you need to add two replication listeners to the master
synapse, e.g.::

    listeners:
      # The TCP replication port
      - port: 9092
        bind_address: '127.0.0.1'
        type: replication
      # The HTTP replication port
      - port: 9093
        bind_address: '127.0.0.1'
        type: http
        resources:
          - names: [replication]

Under **no circumstances** should these replication API listeners be exposed to
the public internet; they currently implement no authentication whatsoever and
are unencrypted.

(Roughly, the TCP port is used for streaming data from the master to the
workers, and the HTTP port for the workers to send data to the main
synapse process.)

You then create a set of configs for the various worker processes. These
should be worker configuration files, and should be stored in a dedicated
subdirectory, to allow synctl to manipulate them. An additional configuration
for the master synapse process will need to be created because the process will
not be started automatically. That configuration should look like this::

    worker_app: synapse.app.homeserver
    daemonize: true

Each worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that worker,
e.g. the HTTP listener that it provides (if any); logging configuration; etc.
You should minimise the number of overrides though to maintain a usable config.

You must specify the type of worker application (``worker_app``). The currently
available worker applications are listed below. You must also specify the
replication endpoints that it's talking to on the main synapse process.
``worker_replication_host`` should specify the host of the main synapse,
``worker_replication_port`` should point to the TCP replication listener port and
``worker_replication_http_port`` should point to the HTTP replication port.

Currently, the ``event_creator`` and ``federation_reader`` workers require specifying
``worker_replication_http_port``.

For instance::

    worker_app: synapse.app.synchrotron

    # The replication listener on the synapse to talk to.
    worker_replication_host: 127.0.0.1
    worker_replication_port: 9092
    worker_replication_http_port: 9093

    worker_listeners:
      - type: http
        port: 8083
        resources:
          - names:
            - client

    worker_daemonize: True
    worker_pid_file: /home/matrix/synapse/synchrotron.pid
    worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml

...is a full configuration for a synchrotron worker instance, which will expose a
plain HTTP ``/sync`` endpoint on port 8083 separately from the ``/sync`` endpoint provided
by the main synapse.

Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (``localhost:8083`` in the above example).

Finally, to actually run your worker-based synapse, you must pass synctl the -a
commandline option to tell it to operate on all the worker configurations found
in the given directory, e.g.::

    synctl -a $CONFIG/workers start

Currently one should always restart all workers when restarting or upgrading
synapse, unless you explicitly know it's safe not to. For instance, restarting
synapse without restarting all the synchrotrons may result in broken typing
notifications.

To manipulate a specific worker, you pass the -w option to synctl::

    synctl -w $CONFIG/workers/synchrotron.yaml restart


Available worker applications
-----------------------------

``synapse.app.pusher``
~~~~~~~~~~~~~~~~~~~~~~

Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set ``start_pushers: False`` in the
shared configuration file to stop the main synapse sending these notifications.

Note this worker cannot be load-balanced: only one instance should be active.

``synapse.app.synchrotron``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

The synchrotron handles ``sync`` requests from clients. In particular, it can
handle REST endpoints matching the following regular expressions::

    ^/_matrix/client/(v2_alpha|r0)/sync$
    ^/_matrix/client/(api/v1|v2_alpha|r0)/events$
    ^/_matrix/client/(api/v1|r0)/initialSync$
    ^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$

The above endpoints should all be routed to the synchrotron worker by the
reverse-proxy configuration.
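As an illustration, the routing rule amounts to the following check (a sketch
only - real deployments express this in the nginx/haproxy config; the regex
list simply mirrors the endpoints above)::

    import re

    SYNCHROTRON_PATHS = [
        r"^/_matrix/client/(v2_alpha|r0)/sync$",
        r"^/_matrix/client/(api/v1|v2_alpha|r0)/events$",
        r"^/_matrix/client/(api/v1|r0)/initialSync$",
        r"^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$",
    ]

    def routes_to_synchrotron(path):
        # Requests matching any synchrotron pattern go to the worker
        # (e.g. localhost:8083); everything else goes to the main synapse.
        return any(re.match(p, path) for p in SYNCHROTRON_PATHS)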
It is possible to run multiple instances of the synchrotron to scale
horizontally. In this case the reverse-proxy should be configured to
load-balance across the instances, though it will be more efficient if all
requests from a particular user are routed to a single instance. Extracting
a userid from the access token is currently left as an exercise for the reader.

``synapse.app.appservice``
~~~~~~~~~~~~~~~~~~~~~~~~~~

Handles sending output traffic to Application Services. Doesn't handle any
REST endpoints itself, but you should set ``notify_appservices: False`` in the
shared configuration file to stop the main synapse sending these notifications.

Note this worker cannot be load-balanced: only one instance should be active.

``synapse.app.federation_reader``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Handles a subset of federation endpoints. In particular, it can handle REST
endpoints matching the following regular expressions::

    ^/_matrix/federation/v1/event/
    ^/_matrix/federation/v1/state/
    ^/_matrix/federation/v1/state_ids/
    ^/_matrix/federation/v1/backfill/
    ^/_matrix/federation/v1/get_missing_events/
    ^/_matrix/federation/v1/publicRooms
    ^/_matrix/federation/v1/query/
    ^/_matrix/federation/v1/make_join/
    ^/_matrix/federation/v1/make_leave/
    ^/_matrix/federation/v1/send_join/
    ^/_matrix/federation/v1/send_leave/
    ^/_matrix/federation/v1/invite/
    ^/_matrix/federation/v1/query_auth/
    ^/_matrix/federation/v1/event_auth/
    ^/_matrix/federation/v1/exchange_third_party_invite/
    ^/_matrix/federation/v1/send/

The above endpoints should all be routed to the federation_reader worker by the
reverse-proxy configuration.

The `^/_matrix/federation/v1/send/` endpoint must only be handled by a single
instance.

``synapse.app.federation_sender``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Handles sending federation traffic to other servers. Doesn't handle any
REST endpoints itself, but you should set ``send_federation: False`` in the
shared configuration file to stop the main synapse sending this traffic.

Note this worker cannot be load-balanced: only one instance should be active.

``synapse.app.media_repository``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Handles the media repository. It can handle all endpoints starting with::

    /_matrix/media/

You should also set ``enable_media_repo: False`` in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository.

Note this worker cannot be load-balanced: only one instance should be active.

``synapse.app.client_reader``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Handles client API endpoints. It can handle REST endpoints matching the
following regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$

``synapse.app.user_dir``
~~~~~~~~~~~~~~~~~~~~~~~~

Handles searches in the user directory. It can handle REST endpoints matching
the following regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$

``synapse.app.frontend_proxy``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Proxies some frequently-requested client endpoints to add caching and remove
load from the main synapse. It can handle REST endpoints matching the following
regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/keys/upload

If ``use_presence`` is False in the homeserver config, it can also handle REST
endpoints matching the following regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/presence/[^/]+/status

This "stub" presence handler will pass through ``GET`` requests but make the
``PUT`` effectively a no-op.

It will proxy any requests it cannot handle to the main synapse instance. It
must therefore be configured with the location of the main instance, via
the ``worker_main_http_uri`` setting in the frontend_proxy worker configuration
file. For example::

    worker_main_http_uri: http://127.0.0.1:8008

``synapse.app.event_creator``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Handles some event creation. It can handle REST endpoints matching::

    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
    ^/_matrix/client/(api/v1|r0|unstable)/join/
    ^/_matrix/client/(api/v1|r0|unstable)/profile/

It will create events locally and then send them on to the main synapse
instance to be persisted and handled.
@@ -1,35 +0,0 @@

#! /bin/bash

set -eux

cd "`dirname $0`/.."

TOX_DIR=$WORKSPACE/.tox

mkdir -p $TOX_DIR

if ! [ $TOX_DIR -ef .tox ]; then
    ln -s "$TOX_DIR" .tox
fi

# set up the virtualenv
tox -e py27 --notest -v

TOX_BIN=$TOX_DIR/py27/bin

# cryptography 2.2 requires setuptools >= 18.5.
#
# older versions of virtualenv (?) give us a virtualenv with the same version
# of setuptools as is installed on the system python (and tox runs virtualenv
# under python3, so we get the version of setuptools that is installed on that).
#
# anyway, make sure that we have a recent enough setuptools.
$TOX_BIN/pip install 'setuptools>=18.5'

# we also need a semi-recent version of pip, because old ones fail to install
# the "enum34" dependency of cryptography.
$TOX_BIN/pip install 'pip>=10'

{ python synapse/python_dependencies.py
  echo lxml
} | xargs $TOX_BIN/pip install
@@ -1,30 +0,0 @@

[tool.towncrier]
    package = "synapse"
    filename = "CHANGES.md"
    directory = "changelog.d"
    issue_format = "[\\#{issue}](https://github.com/matrix-org/synapse/issues/{issue})"

    [[tool.towncrier.type]]
        directory = "feature"
        name = "Features"
        showcontent = true

    [[tool.towncrier.type]]
        directory = "bugfix"
        name = "Bugfixes"
        showcontent = true

    [[tool.towncrier.type]]
        directory = "doc"
        name = "Improved Documentation"
        showcontent = true

    [[tool.towncrier.type]]
        directory = "removal"
        name = "Deprecations and Removals"
        showcontent = true

    [[tool.towncrier.type]]
        directory = "misc"
        name = "Internal Changes"
        showcontent = true
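With this config, each changelog fragment lives in ``changelog.d`` as a file named for the PR or issue number plus one of the directories above (e.g. a hypothetical ``changelog.d/1234.feature`` containing a single sentence), and towncrier folds the fragments into ``CHANGES.md`` using the ``issue_format`` template.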
@@ -1,7 +0,0 @@

.header {
    border-bottom: 4px solid #e4f7ed ! important;
}

.notif_link a, .footer a {
    color: #76CFA6 ! important;
}
@@ -1,156 +0,0 @@

body {
    margin: 0px;
}

pre, code {
    word-break: break-word;
    white-space: pre-wrap;
}

#page {
    font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
    color: #454545;
    font-size: 12pt;
    width: 100%;
    padding: 20px;
}

#inner {
    width: 640px;
}

.header {
    width: 100%;
    height: 87px;
    color: #454545;
    border-bottom: 4px solid #e5e5e5;
}

.logo {
    text-align: right;
    margin-left: 20px;
}

.salutation {
    padding-top: 10px;
    font-weight: bold;
}

.summarytext {
}

.room {
    width: 100%;
    color: #454545;
    border-bottom: 1px solid #e5e5e5;
}

.room_header td {
    padding-top: 38px;
    padding-bottom: 10px;
    border-bottom: 1px solid #e5e5e5;
}

.room_name {
    vertical-align: middle;
    font-size: 18px;
    font-weight: bold;
}

.room_header h2 {
    margin-top: 0px;
    margin-left: 75px;
    font-size: 20px;
}

.room_avatar {
    width: 56px;
    line-height: 0px;
    text-align: center;
    vertical-align: middle;
}

.room_avatar img {
    width: 48px;
    height: 48px;
    object-fit: cover;
    border-radius: 24px;
}

.notif {
    border-bottom: 1px solid #e5e5e5;
    margin-top: 16px;
    padding-bottom: 16px;
}

.historical_message .sender_avatar {
    opacity: 0.3;
}

/* spell out opacity and historical_message class names for Outlook aka Word */
.historical_message .sender_name {
    color: #e3e3e3;
}

.historical_message .message_time {
    color: #e3e3e3;
}

.historical_message .message_body {
    color: #c7c7c7;
}

.historical_message td,
.message td {
    padding-top: 10px;
}

.sender_avatar {
    width: 56px;
    text-align: center;
    vertical-align: top;
}

.sender_avatar img {
    margin-top: -2px;
    width: 32px;
    height: 32px;
    border-radius: 16px;
}

.sender_name {
    display: inline;
    font-size: 13px;
    color: #a2a2a2;
}

.message_time {
    text-align: right;
    width: 100px;
    font-size: 11px;
    color: #a2a2a2;
}

.message_body {
}

.notif_link td {
    padding-top: 10px;
    padding-bottom: 10px;
    font-weight: bold;
}

.notif_link a, .footer a {
    color: #454545;
    text-decoration: none;
}

.debug {
    font-size: 10px;
    color: #888;
}

.footer {
    margin-top: 20px;
    text-align: center;
}
@@ -1,45 +0,0 @@

{% for message in notif.messages %}
    <tr class="{{ "historical_message" if message.is_historical else "message" }}">
        <td class="sender_avatar">
            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
                {% if message.sender_avatar_url %}
                    <img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
                {% else %}
                    {% if message.sender_hash % 3 == 0 %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
                    {% elif message.sender_hash % 3 == 1 %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
                    {% else %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
                    {% endif %}
                {% endif %}
            {% endif %}
        </td>
        <td class="message_contents">
            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
                <div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
            {% endif %}
            <div class="message_body">
                {% if message.msgtype == "m.text" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.emote" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.notice" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.image" %}
                    <img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
                {% elif message.msgtype == "m.file" %}
                    <span class="filename">{{ message.body_text_plain }}</span>
                {% endif %}
            </div>
        </td>
        <td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
    </tr>
{% endfor %}
<tr class="notif_link">
    <td></td>
    <td>
        <a href="{{ notif.link }}">View {{ room.title }}</a>
    </td>
    <td></td>
</tr>
@@ -1,16 +0,0 @@

{% for message in notif.messages %}
{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
{% if message.msgtype == "m.text" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.emote" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.notice" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.image" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.file" %}
{{ message.body_text_plain }}
{% endif %}
{% endfor %}

View {{ room.title }} at {{ notif.link }}
@@ -1,55 +0,0 @@

<!doctype html>
<html lang="en">
    <head>
        <style type="text/css">
            {% include 'mail.css' without context %}
            {% include "mail-%s.css" % app_name ignore missing without context %}
        </style>
    </head>
    <body>
        <table id="page">
            <tr>
                <td> </td>
                <td id="inner">
                    <table class="header">
                        <tr>
                            <td>
                                <div class="salutation">Hi {{ user_display_name }},</div>
                                <div class="summarytext">{{ summary_text }}</div>
                            </td>
                            <td class="logo">
                                {% if app_name == "Riot" %}
                                    <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
                                {% elif app_name == "Vector" %}
                                    <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
                                {% else %}
                                    <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
                                {% endif %}
                            </td>
                        </tr>
                    </table>
                    {% for room in rooms %}
                        {% include 'room.html' with context %}
                    {% endfor %}
                    <div class="footer">
                        <a href="{{ unsubscribe_link }}">Unsubscribe</a>
                        <br/>
                        <br/>
                        <div class="debug">
                            Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
                            an event was received at {{ reason.received_at|format_ts("%c") }}
                            which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
                            {% if reason.last_sent_ts %}
                                and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
                                which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
                            {% else %}
                                and we don't have a last time we sent a mail for this room.
                            {% endif %}
                        </div>
                    </div>
                </td>
                <td> </td>
            </tr>
        </table>
    </body>
</html>
@@ -1,10 +0,0 @@

Hi {{ user_display_name }},

{{ summary_text }}

{% for room in rooms %}
{% include 'room.txt' with context %}
{% endfor %}

You can disable these notifications at {{ unsubscribe_link }}
@@ -1,33 +0,0 @@

<table class="room">
    <tr class="room_header">
        <td class="room_avatar">
            {% if room.avatar_url %}
                <img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
            {% else %}
                {% if room.hash % 3 == 0 %}
                    <img alt="" src="https://vector.im/beta/img/76cfa6.png" />
                {% elif room.hash % 3 == 1 %}
                    <img alt="" src="https://vector.im/beta/img/50e2c2.png" />
                {% else %}
                    <img alt="" src="https://vector.im/beta/img/f4c371.png" />
                {% endif %}
            {% endif %}
        </td>
        <td class="room_name" colspan="2">
            {{ room.title }}
        </td>
    </tr>
    {% if room.invite %}
        <tr>
            <td></td>
            <td>
                <a href="{{ room.link }}">Join the conversation.</a>
            </td>
            <td></td>
        </tr>
    {% else %}
        {% for notif in room.notifs %}
            {% include 'notif.html' with context %}
        {% endfor %}
    {% endif %}
</table>
@@ -1,9 +0,0 @@

{{ room.title }}

{% if room.invite %}
You've been invited, join at {{ room.link }}
{% else %}
{% for notif in room.notifs %}
{% include 'notif.txt' with context %}
{% endfor %}
{% endif %}
@@ -1,116 +0,0 @@

import psycopg2
import yaml
import sys
import json
import time
import hashlib
from unpaddedbase64 import encode_base64
from signedjson.key import read_signing_keys
from signedjson.sign import sign_json
from canonicaljson import encode_canonical_json


def select_v1_keys(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, key_id, verify_key in rows:
        results.setdefault(server_name, {})[key_id] = encode_base64(verify_key)
    return results


def select_v1_certs(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, tls_certificate in rows:
        results[server_name] = tls_certificate
    return results


def select_v2_json(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, key_id, key_json in rows:
        results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8"))
    return results


def convert_v1_to_v2(server_name, valid_until, keys, certificate):
    return {
        "old_verify_keys": {},
        "server_name": server_name,
        "verify_keys": {
            key_id: {"key": key}
            for key_id, key in keys.items()
        },
        "valid_until_ts": valid_until,
        "tls_fingerprints": [fingerprint(certificate)],
    }


def fingerprint(certificate):
    finger = hashlib.sha256(certificate)
    return {"sha256": encode_base64(finger.digest())}


def rows_v2(server, json):
    valid_until = json["valid_until_ts"]
    key_json = encode_canonical_json(json)
    for key_id in json["verify_keys"]:
        yield (server, key_id, "-", valid_until, valid_until, buffer(key_json))


def main():
    config = yaml.load(open(sys.argv[1]))
    valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24

    server_name = config["server_name"]
    signing_key = read_signing_keys(open(config["signing_key_path"]))[0]

    database = config["database"]
    assert database["name"] == "psycopg2", "Can only convert for postgresql"
    args = database["args"]
    args.pop("cp_max")
    args.pop("cp_min")
    connection = psycopg2.connect(**args)
    keys = select_v1_keys(connection)
    certificates = select_v1_certs(connection)
    json = select_v2_json(connection)

    result = {}
    for server in keys:
        if not server in json:
            v2_json = convert_v1_to_v2(
                server, valid_until, keys[server], certificates[server]
            )
            v2_json = sign_json(v2_json, server_name, signing_key)
            result[server] = v2_json

    yaml.safe_dump(result, sys.stdout, default_flow_style=False)

    rows = list(
        row for server, json in result.items()
        for row in rows_v2(server, json)
    )

    cursor = connection.cursor()
    cursor.executemany(
        "INSERT INTO server_keys_json ("
        " server_name, key_id, from_server,"
        " ts_added_ms, ts_valid_until_ms, key_json"
        ") VALUES (%s, %s, %s, %s, %s, %s)",
        rows
    )
    connection.commit()


if __name__ == '__main__':
    main()
@@ -1,196 +0,0 @@

#! /usr/bin/python

import ast
import yaml


class DefinitionVisitor(ast.NodeVisitor):
    def __init__(self):
        super(DefinitionVisitor, self).__init__()
        self.functions = {}
        self.classes = {}
        self.names = {}
        self.attrs = set()
        self.definitions = {
            'def': self.functions,
            'class': self.classes,
            'names': self.names,
            'attrs': self.attrs,
        }

    def visit_Name(self, node):
        self.names.setdefault(type(node.ctx).__name__, set()).add(node.id)

    def visit_Attribute(self, node):
        self.attrs.add(node.attr)
        for child in ast.iter_child_nodes(node):
            self.visit(child)

    def visit_ClassDef(self, node):
        visitor = DefinitionVisitor()
        self.classes[node.name] = visitor.definitions
        for child in ast.iter_child_nodes(node):
            visitor.visit(child)

    def visit_FunctionDef(self, node):
        visitor = DefinitionVisitor()
        self.functions[node.name] = visitor.definitions
        for child in ast.iter_child_nodes(node):
            visitor.visit(child)


def non_empty(defs):
    functions = {name: non_empty(f) for name, f in defs['def'].items()}
    classes = {name: non_empty(f) for name, f in defs['class'].items()}
    result = {}
    if functions: result['def'] = functions
    if classes: result['class'] = classes
    names = defs['names']
    uses = []
    for name in names.get('Load', ()):
        if name not in names.get('Param', ()) and name not in names.get('Store', ()):
            uses.append(name)
    uses.extend(defs['attrs'])
    if uses: result['uses'] = uses
    result['names'] = names
    result['attrs'] = defs['attrs']
    return result


def definitions_in_code(input_code):
    input_ast = ast.parse(input_code)
    visitor = DefinitionVisitor()
    visitor.visit(input_ast)
    definitions = non_empty(visitor.definitions)
    return definitions


def definitions_in_file(filepath):
    with open(filepath) as f:
        return definitions_in_code(f.read())


def defined_names(prefix, defs, names):
    for name, funcs in defs.get('def', {}).items():
        names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
        defined_names(prefix + name + ".", funcs, names)

    for name, funcs in defs.get('class', {}).items():
        names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
        defined_names(prefix + name + ".", funcs, names)


def used_names(prefix, item, defs, names):
    for name, funcs in defs.get('def', {}).items():
        used_names(prefix + name + ".", name, funcs, names)

    for name, funcs in defs.get('class', {}).items():
        used_names(prefix + name + ".", name, funcs, names)

    path = prefix.rstrip('.')
    for used in defs.get('uses', ()):
        if used in names:
            if item:
                names[item].setdefault('uses', []).append(used)
            names[used].setdefault('used', {}).setdefault(item, []).append(path)


if __name__ == '__main__':
    import sys, os, argparse, re

    parser = argparse.ArgumentParser(description='Find definitions.')
    parser.add_argument(
        "--unused", action="store_true", help="Only list unused definitions"
    )
    parser.add_argument(
        "--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
    )
    parser.add_argument(
        "--pattern", action="append", metavar="REGEXP",
        help="Search for a pattern"
    )
    parser.add_argument(
        "directories", nargs='+', metavar="DIR",
        help="Directories to search for definitions"
    )
    parser.add_argument(
        "--referrers", default=0, type=int,
        help="Include referrers up to the given depth"
    )
    parser.add_argument(
        "--referred", default=0, type=int,
        help="Include referred down to the given depth"
    )
    parser.add_argument(
        "--format", default="yaml",
        help="Output format, one of 'yaml' or 'dot'"
    )
    args = parser.parse_args()

    definitions = {}
    for directory in args.directories:
        for root, dirs, files in os.walk(directory):
            for filename in files:
                if filename.endswith(".py"):
                    filepath = os.path.join(root, filename)
                    definitions[filepath] = definitions_in_file(filepath)

    names = {}
    for filepath, defs in definitions.items():
        defined_names(filepath + ":", defs, names)

    for filepath, defs in definitions.items():
        used_names(filepath + ":", None, defs, names)

    patterns = [re.compile(pattern) for pattern in args.pattern or ()]
    ignore = [re.compile(pattern) for pattern in args.ignore or ()]

    result = {}
    for name, definition in names.items():
        if patterns and not any(pattern.match(name) for pattern in patterns):
            continue
        if ignore and any(pattern.match(name) for pattern in ignore):
            continue
        if args.unused and definition.get('used'):
            continue
        result[name] = definition

    referrer_depth = args.referrers
    referrers = set()
    while referrer_depth:
        referrer_depth -= 1
        for entry in result.values():
            for used_by in entry.get("used", ()):
                referrers.add(used_by)
        for name, definition in names.items():
            if not name in referrers:
                continue
            if ignore and any(pattern.match(name) for pattern in ignore):
                continue
            result[name] = definition

    referred_depth = args.referred
    referred = set()
    while referred_depth:
        referred_depth -= 1
        for entry in result.values():
            for uses in entry.get("uses", ()):
                referred.add(uses)
        for name, definition in names.items():
            if not name in referred:
                continue
            if ignore and any(pattern.match(name) for pattern in ignore):
                continue
            result[name] = definition

    if args.format == 'yaml':
        yaml.dump(result, sys.stdout, default_flow_style=False)
    elif args.format == 'dot':
        print "digraph {"
        for name, entry in result.items():
            print name
            for used_by in entry.get("used", ()):
                if used_by in result:
                    print used_by, "->", name
        print "}"
    else:
        raise ValueError("Unknown format %r" % (args.format))
@@ -1,24 +0,0 @@

#!/usr/bin/env python2

import pymacaroons
import sys

if len(sys.argv) == 1:
    sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],))
    sys.exit(1)

macaroon_string = sys.argv[1]
key = sys.argv[2] if len(sys.argv) > 2 else None

macaroon = pymacaroons.Macaroon.deserialize(macaroon_string)
print macaroon.inspect()

print ""

verifier = pymacaroons.Verifier()
verifier.satisfy_general(lambda c: True)
try:
    verifier.verify(macaroon, key)
    print "Signature is correct"
except Exception as e:
    print str(e)
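# Hedged companion example (illustrative, not from the original file):
# minting a throwaway macaroon with pymacaroons to feed in as sys.argv[1];
# the location, identifier and key values are placeholders.
#
#     import pymacaroons
#     m = pymacaroons.Macaroon(location="example.com",
#                              identifier="key1", key="not-a-real-secret")
#     m.add_first_party_caveat("gen = 1")
#     print m.serialize()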
@@ -1,283 +0,0 @@

#!/usr/bin/env python
#
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import argparse
from urlparse import urlparse, urlunparse

import nacl.signing
import json
import base64
import requests
import sys

from requests.adapters import HTTPAdapter
import srvlookup
import yaml

# uncomment the following to enable debug logging of http requests
# from httplib import HTTPConnection
# HTTPConnection.debuglevel = 1


def encode_base64(input_bytes):
    """Encode bytes as a base64 string without any padding."""

    input_len = len(input_bytes)
    output_len = 4 * ((input_len + 2) // 3) + (input_len + 2) % 3 - 2
    output_bytes = base64.b64encode(input_bytes)
    output_string = output_bytes[:output_len].decode("ascii")
    return output_string


def decode_base64(input_string):
    """Decode a base64 string to bytes inferring padding from the length of the
    string."""

    input_bytes = input_string.encode("ascii")
    input_len = len(input_bytes)
    padding = b"=" * (3 - ((input_len + 3) % 4))
    output_len = 3 * ((input_len + 2) // 4) + (input_len + 2) % 4 - 2
    output_bytes = base64.b64decode(input_bytes + padding)
    return output_bytes[:output_len]


def encode_canonical_json(value):
    return json.dumps(
        value,
        # Encode code-points outside of ASCII as UTF-8 rather than \u escapes
        ensure_ascii=False,
        # Remove unnecessary white space.
        separators=(',', ':'),
        # Sort the keys of dictionaries.
        sort_keys=True,
        # Encode the resulting unicode as UTF-8 bytes.
    ).encode("UTF-8")


def sign_json(json_object, signing_key, signing_name):
    signatures = json_object.pop("signatures", {})
    unsigned = json_object.pop("unsigned", None)

    signed = signing_key.sign(encode_canonical_json(json_object))
    signature_base64 = encode_base64(signed.signature)

    key_id = "%s:%s" % (signing_key.alg, signing_key.version)
    signatures.setdefault(signing_name, {})[key_id] = signature_base64

    json_object["signatures"] = signatures
    if unsigned is not None:
        json_object["unsigned"] = unsigned

    return json_object
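# Worked example (illustrative only): exercising sign_json above with a
# throwaway ed25519 key. The .alg/.version attributes mirror what
# decode_signing_key_base64 sets further down.
#
#     key = nacl.signing.SigningKey.generate()
#     key.alg = "ed25519"
#     key.version = "key1"
#     signed = sign_json({"foo": "bar"}, key, "example.com")
#     signed["signatures"]["example.com"]["ed25519:key1"]  # base64 signature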
NACL_ED25519 = "ed25519"

def decode_signing_key_base64(algorithm, version, key_base64):
    """Decode a base64 encoded signing key

    Args:
        algorithm (str): The algorithm the key is for (currently "ed25519").
        version (str): Identifies this key out of the keys for this entity.
        key_base64 (str): Base64 encoded bytes of the key.

    Returns:
        A SigningKey object.
    """
    if algorithm == NACL_ED25519:
        key_bytes = decode_base64(key_base64)
        key = nacl.signing.SigningKey(key_bytes)
        key.version = version
        key.alg = NACL_ED25519
        return key
    else:
        raise ValueError("Unsupported algorithm %s" % (algorithm,))


def read_signing_keys(stream):
    """Reads a list of keys from a stream

    Args:
        stream : A stream to iterate for keys.

    Returns:
        list of SigningKey objects.
    """
    keys = []
    for line in stream:
        algorithm, version, key_base64 = line.split()
        keys.append(decode_signing_key_base64(algorithm, version, key_base64))
    return keys


def request_json(method, origin_name, origin_key, destination, path, content):
    if method is None:
        if content is None:
            method = "GET"
        else:
            method = "POST"

    json_to_sign = {
        "method": method,
        "uri": path,
        "origin": origin_name,
        "destination": destination,
    }

    if content is not None:
        json_to_sign["content"] = json.loads(content)

    signed_json = sign_json(json_to_sign, origin_key, origin_name)

    authorization_headers = []

    for key, sig in signed_json["signatures"][origin_name].items():
        header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
            origin_name, key, sig,
        )
        authorization_headers.append(bytes(header))
        print("Authorization: %s" % header, file=sys.stderr)

    dest = "matrix://%s%s" % (destination, path)
    print("Requesting %s" % dest, file=sys.stderr)

    s = requests.Session()
    s.mount("matrix://", MatrixConnectionAdapter())

    result = s.request(
        method=method,
        url=dest,
        headers={
            "Host": destination,
            "Authorization": authorization_headers[0]
        },
        verify=False,
        data=content,
    )
    sys.stderr.write("Status Code: %d\n" % (result.status_code,))
    return result.json()


def main():
    parser = argparse.ArgumentParser(
        description="Signs and sends a federation request to a matrix homeserver",
    )

    parser.add_argument(
        "-N", "--server-name",
        help="Name to give as the local homeserver. If unspecified, will be "
             "read from the config file.",
    )

    parser.add_argument(
        "-k", "--signing-key-path",
        help="Path to the file containing the private ed25519 key to sign the "
             "request with.",
    )

    parser.add_argument(
        "-c", "--config",
        default="homeserver.yaml",
        help="Path to server config file. Ignored if --server-name and "
             "--signing-key-path are both given.",
    )

    parser.add_argument(
        "-d", "--destination",
        default="matrix.org",
        help="name of the remote homeserver. We will do SRV lookups and "
             "connect appropriately.",
    )

    parser.add_argument(
        "-X", "--method",
        help="HTTP method to use for the request. Defaults to GET if --body is "
             "unspecified, POST if it is."
    )

    parser.add_argument(
        "--body",
        help="Data to send as the body of the HTTP request"
    )

    parser.add_argument(
        "path",
        help="request path. We will add '/_matrix/federation/v1/' to this."
    )

    args = parser.parse_args()

    if not args.server_name or not args.signing_key_path:
        read_args_from_config(args)

    with open(args.signing_key_path) as f:
        key = read_signing_keys(f)[0]

    result = request_json(
        args.method,
        args.server_name, key, args.destination,
        "/_matrix/federation/v1/" + args.path,
        content=args.body,
    )

    json.dump(result, sys.stdout)
    print("")


def read_args_from_config(args):
    with open(args.config, 'r') as fh:
        config = yaml.safe_load(fh)
        if not args.server_name:
            args.server_name = config['server_name']
        if not args.signing_key_path:
            args.signing_key_path = config['signing_key_path']


class MatrixConnectionAdapter(HTTPAdapter):
    @staticmethod
    def lookup(s):
        if s[-1] == ']':
            # ipv6 literal (with no port)
            return s, 8448

        if ":" in s:
            out = s.rsplit(":", 1)
            try:
                port = int(out[1])
            except ValueError:
                raise ValueError("Invalid host:port '%s'" % s)
            return out[0], port

        try:
            srv = srvlookup.lookup("matrix", "tcp", s)[0]
            return srv.host, srv.port
        except Exception:
            return s, 8448

    def get_connection(self, url, proxies=None):
        parsed = urlparse(url)

        (host, port) = self.lookup(parsed.netloc)
        netloc = "%s:%d" % (host, port)
        print("Connecting to %s" % (netloc,), file=sys.stderr)
        url = urlunparse((
            "https", netloc, parsed.path, parsed.params, parsed.query,
            parsed.fragment,
        ))
        return super(MatrixConnectionAdapter, self).get_connection(url, proxies)


if __name__ == "__main__":
    main()
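# Illustrative invocation (the query path shown is one example of a
# federation endpoint this script can hit; user id is a placeholder):
#
#     python federation_client.py -d matrix.org "query/profile?user_id=@someone:matrix.org"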
@@ -1,62 +0,0 @@
#!/usr/bin/python

import ast
import argparse
import os
import sys
import yaml

PATTERNS_V1 = []
PATTERNS_V2 = []

RESULT = {
    "v1": PATTERNS_V1,
    "v2": PATTERNS_V2,
}

class CallVisitor(ast.NodeVisitor):
    def visit_Call(self, node):
        if isinstance(node.func, ast.Name):
            name = node.func.id
        else:
            return

        if name == "client_path_patterns":
            PATTERNS_V1.append(node.args[0].s)
        elif name == "client_v2_patterns":
            PATTERNS_V2.append(node.args[0].s)


def find_patterns_in_code(input_code):
    input_ast = ast.parse(input_code)
    visitor = CallVisitor()
    visitor.visit(input_ast)


def find_patterns_in_file(filepath):
    with open(filepath) as f:
        find_patterns_in_code(f.read())


parser = argparse.ArgumentParser(description='Find url patterns.')

parser.add_argument(
    "directories", nargs='+', metavar="DIR",
    help="Directories to search for definitions"
)

args = parser.parse_args()


for directory in args.directories:
    for root, dirs, files in os.walk(directory):
        for filename in files:
            if filename.endswith(".py"):
                filepath = os.path.join(root, filename)
                find_patterns_in_file(filepath)

PATTERNS_V1.sort()
PATTERNS_V2.sort()

yaml.dump(RESULT, sys.stdout, default_flow_style=False)
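A hedged usage sketch for the pattern finder above; the filename and directory are assumptions. Per RESULT and yaml.dump, it prints a YAML mapping of "v1" and "v2" to the sorted patterns it collected:

    # Scan the REST handlers and print the collected URL patterns as YAML.
    python find_url_patterns.py synapse/rest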
@@ -1,9 +0,0 @@
#!/bin/bash

set -e

# Fetch the current GitHub issue number, add one to it -- presto! The likely
# next PR number.
CURRENT_NUMBER=$(curl -s "https://api.github.com/repos/matrix-org/synapse/issues?state=all&per_page=1" | jq -r ".[0].number")
CURRENT_NUMBER=$((CURRENT_NUMBER + 1))
echo "$CURRENT_NUMBER"
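A hedged usage sketch; the script's filename is not shown in this hunk, so next_pr_number.sh is assumed:

    # Guess the next PR number, e.g. to pre-create a changelog entry.
    NEXT_PR=$(./next_pr_number.sh)
    echo "Next PR is probably #${NEXT_PR}"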
@@ -1,67 +0,0 @@
import requests
import collections
import sys
import time
import json

Entry = collections.namedtuple("Entry", "name position rows")

ROW_TYPES = {}


def row_type_for_columns(name, column_names):
    # Cache one namedtuple class per (stream name, columns) pair.
    column_names = tuple(column_names)
    row_type = ROW_TYPES.get((name, column_names))
    if row_type is None:
        row_type = collections.namedtuple(name, column_names)
        ROW_TYPES[(name, column_names)] = row_type
    return row_type


def parse_response(content):
    streams = json.loads(content)
    result = {}
    for name, value in streams.items():
        row_type = row_type_for_columns(name, value["field_names"])
        position = value["position"]
        rows = [row_type(*row) for row in value["rows"]]
        result[name] = Entry(name, position, rows)
    return result


def replicate(server, streams):
    return parse_response(requests.get(
        server + "/_synapse/replication",
        verify=False,
        params=streams,
    ).content)


def main():
    server = sys.argv[1]

    # Poll until the server is up, then fetch the current stream positions.
    streams = None
    while not streams:
        try:
            streams = {
                row.name: row.position
                for row in replicate(server, {"streams": "-1"})["streams"].rows
            }
        except requests.exceptions.ConnectionError:
            time.sleep(0.1)

    while True:
        try:
            results = replicate(server, streams)
        except Exception:
            sys.stdout.write("connection_lost(" + repr(streams) + ")\n")
            break
        for update in results.values():
            for row in update.rows:
                sys.stdout.write(repr(row) + "\n")
            streams[update.name] = update.position


if __name__ == '__main__':
    main()
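A hedged usage sketch for the replication tailer above; the filename and URL are illustrative. The script takes the server's base URL as its only argument and prints each replicated row as a namedtuple repr:

    # Tail replication rows from a local homeserver until the connection drops.
    python tail_synapse.py https://localhost:8448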
@@ -56,9 +56,10 @@ if __name__ == '__main__':
     js = json.load(args.json)


     auth = Auth(Mock())
     check_auth(
         auth,
         [FrozenEvent(d) for d in js["auth_chain"]],
-        [FrozenEvent(d) for d in js.get("pdus", [])],
+        [FrozenEvent(d) for d in js["pdus"]],
     )
@@ -1,5 +1,5 @@
 from synapse.crypto.event_signing import *
-from unpaddedbase64 import encode_base64
+from syutil.base64util import encode_base64

 import argparse
 import hashlib
@@ -1,7 +1,9 @@
-from signedjson.sign import verify_signed_json
-from signedjson.key import decode_verify_key_bytes, write_signing_keys
-from unpaddedbase64 import decode_base64
+from syutil.crypto.jsonsign import verify_signed_json
+from syutil.crypto.signing_key import (
+    decode_verify_key_bytes, write_signing_keys
+)
+from syutil.base64util import decode_base64

 import urllib2
 import json
scripts/copyrighter-sql.pl (new executable file, 33 lines)
@@ -0,0 +1,33 @@
#!/usr/bin/perl -pi
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

$copyright = <<EOT;
/* Copyright 2015 OpenMarket Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
EOT

s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);
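A hedged usage sketch: thanks to the -pi shebang, the script rewrites its arguments in place, prepending the licence block to the first line. The target glob is illustrative:

    ./scripts/copyrighter-sql.pl synapse/storage/schema/*.sql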
scripts/copyrighter.pl (new executable file, 33 lines)
@@ -0,0 +1,33 @@
#!/usr/bin/perl -pi
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

$copyright = <<EOT;
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

EOT

s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);
scripts/database-prepare-for-0.0.1.sh (new executable file, 21 lines)
@@ -0,0 +1,21 @@
#!/bin/bash

# This will prepare a synapse database for running with v0.0.1 of synapse.
# It will store all the user information, but will *delete* all messages and
# room data.

set -e

cp "$1" "$1.bak"

DUMP=$(sqlite3 "$1" << 'EOF'
.dump users
.dump access_tokens
.dump presence
.dump profiles
EOF
)

rm "$1"

sqlite3 "$1" <<< "$DUMP"
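A hedged usage sketch; the database filename is illustrative. The script backs the file up to "$1.bak", dumps only the user-related tables, and rebuilds the database from that dump:

    ./scripts/database-prepare-for-0.0.1.sh homeserver.db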
scripts/database-prepare-for-0.5.0.sh (new executable file, 21 lines)
@@ -0,0 +1,21 @@
#!/bin/bash

# This will prepare a synapse database for running with v0.5.0 of synapse.
# It will store all the user information, but will *delete* all messages and
# room data.

set -e

cp "$1" "$1.bak"

DUMP=$(sqlite3 "$1" << 'EOF'
.dump users
.dump access_tokens
.dump presence
.dump profiles
EOF
)

rm "$1"

sqlite3 "$1" <<< "$DUMP"
scripts/federation_client.py (new file, 146 lines)
@@ -0,0 +1,146 @@
import nacl.signing
import json
import base64
import requests
import sys
import srvlookup


def encode_base64(input_bytes):
    """Encode bytes as a base64 string without any padding."""

    input_len = len(input_bytes)
    # Length of the unpadded encoding: 4 chars per 3 bytes, minus the padding.
    output_len = 4 * ((input_len + 2) // 3) + (input_len + 2) % 3 - 2
    output_bytes = base64.b64encode(input_bytes)
    output_string = output_bytes[:output_len].decode("ascii")
    return output_string


def decode_base64(input_string):
    """Decode a base64 string to bytes inferring padding from the length of the
    string."""

    input_bytes = input_string.encode("ascii")
    input_len = len(input_bytes)
    padding = b"=" * (3 - ((input_len + 3) % 4))
    output_len = 3 * ((input_len + 2) // 4) + (input_len + 2) % 4 - 2
    output_bytes = base64.b64decode(input_bytes + padding)
    return output_bytes[:output_len]


def encode_canonical_json(value):
    return json.dumps(
        value,
        # Encode code-points outside of ASCII as UTF-8 rather than \u escapes
        ensure_ascii=False,
        # Remove unnecessary white space.
        separators=(',', ':'),
        # Sort the keys of dictionaries.
        sort_keys=True,
        # Encode the resulting unicode as UTF-8 bytes.
    ).encode("UTF-8")


def sign_json(json_object, signing_key, signing_name):
    signatures = json_object.pop("signatures", {})
    unsigned = json_object.pop("unsigned", None)

    signed = signing_key.sign(encode_canonical_json(json_object))
    signature_base64 = encode_base64(signed.signature)

    key_id = "%s:%s" % (signing_key.alg, signing_key.version)
    signatures.setdefault(signing_name, {})[key_id] = signature_base64

    json_object["signatures"] = signatures
    if unsigned is not None:
        json_object["unsigned"] = unsigned

    return json_object


NACL_ED25519 = "ed25519"

def decode_signing_key_base64(algorithm, version, key_base64):
    """Decode a base64 encoded signing key
    Args:
        algorithm (str): The algorithm the key is for (currently "ed25519").
        version (str): Identifies this key out of the keys for this entity.
        key_base64 (str): Base64 encoded bytes of the key.
    Returns:
        A SigningKey object.
    """
    if algorithm == NACL_ED25519:
        key_bytes = decode_base64(key_base64)
        key = nacl.signing.SigningKey(key_bytes)
        key.version = version
        key.alg = NACL_ED25519
        return key
    else:
        raise ValueError("Unsupported algorithm %s" % (algorithm,))


def read_signing_keys(stream):
    """Reads a list of keys from a stream
    Args:
        stream : A stream to iterate for keys.
    Returns:
        list of SigningKey objects.
    """
    keys = []
    for line in stream:
        algorithm, version, key_base64 = line.split()
        keys.append(decode_signing_key_base64(algorithm, version, key_base64))
    return keys


def lookup(destination, path):
    if ":" in destination:
        return "https://%s%s" % (destination, path)
    else:
        try:
            srv = srvlookup.lookup("matrix", "tcp", destination)[0]
            return "https://%s:%d%s" % (srv.host, srv.port, path)
        except Exception:
            # no SRV record: fall back to the default federation port
            return "https://%s:%d%s" % (destination, 8448, path)

def get_json(origin_name, origin_key, destination, path):
    request_json = {
        "method": "GET",
        "uri": path,
        "origin": origin_name,
        "destination": destination,
    }

    signed_json = sign_json(request_json, origin_key, origin_name)

    authorization_headers = []

    for key, sig in signed_json["signatures"][origin_name].items():
        authorization_headers.append(bytes(
            "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
                origin_name, key, sig,
            )
        ))

    result = requests.get(
        lookup(destination, path),
        headers={"Authorization": authorization_headers[0]},
        verify=False,
    )
    return result.json()


def main():
    origin_name, keyfile, destination, path = sys.argv[1:]

    with open(keyfile) as f:
        key = read_signing_keys(f)[0]

    result = get_json(
        origin_name, key, destination, "/_matrix/federation/v1/" + path
    )

    json.dump(result, sys.stdout)

if __name__ == "__main__":
    main()
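A hedged usage sketch for the new scripts/federation_client.py; the server names and path are illustrative. Per main(), the positional arguments are origin server name, signing key file, destination, and a path to which /_matrix/federation/v1/ is prepended:

    python scripts/federation_client.py \
        matrix.org signing.key remote.example.com \
        "query/profile?user_id=@someone:remote.example.com"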
@@ -6,8 +6,8 @@ from synapse.crypto.event_signing import (
     add_event_pdu_content_hash, compute_pdu_event_reference_hash
 )
 from synapse.api.events.utils import prune_pdu
-from unpaddedbase64 import encode_base64, decode_base64
-from canonicaljson import encode_canonical_json
+from syutil.base64util import encode_base64, decode_base64
+from syutil.jsonutil import encode_canonical_json
 import sqlite3
 import sys

Some files were not shown because too many files have changed in this diff.