Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-07 01:20:16 +00:00)

Compare commits: v0.21.0-rc ... erikj/init (1 commit)

Commit: 171829bb94

12 .gitignore (vendored)
@@ -24,10 +24,10 @@ homeserver*.yaml
.coverage
htmlcov
demo/*/*.db
demo/*/*.log
demo/*/*.log.*
demo/*/*.pid
demo/*.db
demo/*.log
demo/*.log.*
demo/*.pid
demo/media_store.*
demo/etc

@@ -42,7 +42,3 @@ build/
localhost-800*/
static/client/register/register_config.js
.tox

env/
*.config
17 .travis.yml
@@ -1,17 +0,0 @@
sudo: false
language: python
python: 2.7

# tell travis to cache ~/.cache/pip
cache: pip

env:
- TOX_ENV=packaging
- TOX_ENV=pep8
- TOX_ENV=py27

install:
- pip install tox

script:
- tox -e $TOX_ENV
27 AUTHORS.rst
@@ -29,34 +29,9 @@ Matthew Hodgson <matthew at matrix.org>

Emmanuel Rohee <manu at matrix.org>
 * Supporting iOS clients (testability and fallback registration)

Turned to Dust <dwinslow86 at gmail.com>
 * ArchLinux installation instructions

Brabo <brabo at riseup.net>
 * Installation instruction fixes

Ivan Shapovalov <intelfx100 at gmail.com>
 * contrib/systemd: a sample systemd unit file and a logger configuration

Eric Myhre <hash at exultant.us>
 * Fix bug where ``media_store_path`` config option was ignored by v0 content
   repository API.

Muthu Subramanian <muthu.subramanian.karunanidhi at ericsson.com>
 * Add SAML2 support for registration and login.

Steven Hammerton <steven.hammerton at openmarket.com>
 * Add CAS support for registration and login.

Mads Robin Christensen <mads at v42 dot dk>
 * CentOS 7 installation instructions.

Florent Violleau <floviolleau at gmail dot com>
 * Add Raspberry Pi installation instructions and general troubleshooting items

Niklas Riekenbrauck <nikriek at gmail dot.com>
 * Add JWT support for registration and login

Christoph Witzany <christoph at web.crofting.com>
 * Add LDAP support for authentication
@@ -10,13 +10,13 @@ https://developers.google.com/recaptcha/

Setting ReCaptcha Keys
----------------------
The keys are a config option on the home server config. If they are not
visible, you can generate them via --generate-config. Set the following value::
The keys are a config option on the home server config. If they are not
visible, you can generate them via --generate-config. Set the following value:

  recaptcha_public_key: YOUR_PUBLIC_KEY
  recaptcha_private_key: YOUR_PRIVATE_KEY

In addition, you MUST enable captchas via::

In addition, you MUST enable captchas via:

  enable_registration_captcha: true

@@ -25,5 +25,7 @@ Configuring IP used for auth
The ReCaptcha API requires that the IP address of the user who solved the
captcha is sent. If the client is connecting through a proxy or load balancer,
it may be required to use the X-Forwarded-For (XFF) header instead of the origin
IP address. This can be configured using the x_forwarded directive in the
listeners section of the homeserver.yaml configuration file.
IP address. This can be configured as an option on the home server like so:

  captcha_ip_origin_is_x_forwarded: true
1550 CHANGES.rst (diff suppressed because it is too large)
22 MANIFEST.in
@@ -3,28 +3,12 @@ include LICENSE
include VERSION
include *.rst
include demo/README
include demo/demo.tls.dh
include demo/*.py
include demo/*.sh

recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.py

recursive-include demo *.dh
recursive-include demo *.py
recursive-include demo *.sh
recursive-include docs *
recursive-include res *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py

recursive-include synapse/static *.css
recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js

exclude jenkins.sh
exclude jenkins*.sh
exclude jenkins*
recursive-exclude jenkins *.sh

prune demo/etc
926 README.rst (diff suppressed because it is too large)
56 UPGRADE.rst
@@ -1,58 +1,4 @@
Upgrading Synapse
=================

Before upgrading, check if any special steps are required to upgrade from
what you currently have installed to the current version of synapse. The extra
instructions that may be required are listed later in this document.

If synapse was installed in a virtualenv then activate that virtualenv before
upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then run:

.. code:: bash

    source ~/.synapse/bin/activate

If synapse was installed using pip then upgrade to the latest version by
running:

.. code:: bash

    pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master

If synapse was installed using git then upgrade to the latest version by
running:

.. code:: bash

    # Pull the latest version of the master branch.
    git pull
    # Update the versions of synapse's python dependencies.
    python synapse/python_dependencies.py | xargs -n1 pip install --upgrade


Upgrading to v0.15.0
====================

If you want to use the new URL previewing API (/_matrix/media/r0/preview_url)
then you have to explicitly enable it in the config and update your
dependencies. See README.rst for details.


Upgrading to v0.11.0
====================

This release includes the option to send anonymous usage stats to matrix.org,
and requires that administrators explicitly opt in or out by setting the
``report_stats`` option to either ``true`` or ``false``.

We would really appreciate it if you could help our project out by reporting
anonymized usage statistics from your homeserver. Only very basic aggregate
data (e.g. number of users) will be reported, but it helps us to track the
growth of the Matrix community, and helps us to make Matrix a success, as well
as to convince other networks that they should peer with us.


Upgrading to v0.9.0
Upgrading to v0.x.x
===================

Application services have had a breaking API change in this version.
@@ -1,6 +1,6 @@
#!/usr/bin/env python

# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -32,7 +32,7 @@ import urlparse
import nacl.signing
import nacl.encoding

from signedjson.sign import verify_signed_json, SignatureVerifyException
from syutil.crypto.jsonsign import verify_signed_json, SignatureVerifyException

CONFIG_JSON = "cmdclient_config.json"
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -36,13 +36,15 @@ class HttpClient(object):
            the request body. This will be encoded as JSON.

        Returns:
            Deferred: Succeeds when we get a 2xx HTTP response. The result
            will be the decoded JSON body.
            Deferred: Succeeds when we get *any* HTTP response.

            The result of the deferred is a tuple of `(code, response)`,
            where `response` is a dict representing the decoded JSON body.
        """
        pass

    def get_json(self, url, args=None):
        """ Gets some json from the given host homeserver and path
        """ Get's some json from the given host homeserver and path

        Args:
            url (str): The URL to GET data from.
@@ -52,8 +54,10 @@ class HttpClient(object):
                and *not* a string.

        Returns:
            Deferred: Succeeds when we get a 2xx HTTP response. The result
            will be the decoded JSON body.
            Deferred: Succeeds when we get *any* HTTP response.

            The result of the deferred is a tuple of `(code, response)`,
            where `response` is a dict representing the decoded JSON body.
        """
        pass

@@ -210,4 +214,4 @@ class _JsonProducer(object):
        pass

    def stopProducing(self):
        pass
        pass
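The docstring change above makes the contract explicit: the deferred now succeeds on *any* HTTP response and yields a ``(code, response)`` tuple, so callers must check the status code themselves. A minimal caller sketch (the client instance and URL are hypothetical, and a ``put_json(url, data)`` signature is assumed from the docstring above):

.. code:: python

    from twisted.internet import defer

    @defer.inlineCallbacks
    def send_message(client):
        code, response = yield client.put_json(
            "http://localhost:8008/some/path",  # placeholder URL
            {"msgtype": "m.text", "body": "hello"},
        )
        # any HTTP response fires the callback, so check the code ourselves
        if code != 200:
            raise RuntimeError("request failed: %d %r" % (code, response))
        defer.returnValue(response)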
@@ -1,50 +0,0 @@
# Example log_config file for synapse. To enable, point `log_config` to it in
# `homeserver.yaml`, and restart synapse.
#
# This configuration will produce similar results to the defaults within
# synapse, but can be edited to give more flexibility.

version: 1

formatters:
  fmt:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  # example output to console
  console:
    class: logging.StreamHandler
    filters: [context]

  # example output to file - to enable, edit 'root' config below.
  file:
    class: logging.handlers.RotatingFileHandler
    formatter: fmt
    filename: /var/log/synapse/homeserver.log
    maxBytes: 100000000
    backupCount: 3
    filters: [context]


root:
  level: INFO
  handlers: [console]  # to use file handler instead, switch to [file]

loggers:
  synapse:
    level: INFO

  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: INFO

  # example of enabling debugging for a component:
  #
  # synapse.federation.transport.server:
  #   level: DEBUG
@@ -1,4 +1,4 @@
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -1,151 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import pydot
import cgi
import simplejson as json
import datetime
import argparse

from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze


def make_graph(file_name, room_id, file_prefix, limit):
    print "Reading lines"
    with open(file_name) as f:
        lines = f.readlines()

    print "Read lines"

    events = [FrozenEvent(json.loads(line)) for line in lines]

    print "Loaded events."

    events.sort(key=lambda e: e.depth)

    print "Sorted events"

    if limit:
        events = events[-int(limit):]

    node_map = {}

    graph = pydot.Dot(graph_name="Test")

    for event in events:
        t = datetime.datetime.fromtimestamp(
            float(event.origin_server_ts) / 1000
        ).strftime('%Y-%m-%d %H:%M:%S,%f')

        content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
        content = content.replace("\n", "<br/>\n")

        print content
        content = []
        for key, value in unfreeze(event.get_dict()["content"]).items():
            if value is None:
                value = "<null>"
            elif isinstance(value, basestring):
                pass
            else:
                value = json.dumps(value)

            content.append(
                "<b>%s</b>: %s," % (
                    cgi.escape(key, quote=True).encode("ascii", 'xmlcharrefreplace'),
                    cgi.escape(value, quote=True).encode("ascii", 'xmlcharrefreplace'),
                )
            )

        content = "<br/>\n".join(content)

        print content

        label = (
            "<"
            "<b>%(name)s </b><br/>"
            "Type: <b>%(type)s </b><br/>"
            "State key: <b>%(state_key)s </b><br/>"
            "Content: <b>%(content)s </b><br/>"
            "Time: <b>%(time)s </b><br/>"
            "Depth: <b>%(depth)s </b><br/>"
            ">"
        ) % {
            "name": event.event_id,
            "type": event.type,
            "state_key": event.get("state_key", None),
            "content": content,
            "time": t,
            "depth": event.depth,
        }

        node = pydot.Node(
            name=event.event_id,
            label=label,
        )

        node_map[event.event_id] = node
        graph.add_node(node)

    print "Created Nodes"

    for event in events:
        for prev_id, _ in event.prev_events:
            try:
                end_node = node_map[prev_id]
            except:
                end_node = pydot.Node(
                    name=prev_id,
                    label="<<b>%s</b>>" % (prev_id,),
                )

                node_map[prev_id] = end_node
                graph.add_node(end_node)

            edge = pydot.Edge(node_map[event.event_id], end_node)
            graph.add_edge(edge)

    print "Created edges"

    graph.write('%s.dot' % file_prefix, format='raw', prog='dot')

    print "Created Dot"

    graph.write_svg("%s.svg" % file_prefix, prog='dot')

    print "Created svg"

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate a PDU graph for a given room by reading "
                    "from a file with line deliminated events. \n"
                    "Requires pydot."
    )
    parser.add_argument(
        "-p", "--prefix", dest="prefix",
        help="String to prefix output files with",
        default="graph_output"
    )
    parser.add_argument(
        "-l", "--limit",
        help="Only retrieve the last N events.",
    )
    parser.add_argument('event_file')
    parser.add_argument('room')

    args = parser.parse_args()

    make_graph(args.event_file, args.room, args.prefix, args.limit)
@@ -21,5 +21,3 @@ handlers:
root:
    level: INFO
    handlers: [journal]

disable_existing_loggers: False
@@ -1,5 +1,5 @@
# This assumes that Synapse has been installed as a system package
# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
# (e.g. https://aur.archlinux.org/packages/matrix-synapse/ for ArchLinux)
# rather than in a user home directory or similar under virtualenv.

[Unit]
@@ -9,7 +9,6 @@ Description=Synapse Matrix homeserver
Type=simple
User=synapse
Group=synapse
EnvironmentFile=-/etc/sysconfig/synapse
WorkingDirectory=/var/lib/synapse
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml
@@ -126,26 +126,12 @@ sub on_unknown_event
        if (!$bridgestate->{$room_id}->{gathered_candidates}) {
            $bridgestate->{$room_id}->{gathered_candidates} = 1;
            my $offer = $bridgestate->{$room_id}->{offer};
            my $candidate_block = {
                audio => '',
                video => '',
            };
            my $candidate_block = "";
            foreach (@{$event->{content}->{candidates}}) {
                if ($_->{sdpMid}) {
                    $candidate_block->{$_->{sdpMid}} .= "a=" . $_->{candidate} . "\r\n";
                }
                else {
                    $candidate_block->{audio} .= "a=" . $_->{candidate} . "\r\n";
                    $candidate_block->{video} .= "a=" . $_->{candidate} . "\r\n";
                }
                $candidate_block .= "a=" . $_->{candidate} . "\r\n";
            }

            # XXX: assumes audio comes first
            #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{audio}/;
            #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{video}/;

            $offer =~ s/(m=video)/$candidate_block->{audio}$1/;
            $offer =~ s/(.$)/$1\n$candidate_block->{video}$1/;
            # XXX: collate using the right m= line - for now assume audio call
            $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/;

            my $f = send_verto_json_request("verto.invite", {
                "sdp" => $offer,
@@ -186,18 +172,22 @@ sub on_room_message
    warn "[Matrix] in $room_id: $from: " . $content->{body} . "\n";
}

my $verto_connecting = $loop->new_future;
$bot_verto->connect(
    %{ $CONFIG{"verto-bot"} },
    on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
    on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
)->then( sub {
    warn("[Verto] connected to websocket");
    $verto_connecting->done($bot_verto) if not $verto_connecting->is_done;
});

Future->needs_all(
    $bot_matrix->login( %{ $CONFIG{"matrix-bot"} } )->then( sub {
        $bot_matrix->start;
    }),

    $bot_verto->connect(
        %{ $CONFIG{"verto-bot"} },
        on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
        on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
    )->on_done( sub {
        warn("[Verto] connected to websocket");
    }),
    $verto_connecting,
)->get;

$loop->attach_signal(
@@ -11,4 +11,7 @@ requires 'YAML', 0;
requires 'JSON', 0;
requires 'Getopt::Long', 0;

on 'test' => sub {
    requires 'Test::More', '>= 0.98';
};
@@ -11,9 +11,7 @@ if [ -f $PID_FILE ]; then
    exit 1
fi

for port in 8080 8081 8082; do
    rm -rf $DIR/$port
    rm -rf $DIR/media_store.$port
done
find "$DIR" -name "*.log" -delete
find "$DIR" -name "*.db" -delete

rm -rf $DIR/etc
@@ -8,49 +8,38 @@ cd "$DIR/.."

mkdir -p demo/etc

export PYTHONPATH=$(readlink -f $(pwd))


echo $PYTHONPATH
# Check the --no-rate-limit param
PARAMS=""
if [ $# -eq 1 ]; then
    if [ $1 = "--no-rate-limit" ]; then
        PARAMS="--rc-messages-per-second 1000 --rc-message-burst-count 1000"
    fi
fi

for port in 8080 8081 8082; do
    echo "Starting server on port $port... "

    https_port=$((port + 400))
    mkdir -p demo/$port
    pushd demo/$port

    #rm $DIR/etc/$port.config
    python -m synapse.app.homeserver \
        --generate-config \
        --config-path "demo/etc/$port.config" \
        -p "$https_port" \
        --unsecure-port "$port" \
        -H "localhost:$https_port" \
        --config-path "$DIR/etc/$port.config" \
        --report-stats no

    # Check script parameters
    if [ $# -eq 1 ]; then
        if [ $1 = "--no-rate-limit" ]; then
            # Set high limits in config file to disable rate limiting
            perl -p -i -e 's/rc_messages_per_second.*/rc_messages_per_second: 1000/g' $DIR/etc/$port.config
            perl -p -i -e 's/rc_message_burst_count.*/rc_message_burst_count: 1000/g' $DIR/etc/$port.config
        fi
    fi

    perl -p -i -e 's/^enable_registration:.*/enable_registration: true/g' $DIR/etc/$port.config

    if ! grep -F "full_twisted_stacktraces" -q $DIR/etc/$port.config; then
        echo "full_twisted_stacktraces: true" >> $DIR/etc/$port.config
    fi
    if ! grep -F "report_stats" -q $DIR/etc/$port.config ; then
        echo "report_stats: false" >> $DIR/etc/$port.config
    fi
        -f "$DIR/$port.log" \
        -d "$DIR/$port.db" \
        -D --pid-file "$DIR/$port.pid" \
        --manhole $((port + 1000)) \
        --tls-dh-params-path "demo/demo.tls.dh" \
        --media-store-path "demo/media_store.$port" \
        $PARAMS $SYNAPSE_PARAMS \
        --enable-registration

    python -m synapse.app.homeserver \
        --config-path "$DIR/etc/$port.config" \
        -D \
        --config-path "demo/etc/$port.config" \
        -vv \

    popd
done

cd "$CWD"
@@ -1,12 +0,0 @@
Admin APIs
==========

This directory includes documentation for the various synapse specific admin
APIs available.

Only users that are server admins can use these APIs. A user can be marked as a
server admin by updating the database directly, e.g.:

``UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'``

Restarting may be required for the changes to register.
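For a SQLite-backed homeserver, the same update can be made from Python; a sketch (the database path is an assumption, and as noted above a restart may be required):

.. code:: python

    import sqlite3

    conn = sqlite3.connect("homeserver.db")  # assumed default location
    conn.execute(
        "UPDATE users SET admin = 1 WHERE name = ?",
        ("@foo:bar.com",),
    )
    conn.commit()
    conn.close()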
@@ -1,15 +0,0 @@
Purge History API
=================

The purge history API allows server admins to purge historic events from their
database, reclaiming disk space.

Depending on the amount of history being purged a call to the API may take
several minutes or longer. During this period users will not be able to
paginate further back in the room from the point being purged from.

The API is simply:

``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``

including an ``access_token`` of a server admin.
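A sketch of driving the endpoint with the ``requests`` library (the room and event IDs are placeholders, and passing the admin token as a query parameter is assumed, as with the other admin APIs here):

.. code:: python

    import requests

    resp = requests.post(
        "http://localhost:8008/_matrix/client/r0/admin/purge_history"
        "/!room:example.com/$event:example.com",
        params={"access_token": "<admin_access_token>"},
        json={},
    )
    resp.raise_for_status()  # may take several minutes to return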
@@ -1,17 +0,0 @@
Purge Remote Media API
======================

The purge remote media API allows server admins to purge old cached remote
media.

The API is::

    POST /_matrix/client/r0/admin/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>

    {}

Which will remove all cached media that was last accessed before
``<unix_timestamp_in_ms>``.

If the user re-requests purged remote media, synapse will re-request the media
from the originating server.
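For example, purging remote media not accessed in the last 30 days might look like this (a sketch with the ``requests`` library; the server URL and token are placeholders):

.. code:: python

    import time

    import requests

    before_ts = int((time.time() - 30 * 24 * 3600) * 1000)  # ms since the epoch
    resp = requests.post(
        "http://localhost:8008/_matrix/client/r0/admin/purge_media_cache",
        params={"before_ts": before_ts, "access_token": "<admin_access_token>"},
        json={},
    )
    resp.raise_for_status()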
@@ -1,73 +0,0 @@
Query Account
=============

This API returns information about a specific user account.

The api is::

    GET /_matrix/client/r0/admin/whois/<user_id>

including an ``access_token`` of a server admin.

It returns a JSON body like the following:

.. code:: json

    {
        "user_id": "<user_id>",
        "devices": {
            "": {
                "sessions": [
                    {
                        "connections": [
                            {
                                "ip": "1.2.3.4",
                                "last_seen": 1417222374433,
                                "user_agent": "Mozilla/5.0 ..."
                            },
                            {
                                "ip": "1.2.3.10",
                                "last_seen": 1417222374500,
                                "user_agent": "Dalvik/2.1.0 ..."
                            }
                        ]
                    }
                ]
            }
        }
    }

``last_seen`` is measured in milliseconds since the Unix epoch.

Deactivate Account
==================

This API deactivates an account. It removes active access tokens, resets the
password, and deletes third-party IDs (to prevent the user requesting a
password reset).

The api is::

    POST /_matrix/client/r0/admin/deactivate/<user_id>

including an ``access_token`` of a server admin, and an empty request body.


Reset password
==============

Changes the password of another user.

The api is::

    POST /_matrix/client/r0/admin/reset_password/<user_id>

with a body of:

.. code:: json

    {
        "new_password": "<secret>"
    }

including an ``access_token`` of a server admin.
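All three endpoints can be driven the same way; a combined sketch with the ``requests`` library (server URL, user ID and token are placeholders):

.. code:: python

    import requests

    BASE = "http://localhost:8008/_matrix/client/r0/admin"
    AUTH = {"access_token": "<admin_access_token>"}
    USER = "@foo:bar.com"

    # Query Account: walk the devices/sessions/connections structure above
    whois = requests.get("%s/whois/%s" % (BASE, USER), params=AUTH).json()
    for device in whois.get("devices", {}).values():
        for session in device["sessions"]:
            for conn in session["connections"]:
                print(conn["ip"], conn["last_seen"], conn["user_agent"])

    # Reset password
    requests.post(
        "%s/reset_password/%s" % (BASE, USER),
        params=AUTH, json={"new_password": "<secret>"},
    ).raise_for_status()

    # Deactivate Account (empty request body)
    requests.post(
        "%s/deactivate/%s" % (BASE, USER), params=AUTH, json={},
    ).raise_for_status()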
@@ -1,35 +0,0 @@
Registering an Application Service
==================================

The registration of new application services depends on the homeserver used.
In synapse, you need to create a new configuration file for your AS and add it
to the list specified under the ``app_service_config_files`` config
option in your synapse config.

For example:

.. code-block:: yaml

  app_service_config_files:
  - /home/matrix/.synapse/<your-AS>.yaml


The format of the AS configuration file is as follows:

.. code-block:: yaml

  url: <base url of AS>
  as_token: <token AS will add to requests to HS>
  hs_token: <token HS will add to requests to AS>
  sender_localpart: <localpart of AS user>
  namespaces:
    users:  # List of users we're interested in
      - exclusive: <bool>
        regex: <regex>
      - ...
    aliases: []  # List of aliases we're interested in
    rooms: []  # List of room ids we're interested in

See the spec_ for further details on how application services work.

.. _spec: https://matrix.org/docs/spec/application_service/unstable.html
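A quick way to sanity-check a registration file before listing it under ``app_service_config_files`` is to load it and assert the keys from the format above are present (a sketch using PyYAML; the file path is hypothetical):

.. code:: python

    import yaml

    with open("/home/matrix/.synapse/my-as.yaml") as f:
        config = yaml.safe_load(f)

    for key in ("url", "as_token", "hs_token", "sender_localpart", "namespaces"):
        assert key in config, "missing required key: %s" % key

    # each user namespace entry should carry a regex and an exclusivity flag
    for entry in config["namespaces"].get("users", []):
        assert "regex" in entry and "exclusive" in entry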
@@ -43,10 +43,7 @@ Basically, PEP8
  together, or want to deliberately extend or preserve vertical/horizontal
  space)

Comments should follow the `google code style <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
This is so that we can generate documentation with
`sphinx <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
`examples <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
in the sphinx documentation.
Comments should follow the google code style. This is so that we can generate
documentation with sphinx (http://sphinxcontrib-napoleon.readthedocs.org/en/latest/)

Code should pass pep8 --max-line-length=100 without any warnings.
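For reference, a docstring in the google style referred to above looks like the following (an illustrative sketch, not code from the repository):

.. code:: python

    def get_state_events(room_id, event_type=None):
        """Fetch the current state events for a room.

        Args:
            room_id (str): The room to query.
            event_type (str|None): If given, only return state events of
                this type.

        Returns:
            list[dict]: The matching state events.

        Raises:
            KeyError: If the room is not known.
        """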
@@ -1,446 +0,0 @@
Log contexts
============

.. contents::

To help track the processing of individual requests, synapse uses a
'log context' to track which request it is handling at any given moment. This
is done via a thread-local variable; a ``logging.Filter`` is then used to fish
the information back out of the thread-local variable and add it to each log
record.
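As an illustration of that mechanism (a standalone sketch, not synapse's actual implementation), a ``logging.Filter`` can copy a request identifier from a thread-local onto every log record:

.. code:: python

    import logging
    import threading

    _context = threading.local()

    class RequestContextFilter(logging.Filter):
        def filter(self, record):
            # fish the current request out of the thread-local and attach
            # it to the record, so formatters can use %(request)s
            record.request = getattr(_context, "request", "")
            return True

    handler = logging.StreamHandler()
    handler.addFilter(RequestContextFilter())
    handler.setFormatter(logging.Formatter("%(request)s - %(message)s"))
    logging.getLogger().addHandler(handler)

    _context.request = "GET-123"
    logging.getLogger().warning("handling")  # logged as "GET-123 - handling"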
Logcontexts are also used for CPU and database accounting, so that we can track
which requests were responsible for high CPU use or database activity.

The ``synapse.util.logcontext`` module provides facilities for managing the
current log context (as well as providing the ``LoggingContextFilter`` class).

Deferreds make the whole thing complicated, so this document describes how it
all works, and how to write code which follows the rules.
Logcontexts without Deferreds
-----------------------------

In the absence of any Deferred voodoo, things are simple enough. As with any
code of this nature, the rule is that our function should leave things as it
found them:

.. code:: python

    from synapse.util import logcontext   # omitted from future snippets

    def handle_request(request_id):
        request_context = logcontext.LoggingContext()

        calling_context = logcontext.LoggingContext.current_context()
        logcontext.LoggingContext.set_current_context(request_context)
        try:
            request_context.request = request_id
            do_request_handling()
            logger.debug("finished")
        finally:
            logcontext.LoggingContext.set_current_context(calling_context)

    def do_request_handling():
        logger.debug("phew")  # this will be logged against request_id


LoggingContext implements the context management methods, so the above can be
written much more succinctly as:

.. code:: python

    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            do_request_handling()
            logger.debug("finished")

    def do_request_handling():
        logger.debug("phew")


Using logcontexts with Deferreds
--------------------------------

Deferreds — and in particular, ``defer.inlineCallbacks`` — break
the linear flow of code so that there is no longer a single entry point where
we should set the logcontext and a single exit point where we should remove it.

Consider the example above, where ``do_request_handling`` needs to do some
blocking operation, and returns a deferred:

.. code:: python

    @defer.inlineCallbacks
    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            yield do_request_handling()
            logger.debug("finished")


In the above flow:

* The logcontext is set
* ``do_request_handling`` is called, and returns a deferred
* ``handle_request`` yields the deferred
* The ``inlineCallbacks`` wrapper of ``handle_request`` returns a deferred

So we have stopped processing the request (and will probably go on to start
processing the next), without clearing the logcontext.

To circumvent this problem, synapse code assumes that, wherever you have a
deferred, you will want to yield on it. To that end, wherever functions return
a deferred, we adopt the following conventions:

**Rules for functions returning deferreds:**

* If the deferred is already complete, the function returns with the same
  logcontext it started with.
* If the deferred is incomplete, the function clears the logcontext before
  returning; when the deferred completes, it restores the logcontext before
  running any callbacks.

That sounds complicated, but actually it means a lot of code (including the
example above) "just works". There are two cases:

* If ``do_request_handling`` returns a completed deferred, then the logcontext
  will still be in place. In this case, execution will continue immediately
  after the ``yield``; the "finished" line will be logged against the right
  context, and the ``with`` block restores the original context before we
  return to the caller.

* If the returned deferred is incomplete, ``do_request_handling`` clears the
  logcontext before returning. The logcontext is therefore clear when
  ``handle_request`` yields the deferred. At that point, the ``inlineCallbacks``
  wrapper adds a callback to the deferred, and returns another (incomplete)
  deferred to the caller, and it is safe to begin processing the next request.

  Once ``do_request_handling``'s deferred completes, it will reinstate the
  logcontext, before running the callback added by the ``inlineCallbacks``
  wrapper. That callback runs the second half of ``handle_request``, so again
  the "finished" line will be logged against the right
  context, and the ``with`` block restores the original context.

As an aside, it's worth noting that ``handle_request`` follows our rules -
though that only matters if the caller has its own logcontext which it cares
about.

The following sections describe pitfalls and helpful patterns when implementing
these rules.

Always yield your deferreds
---------------------------

Whenever you get a deferred back from a function, you should ``yield`` on it
as soon as possible. (Returning it directly to your caller is ok too, if you're
not doing ``inlineCallbacks``.) Do not pass go; do not do any logging; do not
call any other functions.

.. code:: python

    @defer.inlineCallbacks
    def fun():
        logger.debug("starting")
        yield do_some_stuff()       # just like this

        d = more_stuff()
        result = yield d            # also fine, of course

        defer.returnValue(result)

    def nonInlineCallbacksFun():
        logger.debug("just a wrapper really")
        return do_some_stuff()      # this is ok too - the caller will yield on
                                    # it anyway.

Provided this pattern is followed all the way back up the callchain to where
the logcontext was set, this will make things work out ok: provided
``do_some_stuff`` and ``more_stuff`` follow the rules above, then so will
``fun`` (as wrapped by ``inlineCallbacks``) and ``nonInlineCallbacksFun``.

It's all too easy to forget to ``yield``: for instance if we forgot that
``do_some_stuff`` returned a deferred, we might plough on regardless. This
leads to a mess; it will probably work itself out eventually, but not before
a load of stuff has been logged against the wrong context. (Normally, other
things will break, more obviously, if you forget to ``yield``, so this tends
not to be a major problem in practice.)

Of course sometimes you need to do something a bit fancier with your Deferreds
- not all code follows the linear A-then-B-then-C pattern. Notes on
implementing more complex patterns are in later sections.

Where you create a new Deferred, make it follow the rules
---------------------------------------------------------

Most of the time, a Deferred comes from another synapse function. Sometimes,
though, we need to make up a new Deferred, or we get a Deferred back from
external code. We need to make it follow our rules.

The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
which returns a deferred which will run its callbacks after a given number of
seconds. That might look like:

.. code:: python

    # not a logcontext-rules-compliant function
    def get_sleep_deferred(seconds):
        d = defer.Deferred()
        reactor.callLater(seconds, d.callback, None)
        return d

That doesn't follow the rules, but we can fix it by wrapping it with
``PreserveLoggingContext`` and ``yield`` ing on it:

.. code:: python

    @defer.inlineCallbacks
    def sleep(seconds):
        with PreserveLoggingContext():
            yield get_sleep_deferred(seconds)

This technique works equally for external functions which return deferreds,
or deferreds we have made ourselves.

You can also use ``logcontext.make_deferred_yieldable``, which just does the
boilerplate for you, so the above could be written:

.. code:: python

    def sleep(seconds):
        return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))


Fire-and-forget
---------------

Sometimes you want to fire off a chain of execution, but not wait for its
result. That might look a bit like this:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        # *don't* do this
        background_operation()

        logger.debug("Request handling complete")

    @defer.inlineCallbacks
    def background_operation():
        yield first_background_step()
        logger.debug("Completed first step")
        yield second_background_step()
        logger.debug("Completed second step")

The above code does a couple of steps in the background after
``do_request_handling`` has finished. The log lines are still logged against
the ``request_context`` logcontext, which may or may not be desirable. There
are two big problems with the above, however. The first problem is that, if
``background_operation`` returns an incomplete Deferred, it will expect its
caller to ``yield`` immediately, so will have cleared the logcontext. In this
example, that means that 'Request handling complete' will be logged without any
context.

The second problem, which is potentially even worse, is that when the Deferred
returned by ``background_operation`` completes, it will restore the original
logcontext. There is nothing waiting on that Deferred, so the logcontext will
leak into the reactor and possibly get attached to some arbitrary future
operation.

There are two potential solutions to this.

One option is to surround the call to ``background_operation`` with a
``PreserveLoggingContext`` call. That will reset the logcontext before
starting ``background_operation`` (so the context restored when the deferred
completes will be the empty logcontext), and will restore the current
logcontext before continuing the foreground process:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        # start background_operation off in the empty logcontext, to
        # avoid leaking the current context into the reactor.
        with PreserveLoggingContext():
            background_operation()

        # this will now be logged against the request context
        logger.debug("Request handling complete")

Obviously that option means that the operations done in
``background_operation`` would not be logged against a logcontext (though
that might be fixed by setting a different logcontext via a ``with
LoggingContext(...)`` in ``background_operation``).

The second option is to use ``logcontext.preserve_fn``, which wraps a function
so that it doesn't reset the logcontext even when it returns an incomplete
deferred, and adds a callback to the returned deferred to reset the
logcontext. In other words, it turns a function that follows the Synapse rules
about logcontexts and Deferreds into one which behaves more like an external
function — the opposite operation to that described in the previous section.
It can be used like this:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        logcontext.preserve_fn(background_operation)()

        # this will now be logged against the request context
        logger.debug("Request handling complete")

XXX: I think ``preserve_context_over_fn`` is supposed to do the first option,
but the fact that it does ``preserve_context_over_deferred`` on its results
means that its use is fraught with difficulty.

Passing synapse deferreds into third-party functions
----------------------------------------------------

A typical example of this is where we want to collect together two or more
deferreds via ``defer.gatherResults``:

.. code:: python

    d1 = operation1()
    d2 = operation2()
    d3 = defer.gatherResults([d1, d2])

This is really a variation of the fire-and-forget problem above, in that we are
firing off ``d1`` and ``d2`` without yielding on them. The difference
is that we now have third-party code attached to their callbacks. Anyway either
technique given in the `Fire-and-forget`_ section will work.

Of course, the new Deferred returned by ``gatherResults`` needs to be wrapped
in order to make it follow the logcontext rules before we can yield it, as
described in `Where you create a new Deferred, make it follow the rules`_.

So, option one: reset the logcontext before starting the operations to be
gathered:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        with PreserveLoggingContext():
            d1 = operation1()
            d2 = operation2()
            result = yield defer.gatherResults([d1, d2])

In this case particularly, though, option two, of using
``logcontext.preserve_fn`` almost certainly makes more sense, so that
``operation1`` and ``operation2`` are both logged against the original
logcontext. This looks like:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        d1 = logcontext.preserve_fn(operation1)()
        d2 = logcontext.preserve_fn(operation2)()

        with PreserveLoggingContext():
            result = yield defer.gatherResults([d1, d2])


Was all this really necessary?
------------------------------

The conventions used work fine for a linear flow where everything happens in
series via ``defer.inlineCallbacks`` and ``yield``, but are certainly tricky to
follow for any more exotic flows. It's hard not to wonder if we could have done
something else.

We're not going to rewrite Synapse now, so the following is entirely of
academic interest, but I'd like to record some thoughts on an alternative
approach.

I briefly prototyped some code following an alternative set of rules. I think
it would work, but I certainly didn't get as far as thinking how it would
interact with concepts as complicated as the cache descriptors.

My alternative rules were:

* functions always preserve the logcontext of their caller, whether or not they
  are returning a Deferred.

* Deferreds returned by synapse functions run their callbacks in the same
  context as the function was originally called in.

The main point of this scheme is that everywhere that sets the logcontext is
responsible for clearing it before returning control to the reactor.

So, for example, if you were the function which started a ``with
LoggingContext`` block, you wouldn't ``yield`` within it — instead you'd start
off the background process, and then leave the ``with`` block to wait for it:

.. code:: python

    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            d = do_request_handling()

        def cb(r):
            logger.debug("finished")

        d.addCallback(cb)
        return d

(in general, mixing ``with LoggingContext`` blocks and
``defer.inlineCallbacks`` in the same function leads to slightly
counter-intuitive code, under this scheme).

Because we leave the original ``with`` block as soon as the Deferred is
returned (as opposed to waiting for it to be resolved, as we do today), the
logcontext is cleared before control passes back to the reactor; so if there is
some code within ``do_request_handling`` which needs to wait for a Deferred to
complete, there is no need for it to worry about clearing the logcontext before
doing so:

.. code:: python

    def handle_request():
        r = do_some_stuff()
        r.addCallback(do_some_more_stuff)
        return r

— and provided ``do_some_stuff`` follows the rules of returning a Deferred which
runs its callbacks in the original logcontext, all is happy.

The business of a Deferred which runs its callbacks in the original logcontext
isn't hard to achieve — we have it today, in the shape of
``logcontext._PreservingContextDeferred``:

.. code:: python

    def do_some_stuff():
        deferred = do_some_io()
        pcd = _PreservingContextDeferred(LoggingContext.current_context())
        deferred.chainDeferred(pcd)
        return pcd

It turns out that, thanks to the way that Deferreds chain together, we
automatically get the property of a context-preserving deferred with
``defer.inlineCallbacks``, provided the final Deferred the function ``yields``
on has that property. So we can just write:

.. code:: python

    @defer.inlineCallbacks
    def handle_request():
        yield do_some_stuff()
        yield do_some_more_stuff()

To conclude: I think this scheme would have worked equally well, with less
danger of messing it up, and probably made some more esoteric code easier to
write. But again — changing the conventions of the entire Synapse codebase is
not a sensible option for the marginal improvement offered.
@@ -1,68 +1,50 @@
How to monitor Synapse metrics using Prometheus
===============================================

1. Install prometheus:
1: Install prometheus:
   Follow instructions at http://prometheus.io/docs/introduction/install/

   Follow instructions at http://prometheus.io/docs/introduction/install/
2: Enable synapse metrics:
   Simply setting a (local) port number will enable it. Pick a port.
   prometheus itself defaults to 9090, so starting just above that for
   locally monitored services seems reasonable. E.g. 9092:

2. Enable synapse metrics:
   Add to homeserver.yaml

   Simply setting a (local) port number will enable it. Pick a port.
   prometheus itself defaults to 9090, so starting just above that for
   locally monitored services seems reasonable. E.g. 9092:
     metrics_port: 9092

   Add to homeserver.yaml::
   Restart synapse

     metrics_port: 9092
3: Check out synapse-prometheus-config
   https://github.com/matrix-org/synapse-prometheus-config

   Also ensure that ``enable_metrics`` is set to ``True``.

   Restart synapse.
4: Add ``synapse.html`` and ``synapse.rules``
   The ``.html`` file needs to appear in prometheus's ``consoles`` directory,
   and the ``.rules`` file needs to be invoked somewhere in the main config
   file. A symlink to each from the git checkout into the prometheus directory
   might be easiest to ensure ``git pull`` keeps it updated.

3. Add a prometheus target for synapse.
5: Add a prometheus target for synapse
   This is easiest if prometheus runs on the same machine as synapse, as it can
   then just use localhost::

   It needs to set the ``metrics_path`` to a non-default value (under ``scrape_configs``)::
     global: {
       rule_file: "synapse.rules"
     }

     - job_name: "synapse"
       metrics_path: "/_synapse/metrics"
       static_configs:
         - targets: ["my.server.here:9092"]
     job: {
       name: "synapse"

   If your prometheus is older than 1.5.2, you will need to replace
   ``static_configs`` in the above with ``target_groups``.

   Restart prometheus.
       target_group: {
         target: "http://localhost:9092/"
       }
     }

Standard Metric Names
---------------------
6: Start prometheus::

As of synapse version 0.18.2, the format of the process-wide metrics has been
changed to fit prometheus standard naming conventions. Additionally the units
have been changed to seconds, from milliseconds.
   ./prometheus -config.file=prometheus.conf

================================== =============================
New name                           Old name
---------------------------------- -----------------------------
process_cpu_user_seconds_total     process_resource_utime / 1000
process_cpu_system_seconds_total   process_resource_stime / 1000
process_open_fds (no 'type' label) process_fds
================================== =============================
7: Wait a few seconds for it to start and perform the first scrape,
   then visit the console:

The python-specific counts of garbage collector performance have been renamed.

=========================== ======================
New name                    Old name
--------------------------- ----------------------
python_gc_time              reactor_gc_time
python_gc_unreachable_total reactor_gc_unreachable
python_gc_counts            reactor_gc_counts
=========================== ======================

The twisted-specific reactor metrics have been renamed.

==================================== =====================
New name                             Old name
------------------------------------ ---------------------
python_twisted_reactor_pending_calls reactor_pending_calls
python_twisted_reactor_tick_time     reactor_tick_time
==================================== =====================
   http://server-where-prometheus-runs:9090/consoles/synapse.html
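Before wiring up prometheus, it can be worth checking that synapse is actually serving metrics (a sketch, assuming the ``metrics_port: 9092`` example and the ``/_synapse/metrics`` path from the scrape config above):

.. code:: python

    import urllib.request

    # the metrics listener serves the prometheus text exposition format
    with urllib.request.urlopen("http://localhost:9092/_synapse/metrics") as resp:
        body = resp.read().decode("utf-8")

    for line in body.splitlines():
        if line.startswith("process_cpu"):
            print(line)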
@@ -18,8 +18,8 @@ encoding use, e.g.::
This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).

Set up client in Debian/Ubuntu
==============================
Set up client
=============

Postgres support depends on the postgres python connector ``psycopg2``. In the
virtual env::
@@ -27,19 +27,6 @@ virtual env::
    sudo apt-get install libpq-dev
    pip install psycopg2

Set up client in RHEL/CentOs 7
==============================

Make sure you have the appropriate version of postgres-devel installed. For a
postgres 9.4, use the postgres 9.4 packages from
[here](https://wiki.postgresql.org/wiki/YUM_Installation).

As with Debian/Ubuntu, postgres support depends on the postgres python connector
``psycopg2``. In the virtual env::

    sudo yum install postgresql-devel libpqxx-devel.x86_64
    export PATH=/usr/pgsql-9.4/bin/:$PATH
    pip install psycopg2

Synapse config
==============
@@ -47,15 +34,19 @@ Synapse config
When you are ready to start using PostgreSQL, add the following line to your
config file::

    database:
        name: psycopg2
        args:
            user: <user>
            password: <pass>
            database: <db>
            host: <host>
            cp_min: 5
            cp_max: 10
    database_config: <db_config_file>

Where ``<db_config_file>`` is the file name that points to a yaml file of the
following form::

    name: psycopg2
    args:
        user: <user>
        password: <pass>
        database: <db>
        host: <host>
        cp_min: 5
        cp_max: 10

All keys and values in ``args`` are passed to the ``psycopg2.connect(..)``
function, except keys beginning with ``cp_``, which are consumed by the twisted
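In effect, synapse splits ``args`` roughly as follows (a sketch: the ``cp_*`` keys feed the twisted connection pool, and everything else goes straight to ``psycopg2.connect``):

.. code:: python

    import psycopg2

    args = {
        "user": "<user>", "password": "<pass>",
        "database": "<db>", "host": "<host>",
        "cp_min": 5, "cp_max": 10,
    }
    # strip the cp_* keys, which configure the connection pool, and pass
    # the rest through to psycopg2
    db_args = {k: v for k, v in args.items() if not k.startswith("cp_")}
    conn = psycopg2.connect(**db_args)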
@@ -68,8 +59,9 @@ Porting from SQLite
Overview
~~~~~~~~

The script ``synapse_port_db`` allows porting an existing synapse server
backed by SQLite to using PostgreSQL. This is done as a two phase process:
The script ``port_from_sqlite_to_postgres.py`` allows porting an existing
synapse server backed by SQLite to using PostgreSQL. This is done as a two
phase process:

1. Copy the existing SQLite database to a separate location (while the server
   is down) and running the port script against that offline database.
@@ -94,12 +86,13 @@ complete, restart synapse. For instance::

    cp homeserver.db homeserver.db.snapshot
    ./synctl start

Assuming your new config file (as described in the section *Synapse config*)
is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at
Assuming your database config file (as described in the section *Synapse
config*) is named ``database_config.yaml`` and the SQLite snapshot is at
``homeserver.db.snapshot`` then simply run::

    synapse_port_db --sqlite-database homeserver.db.snapshot \
        --postgres-config homeserver-postgres.yaml
    python scripts/port_from_sqlite_to_postgres.py \
        --sqlite-database homeserver.db.snapshot \
        --postgres-config database_config.yaml

The flag ``--curses`` displays a coloured curses progress UI.

@@ -111,10 +104,11 @@ To complete the conversion shut down the synapse server and run the port
script one last time, e.g. if the SQLite database is at ``homeserver.db``
run::

    synapse_port_db --sqlite-database homeserver.db \
        --postgres-config homeserver-postgres.yaml
    python scripts/port_from_sqlite_to_postgres.py \
        --sqlite-database homeserver.db \
        --postgres-config database_config.yaml

Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file ``homeserver-postgres.yaml`` (i.e. rename it to
``homeserver.yaml``) and restart synapse. Synapse should now be running against
database configuration file using the ``database_config`` parameter (see
`Synapse Config`_) and restart synapse. Synapse should now be running against
PostgreSQL.
@@ -1,40 +0,0 @@
Replication Architecture
========================

Motivation
----------

We'd like to be able to split some of the work that synapse does into multiple
python processes. In theory multiple synapse processes could share a single
postgresql database and we'd scale up by running more synapse processes.
However much of synapse assumes that only one process is interacting with the
database, both for assigning unique identifiers when inserting into tables,
notifying components about new updates, and for invalidating its caches.

So running multiple copies of the current code isn't an option. One way to
run multiple processes would be to have a single writer process and multiple
reader processes connected to the same database. In order to do this we'd need
a way for the reader process to invalidate its in-memory caches when an update
happens on the writer. One way to do this is for the writer to present an
append-only log of updates which the readers can consume to invalidate their
caches and to push updates to listening clients or pushers.

Synapse already stores much of its data as an append-only log so that it can
correctly respond to /sync requests so the amount of code changes needed to
expose the append-only log to the readers should be fairly minimal.

Architecture
------------

The Replication Protocol
~~~~~~~~~~~~~~~~~~~~~~~~

See ``tcp_replication.rst``


The Slaved DataStore
~~~~~~~~~~~~~~~~~~~~

There are read-only versions of the synapse storage layer in
``synapse/replication/slave/storage`` that use the response of the replication
API to invalidate their caches.
@@ -1,223 +0,0 @@
TCP Replication
===============

Motivation
----------

Previously the workers used an HTTP long poll mechanism to get updates from the
master, which had the problem of causing a lot of duplicate work on the server.
This TCP protocol replaces those APIs with the aim of increased efficiency.



Overview
--------

The protocol is based on fire-and-forget, line-based commands. An example flow
would be (where '>' indicates master to worker and '<' worker to master)::

    > SERVER example.com
    < REPLICATE events 53
    > RDATA events 54 ["$foo1:bar.com", ...]
    > RDATA events 55 ["$foo4:bar.com", ...]

The example shows the server accepting a new connection and sending its identity
with the ``SERVER`` command, followed by the client asking to subscribe to the
``events`` stream from the token ``53``. The server then periodically sends ``RDATA``
commands which have the format ``RDATA <stream_name> <token> <row>``, where the
format of ``<row>`` is defined by the individual streams.

Error reporting happens by either the client or server sending an ``ERROR``
command, and usually the connection will be closed.


Since the protocol is a simple line-based one, it's possible to connect to the
server manually using a tool like netcat. A few things should be noted when
using the protocol by hand:

* When subscribing to a stream using ``REPLICATE``, the special token ``NOW`` can
  be used to get all future updates. The special stream name ``ALL`` can be used
  with ``NOW`` to subscribe to all available streams.
* The federation stream is only available if federation sending has been
  disabled on the main process.
* The server will only time out connections that have sent a ``PING`` command.
  If a ping is sent then the connection will be closed if no further commands
  are received within 15s. Both the client and server protocol implementations
  will send an initial PING on connection and ensure at least one command every
  5s is sent (not necessarily ``PING``).
* ``RDATA`` commands *usually* include a numeric token, however if the stream
  has multiple rows to replicate per token the server will send multiple
  ``RDATA`` commands, with all but the last having a token of ``batch``. See
  the documentation on ``commands.RdataCommand`` for further details.


Architecture
------------

The basic structure of the protocol is line based, where the initial word of
each line specifies the command. The rest of the line is parsed based on the
command. For example, the ``RDATA`` command is defined as::

    RDATA <stream_name> <token> <row_json>

(Note that ``<row_json>`` may contain spaces, but cannot contain newlines.)

Blank lines are ignored.

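As an illustration of this framing, a minimal parser can be sketched in a few
lines of Python (a hypothetical helper, not synapse's actual implementation)::

    # Split one wire line into a command name and its payload. The name is
    # the first word; everything after the first space is payload (which,
    # for RDATA, may itself contain spaces).
    def parse_command(line):
        line = line.rstrip("\r\n")
        if not line:
            return None  # blank lines are ignored
        name, _, rest = line.partition(" ")
        return name, rest

    print(parse_command('RDATA events 54 ["$foo1:bar.com", null]'))
    # -> ('RDATA', 'events 54 ["$foo1:bar.com", null]')
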
Keep alives
~~~~~~~~~~~

Both sides are expected to send at least one command every 5s or so, and
should send a ``PING`` command if necessary. If either side does not receive a
command within e.g. 15s then the connection should be closed.

Because the server may be connected to manually using e.g. netcat, the timeouts
aren't enabled until an initial ``PING`` command is seen. Both the client and
server implementations below send a ``PING`` command immediately on connection to
ensure the timeouts are enabled.

This ensures that both sides can quickly realize if the TCP connection has gone
away and handle the situation appropriately.

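A sketch of this bookkeeping (illustrative only; the real logic lives in
``synapse/replication/tcp``)::

    import time

    SEND_EVERY_SECS = 5    # send something at least this often
    TIMEOUT_SECS = 15      # close if nothing is heard for this long

    class ConnectionTimers(object):
        def __init__(self):
            self.last_sent = time.time()
            self.last_received = time.time()
            self.timeouts_enabled = False  # flips on once a PING is seen

        def on_command_received(self, name):
            self.last_received = time.time()
            if name == "PING":
                self.timeouts_enabled = True

        def should_send_ping(self):
            return time.time() - self.last_sent > SEND_EVERY_SECS

        def should_close(self):
            return (self.timeouts_enabled
                    and time.time() - self.last_received > TIMEOUT_SECS)
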
Start up
~~~~~~~~

When a new connection is made, the server:

* Sends a ``SERVER`` command, which includes the identity of the server, allowing
  the client to detect if it's connected to the expected server
* Sends a ``PING`` command as above, to enable the client to time out connections
  promptly.

The client:

* Sends a ``NAME`` command, allowing the server to associate a human-friendly
  name with the connection. This is optional.
* Sends a ``PING`` as above
* For each stream the client wishes to subscribe to, it sends a ``REPLICATE``
  with the stream_name and token it wants to subscribe from.
* On receipt of a ``SERVER`` command, checks that the server name matches the
  expected server name.


Error handling
~~~~~~~~~~~~~~

If either side detects an error it can send an ``ERROR`` command and close the
connection.

If the client side loses the connection to the server it should reconnect,
following the steps above.


Congestion
~~~~~~~~~~

If the server sends messages faster than the client can consume them the server
will first buffer a (fairly large) number of commands and then disconnect the
client. This ensures that we don't queue up an unbounded number of commands in
memory and gives us a potential opportunity to squawk loudly. When/if the client
recovers it can reconnect to the server and ask for missed messages.


Reliability
~~~~~~~~~~~

In general the replication stream should be considered an unreliable transport
since e.g. commands are not resent if the connection disappears.

The exception to that is the replication streams themselves, i.e. ``RDATA``
commands, since these include tokens which can be used to restart the stream on
connection errors.

The client should keep track of the token in the last RDATA command received
for each stream so that on reconnection it can start streaming from the correct
place. Note: not all RDATA have valid tokens due to batching. See
``RdataCommand`` for more details.

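A sketch of that client-side bookkeeping (illustrative names, not synapse's
actual code)::

    # Track the last valid token per stream so that, after reconnecting,
    # the client can resubscribe from where it left off.
    last_tokens = {}

    def on_rdata(stream_name, token, row):
        # ... apply the row to local caches/state here ...
        if token != "batch":  # batched RDATA rows carry no usable token
            last_tokens[stream_name] = int(token)

    def resubscribe(send_line):
        # Called after reconnecting; send_line writes one command line
        # to the new connection.
        for stream_name, token in last_tokens.items():
            send_line("REPLICATE %s %d" % (stream_name, token))
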
Example
~~~~~~~

An example interaction is shown below. Each line is prefixed with '>' or '<' to
indicate which side is sending; these are *not* included on the wire::

    * connection established *
    > SERVER localhost:8823
    > PING 1490197665618
    < NAME synapse.app.appservice
    < PING 1490197665618
    < REPLICATE events 1
    < REPLICATE backfill 1
    < REPLICATE caches 1
    > POSITION events 1
    > POSITION backfill 1
    > POSITION caches 1
    > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
    > RDATA events 14 ["$149019767112vOHxz:localhost:8823",
        "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
    < PING 1490197675618
    > ERROR server stopping
    * connection closed by server *

The ``POSITION`` command sent by the server is used to set the client's position
without needing to send data with the ``RDATA`` command.


An example of a batched set of ``RDATA`` is::

    > RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
    > RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]

In this case the client shouldn't advance their caches token until it sees
the last ``RDATA``.

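A sketch of how a client might accumulate such a batch before advancing its
token (``invalidate_cache`` here is a hypothetical hook, not a real synapse
function)::

    pending_rows = []

    def on_caches_rdata(token, row):
        # Rows arrive with the token "batch" until the final RDATA carries
        # the real stream token; only then is it safe to advance.
        pending_rows.append(row)
        if token == "batch":
            return None  # more rows to come for this token
        for queued in pending_rows:
            invalidate_cache(queued)
        del pending_rows[:]
        return int(token)  # the new caches token
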
List of commands
~~~~~~~~~~~~~~~~

The list of valid commands, with the side that can send each one: server (S) or client (C):

SERVER (S)
    Sent at the start to identify which server the client is talking to

RDATA (S)
    A single update in a stream

POSITION (S)
    The position of the stream has been updated

ERROR (S, C)
    There was an error

PING (S, C)
    Sent periodically to ensure the connection is still alive

NAME (C)
    Sent at the start by client to inform the server who they are

REPLICATE (C)
    Asks the server to replicate a given stream

USER_SYNC (C)
    A user has started or stopped syncing

FEDERATION_ACK (C)
    Acknowledge receipt of some federation data

REMOVE_PUSHER (C)
    Inform the server a pusher should be removed

INVALIDATE_CACHE (C)
    Inform the server a cache should be invalidated

SYNC (S, C)
    Used exclusively in tests


See ``synapse/replication/tcp/commands.py`` for a detailed description and the
format of each command.
@@ -9,35 +9,31 @@ the Home Server to generate credentials that are valid for use on the TURN
server through the use of a secret shared between the Home Server and the
TURN server.

This document describes how to install coturn
(https://github.com/coturn/coturn) which also supports the TURN REST API,
This document described how to install coturn
(https://code.google.com/p/coturn/) which also supports the TURN REST API,
and integrate it with synapse.

coturn Setup
============

You may be able to set up coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process.

1. Check out coturn::

    git clone https://github.com/coturn/coturn.git coturn
    svn checkout http://coturn.googlecode.com/svn/trunk/ coturn
    cd coturn

2. Configure it::

    ./configure

   You may need to install ``libevent2``: if so, you should do so
   You may need to install libevent2: if so, you should do so
   in the way recommended by your operating system.
   You can ignore warnings about lack of database support: a
   database is unnecessary for this purpose.

3. Build and install it::

    make
    make install

4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant
4. Make a config file in /etc/turnserver.conf. You can customise
   a config file from turnserver.conf.default. The relevant
   lines, with example values, are::

    lt-cred-mech
@@ -45,43 +41,19 @@ You may be able to set up coturn via your package manager, or set it up manually
    static-auth-secret=[your secret key here]
    realm=turn.myserver.org

   See turnserver.conf for explanations of the options.
   See turnserver.conf.default for explanations of the options.
   One way to generate the static-auth-secret is with pwgen::

    pwgen -s 64 1

5. Consider your security settings. TURN lets users request a relay
   which will connect to arbitrary IP addresses and ports. At the least
   we recommend:

    # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
    no-tcp-relay

    # don't let the relay ever try to connect to private IP address ranges within your network (if any)
    # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
    denied-peer-ip=10.0.0.0-10.255.255.255
    denied-peer-ip=192.168.0.0-192.168.255.255
    denied-peer-ip=172.16.0.0-172.31.255.255

    # special case the turn server itself so that client->TURN->TURN->client flows work
    allowed-peer-ip=10.0.0.1

    # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
    user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
    total-quota=1200

   Ideally coturn should refuse to relay traffic which isn't SRTP;
   see https://github.com/matrix-org/synapse/issues/2009

6. Ensure your firewall allows traffic into the TURN server on
5. Ensure youe firewall allows traffic into the TURN server on
   the ports you've configured it to listen on (remember to allow
   both TCP and UDP TURN traffic)
   both TCP and UDP if you've enabled both).

7. If you've configured coturn to support TLS/DTLS, generate or
6. If you've configured coturn to support TLS/DTLS, generate or
   import your private key and certificate.

8. Start the turn server::

7. Start the turn server::

    bin/turnserver -o


@@ -106,19 +78,12 @@ Your home server configuration file needs the following extra keys:
   to refresh credentials. The TURN REST API specification recommends
   one day (86400000).

4. "turn_allow_guests": Whether to allow guest users to use the TURN
   server. This is enabled by default, as otherwise VoIP will not
   work reliably for guests. However, it does introduce a security risk
   as it lets guests connect to arbitrary endpoints without having gone
   through a CAPTCHA or similar to register a real account.

As an example, here is the relevant section of the config file for
matrix.org::

    turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
    turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
    turn_user_lifetime: 86400000
    turn_allow_guests: True

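For reference, the credentials synapse derives from ``turn_shared_secret``
follow the TURN REST API scheme: the username encodes an expiry timestamp and
the password is a base64-encoded HMAC-SHA1 of that username. A rough sketch in
Python (illustrative only; synapse computes this internally, and the exact
username format may also embed a user id)::

    import base64
    import hashlib
    import hmac
    import time

    def turn_credentials(shared_secret, user_lifetime_ms):
        # Username is the Unix time at which the credential expires.
        username = str(int(time.time()) + user_lifetime_ms // 1000)
        digest = hmac.new(shared_secret.encode("ascii"),
                          username.encode("ascii"),
                          hashlib.sha1).digest()
        password = base64.b64encode(digest).decode("ascii")
        return username, password
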
Now, restart synapse::

@@ -1,74 +0,0 @@
URL Previews
============

Design notes on a URL previewing service for Matrix:

Options are:

1. Have an AS which listens for URLs, downloads them, and inserts an event that describes their metadata.
   * Pros:
     * Decouples the implementation entirely from Synapse.
     * Uses existing Matrix events & content repo to store the metadata.
   * Cons:
     * Which AS should provide this service for a room, and why should you trust it?
     * Doesn't work well with E2E; you'd have to cut the AS into every room
     * the AS would end up subscribing to every room anyway.

2. Have a generic preview API (nothing to do with Matrix) that provides a previewing service:
   * Pros:
     * Simple and flexible; can be used by any clients at any point
   * Cons:
     * If each HS provides one of these independently, all the HSes in a room may needlessly DoS the target URI
     * We need somewhere to store the URL metadata rather than just using Matrix itself
     * We can't piggyback on matrix to distribute the metadata between HSes.

3. Make the synapse of the sending user responsible for spidering the URL and inserting an event asynchronously which describes the metadata.
   * Pros:
     * Works transparently for all clients
     * Piggy-backs nicely on using Matrix for distributing the metadata.
     * No confusion as to which AS
   * Cons:
     * Doesn't work with E2E
     * We might want to decouple the implementation of the spider from the HS, given spider behaviour can be quite complicated and evolve much more rapidly than the HS. It's more like a bot than a core part of the server.

4. Make the sending client use the preview API and insert the event itself when successful.
   * Pros:
     * Works well with E2E
     * No custom server functionality
     * Lets the client customise the preview that they send (like on FB)
   * Cons:
     * Entirely specific to the sending client, whereas it'd be nice if /any/ URL was correctly previewed if clients support it.

5. Have the option of specifying a shared (centralised) previewing service used by a room, to avoid all the different HSes in the room DoSing the target.

The best solution is probably a combination of 2 and 4.
 * Sending clients do their best to create and send a preview at the point of sending the message, perhaps delaying the message until the preview is computed? (This also lets the user validate the preview before sending)
 * Receiving clients have the option of going and creating their own preview if one doesn't arrive soon enough (or if the original sender didn't create one)

This is a bit magical though in that the preview could come from two entirely different sources - the sending HS or your local one. However, this can always be exposed to users: "Generate your own URL previews if none are available?"

This is tantamount also to senders calculating their own thumbnails for sending in advance of the main content - we are trusting the sender not to lie about the content in the thumbnail. Whereas currently thumbnails are calculated by the receiving homeserver to avoid this attack.

However, this kind of phishing attack does exist whether we let senders pick their thumbnails or not, in that a malicious sender can send normal text messages around the attachment claiming it to be legitimate. We could rely on (future) reputation/abuse management to punish users who phish (be it with bogus metadata or bogus descriptions). Bogus metadata is particularly bad though, especially if it's avoidable.

As a first cut, let's do #2 and have the receiver hit the API to calculate its own previews (as it does currently for image thumbnails). We can then extend/optimise this to option 4 as a special extra if needed.

API
---

GET /_matrix/media/r0/preview_url?url=http://wherever.com
200 OK
{
    "og:type" : "article"
    "og:url" : "https://twitter.com/matrixdotorg/status/684074366691356672"
    "og:title" : "Matrix on Twitter"
    "og:image" : "https://pbs.twimg.com/profile_images/500400952029888512/yI0qtFi7_400x400.png"
    "og:description" : "“Synapse 0.12 is out! Lots of polishing, performance &amp; bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”"
    "og:site_name" : "Twitter"
}

* Downloads the URL
* If HTML, just stores it in RAM and parses it for OG meta tags
  * Download any media OG meta tags to the media repo, and refer to them in the OG via mxc:// URIs.
  * If a media filetype we know we can thumbnail: store it on disk, and hand it to the thumbnailer. Generate OG meta tags from the thumbnailer contents.
* Otherwise, don't bother downloading further.
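As a usage sketch, a client hitting this endpoint from Python might look like
the following (Python 3 syntax for illustration; the homeserver URL and access
token are placeholders, and in practice the endpoint requires authentication)::

    import json
    from urllib.parse import urlencode
    from urllib.request import urlopen

    def get_preview(homeserver, url, access_token):
        query = urlencode({"url": url, "access_token": access_token})
        resp = urlopen("%s/_matrix/media/r0/preview_url?%s"
                       % (homeserver, query))
        return json.loads(resp.read().decode("utf-8"))

    og = get_preview("https://example.com", "http://wherever.com", "<token>")
    print(og.get("og:title"))
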
@@ -1,94 +0,0 @@
Scaling synapse via workers
---------------------------

Synapse has experimental support for splitting out functionality into
multiple separate python processes, helping greatly with scalability. These
processes are called 'workers', and are (eventually) intended to scale
horizontally and independently.

All processes continue to share the same database instance, and as such, workers
only work with postgres-based synapse deployments (sharing a single sqlite
database across multiple processes is a recipe for disaster, plus you should be
using postgres anyway if you care about scalability).

The workers communicate with the master synapse process via a synapse-specific
TCP protocol called 'replication' - analogous to MySQL- or Postgres-style
database replication; feeding a stream of relevant data to the workers so they
can be kept in sync with the main synapse process and database state.

To enable workers, you need to add a replication listener to the master synapse, e.g.::

    listeners:
     - port: 9092
       bind_address: '127.0.0.1'
       type: replication

Under **no circumstances** should this replication API listener be exposed to the
public internet; it currently implements no authentication whatsoever and is
unencrypted.

You then create a set of configs for the various worker processes. These
worker configuration files should be stored in a dedicated subdirectory, to allow
synctl to manipulate them.

The currently available worker applications are:
 * synapse.app.pusher - handles sending push notifications to sygnal and email
 * synapse.app.synchrotron - handles /sync endpoints. can scale horizontally through multiple instances.
 * synapse.app.appservice - handles output traffic to Application Services
 * synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API)
 * synapse.app.media_repository - handles the media repository.
 * synapse.app.client_reader - handles client API endpoints like /publicRooms

Each worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that worker,
e.g. the HTTP listener that it provides (if any), logging configuration, etc.
You should minimise the number of overrides though to maintain a usable config.

You must specify the type of worker application (worker_app) and the replication
endpoint that it's talking to on the main synapse process (worker_replication_host
and worker_replication_port).

For instance::

    worker_app: synapse.app.synchrotron

    # The replication listener on the synapse to talk to.
    worker_replication_host: 127.0.0.1
    worker_replication_port: 9092

    worker_listeners:
     - type: http
       port: 8083
       resources:
         - names:
           - client

    worker_daemonize: True
    worker_pid_file: /home/matrix/synapse/synchrotron.pid
    worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml

...is a full configuration for a synchrotron worker instance, which will expose a
plain HTTP /sync endpoint on port 8083 separately from the /sync endpoint provided
by the main synapse.

Obviously you should configure your loadbalancer to route the /sync endpoint to
the synchrotron instance(s) in this setup.

Finally, to actually run your worker-based synapse, you must pass synctl the -a
commandline option to tell it to operate on all the worker configurations found
in the given directory, e.g.::

    synctl -a $CONFIG/workers start

Currently one should always restart all workers when restarting or upgrading
synapse, unless you explicitly know it's safe not to. For instance, restarting
synapse without restarting all the synchrotrons may result in broken typing
notifications.

To manipulate a specific worker, you pass the -w option to synctl::

    synctl -w $CONFIG/workers/synchrotron.yaml restart

All of the above is highly experimental and subject to change as Synapse evolves,
but is documented here to help folks who need highly scalable Synapses similar
to the one running matrix.org!
@@ -1,22 +0,0 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
./dendron/jenkins/build_dendron.sh
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
    --synapse-directory $WORKSPACE \
    --dendron $WORKSPACE/dendron/bin/dendron \
    --haproxy \
@@ -1,19 +0,0 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
./dendron/jenkins/build_dendron.sh
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
    --synapse-directory $WORKSPACE \
    --dendron $WORKSPACE/dendron/bin/dendron \
@@ -1,22 +0,0 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"

# Output flake8 violations to violations.flake8.log
export PEP8SUFFIX="--output-file=violations.flake8.log"

rm .coverage* || echo "No coverage files to remove"

tox -e packaging -e pep8
@@ -1,17 +0,0 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git

./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
    --synapse-directory $WORKSPACE \
@@ -1,15 +0,0 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git

./sytest/jenkins/install_and_run.sh \
    --synapse-directory $WORKSPACE \
@@ -1,30 +0,0 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"

# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decide whether to
# mark this build UNSTABLE or FAILURE.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"

rm .coverage* || echo "No coverage files to remove"

tox --notest -e py27
TOX_BIN=$WORKSPACE/.tox/py27/bin
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
$TOX_BIN/pip install lxml

tox -e py27
@@ -1,44 +0,0 @@
#! /bin/bash

# This clones a project from github into a named subdirectory
# If the project has a branch with the same name as this branch
# then it will checkout that branch after cloning.
# Otherwise it will checkout "origin/develop."
# The first argument is the name of the directory to checkout
# the branch into.
# The second argument is the URL of the remote repository to checkout.
# Usually something like https://github.com/matrix-org/sytest.git

set -eux

NAME=$1
PROJECT=$2
BASE=".$NAME-base"

# Update our mirror.
if [ ! -d ".$NAME-base" ]; then
  # Create a local mirror of the source repository.
  # This saves us from having to download the entire repository
  # when this script is next run.
  git clone "$PROJECT" "$BASE" --mirror
else
  # Fetch any updates from the source repository.
  (cd "$BASE"; git fetch -p)
fi

# Remove the existing repository so that we have a clean copy
rm -rf "$NAME"
# Cloning with --shared means that we will share portions of the
# .git directory with our local mirror.
git clone "$BASE" "$NAME" --shared

# Jenkins may have supplied us with the name of the branch in the
# environment. Otherwise we will have to guess based on the current
# commit.
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
cd "$NAME"
# check out the relevant branch
git checkout "${GIT_BRANCH}" || (
    echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop"
    git checkout "origin/develop"
)
@@ -1,20 +0,0 @@
#! /bin/bash

cd "`dirname $0`/.."

TOX_DIR=$WORKSPACE/.tox

mkdir -p $TOX_DIR

if ! [ $TOX_DIR -ef .tox ]; then
  ln -s "$TOX_DIR" .tox
fi

# set up the virtualenv
tox -e py27 --notest -v

TOX_BIN=$TOX_DIR/py27/bin
$TOX_BIN/pip install setuptools
{ python synapse/python_dependencies.py
  echo lxml psycopg2
} | xargs $TOX_BIN/pip install
@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -25,26 +25,17 @@ import urllib2
import yaml


def request_registration(user, password, server_location, shared_secret, admin=False):
def request_registration(user, password, server_location, shared_secret):
    mac = hmac.new(
        key=shared_secret,
        msg=user,
        digestmod=hashlib.sha1,
    )

    mac.update(user)
    mac.update("\x00")
    mac.update(password)
    mac.update("\x00")
    mac.update("admin" if admin else "notadmin")

    mac = mac.hexdigest()
    ).hexdigest()

    data = {
        "user": user,
        "username": user,
        "password": password,
        "mac": mac,
        "type": "org.matrix.login.shared_secret",
        "admin": admin,
    }

    server_location = server_location.rstrip("/")
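For clarity, the MAC in the newer version above is HMAC-SHA1, keyed with the
homeserver's registration shared secret, over the user, password, and admin
flag, separated by NUL bytes. A standalone sketch (Python 3 syntax for
illustration; the script itself is Python 2)::

    import hashlib
    import hmac

    def registration_mac(shared_secret, user, password, admin=False):
        # NUL-separated HMAC over the registration parameters.
        mac = hmac.new(key=shared_secret.encode("utf-8"),
                       digestmod=hashlib.sha1)
        mac.update(user.encode("utf-8"))
        mac.update(b"\x00")
        mac.update(password.encode("utf-8"))
        mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        return mac.hexdigest()

    print(registration_mac("shared-secret", "alice", "hunter2", admin=True))
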
@@ -52,7 +43,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F
    print "Sending registration request..."

    req = urllib2.Request(
        "%s/_matrix/client/api/v1/register" % (server_location,),
        "%s/_matrix/client/v2_alpha/register" % (server_location,),
        data=json.dumps(data),
        headers={'Content-Type': 'application/json'}
    )
@@ -76,7 +67,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F
        sys.exit(1)


def register_new_user(user, password, server_location, shared_secret, admin):
def register_new_user(user, password, server_location, shared_secret):
    if not user:
        try:
            default_user = getpass.getuser()
@@ -107,14 +98,7 @@ def register_new_user(user, password, server_location, shared_secret, admin):
        print "Passwords do not match"
        sys.exit(1)

    if not admin:
        admin = raw_input("Make admin [no]: ")
        if admin in ("y", "yes", "true"):
            admin = True
        else:
            admin = False

    request_registration(user, password, server_location, shared_secret, bool(admin))
    request_registration(user, password, server_location, shared_secret)


if __name__ == "__main__":
@@ -134,11 +118,6 @@ if __name__ == "__main__":
        default=None,
        help="New password for user. Will prompt if omitted.",
    )
    parser.add_argument(
        "-a", "--admin",
        action="store_true",
        help="Register new user as an admin. Will prompt if omitted.",
    )

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
@@ -171,4 +150,4 @@ if __name__ == "__main__":
    else:
        secret = args.shared_secret

    register_new_user(args.user, args.password, args.server_url, secret, args.admin)
    register_new_user(args.user, args.password, args.server_url, secret)
@@ -1,7 +0,0 @@
.header {
    border-bottom: 4px solid #e4f7ed ! important;
}

.notif_link a, .footer a {
    color: #76CFA6 ! important;
}
@@ -1,156 +0,0 @@
body {
    margin: 0px;
}

pre, code {
    word-break: break-word;
    white-space: pre-wrap;
}

#page {
    font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
    font-color: #454545;
    font-size: 12pt;
    width: 100%;
    padding: 20px;
}

#inner {
    width: 640px;
}

.header {
    width: 100%;
    height: 87px;
    color: #454545;
    border-bottom: 4px solid #e5e5e5;
}

.logo {
    text-align: right;
    margin-left: 20px;
}

.salutation {
    padding-top: 10px;
    font-weight: bold;
}

.summarytext {
}

.room {
    width: 100%;
    color: #454545;
    border-bottom: 1px solid #e5e5e5;
}

.room_header td {
    padding-top: 38px;
    padding-bottom: 10px;
    border-bottom: 1px solid #e5e5e5;
}

.room_name {
    vertical-align: middle;
    font-size: 18px;
    font-weight: bold;
}

.room_header h2 {
    margin-top: 0px;
    margin-left: 75px;
    font-size: 20px;
}

.room_avatar {
    width: 56px;
    line-height: 0px;
    text-align: center;
    vertical-align: middle;
}

.room_avatar img {
    width: 48px;
    height: 48px;
    object-fit: cover;
    border-radius: 24px;
}

.notif {
    border-bottom: 1px solid #e5e5e5;
    margin-top: 16px;
    padding-bottom: 16px;
}

.historical_message .sender_avatar {
    opacity: 0.3;
}

/* spell out opacity and historical_message class names for Outlook aka Word */
.historical_message .sender_name {
    color: #e3e3e3;
}

.historical_message .message_time {
    color: #e3e3e3;
}

.historical_message .message_body {
    color: #c7c7c7;
}

.historical_message td,
.message td {
    padding-top: 10px;
}

.sender_avatar {
    width: 56px;
    text-align: center;
    vertical-align: top;
}

.sender_avatar img {
    margin-top: -2px;
    width: 32px;
    height: 32px;
    border-radius: 16px;
}

.sender_name {
    display: inline;
    font-size: 13px;
    color: #a2a2a2;
}

.message_time {
    text-align: right;
    width: 100px;
    font-size: 11px;
    color: #a2a2a2;
}

.message_body {
}

.notif_link td {
    padding-top: 10px;
    padding-bottom: 10px;
    font-weight: bold;
}

.notif_link a, .footer a {
    color: #454545;
    text-decoration: none;
}

.debug {
    font-size: 10px;
    color: #888;
}

.footer {
    margin-top: 20px;
    text-align: center;
}
@@ -1,45 +0,0 @@
{% for message in notif.messages %}
    <tr class="{{ "historical_message" if message.is_historical else "message" }}">
        <td class="sender_avatar">
            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
                {% if message.sender_avatar_url %}
                    <img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
                {% else %}
                    {% if message.sender_hash % 3 == 0 %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
                    {% elif message.sender_hash % 3 == 1 %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
                    {% else %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
                    {% endif %}
                {% endif %}
            {% endif %}
        </td>
        <td class="message_contents">
            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
                <div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
            {% endif %}
            <div class="message_body">
                {% if message.msgtype == "m.text" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.emote" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.notice" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.image" %}
                    <img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
                {% elif message.msgtype == "m.file" %}
                    <span class="filename">{{ message.body_text_plain }}</span>
                {% endif %}
            </div>
        </td>
        <td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
    </tr>
{% endfor %}
<tr class="notif_link">
    <td></td>
    <td>
        <a href="{{ notif.link }}">View {{ room.title }}</a>
    </td>
    <td></td>
</tr>
@@ -1,16 +0,0 @@
{% for message in notif.messages %}
{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
{% if message.msgtype == "m.text" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.emote" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.notice" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.image" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.file" %}
{{ message.body_text_plain }}
{% endif %}
{% endfor %}

View {{ room.title }} at {{ notif.link }}
@@ -1,55 +0,0 @@
<!doctype html>
<html lang="en">
    <head>
        <style type="text/css">
            {% include 'mail.css' without context %}
            {% include "mail-%s.css" % app_name ignore missing without context %}
        </style>
    </head>
    <body>
        <table id="page">
            <tr>
                <td> </td>
                <td id="inner">
                    <table class="header">
                        <tr>
                            <td>
                                <div class="salutation">Hi {{ user_display_name }},</div>
                                <div class="summarytext">{{ summary_text }}</div>
                            </td>
                            <td class="logo">
                                {% if app_name == "Riot" %}
                                    <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
                                {% elif app_name == "Vector" %}
                                    <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
                                {% else %}
                                    <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
                                {% endif %}
                            </td>
                        </tr>
                    </table>
                    {% for room in rooms %}
                        {% include 'room.html' with context %}
                    {% endfor %}
                    <div class="footer">
                        <a href="{{ unsubscribe_link }}">Unsubscribe</a>
                        <br/>
                        <br/>
                        <div class="debug">
                            Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
                            an event was received at {{ reason.received_at|format_ts("%c") }}
                            which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
                            {% if reason.last_sent_ts %}
                                and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
                                which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
                            {% else %}
                                and we don't have a last time we sent a mail for this room.
                            {% endif %}
                        </div>
                    </div>
                </td>
                <td> </td>
            </tr>
        </table>
    </body>
</html>
@@ -1,10 +0,0 @@
Hi {{ user_display_name }},

{{ summary_text }}

{% for room in rooms %}
{% include 'room.txt' with context %}
{% endfor %}

You can disable these notifications at {{ unsubscribe_link }}

@@ -1,33 +0,0 @@
<table class="room">
    <tr class="room_header">
        <td class="room_avatar">
            {% if room.avatar_url %}
                <img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
            {% else %}
                {% if room.hash % 3 == 0 %}
                    <img alt="" src="https://vector.im/beta/img/76cfa6.png" />
                {% elif room.hash % 3 == 1 %}
                    <img alt="" src="https://vector.im/beta/img/50e2c2.png" />
                {% else %}
                    <img alt="" src="https://vector.im/beta/img/f4c371.png" />
                {% endif %}
            {% endif %}
        </td>
        <td class="room_name" colspan="2">
            {{ room.title }}
        </td>
    </tr>
    {% if room.invite %}
        <tr>
            <td></td>
            <td>
                <a href="{{ room.link }}">Join the conversation.</a>
            </td>
            <td></td>
        </tr>
    {% else %}
        {% for notif in room.notifs %}
            {% include 'notif.html' with context %}
        {% endfor %}
    {% endif %}
</table>
@@ -1,9 +0,0 @@
{{ room.title }}

{% if room.invite %}
You've been invited, join at {{ room.link }}
{% else %}
{% for notif in room.notifs %}
{% include 'notif.txt' with context %}
{% endfor %}
{% endif %}
@@ -1,116 +0,0 @@
import psycopg2
import yaml
import sys
import json
import time
import hashlib
from unpaddedbase64 import encode_base64
from signedjson.key import read_signing_keys
from signedjson.sign import sign_json
from canonicaljson import encode_canonical_json


def select_v1_keys(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, key_id, verify_key in rows:
        results.setdefault(server_name, {})[key_id] = encode_base64(verify_key)
    return results


def select_v1_certs(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, tls_certificate in rows:
        results[server_name] = tls_certificate
    return results


def select_v2_json(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, key_id, key_json in rows:
        results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8"))
    return results


def convert_v1_to_v2(server_name, valid_until, keys, certificate):
    return {
        "old_verify_keys": {},
        "server_name": server_name,
        "verify_keys": {
            key_id: {"key": key}
            for key_id, key in keys.items()
        },
        "valid_until_ts": valid_until,
        "tls_fingerprints": [fingerprint(certificate)],
    }


def fingerprint(certificate):
    finger = hashlib.sha256(certificate)
    return {"sha256": encode_base64(finger.digest())}


def rows_v2(server, json):
    valid_until = json["valid_until_ts"]
    key_json = encode_canonical_json(json)
    for key_id in json["verify_keys"]:
        yield (server, key_id, "-", valid_until, valid_until, buffer(key_json))


def main():
    config = yaml.load(open(sys.argv[1]))
    valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24

    server_name = config["server_name"]
    signing_key = read_signing_keys(open(config["signing_key_path"]))[0]

    database = config["database"]
    assert database["name"] == "psycopg2", "Can only convert for postgresql"
    args = database["args"]
    args.pop("cp_max")
    args.pop("cp_min")
    connection = psycopg2.connect(**args)
    keys = select_v1_keys(connection)
    certificates = select_v1_certs(connection)
    json = select_v2_json(connection)

    result = {}
    for server in keys:
        if not server in json:
            v2_json = convert_v1_to_v2(
                server, valid_until, keys[server], certificates[server]
            )
            v2_json = sign_json(v2_json, server_name, signing_key)
            result[server] = v2_json

    yaml.safe_dump(result, sys.stdout, default_flow_style=False)

    rows = list(
        row for server, json in result.items()
        for row in rows_v2(server, json)
    )

    cursor = connection.cursor()
    cursor.executemany(
        "INSERT INTO server_keys_json ("
        " server_name, key_id, from_server,"
        " ts_added_ms, ts_valid_until_ms, key_json"
        ") VALUES (%s, %s, %s, %s, %s, %s)",
        rows
    )
    connection.commit()


if __name__ == '__main__':
    main()
@@ -1,196 +0,0 @@
#! /usr/bin/python

import ast
import yaml

class DefinitionVisitor(ast.NodeVisitor):
    def __init__(self):
        super(DefinitionVisitor, self).__init__()
        self.functions = {}
        self.classes = {}
        self.names = {}
        self.attrs = set()
        self.definitions = {
            'def': self.functions,
            'class': self.classes,
            'names': self.names,
            'attrs': self.attrs,
        }

    def visit_Name(self, node):
        self.names.setdefault(type(node.ctx).__name__, set()).add(node.id)

    def visit_Attribute(self, node):
        self.attrs.add(node.attr)
        for child in ast.iter_child_nodes(node):
            self.visit(child)

    def visit_ClassDef(self, node):
        visitor = DefinitionVisitor()
        self.classes[node.name] = visitor.definitions
        for child in ast.iter_child_nodes(node):
            visitor.visit(child)

    def visit_FunctionDef(self, node):
        visitor = DefinitionVisitor()
        self.functions[node.name] = visitor.definitions
        for child in ast.iter_child_nodes(node):
            visitor.visit(child)


def non_empty(defs):
    functions = {name: non_empty(f) for name, f in defs['def'].items()}
    classes = {name: non_empty(f) for name, f in defs['class'].items()}
    result = {}
    if functions: result['def'] = functions
    if classes: result['class'] = classes
    names = defs['names']
    uses = []
    for name in names.get('Load', ()):
        if name not in names.get('Param', ()) and name not in names.get('Store', ()):
            uses.append(name)
    uses.extend(defs['attrs'])
    if uses: result['uses'] = uses
    result['names'] = names
    result['attrs'] = defs['attrs']
    return result


def definitions_in_code(input_code):
    input_ast = ast.parse(input_code)
    visitor = DefinitionVisitor()
    visitor.visit(input_ast)
    definitions = non_empty(visitor.definitions)
    return definitions


def definitions_in_file(filepath):
    with open(filepath) as f:
        return definitions_in_code(f.read())


def defined_names(prefix, defs, names):
    for name, funcs in defs.get('def', {}).items():
        names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
        defined_names(prefix + name + ".", funcs, names)

    for name, funcs in defs.get('class', {}).items():
        names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
        defined_names(prefix + name + ".", funcs, names)


def used_names(prefix, item, defs, names):
    for name, funcs in defs.get('def', {}).items():
        used_names(prefix + name + ".", name, funcs, names)

    for name, funcs in defs.get('class', {}).items():
        used_names(prefix + name + ".", name, funcs, names)

    path = prefix.rstrip('.')
    for used in defs.get('uses', ()):
        if used in names:
            if item:
                names[item].setdefault('uses', []).append(used)
            names[used].setdefault('used', {}).setdefault(item, []).append(path)


if __name__ == '__main__':
    import sys, os, argparse, re

    parser = argparse.ArgumentParser(description='Find definitions.')
    parser.add_argument(
        "--unused", action="store_true", help="Only list unused definitions"
    )
    parser.add_argument(
        "--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
    )
    parser.add_argument(
        "--pattern", action="append", metavar="REGEXP",
        help="Search for a pattern"
    )
    parser.add_argument(
        "directories", nargs='+', metavar="DIR",
        help="Directories to search for definitions"
    )
    parser.add_argument(
        "--referrers", default=0, type=int,
        help="Include referrers up to the given depth"
    )
    parser.add_argument(
        "--referred", default=0, type=int,
        help="Include referred down to the given depth"
    )
    parser.add_argument(
        "--format", default="yaml",
        help="Output format, one of 'yaml' or 'dot'"
    )
    args = parser.parse_args()

    definitions = {}
    for directory in args.directories:
        for root, dirs, files in os.walk(directory):
            for filename in files:
                if filename.endswith(".py"):
                    filepath = os.path.join(root, filename)
                    definitions[filepath] = definitions_in_file(filepath)

    names = {}
    for filepath, defs in definitions.items():
        defined_names(filepath + ":", defs, names)

    for filepath, defs in definitions.items():
        used_names(filepath + ":", None, defs, names)

    patterns = [re.compile(pattern) for pattern in args.pattern or ()]
    ignore = [re.compile(pattern) for pattern in args.ignore or ()]

    result = {}
    for name, definition in names.items():
        if patterns and not any(pattern.match(name) for pattern in patterns):
            continue
        if ignore and any(pattern.match(name) for pattern in ignore):
            continue
        if args.unused and definition.get('used'):
            continue
        result[name] = definition

    referrer_depth = args.referrers
    referrers = set()
    while referrer_depth:
        referrer_depth -= 1
        for entry in result.values():
            for used_by in entry.get("used", ()):
                referrers.add(used_by)
        for name, definition in names.items():
            if not name in referrers:
                continue
            if ignore and any(pattern.match(name) for pattern in ignore):
                continue
            result[name] = definition

    referred_depth = args.referred
    referred = set()
    while referred_depth:
        referred_depth -= 1
        for entry in result.values():
            for uses in entry.get("uses", ()):
                referred.add(uses)
        for name, definition in names.items():
            if not name in referred:
                continue
            if ignore and any(pattern.match(name) for pattern in ignore):
                continue
            result[name] = definition

    if args.format == 'yaml':
        yaml.dump(result, sys.stdout, default_flow_style=False)
    elif args.format == 'dot':
        print "digraph {"
        for name, entry in result.items():
            print name
            for used_by in entry.get("used", ()):
                if used_by in result:
                    print used_by, "->", name
        print "}"
    else:
        raise ValueError("Unknown format %r" % (args.format))
@@ -1,24 +0,0 @@
#!/usr/bin/env python2

import pymacaroons
import sys

if len(sys.argv) == 1:
    sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],))
    sys.exit(1)

macaroon_string = sys.argv[1]
key = sys.argv[2] if len(sys.argv) > 2 else None

macaroon = pymacaroons.Macaroon.deserialize(macaroon_string)
print macaroon.inspect()

print ""

verifier = pymacaroons.Verifier()
verifier.satisfy_general(lambda c: True)
try:
    verifier.verify(macaroon, key)
    print "Signature is correct"
except Exception as e:
    print e.message
@@ -1,62 +0,0 @@
#! /usr/bin/python

import ast
import argparse
import os
import sys
import yaml

PATTERNS_V1 = []
PATTERNS_V2 = []

RESULT = {
    "v1": PATTERNS_V1,
    "v2": PATTERNS_V2,
}

class CallVisitor(ast.NodeVisitor):
    def visit_Call(self, node):
        if isinstance(node.func, ast.Name):
            name = node.func.id
        else:
            return


        if name == "client_path_patterns":
            PATTERNS_V1.append(node.args[0].s)
        elif name == "client_v2_patterns":
            PATTERNS_V2.append(node.args[0].s)


def find_patterns_in_code(input_code):
    input_ast = ast.parse(input_code)
    visitor = CallVisitor()
    visitor.visit(input_ast)


def find_patterns_in_file(filepath):
    with open(filepath) as f:
        find_patterns_in_code(f.read())


parser = argparse.ArgumentParser(description='Find url patterns.')

parser.add_argument(
    "directories", nargs='+', metavar="DIR",
    help="Directories to search for definitions"
)

args = parser.parse_args()


for directory in args.directories:
    for root, dirs, files in os.walk(directory):
        for filename in files:
            if filename.endswith(".py"):
                filepath = os.path.join(root, filename)
                find_patterns_in_file(filepath)

PATTERNS_V1.sort()
PATTERNS_V2.sort()

yaml.dump(RESULT, sys.stdout, default_flow_style=False)
@@ -1,47 +0,0 @@
#!/bin/bash

## CAUTION:
## This script will remove (hopefully) all trace of the given room ID from
## your homeserver.db

## Do not run it lightly.

ROOMID="$1"

sqlite3 homeserver.db <<EOF
DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
DELETE FROM event_edges WHERE room_id = '$ROOMID';
DELETE FROM room_depth WHERE room_id = '$ROOMID';
DELETE FROM state_forward_extremities WHERE room_id = '$ROOMID';
DELETE FROM events WHERE room_id = '$ROOMID';
DELETE FROM event_json WHERE room_id = '$ROOMID';
DELETE FROM state_events WHERE room_id = '$ROOMID';
DELETE FROM current_state_events WHERE room_id = '$ROOMID';
DELETE FROM room_memberships WHERE room_id = '$ROOMID';
DELETE FROM feedback WHERE room_id = '$ROOMID';
DELETE FROM topics WHERE room_id = '$ROOMID';
DELETE FROM room_names WHERE room_id = '$ROOMID';
DELETE FROM rooms WHERE room_id = '$ROOMID';
DELETE FROM room_hosts WHERE room_id = '$ROOMID';
DELETE FROM room_aliases WHERE room_id = '$ROOMID';
DELETE FROM state_groups WHERE room_id = '$ROOMID';
DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
DELETE FROM event_search_content WHERE c1room_id = '$ROOMID';
DELETE FROM guest_access WHERE room_id = '$ROOMID';
DELETE FROM history_visibility WHERE room_id = '$ROOMID';
DELETE FROM room_tags WHERE room_id = '$ROOMID';
DELETE FROM room_tags_revisions WHERE room_id = '$ROOMID';
DELETE FROM room_account_data WHERE room_id = '$ROOMID';
DELETE FROM event_push_actions WHERE room_id = '$ROOMID';
DELETE FROM local_invites WHERE room_id = '$ROOMID';
DELETE FROM pusher_throttle WHERE room_id = '$ROOMID';
DELETE FROM event_reports WHERE room_id = '$ROOMID';
DELETE FROM public_room_list_stream WHERE room_id = '$ROOMID';
DELETE FROM stream_ordering_to_exterm WHERE room_id = '$ROOMID';
DELETE FROM event_auth WHERE room_id = '$ROOMID';
DELETE FROM appservice_room_list WHERE room_id = '$ROOMID';
VACUUM;
EOF
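Note that the script splices $ROOMID directly into the SQL, so a room ID containing a single quote would break the statements. A hedged sketch of the same purge driven from Python with parameter binding instead (the table list is abridged to a few of those above; extend it to match your schema):

    import sqlite3

    TABLES = [  # abridged; mirror the full list in the script above
        "events", "event_json", "state_events", "current_state_events",
        "room_memberships", "rooms", "room_aliases",
    ]

    def nuke_room(db_path, room_id):
        conn = sqlite3.connect(db_path)
        with conn:  # one transaction for all the deletes
            for table in TABLES:
                # Table names cannot be bound, but the room ID can be.
                conn.execute(
                    "DELETE FROM %s WHERE room_id = ?" % (table,), (room_id,)
                )
        conn.execute("VACUUM")  # VACUUM must run outside the transaction
        conn.close()

    nuke_room("homeserver.db", "!roomid:example.com")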
@@ -1,67 +0,0 @@
import requests
import collections
import sys
import time
import json

Entry = collections.namedtuple("Entry", "name position rows")

ROW_TYPES = {}


def row_type_for_columns(name, column_names):
    column_names = tuple(column_names)
    row_type = ROW_TYPES.get((name, column_names))
    if row_type is None:
        row_type = collections.namedtuple(name, column_names)
        ROW_TYPES[(name, column_names)] = row_type
    return row_type


def parse_response(content):
    streams = json.loads(content)
    result = {}
    for name, value in streams.items():
        row_type = row_type_for_columns(name, value["field_names"])
        position = value["position"]
        rows = [row_type(*row) for row in value["rows"]]
        result[name] = Entry(name, position, rows)
    return result


def replicate(server, streams):
    return parse_response(requests.get(
        server + "/_synapse/replication",
        verify=False,
        params=streams
    ).content)


def main():
    server = sys.argv[1]

    streams = None
    while not streams:
        try:
            streams = {
                row.name: row.position
                for row in replicate(server, {"streams":"-1"})["streams"].rows
            }
        except requests.exceptions.ConnectionError as e:
            time.sleep(0.1)

    while True:
        try:
            results = replicate(server, streams)
        except:
            sys.stdout.write("connection_lost("+ repr(streams) + ")\n")
            break
        for update in results.values():
            for row in update.rows:
                sys.stdout.write(repr(row) + "\n")
            streams[update.name] = update.position


if __name__=='__main__':
    main()
@@ -56,9 +56,10 @@ if __name__ == '__main__':

    js = json.load(args.json)


    auth = Auth(Mock())
    check_auth(
        auth,
        [FrozenEvent(d) for d in js["auth_chain"]],
        [FrozenEvent(d) for d in js.get("pdus", [])],
        [FrozenEvent(d) for d in js["pdus"]],
    )
@@ -1,5 +1,5 @@
from synapse.crypto.event_signing import *
from unpaddedbase64 import encode_base64
from syutil.base64util import encode_base64

import argparse
import hashlib
@@ -1,7 +1,9 @@

from signedjson.sign import verify_signed_json
from signedjson.key import decode_verify_key_bytes, write_signing_keys
from unpaddedbase64 import decode_base64
from syutil.crypto.jsonsign import verify_signed_json
from syutil.crypto.signing_key import (
    decode_verify_key_bytes, write_signing_keys
)
from syutil.base64util import decode_base64

import urllib2
import json
@@ -1,5 +1,5 @@
#!/usr/bin/perl -pi
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
# limitations under the License.

$copyright = <<EOT;
/* Copyright 2016 OpenMarket Ltd
/* Copyright 2015 OpenMarket Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
#!/usr/bin/perl -pi
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
# limitations under the License.

$copyright = <<EOT;
# Copyright 2016 OpenMarket Ltd
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
21
scripts/database-prepare-for-0.0.1.sh
Executable file
@@ -0,0 +1,21 @@
#!/bin/bash

# This will prepare a synapse database for running with v0.0.1 of synapse.
# It will store all the user information, but will *delete* all messages and
# room data.

set -e

cp "$1" "$1.bak"

DUMP=$(sqlite3 "$1" << 'EOF'
.dump users
.dump access_tokens
.dump presence
.dump profiles
EOF
)

rm "$1"

sqlite3 "$1" <<< "$DUMP"
21
scripts/database-prepare-for-0.5.0.sh
Executable file
@@ -0,0 +1,21 @@
#!/bin/bash

# This will prepare a synapse database for running with v0.5.0 of synapse.
# It will store all the user information, but will *delete* all messages and
# room data.

set -e

cp "$1" "$1.bak"

DUMP=$(sqlite3 "$1" << 'EOF'
.dump users
.dump access_tokens
.dump presence
.dump profiles
EOF
)

rm "$1"

sqlite3 "$1" <<< "$DUMP"
@@ -116,19 +116,17 @@ def get_json(origin_name, origin_key, destination, path):
    authorization_headers = []

    for key, sig in signed_json["signatures"][origin_name].items():
        header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
            origin_name, key, sig,
        )
        authorization_headers.append(bytes(header))
        sys.stderr.write(header)
        sys.stderr.write("\n")
        authorization_headers.append(bytes(
            "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
                origin_name, key, sig,
            )
        ))

    result = requests.get(
        lookup(destination, path),
        headers={"Authorization": authorization_headers[0]},
        verify=False,
    )
    sys.stderr.write("Status Code: %d\n" % (result.status_code,))
    return result.json()


@@ -143,7 +141,6 @@ def main():
    )

    json.dump(result, sys.stdout)
    print ""

if __name__ == "__main__":
    main()
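For context, the X-Matrix header above carries a signature produced by signing the request JSON. A minimal sketch with the signedjson library (the key is an ephemeral one generated for illustration, and the request fields are illustrative; a real federation request signs the method, URI, origin and destination):

    from signedjson.key import generate_signing_key
    from signedjson.sign import sign_json

    signing_key = generate_signing_key("key1")  # illustrative, not persisted

    request = {
        "method": "GET",
        "uri": "/_matrix/federation/v1/version",
        "origin": "origin.example.com",
        "destination": "dest.example.com",
    }
    signed = sign_json(request, "origin.example.com", signing_key)

    # One Authorization header per signature, as in get_json() above.
    for key_id, sig in signed["signatures"]["origin.example.com"].items():
        print('X-Matrix origin=origin.example.com,key="%s",sig="%s"'
              % (key_id, sig))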
@@ -6,8 +6,8 @@ from synapse.crypto.event_signing import (
    add_event_pdu_content_hash, compute_pdu_event_reference_hash
)
from synapse.api.events.utils import prune_pdu
from unpaddedbase64 import encode_base64, decode_base64
from canonicaljson import encode_canonical_json
from syutil.base64util import encode_base64, decode_base64
from syutil.jsonutil import encode_canonical_json
import sqlite3
import sys
@@ -1,55 +0,0 @@
#!/usr/bin/env python

import argparse

import sys

import bcrypt
import getpass

import yaml

bcrypt_rounds=12
password_pepper = ""

def prompt_for_pass():
    password = getpass.getpass("Password: ")

    if not password:
        raise Exception("Password cannot be blank.")

    confirm_password = getpass.getpass("Confirm password: ")

    if password != confirm_password:
        raise Exception("Passwords do not match.")

    return password

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Calculate the hash of a new password, so that passwords"
                    " can be reset")
    parser.add_argument(
        "-p", "--password",
        default=None,
        help="New password for user. Will prompt if omitted.",
    )
    parser.add_argument(
        "-c", "--config",
        type=argparse.FileType('r'),
        help="Path to server config file. Used to read in bcrypt_rounds and password_pepper.",
    )

    args = parser.parse_args()
    if "config" in args and args.config:
        config = yaml.safe_load(args.config)
        bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
        password_config = config.get("password_config", {})
        password_pepper = password_config.get("pepper", password_pepper)
    password = args.password

    if not password:
        password = prompt_for_pass()

    print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))
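The final print line is Python 2; under Python 3 bcrypt insists on bytes, so the equivalent step needs explicit encoding (rounds and pepper shown with the script's defaults, and the password is illustrative):

    import bcrypt

    password = "s3cret"    # illustrative
    password_pepper = ""   # script default
    bcrypt_rounds = 12     # script default

    hashed = bcrypt.hashpw(
        (password + password_pepper).encode("utf-8"),
        bcrypt.gensalt(bcrypt_rounds),
    )
    print(hashed.decode("ascii"))

    # checkpw round-trips the hash for verification.
    assert bcrypt.checkpw(
        (password + password_pepper).encode("utf-8"), hashed,
    )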
24
scripts/nuke-room-from-db.sh
Executable file
@@ -0,0 +1,24 @@
#!/bin/bash

## CAUTION:
## This script will remove (hopefully) all trace of the given room ID from
## your homeserver.db

## Do not run it lightly.

ROOMID="$1"

sqlite3 homeserver.db <<EOF
DELETE FROM context_depth WHERE context = '$ROOMID';
DELETE FROM current_state WHERE context = '$ROOMID';
DELETE FROM feedback WHERE room_id = '$ROOMID';
DELETE FROM messages WHERE room_id = '$ROOMID';
DELETE FROM pdu_backward_extremities WHERE context = '$ROOMID';
DELETE FROM pdu_edges WHERE context = '$ROOMID';
DELETE FROM pdu_forward_extremities WHERE context = '$ROOMID';
DELETE FROM pdus WHERE context = '$ROOMID';
DELETE FROM room_data WHERE room_id = '$ROOMID';
DELETE FROM room_memberships WHERE room_id = '$ROOMID';
DELETE FROM rooms WHERE room_id = '$ROOMID';
DELETE FROM state_pdus WHERE context = '$ROOMID';
EOF
288
scripts/synapse_port_db → scripts/port_from_sqlite_to_postgres.py
Executable file → Normal file
@@ -1,6 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,7 +18,6 @@ from twisted.enterprise import adbapi

from synapse.storage._base import LoggingTransaction, SQLBaseStore
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database

import argparse
import curses
@@ -30,17 +28,14 @@ import traceback
import yaml


logger = logging.getLogger("synapse_port_db")
logger = logging.getLogger("port_from_sqlite_to_postgres")


BOOLEAN_COLUMNS = {
    "events": ["processed", "outlier", "contains_url"],
    "events": ["processed", "outlier"],
    "rooms": ["is_public"],
    "event_edges": ["is_state"],
    "presence_list": ["accepted"],
    "presence_stream": ["currently_active"],
    "public_room_list_stream": ["visibility"],
    "device_lists_outbound_pokes": ["sent"],
}


@@ -72,15 +67,6 @@ APPEND_ONLY_TABLES = [
    "state_groups_state",
    "event_to_state_groups",
    "rejections",
    "event_search",
    "presence_stream",
    "push_rules_stream",
    "current_state_resets",
    "ex_outlier_stream",
    "cache_invalidation_stream",
    "public_room_list_stream",
    "state_group_edges",
    "stream_ordering_to_exterm",
]


@@ -102,16 +88,14 @@ class Store(object):

    _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
    _simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
    _simple_select_one = SQLBaseStore.__dict__["_simple_select_one"]
    _simple_select_one_txn = SQLBaseStore.__dict__["_simple_select_one_txn"]
    _simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
    _simple_select_one_onecol_txn = SQLBaseStore.__dict__[
        "_simple_select_one_onecol_txn"
    ]
    _simple_select_one_onecol_txn = SQLBaseStore.__dict__["_simple_select_one_onecol_txn"]

    _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
    _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]

    _execute_and_decode = SQLBaseStore.__dict__["_execute_and_decode"]

    def runInteraction(self, desc, func, *args, **kwargs):
        def r(conn):
            try:
@@ -121,7 +105,7 @@ class Store(object):
            try:
                txn = conn.cursor()
                return func(
                    LoggingTransaction(txn, desc, self.database_engine, []),
                    LoggingTransaction(txn, desc, self.database_engine),
                    *args, **kwargs
                )
            except self.database_engine.module.DatabaseError as e:
@@ -172,40 +156,31 @@ class Porter(object):
    def setup_table(self, table):
        if table in APPEND_ONLY_TABLES:
            # It's safe to just carry on inserting.
            row = yield self.postgres_store._simple_select_one(
            next_chunk = yield self.postgres_store._simple_select_one_onecol(
                table="port_from_sqlite3",
                keyvalues={"table_name": table},
                retcols=("forward_rowid", "backward_rowid"),
                retcol="rowid",
                allow_none=True,
            )

            total_to_port = None
            if row is None:
            if next_chunk is None:
                if table == "sent_transactions":
                    forward_chunk, already_ported, total_to_port = (
                    next_chunk, already_ported, total_to_port = (
                        yield self._setup_sent_transactions()
                    )
                    backward_chunk = 0
                else:
                    yield self.postgres_store._simple_insert(
                        table="port_from_sqlite3",
                        values={
                            "table_name": table,
                            "forward_rowid": 1,
                            "backward_rowid": 0,
                        }
                        values={"table_name": table, "rowid": 1}
                    )

                    forward_chunk = 1
                    backward_chunk = 0
                    next_chunk = 1
                    already_ported = 0
            else:
                forward_chunk = row["forward_rowid"]
                backward_chunk = row["backward_rowid"]

            if total_to_port is None:
                already_ported, total_to_port = yield self._get_total_count_to_port(
                    table, forward_chunk, backward_chunk
                    table, next_chunk
                )
        else:
            def delete_all(txn):
@@ -219,85 +194,42 @@ class Porter(object):

            yield self.postgres_store._simple_insert(
                table="port_from_sqlite3",
                values={
                    "table_name": table,
                    "forward_rowid": 1,
                    "backward_rowid": 0,
                }
                values={"table_name": table, "rowid": 0}
            )

            forward_chunk = 1
            backward_chunk = 0
            next_chunk = 1

            already_ported, total_to_port = yield self._get_total_count_to_port(
                table, forward_chunk, backward_chunk
                table, next_chunk
            )

        defer.returnValue(
            (table, already_ported, total_to_port, forward_chunk, backward_chunk)
        )
        defer.returnValue((table, already_ported, total_to_port, next_chunk))

    @defer.inlineCallbacks
    def handle_table(self, table, postgres_size, table_size, forward_chunk,
                     backward_chunk):
    def handle_table(self, table, postgres_size, table_size, next_chunk):
        if not table_size:
            return

        self.progress.add_table(table, postgres_size, table_size)

        if table == "event_search":
            yield self.handle_search_table(
                postgres_size, table_size, forward_chunk, backward_chunk
            )
            return

        forward_select = (
        select = (
            "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
            % (table,)
        )

        backward_select = (
            "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?"
            % (table,)
        )

        do_forward = [True]
        do_backward = [True]

        while True:
            def r(txn):
                forward_rows = []
                backward_rows = []
                if do_forward[0]:
                    txn.execute(forward_select, (forward_chunk, self.batch_size,))
                    forward_rows = txn.fetchall()
                    if not forward_rows:
                        do_forward[0] = False
                txn.execute(select, (next_chunk, self.batch_size,))
                rows = txn.fetchall()
                headers = [column[0] for column in txn.description]

                if do_backward[0]:
                    txn.execute(backward_select, (backward_chunk, self.batch_size,))
                    backward_rows = txn.fetchall()
                    if not backward_rows:
                        do_backward[0] = False
                return headers, rows

                if forward_rows or backward_rows:
                    headers = [column[0] for column in txn.description]
                else:
                    headers = None
            headers, rows = yield self.sqlite_store.runInteraction("select", r)

                return headers, forward_rows, backward_rows
            if rows:
                next_chunk = rows[-1][0] + 1

            headers, frows, brows = yield self.sqlite_store.runInteraction(
                "select", r
            )

            if frows or brows:
                if frows:
                    forward_chunk = max(row[0] for row in frows) + 1
                if brows:
                    backward_chunk = min(row[0] for row in brows) - 1

                rows = frows + brows
                self._convert_rows(table, headers, rows)

                def insert(txn):
@@ -309,10 +241,7 @@ class Porter(object):
                        txn,
                        table="port_from_sqlite3",
                        keyvalues={"table_name": table},
                        updatevalues={
                            "forward_rowid": forward_chunk,
                            "backward_rowid": backward_chunk,
                        },
                        updatevalues={"rowid": next_chunk},
                    )

                yield self.postgres_store.execute(insert)
@@ -323,76 +252,6 @@ class Porter(object):
            else:
                return

    @defer.inlineCallbacks
    def handle_search_table(self, postgres_size, table_size, forward_chunk,
                            backward_chunk):
        select = (
            "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
            " FROM event_search as es"
            " INNER JOIN events AS e USING (event_id, room_id)"
            " WHERE es.rowid >= ?"
            " ORDER BY es.rowid LIMIT ?"
        )

        while True:
            def r(txn):
                txn.execute(select, (forward_chunk, self.batch_size,))
                rows = txn.fetchall()
                headers = [column[0] for column in txn.description]

                return headers, rows

            headers, rows = yield self.sqlite_store.runInteraction("select", r)

            if rows:
                forward_chunk = rows[-1][0] + 1

                # We have to treat event_search differently since it has a
                # different structure in the two different databases.
                def insert(txn):
                    sql = (
                        "INSERT INTO event_search (event_id, room_id, key,"
                        " sender, vector, origin_server_ts, stream_ordering)"
                        " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)"
                    )

                    rows_dict = [
                        dict(zip(headers, row))
                        for row in rows
                    ]

                    txn.executemany(sql, [
                        (
                            row["event_id"],
                            row["room_id"],
                            row["key"],
                            row["sender"],
                            row["value"],
                            row["origin_server_ts"],
                            row["stream_ordering"],
                        )
                        for row in rows_dict
                    ])

                    self.postgres_store._simple_update_one_txn(
                        txn,
                        table="port_from_sqlite3",
                        keyvalues={"table_name": "event_search"},
                        updatevalues={
                            "forward_rowid": forward_chunk,
                            "backward_rowid": backward_chunk,
                        },
                    )

                yield self.postgres_store.execute(insert)

                postgres_size += len(rows)

                self.progress.update("event_search", postgres_size)

            else:
                return

    def setup_db(self, db_config, database_engine):
        db_conn = database_engine.module.connect(
            **{
@@ -401,7 +260,7 @@ class Porter(object):
            }
        )

        prepare_database(db_conn, database_engine, config=None)
        database_engine.prepare_database(db_conn)

        db_conn.commit()

@@ -418,8 +277,8 @@ class Porter(object):
            **self.postgres_config["args"]
        )

        sqlite_engine = create_engine(sqlite_config)
        postgres_engine = create_engine(postgres_config)
        sqlite_engine = create_engine("sqlite3")
        postgres_engine = create_engine("psycopg2")

        self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
        self.postgres_store = Store(postgres_db_pool, postgres_engine)
@@ -447,7 +306,9 @@ class Porter(object):

        postgres_tables = yield self.postgres_store._simple_select_onecol(
            table="information_schema.tables",
            keyvalues={},
            keyvalues={
                "table_schema": "public",
            },
            retcol="distinct table_name",
        )

@@ -461,32 +322,10 @@ class Porter(object):
            txn.execute(
                "CREATE TABLE port_from_sqlite3 ("
                " table_name varchar(100) NOT NULL UNIQUE,"
                " forward_rowid bigint NOT NULL,"
                " backward_rowid bigint NOT NULL"
                " rowid bigint NOT NULL"
                ")"
            )

        # The old port script created a table with just a "rowid" column.
        # We want people to be able to rerun this script from an old port
        # so that they can pick up any missing events that were not
        # ported across.
        def alter_table(txn):
            txn.execute(
                "ALTER TABLE IF EXISTS port_from_sqlite3"
                " RENAME rowid TO forward_rowid"
            )
            txn.execute(
                "ALTER TABLE IF EXISTS port_from_sqlite3"
                " ADD backward_rowid bigint NOT NULL DEFAULT 0"
            )

        try:
            yield self.postgres_store.runInteraction(
                "alter_table", alter_table
            )
        except Exception as e:
            logger.info("Failed to create port table: %s", e)

        try:
            yield self.postgres_store.runInteraction(
                "create_port_table", create_port_table
@@ -538,7 +377,9 @@ class Porter(object):

        for i, row in enumerate(rows):
            rows[i] = tuple(
                conv(j, col)
                self.postgres_store.database_engine.encode_parameter(
                    conv(j, col)
                )
                for j, col in enumerate(row)
                if j > 0
            )
@@ -546,7 +387,7 @@ class Porter(object):
    @defer.inlineCallbacks
    def _setup_sent_transactions(self):
        # Only save things from the last day
        yesterday = int(time.time() * 1000) - 86400000
        yesterday = int(time.time()*1000) - 86400000

        # And save the max transaction id from each destination
        select = (
@@ -572,17 +413,14 @@ class Porter(object):
        self._convert_rows("sent_transactions", headers, rows)

        inserted_rows = len(rows)
        if inserted_rows:
            max_inserted_rowid = max(r[0] for r in rows)
        max_inserted_rowid = max(r[0] for r in rows)

            def insert(txn):
                self.postgres_store.insert_many_txn(
                    txn, "sent_transactions", headers[1:], rows
                )
        def insert(txn):
            self.postgres_store.insert_many_txn(
                txn, "sent_transactions", headers[1:], rows
            )

            yield self.postgres_store.execute(insert)
        else:
            max_inserted_rowid = 0
        yield self.postgres_store.execute(insert)

        def get_start_id(txn):
            txn.execute(
@@ -602,11 +440,7 @@ class Porter(object):

        yield self.postgres_store._simple_insert(
            table="port_from_sqlite3",
            values={
                "table_name": "sent_transactions",
                "forward_rowid": next_chunk,
                "backward_rowid": 0,
            }
            values={"table_name": "sent_transactions", "rowid": next_chunk}
        )

        def get_sent_table_size(txn):
@@ -627,18 +461,13 @@ class Porter(object):
        defer.returnValue((next_chunk, inserted_rows, total_count))

    @defer.inlineCallbacks
    def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
        frows = yield self.sqlite_store.execute_sql(
    def _get_remaining_count_to_port(self, table, next_chunk):
        rows = yield self.sqlite_store.execute_sql(
            "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
            forward_chunk,
            next_chunk,
        )

        brows = yield self.sqlite_store.execute_sql(
            "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,),
            backward_chunk,
        )

        defer.returnValue(frows[0][0] + brows[0][0])
        defer.returnValue(rows[0][0])

    @defer.inlineCallbacks
    def _get_already_ported_count(self, table):
@@ -649,10 +478,10 @@ class Porter(object):
        defer.returnValue(rows[0][0])

    @defer.inlineCallbacks
    def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
    def _get_total_count_to_port(self, table, next_chunk):
        remaining, done = yield defer.gatherResults(
            [
                self._get_remaining_count_to_port(table, forward_chunk, backward_chunk),
                self._get_remaining_count_to_port(table, next_chunk),
                self._get_already_ported_count(table),
            ],
            consumeErrors=True,
@@ -783,7 +612,7 @@ class CursesProgress(Progress):
        color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)

        self.stdscr.addstr(
            i + 2, left_margin + max_len - len(table),
            i+2, left_margin + max_len - len(table),
            table,
            curses.A_BOLD | color,
        )
@@ -791,18 +620,18 @@ class CursesProgress(Progress):
        size = 20

        progress = "[%s%s]" % (
            "#" * int(perc * size / 100),
            " " * (size - int(perc * size / 100)),
            "#" * int(perc*size/100),
            " " * (size - int(perc*size/100)),
        )

        self.stdscr.addstr(
            i + 2, left_margin + max_len + middle_space,
            i+2, left_margin + max_len + middle_space,
            "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
        )

        if self.finished:
            self.stdscr.addstr(
                rows - 1, 0,
                rows-1, 0,
                "Press any key to exit...",
            )

@@ -895,9 +724,6 @@ if __name__ == "__main__":

    postgres_config = yaml.safe_load(args.postgres_config)

    if "database" in postgres_config:
        postgres_config = postgres_config["database"]

    if "name" not in postgres_config:
        sys.stderr.write("Malformed database config: no 'name'")
        sys.exit(2)
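The diff above replaces the old single rowid cursor with a forward_rowid/backward_rowid pair, so a rerun can pick up rows both ahead of and behind the last ported position. A standalone sketch of that bookkeeping, detached from Twisted and Synapse's store classes (fetch_batch and insert_batch are hypothetical callables standing in for the SQL shown above):

    def port_bidirectionally(fetch_batch, insert_batch):
        # fetch_batch(lo=n) returns a bounded batch of rows with rowid >= n;
        # fetch_batch(hi=n) returns a bounded batch of rows with rowid <= n.
        forward_chunk, backward_chunk = 1, 0
        do_forward, do_backward = True, True

        while do_forward or do_backward:
            rows = []
            if do_forward:
                batch = fetch_batch(lo=forward_chunk)
                if batch:
                    forward_chunk = max(r[0] for r in batch) + 1
                    rows.extend(batch)
                else:
                    do_forward = False
            if do_backward:
                batch = fetch_batch(hi=backward_chunk)
                if batch:
                    backward_chunk = min(r[0] for r in batch) - 1
                    rows.extend(batch)
                else:
                    do_backward = False
            if rows:
                # Persist both cursors alongside the insert so an
                # interrupted port can resume from where it stopped.
                insert_batch(rows, forward_chunk, backward_chunk)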
331
scripts/upgrade_db_to_v0.6.0.py
Normal file
@@ -0,0 +1,331 @@

from synapse.storage import SCHEMA_VERSION, read_schema
from synapse.storage._base import SQLBaseStore
from synapse.storage.signatures import SignatureStore
from synapse.storage.event_federation import EventFederationStore

from syutil.base64util import encode_base64, decode_base64

from synapse.crypto.event_signing import compute_event_signature

from synapse.events.builder import EventBuilder
from synapse.events.utils import prune_event

from synapse.crypto.event_signing import check_event_content_hash

from syutil.crypto.jsonsign import (
    verify_signed_json, SignatureVerifyException,
)
from syutil.crypto.signing_key import decode_verify_key_bytes

from syutil.jsonutil import encode_canonical_json

import argparse
# import dns.resolver
import hashlib
import httplib
import json
import sqlite3
import syutil
import urllib2


delta_sql = """
CREATE TABLE IF NOT EXISTS event_json(
    event_id TEXT NOT NULL,
    room_id TEXT NOT NULL,
    internal_metadata NOT NULL,
    json BLOB NOT NULL,
    CONSTRAINT ev_j_uniq UNIQUE (event_id)
);

CREATE INDEX IF NOT EXISTS event_json_id ON event_json(event_id);
CREATE INDEX IF NOT EXISTS event_json_room_id ON event_json(room_id);

PRAGMA user_version = 10;
"""


class Store(object):
    _get_event_signatures_txn = SignatureStore.__dict__["_get_event_signatures_txn"]
    _get_event_content_hashes_txn = SignatureStore.__dict__["_get_event_content_hashes_txn"]
    _get_event_reference_hashes_txn = SignatureStore.__dict__["_get_event_reference_hashes_txn"]
    _get_prev_event_hashes_txn = SignatureStore.__dict__["_get_prev_event_hashes_txn"]
    _get_prev_events_and_state = EventFederationStore.__dict__["_get_prev_events_and_state"]
    _get_auth_events = EventFederationStore.__dict__["_get_auth_events"]
    cursor_to_dict = SQLBaseStore.__dict__["cursor_to_dict"]
    _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
    _simple_select_list_txn = SQLBaseStore.__dict__["_simple_select_list_txn"]
    _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]

    def _generate_event_json(self, txn, rows):
        events = []
        for row in rows:
            d = dict(row)

            d.pop("stream_ordering", None)
            d.pop("topological_ordering", None)
            d.pop("processed", None)

            if "origin_server_ts" not in d:
                d["origin_server_ts"] = d.pop("ts", 0)
            else:
                d.pop("ts", 0)

            d.pop("prev_state", None)
            d.update(json.loads(d.pop("unrecognized_keys")))

            d["sender"] = d.pop("user_id")

            d["content"] = json.loads(d["content"])

            if "age_ts" not in d:
                # For compatibility
                d["age_ts"] = d.get("origin_server_ts", 0)

            d.setdefault("unsigned", {})["age_ts"] = d.pop("age_ts")

            outlier = d.pop("outlier", False)

            # d.pop("membership", None)

            d.pop("state_hash", None)

            d.pop("replaces_state", None)

            b = EventBuilder(d)
            b.internal_metadata.outlier = outlier

            events.append(b)

        for i, ev in enumerate(events):
            signatures = self._get_event_signatures_txn(
                txn, ev.event_id,
            )

            ev.signatures = {
                n: {
                    k: encode_base64(v) for k, v in s.items()
                }
                for n, s in signatures.items()
            }

            hashes = self._get_event_content_hashes_txn(
                txn, ev.event_id,
            )

            ev.hashes = {
                k: encode_base64(v) for k, v in hashes.items()
            }

            prevs = self._get_prev_events_and_state(txn, ev.event_id)

            ev.prev_events = [
                (e_id, h)
                for e_id, h, is_state in prevs
                if is_state == 0
            ]

            # ev.auth_events = self._get_auth_events(txn, ev.event_id)

            hashes = dict(ev.auth_events)

            for e_id, hash in ev.prev_events:
                if e_id in hashes and not hash:
                    hash.update(hashes[e_id])
            #
            # if hasattr(ev, "state_key"):
            #     ev.prev_state = [
            #         (e_id, h)
            #         for e_id, h, is_state in prevs
            #         if is_state == 1
            #     ]

        return [e.build() for e in events]


store = Store()


# def get_key(server_name):
#     print "Getting keys for: %s" % (server_name,)
#     targets = []
#     if ":" in server_name:
#         target, port = server_name.split(":")
#         targets.append((target, int(port)))
#     try:
#         answers = dns.resolver.query("_matrix._tcp." + server_name, "SRV")
#         for srv in answers:
#             targets.append((srv.target, srv.port))
#     except dns.resolver.NXDOMAIN:
#         targets.append((server_name, 8448))
#     except:
#         print "Failed to lookup keys for %s" % (server_name,)
#         return {}
#
#     for target, port in targets:
#         url = "https://%s:%i/_matrix/key/v1" % (target, port)
#         try:
#             keys = json.load(urllib2.urlopen(url, timeout=2))
#             verify_keys = {}
#             for key_id, key_base64 in keys["verify_keys"].items():
#                 verify_key = decode_verify_key_bytes(
#                     key_id, decode_base64(key_base64)
#                 )
#                 verify_signed_json(keys, server_name, verify_key)
#                 verify_keys[key_id] = verify_key
#             print "Got keys for: %s" % (server_name,)
#             return verify_keys
#         except urllib2.URLError:
#             pass
#         except urllib2.HTTPError:
#             pass
#         except httplib.HTTPException:
#             pass
#
#     print "Failed to get keys for %s" % (server_name,)
#     return {}


def reinsert_events(cursor, server_name, signing_key):
    print "Running delta: v10"

    cursor.executescript(delta_sql)

    cursor.execute(
        "SELECT * FROM events ORDER BY rowid ASC"
    )

    print "Getting events..."

    rows = store.cursor_to_dict(cursor)

    events = store._generate_event_json(cursor, rows)

    print "Got events from DB."

    algorithms = {
        "sha256": hashlib.sha256,
    }

    key_id = "%s:%s" % (signing_key.alg, signing_key.version)
    verify_key = signing_key.verify_key
    verify_key.alg = signing_key.alg
    verify_key.version = signing_key.version

    server_keys = {
        server_name: {
            key_id: verify_key
        }
    }

    i = 0
    N = len(events)

    for event in events:
        if i % 100 == 0:
            print "Processed: %d/%d events" % (i,N,)
        i += 1

        # for alg_name in event.hashes:
        #     if check_event_content_hash(event, algorithms[alg_name]):
        #         pass
        #     else:
        #         pass
        #         print "FAIL content hash %s %s" % (alg_name, event.event_id, )

        have_own_correctly_signed = False
        for host, sigs in event.signatures.items():
            pruned = prune_event(event)

            for key_id in sigs:
                if host not in server_keys:
                    server_keys[host] = {}  # get_key(host)
                if key_id in server_keys[host]:
                    try:
                        verify_signed_json(
                            pruned.get_pdu_json(),
                            host,
                            server_keys[host][key_id]
                        )

                        if host == server_name:
                            have_own_correctly_signed = True
                    except SignatureVerifyException:
                        print "FAIL signature check %s %s" % (
                            key_id, event.event_id
                        )

        # TODO: Re sign with our own server key
        if not have_own_correctly_signed:
            sigs = compute_event_signature(event, server_name, signing_key)
            event.signatures.update(sigs)

            pruned = prune_event(event)

            for key_id in event.signatures[server_name]:
                verify_signed_json(
                    pruned.get_pdu_json(),
                    server_name,
                    server_keys[server_name][key_id]
                )

        event_json = encode_canonical_json(
            event.get_dict()
        ).decode("UTF-8")

        metadata_json = encode_canonical_json(
            event.internal_metadata.get_dict()
        ).decode("UTF-8")

        store._simple_insert_txn(
            cursor,
            table="event_json",
            values={
                "event_id": event.event_id,
                "room_id": event.room_id,
                "internal_metadata": metadata_json,
                "json": event_json,
            },
            or_replace=True,
        )


def main(database, server_name, signing_key):
    conn = sqlite3.connect(database)
    cursor = conn.cursor()

    # Do other deltas:
    cursor.execute("PRAGMA user_version")
    row = cursor.fetchone()

    if row and row[0]:
        user_version = row[0]
        # Run every version since after the current version.
        for v in range(user_version + 1, 10):
            print "Running delta: %d" % (v,)
            sql_script = read_schema("delta/v%d" % (v,))
            cursor.executescript(sql_script)

    reinsert_events(cursor, server_name, signing_key)

    conn.commit()

    print "Success!"


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("database")
    parser.add_argument("server_name")
    parser.add_argument(
        "signing_key", type=argparse.FileType('r'),
    )
    args = parser.parse_args()

    signing_key = syutil.crypto.signing_key.read_signing_keys(
        args.signing_key
    )

    main(args.database, args.server_name, signing_key[0])
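The upgrade script serialises every event with encode_canonical_json before storing it, which is what keeps signatures stable across re-serialisation. The modern canonicaljson package (the successor to the syutil helper used above) illustrates the property:

    from canonicaljson import encode_canonical_json

    # Key order and whitespace are normalised, so equal objects always
    # serialise to identical bytes -- a precondition for signing JSON.
    a = encode_canonical_json({"b": 1, "a": 2})
    b = encode_canonical_json({"a": 2, "b": 1})
    assert a == b == b'{"a":2,"b":1}'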
@@ -3,6 +3,9 @@ source-dir = docs/sphinx
build-dir = docs/build
all_files = 1

[aliases]
test = trial

[trial]
test_suite = tests

@@ -13,8 +16,3 @@ ignore =
    docs/*
    pylint.cfg
    tox.ini

[flake8]
max-line-length = 90
# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
ignore = W503
56
setup.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python

# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,54 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import glob
import os
from setuptools import setup, find_packages, Command
import sys
from setuptools import setup, find_packages


here = os.path.abspath(os.path.dirname(__file__))


# Some notes on `setup.py test`:
#
# Once upon a time we used to try to make `setup.py test` run `tox` to run the
# tests. That's a bad idea for three reasons:
#
# 1: `setup.py test` is supposed to find out whether the tests work in the
#    *current* environment, not whatever tox sets up.
# 2: Empirically, trying to install tox during the test run wasn't working ("No
#    module named virtualenv").
# 3: The tox documentation advises against it[1].
#
# Even further back in time, we used to use setuptools_trial [2]. That has its
# own set of issues: for instance, it requires installation of Twisted to build
# an sdist (because the recommended mode of usage is to add it to
# `setup_requires`). That in turn means that in order to successfully run tox
# you have to have the python header files installed for whichever version of
# python tox uses (which is python3 on recent ubuntus, for example).
#
# So, for now at least, we stick with what appears to be the convention among
# Twisted projects, and don't attempt to do anything when someone runs
# `setup.py test`; instead we direct people to run `trial` directly if they
# care.
#
# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command
# [2]: https://pypi.python.org/pypi/setuptools_trial
class TestCommand(Command):
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print ("""Synapse's tests cannot be run via setup.py. To run them, try:
    PYTHONPATH="." trial tests
""")

def read_file(path_segments):
    """Read a file from the package. Takes a list of strings to join to
    make the path"""
@@ -77,7 +36,6 @@ def exec_file(path_segments):
    exec(code, result)
    return result


version = exec_file(("synapse", "__init__.py"))["__version__"]
dependencies = exec_file(("synapse", "python_dependencies.py"))
long_description = read_file(("README.rst",))
@@ -88,10 +46,14 @@ setup(
    packages=find_packages(exclude=["tests", "tests.*"]),
    description="Reference Synapse Home Server",
    install_requires=dependencies['requirements'](include_conditional=True).keys(),
    dependency_links=dependencies["DEPENDENCY_LINKS"].values(),
    setup_requires=[
        "Twisted==14.0.2", # Here to override setuptools_trial's dependency on Twisted>=2.4.0
        "setuptools_trial",
        "mock"
    ],
    dependency_links=dependencies["DEPENDENCY_LINKS"],
    include_package_data=True,
    zip_safe=False,
    long_description=long_description,
    scripts=["synctl"] + glob.glob("scripts/*"),
    cmdclass={'test': TestCommand},
    scripts=["synctl", "register_new_matrix_user"],
)
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""

__version__ = "0.21.0-rc2"
__version__ = "0.8.1-r4"
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
File diff suppressed because it is too large
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,11 +27,22 @@ class Membership(object):
    LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)


class Feedback(object):

    """Represents the types of feedback a user can send in response to a
    message."""

    DELIVERED = u"delivered"
    READ = u"read"
    LIST = (DELIVERED, READ)


class PresenceState(object):
    """Represents the presence state of a user."""
    OFFLINE = u"offline"
    UNAVAILABLE = u"unavailable"
    ONLINE = u"online"
    FREE_FOR_CHAT = u"free_for_chat"


class JoinRules(object):
@@ -44,8 +54,10 @@ class JoinRules(object):

class LoginType(object):
    PASSWORD = u"m.login.password"
    OAUTH = u"m.login.oauth2"
    EMAIL_CODE = u"m.login.email.code"
    EMAIL_URL = u"m.login.email.url"
    EMAIL_IDENTITY = u"m.login.email.identity"
    MSISDN = u"m.login.msisdn"
    RECAPTCHA = u"m.login.recaptcha"
    DUMMY = u"m.login.dummy"

@@ -61,12 +73,7 @@ class EventTypes(object):
    PowerLevels = "m.room.power_levels"
    Aliases = "m.room.aliases"
    Redaction = "m.room.redaction"
    ThirdPartyInvite = "m.room.third_party_invite"

    RoomHistoryVisibility = "m.room.history_visibility"
    CanonicalAlias = "m.room.canonical_alias"
    RoomAvatar = "m.room.avatar"
    GuestAccess = "m.room.guest_access"
    Feedback = "m.room.message.feedback"

    # These are used for validation
    Message = "m.room.message"
@@ -78,14 +85,3 @@ class RejectedReason(object):
    AUTH_ERROR = "auth_error"
    REPLACED = "replaced"
    NOT_ANCESTOR = "not_ancestor"


class RoomCreationPreset(object):
    PRIVATE_CHAT = "private_chat"
    PUBLIC_CHAT = "public_chat"
    TRUSTED_PRIVATE_CHAT = "trusted_private_chat"


class ThirdPartyEntityKind(object):
    USER = "user"
    LOCATION = "location"
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@

"""Contains exceptions and error codes."""

import json
import logging

logger = logging.getLogger(__name__)
@@ -30,67 +29,42 @@ class Codes(object):
    USER_IN_USE = "M_USER_IN_USE"
    ROOM_IN_USE = "M_ROOM_IN_USE"
    BAD_PAGINATION = "M_BAD_PAGINATION"
    BAD_STATE = "M_BAD_STATE"
    UNKNOWN = "M_UNKNOWN"
    NOT_FOUND = "M_NOT_FOUND"
    MISSING_TOKEN = "M_MISSING_TOKEN"
    UNKNOWN_TOKEN = "M_UNKNOWN_TOKEN"
    GUEST_ACCESS_FORBIDDEN = "M_GUEST_ACCESS_FORBIDDEN"
    LIMIT_EXCEEDED = "M_LIMIT_EXCEEDED"
    CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
    CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
    MISSING_PARAM = "M_MISSING_PARAM"
    INVALID_PARAM = "M_INVALID_PARAM"
    TOO_LARGE = "M_TOO_LARGE"
    EXCLUSIVE = "M_EXCLUSIVE"
    THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
    THREEPID_IN_USE = "M_THREEPID_IN_USE"
    THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
    INVALID_USERNAME = "M_INVALID_USERNAME"
    SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"


class CodeMessageException(RuntimeError):
    """An exception with integer code and message string attributes.
    """An exception with integer code and message string attributes."""

    Attributes:
        code (int): HTTP error code
        msg (str): string describing the error
    """
    def __init__(self, code, msg):
        logger.info("%s: %s, %s", type(self).__name__, code, msg)
        super(CodeMessageException, self).__init__("%d: %s" % (code, msg))
        self.code = code
        self.msg = msg
        self.response_code_message = None

    def error_dict(self):
        return cs_error(self.msg)


class MatrixCodeMessageException(CodeMessageException):
    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.

    Attributes:
        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
    """
    def __init__(self, code, msg, errcode=Codes.UNKNOWN):
        super(MatrixCodeMessageException, self).__init__(code, msg)
        self.errcode = errcode


class SynapseError(CodeMessageException):
    """A base exception type for matrix errors which have an errcode and error
    message (as well as an HTTP status code).

    Attributes:
        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
    """
    """A base error which can be caught for all synapse events."""
    def __init__(self, code, msg, errcode=Codes.UNKNOWN):
        """Constructs a synapse error.

        Args:
            code (int): The integer error code (an HTTP response code)
            msg (str): The human-readable error message.
            errcode (str): The matrix error code e.g 'M_FORBIDDEN'
            err (str): The error code e.g 'M_FORBIDDEN'
        """
        super(SynapseError, self).__init__(code, msg)
        self.errcode = errcode
@@ -101,38 +75,10 @@ class SynapseError(CodeMessageException):
            self.errcode,
        )

    @classmethod
    def from_http_response_exception(cls, err):
        """Make a SynapseError based on an HTTPResponseException

        This is useful when a proxied request has failed, and we need to
        decide how to map the failure onto a matrix error to send back to the
        client.

        An attempt is made to parse the body of the http response as a matrix
        error. If that succeeds, the errcode and error message from the body
        are used as the errcode and error message in the new synapse error.

        Otherwise, the errcode is set to M_UNKNOWN, and the error message is
        set to the reason code from the HTTP response.

        Args:
            err (HttpResponseException):

        Returns:
            SynapseError:
        """
        # try to parse the body as json, to get better errcode/msg, but
        # default to M_UNKNOWN with the HTTP status as the error text
        try:
            j = json.loads(err.response)
        except ValueError:
            j = {}
        errcode = j.get('errcode', Codes.UNKNOWN)
        errmsg = j.get('error', err.msg)

        res = SynapseError(err.code, errmsg, errcode)
        return res
class RoomError(SynapseError):
    """An error raised when a room event fails."""
    pass


class RegistrationError(SynapseError):
@@ -159,11 +105,13 @@ class UnrecognizedRequestError(SynapseError):

class NotFoundError(SynapseError):
    """An error indicating we can't find the thing you asked for"""
    def __init__(self, msg="Not found", errcode=Codes.NOT_FOUND):
    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.NOT_FOUND
        super(NotFoundError, self).__init__(
            404,
            msg,
            errcode=errcode
            "Not found",
            **kwargs
        )


@@ -176,15 +124,6 @@ class AuthError(SynapseError):
        super(AuthError, self).__init__(*args, **kwargs)


class EventSizeError(SynapseError):
    """An error raised when an event is too big."""

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.TOO_LARGE
        super(EventSizeError, self).__init__(413, *args, **kwargs)


class EventStreamError(SynapseError):
    """An error raised when there a problem with the event stream."""
    def __init__(self, *args, **kwargs):
@@ -224,6 +163,7 @@ class LimitExceededError(SynapseError):
                 errcode=Codes.LIMIT_EXCEEDED):
        super(LimitExceededError, self).__init__(code, msg, errcode)
        self.retry_after_ms = retry_after_ms
        self.response_code_message = "Too Many Requests"

    def error_dict(self):
        return cs_error(
@@ -293,19 +233,6 @@ class FederationError(RuntimeError):


class HttpResponseException(CodeMessageException):
    """
    Represents an HTTP-level failure of an outbound request

    Attributes:
        response (str): body of response
    """
    def __init__(self, code, msg, response):
        """

        Args:
            code (int): HTTP status code
            msg (str): reason phrase from HTTP response status line
            response (str): body of response
        """
        super(HttpResponseException, self).__init__(code, msg)
        self.response = response
        super(HttpResponseException, self).__init__(code, msg)
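The removed from_http_response_exception classmethod boils down to: try to parse the response body as a matrix error, and fall back to M_UNKNOWN with the HTTP reason phrase otherwise. The same mapping in isolation (the function name and arguments are illustrative):

    import json

    def matrix_error_from_response(status_code, reason, body):
        try:
            j = json.loads(body)
        except ValueError:
            j = {}
        errcode = j.get("errcode", "M_UNKNOWN")
        errmsg = j.get("error", reason)  # fall back to the reason phrase
        return status_code, errcode, errmsg

    print(matrix_error_from_response(
        403, "Forbidden", '{"errcode": "M_FORBIDDEN", "error": "denied"}',
    ))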
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
# Copyright 2015 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -13,174 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.storage.presence import UserPresenceState
|
||||
from synapse.types import UserID, RoomID
|
||||
from twisted.internet import defer
|
||||
|
||||
import ujson as json
|
||||
import jsonschema
|
||||
from jsonschema import FormatChecker
|
||||
|
||||
FILTER_SCHEMA = {
|
||||
"additionalProperties": False,
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"limit": {
|
||||
"type": "number"
|
||||
},
|
||||
"senders": {
|
||||
"$ref": "#/definitions/user_id_array"
|
||||
},
|
||||
"not_senders": {
|
||||
"$ref": "#/definitions/user_id_array"
|
||||
},
|
||||
# TODO: We don't limit event type values but we probably should...
|
||||
# check types are valid event types
|
||||
"types": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"not_types": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ROOM_FILTER_SCHEMA = {
|
||||
"additionalProperties": False,
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"not_rooms": {
|
||||
"$ref": "#/definitions/room_id_array"
|
||||
},
|
||||
"rooms": {
|
||||
"$ref": "#/definitions/room_id_array"
|
||||
},
|
||||
"ephemeral": {
|
||||
"$ref": "#/definitions/room_event_filter"
|
||||
},
|
||||
"include_leave": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"state": {
|
||||
"$ref": "#/definitions/room_event_filter"
|
||||
},
|
||||
"timeline": {
|
||||
"$ref": "#/definitions/room_event_filter"
|
||||
},
|
||||
"account_data": {
|
||||
"$ref": "#/definitions/room_event_filter"
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
ROOM_EVENT_FILTER_SCHEMA = {
|
||||
"additionalProperties": False,
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"limit": {
|
||||
"type": "number"
|
||||
},
|
||||
"senders": {
|
||||
"$ref": "#/definitions/user_id_array"
|
||||
},
|
||||
"not_senders": {
|
||||
"$ref": "#/definitions/user_id_array"
|
||||
},
|
||||
"types": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"not_types": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"rooms": {
|
||||
"$ref": "#/definitions/room_id_array"
|
||||
},
|
||||
"not_rooms": {
|
||||
"$ref": "#/definitions/room_id_array"
|
||||
},
|
||||
"contains_url": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
USER_ID_ARRAY_SCHEMA = {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"format": "matrix_user_id"
|
||||
}
|
||||
}
|
||||
|
||||
ROOM_ID_ARRAY_SCHEMA = {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"format": "matrix_room_id"
|
||||
}
|
||||
}

USER_FILTER_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "schema for a Sync filter",
    "type": "object",
    "definitions": {
        "room_id_array": ROOM_ID_ARRAY_SCHEMA,
        "user_id_array": USER_ID_ARRAY_SCHEMA,
        "filter": FILTER_SCHEMA,
        "room_filter": ROOM_FILTER_SCHEMA,
        "room_event_filter": ROOM_EVENT_FILTER_SCHEMA
    },
    "properties": {
        "presence": {
            "$ref": "#/definitions/filter"
        },
        "account_data": {
            "$ref": "#/definitions/filter"
        },
        "room": {
            "$ref": "#/definitions/room_filter"
        },
        "event_format": {
            "type": "string",
            "enum": ["client", "federation"]
        },
        "event_fields": {
            "type": "array",
            "items": {
                "type": "string",
                # Don't allow '\\' in event field filters. This makes matching
                # events a lot easier as we can then use a negative lookbehind
                # assertion to split '\.' If we allowed \\ then it would
                # incorrectly split '\\.' See synapse.events.utils.serialize_event
                "pattern": "^((?!\\\).)*$"
            }
        }
    },
    "additionalProperties": False
}
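
For orientation: USER_FILTER_SCHEMA is exactly what check_valid_filter below
feeds to jsonschema.validate. A minimal sketch of exercising it directly; the
filter body here is a hypothetical example, not taken from this diff:

import jsonschema
from jsonschema import FormatChecker

example_filter = {  # hypothetical client-supplied filter
    "room": {
        "timeline": {"limit": 10, "types": ["m.room.message"]},
        "state": {"not_senders": ["@spam:example.com"]},
    },
    "event_format": "client",
}

# Raises jsonschema.ValidationError on a malformed filter; check_valid_filter
# converts that into a SynapseError(400).
jsonschema.validate(example_filter, USER_FILTER_SCHEMA,
                    format_checker=FormatChecker())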


@FormatChecker.cls_checks('matrix_room_id')
def matrix_room_id_validator(room_id_str):
    return RoomID.from_string(room_id_str)


@FormatChecker.cls_checks('matrix_user_id')
def matrix_user_id_validator(user_id_str):
    return UserID.from_string(user_id_str)
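
A note on the two validators above: FormatChecker.cls_checks registers them
class-wide under the format names used by the ID array schemas, and jsonschema
only enforces "format" constraints when a checker instance is supplied. A small
sketch (the room ID is hypothetical):

# Without a format_checker the matrix_room_id format is silently skipped.
jsonschema.validate(["!abc:example.com"], ROOM_ID_ARRAY_SCHEMA)

# With one, matrix_room_id_validator (i.e. RoomID.from_string) runs per item.
jsonschema.validate(["!abc:example.com"], ROOM_ID_ARRAY_SCHEMA,
                    format_checker=FormatChecker())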


class Filtering(object):
@@ -189,20 +22,20 @@ class Filtering(object):
        super(Filtering, self).__init__()
        self.store = hs.get_datastore()

    @defer.inlineCallbacks
    def get_user_filter(self, user_localpart, filter_id):
        result = yield self.store.get_user_filter(user_localpart, filter_id)
        defer.returnValue(FilterCollection(result))
        result = self.store.get_user_filter(user_localpart, filter_id)
        result.addCallback(Filter)
        return result

    def add_user_filter(self, user_localpart, user_filter):
        self.check_valid_filter(user_filter)
        self._check_valid_filter(user_filter)
        return self.store.add_user_filter(user_localpart, user_filter)
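
The hunk above swaps an inlineCallbacks/yield body for plain Deferred
chaining; the two styles are equivalent. A minimal sketch of the callback
form, using defer.succeed as a stand-in for the store call:

from twisted.internet import defer

d = defer.succeed({"room": {}})   # stands in for store.get_user_filter(...)
d.addCallback(Filter)             # wraps the raw filter dict once it resolves
d.addCallback(lambda f: f.filter_json)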

    # TODO(paul): surely we should probably add a delete_user_filter or
    # replace_user_filter at some point? There's no REST API specified for
    # them however

    def check_valid_filter(self, user_filter_json):
    def _check_valid_filter(self, user_filter_json):
        """Check if the provided filter is valid.

        This inspects all definitions contained within the filter.
@@ -215,214 +48,182 @@ class Filtering(object):
        # NB: Filters are the complete json blobs. "Definitions" are an
        # individual top-level key e.g. public_user_data. Filters are made of
        # many definitions.
        try:
            jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA,
                                format_checker=FormatChecker())
        except jsonschema.ValidationError as e:
            raise SynapseError(400, e.message)

        top_level_definitions = [
            "public_user_data", "private_user_data", "server_data"
        ]

class FilterCollection(object):
    def __init__(self, filter_json):
        self._filter_json = filter_json
        room_level_definitions = [
            "state", "events", "ephemeral"
        ]

        room_filter_json = self._filter_json.get("room", {})
        for key in top_level_definitions:
            if key in user_filter_json:
                self._check_definition(user_filter_json[key])

        self._room_filter = Filter({
            k: v for k, v in room_filter_json.items()
            if k in ("rooms", "not_rooms")
        })
        if "room" in user_filter_json:
            for key in room_level_definitions:
                if key in user_filter_json["room"]:
                    self._check_definition(user_filter_json["room"][key])

        self._room_timeline_filter = Filter(room_filter_json.get("timeline", {}))
        self._room_state_filter = Filter(room_filter_json.get("state", {}))
        self._room_ephemeral_filter = Filter(room_filter_json.get("ephemeral", {}))
        self._room_account_data = Filter(room_filter_json.get("account_data", {}))
        self._presence_filter = Filter(filter_json.get("presence", {}))
        self._account_data = Filter(filter_json.get("account_data", {}))
    def _check_definition(self, definition):
        """Check if the provided definition is valid.

        self.include_leave = filter_json.get("room", {}).get(
            "include_leave", False
        )
        self.event_fields = filter_json.get("event_fields", [])
        This inspects not only the types but also the values to make sure they
        make sense.

    def __repr__(self):
        return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
        Args:
            definition(dict): The filter definition
        Raises:
            SynapseError: If there was a problem with this definition.
        """
        # NB: Filters are the complete json blobs. "Definitions" are an
        # individual top-level key e.g. public_user_data. Filters are made of
        # many definitions.
        if type(definition) != dict:
            raise SynapseError(
                400, "Expected JSON object, not %s" % (definition,)
            )

    def get_filter_json(self):
        return self._filter_json
        # check rooms are valid room IDs
        room_id_keys = ["rooms", "not_rooms"]
        for key in room_id_keys:
            if key in definition:
                if type(definition[key]) != list:
                    raise SynapseError(400, "Expected %s to be a list." % key)
                for room_id in definition[key]:
                    RoomID.from_string(room_id)

    def timeline_limit(self):
        return self._room_timeline_filter.limit()
        # check senders are valid user IDs
        user_id_keys = ["senders", "not_senders"]
        for key in user_id_keys:
            if key in definition:
                if type(definition[key]) != list:
                    raise SynapseError(400, "Expected %s to be a list." % key)
                for user_id in definition[key]:
                    UserID.from_string(user_id)

    def presence_limit(self):
        return self._presence_filter.limit()
        # TODO: We don't limit event type values but we probably should...
        # check types are valid event types
        event_keys = ["types", "not_types"]
        for key in event_keys:
            if key in definition:
                if type(definition[key]) != list:
                    raise SynapseError(400, "Expected %s to be a list." % key)
                for event_type in definition[key]:
                    if not isinstance(event_type, basestring):
                        raise SynapseError(400, "Event type should be a string")

    def ephemeral_limit(self):
        return self._room_ephemeral_filter.limit()
        if "format" in definition:
            event_format = definition["format"]
            if event_format not in ["federation", "events"]:
                raise SynapseError(400, "Invalid format: %s" % (event_format,))

    def filter_presence(self, events):
        return self._presence_filter.filter(events)
        if "select" in definition:
            event_select_list = definition["select"]
            for select_key in event_select_list:
                if select_key not in ["event_id", "origin_server_ts",
                                      "thread_id", "content", "content.body"]:
                    raise SynapseError(400, "Bad select: %s" % (select_key,))

    def filter_account_data(self, events):
        return self._account_data.filter(events)

    def filter_room_state(self, events):
        return self._room_state_filter.filter(self._room_filter.filter(events))

    def filter_room_timeline(self, events):
        return self._room_timeline_filter.filter(self._room_filter.filter(events))

    def filter_room_ephemeral(self, events):
        return self._room_ephemeral_filter.filter(self._room_filter.filter(events))

    def filter_room_account_data(self, events):
        return self._room_account_data.filter(self._room_filter.filter(events))

    def blocks_all_presence(self):
        return (
            self._presence_filter.filters_all_types() or
            self._presence_filter.filters_all_senders()
        )

    def blocks_all_room_ephemeral(self):
        return (
            self._room_ephemeral_filter.filters_all_types() or
            self._room_ephemeral_filter.filters_all_senders() or
            self._room_ephemeral_filter.filters_all_rooms()
        )

    def blocks_all_room_timeline(self):
        return (
            self._room_timeline_filter.filters_all_types() or
            self._room_timeline_filter.filters_all_senders() or
            self._room_timeline_filter.filters_all_rooms()
        )
        if ("bundle_updates" in definition and
                type(definition["bundle_updates"]) != bool):
            raise SynapseError(400, "Bad bundle_updates: expected bool.")


class Filter(object):
    def __init__(self, filter_json):
        self.filter_json = filter_json

        self.types = self.filter_json.get("types", None)
        self.not_types = self.filter_json.get("not_types", [])
    def filter_public_user_data(self, events):
        return self._filter_on_key(events, ["public_user_data"])

        self.rooms = self.filter_json.get("rooms", None)
        self.not_rooms = self.filter_json.get("not_rooms", [])
    def filter_private_user_data(self, events):
        return self._filter_on_key(events, ["private_user_data"])

        self.senders = self.filter_json.get("senders", None)
        self.not_senders = self.filter_json.get("not_senders", [])
    def filter_room_state(self, events):
        return self._filter_on_key(events, ["room", "state"])

        self.contains_url = self.filter_json.get("contains_url", None)
    def filter_room_events(self, events):
        return self._filter_on_key(events, ["room", "events"])

    def filters_all_types(self):
        return "*" in self.not_types
    def filter_room_ephemeral(self, events):
        return self._filter_on_key(events, ["room", "ephemeral"])

    def filters_all_senders(self):
        return "*" in self.not_senders
    def _filter_on_key(self, events, keys):
        filter_json = self.filter_json
        if not filter_json:
            return events

    def filters_all_rooms(self):
        return "*" in self.not_rooms
        try:
            # extract the right definition from the filter
            definition = filter_json
            for key in keys:
                definition = definition[key]
            return self._filter_with_definition(events, definition)
        except KeyError:
            # return all events if definition isn't specified.
            return events

    def check(self, event):
        """Checks whether the filter matches the given event.
    def _filter_with_definition(self, events, definition):
        return [e for e in events if self._passes_definition(definition, e)]

    def _passes_definition(self, definition, event):
        """Check if the event passes through the given definition.

        Args:
            definition(dict): The definition to check against.
            event(Event): The event to check.
        Returns:
            bool: True if the event matches
            True if the event passes through the filter.
        """
        # We usually get the full "events" as dictionaries coming through,
        # except for presence which actually gets passed around as its own
        # namedtuple type.
        if isinstance(event, UserPresenceState):
            sender = event.user_id
            room_id = None
            ev_type = "m.presence"
            is_url = False
        else:
            sender = event.get("sender", None)
            if not sender:
                # Presence events had their 'sender' in content.user_id, but are
                # now handled above. We don't know if anything else uses this
                # form. TODO: Check this and probably remove it.
                content = event.get("content")
                # account_data has been allowed to have non-dict content, so
                # check type first
                if isinstance(content, dict):
                    sender = content.get("user_id")
        # Algorithm notes:
        # For each key in the definition, check the event meets the criteria:
        #   * For types: Literal match or prefix match (if ends with wildcard)
        #   * For senders/rooms: Literal match only
        #   * "not_" checks take precedence (e.g. if "m.*" is in both 'types'
        #     and 'not_types' then it is treated as only being in 'not_types')

            room_id = event.get("room_id", None)
            ev_type = event.get("type", None)
            is_url = "url" in event.get("content", {})

        return self.check_fields(
            room_id,
            sender,
            ev_type,
            is_url,
        )

    def check_fields(self, room_id, sender, event_type, contains_url):
        """Checks whether the filter matches the given event fields.

        Returns:
            bool: True if the event fields match
        """
        literal_keys = {
            "rooms": lambda v: room_id == v,
            "senders": lambda v: sender == v,
            "types": lambda v: _matches_wildcard(event_type, v)
        }

        for name, match_func in literal_keys.items():
            not_name = "not_%s" % (name,)
            disallowed_values = getattr(self, not_name)
            if any(map(match_func, disallowed_values)):
        # room checks
        if hasattr(event, "room_id"):
            room_id = event.room_id
            allow_rooms = definition.get("rooms", None)
            reject_rooms = definition.get("not_rooms", None)
            if reject_rooms and room_id in reject_rooms:
                return False
            if allow_rooms and room_id not in allow_rooms:
                return False

            allowed_values = getattr(self, name)
            if allowed_values is not None:
                if not any(map(match_func, allowed_values)):
                    return False
        # sender checks
        if hasattr(event, "sender"):
            # Should we be including event.state_key for some event types?
            sender = event.sender
            allow_senders = definition.get("senders", None)
            reject_senders = definition.get("not_senders", None)
            if reject_senders and sender in reject_senders:
                return False
            if allow_senders and sender not in allow_senders:
                return False

        contains_url_filter = self.filter_json.get("contains_url")
        if contains_url_filter is not None:
            if contains_url_filter != contains_url:
        # type checks
        if "not_types" in definition:
            for def_type in definition["not_types"]:
                if self._event_matches_type(event, def_type):
                    return False
        if "types" in definition:
            included = False
            for def_type in definition["types"]:
                if self._event_matches_type(event, def_type):
                    included = True
                    break
            if not included:
                return False

        return True

    def filter_rooms(self, room_ids):
        """Apply the 'rooms' filter to a given list of rooms.

        Args:
            room_ids (list): A list of room_ids.

        Returns:
            list: A list of room_ids that match the filter
        """
        room_ids = set(room_ids)

        disallowed_rooms = set(self.filter_json.get("not_rooms", []))
        room_ids -= disallowed_rooms

        allowed_rooms = self.filter_json.get("rooms", None)
        if allowed_rooms is not None:
            room_ids &= set(allowed_rooms)

        return room_ids
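
A small worked example of the set arithmetic in filter_rooms (room IDs are
hypothetical): not_rooms is subtracted first, then the rooms allow-list is
intersected if present.

f = Filter({"rooms": ["!a:hs", "!b:hs"], "not_rooms": ["!b:hs"]})
assert f.filter_rooms(["!a:hs", "!b:hs", "!c:hs"]) == {"!a:hs"}
# "!b:hs" is dropped by not_rooms; "!c:hs" is outside the allow-list.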

    def filter(self, events):
        return filter(self.check, events)

    def limit(self):
        return self.filter_json.get("limit", 10)


def _matches_wildcard(actual_value, filter_value):
    if filter_value.endswith("*"):
        type_prefix = filter_value[:-1]
        return actual_value.startswith(type_prefix)
    else:
        return actual_value == filter_value
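
The wildcard helper only treats a trailing "*" specially; everything else is
a literal comparison. For example:

assert _matches_wildcard("m.room.message", "m.room.*")   # prefix match
assert not _matches_wildcard("m.presence", "m.room.*")
assert _matches_wildcard("m.presence", "m.presence")     # literal match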


DEFAULT_FILTER_COLLECTION = FilterCollection({})
    def _event_matches_type(self, event, def_type):
        if def_type.endswith("*"):
            type_prefix = def_type[:-1]
            return event.type.startswith(type_prefix)
        else:
            return event.type == def_type

@@ -1,4 +1,4 @@
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ class Ratelimiter(object):
    def __init__(self):
        self.message_counts = collections.OrderedDict()

    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count, update=True):
    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count):
        """Can the user send a message?
        Args:
            user_id: The user sending a message.
@@ -32,15 +32,12 @@ class Ratelimiter(object):
                second.
            burst_count: How many messages the user can send before being
                limited.
            update (bool): Whether to update the message rates or not. This is
                useful to check if a message would be allowed to be sent before
                it's ready to actually be sent.
        Returns:
            A pair of a bool indicating if they can send a message now and a
            time in seconds of when they can next send a message.
        """
        self.prune_message_counts(time_now_s)
        message_count, time_start, _ignored = self.message_counts.get(
        message_count, time_start, _ignored = self.message_counts.pop(
            user_id, (0., time_now_s, None),
        )
        time_delta = time_now_s - time_start
@@ -55,10 +52,9 @@ class Ratelimiter(object):
            allowed = True
            message_count += 1

        if update:
            self.message_counts[user_id] = (
                message_count, time_start, msg_rate_hz
            )
        self.message_counts[user_id] = (
            message_count, time_start, msg_rate_hz
        )

        if msg_rate_hz > 0:
            time_allowed = (
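
The hunk cuts off mid-expression, but the accounting it manipulates is a
simple leaky bucket. A rough standalone sketch (names mirror the diff; the
elided branches are reconstructed from context, so treat this as an
approximation rather than the shipped implementation):

def would_allow(message_count, time_start, time_now_s, msg_rate_hz, burst_count):
    # Messages "drain" out of the bucket at msg_rate_hz per second.
    time_delta = time_now_s - time_start
    sent_count = message_count - time_delta * msg_rate_hz
    if sent_count < 0:
        return True                       # bucket fully drained
    return sent_count <= burst_count - 1  # still within the allowed burst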

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,5 +23,5 @@ WEB_CLIENT_PREFIX = "/_matrix/client"
CONTENT_REPO_PREFIX = "/_matrix/content"
SERVER_KEY_PREFIX = "/_matrix/key/v1"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/r0"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
MEDIA_PREFIX = "/_matrix/media/v1"
APP_SERVICE_PREFIX = "/_matrix/appservice/v1"

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,20 +12,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
sys.dont_write_bytecode = True

from synapse import python_dependencies  # noqa: E402

try:
    python_dependencies.check_requirements()
except python_dependencies.MissingRequirementError as e:
    message = "\n".join([
        "Missing Requirement: %s" % (e.message,),
        "To install run:",
        "    pip install --upgrade --force \"%s\"" % (e.dependency,),
        "",
    ])
    sys.stderr.writelines(message)
    sys.exit(1)

@@ -1,218 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.server import HomeServer
from synapse.config._base import ConfigError
from synapse.config.logger import setup_logging
from synapse.config.homeserver import HomeServerConfig
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

from synapse import events

from twisted.internet import reactor
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.appservice")


class AppserviceSlaveStore(
    DirectoryStore, SlavedEventStore, SlavedApplicationServiceStore,
    SlavedRegistrationStore,
):
    pass


class AppserviceServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn
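
A quick illustration of the cp_ filtering in get_db_conn: adbapi pool options
travel in the same "args" mapping as the driver arguments and must be stripped
before connecting. The config values below are hypothetical:

db_args = {"database": "homeserver.db", "cp_min": 5, "cp_max": 10}
db_params = {k: v for k, v in db_args.items() if not k.startswith("cp_")}
assert db_params == {"database": "homeserver.db"}  # pool knobs removed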

    def setup(self):
        logger.info("Setting up.")
        self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse appservice now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ASReplicationHandler(self)


class ASReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(ASReplicationHandler, self).__init__(hs.get_datastore())
        self.appservice_handler = hs.get_application_service_handler()

    def on_rdata(self, stream_name, token, rows):
        super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)

        if stream_name == "events":
            max_stream_id = self.store.get_room_max_stream_ordering()
            preserve_fn(
                self.appservice_handler.notify_interested_services
            )(max_stream_id)


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse appservice", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.appservice"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    if config.notify_appservices:
        sys.stderr.write(
            "\nThe appservices must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``notify_appservices: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the appservices to start since they will be disabled in the main config
    config.notify_appservices = True

    ps = AppserviceServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-appservice",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
@@ -1,220 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.http.server import JsonResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.room import PublicRoomListRestServlet
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.crypto import context_factory

from synapse import events


from twisted.internet import reactor
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.client_reader")


class ClientReaderSlavedStore(
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    TransactionStore,
    BaseSlavedStore,
    ClientIpStore,  # After BaseSlavedStore because the constructor is different
):
    pass


class ClientReaderServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    PublicRoomListRestServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })
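
All four client prefixes are deliberately mapped to the one JsonResource so
that old and new clients reach the same servlet. An equivalent, more compact
way to express the same mapping (a sketch, not a proposed change):

resources.update({
    prefix: resource
    for prefix in ("/_matrix/client/r0", "/_matrix/client/unstable",
                   "/_matrix/client/v2_alpha", "/_matrix/client/api/v1")
})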

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse client reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse client reader", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.client_reader"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = ClientReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-client-reader",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
@@ -1,209 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.api.urls import FEDERATION_PREFIX
from synapse.federation.transport.server import TransportLayerServer
from synapse.crypto import context_factory

from synapse import events


from twisted.internet import reactor
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.federation_reader")


class FederationReaderSlavedStore(
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    TransactionStore,
    BaseSlavedStore,
):
    pass


class FederationReaderServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "federation":
                    resources.update({
                        FEDERATION_PREFIX: TransportLayerServer(self),
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse federation reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation reader", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.federation_reader"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = FederationReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-federation-reader",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
@@ -1,305 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.server import HomeServer
from synapse.config._base import ConfigError
from synapse.config.logger import setup_logging
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.http.site import SynapseSite
from synapse.federation import send_queue
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.storage.engines import create_engine
from synapse.util.async import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

from synapse import events

from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.appservice")


class FederationSenderSlaveStore(
    SlavedDeviceInboxStore, TransactionStore, SlavedReceiptsStore, SlavedEventStore,
    SlavedRegistrationStore, SlavedDeviceStore, SlavedPresenceStore,
):
    def __init__(self, db_conn, hs):
        super(FederationSenderSlaveStore, self).__init__(db_conn, hs)

        # We pull out the current federation stream position now so that we
        # always have a known value for the federation position in memory so
        # that we don't have to bounce via a deferred once when we start the
        # replication streams.
        self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)

    def _get_federation_out_pos(self, db_conn):
        sql = (
            "SELECT stream_id FROM federation_stream_position"
            " WHERE type = ?"
        )
        sql = self.database_engine.convert_param_style(sql)

        txn = db_conn.cursor()
        txn.execute(sql, ("federation",))
        rows = txn.fetchall()
        txn.close()

        return rows[0][0] if rows else -1
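
convert_param_style exists because the supported engines disagree on SQL
placeholders. An illustrative (not exhaustive) sketch of its effect:

sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?"
# sqlite keeps the "?" placeholder unchanged, while the postgres engine
# rewrites it to psycopg2's "%s" style, i.e. roughly:
#   SELECT stream_id FROM federation_stream_position WHERE type = %s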


class FederationSenderServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse federation_sender now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return FederationSenderReplicationHandler(self)


class FederationSenderReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
        self.send_handler = FederationSenderHandler(hs, self)

    def on_rdata(self, stream_name, token, rows):
        super(FederationSenderReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        self.send_handler.process_replication_rows(stream_name, token, rows)

    def get_streams_to_replicate(self):
        args = super(FederationSenderReplicationHandler, self).get_streams_to_replicate()
        args.update(self.send_handler.stream_positions())
        return args


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation sender", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.federation_sender"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    if config.send_federation:
        sys.stderr.write(
            "\nsend_federation must be disabled in the main synapse process"
            "\nbefore it can be run in a separate worker."
            "\nPlease add ``send_federation: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the federation sender to start since it will be disabled in the main config
    config.send_federation = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ps = FederationSenderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-federation-sender",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


class FederationSenderHandler(object):
    """Processes the replication stream and forwards the appropriate entries
    to the federation sender.
    """
    def __init__(self, hs, replication_client):
        self.store = hs.get_datastore()
        self.federation_sender = hs.get_federation_sender()
        self.replication_client = replication_client

        self.federation_position = self.store.federation_out_pos_startup
        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")

        self._last_ack = self.federation_position

        self._room_serials = {}
        self._room_typing = {}

    def on_start(self):
        # There may be some events that are persisted but haven't been sent,
        # so send them now.
        self.federation_sender.notify_new_events(
            self.store.get_room_max_stream_ordering()
        )

    def stream_positions(self):
        return {"federation": self.federation_position}

    def process_replication_rows(self, stream_name, token, rows):
        # The federation stream contains things that we want to send out, e.g.
        # presence, typing, etc.
        if stream_name == "federation":
            send_queue.process_rows_for_federation(self.federation_sender, rows)
            preserve_fn(self.update_token)(token)

        # We also need to poke the federation sender when new events happen
        elif stream_name == "events":
            self.federation_sender.notify_new_events(token)

    @defer.inlineCallbacks
    def update_token(self, token):
        self.federation_position = token

        # We linearize here to ensure we don't have races updating the token
        with (yield self._fed_position_linearizer.queue(None)):
            if self._last_ack < self.federation_position:
                yield self.store.update_federation_out_pos(
                    "federation", self.federation_position
                )

                # We ACK this token over replication so that the master can drop
                # its in memory queues
                self.replication_client.send_federation_ack(self.federation_position)
                self._last_ack = self.federation_position
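
update_token can be fired concurrently from process_replication_rows, so the
Linearizer serialises the read-compare-write on the position. A minimal
sketch of the queueing pattern it relies on (the linearizer argument is
assumed to be a synapse.util.async.Linearizer):

from twisted.internet import defer

@defer.inlineCallbacks
def serialised(linearizer):
    # queue(None) resolves to a context manager once every earlier entrant
    # has left the block, so the body runs one caller at a time.
    with (yield linearizer.queue(None)):
        pass  # critical section: update and ack the federation position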


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,223 +14,255 @@
|
||||
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

import gc
import logging
import os
import sys
sys.dont_write_bytecode = True

from synapse.python_dependencies import check_requirements

import synapse.config.logger
from synapse.config._base import ConfigError
if __name__ == '__main__':
    check_requirements()

from synapse.python_dependencies import (
    check_requirements, CONDITIONAL_REQUIREMENTS
)

from synapse.rest import ClientRestResource
from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
from synapse.storage import are_all_users_on_domain
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
from synapse.storage import (
    are_all_users_on_domain, UpgradeDatabaseException,
)

from synapse.server import HomeServer

from twisted.internet import reactor, task, defer

from twisted.internet import reactor
from twisted.application import service
from twisted.web.resource import Resource, EncodingResourceWrapper
from twisted.enterprise import adbapi
from twisted.web.resource import Resource
from twisted.web.static import File
from twisted.web.server import GzipEncoderFactory
from synapse.http.server import RootRedirect
from twisted.web.server import Site
from twisted.web.http import proxiedLogFormatter, combinedLogFormatter
from synapse.http.server import JsonResource, RootRedirect
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.rest.key.v1.server_key_resource import LocalKey
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from synapse.api.urls import (
    FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
    SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
    CLIENT_PREFIX, FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
    SERVER_KEY_PREFIX, MEDIA_PREFIX, CLIENT_V2_ALPHA_PREFIX, STATIC_PREFIX,
    SERVER_KEY_V2_PREFIX,
)
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.metrics import register_memory_metrics, get_metrics_for
from synapse.util.logcontext import LoggingContext
from synapse.rest.client.v1 import ClientV1RestResource
from synapse.rest.client.v2_alpha import ClientV2AlphaRestResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.federation.transport.server import TransportLayerServer

from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole

from synapse.http.site import SynapseSite

from synapse import events

from daemonize import Daemonize
import twisted.manhole.telnet

import synapse

import logging
import os
import re
import resource
import subprocess


logger = logging.getLogger("synapse.app.homeserver")


def gz_wrap(r):
    return EncodingResourceWrapper(r, [GzipEncoderFactory()])
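
# A minimal sketch (illustrative, not part of the original file) of how the
# gz_wrap helper above is used: wrap any Twisted resource so that clients
# sending "Accept-Encoding: gzip" get a compressed body. `ExampleResource` is
# a hypothetical name; EncodingResourceWrapper and GzipEncoderFactory are the
# real Twisted APIs imported above.
def _example_gz_wrap_usage():
    class ExampleResource(Resource):
        isLeaf = True

        def render_GET(self, request):
            return b"x" * 1024  # highly compressible payload

    return gz_wrap(ExampleResource())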


class SynapseHomeServer(HomeServer):

    def build_http_client(self):
        return MatrixFederationHttpClient(self)

def build_resource_for_web_client(hs):
    webclient_path = hs.get_config().web_client_location
    if not webclient_path:
        try:
            import syweb
        except ImportError:
            quit_with_error(
                "Could not find a webclient.\n\n"
                "Please either install the matrix-angular-sdk or configure\n"
                "the location of the source to serve via the configuration\n"
                "option `web_client_location`\n\n"
                "To install the `matrix-angular-sdk` via pip, run:\n\n"
                "    pip install '%(dep)s'\n"
                "\n"
                "You can also disable hosting of the webclient via the\n"
                "configuration option `web_client`\n"
                % {"dep": CONDITIONAL_REQUIREMENTS["web_client"].keys()[0]}
            )

    def build_resource_for_client(self):
        return ClientV1RestResource(self)

    def build_resource_for_client_v2_alpha(self):
        return ClientV2AlphaRestResource(self)

    def build_resource_for_federation(self):
        return JsonResource(self)

    def build_resource_for_web_client(self):
        import syweb
        syweb_path = os.path.dirname(syweb.__file__)
        webclient_path = os.path.join(syweb_path, "webclient")
        # GZip is disabled here due to
        # https://twistedmatrix.com/trac/ticket/7678
        # (It can stay enabled for the API resources: they call
        # write() with the whole body and then finish() straight
        # after and so do not trigger the bug.)
        # GzipFile was removed in commit 184ba09
        # return GzipFile(webclient_path)  # TODO configurable?
        return File(webclient_path)  # TODO configurable?

    def build_resource_for_static_content(self):
        return File("static")


class SynapseHomeServer(HomeServer):
    def _listener_http(self, config, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        tls = listener_config.get("tls", False)
        site_tag = listener_config.get("tag", port)

    def build_resource_for_content_repo(self):
        return ContentRepoResource(
            self, self.upload_dir, self.auth, self.content_addr
        )

        if tls and config.no_tls:
            return

    def build_resource_for_media_repository(self):
        return MediaRepositoryResource(self)

        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "client":
                    client_resource = ClientRestResource(self)
                    if res["compress"]:
                        client_resource = gz_wrap(client_resource)

    def build_resource_for_server_key(self):
        return LocalKey(self)

                    resources.update({
                        "/_matrix/client/api/v1": client_resource,
                        "/_matrix/client/r0": client_resource,
                        "/_matrix/client/unstable": client_resource,
                        "/_matrix/client/v2_alpha": client_resource,
                        "/_matrix/client/versions": client_resource,
                    })

    def build_resource_for_server_key_v2(self):
        return KeyApiV2Resource(self)

                if name == "federation":
                    resources.update({
                        FEDERATION_PREFIX: TransportLayerServer(self),
                    })

                if name in ["static", "client"]:
                    resources.update({
                        STATIC_PREFIX: File(
                            os.path.join(os.path.dirname(synapse.__file__), "static")
                        ),
                    })

                if name in ["media", "federation", "client"]:
                    media_repo = MediaRepositoryResource(self)
                    resources.update({
                        MEDIA_PREFIX: media_repo,
                        LEGACY_MEDIA_PREFIX: media_repo,
                        CONTENT_REPO_PREFIX: ContentRepoResource(
                            self, self.config.uploads_path
                        ),
                    })

                if name in ["keys", "federation"]:
                    resources.update({
                        SERVER_KEY_PREFIX: LocalKey(self),
                        SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
                    })

                if name == "webclient":
                    resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)

                if name == "metrics" and self.get_config().enable_metrics:
                    resources[METRICS_PREFIX] = MetricsResource(self)

        if WEB_CLIENT_PREFIX in resources:
            root_resource = RootRedirect(WEB_CLIENT_PREFIX)

    def build_resource_for_metrics(self):
        if self.get_config().enable_metrics:
            return MetricsResource(self)
        else:
            root_resource = Resource()
            return None

        root_resource = create_resource_tree(resources, root_resource)

    def build_db_pool(self):
        name = self.db_config["name"]

        if tls:
            for address in bind_addresses:
                reactor.listenSSL(
                    port,
                    SynapseSite(
                        "synapse.access.https.%s" % (site_tag,),
                        site_tag,
                        listener_config,
                        root_resource,
                    ),
                    self.tls_server_context_factory,
                    interface=address
                )

        return adbapi.ConnectionPool(
            name,
            **self.db_config.get("args", {})
        )
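
    # A minimal sketch (illustrative, not from the original diff) of the shape
    # of listener config that _listener_http above consumes. All values here
    # are made up for illustration, not a real deployment.
    def _example_listener_config(self):
        return {
            "port": 8448,
            "bind_addresses": ["::", "0.0.0.0"],
            "tls": True,
            "tag": "main",
            "resources": [
                {"names": ["client"], "compress": True},
                {"names": ["federation", "keys", "media"], "compress": False},
            ],
        }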

    def create_resource_tree(self, redirect_root_to_web_client):
        """Create the resource tree for this Home Server.

        This is unduly complicated because Twisted does not support putting
        child resources more than 1 level deep at a time.

        Args:
            web_client (bool): True to enable the web client.
            redirect_root_to_web_client (bool): True to redirect '/' to the
                location of the web client. This does nothing if web_client is not
                True.
        """
        config = self.get_config()
        web_client = config.web_client

        # list containing (path_str, Resource) e.g:
        # [ ("/aaa/bbb/cc", Resource1), ("/aaa/dummy", Resource2) ]
        desired_tree = [
            (CLIENT_PREFIX, self.get_resource_for_client()),
            (CLIENT_V2_ALPHA_PREFIX, self.get_resource_for_client_v2_alpha()),
            (FEDERATION_PREFIX, self.get_resource_for_federation()),
            (CONTENT_REPO_PREFIX, self.get_resource_for_content_repo()),
            (SERVER_KEY_PREFIX, self.get_resource_for_server_key()),
            (SERVER_KEY_V2_PREFIX, self.get_resource_for_server_key_v2()),
            (MEDIA_PREFIX, self.get_resource_for_media_repository()),
            (STATIC_PREFIX, self.get_resource_for_static_content()),
        ]

        if web_client:
            logger.info("Adding the web client.")
            desired_tree.append((WEB_CLIENT_PREFIX,
                                 self.get_resource_for_web_client()))

        if web_client and redirect_root_to_web_client:
            self.root_resource = RootRedirect(WEB_CLIENT_PREFIX)
        else:
            for address in bind_addresses:
                reactor.listenTCP(
                    port,
                    SynapseSite(
                        "synapse.access.http.%s" % (site_tag,),
                        site_tag,
                        listener_config,
                        root_resource,
                    ),
                    interface=address
                )
        logger.info("Synapse now listening on port %d", port)
            self.root_resource = Resource()

        metrics_resource = self.get_resource_for_metrics()
        if config.metrics_port is None and metrics_resource is not None:
            desired_tree.append((METRICS_PREFIX, metrics_resource))

        # ideally we'd just use getChild and putChild but getChild doesn't work
        # unless you give it a Request object IN ADDITION to the name :/ So
        # instead, we'll store a copy of this mapping so we can actually add
        # extra resources to existing nodes. See self._resource_id for the key.
        resource_mappings = {}
        for full_path, res in desired_tree:
            logger.info("Attaching %s to path %s", res, full_path)
            last_resource = self.root_resource
            for path_seg in full_path.split('/')[1:-1]:
                if path_seg not in last_resource.listNames():
                    # resource doesn't exist, so make a "dummy resource"
                    child_resource = Resource()
                    last_resource.putChild(path_seg, child_resource)
                    res_id = self._resource_id(last_resource, path_seg)
                    resource_mappings[res_id] = child_resource
                    last_resource = child_resource
                else:
                    # we have an existing Resource, use that instead.
                    res_id = self._resource_id(last_resource, path_seg)
                    last_resource = resource_mappings[res_id]

            # ===========================
            # now attach the actual desired resource
            last_path_seg = full_path.split('/')[-1]

            # if there is already a resource here, thieve its children and
            # replace it
            res_id = self._resource_id(last_resource, last_path_seg)
            if res_id in resource_mappings:
                # there is a dummy resource at this path already, which needs
                # to be replaced with the desired resource.
                existing_dummy_resource = resource_mappings[res_id]
                for child_name in existing_dummy_resource.listNames():
                    child_res_id = self._resource_id(existing_dummy_resource,
                                                     child_name)
                    child_resource = resource_mappings[child_res_id]
                    # steal the children
                    res.putChild(child_name, child_resource)

            # finally, insert the desired resource in the right place
            last_resource.putChild(last_path_seg, res)
            res_id = self._resource_id(last_resource, last_path_seg)
            resource_mappings[res_id] = res

        return self.root_resource

    def _resource_id(self, resource, path_seg):
        """Construct an arbitrary resource ID so you can retrieve the mapping
        later.

        If you want to represent resource A putChild resource B with path C,
        the mapping should look like _resource_id(A,C) = B.

        Args:
            resource (Resource): The *parent* Resource
            path_seg (str): The name of the child Resource to be attached.
        Returns:
            str: A unique string which can be a key to the child Resource.
        """
        return "%s-%s" % (resource, path_seg)
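
    # A minimal sketch (illustrative, not from the original diff) of the
    # dummy-resource technique create_resource_tree implements. Twisted's
    # putChild attaches one path segment at a time, so intermediate segments
    # get placeholder Resource() nodes.
    def _example_nested_put_child(self):
        root = Resource()
        dummy = Resource()              # placeholder node for "/_matrix"
        root.putChild("_matrix", dummy)
        leaf = Resource()
        dummy.putChild("client", leaf)  # "/_matrix/client" now resolves to leaf
        return root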

    def start_listening(self):
        config = self.get_config()

        for listener in config.listeners:
            if listener["type"] == "http":
                self._listener_http(config, listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]
        if not config.no_tls and config.bind_port is not None:
            reactor.listenSSL(
                config.bind_port,
                SynapseSite(
                    "synapse.access.https",
                    config,
                    self.root_resource,
                ),
                self.tls_context_factory,
                interface=config.bind_host
            )
            logger.info("Synapse now listening on port %d", config.bind_port)

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            elif listener["type"] == "replication":
                bind_addresses = listener["bind_addresses"]
                for address in bind_addresses:
                    factory = ReplicationStreamProtocolFactory(self)
                    server_listener = reactor.listenTCP(
                        listener["port"], factory, interface=address
                    )
                    reactor.addSystemEventTrigger(
                        "before", "shutdown", server_listener.stopListening,
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])
        if config.unsecure_port is not None:
            reactor.listenTCP(
                config.unsecure_port,
                SynapseSite(
                    "synapse.access.http",
                    config,
                    self.root_resource,
                ),
                interface=config.bind_host
            )
            logger.info("Synapse now listening on port %d", config.unsecure_port)

        metrics_resource = self.get_resource_for_metrics()
        if metrics_resource and config.metrics_port is not None:
            reactor.listenTCP(
                config.metrics_port,
                SynapseSite(
                    "synapse.access.metrics",
                    config,
                    metrics_resource,
                ),
                interface="127.0.0.1",
            )
            logger.info("Metrics now running on 127.0.0.1 port %d", config.metrics_port)
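
    # A minimal sketch (illustrative, not from the original diff) of the
    # graceful-shutdown hook used for the replication listener above:
    # stopListening returns a Deferred that the reactor waits on at shutdown.
    def _example_shutdown_hook(self, listening_port):
        reactor.addSystemEventTrigger(
            "before", "shutdown", listening_port.stopListening,
        )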

    def run_startup_checks(self, db_conn, database_engine):
        all_users_native = are_all_users_on_domain(
@@ -248,86 +280,161 @@ class SynapseHomeServer(HomeServer):
        except IncorrectDatabaseSetup as e:
            quit_with_error(e.message)

    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn
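
    # A minimal sketch (illustrative, not from the original diff) of what the
    # cp_ filtering above does. cp_min/cp_max are adbapi connection-pool
    # settings; only the remaining keys may be passed to the DB-API module.
    # The values are made up for illustration.
    def _example_db_params(self):
        args = {"database": "homeserver.db", "cp_min": 5, "cp_max": 10}
        return {k: v for k, v in args.items() if not k.startswith("cp_")}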


def quit_with_error(error_string):
    message_lines = error_string.split("\n")
    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
    line_length = max([len(l) for l in message_lines]) + 2
    sys.stderr.write("*" * line_length + '\n')
    for line in message_lines:
        sys.stderr.write(" %s\n" % (line.rstrip(),))
        if line.strip():
            sys.stderr.write(" %s\n" % (line.strip(),))
    sys.stderr.write("*" * line_length + '\n')
    sys.exit(1)


def get_version_string():
    try:
        null = open(os.devnull, 'w')
        cwd = os.path.dirname(os.path.abspath(__file__))
        try:
            git_branch = subprocess.check_output(
                ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                stderr=null,
                cwd=cwd,
            ).strip()
            git_branch = "b=" + git_branch
        except subprocess.CalledProcessError:
            git_branch = ""

        try:
            git_tag = subprocess.check_output(
                ['git', 'describe', '--exact-match'],
                stderr=null,
                cwd=cwd,
            ).strip()
            git_tag = "t=" + git_tag
        except subprocess.CalledProcessError:
            git_tag = ""

        try:
            git_commit = subprocess.check_output(
                ['git', 'rev-parse', '--short', 'HEAD'],
                stderr=null,
                cwd=cwd,
            ).strip()
        except subprocess.CalledProcessError:
            git_commit = ""

        try:
            dirty_string = "-this_is_a_dirty_checkout"
            is_dirty = subprocess.check_output(
                ['git', 'describe', '--dirty=' + dirty_string],
                stderr=null,
                cwd=cwd,
            ).strip().endswith(dirty_string)

            git_dirty = "dirty" if is_dirty else ""
        except subprocess.CalledProcessError:
            git_dirty = ""

        if git_branch or git_tag or git_commit or git_dirty:
            git_version = ",".join(
                s for s in
                (git_branch, git_tag, git_commit, git_dirty,)
                if s
            )

            return (
                "Synapse/%s (%s)" % (
                    synapse.__version__, git_version,
                )
            ).encode("ascii")
    except Exception as e:
        logger.warn("Failed to check for git repository: %s", e)

    return ("Synapse/%s" % (synapse.__version__,)).encode("ascii")
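
# Illustrative note (not from the original diff): inside a git checkout the
# function above returns a string shaped like
# "Synapse/0.21.0 (b=develop,t=v0.21.0,abc1234,dirty)"; outside a checkout it
# falls back to just "Synapse/0.21.0". The version and hash shown here are
# made-up examples.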


def change_resource_limit(soft_file_no):
    try:
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)

        if not soft_file_no:
            soft_file_no = hard

        resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))

        logger.info("Set file limit to: %d", soft_file_no)
    except (ValueError, resource.error) as e:
        logger.warn("Failed to set file limit: %s", e)


def setup(config_options):
    """
    Args:
        config_options: The options passed to Synapse. Usually
            `sys.argv[1:]`.
        should_run (bool): Whether to start the reactor.

    Returns:
        HomeServer
    """
    try:
        config = HomeServerConfig.load_or_generate_config(
            "Synapse Homeserver",
            config_options,
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)
    config = HomeServerConfig.load_config(
        "Synapse Homeserver",
        config_options,
        generate_section="Homeserver"
    )

    if not config:
        # If a config isn't returned, and an exception isn't raised, we're just
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    synapse.config.logger.setup_logging(config, use_worker_options=False)
    config.setup_logging()

    # check any extra requirements we have now we have a config
    check_requirements(config)

    version_string = "Synapse/" + get_version_string(synapse)
    version_string = get_version_string()

    logger.info("Server hostname: %s", config.server_name)
    logger.info("Server version: %s", version_string)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts
    if re.search(":[0-9]+$", config.server_name):
        domain_with_port = config.server_name
    else:
        domain_with_port = "%s:%s" % (config.server_name, config.bind_port)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_context_factory = context_factory.ServerContextFactory(config)

    database_engine = create_engine(config.database_config)
    database_engine = create_engine(config.database_config["name"])
    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection

    hs = SynapseHomeServer(
        config.server_name,
        domain_with_port=domain_with_port,
        upload_dir=os.path.abspath("uploads"),
        db_name=config.database_path,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_context_factory=tls_context_factory,
        config=config,
        content_addr=config.content_addr,
        version_string=version_string,
        database_engine=database_engine,
    )

    logger.info("Preparing database: %s...", config.database_config['name'])
    hs.create_resource_tree(
        redirect_root_to_web_client=True,
    )

    db_name = hs.get_db_name()

    logger.info("Preparing database: %s...", db_name)

    try:
        db_conn = hs.get_db_conn(run_new_connection=False)
        prepare_database(db_conn, database_engine, config=config)
        database_engine.on_new_connection(db_conn)
        db_conn = database_engine.module.connect(
            **{
                k: v for k, v in config.database_config.get("args", {}).items()
                if not k.startswith("cp_")
            }
        )

        database_engine.prepare_database(db_conn)
        hs.run_startup_checks(db_conn, database_engine)

        db_conn.commit()
@@ -339,21 +446,21 @@ def setup(config_options):
        )
        sys.exit(1)

    logger.info("Database prepared in %s.", config.database_config['name'])
    logger.info("Database prepared in %s.", db_name)

    if config.manhole:
        f = twisted.manhole.telnet.ShellFactory()
        f.username = "matrix"
        f.password = "rabbithole"
        f.namespace['hs'] = hs
        reactor.listenTCP(config.manhole, f, interface='127.0.0.1')

    hs.setup()
    hs.start_listening()

    def start():
        hs.get_pusherpool().start()
        hs.get_state_handler().start_caching()
        hs.get_datastore().start_profiling()
        hs.get_datastore().start_doing_background_updates()
        hs.get_replication_layer().start_get_pdu_cache()

        register_memory_metrics(hs)

    reactor.callWhenRunning(start)
    hs.get_pusherpool().start()
    hs.get_state_handler().start_caching()
    hs.get_datastore().start_profiling()
    hs.get_replication_layer().start_get_pdu_cache()

    return hs
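
# A minimal sketch (illustrative, not from the original diff) of how setup()
# is driven from an entry point, following the run()/main pattern later in
# this file.
def _example_boot():
    with LoggingContext("main"):
        hs = setup(sys.argv[1:])
        run(hs)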

@@ -368,116 +475,40 @@ class SynapseService(service.Service):
    def startService(self):
        hs = setup(self.config)
        change_resource_limit(hs.config.soft_file_limit)
        if hs.config.gc_thresholds:
            gc.set_threshold(*hs.config.gc_thresholds)

    def stopService(self):
        return self._port.stopListening()


def run(hs):
    PROFILE_SYNAPSE = False
    if PROFILE_SYNAPSE:
        def profile(func):
            from cProfile import Profile
            from threading import current_thread

            def profiled(*args, **kargs):
                profile = Profile()
                profile.enable()
                func(*args, **kargs)
                profile.disable()
                ident = current_thread().ident
                profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
                    hs.hostname, func.__name__, ident
                ))

            return profiled

        from twisted.python.threadpool import ThreadPool
        ThreadPool._worker = profile(ThreadPool._worker)
        reactor.run = profile(reactor.run)

    start_time = hs.get_clock().time()

    stats = {}

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")
        now = int(hs.get_clock().time())
        uptime = int(now - start_time)
        if uptime < 0:
            uptime = 0

        # If the stats directory is empty then this is the first time we've
        # reported stats.
        first_time = not stats

        stats["homeserver"] = hs.config.server_name
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
        stats["total_users"] = yield hs.get_datastore().count_all_users()

        room_count = yield hs.get_datastore().get_room_count()
        stats["total_room_count"] = room_count

        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
        daily_messages = yield hs.get_datastore().count_daily_messages()
        if daily_messages is not None:
            stats["daily_messages"] = daily_messages
class SynapseSite(Site):
    """
    Subclass of a twisted http Site that does access logging with python's
    standard logging
    """
    def __init__(self, logger_name, config, resource, *args, **kwargs):
        Site.__init__(self, resource, *args, **kwargs)
        if config.captcha_ip_origin_is_x_forwarded:
            self._log_formatter = proxiedLogFormatter
        else:
            stats.pop("daily_messages", None)
            self._log_formatter = combinedLogFormatter
        self.access_logger = logging.getLogger(logger_name)

        if first_time:
            # Add callbacks to report the synapse stats as metrics whenever
            # prometheus requests them, typically every 30s.
            # As some of the stats are expensive to calculate we only update
            # them when synapse phones home to matrix.org every 24 hours.
            metrics = get_metrics_for("synapse.usage")
            metrics.add_callback("timestamp", lambda: stats["timestamp"])
            metrics.add_callback("uptime_seconds", lambda: stats["uptime_seconds"])
            metrics.add_callback("total_users", lambda: stats["total_users"])
            metrics.add_callback("total_room_count", lambda: stats["total_room_count"])
            metrics.add_callback(
                "daily_active_users", lambda: stats["daily_active_users"]
            )
            metrics.add_callback(
                "daily_messages", lambda: stats.get("daily_messages", 0)
            )
    def log(self, request):
        line = self._log_formatter(self._logDateTime, request)
        self.access_logger.info(line)

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
            yield hs.get_simple_http_client().put_json(
                "https://matrix.org/report-usage-stats/push",
                stats
            )
        except Exception as e:
            logger.warn("Error reporting stats: %s", e)

    if hs.config.report_stats:
        phone_home_task = task.LoopingCall(phone_stats_home)
        logger.info("Scheduling stats reporting for 24 hour intervals")
        phone_home_task.start(60 * 60 * 24, now=False)
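
    # A minimal sketch (illustrative, not from the original diff) of the
    # LoopingCall pattern used above: now=False delays the first run by one
    # full interval, so stats are first reported 24 hours after startup.
    def _example_looping_call():
        def tick():
            logger.info("tick")
        loop = task.LoopingCall(tick)
        loop.start(60 * 60 * 24, now=False)
        return loop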

def run(hs):

    def in_thread():
        # Uncomment to enable tracing of log context changes.
        # sys.settrace(logcontext_tracer)

        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            with LoggingContext("run"):
                change_resource_limit(hs.config.soft_file_limit)
                if hs.config.gc_thresholds:
                    gc.set_threshold(*hs.config.gc_thresholds)

                reactor.run()

    if hs.config.daemonize:

        if hs.config.print_pidfile:
            print (hs.config.pid_file)
            print hs.config.pid_file

        daemon = Daemonize(
            app="synapse-homeserver",

@@ -1,217 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.storage.media_repository import MediaRepositoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.api.urls import (
    CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
)
from synapse.crypto import context_factory

from synapse import events


from twisted.internet import reactor
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.media_repository")


class MediaRepositorySlavedStore(
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    TransactionStore,
    BaseSlavedStore,
    MediaRepositoryStore,
    ClientIpStore,
):
    pass
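
    # Illustrative note (not from the original diff): the mixin order above
    # matters, since Python's MRO resolves attributes left to right, letting
    # the Slaved* classes shadow the base store implementations. A quick way
    # to inspect the resolution order:
    @classmethod
    def _example_inspect_mro(cls):
        return [c.__name__ for c in cls.__mro__]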


class MediaRepositoryServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "media":
                    media_repo = MediaRepositoryResource(self)
                    resources.update({
                        MEDIA_PREFIX: media_repo,
                        LEGACY_MEDIA_PREFIX: media_repo,
                        CONTENT_REPO_PREFIX: ContentRepoResource(
                            self, self.config.uploads_path
                        ),
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse media repository now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse media repository", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.media_repository"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = MediaRepositoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-media-repository",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
@@ -1,282 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.server import HomeServer
from synapse.config._base import ConfigError
from synapse.config.logger import setup_logging
from synapse.config.homeserver import HomeServerConfig
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.storage.roommember import RoomMemberStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.storage.engines import create_engine
from synapse.storage import DataStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn, \
    PreserveLoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

from synapse import events

from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.pusher")


class PusherSlaveStore(
    SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore,
    SlavedAccountDataStore
):
    update_pusher_last_stream_ordering_and_success = (
        DataStore.update_pusher_last_stream_ordering_and_success.__func__
    )

    update_pusher_failing_since = (
        DataStore.update_pusher_failing_since.__func__
    )

    update_pusher_last_stream_ordering = (
        DataStore.update_pusher_last_stream_ordering.__func__
    )

    get_throttle_params_by_room = (
        DataStore.get_throttle_params_by_room.__func__
    )

    set_throttle_params = (
        DataStore.set_throttle_params.__func__
    )

    get_time_of_last_push_action_before = (
        DataStore.get_time_of_last_push_action_before.__func__
    )

    get_profile_displayname = (
        DataStore.get_profile_displayname.__func__
    )

    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
    )


class PusherServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = PusherSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def remove_pusher(self, app_id, push_key, user_id):
        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse pusher now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return PusherReplicationHandler(self)


class PusherReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(PusherReplicationHandler, self).__init__(hs.get_datastore())

        self.pusher_pool = hs.get_pusherpool()

    def on_rdata(self, stream_name, token, rows):
        super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
        preserve_fn(self.poke_pushers)(stream_name, token, rows)

    @defer.inlineCallbacks
    def poke_pushers(self, stream_name, token, rows):
        if stream_name == "pushers":
            for row in rows:
                if row.deleted:
                    yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
                else:
                    yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
        elif stream_name == "events":
            yield self.pusher_pool.on_new_notifications(
                token, token,
            )
        elif stream_name == "receipts":
            yield self.pusher_pool.on_new_receipts(
                token, token, set(row.room_id for row in rows)
            )

    def stop_pusher(self, user_id, app_id, pushkey):
        key = "%s:%s" % (app_id, pushkey)
        pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
        pusher = pushers_for_user.pop(key, None)
        if pusher is None:
            return
        logger.info("Stopping pusher %r / %r", user_id, key)
        pusher.on_stop()

    def start_pusher(self, user_id, app_id, pushkey):
        key = "%s:%s" % (app_id, pushkey)
        logger.info("Starting pusher %r / %r", user_id, key)
        return self.pusher_pool._refresh_pusher(app_id, pushkey, user_id)
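
    # A minimal sketch (illustrative, not from the original diff) of the
    # per-stream dispatch poke_pushers above performs, reduced to its
    # skeleton. The row attributes mirror the ones used above; the returned
    # action names are made up for illustration.
    def _example_stream_dispatch(self, stream_name, rows):
        if stream_name == "pushers":
            return [("stop" if row.deleted else "start") for row in rows]
        elif stream_name in ("events", "receipts"):
            return ["notify"]
        return []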


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse pusher", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.pusher"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    if config.start_pushers:
        sys.stderr.write(
            "\nThe pushers must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``start_pushers: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the pushers to start since they will be disabled in the main config
    config.start_pushers = True

    database_engine = create_engine(config.database_config)

    ps = PusherServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ps.get_pusherpool().start()
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-pusher",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        ps = start(sys.argv[1:])
@@ -1,477 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.api.constants import EventTypes
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.handlers.presence import PresenceHandler, get_interested_parties
from synapse.http.site import SynapseSite
from synapse.http.server import JsonResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.rest.client.v2_alpha import sync
from synapse.rest.client.v1 import events
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.storage.presence import UserPresenceState
from synapse.storage.roommember import RoomMemberStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string

from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import contextlib
import gc

logger = logging.getLogger("synapse.app.synchrotron")


class SynchrotronSlavedStore(
    SlavedPushRuleStore,
    SlavedEventStore,
    SlavedReceiptsStore,
    SlavedAccountDataStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedFilteringStore,
    SlavedPresenceStore,
    SlavedDeviceInboxStore,
    SlavedDeviceStore,
    RoomStore,
    BaseSlavedStore,
    ClientIpStore,  # After BaseSlavedStore because the constructor is different
):
    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
    )

    did_forget = (
        RoomMemberStore.__dict__["did_forget"]
    )


UPDATE_SYNCING_USERS_MS = 10 * 1000


class SynchrotronPresence(object):
    def __init__(self, hs):
        self.hs = hs
        self.is_mine_id = hs.is_mine_id
        self.http_client = hs.get_simple_http_client()
        self.store = hs.get_datastore()
        self.user_to_num_current_syncs = {}
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()

        active_presence = self.store.take_presence_startup_info()
        self.user_to_current_state = {
            state.user_id: state
            for state in active_presence
        }

        # user_id -> last_sync_ms. Lists the users that have stopped syncing
        # but we haven't notified the master of that yet
        self.users_going_offline = {}

        self._send_stop_syncing_loop = self.clock.looping_call(
            self.send_stop_syncing, 10 * 1000
        )

        self.process_id = random_string(16)
        logger.info("Presence process_id is %r", self.process_id)

    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
        self.hs.get_tcp_replication().send_user_sync(user_id, is_syncing, last_sync_ms)

    def mark_as_coming_online(self, user_id):
        """A user has started syncing. Send a UserSync to the master, unless they
        had recently stopped syncing.

        Args:
            user_id (str)
        """
        going_offline = self.users_going_offline.pop(user_id, None)
        if not going_offline:
            # Safe to skip because we haven't yet told the master they were offline
            self.send_user_sync(user_id, True, self.clock.time_msec())

    def mark_as_going_offline(self, user_id):
        """A user has stopped syncing. We wait before notifying the master as
        it's likely they'll come back soon. This allows us to avoid sending
        a stopped syncing immediately followed by a started syncing notification
        to the master

        Args:
            user_id (str)
        """
        self.users_going_offline[user_id] = self.clock.time_msec()

    def send_stop_syncing(self):
        """Check if there are any users who have stopped syncing a while ago
        and haven't come back yet. If there are, poke the master about them.
        """
        now = self.clock.time_msec()
        for user_id, last_sync_ms in self.users_going_offline.items():
            if now - last_sync_ms > 10 * 1000:
                self.users_going_offline.pop(user_id, None)
                self.send_user_sync(user_id, False, last_sync_ms)
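
    # A minimal sketch (illustrative, not from the original diff) of the
    # debounce technique mark_as_going_offline/send_stop_syncing implement: a
    # user who reconnects within the window never generates an "offline"
    # message. `pending` maps user_id -> last_sync_ms; the helper name and
    # shape are hypothetical.
    def _example_debounce(self, pending, now_ms, window_ms=10 * 1000):
        expired = [u for u, t in pending.items() if now_ms - t > window_ms]
        for user_id in expired:
            pending.pop(user_id, None)
        return expired  # the users to report offline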

    def set_state(self, user, state, ignore_status_msg=False):
        # TODO How's this supposed to work?
        pass

    get_states = PresenceHandler.get_states.__func__
    get_state = PresenceHandler.get_state.__func__
    current_state_for_users = PresenceHandler.current_state_for_users.__func__

    def user_syncing(self, user_id, affect_presence):
        if affect_presence:
            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
            self.user_to_num_current_syncs[user_id] = curr_sync + 1

            # If we went from no in flight sync to some, notify replication
            if self.user_to_num_current_syncs[user_id] == 1:
                self.mark_as_coming_online(user_id)

        def _end():
            # We check that the user_id is in user_to_num_current_syncs because
            # user_to_num_current_syncs may have been cleared if we are
            # shutting down.
            if affect_presence and user_id in self.user_to_num_current_syncs:
                self.user_to_num_current_syncs[user_id] -= 1

                # If we went from one in flight sync to none, notify replication
                if self.user_to_num_current_syncs[user_id] == 0:
                    self.mark_as_going_offline(user_id)

        @contextlib.contextmanager
        def _user_syncing():
            try:
                yield
            finally:
                _end()

        return defer.succeed(_user_syncing())
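
    # A minimal sketch (illustrative, not from the original diff) of the
    # pattern user_syncing uses above: hand the caller a context manager whose
    # __exit__ runs the cleanup, wrapped in an already-fired Deferred so sync
    # code can treat it uniformly with asynchronous results.
    def _example_user_syncing_shape(self):
        @contextlib.contextmanager
        def _guard():
            try:
                yield
            finally:
                pass  # decrement counters, notify replication, etc.

        return defer.succeed(_guard())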
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def notify_from_replication(self, states, stream_id):
|
||||
parties = yield get_interested_parties(self.store, states)
|
||||
room_ids_to_states, users_to_states = parties
|
||||
|
||||
self.notifier.on_new_event(
|
||||
"presence_key", stream_id, rooms=room_ids_to_states.keys(),
|
||||
users=users_to_states.keys()
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def process_replication_rows(self, token, rows):
|
||||
states = [UserPresenceState(
|
||||
row.user_id, row.state, row.last_active_ts,
|
||||
row.last_federation_update_ts, row.last_user_sync_ts, row.status_msg,
|
||||
row.currently_active
|
||||
) for row in rows]
|
||||
|
||||
for state in states:
|
||||
self.user_to_current_state[row.user_id] = state
|
||||
|
||||
stream_id = token
|
||||
yield self.notify_from_replication(states, stream_id)
|
||||
|
||||
def get_currently_syncing_users(self):
|
||||
return [
|
||||
user_id for user_id, count in self.user_to_num_current_syncs.iteritems()
|
||||
if count > 0
|
||||
]
|
||||
|
||||
|
||||
class SynchrotronTyping(object):
|
||||
def __init__(self, hs):
|
||||
self._latest_room_serial = 0
|
||||
self._room_serials = {}
|
||||
self._room_typing = {}
|
||||
|
||||
def stream_positions(self):
|
||||
# We must update this typing token from the response of the previous
|
||||
# sync. In particular, the stream id may "reset" back to zero/a low
|
||||
# value which we *must* use for the next replication request.
|
||||
return {"typing": self._latest_room_serial}
|
||||
|
||||
def process_replication_rows(self, token, rows):
|
||||
self._latest_room_serial = token
|
||||
|
||||
for row in rows:
|
||||
self._room_serials[row.room_id] = token
|
||||
self._room_typing[row.room_id] = row.user_ids
|
||||
|
||||
|
||||
class SynchrotronApplicationService(object):
|
||||
def notify_interested_services(self, event):
|
||||
pass
|
||||
|
||||
|
||||
class SynchrotronServer(HomeServer):
|
||||
def get_db_conn(self, run_new_connection=True):
|
||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||
# not be passed to the database engine.
|
||||
db_params = {
|
||||
k: v for k, v in self.db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
db_conn = self.database_engine.module.connect(**db_params)
|
||||
|
||||
if run_new_connection:
|
||||
self.database_engine.on_new_connection(db_conn)
|
||||
return db_conn
|
||||
|
||||
def setup(self):
|
||||
logger.info("Setting up.")
|
||||
self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
|
||||
logger.info("Finished setting up.")
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
site_tag = listener_config.get("tag", port)
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "metrics":
|
||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||
elif name == "client":
|
||||
resource = JsonResource(self, canonical_json=False)
|
||||
sync.register_servlets(self, resource)
|
||||
events.register_servlets(self, resource)
|
||||
InitialSyncRestServlet(self).register(resource)
|
||||
RoomInitialSyncRestServlet(self).register(resource)
|
||||
resources.update({
|
||||
"/_matrix/client/r0": resource,
|
||||
"/_matrix/client/unstable": resource,
|
||||
"/_matrix/client/v2_alpha": resource,
|
||||
"/_matrix/client/api/v1": resource,
|
||||
})
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
|
||||
logger.info("Synapse synchrotron now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners):
|
||||
for listener in listeners:
|
||||
if listener["type"] == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener["type"] == "manhole":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
self.get_tcp_replication().start_replication(self)
|
||||
|
||||
def build_tcp_replication(self):
|
||||
return SyncReplicationHandler(self)
|
||||
|
||||
def build_presence_handler(self):
|
||||
return SynchrotronPresence(self)
|
||||
|
||||
def build_typing_handler(self):
|
||||
return SynchrotronTyping(self)
|
||||
|
||||
|
||||
class SyncReplicationHandler(ReplicationClientHandler):
|
||||
def __init__(self, hs):
|
||||
super(SyncReplicationHandler, self).__init__(hs.get_datastore())
|
||||
|
||||
self.store = hs.get_datastore()
|
||||
self.typing_handler = hs.get_typing_handler()
|
||||
self.presence_handler = hs.get_presence_handler()
|
||||
self.notifier = hs.get_notifier()
|
||||
|
||||
self.presence_handler.sync_callback = self.send_user_sync
|
||||
|
||||
def on_rdata(self, stream_name, token, rows):
|
||||
super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
|
||||
|
||||
preserve_fn(self.process_and_notify)(stream_name, token, rows)
|
||||
|
||||
def get_streams_to_replicate(self):
|
||||
args = super(SyncReplicationHandler, self).get_streams_to_replicate()
|
||||
args.update(self.typing_handler.stream_positions())
|
||||
return args
|
||||
|
||||
def get_currently_syncing_users(self):
|
||||
return self.presence_handler.get_currently_syncing_users()
|
||||
|
||||
    @defer.inlineCallbacks
    def process_and_notify(self, stream_name, token, rows):
        if stream_name == "events":
            # We shouldn't get multiple rows per token for events stream, so
            # we don't need to optimise this for multiple rows.
            for row in rows:
                event = yield self.store.get_event(row.event_id)
                extra_users = ()
                if event.type == EventTypes.Member:
                    extra_users = (event.state_key,)
                max_token = self.store.get_room_max_stream_ordering()
                self.notifier.on_new_room_event(
                    event, token, max_token, extra_users
                )
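        # For the remaining streams we only poke the notifier with the stream
        # token and the affected users/rooms; blocked /sync requests pick up
        # the new data on their next wakeup.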
elif stream_name == "push_rules":
|
||||
self.notifier.on_new_event(
|
||||
"push_rules_key", token, users=[row.user_id for row in rows],
|
||||
)
|
||||
elif stream_name in ("account_data", "tag_account_data",):
|
||||
self.notifier.on_new_event(
|
||||
"account_data_key", token, users=[row.user_id for row in rows],
|
||||
)
|
||||
elif stream_name == "receipts":
|
||||
self.notifier.on_new_event(
|
||||
"receipt_key", token, rooms=[row.room_id for row in rows],
|
||||
)
|
||||
elif stream_name == "typing":
|
||||
self.typing_handler.process_replication_rows(token, rows)
|
||||
self.notifier.on_new_event(
|
||||
"typing_key", token, rooms=[row.room_id for row in rows],
|
||||
)
|
||||
elif stream_name == "to_device":
|
||||
entities = [row.entity for row in rows if row.entity.startswith("@")]
|
||||
if entities:
|
||||
self.notifier.on_new_event(
|
||||
"to_device_key", token, users=entities,
|
||||
)
|
||||
elif stream_name == "device_lists":
|
||||
all_room_ids = set()
|
||||
for row in rows:
|
||||
room_ids = yield self.store.get_rooms_for_user(row.user_id)
|
||||
all_room_ids.update(room_ids)
|
||||
self.notifier.on_new_event(
|
||||
"device_list_key", token, rooms=all_room_ids,
|
||||
)
|
||||
elif stream_name == "presence":
|
||||
yield self.presence_handler.process_replication_rows(token, rows)
|
||||
|
||||
|
||||
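# Entry point. A synchrotron is typically launched with the shared homeserver
# config plus its own worker config (hypothetical paths; -c / --config-path
# may be repeated):
#
#   python -m synapse.app.synchrotron -c homeserver.yaml -c synchrotron.yaml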
def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse synchrotron", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.synchrotron"

    setup_logging(config, use_worker_options=True)

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    ss = SynchrotronServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
        application_service_handler=SynchrotronApplicationService(),
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_datastore().start_profiling()
        ss.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

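    # If worker_daemonize is set, fork into the background via Daemonize and
    # run the reactor there; otherwise run in the foreground.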
    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-synchrotron",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])