Compare commits


308 Commits

Author SHA1 Message Date
Andrew Morgan
9690e9dbac WIP 2019-01-31 18:31:17 +00:00
Richard van der Hoff
6fba9fd20c Merge remote-tracking branch 'origin/release-v0.99.0' into develop 2019-01-30 17:02:32 +00:00
Richard van der Hoff
b8b898666e v0.99.0rc2 2019-01-30 16:31:07 +00:00
Matthew Hodgson
ad7ac8853c by default include m.room.encryption on invites (#3902)
* by default include m.room.encryption on invites

* fix constant

* changelog
2019-01-30 16:26:13 +00:00
Richard van der Hoff
c74b96755c Merge remote-tracking branch 'origin/develop' into release-v0.99.0 2019-01-30 16:23:28 +00:00
Amber Brown
fbaee26c68 ACME Upgrade Docs (#4528) 2019-01-30 16:22:37 +00:00
Erik Johnston
d534a27fe8 Merge pull request #4527 from matrix-org/erikj/fix_sending_remote_invite_rejections
Fix remote invite rejections not coming down sync
2019-01-30 16:15:35 +00:00
Neil Johnson
81b7e7eed3 Update constants.py
remove trailing ,
2019-01-30 16:11:36 +00:00
Erik Johnston
e25ab58c5e Newsfile 2019-01-30 15:50:28 +00:00
Erik Johnston
ed8c5e4cda Fix remote invite rejections not coming down sync
This was broken in PR #4405, commit 886e5ac, where we changed remote
rejections to be outliers.

The fix is to explicitly add the leave event in when we know it's an
out-of-band invite. We can't always add the event, because if the server
is/was in the room there might be more events to send down sync than
just the leave.
2019-01-30 15:46:27 +00:00
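A minimal sketch of that fix, with invented names (this is not Synapse's actual sync code):

    # Out-of-band leaves (rejected remote invites) are stored as outliers,
    # so they never appear in the room's event stream and must be added
    # explicitly when building the sync response.
    def events_for_left_room(leave_event, timeline_events):
        if leave_event.get("out_of_band_membership"):
            return [leave_event]
        # If the server is/was in the room, the stream may hold more than
        # just the leave, so fall back to the normal timeline.
        return timeline_events

    leave = {"membership": "leave", "out_of_band_membership": True}
    assert events_for_left_room(leave, []) == [leave]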
Richard van der Hoff
a5d0c771a3 0.99.0rc1 2019-01-30 15:11:18 +00:00
Erik Johnston
6587b0b89b Merge pull request #4472 from matrix-org/neilj/room_capabilities
Server capabilities support
2019-01-30 14:26:56 +00:00
Erik Johnston
a4f52a33fe Fix replication for room v3 (#4523)
* Fix replication for room v3

We were not correctly quoting the path fragments over http replication,
which meant that it exploded when the event IDs had a slash in them

* Newsfile
2019-01-30 14:19:52 +00:00
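The quoting fix is easy to picture: room v3 event IDs are hashes that can contain "/", so path fragments must be percent-encoded. A hedged sketch (the replication path itself is illustrative, not Synapse's real URL layout):

    from urllib.parse import quote

    def replication_path(event_id: str) -> str:
        # safe="" forces "/" inside the fragment to be percent-encoded too
        return "/_synapse/replication/send_event/%s" % quote(event_id, safe="")

    print(replication_path("$abc/def+ghi"))
    # -> /_synapse/replication/send_event/%24abc%2Fdef%2Bghi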
Richard van der Hoff
7615a8ced1 ACME config cleanups (#4525)
* Handle listening for ACME requests on IPv6 addresses

the weird url-but-not-actually-a-url-string doesn't handle IPv6 addresses
without extra quoting. Building a string which you are about to parse again
seems like a weird choice. Let's just use listenTCP, which is consistent with
what we do elsewhere.

* Clean up the default ACME config

make it look a bit more consistent with everything else, and tweak the defaults
to listen on port 80.

* newsfile
2019-01-30 14:17:55 +00:00
Erik Johnston
43c6fca960 Merge pull request #4524 from matrix-org/erikj/fix_no_tls
Fix bug where synapse fails to start if no_tls set
2019-01-30 13:11:05 +00:00
Erik Johnston
e87d7a4b0f Raise ConfigError instead 2019-01-30 12:48:09 +00:00
Erik Johnston
e6a7a15f93 Newsfile 2019-01-30 12:17:38 +00:00
Erik Johnston
270f212a2a _listener_http should return a list 2019-01-30 12:14:50 +00:00
Richard van der Hoff
a79034aedf Merge pull request #4521 from matrix-org/rav/fed_routing/cleanups
Tiny .well-known fixes
2019-01-30 11:47:24 +00:00
Richard van der Hoff
c7b24ac3d0 Follow redirects on .well-known (#4520) 2019-01-30 11:43:33 +00:00
Amber Brown
f6813919e8 SIGHUP for TLS cert reloading (#4495) 2019-01-30 11:00:02 +00:00
Richard van der Hoff
283753c33a newsfile 2019-01-30 10:59:21 +00:00
Richard van der Hoff
09a1a6b55e fix exception text 2019-01-30 10:58:52 +00:00
Richard van der Hoff
928c50b59a Also jitter the invalid cache period 2019-01-30 10:58:52 +00:00
Neil Johnson
b37e8c9572 Merge branch 'neilj/room_capabilities' of github.com:matrix-org/synapse into neilj/room_capabilities 2019-01-30 10:56:47 +00:00
Neil Johnson
f834d98402 isort 2019-01-30 10:55:42 +00:00
Richard van der Hoff
bc5f6e1797 Add a caching layer to .well-known responses (#4516) 2019-01-30 10:55:25 +00:00
Amber Brown
3f189c902e Fix flake8 (#4519) 2019-01-30 10:53:17 +00:00
Neil Johnson
ee4df7fd7a Merge branch 'develop' into neilj/room_capabilities 2019-01-30 10:28:08 +00:00
Neil Johnson
c5a0f82cca define room dispositions for use in exposing room capabilities 2019-01-30 10:24:24 +00:00
Neil Johnson
9c850d9d5e formatting and use constants where available 2019-01-30 10:23:26 +00:00
Neil Johnson
2f46804055 Populate default room version from Constants 2019-01-30 09:39:10 +00:00
Neil Johnson
c7837dce24 reflect that rooms v3 is a stable room version 2019-01-30 09:33:30 +00:00
Richard van der Hoff
457fbfaf22 Merge pull request #4486 from xperimental/workaround-4216
Implement workaround for login error.
2019-01-30 07:06:11 +00:00
Robert Jacob
2a7f0b8953 Implement workaround for login error.
Signed-off-by: Robert Jacob <xperimental@solidproject.de>
2019-01-30 01:06:39 +01:00
Erik Johnston
47d03a79fc Merge pull request #4515 from matrix-org/erikj/room_version_v3
Enable support for room version 3
2019-01-29 23:53:14 +00:00
Erik Johnston
0b24d58e05 No vdh tests! 2019-01-29 23:11:48 +00:00
Erik Johnston
ebcffbc3eb Newsfile 2019-01-29 23:09:10 +00:00
Erik Johnston
a1b0e1879b Enable room version v3 2019-01-29 23:09:10 +00:00
Erik Johnston
e12313ba25 Merge pull request #4499 from matrix-org/erikj/redactions_eiah
Implement rechecking of redactions for room version v3
2019-01-29 23:07:00 +00:00
Erik Johnston
67b82f1336 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/redactions_eiah 2019-01-29 22:58:38 +00:00
Erik Johnston
afeea319df Fixup comment 2019-01-29 22:55:29 +00:00
Erik Johnston
7740eddd04 Merge pull request #4514 from matrix-org/erikj/remove_event_id
Remove usages of event ID's domain
2019-01-29 22:54:25 +00:00
Erik Johnston
ff2f65d737 Update comment 2019-01-29 22:35:36 +00:00
Erik Johnston
655ce037fd check event format version not room version 2019-01-29 22:33:43 +00:00
Erik Johnston
f46a818ce5 kill vdh test some more 2019-01-29 22:02:58 +00:00
Erik Johnston
a696c48133 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/redactions_eiah 2019-01-29 22:00:33 +00:00
Erik Johnston
c21b7cbc09 Update synapse/storage/events_worker.py 2019-01-29 21:53:48 +00:00
Erik Johnston
4db252c073 Check redaction state when event is pulled out of the database 2019-01-29 21:48:36 +00:00
Erik Johnston
6d23ec2111 Fix typo 2019-01-29 21:45:53 +00:00
Erik Johnston
b5d510ad64 Remove unused arg 2019-01-29 21:45:28 +00:00
Erik Johnston
6f9cdc2d47 Merge pull request #4483 from matrix-org/erikj/event_v2
Implement event format V2
2019-01-29 21:40:00 +00:00
Erik Johnston
47e2dd1994 Drop vdh support 2019-01-29 21:24:34 +00:00
Erik Johnston
38590a4870 Add docstring 2019-01-29 21:22:47 +00:00
Richard van der Hoff
82165eeb05 Update synapse/storage/events_worker.py
Co-Authored-By: erikjohnston <erikj@jki.re>
2019-01-29 21:14:39 +00:00
Andrew Morgan
03b086647f Merge pull request #4512 from matrix-org/anoa/consent_dir
Check consent dir path on startup
2019-01-29 20:08:18 +00:00
Erik Johnston
3680bc18e9 Newsfile 2019-01-29 18:06:11 +00:00
Erik Johnston
84af577356 Implement event format v2 2019-01-29 18:06:11 +00:00
Neil Johnson
e4bef9d470 rework format of change password capability 2019-01-29 18:04:56 +00:00
Erik Johnston
b40abe0724 Newsfile 2019-01-29 18:02:26 +00:00
Erik Johnston
610f0830b0 Don't assert an event must have an event ID 2019-01-29 18:02:26 +00:00
Erik Johnston
840068bd78 Only check event ID domain for signatures for V1 events
In future versions, events won't have an event ID, so we won't be able to
do this check.
2019-01-29 18:02:02 +00:00
Erik Johnston
8e3d34e3c5 Use event origin for filtering incoming events
We only process events sent to us from a server if the event ID matches
the server, to help guard against federation storms. We replace this
with a check against the event origin.
2019-01-29 16:57:00 +00:00
Erik Johnston
55d9024835 Use sender and not event ID domain to check if ours
The transaction queue only sends out events that we generate. This was
done by checking domain of event ID, but that can no longer be used.
Instead, we may as well use the sender field.
2019-01-29 16:54:23 +00:00
Richard van der Hoff
cc2d650ef7 Relax requirement for a content-type on .well-known (#4511) 2019-01-29 16:49:17 +00:00
Erik Johnston
b1fffca345 Remove event ID usage when checking if new room
The event ID is changing, so we can no longer get the domain from it. On
the other hand, the check is unnecessary.
2019-01-29 16:15:02 +00:00
Erik Johnston
770b823445 Only check that the event ID's domain signed the event for V1 and V2
Since newer event versions don't use the same format for event IDs.
2019-01-29 16:15:00 +00:00
Travis Ralston
d02c5ccb11 Merge pull request #4498 from matrix-org/travis/fix-docs-public_baseurl
Don't recommend :8448 to people on public_baseurl
2019-01-29 09:06:16 -07:00
Neil Johnson
19259d903c update to reflect broadening scope 2019-01-29 16:01:46 +00:00
Neil Johnson
f03b3a7a3a support change_password in capabilities end-point 2019-01-29 15:58:37 +00:00
Andrew Morgan
9adbc912b3 Add changelog 2019-01-29 15:34:06 +00:00
Andrew Morgan
e65a17b26f Check consent dir path on startup 2019-01-29 15:30:33 +00:00
Amber Brown
6bd4374636 Do not generate self-signed TLS certificates by default. (#4509) 2019-01-29 14:09:10 +00:00
Erik Johnston
b8d75ef53e Merge pull request #4481 from matrix-org/erikj/event_builder
Refactor event building into EventBuilder
2019-01-29 14:07:23 +00:00
Richard van der Hoff
99e36d5e24 Implement MSC1708 (.well-known lookups for server routing) (#4489) 2019-01-29 13:53:02 +00:00
Erik Johnston
b82a76c384 Finish comment... 2019-01-29 13:50:59 +00:00
Erik Johnston
2562319821 Merge pull request #4510 from matrix-org/erikj/fixup_compute_event_signature
Fixup calls to `compute_event_signature`
2019-01-29 13:35:19 +00:00
Neil Johnson
4eeb2fb215 isort 2019-01-29 12:44:10 +00:00
Erik Johnston
5891a6edc8 Correctly set context.app_service 2019-01-29 12:09:10 +00:00
Erik Johnston
fb99dae9c8 Don't set event_id twice 2019-01-29 12:08:23 +00:00
Richard van der Hoff
5488cadaae Enable configuring test log level via env var (#4506)
I got fed up with always adding '@unittest.DEBUG' every time I needed to debug a test.
2019-01-29 12:07:00 +00:00
Erik Johnston
64c1bd1d21 Remove dead function 2019-01-29 12:06:28 +00:00
Erik Johnston
7d1024d574 Newsfile 2019-01-29 11:58:16 +00:00
Erik Johnston
7709d2bd16 Implement rechecking of redactions 2019-01-29 11:56:20 +00:00
Erik Johnston
7a3ec5b022 Add RoomVersions.V3 constant, without enabling it
We add the constant, but don't add it to the known room versions. This
lets us start adding V3 logic, but the servers will never join or create
V3 rooms
2019-01-29 11:55:33 +00:00
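The pattern is straightforward; a hypothetical sketch (not Synapse's actual constants module):

    class RoomVersions:
        V1 = "1"
        V2 = "2"
        V3 = "3"  # defined so V3 code paths can be written and tested

    # ...but deliberately left out of the known set, so this server will
    # neither create nor join V3 rooms yet.
    KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.V2}

    assert RoomVersions.V3 not in KNOWN_ROOM_VERSIONS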
Erik Johnston
0c55b7701c Newsfile 2019-01-29 11:42:33 +00:00
Erik Johnston
6598992b01 Fixup calls to compute_event_signature
We currently pass FrozenEvent instead of `dict` to
`compute_event_signature`, which works by accident due to `dict(event)`
producing the correct result.

This fixes PR #4493 commit 855a151
2019-01-29 11:41:58 +00:00
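Synapse's signing helpers sit on top of the signedjson library, whose sign_json takes a plain dict; a minimal sketch of the contract (the event content here is made up):

    from signedjson.key import generate_signing_key
    from signedjson.sign import sign_json

    signing_key = generate_signing_key("v1")
    event = {"type": "m.room.message", "room_id": "!room:example.org"}

    # Passing a FrozenEvent happened to work because dict(event) produced
    # the right mapping; converting explicitly makes the contract obvious.
    signed = sign_json(dict(event), "example.org", signing_key)
    print(signed["signatures"]["example.org"])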
Neil Johnson
a124025dab enforce auth for capabilities endpoint 2019-01-29 11:37:56 +00:00
Erik Johnston
ff37acb8ce Merge pull request #4496 from matrix-org/erikj/invite_fallback
Implement fallback for V2 invite API
2019-01-29 11:28:23 +00:00
Erik Johnston
aee39f7de8 Fix test to use valid event format 2019-01-29 11:19:50 +00:00
Erik Johnston
5180f12bae Replace usage of builder.user_id with builder.sender
`.user_id` is proxied to `.sender` in FrozenEvent, so this has no
functional change
2019-01-29 11:18:38 +00:00
Erik Johnston
a388d59d44 Newsfile 2019-01-29 11:13:08 +00:00
Erik Johnston
be47cfa9c9 Refactor event building into EventBuilder
This is so that everything is done in one place, making it easier to
change the event format based on room version
2019-01-29 11:13:00 +00:00
Erik Johnston
554ca58ea1 Make add_hashes_and_signatures operate on dicts 2019-01-29 11:12:38 +00:00
Amber Brown
f815bd7feb Make linearizer more quiet (#4507) 2019-01-29 11:05:31 +00:00
Erik Johnston
073f6c2e5e Merge pull request #4494 from matrix-org/erikj/fixup_event_validator
Split up event validation between event and builder
2019-01-29 10:55:07 +00:00
Erik Johnston
40638ae7f5 Remove duplicate checks 2019-01-29 10:37:40 +00:00
Erik Johnston
9fa3c6ffa3 Fix up error messages 2019-01-29 10:36:46 +00:00
Erik Johnston
28efc80723 Fold validate into validate_new 2019-01-29 10:34:49 +00:00
Erik Johnston
b6b73a0bcf Fix receiving events from federation via a worker
This bug was introduced in PR #4470, commit 678a92cb56
2019-01-29 10:30:26 +00:00
Neil Johnson
327b992e17 register capabilities servlet 2019-01-29 10:28:35 +00:00
Amber Brown
94fb63e44f Fix typo in upserts code (#4505)
* fix obvious problem :|

* changelog
2019-01-29 10:04:23 +00:00
Erik Johnston
17709f8f9c Merge pull request #4493 from matrix-org/erikj/refactor_event_signing
Refactor event signing to work on dicts
2019-01-29 09:52:54 +00:00
Richard van der Hoff
f2b553d656 Use SimpleResolverComplexifier in tests (#4497)
two reasons for this. One, it saves a bunch of boilerplate. Two, it squashes
unicode to IDNA-in-a-`str` (even on python 3) in a way that it turns out we
rely on to give consistent behaviour between python 2 and 3.
2019-01-29 09:38:29 +00:00
Erik Johnston
f1a04462eb Merge pull request #4482 from matrix-org/erikj/event_auth_room_version
Pass through room version to event auth
2019-01-28 20:09:38 +00:00
Travis Ralston
c4045647eb Create 4498.misc 2019-01-28 12:16:39 -07:00
Travis Ralston
6901ac7e9d Don't recommend :8448 to people on public_baseurl 2019-01-28 12:15:22 -07:00
Erik Johnston
8cbc99cc19 Newsfile 2019-01-28 17:36:06 +00:00
Erik Johnston
d414f30019 Implement fallback for V2 invite API
If the room version is either 1 or 2 then a server should retry failed
`/v2/invite` requests with the v1 API
2019-01-28 17:33:25 +00:00
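A hedged sketch of that fallback (not the real federation client, which keys off an "unrecognised request" error rather than any exception):

    def send_invite(destination, room_version, event, send_v2, send_v1):
        try:
            return send_v2(destination, event)
        except Exception:
            # only v1/v2 rooms use an event format the v1 API can carry
            if room_version in ("1", "2"):
                return send_v1(destination, event)
            raise

    def v2_unsupported(dest, ev):  # stand-in for a server without /v2/invite
        raise RuntimeError("M_UNRECOGNIZED")

    print(send_invite("remote.example", "1", {}, v2_unsupported,
                      lambda dest, ev: "sent via v1 API"))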
Erik Johnston
d758d5310e Correctly use default room version if none is set 2019-01-28 17:26:39 +00:00
Amber Brown
5d976c0c7c Fix worker TLS (#4492)
* load cert

* changelog

* fix
2019-01-28 17:18:33 +00:00
Erik Johnston
1977a9b006 Newsfile 2019-01-28 17:05:04 +00:00
Erik Johnston
b872c7b1b4 Split up event validation between event and builder
The validator was being run on the EventBuilder objects, and so the
validator only checked a subset of fields. With the upcoming
EventBuilder refactor even fewer fields will be there to validate.

To get around this we split the validation into those that can be run
against an EventBuilder and those run against a fully fledged event.
2019-01-28 17:00:14 +00:00
Erik Johnston
b8bea3424f Newsfile 2019-01-28 16:47:12 +00:00
Erik Johnston
855a151015 Refactor event signing to work on dicts
This is in preparation for making EventBuilder format agnostic, which
means event signing should be done against the event dict rather than
the EventBuilder object.
2019-01-28 16:42:10 +00:00
Amber Brown
7072fe3084 Fix UPSERTs on SQLite 3.24+ (#4477) 2019-01-28 15:43:32 +00:00
Andrew Morgan
88f4df85ca Merge pull request #4412 from matrix-org/anoa/dm_room_upgrade
Migrate direct message and tag state on room upgrade
2019-01-28 15:16:50 +00:00
Andrew Morgan
f0e96ab66a Change return syntax in doc string 2019-01-28 14:09:45 +00:00
Andrew Morgan
4026d555fa Merge branch 'develop' of github.com:matrix-org/synapse into anoa/dm_room_upgrade 2019-01-28 14:08:24 +00:00
Andrew Morgan
1ce463963d Reuse predecessor method 2019-01-28 14:08:18 +00:00
Aaron Raimist
57a3e96e8e Remove --process-dependency-links from UPGRADE.rst (#4485)
* Remove --process-dependency-links from UPGRADE.rst

Signed-off-by: Aaron Raimist <aaron@raim.ist>

* Add changelog

Signed-off-by: Aaron Raimist <aaron@raim.ist>
2019-01-28 13:54:25 +00:00
Richard van der Hoff
d04ff2a03d Merge pull request #4488 from matrix-org/rav/fed_routing/refactor
Refactoring in MatrixFederationAgent
2019-01-28 12:57:41 +00:00
Richard van der Hoff
3bd0f1a4a3 docstrings for _RoutingResult 2019-01-28 12:43:09 +00:00
Richard van der Hoff
ff05ad147a changelog 2019-01-28 10:34:30 +00:00
Richard van der Hoff
0fd5b3b53e Handle IP literals explicitly
We don't want to be doing .well-known lookups on these guys.
2019-01-28 10:34:30 +00:00
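A minimal sketch of the literal check, assuming the stdlib ipaddress module rather than whatever the agent actually uses:

    from ipaddress import ip_address

    def is_ip_literal(host: str) -> bool:
        # .well-known delegation only makes sense for DNS names
        try:
            ip_address(host.strip("[]"))  # tolerate bracketed IPv6 literals
            return True
        except ValueError:
            return False

    assert is_ip_literal("[::1]") and is_ip_literal("10.0.0.1")
    assert not is_ip_literal("matrix.org")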
Richard van der Hoff
51958df766 MatrixFederationAgent: factor out routing logic
This is going to get too big and unmanageable.
2019-01-28 10:34:30 +00:00
Richard van der Hoff
d840019192 Fix idna and ipv6 literal handling in MatrixFederationAgent (#4487)
Turns out that the library does a better job of parsing URIs than our
reinvented wheel. Who knew.

There are two things going on here. The first is that, unlike
parse_server_name, URI.fromBytes will strip off square brackets from IPv6
literals, which means that it is valid input to ClientTLSOptionsFactory and
HostnameEndpoint.

The second is that we stay in `bytes` throughout (except for the argument to
ClientTLSOptionsFactory), which avoids the weirdness of (sometimes) ending up
with idna-encoded values being held in `unicode` variables. TBH it probably
would have been ok but it made the tests fragile.
2019-01-28 09:56:59 +00:00
Erik Johnston
f01c7488ab Newsfile 2019-01-25 18:32:50 +00:00
Erik Johnston
ae2a957dba Pass through room version to event auth 2019-01-25 18:31:41 +00:00
Neil Johnson
893107be78 backout v3 2019-01-25 17:27:36 +00:00
Erik Johnston
b6dce9b9fd Merge pull request #4470 from matrix-org/erikj/require_format_version
Require event format version to parse or create events
2019-01-25 15:59:36 +00:00
Erik Johnston
57c035debe Merge pull request #4471 from matrix-org/erikj/sqlite_native_upsert
Disable native upsert on sqlite
2019-01-25 14:31:05 +00:00
Erik Johnston
431e485914 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sqlite_native_upsert 2019-01-25 14:11:17 +00:00
Richard van der Hoff
4a3f138832 Fix quoting for allowed_local_3pids example config (#4476)
If you use double-quotes here, you have to escape your backslashes. It's much
easier with single-quotes.

(Note that the existing double-backslashes are already interpreted by python's
""" parsing.)
2019-01-25 13:57:52 +00:00
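The quoting point is easy to demonstrate with PyYAML: single-quoted scalars pass backslashes through literally, while double-quoted ones treat backslash as an escape character:

    import yaml

    # single quotes: the backslash survives as-is
    print(yaml.safe_load("pattern: '.*@matrix\\.org'"))
    # -> {'pattern': '.*@matrix\\.org'}

    # double quotes: "\." is an unknown escape, so parsing fails unless
    # the backslash is doubled
    try:
        yaml.safe_load('pattern: ".*@matrix\\.org"')
    except yaml.YAMLError as exc:
        print("double quotes need escaping:", type(exc).__name__)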
Richard van der Hoff
8520bc3109 Fix Host header sent by MatrixFederationAgent (#4468)
Move the Host header logic down here so that (a) it is used if we reuse the
agent elsewhere, and (b) we can mess about with it with .well-known.
2019-01-25 12:38:16 +00:00
Andrew Morgan
9244a3089e Fixes 2019-01-25 11:48:38 +00:00
Andrew Morgan
da0d2219d2 Clean up direct_rooms access 2019-01-25 11:37:12 +00:00
Andrew Morgan
8265995498 Use python magic 2019-01-25 11:26:06 +00:00
Andrew Morgan
0b3fd1401f Don't require sqlite3 when using postgres (#4466) 2019-01-25 22:25:02 +11:00
Andrew Morgan
c4cdafa81f Destructure account data tuple before use 2019-01-25 11:24:28 +00:00
Andrew Morgan
516456b763 Remove unnecessary null check 2019-01-25 11:22:14 +00:00
Andrew Morgan
6f3fda79ce Move room_tag declaration to be closer to its use 2019-01-25 11:21:25 +00:00
Neil Johnson
95f871fc0d Support room version capabilities in CS API (MSC1804) 2019-01-25 11:16:29 +00:00
Neil Johnson
a3f0556bea towncrier 2019-01-25 11:15:41 +00:00
Andrew Morgan
821b65aeb5 Merge branch 'develop' of github.com:matrix-org/synapse into anoa/dm_room_upgrade 2019-01-25 11:09:53 +00:00
Andrew Morgan
0862d35b8e Move tag and direct state copying into separate function 2019-01-25 11:09:34 +00:00
Neil Johnson
53ef4da8c2 track unstable room v3 2019-01-25 11:04:11 +00:00
Andrew Morgan
b1b6dba2d2 Merge pull request #4415 from matrix-org/anoa/full_search_upgraded_rooms
Ability to search entire room history after upgrading room
2019-01-25 10:49:58 +00:00
Erik Johnston
8dcfa6e75c Newsfile 2019-01-25 10:48:40 +00:00
Erik Johnston
1953067136 Disable native upserts for sqlite, as they don't work 2019-01-25 10:46:49 +00:00
Erik Johnston
5d881cbcb8 Newsfile 2019-01-25 10:37:13 +00:00
Erik Johnston
678a92cb56 Replace missed usages of FrozenEvent 2019-01-25 10:32:30 +00:00
Erik Johnston
9770ed91c2 Fix tests 2019-01-25 10:32:26 +00:00
Erik Johnston
a50cf929c1 Require event format version to parse or create events 2019-01-25 10:32:19 +00:00
Erik Johnston
28c21cd578 Merge pull request #4447 from matrix-org/erikj/msc_1813
Implement MSC 1813 - Add room version to make APIs
2019-01-25 10:27:25 +00:00
Erik Johnston
829a7b2032 Merge pull request #4469 from matrix-org/revert-4451-erikj/require_format_version
Revert "Require event format version to parse or create events"
2019-01-25 10:26:13 +00:00
Erik Johnston
be6a7e47fa Revert "Require event format version to parse or create events" 2019-01-25 10:23:51 +00:00
Erik Johnston
03b7df1af2 Merge pull request #4451 from matrix-org/erikj/require_format_version
Require event format version to parse or create events
2019-01-25 10:20:22 +00:00
Erik Johnston
62514bb81b Merge branch 'develop' of github.com:matrix-org/synapse into erikj/msc_1813 2019-01-25 10:07:08 +00:00
Erik Johnston
efb8ed1d45 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/require_format_version 2019-01-24 18:52:34 +00:00
Erik Johnston
edc1e21dbe Merge pull request #4448 from matrix-org/erikj/get_pdu_versions
Add room_version param to get_pdu
2019-01-24 18:47:15 +00:00
Erik Johnston
80bcca659e Merge pull request #4405 from matrix-org/erikj/fixup_rejecting_invites
Store rejected remote invite events as outliers
2019-01-24 18:33:27 +00:00
Erik Johnston
664b7a2920 Merge pull request #4437 from matrix-org/erikj/event_format_version_v2
Add support for persisting event format versions
2019-01-24 18:32:30 +00:00
Erik Johnston
d148c43050 Review comments 2019-01-24 18:31:23 +00:00
Erik Johnston
26f44164c8 Review comments 2019-01-24 18:28:00 +00:00
Erik Johnston
5ee1f997a8 Update make_membership_event docs 2019-01-24 18:08:08 +00:00
Erik Johnston
9139b87be4 Remove unnecessary setting of outlier bit 2019-01-24 18:04:02 +00:00
Erik Johnston
b8082a5445 Use term 'out of band membership' instead 2019-01-24 17:33:19 +00:00
Andrew Morgan
a383289b0d Merge branch 'anoa/full_search_upgraded_rooms' of github.com:matrix-org/synapse into anoa/full_search_upgraded_rooms 2019-01-24 17:23:51 +00:00
Andrew Morgan
e1781b043b Remove redundant create event None check 2019-01-24 17:23:39 +00:00
Richard van der Hoff
03c85335d1 Apply suggestions from code review
Co-Authored-By: anoadragon453 <1342360+anoadragon453@users.noreply.github.com>
2019-01-24 17:22:09 +00:00
Richard van der Hoff
8ea509a935 Update synapse/api/filtering.py
Co-Authored-By: anoadragon453 <1342360+anoadragon453@users.noreply.github.com>
2019-01-24 17:21:35 +00:00
Andrew Morgan
50c396a7ee Merge pull request #4461 from matrix-org/anoa/room_dir_quick_fix
Add a 60s timeout to filtered room directory queries
2019-01-24 15:24:35 +00:00
Andrew Morgan
075ff3ede9 Change default timeout value from 0 to None 2019-01-24 15:10:22 +00:00
Andrew Morgan
5b1dc94083 Use self.clock instead of datetime 2019-01-24 14:59:50 +00:00
Andrew Morgan
a2d85144e5 isort 2019-01-24 14:22:26 +00:00
Richard van der Hoff
4a6e863843 Merge pull request #4464 from matrix-org/rav/fix_srv_lookup
MatrixFederationAgent: Look up the right SRV record
2019-01-24 13:57:51 +00:00
Richard van der Hoff
8c58c10697 Generate the debian config during build (#4444)
Rather than hardcoding a config which we always forget to update, generate it
from the default config.
2019-01-24 13:39:01 +00:00
Richard van der Hoff
f4697b5ec1 Fix UnboundLocalError in post_urlencoded_get_json (#4460)
This could cause exceptions if the id server returned 4xx responses.
2019-01-24 13:38:29 +00:00
Richard van der Hoff
afd69a0920 Look up the right SRV record 2019-01-24 13:31:43 +00:00
Richard van der Hoff
e1c8440e0c lots more tests for MatrixFederationAgent 2019-01-24 13:28:07 +00:00
Neil Johnson
10b89d5c2e Merge pull request #4435 from matrix-org/neilj/fix_threepid_auth_check
Neilj/fix threepid auth check
2019-01-24 13:02:50 +00:00
Andrew Morgan
2a360e834f Add changelog 2019-01-24 12:47:35 +00:00
Andrew Morgan
5541645e80 lint 2019-01-24 12:45:32 +00:00
Andrew Morgan
068aa1d228 Time out filtered room dir queries after 60s 2019-01-24 12:44:27 +00:00
Neil Johnson
5c41b22359 Merge pull request #4458 from matrix-org/dbkr/public_baseurl_doc
Clarify docs for public_baseurl
2019-01-24 12:36:09 +00:00
Amber Brown
0e27501ee5 Fix UPSERT check (#4459) 2019-01-24 22:57:41 +11:00
Erik Johnston
e8c9f15397 Replace missed usages of FrozenEvent 2019-01-24 11:14:07 +00:00
David Baker
1f2058fca5 Changelog 2019-01-24 10:57:12 +00:00
David Baker
92d8a068ad Clarify docs for public_baseurl
This is leading to problems with people upgrading to clients that
support MSC1730 because people have this misconfigured, so try
to make the docs completely unambiguous.
2019-01-24 10:52:06 +00:00
Amber Brown
58f6c48183 Use native UPSERTs where possible (#4306) 2019-01-24 21:31:54 +11:00
Richard van der Hoff
97fd29c019 Don't send IP addresses as SNI (#4452)
The problem here is that we have cut-and-pasted an impl from Twisted, and then
failed to maintain it. It was fixed in Twisted in
https://github.com/twisted/twisted/pull/1047/files; let's do the same here.
2019-01-24 09:34:44 +00:00
Erik Johnston
e79ba9eb34 Fix tests 2019-01-24 09:28:16 +00:00
Erik Johnston
073173277c Newsfile 2019-01-23 20:27:31 +00:00
Erik Johnston
f431ff3fb8 Require event format version to parse or create events 2019-01-23 20:21:33 +00:00
Erik Johnston
17898a5ba6 Merge branch 'erikj/fixup_rejecting_invites' of github.com:matrix-org/synapse into erikj/require_format_version 2019-01-23 20:13:07 +00:00
Erik Johnston
7c288c2250 Clarify the invite flows 2019-01-23 20:07:47 +00:00
Erik Johnston
07f62da55a Remove unnecessary '_sign_event' 2019-01-23 20:04:57 +00:00
Erik Johnston
183738f469 Newsfile 2019-01-23 20:04:57 +00:00
Erik Johnston
886e5acc76 Store rejected remote invite events as outliers
Currently they're stored as non-outliers even though the server isn't in
the room, which can be problematic in places where the code assumes it
has the state for all non-outlier events.

In particular, there is an edge case where persisting the leave event
triggers a state resolution, which requires looking up the room version
from state. Since the server doesn't have the state, this causes an
exception to be thrown.
2019-01-23 20:04:57 +00:00
Erik Johnston
2a8edbaf74 Merge branch 'erikj/get_pdu_versions' into erikj/require_format_version 2019-01-23 17:27:49 +00:00
Erik Johnston
fd654a4d54 Merge branch 'erikj/msc_1813' into erikj/require_format_version 2019-01-23 17:27:43 +00:00
Erik Johnston
c5a125b24f Update newsfile 2019-01-23 17:26:06 +00:00
Erik Johnston
4a8b715679 Newsfile 2019-01-23 17:25:27 +00:00
Erik Johnston
a4ef8d8dd5 Newsfile 2019-01-23 17:24:44 +00:00
Erik Johnston
6a41d2a187 Add room_version param to get_pdu
When we add a new event format we'll need to know the event format or room
version when parsing events.
2019-01-23 17:19:58 +00:00
Erik Johnston
67cd4dad81 Implement MSC 1813 - Add room version to make APIs
We also have `make_membership_event` convert the returned room version
to an event format version.
2019-01-23 16:51:46 +00:00
Andrew Morgan
6b90ae6efc Merge pull request #4445 from matrix-org/anoa/user_dir_develop_backport
Add metric for user dir current event stream position
2019-01-23 15:58:40 +00:00
Richard van der Hoff
a0ae475219 Merge pull request #4428 from matrix-org/rav/matrix_federation_agent
Move SRV magic into an Agent-like thing
2019-01-23 15:50:25 +00:00
Andrew Morgan
cb0e637a94 Add changelog 2019-01-23 15:38:27 +00:00
Andrew Morgan
82a92ba535 Add metric for user dir current event stream position 2019-01-23 15:34:47 +00:00
Erik Johnston
be1065af59 isort 2019-01-23 11:48:16 +00:00
Richard van der Hoff
2f88881c93 debian package: symlink to python-3.X (#4433)
In the debian package, make the virtualenv symlink python to /usr/bin/python3.X
rather than /usr/bin/python3. Also make sure we depend on the right python3.x
package.

This might help a bit with subtle failures when people install a package from
the wrong distro (https://github.com/matrix-org/synapse/issues/4431).
2019-01-23 11:43:04 +00:00
Erik Johnston
4cd50d983d Newsfile 2019-01-23 11:30:24 +00:00
Erik Johnston
c5a296b10c Add support for persisting event format versions
Currently we only have the one event format version defined, but this
adds the necessary infrastructure to persist and fetch the format
versions alongside the events.

We specify the format version rather than the room version as:

1. We don't necessarily know the room version, existing events may be
   either v1 or v2.
2. We'd need to be careful to prevent/handle it correctly if different
   events in the same room claim to be of different versions, which
   sounds annoying.
2019-01-23 11:30:01 +00:00
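A hypothetical sketch of the idea (schema and names invented for illustration):

    import json
    import sqlite3

    EVENT_FORMAT_V1 = 1  # events from room v1 and v2 share this format

    db = sqlite3.connect(":memory:")
    db.execute(
        "CREATE TABLE events (event_id TEXT, format_version INTEGER, json TEXT)"
    )

    def persist_event(event, format_version=EVENT_FORMAT_V1):
        # store the format version next to the event so it can be parsed
        # correctly when fetched, without consulting the room version
        db.execute(
            "INSERT INTO events VALUES (?, ?, ?)",
            (event["event_id"], format_version, json.dumps(event)),
        )

    persist_event({"event_id": "$ev1:example.org", "type": "m.room.message"})
    print(db.execute("SELECT event_id, format_version FROM events").fetchone())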
Richard van der Hoff
6b574f3df7 fix python2 test failure 2019-01-23 11:25:36 +00:00
Erik Johnston
90743c9d89 Fixup removal of duplicate user_ips rows (#4432)
* Remove unnecessary ORDER BY clause

* Add logging

* Newsfile
2019-01-23 19:45:18 +11:00
Amber Brown
6129e52f43 Support ACME for certificate provisioning (#4384) 2019-01-23 19:39:06 +11:00
Richard van der Hoff
d02c4532c0 Add a test for MatrixFederationAgent 2019-01-22 20:35:12 +00:00
Richard van der Hoff
7021784d46 put resolve_service in an object
this makes it easier to stub things out for tests.
2019-01-22 20:35:12 +00:00
Richard van der Hoff
53a327b4d5 Require that service_name be a byte string
it is only ever a bytes now, so let's enforce that.
2019-01-22 20:35:12 +00:00
Richard van der Hoff
c66f4bf7f1 changelog 2019-01-22 20:34:35 +00:00
Richard van der Hoff
fe212bbe4a Kill off matrix_federation_endpoint
this thing is now redundant.
2019-01-22 20:34:35 +00:00
Richard van der Hoff
7871146667 Make MatrixFederationClient use MatrixFederationAgent
... instead of the matrix_federation_endpoint
2019-01-22 20:34:35 +00:00
Richard van der Hoff
44be7513bf MatrixFederationAgent
Pull the magic that is currently in matrix_federation_endpoint and friends into
an agent-like thing
2019-01-22 20:34:35 +00:00
Neil Johnson
c99c2d58d7 move guard out of is_threepid_reserved and into register.py 2019-01-22 17:47:00 +00:00
Neil Johnson
d619b113ed Fix None guard in config.server.is_threepid_reserved 2019-01-22 16:52:29 +00:00
Erik Johnston
12699a701f Merge pull request #4434 from matrix-org/erikj/fix_user_ips_dedup
Fix bug when removing duplicate rows from user_ips
2019-01-22 16:51:57 +00:00
Erik Johnston
7f503f83b9 Refactor to rewrite the SQL instead 2019-01-22 16:31:05 +00:00
Neil Johnson
388c164aea Merge pull request #4423 from matrix-org/neilj/disable_msisdn_on_registration
Config option to disable requesting MSISDN on registration
2019-01-22 16:23:08 +00:00
Erik Johnston
1c9704f8ab Don't shadow params 2019-01-22 16:20:33 +00:00
Andrew Morgan
766a172b99 lint 2019-01-22 13:51:40 +00:00
Erik Johnston
c658425e6f Newsfile 2019-01-22 13:34:10 +00:00
Erik Johnston
2557531f0f Fix bug when removing duplicate rows from user_ips
This was caused by accidentally overwriting a `last_seen` variable
in a for loop, causing the wrong value to be written to the progress
table. As a result, we didn't scan sections of the table when searching
for duplicates, and so some duplicates did not get deleted.
2019-01-22 13:33:46 +00:00
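The bug class is worth a tiny reconstruction (illustrative, not the actual Synapse code):

    def scan_batch_buggy(rows, last_seen):
        for user_id, ip, last_seen in rows:  # clobbers the outer marker!
            pass  # ...dedupe work...
        return last_seen  # value from the final row, not the batch marker

    def scan_batch_fixed(rows, last_seen):
        for user_id, ip, row_last_seen in rows:  # distinct name
            pass  # ...dedupe work...
        return last_seen

    rows = [("@a:hs", "1.2.3.4", 500), ("@b:hs", "1.2.3.4", 900)]
    print(scan_batch_buggy(rows, 100), scan_batch_fixed(rows, 100))  # 900 100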
Andrew Morgan
117bc94cd2 Merge branch 'develop' of github.com:matrix-org/synapse into anoa/dm_room_upgrade 2019-01-22 13:16:51 +00:00
Andrew Morgan
277e50462d Do not return in a deferred function 2019-01-22 12:40:26 +00:00
Andrew Morgan
c4875d8c76 Prevent duplicate room IDs in m.direct 2019-01-22 12:13:46 +00:00
Andrew Morgan
c433f61091 Ensure new filter is actually created 2019-01-22 12:06:36 +00:00
Andrew Morgan
c9bfb058d8 Fix a bug with single-room search searching all rooms
* Create a new method for getting predecessor rooms
* Remove formatting change
2019-01-22 12:00:41 +00:00
Andrew Morgan
8086a5c05e Fix comments 2019-01-22 11:16:23 +00:00
Richard van der Hoff
6bfa735a69 Make key fetches use regular federation client (#4426)
All this magic is redundant.
2019-01-22 11:04:20 +00:00
Andrew Morgan
48951f437f Join logic covers both room creator and arbitrary users 2019-01-22 11:00:04 +00:00
Andrew Morgan
8c85f0833d tags, m.direct copying over correctly 2019-01-22 11:00:04 +00:00
Andrew Morgan
25d64a846a Fix typos 2019-01-22 11:00:04 +00:00
Andrew Morgan
ea8903fcc9 Migrating dm and room tags work for migrator 2019-01-22 11:00:04 +00:00
Andrew Morgan
887ca93a1b Prevent crash on user who doesn't have any direct rooms 2019-01-22 11:00:04 +00:00
Andrew Morgan
1f18c7cfc9 Add changelog 2019-01-22 11:00:04 +00:00
Andrew Morgan
4ff6d22245 Preserve DM status of a room on upgrade
Signed-off-by: Andrew Morgan <andrew@amorgan.xyz>
2019-01-22 11:00:04 +00:00
Andrew Morgan
75942af1db Fix typo 2019-01-22 11:00:04 +00:00
Richard van der Hoff
33a55289cb Refactor and bugfix for resolve_service (#4427) 2019-01-22 10:59:27 +00:00
Amber Brown
23b0813599 Require ECDH key exchange & remove dh_params (#4429)
* remove dh_params and set better cipher string
2019-01-22 21:58:50 +11:00
Neil Johnson
1b53cc3cb4 fix line length 2019-01-21 15:17:20 +00:00
Neil Johnson
5349262302 Config option to disable requesting MSISDN on registration 2019-01-21 14:59:37 +00:00
Erik Johnston
83f335bedf Merge pull request #4402 from matrix-org/erikj/fed_v2_invite_server
Implement server side of MSC1794 - Federation v2 Invite API
2019-01-21 14:50:54 +00:00
Erik Johnston
35e1d67b4e Merge branch 'develop' of github.com:matrix-org/synapse into erikj/fed_v2_invite_server 2019-01-21 14:04:19 +00:00
Erik Johnston
5f54765587 Merge pull request #4390 from matrix-org/erikj/versioned_fed_apis
Add groundwork for new versions of federation APIs
2019-01-21 11:44:05 +00:00
Andrew Morgan
702c4b750c Migrate encryption state on room upgrade (#4411)
* Migrate encryption state on room upgrade

Signed-off-by: Andrew Morgan <andrew@amorgan.xyz>

* Add changelog file
2019-01-21 20:42:58 +11:00
Erik Johnston
25dd56ace3 Fix race when persisting create event (#4404)
* Fix race when persisting create event

When persisting a chunk of DAG it is sometimes required to do a state
resolution, which requires knowledge of the room version. If this
happens while we're persisting the create event then we need to use that
event rather than attempting to look it up in the database.
2019-01-18 23:17:04 +11:00
Erik Johnston
71b94eac46 Tweak code coverage settings (#4400)
* Tweak code coverage settings

* Fix manifest

* Newsfile

* Fix commit status?
2019-01-18 23:13:14 +11:00
Richard van der Hoff
de6888e7ce Remove redundant WrappedConnection (#4409)
* Remove redundant WrappedConnection

The matrix federation client uses an HTTP connection pool, which times out its
idle HTTP connections, so there is no need for any of this business.
2019-01-18 23:07:38 +11:00
Andrew Morgan
cb80db8941 Add changelog 2019-01-18 11:22:00 +00:00
Andrew Morgan
df3a661e4a Search for messages across predecessor rooms
Signed-off-by: Andrew Morgan <andrew@amorgan.xyz>
2019-01-18 11:19:20 +00:00
Richard van der Hoff
676cf2ee26 Fix incorrect logcontexts after a Deferred was cancelled (#4407) 2019-01-17 14:00:23 +00:00
Richard van der Hoff
9feb5d0b71 sign_request -> build_auth_headers (#4408)
Just got very confused about the fact that the headers are only an output, not
an input.
2019-01-17 12:40:09 +00:00
Richard van der Hoff
3982a6ee07 Changing macaroon_secret_key no longer logs you out (#4387) 2019-01-16 23:14:41 +00:00
Richard van der Hoff
05e1296649 don't store more remote device lists if they have more than 1K devices (#4397) 2019-01-16 23:14:11 +00:00
Erik Johnston
f788c9eb70 Newsfile 2019-01-16 13:46:36 +00:00
Erik Johnston
7f1a6a4ea5 Merge pull request #4399 from andrewshadura/update-python-deps
Update Dependencies
2019-01-16 13:37:47 +00:00
Andrej Shadura
3b31a54a6e Add a changelog entry
Signed-off-by: Andrej Shadura <andrew.shadura@collabora.co.uk>
2019-01-16 10:54:43 +01:00
Andrej Shadura
64cf6788d9 Depend on pymacaroons >= 0.13.0 instead on pymacaroons-pynacl
Since 0.13.0, pymacaroons works correctly with pynacl, so there
isn’t any more reason to depend on an outdated pynacl fork.

Signed-off-by: Andrej Shadura <andrew.shadura@collabora.co.uk>
2019-01-16 10:54:41 +01:00
Andrej Shadura
fab948120f Use msgpack instead of msgpack-python
The package msgpack-python has been deprecated.

Signed-off-by: Andrej Shadura <andrew.shadura@collabora.co.uk>
2019-01-16 10:53:49 +01:00
Erik Johnston
aa955f2d15 Merge pull request #4392 from matrix-org/neilj/fix_user_type_typo
ALL_USER_TYPES should be a tuple
2019-01-15 14:56:52 +00:00
Neil Johnson
9ec56d6935 ALL_USER_TYPES should be a tuple 2019-01-15 14:38:15 +00:00
Erik Johnston
4a4d2e17bc Add /v2/invite federation API 2019-01-15 13:22:44 +00:00
Erik Johnston
1a8f4139a5 Newsfile 2019-01-15 11:21:52 +00:00
Erik Johnston
bb63e7ca4f Add groundwork for new versions of federation APIs 2019-01-15 11:14:34 +00:00
Richard van der Hoff
7e41545e8b Merge branch 'release-v0.34.1.1' into develop 2019-01-12 13:30:30 +00:00
Richard van der Hoff
4fd051f9c3 moar plusses!
turns out that 0.34.1.1+1 comes before 0.34.1.1+bionic (etc).

The version may only contain "~ 0-9 A-Z a-z + - ." (sorting in that order).

Option 1: replace "+" with something that sorts after +. Options are "-" (but
dpkg-source complains about that) or "." (but that would mean we couldn't
distinguish packaging-only changes from real changes).

Option 2: stick with + and just find something that sorts after 'xenial'. The
only options there are "-", "." (same problems as before), "z", and "+".

Hence, ++1. Sorry.
2019-01-12 13:08:32 +00:00
Richard van der Hoff
b5b868d41e Rewrite build_debian_packages
Rewrite this in python so that it can be run in parallel.
2019-01-12 12:40:58 +00:00
Richard van der Hoff
34b25dcc8e Silence travis-ci build warnings by removing non-functional python3.6 (#4377)
* Remove non-functional python3.6 in travis env

* changelog
2019-01-12 06:22:56 +11:00
Amber Brown
a35c66a00b Remove duplicates in the user_ips table and add an index (#4370) 2019-01-12 06:21:50 +11:00
Richard van der Hoff
91fa34b3fa s/Breaks/Conflicts/ in debian/control
Otherwise people can't upgrade from matrix-synapse without removing it first
2019-01-11 17:05:45 +00:00
Amber Brown
522dada206 Merge remote-tracking branch 'origin/master' into develop 2019-01-11 02:22:48 +11:00
Amber Brown
ea00f18135 Merge Synapse v0.34.1.1 2019-01-11 02:21:54 +11:00
Amber Brown
c0dba73aa0 changelog, for debian 2019-01-11 02:20:29 +11:00
Richard van der Hoff
8c818af38e Merge pull request #4342 from aaronraimist/new-virtualenv
Update README to use new virtualenv (#4328)
2019-01-10 14:59:33 +00:00
Amber Brown
5c792ee5c3 changelog 2019-01-11 01:59:10 +11:00
Amber Brown
6dc06c3775 version 2019-01-11 01:56:37 +11:00
Amber Brown
3933ce9f13 Merge pull request #4374 from matrix-org/rav/macaroon_key_fix_0.34.1
Fix spontaneous logout
2019-01-11 01:48:53 +11:00
Richard van der Hoff
de80e979c9 changelog 2019-01-10 14:26:01 +00:00
Richard van der Hoff
e0910d0145 Merge branch rav/macaroon_key_fix_0.34 into rav/macaroon_key_fix_0.34.1
Fixes #4371
2019-01-10 14:12:50 +00:00
Richard van der Hoff
ba41aeed6a Revert "Fix macaroon_secret_key fallback logic"
This is already fixed in 0.34.1, by 59f93bb

This reverts commit efc522c55e.
2019-01-10 14:09:26 +00:00
Richard van der Hoff
4f24452ead changelog 2019-01-10 14:00:23 +00:00
Richard van der Hoff
aa70d24125 Merge branch 'rav/macaroon_key_fix' into rav/macaroon_key_fix_0.34 2019-01-10 12:58:33 +00:00
Richard van der Hoff
efc522c55e Fix macaroon_secret_key fallback logic 2019-01-10 12:57:27 +00:00
Richard van der Hoff
566947ff34 Skip macaroon check for access tokens in the db 2019-01-10 12:57:21 +00:00
Richard van der Hoff
353f2407b7 Fix fallback to signing key for macaroon-secret-key 2019-01-10 12:42:56 +00:00
Richard van der Hoff
8d4b4e781f Merge branch 'master' into develop 2019-01-09 16:52:08 +00:00
Richard van der Hoff
95fca1c7e9 fix docker build to install optional deps 2019-01-09 16:37:51 +00:00
Richard van der Hoff
58fe88c47e Merge branch 'master' into develop 2019-01-09 16:02:05 +00:00
Richard van der Hoff
d566e6b17a Merge branch 'master' into develop 2019-01-09 15:00:46 +00:00
Erik Johnston
55c3e853c5 Merge pull request #4368 from matrix-org/erikj/better_errors
Fixup docstrings for matrixfederationclient
2019-01-09 11:39:28 +00:00
Amber Brown
7960c26fda Fix adding new rows instead of updating them if one of the key values is a NULL in upserts. (#4369) 2019-01-09 22:26:25 +11:00
Erik Johnston
0dce21ba77 Newsfile 2019-01-09 09:27:03 +00:00
Erik Johnston
34ea14139d Fixup docstrings for matrixfederationclient 2019-01-09 09:25:59 +00:00
Richard van der Hoff
dd3bf40152 README.rst: fix hash_password path 2019-01-02 16:36:29 +00:00
Aaron Raimist
7975d39cbd Add changelog
Signed-off-by: Aaron Raimist <aaron@raim.ist>
2019-01-01 15:40:55 -06:00
Aaron Raimist
0e62fcd0eb Update README to use new virtualenv (#4328)
Signed-off-by: Aaron Raimist <aaron@raim.ist>
2019-01-01 15:27:28 -06:00
143 changed files with 6276 additions and 2704 deletions

.codecov.yml (new file)

@@ -0,0 +1,15 @@
comment:
  layout: "diff"

coverage:
  status:
    project:
      default:
        target: 0  # Target % coverage, can be auto. Turned off for now
        threshold: null
        base: auto
    patch:
      default:
        target: 0
        threshold: null
        base: auto

.coveragerc

@@ -1,11 +1,7 @@
[run]
branch = True
parallel = True
source = synapse
[paths]
source=
coverage
include = synapse/*
[report]
precision = 2

.gitignore (vendored)

@@ -12,6 +12,7 @@ dbs/
dist/
docs/build/
*.egg-info
pip-wheel-metadata/
cmdclient_config.json
homeserver*.db
@@ -25,9 +26,9 @@ homeserver*.pid
*.tls.dh
*.tls.key
.coverage
.coverage.*
!.coverage.rc
.coverage*
coverage.*
!.coveragerc
htmlcov
demo/*/*.db

.travis.yml

@@ -12,6 +12,9 @@ cache:
    #
    - $HOME/.cache/pip/wheels

addons:
  postgresql: "9.4"

# don't clone the whole repo history, one commit will do
git:
  depth: 1
@@ -68,6 +71,13 @@ matrix:
install:
  - pip install tox

  # if we don't have python3.6 in this environment, travis unhelpfully gives us
  # a `python3.6` on our path which does nothing but spit out a warning. Tox
  # tries to run it (even if we're not running a py36 env), so the build logs
  # then have warnings which look like errors. To reduce the noise, remove the
  # non-functional python3.6.
  - ( ! command -v python3.6 || python3.6 --version ) &>/dev/null || rm -f $(command -v python3.6)

script:
  - tox -e $TOX_ENV

CHANGES.md

@@ -1,3 +1,100 @@
Synapse 0.99.0rc2 (2019-01-30)
==============================

Bugfixes
--------

- Fix bug when rejecting remote invites. ([\#4527](https://github.com/matrix-org/synapse/issues/4527))
- Fix incorrect rendering of server capabilities. ([81b7e7eed](https://github.com/matrix-org/synapse/commit/81b7e7eed323f55d6550e7a270a9dc2c4c7b0fe0))

Improved Documentation
----------------------

- Add documentation on enabling ACME support when upgrading to v0.99. ([\#4528](https://github.com/matrix-org/synapse/issues/4528))

Synapse 0.99.0rc1 (2019-01-30)
==============================

Synapse v0.99.x is a precursor to the upcoming Synapse v1.0 release. It contains foundational changes to room architecture and the federation security model necessary to support the upcoming r0 release of the Server to Server API.

Features
--------

- Synapse's cipher string has been updated to require ECDH key exchange. Configuring and generating dh_params is no longer required, and they will be ignored. ([\#4229](https://github.com/matrix-org/synapse/issues/4229))
- Synapse can now automatically provision TLS certificates via ACME (the protocol used by CAs like Let's Encrypt). ([\#4384](https://github.com/matrix-org/synapse/issues/4384), [\#4492](https://github.com/matrix-org/synapse/issues/4492), [\#4525](https://github.com/matrix-org/synapse/issues/4525))
- Implement MSC1708 (.well-known routing for server-server federation) ([\#4408](https://github.com/matrix-org/synapse/issues/4408), [\#4409](https://github.com/matrix-org/synapse/issues/4409), [\#4426](https://github.com/matrix-org/synapse/issues/4426), [\#4427](https://github.com/matrix-org/synapse/issues/4427), [\#4428](https://github.com/matrix-org/synapse/issues/4428), [\#4464](https://github.com/matrix-org/synapse/issues/4464), [\#4468](https://github.com/matrix-org/synapse/issues/4468), [\#4487](https://github.com/matrix-org/synapse/issues/4487), [\#4488](https://github.com/matrix-org/synapse/issues/4488), [\#4489](https://github.com/matrix-org/synapse/issues/4489), [\#4497](https://github.com/matrix-org/synapse/issues/4497), [\#4511](https://github.com/matrix-org/synapse/issues/4511), [\#4516](https://github.com/matrix-org/synapse/issues/4516), [\#4520](https://github.com/matrix-org/synapse/issues/4520), [\#4521](https://github.com/matrix-org/synapse/issues/4521))
- Search now includes results from predecessor rooms after a room upgrade. ([\#4415](https://github.com/matrix-org/synapse/issues/4415))
- Config option to disable requesting MSISDN on registration. ([\#4423](https://github.com/matrix-org/synapse/issues/4423))
- Add a metric for tracking event stream position of the user directory. ([\#4445](https://github.com/matrix-org/synapse/issues/4445))
- Support exposing server capabilities in CS API (MSC1753, MSC1804) ([\#4472](https://github.com/matrix-org/synapse/issues/4472))
- Add support for room version 3 ([\#4483](https://github.com/matrix-org/synapse/issues/4483), [\#4499](https://github.com/matrix-org/synapse/issues/4499), [\#4515](https://github.com/matrix-org/synapse/issues/4515), [\#4523](https://github.com/matrix-org/synapse/issues/4523))
- Synapse will now reload TLS certificates from disk upon SIGHUP. ([\#4495](https://github.com/matrix-org/synapse/issues/4495), [\#4524](https://github.com/matrix-org/synapse/issues/4524))

Bugfixes
--------

- Prevent users with access tokens predating the introduction of device IDs from creating spurious entries in the user_ips table. ([\#4369](https://github.com/matrix-org/synapse/issues/4369))
- Fix typo in ALL_USER_TYPES definition to ensure type is a tuple ([\#4392](https://github.com/matrix-org/synapse/issues/4392))
- Fix high CPU usage due to remote devicelist updates ([\#4397](https://github.com/matrix-org/synapse/issues/4397))
- Fix potential bug where creating or joining a room could fail ([\#4404](https://github.com/matrix-org/synapse/issues/4404))
- Fix bug when rejecting remote invites ([\#4405](https://github.com/matrix-org/synapse/issues/4405))
- Fix incorrect logcontexts after a Deferred was cancelled ([\#4407](https://github.com/matrix-org/synapse/issues/4407))
- Ensure encrypted room state is persisted across room upgrades. ([\#4411](https://github.com/matrix-org/synapse/issues/4411))
- Copy over whether a room is a direct message and any associated room tags on room upgrade. ([\#4412](https://github.com/matrix-org/synapse/issues/4412))
- Fix None guard in calling config.server.is_threepid_reserved ([\#4435](https://github.com/matrix-org/synapse/issues/4435))
- Don't send IP addresses as SNI ([\#4452](https://github.com/matrix-org/synapse/issues/4452))
- Fix UnboundLocalError in post_urlencoded_get_json ([\#4460](https://github.com/matrix-org/synapse/issues/4460))
- Add a timeout to filtered room directory queries. ([\#4461](https://github.com/matrix-org/synapse/issues/4461))
- Workaround for login error when using both LDAP and internal authentication. ([\#4486](https://github.com/matrix-org/synapse/issues/4486))
- Fix a bug where setting a relative consent directory path would cause a crash. ([\#4512](https://github.com/matrix-org/synapse/issues/4512))

Deprecations and Removals
-------------------------

- Synapse no longer generates self-signed TLS certificates when generating a configuration file. ([\#4509](https://github.com/matrix-org/synapse/issues/4509))

Internal Changes
----------------

- Synapse will now take advantage of native UPSERT functionality in PostgreSQL 9.5+ and SQLite 3.24+. ([\#4306](https://github.com/matrix-org/synapse/issues/4306), [\#4459](https://github.com/matrix-org/synapse/issues/4459), [\#4466](https://github.com/matrix-org/synapse/issues/4466), [\#4471](https://github.com/matrix-org/synapse/issues/4471), [\#4477](https://github.com/matrix-org/synapse/issues/4477), [\#4505](https://github.com/matrix-org/synapse/issues/4505))
- Update README to use the new virtualenv everywhere ([\#4342](https://github.com/matrix-org/synapse/issues/4342))
- Add better logging for unexpected errors while sending transactions ([\#4368](https://github.com/matrix-org/synapse/issues/4368))
- Apply a unique index to the user_ips table, preventing duplicates. ([\#4370](https://github.com/matrix-org/synapse/issues/4370), [\#4432](https://github.com/matrix-org/synapse/issues/4432), [\#4434](https://github.com/matrix-org/synapse/issues/4434))
- Silence travis-ci build warnings by removing non-functional python3.6 ([\#4377](https://github.com/matrix-org/synapse/issues/4377))
- Fix a comment in the generated config file ([\#4387](https://github.com/matrix-org/synapse/issues/4387))
- Add ground work for implementing future federation API versions ([\#4390](https://github.com/matrix-org/synapse/issues/4390))
- Update dependencies on msgpack and pymacaroons to use the up-to-date packages. ([\#4399](https://github.com/matrix-org/synapse/issues/4399))
- Tweak codecov settings to make them less loud. ([\#4400](https://github.com/matrix-org/synapse/issues/4400))
- Implement server support for MSC1794 - Federation v2 Invite API ([\#4402](https://github.com/matrix-org/synapse/issues/4402))
- debian package: symlink to explicit python version ([\#4433](https://github.com/matrix-org/synapse/issues/4433))
- Add infrastructure to support different event formats ([\#4437](https://github.com/matrix-org/synapse/issues/4437), [\#4447](https://github.com/matrix-org/synapse/issues/4447), [\#4448](https://github.com/matrix-org/synapse/issues/4448), [\#4470](https://github.com/matrix-org/synapse/issues/4470), [\#4481](https://github.com/matrix-org/synapse/issues/4481), [\#4482](https://github.com/matrix-org/synapse/issues/4482), [\#4493](https://github.com/matrix-org/synapse/issues/4493), [\#4494](https://github.com/matrix-org/synapse/issues/4494), [\#4496](https://github.com/matrix-org/synapse/issues/4496), [\#4510](https://github.com/matrix-org/synapse/issues/4510), [\#4514](https://github.com/matrix-org/synapse/issues/4514))
- Generate the debian config during build ([\#4444](https://github.com/matrix-org/synapse/issues/4444))
- Clarify documentation for the `public_baseurl` config param ([\#4458](https://github.com/matrix-org/synapse/issues/4458), [\#4498](https://github.com/matrix-org/synapse/issues/4498))
- Fix quoting for allowed_local_3pids example config ([\#4476](https://github.com/matrix-org/synapse/issues/4476))
- Remove deprecated --process-dependency-links option from UPGRADE.rst ([\#4485](https://github.com/matrix-org/synapse/issues/4485))
- Make it possible to set the log level for tests via an environment variable ([\#4506](https://github.com/matrix-org/synapse/issues/4506))
- Reduce the log level of linearizer lock acquirement to DEBUG. ([\#4507](https://github.com/matrix-org/synapse/issues/4507))
- Fix code to comply with linting in PyFlakes 3.7.1. ([\#4519](https://github.com/matrix-org/synapse/issues/4519))

Synapse 0.34.1.1 (2019-01-11)
=============================

This release fixes CVE-2019-5885 and is recommended for all users of Synapse 0.34.1.

This release is compatible with Python 2.7 and 3.5+. Python 3.7 is fully supported.

Bugfixes
--------

- Fix spontaneous logout on upgrade ([\#4374](https://github.com/matrix-org/synapse/issues/4374))

Synapse 0.34.1 (2019-01-09)
===========================

MANIFEST.in

@@ -15,6 +15,7 @@ recursive-include docs *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.pem
recursive-include tests *.py
recursive-include synapse/res *
@@ -37,6 +38,7 @@ prune docker
prune .circleci
prune .coveragerc
prune debian
prune .codecov.yml
exclude jenkins*
recursive-exclude jenkins *.sh

README.rst

@@ -4,15 +4,15 @@ Introduction
============
Matrix is an ambitious new ecosystem for open federated Instant Messaging and
VoIP. The basics you need to know to get up and running are:
- Everything in Matrix happens in a room. Rooms are distributed and do not
exist on any single server. Rooms can be located using convenience aliases
like ``#matrix:matrix.org`` or ``#test:localhost:8448``.
- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
you will normally refer to yourself and others using a third party identifier
(3PID): email address, phone number, etc rather than manipulating Matrix user IDs).
The overall architecture is::
@@ -89,110 +89,219 @@ System requirements:
- Python 3.5, 3.6, 3.7, or 2.7
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
Installing from source
----------------------
The currently supported environment is [Ubuntu 18.04
LTS](http://releases.ubuntu.com/18.04/).
(Prebuilt packages are available for some platforms - see `Platform-Specific
Instructions`_.)
Recommended installation procedure
----------------------------------
Synapse is written in Python but some of the libraries it uses are written in
C. So before we can install Synapse itself we need a working C compiler and the
header files for Python C extensions.
Building and running Synapse from source in a python3 environment is the
recommended path for installation, as it is the most well-tested route.
Binary packages are available for various platforms, but not officially
supported by the Synapse team. See `Platform Specific Instructions`_ for
details.
Install prerequisites
*********************
Installing prerequisites on Ubuntu or Debian::
sudo apt-get install build-essential python3-dev libffi-dev \
python-pip python-setuptools sqlite3 \
libssl-dev python-virtualenv libjpeg-dev libxslt1-dev
sudo apt-get update && sudo apt-get dist-upgrade
sudo apt-get install build-essential python3-dev python3-venv \
python3-pip python-setuptools libssl-dev \
libjpeg-dev libffi-dev zlib1g-dev \
libxslt1-dev postgresql libwebp-dev libpq-dev
**TODO: Update and check non-debian distro pre-req's for new process**
Installing prerequisites on ArchLinux::
sudo pacman -S base-devel python python-pip \
python-setuptools python-virtualenv sqlite3
python-setuptools python-virtualenv
Installing prerequisites on CentOS 7 or Fedora 25::
Installing prerequisites on CentOS 7 or Fedora::
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
python-virtualenv libffi-devel openssl-devel
sudo yum groupinstall "Development Tools"
Installing prerequisites on Mac OS X::
xcode-select --install
sudo easy_install pip
sudo pip install virtualenv
brew install pkg-config libffi
Installing prerequisites on Raspbian::
sudo apt-get install build-essential python3-dev libffi-dev \
python-pip python-setuptools sqlite3 \
libssl-dev python-virtualenv libjpeg-dev
sudo apt-get update && sudo apt-get dist-upgrade
sudo apt-get install build-essential python3-dev python3-venv \
python3-pip python-setuptools libssl-dev \
libjpeg-dev libffi-dev zlib1g-dev \
libxslt1-dev postgresql libwebp-dev libpq-dev
Installing prerequisites on openSUSE::
sudo zypper in -t pattern devel_basis
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
python-devel libffi-devel libopenssl-devel libjpeg62-devel
Set up python environment
*************************
Installing prerequisites on OpenBSD::
Add a new user for Synapse and log in as them::
doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
libxslt jpeg
useradd matrix
su -l matrix
To install the Synapse homeserver run::
Create a python3 virtualenv and install dependencies::
mkdir -p ~/synapse
virtualenv -p python3 ~/synapse/env
source ~/synapse/env/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools
pip install matrix-synapse[all]
python3 -m venv matrix-synapse
./matrix-synapse/bin/python -m pip install -U pip setuptools wheel
./matrix-synapse/bin/python -m pip install -U matrix-synapse[all]
This installs Synapse, along with the libraries it uses, into a virtual
environment under ``~/synapse/env``. Feel free to pick a different directory
if you prefer.
Create a Synapse configuration directory. **Make sure you change
``matrix.mydomain.com`` to your own domain**::
This Synapse installation can then be later upgraded by using pip again with the
update flag::
mkdir cfg
./matrix-synapse/bin/python -m synapse.app.homeserver --generate-config \
-H matrix.mydomain.com \ # Change
-c cfg/homeserver.yaml \
--report-stats=yes
Installing postgres
*******************
`PostgreSQL <https://www.postgresql.org/>`_ is the recommended database
backend for Synapse, and offers significantly better performance than the
default SQLite. If you are migrating an existing SQLite database, please
consult the `documentation on how to switch
<https://github.com/matrix-org/synapse/blob/master/docs/postgres.rst#porting-from-sqlite>`_.
Enable and start postgresql::
systemctl enable postgresql && systemctl start postgresql
Assuming your postgres user is called ``postgres``, log in as them and create
a database user for Synapse. You will be prompted for a password; make sure
you set a strong passphrase::
su - postgres
createuser --pwprompt synapse_user
Then, at a ``psql`` prompt, create a Synapse database::
CREATE DATABASE synapse
ENCODING 'UTF8'
LC_COLLATE='C'
LC_CTYPE='C'
template=template0
OWNER synapse_user;
Finally, edit the ``database`` section in your ``cfg/homeserver.yaml`` file
to point to the new database::
database:
name: psycopg2
args:
user: synapse_user
password: <password defined in the createuser step>
database: synapse
host: localhost
cp_min: 5
cp_max: 10
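You can verify that the new role and database work before continuing, for
example by opening a prompt as the ``synapse_user`` role::
psql -h localhost -U synapse_user synapse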
More information can be found at `Using Postgres with Synapse
<docs/postgres.rst>`_.
Systemd
*******
Running Synapse under `systemd <https://en.wikipedia.org/wiki/Systemd>`_ is
recommended, as it allows for simple management and automatic restarts in case
of a server error. To integrate Synapse with systemd, create a file at
``/etc/systemd/system/synapse.service`` with the following contents::
[Unit]
Description=Synapse homeserver
After=network.target
[Service]
User=matrix
WorkingDirectory=/home/matrix/matrix-synapse/
ExecStart=/home/matrix/matrix-synapse/bin/python -m synapse.app.homeserver -c /home/matrix/cfg/homeserver.yaml
Restart=always
[Install]
WantedBy=multi-user.target
Then tell systemd to reload its service files::
sudo systemctl daemon-reload
Synapse can now be enabled to run under systemd, but **don't start Synapse
yet!**
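You can, however, already enable the service so that it starts on boot once
everything is configured (assuming the unit file above is saved as
``synapse.service``)::
sudo systemctl enable synapse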
ACME setup
**********
Synapse requires valid TLS certificates for communication between servers
(port ``8448`` by default) in addition to those that are client-facing (port
``443``). Synapse **will provision server-to-server certificates
automatically for you for free** through `Let's Encrypt
<https://letsencrypt.org/>`_ if you tell it to.
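When enabled, the relevant part of ``homeserver.yaml`` looks roughly like the
sketch below; treat the option names in your generated sample config as
authoritative::
acme:
    enabled: true
    port: 80
    bind_addresses: ['::', '0.0.0.0']
    reprovision_threshold: 30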
Note: Synapse does not currently renew Let's Encrypt certificates while
running; it only checks for certificates that need renewing on startup. Hot
renewal will be implemented in a future release; in the meantime, if your
federation certificates expire, simply restarting Synapse should renew them
automatically.
In order for Synapse to complete the ACME challenge to provision a
certificate, it needs access to port 80. Typically listening on port 80 is
only granted to applications running as root. There are thus two solutions to
this problem.
**Using a reverse proxy**
A reverse proxy such as Apache or Nginx allows a single process (the web
server) to listen on port 80 and redirect traffic to the appropriate program
running on your server.
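For example, a minimal nginx server block along the lines of the sketch below
would work, assuming you have moved Synapse's ACME listener to a hypothetical
high port such as 8009::
server {
    listen 80;
    server_name matrix.mydomain.com;
    location /.well-known/acme-challenge {
        proxy_pass http://127.0.0.1:8009;
    }
}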
**Authbind**
``authbind`` allows a program which does not or should not run as root to
bind to low-numbered ports in a controlled way. The setup is simpler, but
requires a webserver not to already be running on port 80. **This includes
every time Synapse renews a certificate**, which may be cumbersome if you
usually run a web server on port 80. Nevertheless, if that isn't a concern,
follow the instructions below.
Install ``authbind``. This can be done on Ubuntu/Debian with::
sudo apt-get install authbind
**Add authbind to the systemd script**
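A sketch of what this could look like, assuming the ``matrix`` user and the
``synapse.service`` unit from above. First allow the ``matrix`` user to bind
port 80 (this is standard authbind usage)::
sudo touch /etc/authbind/byport/80
sudo chown matrix /etc/authbind/byport/80
sudo chmod 755 /etc/authbind/byport/80
Then wrap the daemon in authbind in the unit's ``[Service]`` section::
ExecStart=/usr/bin/authbind --deep /home/matrix/matrix-synapse/bin/python -m synapse.app.homeserver -c /home/matrix/cfg/homeserver.yaml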
**TODO: Is this right?** If you would like to use your own
certificates, specifying them in Synapse's config file is sufficient.
**TODO: Fit this in**
These keys will allow your Home Server to identify itself to other Home
Servers, so don't lose or delete them. It would be wise to back them up
somewhere safe. (If, for whatever reason, you do need to change your Home
Server's keys, you may find that other Home Servers have the old key cached.
If you update the signing key, you should change the name of the key in the
``<server name>.signing.key`` file (the second word) to something different.
See `the spec`__ for more information on key management.)
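For illustration, the signing key file consists of a single line of the form
``<algorithm> <key id> <base64-encoded key>``, for example (placeholder
values)::
ed25519 a_AbCd bWFkZS11cC1rZXktbWF0ZXJpYWw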
**TODO: Does this still work?** This Synapse installation can then later be
upgraded by using pip again with the update flag::
./matrix-synapse/bin/python -m pip install -U matrix-synapse[all]
In case of problems, please see the `Troubleshooting`_ section below.
We have now created a "matrix" user with its own home directory that stores
Synapse's data and configuration files, installed Synapse into an isolated
python virtual environment, and backed it with a postgres database.
Configuring Synapse
-------------------
Before starting Synapse, inspect the ``cfg/homeserver.yaml`` file. ``server_name``
determines the "domain" part of user-ids for users on your server, which will
all be of the format ``@user:my.domain.name``. It also determines how other
matrix servers will reach yours for `Federation`_. For a test configuration,
set this to the hostname of your server. For a more production-ready setup, you
will probably want to specify your domain (``example.com``) rather than a
matrix-specific hostname here (in the same way that your email address is
probably ``user@example.com`` rather than ``user@email.example.com``) - but
doing so may require more advanced setup - see `Setting up
Federation`_. **Be aware that the server name cannot be changed later.**
.. __: `key_management`_
The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
configured without TLS; it should be behind a reverse proxy for TLS/SSL
termination on port 443 which in turn should be used for clients. Port 8448
is configured to use TLS with a self-signed certificate. If you would like
to do an initial test with a client without having to set up a reverse proxy,
you can temporarily use another certificate. You can do so by changing
``tls_certificate_path`` and ``tls_private_key_path``
in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure
to read `Using a reverse proxy with Synapse`_ when doing so.
a TURN server. See `<docs/turn-howto.rst>`_ for details.
Running Synapse
===============
**TODO: Needs update**
To actually run your new homeserver, pick a working directory for Synapse to
run (e.g. ``~/synapse``), and::
source env/bin/activate
synctl start
Upgrading an existing Synapse
=============================
The instructions for upgrading synapse are in `UPGRADE.rst`_.
Please check these instructions as upgrading may require extra steps for some
versions of synapse.
.. _UPGRADE.rst: UPGRADE.rst
Connecting to Synapse from a client
===================================
See https://github.com/vector-im/riot-web/issues/1977 and
https://developer.github.com/changes/2014-04-25-user-content-security for more details.
Platform-Specific Packages
==========================
Note that the only officially supported installation method is what is listed
in `Synapse installation`_. Instructions and packages for other platforms are
listed below, but beware that they may be outdated.
Debian
------
Matrix provides official Debian packages via apt from https://matrix.org/packages/debian/.
Note that these packages do not include a client - choose one from
https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :).
Fedora
------
Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy:
- Ports: ``cd /usr/ports/net-im/py-matrix-synapse && make install clean``
- Packages: ``pkg install py27-matrix-synapse``
OpenBSD
-------
https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix
Windows Install
---------------
Running Synapse on Windows is not recommended or supported. However, if you
wish to run Synapse on Windows, the Windows Subsystem For Linux provides a
Linux environment on Windows 10 which is capable of using the Debian, Fedora,
or source installation methods. More information about WSL can be found at
https://docs.microsoft.com/en-us/windows/wsl/install-win10 for Windows 10 and
https://docs.microsoft.com/en-us/windows/wsl/install-on-server for Windows
Server.
Alternative installation methods
================================
There is an official synapse image available at
https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
the docker-compose file available at `contrib/docker <contrib/docker>`_.
Further information on this including configuration options is available in
the README on hub.docker.com.
Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
https://hub.docker.com/r/avhost/docker-matrix/tags/
Slavi Pantaleev has created an Ansible playbook, which installs the official
Docker image of Matrix Synapse along with many other Matrix-related services
(Postgres database, riot-web, coturn, mxisd, SSL support, etc.). For more
details, see https://github.com/spantaleev/matrix-docker-ansible-deploy
Troubleshooting
===============
failing, e.g.::
pip install twisted
Running out of File Handles
***************************
If synapse runs out of filehandles, it typically fails badly - live-locking
at 100% CPU, and/or failing to accept new TCP connections (blocking the
log lines and looking for any 'Processed request' lines which take more than
a few seconds to execute. Please let us know at #matrix-dev:matrix.org if
you see this failure mode so we can help debug it, however.
.. _federation:
Setting up Federation
A manual password reset can be done via direct database access as follows.
First calculate the hash of the new password::
$ ~/synapse/env/bin/hash_password
Password:
Confirm password:
$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
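The resulting hash can then be written back to the database directly, for
example (a sketch, assuming the standard ``users`` table)::
UPDATE users SET password_hash='<hash from above>' WHERE name='@user:my.domain.name';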

View File

instructions that may be required are listed later in this document.
.. code:: bash
pip install --upgrade matrix-synapse
# restart synapse
synctl restart
returned by the Client-Server API:
# configured on port 443.
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
Upgrading to v0.99.0
====================
In preparation for Synapse v1.0, you must update your TLS certificates from
self-signed ones to verifiable ones signed by a trusted root CA.
If you do not already have a certificate for your domain, the easiest way to get
one is with Synapse's new ACME support, which will use the ACME protocol to
provision a certificate automatically. By default, certificates will be obtained
from the publicly trusted CA Let's Encrypt.
For a sample configuration, please inspect the new ACME section in the example
generated config by running the ``generate-config`` executable. For example::
~/synapse/env3/bin/generate-config
You will need to provide Let's Encrypt (or other ACME provider) access to your
Synapse ACME challenge responder on port 80, at the domain of your homeserver.
This requires you either to change the port of the ACME listener provided by
Synapse to a high port and reverse proxy to it, or use a tool like authbind to
allow Synapse to listen on port 80 without root access. (Do not run Synapse with
root permissions!)
You will need to back up or delete your self-signed TLS certificate
(``example.com.tls.crt`` and ``example.com.tls.key``); Synapse's ACME
implementation will not overwrite them.
You may wish to use alternate methods such as Certbot to obtain a certificate
from Let's Encrypt, depending on your server configuration. Of course, if you
already have a valid certificate for your homeserver's domain, that can be
placed in Synapse's config directory without the need for ACME.
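In that case, point ``homeserver.yaml`` at the existing files, for example::
tls_certificate_path: "/path/to/fullchain.pem"
tls_private_key_path: "/path/to/privkey.pem"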
Upgrading to v0.34.0
====================

changelog.d/3902.feature Normal file
View File

@@ -0,0 +1 @@
Include m.room.encryption on invites by default

View File

@@ -6,7 +6,16 @@
set -e
export DH_VIRTUALENV_INSTALL_ROOT=/opt/venvs
SNAKE=/usr/bin/python3
# make sure that the virtualenv links to the specific version of python, by
# dereferencing the python3 symlink.
#
# Otherwise, if somebody tries to install (say) the stretch package on buster,
# they will get a confusing error about "No module named 'synapse'", because
# python won't look in the right directory. At least this way, the error will
# be a *bit* more obvious.
#
SNAKE=`readlink -e /usr/bin/python3`
# try to set the CFLAGS so any compiled C extensions are compiled with the most
# generic as possible x64 instructions, so that compiling it on a new Intel chip
@@ -36,6 +45,10 @@ dh_virtualenv \
--extra-pip-arg="--compile" \
--extras="all"
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
# we copy the tests to a temporary directory so that we can put them on the
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
tmpdir=`mktemp -d`
@@ -44,5 +57,35 @@ trap "rm -r $tmpdir" EXIT
cp -r tests "$tmpdir"
PYTHONPATH="$tmpdir" \
debian/matrix-synapse-py3/opt/venvs/matrix-synapse/bin/python \
-B -m twisted.trial --reporter=text -j2 tests
"${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
# build the config file
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
--config-dir="/etc/matrix-synapse" \
--data-dir="/var/lib/matrix-synapse" |
perl -pe '
# tweak the paths to the tls certs and signing keys
/^tls_.*_path:/ and s/SERVERNAME/homeserver/;
/^signing_key_path:/ and s/SERVERNAME/homeserver/;
# tweak the pid file location
/^pid_file:/ and s#:.*#: "/var/run/matrix-synapse.pid"#;
# tweak the path to the log config
/^log_config:/ and s/SERVERNAME\.log\.config/log.yaml/;
# tweak the path to the media store
/^media_store_path:/ and s#/media_store#/media#;
# remove the server_name setting, which is set in a separate file
/^server_name:/ and $_ = "#\n# This is set in /etc/matrix-synapse/conf.d/server_name.yaml for Debian installations.\n# $_";
# remove the report_stats setting, which is set in a separate file
/^# report_stats:/ and $_ = "";
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
# add a dependency on the right version of python to substvars.
PYPKG=`basename $SNAKE`
echo "synapse:pydepends=$PYPKG" >> debian/matrix-synapse-py3.substvars

debian/changelog vendored
View File

@@ -1,3 +1,15 @@
matrix-synapse-py3 (0.34.1.1++1) stable; urgency=medium
* Update conflicts specifications to allow smoother transition from matrix-synapse.
-- Synapse Packaging team <packages@matrix.org> Sat, 12 Jan 2019 12:58:35 +0000
matrix-synapse-py3 (0.34.1.1) stable; urgency=high
* New synapse release 0.34.1.1
-- Synapse Packaging team <packages@matrix.org> Thu, 10 Jan 2019 15:04:52 +0000
matrix-synapse-py3 (0.34.1+1) stable; urgency=medium
* Remove 'Breaks: matrix-synapse-ldap3'. (matrix-synapse-py3 includes

debian/control vendored
View File

@@ -19,16 +19,16 @@ Homepage: https://github.com/matrix-org/synapse
Package: matrix-synapse-py3
Architecture: amd64
Provides: matrix-synapse
Breaks:
matrix-synapse (<< 0.34.0-0matrix2),
matrix-synapse (>= 0.34.0-1),
Conflicts:
matrix-synapse (<< 0.34.0.1-0matrix2),
matrix-synapse (>= 0.34.0.1-1),
Pre-Depends: dpkg (>= 1.16.1)
Depends:
adduser,
debconf,
python3-distutils|libpython3-stdlib (<< 3.6),
python3,
${misc:Depends},
${synapse:pydepends},
# some of our scripts use perl, but none of them are important,
# so we put perl:Depends in Suggests rather than Depends.
Suggests:

debian/homeserver.yaml vendored
View File

@@ -1,617 +0,0 @@
# vim:ft=yaml
# PEM encoded X509 certificate for TLS.
# You can replace the self-signed certificate that synapse
# autogenerates on launch with your own SSL certificate + key pair
# if you like. Any required intermediary certificates can be
# appended after the primary certificate in hierarchical order.
tls_certificate_path: "/etc/matrix-synapse/homeserver.tls.crt"
# PEM encoded private key for TLS
tls_private_key_path: "/etc/matrix-synapse/homeserver.tls.key"
# PEM dh parameters for ephemeral keys
tls_dh_params_path: "/etc/matrix-synapse/homeserver.tls.dh"
# Don't bind to the https port
no_tls: False
# List of allowed TLS fingerprints for this server to publish along
# with the signing keys for this server. Other matrix servers that
# make HTTPS requests to this server will check that the TLS
# certificates returned by this server match one of the fingerprints.
#
# Synapse automatically adds the fingerprint of its own certificate
# to the list. So if federation traffic is handled directly by synapse
# then no modification to the list is required.
#
# If synapse is run behind a load balancer that handles the TLS then it
# will be necessary to add the fingerprints of the certificates used by
# the loadbalancers to this list if they are different to the one
# synapse is using.
#
# Homeservers are permitted to cache the list of TLS fingerprints
# returned in the key responses up to the "valid_until_ts" returned in
# key. It may be necessary to publish the fingerprints of a new
# certificate and wait until the "valid_until_ts" of the previous key
# responses have passed before deploying it.
#
# You can calculate a fingerprint from a given TLS listener via:
# openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
# openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
# or by checking matrix.org/federationtester/api/report?server_name=$host
#
tls_fingerprints: []
# tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
## Server ##
# When running as a daemon, the file to store the pid in
pid_file: "/var/run/matrix-synapse.pid"
# CPU affinity mask. Setting this restricts the CPUs on which the
# process will be scheduled. It is represented as a bitmask, with the
# lowest order bit corresponding to the first logical CPU and the
# highest order bit corresponding to the last logical CPU. Not all CPUs
# may exist on a given system but a mask may specify more CPUs than are
# present.
#
# For example:
# 0x00000001 is processor #0,
# 0x00000003 is processors #0 and #1,
# 0xFFFFFFFF is all processors (#0 through #31).
#
# Pinning a Python process to a single CPU is desirable, because Python
# is inherently single-threaded due to the GIL, and can suffer a
# 30-40% slowdown due to cache blow-out and thread context switching
# if the scheduler happens to schedule the underlying threads across
# different cores. See
# https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
#
# cpu_affinity: 0xFFFFFFFF
# The path to the web client which will be served at /_matrix/client/
# if 'webclient' is configured under the 'listeners' configuration.
#
# web_client_location: "/path/to/web/root"
# The public-facing base URL for the client API (not including _matrix/...)
# public_baseurl: https://example.com:8448/
# Set the soft limit on the number of file descriptors synapse can use
# Zero is used to indicate synapse should set the soft limit to the
# hard limit.
soft_file_limit: 0
# The GC threshold parameters to pass to `gc.set_threshold`, if defined
# gc_thresholds: [700, 10, 10]
# Set the limit on the returned events in the timeline in the get
# and sync operations. The default value is -1, means no upper limit.
# filter_timeline_limit: 5000
# Whether room invites to users on this server should be blocked
# (except those sent by local server admins). The default is False.
# block_non_admin_invites: True
# Restrict federation to the following whitelist of domains.
# N.B. we recommend also firewalling your federation listener to limit
# inbound federation traffic as early as possible, rather than relying
# purely on this application-layer restriction. If not specified, the
# default is to whitelist everything.
#
# federation_domain_whitelist:
# - lon.example.com
# - nyc.example.com
# - syd.example.com
# List of ports that Synapse should listen on, their purpose and their
# configuration.
listeners:
# Main HTTPS listener
# For when matrix traffic is sent directly to synapse.
-
# The port to listen for HTTPS requests on.
port: 8448
# Local addresses to listen on.
# On Linux and Mac OS, `::` will listen on all IPv4 and IPv6
# addresses by default. For most other OSes, this will only listen
# on IPv6.
bind_addresses:
- '::'
- '0.0.0.0'
# This is a 'http' listener, allows us to specify 'resources'.
type: http
tls: true
# Use the X-Forwarded-For (XFF) header as the client IP and not the
# actual client IP.
x_forwarded: false
# List of HTTP resources to serve on this listener.
resources:
-
# List of resources to host on this listener.
names:
- client # The client-server APIs, both v1 and v2
- webclient # The bundled webclient.
# Should synapse compress HTTP responses to clients that support it?
# This should be disabled if running synapse behind a load balancer
# that can do automatic compression.
compress: true
- names: [federation] # Federation APIs
compress: false
# optional list of additional endpoints which can be loaded via
# dynamic modules
# additional_resources:
# "/_matrix/my/custom/endpoint":
# module: my_module.CustomRequestHandler
# config: {}
# Unsecure HTTP listener,
# For when matrix traffic passes through loadbalancer that unwraps TLS.
- port: 8008
tls: false
bind_addresses: ['::', '0.0.0.0']
type: http
x_forwarded: false
resources:
- names: [client, webclient]
compress: true
- names: [federation]
compress: false
# Turn on the twisted ssh manhole service on localhost on the given
# port.
# - port: 9000
# bind_addresses: ['::1', '127.0.0.1']
# type: manhole
# Database configuration
database:
# The database engine name
name: "sqlite3"
# Arguments to pass to the engine
args:
# Path to the database
database: "/var/lib/matrix-synapse/homeserver.db"
# Number of events to cache in memory.
event_cache_size: "10K"
# A yaml python logging config file
log_config: "/etc/matrix-synapse/log.yaml"
## Ratelimiting ##
# Number of messages a client can send per second
rc_messages_per_second: 0.2
# Number of message a client can send before being throttled
rc_message_burst_count: 10.0
# The federation window size in milliseconds
federation_rc_window_size: 1000
# The number of federation requests from a single server in a window
# before the server will delay processing the request.
federation_rc_sleep_limit: 10
# The duration in milliseconds to delay processing events from
# remote servers by if they go over the sleep limit.
federation_rc_sleep_delay: 500
# The maximum number of concurrent federation requests allowed
# from a single server
federation_rc_reject_limit: 50
# The number of federation requests to concurrently process from a
# single server
federation_rc_concurrent: 3
# Directory where uploaded images and attachments are stored.
media_store_path: "/var/lib/matrix-synapse/media"
# Media storage providers allow media to be stored in different
# locations.
# media_storage_providers:
# - module: file_system
# # Whether to write new local files.
# store_local: false
# # Whether to write new remote media
# store_remote: false
# # Whether to block upload requests waiting for write to this
# # provider to complete
# store_synchronous: false
# config:
# directory: /mnt/some/other/directory
# Directory where in-progress uploads are stored.
uploads_path: "/var/lib/matrix-synapse/uploads"
# The largest allowed upload size in bytes
max_upload_size: "10M"
# Maximum number of pixels that will be thumbnailed
max_image_pixels: "32M"
# Whether to generate new thumbnails on the fly to precisely match
# the resolution requested by the client. If true then whenever
# a new resolution is requested by the client the server will
# generate a new thumbnail. If false the server will pick a thumbnail
# from a precalculated list.
dynamic_thumbnails: false
# List of thumbnail to precalculate when an image is uploaded.
thumbnail_sizes:
- width: 32
height: 32
method: crop
- width: 96
height: 96
method: crop
- width: 320
height: 240
method: scale
- width: 640
height: 480
method: scale
- width: 800
height: 600
method: scale
# Is the preview URL API enabled? If enabled, you *must* specify
# an explicit url_preview_ip_range_blacklist of IPs that the spider is
# denied from accessing.
url_preview_enabled: False
# List of IP address CIDR ranges that the URL preview spider is denied
# from accessing. There are no defaults: you must explicitly
# specify a list for URL previewing to work. You should specify any
# internal services in your network that you do not want synapse to try
# to connect to, otherwise anyone in any Matrix room could cause your
# synapse to issue arbitrary GET requests to your internal services,
# causing serious security issues.
#
# url_preview_ip_range_blacklist:
# - '127.0.0.0/8'
# - '10.0.0.0/8'
# - '172.16.0.0/12'
# - '192.168.0.0/16'
# - '100.64.0.0/10'
# - '169.254.0.0/16'
#
# List of IP address CIDR ranges that the URL preview spider is allowed
# to access even if they are specified in url_preview_ip_range_blacklist.
# This is useful for specifying exceptions to wide-ranging blacklisted
# target IP ranges - e.g. for enabling URL previews for a specific private
# website only visible in your network.
#
# url_preview_ip_range_whitelist:
# - '192.168.1.1'
# Optional list of URL matches that the URL preview spider is
# denied from accessing. You should use url_preview_ip_range_blacklist
# in preference to this, otherwise someone could define a public DNS
# entry that points to a private IP address and circumvent the blacklist.
# This is more useful if you know there is an entire shape of URL that
# you will never want synapse to try to spider.
#
# Each list entry is a dictionary of url component attributes as returned
# by urlparse.urlsplit as applied to the absolute form of the URL. See
# https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
# The values of the dictionary are treated as a filename match pattern
# applied to that component of URLs, unless they start with a ^ in which
# case they are treated as a regular expression match. If all the
# specified component matches for a given list item succeed, the URL is
# blacklisted.
#
# url_preview_url_blacklist:
# # blacklist any URL with a username in its URI
# - username: '*'
#
# # blacklist all *.google.com URLs
# - netloc: 'google.com'
# - netloc: '*.google.com'
#
# # blacklist all plain HTTP URLs
# - scheme: 'http'
#
# # blacklist http(s)://www.acme.com/foo
# - netloc: 'www.acme.com'
# path: '/foo'
#
# # blacklist any URL with a literal IPv4 address
# - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
# The largest allowed URL preview spidering size in bytes
max_spider_size: "10M"
## Captcha ##
# See docs/CAPTCHA_SETUP for full details of configuring this.
# This Home Server's ReCAPTCHA public key.
recaptcha_public_key: "YOUR_PUBLIC_KEY"
# This Home Server's ReCAPTCHA private key.
recaptcha_private_key: "YOUR_PRIVATE_KEY"
# Enables ReCaptcha checks when registering, preventing signup
# unless a captcha is answered. Requires a valid ReCaptcha
# public/private key.
enable_registration_captcha: False
# A secret key used to bypass the captcha test entirely.
#captcha_bypass_secret: "YOUR_SECRET_HERE"
# The API endpoint to use for verifying m.login.recaptcha responses.
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
## Turn ##
# The public URIs of the TURN server to give to clients
turn_uris: []
# The shared secret used to compute passwords for the TURN server
turn_shared_secret: "YOUR_SHARED_SECRET"
# The Username and password if the TURN server needs them and
# does not use a token
#turn_username: "TURNSERVER_USERNAME"
#turn_password: "TURNSERVER_PASSWORD"
# How long generated TURN credentials last
turn_user_lifetime: "1h"
# Whether guests should be allowed to use the TURN server.
# This defaults to True, otherwise VoIP will be unreliable for guests.
# However, it does introduce a slight security risk as it allows users to
# connect to arbitrary endpoints without having first signed up for a
# valid account (e.g. by passing a CAPTCHA).
turn_allow_guests: False
## Registration ##
# Enable registration for new users.
enable_registration: False
# The user must provide all of the below types of 3PID when registering.
#
# registrations_require_3pid:
# - email
# - msisdn
# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
#
# allowed_local_3pids:
# - medium: email
# pattern: ".*@matrix\.org"
# - medium: email
# pattern: ".*@vector\.im"
# - medium: msisdn
# pattern: "\+44"
# If set, allows registration by anyone who also has the shared
# secret, even if registration is otherwise disabled.
# registration_shared_secret: <PRIVATE STRING>
# Set the number of bcrypt rounds used to generate password hash.
# Larger numbers increase the work factor needed to generate the hash.
# The default number is 12 (which equates to 2^12 rounds).
# N.B. that increasing this will exponentially increase the time required
# to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
bcrypt_rounds: 12
# Allows users to register as guests without a password/email/etc, and
# participate in rooms hosted on this server which have been made
# accessible to anonymous users.
allow_guest_access: False
# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
- matrix.org
- vector.im
- riot.im
# Users who register on this homeserver will automatically be joined
# to these rooms
#auto_join_rooms:
# - "#example:example.com"
## Metrics ###
# Enable collection and rendering of performance metrics
enable_metrics: False
## API Configuration ##
# A list of event types that will be included in the room_invite_state
room_invite_state_types:
- "m.room.join_rules"
- "m.room.canonical_alias"
- "m.room.avatar"
- "m.room.name"
# A list of application service config file to use
app_service_config_files: []
# macaroon_secret_key: <PRIVATE STRING>
# Used to enable access token expiration.
expire_access_token: False
## Signing Keys ##
# Path to the signing key to sign messages with
signing_key_path: "/etc/matrix-synapse/homeserver.signing.key"
# The keys that the server used to sign messages with but won't use
# to sign new messages. E.g. it has lost its private key
old_signing_keys: {}
# "ed25519:auto":
# # Base64 encoded public key
# key: "The public part of your old signing key."
# # Millisecond POSIX timestamp when the key expired.
# expired_ts: 123456789123
# How long key response published by this server is valid for.
# Used to set the valid_until_ts in /key/v2 APIs.
# Determines how quickly servers will query to check which keys
# are still valid.
key_refresh_interval: "1d" # 1 Day.
# The trusted servers to download signing keys from.
perspectives:
servers:
"matrix.org":
verify_keys:
"ed25519:auto":
key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
# Enable SAML2 for registration and login. Uses pysaml2
# config_path: Path to the sp_conf.py configuration file
# idp_redirect_url: Identity provider URL which will redirect
# the user back to /login/saml2 with proper info.
# See pysaml2 docs for format of config.
#saml2_config:
# enabled: true
# config_path: "/home/erikj/git/synapse/sp_conf.py"
# idp_redirect_url: "http://test/idp"
# Enable CAS for registration and login.
#cas_config:
# enabled: true
# server_url: "https://cas-server.com"
# service_url: "https://homeserver.domain.com:8448"
# #required_attributes:
# # name: value
# The JWT needs to contain a globally unique "sub" (subject) claim.
#
# jwt_config:
# enabled: true
# secret: "a secret"
# algorithm: "HS256"
# Enable password for login.
password_config:
enabled: true
# Uncomment and change to a secret random string for extra security.
# DO NOT CHANGE THIS AFTER INITIAL SETUP!
#pepper: ""
# Enable sending emails for notification events
# Defining a custom URL for Riot is only needed if email notifications
# should contain links to a self-hosted installation of Riot; when set
# the "app_name" setting is ignored.
#
# If your SMTP server requires authentication, the optional smtp_user &
# smtp_pass variables should be used
#
#email:
# enable_notifs: false
# smtp_host: "localhost"
# smtp_port: 25
# smtp_user: "exampleusername"
# smtp_pass: "examplepassword"
# require_transport_security: False
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
# app_name: Matrix
# template_dir: res/templates
# notif_template_html: notif_mail.html
# notif_template_text: notif_mail.txt
# notif_for_new_users: True
# riot_base_url: "http://localhost/riot"
# password_providers:
# - module: "ldap_auth_provider.LdapAuthProvider"
# config:
# enabled: true
# uri: "ldap://ldap.example.com:389"
# start_tls: true
# base: "ou=users,dc=example,dc=com"
# attributes:
# uid: "cn"
# mail: "email"
# name: "givenName"
# #bind_dn:
# #bind_password:
# #filter: "(objectClass=posixAccount)"
# Clients requesting push notifications can either have the body of
# the message sent in the notification poke along with other details
# like the sender, or just the event ID and room ID (`event_id_only`).
# If clients choose the former, this option controls whether the
# notification request includes the content of the event (other details
# like the sender are still included). For `event_id_only` push, it
# has no effect.
# For modern android devices the notification content will still appear
# because it is loaded by the app. iPhone, however, will send a
# notification saying only that a message arrived and who it came from.
#
#push:
# include_content: true
# spam_checker:
# module: "my_custom_project.SuperSpamChecker"
# config:
# example_option: 'things'
# Whether to allow non server admins to create groups on this server
enable_group_creation: false
# If enabled, non server admins can only create groups with local parts
# starting with this prefix
# group_creation_prefix: "unofficial/"
# User Directory configuration
#
# 'search_all_users' defines whether to search all users visible to your HS
# when searching the user directory, rather than limiting to users visible
# in public rooms. Defaults to false. If you set it True, you'll have to run
# UPDATE user_directory_stream_pos SET stream_id = NULL;
# on your database to tell it to rebuild the user_directory search indexes.
#
#user_directory:
# search_all_users: false

debian/install vendored
View File

@@ -1,2 +1 @@
debian/homeserver.yaml etc/matrix-synapse
debian/log.yaml etc/matrix-synapse

View File

@@ -1,9 +0,0 @@
2048-bit DH parameters taken from rfc3526
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEA///////////JD9qiIWjCNMTGYouA3BzRKQJOCIpnzHQCC76mOxOb
IlFKCHmONATd75UZs806QxswKwpt8l8UN0/hNW1tUcJF5IW1dmJefsb0TELppjft
awv/XLb0Brft7jhr+1qJn6WunyQRfEsf5kkoZlHs5Fs9wgB8uKFjvwWY2kg2HFXT
mmkWP6j9JM9fg2VdI9yjrZYcYvNWIIVSu57VKQdwlpZtZww1Tkq8mATxdGwIyhgh
fDKQXkYuNs474553LBgOhgObJ4Oi7Aeij7XFXfBvTFLJ3ivL9pVYFxg5lUl86pVq
5RXSJhiY+gUQFXKOWoqsqmj//////////wIBAg==
-----END DH PARAMETERS-----

View File

@@ -33,9 +33,7 @@ RUN pip install --prefix="/install" --no-warn-script-location \
COPY . /synapse
RUN pip install --prefix="/install" --no-warn-script-location \
lxml \
psycopg2 \
/synapse
/synapse[all]
###
### Stage 1: runtime

View File

@@ -1,46 +0,0 @@
#!/bin/bash
# Build the Debian packages using Docker images.
#
# This script builds the Docker images and then executes them sequentially, each
# one building a Debian package for the targeted operating system. It is
# designed to be a "single command" to produce all the images.
#
# By default, builds for all known distributions, but a list of distributions
# can be passed on the commandline for debugging.
set -ex
cd `dirname $0`
if [ $# -lt 1 ]; then
DISTS=(
debian:stretch
debian:buster
debian:sid
ubuntu:xenial
ubuntu:bionic
ubuntu:cosmic
)
else
DISTS=("$@")
fi
# Make the dir where the debs will live.
#
# Note that we deliberately put this outside the source tree, otherwise we tend
# to get source packages which are full of debs. (We could hack around that
# with more magic in the build_debian.sh script, but that doesn't solve the
# problem for natively-run dpkg-buildpackage).
mkdir -p ../../debs
# Build each OS image;
for i in "${DISTS[@]}"; do
TAG=$(echo ${i} | cut -d ":" -f 2)
docker build --tag dh-venv-builder:${TAG} --build-arg distro=${i} -f Dockerfile-dhvirtualenv .
docker run -it --rm --volume=$(pwd)/../\:/synapse/source:ro --volume=$(pwd)/../../debs:/debs \
-e TARGET_USERID=$(id -u) \
-e TARGET_GROUPID=$(id -g) \
dh-venv-builder:${TAG}
done

View File

@@ -4,7 +4,6 @@
tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh"
no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }}
tls_fingerprints: []

scripts-dev/build_debian_packages Executable file
View File

@@ -0,0 +1,154 @@
#!/usr/bin/env python3
# Build the Debian packages using Docker images.
#
# This script builds the Docker images and then executes them sequentially, each
# one building a Debian package for the targeted operating system. It is
# designed to be a "single command" to produce all the images.
#
# By default, builds for all known distributions, but a list of distributions
# can be passed on the commandline for debugging.
import argparse
import os
import signal
import subprocess
import sys
import threading
from concurrent.futures import ThreadPoolExecutor
DISTS = (
"debian:stretch",
"debian:buster",
"debian:sid",
"ubuntu:xenial",
"ubuntu:bionic",
"ubuntu:cosmic",
)
DESC = '''\
Builds .debs for synapse, using a Docker image for the build environment.
By default, builds for all known distributions, but a list of distributions
can be passed on the commandline for debugging.
'''
class Builder(object):
def __init__(self, redirect_stdout=False):
self.redirect_stdout = redirect_stdout
self.active_containers = set()
self._lock = threading.Lock()
self._failed = False
def run_build(self, dist):
"""Build deb for a single distribution"""
if self._failed:
print("not building %s due to earlier failure" % (dist, ))
raise Exception("failed")
try:
self._inner_build(dist)
except Exception as e:
print("build of %s failed: %s" % (dist, e), file=sys.stderr)
self._failed = True
raise
def _inner_build(self, dist):
projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
os.chdir(projdir)
tag = dist.split(":", 1)[1]
# Make the dir where the debs will live.
#
# Note that we deliberately put this outside the source tree, otherwise
# we tend to get source packages which are full of debs. (We could hack
# around that with more magic in the build_debian.sh script, but that
# doesn't solve the problem for natively-run dpkg-buildpackage).
debsdir = os.path.join(projdir, '../debs')
os.makedirs(debsdir, exist_ok=True)
if self.redirect_stdout:
logfile = os.path.join(debsdir, "%s.buildlog" % (tag, ))
print("building %s: directing output to %s" % (dist, logfile))
stdout = open(logfile, "w")
else:
stdout = None
# first build a docker image for the build environment
subprocess.check_call([
"docker", "build",
"--tag", "dh-venv-builder:" + tag,
"--build-arg", "distro=" + dist,
"-f", "docker/Dockerfile-dhvirtualenv",
"docker",
], stdout=stdout, stderr=subprocess.STDOUT)
container_name = "synapse_build_" + tag
with self._lock:
self.active_containers.add(container_name)
# then run the build itself
subprocess.check_call([
"docker", "run",
"--rm",
"--name", container_name,
"--volume=" + projdir + ":/synapse/source:ro",
"--volume=" + debsdir + ":/debs",
"-e", "TARGET_USERID=%i" % (os.getuid(), ),
"-e", "TARGET_GROUPID=%i" % (os.getgid(), ),
"dh-venv-builder:" + tag,
], stdout=stdout, stderr=subprocess.STDOUT)
with self._lock:
self.active_containers.remove(container_name)
if stdout is not None:
stdout.close()
print("Completed build of %s" % (dist, ))
def kill_containers(self):
with self._lock:
active = list(self.active_containers)
for c in active:
print("killing container %s" % (c,))
subprocess.run([
"docker", "kill", c,
], stdout=subprocess.DEVNULL)
with self._lock:
self.active_containers.remove(c)
def run_builds(dists, jobs=1):
builder = Builder(redirect_stdout=(jobs > 1))
def sig(signum, _frame):
print("Caught SIGINT")
builder.kill_containers()
signal.signal(signal.SIGINT, sig)
with ThreadPoolExecutor(max_workers=jobs) as e:
res = e.map(builder.run_build, dists)
# make sure we consume the iterable so that exceptions are raised.
for r in res:
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=DESC,
)
parser.add_argument(
'-j', '--jobs', type=int, default=1,
help='specify the number of builds to run in parallel',
)
parser.add_argument(
'dist', nargs='*', default=DISTS,
help='a list of distributions to build for. Default: %(default)s',
)
args = parser.parse_args()
run_builds(dists=args.dist, jobs=args.jobs)
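For reference, given the ``argparse`` definition above, the new script can be
invoked along these lines, here building two distributions with two parallel
jobs::
scripts-dev/build_debian_packages -j 2 debian:stretch ubuntu:bionic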

View File

@@ -27,4 +27,4 @@ try:
except ImportError:
pass
__version__ = "0.34.1"
__version__ = "0.99.0rc2"

View File

@@ -46,7 +46,7 @@ def request_registration(
# Get the nonce
r = requests.get(url, verify=False)
if r.status_code is not 200:
if r.status_code != 200:
_print("ERROR! Received %d %s" % (r.status_code, r.reason))
if 400 <= r.status_code < 500:
try:
@@ -84,7 +84,7 @@ def request_registration(
_print("Sending registration request...")
r = requests.post(url, json=data, verify=False)
if r.status_code is not 200:
if r.status_code != 200:
_print("ERROR! Received %d %s" % (r.status_code, r.reason))
if 400 <= r.status_code < 500:
try:

View File

@@ -65,7 +65,7 @@ class Auth(object):
register_cache("cache", "token_cache", self.token_cache)
@defer.inlineCallbacks
def check_from_context(self, event, context, do_sig_check=True):
def check_from_context(self, room_version, event, context, do_sig_check=True):
prev_state_ids = yield context.get_prev_state_ids(self.store)
auth_events_ids = yield self.compute_auth_events(
event, prev_state_ids, for_verification=True,
@@ -74,12 +74,16 @@ class Auth(object):
auth_events = {
(e.type, e.state_key): e for e in itervalues(auth_events)
}
self.check(event, auth_events=auth_events, do_sig_check=do_sig_check)
self.check(
room_version, event,
auth_events=auth_events, do_sig_check=do_sig_check,
)
def check(self, event, auth_events, do_sig_check=True):
def check(self, room_version, event, auth_events, do_sig_check=True):
""" Checks if this event is correctly authed.
Args:
room_version (str): version of the room
event: the event being checked.
auth_events (dict: event-key -> event): the existing room state.
@@ -88,7 +92,9 @@ class Auth(object):
True if the auth checks pass.
"""
with Measure(self.clock, "auth.check"):
event_auth.check(event, auth_events, do_sig_check=do_sig_check)
event_auth.check(
room_version, event, auth_events, do_sig_check=do_sig_check
)
@defer.inlineCallbacks
def check_joined_room(self, room_id, user_id, current_state=None):
@@ -300,20 +306,28 @@ class Auth(object):
Raises:
AuthError if no user by that token exists or the token is invalid.
"""
if rights == "access":
# first look in the database
r = yield self._look_up_user_by_access_token(token)
if r:
defer.returnValue(r)
# otherwise it needs to be a valid macaroon
try:
user_id, guest = self._parse_and_validate_macaroon(token, rights)
except _InvalidMacaroonException:
# doesn't look like a macaroon: treat it as an opaque token which
# must be in the database.
# TODO: it would be nice to get rid of this, but apparently some
# people use access tokens which aren't macaroons
r = yield self._look_up_user_by_access_token(token)
defer.returnValue(r)
try:
user = UserID.from_string(user_id)
if guest:
if rights == "access":
if not guest:
# non-guest access tokens must be in the database
logger.warning("Unrecognised access token - not in store.")
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN,
)
# Guest access tokens are not stored in the database (there can
# only be one access token per guest, anyway).
#
@@ -354,31 +368,15 @@ class Auth(object):
"device_id": None,
}
else:
# This codepath exists for several reasons:
# * so that we can actually return a token ID, which is used
# in some parts of the schema (where we probably ought to
# use device IDs instead)
# * the only way we currently have to invalidate an
# access_token is by removing it from the database, so we
# have to check here that it is still in the db
# * some attributes (notably device_id) aren't stored in the
# macaroon. They probably should be.
# TODO: build the dictionary from the macaroon once the
# above are fixed
ret = yield self._look_up_user_by_access_token(token)
if ret["user"] != user:
logger.error(
"Macaroon user (%s) != DB user (%s)",
user,
ret["user"]
)
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"User mismatch in macaroon",
errcode=Codes.UNKNOWN_TOKEN
)
raise RuntimeError("Unknown rights setting %s", rights)
defer.returnValue(ret)
except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
except (
_InvalidMacaroonException,
pymacaroons.exceptions.MacaroonException,
TypeError,
ValueError,
) as e:
logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Invalid macaroon passed.",
errcode=Codes.UNKNOWN_TOKEN
@@ -508,11 +506,8 @@ class Auth(object):
def _look_up_user_by_access_token(self, token):
ret = yield self.store.get_user_by_access_token(token)
if not ret:
logger.warn("Unrecognised access token - not in store.")
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN
)
defer.returnValue(None)
# we use ret.get() below because *lots* of unit tests stub out
# get_user_by_access_token in a way where it only returns a couple of
# the fields.
@@ -555,17 +550,6 @@ class Auth(object):
"""
return self.store.is_server_admin(user)
@defer.inlineCallbacks
def add_auth_events(self, builder, context):
prev_state_ids = yield context.get_prev_state_ids(self.store)
auth_ids = yield self.compute_auth_events(builder, prev_state_ids)
auth_events_entries = yield self.store.add_event_hashes(
auth_ids
)
builder.auth_events = auth_events_entries
@defer.inlineCallbacks
def compute_auth_events(self, event, current_state_ids, for_verification=False):
if event.type == EventTypes.Create:
@@ -582,7 +566,7 @@ class Auth(object):
key = (EventTypes.JoinRules, "", )
join_rule_event_id = current_state_ids.get(key)
key = (EventTypes.Member, event.user_id, )
key = (EventTypes.Member, event.sender, )
member_event_id = current_state_ids.get(key)
key = (EventTypes.Create, "", )
@@ -632,7 +616,7 @@ class Auth(object):
defer.returnValue(auth_ids)
def check_redaction(self, event, auth_events):
def check_redaction(self, room_version, event, auth_events):
"""Check whether the event sender is allowed to redact the target event.
Returns:
@@ -645,7 +629,7 @@ class Auth(object):
AuthError if the event sender is definitely not allowed to redact
the target event.
"""
return event_auth.check_redaction(event, auth_events)
return event_auth.check_redaction(room_version, event, auth_events)
@defer.inlineCallbacks
def check_can_change_room_list(self, room_id, user):
@@ -830,7 +814,9 @@ class Auth(object):
elif threepid:
# If the user does not exist yet, but is signing up with a
# reserved threepid then pass auth check
if is_threepid_reserved(self.hs.config, threepid):
if is_threepid_reserved(
self.hs.config.mau_limits_reserved_threepids, threepid
):
return
# Else if there is no room in the MAU bucket, bail
current_mau = yield self.store.get_monthly_active_count()

View File

@@ -68,10 +68,12 @@ class EventTypes(object):
Aliases = "m.room.aliases"
Redaction = "m.room.redaction"
ThirdPartyInvite = "m.room.third_party_invite"
Encryption = "m.room.encryption"
RoomHistoryVisibility = "m.room.history_visibility"
CanonicalAlias = "m.room.canonical_alias"
RoomAvatar = "m.room.avatar"
RoomEncryption = "m.room.encryption"
GuestAccess = "m.room.guest_access"
# These are used for validation
@@ -103,10 +105,15 @@ class ThirdPartyEntityKind(object):
class RoomVersions(object):
V1 = "1"
V2 = "2"
VDH_TEST = "vdh-test-version"
V3 = "3"
STATE_V2_TEST = "state-v2-test"
class RoomDisposition(object):
STABLE = "stable"
UNSTABLE = "unstable"
# the version we will give rooms which are created on this server
DEFAULT_ROOM_VERSION = RoomVersions.V1
@@ -115,10 +122,26 @@ DEFAULT_ROOM_VERSION = RoomVersions.V1
KNOWN_ROOM_VERSIONS = {
RoomVersions.V1,
RoomVersions.V2,
RoomVersions.VDH_TEST,
RoomVersions.V3,
RoomVersions.STATE_V2_TEST,
RoomVersions.V3,
}
class EventFormatVersions(object):
"""This is an internal enum for tracking the version of the event format,
independently from the room version.
"""
V1 = 1
V2 = 2
KNOWN_EVENT_FORMAT_VERSIONS = {
EventFormatVersions.V1,
EventFormatVersions.V2,
}
ServerNoticeMsgType = "m.server_notice"
ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
@@ -128,4 +151,4 @@ class UserTypes(object):
'admin' and 'guest' users should also be UserTypes. Normal users are type None
"""
SUPPORT = "support"
ALL_USER_TYPES = (SUPPORT)
ALL_USER_TYPES = (SUPPORT,)

View File

@@ -444,6 +444,20 @@ class Filter(object):
def include_redundant_members(self):
return self.filter_json.get("include_redundant_members", False)
def with_room_ids(self, room_ids):
"""Returns a new filter with the given room IDs appended.
Args:
room_ids (iterable[unicode]): The room_ids to add
Returns:
filter: A new filter including the given rooms and the old
filter's rooms.
"""
newFilter = Filter(self.filter_json)
newFilter.rooms += room_ids
return newFilter
def _matches_wildcard(actual_value, filter_value):
if filter_value.endswith("*"):

View File

@@ -24,7 +24,9 @@ from synapse.config import ConfigError
CLIENT_PREFIX = "/_matrix/client/api/v1"
CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
FEDERATION_PREFIX = "/_matrix/federation/v1"
FEDERATION_PREFIX = "/_matrix/federation"
FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
STATIC_PREFIX = "/_matrix/static"
WEB_CLIENT_PREFIX = "/_matrix/client"
CONTENT_REPO_PREFIX = "/_matrix/content"

View File

@@ -12,15 +12,38 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from synapse import python_dependencies # noqa: E402
sys.dont_write_bytecode = True
logger = logging.getLogger(__name__)
try:
python_dependencies.check_requirements()
except python_dependencies.DependencyException as e:
sys.stderr.writelines(e.message)
sys.exit(1)
def check_bind_error(e, address, bind_addresses):
"""
This method checks an exception occurred while binding on 0.0.0.0.
If :: is specified in the bind addresses a warning is shown.
The exception is still raised otherwise.
Binding on both 0.0.0.0 and :: causes an exception on Linux and macOS
because :: binds on both IPv4 and IPv6 (as per RFC 3493).
When binding on 0.0.0.0 after :: this can safely be ignored.
Args:
e (Exception): Exception that was caught.
address (str): Address on which binding was attempted.
bind_addresses (list): Addresses on which the service listens.
"""
if address == '0.0.0.0' and '::' in bind_addresses:
logger.warn('Failed to listen on 0.0.0.0, continuing because listening on [::]')
else:
raise e

View File

@@ -22,6 +22,7 @@ from daemonize import Daemonize
from twisted.internet import error, reactor
from synapse.app import check_bind_error
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit
@@ -143,6 +144,9 @@ def listen_metrics(bind_addresses, port):
def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
"""
Create a TCP socket for a port and several addresses
Returns:
list (empty)
"""
for address in bind_addresses:
try:
@@ -155,42 +159,33 @@ def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
except error.CannotListenError as e:
check_bind_error(e, address, bind_addresses)
logger.info("Synapse now listening on TCP port %d", port)
return []
def listen_ssl(
bind_addresses, port, factory, context_factory, reactor=reactor, backlog=50
):
"""
Create an SSL socket for a port and several addresses
Create an TLS-over-TCP socket for a port and several addresses
Returns:
list of twisted.internet.tcp.Port listening for TLS connections
"""
r = []
for address in bind_addresses:
try:
reactor.listenSSL(
port,
factory,
context_factory,
backlog,
address
r.append(
reactor.listenSSL(
port,
factory,
context_factory,
backlog,
address
)
)
except error.CannotListenError as e:
check_bind_error(e, address, bind_addresses)
def check_bind_error(e, address, bind_addresses):
"""
This method checks an exception occurred while binding on 0.0.0.0.
If :: is specified in the bind addresses a warning is shown.
The exception is still raised otherwise.
Binding on both 0.0.0.0 and :: causes an exception on Linux and macOS
because :: binds on both IPv4 and IPv6 (as per RFC 3493).
When binding on 0.0.0.0 after :: this can safely be ignored.
Args:
e (Exception): Exception that was caught.
address (str): Address on which binding was attempted.
bind_addresses (list): Addresses on which the service listens.
"""
if address == '0.0.0.0' and '::' in bind_addresses:
logger.warn('Failed to listen on 0.0.0.0, continuing because listening on [::]')
else:
raise e
logger.info("Synapse now listening on port %d (TLS)", port)
return r

View File

@@ -164,23 +164,23 @@ def start(config_options):
database_engine = create_engine(config.database_config)
tls_server_context_factory = context_factory.ServerContextFactory(config)
tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
ss = ClientReaderServer(
config.server_name,
db_config=config.database_config,
tls_server_context_factory=tls_server_context_factory,
tls_client_options_factory=tls_client_options_factory,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
ss.setup()
ss.start_listening(config.worker_listeners)
def start():
ss.config.read_certificate_from_disk()
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
config
)
ss.start_listening(config.worker_listeners)
ss.get_datastore().start_profiling()
reactor.callWhenRunning(start)

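Each worker now wraps the certificate read, the context-factory rebuild, and the listener start-up in a start() callback run via reactor.callWhenRunning, so the certificate files are only touched once the reactor is up. A stripped-down sketch of the pattern, with the handlers reduced to a print:

    from twisted.internet import reactor

    def start():
        # In the workers this re-reads the TLS certificate from disk,
        # rebuilds the context factories, and only then starts listening.
        print("reactor is running; safe to read certificates and listen")
        reactor.stop()

    reactor.callWhenRunning(start)  # fires exactly once, after startup
    reactor.run()
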
View File

@@ -185,23 +185,23 @@ def start(config_options):
database_engine = create_engine(config.database_config)
tls_server_context_factory = context_factory.ServerContextFactory(config)
tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
ss = EventCreatorServer(
config.server_name,
db_config=config.database_config,
tls_server_context_factory=tls_server_context_factory,
tls_client_options_factory=tls_client_options_factory,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
ss.setup()
ss.start_listening(config.worker_listeners)
def start():
ss.config.read_certificate_from_disk()
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
config
)
ss.start_listening(config.worker_listeners)
ss.get_datastore().start_profiling()
reactor.callWhenRunning(start)

View File

@@ -151,23 +151,23 @@ def start(config_options):
database_engine = create_engine(config.database_config)
tls_server_context_factory = context_factory.ServerContextFactory(config)
tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
ss = FederationReaderServer(
config.server_name,
db_config=config.database_config,
tls_server_context_factory=tls_server_context_factory,
tls_client_options_factory=tls_client_options_factory,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
ss.setup()
ss.start_listening(config.worker_listeners)
def start():
ss.config.read_certificate_from_disk()
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
config
)
ss.start_listening(config.worker_listeners)
ss.get_datastore().start_profiling()
reactor.callWhenRunning(start)

View File

@@ -183,24 +183,24 @@ def start(config_options):
# Force the federation sender to start since it will be disabled in the main config
config.send_federation = True
tls_server_context_factory = context_factory.ServerContextFactory(config)
tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
ps = FederationSenderServer(
ss = FederationSenderServer(
config.server_name,
db_config=config.database_config,
tls_server_context_factory=tls_server_context_factory,
tls_client_options_factory=tls_client_options_factory,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
ps.setup()
ps.start_listening(config.worker_listeners)
ss.setup()
def start():
ps.get_datastore().start_profiling()
ss.config.read_certificate_from_disk()
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
config
)
ss.start_listening(config.worker_listeners)
ss.get_datastore().start_profiling()
reactor.callWhenRunning(start)
_base.start_worker_reactor("synapse-federation-sender", config)

View File

@@ -241,23 +241,23 @@ def start(config_options):
database_engine = create_engine(config.database_config)
tls_server_context_factory = context_factory.ServerContextFactory(config)
tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
ss = FrontendProxyServer(
config.server_name,
db_config=config.database_config,
tls_server_context_factory=tls_server_context_factory,
tls_client_options_factory=tls_client_options_factory,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
ss.setup()
ss.start_listening(config.worker_listeners)
def start():
ss.config.read_certificate_from_disk()
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
config
)
ss.start_listening(config.worker_listeners)
ss.get_datastore().start_profiling()
reactor.callWhenRunning(start)

View File

@@ -13,10 +13,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import logging
import os
import signal
import sys
import traceback
from six import iteritems
@@ -25,6 +28,7 @@ from prometheus_client import Gauge
from twisted.application import service
from twisted.internet import defer, reactor
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.web.resource import EncodingResourceWrapper, NoResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File
@@ -82,6 +86,7 @@ def gz_wrap(r):
class SynapseHomeServer(HomeServer):
DATASTORE_CLASS = DataStore
_listening_services = []
def _listener_http(self, config, listener_config):
port = listener_config["port"]
@@ -90,7 +95,9 @@ class SynapseHomeServer(HomeServer):
site_tag = listener_config.get("tag", port)
if tls and config.no_tls:
return
raise ConfigError(
"Listener on port %i has TLS enabled, but no_tls is set" % (port,),
)
resources = {}
for res in listener_config["resources"]:
@@ -119,7 +126,7 @@ class SynapseHomeServer(HomeServer):
root_resource = create_resource_tree(resources, root_resource)
if tls:
listen_ssl(
return listen_ssl(
bind_addresses,
port,
SynapseSite(
@@ -133,7 +140,7 @@ class SynapseHomeServer(HomeServer):
)
else:
listen_tcp(
return listen_tcp(
bind_addresses,
port,
SynapseSite(
@@ -144,7 +151,6 @@ class SynapseHomeServer(HomeServer):
self.version_string,
)
)
logger.info("Synapse now listening on port %d", port)
def _configure_named_resource(self, name, compress=False):
"""Build a resource map for a named resource
@@ -240,7 +246,9 @@ class SynapseHomeServer(HomeServer):
for listener in config.listeners:
if listener["type"] == "http":
self._listener_http(config, listener)
self._listening_services.extend(
self._listener_http(config, listener)
)
elif listener["type"] == "manhole":
listen_tcp(
listener["bind_addresses"],
@@ -320,21 +328,28 @@ def setup(config_options):
# generating config files and shouldn't try to continue.
sys.exit(0)
synapse.config.logger.setup_logging(config, use_worker_options=False)
sighup_callbacks = []
synapse.config.logger.setup_logging(
config,
use_worker_options=False,
register_sighup=sighup_callbacks.append
)
def handle_sighup(*args, **kwargs):
for i in sighup_callbacks:
i(*args, **kwargs)
if hasattr(signal, "SIGHUP"):
signal.signal(signal.SIGHUP, handle_sighup)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
tls_server_context_factory = context_factory.ServerContextFactory(config)
tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
database_engine = create_engine(config.database_config)
config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
hs = SynapseHomeServer(
config.server_name,
db_config=config.database_config,
tls_server_context_factory=tls_server_context_factory,
tls_client_options_factory=tls_client_options_factory,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
@@ -361,12 +376,78 @@ def setup(config_options):
logger.info("Database prepared in %s.", config.database_config['name'])
hs.setup()
hs.start_listening()
def refresh_certificate(*args):
"""
Refresh the TLS certificates that Synapse is using by re-reading them
from disk and updating the TLS context factories to use them.
"""
logging.info("Reloading certificate from disk...")
hs.config.read_certificate_from_disk()
hs.tls_server_context_factory = context_factory.ServerContextFactory(config)
hs.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
config
)
logging.info("Certificate reloaded.")
logging.info("Updating context factories...")
for i in hs._listening_services:
if isinstance(i.factory, TLSMemoryBIOFactory):
i.factory = TLSMemoryBIOFactory(
hs.tls_server_context_factory,
False,
i.factory.wrappedFactory
)
logging.info("Context factories updated.")
sighup_callbacks.append(refresh_certificate)
@defer.inlineCallbacks
def start():
hs.get_pusherpool().start()
hs.get_datastore().start_profiling()
hs.get_datastore().start_doing_background_updates()
try:
# Check if the certificate is still valid.
cert_days_remaining = hs.config.is_disk_cert_valid()
if hs.config.acme_enabled:
# If ACME is enabled, we might need to provision a certificate
# before starting.
acme = hs.get_acme_handler()
# Start up the webservices which we will respond to ACME
# challenges with.
yield acme.start_listening()
# We want to reprovision if cert_days_remaining is None (meaning no
# certificate exists), or if the number of days remaining is less than
# our reprovision threshold.
if (cert_days_remaining is None) or (
not cert_days_remaining > hs.config.acme_reprovision_threshold
):
yield acme.provision_certificate()
# Read the certificate from disk and build the context factories for
# TLS.
hs.config.read_certificate_from_disk()
hs.tls_server_context_factory = context_factory.ServerContextFactory(config)
hs.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
config
)
# It is now safe to start your Synapse.
hs.start_listening()
hs.get_pusherpool().start()
hs.get_datastore().start_profiling()
hs.get_datastore().start_doing_background_updates()
except Exception as e:
# If a DeferredList failed (like in listening on the ACME listener),
# we need to print the subfailure explicitly.
if isinstance(e, defer.FirstError):
e.subFailure.printTraceback(sys.stderr)
sys.exit(1)
# Something else went wrong when starting. Print it and bail out.
traceback.print_exc(file=sys.stderr)
sys.exit(1)
reactor.callWhenRunning(start)

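A self-contained sketch (not from the patch) of the SIGHUP fan-out used above, guarded with hasattr for platforms such as Windows that have no SIGHUP:

    import os
    import signal

    sighup_callbacks = []

    def handle_sighup(*args, **kwargs):
        # Fan a single SIGHUP out to every registered callback.
        for callback in sighup_callbacks:
            callback(*args, **kwargs)

    sighup_callbacks.append(lambda *a, **kw: print("reloading logging config..."))
    sighup_callbacks.append(lambda *a, **kw: print("reloading certificates..."))

    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, handle_sighup)
        os.kill(os.getpid(), signal.SIGHUP)  # simulate `kill -HUP <pid>`
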
View File

@@ -151,23 +151,23 @@ def start(config_options):
database_engine = create_engine(config.database_config)
tls_server_context_factory = context_factory.ServerContextFactory(config)
tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
ss = MediaRepositoryServer(
config.server_name,
db_config=config.database_config,
tls_server_context_factory=tls_server_context_factory,
tls_client_options_factory=tls_client_options_factory,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
ss.setup()
ss.start_listening(config.worker_listeners)
def start():
ss.config.read_certificate_from_disk()
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
config
)
ss.start_listening(config.worker_listeners)
ss.get_datastore().start_profiling()
reactor.callWhenRunning(start)

View File

@@ -211,24 +211,24 @@ def start(config_options):
# Force the user directory updater to start since it will be disabled in the main config
config.update_user_directory = True
tls_server_context_factory = context_factory.ServerContextFactory(config)
tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
ps = UserDirectoryServer(
ss = UserDirectoryServer(
config.server_name,
db_config=config.database_config,
tls_server_context_factory=tls_server_context_factory,
tls_client_options_factory=tls_client_options_factory,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
ps.setup()
ps.start_listening(config.worker_listeners)
ss.setup()
def start():
ps.get_datastore().start_profiling()
ss.config.read_certificate_from_disk()
ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
config
)
ss.start_listening(config.worker_listeners)
ss.get_datastore().start_profiling()
reactor.callWhenRunning(start)

View File

@@ -367,7 +367,7 @@ class Config(object):
if not keys_directory:
keys_directory = os.path.dirname(config_files[-1])
config_dir_path = os.path.abspath(keys_directory)
self.config_dir_path = os.path.abspath(keys_directory)
specified_config = {}
for config_file in config_files:
@@ -379,7 +379,7 @@ class Config(object):
server_name = specified_config["server_name"]
config_string = self.generate_config(
config_dir_path=config_dir_path,
config_dir_path=self.config_dir_path,
data_dir_path=os.getcwd(),
server_name=server_name,
generate_secrets=False,

View File

@@ -24,6 +24,7 @@ class ApiConfig(Config):
EventTypes.JoinRules,
EventTypes.CanonicalAlias,
EventTypes.RoomAvatar,
EventTypes.RoomEncryption,
EventTypes.Name,
])
@@ -36,5 +37,6 @@ class ApiConfig(Config):
- "{JoinRules}"
- "{CanonicalAlias}"
- "{RoomAvatar}"
- "{RoomEncryption}"
- "{Name}"
""".format(**vars(EventTypes))

View File

@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path
from synapse.config import ConfigError
from ._base import Config
DEFAULT_CONFIG = """\
@@ -85,7 +89,15 @@ class ConsentConfig(Config):
if consent_config is None:
return
self.user_consent_version = str(consent_config["version"])
self.user_consent_template_dir = consent_config["template_dir"]
self.user_consent_template_dir = self.abspath(
consent_config["template_dir"]
)
if not path.isdir(self.user_consent_template_dir):
raise ConfigError(
"Could not find template directory '%s'" % (
self.user_consent_template_dir,
),
)
self.user_consent_server_notice_content = consent_config.get(
"server_notice_content",
)

View File

@@ -57,8 +57,8 @@ class KeyConfig(Config):
# Unfortunately, there are people out there that don't have this
# set. Lets just be "nice" and derive one from their secret key.
logger.warn("Config is missing macaroon_secret_key")
seed = self.signing_key[0].seed
self.macaroon_secret_key = hashlib.sha256(seed)
seed = bytes(self.signing_key[0])
self.macaroon_secret_key = hashlib.sha256(seed).digest()
self.expire_access_token = config.get("expire_access_token", False)
@@ -83,9 +83,6 @@ class KeyConfig(Config):
# a secret which is used to sign access tokens. If none is specified,
# the registration_shared_secret is used, if one is given; otherwise,
# a secret key is derived from the signing key.
#
# Note that changing this will invalidate any active access tokens, so
# all clients will have to log back in.
%(macaroon_secret_key)s
# Used to enable access token expiration.

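The fix above is twofold: hash the raw key bytes, and keep the .digest() rather than the sha256 hash object itself. A minimal sketch, with a hypothetical 32-byte seed standing in for bytes(self.signing_key[0]):

    import hashlib

    # Hypothetical seed; in Synapse this comes from the signing key.
    seed = b"\x01" * 32

    # The old code stored the sha256 *object*; the fix keeps the 32-byte
    # digest, a stable bytes value usable as a macaroon secret.
    macaroon_secret_key = hashlib.sha256(seed).digest()
    assert isinstance(macaroon_secret_key, bytes) and len(macaroon_secret_key) == 32
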
View File

@@ -127,7 +127,7 @@ class LoggingConfig(Config):
)
def setup_logging(config, use_worker_options=False):
def setup_logging(config, use_worker_options=False, register_sighup=None):
""" Set up python logging
Args:
@@ -136,7 +136,16 @@ def setup_logging(config, use_worker_options=False):
use_worker_options (bool): True to use 'worker_log_config' and
'worker_log_file' options instead of 'log_config' and 'log_file'.
register_sighup (func | None): Function to call to register a
sighup handler.
"""
if not register_sighup:
if hasattr(signal, "SIGHUP"):
register_sighup = lambda x: signal.signal(signal.SIGHUP, x)
else:
register_sighup = lambda x: None
log_config = (config.worker_log_config if use_worker_options
else config.log_config)
log_file = (config.worker_log_file if use_worker_options
@@ -198,13 +207,7 @@ def setup_logging(config, use_worker_options=False):
load_log_config()
# TODO(paul): obviously this is a terrible mechanism for
# stealing SIGHUP, because it means no other part of synapse
# can use it instead. If we want to catch SIGHUP anywhere
# else as well, I'd suggest we find a nicer way to broadcast
# it around.
if getattr(signal, "SIGHUP"):
signal.signal(signal.SIGHUP, sighup)
register_sighup(sighup)
# make sure that the first thing we log is a thing we can grep backwards
# for

View File

@@ -50,6 +50,10 @@ class RegistrationConfig(Config):
raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,))
self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
self.disable_msisdn_registration = (
config.get("disable_msisdn_registration", False)
)
def default_config(self, generate_secrets=False, **kwargs):
if generate_secrets:
registration_shared_secret = 'registration_shared_secret: "%s"' % (
@@ -70,16 +74,21 @@ class RegistrationConfig(Config):
# - email
# - msisdn
# Explicitly disable asking for MSISDNs from the registration
# flow (overrides registrations_require_3pid if MSISDNs are set as required)
#
# disable_msisdn_registration = True
# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
#
# allowed_local_3pids:
# - medium: email
# pattern: ".*@matrix\\.org"
# pattern: '.*@matrix\\.org'
# - medium: email
# pattern: ".*@vector\\.im"
# pattern: '.*@vector\\.im'
# - medium: msisdn
# pattern: "\\+44"
# pattern: '\\+44'
# If set, allows registration by anyone who also has the shared
# secret, even if registration is otherwise disabled.

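These pattern lines live inside a Python docstring, so the doubled backslash renders as a single backslash in the generated YAML; the motivation for the quoting change appears to be that double-quoted YAML scalars process backslash escapes, and `\.` is not a recognised escape. A sketch of the difference, assuming PyYAML is installed:

    import yaml  # assumes PyYAML

    # Single-quoted YAML keeps the backslash literally, as the regex needs:
    assert yaml.safe_load(r"pattern: '.*@matrix\.org'") == {"pattern": r".*@matrix\.org"}

    # Double quotes process escapes, and '\.' is not a valid YAML escape:
    try:
        yaml.safe_load(r'pattern: ".*@matrix\.org"')
    except yaml.YAMLError as exc:
        print("invalid YAML:", exc)
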
View File

@@ -256,8 +256,12 @@ class ServerConfig(Config):
#
# web_client_location: "/path/to/web/root"
# The public-facing base URL for the client API (not including _matrix/...)
# public_baseurl: https://example.com:8448/
# The public-facing base URL that clients use to access this HS
# (not including _matrix/...). This is the same URL a user would
# enter into the 'custom HS URL' field on their client. If you
# use synapse with a reverse proxy, this should be the URL to reach
# synapse via the proxy.
# public_baseurl: https://example.com/
# Set the soft limit on the number of file descriptors synapse can use
# Zero is used to indicate synapse should set the soft limit to the
@@ -420,19 +424,18 @@ class ServerConfig(Config):
" service on the given port.")
def is_threepid_reserved(config, threepid):
def is_threepid_reserved(reserved_threepids, threepid):
"""Check the threepid against the reserved threepid config
Args:
config(ServerConfig) - to access server config attributes
reserved_threepids([dict]) - list of reserved threepids
threepid(dict) - The threepid to test for
Returns:
bool: Is the threepid under test a reserved user
"""
for tp in config.mau_limits_reserved_threepids:
if (threepid['medium'] == tp['medium']
and threepid['address'] == tp['address']):
for tp in reserved_threepids:
if (threepid['medium'] == tp['medium'] and threepid['address'] == tp['address']):
return True
return False

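The refactor decouples the helper from ServerConfig: callers now pass config.mau_limits_reserved_threepids directly, which also makes it trivially testable. An equivalent standalone version plus a usage example with hypothetical data:

    def is_threepid_reserved(reserved_threepids, threepid):
        # True if the medium/address pair appears in the reserved list.
        return any(
            threepid["medium"] == tp["medium"] and threepid["address"] == tp["address"]
            for tp in reserved_threepids
        )

    reserved = [{"medium": "email", "address": "support@example.com"}]
    assert is_threepid_reserved(reserved, {"medium": "email", "address": "support@example.com"})
    assert not is_threepid_reserved(reserved, {"medium": "email", "address": "alice@example.com"})
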
View File

@@ -13,52 +13,42 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
import warnings
from datetime import datetime
from hashlib import sha256
from unpaddedbase64 import encode_base64
from OpenSSL import crypto
from ._base import Config
from synapse.config._base import Config
GENERATE_DH_PARAMS = False
logger = logging.getLogger()
class TlsConfig(Config):
def read_config(self, config):
self.tls_certificate = self.read_tls_certificate(
config.get("tls_certificate_path")
)
self.tls_certificate_file = config.get("tls_certificate_path")
acme_config = config.get("acme", None)
if acme_config is None:
acme_config = {}
self.acme_enabled = acme_config.get("enabled", False)
self.acme_url = acme_config.get(
"url", "https://acme-v01.api.letsencrypt.org/directory"
)
self.acme_port = acme_config.get("port", 80)
self.acme_bind_addresses = acme_config.get("bind_addresses", ['::', '0.0.0.0'])
self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30)
self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))
self._original_tls_fingerprints = config["tls_fingerprints"]
self.tls_fingerprints = list(self._original_tls_fingerprints)
self.no_tls = config.get("no_tls", False)
if self.no_tls:
self.tls_private_key = None
else:
self.tls_private_key = self.read_tls_private_key(
config.get("tls_private_key_path")
)
self.tls_dh_params_path = self.check_file(
config.get("tls_dh_params_path"), "tls_dh_params"
)
self.tls_fingerprints = config["tls_fingerprints"]
# Check that our own certificate is included in the list of fingerprints
# and include it if it is not.
x509_certificate_bytes = crypto.dump_certificate(
crypto.FILETYPE_ASN1,
self.tls_certificate
)
sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
if sha256_fingerprint not in sha256_fingerprints:
self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
# This config option applies to non-federation HTTP clients
# (e.g. for talking to recaptcha, identity servers, and such)
# It should never be used in production, and is intended for
@@ -67,29 +57,152 @@ class TlsConfig(Config):
"use_insecure_ssl_client_just_for_testing_do_not_use"
)
self.tls_certificate = None
self.tls_private_key = None
def is_disk_cert_valid(self):
"""
Is the certificate we have on disk valid, and if so, for how long?
Returns:
int: Days remaining of certificate validity.
None: No certificate exists.
"""
if not os.path.exists(self.tls_certificate_file):
return None
try:
with open(self.tls_certificate_file, 'rb') as f:
cert_pem = f.read()
except Exception:
logger.exception("Failed to read existing certificate off disk!")
raise
try:
tls_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
except Exception:
logger.exception("Failed to parse existing certificate off disk!")
raise
# YYYYMMDDhhmmssZ -- in UTC
expires_on = datetime.strptime(
tls_certificate.get_notAfter().decode('ascii'), "%Y%m%d%H%M%SZ"
)
now = datetime.utcnow()
days_remaining = (expires_on - now).days
return days_remaining
def read_certificate_from_disk(self):
"""
Read the certificates from disk.
"""
self.tls_certificate = self.read_tls_certificate(self.tls_certificate_file)
# Check if it is self-signed, and issue a warning if so.
if self.tls_certificate.get_issuer() == self.tls_certificate.get_subject():
warnings.warn(
(
"Self-signed TLS certificates will not be accepted by Synapse 1.0. "
"Please either provide a valid certificate, or use Synapse's ACME "
"support to provision one."
)
)
if not self.no_tls:
self.tls_private_key = self.read_tls_private_key(self.tls_private_key_file)
self.tls_fingerprints = list(self._original_tls_fingerprints)
# Check that our own certificate is included in the list of fingerprints
# and include it if it is not.
x509_certificate_bytes = crypto.dump_certificate(
crypto.FILETYPE_ASN1, self.tls_certificate
)
sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
if sha256_fingerprint not in sha256_fingerprints:
self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
def default_config(self, config_dir_path, server_name, **kwargs):
base_key_name = os.path.join(config_dir_path, server_name)
tls_certificate_path = base_key_name + ".tls.crt"
tls_private_key_path = base_key_name + ".tls.key"
tls_dh_params_path = base_key_name + ".tls.dh"
return """\
# PEM encoded X509 certificate for TLS.
# You can replace the self-signed certificate that synapse
# autogenerates on launch with your own SSL certificate + key pair
# if you like. Any required intermediary certificates can be
# appended after the primary certificate in hierarchical order.
# this is to avoid the max line length. Sorrynotsorry
proxypassline = (
'ProxyPass /.well-known/acme-challenge '
'http://localhost:8009/.well-known/acme-challenge'
)
return (
"""\
# PEM-encoded X509 certificate for TLS.
# This certificate, as of Synapse 1.0, will need to be a valid and verifiable
# certificate, signed by a recognised Certificate Authority.
#
# See 'ACME support' below to enable auto-provisioning this certificate via
# Let's Encrypt.
#
tls_certificate_path: "%(tls_certificate_path)s"
# PEM encoded private key for TLS
# PEM-encoded private key for TLS
tls_private_key_path: "%(tls_private_key_path)s"
# PEM dh parameters for ephemeral keys
tls_dh_params_path: "%(tls_dh_params_path)s"
# ACME support: This will configure Synapse to request a valid TLS certificate
# for your configured `server_name` via Let's Encrypt.
#
# Note that provisioning a certificate in this way requires port 80 to be
# routed to Synapse so that it can complete the http-01 ACME challenge.
# By default, if you enable ACME support, Synapse will attempt to listen on
# port 80 for incoming http-01 challenges - however, this will likely fail
# with 'Permission denied' or a similar error.
#
# There are a couple of potential solutions to this:
#
# * If you already have an Apache, Nginx, or similar listening on port 80,
# you can configure Synapse to use an alternate port, and have your web
# server forward the requests. For example, assuming you set 'port: 8009'
# below, on Apache, you would write:
#
# %(proxypassline)s
#
# * Alternatively, you can use something like `authbind` to give Synapse
# permission to listen on port 80.
#
acme:
# ACME support is disabled by default. Uncomment the following line
# to enable it.
#
# enabled: true
# Don't bind to the https port
no_tls: False
# Endpoint to use to request certificates. If you only want to test,
# use Let's Encrypt's staging url:
# https://acme-staging.api.letsencrypt.org/directory
#
# url: https://acme-v01.api.letsencrypt.org/directory
# Port number to listen on for the HTTP-01 challenge. Change this if
# you are forwarding connections through Apache/Nginx/etc.
#
# port: 80
# Local addresses to listen on for incoming connections.
# Again, you may want to change this if you are forwarding connections
# through Apache/Nginx/etc.
#
# bind_addresses: ['::', '0.0.0.0']
# How many days of validity must remain on a certificate before it is renewed.
#
# reprovision_threshold: 30
# If your server runs behind a reverse-proxy which terminates TLS connections
# (for both client and federation connections), it may be useful to disable
# all TLS support for incoming connections. Setting no_tls to True will
# do so (and avoid the need to give synapse a TLS private key).
#
# no_tls: False
# List of allowed TLS fingerprints for this server to publish along
# with the signing keys for this server. Other matrix servers that
@@ -118,7 +231,10 @@ class TlsConfig(Config):
#
tls_fingerprints: []
# tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
""" % locals()
"""
% locals()
)
def read_tls_certificate(self, cert_path):
cert_pem = self.read_file(cert_path, "tls_certificate")
@@ -127,69 +243,3 @@ class TlsConfig(Config):
def read_tls_private_key(self, private_key_path):
private_key_pem = self.read_file(private_key_path, "tls_private_key")
return crypto.load_privatekey(crypto.FILETYPE_PEM, private_key_pem)
def generate_files(self, config):
tls_certificate_path = config["tls_certificate_path"]
tls_private_key_path = config["tls_private_key_path"]
tls_dh_params_path = config["tls_dh_params_path"]
if not self.path_exists(tls_private_key_path):
with open(tls_private_key_path, "wb") as private_key_file:
tls_private_key = crypto.PKey()
tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
private_key_pem = crypto.dump_privatekey(
crypto.FILETYPE_PEM, tls_private_key
)
private_key_file.write(private_key_pem)
else:
with open(tls_private_key_path) as private_key_file:
private_key_pem = private_key_file.read()
tls_private_key = crypto.load_privatekey(
crypto.FILETYPE_PEM, private_key_pem
)
if not self.path_exists(tls_certificate_path):
with open(tls_certificate_path, "wb") as certificate_file:
cert = crypto.X509()
subject = cert.get_subject()
subject.CN = config["server_name"]
cert.set_serial_number(1000)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(tls_private_key)
cert.sign(tls_private_key, 'sha256')
cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
certificate_file.write(cert_pem)
if not self.path_exists(tls_dh_params_path):
if GENERATE_DH_PARAMS:
subprocess.check_call([
"openssl", "dhparam",
"-outform", "PEM",
"-out", tls_dh_params_path,
"2048"
])
else:
with open(tls_dh_params_path, "w") as dh_params_file:
dh_params_file.write(
"2048-bit DH parameters taken from rfc3526\n"
"-----BEGIN DH PARAMETERS-----\n"
"MIIBCAKCAQEA///////////JD9qiIWjC"
"NMTGYouA3BzRKQJOCIpnzHQCC76mOxOb\n"
"IlFKCHmONATd75UZs806QxswKwpt8l8U"
"N0/hNW1tUcJF5IW1dmJefsb0TELppjft\n"
"awv/XLb0Brft7jhr+1qJn6WunyQRfEsf"
"5kkoZlHs5Fs9wgB8uKFjvwWY2kg2HFXT\n"
"mmkWP6j9JM9fg2VdI9yjrZYcYvNWIIVS"
"u57VKQdwlpZtZww1Tkq8mATxdGwIyhgh\n"
"fDKQXkYuNs474553LBgOhgObJ4Oi7Aei"
"j7XFXfBvTFLJ3ivL9pVYFxg5lUl86pVq\n"
"5RXSJhiY+gUQFXKOWoqsqmj/////////"
"/wIBAg==\n"
"-----END DH PARAMETERS-----\n"
)

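A self-contained sketch (not from the patch) of the notAfter arithmetic in is_disk_cert_valid(), assuming pyOpenSSL; it generates a throwaway self-signed certificate rather than reading one off disk:

    from datetime import datetime
    from OpenSSL import crypto  # assumes pyOpenSSL is installed

    # Throwaway self-signed certificate valid for 30 days.
    key = crypto.PKey()
    key.generate_key(crypto.TYPE_RSA, 2048)
    cert = crypto.X509()
    cert.get_subject().CN = "example.com"
    cert.set_serial_number(1000)
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(30 * 24 * 60 * 60)
    cert.set_issuer(cert.get_subject())
    cert.set_pubkey(key)
    cert.sign(key, "sha256")

    # Same parse as is_disk_cert_valid: notAfter is YYYYMMDDhhmmssZ, in UTC.
    expires_on = datetime.strptime(cert.get_notAfter().decode("ascii"), "%Y%m%d%H%M%SZ")
    print("days remaining:", (expires_on - datetime.utcnow()).days)
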
View File

@@ -17,6 +17,7 @@ from zope.interface import implementer
from OpenSSL import SSL, crypto
from twisted.internet._sslverify import _defaultCurveName
from twisted.internet.abstract import isIPAddress, isIPv6Address
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from twisted.internet.ssl import CertificateOptions, ContextFactory
from twisted.python.failure import Failure
@@ -46,8 +47,10 @@ class ServerContextFactory(ContextFactory):
if not config.no_tls:
context.use_privatekey(config.tls_private_key)
context.load_tmp_dh(config.tls_dh_params_path)
context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
# https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
context.set_cipher_list(
"ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES256:ECDH+AES128:!aNULL:!SHA1"
)
def getContext(self):
return self._context
@@ -96,8 +99,14 @@ class ClientTLSOptions(object):
def __init__(self, hostname, ctx):
self._ctx = ctx
self._hostname = hostname
self._hostnameBytes = _idnaBytes(hostname)
if isIPAddress(hostname) or isIPv6Address(hostname):
self._hostnameBytes = hostname.encode('ascii')
self._sendSNI = False
else:
self._hostnameBytes = _idnaBytes(hostname)
self._sendSNI = True
ctx.set_info_callback(
_tolerateErrors(self._identityVerifyingInfoCallback)
)
@@ -109,7 +118,9 @@ class ClientTLSOptions(object):
return connection
def _identityVerifyingInfoCallback(self, connection, where, ret):
if where & SSL.SSL_CB_HANDSHAKE_START:
# Literal IPv4 and IPv6 addresses are not permitted
# as host names according to the RFCs
if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI:
connection.set_tlsext_host_name(self._hostnameBytes)

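RFC 6066 forbids sending IP literals in SNI, which is what the isIPAddress/isIPv6Address guard enforces. A minimal sketch of the same decision using the same Twisted helpers (the real code IDNA-encodes non-IP hostnames via _idnaBytes; plain ASCII is used here for brevity):

    from twisted.internet.abstract import isIPAddress, isIPv6Address

    def sni_hostname(hostname):
        """Return the SNI value to send, or None for IP literals (RFC 6066)."""
        if isIPAddress(hostname) or isIPv6Address(hostname):
            return None
        return hostname.encode("ascii")  # simplification; real code uses IDNA

    assert sni_hostname("matrix.org") == b"matrix.org"
    assert sni_hostname("198.51.100.1") is None
    assert sni_hostname("2001:db8::1") is None
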
View File

@@ -23,14 +23,14 @@ from signedjson.sign import sign_json
from unpaddedbase64 import decode_base64, encode_base64
from synapse.api.errors import Codes, SynapseError
from synapse.events.utils import prune_event
from synapse.events.utils import prune_event, prune_event_dict
logger = logging.getLogger(__name__)
def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
"""Check whether the hash for this PDU matches the contents"""
name, expected_hash = compute_content_hash(event, hash_algorithm)
name, expected_hash = compute_content_hash(event.get_pdu_json(), hash_algorithm)
logger.debug("Expecting hash: %s", encode_base64(expected_hash))
# some malformed events lack a 'hashes'. Protect against it being missing
@@ -59,35 +59,70 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
return message_hash_bytes == expected_hash
def compute_content_hash(event, hash_algorithm):
event_json = event.get_pdu_json()
event_json.pop("age_ts", None)
event_json.pop("unsigned", None)
event_json.pop("signatures", None)
event_json.pop("hashes", None)
event_json.pop("outlier", None)
event_json.pop("destinations", None)
def compute_content_hash(event_dict, hash_algorithm):
"""Compute the content hash of an event, which is the hash of the
unredacted event.
event_json_bytes = encode_canonical_json(event_json)
Args:
event_dict (dict): The unredacted event as a dict
hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
to hash the event
Returns:
tuple[str, bytes]: A tuple of the name of hash and the hash as raw
bytes.
"""
event_dict = dict(event_dict)
event_dict.pop("age_ts", None)
event_dict.pop("unsigned", None)
event_dict.pop("signatures", None)
event_dict.pop("hashes", None)
event_dict.pop("outlier", None)
event_dict.pop("destinations", None)
event_json_bytes = encode_canonical_json(event_dict)
hashed = hash_algorithm(event_json_bytes)
return (hashed.name, hashed.digest())
def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
"""Computes the event reference hash. This is the hash of the redacted
event.
Args:
event (FrozenEvent)
hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
to hash the event
Returns:
tuple[str, bytes]: A tuple of the name of hash and the hash as raw
bytes.
"""
tmp_event = prune_event(event)
event_json = tmp_event.get_pdu_json()
event_json.pop("signatures", None)
event_json.pop("age_ts", None)
event_json.pop("unsigned", None)
event_json_bytes = encode_canonical_json(event_json)
event_dict = tmp_event.get_pdu_json()
event_dict.pop("signatures", None)
event_dict.pop("age_ts", None)
event_dict.pop("unsigned", None)
event_json_bytes = encode_canonical_json(event_dict)
hashed = hash_algorithm(event_json_bytes)
return (hashed.name, hashed.digest())
def compute_event_signature(event, signature_name, signing_key):
tmp_event = prune_event(event)
redact_json = tmp_event.get_pdu_json()
def compute_event_signature(event_dict, signature_name, signing_key):
"""Compute the signature of the event for the given name and key.
Args:
event_dict (dict): The event as a dict
signature_name (str): The name of the entity signing the event
(typically the server's hostname).
signing_key (syutil.crypto.SigningKey): The key to sign with
Returns:
dict[str, dict[str, str]]: Returns a dictionary in the same format as
an event's signatures field.
"""
redact_json = prune_event_dict(event_dict)
redact_json.pop("age_ts", None)
redact_json.pop("unsigned", None)
logger.debug("Signing event: %s", encode_canonical_json(redact_json))
@@ -96,25 +131,25 @@ def compute_event_signature(event, signature_name, signing_key):
return redact_json["signatures"]
def add_hashes_and_signatures(event, signature_name, signing_key,
def add_hashes_and_signatures(event_dict, signature_name, signing_key,
hash_algorithm=hashlib.sha256):
# if hasattr(event, "old_state_events"):
# state_json_bytes = encode_canonical_json(
# [e.event_id for e in event.old_state_events.values()]
# )
# hashed = hash_algorithm(state_json_bytes)
# event.state_hash = {
# hashed.name: encode_base64(hashed.digest())
# }
"""Add content hash and sign the event
name, digest = compute_content_hash(event, hash_algorithm=hash_algorithm)
Args:
event_dict (dict): The event to add hashes to and sign
signature_name (str): The name of the entity signing the event
(typically the server's hostname).
signing_key (syutil.crypto.SigningKey): The key to sign with
hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
to hash the event
"""
if not hasattr(event, "hashes"):
event.hashes = {}
event.hashes[name] = encode_base64(digest)
name, digest = compute_content_hash(event_dict, hash_algorithm=hash_algorithm)
event.signatures = compute_event_signature(
event,
event_dict.setdefault("hashes", {})[name] = encode_base64(digest)
event_dict["signatures"] = compute_event_signature(
event_dict,
signature_name=signature_name,
signing_key=signing_key,
)

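A standalone sketch (not from the patch) of compute_content_hash as refactored above — strip the transient fields, canonicalise the JSON, hash the result — assuming the canonicaljson package:

    import hashlib
    from canonicaljson import encode_canonical_json  # assumes canonicaljson

    def compute_content_hash(event_dict, hash_algorithm=hashlib.sha256):
        event_dict = dict(event_dict)
        for key in ("age_ts", "unsigned", "signatures", "hashes",
                    "outlier", "destinations"):
            event_dict.pop(key, None)  # transient fields are never hashed
        hashed = hash_algorithm(encode_canonical_json(event_dict))
        return hashed.name, hashed.digest()

    name, digest = compute_content_hash({
        "type": "m.room.message",
        "content": {"body": "hello"},
        "unsigned": {"age_ts": 1234},  # stripped before hashing
    })
    print(name, digest.hex())
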
View File

@@ -1,149 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from six.moves import urllib
from canonicaljson import json
from twisted.internet import defer, reactor
from twisted.internet.error import ConnectError
from twisted.internet.protocol import Factory
from twisted.names.error import DomainError
from twisted.web.http import HTTPClient
from synapse.http.endpoint import matrix_federation_endpoint
from synapse.util import logcontext
logger = logging.getLogger(__name__)
KEY_API_V2 = "/_matrix/key/v2/server/%s"
@defer.inlineCallbacks
def fetch_server_key(server_name, tls_client_options_factory, key_id):
"""Fetch the keys for a remote server."""
factory = SynapseKeyClientFactory()
factory.path = KEY_API_V2 % (urllib.parse.quote(key_id), )
factory.host = server_name
endpoint = matrix_federation_endpoint(
reactor, server_name, tls_client_options_factory, timeout=30
)
for i in range(5):
try:
with logcontext.PreserveLoggingContext():
protocol = yield endpoint.connect(factory)
server_response, server_certificate = yield protocol.remote_key
defer.returnValue((server_response, server_certificate))
except SynapseKeyClientError as e:
logger.warn("Error getting key for %r: %s", server_name, e)
if e.status.startswith(b"4"):
# Don't retry for 4xx responses.
raise IOError("Cannot get key for %r" % server_name)
except (ConnectError, DomainError) as e:
logger.warn("Error getting key for %r: %s", server_name, e)
except Exception:
logger.exception("Error getting key for %r", server_name)
raise IOError("Cannot get key for %r" % server_name)
class SynapseKeyClientError(Exception):
"""The key wasn't retrieved from the remote server."""
status = None
pass
class SynapseKeyClientProtocol(HTTPClient):
"""Low level HTTPS client which retrieves an application/json response from
the server and extracts the X.509 certificate for the remote peer from the
SSL connection."""
timeout = 30
def __init__(self):
self.remote_key = defer.Deferred()
self.host = None
self._peer = None
def connectionMade(self):
self._peer = self.transport.getPeer()
logger.debug("Connected to %s", self._peer)
if not isinstance(self.path, bytes):
self.path = self.path.encode('ascii')
if not isinstance(self.host, bytes):
self.host = self.host.encode('ascii')
self.sendCommand(b"GET", self.path)
if self.host:
self.sendHeader(b"Host", self.host)
self.endHeaders()
self.timer = reactor.callLater(
self.timeout,
self.on_timeout
)
def errback(self, error):
if not self.remote_key.called:
self.remote_key.errback(error)
def callback(self, result):
if not self.remote_key.called:
self.remote_key.callback(result)
def handleStatus(self, version, status, message):
if status != b"200":
# logger.info("Non-200 response from %s: %s %s",
# self.transport.getHost(), status, message)
error = SynapseKeyClientError(
"Non-200 response %r from %r" % (status, self.host)
)
error.status = status
self.errback(error)
self.transport.abortConnection()
def handleResponse(self, response_body_bytes):
try:
json_response = json.loads(response_body_bytes)
except ValueError:
# logger.info("Invalid JSON response from %s",
# self.transport.getHost())
self.transport.abortConnection()
return
certificate = self.transport.getPeerCertificate()
self.callback((json_response, certificate))
self.transport.abortConnection()
self.timer.cancel()
def on_timeout(self):
logger.debug(
"Timeout waiting for response from %s: %s",
self.host, self._peer,
)
self.errback(IOError("Timeout waiting for response"))
self.transport.abortConnection()
class SynapseKeyClientFactory(Factory):
def protocol(self):
protocol = SynapseKeyClientProtocol()
protocol.path = self.path
protocol.host = self.host
return protocol

View File

@@ -14,10 +14,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
from collections import namedtuple
from six.moves import urllib
from signedjson.key import (
decode_verify_key_bytes,
encode_verify_key_base64,
@@ -30,13 +31,11 @@ from signedjson.sign import (
signature_ids,
verify_signed_json,
)
from unpaddedbase64 import decode_base64, encode_base64
from unpaddedbase64 import decode_base64
from OpenSSL import crypto
from twisted.internet import defer
from synapse.api.errors import Codes, SynapseError
from synapse.crypto.keyclient import fetch_server_key
from synapse.util import logcontext, unwrapFirstError
from synapse.util.logcontext import (
LoggingContext,
@@ -503,31 +502,16 @@ class Keyring(object):
if requested_key_id in keys:
continue
(response, tls_certificate) = yield fetch_server_key(
server_name, self.hs.tls_client_options_factory, requested_key_id
response = yield self.client.get_json(
destination=server_name,
path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
ignore_backoff=True,
)
if (u"signatures" not in response
or server_name not in response[u"signatures"]):
raise KeyLookupError("Key response not signed by remote server")
if "tls_fingerprints" not in response:
raise KeyLookupError("Key response missing TLS fingerprints")
certificate_bytes = crypto.dump_certificate(
crypto.FILETYPE_ASN1, tls_certificate
)
sha256_fingerprint = hashlib.sha256(certificate_bytes).digest()
sha256_fingerprint_b64 = encode_base64(sha256_fingerprint)
response_sha256_fingerprints = set()
for fingerprint in response[u"tls_fingerprints"]:
if u"sha256" in fingerprint:
response_sha256_fingerprints.add(fingerprint[u"sha256"])
if sha256_fingerprint_b64 not in response_sha256_fingerprints:
raise KeyLookupError("TLS certificate not allowed by fingerprints")
response_keys = yield self.process_v2_response(
from_server=server_name,
requested_ids=[requested_key_id],

View File

@@ -20,17 +20,25 @@ from signedjson.key import decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from unpaddedbase64 import decode_base64
from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, JoinRules, Membership
from synapse.api.constants import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
EventTypes,
JoinRules,
Membership,
RoomVersions,
)
from synapse.api.errors import AuthError, EventSizeError, SynapseError
from synapse.types import UserID, get_domain_from_id
logger = logging.getLogger(__name__)
def check(event, auth_events, do_sig_check=True, do_size_check=True):
def check(room_version, event, auth_events, do_sig_check=True, do_size_check=True):
""" Checks if this event is correctly authed.
Args:
room_version (str): the version of the room
event: the event being checked.
auth_events (dict: event-key -> event): the existing room state.
@@ -48,7 +56,6 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
if do_sig_check:
sender_domain = get_domain_from_id(event.sender)
event_id_domain = get_domain_from_id(event.event_id)
is_invite_via_3pid = (
event.type == EventTypes.Member
@@ -65,9 +72,13 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
if not is_invite_via_3pid:
raise AuthError(403, "Event not signed by sender's server")
# Check the event_id's domain has signed the event
if not event.signatures.get(event_id_domain):
raise AuthError(403, "Event not signed by sending server")
if event.format_version in (EventFormatVersions.V1,):
# Only older room versions have domains in their event IDs to check.
event_id_domain = get_domain_from_id(event.event_id)
# Check the origin domain has signed the event
if not event.signatures.get(event_id_domain):
raise AuthError(403, "Event not signed by sending server")
if auth_events is None:
# Oh, we don't know what the state of the room was, so we
@@ -167,7 +178,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
_check_power_levels(event, auth_events)
if event.type == EventTypes.Redaction:
check_redaction(event, auth_events)
check_redaction(room_version, event, auth_events)
logger.debug("Allowing! %s", event)
@@ -421,7 +432,7 @@ def _can_send_event(event, auth_events):
return True
def check_redaction(event, auth_events):
def check_redaction(room_version, event, auth_events):
"""Check whether the event sender is allowed to redact the target event.
Returns:
@@ -441,10 +452,16 @@ def check_redaction(event, auth_events):
if user_level >= redact_level:
return False
redacter_domain = get_domain_from_id(event.event_id)
redactee_domain = get_domain_from_id(event.redacts)
if redacter_domain == redactee_domain:
if room_version in (RoomVersions.V1, RoomVersions.V2,):
redacter_domain = get_domain_from_id(event.event_id)
redactee_domain = get_domain_from_id(event.redacts)
if redacter_domain == redactee_domain:
return True
elif room_version == RoomVersions.V3:
event.internal_metadata.recheck_redaction = True
return True
else:
raise RuntimeError("Unrecognized room version %r" % (room_version,))
raise AuthError(
403,

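The room-version gate exists because v3 event IDs are hashes with no server part, so the redacter/redactee domain comparison is impossible up front and is deferred via recheck_redaction. A simplified, hypothetical sketch of the branch:

    def get_domain_from_id(string_id):
        # "$eventid:example.com" -> "example.com" (v1/v2-style IDs only)
        return string_id.split(":", 1)[1]

    def same_origin_redaction(room_version, redaction_event_id, redacts_event_id):
        if room_version in ("1", "2"):
            # Old-format event IDs carry the origin server: compare now.
            return (get_domain_from_id(redaction_event_id)
                    == get_domain_from_id(redacts_event_id))
        # v3 event IDs are hashes with no domain; accept and recheck later.
        return True

    assert same_origin_redaction("1", "$abc:example.com", "$def:example.com")
    assert not same_origin_redaction("1", "$abc:example.com", "$def:other.org")
    assert same_origin_redaction("3", "$hashA", "$hashB")
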
View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,6 +19,9 @@ from distutils.util import strtobool
import six
from unpaddedbase64 import encode_base64
from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventFormatVersions, RoomVersions
from synapse.util.caches import intern_dict
from synapse.util.frozenutils import freeze
@@ -41,8 +45,13 @@ class _EventInternalMetadata(object):
def is_outlier(self):
return getattr(self, "outlier", False)
def is_invite_from_remote(self):
return getattr(self, "invite_from_remote", False)
def is_out_of_band_membership(self):
"""Whether this is an out of band membership, like an invite or an invite
rejection. This is needed as those events are marked as outliers, but
they still need to be processed as if they're new events (e.g. updating
invite state in the database, relaying to clients, etc).
"""
return getattr(self, "out_of_band_membership", False)
def get_send_on_behalf_of(self):
"""Whether this server should send the event on behalf of another server.
@@ -53,6 +62,21 @@ class _EventInternalMetadata(object):
"""
return getattr(self, "send_on_behalf_of", None)
def need_to_check_redaction(self):
"""Whether the redaction event needs to be rechecked when fetching
from the database.
Starting in room v3 redaction events are accepted up front, and later
checked to see if the redacter and redactee's domains match.
If the sender of the redaction event is allowed to redact any event
due to auth rules, then this will always return false.
Returns:
bool
"""
return getattr(self, "recheck_redaction", False)
def _event_dict_property(key):
# We want to be able to use hasattr with the event dict properties.
@@ -179,6 +203,8 @@ class EventBase(object):
class FrozenEvent(EventBase):
format_version = EventFormatVersions.V1 # All events of this type are V1
def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
event_dict = dict(event_dict)
@@ -213,16 +239,6 @@ class FrozenEvent(EventBase):
rejected_reason=rejected_reason,
)
@staticmethod
def from_event(event):
e = FrozenEvent(
event.get_pdu_json()
)
e.internal_metadata = event.internal_metadata
return e
def __str__(self):
return self.__repr__()
@@ -232,3 +248,127 @@ class FrozenEvent(EventBase):
self.get("type", None),
self.get("state_key", None),
)
class FrozenEventV2(EventBase):
format_version = EventFormatVersions.V2 # All events of this type are V2
def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
event_dict = dict(event_dict)
# Signatures is a dict of dicts, and this is faster than doing a
# copy.deepcopy
signatures = {
name: {sig_id: sig for sig_id, sig in sigs.items()}
for name, sigs in event_dict.pop("signatures", {}).items()
}
assert "event_id" not in event_dict
unsigned = dict(event_dict.pop("unsigned", {}))
# We intern these strings because they turn up a lot (especially when
# caching).
event_dict = intern_dict(event_dict)
if USE_FROZEN_DICTS:
frozen_dict = freeze(event_dict)
else:
frozen_dict = event_dict
self._event_id = None
self.type = event_dict["type"]
if "state_key" in event_dict:
self.state_key = event_dict["state_key"]
super(FrozenEventV2, self).__init__(
frozen_dict,
signatures=signatures,
unsigned=unsigned,
internal_metadata_dict=internal_metadata_dict,
rejected_reason=rejected_reason,
)
@property
def event_id(self):
# We have to import this here as otherwise we get an import loop which
# is hard to break.
from synapse.crypto.event_signing import compute_event_reference_hash
if self._event_id:
return self._event_id
self._event_id = "$" + encode_base64(compute_event_reference_hash(self)[1])
return self._event_id
def prev_event_ids(self):
"""Returns the list of prev event IDs. The order matches the order
specified in the event, though there is no meaning to it.
Returns:
list[str]: The list of event IDs of this event's prev_events
"""
return self.prev_events
def auth_event_ids(self):
"""Returns the list of auth event IDs. The order matches the order
specified in the event, though there is no meaning to it.
Returns:
list[str]: The list of event IDs of this event's auth_events
"""
return self.auth_events
def __str__(self):
return self.__repr__()
def __repr__(self):
return "<FrozenEventV2 event_id='%s', type='%s', state_key='%s'>" % (
self.event_id,
self.get("type", None),
self.get("state_key", None),
)
def room_version_to_event_format(room_version):
"""Converts a room version string to the event format
Args:
room_version (str)
Returns:
int
"""
if room_version not in KNOWN_ROOM_VERSIONS:
# We should have already checked version, so this should not happen
raise RuntimeError("Unrecognized room version %s" % (room_version,))
if room_version in (
RoomVersions.V1, RoomVersions.V2, RoomVersions.STATE_V2_TEST,
):
return EventFormatVersions.V1
elif room_version in (RoomVersions.V3,):
return EventFormatVersions.V2
else:
raise RuntimeError("Unrecognized room version %s" % (room_version,))
def event_type_from_format_version(format_version):
"""Returns the python type to use to construct an Event object for the
given event format version.
Args:
format_version (int): The event format version
Returns:
type: A type that can be initialized as per the initializer of
`FrozenEvent`
"""
if format_version == EventFormatVersions.V1:
return FrozenEvent
elif format_version == EventFormatVersions.V2:
return FrozenEventV2
else:
raise Exception(
"No event format %r" % (format_version,)
)

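The encoding step behind FrozenEventV2.event_id — "$" plus the unpadded-base64 reference hash — sketched with a hypothetical digest standing in for compute_event_reference_hash:

    import hashlib
    from unpaddedbase64 import encode_base64  # assumes unpaddedbase64

    # Hypothetical stand-in for the sha256 reference hash of the redacted event.
    reference_hash = hashlib.sha256(b"<canonical redacted event json>").digest()

    event_id = "$" + encode_base64(reference_hash)
    print(event_id)  # no ":servername" suffix, unlike v1 event IDs
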
View File

@@ -13,63 +13,270 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import attr
from twisted.internet import defer
from synapse.api.constants import (
KNOWN_EVENT_FORMAT_VERSIONS,
KNOWN_ROOM_VERSIONS,
MAX_DEPTH,
EventFormatVersions,
)
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.types import EventID
from synapse.util.stringutils import random_string
from . import EventBase, FrozenEvent, _event_dict_property
from . import (
_EventInternalMetadata,
event_type_from_format_version,
room_version_to_event_format,
)
class EventBuilder(EventBase):
def __init__(self, key_values={}, internal_metadata_dict={}):
signatures = copy.deepcopy(key_values.pop("signatures", {}))
unsigned = copy.deepcopy(key_values.pop("unsigned", {}))
@attr.s(slots=True, cmp=False, frozen=True)
class EventBuilder(object):
"""A format independent event builder used to build up the event content
before signing the event.
super(EventBuilder, self).__init__(
key_values,
signatures=signatures,
unsigned=unsigned,
internal_metadata_dict=internal_metadata_dict,
(Note that while objects of this class are frozen, the
content/unsigned/internal_metadata fields are still mutable)
Attributes:
format_version (int): Event format version
room_id (str)
type (str)
sender (str)
content (dict)
unsigned (dict)
internal_metadata (_EventInternalMetadata)
_state (StateHandler)
_auth (synapse.api.Auth)
_store (DataStore)
_clock (Clock)
_hostname (str): The hostname of the server creating the event
_signing_key: The signing key to use to sign the event as the server
"""
_state = attr.ib()
_auth = attr.ib()
_store = attr.ib()
_clock = attr.ib()
_hostname = attr.ib()
_signing_key = attr.ib()
format_version = attr.ib()
room_id = attr.ib()
type = attr.ib()
sender = attr.ib()
content = attr.ib(default=attr.Factory(dict))
unsigned = attr.ib(default=attr.Factory(dict))
# These only exist on a subset of events, so they raise AttributeError if
# someone tries to get them when they don't exist.
_state_key = attr.ib(default=None)
_redacts = attr.ib(default=None)
internal_metadata = attr.ib(default=attr.Factory(lambda: _EventInternalMetadata({})))
@property
def state_key(self):
if self._state_key is not None:
return self._state_key
raise AttributeError("state_key")
def is_state(self):
return self._state_key is not None
@defer.inlineCallbacks
def build(self, prev_event_ids):
"""Transform into a fully signed and hashed event
Args:
prev_event_ids (list[str]): The event IDs to use as the prev events
Returns:
Deferred[FrozenEvent]
"""
state_ids = yield self._state.get_current_state_ids(
self.room_id, prev_event_ids,
)
auth_ids = yield self._auth.compute_auth_events(
self, state_ids,
)
event_id = _event_dict_property("event_id")
state_key = _event_dict_property("state_key")
type = _event_dict_property("type")
if self.format_version == EventFormatVersions.V1:
auth_events = yield self._store.add_event_hashes(auth_ids)
prev_events = yield self._store.add_event_hashes(prev_event_ids)
else:
auth_events = auth_ids
prev_events = prev_event_ids
def build(self):
return FrozenEvent.from_event(self)
old_depth = yield self._store.get_max_depth_of(
prev_event_ids,
)
depth = old_depth + 1
# we cap depth of generated events, to ensure that they are not
# rejected by other servers (and so that they can be persisted in
# the db)
depth = min(depth, MAX_DEPTH)
event_dict = {
"auth_events": auth_events,
"prev_events": prev_events,
"type": self.type,
"room_id": self.room_id,
"sender": self.sender,
"content": self.content,
"unsigned": self.unsigned,
"depth": depth,
"prev_state": [],
}
if self.is_state():
event_dict["state_key"] = self._state_key
if self._redacts is not None:
event_dict["redacts"] = self._redacts
defer.returnValue(
create_local_event_from_event_dict(
clock=self._clock,
hostname=self._hostname,
signing_key=self._signing_key,
format_version=self.format_version,
event_dict=event_dict,
internal_metadata_dict=self.internal_metadata.get_dict(),
)
)
class EventBuilderFactory(object):
def __init__(self, clock, hostname):
    self.clock = clock
    self.hostname = hostname
    self.event_id_count = 0

def create_event_id(self):
    i = str(self.event_id_count)
    self.event_id_count += 1
    local_part = str(int(self.clock.time())) + i + random_string(5)
    e_id = EventID(local_part, self.hostname)
    return e_id.to_string()

def new(self, key_values={}):
    key_values["event_id"] = self.create_event_id()
    time_now = int(self.clock.time_msec())
    key_values.setdefault("origin", self.hostname)
    key_values.setdefault("origin_server_ts", time_now)
    key_values.setdefault("unsigned", {})
    age = key_values["unsigned"].pop("age", 0)
    key_values["unsigned"].setdefault("age_ts", time_now - age)
    key_values["signatures"] = {}
    return EventBuilder(key_values=key_values,)

def __init__(self, hs):
    self.clock = hs.get_clock()
    self.hostname = hs.hostname
    self.signing_key = hs.config.signing_key[0]
    self.store = hs.get_datastore()
    self.state = hs.get_state_handler()
    self.auth = hs.get_auth()

def new(self, room_version, key_values):
    """Generate an event builder appropriate for the given room version

    Args:
        room_version (str): Version of the room that we're creating an
            event builder for
        key_values (dict): Fields used as the basis of the new event

    Returns:
        EventBuilder
    """

    # There's currently only the one event version defined
    if room_version not in KNOWN_ROOM_VERSIONS:
        raise Exception(
            "No event format defined for version %r" % (room_version,)
        )

    return EventBuilder(
        store=self.store,
        state=self.state,
        auth=self.auth,
        clock=self.clock,
        hostname=self.hostname,
        signing_key=self.signing_key,
        format_version=room_version_to_event_format(room_version),
        type=key_values["type"],
        state_key=key_values.get("state_key"),
        room_id=key_values["room_id"],
        sender=key_values["sender"],
        content=key_values.get("content", {}),
        unsigned=key_values.get("unsigned", {}),
        redacts=key_values.get("redacts", None),
    )


def create_local_event_from_event_dict(clock, hostname, signing_key,
                                       format_version, event_dict,
                                       internal_metadata_dict=None):
    """Takes a fully formed event dict, ensuring that fields like `origin`
    and `origin_server_ts` have correct values for a locally produced event,
    then signs and hashes it.

    Args:
        clock (Clock)
        hostname (str)
        signing_key
        format_version (int)
        event_dict (dict)
        internal_metadata_dict (dict|None)

    Returns:
        FrozenEvent
    """
# There's currently only the one event version defined
if format_version not in KNOWN_EVENT_FORMAT_VERSIONS:
raise Exception(
"No event format defined for version %r" % (format_version,)
)
if internal_metadata_dict is None:
internal_metadata_dict = {}
time_now = int(clock.time_msec())
if format_version == EventFormatVersions.V1:
event_dict["event_id"] = _create_event_id(clock, hostname)
event_dict["origin"] = hostname
event_dict["origin_server_ts"] = time_now
event_dict.setdefault("unsigned", {})
age = event_dict["unsigned"].pop("age", 0)
event_dict["unsigned"].setdefault("age_ts", time_now - age)
event_dict.setdefault("signatures", {})
add_hashes_and_signatures(
event_dict,
hostname,
signing_key,
)
return event_type_from_format_version(format_version)(
event_dict, internal_metadata_dict=internal_metadata_dict,
)
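Note the age handling above: a relative `age` supplied by the caller is converted into an absolute `age_ts`, which serialize_event later turns back into a relative age. A hypothetical trace:

# time_now = 1548849600000 and incoming unsigned = {"age": 5000}
# -> unsigned becomes {"age_ts": 1548849595000}
# serialize_event later reverses this: age = time_now_ms - age_ts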
# A counter used when generating new event IDs
_event_id_counter = 0
def _create_event_id(clock, hostname):
"""Create a new event ID
Args:
clock (Clock)
hostname (str): The server name for the event ID
Returns:
str
"""
global _event_id_counter
i = str(_event_id_counter)
_event_id_counter += 1
local_part = str(int(clock.time())) + i + random_string(5)
e_id = EventID(local_part, hostname)
return e_id.to_string()
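The local part concatenates the current unix time, a process-local counter and five random characters, so v1 event IDs take the shape `$<ts><counter><random>:<hostname>`. A self-contained sketch of the same scheme (the stand-ins for Synapse's `random_string` and `EventID` helpers are assumptions here):

import random
import string
import time

_counter = 0

def _random_string(length):
    # stand-in for synapse.util.stringutils.random_string
    return "".join(random.choice(string.ascii_letters) for _ in range(length))

def create_event_id(hostname):
    global _counter
    i = str(_counter)
    _counter += 1
    local_part = str(int(time.time())) + i + _random_string(5)
    return "$%s:%s" % (local_part, hostname)

# create_event_id("example.org") -> e.g. "$15488496000KtXbQ:example.org"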

View File

@@ -38,8 +38,31 @@ def prune_event(event):
This is used when we "redact" an event. We want to remove all fields that
the user has specified, but we do want to keep necessary information like
type, state_key etc.
Args:
event (FrozenEvent)
Returns:
FrozenEvent
"""
pruned_event_dict = prune_event_dict(event.get_dict())
from . import event_type_from_format_version
return event_type_from_format_version(event.format_version)(
pruned_event_dict, event.internal_metadata.get_dict()
)
def prune_event_dict(event_dict):
"""Redacts the event_dict in the same way as `prune_event`, except it
operates on dicts rather than event objects
Args:
event_dict (dict)
Returns:
dict: A copy of the pruned event dict
"""
event_type = event.type
allowed_keys = [
"event_id",
@@ -59,13 +82,13 @@ def prune_event(event):
"membership",
]
event_dict = event.get_dict()
event_type = event_dict["type"]
new_content = {}
def add_fields(*fields):
for field in fields:
if field in event.content:
if field in event_dict["content"]:
new_content[field] = event_dict["content"][field]
if event_type == EventTypes.Member:
@@ -98,17 +121,17 @@ def prune_event(event):
allowed_fields["content"] = new_content
allowed_fields["unsigned"] = {}
unsigned = {}
allowed_fields["unsigned"] = unsigned
if "age_ts" in event.unsigned:
allowed_fields["unsigned"]["age_ts"] = event.unsigned["age_ts"]
if "replaces_state" in event.unsigned:
allowed_fields["unsigned"]["replaces_state"] = event.unsigned["replaces_state"]
event_unsigned = event_dict.get("unsigned", {})
return type(event)(
allowed_fields,
internal_metadata_dict=event.internal_metadata.get_dict()
)
if "age_ts" in event_unsigned:
unsigned["age_ts"] = event_unsigned["age_ts"]
if "replaces_state" in event_unsigned:
unsigned["replaces_state"] = event_unsigned["replaces_state"]
return allowed_fields
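As an illustration, a hypothetical m.room.member event run through prune_event_dict keeps only the membership field of its content, and only the whitelisted unsigned keys:

event_dict = {
    "event_id": "$abc:example.org",
    "type": "m.room.member",
    "room_id": "!room:example.org",
    "sender": "@alice:example.org",
    "state_key": "@alice:example.org",
    "content": {"membership": "join", "displayname": "Alice"},
    "unsigned": {"age_ts": 1548849600000, "foo": "bar"},
}
pruned = prune_event_dict(event_dict)
# pruned["content"] == {"membership": "join"}        (displayname redacted)
# pruned["unsigned"] == {"age_ts": 1548849600000}    ("foo" dropped)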
def _copy_field(src, dst, field):
@@ -244,6 +267,7 @@ def serialize_event(e, time_now_ms, as_client_event=True,
Returns:
dict
"""
# FIXME(erikj): To handle the case of presence events and the like
if not isinstance(e, EventBase):
return e
@@ -253,6 +277,8 @@ def serialize_event(e, time_now_ms, as_client_event=True,
# Should this strip out None's?
d = {k: v for k, v in e.get_dict().items()}
d["event_id"] = e.event_id
if "age_ts" in d["unsigned"]:
d["unsigned"]["age"] = time_now_ms - d["unsigned"]["age_ts"]
del d["unsigned"]["age_ts"]

View File

@@ -15,23 +15,29 @@
from six import string_types
from synapse.api.constants import EventTypes, Membership
from synapse.api.constants import EventFormatVersions, EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.types import EventID, RoomID, UserID
class EventValidator(object):
def validate(self, event):
    EventID.from_string(event.event_id)
    RoomID.from_string(event.room_id)

def validate_new(self, event):
    """Validates the event has roughly the right format

    Args:
        event (FrozenEvent)
    """
    self.validate_builder(event)

    if event.format_version == EventFormatVersions.V1:
        EventID.from_string(event.event_id)
required = [
# "auth_events",
"auth_events",
"content",
# "hashes",
"hashes",
"origin",
# "prev_events",
"prev_events",
"sender",
"type",
]
@@ -41,8 +47,25 @@ class EventValidator(object):
raise SynapseError(400, "Event does not have key %s" % (k,))
# Check that the following keys have string values
strings = [
event_strings = [
"origin",
]
for s in event_strings:
if not isinstance(getattr(event, s), string_types):
raise SynapseError(400, "'%s' not a string type" % (s,))
def validate_builder(self, event):
"""Validates that the builder/event has roughly the right format. Only
checks values that we expect a proto event to have, rather than all the
fields an event would have
Args:
event (EventBuilder|FrozenEvent)
"""
strings = [
"room_id",
"sender",
"type",
]
@@ -54,22 +77,7 @@ class EventValidator(object):
if not isinstance(getattr(event, s), string_types):
raise SynapseError(400, "Not '%s' a string type" % (s,))
if event.type == EventTypes.Member:
if "membership" not in event.content:
raise SynapseError(400, "Content has not membership key")
if event.content["membership"] not in Membership.LIST:
raise SynapseError(400, "Invalid membership key")
# Check that the following keys have dictionary values
# TODO
# Check that the following keys have the correct format for DAGs
# TODO
def validate_new(self, event):
self.validate(event)
RoomID.from_string(event.room_id)
UserID.from_string(event.sender)
if event.type == EventTypes.Message:
@@ -86,9 +94,16 @@ class EventValidator(object):
elif event.type == EventTypes.Name:
self._ensure_strings(event.content, ["name"])
elif event.type == EventTypes.Member:
if "membership" not in event.content:
raise SynapseError(400, "Content has not membership key")
if event.content["membership"] not in Membership.LIST:
raise SynapseError(400, "Invalid membership key")
def _ensure_strings(self, d, keys):
for s in keys:
if s not in d:
raise SynapseError(400, "'%s' not in content" % (s,))
if not isinstance(d[s], string_types):
raise SynapseError(400, "Not '%s' a string type" % (s,))
raise SynapseError(400, "'%s' not a string type" % (s,))

View File

@@ -20,10 +20,10 @@ import six
from twisted.internet import defer
from twisted.internet.defer import DeferredList
from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.constants import MAX_DEPTH, EventTypes, Membership, RoomVersions
from synapse.api.errors import Codes, SynapseError
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import FrozenEvent
from synapse.events import event_type_from_format_version
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict
from synapse.types import get_domain_from_id
@@ -43,8 +43,8 @@ class FederationBase(object):
self._clock = hs.get_clock()
@defer.inlineCallbacks
def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
include_none=False):
def _check_sigs_and_hash_and_fetch(self, origin, pdus, room_version,
outlier=False, include_none=False):
"""Takes a list of PDUs and checks the signatures and hashs of each
one. If a PDU fails its signature check then we check if we have it in
the database and if not then request if from the originating server of
@@ -56,13 +56,17 @@ class FederationBase(object):
a new list.
Args:
origin (str)
pdu (list)
outlier (bool)
room_version (str)
outlier (bool): Whether the events are outliers or not
include_none (bool): Whether to include None in the returned list
for events that have failed their checks
Returns:
Deferred : A list of PDUs that have valid signatures and hashes.
"""
deferreds = self._check_sigs_and_hashes(pdus)
deferreds = self._check_sigs_and_hashes(room_version, pdus)
@defer.inlineCallbacks
def handle_check_result(pdu, deferred):
@@ -84,6 +88,7 @@ class FederationBase(object):
res = yield self.get_pdu(
destinations=[pdu.origin],
event_id=pdu.event_id,
room_version=room_version,
outlier=outlier,
timeout=10000,
)
@@ -116,16 +121,17 @@ class FederationBase(object):
else:
defer.returnValue([p for p in valid_pdus if p])
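In other words, each PDU ends up in one of three states; a summary of the outcomes as described by the docstring above:

# per-PDU outcome in _check_sigs_and_hash_and_fetch:
#   signature check passes             -> PDU used as received
#   check fails, re-fetch from origin  -> re-fetched copy used instead
#   check and re-fetch both fail       -> dropped (or None, if include_none)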
def _check_sigs_and_hash(self, pdu):
def _check_sigs_and_hash(self, room_version, pdu):
return logcontext.make_deferred_yieldable(
self._check_sigs_and_hashes([pdu])[0],
self._check_sigs_and_hashes(room_version, [pdu])[0],
)
def _check_sigs_and_hashes(self, pdus):
def _check_sigs_and_hashes(self, room_version, pdus):
"""Checks that each of the received events is correctly signed by the
sending server.
Args:
room_version (str): The room version of the PDUs
pdus (list[FrozenEvent]): the events to be checked
Returns:
@@ -136,7 +142,7 @@ class FederationBase(object):
* throws a SynapseError if the signature check failed.
The deferreds run their callbacks in the sentinel logcontext.
"""
deferreds = _check_sigs_on_pdus(self.keyring, pdus)
deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)
ctx = logcontext.LoggingContext.current_context()
@@ -198,16 +204,17 @@ class FederationBase(object):
class PduToCheckSig(namedtuple("PduToCheckSig", [
"pdu", "redacted_pdu_json", "event_id_domain", "sender_domain", "deferreds",
"pdu", "redacted_pdu_json", "sender_domain", "deferreds",
])):
pass
def _check_sigs_on_pdus(keyring, pdus):
def _check_sigs_on_pdus(keyring, room_version, pdus):
"""Check that the given events are correctly signed
Args:
keyring (synapse.crypto.Keyring): keyring object to do the checks
room_version (str): the room version of the PDUs
pdus (Collection[EventBase]): the events to be checked
Returns:
@@ -220,9 +227,7 @@ def _check_sigs_on_pdus(keyring, pdus):
# we want to check that the event is signed by:
#
# (a) the server which created the event_id
#
# (b) the sender's server.
# (a) the sender's server
#
# - except in the case of invites created from a 3pid invite, which are exempt
# from this check, because the sender has to match that of the original 3pid
@@ -236,34 +241,26 @@ def _check_sigs_on_pdus(keyring, pdus):
# and signatures are *supposed* to be valid whether or not an event has been
# redacted. But this isn't the worst of the ways that 3pid invites are broken.
#
# (b) for V1 and V2 rooms, the server which created the event_id
#
# let's start by getting the domain for each pdu, and flattening the event back
# to JSON.
pdus_to_check = [
PduToCheckSig(
pdu=p,
redacted_pdu_json=prune_event(p).get_pdu_json(),
event_id_domain=get_domain_from_id(p.event_id),
sender_domain=get_domain_from_id(p.sender),
deferreds=[],
)
for p in pdus
]
# first make sure that the event is signed by the event_id's domain
deferreds = keyring.verify_json_objects_for_server([
(p.event_id_domain, p.redacted_pdu_json)
for p in pdus_to_check
])
for p, d in zip(pdus_to_check, deferreds):
p.deferreds.append(d)
# now let's look for events where the sender's domain is different to the
# event id's domain (normally only the case for joins/leaves), and add additional
# checks.
# First we check that the sender event is signed by the sender's domain
# (except if its a 3pid invite, in which case it may be sent by any server)
pdus_to_check_sender = [
p for p in pdus_to_check
if p.sender_domain != p.event_id_domain and not _is_invite_via_3pid(p.pdu)
if not _is_invite_via_3pid(p.pdu)
]
more_deferreds = keyring.verify_json_objects_for_server([
@@ -274,19 +271,43 @@ def _check_sigs_on_pdus(keyring, pdus):
for p, d in zip(pdus_to_check_sender, more_deferreds):
p.deferreds.append(d)
# now let's look for events where the sender's domain is different to the
# event id's domain (normally only the case for joins/leaves), and add additional
# checks. Only do this if the room version has a concept of event ID domain
if room_version in (
RoomVersions.V1, RoomVersions.V2, RoomVersions.STATE_V2_TEST,
):
pdus_to_check_event_id = [
p for p in pdus_to_check
if p.sender_domain != get_domain_from_id(p.pdu.event_id)
]
more_deferreds = keyring.verify_json_objects_for_server([
(get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json)
for p in pdus_to_check_event_id
])
for p, d in zip(pdus_to_check_event_id, more_deferreds):
p.deferreds.append(d)
elif room_version in (RoomVersions.V3,):
pass # No further checks needed, as event IDs are hashes here
else:
raise RuntimeError("Unrecognized room version %s" % (room_version,))
# replace lists of deferreds with single Deferreds
return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]
def _flatten_deferred_list(deferreds):
"""Given a list of one or more deferreds, either return the single deferred, or
combine into a DeferredList.
"""Given a list of deferreds, either return the single deferred,
combine into a DeferredList, or return an already resolved deferred.
"""
if len(deferreds) > 1:
return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
else:
assert len(deferreds) == 1
elif len(deferreds) == 1:
return deferreds[0]
else:
return defer.succeed(None)
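A minimal illustration of the three branches (assuming d1 and d2 are pending twisted Deferreds):

_flatten_deferred_list([])        # -> an already-fired Deferred(None)
_flatten_deferred_list([d1])      # -> d1 itself
_flatten_deferred_list([d1, d2])  # -> DeferredList failing fast on first error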
def _is_invite_via_3pid(event):
@@ -297,11 +318,12 @@ def _is_invite_via_3pid(event):
)
def event_from_pdu_json(pdu_json, outlier=False):
def event_from_pdu_json(pdu_json, event_format_version, outlier=False):
"""Construct a FrozenEvent from an event json received over federation
Args:
pdu_json (object): pdu as received over federation
event_format_version (int): The event format version
outlier (bool): True to mark this event as an outlier
Returns:
@@ -313,7 +335,7 @@ def event_from_pdu_json(pdu_json, outlier=False):
"""
# we could probably enforce a bunch of other fields here (room_id, sender,
# origin, etc etc)
assert_params_in_dict(pdu_json, ('event_id', 'type', 'depth'))
assert_params_in_dict(pdu_json, ('type', 'depth'))
depth = pdu_json['depth']
if not isinstance(depth, six.integer_types):
@@ -325,8 +347,8 @@ def event_from_pdu_json(pdu_json, outlier=False):
elif depth > MAX_DEPTH:
raise SynapseError(400, "Depth too large", Codes.BAD_JSON)
event = FrozenEvent(
pdu_json
event = event_type_from_format_version(event_format_version)(
pdu_json,
)
event.internal_metadata.outlier = outlier

View File

@@ -25,14 +25,19 @@ from prometheus_client import Counter
from twisted.internet import defer
from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
from synapse.api.constants import (
KNOWN_ROOM_VERSIONS,
EventTypes,
Membership,
RoomVersions,
)
from synapse.api.errors import (
CodeMessageException,
FederationDeniedError,
HttpResponseException,
SynapseError,
)
from synapse.events import builder
from synapse.events import builder, room_version_to_event_format
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.util import logcontext, unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
@@ -66,6 +71,9 @@ class FederationClient(FederationBase):
self.state = hs.get_state_handler()
self.transport_layer = hs.get_federation_transport_client()
self.hostname = hs.hostname
self.signing_key = hs.config.signing_key[0]
self._get_pdu_cache = ExpiringCache(
cache_name="get_pdu_cache",
clock=self._clock,
@@ -162,13 +170,13 @@ class FederationClient(FederationBase):
@defer.inlineCallbacks
@log_function
def backfill(self, dest, context, limit, extremities):
def backfill(self, dest, room_id, limit, extremities):
"""Requests some more historic PDUs for the given context from the
given destination server.
Args:
dest (str): The remote home server to ask.
context (str): The context to backfill.
room_id (str): The room_id to backfill.
limit (int): The maximum number of PDUs to return.
extremities (list): List of PDU id and origins of the first pdus
we have seen from the context
@@ -183,18 +191,21 @@ class FederationClient(FederationBase):
return
transaction_data = yield self.transport_layer.backfill(
dest, context, extremities, limit)
dest, room_id, extremities, limit)
logger.debug("backfill transaction_data=%s", repr(transaction_data))
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
pdus = [
event_from_pdu_json(p, outlier=False)
event_from_pdu_json(p, format_ver, outlier=False)
for p in transaction_data["pdus"]
]
# FIXME: We should handle signature failures more gracefully.
pdus[:] = yield logcontext.make_deferred_yieldable(defer.gatherResults(
self._check_sigs_and_hashes(pdus),
self._check_sigs_and_hashes(room_version, pdus),
consumeErrors=True,
).addErrback(unwrapFirstError))
@@ -202,7 +213,8 @@ class FederationClient(FederationBase):
@defer.inlineCallbacks
@log_function
def get_pdu(self, destinations, event_id, outlier=False, timeout=None):
def get_pdu(self, destinations, event_id, room_version, outlier=False,
timeout=None):
"""Requests the PDU with given origin and ID from the remote home
servers.
@@ -212,6 +224,7 @@ class FederationClient(FederationBase):
Args:
destinations (list): Which home servers to query
event_id (str): event to fetch
room_version (str): version of the room
outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
it's from an arbitrary point in the context as opposed to part
of the current block of PDUs. Defaults to `False`
@@ -230,6 +243,8 @@ class FederationClient(FederationBase):
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
format_ver = room_version_to_event_format(room_version)
signed_pdu = None
for destination in destinations:
now = self._clock.time_msec()
@@ -245,7 +260,7 @@ class FederationClient(FederationBase):
logger.debug("transaction_data %r", transaction_data)
pdu_list = [
event_from_pdu_json(p, outlier=outlier)
event_from_pdu_json(p, format_ver, outlier=outlier)
for p in transaction_data["pdus"]
]
@@ -253,7 +268,7 @@ class FederationClient(FederationBase):
pdu = pdu_list[0]
# Check signatures are correct.
signed_pdu = yield self._check_sigs_and_hash(pdu)
signed_pdu = yield self._check_sigs_and_hash(room_version, pdu)
break
@@ -339,12 +354,16 @@ class FederationClient(FederationBase):
destination, room_id, event_id=event_id,
)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
pdus = [
event_from_pdu_json(p, outlier=True) for p in result["pdus"]
event_from_pdu_json(p, format_ver, outlier=True)
for p in result["pdus"]
]
auth_chain = [
event_from_pdu_json(p, outlier=True)
event_from_pdu_json(p, format_ver, outlier=True)
for p in result.get("auth_chain", [])
]
@@ -355,7 +374,8 @@ class FederationClient(FederationBase):
signed_pdus = yield self._check_sigs_and_hash_and_fetch(
destination,
[p for p in pdus if p.event_id not in seen_events],
outlier=True
outlier=True,
room_version=room_version,
)
signed_pdus.extend(
seen_events[p.event_id] for p in pdus if p.event_id in seen_events
@@ -364,7 +384,8 @@ class FederationClient(FederationBase):
signed_auth = yield self._check_sigs_and_hash_and_fetch(
destination,
[p for p in auth_chain if p.event_id not in seen_events],
outlier=True
outlier=True,
room_version=room_version,
)
signed_auth.extend(
seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
@@ -411,6 +432,8 @@ class FederationClient(FederationBase):
random.shuffle(srvs)
return srvs
room_version = yield self.store.get_room_version(room_id)
batch_size = 20
missing_events = list(missing_events)
for i in range(0, len(missing_events), batch_size):
@@ -421,6 +444,7 @@ class FederationClient(FederationBase):
self.get_pdu,
destinations=random_server_list(),
event_id=e_id,
room_version=room_version,
)
for e_id in batch
]
@@ -445,13 +469,17 @@ class FederationClient(FederationBase):
destination, room_id, event_id,
)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
auth_chain = [
event_from_pdu_json(p, outlier=True)
event_from_pdu_json(p, format_ver, outlier=True)
for p in res["auth_chain"]
]
signed_auth = yield self._check_sigs_and_hash_and_fetch(
destination, auth_chain, outlier=True
destination, auth_chain,
outlier=True, room_version=room_version,
)
signed_auth.sort(key=lambda e: e.depth)
@@ -522,6 +550,8 @@ class FederationClient(FederationBase):
Does so by asking one of the already participating servers to create an
event with proper context.
Returns a fully signed and hashed event.
Note that this does not append any events to any graphs.
Args:
@@ -536,8 +566,10 @@ class FederationClient(FederationBase):
params (dict[str, str|Iterable[str]]): Query parameters to include in the
request.
Return:
Deferred: resolves to a tuple of (origin (str), event (object))
where origin is the remote homeserver which generated the event.
Deferred[tuple[str, FrozenEvent, int]]: resolves to a tuple of
`(origin, event, event_format)` where origin is the remote
homeserver which generated the event, and event_format is one of
`synapse.api.constants.EventFormatVersions`.
Fails with a ``SynapseError`` if the chosen remote server
returns a 300/400 code.
@@ -557,6 +589,11 @@ class FederationClient(FederationBase):
destination, room_id, user_id, membership, params,
)
# Note: If not supplied, the room version may be either v1 or v2,
# however either way the event format version will be v1.
room_version = ret.get("room_version", RoomVersions.V1)
event_format = room_version_to_event_format(room_version)
pdu_dict = ret.get("event", None)
if not isinstance(pdu_dict, dict):
raise InvalidResponseError("Bad 'event' field in response")
@@ -571,17 +608,20 @@ class FederationClient(FederationBase):
if "prev_state" not in pdu_dict:
pdu_dict["prev_state"] = []
ev = builder.EventBuilder(pdu_dict)
ev = builder.create_local_event_from_event_dict(
self._clock, self.hostname, self.signing_key,
format_version=event_format, event_dict=pdu_dict,
)
defer.returnValue(
(destination, ev)
(destination, ev, event_format)
)
return self._try_destination_list(
"make_" + membership, destinations, send_request,
)
def send_join(self, destinations, pdu):
def send_join(self, destinations, pdu, event_format_version):
"""Sends a join event to one of a list of homeservers.
Doing so will cause the remote server to add the event to the graph,
@@ -591,6 +631,7 @@ class FederationClient(FederationBase):
destinations (str): Candidate homeservers which are probably
participating in the room.
pdu (BaseEvent): event to be sent
event_format_version (int): The event format version
Return:
Deferred: resolves to a dict with members ``origin`` (a string
@@ -636,12 +677,12 @@ class FederationClient(FederationBase):
logger.debug("Got content: %s", content)
state = [
event_from_pdu_json(p, outlier=True)
event_from_pdu_json(p, event_format_version, outlier=True)
for p in content.get("state", [])
]
auth_chain = [
event_from_pdu_json(p, outlier=True)
event_from_pdu_json(p, event_format_version, outlier=True)
for p in content.get("auth_chain", [])
]
@@ -650,9 +691,21 @@ class FederationClient(FederationBase):
for p in itertools.chain(state, auth_chain)
}
room_version = None
for e in state:
if (e.type, e.state_key) == (EventTypes.Create, ""):
room_version = e.content.get("room_version", RoomVersions.V1)
break
if room_version is None:
# If the state doesn't have a create event then the room is
# invalid, and it would fail auth checks anyway.
raise SynapseError(400, "No create event in state")
valid_pdus = yield self._check_sigs_and_hash_and_fetch(
destination, list(pdus.values()),
outlier=True,
room_version=room_version,
)
valid_pdus_map = {
@@ -690,32 +743,75 @@ class FederationClient(FederationBase):
@defer.inlineCallbacks
def send_invite(self, destination, room_id, event_id, pdu):
time_now = self._clock.time_msec()
try:
code, content = yield self.transport_layer.send_invite(
destination=destination,
room_id=room_id,
event_id=event_id,
content=pdu.get_pdu_json(time_now),
)
except HttpResponseException as e:
if e.code == 403:
raise e.to_synapse_error()
raise
room_version = yield self.store.get_room_version(room_id)
content = yield self._do_send_invite(destination, pdu, room_version)
pdu_dict = content["event"]
logger.debug("Got response to send_invite: %s", pdu_dict)
pdu = event_from_pdu_json(pdu_dict)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(pdu_dict, format_ver)
# Check signatures are correct.
pdu = yield self._check_sigs_and_hash(pdu)
pdu = yield self._check_sigs_and_hash(room_version, pdu)
# FIXME: We should handle signature failures more gracefully.
defer.returnValue(pdu)
@defer.inlineCallbacks
def _do_send_invite(self, destination, pdu, room_version):
"""Actually sends the invite, first trying v2 API and falling back to
v1 API if necessary.
Args:
destination (str): Target server
pdu (FrozenEvent)
room_version (str)
Returns:
dict: The event as a dict as returned by the remote server
"""
time_now = self._clock.time_msec()
try:
content = yield self.transport_layer.send_invite_v2(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content={
"event": pdu.get_pdu_json(time_now),
"room_version": room_version,
"invite_room_state": pdu.unsigned.get("invite_room_state", []),
},
)
defer.returnValue(content)
except HttpResponseException as e:
if e.code in [400, 404]:
if room_version in (RoomVersions.V1, RoomVersions.V2):
pass # We'll fall through
else:
raise Exception("Remote server is too old")
elif e.code == 403:
raise e.to_synapse_error()
else:
raise
# Didn't work, try v1 API.
# Note the v1 API returns a tuple of `(200, content)`
_, content = yield self.transport_layer.send_invite_v1(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)
defer.returnValue(content)
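Summarising the fallback logic above (a sketch of the decision table, not additional behaviour):

# v2 /invite succeeds                 -> use its response
# 400/404 and room version is v1/v2   -> remote predates v2; retry the v1 API
# 400/404 and room version is v3+     -> "Remote server is too old"
# 403                                 -> re-raise as a SynapseError
# anything else                       -> propagate the HTTP error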
def send_leave(self, destinations, pdu):
"""Sends a leave event to one of a list of homeservers.
@@ -785,13 +881,16 @@ class FederationClient(FederationBase):
content=send_content,
)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
auth_chain = [
event_from_pdu_json(e)
event_from_pdu_json(e, format_ver)
for e in content["auth_chain"]
]
signed_auth = yield self._check_sigs_and_hash_and_fetch(
destination, auth_chain, outlier=True
destination, auth_chain, outlier=True, room_version=room_version,
)
signed_auth.sort(key=lambda e: e.depth)
@@ -833,13 +932,16 @@ class FederationClient(FederationBase):
timeout=timeout,
)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
events = [
event_from_pdu_json(e)
event_from_pdu_json(e, format_ver)
for e in content.get("events", [])
]
signed_events = yield self._check_sigs_and_hash_and_fetch(
destination, events, outlier=False
destination, events, outlier=False, room_version=room_version,
)
except HttpResponseException as e:
if not e.code == 400:

View File

@@ -25,7 +25,7 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
from synapse.api.constants import EventTypes
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
AuthError,
FederationError,
@@ -34,6 +34,7 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.crypto.event_signing import compute_event_signature
from synapse.events import room_version_to_event_format
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
@@ -178,14 +179,13 @@ class FederationServer(FederationBase):
continue
try:
# In future we will actually use the room version to parse the
# PDU into an event.
yield self.store.get_room_version(room_id)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
except NotFoundError:
logger.info("Ignoring PDU for unknown room_id: %s", room_id)
continue
event = event_from_pdu_json(p)
event = event_from_pdu_json(p, format_ver)
pdus_by_room.setdefault(room_id, []).append(event)
pdu_results = {}
@@ -322,7 +322,7 @@ class FederationServer(FederationBase):
if self.hs.is_mine_id(event.event_id):
event.signatures.update(
compute_event_signature(
event,
event.get_pdu_json(),
self.hs.hostname,
self.hs.config.signing_key[0]
)
@@ -369,18 +369,23 @@ class FederationServer(FederationBase):
})
@defer.inlineCallbacks
def on_invite_request(self, origin, content):
pdu = event_from_pdu_json(content)
def on_invite_request(self, origin, content, room_version):
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(content, format_ver)
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, pdu.room_id)
ret_pdu = yield self.handler.on_invite_request(origin, pdu)
time_now = self._clock.time_msec()
defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))
defer.returnValue({"event": ret_pdu.get_pdu_json(time_now)})
@defer.inlineCallbacks
def on_send_join_request(self, origin, content):
def on_send_join_request(self, origin, content, room_id):
logger.debug("on_send_join_request: content: %s", content)
pdu = event_from_pdu_json(content)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(content, format_ver)
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, pdu.room_id)
@@ -400,13 +405,22 @@ class FederationServer(FederationBase):
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, room_id)
pdu = yield self.handler.on_make_leave_request(room_id, user_id)
room_version = yield self.store.get_room_version(room_id)
time_now = self._clock.time_msec()
defer.returnValue({"event": pdu.get_pdu_json(time_now)})
defer.returnValue({
"event": pdu.get_pdu_json(time_now),
"room_version": room_version,
})
@defer.inlineCallbacks
def on_send_leave_request(self, origin, content):
def on_send_leave_request(self, origin, content, room_id):
logger.debug("on_send_leave_request: content: %s", content)
pdu = event_from_pdu_json(content)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(content, format_ver)
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, pdu.room_id)
@@ -452,13 +466,16 @@ class FederationServer(FederationBase):
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, room_id)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
auth_chain = [
event_from_pdu_json(e)
event_from_pdu_json(e, format_ver)
for e in content["auth_chain"]
]
signed_auth = yield self._check_sigs_and_hash_and_fetch(
origin, auth_chain, outlier=True
origin, auth_chain, outlier=True, room_version=room_version,
)
ret = yield self.handler.on_query_auth(
@@ -603,16 +620,19 @@ class FederationServer(FederationBase):
"""
# check that it's actually being sent from a valid destination to
# workaround bug #1753 in 0.18.5 and 0.18.6
if origin != get_domain_from_id(pdu.event_id):
if origin != get_domain_from_id(pdu.sender):
# We continue to accept join events from any server; this is
# necessary for the federation join dance to work correctly.
# (When we join over federation, the "helper" server is
# responsible for sending out the join event, rather than the
# origin. See bug #1893).
# origin. See bug #1893. This is also true for some third party
# invites).
if not (
pdu.type == 'm.room.member' and
pdu.content and
pdu.content.get("membership", None) == 'join'
pdu.content.get("membership", None) in (
Membership.JOIN, Membership.INVITE,
)
):
logger.info(
"Discarding PDU %s from invalid origin %s",
@@ -625,9 +645,12 @@ class FederationServer(FederationBase):
pdu.event_id, origin
)
# We've already checked that we know the room version by this point
room_version = yield self.store.get_room_version(pdu.room_id)
# Check signature.
try:
pdu = yield self._check_sigs_and_hash(pdu)
pdu = yield self._check_sigs_and_hash(room_version, pdu)
except SynapseError as e:
raise FederationError(
"ERROR",

View File

@@ -175,7 +175,7 @@ class TransactionQueue(object):
def handle_event(event):
# Only send events for this server.
send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
is_mine = self.is_mine_id(event.event_id)
is_mine = self.is_mine_id(event.sender)
if not is_mine and send_on_behalf_of is None:
return

View File

@@ -21,7 +21,7 @@ from six.moves import urllib
from twisted.internet import defer
from synapse.api.constants import Membership
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
from synapse.util.logutils import log_function
logger = logging.getLogger(__name__)
@@ -51,7 +51,7 @@ class TransportLayerClient(object):
logger.debug("get_room_state dest=%s, room=%s",
destination, room_id)
path = _create_path(PREFIX, "/state/%s/", room_id)
path = _create_v1_path("/state/%s/", room_id)
return self.client.get_json(
destination, path=path, args={"event_id": event_id},
)
@@ -73,7 +73,7 @@ class TransportLayerClient(object):
logger.debug("get_room_state_ids dest=%s, room=%s",
destination, room_id)
path = _create_path(PREFIX, "/state_ids/%s/", room_id)
path = _create_v1_path("/state_ids/%s/", room_id)
return self.client.get_json(
destination, path=path, args={"event_id": event_id},
)
@@ -95,7 +95,7 @@ class TransportLayerClient(object):
logger.debug("get_pdu dest=%s, event_id=%s",
destination, event_id)
path = _create_path(PREFIX, "/event/%s/", event_id)
path = _create_v1_path("/event/%s/", event_id)
return self.client.get_json(destination, path=path, timeout=timeout)
@log_function
@@ -121,7 +121,7 @@ class TransportLayerClient(object):
# TODO: raise?
return
path = _create_path(PREFIX, "/backfill/%s/", room_id)
path = _create_v1_path("/backfill/%s/", room_id)
args = {
"v": event_tuples,
@@ -167,7 +167,7 @@ class TransportLayerClient(object):
# generated by the json_data_callback.
json_data = transaction.get_dict()
path = _create_path(PREFIX, "/send/%s/", transaction.transaction_id)
path = _create_v1_path("/send/%s/", transaction.transaction_id)
response = yield self.client.put_json(
transaction.destination,
@@ -184,7 +184,7 @@ class TransportLayerClient(object):
@log_function
def make_query(self, destination, query_type, args, retry_on_dns_fail,
ignore_backoff=False):
path = _create_path(PREFIX, "/query/%s", query_type)
path = _create_v1_path("/query/%s", query_type)
content = yield self.client.get_json(
destination=destination,
@@ -231,7 +231,7 @@ class TransportLayerClient(object):
"make_membership_event called with membership='%s', must be one of %s" %
(membership, ",".join(valid_memberships))
)
path = _create_path(PREFIX, "/make_%s/%s/%s", membership, room_id, user_id)
path = _create_v1_path("/make_%s/%s/%s", membership, room_id, user_id)
ignore_backoff = False
retry_on_dns_fail = False
@@ -258,7 +258,7 @@ class TransportLayerClient(object):
@defer.inlineCallbacks
@log_function
def send_join(self, destination, room_id, event_id, content):
path = _create_path(PREFIX, "/send_join/%s/%s", room_id, event_id)
path = _create_v1_path("/send_join/%s/%s", room_id, event_id)
response = yield self.client.put_json(
destination=destination,
@@ -271,7 +271,7 @@ class TransportLayerClient(object):
@defer.inlineCallbacks
@log_function
def send_leave(self, destination, room_id, event_id, content):
path = _create_path(PREFIX, "/send_leave/%s/%s", room_id, event_id)
path = _create_v1_path("/send_leave/%s/%s", room_id, event_id)
response = yield self.client.put_json(
destination=destination,
@@ -289,8 +289,22 @@ class TransportLayerClient(object):
@defer.inlineCallbacks
@log_function
def send_invite(self, destination, room_id, event_id, content):
path = _create_path(PREFIX, "/invite/%s/%s", room_id, event_id)
def send_invite_v1(self, destination, room_id, event_id, content):
path = _create_v1_path("/invite/%s/%s", room_id, event_id)
response = yield self.client.put_json(
destination=destination,
path=path,
data=content,
ignore_backoff=True,
)
defer.returnValue(response)
@defer.inlineCallbacks
@log_function
def send_invite_v2(self, destination, room_id, event_id, content):
path = _create_v2_path("/invite/%s/%s", room_id, event_id)
response = yield self.client.put_json(
destination=destination,
@@ -306,7 +320,7 @@ class TransportLayerClient(object):
def get_public_rooms(self, remote_server, limit, since_token,
search_filter=None, include_all_networks=False,
third_party_instance_id=None):
path = PREFIX + "/publicRooms"
path = _create_v1_path("/publicRooms")
args = {
"include_all_networks": "true" if include_all_networks else "false",
@@ -332,7 +346,7 @@ class TransportLayerClient(object):
@defer.inlineCallbacks
@log_function
def exchange_third_party_invite(self, destination, room_id, event_dict):
path = _create_path(PREFIX, "/exchange_third_party_invite/%s", room_id,)
path = _create_v1_path("/exchange_third_party_invite/%s", room_id,)
response = yield self.client.put_json(
destination=destination,
@@ -345,7 +359,7 @@ class TransportLayerClient(object):
@defer.inlineCallbacks
@log_function
def get_event_auth(self, destination, room_id, event_id):
path = _create_path(PREFIX, "/event_auth/%s/%s", room_id, event_id)
path = _create_v1_path("/event_auth/%s/%s", room_id, event_id)
content = yield self.client.get_json(
destination=destination,
@@ -357,7 +371,7 @@ class TransportLayerClient(object):
@defer.inlineCallbacks
@log_function
def send_query_auth(self, destination, room_id, event_id, content):
path = _create_path(PREFIX, "/query_auth/%s/%s", room_id, event_id)
path = _create_v1_path("/query_auth/%s/%s", room_id, event_id)
content = yield self.client.post_json(
destination=destination,
@@ -392,7 +406,7 @@ class TransportLayerClient(object):
Returns:
A dict containing the device keys.
"""
path = PREFIX + "/user/keys/query"
path = _create_v1_path("/user/keys/query")
content = yield self.client.post_json(
destination=destination,
@@ -419,7 +433,7 @@ class TransportLayerClient(object):
Returns:
A dict containing the device keys.
"""
path = _create_path(PREFIX, "/user/devices/%s", user_id)
path = _create_v1_path("/user/devices/%s", user_id)
content = yield self.client.get_json(
destination=destination,
@@ -455,7 +469,7 @@ class TransportLayerClient(object):
A dict containing the one-time keys.
"""
path = PREFIX + "/user/keys/claim"
path = _create_v1_path("/user/keys/claim")
content = yield self.client.post_json(
destination=destination,
@@ -469,7 +483,7 @@ class TransportLayerClient(object):
@log_function
def get_missing_events(self, destination, room_id, earliest_events,
latest_events, limit, min_depth, timeout):
path = _create_path(PREFIX, "/get_missing_events/%s", room_id,)
path = _create_v1_path("/get_missing_events/%s", room_id,)
content = yield self.client.post_json(
destination=destination,
@@ -489,7 +503,7 @@ class TransportLayerClient(object):
def get_group_profile(self, destination, group_id, requester_user_id):
"""Get a group profile
"""
path = _create_path(PREFIX, "/groups/%s/profile", group_id,)
path = _create_v1_path("/groups/%s/profile", group_id,)
return self.client.get_json(
destination=destination,
@@ -508,7 +522,7 @@ class TransportLayerClient(object):
requester_user_id (str)
content (dict): The new profile of the group
"""
path = _create_path(PREFIX, "/groups/%s/profile", group_id,)
path = _create_v1_path("/groups/%s/profile", group_id,)
return self.client.post_json(
destination=destination,
@@ -522,7 +536,7 @@ class TransportLayerClient(object):
def get_group_summary(self, destination, group_id, requester_user_id):
"""Get a group summary
"""
path = _create_path(PREFIX, "/groups/%s/summary", group_id,)
path = _create_v1_path("/groups/%s/summary", group_id,)
return self.client.get_json(
destination=destination,
@@ -535,7 +549,7 @@ class TransportLayerClient(object):
def get_rooms_in_group(self, destination, group_id, requester_user_id):
"""Get all rooms in a group
"""
path = _create_path(PREFIX, "/groups/%s/rooms", group_id,)
path = _create_v1_path("/groups/%s/rooms", group_id,)
return self.client.get_json(
destination=destination,
@@ -548,7 +562,7 @@ class TransportLayerClient(object):
content):
"""Add a room to a group
"""
path = _create_path(PREFIX, "/groups/%s/room/%s", group_id, room_id,)
path = _create_v1_path("/groups/%s/room/%s", group_id, room_id,)
return self.client.post_json(
destination=destination,
@@ -562,8 +576,8 @@ class TransportLayerClient(object):
config_key, content):
"""Update room in group
"""
path = _create_path(
PREFIX, "/groups/%s/room/%s/config/%s",
path = _create_v1_path(
"/groups/%s/room/%s/config/%s",
group_id, room_id, config_key,
)
@@ -578,7 +592,7 @@ class TransportLayerClient(object):
def remove_room_from_group(self, destination, group_id, requester_user_id, room_id):
"""Remove a room from a group
"""
path = _create_path(PREFIX, "/groups/%s/room/%s", group_id, room_id,)
path = _create_v1_path("/groups/%s/room/%s", group_id, room_id,)
return self.client.delete_json(
destination=destination,
@@ -591,7 +605,7 @@ class TransportLayerClient(object):
def get_users_in_group(self, destination, group_id, requester_user_id):
"""Get users in a group
"""
path = _create_path(PREFIX, "/groups/%s/users", group_id,)
path = _create_v1_path("/groups/%s/users", group_id,)
return self.client.get_json(
destination=destination,
@@ -604,7 +618,7 @@ class TransportLayerClient(object):
def get_invited_users_in_group(self, destination, group_id, requester_user_id):
"""Get users that have been invited to a group
"""
path = _create_path(PREFIX, "/groups/%s/invited_users", group_id,)
path = _create_v1_path("/groups/%s/invited_users", group_id,)
return self.client.get_json(
destination=destination,
@@ -617,8 +631,8 @@ class TransportLayerClient(object):
def accept_group_invite(self, destination, group_id, user_id, content):
"""Accept a group invite
"""
path = _create_path(
PREFIX, "/groups/%s/users/%s/accept_invite",
path = _create_v1_path(
"/groups/%s/users/%s/accept_invite",
group_id, user_id,
)
@@ -633,7 +647,7 @@ class TransportLayerClient(object):
def join_group(self, destination, group_id, user_id, content):
"""Attempts to join a group
"""
path = _create_path(PREFIX, "/groups/%s/users/%s/join", group_id, user_id)
path = _create_v1_path("/groups/%s/users/%s/join", group_id, user_id)
return self.client.post_json(
destination=destination,
@@ -646,7 +660,7 @@ class TransportLayerClient(object):
def invite_to_group(self, destination, group_id, user_id, requester_user_id, content):
"""Invite a user to a group
"""
path = _create_path(PREFIX, "/groups/%s/users/%s/invite", group_id, user_id)
path = _create_v1_path("/groups/%s/users/%s/invite", group_id, user_id)
return self.client.post_json(
destination=destination,
@@ -662,7 +676,7 @@ class TransportLayerClient(object):
invited.
"""
path = _create_path(PREFIX, "/groups/local/%s/users/%s/invite", group_id, user_id)
path = _create_v1_path("/groups/local/%s/users/%s/invite", group_id, user_id)
return self.client.post_json(
destination=destination,
@@ -676,7 +690,7 @@ class TransportLayerClient(object):
user_id, content):
"""Remove a user fron a group
"""
path = _create_path(PREFIX, "/groups/%s/users/%s/remove", group_id, user_id)
path = _create_v1_path("/groups/%s/users/%s/remove", group_id, user_id)
return self.client.post_json(
destination=destination,
@@ -693,7 +707,7 @@ class TransportLayerClient(object):
kicked from the group.
"""
path = _create_path(PREFIX, "/groups/local/%s/users/%s/remove", group_id, user_id)
path = _create_v1_path("/groups/local/%s/users/%s/remove", group_id, user_id)
return self.client.post_json(
destination=destination,
@@ -708,7 +722,7 @@ class TransportLayerClient(object):
the attestations
"""
path = _create_path(PREFIX, "/groups/%s/renew_attestation/%s", group_id, user_id)
path = _create_v1_path("/groups/%s/renew_attestation/%s", group_id, user_id)
return self.client.post_json(
destination=destination,
@@ -723,12 +737,12 @@ class TransportLayerClient(object):
"""Update a room entry in a group summary
"""
if category_id:
path = _create_path(
PREFIX, "/groups/%s/summary/categories/%s/rooms/%s",
path = _create_v1_path(
"/groups/%s/summary/categories/%s/rooms/%s",
group_id, category_id, room_id,
)
else:
path = _create_path(PREFIX, "/groups/%s/summary/rooms/%s", group_id, room_id,)
path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id,)
return self.client.post_json(
destination=destination,
@@ -744,12 +758,12 @@ class TransportLayerClient(object):
"""Delete a room entry in a group summary
"""
if category_id:
path = _create_path(
PREFIX + "/groups/%s/summary/categories/%s/rooms/%s",
path = _create_v1_path(
"/groups/%s/summary/categories/%s/rooms/%s",
group_id, category_id, room_id,
)
else:
path = _create_path(PREFIX, "/groups/%s/summary/rooms/%s", group_id, room_id,)
path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id,)
return self.client.delete_json(
destination=destination,
@@ -762,7 +776,7 @@ class TransportLayerClient(object):
def get_group_categories(self, destination, group_id, requester_user_id):
"""Get all categories in a group
"""
path = _create_path(PREFIX, "/groups/%s/categories", group_id,)
path = _create_v1_path("/groups/%s/categories", group_id,)
return self.client.get_json(
destination=destination,
@@ -775,7 +789,7 @@ class TransportLayerClient(object):
def get_group_category(self, destination, group_id, requester_user_id, category_id):
"""Get category info in a group
"""
path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)
path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id,)
return self.client.get_json(
destination=destination,
@@ -789,7 +803,7 @@ class TransportLayerClient(object):
content):
"""Update a category in a group
"""
path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)
path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id,)
return self.client.post_json(
destination=destination,
@@ -804,7 +818,7 @@ class TransportLayerClient(object):
category_id):
"""Delete a category in a group
"""
path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)
path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id,)
return self.client.delete_json(
destination=destination,
@@ -817,7 +831,7 @@ class TransportLayerClient(object):
def get_group_roles(self, destination, group_id, requester_user_id):
"""Get all roles in a group
"""
path = _create_path(PREFIX, "/groups/%s/roles", group_id,)
path = _create_v1_path("/groups/%s/roles", group_id,)
return self.client.get_json(
destination=destination,
@@ -830,7 +844,7 @@ class TransportLayerClient(object):
def get_group_role(self, destination, group_id, requester_user_id, role_id):
"""Get a roles info
"""
path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id,)
return self.client.get_json(
destination=destination,
@@ -844,7 +858,7 @@ class TransportLayerClient(object):
content):
"""Update a role in a group
"""
path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id,)
return self.client.post_json(
destination=destination,
@@ -858,7 +872,7 @@ class TransportLayerClient(object):
def delete_group_role(self, destination, group_id, requester_user_id, role_id):
"""Delete a role in a group
"""
path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id,)
return self.client.delete_json(
destination=destination,
@@ -873,12 +887,12 @@ class TransportLayerClient(object):
"""Update a users entry in a group
"""
if role_id:
path = _create_path(
PREFIX, "/groups/%s/summary/roles/%s/users/%s",
path = _create_v1_path(
"/groups/%s/summary/roles/%s/users/%s",
group_id, role_id, user_id,
)
else:
path = _create_path(PREFIX, "/groups/%s/summary/users/%s", group_id, user_id,)
path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id,)
return self.client.post_json(
destination=destination,
@@ -893,7 +907,7 @@ class TransportLayerClient(object):
content):
"""Sets the join policy for a group
"""
path = _create_path(PREFIX, "/groups/%s/settings/m.join_policy", group_id,)
path = _create_v1_path("/groups/%s/settings/m.join_policy", group_id,)
return self.client.put_json(
destination=destination,
@@ -909,12 +923,12 @@ class TransportLayerClient(object):
"""Delete a users entry in a group
"""
if role_id:
path = _create_path(
PREFIX, "/groups/%s/summary/roles/%s/users/%s",
path = _create_v1_path(
"/groups/%s/summary/roles/%s/users/%s",
group_id, role_id, user_id,
)
else:
path = _create_path(PREFIX, "/groups/%s/summary/users/%s", group_id, user_id,)
path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id,)
return self.client.delete_json(
destination=destination,
@@ -927,7 +941,7 @@ class TransportLayerClient(object):
"""Get the groups a list of users are publicising
"""
path = PREFIX + "/get_groups_publicised"
path = _create_v1_path("/get_groups_publicised")
content = {"user_ids": user_ids}
@@ -939,20 +953,43 @@ class TransportLayerClient(object):
)
def _create_path(prefix, path, *args):
"""Creates a path from the prefix, path template and args. Ensures that
all args are url encoded.
def _create_v1_path(path, *args):
"""Creates a path against V1 federation API from the path template and
args. Ensures that all args are url encoded.
Example:
_create_path(PREFIX, "/event/%s/", event_id)
_create_v1_path("/event/%s/", event_id)
Args:
prefix (str)
path (str): String template for the path
args: ([str]): Args to insert into path. Each arg will be url encoded
Returns:
str
"""
return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)
return (
FEDERATION_V1_PREFIX
+ path % tuple(urllib.parse.quote(arg, "") for arg in args)
)
def _create_v2_path(path, *args):
"""Creates a path against V2 federation API from the path template and
args. Ensures that all args are url encoded.
Example:
_create_v2_path("/event/%s/", event_id)
Args:
path (str): String template for the path
args: ([str]): Args to insert into path. Each arg will be url encoded
Returns:
str
"""
return (
FEDERATION_V2_PREFIX
+ path % tuple(urllib.parse.quote(arg, "") for arg in args)
)
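The aggressive quoting (quote(arg, "") with no safe characters) matters for room-v3 event IDs, which can contain `/` and other URL-significant characters; e.g. (assuming FEDERATION_V1_PREFIX is "/_matrix/federation/v1"):

_create_v1_path("/event/%s/", "$abc/def:example.org")
# -> "/_matrix/federation/v1/event/%24abc%2Fdef%3Aexample.org/"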

View File

@@ -21,8 +21,9 @@ import re
from twisted.internet import defer
import synapse
from synapse.api.constants import RoomVersions
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
from synapse.http.endpoint import parse_and_validate_server_name
from synapse.http.server import JsonResource
from synapse.http.servlet import (
@@ -227,6 +228,8 @@ class BaseFederationServlet(object):
"""
REQUIRE_AUTH = True
PREFIX = FEDERATION_V1_PREFIX # Allows specifying the API version
def __init__(self, handler, authenticator, ratelimiter, server_name):
self.handler = handler
self.authenticator = authenticator
@@ -286,7 +289,7 @@ class BaseFederationServlet(object):
return new_func
def register(self, server):
pattern = re.compile("^" + PREFIX + self.PATH + "$")
pattern = re.compile("^" + self.PREFIX + self.PATH + "$")
for method in ("GET", "PUT", "POST"):
code = getattr(self, "on_%s" % (method), None)
@@ -466,7 +469,7 @@ class FederationSendLeaveServlet(BaseFederationServlet):
@defer.inlineCallbacks
def on_PUT(self, origin, content, query, room_id, event_id):
content = yield self.handler.on_send_leave_request(origin, content)
content = yield self.handler.on_send_leave_request(origin, content, room_id)
defer.returnValue((200, content))
@@ -484,18 +487,50 @@ class FederationSendJoinServlet(BaseFederationServlet):
def on_PUT(self, origin, content, query, context, event_id):
# TODO(paul): assert that context/event_id parsed from path actually
# match those given in content
content = yield self.handler.on_send_join_request(origin, content)
content = yield self.handler.on_send_join_request(origin, content, context)
defer.returnValue((200, content))
class FederationInviteServlet(BaseFederationServlet):
class FederationV1InviteServlet(BaseFederationServlet):
PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
@defer.inlineCallbacks
def on_PUT(self, origin, content, query, context, event_id):
# We don't get a room version, so we have to assume it's EITHER v1 or
# v2. This is "fine" as the only difference between V1 and V2 is the
# state resolution algorithm, and we don't use that for processing
# invites
content = yield self.handler.on_invite_request(
origin, content, room_version=RoomVersions.V1,
)
# V1 federation API is defined to return a content of `[200, {...}]`
# due to a historical bug.
defer.returnValue((200, (200, content)))
class FederationV2InviteServlet(BaseFederationServlet):
PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
PREFIX = FEDERATION_V2_PREFIX
@defer.inlineCallbacks
def on_PUT(self, origin, content, query, context, event_id):
# TODO(paul): assert that context/event_id parsed from path actually
# match those given in content
content = yield self.handler.on_invite_request(origin, content)
room_version = content["room_version"]
event = content["event"]
invite_room_state = content["invite_room_state"]
# Synapse expects invite_room_state to be in unsigned, as it is in v1
# API
event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
content = yield self.handler.on_invite_request(
origin, event, room_version=room_version,
)
defer.returnValue((200, content))
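For reference, the v2 request body carries alongside the event what v1 tucked into `unsigned`; the shim above folds it back so the shared handler sees one shape (hypothetical payload):

# v2 PUT body:
#   {"event": {...}, "room_version": "3", "invite_room_state": [...]}
# after the shim:
#   event["unsigned"]["invite_room_state"] == [...]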
@@ -1263,7 +1298,8 @@ FEDERATION_SERVLET_CLASSES = (
FederationEventServlet,
FederationSendJoinServlet,
FederationSendLeaveServlet,
FederationInviteServlet,
FederationV1InviteServlet,
FederationV2InviteServlet,
FederationQueryAuthServlet,
FederationGetMissingEventsServlet,
FederationEventAuthServlet,

synapse/handlers/acme.py (new file, 150 lines)
View File

@@ -0,0 +1,150 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import attr
from zope.interface import implementer
import twisted
import twisted.internet.error
from twisted.internet import defer
from twisted.python.filepath import FilePath
from twisted.python.url import URL
from twisted.web import server, static
from twisted.web.resource import Resource
from synapse.app import check_bind_error
logger = logging.getLogger(__name__)
try:
from txacme.interfaces import ICertificateStore
@attr.s
@implementer(ICertificateStore)
class ErsatzStore(object):
"""
A store that only stores in memory.
"""
certs = attr.ib(default=attr.Factory(dict))
def store(self, server_name, pem_objects):
self.certs[server_name] = [o.as_bytes() for o in pem_objects]
return defer.succeed(None)
except ImportError:
# txacme is missing
pass
class AcmeHandler(object):
def __init__(self, hs):
self.hs = hs
self.reactor = hs.get_reactor()
@defer.inlineCallbacks
def start_listening(self):
# Configure logging for txacme, if you need to debug
# from eliot import add_destinations
# from eliot.twisted import TwistedDestination
#
# add_destinations(TwistedDestination())
from txacme.challenges import HTTP01Responder
from txacme.service import AcmeIssuingService
from txacme.endpoint import load_or_create_client_key
from txacme.client import Client
from josepy.jwa import RS256
self._store = ErsatzStore()
responder = HTTP01Responder()
self._issuer = AcmeIssuingService(
cert_store=self._store,
client_creator=(
lambda: Client.from_url(
reactor=self.reactor,
url=URL.from_text(self.hs.config.acme_url),
key=load_or_create_client_key(
FilePath(self.hs.config.config_dir_path)
),
alg=RS256,
)
),
clock=self.reactor,
responders=[responder],
)
well_known = Resource()
well_known.putChild(b'acme-challenge', responder.resource)
responder_resource = Resource()
responder_resource.putChild(b'.well-known', well_known)
responder_resource.putChild(b'check', static.Data(b'OK', b'text/plain'))
srv = server.Site(responder_resource)
bind_addresses = self.hs.config.acme_bind_addresses
for host in bind_addresses:
logger.info(
"Listening for ACME requests on %s:%i", host, self.hs.config.acme_port,
)
try:
self.reactor.listenTCP(
self.hs.config.acme_port,
srv,
interface=host,
)
except twisted.internet.error.CannotListenError as e:
check_bind_error(e, host, bind_addresses)
# Make sure we are registered with the ACME server. There's no public API
# for this; it is usually triggered by startService, but since we don't
# want it to control where we save the certificates, we have to reach in
# and trigger the registration machinery ourselves.
self._issuer._registered = False
yield self._issuer._ensure_registered()
@defer.inlineCallbacks
def provision_certificate(self):
logger.warning("Reprovisioning %s", self.hs.hostname)
try:
yield self._issuer.issue_cert(self.hs.hostname)
except Exception:
logger.exception("Fail!")
raise
logger.warning("Reprovisioned %s, saving.", self.hs.hostname)
cert_chain = self._store.certs[self.hs.hostname]
try:
with open(self.hs.config.tls_private_key_file, "wb") as private_key_file:
for x in cert_chain:
if x.startswith(b"-----BEGIN RSA PRIVATE KEY-----"):
private_key_file.write(x)
with open(self.hs.config.tls_certificate_file, "wb") as certificate_file:
for x in cert_chain:
if x.startswith(b"-----BEGIN CERTIFICATE-----"):
certificate_file.write(x)
except Exception:
logger.exception("Failed saving!")
raise
defer.returnValue(True)
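For orientation, a minimal sketch of how this handler is meant to be driven (assuming a homeserver object hs whose config carries the attributes used above: acme_url, acme_port, acme_bind_addresses, tls_private_key_file and tls_certificate_file):
from twisted.internet import defer

@defer.inlineCallbacks
def provision_on_startup(hs):
    # Start the HTTP-01 responder and register with the ACME server,
    # then request a certificate for our own server name. The handler
    # splits the resulting PEM chain into the configured key/cert files.
    acme = AcmeHandler(hs)
    yield acme.start_listening()
    yield acme.provision_certificate()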

View File

@@ -532,6 +532,25 @@ class DeviceListEduUpdater(object):
stream_id = result["stream_id"]
devices = result["devices"]
# If the remote server has more than ~1000 devices for this user
# we assume that something is going horribly wrong (e.g. a bot
# that logs in and creates a new device every time it tries to
# send a message). Maintaining lots of devices per user in the
# cache can cause serious performance issues: if this request
# takes more than 60s to complete, internal replication from the
# inbound federation worker to the synapse master may time out,
# the inbound federation request then fails, and the remote
# server retries, effectively DoSing us. So in this scenario we give
# up on storing the total list of devices and only handle the
# delta instead.
if len(devices) > 1000:
logger.warn(
"Ignoring device list snapshot for %s as it has >1K devs (%d)",
user_id, len(devices)
)
devices = []
yield self.store.update_remote_device_list_cache(
user_id, devices, stream_id,
)

View File

@@ -57,8 +57,8 @@ class DirectoryHandler(BaseHandler):
# general association creation for both human users and app services
for wchar in string.whitespace:
if wchar in room_alias.localpart:
raise SynapseError(400, "Invalid characters in room alias")
if wchar in room_alias.localpart:
raise SynapseError(400, "Invalid characters in room alias")
if not self.hs.is_mine(room_alias):
raise SynapseError(400, "Room alias must be local")

View File

@@ -34,6 +34,7 @@ from synapse.api.constants import (
EventTypes,
Membership,
RejectedReason,
RoomVersions,
)
from synapse.api.errors import (
AuthError,
@@ -43,10 +44,7 @@ from synapse.api.errors import (
StoreError,
SynapseError,
)
from synapse.crypto.event_signing import (
add_hashes_and_signatures,
compute_event_signature,
)
from synapse.crypto.event_signing import compute_event_signature
from synapse.events.validator import EventValidator
from synapse.replication.http.federation import (
ReplicationCleanRoomRestServlet,
@@ -58,7 +56,6 @@ from synapse.types import UserID, get_domain_from_id
from synapse.util import logcontext, unwrapFirstError
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_joined_room
from synapse.util.frozenutils import unfreeze
from synapse.util.logutils import log_function
from synapse.util.retryutils import NotRetryingDestination
from synapse.visibility import filter_events_for_server
@@ -105,7 +102,7 @@ class FederationHandler(BaseHandler):
self.hs = hs
self.store = hs.get_datastore() # type: synapse.storage.DataStore
self.store = hs.get_datastore()
self.federation_client = hs.get_federation_client()
self.state_handler = hs.get_state_handler()
self.server_name = hs.hostname
@@ -342,6 +339,8 @@ class FederationHandler(BaseHandler):
room_id, event_id, p,
)
room_version = yield self.store.get_room_version(room_id)
with logcontext.nested_logging_context(p):
# note that if any of the missing prevs share missing state or
# auth events, the requests to fetch those events are deduped
@@ -355,7 +354,7 @@ class FederationHandler(BaseHandler):
# we want the state *after* p; get_state_for_room returns the
# state *before* p.
remote_event = yield self.federation_client.get_pdu(
[origin], p, outlier=True,
[origin], p, room_version, outlier=True,
)
if remote_event is None:
@@ -379,7 +378,6 @@ class FederationHandler(BaseHandler):
for x in remote_state:
event_map[x.event_id] = x
room_version = yield self.store.get_room_version(room_id)
state_map = yield resolve_events_with_store(
room_version, state_maps, event_map,
state_res_store=StateResolutionStore(self.store),
@@ -655,6 +653,8 @@ class FederationHandler(BaseHandler):
if dest == self.server_name:
raise SynapseError(400, "Can't backfill from self.")
room_version = yield self.store.get_room_version(room_id)
events = yield self.federation_client.backfill(
dest,
room_id,
@@ -748,6 +748,7 @@ class FederationHandler(BaseHandler):
self.federation_client.get_pdu,
[dest],
event_id,
room_version=room_version,
outlier=True,
timeout=10000,
)
@@ -1060,7 +1061,7 @@ class FederationHandler(BaseHandler):
"""
logger.debug("Joining %s to %s", joinee, room_id)
origin, event = yield self._make_and_verify_event(
origin, event, event_format_version = yield self._make_and_verify_event(
target_hosts,
room_id,
joinee,
@@ -1083,7 +1084,6 @@ class FederationHandler(BaseHandler):
handled_events = set()
try:
event = self._sign_event(event)
# Try the host we successfully got a response to /make_join/
# request first.
try:
@@ -1091,7 +1091,9 @@ class FederationHandler(BaseHandler):
target_hosts.insert(0, origin)
except ValueError:
pass
ret = yield self.federation_client.send_join(target_hosts, event)
ret = yield self.federation_client.send_join(
target_hosts, event, event_format_version,
)
origin = ret["origin"]
state = ret["state"]
@@ -1164,13 +1166,18 @@ class FederationHandler(BaseHandler):
"""
event_content = {"membership": Membership.JOIN}
builder = self.event_builder_factory.new({
"type": EventTypes.Member,
"content": event_content,
"room_id": room_id,
"sender": user_id,
"state_key": user_id,
})
room_version = yield self.store.get_room_version(room_id)
builder = self.event_builder_factory.new(
room_version,
{
"type": EventTypes.Member,
"content": event_content,
"room_id": room_id,
"sender": user_id,
"state_key": user_id,
}
)
try:
event, context = yield self.event_creation_handler.create_new_client_event(
@@ -1182,7 +1189,9 @@ class FederationHandler(BaseHandler):
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
yield self.auth.check_from_context(event, context, do_sig_check=False)
yield self.auth.check_from_context(
room_version, event, context, do_sig_check=False,
)
defer.returnValue(event)
@@ -1287,11 +1296,11 @@ class FederationHandler(BaseHandler):
)
event.internal_metadata.outlier = True
event.internal_metadata.invite_from_remote = True
event.internal_metadata.out_of_band_membership = True
event.signatures.update(
compute_event_signature(
event,
event.get_pdu_json(),
self.hs.hostname,
self.hs.config.signing_key[0]
)
@@ -1304,7 +1313,7 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
def do_remotely_reject_invite(self, target_hosts, room_id, user_id):
origin, event = yield self._make_and_verify_event(
origin, event, event_format_version = yield self._make_and_verify_event(
target_hosts,
room_id,
user_id,
@@ -1313,7 +1322,7 @@ class FederationHandler(BaseHandler):
# Mark as outlier as we don't have any state for this event; we're not
# even in the room.
event.internal_metadata.outlier = True
event = self._sign_event(event)
event.internal_metadata.out_of_band_membership = True
# Try the host that we successfully called /make_leave/ on first for
# the /send_leave/ request.
@@ -1336,7 +1345,7 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
def _make_and_verify_event(self, target_hosts, room_id, user_id, membership,
content={}, params=None):
origin, pdu = yield self.federation_client.make_membership_event(
origin, event, format_ver = yield self.federation_client.make_membership_event(
target_hosts,
room_id,
user_id,
@@ -1345,9 +1354,7 @@ class FederationHandler(BaseHandler):
params=params,
)
logger.debug("Got response to make_%s: %s", membership, pdu)
event = pdu
logger.debug("Got response to make_%s: %s", membership, event)
# We should assert some things.
# FIXME: Do this in a nicer way
@@ -1355,28 +1362,7 @@ class FederationHandler(BaseHandler):
assert(event.user_id == user_id)
assert(event.state_key == user_id)
assert(event.room_id == room_id)
defer.returnValue((origin, event))
def _sign_event(self, event):
event.internal_metadata.outlier = False
builder = self.event_builder_factory.new(
unfreeze(event.get_pdu_json())
)
builder.event_id = self.event_builder_factory.create_event_id()
builder.origin = self.hs.hostname
if not hasattr(event, "signatures"):
builder.signatures = {}
add_hashes_and_signatures(
builder,
self.hs.hostname,
self.hs.config.signing_key[0],
)
return builder.build()
defer.returnValue((origin, event, format_ver))
@defer.inlineCallbacks
@log_function
@@ -1385,13 +1371,17 @@ class FederationHandler(BaseHandler):
leave event for the room and return that. We do *not* persist or
process it until the other server has signed it and sent it back.
"""
builder = self.event_builder_factory.new({
"type": EventTypes.Member,
"content": {"membership": Membership.LEAVE},
"room_id": room_id,
"sender": user_id,
"state_key": user_id,
})
room_version = yield self.store.get_room_version(room_id)
builder = self.event_builder_factory.new(
room_version,
{
"type": EventTypes.Member,
"content": {"membership": Membership.LEAVE},
"room_id": room_id,
"sender": user_id,
"state_key": user_id,
}
)
event, context = yield self.event_creation_handler.create_new_client_event(
builder=builder,
@@ -1400,7 +1390,9 @@ class FederationHandler(BaseHandler):
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_leave_request`
yield self.auth.check_from_context(event, context, do_sig_check=False)
yield self.auth.check_from_context(
room_version, event, context, do_sig_check=False,
)
except AuthError as e:
logger.warn("Failed to create new leave %r because %s", event, e)
raise e
@@ -1659,6 +1651,13 @@ class FederationHandler(BaseHandler):
create_event = e
break
if create_event is None:
# If the state doesn't have a create event then the room is
# invalid, and it would fail auth checks anyway.
raise SynapseError(400, "No create event in state")
room_version = create_event.content.get("room_version", RoomVersions.V1)
missing_auth_events = set()
for e in itertools.chain(auth_events, state, [event]):
for e_id in e.auth_event_ids():
@@ -1669,6 +1668,7 @@ class FederationHandler(BaseHandler):
m_ev = yield self.federation_client.get_pdu(
[origin],
e_id,
room_version=room_version,
outlier=True,
timeout=10000,
)
@@ -1687,7 +1687,7 @@ class FederationHandler(BaseHandler):
auth_for_e[(EventTypes.Create, "")] = create_event
try:
self.auth.check(e, auth_events=auth_for_e)
self.auth.check(room_version, e, auth_events=auth_for_e)
except SynapseError as err:
# we may get SynapseErrors here as well as AuthErrors. For
# instance, there are a couple of (ancient) events in some
@@ -1931,6 +1931,8 @@ class FederationHandler(BaseHandler):
current_state = set(e.event_id for e in auth_events.values())
different_auth = event_auth_events - current_state
room_version = yield self.store.get_room_version(event.room_id)
if different_auth and not event.internal_metadata.is_outlier():
# Do auth conflict res.
logger.info("Different auth: %s", different_auth)
@@ -1955,8 +1957,6 @@ class FederationHandler(BaseHandler):
(d.type, d.state_key): d for d in different_events if d
})
room_version = yield self.store.get_room_version(event.room_id)
new_state = yield self.state_handler.resolve_events(
room_version,
[list(local_view.values()), list(remote_view.values())],
@@ -2056,7 +2056,7 @@ class FederationHandler(BaseHandler):
)
try:
self.auth.check(event, auth_events=auth_events)
self.auth.check(room_version, event, auth_events=auth_events)
except AuthError as e:
logger.warn("Failed auth resolution for %r because %s", event, e)
raise e
@@ -2279,18 +2279,26 @@ class FederationHandler(BaseHandler):
}
if (yield self.auth.check_host_in_room(room_id, self.hs.hostname)):
builder = self.event_builder_factory.new(event_dict)
EventValidator().validate_new(builder)
room_version = yield self.store.get_room_version(room_id)
builder = self.event_builder_factory.new(room_version, event_dict)
EventValidator().validate_builder(builder)
event, context = yield self.event_creation_handler.create_new_client_event(
builder=builder
)
event, context = yield self.add_display_name_to_third_party_invite(
event_dict, event, context
room_version, event_dict, event, context
)
EventValidator().validate_new(event)
# We need to tell the transaction queue to send this out, even
# though the sender isn't a local user.
event.internal_metadata.send_on_behalf_of = self.hs.hostname
try:
yield self.auth.check_from_context(event, context)
yield self.auth.check_from_context(room_version, event, context)
except AuthError as e:
logger.warn("Denying new third party invite %r because %s", event, e)
raise e
@@ -2317,23 +2325,31 @@ class FederationHandler(BaseHandler):
Returns:
Deferred: resolves (to None)
"""
builder = self.event_builder_factory.new(event_dict)
room_version = yield self.store.get_room_version(room_id)
# NB: event_dict has a particular specced format we might need to fudge
# if we change event formats too much.
builder = self.event_builder_factory.new(room_version, event_dict)
event, context = yield self.event_creation_handler.create_new_client_event(
builder=builder,
)
event, context = yield self.add_display_name_to_third_party_invite(
event_dict, event, context
room_version, event_dict, event, context
)
try:
self.auth.check_from_context(event, context)
self.auth.check_from_context(room_version, event, context)
except AuthError as e:
logger.warn("Denying third party invite %r because %s", event, e)
raise e
yield self._check_signature(event, context)
# We need to tell the transaction queue to send this out, even
# though the sender isn't a local user.
event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)
# XXX we send the invite here, but send_membership_event also sends it,
# so we end up making two requests. I think this is redundant.
returned_invite = yield self.send_invite(origin, event)
@@ -2344,7 +2360,8 @@ class FederationHandler(BaseHandler):
yield member_handler.send_membership_event(None, event, context)
@defer.inlineCallbacks
def add_display_name_to_third_party_invite(self, event_dict, event, context):
def add_display_name_to_third_party_invite(self, room_version, event_dict,
event, context):
key = (
EventTypes.ThirdPartyInvite,
event.content["third_party_invite"]["signed"]["token"]
@@ -2368,11 +2385,12 @@ class FederationHandler(BaseHandler):
# auth checks. If we need the invite and don't have it then the
# auth check code will explode appropriately.
builder = self.event_builder_factory.new(event_dict)
EventValidator().validate_new(builder)
builder = self.event_builder_factory.new(room_version, event_dict)
EventValidator().validate_builder(builder)
event, context = yield self.event_creation_handler.create_new_client_event(
builder=builder,
)
EventValidator().validate_new(event)
defer.returnValue((event, context))
@defer.inlineCallbacks

View File

@@ -167,18 +167,21 @@ class IdentityHandler(BaseHandler):
"mxid": mxid,
"threepid": threepid,
}
headers = {}
# we abuse the federation http client to sign the request, but we have to send it
# using the normal http client since we don't want the SRV lookup and want normal
# 'browser-like' HTTPS.
self.federation_http_client.sign_request(
auth_headers = self.federation_http_client.build_auth_headers(
destination=None,
method='POST',
url_bytes='/_matrix/identity/api/v1/3pid/unbind'.encode('ascii'),
headers_dict=headers,
content=content,
destination_is=id_server,
)
headers = {
b"Authorization": auth_headers,
}
try:
yield self.http_client.post_json_get_json(
url,

View File

@@ -22,7 +22,7 @@ from canonicaljson import encode_canonical_json, json
from twisted.internet import defer
from twisted.internet.defer import succeed
from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.constants import EventTypes, Membership, RoomVersions
from synapse.api.errors import (
AuthError,
Codes,
@@ -31,7 +31,6 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.api.urls import ConsentURIBuilder
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
@@ -278,9 +277,17 @@ class EventCreationHandler(object):
"""
yield self.auth.check_auth_blocking(requester.user.to_string())
builder = self.event_builder_factory.new(event_dict)
if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
room_version = event_dict["content"]["room_version"]
else:
try:
room_version = yield self.store.get_room_version(event_dict["room_id"])
except NotFoundError:
raise AuthError(403, "Unknown room")
self.validator.validate_new(builder)
builder = self.event_builder_factory.new(room_version, event_dict)
self.validator.validate_builder(builder)
if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
@@ -318,6 +325,8 @@ class EventCreationHandler(object):
prev_events_and_hashes=prev_events_and_hashes,
)
self.validator.validate_new(event)
defer.returnValue((event, context))
def _is_exempt_from_privacy_policy(self, builder, requester):
@@ -535,40 +544,19 @@ class EventCreationHandler(object):
prev_events_and_hashes = \
yield self.store.get_prev_events_for_room(builder.room_id)
if prev_events_and_hashes:
depth = max([d for _, _, d in prev_events_and_hashes]) + 1
# we cap depth of generated events, to ensure that they are not
# rejected by other servers (and so that they can be persisted in
# the db)
depth = min(depth, MAX_DEPTH)
else:
depth = 1
prev_events = [
(event_id, prev_hashes)
for event_id, prev_hashes, _ in prev_events_and_hashes
]
builder.prev_events = prev_events
builder.depth = depth
context = yield self.state.compute_event_context(builder)
event = yield builder.build(
prev_event_ids=[p for p, _ in prev_events],
)
context = yield self.state.compute_event_context(event)
if requester:
context.app_service = requester.app_service
if builder.is_state():
builder.prev_state = yield self.store.add_event_hashes(
context.prev_state_events
)
yield self.auth.add_auth_events(builder, context)
signing_key = self.hs.config.signing_key[0]
add_hashes_and_signatures(
builder, self.server_name, signing_key
)
event = builder.build()
self.validator.validate_new(event)
logger.debug(
"Created event %s",
@@ -603,8 +591,13 @@ class EventCreationHandler(object):
extra_users (list(UserID)): Any extra users to notify about event
"""
if event.is_state() and (event.type, event.state_key) == (EventTypes.Create, ""):
room_version = event.content.get("room_version", RoomVersions.V1)
else:
room_version = yield self.store.get_room_version(event.room_id)
try:
yield self.auth.check_from_context(event, context)
yield self.auth.check_from_context(room_version, event, context)
except AuthError as err:
logger.warn("Denying new event %r because %s", event, err)
raise err
@@ -752,7 +745,8 @@ class EventCreationHandler(object):
auth_events = {
(e.type, e.state_key): e for e in auth_events.values()
}
if self.auth.check_redaction(event, auth_events=auth_events):
room_version = yield self.store.get_room_version(event.room_id)
if self.auth.check_redaction(room_version, event, auth_events=auth_events):
original_event = yield self.store.get_event(
event.redacts,
check_redacted=False,
@@ -766,6 +760,9 @@ class EventCreationHandler(object):
"You don't have permission to redact events"
)
# We've already checked.
event.internal_metadata.recheck_redaction = False
if event.type == EventTypes.Create:
prev_state_ids = yield context.get_prev_state_ids(self.store)
if prev_state_ids:

View File

@@ -123,9 +123,12 @@ class RoomCreationHandler(BaseHandler):
token_id=requester.access_token_id,
)
)
yield self.auth.check_from_context(tombstone_event, tombstone_context)
old_room_version = yield self.store.get_room_version(old_room_id)
yield self.auth.check_from_context(
old_room_version, tombstone_event, tombstone_context,
)
yield self.clone_exiting_room(
yield self.clone_existing_room(
requester,
old_room_id=old_room_id,
new_room_id=new_room_id,
@@ -230,7 +233,7 @@ class RoomCreationHandler(BaseHandler):
)
@defer.inlineCallbacks
def clone_exiting_room(
def clone_existing_room(
self, requester, old_room_id, new_room_id, new_room_version,
tombstone_event_id,
):
@@ -262,6 +265,7 @@ class RoomCreationHandler(BaseHandler):
initial_state = dict()
# Replicate relevant room events
types_to_copy = (
(EventTypes.JoinRules, ""),
(EventTypes.Name, ""),
@@ -269,6 +273,7 @@ class RoomCreationHandler(BaseHandler):
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.GuestAccess, ""),
(EventTypes.RoomAvatar, ""),
(EventTypes.Encryption, ""),
)
old_room_state_ids = yield self.store.get_filtered_current_state_ids(

View File

@@ -73,8 +73,14 @@ class RoomListHandler(BaseHandler):
# We explicitly don't bother caching searches or requests for
# appservice specific lists.
logger.info("Bypassing cache as search request.")
# XXX: Quick hack to stop room directory queries taking too long.
# Timeout request after 60s. Probably want a more fundamental
# solution at some point
timeout = self.clock.time() + 60
return self._get_public_room_list(
limit, since_token, search_filter, network_tuple=network_tuple,
limit, since_token, search_filter,
network_tuple=network_tuple, timeout=timeout,
)
key = (limit, since_token, network_tuple)
@@ -87,7 +93,8 @@ class RoomListHandler(BaseHandler):
@defer.inlineCallbacks
def _get_public_room_list(self, limit=None, since_token=None,
search_filter=None,
network_tuple=EMPTY_THIRD_PARTY_ID,):
network_tuple=EMPTY_THIRD_PARTY_ID,
timeout=None,):
if since_token and since_token != "END":
since_token = RoomListNextBatch.from_token(since_token)
else:
@@ -202,6 +209,9 @@ class RoomListHandler(BaseHandler):
chunk = []
for i in range(0, len(rooms_to_scan), step):
if timeout and self.clock.time() > timeout:
raise Exception("Timed out searching room directory")
batch = rooms_to_scan[i:i + step]
logger.info("Processing %i rooms for result", len(batch))
yield concurrently_execute(

View File

@@ -63,7 +63,7 @@ class RoomMemberHandler(object):
self.directory_handler = hs.get_handlers().directory_handler
self.registration_handler = hs.get_handlers().registration_handler
self.profile_handler = hs.get_profile_handler()
self.event_creation_hander = hs.get_event_creation_handler()
self.event_creation_handler = hs.get_event_creation_handler()
self.member_linearizer = Linearizer(name="member")
@@ -161,6 +161,8 @@ class RoomMemberHandler(object):
ratelimit=True,
content=None,
):
user_id = target.to_string()
if content is None:
content = {}
@@ -168,14 +170,14 @@ class RoomMemberHandler(object):
if requester.is_guest:
content["kind"] = "guest"
event, context = yield self.event_creation_hander.create_event(
event, context = yield self.event_creation_handler.create_event(
requester,
{
"type": EventTypes.Member,
"content": content,
"room_id": room_id,
"sender": requester.user.to_string(),
"state_key": target.to_string(),
"state_key": user_id,
# For backwards compatibility:
"membership": membership,
@@ -186,14 +188,14 @@ class RoomMemberHandler(object):
)
# Check if this event matches the previous membership event for the user.
duplicate = yield self.event_creation_hander.deduplicate_state_event(
duplicate = yield self.event_creation_handler.deduplicate_state_event(
event, context,
)
if duplicate is not None:
# Discard the new event since this membership change is a no-op.
defer.returnValue(duplicate)
yield self.event_creation_hander.handle_new_client_event(
yield self.event_creation_handler.handle_new_client_event(
requester,
event,
context,
@@ -204,12 +206,12 @@ class RoomMemberHandler(object):
prev_state_ids = yield context.get_prev_state_ids(self.store)
prev_member_event_id = prev_state_ids.get(
(EventTypes.Member, target.to_string()),
(EventTypes.Member, user_id),
None
)
if event.membership == Membership.JOIN:
# Only fire user_joined_room if the user has acutally joined the
# Only fire user_joined_room if the user has actually joined the
# room. Don't bother if the user is just changing their profile
# info.
newly_joined = True
@@ -218,6 +220,18 @@ class RoomMemberHandler(object):
newly_joined = prev_member_event.membership != Membership.JOIN
if newly_joined:
yield self._user_joined_room(target, room_id)
# Copy over direct message status and room tags if this is a join
# on an upgraded room
# Check if this is an upgraded room
predecessor = yield self.store.get_room_predecessor(room_id)
if predecessor:
# It is an upgraded room. Copy over old tags
self.copy_room_tags_and_direct_to_room(
predecessor["room_id"], room_id, user_id,
)
elif event.membership == Membership.LEAVE:
if prev_member_event_id:
prev_member_event = yield self.store.get_event(prev_member_event_id)
@@ -226,6 +240,55 @@ class RoomMemberHandler(object):
defer.returnValue(event)
@defer.inlineCallbacks
def copy_room_tags_and_direct_to_room(
self,
old_room_id,
new_room_id,
user_id,
):
"""Copies the tags and direct room state from one room to another.
Args:
old_room_id (str)
new_room_id (str)
user_id (str)
Returns:
Deferred[None]
"""
# Retrieve user account data for predecessor room
user_account_data, _ = yield self.store.get_account_data_for_user(
user_id,
)
# Copy direct message state if applicable
direct_rooms = user_account_data.get("m.direct", {})
# Check which key this room is under
if isinstance(direct_rooms, dict):
for key, room_id_list in direct_rooms.items():
if old_room_id in room_id_list and new_room_id not in room_id_list:
# Add new room_id to this key
direct_rooms[key].append(new_room_id)
# Save back to user's m.direct account data
yield self.store.add_account_data_for_user(
user_id, "m.direct", direct_rooms,
)
break
# Copy room tags if applicable
room_tags = yield self.store.get_tags_for_room(
user_id, old_room_id,
)
# Copy each room tag to the new room
for tag, tag_content in room_tags.items():
yield self.store.add_tag_to_room(
user_id, new_room_id, tag, tag_content
)
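For reference, m.direct account data maps each counterpart's user ID to a list of room IDs, so the copy above simply appends the upgraded room next to its predecessor (illustrative IDs):
# before the upgrade
{"@friend:example.com": ["!old:example.com"]}
# after copy_room_tags_and_direct_to_room
{"@friend:example.com": ["!old:example.com", "!new:example.com"]}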
@defer.inlineCallbacks
def update_membership(
self,
@@ -493,7 +556,7 @@ class RoomMemberHandler(object):
else:
requester = synapse.types.create_requester(target_user)
prev_event = yield self.event_creation_hander.deduplicate_state_event(
prev_event = yield self.event_creation_handler.deduplicate_state_event(
event, context,
)
if prev_event is not None:
@@ -513,7 +576,7 @@ class RoomMemberHandler(object):
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")
yield self.event_creation_hander.handle_new_client_event(
yield self.event_creation_handler.handle_new_client_event(
requester,
event,
context,
@@ -527,7 +590,7 @@ class RoomMemberHandler(object):
)
if event.membership == Membership.JOIN:
# Only fire user_joined_room if the user has acutally joined the
# Only fire user_joined_room if the user has actually joined the
# room. Don't bother if the user is just changing their profile
# info.
newly_joined = True
@@ -755,7 +818,7 @@ class RoomMemberHandler(object):
)
)
yield self.event_creation_hander.create_and_send_nonmember_event(
yield self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.ThirdPartyInvite,
@@ -877,7 +940,8 @@ class RoomMemberHandler(object):
# first member event?
create_event_id = current_state_ids.get(("m.room.create", ""))
if len(current_state_ids) == 1 and create_event_id:
defer.returnValue(self.hs.is_mine_id(create_event_id))
# We can only get here if we're in the process of creating the room
defer.returnValue(True)
for etype, state_key in current_state_ids:
if etype != EventTypes.Member or not self.hs.is_mine_id(state_key):

View File

@@ -37,6 +37,41 @@ class SearchHandler(BaseHandler):
def __init__(self, hs):
super(SearchHandler, self).__init__(hs)
@defer.inlineCallbacks
def get_old_rooms_from_upgraded_room(self, room_id):
"""Retrieves room IDs of old rooms in the history of an upgraded room.
We do so by checking the m.room.create event of the room for a
`predecessor` key. If it exists, we add the room ID to our return
list and then check that room for a m.room.create event and so on
until we can no longer find any more previous rooms.
The full list of all found rooms is then returned.
Args:
room_id (str): id of the room to search through.
Returns:
Deferred[iterable[unicode]]: predecessor room ids
"""
historical_room_ids = []
while True:
predecessor = yield self.store.get_room_predecessor(room_id)
# If no predecessor, assume we've hit a dead end
if not predecessor:
break
# Add predecessor's room ID
historical_room_ids.append(predecessor["room_id"])
# Scan through the old room for further predecessors
room_id = predecessor["room_id"]
defer.returnValue(historical_room_ids)
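The predecessor key being followed here lives in the content of the room's m.room.create event; for an upgraded room it looks roughly like this (illustrative IDs):
{
    "type": "m.room.create",
    "state_key": "",
    "content": {
        "room_version": "3",
        "predecessor": {
            "room_id": "!old:example.com",
            "event_id": "$last-event-in-old-room"
        }
    }
}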
@defer.inlineCallbacks
def search(self, user, content, batch=None):
"""Performs a full text search for a user.
@@ -137,6 +172,18 @@ class SearchHandler(BaseHandler):
)
room_ids = set(r.room_id for r in rooms)
# If searching over a subset of all rooms, check if any of the rooms
# are from an upgraded room, and search their contents as well
if search_filter.rooms:
historical_room_ids = []
for room_id in search_filter.rooms:
# Add any previous rooms to the search if they exist
ids = yield self.get_old_rooms_from_upgraded_room(room_id)
historical_room_ids += ids
# Prevent any historical events from being filtered
search_filter = search_filter.with_room_ids(historical_room_ids)
room_ids = search_filter.filter_rooms(room_ids)
if batch_group == "room_id":

View File

@@ -1473,10 +1473,22 @@ class SyncHandler(object):
if since_token and since_token.is_after(leave_token):
continue
# If this is an out of band message, like a remote invite
# rejection, we include it in the recents batch. Otherwise, we
# let _load_filtered_recents handle fetching the correct
# batches.
#
# This is all screaming out for a refactor, as the logic here is
# subtle and the moving parts numerous.
if leave_event.internal_metadata.is_out_of_band_membership():
batch_events = [leave_event]
else:
batch_events = None
room_entries.append(RoomSyncResultBuilder(
room_id=room_id,
rtype="archived",
events=None,
events=batch_events,
newly_joined=room_id in newly_joined_rooms,
full_state=False,
since_token=since_token,

View File

@@ -19,6 +19,7 @@ from six import iteritems
from twisted.internet import defer
import synapse.metrics
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.roommember import ProfileInfo
@@ -163,6 +164,11 @@ class UserDirectoryHandler(object):
yield self._handle_deltas(deltas)
self.pos = deltas[-1]["stream_id"]
# Expose current event processing position to prometheus
synapse.metrics.event_processing_positions.labels(
"user_dir").set(self.pos)
yield self.store.update_user_directory_stream_pos(self.pos)
@defer.inlineCallbacks

View File

@@ -333,9 +333,10 @@ class SimpleHttpClient(object):
"POST", uri, headers=Headers(actual_headers), data=query_bytes
)
body = yield make_deferred_yieldable(readBody(response))
if 200 <= response.code < 300:
body = yield make_deferred_yieldable(treq.json_content(response))
defer.returnValue(body)
defer.returnValue(json.loads(body))
else:
raise HttpResponseException(response.code, response.phrase, body)

View File

@@ -12,30 +12,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import random
import re
import time
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.error import ConnectError
from twisted.names import client, dns
from twisted.names.error import DNSNameError, DomainError
logger = logging.getLogger(__name__)
SERVER_CACHE = {}
# our record of an individual server which can be tried to reach a destination.
#
# "host" is the hostname acquired from the SRV record. Except when there's
# no SRV record, in which case it is the original hostname.
_Server = collections.namedtuple(
"_Server", "priority weight host port expires"
)
def parse_server_name(server_name):
"""Split a server name into host/port parts.
@@ -100,264 +81,3 @@ def parse_and_validate_server_name(server_name):
))
return host, port
def matrix_federation_endpoint(reactor, destination, tls_client_options_factory=None,
timeout=None):
"""Construct an endpoint for the given matrix destination.
Args:
reactor: Twisted reactor.
destination (unicode): The name of the server to connect to.
tls_client_options_factory
(synapse.crypto.context_factory.ClientTLSOptionsFactory):
Factory which generates TLS options for client connections.
timeout (int): connection timeout in seconds
"""
domain, port = parse_server_name(destination)
endpoint_kw_args = {}
if timeout is not None:
endpoint_kw_args.update(timeout=timeout)
if tls_client_options_factory is None:
transport_endpoint = HostnameEndpoint
default_port = 8008
else:
# the SNI string should be the same as the Host header, minus the port.
# as per https://github.com/matrix-org/synapse/issues/2525#issuecomment-336896777,
# the Host header and SNI should therefore be the server_name of the remote
# server.
tls_options = tls_client_options_factory.get_options(domain)
def transport_endpoint(reactor, host, port, timeout):
return wrapClientTLS(
tls_options,
HostnameEndpoint(reactor, host, port, timeout=timeout),
)
default_port = 8448
if port is None:
return _WrappingEndpointFac(SRVClientEndpoint(
reactor, "matrix", domain, protocol="tcp",
default_port=default_port, endpoint=transport_endpoint,
endpoint_kw_args=endpoint_kw_args
), reactor)
else:
return _WrappingEndpointFac(transport_endpoint(
reactor, domain, port, **endpoint_kw_args
), reactor)
class _WrappingEndpointFac(object):
def __init__(self, endpoint_fac, reactor):
self.endpoint_fac = endpoint_fac
self.reactor = reactor
@defer.inlineCallbacks
def connect(self, protocolFactory):
conn = yield self.endpoint_fac.connect(protocolFactory)
conn = _WrappedConnection(conn, self.reactor)
defer.returnValue(conn)
class _WrappedConnection(object):
"""Wraps a connection and calls abort on it if it hasn't seen any action
for 2.5-3 minutes.
"""
__slots__ = ["conn", "last_request"]
def __init__(self, conn, reactor):
object.__setattr__(self, "conn", conn)
object.__setattr__(self, "last_request", time.time())
self._reactor = reactor
def __getattr__(self, name):
return getattr(self.conn, name)
def __setattr__(self, name, value):
setattr(self.conn, name, value)
def _time_things_out_maybe(self):
# We use a slightly shorter timeout here just in case the callLater is
# triggered early. Paranoia ftw.
# TODO: Cancel the previous callLater rather than comparing time.time()?
if time.time() - self.last_request >= 2.5 * 60:
self.abort()
# Abort the underlying TLS connection. The abort() method calls
# loseConnection() on the TLS connection which tries to
# shutdown the connection cleanly. We call abortConnection()
# since that will promptly close the TLS connection.
#
# In Twisted >18.4; the TLS connection will be None if it has closed
# which will make abortConnection() throw. Check that the TLS connection
# is not None before trying to close it.
if self.transport.getHandle() is not None:
self.transport.abortConnection()
def request(self, request):
self.last_request = time.time()
# Time this connection out if we haven't sent a request in the last
# N minutes
# TODO: Cancel the previous callLater?
self._reactor.callLater(3 * 60, self._time_things_out_maybe)
d = self.conn.request(request)
def update_request_time(res):
self.last_request = time.time()
# TODO: Cancel the previous callLater?
self._reactor.callLater(3 * 60, self._time_things_out_maybe)
return res
d.addCallback(update_request_time)
return d
class SRVClientEndpoint(object):
"""An endpoint which looks up SRV records for a service.
Cycles through the list of servers starting with each call to connect
picking the next server.
Implements twisted.internet.interfaces.IStreamClientEndpoint.
"""
def __init__(self, reactor, service, domain, protocol="tcp",
default_port=None, endpoint=HostnameEndpoint,
endpoint_kw_args={}):
self.reactor = reactor
self.service_name = "_%s._%s.%s" % (service, protocol, domain)
if default_port is not None:
self.default_server = _Server(
host=domain,
port=default_port,
priority=0,
weight=0,
expires=0,
)
else:
self.default_server = None
self.endpoint = endpoint
self.endpoint_kw_args = endpoint_kw_args
self.servers = None
self.used_servers = None
@defer.inlineCallbacks
def fetch_servers(self):
self.used_servers = []
self.servers = yield resolve_service(self.service_name)
def pick_server(self):
if not self.servers:
if self.used_servers:
self.servers = self.used_servers
self.used_servers = []
self.servers.sort()
elif self.default_server:
return self.default_server
else:
raise ConnectError(
"No server available for %s" % self.service_name
)
# look for all servers with the same priority
min_priority = self.servers[0].priority
weight_indexes = list(
(index, server.weight + 1)
for index, server in enumerate(self.servers)
if server.priority == min_priority
)
total_weight = sum(weight for index, weight in weight_indexes)
target_weight = random.randint(0, total_weight)
for index, weight in weight_indexes:
target_weight -= weight
if target_weight <= 0:
server = self.servers[index]
# XXX: this looks totally dubious:
#
# (a) we never reuse a server until we have been through
# all of the servers at the same priority, so if the
# weights are A: 100, B:1, we always do ABABAB instead of
# AAAA...AAAB (approximately).
#
# (b) After using all the servers at the lowest priority,
# we move onto the next priority. We should only use the
# second priority if servers at the top priority are
# unreachable.
#
del self.servers[index]
self.used_servers.append(server)
return server
@defer.inlineCallbacks
def connect(self, protocolFactory):
if self.servers is None:
yield self.fetch_servers()
server = self.pick_server()
logger.info("Connecting to %s:%s", server.host, server.port)
endpoint = self.endpoint(
self.reactor, server.host, server.port, **self.endpoint_kw_args
)
connection = yield endpoint.connect(protocolFactory)
defer.returnValue(connection)
@defer.inlineCallbacks
def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=time):
cache_entry = cache.get(service_name, None)
if cache_entry:
if all(s.expires > int(clock.time()) for s in cache_entry):
servers = list(cache_entry)
defer.returnValue(servers)
servers = []
try:
try:
answers, _, _ = yield dns_client.lookupService(service_name)
except DNSNameError:
defer.returnValue([])
if (len(answers) == 1
and answers[0].type == dns.SRV
and answers[0].payload
and answers[0].payload.target == dns.Name(b'.')):
raise ConnectError("Service %s unavailable" % service_name)
for answer in answers:
if answer.type != dns.SRV or not answer.payload:
continue
payload = answer.payload
servers.append(_Server(
host=str(payload.target),
port=int(payload.port),
priority=int(payload.priority),
weight=int(payload.weight),
expires=int(clock.time()) + answer.ttl,
))
servers.sort()
cache[service_name] = list(servers)
except DomainError as e:
# We failed to resolve the name (other than a NameError)
# Try something in the cache, else re-raise
cache_entry = cache.get(service_name, None)
if cache_entry:
logger.warn(
"Failed to resolve %r, falling back to cache. %r",
service_name, e
)
servers = list(cache_entry)
else:
raise e
defer.returnValue(servers)

View File

@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -0,0 +1,424 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import random
import time
import attr
from netaddr import IPAddress
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.web.client import URI, Agent, HTTPConnectionPool, RedirectAgent, readBody
from twisted.web.http import stringToDatetime
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent
from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
from synapse.util.caches.ttlcache import TTLCache
from synapse.util.logcontext import make_deferred_yieldable
# period to cache .well-known results for by default
WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600
# jitter to add to the .well-known default cache ttl
WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60
# period to cache failure to fetch .well-known for
WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600
# cap for .well-known cache period
WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600
logger = logging.getLogger(__name__)
well_known_cache = TTLCache('well-known')
@implementer(IAgent)
class MatrixFederationAgent(object):
"""An Agent-like thing which provides a `request` method which will look up a matrix
server and send an HTTP request to it.
Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.)
Args:
reactor (IReactor): twisted reactor to use for underlying requests
tls_client_options_factory (ClientTLSOptionsFactory|None):
factory to use for fetching client tls options, or none to disable TLS.
_well_known_tls_policy (IPolicyForHTTPS|None):
TLS policy to use for fetching .well-known files. None to use a default
(browser-like) implementation.
_srv_resolver (SrvResolver|None):
SRVResolver impl to use for looking up SRV records. None to use a default
implementation.
"""
def __init__(
self, reactor, tls_client_options_factory,
_well_known_tls_policy=None,
_srv_resolver=None,
_well_known_cache=well_known_cache,
):
self._reactor = reactor
self._tls_client_options_factory = tls_client_options_factory
if _srv_resolver is None:
_srv_resolver = SrvResolver()
self._srv_resolver = _srv_resolver
self._pool = HTTPConnectionPool(reactor)
self._pool.retryAutomatically = False
self._pool.maxPersistentPerHost = 5
self._pool.cachedConnectionTimeout = 2 * 60
agent_args = {}
if _well_known_tls_policy is not None:
# the param is called 'contextFactory', but actually passing a
# contextfactory is deprecated, and it expects an IPolicyForHTTPS.
agent_args['contextFactory'] = _well_known_tls_policy
_well_known_agent = RedirectAgent(
Agent(self._reactor, pool=self._pool, **agent_args),
)
self._well_known_agent = _well_known_agent
self._well_known_cache = _well_known_cache
@defer.inlineCallbacks
def request(self, method, uri, headers=None, bodyProducer=None):
"""
Args:
method (bytes): HTTP method: GET/POST/etc
uri (bytes): Absolute URI to be retrieved
headers (twisted.web.http_headers.Headers|None):
HTTP headers to send with the request, or None to
send no extra headers.
bodyProducer (twisted.web.iweb.IBodyProducer|None):
An object which can generate bytes to make up the
body of this request (for example, the properly encoded contents of
a file for a file upload). Or None if the request is to have
no body.
Returns:
Deferred[twisted.web.iweb.IResponse]:
fires when the header of the response has been received (regardless of the
response status code). Fails if there is any problem which prevents that
response from being received (including problems that prevent the request
from being sent).
"""
parsed_uri = URI.fromBytes(uri, defaultPort=-1)
res = yield self._route_matrix_uri(parsed_uri)
# set up the TLS connection params
#
# XXX disabling TLS is really only supported here for the benefit of the
# unit tests. We should make the UTs cope with TLS rather than having to make
# the code support the unit tests.
if self._tls_client_options_factory is None:
tls_options = None
else:
tls_options = self._tls_client_options_factory.get_options(
res.tls_server_name.decode("ascii")
)
# make sure that the Host header is set correctly
if headers is None:
headers = Headers()
else:
headers = headers.copy()
if not headers.hasHeader(b'host'):
headers.addRawHeader(b'host', res.host_header)
class EndpointFactory(object):
@staticmethod
def endpointForURI(_uri):
logger.info(
"Connecting to %s:%i",
res.target_host.decode("ascii"),
res.target_port,
)
ep = HostnameEndpoint(self._reactor, res.target_host, res.target_port)
if tls_options is not None:
ep = wrapClientTLS(tls_options, ep)
return ep
agent = Agent.usingEndpointFactory(self._reactor, EndpointFactory(), self._pool)
res = yield make_deferred_yieldable(
agent.request(method, uri, headers, bodyProducer)
)
defer.returnValue(res)
@defer.inlineCallbacks
def _route_matrix_uri(self, parsed_uri, lookup_well_known=True):
"""Helper for `request`: determine the routing for a Matrix URI
Args:
parsed_uri (twisted.web.client.URI): uri to route. Note that it should be
parsed with URI.fromBytes(uri, defaultPort=-1) to set the `port` to -1
if there is no explicit port given.
lookup_well_known (bool): True if we should look up the .well-known file if
there is no SRV record.
Returns:
Deferred[_RoutingResult]
"""
# check for an IP literal
try:
ip_address = IPAddress(parsed_uri.host.decode("ascii"))
except Exception:
# not an IP address
ip_address = None
if ip_address:
port = parsed_uri.port
if port == -1:
port = 8448
defer.returnValue(_RoutingResult(
host_header=parsed_uri.netloc,
tls_server_name=parsed_uri.host,
target_host=parsed_uri.host,
target_port=port,
))
if parsed_uri.port != -1:
# there is an explicit port
defer.returnValue(_RoutingResult(
host_header=parsed_uri.netloc,
tls_server_name=parsed_uri.host,
target_host=parsed_uri.host,
target_port=parsed_uri.port,
))
# try a SRV lookup
service_name = b"_matrix._tcp.%s" % (parsed_uri.host,)
server_list = yield self._srv_resolver.resolve_service(service_name)
if not server_list and lookup_well_known:
# try a .well-known lookup
well_known_server = yield self._get_well_known(parsed_uri.host)
if well_known_server:
# if we found a .well-known, start again, but don't do another
# .well-known lookup.
# parse the server name in the .well-known response into host/port.
# (This code is lifted from twisted.web.client.URI.fromBytes).
if b':' in well_known_server:
well_known_host, well_known_port = well_known_server.rsplit(b':', 1)
try:
well_known_port = int(well_known_port)
except ValueError:
# the part after the colon could not be parsed as an int
# - we assume it is an IPv6 literal with no port (the closing
# ']' stops it being parsed as an int)
well_known_host, well_known_port = well_known_server, -1
else:
well_known_host, well_known_port = well_known_server, -1
new_uri = URI(
scheme=parsed_uri.scheme,
netloc=well_known_server,
host=well_known_host,
port=well_known_port,
path=parsed_uri.path,
params=parsed_uri.params,
query=parsed_uri.query,
fragment=parsed_uri.fragment,
)
res = yield self._route_matrix_uri(new_uri, lookup_well_known=False)
defer.returnValue(res)
if not server_list:
target_host = parsed_uri.host
port = 8448
logger.debug(
"No SRV record for %s, using %s:%i",
parsed_uri.host.decode("ascii"), target_host.decode("ascii"), port,
)
else:
target_host, port = pick_server_from_list(server_list)
logger.debug(
"Picked %s:%i from SRV records for %s",
target_host.decode("ascii"), port, parsed_uri.host.decode("ascii"),
)
defer.returnValue(_RoutingResult(
host_header=parsed_uri.netloc,
tls_server_name=parsed_uri.host,
target_host=target_host,
target_port=port,
))
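To summarise the routing precedence implemented above (scheme shown as matrix://, as used by the federation client; hostnames are illustrative, not from the change):
# matrix://10.1.2.3          -> connect to 10.1.2.3:8448; Host/SNI "10.1.2.3"
# matrix://example.com:8080  -> explicit port wins; connect to example.com:8080
# matrix://example.com       -> try SRV _matrix._tcp.example.com; if none,
#                               try https://example.com/.well-known/matrix/server
#                               and re-route to its m.server (without a second
#                               .well-known lookup); otherwise example.com:8448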
@defer.inlineCallbacks
def _get_well_known(self, server_name):
"""Attempt to fetch and parse a .well-known file for the given server
Args:
server_name (bytes): name of the server, from the requested url
Returns:
Deferred[bytes|None]: either the new server name, from the .well-known, or
None if there was no .well-known file.
"""
try:
cached = self._well_known_cache[server_name]
defer.returnValue(cached)
except KeyError:
pass
# TODO: should we linearise so that we don't end up doing two .well-known requests
# for the same server in parallel?
uri = b"https://%s/.well-known/matrix/server" % (server_name, )
uri_str = uri.decode("ascii")
logger.info("Fetching %s", uri_str)
try:
response = yield make_deferred_yieldable(
self._well_known_agent.request(b"GET", uri),
)
body = yield make_deferred_yieldable(readBody(response))
if response.code != 200:
raise Exception("Non-200 response %s" % (response.code, ))
except Exception as e:
logger.info("Error fetching %s: %s", uri_str, e)
# add some randomness to the TTL to avoid a stampeding herd every hour
# after startup
cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
self._well_known_cache.set(server_name, None, cache_period)
defer.returnValue(None)
try:
parsed_body = json.loads(body.decode('utf-8'))
logger.info("Response from .well-known: %s", parsed_body)
if not isinstance(parsed_body, dict):
raise Exception("not a dict")
if "m.server" not in parsed_body:
raise Exception("Missing key 'm.server'")
except Exception as e:
raise Exception("invalid .well-known response from %s: %s" % (uri_str, e,))
result = parsed_body["m.server"].encode("ascii")
cache_period = _cache_period_from_headers(
response.headers,
time_now=self._reactor.seconds,
)
if cache_period is None:
cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
# add some randomness to the TTL to avoid a stampeding herd every 24 hours
# after startup
cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
else:
cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
if cache_period > 0:
self._well_known_cache.set(server_name, result, cache_period)
defer.returnValue(result)
def _cache_period_from_headers(headers, time_now=time.time):
cache_controls = _parse_cache_control(headers)
if b'no-store' in cache_controls:
return 0
if b'max-age' in cache_controls:
try:
max_age = int(cache_controls[b'max-age'])
return max_age
except ValueError:
pass
expires = headers.getRawHeaders(b'expires')
if expires is not None:
try:
expires_date = stringToDatetime(expires[-1])
return expires_date - time_now()
except ValueError:
# RFC7234 says 'A cache recipient MUST interpret invalid date formats,
# especially the value "0", as representing a time in the past (i.e.,
# "already expired").
return 0
return None
def _parse_cache_control(headers):
cache_controls = {}
for hdr in headers.getRawHeaders(b'cache-control', []):
for directive in hdr.split(b','):
splits = [x.strip() for x in directive.split(b'=', 1)]
k = splits[0].lower()
v = splits[1] if len(splits) > 1 else None
cache_controls[k] = v
return cache_controls
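A quick illustration of the two helpers above (a sketch, assuming twisted's Headers):
from twisted.web.http_headers import Headers

headers = Headers({b"Cache-Control": [b"max-age=3600, no-transform"]})
# -> {b"max-age": b"3600", b"no-transform": None}
print(_parse_cache_control(headers))
# -> 3600; the caller then caps this at WELL_KNOWN_MAX_CACHE_PERIOD
print(_cache_period_from_headers(headers))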
@attr.s
class _RoutingResult(object):
"""The result returned by `_route_matrix_uri`.
Contains the parameters needed to direct a federation connection to a particular
server.
Where a SRV record points to several servers, this object contains a single server
chosen from the list.
"""
host_header = attr.ib()
"""
The value we should assign to the Host header (host:port from the matrix
URI, or .well-known).
:type: bytes
"""
tls_server_name = attr.ib()
"""
The server name we should set in the SNI (typically host, without port, from the
matrix URI or .well-known)
:type: bytes
"""
target_host = attr.ib()
"""
The hostname (or IP literal) we should route the TCP connection to (the target of the
SRV record, or the hostname from the URL/.well-known)
:type: bytes
"""
target_port = attr.ib()
"""
The port we should route the TCP connection to (the target of the SRV record, or
the port from the URL/.well-known, or 8448)
:type: int
"""

View File

@@ -0,0 +1,169 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import time
import attr
from twisted.internet import defer
from twisted.internet.error import ConnectError
from twisted.names import client, dns
from twisted.names.error import DNSNameError, DomainError
from synapse.util.logcontext import make_deferred_yieldable
logger = logging.getLogger(__name__)
SERVER_CACHE = {}
@attr.s
class Server(object):
"""
Our record of an individual server that can be tried in order to reach a destination.
Attributes:
host (bytes): target hostname
port (int):
priority (int):
weight (int):
expires (int): when the cache should expire this record - in *seconds* since
the epoch
"""
host = attr.ib()
port = attr.ib()
priority = attr.ib(default=0)
weight = attr.ib(default=0)
expires = attr.ib(default=0)
def pick_server_from_list(server_list):
"""Randomly choose a server from the server list
Args:
server_list (list[Server]): list of candidate servers
Returns:
Tuple[bytes, int]: (host, port) pair for the chosen server
"""
if not server_list:
raise RuntimeError("pick_server_from_list called with empty list")
# TODO: currently we only use the lowest-priority servers. We should maintain a
# cache of servers known to be "down" and filter them out
min_priority = min(s.priority for s in server_list)
eligible_servers = list(s for s in server_list if s.priority == min_priority)
total_weight = sum(s.weight for s in eligible_servers)
target_weight = random.randint(0, total_weight)
for s in eligible_servers:
target_weight -= s.weight
if target_weight <= 0:
return s.host, s.port
# this should be impossible.
raise RuntimeError(
"pick_server_from_list got to end of eligible server list.",
)
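A small usage sketch (made-up records): only servers with the lowest priority value are eligible, and within that set the pick is weighted:
servers = [
    Server(host=b"a.example.com", port=8448, priority=0, weight=10),
    Server(host=b"b.example.com", port=8448, priority=0, weight=5),
    Server(host=b"c.example.com", port=8448, priority=1, weight=100),
]
host, port = pick_server_from_list(servers)
# c.example.com is never chosen while the priority-0 servers exist;
# a.example.com is picked roughly twice as often as b.example.com.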
class SrvResolver(object):
"""Interface to the dns client to do SRV lookups, with result caching.
The default resolver in twisted.names doesn't do any caching (it has a CacheResolver,
but the cache never gets populated), so we add our own caching layer here.
Args:
dns_client (twisted.internet.interfaces.IResolver): twisted resolver impl
cache (dict): cache object
get_time (callable): clock implementation. Should return seconds since the epoch
"""
def __init__(self, dns_client=client, cache=SERVER_CACHE, get_time=time.time):
self._dns_client = dns_client
self._cache = cache
self._get_time = get_time
@defer.inlineCallbacks
def resolve_service(self, service_name):
"""Look up a SRV record
Args:
service_name (bytes): record to look up
Returns:
Deferred[list[Server]]:
a list of the SRV records, or an empty list if none found
"""
now = int(self._get_time())
if not isinstance(service_name, bytes):
raise TypeError("%r is not a byte string" % (service_name,))
cache_entry = self._cache.get(service_name, None)
if cache_entry:
if all(s.expires > now for s in cache_entry):
servers = list(cache_entry)
defer.returnValue(servers)
try:
answers, _, _ = yield make_deferred_yieldable(
self._dns_client.lookupService(service_name),
)
except DNSNameError:
# TODO: cache this. We can get the SOA out of the exception, and use
# the negative-TTL value.
defer.returnValue([])
except DomainError as e:
# We failed to resolve the name (for some reason other than a NameError).
# Try something from the cache, else re-raise
cache_entry = self._cache.get(service_name, None)
if cache_entry:
logger.warn(
"Failed to resolve %r, falling back to cache. %r",
service_name, e
)
defer.returnValue(list(cache_entry))
else:
raise e
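# Per RFC 2782, a single SRV record whose target is "." means the
# service is decidedly not available at this domain.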
if (len(answers) == 1
and answers[0].type == dns.SRV
and answers[0].payload
and answers[0].payload.target == dns.Name(b'.')):
raise ConnectError("Service %s unavailable" % service_name)
servers = []
for answer in answers:
if answer.type != dns.SRV or not answer.payload:
continue
payload = answer.payload
servers.append(Server(
host=payload.target.name,
port=payload.port,
priority=payload.priority,
weight=payload.weight,
expires=now + answer.ttl,
))
self._cache[service_name] = list(servers)
defer.returnValue(servers)
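# A minimal usage sketch (hypothetical destination; "_matrix._tcp" is the
# standard federation SRV label, and SrvResolver/pick_server_from_list are
# defined above):
#
#   @defer.inlineCallbacks
#   def pick_federation_target(destination):
#       resolver = SrvResolver()
#       servers = yield resolver.resolve_service(b"_matrix._tcp." + destination)
#       if servers:
#           defer.returnValue(pick_server_from_list(servers))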

View File

@@ -32,7 +32,7 @@ from twisted.internet import defer, protocol
from twisted.internet.error import DNSLookupError
from twisted.internet.task import _EPSILON, Cooperator
from twisted.web._newclient import ResponseDone
from twisted.web.client import Agent, FileBodyProducer, HTTPConnectionPool
from twisted.web.client import FileBodyProducer
from twisted.web.http_headers import Headers
import synapse.metrics
@@ -44,7 +44,7 @@ from synapse.api.errors import (
RequestSendFailed,
SynapseError,
)
from synapse.http.endpoint import matrix_federation_endpoint
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.util.async_helpers import timeout_deferred
from synapse.util.logcontext import make_deferred_yieldable
from synapse.util.metrics import Measure
@@ -66,20 +66,6 @@ else:
MAXINT = sys.maxint
class MatrixFederationEndpointFactory(object):
def __init__(self, hs):
self.reactor = hs.get_reactor()
self.tls_client_options_factory = hs.tls_client_options_factory
def endpointForURI(self, uri):
destination = uri.netloc.decode('ascii')
return matrix_federation_endpoint(
self.reactor, destination, timeout=10,
tls_client_options_factory=self.tls_client_options_factory
)
_next_id = 1
@@ -187,12 +173,10 @@ class MatrixFederationHttpClient(object):
self.signing_key = hs.config.signing_key[0]
self.server_name = hs.hostname
reactor = hs.get_reactor()
pool = HTTPConnectionPool(reactor)
pool.retryAutomatically = False
pool.maxPersistentPerHost = 5
pool.cachedConnectionTimeout = 2 * 60
self.agent = Agent.usingEndpointFactory(
reactor, MatrixFederationEndpointFactory(hs), pool=pool
self.agent = MatrixFederationAgent(
hs.get_reactor(),
hs.tls_client_options_factory,
)
self.clock = hs.get_clock()
self._store = hs.get_datastore()
@@ -229,19 +213,18 @@ class MatrixFederationHttpClient(object):
backoff_on_404 (bool): Back off if we get a 404
Returns:
Deferred: resolves with the http response object on success.
Deferred[twisted.web.client.Response]: resolves with the HTTP
response object on success.
Fails with ``HttpResponseException``: if we get an HTTP response
code >= 300 (except 429).
Fails with ``NotRetryingDestination`` if we are not yet ready
to retry this server.
Fails with ``FederationDeniedError`` if this destination
is not on our federation whitelist
Fails with ``RequestSendFailed`` if there were problems connecting to
the remote, due to e.g. DNS failures, connection timeouts etc.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
if timeout:
_sec_timeout = timeout / 1000
@@ -272,7 +255,6 @@ class MatrixFederationHttpClient(object):
headers_dict = {
b"User-Agent": [self.version_string_bytes],
b"Host": [destination_bytes],
}
with limiter:
@@ -299,9 +281,9 @@ class MatrixFederationHttpClient(object):
json = request.get_json()
if json:
headers_dict[b"Content-Type"] = [b"application/json"]
self.sign_request(
auth_headers = self.build_auth_headers(
destination_bytes, method_bytes, url_to_sign_bytes,
headers_dict, json,
json,
)
data = encode_canonical_json(json)
producer = FileBodyProducer(
@@ -310,40 +292,40 @@ class MatrixFederationHttpClient(object):
)
else:
producer = None
self.sign_request(
auth_headers = self.build_auth_headers(
destination_bytes, method_bytes, url_to_sign_bytes,
headers_dict,
)
headers_dict[b"Authorization"] = auth_headers
logger.info(
"{%s} [%s] Sending request: %s %s",
"{%s} [%s] Sending request: %s %s; timeout %fs",
request.txn_id, request.destination, request.method,
url_str,
)
# we don't want all the fancy cookie and redirect handling that
# treq.request gives: just use the raw Agent.
request_deferred = self.agent.request(
method_bytes,
url_bytes,
headers=Headers(headers_dict),
bodyProducer=producer,
)
request_deferred = timeout_deferred(
request_deferred,
timeout=_sec_timeout,
reactor=self.hs.get_reactor(),
url_str, _sec_timeout,
)
try:
with Measure(self.clock, "outbound_request"):
response = yield make_deferred_yieldable(
request_deferred,
# we don't want all the fancy cookie and redirect handling
# that treq.request gives: just use the raw Agent.
request_deferred = self.agent.request(
method_bytes,
url_bytes,
headers=Headers(headers_dict),
bodyProducer=producer,
)
request_deferred = timeout_deferred(
request_deferred,
timeout=_sec_timeout,
reactor=self.hs.get_reactor(),
)
response = yield request_deferred
except DNSLookupError as e:
raise_from(RequestSendFailed(e, can_retry=retry_on_dns_fail), e)
except Exception as e:
logger.info("Failed to send request: %s", e)
raise_from(RequestSendFailed(e, can_retry=True), e)
logger.info(
@@ -441,24 +423,23 @@ class MatrixFederationHttpClient(object):
defer.returnValue(response)
def sign_request(self, destination, method, url_bytes, headers_dict,
content=None, destination_is=None):
def build_auth_headers(
self, destination, method, url_bytes, content=None, destination_is=None,
):
"""
Signs a request by adding an Authorization header to headers_dict
Builds the Authorization headers for a federation request
Args:
destination (bytes|None): The destination home server of the request.
May be None if the destination is an identity server, in which case
destination_is must be non-None.
method (bytes): The HTTP method of the request
url_bytes (bytes): The URI path of the request
headers_dict (dict[bytes, list[bytes]]): Dictionary of request headers to
append to
content (object): The body of the request
destination_is (bytes): As 'destination', but if the destination is an
identity server
Returns:
None
list[bytes]: a list of headers to be added as "Authorization:" headers
"""
request = {
"method": method,
@@ -485,8 +466,7 @@ class MatrixFederationHttpClient(object):
self.server_name, key, sig,
)).encode('ascii')
)
headers_dict[b"Authorization"] = auth_headers
return auth_headers
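# Illustrative return value (one entry per signing key; the key ID and
# signature here are made up):
#
#   [b'X-Matrix origin=my.server.example,key="ed25519:a_AbCd",sig="KxY2..."']
#
# The caller attaches this list as the request's Authorization header
# (headers_dict[b"Authorization"] = auth_headers, as in the hunk above).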
@defer.inlineCallbacks
def put_json(self, destination, path, args={}, data={},
@@ -516,17 +496,18 @@ class MatrixFederationHttpClient(object):
requests)
Returns:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
Fails with ``HttpResponseException`` if we get an HTTP response
code >= 300.
Fails with ``NotRetryingDestination`` if we are not yet ready
to retry this server.
Fails with ``FederationDeniedError`` if this destination
is not on our federation whitelist
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
@@ -570,17 +551,18 @@ class MatrixFederationHttpClient(object):
try the request anyway.
args (dict): query params
Returns:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
Fails with ``HttpResponseException`` if we get an HTTP response
code >= 300.
Fails with ``NotRetryingDestination`` if we are not yet ready
to retry this server.
Fails with ``FederationDeniedError`` if this destination
is not on our federation whitelist
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
@@ -625,17 +607,18 @@ class MatrixFederationHttpClient(object):
ignore_backoff (bool): true to ignore the historical backoff data
and try the request anyway.
Returns:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
Fails with ``HttpResponseException`` if we get an HTTP response
code >= 300.
Fails with ``NotRetryingDestination`` if we are not yet ready
to retry this server.
Fails with ``FederationDeniedError`` if this destination
is not on our federation whitelist
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
logger.debug("get_json args: %s", args)
@@ -676,17 +659,18 @@ class MatrixFederationHttpClient(object):
ignore_backoff (bool): true to ignore the historical backoff data and
try the request anyway.
Returns:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
Fails with ``HttpResponseException`` if we get an HTTP response
code >= 300.
Fails with ``NotRetryingDestination`` if we are not yet ready
to retry this server.
Fails with ``FederationDeniedError`` if this destination
is not on our federation whitelist
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
method="DELETE",
@@ -719,18 +703,20 @@ class MatrixFederationHttpClient(object):
args (dict): Optional dictionary used to create the query string.
ignore_backoff (bool): true to ignore the historical backoff data
and try the request anyway.
Returns:
Deferred: resolves with an (int,dict) tuple of the file length and
a dict of the response headers.
Deferred[tuple[int, dict]]: Resolves with an (int,dict) tuple of
the file length and a dict of the response headers.
Fails with ``HttpResponseException`` if we get an HTTP response code
>= 300
Fails with ``NotRetryingDestination`` if we are not yet ready
to retry this server.
Fails with ``FederationDeniedError`` if this destination
is not on our federation whitelist
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
method="GET",

View File

@@ -84,7 +84,7 @@ def _rule_to_template(rule):
templaterule["pattern"] = thecond["pattern"]
if unscoped_rule_id:
templaterule['rule_id'] = unscoped_rule_id
templaterule['rule_id'] = unscoped_rule_id
if 'default' in rule:
templaterule['default'] = rule['default']
return templaterule

View File

@@ -40,7 +40,11 @@ REQUIREMENTS = [
"signedjson>=1.0.0",
"pynacl>=1.2.1",
"service_identity>=16.0.0",
"Twisted>=17.1.0",
# our logcontext handling relies on the ability to cancel inlineCallbacks
# (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7.
"Twisted>=18.7.0",
"treq>=15.1",
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
"pyopenssl>=16.0.0",
@@ -52,15 +56,18 @@ REQUIREMENTS = [
"pillow>=3.1.2",
"sortedcontainers>=1.4.4",
"psutil>=2.0.0",
"pymacaroons-pynacl>=0.9.3",
"msgpack-python>=0.4.2",
"pymacaroons>=0.13.0",
"msgpack>=0.5.0",
"phonenumbers>=8.2.0",
"six>=1.10",
# prometheus_client 0.4.0 changed the format of counter metrics
# (cf https://github.com/matrix-org/synapse/issues/4001)
"prometheus_client>=0.0.18,<0.4.0",
# we use attr.s(slots), which arrived in 16.0.0
"attrs>=16.0.0",
# Twisted 18.7.0 requires attrs>=17.4.0
"attrs>=17.4.0",
"netaddr>=0.7.18",
]
@@ -72,6 +79,10 @@ CONDITIONAL_REQUIREMENTS = {
# ConsentResource uses select_autoescape, which arrived in jinja 2.9
"resources.consent": ["Jinja2>=2.9"],
# ACME support is required to provision TLS certificates from authorities
# that use the protocol, such as Let's Encrypt.
"acme": ["txacme>=0.9.2"],
"saml2": ["pysaml2>=4.5.0"],
"url_preview": ["lxml>=3.5.0"],
"test": ["mock>=2.0"],

View File

@@ -127,7 +127,10 @@ class ReplicationEndpoint(object):
def send_request(**kwargs):
data = yield cls._serialize_payload(**kwargs)
url_args = [urllib.parse.quote(kwargs[name]) for name in cls.PATH_ARGS]
url_args = [
urllib.parse.quote(kwargs[name], safe='')
for name in cls.PATH_ARGS
]
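# Illustrative difference with a hypothetical room-v3 style event ID: the
# default quote() leaves "/" unescaped, which would corrupt the replication
# URL path, while safe='' escapes everything.
#
#   >>> urllib.parse.quote("$abc/def+ghi")
#   '%24abc/def%2Bghi'
#   >>> urllib.parse.quote("$abc/def+ghi", safe='')
#   '%24abc%2Fdef%2Bghi'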
if cls.CACHE:
txn_id = random_string(10)

View File

@@ -17,7 +17,7 @@ import logging
from twisted.internet import defer
from synapse.events import FrozenEvent
from synapse.events import event_type_from_format_version
from synapse.events.snapshot import EventContext
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
@@ -70,6 +70,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
event_payloads.append({
"event": event.get_pdu_json(),
"event_format_version": event.format_version,
"internal_metadata": event.internal_metadata.get_dict(),
"rejected_reason": event.rejected_reason,
"context": serialized_context,
@@ -94,9 +95,12 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
event_and_contexts = []
for event_payload in event_payloads:
event_dict = event_payload["event"]
format_ver = event_payload["event_format_version"]
internal_metadata = event_payload["internal_metadata"]
rejected_reason = event_payload["rejected_reason"]
event = FrozenEvent(event_dict, internal_metadata, rejected_reason)
EventType = event_type_from_format_version(format_ver)
event = EventType(event_dict, internal_metadata, rejected_reason)
context = yield EventContext.deserialize(
self.store, event_payload["context"],

View File

@@ -17,7 +17,7 @@ import logging
from twisted.internet import defer
from synapse.events import FrozenEvent
from synapse.events import event_type_from_format_version
from synapse.events.snapshot import EventContext
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
@@ -74,6 +74,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
payload = {
"event": event.get_pdu_json(),
"event_format_version": event.format_version,
"internal_metadata": event.internal_metadata.get_dict(),
"rejected_reason": event.rejected_reason,
"context": serialized_context,
@@ -90,9 +91,12 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
content = parse_json_object_from_request(request)
event_dict = content["event"]
format_ver = content["event_format_version"]
internal_metadata = content["internal_metadata"]
rejected_reason = content["rejected_reason"]
event = FrozenEvent(event_dict, internal_metadata, rejected_reason)
EventType = event_type_from_format_version(format_ver)
event = EventType(event_dict, internal_metadata, rejected_reason)
requester = Requester.deserialize(self.store, content["requester"])
context = yield EventContext.deserialize(self.store, content["context"])

View File

@@ -34,6 +34,7 @@ from synapse.rest.client.v2_alpha import (
account,
account_data,
auth,
capabilities,
devices,
filter,
groups,
@@ -107,3 +108,4 @@ class ClientRestResource(JsonResource):
user_directory.register_servlets(hs, client_resource)
groups.register_servlets(hs, client_resource)
room_upgrade_rest_servlet.register_servlets(hs, client_resource)
capabilities.register_servlets(hs, client_resource)

View File

@@ -89,7 +89,7 @@ class RoomStateEventRestServlet(ClientV1RestServlet):
def __init__(self, hs):
super(RoomStateEventRestServlet, self).__init__(hs)
self.handlers = hs.get_handlers()
self.event_creation_hander = hs.get_event_creation_handler()
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self.message_handler = hs.get_message_handler()
@@ -172,7 +172,7 @@ class RoomStateEventRestServlet(ClientV1RestServlet):
content=content,
)
else:
event = yield self.event_creation_hander.create_and_send_nonmember_event(
event = yield self.event_creation_handler.create_and_send_nonmember_event(
requester,
event_dict,
txn_id=txn_id,
@@ -189,7 +189,7 @@ class RoomSendEventRestServlet(ClientV1RestServlet):
def __init__(self, hs):
super(RoomSendEventRestServlet, self).__init__(hs)
self.event_creation_hander = hs.get_event_creation_handler()
self.event_creation_handler = hs.get_event_creation_handler()
def register(self, http_server):
# /rooms/$roomid/send/$event_type[/$txn_id]
@@ -211,7 +211,7 @@ class RoomSendEventRestServlet(ClientV1RestServlet):
if b'ts' in request.args and requester.app_service:
event_dict['origin_server_ts'] = parse_integer(request, "ts", 0)
event = yield self.event_creation_hander.create_and_send_nonmember_event(
event = yield self.event_creation_handler.create_and_send_nonmember_event(
requester,
event_dict,
txn_id=txn_id,

View File

@@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from twisted.internet import defer
from synapse.api.constants import DEFAULT_ROOM_VERSION, RoomDisposition, RoomVersions
from synapse.http.servlet import RestServlet
from ._base import client_v2_patterns
logger = logging.getLogger(__name__)
class CapabilitiesRestServlet(RestServlet):
"""End point to expose the capabilities of the server."""
PATTERNS = client_v2_patterns("/capabilities$")
def __init__(self, hs):
"""
Args:
hs (synapse.server.HomeServer): server
"""
super(CapabilitiesRestServlet, self).__init__()
self.hs = hs
self.auth = hs.get_auth()
self.store = hs.get_datastore()
@defer.inlineCallbacks
def on_GET(self, request):
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
user = yield self.store.get_user_by_id(requester.user.to_string())
change_password = bool(user["password_hash"])
response = {
"capabilities": {
"m.room_versions": {
"default": DEFAULT_ROOM_VERSION,
"available": {
RoomVersions.V1: RoomDisposition.STABLE,
RoomVersions.V2: RoomDisposition.STABLE,
RoomVersions.STATE_V2_TEST: RoomDisposition.UNSTABLE,
RoomVersions.V3: RoomDisposition.STABLE,
},
},
"m.change_password": {"enabled": change_password},
}
}
defer.returnValue((200, response))
def register_servlets(hs, http_server):
CapabilitiesRestServlet(hs).register(http_server)
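# Illustrative JSON body for "GET /_matrix/client/r0/capabilities"
# (room-version identifiers and dispositions follow the constants
# referenced above; actual values depend on server configuration):
#
#   {
#       "capabilities": {
#           "m.room_versions": {
#               "default": "1",
#               "available": {
#                   "1": "stable",
#                   "2": "stable",
#                   "3": "stable",
#                   "state-v2-test": "unstable"
#               }
#           },
#           "m.change_password": {"enabled": true}
#       }
#   }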

View File

@@ -309,22 +309,16 @@ class RegisterRestServlet(RestServlet):
assigned_user_id=registered_user_id,
)
# Only give msisdn flows if the x_show_msisdn flag is given:
# this is a hack to work around the fact that clients were shipped
# that use fallback registration if they see any flows that they don't
# recognise, which means we break registration for these clients if we
# advertise msisdn flows. Once usage of Riot iOS <=0.3.9 and Riot
# Android <=0.6.9 have fallen below an acceptable threshold, this
# parameter should go away and we should always advertise msisdn flows.
show_msisdn = False
if 'x_show_msisdn' in body and body['x_show_msisdn']:
show_msisdn = True
# FIXME: need a better error than "no auth flow found" for scenarios
# where we required 3PID for registration but the user didn't give one
require_email = 'email' in self.hs.config.registrations_require_3pid
require_msisdn = 'msisdn' in self.hs.config.registrations_require_3pid
show_msisdn = True
if self.hs.config.disable_msisdn_registration:
show_msisdn = False
require_msisdn = False
flows = []
if self.hs.config.enable_registration_captcha:
# only support 3PIDless registration if no 3PIDs are required
@@ -422,8 +416,11 @@ class RegisterRestServlet(RestServlet):
)
# Necessary due to auth checks prior to the threepid being
# written to the db
if is_threepid_reserved(self.hs.config, threepid):
yield self.store.upsert_monthly_active_user(registered_user_id)
if threepid:
if is_threepid_reserved(
self.hs.config.mau_limits_reserved_threepids, threepid
):
yield self.store.upsert_monthly_active_user(registered_user_id)
# remember that we've now registered that user account, and with
# what user ID (since the user may not have specified)

View File

@@ -101,16 +101,7 @@ class ConsentResource(Resource):
"missing in config file.",
)
# daemonize changes the cwd to /, so make the path absolute now.
consent_template_directory = path.abspath(
hs.config.user_consent_template_dir,
)
if not path.isdir(consent_template_directory):
raise ConfigError(
"Could not find template directory '%s'" % (
consent_template_directory,
),
)
consent_template_directory = hs.config.user_consent_template_dir
loader = jinja2.FileSystemLoader(consent_template_directory)
self._jinja_env = jinja2.Environment(

View File

@@ -46,6 +46,7 @@ from synapse.federation.transport.client import TransportLayerClient
from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer
from synapse.groups.groups_server import GroupsServerHandler
from synapse.handlers import Handlers
from synapse.handlers.acme import AcmeHandler
from synapse.handlers.appservice import ApplicationServicesHandler
from synapse.handlers.auth import AuthHandler, MacaroonGenerator
from synapse.handlers.deactivate_account import DeactivateAccountHandler
@@ -129,6 +130,7 @@ class HomeServer(object):
'sync_handler',
'typing_handler',
'room_list_handler',
'acme_handler',
'auth_handler',
'device_handler',
'e2e_keys_handler',
@@ -310,6 +312,9 @@ class HomeServer(object):
def build_e2e_room_keys_handler(self):
return E2eRoomKeysHandler(self)
def build_acme_handler(self):
return AcmeHandler(self)
def build_application_service_api(self):
return ApplicationServiceApi(self)
@@ -350,10 +355,7 @@ class HomeServer(object):
return Keyring(self)
def build_event_builder_factory(self):
return EventBuilderFactory(
clock=self.get_clock(),
hostname=self.hostname,
)
return EventBuilderFactory(self)
def build_filtering(self):
return Filtering(self)

View File

@@ -608,10 +608,10 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
state_sets, event_map, state_res_store.get_events,
)
elif room_version in (
RoomVersions.VDH_TEST, RoomVersions.STATE_V2_TEST, RoomVersions.V2,
RoomVersions.STATE_V2_TEST, RoomVersions.V2, RoomVersions.V3,
):
return v2.resolve_events_with_store(
state_sets, event_map, state_res_store,
room_version, state_sets, event_map, state_res_store,
)
else:
# This should only happen if we added a version but forgot to add it to

View File

@@ -21,7 +21,7 @@ from six import iteritems, iterkeys, itervalues
from twisted.internet import defer
from synapse import event_auth
from synapse.api.constants import EventTypes
from synapse.api.constants import EventTypes, RoomVersions
from synapse.api.errors import AuthError
logger = logging.getLogger(__name__)
@@ -274,7 +274,11 @@ def _resolve_auth_events(events, auth_events):
auth_events[(prev_event.type, prev_event.state_key)] = prev_event
try:
# The signatures have already been checked at this point
event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
event_auth.check(
RoomVersions.V1, event, auth_events,
do_sig_check=False,
do_size_check=False,
)
prev_event = event
except AuthError:
return prev_event
@@ -286,7 +290,11 @@ def _resolve_normal_events(events, auth_events):
for event in _ordered_events(events):
try:
# The signatures have already been checked at this point
event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
event_auth.check(
RoomVersions.V1, event, auth_events,
do_sig_check=False,
do_size_check=False,
)
return event
except AuthError:
pass

View File

@@ -29,10 +29,12 @@ logger = logging.getLogger(__name__)
@defer.inlineCallbacks
def resolve_events_with_store(state_sets, event_map, state_res_store):
def resolve_events_with_store(room_version, state_sets, event_map, state_res_store):
"""Resolves the state using the v2 state resolution algorithm
Args:
room_version (str): The room version
state_sets(list): List of dicts of (type, state_key) -> event_id,
which are the different state groups to resolve.
@@ -104,7 +106,7 @@ def resolve_events_with_store(state_sets, event_map, state_res_store):
# Now sequentially auth each one
resolved_state = yield _iterative_auth_checks(
sorted_power_events, unconflicted_state, event_map,
room_version, sorted_power_events, unconflicted_state, event_map,
state_res_store,
)
@@ -129,7 +131,7 @@ def resolve_events_with_store(state_sets, event_map, state_res_store):
logger.debug("resolving remaining events")
resolved_state = yield _iterative_auth_checks(
leftover_events, resolved_state, event_map,
room_version, leftover_events, resolved_state, event_map,
state_res_store,
)
@@ -350,11 +352,13 @@ def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_
@defer.inlineCallbacks
def _iterative_auth_checks(event_ids, base_state, event_map, state_res_store):
def _iterative_auth_checks(room_version, event_ids, base_state, event_map,
state_res_store):
"""Sequentially apply auth checks to each event in given list, updating the
state as it goes along.
Args:
room_version (str)
event_ids (list[str]): Ordered list of events to apply auth checks to
base_state (dict[tuple[str, str], str]): The set of state to start with
event_map (dict[str,FrozenEvent])
@@ -385,7 +389,7 @@ def _iterative_auth_checks(event_ids, base_state, event_map, state_res_store):
try:
event_auth.check(
event, auth_events,
room_version, event, auth_events,
do_sig_check=False,
do_size_check=False
)

View File

@@ -317,7 +317,7 @@ class DataStore(RoomMemberStore, RoomStore,
thirty_days_ago_in_secs))
for row in txn:
if row[0] is 'unknown':
if row[0] == 'unknown':
pass
results[row[0]] = row[1]

View File

@@ -26,7 +26,8 @@ from prometheus_client import Histogram
from twisted.internet import defer
from synapse.api.errors import StoreError
from synapse.storage.engines import PostgresEngine
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.util.caches.descriptors import Cache
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.util.stringutils import exception_to_unicode
@@ -192,6 +193,57 @@ class SQLBaseStore(object):
self.database_engine = hs.database_engine
# A set of tables that are not safe to use native upserts in.
self._unsafe_to_upsert_tables = {"user_ips"}
# We add the user_directory_search table to the blacklist on SQLite
# because the existing search table does not have an index, making it
# unsafe to use native upserts.
if isinstance(self.database_engine, Sqlite3Engine):
self._unsafe_to_upsert_tables.add("user_directory_search")
if self.database_engine.can_native_upsert:
# Check ASAP (and then later, every 1s) to see if we have finished
# background updates of tables that aren't safe to update.
self._clock.call_later(
0.0,
run_as_background_process,
"upsert_safety_check",
self._check_safe_to_upsert
)
@defer.inlineCallbacks
def _check_safe_to_upsert(self):
"""
Is it safe to use native UPSERT?
If there are background updates, we will need to wait, as they may include
the addition of indexes that provide the UNIQUE constraint we require.
If the background updates have not completed, wait 15 sec and check again.
"""
updates = yield self._simple_select_list(
"background_updates",
keyvalues=None,
retcols=["update_name"],
desc="check_background_updates",
)
updates = [x["update_name"] for x in updates]
# The User IPs table in schema #53 was missing a unique index, which we
# run as a background update.
if "user_ips_device_unique_index" not in updates:
self._unsafe_to_upsert_tables.discard("user_ips")
# If there's any tables left to check, reschedule to run.
if updates:
self._clock.call_later(
15.0,
run_as_background_process,
"upsert_safety_check",
self._check_safe_to_upsert
)
def start_profiling(self):
self._previous_loop_ts = self._clock.time_msec()
@@ -494,8 +546,15 @@ class SQLBaseStore(object):
txn.executemany(sql, vals)
@defer.inlineCallbacks
def _simple_upsert(self, table, keyvalues, values,
insertion_values={}, desc="_simple_upsert", lock=True):
def _simple_upsert(
self,
table,
keyvalues,
values,
insertion_values={},
desc="_simple_upsert",
lock=True
):
"""
`lock` should generally be set to True (the default), but can be set
@@ -516,16 +575,21 @@ class SQLBaseStore(object):
inserting
lock (bool): True to lock the table when doing the upsert.
Returns:
Deferred(bool): True if a new entry was created, False if an
existing one was updated.
Deferred(None or bool): Native upserts always return None. Emulated
upserts return True if a new entry was created, False if an existing
one was updated.
"""
attempts = 0
while True:
try:
result = yield self.runInteraction(
desc,
self._simple_upsert_txn, table, keyvalues, values, insertion_values,
lock=lock
self._simple_upsert_txn,
table,
keyvalues,
values,
insertion_values,
lock=lock,
)
defer.returnValue(result)
except self.database_engine.module.IntegrityError as e:
@@ -537,21 +601,88 @@ class SQLBaseStore(object):
# presumably we raced with another transaction: let's retry.
logger.warn(
"IntegrityError when upserting into %s; retrying: %s",
table, e
"%s when upserting into %s; retrying: %s", e.__name__, table, e
)
def _simple_upsert_txn(self, txn, table, keyvalues, values, insertion_values={},
lock=True):
def _simple_upsert_txn(
self,
txn,
table,
keyvalues,
values,
insertion_values={},
lock=True,
):
"""
Pick the UPSERT method which works best on the platform. Either the
native one (Pg9.5+, recent SQLites), or fall back to an emulated method.
Args:
txn: The transaction to use.
table (str): The table to upsert into
keyvalues (dict): The unique key columns and their new values
values (dict): The nonunique columns and their new values
insertion_values (dict): additional key/values to use only when
inserting
lock (bool): True to lock the table when doing the upsert.
Returns:
None or bool: Native upserts always return None. Emulated
upserts return True if a new entry was created, False if an existing
one was updated.
"""
if (
self.database_engine.can_native_upsert
and table not in self._unsafe_to_upsert_tables
):
return self._simple_upsert_txn_native_upsert(
txn,
table,
keyvalues,
values,
insertion_values=insertion_values,
)
else:
return self._simple_upsert_txn_emulated(
txn,
table,
keyvalues,
values,
insertion_values=insertion_values,
lock=lock,
)
def _simple_upsert_txn_emulated(
self, txn, table, keyvalues, values, insertion_values={}, lock=True
):
"""
Args:
table (str): The table to upsert into
keyvalues (dict): The unique key columns and their new values
values (dict): The nonunique columns and their new values
insertion_values (dict): additional key/values to use only when
inserting
lock (bool): True to lock the table when doing the upsert.
Returns:
bool: Return True if a new entry was created, False if an existing
one was updated.
"""
# We need to lock the table :(, unless we're *really* careful
if lock:
self.database_engine.lock_table(txn, table)
def _getwhere(key):
# If the value we're passing in is None (aka NULL), we need to use
# IS, not =, as NULL = NULL equals NULL (False).
if keyvalues[key] is None:
return "%s IS ?" % (key,)
else:
return "%s = ?" % (key,)
# First try to update.
sql = "UPDATE %s SET %s WHERE %s" % (
table,
", ".join("%s = ?" % (k,) for k in values),
" AND ".join("%s = ?" % (k,) for k in keyvalues)
" AND ".join(_getwhere(k) for k in keyvalues)
)
sqlargs = list(values.values()) + list(keyvalues.values())
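# Illustrative WHERE clause for hypothetical keyvalues: a None value must
# be matched with IS, since "col = NULL" is never true in SQL.
#
#   keyvalues = {"user_id": "@alice:example.com", "device_id": None}
#   -> "user_id = ? AND device_id IS ?"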
@@ -569,12 +700,44 @@ class SQLBaseStore(object):
sql = "INSERT INTO %s (%s) VALUES (%s)" % (
table,
", ".join(k for k in allvalues),
", ".join("?" for _ in allvalues)
", ".join("?" for _ in allvalues),
)
txn.execute(sql, list(allvalues.values()))
# successfully inserted
return True
def _simple_upsert_txn_native_upsert(
self, txn, table, keyvalues, values, insertion_values={}
):
"""
Use the native UPSERT functionality in recent PostgreSQL versions.
Args:
table (str): The table to upsert into
keyvalues (dict): The unique key columns and their new values
values (dict): The nonunique columns and their new values
insertion_values (dict): additional key/values to use only when
inserting
Returns:
None
"""
allvalues = {}
allvalues.update(keyvalues)
allvalues.update(values)
allvalues.update(insertion_values)
sql = (
"INSERT INTO %s (%s) VALUES (%s) "
"ON CONFLICT (%s) DO UPDATE SET %s"
) % (
table,
", ".join(k for k in allvalues),
", ".join("?" for _ in allvalues),
", ".join(k for k in keyvalues),
", ".join(k + "=EXCLUDED." + k for k in values),
)
txn.execute(sql, list(allvalues.values()))
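# Illustrative SQL for hypothetical arguments, before the engine rewrites
# the "?" placeholders:
#
#   keyvalues={"user_id": ..., "access_token": ..., "ip": ...}
#   values={"user_agent": ..., "device_id": ..., "last_seen": ...}
#
#   INSERT INTO user_ips
#       (user_id, access_token, ip, user_agent, device_id, last_seen)
#   VALUES (?, ?, ?, ?, ?, ?)
#   ON CONFLICT (user_id, access_token, ip)
#   DO UPDATE SET user_agent=EXCLUDED.user_agent,
#       device_id=EXCLUDED.device_id, last_seen=EXCLUDED.last_seen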
def _simple_select_one(self, table, keyvalues, retcols,
allow_none=False, desc="_simple_select_one"):
"""Executes a SELECT query on the named table, which is expected to

View File

@@ -240,7 +240,7 @@ class BackgroundUpdateStore(SQLBaseStore):
* An integer count of the number of items to update in this batch.
The handler should return a deferred integer count of items updated.
The hander is responsible for updating the progress of the update.
The handler is responsible for updating the progress of the update.
Args:
update_name(str): The name of the update that this code handles.

View File

@@ -65,7 +65,27 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
columns=["last_seen"],
)
# (user_id, access_token, ip) -> (user_agent, device_id, last_seen)
self.register_background_update_handler(
"user_ips_remove_dupes",
self._remove_user_ip_dupes,
)
# Register a unique index
self.register_background_index_update(
"user_ips_device_unique_index",
index_name="user_ips_user_token_ip_unique_index",
table="user_ips",
columns=["user_id", "access_token", "ip"],
unique=True,
)
# Drop the old non-unique index
self.register_background_update_handler(
"user_ips_drop_nonunique_index",
self._remove_user_ip_nonunique,
)
# (user_id, access_token, ip,) -> (user_agent, device_id, last_seen)
self._batch_row_update = {}
self._client_ip_looper = self._clock.looping_call(
@@ -75,6 +95,129 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
"before", "shutdown", self._update_client_ips_batch
)
@defer.inlineCallbacks
def _remove_user_ip_nonunique(self, progress, batch_size):
def f(conn):
txn = conn.cursor()
txn.execute(
"DROP INDEX IF EXISTS user_ips_user_ip"
)
txn.close()
yield self.runWithConnection(f)
yield self._end_background_update("user_ips_drop_nonunique_index")
defer.returnValue(1)
@defer.inlineCallbacks
def _remove_user_ip_dupes(self, progress, batch_size):
# This function works by scanning the user_ips table in batches
# based on `last_seen`. For each row in a batch it searches the rest of
# the table to see if there are any duplicates; if there are, they
# are removed and replaced with a suitable row.
# Fetch the start of the batch
begin_last_seen = progress.get("last_seen", 0)
def get_last_seen(txn):
txn.execute(
"""
SELECT last_seen FROM user_ips
WHERE last_seen > ?
ORDER BY last_seen
LIMIT 1
OFFSET ?
""",
(begin_last_seen, batch_size)
)
row = txn.fetchone()
if row:
return row[0]
else:
return None
# Get a last_seen that is roughly `batch_size` rows after `begin_last_seen`
end_last_seen = yield self.runInteraction(
"user_ips_dups_get_last_seen", get_last_seen
)
# If it returns None, then we're processing the last batch
last = end_last_seen is None
logger.info(
"Scanning for duplicate 'user_ips' rows in range: %s <= last_seen < %s",
begin_last_seen, end_last_seen,
)
def remove(txn):
# This works by looking at all entries in the given time span, and
# then for each (user_id, access_token, ip) tuple in that range
# checking for any duplicates in the rest of the table (via a join).
# It then only returns entries which have duplicates, and the max
# last_seen across all duplicates, which can then be used to delete
# all other duplicates.
# It is efficient due to the existence of (user_id, access_token,
# ip) and (last_seen) indices.
# Define the search space, which requires handling the last batch in
# a different way
if last:
clause = "? <= last_seen"
args = (begin_last_seen,)
else:
clause = "? <= last_seen AND last_seen < ?"
args = (begin_last_seen, end_last_seen)
txn.execute(
"""
SELECT user_id, access_token, ip,
MAX(device_id), MAX(user_agent), MAX(last_seen)
FROM (
SELECT user_id, access_token, ip
FROM user_ips
WHERE {}
) c
INNER JOIN user_ips USING (user_id, access_token, ip)
GROUP BY user_id, access_token, ip
HAVING count(*) > 1
""".format(clause),
args
)
res = txn.fetchall()
# We've got some duplicates
for i in res:
user_id, access_token, ip, device_id, user_agent, last_seen = i
# Drop all the duplicates
txn.execute(
"""
DELETE FROM user_ips
WHERE user_id = ? AND access_token = ? AND ip = ?
""",
(user_id, access_token, ip)
)
# Add a single row back in, keeping the most recent values
txn.execute(
"""
INSERT INTO user_ips
(user_id, access_token, ip, device_id, user_agent, last_seen)
VALUES (?, ?, ?, ?, ?, ?)
""",
(user_id, access_token, ip, device_id, user_agent, last_seen)
)
self._background_update_progress_txn(
txn, "user_ips_remove_dupes", {"last_seen": end_last_seen}
)
yield self.runInteraction("user_ips_dups_remove", remove)
if last:
yield self._end_background_update("user_ips_remove_dupes")
defer.returnValue(batch_size)
@defer.inlineCallbacks
def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id,
now=None):
@@ -114,7 +257,10 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
)
def _update_client_ips_batch_txn(self, txn, to_update):
self.database_engine.lock_table(txn, "user_ips")
if "user_ips" in self._unsafe_to_upsert_tables or (
not self.database_engine.can_native_upsert
):
self.database_engine.lock_table(txn, "user_ips")
for entry in iteritems(to_update):
(user_id, access_token, ip), (user_agent, device_id, last_seen) = entry
@@ -127,10 +273,10 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
"user_id": user_id,
"access_token": access_token,
"ip": ip,
"user_agent": user_agent,
"device_id": device_id,
},
values={
"user_agent": user_agent,
"device_id": device_id,
"last_seen": last_seen,
},
lock=False,
@@ -227,7 +373,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
results = {}
for key in self._batch_row_update:
uid, access_token, ip = key
uid, access_token, ip, = key
if uid == user_id:
user_agent, _, last_seen = self._batch_row_update[key]
results[(access_token, ip)] = (user_agent, last_seen)

View File

@@ -18,7 +18,7 @@ import platform
from ._base import IncorrectDatabaseSetup
from .postgres import PostgresEngine
from .sqlite3 import Sqlite3Engine
from .sqlite import Sqlite3Engine
SUPPORTED_MODULE = {
"sqlite3": Sqlite3Engine,

View File

@@ -38,6 +38,13 @@ class PostgresEngine(object):
return sql.replace("?", "%s")
def on_new_connection(self, db_conn):
# Get the version of PostgreSQL that we're using. As per the psycopg2
# docs: The number is formed by converting the major, minor, and
# revision numbers into two-decimal-digit numbers and appending them
# together. For example, version 8.1.5 will be returned as 80105
self._version = db_conn.server_version
db_conn.set_isolation_level(
self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
)
@@ -54,6 +61,13 @@ class PostgresEngine(object):
cursor.close()
@property
def can_native_upsert(self):
"""
Can we use native UPSERTs? This requires PostgreSQL 9.5+.
"""
return self._version >= 90500
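# Illustrative server_version values against the 90500 threshold used above:
#
#   PostgreSQL 9.4.12 -> 90412   (native upsert unavailable)
#   PostgreSQL 9.5.0  -> 90500   (ON CONFLICT supported)
#   PostgreSQL 10.4   -> 100004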
def is_deadlock(self, error):
if isinstance(error, self.module.DatabaseError):
# https://www.postgresql.org/docs/current/static/errcodes-appendix.html

View File

@@ -30,6 +30,14 @@ class Sqlite3Engine(object):
self._current_state_group_id = None
self._current_state_group_id_lock = threading.Lock()
@property
def can_native_upsert(self):
"""
Do we support native UPSERTs? This requires SQLite3 3.24+, plus some
more work we haven't done yet to tell what was inserted vs updated.
"""
return self.module.sqlite_version_info >= (3, 24, 0)
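# The comparison is a plain lexicographic tuple check, e.g.:
#
#   (3, 24, 0) >= (3, 24, 0)  -> True   (native UPSERT usable)
#   (3, 22, 0) >= (3, 24, 0)  -> False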
def check_database(self, txn):
pass

View File

@@ -125,6 +125,29 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
return dict(txn)
@defer.inlineCallbacks
def get_max_depth_of(self, event_ids):
"""Returns the max depth of a set of event IDs
Args:
event_ids (list[str])
Returns
Deferred[int]
"""
rows = yield self._simple_select_many_batch(
table="events",
column="event_id",
iterable=event_ids,
retcols=("depth",),
desc="get_max_depth_of",
)
if not rows:
defer.returnValue(0)
else:
defer.returnValue(max(row["depth"] for row in rows))
def _get_oldest_events_in_room_txn(self, txn, room_id):
return self._simple_select_onecol_txn(
txn,

View File

@@ -739,7 +739,18 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore
}
events_map = {ev.event_id: ev for ev, _ in events_context}
room_version = yield self.get_room_version(room_id)
# We need to get the room version, which is in the create event.
# Normally that'd be in the database, but it's also possible that we're
# currently trying to persist it.
room_version = None
for ev, _ in events_context:
if ev.type == EventTypes.Create and ev.state_key == "":
room_version = ev.content.get("room_version", "1")
break
if not room_version:
room_version = yield self.get_room_version(room_id)
logger.debug("calling resolve_state_groups from preserve_events")
res = yield self._state_resolution_handler.resolve_state_groups(
@@ -893,105 +904,105 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore
def _update_current_state_txn(self, txn, state_delta_by_room, max_stream_order):
for room_id, current_state_tuple in iteritems(state_delta_by_room):
to_delete, to_insert = current_state_tuple
to_delete, to_insert = current_state_tuple
# First we add entries to the current_state_delta_stream. We
# do this before updating the current_state_events table so
# that we can use it to calculate the `prev_event_id`. (This
# allows us to not have to pull out the existing state
# unnecessarily).
sql = """
INSERT INTO current_state_delta_stream
(stream_id, room_id, type, state_key, event_id, prev_event_id)
SELECT ?, ?, ?, ?, ?, (
SELECT event_id FROM current_state_events
WHERE room_id = ? AND type = ? AND state_key = ?
)
"""
txn.executemany(sql, (
(
max_stream_order, room_id, etype, state_key, None,
room_id, etype, state_key,
)
for etype, state_key in to_delete
# We sanity check that we're deleting rather than updating
if (etype, state_key) not in to_insert
))
txn.executemany(sql, (
(
max_stream_order, room_id, etype, state_key, ev_id,
room_id, etype, state_key,
)
for (etype, state_key), ev_id in iteritems(to_insert)
))
# Now we actually update the current_state_events table
txn.executemany(
"DELETE FROM current_state_events"
" WHERE room_id = ? AND type = ? AND state_key = ?",
(
(room_id, etype, state_key)
for etype, state_key in itertools.chain(to_delete, to_insert)
),
# First we add entries to the current_state_delta_stream. We
# do this before updating the current_state_events table so
# that we can use it to calculate the `prev_event_id`. (This
# allows us to not have to pull out the existing state
# unnecessarily).
sql = """
INSERT INTO current_state_delta_stream
(stream_id, room_id, type, state_key, event_id, prev_event_id)
SELECT ?, ?, ?, ?, ?, (
SELECT event_id FROM current_state_events
WHERE room_id = ? AND type = ? AND state_key = ?
)
self._simple_insert_many_txn(
txn,
table="current_state_events",
values=[
{
"event_id": ev_id,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
}
for key, ev_id in iteritems(to_insert)
],
"""
txn.executemany(sql, (
(
max_stream_order, room_id, etype, state_key, None,
room_id, etype, state_key,
)
txn.call_after(
self._curr_state_delta_stream_cache.entity_has_changed,
room_id, max_stream_order,
for etype, state_key in to_delete
# We sanity check that we're deleting rather than updating
if (etype, state_key) not in to_insert
))
txn.executemany(sql, (
(
max_stream_order, room_id, etype, state_key, ev_id,
room_id, etype, state_key,
)
for (etype, state_key), ev_id in iteritems(to_insert)
))
# Invalidate the various caches
# Now we actually update the current_state_events table
# Figure out the changes of membership to invalidate the
# `get_rooms_for_user` cache.
# We find out which membership events we may have deleted
# and which we have added, then we invalidate the caches for all
# those users.
members_changed = set(
state_key
for ev_type, state_key in itertools.chain(to_delete, to_insert)
if ev_type == EventTypes.Member
)
txn.executemany(
"DELETE FROM current_state_events"
" WHERE room_id = ? AND type = ? AND state_key = ?",
(
(room_id, etype, state_key)
for etype, state_key in itertools.chain(to_delete, to_insert)
),
)
for member in members_changed:
self._invalidate_cache_and_stream(
txn, self.get_rooms_for_user_with_stream_ordering, (member,)
)
self._simple_insert_many_txn(
txn,
table="current_state_events",
values=[
{
"event_id": ev_id,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
}
for key, ev_id in iteritems(to_insert)
],
)
for host in set(get_domain_from_id(u) for u in members_changed):
self._invalidate_cache_and_stream(
txn, self.is_host_joined, (room_id, host)
)
self._invalidate_cache_and_stream(
txn, self.was_host_joined, (room_id, host)
)
txn.call_after(
self._curr_state_delta_stream_cache.entity_has_changed,
room_id, max_stream_order,
)
# Invalidate the various caches
# Figure out the changes of membership to invalidate the
# `get_rooms_for_user` cache.
# We find out which membership events we may have deleted
# and which we have added, then we invalidate the caches for all
# those users.
members_changed = set(
state_key
for ev_type, state_key in itertools.chain(to_delete, to_insert)
if ev_type == EventTypes.Member
)
for member in members_changed:
self._invalidate_cache_and_stream(
txn, self.get_users_in_room, (room_id,)
txn, self.get_rooms_for_user_with_stream_ordering, (member,)
)
for host in set(get_domain_from_id(u) for u in members_changed):
self._invalidate_cache_and_stream(
txn, self.get_room_summary, (room_id,)
txn, self.is_host_joined, (room_id, host)
)
self._invalidate_cache_and_stream(
txn, self.was_host_joined, (room_id, host)
)
self._invalidate_cache_and_stream(
txn, self.get_current_state_ids, (room_id,)
)
self._invalidate_cache_and_stream(
txn, self.get_users_in_room, (room_id,)
)
self._invalidate_cache_and_stream(
txn, self.get_room_summary, (room_id,)
)
self._invalidate_cache_and_stream(
txn, self.get_current_state_ids, (room_id,)
)
def _update_forward_extremities_txn(self, txn, new_forward_extremities,
max_stream_order):
@@ -1257,6 +1268,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore
event.internal_metadata.get_dict()
),
"json": encode_json(event_dict(event)),
"format_version": event.format_version,
}
for event, _ in events_and_contexts
],

View File

@@ -21,13 +21,14 @@ from canonicaljson import json
from twisted.internet import defer
from synapse.api.constants import EventFormatVersions, EventTypes
from synapse.api.errors import NotFoundError
from synapse.events import FrozenEvent, event_type_from_format_version # noqa: F401
# these are only included to make the type annotations work
from synapse.events import EventBase # noqa: F401
from synapse.events import FrozenEvent
from synapse.events.snapshot import EventContext # noqa: F401
from synapse.events.utils import prune_event
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util.logcontext import (
LoggingContext,
PreserveLoggingContext,
@@ -162,7 +163,6 @@ class EventsWorkerStore(SQLBaseStore):
missing_events = yield self._enqueue_events(
missing_events_ids,
check_redacted=check_redacted,
allow_rejected=allow_rejected,
)
@@ -174,6 +174,29 @@ class EventsWorkerStore(SQLBaseStore):
if not entry:
continue
# Starting in room version v3, some redactions need to be rechecked if we
# didn't have the redacted event at the time, so we recheck on read
# instead.
if not allow_rejected and entry.event.type == EventTypes.Redaction:
if entry.event.internal_metadata.need_to_check_redaction():
orig = yield self.get_event(
entry.event.redacts,
allow_none=True,
allow_rejected=True,
get_prev_content=False,
)
expected_domain = get_domain_from_id(entry.event.sender)
if orig and get_domain_from_id(orig.sender) == expected_domain:
# This redaction event is allowed. Mark as not needing a
# recheck.
entry.event.internal_metadata.recheck_redaction = False
else:
# We don't have the event that is being redacted, so we
# assume that the event isn't authorized for now. (If we
# later receive the event, then we will always redact
# it anyway, since we have this redaction)
continue
if allow_rejected or not entry.event.rejected_reason:
if check_redacted and entry.redacted_event:
event = entry.redacted_event
@@ -197,7 +220,7 @@ class EventsWorkerStore(SQLBaseStore):
defer.returnValue(events)
def _invalidate_get_event_cache(self, event_id):
self._get_event_cache.invalidate((event_id,))
self._get_event_cache.invalidate((event_id,))
def _get_events_from_cache(self, events, allow_rejected, update_metrics=True):
"""Fetch events from the caches
@@ -310,7 +333,7 @@ class EventsWorkerStore(SQLBaseStore):
self.hs.get_reactor().callFromThread(fire, event_list, e)
@defer.inlineCallbacks
def _enqueue_events(self, events, check_redacted=True, allow_rejected=False):
def _enqueue_events(self, events, allow_rejected=False):
"""Fetches events from the database using the _event_fetch_list. This
allows batch and bulk fetching of events - it allows us to fetch events
without having to create a new transaction for each request for events.
@@ -353,6 +376,7 @@ class EventsWorkerStore(SQLBaseStore):
self._get_event_from_row,
row["internal_metadata"], row["json"], row["redacts"],
rejected_reason=row["rejects"],
format_version=row["format_version"],
)
for row in rows
],
@@ -377,6 +401,7 @@ class EventsWorkerStore(SQLBaseStore):
" e.event_id as event_id, "
" e.internal_metadata,"
" e.json,"
" e.format_version, "
" r.redacts as redacts,"
" rej.event_id as rejects "
" FROM event_json as e"
@@ -392,7 +417,7 @@ class EventsWorkerStore(SQLBaseStore):
@defer.inlineCallbacks
def _get_event_from_row(self, internal_metadata, js, redacted,
rejected_reason=None):
format_version, rejected_reason=None):
with Measure(self._clock, "_get_event_from_row"):
d = json.loads(js)
internal_metadata = json.loads(internal_metadata)
@@ -405,8 +430,13 @@ class EventsWorkerStore(SQLBaseStore):
desc="_get_event_from_row_rejected_reason",
)
original_ev = FrozenEvent(
d,
if format_version is None:
# This means that we stored the event before we had the concept
# of an event format version, so it must be a V1 event.
format_version = EventFormatVersions.V1
original_ev = event_type_from_format_version(format_version)(
event_dict=d,
internal_metadata_dict=internal_metadata,
rejected_reason=rejected_reason,
)
@@ -436,6 +466,19 @@ class EventsWorkerStore(SQLBaseStore):
# will serialise this field correctly
redacted_event.unsigned["redacted_because"] = because
# Starting in room version v3, some redactions need to be
# rechecked if we didn't have the redacted event at the
# time, so we recheck on read instead.
if because.internal_metadata.need_to_check_redaction():
expected_domain = get_domain_from_id(original_ev.sender)
if get_domain_from_id(because.sender) == expected_domain:
# This redaction event is allowed. Mark as not needing a
# recheck.
because.internal_metadata.recheck_redaction = False
else:
# Senders don't match, so the event isn't actually redacted
redacted_event = None
cache_entry = _EventCacheEntry(
event=original_ev,
redacted_event=redacted_event,
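# Illustrative sender check for the redaction recheck above (hypothetical
# user IDs):
#
#   get_domain_from_id("@alice:example.com")  -> "example.com"
#
# The redaction is only honoured when the redaction's sender and the
# original event's sender share a server name; otherwise the redaction is
# not applied at this point.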

Some files were not shown because too many files have changed in this diff.