Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-07 01:20:16 +00:00)

Compare commits: v1.9.0...erikj/mino (246 commits)
.buildkite/scripts/test_old_deps.sh — 18 lines (new executable file)

@@ -0,0 +1,18 @@
#!/bin/bash

# this script is run by buildkite in a plain `xenial` container; it installs the
# minimal requirements for tox and hands over to the py35-old tox environment.

set -ex

apt-get update
apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev

# workaround for https://github.com/jaraco/zipp/issues/40
python3.5 -m pip install 'setuptools>=34.4.0'

python3.5 -m pip install tox

export LANG="C.UTF-8"

exec tox -e py35-old,combine
@@ -39,3 +39,5 @@ Server correctly handles incoming m.device_list_update

# this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536
Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state

Can get rooms/{roomId}/members at a given point
CHANGES.md — 117 lines

@@ -1,3 +1,120 @@
Synapse 1.10.1 (2020-02-17)
===========================

Bugfixes
--------

- Fix a bug introduced in Synapse 1.10.0 which would cause room state to be cleared in the database if Synapse was upgraded direct from 1.2.1 or earlier to 1.10.0. ([\#6924](https://github.com/matrix-org/synapse/issues/6924))


Synapse 1.10.0 (2020-02-12)
===========================

**WARNING to client developers**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details.

Updates to the Docker image
---------------------------

- Update the docker images to Alpine Linux 3.11. ([\#6897](https://github.com/matrix-org/synapse/issues/6897))


Synapse 1.10.0rc5 (2020-02-11)
==============================

Bugfixes
--------

- Fix the filtering introduced in 1.10.0rc3 to also apply to the state blocks returned by `/sync`. ([\#6884](https://github.com/matrix-org/synapse/issues/6884))


Synapse 1.10.0rc4 (2020-02-11)
==============================

This release candidate was built incorrectly and is superseded by 1.10.0rc5.


Synapse 1.10.0rc3 (2020-02-10)
==============================

Features
--------

- Filter out `m.room.aliases` from the CS API to mitigate abuse while a better solution is specced. ([\#6878](https://github.com/matrix-org/synapse/issues/6878))


Internal Changes
----------------

- Fix continuous integration failures with old versions of `pip`, which were introduced by a release of the `zipp` library. ([\#6880](https://github.com/matrix-org/synapse/issues/6880))


Synapse 1.10.0rc2 (2020-02-06)
==============================

Bugfixes
--------

- Fix an issue with cross-signing where device signatures were not sent to remote servers. ([\#6844](https://github.com/matrix-org/synapse/issues/6844))
- Fix to the unknown remote device detection which was introduced in 1.10.0rc1. ([\#6848](https://github.com/matrix-org/synapse/issues/6848))


Internal Changes
----------------

- Detect unexpected sender keys on remote encrypted events and resync device lists. ([\#6850](https://github.com/matrix-org/synapse/issues/6850))


Synapse 1.10.0rc1 (2020-01-31)
==============================

Features
--------

- Add experimental support for updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260). ([\#6787](https://github.com/matrix-org/synapse/issues/6787), [\#6790](https://github.com/matrix-org/synapse/issues/6790), [\#6794](https://github.com/matrix-org/synapse/issues/6794))


Bugfixes
--------

- Warn if postgres database has a non-C locale, as that can cause issues when upgrading locales (e.g. due to upgrading OS). ([\#6734](https://github.com/matrix-org/synapse/issues/6734))
- Minor fixes to `PUT /_synapse/admin/v2/users` admin API. ([\#6761](https://github.com/matrix-org/synapse/issues/6761))
- Validate `client_secret` parameter using the regex provided by the Client-Server API, temporarily allowing `:` characters for older clients. The `:` character will be removed in a future release. ([\#6767](https://github.com/matrix-org/synapse/issues/6767))
- Fix persisting redaction events that have been redacted (or otherwise don't have a redacts key). ([\#6771](https://github.com/matrix-org/synapse/issues/6771))
- Fix outbound federation request metrics. ([\#6795](https://github.com/matrix-org/synapse/issues/6795))
- Fix bug where querying a remote user's device keys that weren't cached resulted in only returning a single device. ([\#6796](https://github.com/matrix-org/synapse/issues/6796))
- Fix race in federation sender worker that delayed sending of device updates. ([\#6799](https://github.com/matrix-org/synapse/issues/6799), [\#6800](https://github.com/matrix-org/synapse/issues/6800))
- Fix bug where Synapse didn't invalidate cache of remote users' devices when Synapse left a room. ([\#6801](https://github.com/matrix-org/synapse/issues/6801))
- Fix waking up other workers when remote server is detected to have come back online. ([\#6811](https://github.com/matrix-org/synapse/issues/6811))


Improved Documentation
----------------------

- Clarify documentation related to `user_dir` and `federation_reader` workers. ([\#6775](https://github.com/matrix-org/synapse/issues/6775))


Internal Changes
----------------

- Record room versions in the `rooms` table. ([\#6729](https://github.com/matrix-org/synapse/issues/6729), [\#6788](https://github.com/matrix-org/synapse/issues/6788), [\#6810](https://github.com/matrix-org/synapse/issues/6810))
- Propagate cache invalidations from workers to other workers. ([\#6748](https://github.com/matrix-org/synapse/issues/6748))
- Remove some unnecessary admin handler abstraction methods. ([\#6751](https://github.com/matrix-org/synapse/issues/6751))
- Add some debugging for media storage providers. ([\#6757](https://github.com/matrix-org/synapse/issues/6757))
- Detect unknown remote devices and mark cache as stale. ([\#6776](https://github.com/matrix-org/synapse/issues/6776), [\#6819](https://github.com/matrix-org/synapse/issues/6819))
- Attempt to resync remote users' devices when detected as stale. ([\#6786](https://github.com/matrix-org/synapse/issues/6786))
- Delete current state from the database when server leaves a room. ([\#6792](https://github.com/matrix-org/synapse/issues/6792))
- When a client asks for a remote user's device keys, check if the local cache for that user has been marked as potentially stale. ([\#6797](https://github.com/matrix-org/synapse/issues/6797))
- Add background update to clean out left rooms from current state. ([\#6802](https://github.com/matrix-org/synapse/issues/6802), [\#6816](https://github.com/matrix-org/synapse/issues/6816))
- Refactoring work in preparation for changing the event redaction algorithm. ([\#6803](https://github.com/matrix-org/synapse/issues/6803), [\#6805](https://github.com/matrix-org/synapse/issues/6805), [\#6806](https://github.com/matrix-org/synapse/issues/6806), [\#6807](https://github.com/matrix-org/synapse/issues/6807), [\#6820](https://github.com/matrix-org/synapse/issues/6820))


Synapse 1.9.1 (2020-01-28)
==========================

Bugfixes
--------

- Fix bug where setting `mau_limit_reserved_threepids` config would cause Synapse to refuse to start. ([\#6793](https://github.com/matrix-org/synapse/issues/6793))


Synapse 1.9.0 (2020-01-23)
==========================
@@ -200,6 +200,20 @@ Git allows you to add this signoff automatically when using the `-s`
flag to `git commit`, which uses the name and email set in your
`user.name` and `user.email` git configs.

## Merge Strategy

We use the commit history of develop/master extensively to identify
when regressions were introduced and what changes have been made.

We aim to have a clean merge history, which means we normally squash-merge
changes into develop. For small changes this means there is no need to rebase
to clean up your PR before merging. Larger changes with an organised set of
commits may be merged as-is, if the history is judged to be useful.

This use of squash-merging will mean PRs built on each other will be hard to
merge. We suggest avoiding these where possible, and if required, ensuring
each PR has a tidy set of commits to ease merging.

## Conclusion

That's it! Matrix is a very open and collaborative project as you might expect
INSTALL.md — 28 lines

@@ -388,15 +388,17 @@ Once you have installed synapse as above, you will need to configure it.

## TLS certificates

The default configuration exposes a single HTTP port: http://localhost:8008. It
is suitable for local testing, but for any practical use, you will either need
to enable a reverse proxy, or configure Synapse to expose an HTTPS port.
The default configuration exposes a single HTTP port on the local
interface: `http://localhost:8008`. It is suitable for local testing,
but for any practical use, you will need Synapse's APIs to be served
over HTTPS.

For information on using a reverse proxy, see
The recommended way to do so is to set up a reverse proxy on port
`8448`. You can find documentation on doing so in
[docs/reverse_proxy.md](docs/reverse_proxy.md).

To configure Synapse to expose an HTTPS port, you will need to edit
`homeserver.yaml`, as follows:
Alternatively, you can configure Synapse to expose an HTTPS port. To do
so, you will need to edit `homeserver.yaml`, as follows:

* First, under the `listeners` section, uncomment the configuration for the
  TLS-enabled listener. (Remove the hash sign (`#`) at the start of

@@ -414,11 +416,15 @@ To configure Synapse to expose an HTTPS port, you will need to edit
point these settings at an existing certificate and key, or you can
enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
for having Synapse automatically provision and renew federation
certificates through ACME can be found at [ACME.md](docs/ACME.md). If you
are using your own certificate, be sure to use a `.pem` file that includes
the full certificate chain including any intermediate certificates (for
instance, if using certbot, use `fullchain.pem` as your certificate, not
`cert.pem`).
certificates through ACME can be found at [ACME.md](docs/ACME.md).
Note that, as pointed out in that document, this feature will not
work with installs set up after November 2020.

If you are using your
own certificate, be sure to use a `.pem` file that includes the full
certificate chain including any intermediate certificates (for
instance, if using certbot, use `fullchain.pem` as your certificate,
not `cert.pem`).
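As an aside (not part of the original file), one quick way to sanity-check that a `.pem` bundle really contains the full chain is to list every certificate inside it; the path below is a placeholder and `openssl` is assumed to be installed:

```bash
# Print the subject/issuer of each certificate in the bundle. A bare leaf
# certificate with no intermediates will show only a single entry.
openssl crl2pkcs7 -nocrl -certfile /path/to/fullchain.pem \
  | openssl pkcs7 -print_certs -noout
```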
For a more detailed guide to configuring your server for federation, see
[federate.md](docs/federate.md)
@@ -272,7 +272,7 @@ to install using pip and a virtualenv::

    virtualenv -p python3 env
    source env/bin/activate
    python -m pip install --no-use-pep517 -e .[all]
    python -m pip install --no-use-pep517 -e ".[all]"

This will run a process of downloading and installing all the needed
dependencies into a virtual env.
@@ -76,6 +76,15 @@ for example:

    dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb


Upgrading to v1.10.0
====================

Synapse will now log a warning on start up if used with a PostgreSQL database
that has a non-recommended locale set.

See `docs/postgres.md <docs/postgres.md>`_ for details.


Upgrading to v1.8.0
===================
New changelog.d files (one line each):

- changelog.d/6769.feature: Admin API to add or modify threepids of user accounts.
- changelog.d/6781.bugfix: Fixed third party event rules function `on_create_room`'s return value being ignored.
- changelog.d/6821.misc: Add type hints to `SyncHandler`.
- changelog.d/6823.misc: Refactoring work in preparation for changing the event redaction algorithm.
- changelog.d/6825.bugfix: Allow URL-encoded User IDs on `/_synapse/admin/v2/users/<user_id>[/admin]` endpoints. Thanks to @NHAS for reporting.
- changelog.d/6827.misc: Refactoring work in preparation for changing the event redaction algorithm.
- changelog.d/6833.misc: Reducing log level to DEBUG for synapse.storage.TIME.
- changelog.d/6834.misc: Change the default power levels of invites, tombstones and server ACLs for new rooms.
- changelog.d/6836.misc: Fix stacktraces when using `ObservableDeferred` and async/await.
- changelog.d/6837.misc: Port much of `synapse.handlers.federation` to async/await.
- changelog.d/6840.misc: Port much of `synapse.handlers.federation` to async/await.
- changelog.d/6844.bugfix: Fix an issue with cross-signing where device signatures were not sent to remote servers.
- changelog.d/6846.doc: Add details of PR merge strategy to contributing docs.
- changelog.d/6847.misc: Populate `rooms.room_version` database column at startup, rather than in a background update.
- changelog.d/6849.bugfix: Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank.
- changelog.d/6854.misc: Refactoring work in preparation for changing the event redaction algorithm.
- changelog.d/6855.misc: Update pip install directions in readme to avoid error when using zsh.
- changelog.d/6856.misc: Refactoring work in preparation for changing the event redaction algorithm.
- changelog.d/6857.misc: Refactoring work in preparation for changing the event redaction algorithm.
- changelog.d/6858.misc: Refactoring work in preparation for changing the event redaction algorithm.
- changelog.d/6862.misc: Reduce amount we log at `INFO` level.
- changelog.d/6864.misc: Limit the number of events that can be requested by the backfill federation API to 100.
- changelog.d/6866.feature: Add ability to run some group APIs on workers.
- changelog.d/6869.misc: Remove unused `get_room_stats_state` method.
- changelog.d/6871.misc: Add typing to `synapse.federation.sender` and port to async/await.
- changelog.d/6872.misc: Refactor `_EventInternalMetadata` object to improve type safety.
- changelog.d/6873.feature: Add ability to route federation user device queries to workers.
- changelog.d/6877.removal: Remove `m.lazy_load_members` from `unstable_features` since lazy loading is in the stable Client-Server API version r0.5.0.
- changelog.d/6882.misc: Reject device display names over 100 characters in length.
- changelog.d/6883.misc: Add an additional entry to the SyTest blacklist for worker mode.
- changelog.d/6887.misc: Fix the use of sed in the linting scripts when using BSD sed.
- changelog.d/6888.feature: The result of a user directory search can now be filtered via the spam checker.
- changelog.d/6891.doc: Spell out that the last event sent to a room won't be deleted by a purge.
- changelog.d/6901.misc: Return a 404 instead of 200 for querying information of a non-existent user through the admin API.
- changelog.d/6904.removal: Stop sending alias events during adding / removing aliases. Check alt_aliases in the latest canonical aliases event when deleting an alias.
- changelog.d/6905.doc: Update Synapse's documentation to warn about the deprecation of ACME v1.
- changelog.d/6906.doc: Add documentation for the spam checker.
- changelog.d/6907.doc: Update Synapse's documentation to warn about the deprecation of ACME v1.
- changelog.d/6909.doc: Update Synapse's documentation to warn about the deprecation of ACME v1.
- changelog.d/6915.misc: Add type hints to the spam checker module.
- changelog.d/6918.docker: The deprecated "generate-config-on-the-fly" mode is no longer supported.
- changelog.d/6919.misc: Convert the directory handler tests to use HomeserverTestCase.
- changelog.d/6920.misc: Add a warning about indentation to generated configuration files.
- changelog.d/6921.docker: Databases created using the compose file in contrib/docker will now always have correct encoding and locale settings. Contributed by Fridtjof Mund.
- changelog.d/6936.misc: Increase DB/CPU perf of `_is_server_still_joined` check.
- changelog.d/6937.misc: Increase perf of `get_auth_chain_ids` used in state res v2.
- changelog.d/6938.doc: Fix worker docs to point `/publicised_groups` API correctly.
- changelog.d/6939.feature: Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432).
- changelog.d/6940.doc: Clean up and update docs on setting up federation.
- changelog.d/6945.bugfix: Fix errors from logging in the purge jobs related to the message retention policies support.
- changelog.d/6947.misc: Increase perf of `get_auth_chain_ids` used in state res v2.
- changelog.d/6948.feature: Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432).
- changelog.d/6949.feature: Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432).
- changelog.d/6950.misc: Tiny optimisation for incoming HTTP request dispatch.
- changelog.d/6951.misc: Revert #6937.
- changelog.d/6954.misc: Minor perf fixes to `get_auth_chain_ids`.
@@ -56,6 +56,9 @@ services:
    environment:
      - POSTGRES_USER=synapse
      - POSTGRES_PASSWORD=changeme
      # ensure the database gets created correctly
      # https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
      - POSTGRES_INITDB_ARGS="--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
    volumes:
      # You may store the database tables in a local folder..
      - ./schemas:/var/lib/postgresql/data
debian/changelog — 18 lines (vendored)

@@ -1,3 +1,21 @@
matrix-synapse-py3 (1.10.1) stable; urgency=medium

  * New synapse release 1.10.1.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 17 Feb 2020 16:27:28 +0000

matrix-synapse-py3 (1.10.0) stable; urgency=medium

  * New synapse release 1.10.0.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 12 Feb 2020 12:18:54 +0000

matrix-synapse-py3 (1.9.1) stable; urgency=medium

  * New synapse release 1.9.1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 28 Jan 2020 13:09:23 +0000

matrix-synapse-py3 (1.9.0) stable; urgency=medium

  * New synapse release 1.9.0.
@@ -16,7 +16,7 @@ ARG PYTHON_VERSION=3.7
###
### Stage 0: builder
###
FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder
FROM docker.io/python:${PYTHON_VERSION}-alpine3.11 as builder

# install the OS build deps
@@ -110,12 +110,12 @@ argument to `docker run`.

## Legacy dynamic configuration file support

For backwards-compatibility only, the docker image supports creating a dynamic
configuration file based on environment variables. This is now deprecated, but
is enabled when the `SYNAPSE_SERVER_NAME` variable is set (and `generate` is
not given).
The docker image used to support creating a dynamic configuration file based
on environment variables. This is no longer supported, and an error will be
raised if you try to run synapse without a config file.

To migrate from a dynamic configuration file to a static one, run the docker
It is, however, possible to generate a static configuration file based on
the environment variables that were previously used. To do this, run the docker
container once with the environment variables set, and `migrate_config`
command line option. For example:

@@ -127,15 +127,20 @@ docker run -it --rm \
    matrixdotorg/synapse:latest migrate_config
```

This will generate the same configuration file as the legacy mode used, but
will store it in `/data/homeserver.yaml` instead of a temporary location. You
can then use it as shown above at [Running synapse](#running-synapse).
This will generate the same configuration file as the legacy mode used, and
will store it in `/data/homeserver.yaml`. You can then use it as shown above at
[Running synapse](#running-synapse).

Note that the defaults used in this configuration file may be different to
those when generating a new config file with `generate`: for example, TLS is
enabled by default in this mode. You are encouraged to inspect the generated
configuration file and edit it to ensure it meets your needs.

## Building the image

If you need to build the image from a Synapse checkout, use the following `docker
build` command from the repo's root:

```
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```
@@ -188,11 +188,6 @@ def main(args, environ):
    else:
        ownership = "{}:{}".format(desired_uid, desired_gid)

    log(
        "Container running as UserID %s:%s, ENV (or defaults) requests %s:%s"
        % (os.getuid(), os.getgid(), desired_uid, desired_gid)
    )

    if ownership is None:
        log("Will not perform chmod/su-exec as UserID already matches request")

@@ -213,38 +208,30 @@ def main(args, environ):
    if mode is not None:
        error("Unknown execution mode '%s'" % (mode,))

    if "SYNAPSE_SERVER_NAME" in environ:
        # backwards-compatibility generate-a-config-on-the-fly mode
        if "SYNAPSE_CONFIG_PATH" in environ:
            error(
                "SYNAPSE_SERVER_NAME can only be combined with SYNAPSE_CONFIG_PATH "
                "in `generate` or `migrate_config` mode. To start synapse using a "
                "config file, unset the SYNAPSE_SERVER_NAME environment variable."
            )
    config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
    config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")

        config_path = "/compiled/homeserver.yaml"
        log(
            "Generating config file '%s' on-the-fly from environment variables.\n"
            "Note that this mode is deprecated. You can migrate to a static config\n"
            "file by running with 'migrate_config'. See the README for more details."
            % (config_path,)
        )

        generate_config_from_template("/compiled", config_path, environ, ownership)
    else:
        config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
        config_path = environ.get(
            "SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
        )
        if not os.path.exists(config_path):
    if not os.path.exists(config_path):
        if "SYNAPSE_SERVER_NAME" in environ:
            error(
                "Config file '%s' does not exist. You should either create a new "
                "config file by running with the `generate` argument (and then edit "
                "the resulting file before restarting) or specify the path to an "
                "existing config file with the SYNAPSE_CONFIG_PATH variable."
                """\
Config file '%s' does not exist.

The synapse docker image no longer supports generating a config file on-the-fly
based on environment variables. You can migrate to a static config file by
running with 'migrate_config'. See the README for more details.
"""
                % (config_path,)
            )

        error(
            "Config file '%s' does not exist. You should either create a new "
            "config file by running with the `generate` argument (and then edit "
            "the resulting file before restarting) or specify the path to an "
            "existing config file with the SYNAPSE_CONFIG_PATH variable."
            % (config_path,)
        )

    log("Starting synapse with config file " + config_path)

    args = ["python", "-m", synapse_worker, "--config-path", config_path]
@@ -1,4 +1,4 @@
# The config is maintained as an up-to-date snapshot of the default
# This file is maintained as an up-to-date snapshot of the default
# homeserver.yaml configuration generated by Synapse.
#
# It is intended to act as a reference for the default configuration,

@@ -10,3 +10,5 @@
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in INSTALL.md.

################################################################################
docs/ACME.md — 55 lines

@@ -1,12 +1,48 @@
# ACME

Synapse v1.0 will require valid TLS certificates for communication between
servers (port `8448` by default) in addition to those that are client-facing
(port `443`). If you do not already have a valid certificate for your domain,
the easiest way to get one is with Synapse's new ACME support, which will use
the ACME protocol to provision a certificate automatically. Synapse v0.99.0+
will provision server-to-server certificates automatically for you for free
through [Let's Encrypt](https://letsencrypt.org/) if you tell it to.
From version 1.0 (June 2019) onwards, Synapse requires valid TLS
certificates for communication between servers (by default on port
`8448`) in addition to those that are client-facing (port `443`). To
help homeserver admins fulfil this new requirement, Synapse v0.99.0
introduced support for automatically provisioning certificates through
[Let's Encrypt](https://letsencrypt.org/) using the ACME protocol.

## Deprecation of ACME v1

In [March 2019](https://community.letsencrypt.org/t/end-of-life-plan-for-acmev1/88430),
Let's Encrypt announced that they were deprecating version 1 of the ACME
protocol, with the plan to disable the use of it for new accounts in
November 2019, and for existing accounts in June 2020.

Synapse doesn't currently support version 2 of the ACME protocol, which
means that:

* for existing installs, Synapse's built-in ACME support will continue
  to work until June 2020.
* for new installs, this feature will not work at all.

Either way, it is recommended to move from Synapse's ACME support
feature to an external automated tool such as [certbot](https://github.com/certbot/certbot)
(or browse [this list](https://letsencrypt.org/fr/docs/client-options/)
for an alternative ACME client).
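By way of illustration only (the exact invocation is an assumption, not something the Synapse docs prescribe), a webroot-based certbot run for the example domain used elsewhere in these docs might look like:

```bash
# Hypothetical certbot invocation using the webroot plugin; adjust the domain
# and webroot path to your setup. The resulting fullchain.pem and privkey.pem
# end up under /etc/letsencrypt/live/example.com/.
sudo certbot certonly --webroot -w /var/www/html -d example.com
```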
It's also recommended to use a reverse proxy for the server-facing
communications (more documentation about this can be found
[here](/docs/reverse_proxy.md)) as well as the client-facing ones and
have it serve the certificates.

In case you can't do that and need Synapse to serve them itself, make
sure to set the `tls_certificate_path` configuration setting to the path
of the certificate (make sure to use the certificate containing the full
certification chain, e.g. `fullchain.pem` if using certbot) and
`tls_private_key_path` to the path of the matching private key. Note
that in this case you will need to restart Synapse after each
certificate renewal so that Synapse stops using the old certificate.

If you still want to use Synapse's built-in ACME support, the rest of
this document explains how to set it up.

## Initial setup

In the case that your `server_name` config variable is the same as
the hostname that the client connects to, then the same certificate can be

@@ -32,11 +68,6 @@ If you already have certificates, you will need to back up or delete them
(files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root
directory), Synapse's ACME implementation will not overwrite them.

You may wish to use alternate methods such as Certbot to obtain a certificate
from Let's Encrypt, depending on your server configuration. Of course, if you
already have a valid certificate for your homeserver's domain, that can be
placed in Synapse's config directory without the need for any ACME setup.

## ACME setup

The main steps for enabling ACME support in short summary are:
@@ -8,6 +8,9 @@ Depending on the amount of history being purged a call to the API may take
several minutes or longer. During this period users will not be able to
paginate further back in the room from the point being purged from.

Note that Synapse requires at least one message in each room, so it will never
delete the last message in a room.

The API is:

``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``
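As an illustrative sketch only (the hostname, room ID, event ID, and access token are placeholders, and the token must belong to a server admin), a call to this endpoint might look like:

```bash
# Purge history in a room up to a given event; the IDs are URL-encoded
# placeholders and the empty JSON body accepts the API's defaults.
curl -X POST \
  -H "Authorization: Bearer <admin_access_token>" \
  -H "Content-Type: application/json" \
  -d '{}' \
  "https://matrix.example.com/_synapse/admin/v1/purge_history/%21someroom%3Aexample.com/%24someevent"
```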
@@ -2,7 +2,8 @@ Create or modify Account
========================

This API allows an administrator to create or modify a user account with a
specific ``user_id``.
specific ``user_id``. Be aware that ``user_id`` is fully qualified: for example,
``@user:server.com``.

This api is::

@@ -15,6 +16,16 @@ with a body of:

    {
        "password": "user_password",
        "displayname": "User",
        "threepids": [
            {
                "medium": "email",
                "address": "<user_mail_1>"
            },
            {
                "medium": "email",
                "address": "<user_mail_2>"
            }
        ],
        "avatar_url": "<avatar_url>",
        "admin": false,
        "deactivated": false

@@ -23,6 +34,7 @@ with a body of:

including an ``access_token`` of a server admin.

The parameter ``displayname`` is optional and defaults to ``user_id``.
The parameter ``threepids`` is optional.
The parameter ``avatar_url`` is optional.
The parameter ``admin`` is optional and defaults to 'false'.
The parameter ``deactivated`` is optional and defaults to 'false'.
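Assuming the endpoint is ``PUT /_synapse/admin/v2/users/<user_id>`` (as referenced elsewhere in this changeset), an illustrative call could look like the following; the hostname, user, and token are placeholders:

```bash
# Create or modify @user:server.com with one email threepid. The user ID is
# URL-encoded, and the access token must belong to a server admin.
curl -X PUT \
  -H "Authorization: Bearer <admin_access_token>" \
  -H "Content-Type: application/json" \
  -d '{
        "password": "user_password",
        "displayname": "User",
        "threepids": [
          {"medium": "email", "address": "user@example.com"}
        ],
        "admin": false,
        "deactivated": false
      }' \
  "https://matrix.example.com/_synapse/admin/v2/users/%40user%3Aserver.com"
```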
docs/delegate.md — 94 lines (new file)

@@ -0,0 +1,94 @@
# Delegation

By default, other homeservers will expect to be able to reach yours via
your `server_name`, on port 8448. For example, if you set your `server_name`
to `example.com` (so that your user names look like `@user:example.com`),
other servers will try to connect to yours at `https://example.com:8448/`.

Delegation is a Matrix feature allowing a homeserver admin to retain a
`server_name` of `example.com` so that user IDs, room aliases, etc continue
to look like `*:example.com`, whilst having federation traffic routed
to a different server and/or port (e.g. `synapse.example.com:443`).

## .well-known delegation

To use this method, you need to be able to alter the
`server_name` 's https server to serve the `/.well-known/matrix/server`
URL. Having an active server (with a valid TLS certificate) serving your
`server_name` domain is out of the scope of this documentation.

The URL `https://<server_name>/.well-known/matrix/server` should
return a JSON structure containing the key `m.server` like so:

```json
{
    "m.server": "<synapse.server.name>[:<yourport>]"
}
```

In our example, this would mean that URL `https://example.com/.well-known/matrix/server`
should return:

```json
{
    "m.server": "synapse.example.com:443"
}
```

Note, specifying a port is optional. If no port is specified, then it defaults
to 8448.

With .well-known delegation, federating servers will check for a valid TLS
certificate for the delegated hostname (in our example: `synapse.example.com`).
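As an informal check (not part of the file itself), you can fetch the delegation document for the example above and confirm it returns the expected JSON:

```bash
# Expected output for the example above: {"m.server": "synapse.example.com:443"}
curl https://example.com/.well-known/matrix/server
```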
## SRV DNS record delegation

It is also possible to do delegation using a SRV DNS record. However, that is
considered an advanced topic since it's a bit complex to set up, and `.well-known`
delegation is already enough in most cases.

However, if you really need it, you can find documentation on what such a
record should look like and how Synapse will use it in [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names).
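For reference, the SRV record format and a quick way to check it (both taken from the older federation docs later in this changeset, using the same example hostnames) look roughly like this:

```bash
# SRV record to add in the example.com DNS zone (schematic form):
#   _matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.
# Verify the record once it has propagated:
dig -t srv _matrix._tcp.example.com
```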
## Delegation FAQ

### When do I need delegation?

If your homeserver's APIs are accessible on the default federation port (8448)
and the domain your `server_name` points to, you do not need any delegation.

For instance, if you registered `example.com` and pointed its DNS A record at a
fresh server, you could install Synapse on that host, giving it a `server_name`
of `example.com`, and once a reverse proxy has been set up to proxy all requests
sent to the port `8448` and serve TLS certificates for `example.com`, you
wouldn't need any delegation set up.

**However**, if your homeserver's APIs aren't accessible on port 8448 and on the
domain `server_name` points to, you will need to let other servers know how to
find it using delegation.

### Do you still recommend against using a reverse proxy on the federation port?

We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.

See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.

### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?

This is no longer necessary. If you are using a reverse proxy for all of your
TLS traffic, then you can set `no_tls: True` in the Synapse config.

In that case, the only reason Synapse needs the certificate is to populate a legacy
`tls_fingerprints` field in the federation API. This is ignored by Synapse 0.99.0
and later, and the only time pre-0.99 Synapses will check it is when attempting to
fetch the server keys - and generally this is delegated via `matrix.org`, which
is running a modern version of Synapse.

### Do I need the same certificate for the client and federation port?

No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy.
docs/federate.md — 178 lines

@@ -1,163 +1,41 @@
Setting up Federation
Setting up federation
=====================

Federation is the process by which users on different servers can participate
in the same room. For this to work, those other servers must be able to contact
yours to send messages.

The ``server_name`` configured in the Synapse configuration file (often
``homeserver.yaml``) defines how resources (users, rooms, etc.) will be
identified (eg: ``@user:example.com``, ``#room:example.com``). By
default, it is also the domain that other servers will use to
try to reach your server (via port 8448). This is easy to set
up and will work provided you set the ``server_name`` to match your
machine's public DNS hostname, and provide Synapse with a TLS certificate
which is valid for your ``server_name``.
The `server_name` configured in the Synapse configuration file (often
`homeserver.yaml`) defines how resources (users, rooms, etc.) will be
identified (eg: `@user:example.com`, `#room:example.com`). By default,
it is also the domain that other servers will use to try to reach your
server (via port 8448). This is easy to set up and will work provided
you set the `server_name` to match your machine's public DNS hostname.

For this default configuration to work, you will need to listen for TLS
connections on port 8448. The preferred way to do that is by using a
reverse proxy: see [reverse_proxy.md](<reverse_proxy.md>) for instructions
on how to correctly set one up.

In some cases you might not want to run Synapse on the machine that has
the `server_name` as its public DNS hostname, or you might want federation
traffic to use a different port than 8448. For example, you might want to
have your user names look like `@user:example.com`, but you want to run
Synapse on `synapse.example.com` on port 443. This can be done using
delegation, which allows an admin to control where federation traffic should
be sent. See [delegate.md](delegate.md) for instructions on how to set this up.

Once federation has been configured, you should be able to join a room over
federation. A good place to start is ``#synapse:matrix.org`` - a room for
federation. A good place to start is `#synapse:matrix.org` - a room for
Synapse admins.


## Delegation

For a more flexible configuration, you can have ``server_name``
resources (eg: ``@user:example.com``) served by a different host and
port (eg: ``synapse.example.com:443``). There are two ways to do this:

- adding a ``/.well-known/matrix/server`` URL served on ``https://example.com``.
- adding a DNS ``SRV`` record in the DNS zone of domain
  ``example.com``.

Without configuring delegation, the matrix federation will
expect to find your server via ``example.com:8448``. The following methods
allow you to retain a `server_name` of `example.com` so that your user IDs, room
aliases, etc continue to look like `*:example.com`, whilst having your
federation traffic routed to a different server.

### .well-known delegation

To use this method, you need to be able to alter the
``server_name`` 's https server to serve the ``/.well-known/matrix/server``
URL. Having an active server (with a valid TLS certificate) serving your
``server_name`` domain is out of the scope of this documentation.

The URL ``https://<server_name>/.well-known/matrix/server`` should
return a JSON structure containing the key ``m.server`` like so:

    {
        "m.server": "<synapse.server.name>[:<yourport>]"
    }

In our example, this would mean that URL ``https://example.com/.well-known/matrix/server``
should return:

    {
        "m.server": "synapse.example.com:443"
    }

Note, specifying a port is optional. If a port is not specified an SRV lookup
is performed, as described below. If the target of the
delegation does not have an SRV record, then the port defaults to 8448.

Most installations will not need to configure .well-known. However, it can be
useful in cases where the admin is hosting on behalf of someone else and
therefore cannot gain access to the necessary certificate. With .well-known,
federation servers will check for a valid TLS certificate for the delegated
hostname (in our example: ``synapse.example.com``).

### DNS SRV delegation

To use this delegation method, you need to have write access to your
``server_name`` 's domain zone DNS records (in our example it would be
``example.com`` DNS zone).

This method requires the target server to provide a
valid TLS certificate for the original ``server_name``.

You need to add a SRV record in your ``server_name`` 's DNS zone with
this format:

    _matrix._tcp.<yourdomain.com> <ttl> IN SRV <priority> <weight> <port> <synapse.server.name>

In our example, we would need to add this SRV record in the
``example.com`` DNS zone:

    _matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.

Once done and set up, you can check the DNS record with ``dig -t srv
_matrix._tcp.<server_name>``. In our example, we would expect this:

    $ dig -t srv _matrix._tcp.example.com
    _matrix._tcp.example.com. 3600 IN SRV 10 0 443 synapse.example.com.

Note that the target of a SRV record cannot be an alias (CNAME record): it has to point
directly to the server hosting the synapse instance.

### Delegation FAQ

#### When do I need a SRV record or .well-known URI?

If your homeserver listens on the default federation port (8448), and your
`server_name` points to the host that your homeserver runs on, you do not need an SRV
record or `.well-known/matrix/server` URI.

For instance, if you registered `example.com` and pointed its DNS A record at a
fresh server, you could install Synapse on that host,
giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is enabled,
it would automatically generate a valid TLS certificate for you via Let's Encrypt
and no SRV record or .well-known URI would be needed.

**However**, if your server does not listen on port 8448, or if your `server_name`
does not point to the host that your homeserver runs on, you will need to let
other servers know how to find it. The way to do this is via .well-known or an
SRV record.

#### I have created a .well-known URI. Do I also need an SRV record?

No. You can use either `.well-known` delegation or use an SRV record for delegation. You
do not need to use both to delegate to the same location.

#### Can I manage my own certificates rather than having Synapse renew certificates itself?

Yes, you are welcome to manage your certificates yourself. Synapse will only
attempt to obtain certificates from Let's Encrypt if you configure it to do
so. The only requirement is that there is a valid TLS cert present for
federation end points.

#### Do you still recommend against using a reverse proxy on the federation port?

We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.

See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.

#### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?

Practically speaking, this is no longer necessary.

If you are using a reverse proxy for all of your TLS traffic, then you can set
`no_tls: True` in the Synapse config. In that case, the only reason Synapse
needs the certificate is to populate a legacy `tls_fingerprints` field in the
federation API. This is ignored by Synapse 0.99.0 and later, and the only time
pre-0.99 Synapses will check it is when attempting to fetch the server keys -
and generally this is delegated via `matrix.org`, which will be running a modern
version of Synapse.

#### Do I need the same certificate for the client and federation port?

No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy. However, Synapse will use the
same certificate on any ports where TLS is configured.

## Troubleshooting

You can use the [federation tester](
<https://matrix.org/federationtester>) to check if your homeserver is
configured correctly. Alternatively try the [JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN).
Note that you'll have to modify this URL to replace ``DOMAIN`` with your
``server_name``. Hitting the API directly provides extra detail.
You can use the [federation tester](https://matrix.org/federationtester)
to check if your homeserver is configured correctly. Alternatively try the
[JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN).
Note that you'll have to modify this URL to replace `DOMAIN` with your
`server_name`. Hitting the API directly provides extra detail.
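As a quick illustration (not part of the docs themselves), querying that JSON report endpoint for an example domain could look like this; `jq` is optional and only used for readable output:

```bash
# Replace example.com with your own server_name.
curl -s "https://matrix.org/federationtester/api/report?server_name=example.com" | jq .
```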
The typical failure mode for federation is that when the server tries to join
a room, it is rejected with "401: Unauthorized". Generally this means that other

@@ -169,8 +47,8 @@ you invite them to. This can be caused by an incorrectly-configured reverse
proxy: see [reverse_proxy.md](<reverse_proxy.md>) for instructions on how to correctly
configure a reverse proxy.

## Running a Demo Federation of Synapses
## Running a demo federation of Synapses

If you want to get up and running quickly with a trio of homeservers in a
private federation, there is a script in the ``demo`` directory. This is mainly
private federation, there is a script in the `demo` directory. This is mainly
useful just for development purposes. See [demo/README](<../demo/README>).
@@ -42,6 +42,10 @@ purged according to its room's policy, then the receiving server will
process and store that event until it's picked up by the next purge job,
though it will always hide it from clients.

Synapse requires at least one message in each room, so it will never
delete the last message in a room. It will, however, hide it from
clients.


## Server configuration
@@ -32,7 +32,7 @@ Assuming your PostgreSQL database user is called `postgres`, first authenticate
|
||||
su - postgres
|
||||
# Or, if your system uses sudo to get administrative rights
|
||||
sudo -u postgres bash
|
||||
|
||||
|
||||
Then, create a user ``synapse_user`` with:
|
||||
|
||||
createuser --pwprompt synapse_user
|
||||
@@ -63,6 +63,24 @@ You may need to enable password authentication so `synapse_user` can
|
||||
connect to the database. See
|
||||
<https://www.postgresql.org/docs/11/auth-pg-hba-conf.html>.
|
||||
|
||||
### Fixing incorrect `COLLATE` or `CTYPE`
|
||||
|
||||
Synapse will refuse to set up a new database if it has the wrong values of
|
||||
`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
|
||||
different locales can cause issues if the locale library is updated from
|
||||
underneath the database, or if a different version of the locale is used on any
|
||||
replicas.
|
||||
|
||||
The safest way to fix the issue is to take a dump and recreate the database with
|
||||
the correct `COLLATE` and `CTYPE` parameters (as per
|
||||
[docs/postgres.md](docs/postgres.md)). It is also possible to change the
|
||||
parameters on a live database and run a `REINDEX` on the entire database,
|
||||
however extreme care must be taken to avoid database corruption.
|
||||
|
||||
Note that the above may fail with an error about duplicate rows if corruption
|
||||
has already occurred, and such duplicate rows will need to be manually removed.
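For reference, a quick way to check what an existing database uses, and a database created with the recommended settings, might look like this (database and role names are just examples):

```sql
-- Check the collation settings of an existing database
SELECT datname, datcollate, datctype FROM pg_database WHERE datname = 'synapse';

-- Recreate the database with the recommended C collation and ctype
CREATE DATABASE synapse
  ENCODING 'UTF8'
  LC_COLLATE='C'
  LC_CTYPE='C'
  template=template0
  OWNER synapse_user;
```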
|
||||
|
||||
|
||||
## Tuning Postgres
|
||||
|
||||
The default settings should be fine for most deployments. For larger
|
||||
|
||||
@@ -18,9 +18,10 @@ When setting up a reverse proxy, remember that Matrix clients and other
|
||||
Matrix servers do not necessarily need to connect to your server via the
|
||||
same server name or port. Indeed, clients will use port 443 by default,
|
||||
whereas servers default to port 8448. Where these are different, we
|
||||
refer to the 'client port' and the 'federation port'. See [Setting
|
||||
up federation](federate.md) for more details of the algorithm used for
|
||||
federation connections.
|
||||
refer to the 'client port' and the 'federation port'. See [the Matrix
|
||||
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
|
||||
for more details of the algorithm used for federation connections, and
|
||||
[delegate.md](<delegate.md>) for instructions on setting up delegation.
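As a concrete (if simplified) illustration, an nginx server block terminating TLS on the client port and forwarding Matrix traffic to a local Synapse might look roughly like this; the hostname and the backend port 8008 are assumptions for the sketch:

```nginx
server {
    listen 443 ssl;
    listen [::]:443 ssl;
    server_name matrix.example.com;

    location /_matrix {
        # Forward Matrix traffic to Synapse listening on localhost
        proxy_pass http://localhost:8008;
        proxy_set_header X-Forwarded-For $remote_addr;
    }
}
```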
|
||||
|
||||
Let's assume that we expect clients to connect to our server at
|
||||
`https://matrix.example.com`, and other servers to connect at
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# The config is maintained as an up-to-date snapshot of the default
|
||||
# This file is maintained as an up-to-date snapshot of the default
|
||||
# homeserver.yaml configuration generated by Synapse.
|
||||
#
|
||||
# It is intended to act as a reference for the default configuration,
|
||||
@@ -10,6 +10,16 @@
|
||||
# homeserver.yaml. Instead, if you are starting from scratch, please generate
|
||||
# a fresh config using Synapse by following the instructions in INSTALL.md.
|
||||
|
||||
################################################################################
|
||||
|
||||
# Configuration file for Synapse.
|
||||
#
|
||||
# This is a YAML file: see [1] for a quick introduction. Note in particular
|
||||
# that *indentation is important*: all the elements of a list or dictionary
|
||||
# should have the same indentation.
|
||||
#
|
||||
# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html
|
||||
|
||||
## Server ##
|
||||
|
||||
# The domain name of the server, with optional explicit port.
|
||||
@@ -466,6 +476,11 @@ retention:
|
||||
# ACME support: This will configure Synapse to request a valid TLS certificate
|
||||
# for your configured `server_name` via Let's Encrypt.
|
||||
#
|
||||
# Note that ACME v1 is now deprecated, and Synapse currently doesn't support
|
||||
# ACME v2. This means that this feature currently won't work with installs set
|
||||
# up after November 2019. For more info, and alternative solutions, see
|
||||
# https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
|
||||
#
|
||||
# Note that provisioning a certificate in this way requires port 80 to be
|
||||
# routed to Synapse so that it can complete the http-01 ACME challenge.
|
||||
# By default, if you enable ACME support, Synapse will attempt to listen on
|
||||
|
||||
docs/spam_checker.md (new file, 88 lines)
@@ -0,0 +1,88 @@
|
||||
# Handling spam in Synapse
|
||||
|
||||
Synapse has support to customize spam checking behavior. It can plug into a
|
||||
variety of events and affect how they are presented to users on your homeserver.
|
||||
|
||||
The spam checking behavior is implemented as a Python class, which must be
|
||||
able to be imported by the running Synapse.
|
||||
|
||||
## Python spam checker class
|
||||
|
||||
The Python class is instantiated with two objects:
|
||||
|
||||
* Any configuration (see below).
|
||||
* An instance of `synapse.spam_checker_api.SpamCheckerApi`.
|
||||
|
||||
It then implements methods which return a boolean to alter behavior in Synapse.
|
||||
|
||||
There's a generic method for checking every event (`check_event_for_spam`), as
|
||||
well as some specific methods:
|
||||
|
||||
* `user_may_invite`
|
||||
* `user_may_create_room`
|
||||
* `user_may_create_room_alias`
|
||||
* `user_may_publish_room`
|
||||
|
||||
The details of each of these methods (as well as their inputs and outputs)
|
||||
are documented in the `synapse.events.spamcheck.SpamChecker` class.
|
||||
|
||||
The `SpamCheckerApi` class provides a way for the custom spam checker class to
|
||||
call back into the homeserver internals. It currently implements the following
|
||||
methods:
|
||||
|
||||
* `get_state_events_in_room`
|
||||
|
||||
### Example
|
||||
|
||||
```python
|
||||
class ExampleSpamChecker:
|
||||
def __init__(self, config, api):
|
||||
self.config = config
|
||||
self.api = api
|
||||
|
||||
def check_event_for_spam(self, foo):
|
||||
return False # allow all events
|
||||
|
||||
def user_may_invite(self, inviter_userid, invitee_userid, room_id):
|
||||
return True # allow all invites
|
||||
|
||||
def user_may_create_room(self, userid):
|
||||
return True # allow all room creations
|
||||
|
||||
def user_may_create_room_alias(self, userid, room_alias):
|
||||
return True # allow all room aliases
|
||||
|
||||
def user_may_publish_room(self, userid, room_id):
|
||||
return True # allow publishing of all rooms
|
||||
|
||||
def check_username_for_spam(self, user_profile):
|
||||
return False # allow all usernames
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
Modify the `spam_checker` section of your `homeserver.yaml` in the following
|
||||
manner:
|
||||
|
||||
`module` should point to the fully qualified Python class that implements your
|
||||
custom logic, e.g. `my_module.ExampleSpamChecker`.
|
||||
|
||||
`config` is a dictionary that gets passed to the spam checker class.
|
||||
|
||||
### Example
|
||||
|
||||
This section might look like:
|
||||
|
||||
```yaml
|
||||
spam_checker:
|
||||
module: my_module.ExampleSpamChecker
|
||||
config:
|
||||
# Enable or disable a specific option in ExampleSpamChecker.
|
||||
my_custom_option: true
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
The [Mjolnir](https://github.com/matrix-org/mjolnir) project is a full-fledged
|
||||
example using the Synapse spam checking API, including a bot for dynamic
|
||||
configuration.
|
||||
@@ -176,15 +176,34 @@ endpoints matching the following regular expressions:
|
||||
^/_matrix/federation/v1/query_auth/
|
||||
^/_matrix/federation/v1/event_auth/
|
||||
^/_matrix/federation/v1/exchange_third_party_invite/
|
||||
^/_matrix/federation/v1/user/devices/
|
||||
^/_matrix/federation/v1/send/
|
||||
^/_matrix/federation/v1/get_groups_publicised$
|
||||
^/_matrix/key/v2/query
|
||||
|
||||
Additionally, the following REST endpoints can be handled for GET requests:
|
||||
|
||||
^/_matrix/federation/v1/groups/
|
||||
|
||||
The above endpoints should all be routed to the federation_reader worker by the
|
||||
reverse-proxy configuration.
|
||||
|
||||
The `^/_matrix/federation/v1/send/` endpoint must only be handled by a single
|
||||
instance.
|
||||
|
||||
Note that `federation` must be added to the listener resources in the worker config:
|
||||
|
||||
```yaml
|
||||
worker_app: synapse.app.federation_reader
|
||||
...
|
||||
worker_listeners:
|
||||
- type: http
|
||||
port: <port>
|
||||
resources:
|
||||
- names:
|
||||
- federation
|
||||
```
|
||||
|
||||
### `synapse.app.federation_sender`
|
||||
|
||||
Handles sending federation traffic to other servers. Doesn't handle any
|
||||
@@ -241,10 +260,14 @@ following regular expressions:
|
||||
^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
|
||||
^/_matrix/client/versions$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/
|
||||
|
||||
Additionally, the following REST endpoints can be handled for GET requests:
|
||||
|
||||
^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/groups/.*$
|
||||
|
||||
Additionally, the following REST endpoints can be handled, but all requests must
|
||||
be routed to the same instance:
|
||||
@@ -265,6 +288,10 @@ the following regular expressions:
|
||||
|
||||
^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$
|
||||
|
||||
When using this worker you must also set `update_user_directory: False` in the
|
||||
shared configuration file to stop the main Synapse process from running background
|
||||
jobs related to updating the user directory.
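That is, in the shared `homeserver.yaml` (not the worker's own config file), something like:

```yaml
# Hand user-directory updates off to the user_dir worker; the main
# process should no longer run these background jobs.
update_user_directory: false
```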
|
||||
|
||||
### `synapse.app.frontend_proxy`
|
||||
|
||||
Proxies some frequently-requested client endpoints to add caching and remove
|
||||
|
||||
@@ -3,7 +3,8 @@
|
||||
# Exits with 0 if there are no problems, or another code otherwise.
|
||||
|
||||
# Fix non-lowercase true/false values
|
||||
sed -i -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
|
||||
sed -i.bak -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
|
||||
rm docs/sample_config.yaml.bak
|
||||
|
||||
# Check if anything changed
|
||||
git diff --exit-code docs/sample_config.yaml
|
||||
|
||||
@@ -36,7 +36,7 @@ try:
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
__version__ = "1.9.0"
|
||||
__version__ = "1.10.1"
|
||||
|
||||
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
|
||||
# We import here so that we don't have to install a bunch of deps when
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from six import itervalues
|
||||
|
||||
@@ -33,7 +34,9 @@ from synapse.api.errors import (
|
||||
MissingClientTokenError,
|
||||
ResourceLimitError,
|
||||
)
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
|
||||
from synapse.config.server import is_threepid_reserved
|
||||
from synapse.events import EventBase
|
||||
from synapse.types import StateMap, UserID
|
||||
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
|
||||
from synapse.util.caches.lrucache import LruCache
|
||||
@@ -77,32 +80,48 @@ class Auth(object):
|
||||
self._account_validity = hs.config.account_validity
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_from_context(self, room_version, event, context, do_sig_check=True):
|
||||
def check_from_context(self, room_version: str, event, context, do_sig_check=True):
|
||||
prev_state_ids = yield context.get_prev_state_ids()
|
||||
auth_events_ids = yield self.compute_auth_events(
|
||||
event, prev_state_ids, for_verification=True
|
||||
)
|
||||
auth_events = yield self.store.get_events(auth_events_ids)
|
||||
auth_events = {(e.type, e.state_key): e for e in itervalues(auth_events)}
|
||||
|
||||
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
|
||||
event_auth.check(
|
||||
room_version, event, auth_events=auth_events, do_sig_check=do_sig_check
|
||||
room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_joined_room(self, room_id, user_id, current_state=None):
|
||||
"""Check if the user is currently joined in the room
|
||||
def check_user_in_room(
|
||||
self,
|
||||
room_id: str,
|
||||
user_id: str,
|
||||
current_state: Optional[StateMap[EventBase]] = None,
|
||||
allow_departed_users: bool = False,
|
||||
):
|
||||
"""Check if the user is in the room, or was at some point.
|
||||
Args:
|
||||
room_id(str): The room to check.
|
||||
user_id(str): The user to check.
|
||||
current_state(dict): Optional map of the current state of the room.
|
||||
room_id: The room to check.
|
||||
|
||||
user_id: The user to check.
|
||||
|
||||
current_state: Optional map of the current state of the room.
|
||||
If provided then that map is used to check whether they are a
|
||||
member of the room. Otherwise the current membership is
|
||||
loaded from the database.
|
||||
|
||||
allow_departed_users: if True, accept users that were previously
|
||||
members but have now departed.
|
||||
|
||||
Raises:
|
||||
AuthError if the user is not in the room.
|
||||
AuthError if the user is/was not in the room.
|
||||
Returns:
|
||||
A deferred membership event for the user if the user is in
|
||||
the room.
|
||||
Deferred[Optional[EventBase]]:
|
||||
Membership event for the user if the user was in the
|
||||
room. This will be the join event if they are currently joined to
|
||||
the room. This will be the leave event if they have left the room.
|
||||
"""
|
||||
if current_state:
|
||||
member = current_state.get((EventTypes.Member, user_id), None)
|
||||
@@ -110,37 +129,19 @@ class Auth(object):
|
||||
member = yield self.state.get_current_state(
|
||||
room_id=room_id, event_type=EventTypes.Member, state_key=user_id
|
||||
)
|
||||
|
||||
self._check_joined_room(member, user_id, room_id)
|
||||
return member
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_user_was_in_room(self, room_id, user_id):
|
||||
"""Check if the user was in the room at some point.
|
||||
Args:
|
||||
room_id(str): The room to check.
|
||||
user_id(str): The user to check.
|
||||
Raises:
|
||||
AuthError if the user was never in the room.
|
||||
Returns:
|
||||
A deferred membership event for the user if the user was in the
|
||||
room. This will be the join event if they are currently joined to
|
||||
the room. This will be the leave event if they have left the room.
|
||||
"""
|
||||
member = yield self.state.get_current_state(
|
||||
room_id=room_id, event_type=EventTypes.Member, state_key=user_id
|
||||
)
|
||||
membership = member.membership if member else None
|
||||
|
||||
if membership not in (Membership.JOIN, Membership.LEAVE):
|
||||
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
|
||||
if membership == Membership.JOIN:
|
||||
return member
|
||||
|
||||
if membership == Membership.LEAVE:
|
||||
# XXX this looks totally bogus. Why do we not allow users who have been banned,
|
||||
# or those who were members previously and have been re-invited?
|
||||
if allow_departed_users and membership == Membership.LEAVE:
|
||||
forgot = yield self.store.did_forget(user_id, room_id)
|
||||
if forgot:
|
||||
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
|
||||
if not forgot:
|
||||
return member
|
||||
|
||||
return member
|
||||
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_host_in_room(self, room_id, host):
|
||||
@@ -148,12 +149,6 @@ class Auth(object):
|
||||
latest_event_ids = yield self.store.is_host_joined(room_id, host)
|
||||
return latest_event_ids
|
||||
|
||||
def _check_joined_room(self, member, user_id, room_id):
|
||||
if not member or member.membership != Membership.JOIN:
|
||||
raise AuthError(
|
||||
403, "User %s not in room %s (%s)" % (user_id, room_id, repr(member))
|
||||
)
|
||||
|
||||
def can_federate(self, event, auth_events):
|
||||
creation_event = auth_events.get((EventTypes.Create, ""))
|
||||
|
||||
@@ -557,7 +552,7 @@ class Auth(object):
|
||||
return True
|
||||
|
||||
user_id = user.to_string()
|
||||
yield self.check_joined_room(room_id, user_id)
|
||||
yield self.check_user_in_room(room_id, user_id)
|
||||
|
||||
# We currently require the user is a "moderator" in the room. We do this
|
||||
# by checking if they would (theoretically) be able to change the
|
||||
@@ -630,10 +625,18 @@ class Auth(object):
|
||||
return query_params[0].decode("ascii")
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_in_room_or_world_readable(self, room_id, user_id):
|
||||
def check_user_in_room_or_world_readable(
|
||||
self, room_id: str, user_id: str, allow_departed_users: bool = False
|
||||
):
|
||||
"""Checks that the user is or was in the room or the room is world
|
||||
readable. If it isn't then an exception is raised.
|
||||
|
||||
Args:
|
||||
room_id: room to check
|
||||
user_id: user to check
|
||||
allow_departed_users: if True, accept users that were previously
|
||||
members but have now departed
|
||||
|
||||
Returns:
|
||||
Deferred[tuple[str, str|None]]: Resolves to the current membership of
|
||||
the user in the room and the membership event ID of the user. If
|
||||
@@ -642,12 +645,14 @@ class Auth(object):
|
||||
"""
|
||||
|
||||
try:
|
||||
# check_user_was_in_room will return the most recent membership
|
||||
# check_user_in_room will return the most recent membership
|
||||
# event for the user if:
|
||||
# * The user is a non-guest user, and was ever in the room
|
||||
# * The user is a guest user, and has joined the room
|
||||
# else it will throw.
|
||||
member_event = yield self.check_user_was_in_room(room_id, user_id)
|
||||
member_event = yield self.check_user_in_room(
|
||||
room_id, user_id, allow_departed_users=allow_departed_users
|
||||
)
|
||||
return member_event.membership, member_event.event_id
|
||||
except AuthError:
|
||||
visibility = yield self.state.get_current_state(
|
||||
@@ -659,7 +664,9 @@ class Auth(object):
|
||||
):
|
||||
return Membership.JOIN, None
|
||||
raise AuthError(
|
||||
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
|
||||
403,
|
||||
"User %s not in room %s, and room previews are disabled"
|
||||
% (user_id, room_id),
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
|
||||
@@ -77,12 +77,11 @@ class EventTypes(object):
|
||||
Aliases = "m.room.aliases"
|
||||
Redaction = "m.room.redaction"
|
||||
ThirdPartyInvite = "m.room.third_party_invite"
|
||||
Encryption = "m.room.encryption"
|
||||
RelatedGroups = "m.room.related_groups"
|
||||
|
||||
RoomHistoryVisibility = "m.room.history_visibility"
|
||||
CanonicalAlias = "m.room.canonical_alias"
|
||||
Encryption = "m.room.encryption"
|
||||
Encrypted = "m.room.encrypted"
|
||||
RoomAvatar = "m.room.avatar"
|
||||
RoomEncryption = "m.room.encryption"
|
||||
GuestAccess = "m.room.guest_access"
|
||||
|
||||
@@ -402,11 +402,9 @@ class UnsupportedRoomVersionError(SynapseError):
|
||||
"""The client's request to create a room used a room version that the server does
|
||||
not support."""
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self, msg="Homeserver does not support this room version"):
|
||||
super(UnsupportedRoomVersionError, self).__init__(
|
||||
code=400,
|
||||
msg="Homeserver does not support this room version",
|
||||
errcode=Codes.UNSUPPORTED_ROOM_VERSION,
|
||||
code=400, msg=msg, errcode=Codes.UNSUPPORTED_ROOM_VERSION,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -57,6 +57,9 @@ class RoomVersion(object):
|
||||
state_res = attr.ib() # int; one of the StateResolutionVersions
|
||||
enforce_key_validity = attr.ib() # bool
|
||||
|
||||
# bool: before MSC2260, anyone was allowed to send an aliases event
|
||||
special_case_aliases_auth = attr.ib(type=bool, default=False)
|
||||
|
||||
|
||||
class RoomVersions(object):
|
||||
V1 = RoomVersion(
|
||||
@@ -65,6 +68,7 @@ class RoomVersions(object):
|
||||
EventFormatVersions.V1,
|
||||
StateResolutionVersions.V1,
|
||||
enforce_key_validity=False,
|
||||
special_case_aliases_auth=True,
|
||||
)
|
||||
V2 = RoomVersion(
|
||||
"2",
|
||||
@@ -72,6 +76,7 @@ class RoomVersions(object):
|
||||
EventFormatVersions.V1,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=False,
|
||||
special_case_aliases_auth=True,
|
||||
)
|
||||
V3 = RoomVersion(
|
||||
"3",
|
||||
@@ -79,6 +84,7 @@ class RoomVersions(object):
|
||||
EventFormatVersions.V2,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=False,
|
||||
special_case_aliases_auth=True,
|
||||
)
|
||||
V4 = RoomVersion(
|
||||
"4",
|
||||
@@ -86,6 +92,7 @@ class RoomVersions(object):
|
||||
EventFormatVersions.V3,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=False,
|
||||
special_case_aliases_auth=True,
|
||||
)
|
||||
V5 = RoomVersion(
|
||||
"5",
|
||||
@@ -93,6 +100,14 @@ class RoomVersions(object):
|
||||
EventFormatVersions.V3,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=True,
|
||||
)
|
||||
MSC2260_DEV = RoomVersion(
|
||||
"org.matrix.msc2260",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.V3,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
)
|
||||
|
||||
|
||||
@@ -104,5 +119,6 @@ KNOWN_ROOM_VERSIONS = {
|
||||
RoomVersions.V3,
|
||||
RoomVersions.V4,
|
||||
RoomVersions.V5,
|
||||
RoomVersions.MSC2260_DEV,
|
||||
)
|
||||
} # type: Dict[str, RoomVersion]
|
||||
|
||||
@@ -57,6 +57,7 @@ from synapse.rest.client.v1.room import (
|
||||
RoomStateRestServlet,
|
||||
)
|
||||
from synapse.rest.client.v1.voip import VoipRestServlet
|
||||
from synapse.rest.client.v2_alpha import groups
|
||||
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
|
||||
from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
|
||||
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
|
||||
@@ -124,6 +125,8 @@ class ClientReaderServer(HomeServer):
|
||||
PushRuleRestServlet(self).register(resource)
|
||||
VersionsRestServlet(self).register(resource)
|
||||
|
||||
groups.register_servlets(self, resource)
|
||||
|
||||
resources.update({"/_matrix/client": resource})
|
||||
|
||||
root_resource = create_resource_tree(resources, NoResource())
|
||||
|
||||
@@ -33,8 +33,10 @@ from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
||||
from synapse.replication.slave.storage.directory import DirectoryStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
|
||||
from synapse.replication.slave.storage.keys import SlavedKeyStore
|
||||
from synapse.replication.slave.storage.profile import SlavedProfileStore
|
||||
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
|
||||
@@ -66,6 +68,8 @@ class FederationReaderSlavedStore(
|
||||
SlavedEventStore,
|
||||
SlavedKeyStore,
|
||||
SlavedRegistrationStore,
|
||||
SlavedGroupServerStore,
|
||||
SlavedDeviceStore,
|
||||
RoomStore,
|
||||
DirectoryStore,
|
||||
SlavedTransactionStore,
|
||||
|
||||
@@ -38,7 +38,11 @@ from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
|
||||
from synapse.replication.tcp.client import ReplicationClientHandler
|
||||
from synapse.replication.tcp.streams._base import ReceiptsStream
|
||||
from synapse.replication.tcp.streams._base import (
|
||||
DeviceListsStream,
|
||||
ReceiptsStream,
|
||||
ToDeviceStream,
|
||||
)
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.database import Database
|
||||
from synapse.types import ReadReceipt
|
||||
@@ -256,6 +260,20 @@ class FederationSenderHandler(object):
|
||||
"process_receipts_for_federation", self._on_new_receipts, rows
|
||||
)
|
||||
|
||||
# ... as well as device updates and messages
|
||||
elif stream_name == DeviceListsStream.NAME:
|
||||
hosts = set(row.destination for row in rows)
|
||||
for host in hosts:
|
||||
self.federation_sender.send_device_messages(host)
|
||||
|
||||
elif stream_name == ToDeviceStream.NAME:
|
||||
# The to_device stream includes stuff to be pushed to both local
|
||||
# clients and remote servers, so we ignore entities that start with
|
||||
# '@' (since they'll be local users rather than destinations).
|
||||
hosts = set(row.entity for row in rows if not row.entity.startswith("@"))
|
||||
for host in hosts:
|
||||
self.federation_sender.send_device_messages(host)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _on_new_receipts(self, rows):
|
||||
"""
|
||||
|
||||
@@ -53,6 +53,18 @@ Missing mandatory `server_name` config option.
|
||||
"""
|
||||
|
||||
|
||||
CONFIG_FILE_HEADER = """\
|
||||
# Configuration file for Synapse.
|
||||
#
|
||||
# This is a YAML file: see [1] for a quick introduction. Note in particular
|
||||
# that *indentation is important*: all the elements of a list or dictionary
|
||||
# should have the same indentation.
|
||||
#
|
||||
# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html
|
||||
|
||||
"""
|
||||
|
||||
|
||||
def path_exists(file_path):
|
||||
"""Check if a file exists
|
||||
|
||||
@@ -344,7 +356,7 @@ class RootConfig(object):
|
||||
str: the yaml config file
|
||||
"""
|
||||
|
||||
return "\n\n".join(
|
||||
return CONFIG_FILE_HEADER + "\n\n".join(
|
||||
dedent(conf)
|
||||
for conf in self.invoke_all(
|
||||
"generate_config_section",
|
||||
@@ -574,8 +586,8 @@ class RootConfig(object):
|
||||
if not path_exists(config_dir_path):
|
||||
os.makedirs(config_dir_path)
|
||||
with open(config_path, "w") as config_file:
|
||||
config_file.write("# vim:ft=yaml\n\n")
|
||||
config_file.write(config_str)
|
||||
config_file.write("\n\n# vim:ft=yaml")
|
||||
|
||||
config_dict = yaml.safe_load(config_str)
|
||||
obj.generate_missing_files(config_dict, config_dir_path)
|
||||
|
||||
@@ -32,6 +32,17 @@ from synapse.util import glob_to_regex
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
ACME_SUPPORT_ENABLED_WARN = """\
|
||||
This server uses Synapse's built-in ACME support. Note that ACME v1 has been
|
||||
deprecated by Let's Encrypt, and that Synapse doesn't currently support ACME v2,
|
||||
which means that this feature will not work with Synapse installs set up after
|
||||
November 2019, and that it may stop working on June 2020 for installs set up
|
||||
before that date.
|
||||
|
||||
For more info and alternative solutions, see
|
||||
https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
|
||||
--------------------------------------------------------------------------------"""
|
||||
|
||||
|
||||
class TlsConfig(Config):
|
||||
section = "tls"
|
||||
@@ -44,6 +55,9 @@ class TlsConfig(Config):
|
||||
|
||||
self.acme_enabled = acme_config.get("enabled", False)
|
||||
|
||||
if self.acme_enabled:
|
||||
logger.warning(ACME_SUPPORT_ENABLED_WARN)
|
||||
|
||||
# hyperlink complains on py2 if this is not a Unicode
|
||||
self.acme_url = six.text_type(
|
||||
acme_config.get("url", "https://acme-v01.api.letsencrypt.org/directory")
|
||||
@@ -109,6 +123,8 @@ class TlsConfig(Config):
|
||||
fed_whitelist_entries = config.get(
|
||||
"federation_certificate_verification_whitelist", []
|
||||
)
|
||||
if fed_whitelist_entries is None:
|
||||
fed_whitelist_entries = []
|
||||
|
||||
# Support globs (*) in whitelist values
|
||||
self.federation_certificate_verification_whitelist = [] # type: List[str]
|
||||
@@ -360,6 +376,11 @@ class TlsConfig(Config):
|
||||
# ACME support: This will configure Synapse to request a valid TLS certificate
|
||||
# for your configured `server_name` via Let's Encrypt.
|
||||
#
|
||||
# Note that ACME v1 is now deprecated, and Synapse currently doesn't support
|
||||
# ACME v2. This means that this feature currently won't work with installs set
|
||||
# up after November 2019. For more info, and alternative solutions, see
|
||||
# https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
|
||||
#
|
||||
# Note that provisioning a certificate in this way requires port 80 to be
|
||||
# routed to Synapse so that it can complete the http-01 ACME challenge.
|
||||
# By default, if you enable ACME support, Synapse will attempt to listen on
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
#
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -17,13 +18,17 @@
|
||||
import collections.abc
|
||||
import hashlib
|
||||
import logging
|
||||
from typing import Dict
|
||||
|
||||
from canonicaljson import encode_canonical_json
|
||||
from signedjson.sign import sign_json
|
||||
from signedjson.types import SigningKey
|
||||
from unpaddedbase64 import decode_base64, encode_base64
|
||||
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
from synapse.api.room_versions import RoomVersion
|
||||
from synapse.events.utils import prune_event, prune_event_dict
|
||||
from synapse.types import JsonDict
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -112,18 +117,28 @@ def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
|
||||
return hashed.name, hashed.digest()
|
||||
|
||||
|
||||
def compute_event_signature(event_dict, signature_name, signing_key):
|
||||
def compute_event_signature(
|
||||
room_version: RoomVersion,
|
||||
event_dict: JsonDict,
|
||||
signature_name: str,
|
||||
signing_key: SigningKey,
|
||||
) -> Dict[str, Dict[str, str]]:
|
||||
"""Compute the signature of the event for the given name and key.
|
||||
|
||||
Args:
|
||||
event_dict (dict): The event as a dict
|
||||
signature_name (str): The name of the entity signing the event
|
||||
room_version: the version of the room that this event is in.
|
||||
(the room version determines the redaction algorithm and hence the
|
||||
json to be signed)
|
||||
|
||||
event_dict: The event as a dict
|
||||
|
||||
signature_name: The name of the entity signing the event
|
||||
(typically the server's hostname).
|
||||
signing_key (syutil.crypto.SigningKey): The key to sign with
|
||||
|
||||
signing_key: The key to sign with
|
||||
|
||||
Returns:
|
||||
dict[str, dict[str, str]]: Returns a dictionary in the same format of
|
||||
an event's signatures field.
|
||||
a dictionary in the same format of an event's signatures field.
|
||||
"""
|
||||
redact_json = prune_event_dict(event_dict)
|
||||
redact_json.pop("age_ts", None)
|
||||
@@ -137,23 +152,26 @@ def compute_event_signature(event_dict, signature_name, signing_key):
|
||||
|
||||
|
||||
def add_hashes_and_signatures(
|
||||
event_dict, signature_name, signing_key, hash_algorithm=hashlib.sha256
|
||||
room_version: RoomVersion,
|
||||
event_dict: JsonDict,
|
||||
signature_name: str,
|
||||
signing_key: SigningKey,
|
||||
):
|
||||
"""Add content hash and sign the event
|
||||
|
||||
Args:
|
||||
event_dict (dict): The event to add hashes to and sign
|
||||
signature_name (str): The name of the entity signing the event
|
||||
room_version: the version of the room this event is in
|
||||
|
||||
event_dict: The event to add hashes to and sign
|
||||
signature_name: The name of the entity signing the event
|
||||
(typically the server's hostname).
|
||||
signing_key (syutil.crypto.SigningKey): The key to sign with
|
||||
hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
|
||||
to hash the event
|
||||
signing_key: The key to sign with
|
||||
"""
|
||||
|
||||
name, digest = compute_content_hash(event_dict, hash_algorithm=hash_algorithm)
|
||||
name, digest = compute_content_hash(event_dict, hash_algorithm=hashlib.sha256)
|
||||
|
||||
event_dict.setdefault("hashes", {})[name] = encode_base64(digest)
|
||||
|
||||
event_dict["signatures"] = compute_event_signature(
|
||||
event_dict, signature_name=signature_name, signing_key=signing_key
|
||||
room_version, event_dict, signature_name=signature_name, signing_key=signing_key
|
||||
)
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014 - 2016 OpenMarket Ltd
|
||||
# Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -23,17 +24,27 @@ from unpaddedbase64 import decode_base64
|
||||
|
||||
from synapse.api.constants import EventTypes, JoinRules, Membership
|
||||
from synapse.api.errors import AuthError, EventSizeError, SynapseError
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
|
||||
from synapse.api.room_versions import (
|
||||
KNOWN_ROOM_VERSIONS,
|
||||
EventFormatVersions,
|
||||
RoomVersion,
|
||||
)
|
||||
from synapse.types import UserID, get_domain_from_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def check(room_version, event, auth_events, do_sig_check=True, do_size_check=True):
|
||||
def check(
|
||||
room_version_obj: RoomVersion,
|
||||
event,
|
||||
auth_events,
|
||||
do_sig_check=True,
|
||||
do_size_check=True,
|
||||
):
|
||||
""" Checks if this event is correctly authed.
|
||||
|
||||
Args:
|
||||
room_version (str): the version of the room
|
||||
room_version_obj: the version of the room
|
||||
event: the event being checked.
|
||||
auth_events (dict: event-key -> event): the existing room state.
|
||||
|
||||
@@ -89,7 +100,12 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
|
||||
if not event.signatures.get(event_id_domain):
|
||||
raise AuthError(403, "Event not signed by sending server")
|
||||
|
||||
# Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules
|
||||
#
|
||||
# 1. If type is m.room.create:
|
||||
if event.type == EventTypes.Create:
|
||||
# 1b. If the domain of the room_id does not match the domain of the sender,
|
||||
# reject.
|
||||
sender_domain = get_domain_from_id(event.sender)
|
||||
room_id_domain = get_domain_from_id(event.room_id)
|
||||
if room_id_domain != sender_domain:
|
||||
@@ -97,39 +113,49 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
|
||||
403, "Creation event's room_id domain does not match sender's"
|
||||
)
|
||||
|
||||
room_version = event.content.get("room_version", "1")
|
||||
if room_version not in KNOWN_ROOM_VERSIONS:
|
||||
# 1c. If content.room_version is present and is not a recognised version, reject
|
||||
room_version_prop = event.content.get("room_version", "1")
|
||||
if room_version_prop not in KNOWN_ROOM_VERSIONS:
|
||||
raise AuthError(
|
||||
403, "room appears to have unsupported version %s" % (room_version,)
|
||||
403,
|
||||
"room appears to have unsupported version %s" % (room_version_prop,),
|
||||
)
|
||||
# FIXME
|
||||
|
||||
logger.debug("Allowing! %s", event)
|
||||
return
|
||||
|
||||
# 3. If event does not have a m.room.create in its auth_events, reject.
|
||||
creation_event = auth_events.get((EventTypes.Create, ""), None)
|
||||
|
||||
if not creation_event:
|
||||
raise AuthError(403, "No create event in auth events")
|
||||
|
||||
# additional check for m.federate
|
||||
creating_domain = get_domain_from_id(event.room_id)
|
||||
originating_domain = get_domain_from_id(event.sender)
|
||||
if creating_domain != originating_domain:
|
||||
if not _can_federate(event, auth_events):
|
||||
raise AuthError(403, "This room has been marked as unfederatable.")
|
||||
|
||||
# FIXME: Temp hack
|
||||
# 4. If type is m.room.aliases
|
||||
if event.type == EventTypes.Aliases:
|
||||
# 4a. If event has no state_key, reject
|
||||
if not event.is_state():
|
||||
raise AuthError(403, "Alias event must be a state event")
|
||||
if not event.state_key:
|
||||
raise AuthError(403, "Alias event must have non-empty state_key")
|
||||
|
||||
# 4b. If sender's domain doesn't matches [sic] state_key, reject
|
||||
sender_domain = get_domain_from_id(event.sender)
|
||||
if event.state_key != sender_domain:
|
||||
raise AuthError(
|
||||
403, "Alias event's state_key does not match sender's domain"
|
||||
)
|
||||
logger.debug("Allowing! %s", event)
|
||||
return
|
||||
|
||||
# 4c. Otherwise, allow.
|
||||
# This is removed by https://github.com/matrix-org/matrix-doc/pull/2260
|
||||
if room_version_obj.special_case_aliases_auth:
|
||||
logger.debug("Allowing! %s", event)
|
||||
return
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()])
|
||||
@@ -160,7 +186,7 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
|
||||
_check_power_levels(event, auth_events)
|
||||
|
||||
if event.type == EventTypes.Redaction:
|
||||
check_redaction(room_version, event, auth_events)
|
||||
check_redaction(room_version_obj, event, auth_events)
|
||||
|
||||
logger.debug("Allowing! %s", event)
|
||||
|
||||
@@ -386,7 +412,7 @@ def _can_send_event(event, auth_events):
|
||||
return True
|
||||
|
||||
|
||||
def check_redaction(room_version, event, auth_events):
|
||||
def check_redaction(room_version_obj: RoomVersion, event, auth_events):
|
||||
"""Check whether the event sender is allowed to redact the target event.
|
||||
|
||||
Returns:
|
||||
@@ -406,11 +432,7 @@ def check_redaction(room_version, event, auth_events):
|
||||
if user_level >= redact_level:
|
||||
return False
|
||||
|
||||
v = KNOWN_ROOM_VERSIONS.get(room_version)
|
||||
if not v:
|
||||
raise RuntimeError("Unrecognized room version %r" % (room_version,))
|
||||
|
||||
if v.event_format == EventFormatVersions.V1:
|
||||
if room_version_obj.event_format == EventFormatVersions.V1:
|
||||
redacter_domain = get_domain_from_id(event.event_id)
|
||||
redactee_domain = get_domain_from_id(event.redacts)
|
||||
if redacter_domain == redactee_domain:
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2019 New Vector Ltd
|
||||
# Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -16,13 +17,14 @@
|
||||
|
||||
import os
|
||||
from distutils.util import strtobool
|
||||
from typing import Optional, Type
|
||||
|
||||
import six
|
||||
|
||||
from unpaddedbase64 import encode_base64
|
||||
|
||||
from synapse.api.errors import UnsupportedRoomVersionError
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
|
||||
from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util.caches import intern_dict
|
||||
from synapse.util.frozenutils import freeze
|
||||
|
||||
@@ -36,34 +38,115 @@ from synapse.util.frozenutils import freeze
|
||||
USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0"))
|
||||
|
||||
|
||||
class DictProperty:
|
||||
"""An object property which delegates to the `_dict` within its parent object."""
|
||||
|
||||
__slots__ = ["key"]
|
||||
|
||||
def __init__(self, key: str):
|
||||
self.key = key
|
||||
|
||||
def __get__(self, instance, owner=None):
|
||||
# if the property is accessed as a class property rather than an instance
|
||||
# property, return the property itself rather than the value
|
||||
if instance is None:
|
||||
return self
|
||||
try:
|
||||
return instance._dict[self.key]
|
||||
except KeyError as e1:
|
||||
# We want this to look like a regular attribute error (mostly so that
|
||||
# hasattr() works correctly), so we convert the KeyError into an
|
||||
# AttributeError.
|
||||
#
|
||||
# To exclude the KeyError from the traceback, we explicitly
|
||||
# 'raise from e1.__context__' (which is better than 'raise from None',
|
||||
# because that would omit any *earlier* exceptions).
|
||||
#
|
||||
raise AttributeError(
|
||||
"'%s' has no '%s' property" % (type(instance), self.key)
|
||||
) from e1.__context__
|
||||
|
||||
def __set__(self, instance, v):
|
||||
instance._dict[self.key] = v
|
||||
|
||||
def __delete__(self, instance):
|
||||
try:
|
||||
del instance._dict[self.key]
|
||||
except KeyError as e1:
|
||||
raise AttributeError(
|
||||
"'%s' has no '%s' property" % (type(instance), self.key)
|
||||
) from e1.__context__
|
||||
|
||||
|
||||
class DefaultDictProperty(DictProperty):
|
||||
"""An extension of DictProperty which provides a default if the property is
|
||||
not present in the parent's _dict.
|
||||
|
||||
Note that this means that hasattr() on the property always returns True.
|
||||
"""
|
||||
|
||||
__slots__ = ["default"]
|
||||
|
||||
def __init__(self, key, default):
|
||||
super().__init__(key)
|
||||
self.default = default
|
||||
|
||||
def __get__(self, instance, owner=None):
|
||||
if instance is None:
|
||||
return self
|
||||
return instance._dict.get(self.key, self.default)
|
||||
|
||||
|
||||
class _EventInternalMetadata(object):
|
||||
def __init__(self, internal_metadata_dict):
|
||||
self.__dict__ = dict(internal_metadata_dict)
|
||||
__slots__ = ["_dict"]
|
||||
|
||||
def get_dict(self):
|
||||
return dict(self.__dict__)
|
||||
def __init__(self, internal_metadata_dict: JsonDict):
|
||||
# we have to copy the dict, because it turns out that the same dict is
|
||||
# reused. TODO: fix that
|
||||
self._dict = dict(internal_metadata_dict)
|
||||
|
||||
def is_outlier(self):
|
||||
return getattr(self, "outlier", False)
|
||||
outlier = DictProperty("outlier") # type: bool
|
||||
out_of_band_membership = DictProperty("out_of_band_membership") # type: bool
|
||||
send_on_behalf_of = DictProperty("send_on_behalf_of") # type: str
|
||||
recheck_redaction = DictProperty("recheck_redaction") # type: bool
|
||||
soft_failed = DictProperty("soft_failed") # type: bool
|
||||
proactively_send = DictProperty("proactively_send") # type: bool
|
||||
redacted = DictProperty("redacted") # type: bool
|
||||
txn_id = DictProperty("txn_id") # type: str
|
||||
token_id = DictProperty("token_id") # type: str
|
||||
stream_ordering = DictProperty("stream_ordering") # type: int
|
||||
|
||||
def is_out_of_band_membership(self):
|
||||
# XXX: These are set by StreamWorkerStore._set_before_and_after.
|
||||
# I'm pretty sure that these are never persisted to the database, so shouldn't
|
||||
# be here
|
||||
before = DictProperty("before") # type: str
|
||||
after = DictProperty("after") # type: str
|
||||
order = DictProperty("order") # type: int
|
||||
|
||||
def get_dict(self) -> JsonDict:
|
||||
return dict(self._dict)
|
||||
|
||||
def is_outlier(self) -> bool:
|
||||
return self._dict.get("outlier", False)
|
||||
|
||||
def is_out_of_band_membership(self) -> bool:
|
||||
"""Whether this is an out of band membership, like an invite or an invite
|
||||
rejection. This is needed as those events are marked as outliers, but
|
||||
they still need to be processed as if they're new events (e.g. updating
|
||||
invite state in the database, relaying to clients, etc).
|
||||
"""
|
||||
return getattr(self, "out_of_band_membership", False)
|
||||
return self._dict.get("out_of_band_membership", False)
|
||||
|
||||
def get_send_on_behalf_of(self):
|
||||
def get_send_on_behalf_of(self) -> Optional[str]:
|
||||
"""Whether this server should send the event on behalf of another server.
|
||||
This is used by the federation "send_join" API to forward the initial join
|
||||
event for a server in the room.
|
||||
|
||||
returns a str with the name of the server this event is sent on behalf of.
|
||||
"""
|
||||
return getattr(self, "send_on_behalf_of", None)
|
||||
return self._dict.get("send_on_behalf_of")
|
||||
|
||||
def need_to_check_redaction(self):
|
||||
def need_to_check_redaction(self) -> bool:
|
||||
"""Whether the redaction event needs to be rechecked when fetching
|
||||
from the database.
|
||||
|
||||
@@ -76,9 +159,9 @@ class _EventInternalMetadata(object):
|
||||
Returns:
|
||||
bool
|
||||
"""
|
||||
return getattr(self, "recheck_redaction", False)
|
||||
return self._dict.get("recheck_redaction", False)
|
||||
|
||||
def is_soft_failed(self):
|
||||
def is_soft_failed(self) -> bool:
|
||||
"""Whether the event has been soft failed.
|
||||
|
||||
Soft failed events should be handled as usual, except:
|
||||
@@ -90,7 +173,7 @@ class _EventInternalMetadata(object):
|
||||
Returns:
|
||||
bool
|
||||
"""
|
||||
return getattr(self, "soft_failed", False)
|
||||
return self._dict.get("soft_failed", False)
|
||||
|
||||
def should_proactively_send(self):
|
||||
"""Whether the event, if ours, should be sent to other clients and
|
||||
@@ -102,7 +185,7 @@ class _EventInternalMetadata(object):
|
||||
Returns:
|
||||
bool
|
||||
"""
|
||||
return getattr(self, "proactively_send", True)
|
||||
return self._dict.get("proactively_send", True)
|
||||
|
||||
def is_redacted(self):
|
||||
"""Whether the event has been redacted.
|
||||
@@ -113,32 +196,7 @@ class _EventInternalMetadata(object):
|
||||
Returns:
|
||||
bool
|
||||
"""
|
||||
return getattr(self, "redacted", False)
|
||||
|
||||
|
||||
def _event_dict_property(key):
|
||||
# We want to be able to use hasattr with the event dict properties.
|
||||
# However, (on python3) hasattr expects AttributeError to be raised. Hence,
|
||||
# we need to transform the KeyError into an AttributeError
|
||||
def getter(self):
|
||||
try:
|
||||
return self._event_dict[key]
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
def setter(self, v):
|
||||
try:
|
||||
self._event_dict[key] = v
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
def delete(self):
|
||||
try:
|
||||
del self._event_dict[key]
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
return property(getter, setter, delete)
|
||||
return self._dict.get("redacted", False)
|
||||
|
||||
|
||||
class EventBase(object):
|
||||
@@ -154,21 +212,27 @@ class EventBase(object):
|
||||
self.unsigned = unsigned
|
||||
self.rejected_reason = rejected_reason
|
||||
|
||||
self._event_dict = event_dict
|
||||
self._dict = event_dict
|
||||
|
||||
self.internal_metadata = _EventInternalMetadata(internal_metadata_dict)
|
||||
|
||||
auth_events = _event_dict_property("auth_events")
|
||||
depth = _event_dict_property("depth")
|
||||
content = _event_dict_property("content")
|
||||
hashes = _event_dict_property("hashes")
|
||||
origin = _event_dict_property("origin")
|
||||
origin_server_ts = _event_dict_property("origin_server_ts")
|
||||
prev_events = _event_dict_property("prev_events")
|
||||
redacts = _event_dict_property("redacts")
|
||||
room_id = _event_dict_property("room_id")
|
||||
sender = _event_dict_property("sender")
|
||||
user_id = _event_dict_property("sender")
|
||||
auth_events = DictProperty("auth_events")
|
||||
depth = DictProperty("depth")
|
||||
content = DictProperty("content")
|
||||
hashes = DictProperty("hashes")
|
||||
origin = DictProperty("origin")
|
||||
origin_server_ts = DictProperty("origin_server_ts")
|
||||
prev_events = DictProperty("prev_events")
|
||||
redacts = DefaultDictProperty("redacts", None)
|
||||
room_id = DictProperty("room_id")
|
||||
sender = DictProperty("sender")
|
||||
state_key = DictProperty("state_key")
|
||||
type = DictProperty("type")
|
||||
user_id = DictProperty("sender")
|
||||
|
||||
@property
|
||||
def event_id(self) -> str:
|
||||
raise NotImplementedError()
|
||||
|
||||
@property
|
||||
def membership(self):
|
||||
@@ -177,19 +241,19 @@ class EventBase(object):
|
||||
def is_state(self):
|
||||
return hasattr(self, "state_key") and self.state_key is not None
|
||||
|
||||
def get_dict(self):
|
||||
d = dict(self._event_dict)
|
||||
def get_dict(self) -> JsonDict:
|
||||
d = dict(self._dict)
|
||||
d.update({"signatures": self.signatures, "unsigned": dict(self.unsigned)})
|
||||
|
||||
return d
|
||||
|
||||
def get(self, key, default=None):
|
||||
return self._event_dict.get(key, default)
|
||||
return self._dict.get(key, default)
|
||||
|
||||
def get_internal_metadata_dict(self):
|
||||
return self.internal_metadata.get_dict()
|
||||
|
||||
def get_pdu_json(self, time_now=None):
|
||||
def get_pdu_json(self, time_now=None) -> JsonDict:
|
||||
pdu_json = self.get_dict()
|
||||
|
||||
if time_now is not None and "age_ts" in pdu_json["unsigned"]:
|
||||
@@ -206,16 +270,16 @@ class EventBase(object):
|
||||
raise AttributeError("Unrecognized attribute %s" % (instance,))
|
||||
|
||||
def __getitem__(self, field):
|
||||
return self._event_dict[field]
|
||||
return self._dict[field]
|
||||
|
||||
def __contains__(self, field):
|
||||
return field in self._event_dict
|
||||
return field in self._dict
|
||||
|
||||
def items(self):
|
||||
return list(self._event_dict.items())
|
||||
return list(self._dict.items())
|
||||
|
||||
def keys(self):
|
||||
return six.iterkeys(self._event_dict)
|
||||
return six.iterkeys(self._dict)
|
||||
|
||||
def prev_event_ids(self):
|
||||
"""Returns the list of prev event IDs. The order matches the order
|
||||
@@ -260,10 +324,7 @@ class FrozenEvent(EventBase):
|
||||
else:
|
||||
frozen_dict = event_dict
|
||||
|
||||
self.event_id = event_dict["event_id"]
|
||||
self.type = event_dict["type"]
|
||||
if "state_key" in event_dict:
|
||||
self.state_key = event_dict["state_key"]
|
||||
self._event_id = event_dict["event_id"]
|
||||
|
||||
super(FrozenEvent, self).__init__(
|
||||
frozen_dict,
|
||||
@@ -273,6 +334,10 @@ class FrozenEvent(EventBase):
|
||||
rejected_reason=rejected_reason,
|
||||
)
|
||||
|
||||
@property
|
||||
def event_id(self) -> str:
|
||||
return self._event_id
|
||||
|
||||
def __str__(self):
|
||||
return self.__repr__()
|
||||
|
||||
@@ -311,9 +376,6 @@ class FrozenEventV2(EventBase):
|
||||
frozen_dict = event_dict
|
||||
|
||||
self._event_id = None
|
||||
self.type = event_dict["type"]
|
||||
if "state_key" in event_dict:
|
||||
self.state_key = event_dict["state_key"]
|
||||
|
||||
super(FrozenEventV2, self).__init__(
|
||||
frozen_dict,
|
||||
@@ -383,28 +445,7 @@ class FrozenEventV3(FrozenEventV2):
|
||||
return self._event_id
|
||||
|
||||
|
||||
def room_version_to_event_format(room_version):
|
||||
"""Converts a room version string to the event format
|
||||
|
||||
Args:
|
||||
room_version (str)
|
||||
|
||||
Returns:
|
||||
int
|
||||
|
||||
Raises:
|
||||
UnsupportedRoomVersionError if the room version is unknown
|
||||
"""
|
||||
v = KNOWN_ROOM_VERSIONS.get(room_version)
|
||||
|
||||
if not v:
|
||||
# this can happen if support is withdrawn for a room version
|
||||
raise UnsupportedRoomVersionError()
|
||||
|
||||
return v.event_format
|
||||
|
||||
|
||||
def event_type_from_format_version(format_version):
|
||||
def event_type_from_format_version(format_version: int) -> Type[EventBase]:
|
||||
"""Returns the python type to use to construct an Event object for the
|
||||
given event format version.
|
||||
|
||||
@@ -424,3 +465,14 @@ def event_type_from_format_version(format_version):
|
||||
return FrozenEventV3
|
||||
else:
|
||||
raise Exception("No event format %r" % (format_version,))
|
||||
|
||||
|
||||
def make_event_from_dict(
|
||||
event_dict: JsonDict,
|
||||
room_version: RoomVersion = RoomVersions.V1,
|
||||
internal_metadata_dict: JsonDict = {},
|
||||
rejected_reason: Optional[str] = None,
|
||||
) -> EventBase:
|
||||
"""Construct an EventBase from the given event dict"""
|
||||
event_type = event_type_from_format_version(room_version.event_format)
|
||||
return event_type(event_dict, internal_metadata_dict, rejected_reason)
|
||||
|
||||
@@ -12,8 +12,10 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from typing import Optional
|
||||
|
||||
import attr
|
||||
from nacl.signing import SigningKey
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
@@ -23,13 +25,14 @@ from synapse.api.room_versions import (
|
||||
KNOWN_EVENT_FORMAT_VERSIONS,
|
||||
KNOWN_ROOM_VERSIONS,
|
||||
EventFormatVersions,
|
||||
RoomVersion,
|
||||
)
|
||||
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.types import EventID
from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict
from synapse.types import EventID, JsonDict
from synapse.util import Clock
from synapse.util.stringutils import random_string

from . import _EventInternalMetadata, event_type_from_format_version


@attr.s(slots=True, cmp=False, frozen=True)
class EventBuilder(object):
@@ -40,7 +43,7 @@ class EventBuilder(object):
    content/unsigned/internal_metadata fields are still mutable)

    Attributes:
        format_version (int): Event format version
        room_version: Version of the target room
        room_id (str)
        type (str)
        sender (str)
@@ -63,7 +66,7 @@ class EventBuilder(object):
    _hostname = attr.ib()
    _signing_key = attr.ib()

    format_version = attr.ib()
    room_version = attr.ib(type=RoomVersion)

    room_id = attr.ib()
    type = attr.ib()
@@ -108,7 +111,8 @@ class EventBuilder(object):
        )
        auth_ids = yield self._auth.compute_auth_events(self, state_ids)

        if self.format_version == EventFormatVersions.V1:
        format_version = self.room_version.event_format
        if format_version == EventFormatVersions.V1:
            auth_events = yield self._store.add_event_hashes(auth_ids)
            prev_events = yield self._store.add_event_hashes(prev_event_ids)
        else:
@@ -148,7 +152,7 @@ class EventBuilder(object):
            clock=self._clock,
            hostname=self._hostname,
            signing_key=self._signing_key,
            format_version=self.format_version,
            room_version=self.room_version,
            event_dict=event_dict,
            internal_metadata_dict=self.internal_metadata.get_dict(),
        )
@@ -201,7 +205,7 @@ class EventBuilderFactory(object):
            clock=self.clock,
            hostname=self.hostname,
            signing_key=self.signing_key,
            format_version=room_version.event_format,
            room_version=room_version,
            type=key_values["type"],
            state_key=key_values.get("state_key"),
            room_id=key_values["room_id"],
@@ -214,29 +218,19 @@ class EventBuilderFactory(object):


def create_local_event_from_event_dict(
    clock,
    hostname,
    signing_key,
    format_version,
    event_dict,
    internal_metadata_dict=None,
):
    clock: Clock,
    hostname: str,
    signing_key: SigningKey,
    room_version: RoomVersion,
    event_dict: JsonDict,
    internal_metadata_dict: Optional[JsonDict] = None,
) -> EventBase:
    """Takes a fully formed event dict, ensuring that fields like `origin`
    and `origin_server_ts` have correct values for a locally produced event,
    then signs and hashes it.

    Args:
        clock (Clock)
        hostname (str)
        signing_key
        format_version (int)
        event_dict (dict)
        internal_metadata_dict (dict|None)

    Returns:
        FrozenEvent
    """

    format_version = room_version.event_format
    if format_version not in KNOWN_EVENT_FORMAT_VERSIONS:
        raise Exception("No event format defined for version %r" % (format_version,))

@@ -257,9 +251,9 @@ def create_local_event_from_event_dict(

    event_dict.setdefault("signatures", {})

    add_hashes_and_signatures(event_dict, hostname, signing_key)
    return event_type_from_format_version(format_version)(
        event_dict, internal_metadata_dict=internal_metadata_dict
    add_hashes_and_signatures(room_version, event_dict, hostname, signing_key)
    return make_event_from_dict(
        event_dict, room_version, internal_metadata_dict=internal_metadata_dict
    )

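Editor's note: the change above swaps the bare `format_version` integer for a full `RoomVersion` object, from which the event format is derived. A minimal illustration of that lookup, using only names already shown in the diff (it assumes room version "1" is registered in KNOWN_ROOM_VERSIONS, which it is for all supported Synapse releases):

# Illustrative sketch only: derive the event format from a RoomVersion object,
# as the updated EventBuilder code does via `self.room_version.event_format`.
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions

room_version = KNOWN_ROOM_VERSIONS["1"]  # assumption: version "1" is always known
assert room_version.event_format == EventFormatVersions.V1
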
@@ -15,12 +15,17 @@
# limitations under the License.

import inspect
from typing import Dict

from synapse.spam_checker_api import SpamCheckerApi

MYPY = False
if MYPY:
    import synapse.server


class SpamChecker(object):
    def __init__(self, hs):
    def __init__(self, hs: "synapse.server.HomeServer"):
        self.spam_checker = None

        module = None
@@ -40,7 +45,7 @@ class SpamChecker(object):
        else:
            self.spam_checker = module(config=config)

    def check_event_for_spam(self, event):
    def check_event_for_spam(self, event: "synapse.events.EventBase") -> bool:
        """Checks if a given event is considered "spammy" by this server.

        If the server considers an event spammy, then it will be rejected if
@@ -48,26 +53,30 @@ class SpamChecker(object):
        users receive a blank event.

        Args:
            event (synapse.events.EventBase): the event to be checked
            event: the event to be checked

        Returns:
            bool: True if the event is spammy.
            True if the event is spammy.
        """
        if self.spam_checker is None:
            return False

        return self.spam_checker.check_event_for_spam(event)

    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
    def user_may_invite(
        self, inviter_userid: str, invitee_userid: str, room_id: str
    ) -> bool:
        """Checks if a given user may send an invite

        If this method returns false, the invite will be rejected.

        Args:
            userid (string): The sender's user ID
            inviter_userid: The user ID of the sender of the invitation
            invitee_userid: The user ID targeted in the invitation
            room_id: The room ID

        Returns:
            bool: True if the user may send an invite, otherwise False
            True if the user may send an invite, otherwise False
        """
        if self.spam_checker is None:
            return True
@@ -76,52 +85,78 @@ class SpamChecker(object):
            inviter_userid, invitee_userid, room_id
        )

    def user_may_create_room(self, userid):
    def user_may_create_room(self, userid: str) -> bool:
        """Checks if a given user may create a room

        If this method returns false, the creation request will be rejected.

        Args:
            userid (string): The sender's user ID
            userid: The ID of the user attempting to create a room

        Returns:
            bool: True if the user may create a room, otherwise False
            True if the user may create a room, otherwise False
        """
        if self.spam_checker is None:
            return True

        return self.spam_checker.user_may_create_room(userid)

    def user_may_create_room_alias(self, userid, room_alias):
    def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool:
        """Checks if a given user may create a room alias

        If this method returns false, the association request will be rejected.

        Args:
            userid (string): The sender's user ID
            room_alias (string): The alias to be created
            userid: The ID of the user attempting to create a room alias
            room_alias: The alias to be created

        Returns:
            bool: True if the user may create a room alias, otherwise False
            True if the user may create a room alias, otherwise False
        """
        if self.spam_checker is None:
            return True

        return self.spam_checker.user_may_create_room_alias(userid, room_alias)

    def user_may_publish_room(self, userid, room_id):
    def user_may_publish_room(self, userid: str, room_id: str) -> bool:
        """Checks if a given user may publish a room to the directory

        If this method returns false, the publish request will be rejected.

        Args:
            userid (string): The sender's user ID
            room_id (string): The ID of the room that would be published
            userid: The user ID attempting to publish the room
            room_id: The ID of the room that would be published

        Returns:
            bool: True if the user may publish the room, otherwise False
            True if the user may publish the room, otherwise False
        """
        if self.spam_checker is None:
            return True

        return self.spam_checker.user_may_publish_room(userid, room_id)

    def check_username_for_spam(self, user_profile: Dict[str, str]) -> bool:
        """Checks if a user ID or display name are considered "spammy" by this server.

        If the server considers a username spammy, then it will not be included in
        user directory results.

        Args:
            user_profile: The user information to check, it contains the keys:
                * user_id
                * display_name
                * avatar_url

        Returns:
            True if the user is spammy.
        """
        if self.spam_checker is None:
            return False

        # For backwards compatibility, if the method does not exist on the spam checker, fallback to not interfering.
        checker = getattr(self.spam_checker, "check_username_for_spam", None)
        if not checker:
            return False
        # Make a copy of the user profile object to ensure the spam checker
        # cannot modify it.
        return checker(user_profile.copy())

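Editor's note: the `SpamChecker` wrapper above loads a single configured module (constructed as `module(config=config)`) and delegates each callback to it. A hypothetical module implementing the callbacks documented in the diff might look like the following sketch; the class name, config handling, and blocking rules are invented purely for illustration:

# Hypothetical spam-checker module exposing the callbacks that the SpamChecker
# wrapper above delegates to. Names and rules are illustrative only.
class ExampleSpamChecker(object):
    def __init__(self, config):
        # `config` comes from the homeserver configuration; treated as an opaque
        # dict here (assumption for the sketch).
        self._blocked_word = (config or {}).get("blocked_word", "spam")

    def check_event_for_spam(self, event):
        # Flag events whose message body contains the configured word.
        body = event.content.get("body", "")
        return self._blocked_word in body

    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
        return True

    def user_may_create_room(self, userid):
        return True

    def user_may_create_room_alias(self, userid, room_alias):
        return True

    def user_may_publish_room(self, userid, room_id):
        return True

    def check_username_for_spam(self, user_profile):
        # `user_profile` carries "user_id", "display_name" and "avatar_url" keys,
        # as documented in check_username_for_spam above.
        display_name = user_profile.get("display_name") or ""
        return self._blocked_word in display_name
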
@@ -74,15 +74,16 @@ class ThirdPartyEventRules(object):
            is_requester_admin (bool): If the requester is an admin

        Returns:
            defer.Deferred
            defer.Deferred[bool]: Whether room creation is allowed or denied.
        """

        if self.third_party_rules is None:
            return
            return True

        yield self.third_party_rules.on_create_room(
        ret = yield self.third_party_rules.on_create_room(
            requester, config, is_requester_admin
        )
        return ret

    @defer.inlineCallbacks
    def check_threepid_can_be_invited(self, medium, address, room_id):

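Editor's note: the fix above makes the wrapper propagate the module's verdict (and return True when no module is configured) instead of always falling through to None. A hypothetical rules module for the `on_create_room` callback might be as simple as the sketch below; the class name and policy are invented for illustration, and a Deferred could equally be returned where a plain bool is shown:

# Hypothetical third-party event rules module: the wrapper above yields on
# on_create_room() and now returns its boolean verdict to the caller.
class ExampleThirdPartyRules(object):
    def __init__(self, config):
        self._config = config

    def on_create_room(self, requester, room_creation_config, is_requester_admin):
        # Example policy: only server admins may create rooms.
        return is_requester_admin
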
@@ -12,8 +12,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import re
from typing import Mapping, Union

from six import string_types

@@ -422,3 +423,37 @@ class EventClientSerializer(object):
        return yieldable_gather_results(
            self.serialize_event, events, time_now=time_now, **kwargs
        )


def copy_power_levels_contents(
    old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
):
    """Copy the content of a power_levels event, unfreezing frozendicts along the way

    Raises:
        TypeError if the input does not look like a valid power levels event content
    """
    if not isinstance(old_power_levels, collections.Mapping):
        raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))

    power_levels = {}
    for k, v in old_power_levels.items():

        if isinstance(v, int):
            power_levels[k] = v
            continue

        if isinstance(v, collections.Mapping):
            power_levels[k] = h = {}
            for k1, v1 in v.items():
                # we should only have one level of nesting
                if not isinstance(v1, int):
                    raise TypeError(
                        "Invalid power_levels value for %s.%s: %r" % (k, k1, v1)
                    )
                h[k1] = v1
            continue

        raise TypeError("Invalid power_levels value for %s: %r" % (k, v))

    return power_levels

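Editor's note: a short usage sketch for the new helper. The import path is an assumption based on the module the hunk belongs to (the one containing EventClientSerializer); the sample content is invented for illustration.

# Illustrative usage of copy_power_levels_contents.
from synapse.events.utils import copy_power_levels_contents  # assumed module path

old_content = {
    "ban": 50,
    "events": {"m.room.name": 100, "m.room.power_levels": 100},
    "users_default": 0,
}

new_content = copy_power_levels_contents(old_content)
# The result is a plain dict (nested mappings are copied into fresh dicts), so it
# can be modified without touching the original power_levels content.
new_content["events"]["m.room.topic"] = 50
assert "m.room.topic" not in old_content["events"]

# Anything that is not an int or a one-level mapping of ints raises TypeError.
try:
    copy_power_levels_contents({"ban": "50"})
except TypeError:
    pass
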
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,9 +23,13 @@ from twisted.internet.defer import DeferredList

from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
from synapse.api.room_versions import (
    KNOWN_ROOM_VERSIONS,
    EventFormatVersions,
    RoomVersion,
)
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import event_type_from_format_version
from synapse.events import EventBase, make_event_from_dict
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
@@ -33,7 +38,7 @@ from synapse.logging.context import (
    make_deferred_yieldable,
    preserve_fn,
)
from synapse.types import get_domain_from_id
from synapse.types import JsonDict, get_domain_from_id
from synapse.util import unwrapFirstError

logger = logging.getLogger(__name__)
@@ -342,16 +347,15 @@ def _is_invite_via_3pid(event):
    )


def event_from_pdu_json(pdu_json, event_format_version, outlier=False):
    """Construct a FrozenEvent from an event json received over federation
def event_from_pdu_json(
    pdu_json: JsonDict, room_version: RoomVersion, outlier: bool = False
) -> EventBase:
    """Construct an EventBase from an event json received over federation

    Args:
        pdu_json (object): pdu as received over federation
        event_format_version (int): The event format version
        outlier (bool): True to mark this event as an outlier

    Returns:
        FrozenEvent
        pdu_json: pdu as received over federation
        room_version: The version of the room this event belongs to
        outlier: True to mark this event as an outlier

    Raises:
        SynapseError: if the pdu is missing required fields or is otherwise
@@ -370,8 +374,7 @@ def event_from_pdu_json(pdu_json, event_format_version, outlier=False):
    elif depth > MAX_DEPTH:
        raise SynapseError(400, "Depth too large", Codes.BAD_JSON)

    event = event_type_from_format_version(event_format_version)(pdu_json)

    event = make_event_from_dict(pdu_json, room_version)
    event.internal_metadata.outlier = outlier

    return event

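Editor's note: callers of event_from_pdu_json now pass a full RoomVersion instead of a bare format version. A hedged sketch of the new calling convention follows; the PDU dict is skeletal and purely illustrative (real federation PDUs carry many more fields, and a PDU missing `type` or `depth` would raise SynapseError as documented above). It assumes room version "5" is registered in KNOWN_ROOM_VERSIONS.

# Illustrative sketch of the new calling convention. event_from_pdu_json is the
# function defined above; the pdu dict below is a made-up skeleton.
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

pdu_json = {
    "type": "m.room.message",
    "depth": 5,
    "room_id": "!room:example.com",
    "sender": "@alice:example.com",
    "origin_server_ts": 1580000000000,
    "content": {"body": "hello", "msgtype": "m.text"},
    "prev_events": [],
    "auth_events": [],
}

room_version = KNOWN_ROOM_VERSIONS["5"]  # assumption: version "5" is known
event = event_from_pdu_json(pdu_json, room_version, outlier=True)
assert event.internal_metadata.outlier
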
@@ -17,6 +17,18 @@
import copy
import itertools
import logging
from typing import (
    Any,
    Awaitable,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Sequence,
    Tuple,
    TypeVar,
)

from prometheus_client import Counter

@@ -29,16 +41,19 @@ from synapse.api.errors import (
    FederationDeniedError,
    HttpResponseException,
    SynapseError,
    UnsupportedRoomVersionError,
)
from synapse.api.room_versions import (
    KNOWN_ROOM_VERSIONS,
    EventFormatVersions,
    RoomVersion,
    RoomVersions,
)
from synapse.events import builder, room_version_to_event_format
from synapse.events import EventBase, builder
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.utils import log_function
from synapse.types import JsonDict
from synapse.util import unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.retryutils import NotRetryingDestination
@@ -50,6 +65,8 @@ sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["t

PDU_RETRY_TIME_MS = 1 * 60 * 1000

T = TypeVar("T")


class InvalidResponseError(RuntimeError):
    """Helper for _try_destination_list: indicates that the server returned a response
@@ -168,21 +185,17 @@ class FederationClient(FederationBase):
        sent_queries_counter.labels("client_one_time_keys").inc()
        return self.transport_layer.claim_client_keys(destination, content, timeout)

    @defer.inlineCallbacks
    @log_function
    def backfill(self, dest, room_id, limit, extremities):
        """Requests some more historic PDUs for the given context from the
    async def backfill(
        self, dest: str, room_id: str, limit: int, extremities: Iterable[str]
    ) -> List[EventBase]:
        """Requests some more historic PDUs for the given room from the
        given destination server.

        Args:
            dest (str): The remote homeserver to ask.
            room_id (str): The room_id to backfill.
            limit (int): The maximum number of PDUs to return.
            extremities (list): List of PDU id and origins of the first pdus
                we have seen from the context

        Returns:
            Deferred: Results in the received PDUs.
            limit (int): The maximum number of events to return.
            extremities (list): our current backwards extremities, to backfill from
        """
        logger.debug("backfill extrem=%s", extremities)

@@ -190,34 +203,37 @@ class FederationClient(FederationBase):
        if not extremities:
            return

        transaction_data = yield self.transport_layer.backfill(
        transaction_data = await self.transport_layer.backfill(
            dest, room_id, extremities, limit
        )

        logger.debug("backfill transaction_data=%r", transaction_data)

        room_version = yield self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)
        room_version = await self.store.get_room_version(room_id)

        pdus = [
            event_from_pdu_json(p, format_ver, outlier=False)
            event_from_pdu_json(p, room_version, outlier=False)
            for p in transaction_data["pdus"]
        ]

        # FIXME: We should handle signature failures more gracefully.
        pdus[:] = yield make_deferred_yieldable(
        pdus[:] = await make_deferred_yieldable(
            defer.gatherResults(
                self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
                self._check_sigs_and_hashes(room_version.identifier, pdus),
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        return pdus

    @defer.inlineCallbacks
    @log_function
    def get_pdu(
        self, destinations, event_id, room_version, outlier=False, timeout=None
    ):
    async def get_pdu(
        self,
        destinations: Iterable[str],
        event_id: str,
        room_version: RoomVersion,
        outlier: bool = False,
        timeout: Optional[int] = None,
    ) -> Optional[EventBase]:
        """Requests the PDU with given origin and ID from the remote home
        servers.

@@ -225,18 +241,17 @@ class FederationClient(FederationBase):
        one succeeds.

        Args:
            destinations (list): Which homeservers to query
            event_id (str): event to fetch
            room_version (str): version of the room
            outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
            destinations: Which homeservers to query
            event_id: event to fetch
            room_version: version of the room
            outlier: Indicates whether the PDU is an `outlier`, i.e. if
                it's from an arbitary point in the context as opposed to part
                of the current block of PDUs. Defaults to `False`
            timeout (int): How long to try (in ms) each destination for before
            timeout: How long to try (in ms) each destination for before
                moving to the next destination. None indicates no timeout.

        Returns:
            Deferred: Results in the requested PDU, or None if we were unable to find
            it.
            The requested PDU, or None if we were unable to find it.
        """

        # TODO: Rate limit the number of times we try and get the same event.
@@ -247,8 +262,6 @@ class FederationClient(FederationBase):

        pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})

        format_ver = room_version_to_event_format(room_version)

        signed_pdu = None
        for destination in destinations:
            now = self._clock.time_msec()
@@ -257,7 +270,7 @@ class FederationClient(FederationBase):
                continue

            try:
                transaction_data = yield self.transport_layer.get_event(
                transaction_data = await self.transport_layer.get_event(
                    destination, event_id, timeout=timeout
                )

@@ -269,7 +282,7 @@ class FederationClient(FederationBase):
                )

                pdu_list = [
                    event_from_pdu_json(p, format_ver, outlier=outlier)
                    event_from_pdu_json(p, room_version, outlier=outlier)
                    for p in transaction_data["pdus"]
                ]

@@ -277,7 +290,9 @@ class FederationClient(FederationBase):
                    pdu = pdu_list[0]

                    # Check signatures are correct.
                    signed_pdu = yield self._check_sigs_and_hash(room_version, pdu)
                    signed_pdu = await self._check_sigs_and_hash(
                        room_version.identifier, pdu
                    )

                    break

@@ -307,15 +322,16 @@ class FederationClient(FederationBase):

        return signed_pdu

    @defer.inlineCallbacks
    def get_room_state_ids(self, destination: str, room_id: str, event_id: str):
    async def get_room_state_ids(
        self, destination: str, room_id: str, event_id: str
    ) -> Tuple[List[str], List[str]]:
        """Calls the /state_ids endpoint to fetch the state at a particular point
        in the room, and the auth events for the given event

        Returns:
            Tuple[List[str], List[str]]: a tuple of (state event_ids, auth event_ids)
            a tuple of (state event_ids, auth event_ids)
        """
        result = yield self.transport_layer.get_room_state_ids(
        result = await self.transport_layer.get_room_state_ids(
            destination, room_id, event_id=event_id
        )

@@ -329,37 +345,39 @@ class FederationClient(FederationBase):

        return state_event_ids, auth_event_ids

    @defer.inlineCallbacks
    @log_function
    def get_event_auth(self, destination, room_id, event_id):
        res = yield self.transport_layer.get_event_auth(destination, room_id, event_id)
    async def get_event_auth(self, destination, room_id, event_id):
        res = await self.transport_layer.get_event_auth(destination, room_id, event_id)

        room_version = yield self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)
        room_version = await self.store.get_room_version(room_id)

        auth_chain = [
            event_from_pdu_json(p, format_ver, outlier=True) for p in res["auth_chain"]
            event_from_pdu_json(p, room_version, outlier=True)
            for p in res["auth_chain"]
        ]

        signed_auth = yield self._check_sigs_and_hash_and_fetch(
            destination, auth_chain, outlier=True, room_version=room_version
        signed_auth = await self._check_sigs_and_hash_and_fetch(
            destination, auth_chain, outlier=True, room_version=room_version.identifier
        )

        signed_auth.sort(key=lambda e: e.depth)

        return signed_auth

    @defer.inlineCallbacks
    def _try_destination_list(self, description, destinations, callback):
    async def _try_destination_list(
        self,
        description: str,
        destinations: Iterable[str],
        callback: Callable[[str], Awaitable[T]],
    ) -> T:
        """Try an operation on a series of servers, until it succeeds

        Args:
            description (unicode): description of the operation we're doing, for logging
            description: description of the operation we're doing, for logging

            destinations (Iterable[unicode]): list of server_names to try
            destinations: list of server_names to try

            callback (callable): Function to run for each server. Passed a single
                argument: the server_name to try. May return a deferred.
            callback: Function to run for each server. Passed a single
                argument: the server_name to try.

            If the callback raises a CodeMessageException with a 300/400 code,
            attempts to perform the operation stop immediately and the exception is
@@ -370,7 +388,7 @@ class FederationClient(FederationBase):
            suppressed if the exception is an InvalidResponseError.

        Returns:
            The [Deferred] result of callback, if it succeeds
            The result of callback, if it succeeds

        Raises:
            SynapseError if the chosen remote server returns a 300/400 code, or
@@ -381,10 +399,12 @@ class FederationClient(FederationBase):
                continue

            try:
                res = yield callback(destination)
                res = await callback(destination)
                return res
            except InvalidResponseError as e:
                logger.warning("Failed to %s via %s: %s", description, destination, e)
            except UnsupportedRoomVersionError:
                raise
            except HttpResponseException as e:
                if not 500 <= e.code < 600:
                    raise e.to_synapse_error()
@@ -398,14 +418,20 @@ class FederationClient(FederationBase):
                )
            except Exception:
                logger.warning(
                    "Failed to %s via %s", description, destination, exc_info=1
                    "Failed to %s via %s", description, destination, exc_info=True
                )

        raise SynapseError(502, "Failed to %s via any server" % (description,))

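Editor's note: `_try_destination_list` above implements a simple "first server that works wins" loop. A self-contained illustration of the same pattern outside Synapse (plain asyncio, invented names, no Synapse imports) for readers unfamiliar with it:

# Self-contained illustration of the try-each-destination pattern described in
# the docstring above; this is not Synapse code, just the general shape.
import asyncio
import logging
from typing import Awaitable, Callable, Iterable, TypeVar

T = TypeVar("T")
log = logging.getLogger(__name__)


async def try_destination_list(
    description: str,
    destinations: Iterable[str],
    callback: Callable[[str], Awaitable[T]],
) -> T:
    for destination in destinations:
        try:
            return await callback(destination)
        except Exception:
            # Log and move on to the next candidate server.
            log.warning("Failed to %s via %s", description, destination, exc_info=True)
    raise RuntimeError("Failed to %s via any server" % (description,))


async def _demo() -> None:
    async def fetch(server: str) -> str:
        if server == "good.example.com":
            return "ok from " + server
        raise ConnectionError("unreachable")

    print(await try_destination_list(
        "fetch a thing", ["bad.example.com", "good.example.com"], fetch
    ))


if __name__ == "__main__":
    asyncio.run(_demo())
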
    def make_membership_event(
        self, destinations, room_id, user_id, membership, content, params
    ):
    async def make_membership_event(
        self,
        destinations: Iterable[str],
        room_id: str,
        user_id: str,
        membership: str,
        content: dict,
        params: Dict[str, str],
    ) -> Tuple[str, EventBase, RoomVersion]:
        """
        Creates an m.room.member event, with context, without participating in the room.

@@ -417,26 +443,28 @@ class FederationClient(FederationBase):
        Note that this does not append any events to any graphs.

        Args:
            destinations (Iterable[str]): Candidate homeservers which are probably
            destinations: Candidate homeservers which are probably
                participating in the room.
            room_id (str): The room in which the event will happen.
            user_id (str): The user whose membership is being evented.
            membership (str): The "membership" property of the event. Must be
                one of "join" or "leave".
            content (dict): Any additional data to put into the content field
                of the event.
            params (dict[str, str|Iterable[str]]): Query parameters to include in the
                request.
        Return:
            Deferred[tuple[str, FrozenEvent, int]]: resolves to a tuple of
            `(origin, event, event_format)` where origin is the remote
            homeserver which generated the event, and event_format is one of
            `synapse.api.room_versions.EventFormatVersions`.
            room_id: The room in which the event will happen.
            user_id: The user whose membership is being evented.
            membership: The "membership" property of the event. Must be one of
                "join" or "leave".
            content: Any additional data to put into the content field of the
                event.
            params: Query parameters to include in the request.

            Fails with a ``SynapseError`` if the chosen remote server
            returns a 300/400 code.
        Returns:
            `(origin, event, room_version)` where origin is the remote
            homeserver which generated the event, and room_version is the
            version of the room.

            Fails with a ``RuntimeError`` if no servers were reachable.
        Raises:
            UnsupportedRoomVersionError: if remote responds with
                a room version we don't understand.

            SynapseError: if the chosen remote server returns a 300/400 code.

            RuntimeError: if no servers were reachable.
        """
        valid_memberships = {Membership.JOIN, Membership.LEAVE}
        if membership not in valid_memberships:
@@ -445,16 +473,17 @@ class FederationClient(FederationBase):
                % (membership, ",".join(valid_memberships))
            )

        @defer.inlineCallbacks
        def send_request(destination):
            ret = yield self.transport_layer.make_membership_event(
        async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]:
            ret = await self.transport_layer.make_membership_event(
                destination, room_id, user_id, membership, params
            )

            # Note: If not supplied, the room version may be either v1 or v2,
            # however either way the event format version will be v1.
            room_version = ret.get("room_version", RoomVersions.V1.identifier)
            event_format = room_version_to_event_format(room_version)
            room_version_id = ret.get("room_version", RoomVersions.V1.identifier)
            room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
            if not room_version:
                raise UnsupportedRoomVersionError()

            pdu_dict = ret.get("event", None)
            if not isinstance(pdu_dict, dict):
@@ -474,92 +503,87 @@ class FederationClient(FederationBase):
                self._clock,
                self.hostname,
                self.signing_key,
                format_version=event_format,
                room_version=room_version,
                event_dict=pdu_dict,
            )

            return (destination, ev, event_format)
            return destination, ev, room_version

        return self._try_destination_list(
        return await self._try_destination_list(
            "make_" + membership, destinations, send_request
        )

    def send_join(self, destinations, pdu, event_format_version):
    async def send_join(
        self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion
    ) -> Dict[str, Any]:
        """Sends a join event to one of a list of homeservers.

        Doing so will cause the remote server to add the event to the graph,
        and send the event out to the rest of the federation.

        Args:
            destinations (str): Candidate homeservers which are probably
            destinations: Candidate homeservers which are probably
                participating in the room.
            pdu (BaseEvent): event to be sent
            event_format_version (int): The event format version
            pdu: event to be sent
            room_version: the version of the room (according to the server that
                did the make_join)

        Return:
            Deferred: resolves to a dict with members ``origin`` (a string
            giving the serer the event was sent to, ``state`` (?) and
        Returns:
            a dict with members ``origin`` (a string
            giving the server the event was sent to, ``state`` (?) and
            ``auth_chain``.

            Fails with a ``SynapseError`` if the chosen remote server
            returns a 300/400 code.
        Raises:
            SynapseError: if the chosen remote server returns a 300/400 code.

            Fails with a ``RuntimeError`` if no servers were reachable.
            RuntimeError: if no servers were reachable.
        """

        def check_authchain_validity(signed_auth_chain):
            for e in signed_auth_chain:
                if e.type == EventTypes.Create:
                    create_event = e
                    break
            else:
                raise InvalidResponseError("no %s in auth chain" % (EventTypes.Create,))

            # the room version should be sane.
            room_version = create_event.content.get("room_version", "1")
            if room_version not in KNOWN_ROOM_VERSIONS:
                # This shouldn't be possible, because the remote server should have
                # rejected the join attempt during make_join.
                raise InvalidResponseError(
                    "room appears to have unsupported version %s" % (room_version,)
                )

        @defer.inlineCallbacks
        def send_request(destination):
            content = yield self._do_send_join(destination, pdu)
        async def send_request(destination) -> Dict[str, Any]:
            content = await self._do_send_join(destination, pdu)

            logger.debug("Got content: %s", content)

            state = [
                event_from_pdu_json(p, event_format_version, outlier=True)
                event_from_pdu_json(p, room_version, outlier=True)
                for p in content.get("state", [])
            ]

            auth_chain = [
                event_from_pdu_json(p, event_format_version, outlier=True)
                event_from_pdu_json(p, room_version, outlier=True)
                for p in content.get("auth_chain", [])
            ]

            pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)}

            room_version = None
            create_event = None
            for e in state:
                if (e.type, e.state_key) == (EventTypes.Create, ""):
                    room_version = e.content.get(
                        "room_version", RoomVersions.V1.identifier
                    )
                    create_event = e
                    break

            if room_version is None:
            if create_event is None:
                # If the state doesn't have a create event then the room is
                # invalid, and it would fail auth checks anyway.
                raise SynapseError(400, "No create event in state")

            valid_pdus = yield self._check_sigs_and_hash_and_fetch(
            # the room version should be sane.
            create_room_version = create_event.content.get(
                "room_version", RoomVersions.V1.identifier
            )
            if create_room_version != room_version.identifier:
                # either the server that fulfilled the make_join, or the server that is
                # handling the send_join, is lying.
                raise InvalidResponseError(
                    "Unexpected room version %s in create event"
                    % (create_room_version,)
                )

            valid_pdus = await self._check_sigs_and_hash_and_fetch(
                destination,
                list(pdus.values()),
                outlier=True,
                room_version=room_version,
                room_version=room_version.identifier,
            )

            valid_pdus_map = {p.event_id: p for p in valid_pdus}
@@ -583,7 +607,17 @@ class FederationClient(FederationBase):
            for s in signed_state:
                s.internal_metadata = copy.deepcopy(s.internal_metadata)

            check_authchain_validity(signed_auth)
            # double-check that the same create event has ended up in the auth chain
            auth_chain_create_events = [
                e.event_id
                for e in signed_auth
                if (e.type, e.state_key) == (EventTypes.Create, "")
            ]
            if auth_chain_create_events != [create_event.event_id]:
                raise InvalidResponseError(
                    "Unexpected create event(s) in auth chain"
                    % (auth_chain_create_events,)
                )

            return {
                "state": signed_state,
@@ -591,14 +625,13 @@ class FederationClient(FederationBase):
                "origin": destination,
            }

        return self._try_destination_list("send_join", destinations, send_request)
        return await self._try_destination_list("send_join", destinations, send_request)

    @defer.inlineCallbacks
    def _do_send_join(self, destination, pdu):
    async def _do_send_join(self, destination: str, pdu: EventBase):
        time_now = self._clock.time_msec()

        try:
            content = yield self.transport_layer.send_join_v2(
            content = await self.transport_layer.send_join_v2(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
@@ -620,7 +653,7 @@ class FederationClient(FederationBase):

        logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")

        resp = yield self.transport_layer.send_join_v1(
        resp = await self.transport_layer.send_join_v1(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
@@ -631,51 +664,45 @@ class FederationClient(FederationBase):
        # content.
        return resp[1]

    @defer.inlineCallbacks
    def send_invite(self, destination, room_id, event_id, pdu):
        room_version = yield self.store.get_room_version(room_id)
    async def send_invite(
        self, destination: str, room_id: str, event_id: str, pdu: EventBase,
    ) -> EventBase:
        room_version = await self.store.get_room_version(room_id)

        content = yield self._do_send_invite(destination, pdu, room_version)
        content = await self._do_send_invite(destination, pdu, room_version)

        pdu_dict = content["event"]

        logger.debug("Got response to send_invite: %s", pdu_dict)

        room_version = yield self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)

        pdu = event_from_pdu_json(pdu_dict, format_ver)
        pdu = event_from_pdu_json(pdu_dict, room_version)

        # Check signatures are correct.
        pdu = yield self._check_sigs_and_hash(room_version, pdu)
        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)

        # FIXME: We should handle signature failures more gracefully.

        return pdu

    @defer.inlineCallbacks
    def _do_send_invite(self, destination, pdu, room_version):
    async def _do_send_invite(
        self, destination: str, pdu: EventBase, room_version: RoomVersion
    ) -> JsonDict:
        """Actually sends the invite, first trying v2 API and falling back to
        v1 API if necessary.

        Args:
            destination (str): Target server
            pdu (FrozenEvent)
            room_version (str)

        Returns:
            dict: The event as a dict as returned by the remote server
            The event as a dict as returned by the remote server
        """
        time_now = self._clock.time_msec()

        try:
            content = yield self.transport_layer.send_invite_v2(
            content = await self.transport_layer.send_invite_v2(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content={
                    "event": pdu.get_pdu_json(time_now),
                    "room_version": room_version,
                    "room_version": room_version.identifier,
                    "invite_room_state": pdu.unsigned.get("invite_room_state", []),
                },
            )
@@ -693,8 +720,7 @@ class FederationClient(FederationBase):
                # Otherwise, we assume that the remote server doesn't understand
                # the v2 invite API. That's ok provided the room uses old-style event
                # IDs.
                v = KNOWN_ROOM_VERSIONS.get(room_version)
                if v.event_format != EventFormatVersions.V1:
                if room_version.event_format != EventFormatVersions.V1:
                    raise SynapseError(
                        400,
                        "User's homeserver does not support this room version",
@@ -708,7 +734,7 @@ class FederationClient(FederationBase):
        # Didn't work, try v1 API.
        # Note the v1 API returns a tuple of `(200, content)`

        _, content = yield self.transport_layer.send_invite_v1(
        _, content = await self.transport_layer.send_invite_v1(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
@@ -716,7 +742,7 @@ class FederationClient(FederationBase):
        )
        return content

    def send_leave(self, destinations, pdu):
    async def send_leave(self, destinations: Iterable[str], pdu: EventBase) -> None:
        """Sends a leave event to one of a list of homeservers.

        Doing so will cause the remote server to add the event to the graph,
@@ -725,34 +751,29 @@ class FederationClient(FederationBase):
        This is mostly useful to reject received invites.

        Args:
            destinations (str): Candidate homeservers which are probably
            destinations: Candidate homeservers which are probably
                participating in the room.
            pdu (BaseEvent): event to be sent
            pdu: event to be sent

        Return:
            Deferred: resolves to None.
        Raises:
            SynapseError if the chosen remote server returns a 300/400 code.

            Fails with a ``SynapseError`` if the chosen remote server
            returns a 300/400 code.

            Fails with a ``RuntimeError`` if no servers were reachable.
            RuntimeError if no servers were reachable.
        """

        @defer.inlineCallbacks
        def send_request(destination):
            content = yield self._do_send_leave(destination, pdu)

        async def send_request(destination: str) -> None:
            content = await self._do_send_leave(destination, pdu)
            logger.debug("Got content: %s", content)
            return None

        return self._try_destination_list("send_leave", destinations, send_request)
        return await self._try_destination_list(
            "send_leave", destinations, send_request
        )

    @defer.inlineCallbacks
    def _do_send_leave(self, destination, pdu):
    async def _do_send_leave(self, destination, pdu):
        time_now = self._clock.time_msec()

        try:
            content = yield self.transport_layer.send_leave_v2(
            content = await self.transport_layer.send_leave_v2(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
@@ -774,7 +795,7 @@ class FederationClient(FederationBase):

        logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")

        resp = yield self.transport_layer.send_leave_v1(
        resp = await self.transport_layer.send_leave_v1(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
@@ -806,34 +827,33 @@ class FederationClient(FederationBase):
            third_party_instance_id=third_party_instance_id,
        )

    @defer.inlineCallbacks
    def get_missing_events(
    async def get_missing_events(
        self,
        destination,
        room_id,
        earliest_events_ids,
        latest_events,
        limit,
        min_depth,
        timeout,
    ):
        destination: str,
        room_id: str,
        earliest_events_ids: Sequence[str],
        latest_events: Iterable[EventBase],
        limit: int,
        min_depth: int,
        timeout: int,
    ) -> List[EventBase]:
        """Tries to fetch events we are missing. This is called when we receive
        an event without having received all of its ancestors.

        Args:
            destination (str)
            room_id (str)
            earliest_events_ids (list): List of event ids. Effectively the
            destination
            room_id
            earliest_events_ids: List of event ids. Effectively the
                events we expected to receive, but haven't. `get_missing_events`
                should only return events that didn't happen before these.
            latest_events (list): List of events we have received that we don't
            latest_events: List of events we have received that we don't
                have all previous events for.
            limit (int): Maximum number of events to return.
            min_depth (int): Minimum depth of events tor return.
            timeout (int): Max time to wait in ms
            limit: Maximum number of events to return.
            min_depth: Minimum depth of events to return.
            timeout: Max time to wait in ms
        """
        try:
            content = yield self.transport_layer.get_missing_events(
            content = await self.transport_layer.get_missing_events(
                destination=destination,
                room_id=room_id,
                earliest_events=earliest_events_ids,
@@ -843,15 +863,14 @@ class FederationClient(FederationBase):
                timeout=timeout,
            )

            room_version = yield self.store.get_room_version(room_id)
            format_ver = room_version_to_event_format(room_version)
            room_version = await self.store.get_room_version(room_id)

            events = [
                event_from_pdu_json(e, format_ver) for e in content.get("events", [])
                event_from_pdu_json(e, room_version) for e in content.get("events", [])
            ]

            signed_events = yield self._check_sigs_and_hash_and_fetch(
                destination, events, outlier=False, room_version=room_version
            signed_events = await self._check_sigs_and_hash_and_fetch(
                destination, events, outlier=False, room_version=room_version.identifier
            )
        except HttpResponseException as e:
            if not e.code == 400:

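Editor's note: several of the senders above (`_do_send_join`, `_do_send_invite`, `_do_send_leave`) share the same shape: try the v2 federation endpoint first and fall back to v1 when the remote does not recognise it. A self-contained sketch of that shape with invented names (not Synapse code):

# Self-contained sketch of the "try v2, fall back to v1" pattern used above.
# The transport object and its methods are invented for illustration.
import asyncio


class EndpointNotFound(Exception):
    """Stand-in for the error a remote returns for an unrecognised endpoint."""


class FakeTransport:
    async def send_thing_v2(self, destination: str, payload: dict) -> dict:
        # Pretend the remote server has not implemented the v2 endpoint yet.
        raise EndpointNotFound("unrecognised endpoint")

    async def send_thing_v1(self, destination: str, payload: dict) -> dict:
        return {"via": "v1", "destination": destination}


async def send_thing(transport: FakeTransport, destination: str, payload: dict) -> dict:
    try:
        return await transport.send_thing_v2(destination, payload)
    except EndpointNotFound:
        # The remote doesn't understand the v2 API; retry with the v1 shape.
        return await transport.send_thing_v1(destination, payload)


if __name__ == "__main__":
    print(asyncio.run(send_thing(FakeTransport(), "remote.example.com", {"x": 1})))
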
Some files were not shown because too many files have changed in this diff.