Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-07 01:20:16 +00:00)

Compare commits: v1.106.0...erikj/fix_ (148 commits)
Commits in this comparison (SHA1):

d69b3b4c9f, e60b6cc533, f1c4dfb08b, 0edf1cacf7, d0f90bd04e, 0248ed70a9, e6816babf6, a8069e9739, 863578bfcf, 9e59d18022,
491365f199, dad1559721, 8c4937b216, b84e31375b, 06953bc193, 265ee88f34, ab94bce02c, 17d6c28285, 4a7c58642c, ce9385819b,
a963f579de, 3f06bbc0ac, fcbc79bb87, aabf577166, 7d8f0ef351, eab0b548e4, 81cef38d4b, e2f8476044, 18c1196893, 8a3270075b,
f458dff16d, 6b709c512d, 5c2a837e3c, 64f5a4a353, 7dd14fadb1, 5624c8b961, 4e3868dc46, d16910ca02, 225f378ffa, 8bd9ff0783,
466f344547, 726006cdf2, 967b6948b0, d7198dfb95, 94ef2f4f5d, bb5a692946, ad179b0136, 5147ce294a, f35bc08d39, f2616edb73,
86a2a0258f, 0893ee9af8, 887f773472, 9edb725ebc, c97251d5ba, 7e2412265d, 7ef00b7628, b71d277438, a547b49773, 6a9a641fb8,
b5facbac0f, b250ca5df2, e0d420fbd1, 9956f35c6a, d464ee3602, 439a095edc, 5d040f2066, f33266232e, d43042864a, f4ce030608,
8b43cc89fa, 52af16c561, 38f03a09ff, c856ae4724, fe07995e69, 52a649580f, 28a948f04f, 7cb3f8a979, fd12003441, 5e892671a7,
d2d48cce85, 2359c64dec, 68dca8076f, 284d85dee3, ebe77381b0, 0b91ccce47, ecf4e0674c, 7d82987b27, bd8d8865fb, caf528477e,
f0c72d8e87, 03a342b049, aa6345cb3b, 2b438df9b3, 038b9ec59a, 59ac541310, a2e6f43f11, 4cf4a8281b, ef7e040e54, 393429d692,
34a8652366, 414ddcd457, 4d408cb4dd, 212f150208, 4c6e78fa14, 1b155362ca, 522a40c4de, dcd03d3b15, 438bc23560, cf30cfe5d1,
1726b49457, 792cfe7ba6, c3682ff668, 3e6ee8ff88, 7c9ac01eb5, 3818597751, 3aadf43122, 5b6a75935e, c0ea2bf800, 37558d5e4c,
0b358f8643, 7254015665, e84a493f41, 07232e27a8, 7ab0f630da, b548f7803a, 758aec6b34, c897ac63e9, 38bc7a009d, 6a275828c8,
6e373468a4, 48ee17dc79, f6437ca1c4, 02bda250f8, 0fd6b269d3, 89fc579329, 9c91873922, 41fbe387d6, 90cc9e5b29, 516fd891ee,
0ef2315a99, 59710437e4, 9985aa6821, 31742149d4, 947e8a6cb0, 0d4d00a07c, 3166445514, 922656fc77
.github/workflows/docs-pr-netlify.yaml (vendored): 2 changed lines
@@ -14,7 +14,7 @@ jobs:
       # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
       # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
       - name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@09f2f74827fd3a8607589e5ad7f9398816f540fe # v3.1.4
+        uses: dawidd6/action-download-artifact@deb3bb83256a78589fef6a7b942e5f2573ad7c13 # v5
         with:
           workflow: docs-pr.yaml
           run_id: ${{ github.event.workflow_run.id }}
.github/workflows/docs.yaml (vendored): 30 changed lines
@@ -85,33 +85,3 @@ jobs:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           publish_dir: ./book
           destination_dir: ./${{ needs.pre.outputs.branch-version }}
-
-  ################################################################################
-  pages-devdocs:
-    name: GitHub Pages (developer docs)
-    runs-on: ubuntu-latest
-    needs:
-      - pre
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: "Set up Sphinx"
-        uses: matrix-org/setup-python-poetry@v1
-        with:
-          python-version: "3.x"
-          poetry-version: "1.3.2"
-          groups: "dev-docs"
-          extras: ""
-
-      - name: Build the documentation
-        run: |
-          cd dev-docs
-          poetry run make html
-
-      # Deploy to the target directory.
-      - name: Deploy to gh pages
-        uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          publish_dir: ./dev-docs/_build/html
-          destination_dir: ./dev-docs/${{ needs.pre.outputs.branch-version }}
CHANGES.md: 182 changed lines
@@ -1,3 +1,185 @@
# Synapse 1.109.0rc2 (2024-06-11)

### Bugfixes

- Fix bug where one-time-keys were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
- Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1. ([\#17292](https://github.com/element-hq/synapse/issues/17292))


# Synapse 1.109.0rc1 (2024-06-04)

### Features

- Add the ability to auto-accept invites on behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details. ([\#17147](https://github.com/element-hq/synapse/issues/17147))
- Add experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync/e2ee` endpoint for to-device messages and device encryption info. ([\#17167](https://github.com/element-hq/synapse/issues/17167))
- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/issues/3916) by adding unstable media endpoints to `/_matrix/client`. ([\#17213](https://github.com/element-hq/synapse/issues/17213))
- Add logging to tasks managed by the task scheduler, showing CPU and database usage. ([\#17219](https://github.com/element-hq/synapse/issues/17219))

### Bugfixes

- Fix deduplicating of membership events to not create unused state groups. ([\#17164](https://github.com/element-hq/synapse/issues/17164))
- Fix bug where duplicate events could be sent down sync when using workers that are overloaded. ([\#17215](https://github.com/element-hq/synapse/issues/17215))
- Ignore attempts to send to-device messages to bad users, to avoid log spam when we try to connect to the bad server. ([\#17240](https://github.com/element-hq/synapse/issues/17240))
- Fix handling of duplicate concurrent uploading of device one-time-keys. ([\#17241](https://github.com/element-hq/synapse/issues/17241))
- Fix reporting of default tags to Sentry, such as worker name. Broke in v1.108.0. ([\#17251](https://github.com/element-hq/synapse/issues/17251))
- Fix bug where typing updates would not be sent when using workers after a restart. ([\#17252](https://github.com/element-hq/synapse/issues/17252))

### Improved Documentation

- Update the LemonLDAP documentation to say that claims should be explicitly included in the returned `id_token`, as Synapse won't request them. ([\#17204](https://github.com/element-hq/synapse/issues/17204))

### Internal Changes

- Improve DB usage when fetching related events. ([\#17083](https://github.com/element-hq/synapse/issues/17083))
- Log exceptions when failing to auto-join a new user according to the `auto_join_rooms` option. ([\#17176](https://github.com/element-hq/synapse/issues/17176))
- Reduce work of calculating outbound device lists updates. ([\#17211](https://github.com/element-hq/synapse/issues/17211))
- Improve performance of calculating device lists changes in `/sync`. ([\#17216](https://github.com/element-hq/synapse/issues/17216))
- Move towards using `MultiWriterIdGenerator` everywhere. ([\#17226](https://github.com/element-hq/synapse/issues/17226))
- Replace all usages of `StreamIdGenerator` with `MultiWriterIdGenerator`. ([\#17229](https://github.com/element-hq/synapse/issues/17229))
- Change the `allow_unsafe_locale` config option to also apply when setting up new databases. ([\#17238](https://github.com/element-hq/synapse/issues/17238))
- Fix errors in logs about closing incorrect logging contexts when media gets rejected by a module. ([\#17239](https://github.com/element-hq/synapse/issues/17239), [\#17246](https://github.com/element-hq/synapse/issues/17246))
- Clean out invalid destinations from `device_federation_outbox` table. ([\#17242](https://github.com/element-hq/synapse/issues/17242))
- Stop logging errors when receiving invalid User IDs in key query requests. ([\#17250](https://github.com/element-hq/synapse/issues/17250))


### Updates to locked dependencies

* Bump anyhow from 1.0.83 to 1.0.86. ([\#17220](https://github.com/element-hq/synapse/issues/17220))
* Bump bcrypt from 4.1.2 to 4.1.3. ([\#17224](https://github.com/element-hq/synapse/issues/17224))
* Bump lxml from 5.2.1 to 5.2.2. ([\#17261](https://github.com/element-hq/synapse/issues/17261))
* Bump mypy-zope from 1.0.3 to 1.0.4. ([\#17262](https://github.com/element-hq/synapse/issues/17262))
* Bump phonenumbers from 8.13.35 to 8.13.37. ([\#17235](https://github.com/element-hq/synapse/issues/17235))
* Bump prometheus-client from 0.19.0 to 0.20.0. ([\#17233](https://github.com/element-hq/synapse/issues/17233))
* Bump pyasn1 from 0.5.1 to 0.6.0. ([\#17223](https://github.com/element-hq/synapse/issues/17223))
* Bump pyicu from 2.13 to 2.13.1. ([\#17236](https://github.com/element-hq/synapse/issues/17236))
* Bump pyopenssl from 24.0.0 to 24.1.0. ([\#17234](https://github.com/element-hq/synapse/issues/17234))
* Bump serde from 1.0.201 to 1.0.202. ([\#17221](https://github.com/element-hq/synapse/issues/17221))
* Bump serde from 1.0.202 to 1.0.203. ([\#17232](https://github.com/element-hq/synapse/issues/17232))
* Bump twine from 5.0.0 to 5.1.0. ([\#17225](https://github.com/element-hq/synapse/issues/17225))
* Bump types-psycopg2 from 2.9.21.20240311 to 2.9.21.20240417. ([\#17222](https://github.com/element-hq/synapse/issues/17222))
* Bump types-pyopenssl from 24.0.0.20240311 to 24.1.0.20240425. ([\#17260](https://github.com/element-hq/synapse/issues/17260))

# Synapse 1.108.0 (2024-05-28)

No significant changes since 1.108.0rc1.


# Synapse 1.108.0rc1 (2024-05-21)

### Features

- Add a feature that allows clients to query the configured federation whitelist. Disabled by default. ([\#16848](https://github.com/element-hq/synapse/issues/16848), [\#17199](https://github.com/element-hq/synapse/issues/17199))
- Add the ability to allow numeric user IDs with a specific prefix when in the CAS flow. Contributed by Aurélien Grimpard. ([\#17098](https://github.com/element-hq/synapse/issues/17098))

### Bugfixes

- Fix bug where push rules would be empty in `/sync` for some accounts. Introduced in v1.93.0. ([\#17142](https://github.com/element-hq/synapse/issues/17142))
- Add support for optional whitespace around the Federation API's `Authorization` header's parameter commas. ([\#17145](https://github.com/element-hq/synapse/issues/17145))
- Fix bug where disabling room publication prevented public rooms being created on workers. ([\#17177](https://github.com/element-hq/synapse/issues/17177), [\#17184](https://github.com/element-hq/synapse/issues/17184))

### Improved Documentation

- Document [`/v1/make_knock`](https://spec.matrix.org/v1.10/server-server-api/#get_matrixfederationv1make_knockroomiduserid) and [`/v1/send_knock/`](https://spec.matrix.org/v1.10/server-server-api/#put_matrixfederationv1send_knockroomideventid) federation endpoints as worker-compatible. ([\#17058](https://github.com/element-hq/synapse/issues/17058))
- Update User Admin API with note about prefixing OIDC external_id providers. ([\#17139](https://github.com/element-hq/synapse/issues/17139))
- Clarify the state of the created room when using the `autocreate_auto_join_room_preset` config option. ([\#17150](https://github.com/element-hq/synapse/issues/17150))
- Update the Admin FAQ with the current libjemalloc version for latest Debian stable. Additionally update the name of the "push_rules" stream in the Workers documentation. ([\#17171](https://github.com/element-hq/synapse/issues/17171))

### Internal Changes

- Add note to reflect that [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) is closed but will remain supported for some time. ([\#17151](https://github.com/element-hq/synapse/issues/17151))
- Update dependency PyO3 to 0.21. ([\#17162](https://github.com/element-hq/synapse/issues/17162))
- Fix linter errors found in PR #17147. ([\#17166](https://github.com/element-hq/synapse/issues/17166))
- Bump black from 24.2.0 to 24.4.2. ([\#17170](https://github.com/element-hq/synapse/issues/17170))
- Cache literal sync filter validation for performance. ([\#17186](https://github.com/element-hq/synapse/issues/17186))
- Improve performance by fixing a reactor pause. ([\#17192](https://github.com/element-hq/synapse/issues/17192))
- Route `/make_knock` and `/send_knock` federation APIs to the federation reader worker in Complement test runs. ([\#17195](https://github.com/element-hq/synapse/issues/17195))
- Prepare sync handler to be able to return different sync responses (`SyncVersion`). ([\#17200](https://github.com/element-hq/synapse/issues/17200))
- Organize the sync cache key parameter outside of the sync config (separate concerns). ([\#17201](https://github.com/element-hq/synapse/issues/17201))
- Refactor `SyncResultBuilder` assembly to its own function. ([\#17202](https://github.com/element-hq/synapse/issues/17202))
- Rename to be obvious: `joined_rooms` -> `joined_room_ids`. ([\#17203](https://github.com/element-hq/synapse/issues/17203), [\#17208](https://github.com/element-hq/synapse/issues/17208))
- Add a short pause when rate-limiting a request. ([\#17210](https://github.com/element-hq/synapse/issues/17210))


### Updates to locked dependencies

* Bump cryptography from 42.0.5 to 42.0.7. ([\#17180](https://github.com/element-hq/synapse/issues/17180))
* Bump gitpython from 3.1.41 to 3.1.43. ([\#17181](https://github.com/element-hq/synapse/issues/17181))
* Bump immutabledict from 4.1.0 to 4.2.0. ([\#17179](https://github.com/element-hq/synapse/issues/17179))
* Bump sentry-sdk from 1.40.3 to 2.1.1. ([\#17178](https://github.com/element-hq/synapse/issues/17178))
* Bump serde from 1.0.200 to 1.0.201. ([\#17183](https://github.com/element-hq/synapse/issues/17183))
* Bump serde_json from 1.0.116 to 1.0.117. ([\#17182](https://github.com/element-hq/synapse/issues/17182))

# Synapse 1.107.0 (2024-05-14)

No significant changes since 1.107.0rc1.


# Synapse 1.107.0rc1 (2024-05-07)

### Features

- Add preliminary support for [MSC3823: Account Suspension](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#17051](https://github.com/element-hq/synapse/issues/17051))
- Declare support for [Matrix v1.10](https://matrix.org/blog/2024/03/22/matrix-v1.10-release/). Contributed by @clokep. ([\#17082](https://github.com/element-hq/synapse/issues/17082))
- Add support for [MSC4115: membership metadata on events](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). ([\#17104](https://github.com/element-hq/synapse/issues/17104), [\#17137](https://github.com/element-hq/synapse/issues/17137))

### Bugfixes

- Fix search feature of Element Android on homeservers using SQLite by returning search terms as search highlights. ([\#17000](https://github.com/element-hq/synapse/issues/17000))
- Fix a bug introduced in v1.52.0 where the `destination` query parameter for the [Destination Rooms Admin API](https://element-hq.github.io/synapse/v1.105/usage/administration/admin_api/federation.html#destination-rooms) failed to actually filter returned rooms. ([\#17077](https://github.com/element-hq/synapse/issues/17077))
- For MSC3266 room summaries, support queries at the recommended endpoint of `/_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`. The existing endpoint of `/_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` is deprecated. ([\#17078](https://github.com/element-hq/synapse/issues/17078))
- Apply user email & picture during OIDC registration if present & selected. ([\#17120](https://github.com/element-hq/synapse/issues/17120))
- Improve error message for cross signing reset with [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) enabled. ([\#17121](https://github.com/element-hq/synapse/issues/17121))
- Fix a bug which meant that to-device messages received over federation could be dropped when the server was under load or networking problems caused problems between Synapse processes or the database. ([\#17127](https://github.com/element-hq/synapse/issues/17127))
- Fix bug where `StreamChangeCache` would not respect configured cache factors. ([\#17152](https://github.com/element-hq/synapse/issues/17152))

### Updates to the Docker image

- Correct licensing metadata on Docker image. ([\#17141](https://github.com/element-hq/synapse/issues/17141))

### Improved Documentation

- Update the `event_cache_size` and `global_factor` configuration options' documentation. ([\#17071](https://github.com/element-hq/synapse/issues/17071))
- Remove broken sphinx docs. ([\#17073](https://github.com/element-hq/synapse/issues/17073), [\#17148](https://github.com/element-hq/synapse/issues/17148))
- Add RuntimeDirectory to example matrix-synapse.service systemd unit. ([\#17084](https://github.com/element-hq/synapse/issues/17084))
- Fix various small typos throughout the docs. ([\#17114](https://github.com/element-hq/synapse/issues/17114))
- Update enable_notifs configuration documentation. ([\#17116](https://github.com/element-hq/synapse/issues/17116))
- Update the Upgrade Notes with the latest minimum supported Rust version of 1.66.0. Contributed by @jahway603. ([\#17140](https://github.com/element-hq/synapse/issues/17140))

### Internal Changes

- Enable [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) by default in the Synapse Complement image. ([\#17105](https://github.com/element-hq/synapse/issues/17105))
- Add optimisation to `StreamChangeCache.get_entities_changed(..)`. ([\#17130](https://github.com/element-hq/synapse/issues/17130))


### Updates to locked dependencies

* Bump furo from 2024.1.29 to 2024.4.27. ([\#17133](https://github.com/element-hq/synapse/issues/17133))
* Bump idna from 3.6 to 3.7. ([\#17136](https://github.com/element-hq/synapse/issues/17136))
* Bump jsonschema from 4.21.1 to 4.22.0. ([\#17157](https://github.com/element-hq/synapse/issues/17157))
* Bump lxml from 5.1.0 to 5.2.1. ([\#17158](https://github.com/element-hq/synapse/issues/17158))
* Bump phonenumbers from 8.13.29 to 8.13.35. ([\#17106](https://github.com/element-hq/synapse/issues/17106))
* Bump pillow from 10.2.0 to 10.3.0. ([\#17146](https://github.com/element-hq/synapse/issues/17146))
* Bump pydantic from 2.6.4 to 2.7.0. ([\#17107](https://github.com/element-hq/synapse/issues/17107))
* Bump pydantic from 2.7.0 to 2.7.1. ([\#17160](https://github.com/element-hq/synapse/issues/17160))
* Bump pyicu from 2.12 to 2.13. ([\#17109](https://github.com/element-hq/synapse/issues/17109))
* Bump serde from 1.0.197 to 1.0.198. ([\#17111](https://github.com/element-hq/synapse/issues/17111))
* Bump serde from 1.0.198 to 1.0.199. ([\#17132](https://github.com/element-hq/synapse/issues/17132))
* Bump serde from 1.0.199 to 1.0.200. ([\#17161](https://github.com/element-hq/synapse/issues/17161))
* Bump serde_json from 1.0.115 to 1.0.116. ([\#17112](https://github.com/element-hq/synapse/issues/17112))
* Update `tornado` Python dependency from 6.2 to 6.4. ([\#17131](https://github.com/element-hq/synapse/issues/17131))
* Bump twisted from 23.10.0 to 24.3.0. ([\#17135](https://github.com/element-hq/synapse/issues/17135))
* Bump types-bleach from 6.1.0.1 to 6.1.0.20240331. ([\#17110](https://github.com/element-hq/synapse/issues/17110))
* Bump types-pillow from 10.2.0.20240415 to 10.2.0.20240423. ([\#17159](https://github.com/element-hq/synapse/issues/17159))
* Bump types-setuptools from 69.0.0.20240125 to 69.5.0.20240423. ([\#17134](https://github.com/element-hq/synapse/issues/17134))

# Synapse 1.106.0 (2024-04-30)

No significant changes since 1.106.0rc1.
Cargo.lock (generated): 219 changed lines
Version bumps in Cargo.lock (source and checksum fields updated accordingly):

| Package | Old version | New version |
|---|---|---|
| aho-corasick | 1.0.2 | 1.1.3 |
| anyhow | 1.0.82 | 1.0.86 |
| arc-swap | 1.5.1 | 1.7.1 |
| autocfg | 1.1.0 | 1.3.0 |
| bitflags | 1.3.2 | 2.5.0 |
| block-buffer | 0.10.3 | 0.10.4 |
| generic-array | 0.14.6 | 0.14.7 |
| getrandom | 0.2.14 | 0.2.15 |
| indoc | 2.0.4 | 2.0.5 |
| itoa | 1.0.4 | 1.0.11 |
| libc | 0.2.153 | 0.2.154 |
| lock_api | 0.4.9 | 0.4.12 |
| memchr | 2.6.3 | 2.7.2 |
| memoffset | 0.9.0 | 0.9.1 |
| once_cell | 1.15.0 | 1.19.0 |
| parking_lot | 0.12.1 | 0.12.2 |
| parking_lot_core | 0.9.3 | 0.9.10 |
| proc-macro2 | 1.0.76 | 1.0.82 |
| pyo3 | 0.20.3 | 0.21.2 |
| pyo3-build-config | 0.20.3 | 0.21.2 |
| pyo3-ffi | 0.20.3 | 0.21.2 |
| pyo3-log | 0.9.0 | 0.10.0 |
| pyo3-macros | 0.20.3 | 0.21.2 |
| pyo3-macros-backend | 0.20.3 | 0.21.2 |
| pythonize | 0.20.0 | 0.21.1 |
| quote | 1.0.35 | 1.0.36 |
| redox_syscall | 0.2.16 | 0.5.1 |
| regex | 1.10.4 | 1.10.5 |
| regex-automata | 0.4.4 | 0.4.6 |
| regex-syntax | 0.8.2 | 0.8.3 |
| ryu | 1.0.11 | 1.0.18 |
| scopeguard | 1.1.0 | 1.2.0 |
| serde | 1.0.197 | 1.0.203 |
| serde_derive | 1.0.197 | 1.0.203 |
| serde_json | 1.0.115 | 1.0.117 |
| sha1 | 0.10.5 | 0.10.6 |
| smallvec | 1.10.0 | 1.13.2 |
| subtle | 2.4.1 | 2.5.0 |
| syn | 2.0.48 | 2.0.61 |
| target-lexicon | 0.12.4 | 0.12.14 |
| typenum | 1.15.0 | 1.17.0 |
| unicode-ident | 1.0.5 | 1.0.12 |
| windows_aarch64_msvc | 0.36.1 | 0.52.5 |
| windows_i686_gnu | 0.36.1 | 0.52.5 |
| windows_i686_msvc | 0.36.1 | 0.52.5 |
| windows_x86_64_gnu | 0.36.1 | 0.52.5 |
| windows_x86_64_msvc | 0.36.1 | 0.52.5 |

In addition, `windows-sys` 0.36.1 is replaced by `windows-targets` 0.52.5 (now pulled in by `parking_lot_core` alongside the existing `cfg-if`, `libc`, `redox_syscall`, and `smallvec` dependencies), and new target crates `windows_aarch64_gnullvm`, `windows_i686_gnullvm`, and `windows_x86_64_gnullvm` are added at 0.52.5.
New changelog.d files:

- changelog.d/17172.feature: Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md) by adding a federation /download endpoint (#17172).
- changelog.d/17187.feature: Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
- changelog.d/17254.bugfix: Fix searching for users with their exact localpart whose ID includes a hyphen.
- changelog.d/17256.feature: Improve ratelimiting in Synapse (#17256).
- changelog.d/17265.misc: Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation.
- changelog.d/17266.misc: Add debug logging for when room keys are uploaded, including whether they are replacing other room keys.
- changelog.d/17270.feature: Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API.
- changelog.d/17271.misc: Handle OTK uploads off master.
- changelog.d/17272.bugfix: Fix wrong retention policy being used when filtering events.
- changelog.d/17273.misc: Don't try and resync devices for remote users whose servers are marked as down.
- changelog.d/17275.bugfix: Fix bug where OTKs were not always included in `/sync` response when using workers.
- changelog.d/17279.misc: Re-organize Pydantic models and types used in handlers.
- changelog.d/17303.misc: IGNORE.
debian/changelog (vendored): 36 changed lines
@@ -1,3 +1,39 @@
matrix-synapse-py3 (1.109.0~rc2) stable; urgency=medium

  * New synapse release 1.109.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Jun 2024 13:20:17 +0000

matrix-synapse-py3 (1.109.0~rc1) stable; urgency=medium

  * New Synapse release 1.109.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Jun 2024 09:42:46 +0100

matrix-synapse-py3 (1.108.0) stable; urgency=medium

  * New Synapse release 1.108.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 28 May 2024 11:54:22 +0100

matrix-synapse-py3 (1.108.0~rc1) stable; urgency=medium

  * New Synapse release 1.108.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 21 May 2024 10:54:13 +0100

matrix-synapse-py3 (1.107.0) stable; urgency=medium

  * New Synapse release 1.107.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 14 May 2024 14:15:34 +0100

matrix-synapse-py3 (1.107.0~rc1) stable; urgency=medium

  * New Synapse release 1.107.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 07 May 2024 16:26:26 +0100

matrix-synapse-py3 (1.106.0) stable; urgency=medium

  * New Synapse release 1.106.0.
Deleted file (Sphinx Makefile for the developer docs):

@@ -1,20 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line, and also
-# from the environment for the first two.
-SPHINXOPTS ?=
-SPHINXBUILD ?= sphinx-build
-SOURCEDIR = .
-BUILDDIR = _build
-
-# Put it first so that "make" without argument is like "make help".
-help:
-	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
-	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
Deleted file (Sphinx configuration for the developer docs):

@@ -1,50 +0,0 @@
-# Configuration file for the Sphinx documentation builder.
-#
-# For the full list of built-in configuration values, see the documentation:
-# https://www.sphinx-doc.org/en/master/usage/configuration.html
-
-# -- Project information -----------------------------------------------------
-# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
-
-project = "Synapse development"
-copyright = "2023, The Matrix.org Foundation C.I.C."
-author = "The Synapse Maintainers and Community"
-
-# -- General configuration ---------------------------------------------------
-# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
-
-extensions = [
-    "autodoc2",
-    "myst_parser",
-]
-
-templates_path = ["_templates"]
-exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
-
-
-# -- Options for Autodoc2 ----------------------------------------------------
-
-autodoc2_docstring_parser_regexes = [
-    # this will render all docstrings as 'MyST' Markdown
-    (r".*", "myst"),
-]
-
-autodoc2_packages = [
-    {
-        "path": "../synapse",
-        # Don't render documentation for everything as a matter of course
-        "auto_mode": False,
-    },
-]
-
-
-# -- Options for MyST (Markdown) ---------------------------------------------
-
-# myst_heading_anchors = 2
-
-
-# -- Options for HTML output -------------------------------------------------
-# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
-
-html_theme = "furo"
-html_static_path = ["_static"]
Deleted file (developer docs master index):

@@ -1,22 +0,0 @@
-.. Synapse Developer Documentation documentation master file, created by
-   sphinx-quickstart on Mon Mar 13 08:59:51 2023.
-   You can adapt this file completely to your liking, but it should at least
-   contain the root `toctree` directive.
-
-Welcome to the Synapse Developer Documentation!
-===========================================================
-
-.. toctree::
-   :maxdepth: 2
-   :caption: Contents:
-
-   modules/federation_sender
-
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
Deleted file (federation sender developer docs page):

@@ -1,5 +0,0 @@
-Federation Sender
-=================
-
-```{autodoc2-docstring} synapse.federation.sender
-```
@@ -163,7 +163,7 @@ FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm
 LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
 LABEL org.opencontainers.image.documentation='https://github.com/element-hq/synapse/blob/master/docker/README.md'
 LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git'
-LABEL org.opencontainers.image.licenses='Apache-2.0'
+LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later'

 RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
@@ -92,8 +92,6 @@ allow_device_name_lookup_over_federation: true
 ## Experimental Features ##

 experimental_features:
-  # client-side support for partial state in /send_join responses
-  faster_joins: true
   # Enable support for polls
   msc3381_polls_enabled: true
   # Enable deleting device-specific notification settings stored in account data
@@ -104,6 +102,10 @@ experimental_features:
   msc3874_enabled: true
   # no UIA for x-signing upload for the first time
   msc3967_enabled: true
+  # Expose a room summary for public rooms
+  msc3266_enabled: true
+
+  msc4115_membership_on_events: true

 server_notices:
   system_mxid_localpart: _server
@@ -211,6 +211,8 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "^/_matrix/federation/(v1|v2)/make_leave/",
         "^/_matrix/federation/(v1|v2)/send_join/",
         "^/_matrix/federation/(v1|v2)/send_leave/",
+        "^/_matrix/federation/v1/make_knock/",
+        "^/_matrix/federation/v1/send_knock/",
         "^/_matrix/federation/(v1|v2)/invite/",
         "^/_matrix/federation/(v1|v2)/query_auth/",
         "^/_matrix/federation/(v1|v2)/event_auth/",
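As a quick illustration of how these worker routing patterns behave, here is a small self-contained Python check (plain `re`, no Synapse imports) showing that the newly added knock endpoints match while an unrelated path does not; the sample request paths are made up for the example.

```python
import re

# Subset of the federation-reader URL patterns shown in the diff above.
FEDERATION_READER_PATTERNS = [
    r"^/_matrix/federation/(v1|v2)/send_join/",
    r"^/_matrix/federation/v1/make_knock/",
    r"^/_matrix/federation/v1/send_knock/",
]

def is_routed_to_federation_reader(path: str) -> bool:
    """Return True if the request path matches any federation-reader pattern."""
    return any(re.match(pattern, path) for pattern in FEDERATION_READER_PATTERNS)

# Example paths (hypothetical room/event IDs):
assert is_routed_to_federation_reader("/_matrix/federation/v1/make_knock/!room:example.com/@user:example.com")
assert is_routed_to_federation_reader("/_matrix/federation/v1/send_knock/!room:example.com/$event")
assert not is_routed_to_federation_reader("/_matrix/client/v3/sync")
```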
@@ -1,6 +1,6 @@
 # Edit Room Membership API

-This API allows an administrator to join an user account with a given `user_id`
+This API allows an administrator to join a user account with a given `user_id`
 to a room with a given `room_id_or_alias`. You can only modify the membership of
 local users. The server administrator must be in the room and have permission to
 invite users.
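For context, a minimal sketch of driving this admin API from Python with `requests`; the homeserver URL, admin token, user ID, and room alias below are placeholders, and error handling is kept to a bare `raise_for_status()`.

```python
import requests
from urllib.parse import quote

HOMESERVER = "https://synapse.example.com"  # placeholder
ADMIN_TOKEN = "syt_admin_access_token"      # placeholder

def admin_join_user_to_room(user_id: str, room_id_or_alias: str) -> dict:
    """Join a local user to a room using the admin room-membership endpoint."""
    resp = requests.post(
        f"{HOMESERVER}/_synapse/admin/v1/join/{quote(room_id_or_alias, safe='')}",
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        json={"user_id": user_id},
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()  # the response includes the resolved room_id

if __name__ == "__main__":
    print(admin_join_user_to_room("@alice:example.com", "#support:example.com"))
```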
@@ -141,8 +141,8 @@ Body parameters:
   provider for SSO (Single sign-on). More details are in the configuration manual under the
   sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
   - `auth_provider` - **string**, required. The unique, internal ID of the external identity provider.
-    The same as `idp_id` from the homeserver configuration. Note that no error is raised if the
-    provided value is not in the homeserver configuration.
+    The same as `idp_id` from the homeserver configuration. If using OIDC, this value should be prefixed
+    with `oidc-`. Note that no error is raised if the provided value is not in the homeserver configuration.
   - `external_id` - **string**, required. An identifier for the user in the external identity provider.
     When the user logs in to the identity provider, this must be the unique ID that they map to.
 - `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
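To make the `oidc-` prefix concrete, here is a hedged sketch of setting `external_ids` through the create-or-modify account admin endpoint (`PUT /_synapse/admin/v2/users/<user_id>`); the server URL, token, and IDs are placeholders, and the exact behaviour of repeated calls should be checked against the User Admin API documentation for your version.

```python
import requests
from urllib.parse import quote

HOMESERVER = "https://synapse.example.com"  # placeholder
ADMIN_TOKEN = "syt_admin_access_token"      # placeholder

def set_external_id(user_id: str, idp_id: str, external_id: str) -> dict:
    """Associate an external identity with a user, prefixing OIDC providers with 'oidc-'."""
    auth_provider = idp_id if idp_id.startswith("oidc-") else f"oidc-{idp_id}"
    resp = requests.put(
        f"{HOMESERVER}/_synapse/admin/v2/users/{quote(user_id)}",
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        json={"external_ids": [{"auth_provider": auth_provider, "external_id": external_id}]},
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()
```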
@@ -51,8 +51,8 @@ clients.

 ## Server configuration

-Support for this feature can be enabled and configured by adding a the
-`retention` in the Synapse configuration file (see
+Support for this feature can be enabled and configured by adding the
+`retention` option in the Synapse configuration file (see
 [configuration manual](usage/configuration/config_documentation.md#retention)).

 To enable support for message retention policies, set the setting
@@ -117,7 +117,7 @@ In this example, we define three jobs:
   policy's `max_lifetime` is greater than a week.

 Note that this example is tailored to show different configurations and
-features slightly more jobs than it's probably necessary (in practice, a
+features slightly more jobs than is probably necessary (in practice, a
 server admin would probably consider it better to replace the two last
 jobs with one that runs once a day and handles rooms which
 policy's `max_lifetime` is greater than 3 days).
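To sanity-check a `retention` block after editing the config, a small Python sketch along these lines can load the homeserver config and print what it finds; the file path is a placeholder, and the keys it looks for (`enabled`, `default_policy`, `purge_jobs`) follow the retention section of the configuration manual.

```python
import yaml  # PyYAML

CONFIG_PATH = "/etc/matrix-synapse/homeserver.yaml"  # placeholder path

def summarise_retention(config_path: str = CONFIG_PATH) -> None:
    """Print the retention-related settings found in a homeserver config file."""
    with open(config_path) as f:
        config = yaml.safe_load(f) or {}
    retention = config.get("retention", {})
    print("retention enabled:", retention.get("enabled", False))
    print("default policy:", retention.get("default_policy"))
    for job in retention.get("purge_jobs", []):
        print("purge job:", job)

if __name__ == "__main__":
    summarise_retention()
```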
@@ -525,6 +525,8 @@ oidc_providers:
   (`Options > Security > ID Token signature algorithm` and `Options > Security >
   Access Token signature algorithm`)
 - Scopes: OpenID, Email and Profile
+- Force claims into `id_token`
+  (`Options > Advanced > Force claims to be returned in ID Token`)
 - Allowed redirection addresses for login (`Options > Basic > Allowed
   redirection addresses for login` ) :
   `[synapse public baseurl]/_synapse/client/oidc/callback`
@@ -128,7 +128,7 @@ can read more about that [here](https://www.postgresql.org/docs/10/kernel-resour
 ### Overview

 The script `synapse_port_db` allows porting an existing synapse server
-backed by SQLite to using PostgreSQL. This is done in as a two phase
+backed by SQLite to using PostgreSQL. This is done as a two phase
 process:

 1. Copy the existing SQLite database to a separate location and run
@@ -242,12 +242,11 @@ host all all ::1/128 ident

### Fixing incorrect `COLLATE` or `CTYPE`

Synapse will refuse to set up a new database if it has the wrong values of
`COLLATE` and `CTYPE` set. Synapse will also refuse to start an existing database with incorrect values
of `COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in the
`database` section of the config, is set to true. Using different locales can cause issues if the locale library is updated from
underneath the database, or if a different version of the locale is used on any
replicas.
Synapse will refuse to start when using a database with incorrect values of
`COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in the
`database` section of the config, is set to true. Using different locales can
cause issues if the locale library is updated from underneath the database, or
if a different version of the locale is used on any replicas.

If you have a database with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with
the correct locale parameter (as shown above). It is also possible to change the

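As a quick check before dumping and recreating, the database's current locale values can be read from PostgreSQL's catalog. This is a generic PostgreSQL query rather than anything specific to this changeset, and the connection details below are placeholders.

```python
# Sketch: inspect COLLATE/CTYPE for each database. Connection details are
# placeholders; any PostgreSQL client would do equally well.
import psycopg2

conn = psycopg2.connect(
    host="localhost", user="synapse_user", password="secret", dbname="postgres"
)
with conn, conn.cursor() as cur:
    cur.execute("SELECT datname, datcollate, datctype FROM pg_database")
    for name, collate, ctype in cur.fetchall():
        # Synapse expects 'C' for both values on its own database.
        print(f"{name}: COLLATE={collate} CTYPE={ctype}")
conn.close()
```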
@@ -259,9 +259,9 @@ users, etc.) to the developers via the `--report-stats` argument.

This command will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your homeserver to
identify itself to other homeserver, so don't lose or delete them. It would be
identify itself to other homeservers, so don't lose or delete them. It would be
wise to back them up somewhere safe. (If, for whatever reason, you do need to
change your homeserver's keys, you may find that other homeserver have the
change your homeserver's keys, you may find that other homeservers have the
old key cached. If you update the signing key, you should change the name of the
key in the `<server name>.signing.key` file (the second word) to something
different. See the [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys) for more information on key management).

@@ -98,6 +98,7 @@ A custom mapping provider must specify the following methods:
either accept this localpart or pick their own username. Otherwise this
option has no effect. If omitted, defaults to `False`.
- `display_name`: An optional string, the display name for the user.
- `picture`: An optional string, the avatar url for the user.
- `emails`: A list of strings, the email address(es) to associate with
this user. If omitted, defaults to an empty list.
* `async def get_extra_attributes(self, userinfo, token)`

@@ -9,6 +9,7 @@ ReloadPropagatedFrom=matrix-synapse.target
Type=notify
NotifyAccess=main
User=matrix-synapse
RuntimeDirectory=synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=-/etc/default/matrix-synapse
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys

@@ -117,6 +117,14 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).

# Upgrading to v1.106.0

## Minimum supported Rust version
The minimum supported Rust version has been increased from v1.65.0 to v1.66.0.
Users building from source will need to ensure their `rustc` version is up to
date.


# Upgrading to v1.100.0

## Minimum supported Rust version

@@ -44,7 +44,7 @@ For each update:

## Enabled

This API allow pausing background updates.
This API allows pausing background updates.

Background updates should *not* be paused for significant periods of time, as
this can affect the performance of Synapse.

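For example, pausing and later resuming background updates could look like the following sketch, which assumes the documented `background_updates/enabled` Admin API; the homeserver URL and token are placeholders.

```python
# Sketch: pause background updates, then re-enable them shortly afterwards.
# URL and token are placeholders for a real homeserver and admin token.
import requests

SYNAPSE_URL = "https://matrix.example.com"
ADMIN_TOKEN = "syt_..."
HEADERS = {"Authorization": f"Bearer {ADMIN_TOKEN}"}


def set_background_updates(enabled: bool) -> bool:
    resp = requests.post(
        f"{SYNAPSE_URL}/_synapse/admin/v1/background_updates/enabled",
        headers=HEADERS,
        json={"enabled": enabled},
    )
    resp.raise_for_status()
    return resp.json()["enabled"]


set_background_updates(False)  # pause (keep this window short)
# ... perform maintenance ...
set_background_updates(True)   # resume
```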
@@ -241,7 +241,7 @@ in memory constrained environments, or increased if performance starts to
degrade.

However, degraded performance due to a low cache factor, common on
machines with slow disks, often leads to explosions in memory use due
machines with slow disks, often leads to explosions in memory use due to
backlogged requests. In this case, reducing the cache factor will make
things worse. Instead, try increasing it drastically. 2.0 is a good
starting value.
@@ -250,10 +250,10 @@ Using [libjemalloc](https://jemalloc.net) can also yield a significant
improvement in overall memory use, and especially in terms of giving back
RAM to the OS. To use it, the library must simply be put in the
LD_PRELOAD environment variable when launching Synapse. On Debian, this
can be done by installing the `libjemalloc1` package and adding this
can be done by installing the `libjemalloc2` package and adding this
line to `/etc/default/matrix-synapse`:

LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2

This made a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.

@@ -676,8 +676,8 @@ This setting has the following sub-options:
trailing 's'.
* `app_name`: `app_name` defines the default value for '%(app)s' in `notif_from` and email
subjects. It defaults to 'Matrix'.
* `enable_notifs`: Set to true to enable sending emails for messages that the user
has missed. Disabled by default.
* `enable_notifs`: Set to true to allow users to receive e-mail notifications. If this is not set,
users can configure e-mail notifications but will not receive them. Disabled by default.
* `notif_for_new_users`: Set to false to disable automatic subscription to email
notifications for new users. Enabled by default.
* `notif_delay_before_mail`: The time to wait before emailing about a notification.
@@ -1232,6 +1232,31 @@ federation_domain_whitelist:
|
||||
- syd.example.com
|
||||
```
|
||||
---
|
||||
### `federation_whitelist_endpoint_enabled`
|
||||
|
||||
Enables an endpoint for fetching the federation whitelist config.
|
||||
|
||||
The request method and path is `GET /_synapse/client/v1/config/federation_whitelist`, and the
|
||||
response format is:
|
||||
|
||||
```json
|
||||
{
|
||||
"whitelist_enabled": true, // Whether the federation whitelist is being enforced
|
||||
"whitelist": [ // Which server names are allowed by the whitelist
|
||||
"example.com"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
If `whitelist_enabled` is `false` then the server is permitted to federate with all others.
|
||||
|
||||
The endpoint requires authentication.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
federation_whitelist_endpoint_enabled: true
|
||||
```
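A client could consume the new endpoint along these lines; the base URL and access token are placeholders, and the response fields are exactly those shown above.

```python
# Sketch: fetch the federation whitelist config. The endpoint requires an
# authenticated user; URL and token below are placeholders.
import requests

resp = requests.get(
    "https://matrix.example.com/_synapse/client/v1/config/federation_whitelist",
    headers={"Authorization": "Bearer syt_..."},
)
resp.raise_for_status()
config = resp.json()

if not config["whitelist_enabled"]:
    print("No whitelist enforced; federation with all servers is permitted")
else:
    print("Allowed servers:", ", ".join(config["whitelist"]))
```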
|
||||
---
|
||||
### `federation_metrics_domains`
|
||||
|
||||
Report prometheus metrics on the age of PDUs being sent to and received from
|
||||
@@ -1317,6 +1342,12 @@ Options related to caching.
|
||||
The number of events to cache in memory. Defaults to 10K. Like other caches,
|
||||
this is affected by `caches.global_factor` (see below).
|
||||
|
||||
For example, the default is 10K and the global_factor default is 0.5.
|
||||
|
||||
Since 10K * 0.5 is 5K then the event cache size will be 5K.
|
||||
|
||||
The cache affected by this configuration is named as "*getEvent*".
|
||||
|
||||
Note that this option is not part of the `caches` section.
|
||||
|
||||
Example configuration:
|
||||
@@ -1342,6 +1373,8 @@ number of entries that can be stored.
|
||||
|
||||
Defaults to 0.5, which will halve the size of all caches.
|
||||
|
||||
Note that changing this value also affects the HTTP connection pool.
|
||||
|
||||
* `per_cache_factors`: A dictionary of cache name to cache factor for that individual
|
||||
cache. Overrides the global cache factor for a given cache.
|
||||
|
||||
@@ -1913,6 +1946,24 @@ Example configuration:
|
||||
max_image_pixels: 35M
|
||||
```
|
||||
---
|
||||
### `remote_media_download_burst_count`
|
||||
|
||||
Remote media downloads are ratelimited using a [leaky bucket algorithm](https://en.wikipedia.org/wiki/Leaky_bucket), where a given "bucket" is keyed to the IP address of the requester when requesting remote media downloads. This configuration option sets the size of the bucket against which the size in bytes of downloads are penalized - if the bucket is full, ie a given number of bytes have already been downloaded, further downloads will be denied until the bucket drains. Defaults to 500MiB. See also `remote_media_download_per_second` which determines the rate at which the "bucket" is emptied and thus has available space to authorize new requests.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
remote_media_download_burst_count: 200M
|
||||
```
|
||||
---
|
||||
### `remote_media_download_per_second`
|
||||
|
||||
Works in conjunction with `remote_media_download_burst_count` to ratelimit remote media downloads - this configuration option determines the rate at which the "bucket" (see above) leaks in bytes per second. As requests are made to download remote media, the size of those requests in bytes is added to the bucket, and once the bucket has reached its capacity, no more requests will be allowed until a number of bytes has "drained" from the bucket. This setting determines the rate at which bytes drain from the bucket, with the practical effect that the larger the number, the faster the bucket leaks, allowing for more bytes downloaded over a shorter period of time. Defaults to 87KiB per second. See also `remote_media_download_burst_count`.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
remote_media_download_per_second: 40K
|
||||
```
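To make the interaction between the two options concrete, here is a rough, self-contained sketch of a leaky bucket keyed by IP address; the names and structure are illustrative only and do not mirror Synapse's internal ratelimiter.

```python
# Illustrative leaky-bucket limiter for media downloads, keyed by requester IP.
# Not Synapse's implementation - just a sketch of the behaviour described above.
import time
from collections import defaultdict

BURST_BYTES = 500 * 1024 * 1024   # bucket capacity (remote_media_download_burst_count)
DRAIN_PER_SEC = 87 * 1024         # leak rate (remote_media_download_per_second)

_buckets: dict = defaultdict(lambda: {"level": 0.0, "last": time.monotonic()})


def allow_download(ip: str, size_bytes: int) -> bool:
    bucket = _buckets[ip]
    now = time.monotonic()
    # Drain the bucket according to how much time has passed.
    bucket["level"] = max(0.0, bucket["level"] - (now - bucket["last"]) * DRAIN_PER_SEC)
    bucket["last"] = now
    if bucket["level"] + size_bytes > BURST_BYTES:
        return False  # bucket is full: deny until enough bytes have drained
    bucket["level"] += size_bytes
    return True
```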
|
||||
---
|
||||
### `prevent_media_downloads_from`
|
||||
|
||||
A list of domains to never download media from. Media from these
|
||||
@@ -2583,6 +2634,11 @@ Possible values for this option are:
|
||||
* "trusted_private_chat": an invitation is required to join this room and the invitee is
|
||||
assigned a power level of 100 upon joining the room.
|
||||
|
||||
Each preset will set up a room in the same manner as if it were provided as the `preset` parameter when
|
||||
calling the
|
||||
[`POST /_matrix/client/v3/createRoom`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom)
|
||||
Client-Server API endpoint.
|
||||
|
||||
If a value of "private_chat" or "trusted_private_chat" is used then
|
||||
`auto_join_mxid_localpart` must also be configured.
|
||||
|
||||
@@ -3520,6 +3576,15 @@ Has the following sub-options:
|
||||
users. This allows the CAS SSO flow to be limited to sign in only, rather than
|
||||
automatically registering users that have a valid SSO login but do not have
|
||||
a pre-registered account. Defaults to true.
|
||||
* `allow_numeric_ids`: set to 'true' to allow numeric user IDs (default false).
|
||||
This allows CAS SSO flow to provide user IDs composed of numbers only.
|
||||
These identifiers will be prefixed by the letter "u" by default.
|
||||
The prefix can be configured using the "numeric_ids_prefix" option.
|
||||
Be careful to choose the prefix correctly to avoid any possible conflicts
|
||||
(e.g. user 1234 becomes u1234 when a user u1234 already exists).
|
||||
* `numeric_ids_prefix`: the prefix you wish to add in front of a numeric user ID
|
||||
when the "allow_numeric_ids" option is set to "true".
|
||||
By default, the prefix is the letter "u" and only alphanumeric characters are allowed.
|
||||
|
||||
*Added in Synapse 1.93.0.*
|
||||
|
||||
@@ -3534,6 +3599,8 @@ cas_config:
|
||||
userGroup: "staff"
|
||||
department: None
|
||||
enable_registration: true
|
||||
allow_numeric_ids: true
|
||||
numeric_ids_prefix: "numericuser"
|
||||
```
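The effect of `allow_numeric_ids` and `numeric_ids_prefix` on the generated localpart can be pictured with this small sketch; it is an illustration of the documented behaviour, not Synapse's code.

```python
# Illustration of how a purely numeric CAS username gets a prefix.
def cas_localpart(username: str, allow_numeric_ids: bool, prefix: str = "u") -> str:
    if allow_numeric_ids and username.isdigit():
        # e.g. "1234" becomes "u1234" (or "numericuser1234" with the example config)
        return prefix + username
    return username


assert cas_localpart("1234", True) == "u1234"
assert cas_localpart("1234", True, "numericuser") == "numericuser1234"
assert cas_localpart("alice", True) == "alice"
```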
|
||||
---
|
||||
### `sso`
|
||||
@@ -4546,3 +4613,32 @@ background_updates:
|
||||
min_batch_size: 10
|
||||
default_batch_size: 50
|
||||
```
|
||||
---
|
||||
## Auto Accept Invites
|
||||
Configuration settings related to automatically accepting invites.
|
||||
|
||||
---
|
||||
### `auto_accept_invites`
|
||||
|
||||
Automatically accepting invites controls whether users are presented with an invite request or if they
|
||||
are instead automatically joined to a room when receiving an invite. Set the `enabled` sub-option to true to
|
||||
enable auto-accepting invites. Defaults to false.
|
||||
This setting has the following sub-options:
|
||||
* `enabled`: Whether to run the auto-accept invites logic. Defaults to false.
|
||||
* `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only
|
||||
for direct messages. Defaults to false.
|
||||
* `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false.
|
||||
* `worker_to_run_on`: Which worker to run this module on. This must match the "worker_name".
|
||||
|
||||
NOTE: Care should be taken not to enable this setting if the `synapse_auto_accept_invite` module is enabled and installed.
|
||||
The two modules will compete to perform the same task and may result in undesired behaviour. For example, multiple join
|
||||
events could be generated from a single invite.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
auto_accept_invites:
|
||||
enabled: true
|
||||
only_for_direct_messages: true
|
||||
only_from_local_users: true
|
||||
worker_to_run_on: "worker_1"
|
||||
```
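How the sub-options combine for a given invite can be sketched as follows; this is a simplification of the `InviteAutoAccepter` logic that appears later in this diff, not the module itself.

```python
# Simplified decision: should this invite be auto-accepted?
def should_auto_accept(
    invitee_is_local: bool,
    sender_is_local: bool,
    is_direct_message: bool,
    only_for_direct_messages: bool,
    only_from_local_users: bool,
) -> bool:
    if not invitee_is_local:
        return False  # only invites for local users are ever auto-accepted
    if only_for_direct_messages and not is_direct_message:
        return False
    if only_from_local_users and not sender_is_local:
        return False
    return True
```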
|
||||
|
||||
@@ -86,9 +86,9 @@ The search term is then split into words:
|
||||
* If unavailable, then runs of ASCII characters, numbers, underscores, and hyphens
|
||||
are considered words.
|
||||
|
||||
The queries for PostgreSQL and SQLite are detailed below, by their overall goal
|
||||
The queries for PostgreSQL and SQLite are detailed below, but their overall goal
|
||||
is to find matching users, preferring users who are "real" (e.g. not bots,
|
||||
not deactivated). It is assumed that real users will have an display name and
|
||||
not deactivated). It is assumed that real users will have a display name and
|
||||
avatar set.
|
||||
|
||||
### PostgreSQL
|
||||
|
||||
@@ -211,6 +211,8 @@ information.
|
||||
^/_matrix/federation/v1/make_leave/
|
||||
^/_matrix/federation/(v1|v2)/send_join/
|
||||
^/_matrix/federation/(v1|v2)/send_leave/
|
||||
^/_matrix/federation/v1/make_knock/
|
||||
^/_matrix/federation/v1/send_knock/
|
||||
^/_matrix/federation/(v1|v2)/invite/
|
||||
^/_matrix/federation/v1/event_auth/
|
||||
^/_matrix/federation/v1/timestamp_to_event/
|
||||
@@ -232,7 +234,7 @@ information.
|
||||
^/_matrix/client/v1/rooms/.*/hierarchy$
|
||||
^/_matrix/client/(v1|unstable)/rooms/.*/relations/
|
||||
^/_matrix/client/v1/rooms/.*/threads$
|
||||
^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
|
||||
^/_matrix/client/unstable/im.nheko.summary/summary/.*$
|
||||
^/_matrix/client/(r0|v3|unstable)/account/3pid$
|
||||
^/_matrix/client/(r0|v3|unstable)/account/whoami$
|
||||
^/_matrix/client/(r0|v3|unstable)/devices$
|
||||
@@ -535,7 +537,7 @@ the stream writer for the `presence` stream:
|
||||
##### The `push_rules` stream
|
||||
|
||||
The following endpoints should be routed directly to the worker configured as
|
||||
the stream writer for the `push` stream:
|
||||
the stream writer for the `push_rules` stream:
|
||||
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
|
||||
|
||||
@@ -634,7 +636,7 @@ worker application type.
|
||||
|
||||
#### Push Notifications
|
||||
|
||||
You can designate generic worker to sending push notifications to
|
||||
You can designate generic workers to send push notifications to
|
||||
a [push gateway](https://spec.matrix.org/v1.5/push-gateway-api/) such as
|
||||
[sygnal](https://github.com/matrix-org/sygnal) and email.
|
||||
|
||||
|
||||
poetry.lock (generated, 1268 lines changed; diff suppressed because it is too large)
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
|
||||
|
||||
[tool.poetry]
|
||||
name = "matrix-synapse"
|
||||
version = "1.106.0"
|
||||
version = "1.109.0rc2"
|
||||
description = "Homeserver for the Matrix decentralised comms protocol"
|
||||
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
|
||||
license = "AGPL-3.0-or-later"
|
||||
@@ -200,10 +200,8 @@ netaddr = ">=0.7.18"
|
||||
# add a lower bound to the Jinja2 dependency.
|
||||
Jinja2 = ">=3.0"
|
||||
bleach = ">=1.4.3"
|
||||
# We use `ParamSpec` and `Concatenate`, which were added in `typing-extensions` 3.10.0.0.
|
||||
# Additionally we need https://github.com/python/typing/pull/817 to allow types to be
|
||||
# generic over ParamSpecs.
|
||||
typing-extensions = ">=3.10.0.1"
|
||||
# We use `Self`, which were added in `typing-extensions` 4.0.
|
||||
typing-extensions = ">=4.0"
|
||||
# We enforce that we have a `cryptography` version that bundles an `openssl`
|
||||
# with the latest security patches.
|
||||
cryptography = ">=3.4.7"
|
||||
@@ -364,17 +362,6 @@ towncrier = ">=18.6.0rc1"
|
||||
tomli = ">=1.2.3"
|
||||
|
||||
|
||||
# Dependencies for building the development documentation
|
||||
[tool.poetry.group.dev-docs]
|
||||
optional = true
|
||||
|
||||
[tool.poetry.group.dev-docs.dependencies]
|
||||
sphinx = {version = "^6.1", python = "^3.8"}
|
||||
sphinx-autodoc2 = {version = ">=0.4.2,<0.6.0", python = "^3.8"}
|
||||
myst-parser = {version = "^1.0.0", python = "^3.8"}
|
||||
furo = ">=2022.12.7,<2025.0.0"
|
||||
|
||||
|
||||
[build-system]
|
||||
# The upper bounds here are defensive, intended to prevent situations like
|
||||
# https://github.com/matrix-org/synapse/issues/13849 and
|
||||
|
||||
@@ -30,14 +30,14 @@ http = "1.1.0"
|
||||
lazy_static = "1.4.0"
|
||||
log = "0.4.17"
|
||||
mime = "0.3.17"
|
||||
pyo3 = { version = "0.20.0", features = [
|
||||
pyo3 = { version = "0.21.0", features = [
|
||||
"macros",
|
||||
"anyhow",
|
||||
"abi3",
|
||||
"abi3-py38",
|
||||
] }
|
||||
pyo3-log = "0.9.0"
|
||||
pythonize = "0.20.0"
|
||||
pyo3-log = "0.10.0"
|
||||
pythonize = "0.21.0"
|
||||
regex = "1.6.0"
|
||||
sha2 = "0.10.8"
|
||||
serde = { version = "1.0.144", features = ["derive"] }
|
||||
|
||||
@@ -25,21 +25,21 @@ use std::net::Ipv4Addr;
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Error;
|
||||
use pyo3::prelude::*;
|
||||
use pyo3::{prelude::*, pybacked::PyBackedStr};
|
||||
use regex::Regex;
|
||||
|
||||
use crate::push::utils::{glob_to_regex, GlobMatchType};
|
||||
|
||||
/// Called when registering modules with python.
|
||||
pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
|
||||
let child_module = PyModule::new(py, "acl")?;
|
||||
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
let child_module = PyModule::new_bound(py, "acl")?;
|
||||
child_module.add_class::<ServerAclEvaluator>()?;
|
||||
|
||||
m.add_submodule(child_module)?;
|
||||
m.add_submodule(&child_module)?;
|
||||
|
||||
// We need to manually add the module to sys.modules to make `from
|
||||
// synapse.synapse_rust import acl` work.
|
||||
py.import("sys")?
|
||||
py.import_bound("sys")?
|
||||
.getattr("modules")?
|
||||
.set_item("synapse.synapse_rust.acl", child_module)?;
|
||||
|
||||
@@ -59,8 +59,8 @@ impl ServerAclEvaluator {
|
||||
#[new]
|
||||
pub fn py_new(
|
||||
allow_ip_literals: bool,
|
||||
allow: Vec<&str>,
|
||||
deny: Vec<&str>,
|
||||
allow: Vec<PyBackedStr>,
|
||||
deny: Vec<PyBackedStr>,
|
||||
) -> Result<Self, Error> {
|
||||
let allow = allow
|
||||
.iter()
|
||||
|
||||
@@ -20,8 +20,10 @@
|
||||
|
||||
//! Implements the internal metadata class attached to events.
|
||||
//!
|
||||
//! The internal metadata is a bit like a `TypedDict`, in that it is stored as a
|
||||
//! JSON dict in the DB. Most events have zero, or only a few, of these keys
|
||||
//! The internal metadata is a bit like a `TypedDict`, in that most of
|
||||
//! it is stored as a JSON dict in the DB (the exceptions being `outlier`
|
||||
//! and `stream_ordering` which have their own columns in the database).
|
||||
//! Most events have zero, or only a few, of these keys
|
||||
//! set. Therefore, since we care more about memory size than performance here,
|
||||
//! we store these fields in a mapping.
|
||||
//!
|
||||
@@ -36,9 +38,10 @@ use anyhow::Context;
|
||||
use log::warn;
|
||||
use pyo3::{
|
||||
exceptions::PyAttributeError,
|
||||
pybacked::PyBackedStr,
|
||||
pyclass, pymethods,
|
||||
types::{PyDict, PyString},
|
||||
IntoPy, PyAny, PyObject, PyResult, Python,
|
||||
types::{PyAnyMethods, PyDict, PyDictMethods, PyString},
|
||||
Bound, IntoPy, PyAny, PyObject, PyResult, Python,
|
||||
};
|
||||
|
||||
/// Definitions of the various fields of the internal metadata.
|
||||
@@ -57,7 +60,7 @@ enum EventInternalMetadataData {
|
||||
|
||||
impl EventInternalMetadataData {
|
||||
/// Convert the field to its name and python object.
|
||||
fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a PyString, PyObject) {
|
||||
fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, PyObject) {
|
||||
match self {
|
||||
EventInternalMetadataData::OutOfBandMembership(o) => {
|
||||
(pyo3::intern!(py, "out_of_band_membership"), o.into_py(py))
|
||||
@@ -88,10 +91,13 @@ impl EventInternalMetadataData {
|
||||
/// Converts from python key/values to the field.
|
||||
///
|
||||
/// Returns `None` if the key is a valid but unrecognized string.
|
||||
fn from_python_pair(key: &PyAny, value: &PyAny) -> PyResult<Option<Self>> {
|
||||
let key_str: &str = key.extract()?;
|
||||
fn from_python_pair(
|
||||
key: &Bound<'_, PyAny>,
|
||||
value: &Bound<'_, PyAny>,
|
||||
) -> PyResult<Option<Self>> {
|
||||
let key_str: PyBackedStr = key.extract()?;
|
||||
|
||||
let e = match key_str {
|
||||
let e = match &*key_str {
|
||||
"out_of_band_membership" => EventInternalMetadataData::OutOfBandMembership(
|
||||
value
|
||||
.extract()
|
||||
@@ -208,11 +214,11 @@ pub struct EventInternalMetadata {
|
||||
#[pymethods]
|
||||
impl EventInternalMetadata {
|
||||
#[new]
|
||||
fn new(dict: &PyDict) -> PyResult<Self> {
|
||||
fn new(dict: &Bound<'_, PyDict>) -> PyResult<Self> {
|
||||
let mut data = Vec::with_capacity(dict.len());
|
||||
|
||||
for (key, value) in dict.iter() {
|
||||
match EventInternalMetadataData::from_python_pair(key, value) {
|
||||
match EventInternalMetadataData::from_python_pair(&key, &value) {
|
||||
Ok(Some(entry)) => data.push(entry),
|
||||
Ok(None) => {}
|
||||
Err(err) => {
|
||||
@@ -234,8 +240,11 @@ impl EventInternalMetadata {
|
||||
self.clone()
|
||||
}
|
||||
|
||||
/// Get a dict holding the data stored in the `internal_metadata` column in the database.
|
||||
///
|
||||
/// Note that `outlier` and `stream_ordering` are stored in separate columns so are not returned here.
|
||||
fn get_dict(&self, py: Python<'_>) -> PyResult<PyObject> {
|
||||
let dict = PyDict::new(py);
|
||||
let dict = PyDict::new_bound(py);
|
||||
|
||||
for entry in &self.data {
|
||||
let (key, value) = entry.to_python_pair(py);
|
||||
|
||||
@@ -20,20 +20,23 @@
|
||||
|
||||
//! Classes for representing Events.
|
||||
|
||||
use pyo3::{types::PyModule, PyResult, Python};
|
||||
use pyo3::{
|
||||
types::{PyAnyMethods, PyModule, PyModuleMethods},
|
||||
Bound, PyResult, Python,
|
||||
};
|
||||
|
||||
mod internal_metadata;
|
||||
|
||||
/// Called when registering modules with python.
|
||||
pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
|
||||
let child_module = PyModule::new(py, "events")?;
|
||||
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
let child_module = PyModule::new_bound(py, "events")?;
|
||||
child_module.add_class::<internal_metadata::EventInternalMetadata>()?;
|
||||
|
||||
m.add_submodule(child_module)?;
|
||||
m.add_submodule(&child_module)?;
|
||||
|
||||
// We need to manually add the module to sys.modules to make `from
|
||||
// synapse.synapse_rust import events` work.
|
||||
py.import("sys")?
|
||||
py.import_bound("sys")?
|
||||
.getattr("modules")?
|
||||
.set_item("synapse.synapse_rust.events", child_module)?;
|
||||
|
||||
|
||||
@@ -17,8 +17,8 @@ use headers::{Header, HeaderMapExt};
|
||||
use http::{HeaderName, HeaderValue, Method, Request, Response, StatusCode, Uri};
|
||||
use pyo3::{
|
||||
exceptions::PyValueError,
|
||||
types::{PyBytes, PySequence, PyTuple},
|
||||
PyAny, PyResult,
|
||||
types::{PyAnyMethods, PyBytes, PyBytesMethods, PySequence, PyTuple},
|
||||
Bound, PyAny, PyResult,
|
||||
};
|
||||
|
||||
use crate::errors::SynapseError;
|
||||
@@ -28,10 +28,11 @@ use crate::errors::SynapseError;
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if calling the `read` on the Python object failed
|
||||
fn read_io_body(body: &PyAny, chunk_size: usize) -> PyResult<Bytes> {
|
||||
fn read_io_body(body: &Bound<'_, PyAny>, chunk_size: usize) -> PyResult<Bytes> {
|
||||
let mut buf = BytesMut::new();
|
||||
loop {
|
||||
let bytes: &PyBytes = body.call_method1("read", (chunk_size,))?.downcast()?;
|
||||
let bound = &body.call_method1("read", (chunk_size,))?;
|
||||
let bytes: &Bound<'_, PyBytes> = bound.downcast()?;
|
||||
if bytes.as_bytes().is_empty() {
|
||||
return Ok(buf.into());
|
||||
}
|
||||
@@ -50,17 +51,19 @@ fn read_io_body(body: &PyAny, chunk_size: usize) -> PyResult<Bytes> {
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if the Python object doesn't properly implement `IRequest`
|
||||
pub fn http_request_from_twisted(request: &PyAny) -> PyResult<Request<Bytes>> {
|
||||
pub fn http_request_from_twisted(request: &Bound<'_, PyAny>) -> PyResult<Request<Bytes>> {
|
||||
let content = request.getattr("content")?;
|
||||
let body = read_io_body(content, 4096)?;
|
||||
let body = read_io_body(&content, 4096)?;
|
||||
|
||||
let mut req = Request::new(body);
|
||||
|
||||
let uri: &PyBytes = request.getattr("uri")?.downcast()?;
|
||||
let bound = &request.getattr("uri")?;
|
||||
let uri: &Bound<'_, PyBytes> = bound.downcast()?;
|
||||
*req.uri_mut() =
|
||||
Uri::try_from(uri.as_bytes()).map_err(|_| PyValueError::new_err("invalid uri"))?;
|
||||
|
||||
let method: &PyBytes = request.getattr("method")?.downcast()?;
|
||||
let bound = &request.getattr("method")?;
|
||||
let method: &Bound<'_, PyBytes> = bound.downcast()?;
|
||||
*req.method_mut() = Method::from_bytes(method.as_bytes())
|
||||
.map_err(|_| PyValueError::new_err("invalid method"))?;
|
||||
|
||||
@@ -71,14 +74,17 @@ pub fn http_request_from_twisted(request: &PyAny) -> PyResult<Request<Bytes>> {
|
||||
|
||||
for header in headers_iter {
|
||||
let header = header?;
|
||||
let header: &PyTuple = header.downcast()?;
|
||||
let name: &PyBytes = header.get_item(0)?.downcast()?;
|
||||
let header: &Bound<'_, PyTuple> = header.downcast()?;
|
||||
let bound = &header.get_item(0)?;
|
||||
let name: &Bound<'_, PyBytes> = bound.downcast()?;
|
||||
let name = HeaderName::from_bytes(name.as_bytes())
|
||||
.map_err(|_| PyValueError::new_err("invalid header name"))?;
|
||||
|
||||
let values: &PySequence = header.get_item(1)?.downcast()?;
|
||||
let bound = &header.get_item(1)?;
|
||||
let values: &Bound<'_, PySequence> = bound.downcast()?;
|
||||
for index in 0..values.len()? {
|
||||
let value: &PyBytes = values.get_item(index)?.downcast()?;
|
||||
let bound = &values.get_item(index)?;
|
||||
let value: &Bound<'_, PyBytes> = bound.downcast()?;
|
||||
let value = HeaderValue::from_bytes(value.as_bytes())
|
||||
.map_err(|_| PyValueError::new_err("invalid header value"))?;
|
||||
req.headers_mut().append(name.clone(), value);
|
||||
@@ -100,7 +106,10 @@ pub fn http_request_from_twisted(request: &PyAny) -> PyResult<Request<Bytes>> {
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if the Python object doesn't properly implement `IRequest`
|
||||
pub fn http_response_to_twisted<B>(request: &PyAny, response: Response<B>) -> PyResult<()>
|
||||
pub fn http_response_to_twisted<B>(
|
||||
request: &Bound<'_, PyAny>,
|
||||
response: Response<B>,
|
||||
) -> PyResult<()>
|
||||
where
|
||||
B: Buf,
|
||||
{
|
||||
|
||||
@@ -38,7 +38,7 @@ fn reset_logging_config() {
|
||||
|
||||
/// The entry point for defining the Python module.
|
||||
#[pymodule]
|
||||
fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
|
||||
fn synapse_rust(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(reset_logging_config, m)?)?;
|
||||
|
||||
@@ -66,7 +66,7 @@ use log::warn;
|
||||
use pyo3::exceptions::PyTypeError;
|
||||
use pyo3::prelude::*;
|
||||
use pyo3::types::{PyBool, PyList, PyLong, PyString};
|
||||
use pythonize::{depythonize, pythonize};
|
||||
use pythonize::{depythonize_bound, pythonize};
|
||||
use serde::de::Error as _;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
@@ -78,19 +78,19 @@ pub mod evaluator;
|
||||
pub mod utils;
|
||||
|
||||
/// Called when registering modules with python.
|
||||
pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
|
||||
let child_module = PyModule::new(py, "push")?;
|
||||
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
let child_module = PyModule::new_bound(py, "push")?;
|
||||
child_module.add_class::<PushRule>()?;
|
||||
child_module.add_class::<PushRules>()?;
|
||||
child_module.add_class::<FilteredPushRules>()?;
|
||||
child_module.add_class::<PushRuleEvaluator>()?;
|
||||
child_module.add_function(wrap_pyfunction!(get_base_rule_ids, m)?)?;
|
||||
|
||||
m.add_submodule(child_module)?;
|
||||
m.add_submodule(&child_module)?;
|
||||
|
||||
// We need to manually add the module to sys.modules to make `from
|
||||
// synapse.synapse_rust import push` work.
|
||||
py.import("sys")?
|
||||
py.import_bound("sys")?
|
||||
.getattr("modules")?
|
||||
.set_item("synapse.synapse_rust.push", child_module)?;
|
||||
|
||||
@@ -271,12 +271,12 @@ pub enum SimpleJsonValue {
|
||||
|
||||
impl<'source> FromPyObject<'source> for SimpleJsonValue {
|
||||
fn extract(ob: &'source PyAny) -> PyResult<Self> {
|
||||
if let Ok(s) = <PyString as pyo3::PyTryFrom>::try_from(ob) {
|
||||
if let Ok(s) = ob.downcast::<PyString>() {
|
||||
Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
|
||||
// A bool *is* an int, ensure we try bool first.
|
||||
} else if let Ok(b) = <PyBool as pyo3::PyTryFrom>::try_from(ob) {
|
||||
} else if let Ok(b) = ob.downcast::<PyBool>() {
|
||||
Ok(SimpleJsonValue::Bool(b.extract()?))
|
||||
} else if let Ok(i) = <PyLong as pyo3::PyTryFrom>::try_from(ob) {
|
||||
} else if let Ok(i) = ob.downcast::<PyLong>() {
|
||||
Ok(SimpleJsonValue::Int(i.extract()?))
|
||||
} else if ob.is_none() {
|
||||
Ok(SimpleJsonValue::Null)
|
||||
@@ -299,7 +299,7 @@ pub enum JsonValue {
|
||||
|
||||
impl<'source> FromPyObject<'source> for JsonValue {
|
||||
fn extract(ob: &'source PyAny) -> PyResult<Self> {
|
||||
if let Ok(l) = <PyList as pyo3::PyTryFrom>::try_from(ob) {
|
||||
if let Ok(l) = ob.downcast::<PyList>() {
|
||||
match l.iter().map(SimpleJsonValue::extract).collect() {
|
||||
Ok(a) => Ok(JsonValue::Array(a)),
|
||||
Err(e) => Err(PyTypeError::new_err(format!(
|
||||
@@ -370,8 +370,8 @@ impl IntoPy<PyObject> for Condition {
|
||||
}
|
||||
|
||||
impl<'source> FromPyObject<'source> for Condition {
|
||||
fn extract(ob: &'source PyAny) -> PyResult<Self> {
|
||||
Ok(depythonize(ob)?)
|
||||
fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
|
||||
Ok(depythonize_bound(ob.clone())?)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -26,8 +26,10 @@ use headers::{
|
||||
use http::{header::ETAG, HeaderMap, Response, StatusCode, Uri};
|
||||
use mime::Mime;
|
||||
use pyo3::{
|
||||
exceptions::PyValueError, pyclass, pymethods, types::PyModule, Py, PyAny, PyObject, PyResult,
|
||||
Python, ToPyObject,
|
||||
exceptions::PyValueError,
|
||||
pyclass, pymethods,
|
||||
types::{PyAnyMethods, PyModule, PyModuleMethods},
|
||||
Bound, Py, PyAny, PyObject, PyResult, Python, ToPyObject,
|
||||
};
|
||||
use ulid::Ulid;
|
||||
|
||||
@@ -109,7 +111,7 @@ impl RendezvousHandler {
|
||||
#[pyo3(signature = (homeserver, /, capacity=100, max_content_length=4*1024, eviction_interval=60*1000, ttl=60*1000))]
|
||||
fn new(
|
||||
py: Python<'_>,
|
||||
homeserver: &PyAny,
|
||||
homeserver: &Bound<'_, PyAny>,
|
||||
capacity: usize,
|
||||
max_content_length: u64,
|
||||
eviction_interval: u64,
|
||||
@@ -150,7 +152,7 @@ impl RendezvousHandler {
|
||||
}
|
||||
|
||||
fn _evict(&mut self, py: Python<'_>) -> PyResult<()> {
|
||||
let clock = self.clock.as_ref(py);
|
||||
let clock = self.clock.bind(py);
|
||||
let now: u64 = clock.call_method0("time_msec")?.extract()?;
|
||||
let now = SystemTime::UNIX_EPOCH + Duration::from_millis(now);
|
||||
self.evict(now);
|
||||
@@ -158,12 +160,12 @@ impl RendezvousHandler {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_post(&mut self, py: Python<'_>, twisted_request: &PyAny) -> PyResult<()> {
|
||||
fn handle_post(&mut self, py: Python<'_>, twisted_request: &Bound<'_, PyAny>) -> PyResult<()> {
|
||||
let request = http_request_from_twisted(twisted_request)?;
|
||||
|
||||
let content_type = self.check_input_headers(request.headers())?;
|
||||
|
||||
let clock = self.clock.as_ref(py);
|
||||
let clock = self.clock.bind(py);
|
||||
let now: u64 = clock.call_method0("time_msec")?.extract()?;
|
||||
let now = SystemTime::UNIX_EPOCH + Duration::from_millis(now);
|
||||
|
||||
@@ -197,7 +199,12 @@ impl RendezvousHandler {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_get(&mut self, py: Python<'_>, twisted_request: &PyAny, id: &str) -> PyResult<()> {
|
||||
fn handle_get(
|
||||
&mut self,
|
||||
py: Python<'_>,
|
||||
twisted_request: &Bound<'_, PyAny>,
|
||||
id: &str,
|
||||
) -> PyResult<()> {
|
||||
let request = http_request_from_twisted(twisted_request)?;
|
||||
|
||||
let if_none_match: Option<IfNoneMatch> = request.headers().typed_get_optional()?;
|
||||
@@ -233,7 +240,12 @@ impl RendezvousHandler {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_put(&mut self, py: Python<'_>, twisted_request: &PyAny, id: &str) -> PyResult<()> {
|
||||
fn handle_put(
|
||||
&mut self,
|
||||
py: Python<'_>,
|
||||
twisted_request: &Bound<'_, PyAny>,
|
||||
id: &str,
|
||||
) -> PyResult<()> {
|
||||
let request = http_request_from_twisted(twisted_request)?;
|
||||
|
||||
let content_type = self.check_input_headers(request.headers())?;
|
||||
@@ -281,7 +293,7 @@ impl RendezvousHandler {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_delete(&mut self, twisted_request: &PyAny, id: &str) -> PyResult<()> {
|
||||
fn handle_delete(&mut self, twisted_request: &Bound<'_, PyAny>, id: &str) -> PyResult<()> {
|
||||
let _request = http_request_from_twisted(twisted_request)?;
|
||||
|
||||
let id: Ulid = id.parse().map_err(|_| NotFoundError::new())?;
|
||||
@@ -298,16 +310,16 @@ impl RendezvousHandler {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
|
||||
let child_module = PyModule::new(py, "rendezvous")?;
|
||||
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
let child_module = PyModule::new_bound(py, "rendezvous")?;
|
||||
|
||||
child_module.add_class::<RendezvousHandler>()?;
|
||||
|
||||
m.add_submodule(child_module)?;
|
||||
m.add_submodule(&child_module)?;
|
||||
|
||||
// We need to manually add the module to sys.modules to make `from
|
||||
// synapse.synapse_rust import rendezvous` work.
|
||||
py.import("sys")?
|
||||
py.import_bound("sys")?
|
||||
.getattr("modules")?
|
||||
.set_item("synapse.synapse_rust.rendezvous", child_module)?;
|
||||
|
||||
|
||||
@@ -214,7 +214,17 @@ fi
|
||||
|
||||
extra_test_args=()
|
||||
|
||||
test_packages="./tests/csapi ./tests ./tests/msc3874 ./tests/msc3890 ./tests/msc3391 ./tests/msc3930 ./tests/msc3902 ./tests/msc3967"
|
||||
test_packages=(
|
||||
./tests/csapi
|
||||
./tests
|
||||
./tests/msc3874
|
||||
./tests/msc3890
|
||||
./tests/msc3391
|
||||
./tests/msc3930
|
||||
./tests/msc3902
|
||||
./tests/msc3967
|
||||
./tests/msc4115
|
||||
)
|
||||
|
||||
# Enable dirty runs, so tests will reuse the same container where possible.
|
||||
# This significantly speeds up tests, but increases the possibility of test pollution.
|
||||
@@ -278,7 +288,7 @@ fi
|
||||
export PASS_SYNAPSE_LOG_TESTING=1
|
||||
|
||||
# Run the tests!
|
||||
echo "Images built; running complement with ${extra_test_args[@]} $@ $test_packages"
|
||||
echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}"
|
||||
cd "$COMPLEMENT_DIR"
|
||||
|
||||
go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" $test_packages
|
||||
go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}"
|
||||
|
||||
@@ -91,7 +91,6 @@ else
|
||||
"synapse" "docker" "tests"
|
||||
"scripts-dev"
|
||||
"contrib" "synmark" "stubs" ".ci"
|
||||
"dev-docs"
|
||||
)
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -127,7 +127,7 @@ BOOLEAN_COLUMNS = {
|
||||
"redactions": ["have_censored"],
|
||||
"room_stats_state": ["is_federatable"],
|
||||
"rooms": ["is_public", "has_auth_chain_index"],
|
||||
"users": ["shadow_banned", "approved", "locked"],
|
||||
"users": ["shadow_banned", "approved", "locked", "suspended"],
|
||||
"un_partial_stated_event_stream": ["rejection_status_changed"],
|
||||
"users_who_share_rooms": ["share_private"],
|
||||
"per_user_experimental_features": ["enabled"],
|
||||
@@ -777,22 +777,74 @@ class Porter:
|
||||
await self._setup_events_stream_seqs()
|
||||
await self._setup_sequence(
|
||||
"un_partial_stated_event_stream_sequence",
|
||||
("un_partial_stated_event_stream",),
|
||||
[("un_partial_stated_event_stream", "stream_id")],
|
||||
)
|
||||
await self._setup_sequence(
|
||||
"device_inbox_sequence", ("device_inbox", "device_federation_outbox")
|
||||
"device_inbox_sequence",
|
||||
[
|
||||
("device_inbox", "stream_id"),
|
||||
("device_federation_outbox", "stream_id"),
|
||||
],
|
||||
)
|
||||
await self._setup_sequence(
|
||||
"account_data_sequence",
|
||||
("room_account_data", "room_tags_revisions", "account_data"),
|
||||
[
|
||||
("room_account_data", "stream_id"),
|
||||
("room_tags_revisions", "stream_id"),
|
||||
("account_data", "stream_id"),
|
||||
],
|
||||
)
|
||||
await self._setup_sequence(
|
||||
"receipts_sequence",
|
||||
[
|
||||
("receipts_linearized", "stream_id"),
|
||||
],
|
||||
)
|
||||
await self._setup_sequence(
|
||||
"presence_stream_sequence",
|
||||
[
|
||||
("presence_stream", "stream_id"),
|
||||
],
|
||||
)
|
||||
await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
|
||||
await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
|
||||
await self._setup_auth_chain_sequence()
|
||||
await self._setup_sequence(
|
||||
"application_services_txn_id_seq",
|
||||
("application_services_txns",),
|
||||
"txn_id",
|
||||
[
|
||||
(
|
||||
"application_services_txns",
|
||||
"txn_id",
|
||||
)
|
||||
],
|
||||
)
|
||||
await self._setup_sequence(
|
||||
"device_lists_sequence",
|
||||
[
|
||||
("device_lists_stream", "stream_id"),
|
||||
("user_signature_stream", "stream_id"),
|
||||
("device_lists_outbound_pokes", "stream_id"),
|
||||
("device_lists_changes_in_room", "stream_id"),
|
||||
("device_lists_remote_pending", "stream_id"),
|
||||
("device_lists_changes_converted_stream_position", "stream_id"),
|
||||
],
|
||||
)
|
||||
await self._setup_sequence(
|
||||
"e2e_cross_signing_keys_sequence",
|
||||
[
|
||||
("e2e_cross_signing_keys", "stream_id"),
|
||||
],
|
||||
)
|
||||
await self._setup_sequence(
|
||||
"push_rules_stream_sequence",
|
||||
[
|
||||
("push_rules_stream", "stream_id"),
|
||||
],
|
||||
)
|
||||
await self._setup_sequence(
|
||||
"pushers_sequence",
|
||||
[
|
||||
("pushers", "id"),
|
||||
("deleted_pushers", "stream_id"),
|
||||
],
|
||||
)
|
||||
|
||||
# Step 3. Get tables.
|
||||
@@ -1101,12 +1153,11 @@ class Porter:
|
||||
async def _setup_sequence(
|
||||
self,
|
||||
sequence_name: str,
|
||||
stream_id_tables: Iterable[str],
|
||||
column_name: str = "stream_id",
|
||||
stream_id_tables: Iterable[Tuple[str, str]],
|
||||
) -> None:
|
||||
"""Set a sequence to the correct value."""
|
||||
current_stream_ids = []
|
||||
for stream_id_table in stream_id_tables:
|
||||
for stream_id_table, column_name in stream_id_tables:
|
||||
max_stream_id = cast(
|
||||
int,
|
||||
await self.sqlite_store.db_pool.simple_select_one_onecol(
|
||||
|
||||
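The reworked `_setup_sequence` signature pairs each table with the column to read; conceptually the porter advances each PostgreSQL sequence past the largest value seen across its tables, roughly as in this sketch (the exact SQL the script issues may differ).

```python
# Conceptual sketch of choosing a sequence's next value from per-(table, column)
# maxima. `fetch_max` stands in for the SQLite lookups the porter performs.
from typing import Callable, Iterable, Tuple


def next_sequence_value(
    stream_id_tables: Iterable[Tuple[str, str]],
    fetch_max: Callable[[str, str], int],
) -> int:
    # Start the sequence one past the highest existing id, defaulting to 1.
    maxima = [fetch_max(table, column) for table, column in stream_id_tables]
    return max(maxima, default=0) + 1

# e.g. next_sequence_value([("device_inbox", "stream_id"),
#                           ("device_federation_outbox", "stream_id")], fetch_max)
# would feed an "ALTER SEQUENCE ... RESTART WITH <value>"-style statement.
```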
@@ -50,7 +50,7 @@ class Membership:
|
||||
KNOCK: Final = "knock"
|
||||
LEAVE: Final = "leave"
|
||||
BAN: Final = "ban"
|
||||
LIST: Final = (INVITE, JOIN, KNOCK, LEAVE, BAN)
|
||||
LIST: Final = {INVITE, JOIN, KNOCK, LEAVE, BAN}
|
||||
|
||||
|
||||
class PresenceState:
|
||||
@@ -234,6 +234,13 @@ class EventContentFields:
|
||||
TO_DEVICE_MSGID: Final = "org.matrix.msgid"
|
||||
|
||||
|
||||
class EventUnsignedContentFields:
|
||||
"""Fields found inside the 'unsigned' data on events"""
|
||||
|
||||
# Requesting user's membership, per MSC4115
|
||||
MSC4115_MEMBERSHIP: Final = "io.element.msc4115.membership"
|
||||
|
||||
|
||||
class RoomTypes:
|
||||
"""Understood values of the room_type field of m.room.create events."""
|
||||
|
||||
|
||||
@@ -316,6 +316,10 @@ class Ratelimiter:
|
||||
)
|
||||
|
||||
if not allowed:
|
||||
# We pause for a bit here to stop clients from "tight-looping" on
|
||||
# retrying their request.
|
||||
await self.clock.sleep(0.5)
|
||||
|
||||
raise LimitExceededError(
|
||||
limiter_name=self._limiter_name,
|
||||
retry_after_ms=int(1000 * (time_allowed - time_now_s)),
|
||||
|
||||
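On the client side, the complementary behaviour is to honour the `retry_after_ms` value carried by the 429 response rather than tight-looping; a minimal sketch, with the URL and token as placeholders:

```python
# Sketch: back off according to retry_after_ms instead of retrying immediately.
import time

import requests


def post_with_backoff(url: str, token: str, body: dict, max_attempts: int = 5) -> dict:
    for _ in range(max_attempts):
        resp = requests.post(
            url, headers={"Authorization": f"Bearer {token}"}, json=body
        )
        if resp.status_code != 429:
            resp.raise_for_status()
            return resp.json()
        # Rate-limited responses indicate how long to wait before retrying.
        wait_ms = resp.json().get("retry_after_ms", 1000)
        time.sleep(wait_ms / 1000)
    raise RuntimeError("still rate limited after retries")
```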
@@ -68,6 +68,7 @@ from synapse.config._base import format_config_error
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig
|
||||
from synapse.crypto import context_factory
|
||||
from synapse.events.auto_accept_invites import InviteAutoAccepter
|
||||
from synapse.events.presence_router import load_legacy_presence_router
|
||||
from synapse.handlers.auth import load_legacy_password_auth_providers
|
||||
from synapse.http.site import SynapseSite
|
||||
@@ -582,6 +583,11 @@ async def start(hs: "HomeServer") -> None:
|
||||
m = module(config, module_api)
|
||||
logger.info("Loaded module %s", m)
|
||||
|
||||
if hs.config.auto_accept_invites.enabled:
|
||||
# Start the local auto_accept_invites module.
|
||||
m = InviteAutoAccepter(hs.config.auto_accept_invites, module_api)
|
||||
logger.info("Loaded local module %s", m)
|
||||
|
||||
load_legacy_spam_checkers(hs)
|
||||
load_legacy_third_party_event_rules(hs)
|
||||
load_legacy_presence_router(hs)
|
||||
@@ -675,17 +681,17 @@ def setup_sentry(hs: "HomeServer") -> None:
|
||||
)
|
||||
|
||||
# We set some default tags that give some context to this instance
|
||||
with sentry_sdk.configure_scope() as scope:
|
||||
scope.set_tag("matrix_server_name", hs.config.server.server_name)
|
||||
global_scope = sentry_sdk.Scope.get_global_scope()
|
||||
global_scope.set_tag("matrix_server_name", hs.config.server.server_name)
|
||||
|
||||
app = (
|
||||
hs.config.worker.worker_app
|
||||
if hs.config.worker.worker_app
|
||||
else "synapse.app.homeserver"
|
||||
)
|
||||
name = hs.get_instance_name()
|
||||
scope.set_tag("worker_app", app)
|
||||
scope.set_tag("worker_name", name)
|
||||
app = (
|
||||
hs.config.worker.worker_app
|
||||
if hs.config.worker.worker_app
|
||||
else "synapse.app.homeserver"
|
||||
)
|
||||
name = hs.get_instance_name()
|
||||
global_scope.set_tag("worker_app", app)
|
||||
global_scope.set_tag("worker_name", name)
|
||||
|
||||
|
||||
def setup_sdnotify(hs: "HomeServer") -> None:
|
||||
|
||||
@@ -23,6 +23,7 @@ from synapse.config import ( # noqa: F401
|
||||
api,
|
||||
appservice,
|
||||
auth,
|
||||
auto_accept_invites,
|
||||
background_updates,
|
||||
cache,
|
||||
captcha,
|
||||
@@ -120,6 +121,7 @@ class RootConfig:
|
||||
federation: federation.FederationConfig
|
||||
retention: retention.RetentionConfig
|
||||
background_updates: background_updates.BackgroundUpdateConfig
|
||||
auto_accept_invites: auto_accept_invites.AutoAcceptInvitesConfig
|
||||
|
||||
config_classes: List[Type["Config"]] = ...
|
||||
config_files: List[str]
|
||||
|
||||
synapse/config/auto_accept_invites.py (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
#
|
||||
# This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
#
|
||||
# Copyright (C) 2024 New Vector, Ltd
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# See the GNU Affero General Public License for more details:
|
||||
# <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
#
|
||||
# Originally licensed under the Apache License, Version 2.0:
|
||||
# <http://www.apache.org/licenses/LICENSE-2.0>.
|
||||
#
|
||||
# [This file includes modifications made by New Vector Limited]
|
||||
#
|
||||
#
|
||||
from typing import Any
|
||||
|
||||
from synapse.types import JsonDict
|
||||
|
||||
from ._base import Config
|
||||
|
||||
|
||||
class AutoAcceptInvitesConfig(Config):
|
||||
section = "auto_accept_invites"
|
||||
|
||||
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
|
||||
auto_accept_invites_config = config.get("auto_accept_invites") or {}
|
||||
|
||||
self.enabled = auto_accept_invites_config.get("enabled", False)
|
||||
|
||||
self.accept_invites_only_for_direct_messages = auto_accept_invites_config.get(
|
||||
"only_for_direct_messages", False
|
||||
)
|
||||
|
||||
self.accept_invites_only_from_local_users = auto_accept_invites_config.get(
|
||||
"only_from_local_users", False
|
||||
)
|
||||
|
||||
self.worker_to_run_on = auto_accept_invites_config.get("worker_to_run_on")
|
||||
@@ -66,6 +66,17 @@ class CasConfig(Config):
|
||||
|
||||
self.cas_enable_registration = cas_config.get("enable_registration", True)
|
||||
|
||||
self.cas_allow_numeric_ids = cas_config.get("allow_numeric_ids")
|
||||
self.cas_numeric_ids_prefix = cas_config.get("numeric_ids_prefix")
|
||||
if (
|
||||
self.cas_numeric_ids_prefix is not None
|
||||
and self.cas_numeric_ids_prefix.isalnum() is False
|
||||
):
|
||||
raise ConfigError(
|
||||
"Only alphanumeric characters are allowed for numeric IDs prefix",
|
||||
("cas_config", "numeric_ids_prefix"),
|
||||
)
|
||||
|
||||
self.idp_name = cas_config.get("idp_name", "CAS")
|
||||
self.idp_icon = cas_config.get("idp_icon")
|
||||
self.idp_brand = cas_config.get("idp_brand")
|
||||
@@ -77,6 +88,8 @@ class CasConfig(Config):
|
||||
self.cas_displayname_attribute = None
|
||||
self.cas_required_attributes = []
|
||||
self.cas_enable_registration = False
|
||||
self.cas_allow_numeric_ids = False
|
||||
self.cas_numeric_ids_prefix = "u"
|
||||
|
||||
|
||||
# CAS uses a legacy required attributes mapping, not the one provided by
|
||||
|
||||
@@ -332,6 +332,9 @@ class ExperimentalConfig(Config):
|
||||
# MSC3391: Removing account data.
|
||||
self.msc3391_enabled = experimental.get("msc3391_enabled", False)
|
||||
|
||||
# MSC3575 (Sliding Sync API endpoints)
|
||||
self.msc3575_enabled: bool = experimental.get("msc3575_enabled", False)
|
||||
|
||||
# MSC3773: Thread notifications
|
||||
self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)
|
||||
|
||||
@@ -432,3 +435,14 @@ class ExperimentalConfig(Config):
|
||||
"You cannot have MSC4108 both enabled and delegated at the same time",
|
||||
("experimental", "msc4108_delegation_endpoint"),
|
||||
)
|
||||
|
||||
self.msc4115_membership_on_events = experimental.get(
|
||||
"msc4115_membership_on_events", False
|
||||
)
|
||||
|
||||
self.msc3916_authenticated_media_enabled = experimental.get(
|
||||
"msc3916_authenticated_media_enabled", False
|
||||
)
|
||||
|
||||
# MSC4151: Report room API (Client-Server API)
|
||||
self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)
|
||||
|
||||
@@ -42,6 +42,10 @@ class FederationConfig(Config):
|
||||
for domain in federation_domain_whitelist:
|
||||
self.federation_domain_whitelist[domain] = True
|
||||
|
||||
self.federation_whitelist_endpoint_enabled = config.get(
|
||||
"federation_whitelist_endpoint_enabled", False
|
||||
)
|
||||
|
||||
federation_metrics_domains = config.get("federation_metrics_domains") or []
|
||||
validate_config(
|
||||
_METRICS_FOR_DOMAINS_SCHEMA,
|
||||
|
||||
@@ -23,6 +23,7 @@ from .account_validity import AccountValidityConfig
|
||||
from .api import ApiConfig
|
||||
from .appservice import AppServiceConfig
|
||||
from .auth import AuthConfig
|
||||
from .auto_accept_invites import AutoAcceptInvitesConfig
|
||||
from .background_updates import BackgroundUpdateConfig
|
||||
from .cache import CacheConfig
|
||||
from .captcha import CaptchaConfig
|
||||
@@ -105,4 +106,5 @@ class HomeServerConfig(RootConfig):
|
||||
RedisConfig,
|
||||
ExperimentalConfig,
|
||||
BackgroundUpdateConfig,
|
||||
AutoAcceptInvitesConfig,
|
||||
]
|
||||
|
||||
@@ -218,3 +218,13 @@ class RatelimitConfig(Config):
|
||||
"rc_media_create",
|
||||
defaults={"per_second": 10, "burst_count": 50},
|
||||
)
|
||||
|
||||
self.remote_media_downloads = RatelimitSettings(
|
||||
key="rc_remote_media_downloads",
|
||||
per_second=self.parse_size(
|
||||
config.get("remote_media_download_per_second", "87K")
|
||||
),
|
||||
burst_count=self.parse_size(
|
||||
config.get("remote_media_download_burst_count", "500M")
|
||||
),
|
||||
)
|
||||
|
||||
synapse/events/auto_accept_invites.py (new file, 196 lines)
@@ -0,0 +1,196 @@
|
||||
#
|
||||
# This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
#
|
||||
# Copyright 2021 The Matrix.org Foundation C.I.C
|
||||
# Copyright (C) 2024 New Vector, Ltd
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# See the GNU Affero General Public License for more details:
|
||||
# <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
#
|
||||
# Originally licensed under the Apache License, Version 2.0:
|
||||
# <http://www.apache.org/licenses/LICENSE-2.0>.
|
||||
#
|
||||
# [This file includes modifications made by New Vector Limited]
|
||||
#
|
||||
#
|
||||
import logging
|
||||
from http import HTTPStatus
|
||||
from typing import Any, Dict, Tuple
|
||||
|
||||
from synapse.api.constants import AccountDataTypes, EventTypes, Membership
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.config.auto_accept_invites import AutoAcceptInvitesConfig
|
||||
from synapse.module_api import EventBase, ModuleApi, run_as_background_process
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class InviteAutoAccepter:
|
||||
def __init__(self, config: AutoAcceptInvitesConfig, api: ModuleApi):
|
||||
# Keep a reference to the Module API.
|
||||
self._api = api
|
||||
self._config = config
|
||||
|
||||
if not self._config.enabled:
|
||||
return
|
||||
|
||||
should_run_on_this_worker = config.worker_to_run_on == self._api.worker_name
|
||||
|
||||
if not should_run_on_this_worker:
|
||||
logger.info(
|
||||
"Not accepting invites on this worker (configured: %r, here: %r)",
|
||||
config.worker_to_run_on,
|
||||
self._api.worker_name,
|
||||
)
|
||||
return
|
||||
|
||||
logger.info(
|
||||
"Accepting invites on this worker (here: %r)", self._api.worker_name
|
||||
)
|
||||
|
||||
# Register the callback.
|
||||
self._api.register_third_party_rules_callbacks(
|
||||
on_new_event=self.on_new_event,
|
||||
)
|
||||
|
||||
async def on_new_event(self, event: EventBase, *args: Any) -> None:
|
||||
"""Listens for new events, and if the event is an invite for a local user then
|
||||
automatically accepts it.
|
||||
|
||||
Args:
|
||||
event: The incoming event.
|
||||
"""
|
||||
# Check if the event is an invite for a local user.
|
||||
is_invite_for_local_user = (
|
||||
event.type == EventTypes.Member
|
||||
and event.is_state()
|
||||
and event.membership == Membership.INVITE
|
||||
and self._api.is_mine(event.state_key)
|
||||
)
|
||||
|
||||
# Only accept invites for direct messages if the configuration mandates it.
|
||||
is_direct_message = event.content.get("is_direct", False)
|
||||
is_allowed_by_direct_message_rules = (
|
||||
not self._config.accept_invites_only_for_direct_messages
|
||||
or is_direct_message is True
|
||||
)
|
||||
|
||||
# Only accept invites from remote users if the configuration mandates it.
|
||||
is_from_local_user = self._api.is_mine(event.sender)
|
||||
is_allowed_by_local_user_rules = (
|
||||
not self._config.accept_invites_only_from_local_users
|
||||
or is_from_local_user is True
|
||||
)
|
||||
|
||||
if (
|
||||
is_invite_for_local_user
|
||||
and is_allowed_by_direct_message_rules
|
||||
and is_allowed_by_local_user_rules
|
||||
):
|
||||
# Make the user join the room. We run this as a background process to circumvent a race condition
|
||||
# that occurs when responding to invites over federation (see https://github.com/matrix-org/synapse-auto-accept-invite/issues/12)
|
||||
run_as_background_process(
|
||||
"retry_make_join",
|
||||
self._retry_make_join,
|
||||
event.state_key,
|
||||
event.state_key,
|
||||
event.room_id,
|
||||
"join",
|
||||
bg_start_span=False,
|
||||
)
|
||||
|
||||
if is_direct_message:
|
||||
# Mark this room as a direct message!
|
||||
await self._mark_room_as_direct_message(
|
||||
event.state_key, event.sender, event.room_id
|
||||
)
|
||||
|
||||
async def _mark_room_as_direct_message(
|
||||
self, user_id: str, dm_user_id: str, room_id: str
|
||||
) -> None:
|
||||
"""
|
||||
Marks a room (`room_id`) as a direct message with the counterparty `dm_user_id`
|
||||
from the perspective of the user `user_id`.
|
||||
|
||||
Args:
|
||||
user_id: the user for whom the membership is changing
|
||||
dm_user_id: the user performing the membership change
|
||||
room_id: room id of the room the user is invited to
|
||||
"""
|
||||
|
||||
# This is a dict of User IDs to tuples of Room IDs
|
||||
# (get_global will return a frozendict of tuples as it freezes the data,
|
||||
# but we should accept either frozen or unfrozen variants.)
|
||||
# Be careful: we convert the outer frozendict into a dict here,
|
||||
# but the contents of the dict are still frozen (tuples in lieu of lists,
|
||||
# etc.)
|
||||
dm_map: Dict[str, Tuple[str, ...]] = dict(
|
||||
await self._api.account_data_manager.get_global(
|
||||
user_id, AccountDataTypes.DIRECT
|
||||
)
|
||||
or {}
|
||||
)
|
||||
|
||||
if dm_user_id not in dm_map:
|
||||
dm_map[dm_user_id] = (room_id,)
|
||||
else:
|
||||
dm_rooms_for_user = dm_map[dm_user_id]
|
||||
assert isinstance(dm_rooms_for_user, (tuple, list))
|
||||
|
||||
dm_map[dm_user_id] = tuple(dm_rooms_for_user) + (room_id,)
|
||||
|
||||
await self._api.account_data_manager.put_global(
|
||||
user_id, AccountDataTypes.DIRECT, dm_map
|
||||
)
|
||||
|
||||
async def _retry_make_join(
|
||||
self, sender: str, target: str, room_id: str, new_membership: str
|
||||
) -> None:
|
||||
"""
|
||||
A function to retry sending the `make_join` request with an increasing backoff. This is
|
||||
implemented to work around a race condition when receiving invites over federation.
|
||||
|
||||
Args:
|
||||
sender: the user performing the membership change
|
||||
target: the user for whom the membership is changing
|
||||
room_id: room id of the room to join to
|
||||
new_membership: the type of membership event (in this case will be "join")
|
||||
"""
|
||||
|
||||
sleep = 0
|
||||
retries = 0
|
||||
join_event = None
|
||||
|
||||
while retries < 5:
|
||||
try:
|
||||
await self._api.sleep(sleep)
|
||||
join_event = await self._api.update_room_membership(
|
||||
sender=sender,
|
||||
target=target,
|
||||
room_id=room_id,
|
||||
new_membership=new_membership,
|
||||
)
|
||||
except SynapseError as e:
|
||||
if e.code == HTTPStatus.FORBIDDEN:
|
||||
logger.debug(
|
||||
f"Update_room_membership was forbidden. This can sometimes be expected for remote invites. Exception: {e}"
|
||||
)
|
||||
else:
|
||||
logger.warn(
|
||||
f"Update_room_membership raised the following unexpected (SynapseError) exception: {e}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warn(
|
||||
f"Update_room_membership raised the following unexpected exception: {e}"
|
||||
)
|
||||
|
||||
sleep = 2**retries
|
||||
retries += 1
|
||||
|
||||
if join_event is not None:
|
||||
break
|
||||
@@ -49,7 +49,7 @@ from synapse.api.errors import Codes, SynapseError
|
||||
from synapse.api.room_versions import RoomVersion
|
||||
from synapse.types import JsonDict, Requester
|
||||
|
||||
from . import EventBase
|
||||
from . import EventBase, make_event_from_dict
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.handlers.relations import BundledAggregations
|
||||
@@ -82,17 +82,14 @@ def prune_event(event: EventBase) -> EventBase:
|
||||
"""
|
||||
pruned_event_dict = prune_event_dict(event.room_version, event.get_dict())
|
||||
|
||||
from . import make_event_from_dict
|
||||
|
||||
pruned_event = make_event_from_dict(
|
||||
pruned_event_dict, event.room_version, event.internal_metadata.get_dict()
|
||||
)
|
||||
|
||||
# copy the internal fields
|
||||
# Copy the bits of `internal_metadata` that aren't returned by `get_dict`
|
||||
pruned_event.internal_metadata.stream_ordering = (
|
||||
event.internal_metadata.stream_ordering
|
||||
)
|
||||
|
||||
pruned_event.internal_metadata.outlier = event.internal_metadata.outlier
|
||||
|
||||
# Mark the event as redacted
|
||||
@@ -101,6 +98,29 @@ def prune_event(event: EventBase) -> EventBase:
|
||||
return pruned_event
|
||||
|
||||
|
||||
def clone_event(event: EventBase) -> EventBase:
|
||||
"""Take a copy of the event.
|
||||
|
||||
This is mostly useful because it does a *shallow* copy of the `unsigned` data,
|
||||
which means it can then be updated without corrupting the in-memory cache. Note that
|
||||
other properties of the event, such as `content`, are *not* (currently) copied here.
|
||||
"""
|
||||
# XXX: We rely on at least one of `event.get_dict()` and `make_event_from_dict()`
|
||||
# making a copy of `unsigned`. Currently, both do, though I don't really know why.
|
||||
# Still, as long as they do, there's not much point doing yet another copy here.
|
||||
new_event = make_event_from_dict(
|
||||
event.get_dict(), event.room_version, event.internal_metadata.get_dict()
|
||||
)
|
||||
|
||||
# Copy the bits of `internal_metadata` that aren't returned by `get_dict`.
|
||||
new_event.internal_metadata.stream_ordering = (
|
||||
event.internal_metadata.stream_ordering
|
||||
)
|
||||
new_event.internal_metadata.outlier = event.internal_metadata.outlier
|
||||
|
||||
return new_event
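
To illustrate why the shallow copy of `unsigned` matters, here is a small sketch using plain dicts rather than real `EventBase` objects: once the clone has its own `unsigned` mapping, adding bundled data to it no longer touches the cached original.

# Illustration (plain dicts, not real EventBase objects): giving the clone its
# own `unsigned` mapping means adding bundled data to the clone does not
# corrupt the cached original.
import copy

cached_event = {"type": "m.room.message", "unsigned": {"age": 1234}}
clone = copy.copy(cached_event)                      # shallow: `unsigned` still shared
clone["unsigned"] = dict(cached_event["unsigned"])   # re-copy `unsigned` itself

clone["unsigned"]["m.relations"] = {"m.thread": {"count": 2}}
assert "m.relations" not in cached_event["unsigned"]
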
|
||||
|
||||
|
||||
def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
|
||||
"""Redacts the event_dict in the same way as `prune_event`, except it
|
||||
operates on dicts rather than event objects
|
||||
|
||||
@@ -47,9 +47,9 @@ from synapse.events.utils import (
|
||||
validate_canonicaljson,
|
||||
)
|
||||
from synapse.http.servlet import validate_json_object
|
||||
from synapse.rest.models import RequestBodyModel
|
||||
from synapse.storage.controllers.state import server_acl_evaluator_from_event
|
||||
from synapse.types import EventID, JsonDict, RoomID, StrCollection, UserID
|
||||
from synapse.types.rest import RequestBodyModel
|
||||
|
||||
|
||||
class EventValidator:
|
||||
|
||||
@@ -56,6 +56,7 @@ from synapse.api.errors import (
|
||||
SynapseError,
|
||||
UnsupportedRoomVersionError,
|
||||
)
|
||||
from synapse.api.ratelimiting import Ratelimiter
|
||||
from synapse.api.room_versions import (
|
||||
KNOWN_ROOM_VERSIONS,
|
||||
EventFormatVersions,
|
||||
@@ -1877,6 +1878,8 @@ class FederationClient(FederationBase):
|
||||
output_stream: BinaryIO,
|
||||
max_size: int,
|
||||
max_timeout_ms: int,
|
||||
download_ratelimiter: Ratelimiter,
|
||||
ip_address: str,
|
||||
) -> Tuple[int, Dict[bytes, List[bytes]]]:
|
||||
try:
|
||||
return await self.transport_layer.download_media_v3(
|
||||
@@ -1885,6 +1888,8 @@ class FederationClient(FederationBase):
|
||||
output_stream=output_stream,
|
||||
max_size=max_size,
|
||||
max_timeout_ms=max_timeout_ms,
|
||||
download_ratelimiter=download_ratelimiter,
|
||||
ip_address=ip_address,
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
# If an error is received that is due to an unrecognised endpoint,
|
||||
@@ -1905,6 +1910,8 @@ class FederationClient(FederationBase):
|
||||
output_stream=output_stream,
|
||||
max_size=max_size,
|
||||
max_timeout_ms=max_timeout_ms,
|
||||
download_ratelimiter=download_ratelimiter,
|
||||
ip_address=ip_address,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -546,7 +546,25 @@ class FederationServer(FederationBase):
|
||||
edu_type=edu_dict["edu_type"],
|
||||
content=edu_dict["content"],
|
||||
)
|
||||
await self.registry.on_edu(edu.edu_type, origin, edu.content)
|
||||
try:
|
||||
await self.registry.on_edu(edu.edu_type, origin, edu.content)
|
||||
except Exception:
|
||||
# If there was an error handling the EDU, we must reject the
|
||||
# transaction.
|
||||
#
|
||||
# Some EDU types (notably, to-device messages) are, despite their name,
|
||||
# expected to be reliable; if we weren't able to do something with it,
|
||||
# we have to tell the sender that, and the only way the protocol gives
|
||||
# us to do so is by sending an HTTP error back on the transaction.
|
||||
#
|
||||
# We log the exception now, and then raise a new SynapseError to cause
|
||||
# the transaction to be failed.
|
||||
logger.exception("Error handling EDU of type %s", edu.edu_type)
|
||||
raise SynapseError(500, f"Error handling EDU of type {edu.edu_type}")
|
||||
|
||||
# TODO: if the first EDU fails, we should probably abort the whole
|
||||
# thing rather than carrying on with the rest of them. That would
|
||||
# probably be best done inside `concurrently_execute`.
|
||||
|
||||
await concurrently_execute(
|
||||
_process_edu,
|
||||
@@ -656,7 +674,7 @@ class FederationServer(FederationBase):
|
||||
# This is in addition to the HS-level rate limiting applied by
|
||||
# BaseFederationServlet.
|
||||
# type-ignore: mypy doesn't seem able to deduce the type of the limiter(!?)
|
||||
await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type]
|
||||
await self._room_member_handler._join_rate_per_room_limiter.ratelimit(
|
||||
requester=None,
|
||||
key=room_id,
|
||||
update=False,
|
||||
@@ -699,7 +717,7 @@ class FederationServer(FederationBase):
|
||||
SynapseTags.SEND_JOIN_RESPONSE_IS_PARTIAL_STATE,
|
||||
caller_supports_partial_state,
|
||||
)
|
||||
await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type]
|
||||
await self._room_member_handler._join_rate_per_room_limiter.ratelimit(
|
||||
requester=None,
|
||||
key=room_id,
|
||||
update=False,
|
||||
@@ -1327,6 +1345,7 @@ class FederationServer(FederationBase):
|
||||
Raises:
|
||||
AuthError if the server does not match the ACL
|
||||
"""
|
||||
# Test.
|
||||
server_acl_evaluator = (
|
||||
await self._storage_controllers.state.get_server_acl_for_room(room_id)
|
||||
)
|
||||
@@ -1414,12 +1433,7 @@ class FederationHandlerRegistry:
|
||||
handler = self.edu_handlers.get(edu_type)
|
||||
if handler:
|
||||
with start_active_span_from_edu(content, "handle_edu"):
|
||||
try:
|
||||
await handler(origin, content)
|
||||
except SynapseError as e:
|
||||
logger.info("Failed to handle edu %r: %r", edu_type, e)
|
||||
except Exception:
|
||||
logger.exception("Failed to handle edu %r", edu_type)
|
||||
await handler(origin, content)
|
||||
return
|
||||
|
||||
# Check if we can route it somewhere else that isn't us
|
||||
@@ -1428,17 +1442,12 @@ class FederationHandlerRegistry:
|
||||
# Pick an instance randomly so that we don't overload one.
|
||||
route_to = random.choice(instances)
|
||||
|
||||
try:
|
||||
await self._send_edu(
|
||||
instance_name=route_to,
|
||||
edu_type=edu_type,
|
||||
origin=origin,
|
||||
content=content,
|
||||
)
|
||||
except SynapseError as e:
|
||||
logger.info("Failed to handle edu %r: %r", edu_type, e)
|
||||
except Exception:
|
||||
logger.exception("Failed to handle edu %r", edu_type)
|
||||
await self._send_edu(
|
||||
instance_name=route_to,
|
||||
edu_type=edu_type,
|
||||
origin=origin,
|
||||
content=content,
|
||||
)
|
||||
return
|
||||
|
||||
# Oh well, let's just log and move on.
|
||||
|
||||
@@ -43,6 +43,7 @@ import ijson
|
||||
|
||||
from synapse.api.constants import Direction, Membership
|
||||
from synapse.api.errors import Codes, HttpResponseException, SynapseError
|
||||
from synapse.api.ratelimiting import Ratelimiter
|
||||
from synapse.api.room_versions import RoomVersion
|
||||
from synapse.api.urls import (
|
||||
FEDERATION_UNSTABLE_PREFIX,
|
||||
@@ -819,6 +820,8 @@ class TransportLayerClient:
|
||||
output_stream: BinaryIO,
|
||||
max_size: int,
|
||||
max_timeout_ms: int,
|
||||
download_ratelimiter: Ratelimiter,
|
||||
ip_address: str,
|
||||
) -> Tuple[int, Dict[bytes, List[bytes]]]:
|
||||
path = f"/_matrix/media/r0/download/{destination}/{media_id}"
|
||||
|
||||
@@ -834,6 +837,8 @@ class TransportLayerClient:
|
||||
"allow_remote": "false",
|
||||
"timeout_ms": str(max_timeout_ms),
|
||||
},
|
||||
download_ratelimiter=download_ratelimiter,
|
||||
ip_address=ip_address,
|
||||
)
|
||||
|
||||
async def download_media_v3(
|
||||
@@ -843,6 +848,8 @@ class TransportLayerClient:
|
||||
output_stream: BinaryIO,
|
||||
max_size: int,
|
||||
max_timeout_ms: int,
|
||||
download_ratelimiter: Ratelimiter,
|
||||
ip_address: str,
|
||||
) -> Tuple[int, Dict[bytes, List[bytes]]]:
|
||||
path = f"/_matrix/media/v3/download/{destination}/{media_id}"
|
||||
|
||||
@@ -862,6 +869,8 @@ class TransportLayerClient:
|
||||
"allow_redirect": "true",
|
||||
},
|
||||
follow_redirects=True,
|
||||
download_ratelimiter=download_ratelimiter,
|
||||
ip_address=ip_address,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
# [This file includes modifications made by New Vector Limited]
|
||||
#
|
||||
#
|
||||
import inspect
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Type
|
||||
|
||||
@@ -33,6 +34,7 @@ from synapse.federation.transport.server.federation import (
|
||||
FEDERATION_SERVLET_CLASSES,
|
||||
FederationAccountStatusServlet,
|
||||
FederationUnstableClientKeysClaimServlet,
|
||||
FederationUnstableMediaDownloadServlet,
|
||||
)
|
||||
from synapse.http.server import HttpServer, JsonResource
|
||||
from synapse.http.servlet import (
|
||||
@@ -315,6 +317,28 @@ def register_servlets(
|
||||
):
|
||||
continue
|
||||
|
||||
if servletclass == FederationUnstableMediaDownloadServlet:
|
||||
if (
|
||||
not hs.config.server.enable_media_repo
|
||||
or not hs.config.experimental.msc3916_authenticated_media_enabled
|
||||
):
|
||||
continue
|
||||
|
||||
# don't load the endpoint if the storage provider is incompatible
|
||||
media_repo = hs.get_media_repository()
|
||||
load_download_endpoint = True
|
||||
for provider in media_repo.media_storage.storage_providers:
|
||||
signature = inspect.signature(provider.backend.fetch)
|
||||
if "federation" not in signature.parameters:
|
||||
logger.warning(
|
||||
f"Federation media `/download` endpoint will not be enabled as storage provider {provider.backend} is not compatible with this endpoint."
|
||||
)
|
||||
load_download_endpoint = False
|
||||
break
|
||||
|
||||
if not load_download_endpoint:
|
||||
continue
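
The compatibility check above only inspects whether each provider's `fetch` method declares a `federation` parameter. A minimal sketch of what passes and fails that check, using hypothetical provider backends (not Synapse's real storage provider classes):

# Sketch of the signature check (hypothetical backend classes, illustrative only).
import inspect

class CompatibleBackend:
    async def fetch(self, path, file_info, federation: bool = False):
        ...

class LegacyBackend:
    async def fetch(self, path, file_info):
        ...

for backend in (CompatibleBackend(), LegacyBackend()):
    ok = "federation" in inspect.signature(backend.fetch).parameters
    print(type(backend).__name__, "compatible" if ok else "incompatible")
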
|
||||
|
||||
servletclass(
|
||||
hs=hs,
|
||||
authenticator=authenticator,
|
||||
|
||||
@@ -180,7 +180,11 @@ def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str, Optional[str
|
||||
"""
|
||||
try:
|
||||
header_str = header_bytes.decode("utf-8")
|
||||
params = re.split(" +", header_str)[1].split(",")
|
||||
space_or_tab = "[ \t]"
|
||||
params = re.split(
|
||||
rf"{space_or_tab}*,{space_or_tab}*",
|
||||
re.split(r"^X-Matrix +", header_str, maxsplit=1)[1],
|
||||
)
|
||||
param_dict: Dict[str, str] = {
|
||||
k.lower(): v for k, v in [param.split("=", maxsplit=1) for param in params]
|
||||
}
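
The rewritten splitting above tolerates tabs and optional whitespace around the commas separating X-Matrix parameters. A standalone sketch of the same regexes applied to an example header value (origin, key ID and signature below are made up; any later quote handling in the real code is not shown here):

# Standalone sketch of the header parsing shown above (example values only).
import re

header_str = 'X-Matrix origin=origin.example.com,\t key="ed25519:1", sig="ABCDEF..."'

space_or_tab = "[ \t]"
params = re.split(
    rf"{space_or_tab}*,{space_or_tab}*",
    re.split(r"^X-Matrix +", header_str, maxsplit=1)[1],
)
param_dict = {
    k.lower(): v for k, v in [param.split("=", maxsplit=1) for param in params]
}
print(param_dict)
# {'origin': 'origin.example.com', 'key': '"ed25519:1"', 'sig': '"ABCDEF..."'}
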
|
||||
@@ -356,13 +360,29 @@ class BaseFederationServlet:
|
||||
"request"
|
||||
)
|
||||
return None
|
||||
if (
|
||||
func.__self__.__class__.__name__ # type: ignore
|
||||
== "FederationUnstableMediaDownloadServlet"
|
||||
):
|
||||
response = await func(
|
||||
origin, content, request, *args, **kwargs
|
||||
)
|
||||
else:
|
||||
response = await func(
|
||||
origin, content, request.args, *args, **kwargs
|
||||
)
|
||||
else:
|
||||
if (
|
||||
func.__self__.__class__.__name__ # type: ignore
|
||||
== "FederationUnstableMediaDownloadServlet"
|
||||
):
|
||||
response = await func(
|
||||
origin, content, request, *args, **kwargs
|
||||
)
|
||||
else:
|
||||
response = await func(
|
||||
origin, content, request.args, *args, **kwargs
|
||||
)
|
||||
else:
|
||||
response = await func(
|
||||
origin, content, request.args, *args, **kwargs
|
||||
)
|
||||
finally:
|
||||
# if we used the origin's context as the parent, add a new span using
|
||||
# the servlet span as a parent, so that we have a link
|
||||
|
||||
@@ -44,10 +44,13 @@ from synapse.federation.transport.server._base import (
|
||||
)
|
||||
from synapse.http.servlet import (
|
||||
parse_boolean_from_args,
|
||||
parse_integer,
|
||||
parse_integer_from_args,
|
||||
parse_string_from_args,
|
||||
parse_strings_from_args,
|
||||
)
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util import SYNAPSE_VERSION
|
||||
from synapse.util.ratelimitutils import FederationRateLimiter
|
||||
@@ -787,6 +790,43 @@ class FederationAccountStatusServlet(BaseFederationServerServlet):
|
||||
return 200, {"account_statuses": statuses, "failures": failures}
|
||||
|
||||
|
||||
class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
|
||||
"""
|
||||
Implementation of new federation media `/download` endpoint outlined in MSC3916. Returns
|
||||
a multipart/form-data response consisting of a JSON object and the requested media
|
||||
item. This endpoint only returns local media.
|
||||
"""
|
||||
|
||||
PATH = "/media/download/(?P<media_id>[^/]*)"
|
||||
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3916"
|
||||
RATELIMIT = True
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
hs: "HomeServer",
|
||||
ratelimiter: FederationRateLimiter,
|
||||
authenticator: Authenticator,
|
||||
server_name: str,
|
||||
):
|
||||
super().__init__(hs, authenticator, ratelimiter, server_name)
|
||||
self.media_repo = self.hs.get_media_repository()
|
||||
|
||||
async def on_GET(
|
||||
self,
|
||||
origin: Optional[str],
|
||||
content: Literal[None],
|
||||
request: SynapseRequest,
|
||||
media_id: str,
|
||||
) -> None:
|
||||
max_timeout_ms = parse_integer(
|
||||
request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
|
||||
)
|
||||
max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
|
||||
await self.media_repo.get_local_media(
|
||||
request, media_id, None, max_timeout_ms, federation=True
|
||||
)
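
Putting PREFIX and PATH together, a request to this servlet would look roughly like the sketch below, assuming `FEDERATION_UNSTABLE_PREFIX` resolves to `/_matrix/federation/unstable` (the media ID and timeout value are made up):

# Illustrative request path only (hypothetical media ID); the servlet above
# combines PREFIX + PATH and reads an optional timeout_ms query parameter.
FEDERATION_UNSTABLE_PREFIX = "/_matrix/federation/unstable"
media_id = "abcDEF123"
path = f"{FEDERATION_UNSTABLE_PREFIX}/org.matrix.msc3916/media/download/{media_id}"
print(f"GET {path}?timeout_ms=20000")
# GET /_matrix/federation/unstable/org.matrix.msc3916/media/download/abcDEF123?timeout_ms=20000
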
|
||||
|
||||
|
||||
FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
|
||||
FederationSendServlet,
|
||||
FederationEventServlet,
|
||||
@@ -818,4 +858,5 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
|
||||
FederationV1SendKnockServlet,
|
||||
FederationMakeKnockServlet,
|
||||
FederationAccountStatusServlet,
|
||||
FederationUnstableMediaDownloadServlet,
|
||||
)
|
||||
|
||||
@@ -42,6 +42,7 @@ class AdminHandler:
|
||||
self._device_handler = hs.get_device_handler()
|
||||
self._storage_controllers = hs.get_storage_controllers()
|
||||
self._state_storage_controller = self._storage_controllers.state
|
||||
self._hs_config = hs.config
|
||||
self._msc3866_enabled = hs.config.experimental.msc3866.enabled
|
||||
|
||||
async def get_whois(self, user: UserID) -> JsonMapping:
|
||||
@@ -125,13 +126,7 @@ class AdminHandler:
|
||||
# Get all rooms the user is in or has been in
|
||||
rooms = await self._store.get_rooms_for_local_user_where_membership_is(
|
||||
user_id,
|
||||
membership_list=(
|
||||
Membership.JOIN,
|
||||
Membership.LEAVE,
|
||||
Membership.BAN,
|
||||
Membership.INVITE,
|
||||
Membership.KNOCK,
|
||||
),
|
||||
membership_list=Membership.LIST,
|
||||
)
|
||||
|
||||
# We only try and fetch events for rooms the user has been in. If
|
||||
@@ -178,7 +173,7 @@ class AdminHandler:
|
||||
if room.membership == Membership.JOIN:
|
||||
stream_ordering = self._store.get_room_max_stream_ordering()
|
||||
else:
|
||||
stream_ordering = room.stream_ordering
|
||||
stream_ordering = room.event_pos.stream
|
||||
|
||||
from_key = RoomStreamToken(topological=0, stream=0)
|
||||
to_key = RoomStreamToken(stream=stream_ordering)
|
||||
@@ -217,7 +212,10 @@ class AdminHandler:
|
||||
)
|
||||
|
||||
events = await filter_events_for_client(
|
||||
self._storage_controllers, user_id, events
|
||||
self._storage_controllers,
|
||||
user_id,
|
||||
events,
|
||||
msc4115_membership_on_events=self._hs_config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
writer.write_events(room_id, events)
|
||||
|
||||
@@ -78,6 +78,8 @@ class CasHandler:
|
||||
self._cas_displayname_attribute = hs.config.cas.cas_displayname_attribute
|
||||
self._cas_required_attributes = hs.config.cas.cas_required_attributes
|
||||
self._cas_enable_registration = hs.config.cas.cas_enable_registration
|
||||
self._cas_allow_numeric_ids = hs.config.cas.cas_allow_numeric_ids
|
||||
self._cas_numeric_ids_prefix = hs.config.cas.cas_numeric_ids_prefix
|
||||
|
||||
self._http_client = hs.get_proxied_http_client()
|
||||
|
||||
@@ -188,6 +190,9 @@ class CasHandler:
|
||||
for child in root[0]:
|
||||
if child.tag.endswith("user"):
|
||||
user = child.text
|
||||
# If numeric user IDs are allowed and the username is numeric, add the prefix so Synapse can handle it
|
||||
if self._cas_allow_numeric_ids and user is not None and user.isdigit():
|
||||
user = f"{self._cas_numeric_ids_prefix}{user}"
|
||||
if child.tag.endswith("attributes"):
|
||||
for attribute in child:
|
||||
# ElementTree library expands the namespace in
|
||||
|
||||
@@ -159,20 +159,32 @@ class DeviceWorkerHandler:
|
||||
|
||||
@cancellable
|
||||
async def get_device_changes_in_shared_rooms(
|
||||
self, user_id: str, room_ids: StrCollection, from_token: StreamToken
|
||||
self,
|
||||
user_id: str,
|
||||
room_ids: StrCollection,
|
||||
from_token: StreamToken,
|
||||
now_token: Optional[StreamToken] = None,
|
||||
) -> Set[str]:
|
||||
"""Get the set of users whose devices have changed who share a room with
|
||||
the given user.
|
||||
"""
|
||||
now_device_lists_key = self.store.get_device_stream_token()
|
||||
if now_token:
|
||||
now_device_lists_key = now_token.device_list_key
|
||||
|
||||
changed_users = await self.store.get_device_list_changes_in_rooms(
|
||||
room_ids, from_token.device_list_key
|
||||
room_ids,
|
||||
from_token.device_list_key,
|
||||
now_device_lists_key,
|
||||
)
|
||||
|
||||
if changed_users is not None:
|
||||
# We also check if the given user has changed their device. If
|
||||
# they're in no rooms then the above query won't include them.
|
||||
changed = await self.store.get_users_whose_devices_changed(
|
||||
from_token.device_list_key, [user_id]
|
||||
from_token.device_list_key,
|
||||
[user_id],
|
||||
to_key=now_device_lists_key,
|
||||
)
|
||||
changed_users.update(changed)
|
||||
return changed_users
|
||||
@@ -190,7 +202,9 @@ class DeviceWorkerHandler:
|
||||
tracked_users.add(user_id)
|
||||
|
||||
changed = await self.store.get_users_whose_devices_changed(
|
||||
from_token.device_list_key, tracked_users
|
||||
from_token.device_list_key,
|
||||
tracked_users,
|
||||
to_key=now_device_lists_key,
|
||||
)
|
||||
|
||||
return changed
|
||||
@@ -892,6 +906,13 @@ class DeviceHandler(DeviceWorkerHandler):
|
||||
context=opentracing_context,
|
||||
)
|
||||
|
||||
await self.store.mark_redundant_device_lists_pokes(
|
||||
user_id=user_id,
|
||||
device_id=device_id,
|
||||
room_id=room_id,
|
||||
converted_upto_stream_id=stream_id,
|
||||
)
|
||||
|
||||
# Notify replication that we've updated the device list stream.
|
||||
self.notifier.notify_replication()
|
||||
|
||||
|
||||
@@ -104,6 +104,9 @@ class DeviceMessageHandler:
|
||||
"""
|
||||
Handle receiving to-device messages from remote homeservers.
|
||||
|
||||
Note that any errors thrown from this method will cause the federation /send
|
||||
request to receive an error response.
|
||||
|
||||
Args:
|
||||
origin: The remote homeserver.
|
||||
content: The JSON dictionary containing the to-device messages.
|
||||
@@ -233,6 +236,13 @@ class DeviceMessageHandler:
|
||||
local_messages = {}
|
||||
remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
|
||||
for user_id, by_device in messages.items():
|
||||
if not UserID.is_valid(user_id):
|
||||
logger.warning(
|
||||
"Ignoring attempt to send device message to invalid user: %r",
|
||||
user_id,
|
||||
)
|
||||
continue
|
||||
|
||||
# add an opentracing log entry for each message
|
||||
for device_id, message_content in by_device.items():
|
||||
log_kv(
|
||||
|
||||
@@ -35,6 +35,7 @@ from synapse.api.errors import CodeMessageException, Codes, NotFoundError, Synap
|
||||
from synapse.handlers.device import DeviceHandler
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
|
||||
from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet
|
||||
from synapse.types import (
|
||||
JsonDict,
|
||||
JsonMapping,
|
||||
@@ -45,7 +46,10 @@ from synapse.types import (
|
||||
from synapse.util import json_decoder
|
||||
from synapse.util.async_helpers import Linearizer, concurrently_execute
|
||||
from synapse.util.cancellation import cancellable
|
||||
from synapse.util.retryutils import NotRetryingDestination
|
||||
from synapse.util.retryutils import (
|
||||
NotRetryingDestination,
|
||||
filter_destinations_by_retry_limiter,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
@@ -53,6 +57,9 @@ if TYPE_CHECKING:
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
ONE_TIME_KEY_UPLOAD = "one_time_key_upload_lock"
|
||||
|
||||
|
||||
class E2eKeysHandler:
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.config = hs.config
|
||||
@@ -62,6 +69,7 @@ class E2eKeysHandler:
|
||||
self._appservice_handler = hs.get_application_service_handler()
|
||||
self.is_mine = hs.is_mine
|
||||
self.clock = hs.get_clock()
|
||||
self._worker_lock_handler = hs.get_worker_locks_handler()
|
||||
|
||||
federation_registry = hs.get_federation_registry()
|
||||
|
||||
@@ -82,6 +90,12 @@ class E2eKeysHandler:
|
||||
edu_updater.incoming_signing_key_update,
|
||||
)
|
||||
|
||||
self.device_key_uploader = self.upload_device_keys_for_user
|
||||
else:
|
||||
self.device_key_uploader = (
|
||||
ReplicationUploadKeysForUserRestServlet.make_client(hs)
|
||||
)
|
||||
|
||||
# doesn't really work as part of the generic query API, because the
|
||||
# query request requires an object POST, but we abuse the
|
||||
# "query handler" interface.
|
||||
@@ -145,6 +159,11 @@ class E2eKeysHandler:
|
||||
remote_queries = {}
|
||||
|
||||
for user_id, device_ids in device_keys_query.items():
|
||||
if not UserID.is_valid(user_id):
|
||||
# Ignore invalid user IDs, which is the same behaviour as if
|
||||
# the user existed but had no keys.
|
||||
continue
|
||||
|
||||
# we use UserID.from_string to catch invalid user ids
|
||||
if self.is_mine(UserID.from_string(user_id)):
|
||||
local_query[user_id] = device_ids
|
||||
@@ -259,10 +278,8 @@ class E2eKeysHandler:
|
||||
"%d destinations to query devices for", len(remote_queries_not_in_cache)
|
||||
)
|
||||
|
||||
async def _query(
|
||||
destination_queries: Tuple[str, Dict[str, Iterable[str]]]
|
||||
) -> None:
|
||||
destination, queries = destination_queries
|
||||
async def _query(destination: str) -> None:
|
||||
queries = remote_queries_not_in_cache[destination]
|
||||
return await self._query_devices_for_destination(
|
||||
results,
|
||||
cross_signing_keys,
|
||||
@@ -272,9 +289,20 @@ class E2eKeysHandler:
|
||||
timeout,
|
||||
)
|
||||
|
||||
# Only try and fetch keys for destinations that are not marked as
|
||||
# down.
|
||||
filtered_destinations = await filter_destinations_by_retry_limiter(
|
||||
remote_queries_not_in_cache.keys(),
|
||||
self.clock,
|
||||
self.store,
|
||||
# Let's give an arbitrary grace period for those hosts that are
|
||||
# only recently down
|
||||
retry_due_within_ms=60 * 1000,
|
||||
)
|
||||
|
||||
await concurrently_execute(
|
||||
_query,
|
||||
remote_queries_not_in_cache.items(),
|
||||
filtered_destinations,
|
||||
10,
|
||||
delay_cancellation=True,
|
||||
)
|
||||
@@ -775,36 +803,17 @@ class E2eKeysHandler:
|
||||
"one_time_keys": A mapping from algorithm to number of keys for that
|
||||
algorithm, including those previously persisted.
|
||||
"""
|
||||
# This can only be called from the main process.
|
||||
assert isinstance(self.device_handler, DeviceHandler)
|
||||
|
||||
time_now = self.clock.time_msec()
|
||||
|
||||
# TODO: Validate the JSON to make sure it has the right keys.
|
||||
device_keys = keys.get("device_keys", None)
|
||||
if device_keys:
|
||||
logger.info(
|
||||
"Updating device_keys for device %r for user %s at %d",
|
||||
device_id,
|
||||
user_id,
|
||||
time_now,
|
||||
await self.device_key_uploader(
|
||||
user_id=user_id,
|
||||
device_id=device_id,
|
||||
keys={"device_keys": device_keys},
|
||||
)
|
||||
log_kv(
|
||||
{
|
||||
"message": "Updating device_keys for user.",
|
||||
"user_id": user_id,
|
||||
"device_id": device_id,
|
||||
}
|
||||
)
|
||||
# TODO: Sign the JSON with the server key
|
||||
changed = await self.store.set_e2e_device_keys(
|
||||
user_id, device_id, time_now, device_keys
|
||||
)
|
||||
if changed:
|
||||
# Only notify about device updates *if* the keys actually changed
|
||||
await self.device_handler.notify_device_update(user_id, [device_id])
|
||||
else:
|
||||
log_kv({"message": "Not updating device_keys for user", "user_id": user_id})
|
||||
|
||||
one_time_keys = keys.get("one_time_keys", None)
|
||||
if one_time_keys:
|
||||
log_kv(
|
||||
@@ -840,6 +849,49 @@ class E2eKeysHandler:
|
||||
{"message": "Did not update fallback_keys", "reason": "no keys given"}
|
||||
)
|
||||
|
||||
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
|
||||
|
||||
set_tag("one_time_key_counts", str(result))
|
||||
return {"one_time_key_counts": result}
|
||||
|
||||
@tag_args
|
||||
async def upload_device_keys_for_user(
|
||||
self, user_id: str, device_id: str, keys: JsonDict
|
||||
) -> None:
|
||||
"""
|
||||
Args:
|
||||
user_id: user whose keys are being uploaded.
|
||||
device_id: device whose keys are being uploaded.
|
||||
keys: a dict containing the `device_keys` from a /keys/upload request.
|
||||
|
||||
"""
|
||||
# This can only be called from the main process.
|
||||
assert isinstance(self.device_handler, DeviceHandler)
|
||||
|
||||
time_now = self.clock.time_msec()
|
||||
|
||||
device_keys = keys["device_keys"]
|
||||
logger.info(
|
||||
"Updating device_keys for device %r for user %s at %d",
|
||||
device_id,
|
||||
user_id,
|
||||
time_now,
|
||||
)
|
||||
log_kv(
|
||||
{
|
||||
"message": "Updating device_keys for user.",
|
||||
"user_id": user_id,
|
||||
"device_id": device_id,
|
||||
}
|
||||
)
|
||||
# TODO: Sign the JSON with the server key
|
||||
changed = await self.store.set_e2e_device_keys(
|
||||
user_id, device_id, time_now, device_keys
|
||||
)
|
||||
if changed:
|
||||
# Only notify about device updates *if* the keys actually changed
|
||||
await self.device_handler.notify_device_update(user_id, [device_id])
|
||||
|
||||
# the device should have been registered already, but it may have been
|
||||
# deleted due to a race with a DELETE request. Or we may be using an
|
||||
# old access_token without an associated device_id. Either way, we
|
||||
@@ -847,53 +899,56 @@ class E2eKeysHandler:
|
||||
# keys without a corresponding device.
|
||||
await self.device_handler.check_device_registered(user_id, device_id)
|
||||
|
||||
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
|
||||
|
||||
set_tag("one_time_key_counts", str(result))
|
||||
return {"one_time_key_counts": result}
|
||||
|
||||
async def _upload_one_time_keys_for_user(
|
||||
self, user_id: str, device_id: str, time_now: int, one_time_keys: JsonDict
|
||||
) -> None:
|
||||
logger.info(
|
||||
"Adding one_time_keys %r for device %r for user %r at %d",
|
||||
one_time_keys.keys(),
|
||||
device_id,
|
||||
user_id,
|
||||
time_now,
|
||||
)
|
||||
# We take out a lock so that we don't have to worry about a client
|
||||
# sending duplicate requests.
|
||||
lock_key = f"{user_id}_{device_id}"
|
||||
async with self._worker_lock_handler.acquire_lock(
|
||||
ONE_TIME_KEY_UPLOAD, lock_key
|
||||
):
|
||||
logger.info(
|
||||
"Adding one_time_keys %r for device %r for user %r at %d",
|
||||
one_time_keys.keys(),
|
||||
device_id,
|
||||
user_id,
|
||||
time_now,
|
||||
)
|
||||
|
||||
# make a list of (alg, id, key) tuples
|
||||
key_list = []
|
||||
for key_id, key_obj in one_time_keys.items():
|
||||
algorithm, key_id = key_id.split(":")
|
||||
key_list.append((algorithm, key_id, key_obj))
|
||||
# make a list of (alg, id, key) tuples
|
||||
key_list = []
|
||||
for key_id, key_obj in one_time_keys.items():
|
||||
algorithm, key_id = key_id.split(":")
|
||||
key_list.append((algorithm, key_id, key_obj))
|
||||
|
||||
# First we check if we have already persisted any of the keys.
|
||||
existing_key_map = await self.store.get_e2e_one_time_keys(
|
||||
user_id, device_id, [k_id for _, k_id, _ in key_list]
|
||||
)
|
||||
# First we check if we have already persisted any of the keys.
|
||||
existing_key_map = await self.store.get_e2e_one_time_keys(
|
||||
user_id, device_id, [k_id for _, k_id, _ in key_list]
|
||||
)
|
||||
|
||||
new_keys = [] # Keys that we need to insert. (alg, id, json) tuples.
|
||||
for algorithm, key_id, key in key_list:
|
||||
ex_json = existing_key_map.get((algorithm, key_id), None)
|
||||
if ex_json:
|
||||
if not _one_time_keys_match(ex_json, key):
|
||||
raise SynapseError(
|
||||
400,
|
||||
(
|
||||
"One time key %s:%s already exists. "
|
||||
"Old key: %s; new key: %r"
|
||||
new_keys = [] # Keys that we need to insert. (alg, id, json) tuples.
|
||||
for algorithm, key_id, key in key_list:
|
||||
ex_json = existing_key_map.get((algorithm, key_id), None)
|
||||
if ex_json:
|
||||
if not _one_time_keys_match(ex_json, key):
|
||||
raise SynapseError(
|
||||
400,
|
||||
(
|
||||
"One time key %s:%s already exists. "
|
||||
"Old key: %s; new key: %r"
|
||||
)
|
||||
% (algorithm, key_id, ex_json, key),
|
||||
)
|
||||
% (algorithm, key_id, ex_json, key),
|
||||
else:
|
||||
new_keys.append(
|
||||
(algorithm, key_id, encode_canonical_json(key).decode("ascii"))
|
||||
)
|
||||
else:
|
||||
new_keys.append(
|
||||
(algorithm, key_id, encode_canonical_json(key).decode("ascii"))
|
||||
)
|
||||
|
||||
log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
|
||||
await self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys)
|
||||
log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
|
||||
await self.store.add_e2e_one_time_keys(
|
||||
user_id, device_id, time_now, new_keys
|
||||
)
|
||||
|
||||
async def upload_signing_keys_for_user(
|
||||
self, user_id: str, keys: JsonDict
|
||||
|
||||
@@ -247,6 +247,12 @@ class E2eRoomKeysHandler:
|
||||
if current_room_key:
|
||||
if self._should_replace_room_key(current_room_key, room_key):
|
||||
log_kv({"message": "Replacing room key."})
|
||||
logger.debug(
|
||||
"Replacing room key. room=%s session=%s user=%s",
|
||||
room_id,
|
||||
session_id,
|
||||
user_id,
|
||||
)
|
||||
# updates are done one at a time in the DB, so send
|
||||
# updates right away rather than batching them up,
|
||||
# like we do with the inserts
|
||||
@@ -256,6 +262,12 @@ class E2eRoomKeysHandler:
|
||||
changed = True
|
||||
else:
|
||||
log_kv({"message": "Not replacing room_key."})
|
||||
logger.debug(
|
||||
"Not replacing room key. room=%s session=%s user=%s",
|
||||
room_id,
|
||||
session_id,
|
||||
user_id,
|
||||
)
|
||||
else:
|
||||
log_kv(
|
||||
{
|
||||
@@ -265,6 +277,12 @@ class E2eRoomKeysHandler:
|
||||
}
|
||||
)
|
||||
log_kv({"message": "Replacing room key."})
|
||||
logger.debug(
|
||||
"Inserting new room key. room=%s session=%s user=%s",
|
||||
room_id,
|
||||
session_id,
|
||||
user_id,
|
||||
)
|
||||
to_insert.append((room_id, session_id, room_key))
|
||||
changed = True
|
||||
|
||||
|
||||
@@ -148,6 +148,7 @@ class EventHandler:
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.store = hs.get_datastores().main
|
||||
self._storage_controllers = hs.get_storage_controllers()
|
||||
self._config = hs.config
|
||||
|
||||
async def get_event(
|
||||
self,
|
||||
@@ -189,7 +190,11 @@ class EventHandler:
|
||||
is_peeking = not is_user_in_room
|
||||
|
||||
filtered = await filter_events_for_client(
|
||||
self._storage_controllers, user.to_string(), [event], is_peeking=is_peeking
|
||||
self._storage_controllers,
|
||||
user.to_string(),
|
||||
[event],
|
||||
is_peeking=is_peeking,
|
||||
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
if not filtered:
|
||||
|
||||
@@ -199,7 +199,7 @@ class InitialSyncHandler:
|
||||
)
|
||||
elif event.membership == Membership.LEAVE:
|
||||
room_end_token = RoomStreamToken(
|
||||
stream=event.stream_ordering,
|
||||
stream=event.event_pos.stream,
|
||||
)
|
||||
deferred_room_state = run_in_background(
|
||||
self._state_storage_controller.get_state_for_events,
|
||||
@@ -221,7 +221,10 @@ class InitialSyncHandler:
|
||||
).addErrback(unwrapFirstError)
|
||||
|
||||
messages = await filter_events_for_client(
|
||||
self._storage_controllers, user_id, messages
|
||||
self._storage_controllers,
|
||||
user_id,
|
||||
messages,
|
||||
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
|
||||
@@ -380,6 +383,7 @@ class InitialSyncHandler:
|
||||
requester.user.to_string(),
|
||||
messages,
|
||||
is_peeking=is_peeking,
|
||||
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token)
|
||||
@@ -494,6 +498,7 @@ class InitialSyncHandler:
|
||||
requester.user.to_string(),
|
||||
messages,
|
||||
is_peeking=is_peeking,
|
||||
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
|
||||
|
||||
@@ -496,13 +496,6 @@ class EventCreationHandler:
|
||||
|
||||
self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state
|
||||
|
||||
self.membership_types_to_include_profile_data_in = {
|
||||
Membership.JOIN,
|
||||
Membership.KNOCK,
|
||||
}
|
||||
if self.hs.config.server.include_profile_data_on_invite:
|
||||
self.membership_types_to_include_profile_data_in.add(Membership.INVITE)
|
||||
|
||||
self.send_event = ReplicationSendEventRestServlet.make_client(hs)
|
||||
self.send_events = ReplicationSendEventsRestServlet.make_client(hs)
|
||||
|
||||
@@ -594,8 +587,6 @@ class EventCreationHandler:
|
||||
Creates an FrozenEvent object, filling out auth_events, prev_events,
|
||||
etc.
|
||||
|
||||
Adds display names to Join membership events.
|
||||
|
||||
Args:
|
||||
requester
|
||||
event_dict: An entire event
|
||||
@@ -672,29 +663,6 @@ class EventCreationHandler:
|
||||
|
||||
self.validator.validate_builder(builder)
|
||||
|
||||
if builder.type == EventTypes.Member:
|
||||
membership = builder.content.get("membership", None)
|
||||
target = UserID.from_string(builder.state_key)
|
||||
|
||||
if membership in self.membership_types_to_include_profile_data_in:
|
||||
# If event doesn't include a display name, add one.
|
||||
profile = self.profile_handler
|
||||
content = builder.content
|
||||
|
||||
try:
|
||||
if "displayname" not in content:
|
||||
displayname = await profile.get_displayname(target)
|
||||
if displayname is not None:
|
||||
content["displayname"] = displayname
|
||||
if "avatar_url" not in content:
|
||||
avatar_url = await profile.get_avatar_url(target)
|
||||
if avatar_url is not None:
|
||||
content["avatar_url"] = avatar_url
|
||||
except Exception as e:
|
||||
logger.info(
|
||||
"Failed to get profile information for %r: %s", target, e
|
||||
)
|
||||
|
||||
is_exempt = await self._is_exempt_from_privacy_policy(builder, requester)
|
||||
if require_consent and not is_exempt:
|
||||
await self.assert_accepted_privacy_policy(requester)
|
||||
|
||||
@@ -27,7 +27,6 @@ from synapse.api.constants import Direction, EventTypes, Membership
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.api.filtering import Filter
|
||||
from synapse.events.utils import SerializeEventConfig
|
||||
from synapse.handlers.room import ShutdownRoomParams, ShutdownRoomResponse
|
||||
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
|
||||
from synapse.logging.opentracing import trace
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
@@ -41,6 +40,7 @@ from synapse.types import (
|
||||
StreamKeyType,
|
||||
TaskStatus,
|
||||
)
|
||||
from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
|
||||
from synapse.types.state import StateFilter
|
||||
from synapse.util.async_helpers import ReadWriteLock
|
||||
from synapse.visibility import filter_events_for_client
|
||||
@@ -623,6 +623,7 @@ class PaginationHandler:
|
||||
user_id,
|
||||
events,
|
||||
is_peeking=(member_event_id is None),
|
||||
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
# if after the filter applied there are no more events
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
#
|
||||
import logging
|
||||
import random
|
||||
from typing import TYPE_CHECKING, Optional, Union
|
||||
from typing import TYPE_CHECKING, List, Optional, Union
|
||||
|
||||
from synapse.api.errors import (
|
||||
AuthError,
|
||||
@@ -64,8 +64,10 @@ class ProfileHandler:
|
||||
self.user_directory_handler = hs.get_user_directory_handler()
|
||||
self.request_ratelimiter = hs.get_request_ratelimiter()
|
||||
|
||||
self.max_avatar_size = hs.config.server.max_avatar_size
|
||||
self.allowed_avatar_mimetypes = hs.config.server.allowed_avatar_mimetypes
|
||||
self.max_avatar_size: Optional[int] = hs.config.server.max_avatar_size
|
||||
self.allowed_avatar_mimetypes: Optional[List[str]] = (
|
||||
hs.config.server.allowed_avatar_mimetypes
|
||||
)
|
||||
|
||||
self._is_mine_server_name = hs.is_mine_server_name
|
||||
|
||||
@@ -337,6 +339,12 @@ class ProfileHandler:
|
||||
return False
|
||||
|
||||
if self.max_avatar_size:
|
||||
if media_info.media_length is None:
|
||||
logger.warning(
|
||||
"Forbidding avatar change to %s: unknown media size",
|
||||
mxc,
|
||||
)
|
||||
return False
|
||||
# Ensure avatar does not exceed max allowed avatar size
|
||||
if media_info.media_length > self.max_avatar_size:
|
||||
logger.warning(
|
||||
|
||||
@@ -590,7 +590,7 @@ class RegistrationHandler:
|
||||
# moving away from bare excepts is a good thing to do.
|
||||
logger.error("Failed to join new user to %r: %r", r, e)
|
||||
except Exception as e:
|
||||
logger.error("Failed to join new user to %r: %r", r, e)
|
||||
logger.error("Failed to join new user to %r: %r", r, e, exc_info=True)
|
||||
|
||||
async def _auto_join_rooms(self, user_id: str) -> None:
|
||||
"""Automatically joins users to auto join rooms - creating the room in the first place
|
||||
|
||||
@@ -95,6 +95,7 @@ class RelationsHandler:
|
||||
self._event_handler = hs.get_event_handler()
|
||||
self._event_serializer = hs.get_event_client_serializer()
|
||||
self._event_creation_handler = hs.get_event_creation_handler()
|
||||
self._config = hs.config
|
||||
|
||||
async def get_relations(
|
||||
self,
|
||||
@@ -163,6 +164,7 @@ class RelationsHandler:
|
||||
user_id,
|
||||
events,
|
||||
is_peeking=(member_event_id is None),
|
||||
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
# The relations returned for the requested event do include their
|
||||
@@ -391,9 +393,9 @@ class RelationsHandler:
|
||||
|
||||
# Attempt to find another event to use as the latest event.
|
||||
potential_events, _ = await self._main_store.get_relations_for_event(
|
||||
room_id,
|
||||
event_id,
|
||||
event,
|
||||
room_id,
|
||||
RelationTypes.THREAD,
|
||||
direction=Direction.FORWARDS,
|
||||
)
|
||||
@@ -608,6 +610,7 @@ class RelationsHandler:
|
||||
user_id,
|
||||
events,
|
||||
is_peeking=(member_event_id is None),
|
||||
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
aggregations = await self.get_bundled_aggregations(
|
||||
|
||||
@@ -40,7 +40,6 @@ from typing import (
|
||||
)
|
||||
|
||||
import attr
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
import synapse.events.snapshot
|
||||
from synapse.api.constants import (
|
||||
@@ -88,6 +87,7 @@ from synapse.types import (
|
||||
UserID,
|
||||
create_requester,
|
||||
)
|
||||
from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
|
||||
from synapse.types.state import StateFilter
|
||||
from synapse.util import stringutils
|
||||
from synapse.util.caches.response_cache import ResponseCache
|
||||
@@ -1476,6 +1476,7 @@ class RoomContextHandler:
|
||||
user.to_string(),
|
||||
events,
|
||||
is_peeking=is_peeking,
|
||||
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
event = await self.store.get_event(
|
||||
@@ -1779,63 +1780,6 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
|
||||
return self.store.get_current_room_stream_token_for_room_id(room_id)
|
||||
|
||||
|
||||
class ShutdownRoomParams(TypedDict):
|
||||
"""
|
||||
Attributes:
|
||||
requester_user_id:
|
||||
User who requested the action. Will be recorded as putting the room on the
|
||||
blocking list.
|
||||
new_room_user_id:
|
||||
If set, a new room will be created with this user ID
|
||||
as the creator and admin, and all users in the old room will be
|
||||
moved into that room. If not set, no new room will be created
|
||||
and the users will just be removed from the old room.
|
||||
new_room_name:
|
||||
A string representing the name of the room that new users will
|
||||
be invited to. Defaults to `Content Violation Notification`
|
||||
message:
|
||||
A string containing the first message that will be sent as
|
||||
`new_room_user_id` in the new room. Ideally this will clearly
|
||||
convey why the original room was shut down.
|
||||
Defaults to `Sharing illegal content on this server is not
|
||||
permitted and rooms in violation will be blocked.`
|
||||
block:
|
||||
If set to `true`, this room will be added to a blocking list,
|
||||
preventing future attempts to join the room. Defaults to `false`.
|
||||
purge:
|
||||
If set to `true`, purge the given room from the database.
|
||||
force_purge:
|
||||
If set to `true`, the room will be purged from database
|
||||
even if there are still users joined to the room.
|
||||
"""
|
||||
|
||||
requester_user_id: Optional[str]
|
||||
new_room_user_id: Optional[str]
|
||||
new_room_name: Optional[str]
|
||||
message: Optional[str]
|
||||
block: bool
|
||||
purge: bool
|
||||
force_purge: bool
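
For orientation, a populated `ShutdownRoomParams` might look like the illustrative dict below (all identifiers and message strings are made up, apart from the defaults documented above):

# Example ShutdownRoomParams payload (illustrative values only).
example_params = {
    "requester_user_id": "@admin:example.org",
    "new_room_user_id": "@abuse:example.org",
    "new_room_name": "Content Violation Notification",
    "message": "This room has been shut down.",
    "block": True,
    "purge": True,
    "force_purge": False,
}
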
|
||||
|
||||
|
||||
class ShutdownRoomResponse(TypedDict):
|
||||
"""
|
||||
Attributes:
|
||||
kicked_users: An array of users (`user_id`) that were kicked.
|
||||
failed_to_kick_users:
|
||||
An array of users (`user_id`) that were not kicked.
|
||||
local_aliases:
|
||||
An array of strings representing the local aliases that were
|
||||
migrated from the old room to the new.
|
||||
new_room_id: A string representing the room ID of the new room.
|
||||
"""
|
||||
|
||||
kicked_users: List[str]
|
||||
failed_to_kick_users: List[str]
|
||||
local_aliases: List[str]
|
||||
new_room_id: Optional[str]
|
||||
|
||||
|
||||
class RoomShutdownHandler:
|
||||
DEFAULT_MESSAGE = (
|
||||
"Sharing illegal content on this server is not permitted and rooms in"
|
||||
|
||||
@@ -106,6 +106,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
self.event_auth_handler = hs.get_event_auth_handler()
|
||||
self._worker_lock_handler = hs.get_worker_locks_handler()
|
||||
|
||||
self._membership_types_to_include_profile_data_in = {
|
||||
Membership.JOIN,
|
||||
Membership.KNOCK,
|
||||
}
|
||||
if self.hs.config.server.include_profile_data_on_invite:
|
||||
self._membership_types_to_include_profile_data_in.add(Membership.INVITE)
|
||||
|
||||
self.member_linearizer: Linearizer = Linearizer(name="member")
|
||||
self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter")
|
||||
|
||||
@@ -752,12 +759,41 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
and requester.user.to_string() == self._server_notices_mxid
|
||||
)
|
||||
|
||||
requester_suspended = await self.store.get_user_suspended_status(
|
||||
requester.user.to_string()
|
||||
)
|
||||
if action == Membership.INVITE and requester_suspended:
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Sending invites while account is suspended is not allowed.",
|
||||
Codes.USER_ACCOUNT_SUSPENDED,
|
||||
)
|
||||
|
||||
if target.to_string() != requester.user.to_string():
|
||||
target_suspended = await self.store.get_user_suspended_status(
|
||||
target.to_string()
|
||||
)
|
||||
else:
|
||||
target_suspended = requester_suspended
|
||||
|
||||
if action == Membership.JOIN and target_suspended:
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Joining rooms while account is suspended is not allowed.",
|
||||
Codes.USER_ACCOUNT_SUSPENDED,
|
||||
)
|
||||
if action == Membership.KNOCK and target_suspended:
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Knocking on rooms while account is suspended is not allowed.",
|
||||
Codes.USER_ACCOUNT_SUSPENDED,
|
||||
)
|
||||
|
||||
if (
|
||||
not self.allow_per_room_profiles and not is_requester_server_notices_user
|
||||
) or requester.shadow_banned:
|
||||
# Strip profile data, knowing that new profile data will be added to the
|
||||
# event's content in event_creation_handler.create_event() using the target's
|
||||
# global profile.
|
||||
# Strip profile data, knowing that new profile data will be added to
|
||||
# the event's content below using the target's global profile.
|
||||
content.pop("displayname", None)
|
||||
content.pop("avatar_url", None)
|
||||
|
||||
@@ -793,6 +829,29 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
if action in ["kick", "unban"]:
|
||||
effective_membership_state = "leave"
|
||||
|
||||
if effective_membership_state not in Membership.LIST:
|
||||
raise SynapseError(400, "Invalid membership key")
|
||||
|
||||
# Add profile data for joins etc, if no per-room profile.
|
||||
if (
|
||||
effective_membership_state
|
||||
in self._membership_types_to_include_profile_data_in
|
||||
):
|
||||
# If event doesn't include a display name, add one.
|
||||
profile = self.profile_handler
|
||||
|
||||
try:
|
||||
if "displayname" not in content:
|
||||
displayname = await profile.get_displayname(target)
|
||||
if displayname is not None:
|
||||
content["displayname"] = displayname
|
||||
if "avatar_url" not in content:
|
||||
avatar_url = await profile.get_avatar_url(target)
|
||||
if avatar_url is not None:
|
||||
content["avatar_url"] = avatar_url
|
||||
except Exception as e:
|
||||
logger.info("Failed to get profile information for %r: %s", target, e)
|
||||
|
||||
# if this is a join with a 3pid signature, we may need to turn a 3pid
|
||||
# invite into a normal invite before we can handle the join.
|
||||
if third_party_signed is not None:
|
||||
|
||||
@@ -480,7 +480,10 @@ class SearchHandler:
|
||||
filtered_events = await search_filter.filter([r["event"] for r in results])
|
||||
|
||||
events = await filter_events_for_client(
|
||||
self._storage_controllers, user.to_string(), filtered_events
|
||||
self._storage_controllers,
|
||||
user.to_string(),
|
||||
filtered_events,
|
||||
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
events.sort(key=lambda e: -rank_map[e.event_id])
|
||||
@@ -579,7 +582,10 @@ class SearchHandler:
|
||||
filtered_events = await search_filter.filter([r["event"] for r in results])
|
||||
|
||||
events = await filter_events_for_client(
|
||||
self._storage_controllers, user.to_string(), filtered_events
|
||||
self._storage_controllers,
|
||||
user.to_string(),
|
||||
filtered_events,
|
||||
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
room_events.extend(events)
|
||||
@@ -664,11 +670,17 @@ class SearchHandler:
|
||||
)
|
||||
|
||||
events_before = await filter_events_for_client(
|
||||
self._storage_controllers, user.to_string(), res.events_before
|
||||
self._storage_controllers,
|
||||
user.to_string(),
|
||||
res.events_before,
|
||||
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
events_after = await filter_events_for_client(
|
||||
self._storage_controllers, user.to_string(), res.events_after
|
||||
self._storage_controllers,
|
||||
user.to_string(),
|
||||
res.events_after,
|
||||
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
context: JsonDict = {
|
||||
|
||||
441 synapse/handlers/sliding_sync.py (new file)
@@ -0,0 +1,441 @@
|
||||
#
|
||||
# This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
#
|
||||
# Copyright (C) 2024 New Vector, Ltd
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# See the GNU Affero General Public License for more details:
|
||||
# <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
#
|
||||
# Originally licensed under the Apache License, Version 2.0:
|
||||
# <http://www.apache.org/licenses/LICENSE-2.0>.
|
||||
#
|
||||
# [This file includes modifications made by New Vector Limited]
|
||||
#
|
||||
#
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, AbstractSet, Dict, List, Optional
|
||||
|
||||
from immutabledict import immutabledict
|
||||
|
||||
from synapse.api.constants import Membership
|
||||
from synapse.events import EventBase
|
||||
from synapse.types import Requester, RoomStreamToken, StreamToken, UserID
|
||||
from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def filter_membership_for_sync(*, membership: str, user_id: str, sender: str) -> bool:
|
||||
"""
|
||||
Returns True if the membership event should be included in the sync response,
|
||||
otherwise False.
|
||||
|
||||
Args:
|
||||
membership: The membership state of the user in the room.
|
||||
user_id: The user ID that the membership applies to
|
||||
sender: The person who sent the membership event
|
||||
"""
|
||||
|
||||
# Everything except `Membership.LEAVE` because we want everything that's *still*
|
||||
# relevant to the user. There are a few more things to include in the sync response
|
||||
# (newly_left) but those are handled separately.
|
||||
#
|
||||
# This logic includes kicks (leave events where the sender is not the same user) and
|
||||
# can be read as "anything that isn't a self-leave (a leave sent by the user themselves)".
|
||||
return membership != Membership.LEAVE or sender != user_id
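
To make the predicate above concrete, the sketch below restates it with plain strings (`Membership.LEAVE` is the string "leave") and shows which memberships are kept: joins and kicks stay, self-leaves are dropped. The user IDs are made up.

# Sketch: what the predicate above keeps and drops (hypothetical user IDs).
def keep_for_sync(*, membership: str, user_id: str, sender: str) -> bool:
    return membership != "leave" or sender != user_id

me, other = "@me:example.org", "@other:example.org"
print(keep_for_sync(membership="join", user_id=me, sender=me))      # True
print(keep_for_sync(membership="leave", user_id=me, sender=other))  # True (a kick)
print(keep_for_sync(membership="leave", user_id=me, sender=me))     # False (self-leave)
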
|
||||
|
||||
|
||||
class SlidingSyncHandler:
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.clock = hs.get_clock()
|
||||
self.store = hs.get_datastores().main
|
||||
self.auth_blocking = hs.get_auth_blocking()
|
||||
self.notifier = hs.get_notifier()
|
||||
self.event_sources = hs.get_event_sources()
|
||||
self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
|
||||
|
||||
async def wait_for_sync_for_user(
|
||||
self,
|
||||
requester: Requester,
|
||||
sync_config: SlidingSyncConfig,
|
||||
from_token: Optional[StreamToken] = None,
|
||||
timeout_ms: int = 0,
|
||||
) -> SlidingSyncResult:
|
||||
"""Get the sync for a client if we have new data for it now. Otherwise
|
||||
wait for new data to arrive on the server. If the timeout expires, then
|
||||
return an empty sync result.
|
||||
"""
|
||||
# If the user is not part of the mau group, then check that limits have
|
||||
# not been exceeded (if not part of the group by this point, almost certain
|
||||
# auth_blocking will occur)
|
||||
await self.auth_blocking.check_auth_blocking(requester=requester)
|
||||
|
||||
# TODO: If the To-Device extension is enabled and we have a `from_token`, delete
|
||||
# any to-device messages before that token (since we now know that the device
|
||||
# has received them). (see sync v2 for how to do this)
|
||||
|
||||
# If we're working with a user-provided token, we need to make sure to wait for
|
||||
# this worker to catch up with the token so we don't skip past any incoming
|
||||
# events or future events if the user is nefariously, manually modifying the
|
||||
# token.
|
||||
if from_token is not None:
|
||||
# We need to make sure this worker has caught up with the token. If
|
||||
# this returns false, it means we timed out waiting, and we should
|
||||
# just return an empty response.
|
||||
before_wait_ts = self.clock.time_msec()
|
||||
if not await self.notifier.wait_for_stream_token(from_token):
|
||||
logger.warning(
|
||||
"Timed out waiting for worker to catch up. Returning empty response"
|
||||
)
|
||||
return SlidingSyncResult.empty(from_token)
|
||||
|
||||
# If we've spent significant time waiting to catch up, take it off
|
||||
# the timeout.
|
||||
after_wait_ts = self.clock.time_msec()
|
||||
if after_wait_ts - before_wait_ts > 1_000:
|
||||
timeout_ms -= after_wait_ts - before_wait_ts
|
||||
timeout_ms = max(timeout_ms, 0)
|
||||
|
||||
# We're going to respond immediately if the timeout is 0 or if this is an
|
||||
# initial sync (without a `from_token`) so we can avoid calling
|
||||
# `notifier.wait_for_events()`.
|
||||
if timeout_ms == 0 or from_token is None:
|
||||
now_token = self.event_sources.get_current_token()
|
||||
result = await self.current_sync_for_user(
|
||||
sync_config,
|
||||
from_token=from_token,
|
||||
to_token=now_token,
|
||||
)
|
||||
else:
|
||||
# Otherwise, we wait for something to happen and report it to the user.
|
||||
async def current_sync_callback(
|
||||
before_token: StreamToken, after_token: StreamToken
|
||||
) -> SlidingSyncResult:
|
||||
return await self.current_sync_for_user(
|
||||
sync_config,
|
||||
from_token=from_token,
|
||||
to_token=after_token,
|
||||
)
|
||||
|
||||
result = await self.notifier.wait_for_events(
|
||||
sync_config.user.to_string(),
|
||||
timeout_ms,
|
||||
current_sync_callback,
|
||||
from_token=from_token,
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
    async def current_sync_for_user(
        self,
        sync_config: SlidingSyncConfig,
        to_token: StreamToken,
        from_token: Optional[StreamToken] = None,
    ) -> SlidingSyncResult:
        """
        Generates the response body of a Sliding Sync result, represented as a
        `SlidingSyncResult`.
        """
        user_id = sync_config.user.to_string()
        app_service = self.store.get_app_service_by_user_id(user_id)
        if app_service:
            # We no longer support AS users using /sync directly.
            # See https://github.com/matrix-org/matrix-doc/issues/1144
            raise NotImplementedError()

        # Get all of the room IDs that the user should be able to see in the sync
        # response
        room_id_set = await self.get_sync_room_ids_for_user(
            sync_config.user,
            from_token=from_token,
            to_token=to_token,
        )

        # Assemble sliding window lists
        lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {}
        if sync_config.lists:
            for list_key, list_config in sync_config.lists.items():
                # TODO: Apply filters
                #
                # TODO: Exclude partially stated rooms unless the `required_state` has
                # `["m.room.member", "$LAZY"]`
                filtered_room_ids = room_id_set
                # TODO: Apply sorts
                sorted_room_ids = sorted(filtered_room_ids)

                ops: List[SlidingSyncResult.SlidingWindowList.Operation] = []
                if list_config.ranges:
                    for range in list_config.ranges:
                        ops.append(
                            SlidingSyncResult.SlidingWindowList.Operation(
                                op=OperationType.SYNC,
                                range=range,
                                room_ids=sorted_room_ids[range[0] : range[1]],
                            )
                        )

                lists[list_key] = SlidingSyncResult.SlidingWindowList(
                    count=len(sorted_room_ids),
                    ops=ops,
                )

        return SlidingSyncResult(
            next_pos=to_token,
            lists=lists,
            # TODO: Gather room data for rooms in lists and `sync_config.room_subscriptions`
            rooms={},
            extensions={},
        )

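    # For example, with `ranges=[[0, 10]]` and 20 rooms in `sorted_room_ids`, the list
    # above is returned with `count=20` and a single SYNC op whose `room_ids` come from
    # the Python slice `sorted_room_ids[0:10]` (the first ten rooms; the slice is
    # end-exclusive as written).
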
    async def get_sync_room_ids_for_user(
        self,
        user: UserID,
        to_token: StreamToken,
        from_token: Optional[StreamToken] = None,
    ) -> AbstractSet[str]:
        """
        Fetch room IDs that should be listed for this user in the sync response (the
        full room list that will be filtered, sorted, and sliced).

        We're looking for rooms where the user has the following state in the token
        range (> `from_token` and <= `to_token`):

        - `invite`, `join`, `knock`, `ban` membership events
        - Kicks (`leave` membership events where `sender` is different from the
          `user_id`/`state_key`)
        - `newly_left` (rooms that were left during the given token range)
        - In order for bans/kicks to not show up in sync, you need to `/forget` those
          rooms. This doesn't modify the event itself though and only adds the
          `forgotten` flag to the `room_memberships` table in Synapse. There isn't a way
          to tell when a room was forgotten at the moment so we can't factor it into the
          from/to range.
        """
        user_id = user.to_string()

        # First grab a current snapshot of rooms for the user
        # (also handles forgotten rooms)
        room_for_user_list = await self.store.get_rooms_for_local_user_where_membership_is(
            user_id=user_id,
            # We want to fetch any kind of membership (joined and left rooms) in order
            # to get the `event_pos` of the latest room membership event for the
            # user.
            #
            # We will filter out the rooms that don't belong below (see
            # `filter_membership_for_sync`)
            membership_list=Membership.LIST,
            excluded_rooms=self.rooms_to_exclude_globally,
        )

        # If the user has never joined any rooms before, we can just return an empty list
        if not room_for_user_list:
            return set()

        # Our working list of rooms that can show up in the sync response
        sync_room_id_set = {
            room_for_user.room_id
            for room_for_user in room_for_user_list
            if filter_membership_for_sync(
                membership=room_for_user.membership,
                user_id=user_id,
                sender=room_for_user.sender,
            )
        }

        # Get the `RoomStreamToken` that represents the spot we queried up to when we got
        # our membership snapshot from `get_rooms_for_local_user_where_membership_is()`.
        #
        # First, we need to get the max stream_ordering of each event persister instance
        # that we queried events from.
        instance_to_max_stream_ordering_map: Dict[str, int] = {}
        for room_for_user in room_for_user_list:
            instance_name = room_for_user.event_pos.instance_name
            stream_ordering = room_for_user.event_pos.stream

            current_instance_max_stream_ordering = (
                instance_to_max_stream_ordering_map.get(instance_name)
            )
            if (
                current_instance_max_stream_ordering is None
                or stream_ordering > current_instance_max_stream_ordering
            ):
                instance_to_max_stream_ordering_map[instance_name] = stream_ordering

        # Then assemble the `RoomStreamToken`
        membership_snapshot_token = RoomStreamToken(
            # Minimum position in the `instance_map`
            stream=min(instance_to_max_stream_ordering_map.values()),
            instance_map=immutabledict(instance_to_max_stream_ordering_map),
        )

        # If our `to_token` is already the same or ahead of the latest room membership
        # for the user, we can just straight-up return the room list (nothing has
        # changed)
        if membership_snapshot_token.is_before_or_eq(to_token.room_key):
            return sync_room_id_set

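        # As an illustration of `membership_snapshot_token` above: if the snapshot
        # contained events persisted by two writers, with per-instance maximums of
        # `{"persister1": 100, "persister2": 150}`, the token is built with
        # `stream=100` (the minimum across instances) and the full map in
        # `instance_map`, so later comparisons remain correct per writer instance.
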
        # Since we fetched the user's room list at some point in time after the from/to
        # tokens, we need to revert/rewind some membership changes to match the point in
        # time of the `to_token`. In particular, we need to make these fixups:
        #
        # - 1a) Add back rooms that the user left after the `to_token`
        # - 1b) Remove rooms that the user joined after the `to_token`
        # - 2) Add back newly_left rooms (> `from_token` and <= `to_token`)
        #
        # Below, we're doing two separate lookups for membership changes. We could
        # request everything for both fixups in one range, [`from_token.room_key`,
        # `membership_snapshot_token`), but we want to avoid raw `stream_ordering`
        # comparison without `instance_name` (which is flawed). We could refactor
        # `event.internal_metadata` to include `instance_name` but it might turn out a
        # little difficult and a bigger, broader Synapse change than we want to make.

        # 1) -----------------------------------------------------

        # 1) Fetch membership changes that fall in the range from `to_token` up to
        # `membership_snapshot_token`
        membership_change_events_after_to_token = (
            await self.store.get_membership_changes_for_user(
                user_id,
                from_key=to_token.room_key,
                to_key=membership_snapshot_token,
                excluded_rooms=self.rooms_to_exclude_globally,
            )
        )

        # 1) Assemble a list of the last membership events in some given ranges. Someone
        # could have left and joined multiple times during the given range but we only
        # care about the end result so we grab the last one.
        last_membership_change_by_room_id_after_to_token: Dict[str, EventBase] = {}
        # We also need the first membership event after the `to_token` so we can step
        # backward to the previous membership that would apply to the from/to range.
        first_membership_change_by_room_id_after_to_token: Dict[str, EventBase] = {}
        for event in membership_change_events_after_to_token:
            last_membership_change_by_room_id_after_to_token[event.room_id] = event
            # Only set if we haven't already set it
            first_membership_change_by_room_id_after_to_token.setdefault(
                event.room_id, event
            )

        # 1) Fixup
        for (
            last_membership_change_after_to_token
        ) in last_membership_change_by_room_id_after_to_token.values():
            room_id = last_membership_change_after_to_token.room_id

            # We want to find the first membership change after the `to_token` then step
            # backward to know the membership in the from/to range.
            first_membership_change_after_to_token = (
                first_membership_change_by_room_id_after_to_token.get(room_id)
            )
            assert first_membership_change_after_to_token is not None, (
                "If there was a `last_membership_change_after_to_token` that we're iterating over, "
                + "then there should be a corresponding first change. For example, even if there "
                + "is only one event after the `to_token`, the first and last event will be the same event. "
                + "This is probably a mistake in assembling the `last_membership_change_by_room_id_after_to_token`"
                + "/`first_membership_change_by_room_id_after_to_token` dicts above."
            )
            # TODO: Instead of reading from `unsigned`, refactor this to use the
            # `current_state_delta_stream` table in the future. Probably a new
            # `get_membership_changes_for_user()` function that uses
            # `current_state_delta_stream` with a join to `room_memberships`. This would
            # help in state reset scenarios since `prev_content` is looking at the
            # current branch vs the current room state. This is all just data given to
            # the client so no real harm to data integrity, but we'd like to be nice to
            # the client. Since the `current_state_delta_stream` table is new, it
            # doesn't have all events in it. Since this is Sliding Sync, if we ever need
            # to, we can signal the client to throw all of their state away by sending
            # "operation: RESET".
            prev_content = first_membership_change_after_to_token.unsigned.get(
                "prev_content", {}
            )
            prev_membership = prev_content.get("membership", None)
            prev_sender = first_membership_change_after_to_token.unsigned.get(
                "prev_sender", None
            )

            # Check if the previous membership (membership that applies to the from/to
            # range) should be included in our `sync_room_id_set`
            should_prev_membership_be_included = (
                prev_membership is not None
                and prev_sender is not None
                and filter_membership_for_sync(
                    membership=prev_membership,
                    user_id=user_id,
                    sender=prev_sender,
                )
            )

            # Check if the last membership (membership that applies to our snapshot) was
            # already included in our `sync_room_id_set`
            was_last_membership_already_included = filter_membership_for_sync(
                membership=last_membership_change_after_to_token.membership,
                user_id=user_id,
                sender=last_membership_change_after_to_token.sender,
            )

            # 1a) Add back rooms that the user left after the `to_token`
            #
            # For example, if the last membership event after the `to_token` is a leave
            # event, then the room was excluded from `sync_room_id_set` when we first
            # crafted it above. We should add these rooms back as long as the user also
            # was part of the room before the `to_token`.
            if (
                not was_last_membership_already_included
                and should_prev_membership_be_included
            ):
                sync_room_id_set.add(room_id)
            # 1b) Remove rooms that the user joined (hasn't left) after the `to_token`
            #
            # For example, if the last membership event after the `to_token` is a "join"
            # event, then the room was included in `sync_room_id_set` when we first
            # crafted it above. We should remove these rooms as long as the user also
            # wasn't part of the room before the `to_token`.
            elif (
                was_last_membership_already_included
                and not should_prev_membership_be_included
            ):
                sync_room_id_set.discard(room_id)

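        # Worked example for fixup 1: suppose `to_token` points at T10 and the
        # membership snapshot was taken at T15. If the user left room A at T12, the
        # snapshot excluded A, but the first membership change after `to_token` carries
        # a `join` in its `prev_content`, so fixup 1a adds A back. Conversely, if the
        # user joined room B at T13 having never been a member before, the snapshot
        # included B but there is no qualifying previous membership, so fixup 1b
        # removes it.
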
        # 2) -----------------------------------------------------
        # We fix up newly_left rooms after the first fixup because it may have removed
        # some left rooms; the ones that qualify as newly_left are added back in the
        # code below.

        # 2) Fetch membership changes that fall in the range from `from_token` up to `to_token`
        membership_change_events_in_from_to_range = []
        if from_token:
            membership_change_events_in_from_to_range = (
                await self.store.get_membership_changes_for_user(
                    user_id,
                    from_key=from_token.room_key,
                    to_key=to_token.room_key,
                    excluded_rooms=self.rooms_to_exclude_globally,
                )
            )

        # 2) Assemble a list of the last membership events in some given ranges. Someone
        # could have left and joined multiple times during the given range but we only
        # care about the end result so we grab the last one.
        last_membership_change_by_room_id_in_from_to_range: Dict[str, EventBase] = {}
        for event in membership_change_events_in_from_to_range:
            last_membership_change_by_room_id_in_from_to_range[event.room_id] = event

        # 2) Fixup
        for (
            last_membership_change_in_from_to_range
        ) in last_membership_change_by_room_id_in_from_to_range.values():
            room_id = last_membership_change_in_from_to_range.room_id

            # 2) Add back newly_left rooms (> `from_token` and <= `to_token`). We
            # include newly_left rooms because the last event that the user should see
            # is their own leave event
            if last_membership_change_in_from_to_range.membership == Membership.LEAVE:
                sync_room_id_set.add(room_id)

        return sync_room_id_set

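
# A minimal sketch of `filter_membership_for_sync`, whose definition is not shown in
# this excerpt. It is inferred from the `get_sync_room_ids_for_user` docstring above
# rather than copied from the upstream implementation: keep `invite`/`join`/`knock`/
# `ban` memberships and kicks (a `leave` sent by someone other than the user), and
# drop ordinary self-initiated leaves.
def filter_membership_for_sync(
    *, membership: str, user_id: str, sender: Optional[str]
) -> bool:
    # A `leave` only stays visible when someone else sent it (i.e. a kick); every
    # other membership (`invite`, `join`, `knock`, `ban`) is always included.
    return membership != Membership.LEAVE or sender != user_id
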
@@ -169,6 +169,7 @@ class UsernameMappingSession:
    # attributes returned by the ID mapper
    display_name: Optional[str]
    emails: StrCollection
    avatar_url: Optional[str]

    # An optional dictionary of extra attributes to be provided to the client in the
    # login response.

@@ -183,6 +184,7 @@ class UsernameMappingSession:
    # choices made by the user
    chosen_localpart: Optional[str] = None
    use_display_name: bool = True
    use_avatar: bool = True
    emails_to_use: StrCollection = ()
    terms_accepted_version: Optional[str] = None

@@ -660,6 +662,9 @@ class SsoHandler:
            remote_user_id=remote_user_id,
            display_name=attributes.display_name,
            emails=attributes.emails,
            avatar_url=attributes.picture,
            # Default to using all mapped emails. Will be overwritten in handle_submit_username_request.
            emails_to_use=attributes.emails,
            client_redirect_url=client_redirect_url,
            expiry_time_ms=now + self._MAPPING_SESSION_VALIDITY_PERIOD_MS,
            extra_login_attributes=extra_login_attributes,

@@ -812,7 +817,7 @@ class SsoHandler:
            server_name = profile["avatar_url"].split("/")[-2]
            media_id = profile["avatar_url"].split("/")[-1]
            if self._is_mine_server_name(server_name):
                media = await self._media_repo.store.get_local_media(media_id)
                media = await self._media_repo.store.get_local_media(media_id)  # type: ignore[has-type]
                if media is not None and upload_name == media.upload_name:
                    logger.info("skipping saving the user avatar")
                    return True

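            # For example, an `avatar_url` of "mxc://example.org/abcdef123" splits on
            # "/" into ["mxc:", "", "example.org", "abcdef123"], so index -2 is the
            # server name and index -1 is the media ID.
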
@@ -966,6 +971,7 @@ class SsoHandler:
        session_id: str,
        localpart: str,
        use_display_name: bool,
        use_avatar: bool,
        emails_to_use: Iterable[str],
    ) -> None:
        """Handle a request to the username-picker 'submit' endpoint

@@ -988,6 +994,7 @@ class SsoHandler:
        # update the session with the user's choices
        session.chosen_localpart = localpart
        session.use_display_name = use_display_name
        session.use_avatar = use_avatar

        emails_from_idp = set(session.emails)
        filtered_emails: Set[str] = set()

@@ -1068,6 +1075,9 @@ class SsoHandler:
        if session.use_display_name:
            attributes.display_name = session.display_name

        if session.use_avatar:
            attributes.picture = session.avatar_url

        # the following will raise a 400 error if the username has been taken in the
        # meantime.
        user_id = await self._register_mapped_user(

@@ -20,6 +20,7 @@
#
import itertools
import logging
from enum import Enum
from typing import (
    TYPE_CHECKING,
    AbstractSet,

@@ -27,11 +28,14 @@ from typing import (
    Dict,
    FrozenSet,
    List,
    Literal,
    Mapping,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
    overload,
)

import attr

@@ -112,12 +116,30 @@ LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100
SyncRequestKey = Tuple[Any, ...]


class SyncVersion(Enum):
    """
    Enum for specifying the version of sync request. This is used to key which type of
    sync response we are generating.

    This is different from the `sync_type` you might see used in other code below, which
    specifies the sub-type of sync request (e.g. initial_sync, full_state_sync,
    incremental_sync) and is really only relevant for the `/sync` v2 endpoint.
    """

    # These string values are semantically significant because they are used in the
    # metrics

    # Traditional `/sync` endpoint
    SYNC_V2 = "sync_v2"
    # Part of MSC3575 Sliding Sync
    E2EE_SYNC = "e2ee_sync"


@attr.s(slots=True, frozen=True, auto_attribs=True)
class SyncConfig:
    user: UserID
    filter_collection: FilterCollection
    is_guest: bool
    request_key: SyncRequestKey
    device_id: Optional[str]

@@ -262,6 +284,47 @@ class SyncResult:
            or self.device_lists
        )

    @staticmethod
    def empty(
        next_batch: StreamToken,
        device_one_time_keys_count: JsonMapping,
        device_unused_fallback_key_types: List[str],
    ) -> "SyncResult":
        "Return a new empty result"
        return SyncResult(
            next_batch=next_batch,
            presence=[],
            account_data=[],
            joined=[],
            invited=[],
            knocked=[],
            archived=[],
            to_device=[],
            device_lists=DeviceListUpdates(),
            device_one_time_keys_count=device_one_time_keys_count,
            device_unused_fallback_key_types=device_unused_fallback_key_types,
        )


@attr.s(slots=True, frozen=True, auto_attribs=True)
class E2eeSyncResult:
    """
    Attributes:
        next_batch: Token for the next sync
        to_device: List of direct messages for the device.
        device_lists: List of user_ids whose devices have changed
        device_one_time_keys_count: Dict of algorithm to count for one time keys
            for this device
        device_unused_fallback_key_types: List of key types that have an unused fallback
            key
    """

    next_batch: StreamToken
    to_device: List[JsonDict]
    device_lists: DeviceListUpdates
    device_one_time_keys_count: JsonMapping
    device_unused_fallback_key_types: List[str]


class SyncHandler:
    def __init__(self, hs: "HomeServer"):

@@ -305,17 +368,68 @@ class SyncHandler:
|
||||
|
||||
self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
|
||||
|
||||
@overload
|
||||
async def wait_for_sync_for_user(
|
||||
self,
|
||||
requester: Requester,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: Literal[SyncVersion.SYNC_V2],
|
||||
request_key: SyncRequestKey,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
timeout: int = 0,
|
||||
full_state: bool = False,
|
||||
) -> SyncResult:
|
||||
) -> SyncResult: ...
|
||||
|
||||
@overload
|
||||
async def wait_for_sync_for_user(
|
||||
self,
|
||||
requester: Requester,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: Literal[SyncVersion.E2EE_SYNC],
|
||||
request_key: SyncRequestKey,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
timeout: int = 0,
|
||||
full_state: bool = False,
|
||||
) -> E2eeSyncResult: ...
|
||||
|
||||
@overload
|
||||
async def wait_for_sync_for_user(
|
||||
self,
|
||||
requester: Requester,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: SyncVersion,
|
||||
request_key: SyncRequestKey,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
timeout: int = 0,
|
||||
full_state: bool = False,
|
||||
) -> Union[SyncResult, E2eeSyncResult]: ...
|
||||
|
||||
async def wait_for_sync_for_user(
|
||||
self,
|
||||
requester: Requester,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: SyncVersion,
|
||||
request_key: SyncRequestKey,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
timeout: int = 0,
|
||||
full_state: bool = False,
|
||||
) -> Union[SyncResult, E2eeSyncResult]:
|
||||
"""Get the sync for a client if we have new data for it now. Otherwise
|
||||
wait for new data to arrive on the server. If the timeout expires, then
|
||||
return an empty sync result.
|
||||
|
||||
Args:
|
||||
requester: The user requesting the sync response.
|
||||
sync_config: Config/info necessary to process the sync request.
|
||||
sync_version: Determines what kind of sync response to generate.
|
||||
request_key: The key to use for caching the response.
|
||||
since_token: The point in the stream to sync from.
|
||||
timeout: How long to wait for new data to arrive before giving up.
|
||||
full_state: Whether to return the full state for each room.
|
||||
|
||||
Returns:
|
||||
When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
|
||||
When `SyncVersion.E2EE_SYNC`, returns an `E2eeSyncResult`.
|
||||
"""
|
||||
# If the user is not part of the mau group, then check that limits have
|
||||
# not been exceeded (if not part of the group by this point, almost certain
|
||||
@@ -324,9 +438,10 @@ class SyncHandler:
|
||||
await self.auth_blocking.check_auth_blocking(requester=requester)
|
||||
|
||||
res = await self.response_cache.wrap(
|
||||
sync_config.request_key,
|
||||
request_key,
|
||||
self._wait_for_sync_for_user,
|
||||
sync_config,
|
||||
sync_version,
|
||||
since_token,
|
||||
timeout,
|
||||
full_state,
|
||||
@@ -335,14 +450,48 @@ class SyncHandler:
|
||||
logger.debug("Returning sync response for %s", user_id)
|
||||
return res
|
||||
|
||||
@overload
|
||||
async def _wait_for_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: Literal[SyncVersion.SYNC_V2],
|
||||
since_token: Optional[StreamToken],
|
||||
timeout: int,
|
||||
full_state: bool,
|
||||
cache_context: ResponseCacheContext[SyncRequestKey],
|
||||
) -> SyncResult:
|
||||
) -> SyncResult: ...
|
||||
|
||||
@overload
|
||||
async def _wait_for_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: Literal[SyncVersion.E2EE_SYNC],
|
||||
since_token: Optional[StreamToken],
|
||||
timeout: int,
|
||||
full_state: bool,
|
||||
cache_context: ResponseCacheContext[SyncRequestKey],
|
||||
) -> E2eeSyncResult: ...
|
||||
|
||||
@overload
|
||||
async def _wait_for_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: SyncVersion,
|
||||
since_token: Optional[StreamToken],
|
||||
timeout: int,
|
||||
full_state: bool,
|
||||
cache_context: ResponseCacheContext[SyncRequestKey],
|
||||
) -> Union[SyncResult, E2eeSyncResult]: ...
|
||||
|
||||
async def _wait_for_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: SyncVersion,
|
||||
since_token: Optional[StreamToken],
|
||||
timeout: int,
|
||||
full_state: bool,
|
||||
cache_context: ResponseCacheContext[SyncRequestKey],
|
||||
) -> Union[SyncResult, E2eeSyncResult]:
|
||||
"""The start of the machinery that produces a /sync response.
|
||||
|
||||
See https://spec.matrix.org/v1.1/client-server-api/#syncing for full details.
|
||||
@@ -363,9 +512,50 @@ class SyncHandler:
|
||||
else:
|
||||
sync_type = "incremental_sync"
|
||||
|
||||
sync_label = f"{sync_version}:{sync_type}"
|
||||
|
||||
context = current_context()
|
||||
if context:
|
||||
context.tag = sync_type
|
||||
context.tag = sync_label
|
||||
|
||||
if since_token is not None:
|
||||
# We need to make sure this worker has caught up with the token. If
|
||||
# this returns false it means we timed out waiting, and we should
|
||||
# just return an empty response.
|
||||
start = self.clock.time_msec()
|
||||
if not await self.notifier.wait_for_stream_token(since_token):
|
||||
logger.warning(
|
||||
"Timed out waiting for worker to catch up. Returning empty response"
|
||||
)
|
||||
device_id = sync_config.device_id
|
||||
one_time_keys_count: JsonMapping = {}
|
||||
unused_fallback_key_types: List[str] = []
|
||||
if device_id:
|
||||
user_id = sync_config.user.to_string()
|
||||
# TODO: We should have a way to let clients differentiate between the states of:
|
||||
# * no change in OTK count since the provided since token
|
||||
# * the server has zero OTKs left for this device
|
||||
# Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
|
||||
one_time_keys_count = await self.store.count_e2e_one_time_keys(
|
||||
user_id, device_id
|
||||
)
|
||||
unused_fallback_key_types = list(
|
||||
await self.store.get_e2e_unused_fallback_key_types(
|
||||
user_id, device_id
|
||||
)
|
||||
)
|
||||
|
||||
cache_context.should_cache = False # Don't cache empty responses
|
||||
return SyncResult.empty(
|
||||
since_token, one_time_keys_count, unused_fallback_key_types
|
||||
)
|
||||
|
||||
# If we've spent significant time waiting to catch up, take it off
|
||||
# the timeout.
|
||||
now = self.clock.time_msec()
|
||||
if now - start > 1_000:
|
||||
timeout -= now - start
|
||||
timeout = max(timeout, 0)
|
||||
|
||||
# if we have a since token, delete any to-device messages before that token
|
||||
# (since we now know that the device has received them)
|
||||
@@ -383,15 +573,19 @@ class SyncHandler:
|
||||
if timeout == 0 or since_token is None or full_state:
|
||||
# we are going to return immediately, so don't bother calling
|
||||
# notifier.wait_for_events.
|
||||
result: SyncResult = await self.current_sync_for_user(
|
||||
sync_config, since_token, full_state=full_state
|
||||
result: Union[SyncResult, E2eeSyncResult] = (
|
||||
await self.current_sync_for_user(
|
||||
sync_config, sync_version, since_token, full_state=full_state
|
||||
)
|
||||
)
|
||||
else:
|
||||
# Otherwise, we wait for something to happen and report it to the user.
|
||||
async def current_sync_callback(
|
||||
before_token: StreamToken, after_token: StreamToken
|
||||
) -> SyncResult:
|
||||
return await self.current_sync_for_user(sync_config, since_token)
|
||||
) -> Union[SyncResult, E2eeSyncResult]:
|
||||
return await self.current_sync_for_user(
|
||||
sync_config, sync_version, since_token
|
||||
)
|
||||
|
||||
result = await self.notifier.wait_for_events(
|
||||
sync_config.user.to_string(),
|
||||
@@ -416,27 +610,81 @@ class SyncHandler:
|
||||
lazy_loaded = "true"
|
||||
else:
|
||||
lazy_loaded = "false"
|
||||
non_empty_sync_counter.labels(sync_type, lazy_loaded).inc()
|
||||
non_empty_sync_counter.labels(sync_label, lazy_loaded).inc()
|
||||
|
||||
return result
|
||||
|
||||
@overload
|
||||
async def current_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: Literal[SyncVersion.SYNC_V2],
|
||||
since_token: Optional[StreamToken] = None,
|
||||
full_state: bool = False,
|
||||
) -> SyncResult: ...
|
||||
|
||||
@overload
|
||||
async def current_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: Literal[SyncVersion.E2EE_SYNC],
|
||||
since_token: Optional[StreamToken] = None,
|
||||
full_state: bool = False,
|
||||
) -> E2eeSyncResult: ...
|
||||
|
||||
@overload
|
||||
async def current_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: SyncVersion,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
full_state: bool = False,
|
||||
) -> Union[SyncResult, E2eeSyncResult]: ...
|
||||
|
||||
async def current_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: SyncVersion,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
full_state: bool = False,
|
||||
) -> SyncResult:
|
||||
"""Generates the response body of a sync result, represented as a SyncResult.
|
||||
) -> Union[SyncResult, E2eeSyncResult]:
|
||||
"""
|
||||
Generates the response body of a sync result, represented as a
|
||||
`SyncResult`/`E2eeSyncResult`.
|
||||
|
||||
This is a wrapper around `generate_sync_result` which starts an open tracing
|
||||
span to track the sync. See `generate_sync_result` for the next part of your
|
||||
indoctrination.
|
||||
|
||||
Args:
|
||||
sync_config: Config/info necessary to process the sync request.
|
||||
sync_version: Determines what kind of sync response to generate.
|
||||
since_token: The point in the stream to sync from.
|
||||
full_state: Whether to return the full state for each room.
|
||||
|
||||
Returns:
|
||||
When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
|
||||
When `SyncVersion.E2EE_SYNC`, returns an `E2eeSyncResult`.
|
||||
"""
|
||||
with start_active_span("sync.current_sync_for_user"):
|
||||
log_kv({"since_token": since_token})
|
||||
sync_result = await self.generate_sync_result(
|
||||
sync_config, since_token, full_state
|
||||
)
|
||||
|
||||
# Go through the `/sync` v2 path
|
||||
if sync_version == SyncVersion.SYNC_V2:
|
||||
sync_result: Union[SyncResult, E2eeSyncResult] = (
|
||||
await self.generate_sync_result(
|
||||
sync_config, since_token, full_state
|
||||
)
|
||||
)
|
||||
# Go through the MSC3575 Sliding Sync `/sync/e2ee` path
|
||||
elif sync_version == SyncVersion.E2EE_SYNC:
|
||||
sync_result = await self.generate_e2ee_sync_result(
|
||||
sync_config, since_token
|
||||
)
|
||||
else:
|
||||
raise Exception(
|
||||
f"Unknown sync_version (this is a Synapse problem): {sync_version}"
|
||||
)
|
||||
|
||||
set_tag(SynapseTags.SYNC_RESULT, bool(sync_result))
|
||||
return sync_result
|
||||
@@ -596,6 +844,7 @@ class SyncHandler:
|
||||
sync_config.user.to_string(),
|
||||
recents,
|
||||
always_include_ids=current_state_ids,
|
||||
msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
log_kv({"recents_after_visibility_filtering": len(recents)})
|
||||
else:
|
||||
@@ -681,6 +930,7 @@ class SyncHandler:
|
||||
sync_config.user.to_string(),
|
||||
loaded_recents,
|
||||
always_include_ids=current_state_ids,
|
||||
msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
loaded_recents = []
|
||||
@@ -1516,128 +1766,17 @@ class SyncHandler:
|
||||
# See https://github.com/matrix-org/matrix-doc/issues/1144
|
||||
raise NotImplementedError()
|
||||
|
||||
# Note: we get the user's room list *before* we get the current token, this
|
||||
# avoids checking back in history if rooms are joined after the token is fetched.
|
||||
token_before_rooms = self.event_sources.get_current_token()
|
||||
mutable_joined_room_ids = set(await self.store.get_rooms_for_user(user_id))
|
||||
|
||||
# NB: The now_token gets changed by some of the generate_sync_* methods,
|
||||
# this is due to some of the underlying streams not supporting the ability
|
||||
# to query up to a given point.
|
||||
# Always use the `now_token` in `SyncResultBuilder`
|
||||
now_token = self.event_sources.get_current_token()
|
||||
log_kv({"now_token": now_token})
|
||||
|
||||
# Since we fetched the user's room list before the token, there's a small window
|
||||
# during which membership events may have been persisted, so we fetch these now
|
||||
# and modify the joined room list for any changes between the get_rooms_for_user
|
||||
# call and the get_current_token call.
|
||||
membership_change_events = []
|
||||
if since_token:
|
||||
membership_change_events = await self.store.get_membership_changes_for_user(
|
||||
user_id,
|
||||
since_token.room_key,
|
||||
now_token.room_key,
|
||||
self.rooms_to_exclude_globally,
|
||||
)
|
||||
|
||||
mem_last_change_by_room_id: Dict[str, EventBase] = {}
|
||||
for event in membership_change_events:
|
||||
mem_last_change_by_room_id[event.room_id] = event
|
||||
|
||||
# For the latest membership event in each room found, add/remove the room ID
|
||||
# from the joined room list accordingly. In this case we only care if the
|
||||
# latest change is JOIN.
|
||||
|
||||
for room_id, event in mem_last_change_by_room_id.items():
|
||||
assert event.internal_metadata.stream_ordering
|
||||
if (
|
||||
event.internal_metadata.stream_ordering
|
||||
< token_before_rooms.room_key.stream
|
||||
):
|
||||
continue
|
||||
|
||||
logger.info(
|
||||
"User membership change between getting rooms and current token: %s %s %s",
|
||||
user_id,
|
||||
event.membership,
|
||||
room_id,
|
||||
)
|
||||
# User joined a room - we have to then check the room state to ensure we
|
||||
# respect any bans if there's a race between the join and ban events.
|
||||
if event.membership == Membership.JOIN:
|
||||
user_ids_in_room = await self.store.get_users_in_room(room_id)
|
||||
if user_id in user_ids_in_room:
|
||||
mutable_joined_room_ids.add(room_id)
|
||||
# The user left the room, or left and was re-invited but not joined yet
|
||||
else:
|
||||
mutable_joined_room_ids.discard(room_id)
|
||||
|
||||
# Tweak the set of rooms to return to the client for eager (non-lazy) syncs.
|
||||
mutable_rooms_to_exclude = set(self.rooms_to_exclude_globally)
|
||||
if not sync_config.filter_collection.lazy_load_members():
|
||||
# Non-lazy syncs should never include partially stated rooms.
|
||||
# Exclude all partially stated rooms from this sync.
|
||||
results = await self.store.is_partial_state_room_batched(
|
||||
mutable_joined_room_ids
|
||||
)
|
||||
mutable_rooms_to_exclude.update(
|
||||
room_id
|
||||
for room_id, is_partial_state in results.items()
|
||||
if is_partial_state
|
||||
)
|
||||
membership_change_events = [
|
||||
event
|
||||
for event in membership_change_events
|
||||
if not results.get(event.room_id, False)
|
||||
]
|
||||
|
||||
# Incremental eager syncs should additionally include rooms that
|
||||
# - we are joined to
|
||||
# - are full-stated
|
||||
# - became fully-stated at some point during the sync period
|
||||
# (These rooms will have been omitted during a previous eager sync.)
|
||||
forced_newly_joined_room_ids: Set[str] = set()
|
||||
if since_token and not sync_config.filter_collection.lazy_load_members():
|
||||
un_partial_stated_rooms = (
|
||||
await self.store.get_un_partial_stated_rooms_between(
|
||||
since_token.un_partial_stated_rooms_key,
|
||||
now_token.un_partial_stated_rooms_key,
|
||||
mutable_joined_room_ids,
|
||||
)
|
||||
)
|
||||
results = await self.store.is_partial_state_room_batched(
|
||||
un_partial_stated_rooms
|
||||
)
|
||||
forced_newly_joined_room_ids.update(
|
||||
room_id
|
||||
for room_id, is_partial_state in results.items()
|
||||
if not is_partial_state
|
||||
)
|
||||
|
||||
# Now we have our list of joined room IDs, exclude as configured and freeze
|
||||
joined_room_ids = frozenset(
|
||||
room_id
|
||||
for room_id in mutable_joined_room_ids
|
||||
if room_id not in mutable_rooms_to_exclude
|
||||
sync_result_builder = await self.get_sync_result_builder(
|
||||
sync_config,
|
||||
since_token,
|
||||
full_state,
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
"Calculating sync response for %r between %s and %s",
|
||||
sync_config.user,
|
||||
since_token,
|
||||
now_token,
|
||||
)
|
||||
|
||||
sync_result_builder = SyncResultBuilder(
|
||||
sync_config,
|
||||
full_state,
|
||||
since_token=since_token,
|
||||
now_token=now_token,
|
||||
joined_room_ids=joined_room_ids,
|
||||
excluded_room_ids=frozenset(mutable_rooms_to_exclude),
|
||||
forced_newly_joined_room_ids=frozenset(forced_newly_joined_room_ids),
|
||||
membership_change_events=membership_change_events,
|
||||
sync_result_builder.since_token,
|
||||
sync_result_builder.now_token,
|
||||
)
|
||||
|
||||
logger.debug("Fetching account data")
|
||||
@@ -1749,6 +1888,242 @@ class SyncHandler:
|
||||
next_batch=sync_result_builder.now_token,
|
||||
)
|
||||
|
||||
async def generate_e2ee_sync_result(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
) -> E2eeSyncResult:
|
||||
"""
|
||||
Generates the response body of a MSC3575 Sliding Sync `/sync/e2ee` result.
|
||||
|
||||
This is represented by a `E2eeSyncResult` struct, which is built from small
|
||||
pieces using a `SyncResultBuilder`. The `sync_result_builder` is passed as a
|
||||
mutable ("inout") parameter to various helper functions. These retrieve and
|
||||
process the data which forms the sync body, often writing to the
|
||||
`sync_result_builder` to store their output.
|
||||
|
||||
At the end, we transfer data from the `sync_result_builder` to a new `E2eeSyncResult`
|
||||
instance to signify that the sync calculation is complete.
|
||||
"""
|
||||
user_id = sync_config.user.to_string()
|
||||
app_service = self.store.get_app_service_by_user_id(user_id)
|
||||
if app_service:
|
||||
# We no longer support AS users using /sync directly.
|
||||
# See https://github.com/matrix-org/matrix-doc/issues/1144
|
||||
raise NotImplementedError()
|
||||
|
||||
sync_result_builder = await self.get_sync_result_builder(
|
||||
sync_config,
|
||||
since_token,
|
||||
full_state=False,
|
||||
)
|
||||
|
||||
# 1. Calculate `to_device` events
|
||||
await self._generate_sync_entry_for_to_device(sync_result_builder)
|
||||
|
||||
# 2. Calculate `device_lists`
|
||||
# Device list updates are sent if a since token is provided.
|
||||
device_lists = DeviceListUpdates()
|
||||
include_device_list_updates = bool(since_token and since_token.device_list_key)
|
||||
if include_device_list_updates:
|
||||
# Note that _generate_sync_entry_for_rooms sets sync_result_builder.joined, which
|
||||
# is used in calculate_user_changes below.
|
||||
#
|
||||
# TODO: Running `_generate_sync_entry_for_rooms()` is a lot of work just to
|
||||
# figure out the membership changes/derived info needed for
|
||||
# `_generate_sync_entry_for_device_list()`. In the future, we should try to
|
||||
# refactor this away.
|
||||
(
|
||||
newly_joined_rooms,
|
||||
newly_left_rooms,
|
||||
) = await self._generate_sync_entry_for_rooms(sync_result_builder)
|
||||
|
||||
# This uses the sync_result_builder.joined which is set in
|
||||
# `_generate_sync_entry_for_rooms`, if that didn't find any joined
|
||||
# rooms for some reason it is a no-op.
|
||||
(
|
||||
newly_joined_or_invited_or_knocked_users,
|
||||
newly_left_users,
|
||||
) = sync_result_builder.calculate_user_changes()
|
||||
|
||||
device_lists = await self._generate_sync_entry_for_device_list(
|
||||
sync_result_builder,
|
||||
newly_joined_rooms=newly_joined_rooms,
|
||||
newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
|
||||
newly_left_rooms=newly_left_rooms,
|
||||
newly_left_users=newly_left_users,
|
||||
)
|
||||
|
||||
# 3. Calculate `device_one_time_keys_count` and `device_unused_fallback_key_types`
|
||||
device_id = sync_config.device_id
|
||||
one_time_keys_count: JsonMapping = {}
|
||||
unused_fallback_key_types: List[str] = []
|
||||
if device_id:
|
||||
# TODO: We should have a way to let clients differentiate between the states of:
|
||||
# * no change in OTK count since the provided since token
|
||||
# * the server has zero OTKs left for this device
|
||||
# Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
|
||||
one_time_keys_count = await self.store.count_e2e_one_time_keys(
|
||||
user_id, device_id
|
||||
)
|
||||
unused_fallback_key_types = list(
|
||||
await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
|
||||
)
|
||||
|
||||
return E2eeSyncResult(
|
||||
to_device=sync_result_builder.to_device,
|
||||
device_lists=device_lists,
|
||||
device_one_time_keys_count=one_time_keys_count,
|
||||
device_unused_fallback_key_types=unused_fallback_key_types,
|
||||
next_batch=sync_result_builder.now_token,
|
||||
)
|
||||
|
||||
async def get_sync_result_builder(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
full_state: bool = False,
|
||||
) -> "SyncResultBuilder":
|
||||
"""
|
||||
Assemble a `SyncResultBuilder` with all of the initial context to
|
||||
start building up the sync response:
|
||||
|
||||
- Membership changes between the last sync and the current sync.
|
||||
- Joined room IDs (minus any rooms to exclude).
|
||||
- Rooms that became fully-stated/un-partial stated since the last sync.
|
||||
|
||||
Args:
|
||||
sync_config: Config/info necessary to process the sync request.
|
||||
since_token: The point in the stream to sync from.
|
||||
full_state: Whether to return the full state for each room.
|
||||
|
||||
Returns:
|
||||
`SyncResultBuilder` ready to start generating parts of the sync response.
|
||||
"""
|
||||
user_id = sync_config.user.to_string()
|
||||
|
||||
# Note: we get the user's room list *before* we get the `now_token`, this
|
||||
# avoids checking back in history if rooms are joined after the token is fetched.
|
||||
token_before_rooms = self.event_sources.get_current_token()
|
||||
mutable_joined_room_ids = set(await self.store.get_rooms_for_user(user_id))
|
||||
|
||||
# NB: The `now_token` gets changed by some of the `generate_sync_*` methods,
|
||||
# this is due to some of the underlying streams not supporting the ability
|
||||
# to query up to a given point.
|
||||
# Always use the `now_token` in `SyncResultBuilder`
|
||||
now_token = self.event_sources.get_current_token()
|
||||
log_kv({"now_token": now_token})
|
||||
|
||||
# Since we fetched the user's room list before calculating the `now_token` (see
|
||||
# above), there's a small window during which membership events may have been
|
||||
# persisted, so we fetch these now and modify the joined room list for any
|
||||
# changes between the get_rooms_for_user call and the get_current_token call.
|
||||
membership_change_events = []
|
||||
if since_token:
|
||||
membership_change_events = await self.store.get_membership_changes_for_user(
|
||||
user_id,
|
||||
since_token.room_key,
|
||||
now_token.room_key,
|
||||
self.rooms_to_exclude_globally,
|
||||
)
|
||||
|
||||
last_membership_change_by_room_id: Dict[str, EventBase] = {}
|
||||
for event in membership_change_events:
|
||||
last_membership_change_by_room_id[event.room_id] = event
|
||||
|
||||
# For the latest membership event in each room found, add/remove the room ID
|
||||
# from the joined room list accordingly. In this case we only care if the
|
||||
# latest change is JOIN.
|
||||
|
||||
for room_id, event in last_membership_change_by_room_id.items():
|
||||
assert event.internal_metadata.stream_ordering
|
||||
# As a shortcut, skip any events that happened before we got our
|
||||
# `get_rooms_for_user()` snapshot (any changes are already represented
|
||||
# in that list).
|
||||
if (
|
||||
event.internal_metadata.stream_ordering
|
||||
< token_before_rooms.room_key.stream
|
||||
):
|
||||
continue
|
||||
|
||||
logger.info(
|
||||
"User membership change between getting rooms and current token: %s %s %s",
|
||||
user_id,
|
||||
event.membership,
|
||||
room_id,
|
||||
)
|
||||
# User joined a room - we have to then check the room state to ensure we
|
||||
# respect any bans if there's a race between the join and ban events.
|
||||
if event.membership == Membership.JOIN:
|
||||
user_ids_in_room = await self.store.get_users_in_room(room_id)
|
||||
if user_id in user_ids_in_room:
|
||||
mutable_joined_room_ids.add(room_id)
|
||||
# The user left the room, or left and was re-invited but not joined yet
|
||||
else:
|
||||
mutable_joined_room_ids.discard(room_id)
|
||||
|
||||
# Tweak the set of rooms to return to the client for eager (non-lazy) syncs.
|
||||
mutable_rooms_to_exclude = set(self.rooms_to_exclude_globally)
|
||||
if not sync_config.filter_collection.lazy_load_members():
|
||||
# Non-lazy syncs should never include partially stated rooms.
|
||||
# Exclude all partially stated rooms from this sync.
|
||||
results = await self.store.is_partial_state_room_batched(
|
||||
mutable_joined_room_ids
|
||||
)
|
||||
mutable_rooms_to_exclude.update(
|
||||
room_id
|
||||
for room_id, is_partial_state in results.items()
|
||||
if is_partial_state
|
||||
)
|
||||
membership_change_events = [
|
||||
event
|
||||
for event in membership_change_events
|
||||
if not results.get(event.room_id, False)
|
||||
]
|
||||
|
||||
# Incremental eager syncs should additionally include rooms that
|
||||
# - we are joined to
|
||||
# - are full-stated
|
||||
# - became fully-stated at some point during the sync period
|
||||
# (These rooms will have been omitted during a previous eager sync.)
|
||||
forced_newly_joined_room_ids: Set[str] = set()
|
||||
if since_token and not sync_config.filter_collection.lazy_load_members():
|
||||
un_partial_stated_rooms = (
|
||||
await self.store.get_un_partial_stated_rooms_between(
|
||||
since_token.un_partial_stated_rooms_key,
|
||||
now_token.un_partial_stated_rooms_key,
|
||||
mutable_joined_room_ids,
|
||||
)
|
||||
)
|
||||
results = await self.store.is_partial_state_room_batched(
|
||||
un_partial_stated_rooms
|
||||
)
|
||||
forced_newly_joined_room_ids.update(
|
||||
room_id
|
||||
for room_id, is_partial_state in results.items()
|
||||
if not is_partial_state
|
||||
)
|
||||
|
||||
# Now we have our list of joined room IDs, exclude as configured and freeze
|
||||
joined_room_ids = frozenset(
|
||||
room_id
|
||||
for room_id in mutable_joined_room_ids
|
||||
if room_id not in mutable_rooms_to_exclude
|
||||
)
|
||||
|
||||
sync_result_builder = SyncResultBuilder(
|
||||
sync_config,
|
||||
full_state,
|
||||
since_token=since_token,
|
||||
now_token=now_token,
|
||||
joined_room_ids=joined_room_ids,
|
||||
excluded_room_ids=frozenset(mutable_rooms_to_exclude),
|
||||
forced_newly_joined_room_ids=frozenset(forced_newly_joined_room_ids),
|
||||
membership_change_events=membership_change_events,
|
||||
)
|
||||
|
||||
return sync_result_builder
|
||||
|
||||
@measure_func("_generate_sync_entry_for_device_list")
|
||||
async def _generate_sync_entry_for_device_list(
|
||||
self,
|
||||
@@ -1797,42 +2172,18 @@ class SyncHandler:
|
||||
|
||||
users_that_have_changed = set()
|
||||
|
||||
joined_rooms = sync_result_builder.joined_room_ids
|
||||
joined_room_ids = sync_result_builder.joined_room_ids
|
||||
|
||||
# Step 1a, check for changes in devices of users we share a room
|
||||
# with
|
||||
#
|
||||
# We do this in two different ways depending on what we have cached.
|
||||
# If we already have a list of all the user that have changed since
|
||||
# the last sync then it's likely more efficient to compare the rooms
|
||||
# they're in with the rooms the syncing user is in.
|
||||
#
|
||||
# If we don't have that info cached then we get all the users that
|
||||
# share a room with our user and check if those users have changed.
|
||||
cache_result = self.store.get_cached_device_list_changes(
|
||||
since_token.device_list_key
|
||||
)
|
||||
if cache_result.hit:
|
||||
changed_users = cache_result.entities
|
||||
|
||||
result = await self.store.get_rooms_for_users(changed_users)
|
||||
|
||||
for changed_user_id, entries in result.items():
|
||||
# Check if the changed user shares any rooms with the user,
|
||||
# or if the changed user is the syncing user (as we always
|
||||
# want to include device list updates of their own devices).
|
||||
if user_id == changed_user_id or any(
|
||||
rid in joined_rooms for rid in entries
|
||||
):
|
||||
users_that_have_changed.add(changed_user_id)
|
||||
else:
|
||||
users_that_have_changed = (
|
||||
await self._device_handler.get_device_changes_in_shared_rooms(
|
||||
user_id,
|
||||
sync_result_builder.joined_room_ids,
|
||||
from_token=since_token,
|
||||
)
|
||||
users_that_have_changed = (
|
||||
await self._device_handler.get_device_changes_in_shared_rooms(
|
||||
user_id,
|
||||
joined_room_ids,
|
||||
from_token=since_token,
|
||||
now_token=sync_result_builder.now_token,
|
||||
)
|
||||
)
|
||||
|
||||
# Step 1b, check for newly joined rooms
|
||||
for room_id in newly_joined_rooms:
|
||||
@@ -1856,7 +2207,7 @@ class SyncHandler:
|
||||
# Remove any users that we still share a room with.
|
||||
left_users_rooms = await self.store.get_rooms_for_users(newly_left_users)
|
||||
for user_id, entries in left_users_rooms.items():
|
||||
if any(rid in joined_rooms for rid in entries):
|
||||
if any(rid in joined_room_ids for rid in entries):
|
||||
newly_left_users.discard(user_id)
|
||||
|
||||
return DeviceListUpdates(changed=users_that_have_changed, left=newly_left_users)
|
||||
@@ -1943,23 +2294,19 @@ class SyncHandler:
|
||||
)
|
||||
|
||||
if push_rules_changed:
|
||||
global_account_data = {
|
||||
AccountDataTypes.PUSH_RULES: await self._push_rules_handler.push_rules_for_user(
|
||||
sync_config.user
|
||||
),
|
||||
**global_account_data,
|
||||
}
|
||||
global_account_data = dict(global_account_data)
|
||||
global_account_data[AccountDataTypes.PUSH_RULES] = (
|
||||
await self._push_rules_handler.push_rules_for_user(sync_config.user)
|
||||
)
|
||||
else:
|
||||
all_global_account_data = await self.store.get_global_account_data_for_user(
|
||||
user_id
|
||||
)
|
||||
|
||||
global_account_data = {
|
||||
AccountDataTypes.PUSH_RULES: await self._push_rules_handler.push_rules_for_user(
|
||||
sync_config.user
|
||||
),
|
||||
**all_global_account_data,
|
||||
}
|
||||
global_account_data = dict(all_global_account_data)
|
||||
global_account_data[AccountDataTypes.PUSH_RULES] = (
|
||||
await self._push_rules_handler.push_rules_for_user(sync_config.user)
|
||||
)
|
||||
|
||||
account_data_for_user = (
|
||||
await sync_config.filter_collection.filter_global_account_data(
|
||||
@@ -2486,7 +2833,7 @@ class SyncHandler:
|
||||
continue
|
||||
|
||||
leave_token = now_token.copy_and_replace(
|
||||
StreamKeyType.ROOM, RoomStreamToken(stream=event.stream_ordering)
|
||||
StreamKeyType.ROOM, RoomStreamToken(stream=event.event_pos.stream)
|
||||
)
|
||||
room_entries.append(
|
||||
RoomSyncResultBuilder(
|
||||
|
||||
@@ -477,9 +477,9 @@ class TypingWriterHandler(FollowerTypingHandler):
|
||||
|
||||
rows = []
|
||||
for room_id in changed_rooms:
|
||||
serial = self._room_serials[room_id]
|
||||
if last_id < serial <= current_id:
|
||||
typing = self._room_typing[room_id]
|
||||
serial = self._room_serials.get(room_id)
|
||||
if serial and last_id < serial <= current_id:
|
||||
typing = self._room_typing.get(room_id, set())
|
||||
rows.append((serial, [room_id, list(typing)]))
|
||||
rows.sort()
|
||||
|
||||
|
||||
@@ -57,7 +57,7 @@ from twisted.internet.interfaces import IReactorTime
|
||||
from twisted.internet.task import Cooperator
|
||||
from twisted.web.client import ResponseFailed
|
||||
from twisted.web.http_headers import Headers
|
||||
from twisted.web.iweb import IAgent, IBodyProducer, IResponse
|
||||
from twisted.web.iweb import UNKNOWN_LENGTH, IAgent, IBodyProducer, IResponse
|
||||
|
||||
import synapse.metrics
|
||||
import synapse.util.retryutils
|
||||
@@ -68,6 +68,7 @@ from synapse.api.errors import (
|
||||
RequestSendFailed,
|
||||
SynapseError,
|
||||
)
|
||||
from synapse.api.ratelimiting import Ratelimiter
|
||||
from synapse.crypto.context_factory import FederationPolicyForHTTPS
|
||||
from synapse.http import QuieterFileBodyProducer
|
||||
from synapse.http.client import (
|
||||
@@ -1411,9 +1412,11 @@ class MatrixFederationHttpClient:
|
||||
destination: str,
|
||||
path: str,
|
||||
output_stream: BinaryIO,
|
||||
download_ratelimiter: Ratelimiter,
|
||||
ip_address: str,
|
||||
max_size: int,
|
||||
args: Optional[QueryParams] = None,
|
||||
retry_on_dns_fail: bool = True,
|
||||
max_size: Optional[int] = None,
|
||||
ignore_backoff: bool = False,
|
||||
follow_redirects: bool = False,
|
||||
) -> Tuple[int, Dict[bytes, List[bytes]]]:
|
||||
@@ -1422,6 +1425,10 @@ class MatrixFederationHttpClient:
|
||||
destination: The remote server to send the HTTP request to.
|
||||
path: The HTTP path to GET.
|
||||
output_stream: File to write the response body to.
|
||||
download_ratelimiter: a ratelimiter to limit remote media downloads, keyed to
|
||||
requester IP
|
||||
ip_address: IP address of the requester
|
||||
max_size: maximum allowable size in bytes of the file
|
||||
args: Optional dictionary used to create the query string.
|
||||
ignore_backoff: true to ignore the historical backoff data
|
||||
and try the request anyway.
|
||||
@@ -1441,11 +1448,27 @@ class MatrixFederationHttpClient:
|
||||
federation whitelist
|
||||
RequestSendFailed: If there were problems connecting to the
|
||||
remote, due to e.g. DNS failures, connection timeouts etc.
|
||||
SynapseError: If the requested file exceeds ratelimits
|
||||
"""
|
||||
request = MatrixFederationRequest(
|
||||
method="GET", destination=destination, path=path, query=args
|
||||
)
|
||||
|
||||
# check for a minimum balance of 1MiB in ratelimiter before initiating request
|
||||
send_req, _ = await download_ratelimiter.can_do_action(
|
||||
requester=None, key=ip_address, n_actions=1048576, update=False
|
||||
)
|
||||
|
||||
if not send_req:
|
||||
msg = "Requested file size exceeds ratelimits"
|
||||
logger.warning(
|
||||
"{%s} [%s] %s",
|
||||
request.txn_id,
|
||||
request.destination,
|
||||
msg,
|
||||
)
|
||||
raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
|
||||
|
||||
response = await self._send_request(
|
||||
request,
|
||||
retry_on_dns_fail=retry_on_dns_fail,
|
||||
@@ -1455,12 +1478,36 @@ class MatrixFederationHttpClient:
|
||||
|
||||
headers = dict(response.headers.getAllRawHeaders())
|
||||
|
||||
expected_size = response.length
|
||||
# if we don't get an expected length then use the max length
|
||||
if expected_size == UNKNOWN_LENGTH:
|
||||
expected_size = max_size
|
||||
logger.debug(
|
||||
f"File size unknown, assuming file is max allowable size: {max_size}"
|
||||
)
|
||||
|
||||
read_body, _ = await download_ratelimiter.can_do_action(
|
||||
requester=None,
|
||||
key=ip_address,
|
||||
n_actions=expected_size,
|
||||
)
|
||||
if not read_body:
|
||||
msg = "Requested file size exceeds ratelimits"
|
||||
logger.warning(
|
||||
"{%s} [%s] %s",
|
||||
request.txn_id,
|
||||
request.destination,
|
||||
msg,
|
||||
)
|
||||
raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
|
||||
|
||||
try:
|
||||
d = read_body_with_max_size(response, output_stream, max_size)
|
||||
# add a byte of headroom to max size as function errs at >=
|
||||
d = read_body_with_max_size(response, output_stream, expected_size + 1)
|
||||
d.addTimeout(self.default_timeout_seconds, self.reactor)
|
||||
length = await make_deferred_yieldable(d)
|
||||
except BodyExceededMaxSize:
|
||||
msg = "Requested file is too large > %r bytes" % (max_size,)
|
||||
msg = "Requested file is too large > %r bytes" % (expected_size,)
|
||||
logger.warning(
|
||||
"{%s} [%s] %s",
|
||||
request.txn_id,
|
||||
|
||||
@@ -25,7 +25,16 @@ import os
|
||||
import urllib
|
||||
from abc import ABC, abstractmethod
|
||||
from types import TracebackType
|
||||
from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Awaitable,
|
||||
Dict,
|
||||
Generator,
|
||||
List,
|
||||
Optional,
|
||||
Tuple,
|
||||
Type,
|
||||
)
|
||||
|
||||
import attr
|
||||
|
||||
@@ -39,6 +48,11 @@ from synapse.http.site import SynapseRequest
|
||||
from synapse.logging.context import make_deferred_yieldable
|
||||
from synapse.util.stringutils import is_ascii
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.media.media_storage import MultipartResponder
|
||||
from synapse.storage.databases.main.media_repository import LocalMedia
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# list all text content types that will have the charset default to UTF-8 when
|
||||
@@ -260,6 +274,53 @@ def _can_encode_filename_as_token(x: str) -> bool:
|
||||
return True
|
||||
|
||||
|
||||
async def respond_with_multipart_responder(
    request: SynapseRequest,
    responder: "Optional[MultipartResponder]",
    media_info: "LocalMedia",
) -> None:
    """
    Responds via a Multipart responder for the federation media `/download` requests

    Args:
        request: the federation request to respond to
        responder: the Multipart responder which will send the response
        media_info: metadata about the media item
    """
    if not responder:
        respond_404(request)
        return

    # If we have a responder we *must* use it as a context manager.
    with responder:
        if request._disconnected:
            logger.warning(
                "Not sending response to request %s, already disconnected.", request
            )
            return

        logger.debug("Responding to media request with responder %s", responder)
        if media_info.media_length is not None:
            request.setHeader(b"Content-Length", b"%d" % (media_info.media_length,))
        request.setHeader(
            b"Content-Type", b"multipart/mixed; boundary=%s" % responder.boundary
        )

        try:
            await responder.write_to_consumer(request)
        except Exception as e:
            # The majority of the time this will be due to the client having gone
            # away. Unfortunately, Twisted simply throws a generic exception at us
            # in that case.
            logger.warning("Failed to write to consumer: %s %s", type(e), e)

            # Unregister the producer, if it has one, so Twisted doesn't complain
            if request.producer:
                request.unregisterProducer()

    finish_request(request)


async def respond_with_responder(
    request: SynapseRequest,
    responder: "Optional[Responder]",

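The responder above advertises `multipart/mixed; boundary=...` and then streams a body whose first part is a JSON object and whose second part is the file itself (the framing is written by MultipartFileSender, added further down in this diff). The toy splitter below is only an illustration of how a receiver could separate those two parts given the boundary; it assumes a well-formed body built with the same framing and is not the parser Matrix servers actually use.

from typing import List

CRLF = b"\r\n"


def split_multipart_mixed(body: bytes, boundary: bytes) -> List[bytes]:
    # Split on the dash-boundary delimiter; drop the (empty) preamble and the
    # closing "--" marker, then strip the CRLF that starts each part.
    delimiter = CRLF + b"--" + boundary
    chunks = body.split(delimiter)
    return [c[len(CRLF):] if c.startswith(CRLF) else c for c in chunks[1:-1]]


boundary = b"deadbeef"
body = (
    CRLF + b"--" + boundary + CRLF
    + b"Content-Type: application/json" + CRLF
    + b"{}"
    + CRLF + b"--" + boundary + CRLF
    + b"Content-Type: image/png" + CRLF
    + b"<png bytes>"
    + CRLF + b"--" + boundary + b"--" + CRLF
)
json_part, file_part = split_multipart_mixed(body, boundary)
print(json_part.split(CRLF, 1))  # [b'Content-Type: application/json', b'{}']
print(file_part.split(CRLF, 1))  # [b'Content-Type: image/png', b'<png bytes>']
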
@@ -42,6 +42,7 @@ from synapse.api.errors import (
|
||||
SynapseError,
|
||||
cs_error,
|
||||
)
|
||||
from synapse.api.ratelimiting import Ratelimiter
|
||||
from synapse.config.repository import ThumbnailRequirement
|
||||
from synapse.http.server import respond_with_json
|
||||
from synapse.http.site import SynapseRequest
|
||||
@@ -53,10 +54,11 @@ from synapse.media._base import (
|
||||
ThumbnailInfo,
|
||||
get_filename_from_headers,
|
||||
respond_404,
|
||||
respond_with_multipart_responder,
|
||||
respond_with_responder,
|
||||
)
|
||||
from synapse.media.filepath import MediaFilePaths
|
||||
from synapse.media.media_storage import MediaStorage
|
||||
from synapse.media.media_storage import MediaStorage, MultipartResponder
|
||||
from synapse.media.storage_provider import StorageProviderWrapper
|
||||
from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
|
||||
from synapse.media.url_previewer import UrlPreviewer
|
||||
@@ -111,6 +113,12 @@ class MediaRepository:
|
||||
)
|
||||
self.prevent_media_downloads_from = hs.config.media.prevent_media_downloads_from
|
||||
|
||||
self.download_ratelimiter = Ratelimiter(
|
||||
store=hs.get_storage_controllers().main,
|
||||
clock=hs.get_clock(),
|
||||
cfg=hs.config.ratelimiting.remote_media_downloads,
|
||||
)
|
||||
|
||||
# List of StorageProviders where we should search for media and
|
||||
# potentially upload to.
|
||||
storage_providers = []
|
||||
@@ -422,6 +430,7 @@ class MediaRepository:
|
||||
media_id: str,
|
||||
name: Optional[str],
|
||||
max_timeout_ms: int,
|
||||
federation: bool = False,
|
||||
) -> None:
|
||||
"""Responds to requests for local media, if exists, or returns 404.
|
||||
|
||||
@@ -433,6 +442,7 @@ class MediaRepository:
|
||||
the filename in the Content-Disposition header of the response.
|
||||
max_timeout_ms: the maximum number of milliseconds to wait for the
|
||||
media to be uploaded.
|
||||
federation: whether the local media being fetched is for a federation request
|
||||
|
||||
Returns:
|
||||
Resolves once a response has successfully been written to request
|
||||
@@ -452,10 +462,17 @@ class MediaRepository:
|
||||
|
||||
file_info = FileInfo(None, media_id, url_cache=bool(url_cache))
|
||||
|
||||
responder = await self.media_storage.fetch_media(file_info)
|
||||
await respond_with_responder(
|
||||
request, responder, media_type, media_length, upload_name
|
||||
responder = await self.media_storage.fetch_media(
|
||||
file_info, media_info, federation
|
||||
)
|
||||
if federation:
|
||||
# this really should be a Multipart responder but just in case
|
||||
assert isinstance(responder, MultipartResponder)
|
||||
await respond_with_multipart_responder(request, responder, media_info)
|
||||
else:
|
||||
await respond_with_responder(
|
||||
request, responder, media_type, media_length, upload_name
|
||||
)
|
||||
|
||||
async def get_remote_media(
|
||||
self,
|
||||
@@ -464,6 +481,7 @@ class MediaRepository:
|
||||
media_id: str,
|
||||
name: Optional[str],
|
||||
max_timeout_ms: int,
|
||||
ip_address: str,
|
||||
) -> None:
|
||||
"""Respond to requests for remote media.
|
||||
|
||||
@@ -475,6 +493,7 @@ class MediaRepository:
|
||||
the filename in the Content-Disposition header of the response.
|
||||
max_timeout_ms: the maximum number of milliseconds to wait for the
|
||||
media to be uploaded.
|
||||
ip_address: the IP address of the requester
|
||||
|
||||
Returns:
|
||||
Resolves once a response has successfully been written to request
|
||||
@@ -500,7 +519,11 @@ class MediaRepository:
|
||||
key = (server_name, media_id)
|
||||
async with self.remote_media_linearizer.queue(key):
|
||||
responder, media_info = await self._get_remote_media_impl(
|
||||
server_name, media_id, max_timeout_ms
|
||||
server_name,
|
||||
media_id,
|
||||
max_timeout_ms,
|
||||
self.download_ratelimiter,
|
||||
ip_address,
|
||||
)
|
||||
|
||||
# We deliberately stream the file outside the lock
|
||||
@@ -517,7 +540,7 @@ class MediaRepository:
|
||||
respond_404(request)
|
||||
|
||||
async def get_remote_media_info(
|
||||
self, server_name: str, media_id: str, max_timeout_ms: int
|
||||
self, server_name: str, media_id: str, max_timeout_ms: int, ip_address: str
|
||||
) -> RemoteMedia:
|
||||
"""Gets the media info associated with the remote file, downloading
|
||||
if necessary.
|
||||
@@ -527,6 +550,7 @@ class MediaRepository:
|
||||
media_id: The media ID of the content (as defined by the remote server).
|
||||
max_timeout_ms: the maximum number of milliseconds to wait for the
|
||||
media to be uploaded.
|
||||
ip_address: IP address of the requester
|
||||
|
||||
Returns:
|
||||
The media info of the file
|
||||
@@ -542,7 +566,11 @@ class MediaRepository:
|
||||
key = (server_name, media_id)
|
||||
async with self.remote_media_linearizer.queue(key):
|
||||
responder, media_info = await self._get_remote_media_impl(
|
||||
server_name, media_id, max_timeout_ms
|
||||
server_name,
|
||||
media_id,
|
||||
max_timeout_ms,
|
||||
self.download_ratelimiter,
|
||||
ip_address,
|
||||
)
|
||||
|
||||
# Ensure we actually use the responder so that it releases resources
|
||||
@@ -553,7 +581,12 @@ class MediaRepository:
|
||||
return media_info
|
||||
|
||||
async def _get_remote_media_impl(
|
||||
self, server_name: str, media_id: str, max_timeout_ms: int
|
||||
self,
|
||||
server_name: str,
|
||||
media_id: str,
|
||||
max_timeout_ms: int,
|
||||
download_ratelimiter: Ratelimiter,
|
||||
ip_address: str,
|
||||
) -> Tuple[Optional[Responder], RemoteMedia]:
|
||||
"""Looks for media in local cache, if not there then attempt to
|
||||
download from remote server.
|
||||
@@ -564,6 +597,9 @@ class MediaRepository:
|
||||
remote server).
|
||||
max_timeout_ms: the maximum number of milliseconds to wait for the
|
||||
media to be uploaded.
|
||||
download_ratelimiter: a ratelimiter limiting remote media downloads, keyed to
|
||||
requester IP.
|
||||
ip_address: the IP address of the requester
|
||||
|
||||
Returns:
|
||||
A tuple of responder and the media info of the file.
|
||||
@@ -596,7 +632,7 @@ class MediaRepository:
|
||||
|
||||
try:
|
||||
media_info = await self._download_remote_file(
|
||||
server_name, media_id, max_timeout_ms
|
||||
server_name, media_id, max_timeout_ms, download_ratelimiter, ip_address
|
||||
)
|
||||
except SynapseError:
|
||||
raise
|
||||
@@ -630,6 +666,8 @@ class MediaRepository:
|
||||
server_name: str,
|
||||
media_id: str,
|
||||
max_timeout_ms: int,
|
||||
download_ratelimiter: Ratelimiter,
|
||||
ip_address: str,
|
||||
) -> RemoteMedia:
|
||||
"""Attempt to download the remote file from the given server name,
|
||||
using the given file_id as the local id.
|
||||
@@ -641,6 +679,9 @@ class MediaRepository:
|
||||
locally generated.
|
||||
max_timeout_ms: the maximum number of milliseconds to wait for the
|
||||
media to be uploaded.
|
||||
download_ratelimiter: a ratelimiter limiting remote media downloads, keyed to
|
||||
requester IP
|
||||
ip_address: the IP address of the requester
|
||||
|
||||
Returns:
|
||||
The media info of the file.
|
||||
@@ -650,7 +691,7 @@ class MediaRepository:
|
||||
|
||||
file_info = FileInfo(server_name=server_name, file_id=file_id)
|
||||
|
||||
with self.media_storage.store_into_file(file_info) as (f, fname, finish):
|
||||
async with self.media_storage.store_into_file(file_info) as (f, fname):
|
||||
try:
|
||||
length, headers = await self.client.download_media(
|
||||
server_name,
|
||||
@@ -658,6 +699,8 @@ class MediaRepository:
|
||||
output_stream=f,
|
||||
max_size=self.max_upload_size,
|
||||
max_timeout_ms=max_timeout_ms,
|
||||
download_ratelimiter=download_ratelimiter,
|
||||
ip_address=ip_address,
|
||||
)
|
||||
except RequestSendFailed as e:
|
||||
logger.warning(
|
||||
@@ -693,8 +736,6 @@ class MediaRepository:
|
||||
)
|
||||
raise SynapseError(502, "Failed to fetch remote media")
|
||||
|
||||
await finish()
|
||||
|
||||
if b"Content-Type" in headers:
|
||||
media_type = headers[b"Content-Type"][0].decode("ascii")
|
||||
else:
|
||||
@@ -1045,17 +1086,17 @@ class MediaRepository:
|
||||
),
|
||||
)
|
||||
|
||||
with self.media_storage.store_into_file(file_info) as (
|
||||
f,
|
||||
fname,
|
||||
finish,
|
||||
):
|
||||
async with self.media_storage.store_into_file(file_info) as (f, fname):
|
||||
try:
|
||||
await self.media_storage.write_to_file(t_byte_source, f)
|
||||
await finish()
|
||||
finally:
|
||||
t_byte_source.close()
|
||||
|
||||
# We flush and close the file to ensure that the bytes have
|
||||
# been written before getting the size.
|
||||
f.flush()
|
||||
f.close()
|
||||
|
||||
t_len = os.path.getsize(fname)
|
||||
|
||||
# Write to database
|
||||
|
||||
@@ -19,26 +19,33 @@
|
||||
#
|
||||
#
|
||||
import contextlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
from contextlib import closing
|
||||
from io import BytesIO
|
||||
from types import TracebackType
|
||||
from typing import (
|
||||
IO,
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Awaitable,
|
||||
AsyncIterator,
|
||||
BinaryIO,
|
||||
Callable,
|
||||
Generator,
|
||||
List,
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
Type,
|
||||
Union,
|
||||
)
|
||||
from uuid import uuid4
|
||||
|
||||
import attr
|
||||
from zope.interface import implementer
|
||||
|
||||
from twisted.internet import defer, interfaces
|
||||
from twisted.internet.defer import Deferred
|
||||
from twisted.internet.interfaces import IConsumer
|
||||
from twisted.protocols.basic import FileSender
|
||||
@@ -49,15 +56,19 @@ from synapse.logging.opentracing import start_active_span, trace, trace_with_opn
|
||||
from synapse.util import Clock
|
||||
from synapse.util.file_consumer import BackgroundFileConsumer
|
||||
|
||||
from ..storage.databases.main.media_repository import LocalMedia
|
||||
from ..types import JsonDict
|
||||
from ._base import FileInfo, Responder
|
||||
from .filepath import MediaFilePaths
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.media.storage_provider import StorageProvider
|
||||
from synapse.media.storage_provider import StorageProviderWrapper
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CRLF = b"\r\n"
|
||||
|
||||
|
||||
class MediaStorage:
|
||||
"""Responsible for storing/fetching files from local sources.
|
||||
@@ -74,7 +85,7 @@ class MediaStorage:
|
||||
hs: "HomeServer",
|
||||
local_media_directory: str,
|
||||
filepaths: MediaFilePaths,
|
||||
storage_providers: Sequence["StorageProvider"],
|
||||
storage_providers: Sequence["StorageProviderWrapper"],
|
||||
):
|
||||
self.hs = hs
|
||||
self.reactor = hs.get_reactor()
|
||||
@@ -97,11 +108,9 @@ class MediaStorage:
            the file path written to in the primary media store
        """

        with self.store_into_file(file_info) as (f, fname, finish_cb):
        async with self.store_into_file(file_info) as (f, fname):
            # Write to the main media repository
            await self.write_to_file(source, f)
            # Write to the other storage providers
            await finish_cb()

        return fname

@@ -111,32 +120,27 @@ class MediaStorage:
        await defer_to_thread(self.reactor, _write_file_synchronously, source, output)

    @trace_with_opname("MediaStorage.store_into_file")
    @contextlib.contextmanager
    def store_into_file(
    @contextlib.asynccontextmanager
    async def store_into_file(
        self, file_info: FileInfo
    ) -> Generator[Tuple[BinaryIO, str, Callable[[], Awaitable[None]]], None, None]:
        """Context manager used to get a file like object to write into, as
    ) -> AsyncIterator[Tuple[BinaryIO, str]]:
        """Async Context manager used to get a file like object to write into, as
        described by file_info.

        Actually yields a 3-tuple (file, fname, finish_cb), where file is a file
        like object that can be written to, fname is the absolute path of file
        on disk, and finish_cb is a function that returns an awaitable.
        Actually yields a 2-tuple (file, fname,), where file is a file
        like object that can be written to and fname is the absolute path of file
        on disk.

        fname can be used to read the contents from after upload, e.g. to
        generate thumbnails.

        finish_cb must be called and waited on after the file has been successfully been
        written to. Should not be called if there was an error. Checks for spam and
        stores the file into the configured storage providers.

        Args:
            file_info: Info about the file to store

        Example:

            with media_storage.store_into_file(info) as (f, fname, finish_cb):
            async with media_storage.store_into_file(info) as (f, fname,):
                # .. write into f ...
                await finish_cb()
        """

        path = self._file_info_to_path(file_info)
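The refactor above turns store_into_file from a synchronous context manager plus an explicit finish_cb into an @contextlib.asynccontextmanager, so the post-write steps run automatically once the async with block exits cleanly. A minimal sketch of that shape, with a made-up _after_write hook standing in for the real spam check and storage-provider replication:

import contextlib
import os
from typing import AsyncIterator, BinaryIO, Tuple


class SketchStorage:
    """Illustrative only: mirrors the shape of the new store_into_file."""

    def __init__(self, directory: str) -> None:
        self.directory = directory

    async def _after_write(self, fname: str) -> None:
        # Stand-in for the real post-write steps (spam check, pushing the file
        # out to secondary storage providers).
        pass

    @contextlib.asynccontextmanager
    async def store_into_file(self, name: str) -> AsyncIterator[Tuple[BinaryIO, str]]:
        os.makedirs(self.directory, exist_ok=True)
        fname = os.path.join(self.directory, name)
        try:
            with open(fname, "wb") as f:
                # Exceptions raised in the caller's block propagate from here.
                yield f, fname
            # Only reached if the caller's block exited without raising.
            await self._after_write(fname)
        except Exception:
            # On failure, remove the partially written file and re-raise.
            try:
                os.remove(fname)
            except OSError:
                pass
            raise

A caller then simply writes into f inside the async with block; there is no finish_cb to remember to call, and a failure inside the block removes the partially written file.
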
@@ -145,72 +149,55 @@ class MediaStorage:
|
||||
dirname = os.path.dirname(fname)
|
||||
os.makedirs(dirname, exist_ok=True)
|
||||
|
||||
finished_called = [False]
|
||||
|
||||
main_media_repo_write_trace_scope = start_active_span(
|
||||
"writing to main media repo"
|
||||
)
|
||||
main_media_repo_write_trace_scope.__enter__()
|
||||
|
||||
try:
|
||||
with open(fname, "wb") as f:
|
||||
with start_active_span("writing to main media repo"):
|
||||
with open(fname, "wb") as f:
|
||||
yield f, fname
|
||||
|
||||
async def finish() -> None:
|
||||
# When someone calls finish, we assume they are done writing to the main media repo
|
||||
main_media_repo_write_trace_scope.__exit__(None, None, None)
|
||||
with start_active_span("writing to other storage providers"):
|
||||
spam_check = (
|
||||
await self._spam_checker_module_callbacks.check_media_file_for_spam(
|
||||
ReadableFileWrapper(self.clock, fname), file_info
|
||||
)
|
||||
)
|
||||
if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
|
||||
logger.info("Blocking media due to spam checker")
|
||||
# Note that we'll delete the stored media, due to the
|
||||
# try/except below. The media also won't be stored in
|
||||
# the DB.
|
||||
# We currently ignore any additional field returned by
|
||||
# the spam-check API.
|
||||
raise SpamMediaException(errcode=spam_check[0])
|
||||
|
||||
with start_active_span("writing to other storage providers"):
|
||||
# Ensure that all writes have been flushed and close the
|
||||
# file.
|
||||
f.flush()
|
||||
f.close()
|
||||
for provider in self.storage_providers:
|
||||
with start_active_span(str(provider)):
|
||||
await provider.store_file(path, file_info)
|
||||
|
||||
spam_check = await self._spam_checker_module_callbacks.check_media_file_for_spam(
|
||||
ReadableFileWrapper(self.clock, fname), file_info
|
||||
)
|
||||
if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
|
||||
logger.info("Blocking media due to spam checker")
|
||||
# Note that we'll delete the stored media, due to the
|
||||
# try/except below. The media also won't be stored in
|
||||
# the DB.
|
||||
# We currently ignore any additional field returned by
|
||||
# the spam-check API.
|
||||
raise SpamMediaException(errcode=spam_check[0])
|
||||
|
||||
for provider in self.storage_providers:
|
||||
with start_active_span(str(provider)):
|
||||
await provider.store_file(path, file_info)
|
||||
|
||||
finished_called[0] = True
|
||||
|
||||
yield f, fname, finish
|
||||
except Exception as e:
|
||||
try:
|
||||
main_media_repo_write_trace_scope.__exit__(
|
||||
type(e), None, e.__traceback__
|
||||
)
|
||||
os.remove(fname)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
raise e from None
|
||||
|
||||
if not finished_called:
|
||||
exc = Exception("Finished callback not called")
|
||||
main_media_repo_write_trace_scope.__exit__(
|
||||
type(exc), None, exc.__traceback__
|
||||
)
|
||||
raise exc
|
||||
|
||||
async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
|
||||
async def fetch_media(
|
||||
self,
|
||||
file_info: FileInfo,
|
||||
media_info: Optional[LocalMedia] = None,
|
||||
federation: bool = False,
|
||||
) -> Optional[Responder]:
|
||||
"""Attempts to fetch media described by file_info from the local cache
|
||||
and configured storage providers.
|
||||
|
||||
Args:
|
||||
file_info
|
||||
file_info: Metadata about the media file
|
||||
media_info: Metadata about the media item
|
||||
federation: Whether this file is being fetched for a federation request
|
||||
|
||||
Returns:
|
||||
Returns a Responder if the file was found, otherwise None.
|
||||
If the file was found returns a Responder (a Multipart Responder if the requested
|
||||
file is for the federation /download endpoint), otherwise None.
|
||||
"""
|
||||
paths = [self._file_info_to_path(file_info)]
|
||||
|
||||
@@ -230,12 +217,19 @@ class MediaStorage:
|
||||
local_path = os.path.join(self.local_media_directory, path)
|
||||
if os.path.exists(local_path):
|
||||
logger.debug("responding with local file %s", local_path)
|
||||
return FileResponder(open(local_path, "rb"))
|
||||
if federation:
|
||||
assert media_info is not None
|
||||
boundary = uuid4().hex.encode("ascii")
|
||||
return MultipartResponder(
|
||||
open(local_path, "rb"), media_info, boundary
|
||||
)
|
||||
else:
|
||||
return FileResponder(open(local_path, "rb"))
|
||||
logger.debug("local file %s did not exist", local_path)
|
||||
|
||||
for provider in self.storage_providers:
|
||||
for path in paths:
|
||||
res: Any = await provider.fetch(path, file_info)
|
||||
res: Any = await provider.fetch(path, file_info, media_info, federation)
|
||||
if res:
|
||||
logger.debug("Streaming %s from %s", path, provider)
|
||||
return res
|
||||
@@ -349,7 +343,7 @@ class FileResponder(Responder):
|
||||
"""Wraps an open file that can be sent to a request.
|
||||
|
||||
Args:
|
||||
open_file: A file like object to be streamed ot the client,
|
||||
open_file: A file like object to be streamed to the client,
|
||||
is closed when finished streaming.
|
||||
"""
|
||||
|
||||
@@ -370,6 +364,38 @@
        self.open_file.close()


class MultipartResponder(Responder):
    """Wraps an open file, formats the response according to MSC3916 and sends it to a
    federation request.

    Args:
        open_file: A file like object to be streamed to the client,
            is closed when finished streaming.
        media_info: metadata about the media item
        boundary: bytes to use for the multipart response boundary
    """

    def __init__(self, open_file: IO, media_info: LocalMedia, boundary: bytes) -> None:
        self.open_file = open_file
        self.media_info = media_info
        self.boundary = boundary

    def write_to_consumer(self, consumer: IConsumer) -> Deferred:
        return make_deferred_yieldable(
            MultipartFileSender().beginFileTransfer(
                self.open_file, consumer, self.media_info.media_type, {}, self.boundary
            )
        )

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        self.open_file.close()


class SpamMediaException(NotFoundError):
    """The media was blocked by a spam checker, so we simply 404 the request (in
    the same way as if it was quarantined).
@@ -403,3 +429,151 @@ class ReadableFileWrapper:

        # We yield to the reactor by sleeping for 0 seconds.
        await self.clock.sleep(0)


@implementer(interfaces.IProducer)
class MultipartFileSender:
    """
    A producer that sends the contents of a file to a federation request in the format
    outlined in MSC3916 - a multipart/format-data response where the first field is a
    JSON object and the second is the requested file.

    This is a slight re-writing of twisted.protocols.basic.FileSender to achieve the format
    outlined above.
    """

    CHUNK_SIZE = 2**14

    lastSent = ""
    deferred: Optional[defer.Deferred] = None

    def beginFileTransfer(
        self,
        file: IO,
        consumer: IConsumer,
        file_content_type: str,
        json_object: JsonDict,
        boundary: bytes,
    ) -> Deferred:
        """
        Begin transferring a file

        Args:
            file: The file object to read data from
            consumer: The synapse request to write the data to
            file_content_type: The content-type of the file
            json_object: The JSON object to write to the first field of the response
            boundary: bytes to be used as the multipart/form-data boundary

        Returns: A deferred whose callback will be invoked when the file has
        been completely written to the consumer. The last byte written to the
        consumer is passed to the callback.
        """
        self.file: Optional[IO] = file
        self.consumer = consumer
        self.json_field = json_object
        self.json_field_written = False
        self.content_type_written = False
        self.file_content_type = file_content_type
        self.boundary = boundary
        self.deferred: Deferred = defer.Deferred()
        self.consumer.registerProducer(self, False)
        # while it's not entirely clear why this assignment is necessary, it mirrors
        # the behavior in FileSender.beginFileTransfer and thus is preserved here
        deferred = self.deferred
        return deferred

    def resumeProducing(self) -> None:
        # write the first field, which will always be a json field
        if not self.json_field_written:
            self.consumer.write(CRLF + b"--" + self.boundary + CRLF)

            content_type = Header(b"Content-Type", b"application/json")
            self.consumer.write(bytes(content_type) + CRLF)

            json_field = json.dumps(self.json_field)
            json_bytes = json_field.encode("utf-8")
            self.consumer.write(json_bytes)
            self.consumer.write(CRLF + b"--" + self.boundary + CRLF)

            self.json_field_written = True

        chunk: Any = ""
        if self.file:
            # if we haven't written the content type yet, do so
            if not self.content_type_written:
                type = self.file_content_type.encode("utf-8")
                content_type = Header(b"Content-Type", type)
                self.consumer.write(bytes(content_type) + CRLF)
                self.content_type_written = True

            chunk = self.file.read(self.CHUNK_SIZE)

        if not chunk:
            # we've reached the end of the file
            self.consumer.write(CRLF + b"--" + self.boundary + b"--" + CRLF)
            self.file = None
            self.consumer.unregisterProducer()

            if self.deferred:
                self.deferred.callback(self.lastSent)
                self.deferred = None
            return

        self.consumer.write(chunk)
        self.lastSent = chunk[-1:]

    def pauseProducing(self) -> None:
        pass

    def stopProducing(self) -> None:
        if self.deferred:
            self.deferred.errback(Exception("Consumer asked us to stop producing"))
            self.deferred = None


class Header:
    """
    `Header` This class is a tiny wrapper that produces
    request headers. We can't use standard python header
    class because it encodes unicode fields using =? bla bla ?=
    encoding, which is correct, but no one in HTTP world expects
    that, everyone wants utf-8 raw bytes. (stolen from treq.multipart)

    """

    def __init__(
        self,
        name: bytes,
        value: Any,
        params: Optional[List[Tuple[Any, Any]]] = None,
    ):
        self.name = name
        self.value = value
        self.params = params or []

    def add_param(self, name: Any, value: Any) -> None:
        self.params.append((name, value))

    def __bytes__(self) -> bytes:
        with closing(BytesIO()) as h:
            h.write(self.name + b": " + escape(self.value).encode("us-ascii"))
            if self.params:
                for name, val in self.params:
                    h.write(b"; ")
                    h.write(escape(name).encode("us-ascii"))
                    h.write(b"=")
                    h.write(b'"' + escape(val).encode("utf-8") + b'"')
            h.seek(0)
            return h.read()


def escape(value: Union[str, bytes]) -> str:
    """
    This function prevents header values from corrupting the request,
    a newline in the file name parameter makes form-data request unreadable
    for a majority of parsers. (stolen from treq.multipart)
    """
    if isinstance(value, bytes):
        value = value.decode("utf-8")
    return value.replace("\r", "").replace("\n", "").replace('"', '\\"')

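Because MultipartFileSender registers itself as a pull producer (streaming=False), it can be driven by hand with a minimal in-memory consumer and no reactor, which makes the framing easy to inspect. The harness below is a test-style sketch, not how Synapse drives it (the real consumer is the request transport); it assumes MultipartFileSender is importable from synapse.media.media_storage as added in this diff, and InMemoryConsumer only duck-types the parts of IConsumer the producer touches.

from io import BytesIO

from synapse.media.media_storage import MultipartFileSender  # added in this diff


class InMemoryConsumer:
    """Just enough of IConsumer for a pull producer: collect writes in a buffer."""

    def __init__(self) -> None:
        self.buffer = bytearray()
        self.producer = None

    def registerProducer(self, producer, streaming: bool) -> None:
        self.producer = producer

    def unregisterProducer(self) -> None:
        self.producer = None

    def write(self, data: bytes) -> None:
        self.buffer.extend(data)


consumer = InMemoryConsumer()
sender = MultipartFileSender()
sender.beginFileTransfer(
    BytesIO(b"hello"), consumer, "text/plain", {"metadata": {}}, b"deadbeef"
)
# Pull chunks until the producer unregisters itself at end of file.
while consumer.producer is not None:
    consumer.producer.resumeProducing()

print(bytes(consumer.buffer))
# The buffer now holds the JSON part, the file part, then the closing boundary:
# b'\r\n--deadbeef\r\nContent-Type: application/json\r\n{"metadata": {}}\r\n--deadbeef\r\nContent-Type: text/plain\r\nhello\r\n--deadbeef--\r\n'
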
@@ -24,14 +24,16 @@ import logging
|
||||
import os
|
||||
import shutil
|
||||
from typing import TYPE_CHECKING, Callable, Optional
|
||||
from uuid import uuid4
|
||||
|
||||
from synapse.config._base import Config
|
||||
from synapse.logging.context import defer_to_thread, run_in_background
|
||||
from synapse.logging.opentracing import start_active_span, trace_with_opname
|
||||
from synapse.util.async_helpers import maybe_awaitable
|
||||
|
||||
from ..storage.databases.main.media_repository import LocalMedia
|
||||
from ._base import FileInfo, Responder
|
||||
from .media_storage import FileResponder
|
||||
from .media_storage import FileResponder, MultipartResponder
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -55,13 +57,21 @@ class StorageProvider(metaclass=abc.ABCMeta):
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
|
||||
async def fetch(
|
||||
self,
|
||||
path: str,
|
||||
file_info: FileInfo,
|
||||
media_info: Optional[LocalMedia] = None,
|
||||
federation: bool = False,
|
||||
) -> Optional[Responder]:
|
||||
"""Attempt to fetch the file described by file_info and stream it
|
||||
into writer.
|
||||
|
||||
Args:
|
||||
path: Relative path of file in local cache
|
||||
file_info: The metadata of the file.
|
||||
media_info: metadata of the media item
|
||||
federation: Whether the requested media is for a federation request
|
||||
|
||||
Returns:
|
||||
Returns a Responder if the provider has the file, otherwise returns None.
|
||||
@@ -124,7 +134,13 @@ class StorageProviderWrapper(StorageProvider):
|
||||
run_in_background(store)
|
||||
|
||||
@trace_with_opname("StorageProviderWrapper.fetch")
|
||||
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
|
||||
async def fetch(
|
||||
self,
|
||||
path: str,
|
||||
file_info: FileInfo,
|
||||
media_info: Optional[LocalMedia] = None,
|
||||
federation: bool = False,
|
||||
) -> Optional[Responder]:
|
||||
if file_info.url_cache:
|
||||
# Files in the URL preview cache definitely aren't stored here,
|
||||
# so avoid any potentially slow I/O or network access.
|
||||
@@ -132,7 +148,9 @@ class StorageProviderWrapper(StorageProvider):
|
||||
|
||||
# store_file is supposed to return an Awaitable, but guard
|
||||
# against improper implementations.
|
||||
return await maybe_awaitable(self.backend.fetch(path, file_info))
|
||||
return await maybe_awaitable(
|
||||
self.backend.fetch(path, file_info, media_info, federation)
|
||||
)
|
||||
|
||||
|
||||
class FileStorageProviderBackend(StorageProvider):
|
||||
@@ -172,11 +190,23 @@ class FileStorageProviderBackend(StorageProvider):
|
||||
)
|
||||
|
||||
@trace_with_opname("FileStorageProviderBackend.fetch")
|
||||
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
|
||||
async def fetch(
|
||||
self,
|
||||
path: str,
|
||||
file_info: FileInfo,
|
||||
media_info: Optional[LocalMedia] = None,
|
||||
federation: bool = False,
|
||||
) -> Optional[Responder]:
|
||||
"""See StorageProvider.fetch"""
|
||||
|
||||
backup_fname = os.path.join(self.base_directory, path)
|
||||
if os.path.isfile(backup_fname):
|
||||
if federation:
|
||||
assert media_info is not None
|
||||
boundary = uuid4().hex.encode("ascii")
|
||||
return MultipartResponder(
|
||||
open(backup_fname, "rb"), media_info, boundary
|
||||
)
|
||||
return FileResponder(open(backup_fname, "rb"))
|
||||
|
||||
return None
|
||||
|
||||
@@ -22,11 +22,27 @@
|
||||
import logging
|
||||
from io import BytesIO
|
||||
from types import TracebackType
|
||||
from typing import Optional, Tuple, Type
|
||||
from typing import TYPE_CHECKING, List, Optional, Tuple, Type
|
||||
|
||||
from PIL import Image
|
||||
|
||||
from synapse.api.errors import Codes, SynapseError, cs_error
|
||||
from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP
|
||||
from synapse.http.server import respond_with_json
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.logging.opentracing import trace
|
||||
from synapse.media._base import (
|
||||
FileInfo,
|
||||
ThumbnailInfo,
|
||||
respond_404,
|
||||
respond_with_file,
|
||||
respond_with_responder,
|
||||
)
|
||||
from synapse.media.media_storage import MediaStorage
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.media.media_repository import MediaRepository
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -231,3 +247,473 @@ class Thumbnailer:
|
||||
def __del__(self) -> None:
|
||||
# Make sure we actually do close the image, rather than leak data.
|
||||
self.close()
|
||||
|
||||
|
||||
class ThumbnailProvider:
|
||||
def __init__(
|
||||
self,
|
||||
hs: "HomeServer",
|
||||
media_repo: "MediaRepository",
|
||||
media_storage: MediaStorage,
|
||||
):
|
||||
self.hs = hs
|
||||
self.media_repo = media_repo
|
||||
self.media_storage = media_storage
|
||||
self.store = hs.get_datastores().main
|
||||
self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
|
||||
|
||||
async def respond_local_thumbnail(
|
||||
self,
|
||||
request: SynapseRequest,
|
||||
media_id: str,
|
||||
width: int,
|
||||
height: int,
|
||||
method: str,
|
||||
m_type: str,
|
||||
max_timeout_ms: int,
|
||||
) -> None:
|
||||
media_info = await self.media_repo.get_local_media_info(
|
||||
request, media_id, max_timeout_ms
|
||||
)
|
||||
if not media_info:
|
||||
return
|
||||
|
||||
thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
|
||||
await self._select_and_respond_with_thumbnail(
|
||||
request,
|
||||
width,
|
||||
height,
|
||||
method,
|
||||
m_type,
|
||||
thumbnail_infos,
|
||||
media_id,
|
||||
media_id,
|
||||
url_cache=bool(media_info.url_cache),
|
||||
server_name=None,
|
||||
)
|
||||
|
||||
async def select_or_generate_local_thumbnail(
|
||||
self,
|
||||
request: SynapseRequest,
|
||||
media_id: str,
|
||||
desired_width: int,
|
||||
desired_height: int,
|
||||
desired_method: str,
|
||||
desired_type: str,
|
||||
max_timeout_ms: int,
|
||||
) -> None:
|
||||
media_info = await self.media_repo.get_local_media_info(
|
||||
request, media_id, max_timeout_ms
|
||||
)
|
||||
|
||||
if not media_info:
|
||||
return
|
||||
|
||||
thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
|
||||
for info in thumbnail_infos:
|
||||
t_w = info.width == desired_width
|
||||
t_h = info.height == desired_height
|
||||
t_method = info.method == desired_method
|
||||
t_type = info.type == desired_type
|
||||
|
||||
if t_w and t_h and t_method and t_type:
|
||||
file_info = FileInfo(
|
||||
server_name=None,
|
||||
file_id=media_id,
|
||||
url_cache=bool(media_info.url_cache),
|
||||
thumbnail=info,
|
||||
)
|
||||
|
||||
responder = await self.media_storage.fetch_media(file_info)
|
||||
if responder:
|
||||
await respond_with_responder(
|
||||
request, responder, info.type, info.length
|
||||
)
|
||||
return
|
||||
|
||||
logger.debug("We don't have a thumbnail of that size. Generating")
|
||||
|
||||
# Okay, so we generate one.
|
||||
file_path = await self.media_repo.generate_local_exact_thumbnail(
|
||||
media_id,
|
||||
desired_width,
|
||||
desired_height,
|
||||
desired_method,
|
||||
desired_type,
|
||||
url_cache=bool(media_info.url_cache),
|
||||
)
|
||||
|
||||
if file_path:
|
||||
await respond_with_file(request, desired_type, file_path)
|
||||
else:
|
||||
logger.warning("Failed to generate thumbnail")
|
||||
raise SynapseError(400, "Failed to generate thumbnail.")
|
||||
|
||||
async def select_or_generate_remote_thumbnail(
|
||||
self,
|
||||
request: SynapseRequest,
|
||||
server_name: str,
|
||||
media_id: str,
|
||||
desired_width: int,
|
||||
desired_height: int,
|
||||
desired_method: str,
|
||||
desired_type: str,
|
||||
max_timeout_ms: int,
|
||||
ip_address: str,
|
||||
) -> None:
|
||||
media_info = await self.media_repo.get_remote_media_info(
|
||||
server_name, media_id, max_timeout_ms, ip_address
|
||||
)
|
||||
if not media_info:
|
||||
respond_404(request)
|
||||
return
|
||||
|
||||
thumbnail_infos = await self.store.get_remote_media_thumbnails(
|
||||
server_name, media_id
|
||||
)
|
||||
|
||||
file_id = media_info.filesystem_id
|
||||
|
||||
for info in thumbnail_infos:
|
||||
t_w = info.width == desired_width
|
||||
t_h = info.height == desired_height
|
||||
t_method = info.method == desired_method
|
||||
t_type = info.type == desired_type
|
||||
|
||||
if t_w and t_h and t_method and t_type:
|
||||
file_info = FileInfo(
|
||||
server_name=server_name,
|
||||
file_id=file_id,
|
||||
thumbnail=info,
|
||||
)
|
||||
|
||||
responder = await self.media_storage.fetch_media(file_info)
|
||||
if responder:
|
||||
await respond_with_responder(
|
||||
request, responder, info.type, info.length
|
||||
)
|
||||
return
|
||||
|
||||
logger.debug("We don't have a thumbnail of that size. Generating")
|
||||
|
||||
# Okay, so we generate one.
|
||||
file_path = await self.media_repo.generate_remote_exact_thumbnail(
|
||||
server_name,
|
||||
file_id,
|
||||
media_id,
|
||||
desired_width,
|
||||
desired_height,
|
||||
desired_method,
|
||||
desired_type,
|
||||
)
|
||||
|
||||
if file_path:
|
||||
await respond_with_file(request, desired_type, file_path)
|
||||
else:
|
||||
logger.warning("Failed to generate thumbnail")
|
||||
raise SynapseError(400, "Failed to generate thumbnail.")
|
||||
|
||||
async def respond_remote_thumbnail(
|
||||
self,
|
||||
request: SynapseRequest,
|
||||
server_name: str,
|
||||
media_id: str,
|
||||
width: int,
|
||||
height: int,
|
||||
method: str,
|
||||
m_type: str,
|
||||
max_timeout_ms: int,
|
||||
ip_address: str,
|
||||
) -> None:
|
||||
# TODO: Don't download the whole remote file
|
||||
# We should proxy the thumbnail from the remote server instead of
|
||||
# downloading the remote file and generating our own thumbnails.
|
||||
media_info = await self.media_repo.get_remote_media_info(
|
||||
server_name, media_id, max_timeout_ms, ip_address
|
||||
)
|
||||
if not media_info:
|
||||
return
|
||||
|
||||
thumbnail_infos = await self.store.get_remote_media_thumbnails(
|
||||
server_name, media_id
|
||||
)
|
||||
await self._select_and_respond_with_thumbnail(
|
||||
request,
|
||||
width,
|
||||
height,
|
||||
method,
|
||||
m_type,
|
||||
thumbnail_infos,
|
||||
media_id,
|
||||
media_info.filesystem_id,
|
||||
url_cache=False,
|
||||
server_name=server_name,
|
||||
)
|
||||
|
||||
async def _select_and_respond_with_thumbnail(
|
||||
self,
|
||||
request: SynapseRequest,
|
||||
desired_width: int,
|
||||
desired_height: int,
|
||||
desired_method: str,
|
||||
desired_type: str,
|
||||
thumbnail_infos: List[ThumbnailInfo],
|
||||
media_id: str,
|
||||
file_id: str,
|
||||
url_cache: bool,
|
||||
server_name: Optional[str] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Respond to a request with an appropriate thumbnail from the previously generated thumbnails.
|
||||
|
||||
Args:
|
||||
request: The incoming request.
|
||||
desired_width: The desired width, the returned thumbnail may be larger than this.
|
||||
desired_height: The desired height, the returned thumbnail may be larger than this.
|
||||
desired_method: The desired method used to generate the thumbnail.
|
||||
desired_type: The desired content-type of the thumbnail.
|
||||
thumbnail_infos: A list of thumbnail info of candidate thumbnails.
|
||||
file_id: The ID of the media that a thumbnail is being requested for.
|
||||
url_cache: True if this is from a URL cache.
|
||||
server_name: The server name, if this is a remote thumbnail.
|
||||
"""
|
||||
logger.debug(
|
||||
"_select_and_respond_with_thumbnail: media_id=%s desired=%sx%s (%s) thumbnail_infos=%s",
|
||||
media_id,
|
||||
desired_width,
|
||||
desired_height,
|
||||
desired_method,
|
||||
thumbnail_infos,
|
||||
)
|
||||
|
||||
# If `dynamic_thumbnails` is enabled, we expect Synapse to go down a
|
||||
# different code path to handle it.
|
||||
assert not self.dynamic_thumbnails
|
||||
|
||||
if thumbnail_infos:
|
||||
file_info = self._select_thumbnail(
|
||||
desired_width,
|
||||
desired_height,
|
||||
desired_method,
|
||||
desired_type,
|
||||
thumbnail_infos,
|
||||
file_id,
|
||||
url_cache,
|
||||
server_name,
|
||||
)
|
||||
if not file_info:
|
||||
logger.info("Couldn't find a thumbnail matching the desired inputs")
|
||||
respond_404(request)
|
||||
return
|
||||
|
||||
# The thumbnail property must exist.
|
||||
assert file_info.thumbnail is not None
|
||||
|
||||
responder = await self.media_storage.fetch_media(file_info)
|
||||
if responder:
|
||||
await respond_with_responder(
|
||||
request,
|
||||
responder,
|
||||
file_info.thumbnail.type,
|
||||
file_info.thumbnail.length,
|
||||
)
|
||||
return
|
||||
|
||||
# If we can't find the thumbnail we regenerate it. This can happen
|
||||
# if e.g. we've deleted the thumbnails but still have the original
|
||||
# image somewhere.
|
||||
#
|
||||
# Since we have an entry for the thumbnail in the DB we a) know we
|
||||
# have have successfully generated the thumbnail in the past (so we
|
||||
# don't need to worry about repeatedly failing to generate
|
||||
# thumbnails), and b) have already calculated that appropriate
|
||||
# width/height/method so we can just call the "generate exact"
|
||||
# methods.
|
||||
|
||||
# First let's check that we do actually have the original image
|
||||
# still. This will throw a 404 if we don't.
|
||||
# TODO: We should refetch the thumbnails for remote media.
|
||||
await self.media_storage.ensure_media_is_in_local_cache(
|
||||
FileInfo(server_name, file_id, url_cache=url_cache)
|
||||
)
|
||||
|
||||
if server_name:
|
||||
await self.media_repo.generate_remote_exact_thumbnail(
|
||||
server_name,
|
||||
file_id=file_id,
|
||||
media_id=media_id,
|
||||
t_width=file_info.thumbnail.width,
|
||||
t_height=file_info.thumbnail.height,
|
||||
t_method=file_info.thumbnail.method,
|
||||
t_type=file_info.thumbnail.type,
|
||||
)
|
||||
else:
|
||||
await self.media_repo.generate_local_exact_thumbnail(
|
||||
media_id=media_id,
|
||||
t_width=file_info.thumbnail.width,
|
||||
t_height=file_info.thumbnail.height,
|
||||
t_method=file_info.thumbnail.method,
|
||||
t_type=file_info.thumbnail.type,
|
||||
url_cache=url_cache,
|
||||
)
|
||||
|
||||
responder = await self.media_storage.fetch_media(file_info)
|
||||
await respond_with_responder(
|
||||
request,
|
||||
responder,
|
||||
file_info.thumbnail.type,
|
||||
file_info.thumbnail.length,
|
||||
)
|
||||
else:
|
||||
# This might be because:
|
||||
# 1. We can't create thumbnails for the given media (corrupted or
|
||||
# unsupported file type), or
|
||||
# 2. The thumbnailing process never ran or errored out initially
|
||||
# when the media was first uploaded (these bugs should be
|
||||
# reported and fixed).
|
||||
# Note that we don't attempt to generate a thumbnail now because
|
||||
# `dynamic_thumbnails` is disabled.
|
||||
logger.info("Failed to find any generated thumbnails")
|
||||
|
||||
assert request.path is not None
|
||||
respond_with_json(
|
||||
request,
|
||||
400,
|
||||
cs_error(
|
||||
"Cannot find any thumbnails for the requested media ('%s'). This might mean the media is not a supported_media_format=(%s) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)"
|
||||
% (
|
||||
request.path.decode(),
|
||||
", ".join(THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP.keys()),
|
||||
),
|
||||
code=Codes.UNKNOWN,
|
||||
),
|
||||
send_cors=True,
|
||||
)
|
||||
|
||||
def _select_thumbnail(
|
||||
self,
|
||||
desired_width: int,
|
||||
desired_height: int,
|
||||
desired_method: str,
|
||||
desired_type: str,
|
||||
thumbnail_infos: List[ThumbnailInfo],
|
||||
file_id: str,
|
||||
url_cache: bool,
|
||||
server_name: Optional[str],
|
||||
) -> Optional[FileInfo]:
|
||||
"""
|
||||
Choose an appropriate thumbnail from the previously generated thumbnails.
|
||||
|
||||
Args:
|
||||
desired_width: The desired width, the returned thumbnail may be larger than this.
|
||||
desired_height: The desired height, the returned thumbnail may be larger than this.
|
||||
desired_method: The desired method used to generate the thumbnail.
|
||||
desired_type: The desired content-type of the thumbnail.
|
||||
thumbnail_infos: A list of thumbnail infos of candidate thumbnails.
|
||||
file_id: The ID of the media that a thumbnail is being requested for.
|
||||
url_cache: True if this is from a URL cache.
|
||||
server_name: The server name, if this is a remote thumbnail.
|
||||
|
||||
Returns:
|
||||
The thumbnail which best matches the desired parameters.
|
||||
"""
|
||||
desired_method = desired_method.lower()
|
||||
|
||||
# The chosen thumbnail.
|
||||
thumbnail_info = None
|
||||
|
||||
d_w = desired_width
|
||||
d_h = desired_height
|
||||
|
||||
if desired_method == "crop":
|
||||
# Thumbnails that match equal or larger sizes of desired width/height.
|
||||
crop_info_list: List[
|
||||
Tuple[int, int, int, bool, Optional[int], ThumbnailInfo]
|
||||
] = []
|
||||
# Other thumbnails.
|
||||
crop_info_list2: List[
|
||||
Tuple[int, int, int, bool, Optional[int], ThumbnailInfo]
|
||||
] = []
|
||||
for info in thumbnail_infos:
|
||||
# Skip thumbnails generated with different methods.
|
||||
if info.method != "crop":
|
||||
continue
|
||||
|
||||
t_w = info.width
|
||||
t_h = info.height
|
||||
aspect_quality = abs(d_w * t_h - d_h * t_w)
|
||||
min_quality = 0 if d_w <= t_w and d_h <= t_h else 1
|
||||
size_quality = abs((d_w - t_w) * (d_h - t_h))
|
||||
type_quality = desired_type != info.type
|
||||
length_quality = info.length
|
||||
if t_w >= d_w or t_h >= d_h:
|
||||
crop_info_list.append(
|
||||
(
|
||||
aspect_quality,
|
||||
min_quality,
|
||||
size_quality,
|
||||
type_quality,
|
||||
length_quality,
|
||||
info,
|
||||
)
|
||||
)
|
||||
else:
|
||||
crop_info_list2.append(
|
||||
(
|
||||
aspect_quality,
|
||||
min_quality,
|
||||
size_quality,
|
||||
type_quality,
|
||||
length_quality,
|
||||
info,
|
||||
)
|
||||
)
|
||||
# Pick the most appropriate thumbnail. Some values of `desired_width` and
|
||||
# `desired_height` may result in a tie, in which case we avoid comparing on
|
||||
# the thumbnail info and pick the thumbnail that appears earlier
|
||||
# in the list of candidates.
|
||||
if crop_info_list:
|
||||
thumbnail_info = min(crop_info_list, key=lambda t: t[:-1])[-1]
|
||||
elif crop_info_list2:
|
||||
thumbnail_info = min(crop_info_list2, key=lambda t: t[:-1])[-1]
|
||||
elif desired_method == "scale":
|
||||
# Thumbnails that match equal or larger sizes of desired width/height.
|
||||
info_list: List[Tuple[int, bool, int, ThumbnailInfo]] = []
|
||||
# Other thumbnails.
|
||||
info_list2: List[Tuple[int, bool, int, ThumbnailInfo]] = []
|
||||
|
||||
for info in thumbnail_infos:
|
||||
# Skip thumbnails generated with different methods.
|
||||
if info.method != "scale":
|
||||
continue
|
||||
|
||||
t_w = info.width
|
||||
t_h = info.height
|
||||
size_quality = abs((d_w - t_w) * (d_h - t_h))
|
||||
type_quality = desired_type != info.type
|
||||
length_quality = info.length
|
||||
if t_w >= d_w or t_h >= d_h:
|
||||
info_list.append((size_quality, type_quality, length_quality, info))
|
||||
else:
|
||||
info_list2.append(
|
||||
(size_quality, type_quality, length_quality, info)
|
||||
)
|
||||
# Pick the most appropriate thumbnail. Some values of `desired_width` and
|
||||
# `desired_height` may result in a tie, in which case we avoid comparing on
|
||||
# the thumbnail info and pick the thumbnail that appears earlier
|
||||
# in the list of candidates.
|
||||
if info_list:
|
||||
thumbnail_info = min(info_list, key=lambda t: t[:-1])[-1]
|
||||
elif info_list2:
|
||||
thumbnail_info = min(info_list2, key=lambda t: t[:-1])[-1]
|
||||
|
||||
if thumbnail_info:
|
||||
return FileInfo(
|
||||
file_id=file_id,
|
||||
url_cache=url_cache,
|
||||
server_name=server_name,
|
||||
thumbnail=thumbnail_info,
|
||||
)
|
||||
|
||||
# No matching thumbnail was found.
|
||||
return None
|
||||
|
||||
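_select_thumbnail above ranks candidates by building comparison tuples (aspect-ratio match, whether the candidate is at least as large as requested, area difference, content-type mismatch, byte length) and taking min() over them, so ties fall back to the earlier fields. A small worked example of that tuple-ordering trick, using made-up candidate sizes rather than real ThumbnailInfo objects:

from typing import List, Tuple

# (width, height, content_type, length) of hypothetical pre-generated thumbnails
candidates = [
    (320, 240, "image/png", 120_000),
    (640, 480, "image/jpeg", 60_000),
    (800, 600, "image/jpeg", 90_000),
]

d_w, d_h, desired_type = 640, 480, "image/jpeg"

scored: List[Tuple[int, int, int, bool, int, tuple]] = []
for t_w, t_h, t_type, length in candidates:
    aspect_quality = abs(d_w * t_h - d_h * t_w)          # 0 when aspect ratios match
    min_quality = 0 if d_w <= t_w and d_h <= t_h else 1  # prefer at-least-as-large
    size_quality = abs((d_w - t_w) * (d_h - t_h))        # prefer the closest area
    type_quality = desired_type != t_type                # prefer an exact content-type
    scored.append((aspect_quality, min_quality, size_quality, type_quality, length, (t_w, t_h, t_type)))

best = min(scored, key=lambda t: t[:-1])[-1]
print(best)  # (640, 480, 'image/jpeg') -- the exact match wins on every field
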
@@ -592,7 +592,7 @@ class UrlPreviewer:

        file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True)

        with self.media_storage.store_into_file(file_info) as (f, fname, finish):
        async with self.media_storage.store_into_file(file_info) as (f, fname):
            if url.startswith("data:"):
                if not allow_data_urls:
                    raise SynapseError(
@@ -603,8 +603,6 @@
            else:
                download_result = await self._download_url(url, f)

            await finish()

            try:
                time_now_ms = self.clock.time_msec()


@@ -721,6 +721,7 @@ class Notifier:
                        user.to_string(),
                        new_events,
                        is_peeking=is_peeking,
                        msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
                    )
                elif keyname == StreamKeyType.PRESENCE:
                    now = self.clock.time_msec()
@@ -762,6 +763,29 @@

        return result

    async def wait_for_stream_token(self, stream_token: StreamToken) -> bool:
        """Wait for this worker to catch up with the given stream token."""

        start = self.clock.time_msec()
        while True:
            current_token = self.event_sources.get_current_token()
            if stream_token.is_before_or_eq(current_token):
                return True

            now = self.clock.time_msec()

            if now - start > 10_000:
                return False

            logger.info(
                "Waiting for current token to reach %s; currently at %s",
                stream_token,
                current_token,
            )

            # TODO: be better
            await self.clock.sleep(0.5)

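wait_for_stream_token polls the current token every 0.5 seconds and gives up after 10 seconds, returning False instead of raising. A hypothetical caller (not part of this diff; the handler and FakeNotifier below are invented for illustration, and the stream token is simplified to an int) would check the boolean and degrade gracefully:

import asyncio
import logging

logger = logging.getLogger(__name__)


class FakeNotifier:
    # Stand-in with the same signature as Notifier.wait_for_stream_token; always
    # reports that this worker has caught up.
    async def wait_for_stream_token(self, stream_token: int) -> bool:
        return True


async def handle_request(notifier: FakeNotifier, stream_token: int) -> str:
    caught_up = await notifier.wait_for_stream_token(stream_token)
    if not caught_up:
        # Degrade gracefully rather than blocking the request forever.
        logger.warning("Timed out waiting for stream token %s", stream_token)
        return "stale"
    return "caught-up"


print(asyncio.run(handle_request(FakeNotifier(), 42)))  # caught-up
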
    async def _get_room_ids(
        self, user: UserID, explicit_room_id: Optional[str]
    ) -> Tuple[StrCollection, bool]:

@@ -529,7 +529,10 @@ class Mailer:
        }

        the_events = await filter_events_for_client(
            self._storage_controllers, user_id, results.events_before
            self._storage_controllers,
            user_id,
            results.events_before,
            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
        )
        the_events.append(notif_event)
