Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-07 01:20:16 +00:00)

Compare commits: v1.111.1...erikj/ss_i (88 commits)
The compare view lists the 88 commits in this range; the author and date columns were not captured in this mirror view, leaving only the SHA1s:

aa8cda9574, 2226ef0790, 50f47345c1, 51c282c813, 30e9f6e469, eb62d12063, ceb3686dcd, 1dfa59b238,
bef6568537, 244a255065, 932cb0a928, 2dad718265, 5d8446298c, d845e939a9, 23727869c7, c270355349,
e3db7b2d81, 2b620e0a15, 39731bb205, 1d6186265a, 46de0ee16b, b221f0b84b, b2c55bd049, ed583d9c81,
f76dc9923c, 7e997fb8b1, dbc2290cbe, 2f6b86e79a, 37f9876ccf, 8b449a8ce6, 53db8a914e, be726724a8,
62ae56a4ac, 808dab0699, 34306be5aa, be4a16ff44, 568051c0f0, ebbabfe782, 69ac4b6a6e, 729026e604,
bdf37ad4c4, 8bbc98e66d, 4b9f4c2abf, e8ee784c75, 48c1307911, d225b6b3eb, 1daae43f3a, a9ee832e48,
13a99fba1b, e3a0681ecf, de05a64246, d221512498, ed0face8ad, 73529d3732, 1648337775, dc8ddc6472,
d3f9afd8d9, 43c865f7c9, 71d83477cb, 6a01af59e1, f583f1dce4, a574de0062, 3fee32ed6b, 5884f0a956,
79924aebef, 83894180b2, 429ecb7564, 9e1acea051, 899d33f2ba, df11af14db, d88ba45db9, 14f2b1eb00,
2af729a193, 0de0689ae8, 4c44020838, 4f6194492a, ab62aa09da, fb66e938b2, 5a97bbd895, 606da398fc,
677142b6a9, 342f0c35b7, 5871daf877, 30e14c8510, 3232bc2982, 4ca13ce0dd, 8e229535fa, 1cf3ff6b40
.github/workflows/tests.yml (vendored), 4 lines changed:

@@ -305,7 +305,7 @@ jobs:
       - lint-readme
     runs-on: ubuntu-latest
     steps:
-      - uses: matrix-org/done-action@v2
+      - uses: matrix-org/done-action@v3
        with:
          needs: ${{ toJSON(needs) }}

@@ -737,7 +737,7 @@ jobs:
       - linting-done
     runs-on: ubuntu-latest
     steps:
-      - uses: matrix-org/done-action@v2
+      - uses: matrix-org/done-action@v3
        with:
          needs: ${{ toJSON(needs) }}

CHANGES.md, 128 lines added:

@@ -1,3 +1,131 @@

# Synapse 1.113.0rc1 (2024-08-06)

### Features

- Track which rooms have been sent to clients in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17447](https://github.com/element-hq/synapse/issues/17447))
- Add Account Data extension support to the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17477](https://github.com/element-hq/synapse/issues/17477))
- Add receipts extension support to the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17489](https://github.com/element-hq/synapse/issues/17489))
- Add typing notification extension support to the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17505](https://github.com/element-hq/synapse/issues/17505))

### Bugfixes

- Update the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to handle invite/knock rooms when filtering. ([\#17450](https://github.com/element-hq/synapse/issues/17450))
- Fix a bug introduced in v1.110.0 which caused `/keys/query` to return incomplete results, leading to high network activity and CPU usage on Matrix clients. ([\#17499](https://github.com/element-hq/synapse/issues/17499))

### Improved Documentation

- Update the [`allowed_local_3pids`](https://element-hq.github.io/synapse/v1.112/usage/configuration/config_documentation.html#allowed_local_3pids) config option's msisdn address to a working example. ([\#17476](https://github.com/element-hq/synapse/issues/17476))

### Internal Changes

- Change sliding sync to use its own token format in preparation for storing per-connection state. ([\#17452](https://github.com/element-hq/synapse/issues/17452))
- Ensure we don't send down a negative `bump_stamp` in the experimental sliding sync endpoint. ([\#17478](https://github.com/element-hq/synapse/issues/17478))
- Do not send empty room entries down the experimental sliding sync endpoint. ([\#17479](https://github.com/element-hq/synapse/issues/17479))
- Refactor Sliding Sync tests to better utilize the `SlidingSyncBase`. ([\#17481](https://github.com/element-hq/synapse/issues/17481), [\#17482](https://github.com/element-hq/synapse/issues/17482))
- Add some opentracing tags and logging to the experimental sliding sync implementation. ([\#17501](https://github.com/element-hq/synapse/issues/17501))
- Split and move Sliding Sync tests so we have more sane test file sizes. ([\#17504](https://github.com/element-hq/synapse/issues/17504))
- Update the `limited` field description in the Sliding Sync response to accurately describe what it actually represents. ([\#17507](https://github.com/element-hq/synapse/issues/17507))
- Make `timeline` assertions in Sliding Sync tests easier to understand. ([\#17511](https://github.com/element-hq/synapse/issues/17511))
- Reset the sliding sync connection if we don't recognize the per-connection state position. ([\#17529](https://github.com/element-hq/synapse/issues/17529))

### Updates to locked dependencies

* Bump bcrypt from 4.1.3 to 4.2.0. ([\#17495](https://github.com/element-hq/synapse/issues/17495))
* Bump black from 24.4.2 to 24.8.0. ([\#17522](https://github.com/element-hq/synapse/issues/17522))
* Bump phonenumbers from 8.13.39 to 8.13.42. ([\#17521](https://github.com/element-hq/synapse/issues/17521))
* Bump ruff from 0.5.4 to 0.5.5. ([\#17494](https://github.com/element-hq/synapse/issues/17494))
* Bump serde_json from 1.0.120 to 1.0.121. ([\#17493](https://github.com/element-hq/synapse/issues/17493))
* Bump serde_json from 1.0.121 to 1.0.122. ([\#17525](https://github.com/element-hq/synapse/issues/17525))
* Bump towncrier from 23.11.0 to 24.7.1. ([\#17523](https://github.com/element-hq/synapse/issues/17523))
* Bump types-pyopenssl from 24.1.0.20240425 to 24.1.0.20240722. ([\#17496](https://github.com/element-hq/synapse/issues/17496))
* Bump types-setuptools from 70.1.0.20240627 to 71.1.0.20240726. ([\#17497](https://github.com/element-hq/synapse/issues/17497))

# Synapse 1.112.0 (2024-07-30)

This security release updates our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7).

Note that this security fix is also available as **Synapse 1.111.1**, which does not include the rest of the changes in Synapse 1.112.0.

This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request.
If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality.

With that said, despite being a high-severity issue, **we consider it unlikely that Synapse installations will be affected**.
The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased, as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and HAProxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration.

Despite that, we cannot rule out that some installations may exist with this unusual setup, so we are releasing this security update today.

**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version, because it is only a release candidate at this time.
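As a quick post-upgrade sanity check, the installed Twisted version can be read with the Python standard library. This is a minimal sketch (run it inside the virtualenv that runs Synapse), not part of the official upgrade instructions:

```python
# Confirm the locked Twisted dependency was actually upgraded.
from importlib.metadata import version

print(version("Twisted"))  # expect "24.7.0rc1" or newer for the CVE-2024-41671 fix
```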
### Internal Changes

- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502))

# Synapse 1.112.0rc1 (2024-07-23)

Please note that this release candidate does not include the security dependency update
included in version 1.111.1, as this version was released before 1.111.1.
The same security fix can be found in the full release of 1.112.0.

### Features

- Add to-device extension support to the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17416](https://github.com/element-hq/synapse/issues/17416))
- Populate `name`/`avatar` fields in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17418](https://github.com/element-hq/synapse/issues/17418))
- Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17419](https://github.com/element-hq/synapse/issues/17419))
- Populate the `is_dm` room field in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17429](https://github.com/element-hq/synapse/issues/17429))
- Add room subscriptions to the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17432](https://github.com/element-hq/synapse/issues/17432))
- Prepare for the authenticated media freeze. ([\#17433](https://github.com/element-hq/synapse/issues/17433))
- Add E2EE extension support to the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17454](https://github.com/element-hq/synapse/issues/17454))

### Bugfixes

- Add a configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231))
- Fix a bug in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434))
- Order `heroes` by `stream_ordering`, as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435))
- Fix a rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438))

### Improved Documentation

- Update the readme image to have a white background, so that it is readable in dark mode. ([\#17387](https://github.com/element-hq/synapse/issues/17387))
- Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. ([\#17423](https://github.com/element-hq/synapse/issues/17423))
- Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. ([\#17451](https://github.com/element-hq/synapse/issues/17451))

### Internal Changes

- Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424))
- Fix argument documentation for the method `RateLimiter.record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426))
- Reduce the volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428))
- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439))
- Remove an unnecessary call to resume producing in the fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449))
- Update the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump the room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453))
- Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458))
- Add a cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460))
- Speed up fetching room keys from backup. ([\#17461](https://github.com/element-hq/synapse/issues/17461))
- Speed up sorting of the room list in sliding sync. ([\#17468](https://github.com/element-hq/synapse/issues/17468))
- Implement handling of `$ME` as a state key in sliding sync. ([\#17469](https://github.com/element-hq/synapse/issues/17469))

### Updates to locked dependencies

* Bump bytes from 1.6.0 to 1.6.1. ([\#17441](https://github.com/element-hq/synapse/issues/17441))
* Bump hiredis from 2.3.2 to 3.0.0. ([\#17464](https://github.com/element-hq/synapse/issues/17464))
* Bump jsonschema from 4.22.0 to 4.23.0. ([\#17444](https://github.com/element-hq/synapse/issues/17444))
* Bump matrix-org/done-action from 2 to 3. ([\#17440](https://github.com/element-hq/synapse/issues/17440))
* Bump mypy from 1.9.0 to 1.10.1. ([\#17445](https://github.com/element-hq/synapse/issues/17445))
* Bump pyopenssl from 24.1.0 to 24.2.1. ([\#17465](https://github.com/element-hq/synapse/issues/17465))
* Bump ruff from 0.5.0 to 0.5.4. ([\#17466](https://github.com/element-hq/synapse/issues/17466))
* Bump sentry-sdk from 2.6.0 to 2.8.0. ([\#17456](https://github.com/element-hq/synapse/issues/17456))
* Bump sentry-sdk from 2.8.0 to 2.10.0. ([\#17467](https://github.com/element-hq/synapse/issues/17467))
* Bump setuptools from 67.6.0 to 70.0.0. ([\#17448](https://github.com/element-hq/synapse/issues/17448))
* Bump twine from 5.1.0 to 5.1.1. ([\#17443](https://github.com/element-hq/synapse/issues/17443))
* Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712. ([\#17446](https://github.com/element-hq/synapse/issues/17446))
* Bump ulid from 1.1.2 to 1.1.3. ([\#17442](https://github.com/element-hq/synapse/issues/17442))
* Bump zipp from 3.15.0 to 3.19.1. ([\#17427](https://github.com/element-hq/synapse/issues/17427))

# Synapse 1.111.1 (2024-07-30)

This security release updates our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7).

Cargo.lock (generated), 17 lines changed:

@@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"

[[package]]
name = "bytes"
-version = "1.6.0"
+version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
+checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50"

[[package]]
name = "cfg-if"

@@ -444,9 +444,9 @@ dependencies = [

[[package]]
name = "regex"
-version = "1.10.5"
+version = "1.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
+checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
dependencies = [
 "aho-corasick",
 "memchr",

@@ -505,11 +505,12 @@ dependencies = [

[[package]]
name = "serde_json"
-version = "1.0.120"
+version = "1.0.122"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5"
+checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da"
dependencies = [
 "itoa",
+ "memchr",
 "ryu",
 "serde",
]

@@ -597,9 +598,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"

[[package]]
name = "ulid"
-version = "1.1.2"
+version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34778c17965aa2a08913b57e1f34db9b4a63f5de31768b55bf20d2795f921259"
+checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289"
dependencies = [
 "getrandom",
 "rand",

README, 2 lines changed:

@@ -1,4 +1,4 @@
-.. image:: https://github.com/element-hq/product/assets/87339233/7abf477a-5277-47f3-be44-ea44917d8ed7
+.. image:: ./docs/element_logo_white_bg.svg
    :height: 60px

**Element Synapse - Matrix homeserver implementation**

changelog.d/17514.misc (new file, 1 line):

@@ -0,0 +1 @@
+Add more tracing to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

changelog.d/17515.doc (new file, 3 lines):

@@ -0,0 +1,3 @@
+Clarify default behaviour of the
+[`auto_accept_invites.worker_to_run_on`](https://element-hq.github.io/synapse/develop/usage/configuration/config_documentation.html#auto-accept-invites)
+option.

changelog.d/17531.misc (new file, 1 line):

@@ -0,0 +1 @@
+Fixup comment in sliding sync implementation.

changelog.d/17535.bugfix (new file, 1 line):

@@ -0,0 +1 @@
+Fix experimental sliding sync implementation to remember any updates in rooms that were not sent down immediately.

debian/changelog (vendored), 18 lines added:

@@ -1,3 +1,21 @@
+matrix-synapse-py3 (1.113.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.113.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 06 Aug 2024 12:23:23 +0100
+
+matrix-synapse-py3 (1.112.0) stable; urgency=medium
+
+  * New Synapse release 1.112.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Jul 2024 17:15:48 +0100
+
+matrix-synapse-py3 (1.112.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.112.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 23 Jul 2024 08:58:55 -0600
+
 matrix-synapse-py3 (1.111.1) stable; urgency=medium

   * New Synapse release 1.111.1.

debian/templates (vendored), 2 lines changed:

@@ -5,7 +5,7 @@ _Description: Name of the server:
 servers via federation. This is normally the public hostname of the
 server running synapse, but can be different if you set up delegation.
 Please refer to the delegation documentation in this case:
- https://github.com/element-hq/synapse/blob/master/docs/delegate.md.
+ https://element-hq.github.io/synapse/latest/delegate.html.

Template: matrix-synapse/report-stats
Type: boolean

Dockerfile changes (lower-case `as` updated to `AS` on multi-stage build stages):

@@ -27,7 +27,7 @@ ARG PYTHON_VERSION=3.11
###
# We hardcode the use of Debian bookworm here because this could change upstream
# and other Dockerfiles used for testing are expecting bookworm.
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as requirements
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS requirements

# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.

@@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
###
### Stage 1: builder
###
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as builder
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS builder

# install the OS build deps
RUN \

@@ -24,7 +24,7 @@ ARG distro=""
# https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
# it's not obviously easier to use that than to build our own.)

-FROM docker.io/library/${distro} as builder
+FROM docker.io/library/${distro} AS builder

RUN apt-get update -qq -o Acquire::Languages=none
RUN env DEBIAN_FRONTEND=noninteractive apt-get install \

docs/element_logo_white_bg.svg (new file, 94 lines, 7.5 KiB): an Inkscape-generated SVG of the Element logo for the readme, 41.4 mm x 10.4 mm, consisting of a white background rectangle, the green (#0dbd8b) circular mark with four white arc paths, and the black "element" wordmark paths. (Full SVG path data omitted here; see the file in the repository.)
@@ -67,7 +67,7 @@ in Synapse can be deactivated.
**NOTE**: This has an impact on security and is for testing purposes only!

To deactivate the certificate validation, the following setting must be added to
-your [homserver.yaml](../usage/configuration/homeserver_sample_config.md).
+your [homeserver.yaml](../usage/configuration/homeserver_sample_config.md).

```yaml
use_insecure_ssl_client_just_for_testing_do_not_use: true

@@ -309,7 +309,62 @@ sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
    libwebp-devel libxml2-devel libxslt-devel libpq-devel \
    python3-virtualenv libffi-devel openssl-devel python3-devel \
    libicu-devel
-sudo dnf groupinstall "Development Tools"
+sudo dnf group install "Development Tools"
```

##### Red Hat Enterprise Linux / Rocky Linux

*Note: the term "RHEL" below refers to both Red Hat Enterprise Linux and Rocky Linux. The distributions are 1:1 binary compatible.*

It's recommended to use the latest Python versions.

RHEL 8 in particular ships with Python 3.6 by default, which is EOL and therefore no longer supported by Synapse. RHEL 9 ships with Python 3.9, which is still supported by the Python core team as of this writing. However, newer Python versions provide significant performance improvements and are available in the official distributions' repositories, so it's recommended to use them.

Python 3.11 and 3.12 are available for both RHEL 8 and 9.

These commands should be run as the root user.

RHEL 8:
```bash
# Enable PowerTools repository
dnf config-manager --set-enabled powertools
```
RHEL 9:
```bash
# Enable CodeReady Linux Builder repository
crb enable
```

Install a newer version of Python. You only need one of these:
```bash
# Python 3.11
dnf install python3.11 python3.11-devel
```
```bash
# Python 3.12
dnf install python3.12 python3.12-devel
```
Finally, install the common prerequisites:
```bash
dnf install libicu libicu-devel libpq5 libpq5-devel lz4 pkgconf
dnf group install "Development Tools"
```

###### Using the venv module instead of the virtualenv command

It's recommended to use the Python `venv` module directly rather than the `virtualenv` command.
* On RHEL 9, `virtualenv` is only available from [EPEL](https://docs.fedoraproject.org/en-US/epel/).
* On RHEL 8, `virtualenv` is based on Python 3.6 and does not support creating 3.11/3.12 virtual environments.

Here's an example of creating a Python 3.12 virtual environment and installing Synapse from PyPI:

```bash
mkdir -p ~/synapse
# To use Python 3.11, simply use the command "python3.11" instead.
python3.12 -m venv ~/synapse/env
source ~/synapse/env/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools
pip install matrix-synapse
```

##### macOS

@@ -246,6 +246,7 @@ Example configuration:
```yaml
presence:
  enabled: false
+  include_offline_users_on_sync: false
```

`enabled` can also be set to a special value of "untracked" which ignores updates

@@ -254,6 +255,10 @@ received via clients and federation, while still accepting updates from the

*The "untracked" option was added in Synapse 1.96.0.*

+When clients perform an initial or `full_state` sync, presence results for offline users are
+not included by default. Setting `include_offline_users_on_sync` to `true` will always include
+offline users in the results. Defaults to false.
+
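To make the new option concrete, here is a minimal sketch of the kind of request it affects: an initial sync with `full_state=true`, read with Python's standard library. The homeserver URL and access token are placeholders, not values from this changeset:

```python
# Sketch: an initial/full-state sync; presence events for offline users
# only appear here when include_offline_users_on_sync is true.
import json
import urllib.request

HOMESERVER = "https://matrix.example.com"  # placeholder
ACCESS_TOKEN = "syt_example_token"         # placeholder

req = urllib.request.Request(
    f"{HOMESERVER}/_matrix/client/v3/sync?full_state=true",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
)
with urllib.request.urlopen(req) as resp:
    body = json.load(resp)

presence_events = body.get("presence", {}).get("events", [])
print(len(presence_events), "presence events")
```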
---
### `require_auth_for_profile_requests`

@@ -1863,6 +1868,18 @@ federation_rr_transactions_per_room_per_second: 40
## Media Store
Config options related to Synapse's media store.

---
### `enable_authenticated_media`

When set to true, all subsequent media uploads will be marked as authenticated and will not be available over the legacy
unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`); requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`.

Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note that if the setting is later switched back to false, media marked as authenticated will become available over the legacy endpoints again. Defaults to false, but this will change to true in a future Synapse release.

Example configuration:
```yaml
enable_authenticated_media: true
```
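For illustration, a minimal sketch of fetching media over the authenticated endpoint named above, using only the Python standard library. The homeserver URL, access token, server name, and media ID are placeholders:

```python
# Sketch: downloading media via the authenticated endpoint; the legacy
# /_matrix/media/... endpoints return 404 for authenticated media.
import urllib.request

HOMESERVER = "https://matrix.example.com"        # placeholder
ACCESS_TOKEN = "syt_example_token"               # placeholder
SERVER_NAME, MEDIA_ID = "example.com", "abc123"  # placeholder media

req = urllib.request.Request(
    f"{HOMESERVER}/_matrix/client/v1/media/download/{SERVER_NAME}/{MEDIA_ID}",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
)
with urllib.request.urlopen(req) as resp:
    media_bytes = resp.read()  # raw media content
```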
---
### `enable_media_repo`

@@ -2369,7 +2386,7 @@ enable_registration_without_verification: true
---
### `registrations_require_3pid`

-If this is set, users must provide all of the specified types of 3PID when registering an account.
+If this is set, users must provide all of the specified types of [3PID](https://spec.matrix.org/latest/appendices/#3pid-types) when registering an account.

Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.

@@ -2394,6 +2411,9 @@ disable_msisdn_registration: true

Mandate that users are only allowed to associate certain formats of
3PIDs with accounts on this server, as specified by the `medium` and `pattern` sub-options.
+`pattern` is a [Perl-like regular expression](https://docs.python.org/3/library/re.html#module-re).
+
+More information about 3PIDs, allowed `medium` types and their `address` syntax can be found [in the Matrix spec](https://spec.matrix.org/latest/appendices/#3pid-types).

Example configuration:

@@ -2403,7 +2423,7 @@ allowed_local_3pids:
```yaml
allowed_local_3pids:
- medium: email
  pattern: '^[^@]+@vector\.im$'
- medium: msisdn
-  pattern: '\+44'
+  pattern: '^44\d{10}$'
```
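As a quick sanity check of why the new msisdn pattern is a working example (per the 3PID appendix linked above, msisdn addresses are digits only, with no leading `+`), the pattern can be exercised directly with Python's `re` module. This standalone snippet is illustrative, not Synapse's internal matching code:

```python
# The updated pattern matches UK-format msisdns: "44" plus ten digits.
import re

pattern = r"^44\d{10}$"

print(bool(re.match(pattern, "447700900123")))   # True: valid UK-format msisdn
print(bool(re.match(pattern, "+447700900123")))  # False: 3PID addresses carry no '+'
print(bool(re.match(pattern, "4477009001")))     # False: too few digits
```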
---
### `enable_3pid_lookup`

@@ -4134,6 +4154,38 @@ default_power_level_content_override:
  trusted_private_chat: null
  public_chat: null
```

+The default power levels for each preset are:
+```yaml
+"m.room.name": 50
+"m.room.power_levels": 100
+"m.room.history_visibility": 100
+"m.room.canonical_alias": 50
+"m.room.avatar": 50
+"m.room.tombstone": 100
+"m.room.server_acl": 100
+"m.room.encryption": 100
+```
+
+So a complete example where the default power levels for a preset are maintained
+but the power level for a new key is set is:
+```yaml
+default_power_level_content_override:
+  private_chat:
+    events:
+      "com.example.foo": 0
+      "m.room.name": 50
+      "m.room.power_levels": 100
+      "m.room.history_visibility": 100
+      "m.room.canonical_alias": 50
+      "m.room.avatar": 50
+      "m.room.tombstone": 100
+      "m.room.server_acl": 100
+      "m.room.encryption": 100
+  trusted_private_chat: null
+  public_chat: null
+```

---
### `forget_rooms_on_leave`

@@ -4633,7 +4685,9 @@ This setting has the following sub-options:
* `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only
  for direct messages. Defaults to false.
* `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false.
-* `worker_to_run_on`: Which worker to run this module on. This must match the "worker_name".
+* `worker_to_run_on`: Which worker to run this module on. This must match
+  the "worker_name". If not set or `null`, invites will be accepted on the
+  main process.

NOTE: Care should be taken not to enable this setting if the `synapse_auto_accept_invite` module is enabled and installed.
The two modules will compete to perform the same task and may result in undesired behaviour. For example, multiple join

487
poetry.lock
generated
487
poetry.lock
generated
@@ -1,4 +1,4 @@
|
||||
# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
|
||||
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
|
||||
|
||||
[[package]]
|
||||
name = "annotated-types"
|
||||
@@ -67,38 +67,38 @@ visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]
|
||||
|
||||
[[package]]
|
||||
name = "bcrypt"
|
||||
version = "4.1.3"
|
||||
version = "4.2.0"
|
||||
description = "Modern password hashing for your software and your servers"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "bcrypt-4.1.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:48429c83292b57bf4af6ab75809f8f4daf52aa5d480632e53707805cc1ce9b74"},
|
||||
{file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8bea4c152b91fd8319fef4c6a790da5c07840421c2b785084989bf8bbb7455"},
|
||||
{file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d3b317050a9a711a5c7214bf04e28333cf528e0ed0ec9a4e55ba628d0f07c1a"},
|
||||
{file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:094fd31e08c2b102a14880ee5b3d09913ecf334cd604af27e1013c76831f7b05"},
|
||||
{file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:4fb253d65da30d9269e0a6f4b0de32bd657a0208a6f4e43d3e645774fb5457f3"},
|
||||
{file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:193bb49eeeb9c1e2db9ba65d09dc6384edd5608d9d672b4125e9320af9153a15"},
|
||||
{file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8cbb119267068c2581ae38790e0d1fbae65d0725247a930fc9900c285d95725d"},
|
||||
{file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6cac78a8d42f9d120b3987f82252bdbeb7e6e900a5e1ba37f6be6fe4e3848286"},
|
||||
{file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:01746eb2c4299dd0ae1670234bf77704f581dd72cc180f444bfe74eb80495b64"},
|
||||
{file = "bcrypt-4.1.3-cp37-abi3-win32.whl", hash = "sha256:037c5bf7c196a63dcce75545c8874610c600809d5d82c305dd327cd4969995bf"},
|
||||
{file = "bcrypt-4.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:8a893d192dfb7c8e883c4576813bf18bb9d59e2cfd88b68b725990f033f1b978"},
|
||||
{file = "bcrypt-4.1.3-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d4cf6ef1525f79255ef048b3489602868c47aea61f375377f0d00514fe4a78c"},
|
||||
{file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5698ce5292a4e4b9e5861f7e53b1d89242ad39d54c3da451a93cac17b61921a"},
|
||||
{file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec3c2e1ca3e5c4b9edb94290b356d082b721f3f50758bce7cce11d8a7c89ce84"},
|
||||
{file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3a5be252fef513363fe281bafc596c31b552cf81d04c5085bc5dac29670faa08"},
|
||||
{file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5f7cd3399fbc4ec290378b541b0cf3d4398e4737a65d0f938c7c0f9d5e686611"},
|
||||
{file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:c4c8d9b3e97209dd7111bf726e79f638ad9224b4691d1c7cfefa571a09b1b2d6"},
|
||||
{file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:31adb9cbb8737a581a843e13df22ffb7c84638342de3708a98d5c986770f2834"},
|
||||
{file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:551b320396e1d05e49cc18dd77d970accd52b322441628aca04801bbd1d52a73"},
|
||||
{file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6717543d2c110a155e6821ce5670c1f512f602eabb77dba95717ca76af79867d"},
|
||||
{file = "bcrypt-4.1.3-cp39-abi3-win32.whl", hash = "sha256:6004f5229b50f8493c49232b8e75726b568535fd300e5039e255d919fc3a07f2"},
|
||||
{file = "bcrypt-4.1.3-cp39-abi3-win_amd64.whl", hash = "sha256:2505b54afb074627111b5a8dc9b6ae69d0f01fea65c2fcaea403448c503d3991"},
|
||||
{file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:cb9c707c10bddaf9e5ba7cdb769f3e889e60b7d4fea22834b261f51ca2b89fed"},
|
||||
{file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9f8ea645eb94fb6e7bea0cf4ba121c07a3a182ac52876493870033141aa687bc"},
|
||||
{file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f44a97780677e7ac0ca393bd7982b19dbbd8d7228c1afe10b128fd9550eef5f1"},
|
||||
{file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d84702adb8f2798d813b17d8187d27076cca3cd52fe3686bb07a9083930ce650"},
|
||||
{file = "bcrypt-4.1.3.tar.gz", hash = "sha256:2ee15dd749f5952fe3f0430d0ff6b74082e159c50332a1413d51b5689cf06623"},
|
||||
{file = "bcrypt-4.2.0-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:096a15d26ed6ce37a14c1ac1e48119660f21b24cba457f160a4b830f3fe6b5cb"},
|
||||
{file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c02d944ca89d9b1922ceb8a46460dd17df1ba37ab66feac4870f6862a1533c00"},
|
||||
{file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d84cf6d877918620b687b8fd1bf7781d11e8a0998f576c7aa939776b512b98d"},
|
||||
{file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:1bb429fedbe0249465cdd85a58e8376f31bb315e484f16e68ca4c786dcc04291"},
|
||||
{file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:655ea221910bcac76ea08aaa76df427ef8625f92e55a8ee44fbf7753dbabb328"},
|
||||
{file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:1ee38e858bf5d0287c39b7a1fc59eec64bbf880c7d504d3a06a96c16e14058e7"},
|
||||
{file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0da52759f7f30e83f1e30a888d9163a81353ef224d82dc58eb5bb52efcabc399"},
|
||||
{file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3698393a1b1f1fd5714524193849d0c6d524d33523acca37cd28f02899285060"},
|
||||
{file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:762a2c5fb35f89606a9fde5e51392dad0cd1ab7ae64149a8b935fe8d79dd5ed7"},
|
||||
{file = "bcrypt-4.2.0-cp37-abi3-win32.whl", hash = "sha256:5a1e8aa9b28ae28020a3ac4b053117fb51c57a010b9f969603ed885f23841458"},
|
||||
{file = "bcrypt-4.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:8f6ede91359e5df88d1f5c1ef47428a4420136f3ce97763e31b86dd8280fbdf5"},
|
||||
{file = "bcrypt-4.2.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:c52aac18ea1f4a4f65963ea4f9530c306b56ccd0c6f8c8da0c06976e34a6e841"},
|
||||
{file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bbbfb2734f0e4f37c5136130405332640a1e46e6b23e000eeff2ba8d005da68"},
|
||||
{file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3413bd60460f76097ee2e0a493ccebe4a7601918219c02f503984f0a7ee0aebe"},
|
||||
{file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8d7bb9c42801035e61c109c345a28ed7e84426ae4865511eb82e913df18f58c2"},
|
||||
{file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3d3a6d28cb2305b43feac298774b997e372e56c7c7afd90a12b3dc49b189151c"},
|
||||
{file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9c1c4ad86351339c5f320ca372dfba6cb6beb25e8efc659bedd918d921956bae"},
|
||||
{file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:27fe0f57bb5573104b5a6de5e4153c60814c711b29364c10a75a54bb6d7ff48d"},
|
||||
{file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:8ac68872c82f1add6a20bd489870c71b00ebacd2e9134a8aa3f98a0052ab4b0e"},
|
||||
{file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8"},
|
||||
{file = "bcrypt-4.2.0-cp39-abi3-win32.whl", hash = "sha256:77800b7147c9dc905db1cba26abe31e504d8247ac73580b4aa179f98e6608f34"},
|
||||
{file = "bcrypt-4.2.0-cp39-abi3-win_amd64.whl", hash = "sha256:61ed14326ee023917ecd093ee6ef422a72f3aec6f07e21ea5f10622b735538a9"},
|
||||
{file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:39e1d30c7233cfc54f5c3f2c825156fe044efdd3e0b9d309512cc514a263ec2a"},
|
||||
{file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db"},
|
||||
{file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:1ff39b78a52cf03fdf902635e4c81e544714861ba3f0efc56558979dd4f09170"},
|
||||
{file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:373db9abe198e8e2c70d12b479464e0d5092cc122b20ec504097b5f2297ed184"},
|
||||
{file = "bcrypt-4.2.0.tar.gz", hash = "sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
@@ -107,33 +107,33 @@ typecheck = ["mypy"]
|
||||
|
||||
[[package]]
|
||||
name = "black"
|
||||
version = "24.4.2"
|
||||
version = "24.8.0"
|
||||
description = "The uncompromising code formatter."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"},
|
||||
{file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"},
|
||||
{file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"},
|
||||
{file = "black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"},
|
||||
{file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"},
|
||||
{file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"},
|
||||
{file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"},
|
||||
{file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"},
|
||||
{file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"},
|
||||
{file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"},
|
||||
{file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"},
|
||||
{file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"},
|
||||
{file = "black-24.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7"},
|
||||
{file = "black-24.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94"},
|
||||
{file = "black-24.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8"},
|
||||
{file = "black-24.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c"},
|
||||
{file = "black-24.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1"},
|
||||
{file = "black-24.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741"},
|
||||
{file = "black-24.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e"},
|
||||
{file = "black-24.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7"},
|
||||
{file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"},
|
||||
{file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"},
|
||||
{file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"},
|
||||
{file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"},
|
||||
{file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"},
|
||||
{file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"},
|
||||
{file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"},
|
||||
{file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"},
|
||||
{file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"},
|
||||
{file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"},
|
||||
{file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"},
|
||||
{file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"},
|
||||
{file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"},
|
||||
{file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"},
|
||||
{file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"},
|
||||
{file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"},
|
||||
{file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"},
|
||||
{file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"},
|
||||
{file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"},
|
||||
{file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"},
|
||||
{file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"},
|
||||
{file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"},
|
||||
{file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"},
|
||||
{file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -542,120 +542,105 @@ test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit",

 [[package]]
 name = "hiredis"
-version = "2.3.2"
+version = "3.0.0"
 description = "Python wrapper for hiredis"
 optional = true
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "hiredis-2.3.2-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:742093f33d374098aa21c1696ac6e4874b52658c870513a297a89265a4d08fe5"},
-    {file = "hiredis-2.3.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:9e14fb70ca4f7efa924f508975199353bf653f452e4ef0a1e47549e208f943d7"},
-    {file = "hiredis-2.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d7302b4b17fcc1cc727ce84ded7f6be4655701e8d58744f73b09cb9ed2b13df"},
-    {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed63e8b75c193c5e5a8288d9d7b011da076cc314fafc3bfd59ec1d8a750d48c8"},
-    {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b4edee59dc089bc3948f4f6fba309f51aa2ccce63902364900aa0a553a85e97"},
-    {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6481c3b7673a86276220140456c2a6fbfe8d1fb5c613b4728293c8634134824"},
-    {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684840b014ce83541a087fcf2d48227196576f56ae3e944d4dfe14c0a3e0ccb7"},
-    {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c4c0bcf786f0eac9593367b6279e9b89534e008edbf116dcd0de956524702c8"},
-    {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66ab949424ac6504d823cba45c4c4854af5c59306a1531edb43b4dd22e17c102"},
-    {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:322c668ee1c12d6c5750a4b1057e6b4feee2a75b3d25d630922a463cfe5e7478"},
-    {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bfa73e3f163c6e8b2ec26f22285d717a5f77ab2120c97a2605d8f48b26950dac"},
-    {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:7f39f28ffc65de577c3bc0c7615f149e35bc927802a0f56e612db9b530f316f9"},
-    {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:55ce31bf4711da879b96d511208efb65a6165da4ba91cb3a96d86d5a8d9d23e6"},
-    {file = "hiredis-2.3.2-cp310-cp310-win32.whl", hash = "sha256:3dd63d0bbbe75797b743f35d37a4cca7ca7ba35423a0de742ae2985752f20c6d"},
-    {file = "hiredis-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:ea002656a8d974daaf6089863ab0a306962c8b715db6b10879f98b781a2a5bf5"},
-    {file = "hiredis-2.3.2-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:adfbf2e9c38b77d0db2fb32c3bdaea638fa76b4e75847283cd707521ad2475ef"},
-    {file = "hiredis-2.3.2-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:80b02d27864ebaf9b153d4b99015342382eeaed651f5591ce6f07e840307c56d"},
-    {file = "hiredis-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd40d2e2f82a483de0d0a6dfd8c3895a02e55e5c9949610ecbded18188fd0a56"},
-    {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfa904045d7cebfb0f01dad51352551cce1d873d7c3f80c7ded7d42f8cac8f89"},
-    {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28bd184b33e0dd6d65816c16521a4ba1ffbe9ff07d66873c42ea4049a62fed83"},
-    {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f70481213373d44614148f0f2e38e7905be3f021902ae5167289413196de4ba4"},
-    {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb8797b528c1ff81eef06713623562b36db3dafa106b59f83a6468df788ff0d1"},
-    {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02fc71c8333586871602db4774d3a3e403b4ccf6446dc4603ec12df563127cee"},
-    {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0da56915bda1e0a49157191b54d3e27689b70960f0685fdd5c415dacdee2fbed"},
-    {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e2674a5a3168349435b08fa0b82998ed2536eb9acccf7087efe26e4cd088a525"},
-    {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:dc1c3fd49930494a67dcec37d0558d99d84eca8eb3f03b17198424538f2608d7"},
-    {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:14c7b43205e515f538a9defb4e411e0f0576caaeeda76bb9993ed505486f7562"},
-    {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7bac7e02915b970c3723a7a7c5df4ba7a11a3426d2a3f181e041aa506a1ff028"},
-    {file = "hiredis-2.3.2-cp311-cp311-win32.whl", hash = "sha256:63a090761ddc3c1f7db5e67aa4e247b4b3bb9890080bdcdadd1b5200b8b89ac4"},
-    {file = "hiredis-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:70d226ab0306a5b8d408235cabe51d4bf3554c9e8a72d53ce0b3c5c84cf78881"},
-    {file = "hiredis-2.3.2-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:5c614552c6bd1d0d907f448f75550f6b24fb56cbfce80c094908b7990cad9702"},
-    {file = "hiredis-2.3.2-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9c431431abf55b64347ddc8df68b3ef840269cb0aa5bc2d26ad9506eb4b1b866"},
-    {file = "hiredis-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a45857e87e9d2b005e81ddac9d815a33efd26ec67032c366629f023fe64fb415"},
-    {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e138d141ec5a6ec800b6d01ddc3e5561ce1c940215e0eb9960876bfde7186aae"},
-    {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:387f655444d912a963ab68abf64bf6e178a13c8e4aa945cb27388fd01a02e6f1"},
-    {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4852f4bf88f0e2d9bdf91279892f5740ed22ae368335a37a52b92a5c88691140"},
-    {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d711c107e83117129b7f8bd08e9820c43ceec6204fff072a001fd82f6d13db9f"},
-    {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92830c16885f29163e1c2da1f3c1edb226df1210ec7e8711aaabba3dd0d5470a"},
-    {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:16b01d9ceae265d4ab9547be0cd628ecaff14b3360357a9d30c029e5ae8b7e7f"},
-    {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5986fb5f380169270a0293bebebd95466a1c85010b4f1afc2727e4d17c452512"},
-    {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:49532d7939cc51f8e99efc326090c54acf5437ed88b9c904cc8015b3c4eda9c9"},
-    {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:8f34801b251ca43ad70691fb08b606a2e55f06b9c9fb1fc18fd9402b19d70f7b"},
-    {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7298562a49d95570ab1c7fc4051e72824c6a80e907993a21a41ba204223e7334"},
-    {file = "hiredis-2.3.2-cp312-cp312-win32.whl", hash = "sha256:e1d86b75de787481b04d112067a4033e1ecfda2a060e50318a74e4e1c9b2948c"},
-    {file = "hiredis-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:6dbfe1887ffa5cf3030451a56a8f965a9da2fa82b7149357752b67a335a05fc6"},
-    {file = "hiredis-2.3.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:4fc242e9da4af48714199216eb535b61e8f8d66552c8819e33fc7806bd465a09"},
-    {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e81aa4e9a1fcf604c8c4b51aa5d258e195a6ba81efe1da82dea3204443eba01c"},
-    {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:419780f8583ddb544ffa86f9d44a7fcc183cd826101af4e5ffe535b6765f5f6b"},
-    {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6871306d8b98a15e53a5f289ec1106a3a1d43e7ab6f4d785f95fcef9a7bd9504"},
-    {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88cb0b35b63717ef1e41d62f4f8717166f7c6245064957907cfe177cc144357c"},
-    {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c490191fa1218851f8a80c5a21a05a6f680ac5aebc2e688b71cbfe592f8fec6"},
-    {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4baf4b579b108062e91bd2a991dc98b9dc3dc06e6288db2d98895eea8acbac22"},
-    {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e627d8ef5e100556e09fb44c9571a432b10e11596d3c4043500080ca9944a91a"},
-    {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:ba3dc0af0def8c21ce7d903c59ea1e8ec4cb073f25ece9edaec7f92a286cd219"},
-    {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:56e9b7d6051688ca94e68c0c8a54a243f8db841911b683cedf89a29d4de91509"},
-    {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:380e029bb4b1d34cf560fcc8950bf6b57c2ef0c9c8b7c7ac20b7c524a730fadd"},
-    {file = "hiredis-2.3.2-cp37-cp37m-win32.whl", hash = "sha256:948d9f2ca7841794dd9b204644963a4bcd69ced4e959b0d4ecf1b8ce994a6daa"},
-    {file = "hiredis-2.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:cfa67afe2269b2d203cd1389c00c5bc35a287cd57860441fb0e53b371ea6a029"},
-    {file = "hiredis-2.3.2-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:bcbe47da0aebc00a7cfe3ebdcff0373b86ce2b1856251c003e3d69c9db44b5a7"},
-    {file = "hiredis-2.3.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f2c9c0d910dd3f7df92f0638e7f65d8edd7f442203caf89c62fc79f11b0b73f8"},
-    {file = "hiredis-2.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:01b6c24c0840ac7afafbc4db236fd55f56a9a0919a215c25a238f051781f4772"},
-    {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1f567489f422d40c21e53212a73bef4638d9f21043848150f8544ef1f3a6ad1"},
-    {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28adecb308293e705e44087a1c2d557a816f032430d8a2a9bb7873902a1c6d48"},
-    {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27e9619847e9dc70b14b1ad2d0fb4889e7ca18996585c3463cff6c951fd6b10b"},
-    {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a0026cfbf29f07649b0e34509091a2a6016ff8844b127de150efce1c3aff60b"},
-    {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9de7586522e5da6bee83c9cf0dcccac0857a43249cb4d721a2e312d98a684d1"},
-    {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e58494f282215fc461b06709e9a195a24c12ba09570f25bdf9efb036acc05101"},
-    {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:de3a32b4b76d46f1eb42b24a918d51d8ca52411a381748196241d59a895f7c5c"},
-    {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1979334ccab21a49c544cd1b8d784ffb2747f99a51cb0bd0976eebb517628382"},
-    {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:0c0773266e1c38a06e7593bd08870ac1503f5f0ce0f5c63f2b4134b090b5d6a4"},
-    {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bd1cee053416183adcc8e6134704c46c60c3f66b8faaf9e65bf76191ca59a2f7"},
-    {file = "hiredis-2.3.2-cp38-cp38-win32.whl", hash = "sha256:5341ce3d01ef3c7418a72e370bf028c7aeb16895e79e115fe4c954fff990489e"},
-    {file = "hiredis-2.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:8fc7197ff33047ce43a67851ccf190acb5b05c52fd4a001bb55766358f04da68"},
-    {file = "hiredis-2.3.2-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:f47775e27388b58ce52f4f972f80e45b13c65113e9e6b6bf60148f893871dc9b"},
-    {file = "hiredis-2.3.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:9412a06b8a8e09abd6313d96864b6d7713c6003a365995a5c70cfb9209df1570"},
-    {file = "hiredis-2.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3020b60e3fc96d08c2a9b011f1c2e2a6bdcc09cb55df93c509b88be5cb791df"},
-    {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53d0f2c59bce399b8010a21bc779b4f8c32d0f582b2284ac8c98dc7578b27bc4"},
-    {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57c0d0c7e308ed5280a4900d4468bbfec51f0e1b4cde1deae7d4e639bc6b7766"},
-    {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d63318ca189fddc7e75f6a4af8eae9c0545863619fb38cfba5f43e81280b286"},
-    {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e741ffe4e2db78a1b9dd6e5d29678ce37fbaaf65dfe132e5b82a794413302ef1"},
-    {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb98038ccd368e0d88bd92ee575c58cfaf33e77f788c36b2a89a84ee1936dc6b"},
-    {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:eae62ed60d53b3561148bcd8c2383e430af38c0deab9f2dd15f8874888ffd26f"},
-    {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ca33c175c1cf60222d9c6d01c38fc17ec3a484f32294af781de30226b003e00f"},
-    {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c5f6972d2bdee3cd301d5c5438e31195cf1cabf6fd9274491674d4ceb46914d"},
-    {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:a6b54dabfaa5dbaa92f796f0c32819b4636e66aa8e9106c3d421624bd2a2d676"},
-    {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e96cd35df012a17c87ae276196ea8f215e77d6eeca90709eb03999e2d5e3fd8a"},
-    {file = "hiredis-2.3.2-cp39-cp39-win32.whl", hash = "sha256:63b99b5ea9fe4f21469fb06a16ca5244307678636f11917359e3223aaeca0b67"},
-    {file = "hiredis-2.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:a50c8af811b35b8a43b1590cf890b61ff2233225257a3cad32f43b3ec7ff1b9f"},
-    {file = "hiredis-2.3.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e8bf4444b09419b77ce671088db9f875b26720b5872d97778e2545cd87dba4a"},
-    {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bd42d0d45ea47a2f96babd82a659fbc60612ab9423a68e4a8191e538b85542a"},
-    {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80441b55edbef868e2563842f5030982b04349408396e5ac2b32025fb06b5212"},
-    {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec444ab8f27562a363672d6a7372bc0700a1bdc9764563c57c5f9efa0e592b5f"},
-    {file = "hiredis-2.3.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f9f606e810858207d4b4287b4ef0dc622c2aa469548bf02b59dcc616f134f811"},
-    {file = "hiredis-2.3.2-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c3dde4ca00fe9eee3b76209711f1941bb86db42b8a75d7f2249ff9dfc026ab0e"},
-    {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4dd676107a1d3c724a56a9d9db38166ad4cf44f924ee701414751bd18a784a0"},
-    {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce42649e2676ad783186264d5ffc788a7612ecd7f9effb62d51c30d413a3eefe"},
-    {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e3f8b1733078ac663dad57e20060e16389a60ab542f18a97931f3a2a2dd64a4"},
-    {file = "hiredis-2.3.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:532a84a82156a82529ec401d1c25d677c6543c791e54a263aa139541c363995f"},
-    {file = "hiredis-2.3.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4d59f88c4daa36b8c38e59ac7bffed6f5d7f68eaccad471484bf587b28ccc478"},
-    {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91a14dd95e24dc078204b18b0199226ee44644974c645dc54ee7b00c3157330"},
-    {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb777a38797c8c7df0444533119570be18d1a4ce5478dffc00c875684df7bfcb"},
-    {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d47c915897a99d0d34a39fad4be97b4b709ab3d0d3b779ebccf2b6024a8c681e"},
-    {file = "hiredis-2.3.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:333b5e04866758b11bda5f5315b4e671d15755fc6ed3b7969721bc6311d0ee36"},
-    {file = "hiredis-2.3.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c8937f1100435698c18e4da086968c4b5d70e86ea718376f833475ab3277c9aa"},
-    {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa45f7d771094b8145af10db74704ab0f698adb682fbf3721d8090f90e42cc49"},
-    {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33d5ebc93c39aed4b5bc769f8ce0819bc50e74bb95d57a35f838f1c4378978e0"},
-    {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a797d8c7df9944314d309b0d9e1b354e2fa4430a05bb7604da13b6ad291bf959"},
-    {file = "hiredis-2.3.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e15a408f71a6c8c87b364f1f15a6cd9c1baca12bbc47a326ac8ab99ec7ad3c64"},
-    {file = "hiredis-2.3.2.tar.gz", hash = "sha256:733e2456b68f3f126ddaf2cd500a33b25146c3676b97ea843665717bda0c5d43"},
+    {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:4b182791c41c5eb1d9ed736f0ff81694b06937ca14b0d4dadde5dadba7ff6dae"},
+    {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:13c275b483a052dd645eb2cb60d6380f1f5215e4c22d6207e17b86be6dd87ffa"},
+    {file = "hiredis-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1018cc7f12824506f165027eabb302735b49e63af73eb4d5450c66c88f47026"},
+    {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83a29cc7b21b746cb6a480189e49f49b2072812c445e66a9e38d2004d496b81c"},
+    {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e241fab6332e8fb5f14af00a4a9c6aefa22f19a336c069b7ddbf28ef8341e8d6"},
+    {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1fb8de899f0145d6c4d5d4bd0ee88a78eb980a7ffabd51e9889251b8f58f1785"},
+    {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b23291951959141173eec10f8573538e9349fa27f47a0c34323d1970bf891ee5"},
+    {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e421ac9e4b5efc11705a0d5149e641d4defdc07077f748667f359e60dc904420"},
+    {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:77c8006c12154c37691b24ff293c077300c22944018c3ff70094a33e10c1d795"},
+    {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:41afc0d3c18b59eb50970479a9c0e5544fb4b95e3a79cf2fbaece6ddefb926fe"},
+    {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:04ccae6dcd9647eae6025425ab64edb4d79fde8b9e6e115ebfabc6830170e3b2"},
+    {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:fe91d62b0594db5ea7d23fc2192182b1a7b6973f628a9b8b2e0a42a2be721ac6"},
+    {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:99516d99316062824a24d145d694f5b0d030c80da693ea6f8c4ecf71a251d8bb"},
+    {file = "hiredis-3.0.0-cp310-cp310-win32.whl", hash = "sha256:562eaf820de045eb487afaa37e6293fe7eceb5b25e158b5a1974b7e40bf04543"},
+    {file = "hiredis-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a1c81c89ed765198da27412aa21478f30d54ef69bf5e4480089d9c3f77b8f882"},
+    {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:4664dedcd5933364756d7251a7ea86d60246ccf73a2e00912872dacbfcef8978"},
+    {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:47de0bbccf4c8a9f99d82d225f7672b9dd690d8fd872007b933ef51a302c9fa6"},
+    {file = "hiredis-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e43679eca508ba8240d016d8cca9d27342d70184773c15bea78a23c87a1922f1"},
+    {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13c345e7278c210317e77e1934b27b61394fee0dec2e8bd47e71570900f75823"},
+    {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00018f22f38530768b73ea86c11f47e8d4df65facd4e562bd78773bd1baef35e"},
+    {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ea3a86405baa8eb0d3639ced6926ad03e07113de54cb00fd7510cb0db76a89d"},
+    {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c073848d2b1d5561f3903879ccf4e1a70c9b1e7566c7bdcc98d082fa3e7f0a1d"},
+    {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a8dffb5f5b3415a4669d25de48b617fd9d44b0bccfc4c2ab24b06406ecc9ecb"},
+    {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:22c17c96143c2a62dfd61b13803bc5de2ac526b8768d2141c018b965d0333b66"},
+    {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c3ece960008dab66c6b8bb3a1350764677ee7c74ccd6270aaf1b1caf9ccebb46"},
+    {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f75999ae00a920f7dce6ecae76fa5e8674a3110e5a75f12c7a2c75ae1af53396"},
+    {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e069967cbd5e1900aafc4b5943888f6d34937fc59bf8918a1a546cb729b4b1e4"},
+    {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0aacc0a78e1d94d843a6d191f224a35893e6bdfeb77a4a89264155015c65f126"},
+    {file = "hiredis-3.0.0-cp311-cp311-win32.whl", hash = "sha256:719c32147ba29528cb451f037bf837dcdda4ff3ddb6cdb12c4216b0973174718"},
+    {file = "hiredis-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:bdc144d56333c52c853c31b4e2e52cfbdb22d3da4374c00f5f3d67c42158970f"},
+    {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:484025d2eb8f6348f7876fc5a2ee742f568915039fcb31b478fd5c242bb0fe3a"},
+    {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:fcdb552ffd97151dab8e7bc3ab556dfa1512556b48a367db94b5c20253a35ee1"},
+    {file = "hiredis-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bb6f9fd92f147ba11d338ef5c68af4fd2908739c09e51f186e1d90958c68cc1"},
+    {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa86bf9a0ed339ec9e8a9a9d0ae4dccd8671625c83f9f9f2640729b15e07fbfd"},
+    {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e194a0d5df9456995d8f510eab9f529213e7326af6b94770abf8f8b7952ddcaa"},
+    {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a1df39d74ec507d79c7a82c8063eee60bf80537cdeee652f576059b9cdd15c"},
+    {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f91456507427ba36fd81b2ca11053a8e112c775325acc74e993201ea912d63e9"},
+    {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9862db92ef67a8a02e0d5370f07d380e14577ecb281b79720e0d7a89aedb9ee5"},
+    {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d10fcd9e0eeab835f492832b2a6edb5940e2f1230155f33006a8dfd3bd2c94e4"},
+    {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:48727d7d405d03977d01885f317328dc21d639096308de126c2c4e9950cbd3c9"},
+    {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e0bb6102ebe2efecf8a3292c6660a0e6fac98176af6de67f020bea1c2343717"},
+    {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:df274e3abb4df40f4c7274dd3e587dfbb25691826c948bc98d5fead019dfb001"},
+    {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:034925b5fb514f7b11aac38cd55b3fd7e9d3af23bd6497f3f20aa5b8ba58e232"},
+    {file = "hiredis-3.0.0-cp312-cp312-win32.whl", hash = "sha256:120f2dda469b28d12ccff7c2230225162e174657b49cf4cd119db525414ae281"},
+    {file = "hiredis-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:e584fe5f4e6681d8762982be055f1534e0170f6308a7a90f58d737bab12ff6a8"},
+    {file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:122171ff47d96ed8dd4bba6c0e41d8afaba3e8194949f7720431a62aa29d8895"},
+    {file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ba9fc605ac558f0de67463fb588722878641e6fa1dabcda979e8e69ff581d0bd"},
+    {file = "hiredis-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a631e2990b8be23178f655cae8ac6c7422af478c420dd54e25f2e26c29e766f1"},
+    {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63482db3fadebadc1d01ad33afa6045ebe2ea528eb77ccaabd33ee7d9c2bad48"},
+    {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f669212c390eebfbe03c4e20181f5970b82c5d0a0ad1df1785f7ffbe7d61150"},
+    {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a49ef161739f8018c69b371528bdb47d7342edfdee9ddc75a4d8caddf45a6e"},
+    {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98a152052b8878e5e43a2e3a14075218adafc759547c98668a21e9485882696c"},
+    {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50a196af0ce657fcde9bf8a0bbe1032e22c64d8fcec2bc926a35e7ff68b3a166"},
+    {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f2f312eef8aafc2255e3585dcf94d5da116c43ef837db91db9ecdc1bc930072d"},
+    {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:6ca41fa40fa019cde42c21add74aadd775e71458051a15a352eabeb12eb4d084"},
+    {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:6eecb343c70629f5af55a8b3e53264e44fa04e155ef7989de13668a0cb102a90"},
+    {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:c3fdad75e7837a475900a1d3a5cc09aa024293c3b0605155da2d42f41bc0e482"},
+    {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8854969e7480e8d61ed7549eb232d95082a743e94138d98d7222ba4e9f7ecacd"},
+    {file = "hiredis-3.0.0-cp38-cp38-win32.whl", hash = "sha256:f114a6c86edbf17554672b050cce72abf489fe58d583c7921904d5f1c9691605"},
+    {file = "hiredis-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:7d99b91e42217d7b4b63354b15b41ce960e27d216783e04c4a350224d55842a4"},
+    {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:4c6efcbb5687cf8d2aedcc2c3ed4ac6feae90b8547427d417111194873b66b06"},
+    {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5b5cff42a522a0d81c2ae7eae5e56d0ee7365e0c4ad50c4de467d8957aff4414"},
+    {file = "hiredis-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:82f794d564f4bc76b80c50b03267fe5d6589e93f08e66b7a2f674faa2fa76ebc"},
+    {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7a4c1791d7aa7e192f60fe028ae409f18ccdd540f8b1e6aeb0df7816c77e4a4"},
+    {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2537b2cd98192323fce4244c8edbf11f3cac548a9d633dbbb12b48702f379f4"},
+    {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fed69bbaa307040c62195a269f82fc3edf46b510a17abb6b30a15d7dab548df"},
+    {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:869f6d5537d243080f44253491bb30aa1ec3c21754003b3bddeadedeb65842b0"},
+    {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d435ae89073d7cd51e6b6bf78369c412216261c9c01662e7008ff00978153729"},
+    {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:204b79b30a0e6be0dc2301a4d385bb61472809f09c49f400497f1cdd5a165c66"},
+    {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3ea635101b739c12effd189cc19b2671c268abb03013fd1f6321ca29df3ca625"},
+    {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f359175197fd833c8dd7a8c288f1516be45415bb5c939862ab60c2918e1e1943"},
+    {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ac6d929cb33dd12ad3424b75725975f0a54b5b12dbff95f2a2d660c510aa106d"},
+    {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:100431e04d25a522ef2c3b94f294c4219c4de3bfc7d557b6253296145a144c11"},
+    {file = "hiredis-3.0.0-cp39-cp39-win32.whl", hash = "sha256:e1a9c14ae9573d172dc050a6f63a644457df5d01ec4d35a6a0f097f812930f83"},
+    {file = "hiredis-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:54a6dd7b478e6eb01ce15b3bb5bf771e108c6c148315bf194eb2ab776a3cac4d"},
+    {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:50da7a9edf371441dfcc56288d790985ee9840d982750580710a9789b8f4a290"},
+    {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9b285ef6bf1581310b0d5e8f6ce64f790a1c40e89c660e1320b35f7515433672"},
+    {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dcfa684966f25b335072115de2f920228a3c2caf79d4bfa2b30f6e4f674a948"},
+    {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a41be8af1fd78ca97bc948d789a09b730d1e7587d07ca53af05758f31f4b985d"},
+    {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:038756db735e417ab36ee6fd7725ce412385ed2bd0767e8179a4755ea11b804f"},
+    {file = "hiredis-3.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:fcecbd39bd42cef905c0b51c9689c39d0cc8b88b1671e7f40d4fb213423aef3a"},
+    {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a131377493a59fb0f5eaeb2afd49c6540cafcfba5b0b3752bed707be9e7c4eaf"},
+    {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d22c53f0ec5c18ecb3d92aa9420563b1c5d657d53f01356114978107b00b860"},
+    {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a91e9520fbc65a799943e5c970ffbcd67905744d8becf2e75f9f0a5e8414f0"},
+    {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dc8043959b50141df58ab4f398e8ae84c6f9e673a2c9407be65fc789138f4a6"},
+    {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51b99cfac514173d7b8abdfe10338193e8a0eccdfe1870b646009d2fb7cbe4b5"},
+    {file = "hiredis-3.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:fa1fcad89d8a41d8dc10b1e54951ec1e161deabd84ed5a2c95c3c7213bdb3514"},
+    {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:898636a06d9bf575d2c594129085ad6b713414038276a4bfc5db7646b8a5be78"},
+    {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:466f836dbcf86de3f9692097a7a01533dc9926986022c6617dc364a402b265c5"},
+    {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23142a8af92a13fc1e3f2ca1d940df3dcf2af1d176be41fe8d89e30a837a0b60"},
+    {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:793c80a3d6b0b0e8196a2d5de37a08330125668c8012922685e17aa9108c33ac"},
+    {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:467d28112c7faa29b7db743f40803d927c8591e9da02b6ce3d5fadc170a542a2"},
+    {file = "hiredis-3.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:dc384874a719c767b50a30750f937af18842ee5e288afba95a5a3ed703b1515a"},
+    {file = "hiredis-3.0.0.tar.gz", hash = "sha256:fed8581ae26345dea1f1e0d1a96e05041a727a45e7d8d459164583e23c6ac441"},
 ]

 [[package]]
@@ -937,13 +922,13 @@ i18n = ["Babel (>=2.7)"]

 [[package]]
 name = "jsonschema"
-version = "4.22.0"
+version = "4.23.0"
 description = "An implementation of JSON Schema validation for Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"},
-    {file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"},
+    {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
+    {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
 ]

 [package.dependencies]
@@ -956,7 +941,7 @@ rpds-py = ">=0.7.1"

 [package.extras]
 format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
-format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"]

 [[package]]
 name = "jsonschema-specifications"
@@ -1392,38 +1377,38 @@ files = [

 [[package]]
 name = "mypy"
-version = "1.9.0"
+version = "1.10.1"
 description = "Optional static typing for Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"},
-    {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"},
-    {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"},
-    {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"},
-    {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"},
-    {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"},
-    {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"},
-    {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"},
-    {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"},
-    {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"},
-    {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"},
-    {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"},
-    {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"},
-    {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"},
-    {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"},
-    {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"},
-    {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"},
-    {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"},
-    {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"},
-    {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"},
-    {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"},
-    {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"},
-    {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"},
-    {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"},
-    {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"},
-    {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"},
-    {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"},
+    {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"},
+    {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"},
+    {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"},
+    {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"},
+    {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"},
+    {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"},
+    {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"},
+    {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"},
+    {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"},
+    {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"},
+    {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"},
+    {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"},
+    {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"},
+    {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"},
+    {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"},
+    {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"},
+    {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"},
+    {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"},
+    {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"},
+    {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"},
+    {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"},
+    {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"},
+    {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"},
+    {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"},
+    {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"},
+    {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"},
+    {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"},
 ]

 [package.dependencies]
@@ -1531,13 +1516,13 @@ files = [

 [[package]]
 name = "phonenumbers"
-version = "8.13.39"
+version = "8.13.42"
 description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
 optional = false
 python-versions = "*"
 files = [
-    {file = "phonenumbers-8.13.39-py2.py3-none-any.whl", hash = "sha256:3ad2d086fa71e7eef409001b9195ac54bebb0c6e3e752209b558ca192c9229a0"},
-    {file = "phonenumbers-8.13.39.tar.gz", hash = "sha256:db7ca4970d206b2056231105300753b1a5b229f43416f8c2b3010e63fbb68d77"},
+    {file = "phonenumbers-8.13.42-py2.py3-none-any.whl", hash = "sha256:18acc22ee03116d27b26e990f53806a1770a3e05f05e1620bc09ad187f889456"},
+    {file = "phonenumbers-8.13.42.tar.gz", hash = "sha256:7137904f2db3b991701e853174ce8e1cb8f540b8bfdf27617540de04c0b7bed5"},
 ]

 [[package]]
@@ -2016,17 +2001,17 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]

 [[package]]
 name = "pyopenssl"
-version = "24.1.0"
+version = "24.2.1"
 description = "Python wrapper module around the OpenSSL library"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "pyOpenSSL-24.1.0-py3-none-any.whl", hash = "sha256:17ed5be5936449c5418d1cd269a1a9e9081bc54c17aed272b45856a3d3dc86ad"},
-    {file = "pyOpenSSL-24.1.0.tar.gz", hash = "sha256:cabed4bfaa5df9f1a16c0ef64a0cb65318b5cd077a7eda7d6970131ca2f41a6f"},
+    {file = "pyOpenSSL-24.2.1-py3-none-any.whl", hash = "sha256:967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d"},
+    {file = "pyopenssl-24.2.1.tar.gz", hash = "sha256:4247f0dbe3748d560dcbb2ff3ea01af0f9a1a001ef5f7c4c647956ed8cbf0e95"},
 ]

 [package.dependencies]
-cryptography = ">=41.0.5,<43"
+cryptography = ">=41.0.5,<44"

 [package.extras]
 docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx-rtd-theme"]
@@ -2376,29 +2361,29 @@ files = [

 [[package]]
 name = "ruff"
-version = "0.5.0"
+version = "0.5.5"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "ruff-0.5.0-py3-none-linux_armv6l.whl", hash = "sha256:ee770ea8ab38918f34e7560a597cc0a8c9a193aaa01bfbd879ef43cb06bd9c4c"},
-    {file = "ruff-0.5.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:38f3b8327b3cb43474559d435f5fa65dacf723351c159ed0dc567f7ab735d1b6"},
-    {file = "ruff-0.5.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7594f8df5404a5c5c8f64b8311169879f6cf42142da644c7e0ba3c3f14130370"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:adc7012d6ec85032bc4e9065110df205752d64010bed5f958d25dbee9ce35de3"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d505fb93b0fabef974b168d9b27c3960714d2ecda24b6ffa6a87ac432905ea38"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dc5cfd3558f14513ed0d5b70ce531e28ea81a8a3b1b07f0f48421a3d9e7d80a"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:db3ca35265de239a1176d56a464b51557fce41095c37d6c406e658cf80bbb362"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1a321c4f68809fddd9b282fab6a8d8db796b270fff44722589a8b946925a2a8"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c4dfcd8d34b143916994b3876b63d53f56724c03f8c1a33a253b7b1e6bf2a7d"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81e5facfc9f4a674c6a78c64d38becfbd5e4f739c31fcd9ce44c849f1fad9e4c"},
-    {file = "ruff-0.5.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e589e27971c2a3efff3fadafb16e5aef7ff93250f0134ec4b52052b673cf988d"},
-    {file = "ruff-0.5.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2ffbc3715a52b037bcb0f6ff524a9367f642cdc5817944f6af5479bbb2eb50e"},
-    {file = "ruff-0.5.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cd096e23c6a4f9c819525a437fa0a99d1c67a1b6bb30948d46f33afbc53596cf"},
-    {file = "ruff-0.5.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:46e193b36f2255729ad34a49c9a997d506e58f08555366b2108783b3064a0e1e"},
-    {file = "ruff-0.5.0-py3-none-win32.whl", hash = "sha256:49141d267100f5ceff541b4e06552e98527870eafa1acc9dec9139c9ec5af64c"},
-    {file = "ruff-0.5.0-py3-none-win_amd64.whl", hash = "sha256:e9118f60091047444c1b90952736ee7b1792910cab56e9b9a9ac20af94cd0440"},
-    {file = "ruff-0.5.0-py3-none-win_arm64.whl", hash = "sha256:ed5c4df5c1fb4518abcb57725b576659542bdbe93366f4f329e8f398c4b71178"},
-    {file = "ruff-0.5.0.tar.gz", hash = "sha256:eb641b5873492cf9bd45bc9c5ae5320648218e04386a5f0c264ad6ccce8226a1"},
+    {file = "ruff-0.5.5-py3-none-linux_armv6l.whl", hash = "sha256:605d589ec35d1da9213a9d4d7e7a9c761d90bba78fc8790d1c5e65026c1b9eaf"},
+    {file = "ruff-0.5.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00817603822a3e42b80f7c3298c8269e09f889ee94640cd1fc7f9329788d7bf8"},
+    {file = "ruff-0.5.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:187a60f555e9f865a2ff2c6984b9afeffa7158ba6e1eab56cb830404c942b0f3"},
+    {file = "ruff-0.5.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe26fc46fa8c6e0ae3f47ddccfbb136253c831c3289bba044befe68f467bfb16"},
+    {file = "ruff-0.5.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ad25dd9c5faac95c8e9efb13e15803cd8bbf7f4600645a60ffe17c73f60779b"},
+    {file = "ruff-0.5.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f70737c157d7edf749bcb952d13854e8f745cec695a01bdc6e29c29c288fc36e"},
+    {file = "ruff-0.5.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:cfd7de17cef6ab559e9f5ab859f0d3296393bc78f69030967ca4d87a541b97a0"},
+    {file = "ruff-0.5.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a09b43e02f76ac0145f86a08e045e2ea452066f7ba064fd6b0cdccb486f7c3e7"},
+    {file = "ruff-0.5.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0b856cb19c60cd40198be5d8d4b556228e3dcd545b4f423d1ad812bfdca5884"},
+    {file = "ruff-0.5.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3687d002f911e8a5faf977e619a034d159a8373514a587249cc00f211c67a091"},
+    {file = "ruff-0.5.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ac9dc814e510436e30d0ba535f435a7f3dc97f895f844f5b3f347ec8c228a523"},
+    {file = "ruff-0.5.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:af9bdf6c389b5add40d89b201425b531e0a5cceb3cfdcc69f04d3d531c6be74f"},
+    {file = "ruff-0.5.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d40a8533ed545390ef8315b8e25c4bb85739b90bd0f3fe1280a29ae364cc55d8"},
+    {file = "ruff-0.5.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:cab904683bf9e2ecbbe9ff235bfe056f0eba754d0168ad5407832928d579e7ab"},
+    {file = "ruff-0.5.5-py3-none-win32.whl", hash = "sha256:696f18463b47a94575db635ebb4c178188645636f05e934fdf361b74edf1bb2d"},
+    {file = "ruff-0.5.5-py3-none-win_amd64.whl", hash = "sha256:50f36d77f52d4c9c2f1361ccbfbd09099a1b2ea5d2b2222c586ab08885cf3445"},
+    {file = "ruff-0.5.5-py3-none-win_arm64.whl", hash = "sha256:3191317d967af701f1b73a31ed5788795936e423b7acce82a2b63e26eb3e89d6"},
+    {file = "ruff-0.5.5.tar.gz", hash = "sha256:cc5516bdb4858d972fbc31d246bdb390eab8df1a26e2353be2dbc0c2d7f5421a"},
 ]

 [[package]]
@@ -2433,13 +2418,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]

 [[package]]
 name = "sentry-sdk"
-version = "2.6.0"
+version = "2.10.0"
 description = "Python client for Sentry (https://sentry.io)"
 optional = true
 python-versions = ">=3.6"
 files = [
-    {file = "sentry_sdk-2.6.0-py2.py3-none-any.whl", hash = "sha256:422b91cb49378b97e7e8d0e8d5a1069df23689d45262b86f54988a7db264e874"},
-    {file = "sentry_sdk-2.6.0.tar.gz", hash = "sha256:65cc07e9c6995c5e316109f138570b32da3bd7ff8d0d0ee4aaf2628c3dd8127d"},
+    {file = "sentry_sdk-2.10.0-py2.py3-none-any.whl", hash = "sha256:87b3d413c87d8e7f816cc9334bff255a83d8b577db2b22042651c30c19c09190"},
+    {file = "sentry_sdk-2.10.0.tar.gz", hash = "sha256:545fcc6e36c335faa6d6cda84669b6e17025f31efbf3b2211ec14efe008b75d1"},
 ]

 [package.dependencies]
@@ -2469,7 +2454,7 @@ langchain = ["langchain (>=0.0.210)"]
 loguru = ["loguru (>=0.5)"]
 openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"]
 opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
-opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"]
+opentelemetry-experimental = ["opentelemetry-instrumentation-aio-pika (==0.46b0)", "opentelemetry-instrumentation-aiohttp-client (==0.46b0)", "opentelemetry-instrumentation-aiopg (==0.46b0)", "opentelemetry-instrumentation-asgi (==0.46b0)", "opentelemetry-instrumentation-asyncio (==0.46b0)", "opentelemetry-instrumentation-asyncpg (==0.46b0)", "opentelemetry-instrumentation-aws-lambda (==0.46b0)", "opentelemetry-instrumentation-boto (==0.46b0)", "opentelemetry-instrumentation-boto3sqs (==0.46b0)", "opentelemetry-instrumentation-botocore (==0.46b0)", "opentelemetry-instrumentation-cassandra (==0.46b0)", "opentelemetry-instrumentation-celery (==0.46b0)", "opentelemetry-instrumentation-confluent-kafka (==0.46b0)", "opentelemetry-instrumentation-dbapi (==0.46b0)", "opentelemetry-instrumentation-django (==0.46b0)", "opentelemetry-instrumentation-elasticsearch (==0.46b0)", "opentelemetry-instrumentation-falcon (==0.46b0)", "opentelemetry-instrumentation-fastapi (==0.46b0)", "opentelemetry-instrumentation-flask (==0.46b0)", "opentelemetry-instrumentation-grpc (==0.46b0)", "opentelemetry-instrumentation-httpx (==0.46b0)", "opentelemetry-instrumentation-jinja2 (==0.46b0)", "opentelemetry-instrumentation-kafka-python (==0.46b0)", "opentelemetry-instrumentation-logging (==0.46b0)", "opentelemetry-instrumentation-mysql (==0.46b0)", "opentelemetry-instrumentation-mysqlclient (==0.46b0)", "opentelemetry-instrumentation-pika (==0.46b0)", "opentelemetry-instrumentation-psycopg (==0.46b0)", "opentelemetry-instrumentation-psycopg2 (==0.46b0)", "opentelemetry-instrumentation-pymemcache (==0.46b0)", "opentelemetry-instrumentation-pymongo (==0.46b0)", "opentelemetry-instrumentation-pymysql (==0.46b0)", "opentelemetry-instrumentation-pyramid (==0.46b0)", "opentelemetry-instrumentation-redis (==0.46b0)", "opentelemetry-instrumentation-remoulade (==0.46b0)", "opentelemetry-instrumentation-requests (==0.46b0)", "opentelemetry-instrumentation-sklearn (==0.46b0)", "opentelemetry-instrumentation-sqlalchemy (==0.46b0)", "opentelemetry-instrumentation-sqlite3 (==0.46b0)", "opentelemetry-instrumentation-starlette (==0.46b0)", "opentelemetry-instrumentation-system-metrics (==0.46b0)", "opentelemetry-instrumentation-threading (==0.46b0)", "opentelemetry-instrumentation-tornado (==0.46b0)", "opentelemetry-instrumentation-tortoiseorm (==0.46b0)", "opentelemetry-instrumentation-urllib (==0.46b0)", "opentelemetry-instrumentation-urllib3 (==0.46b0)", "opentelemetry-instrumentation-wsgi (==0.46b0)"]
 pure-eval = ["asttokens", "executing", "pure-eval"]
 pymongo = ["pymongo (>=3.1)"]
 pyspark = ["pyspark (>=2.4.4)"]
@@ -2479,7 +2464,7 @@ sanic = ["sanic (>=0.8)"]
 sqlalchemy = ["sqlalchemy (>=1.2)"]
 starlette = ["starlette (>=0.19.1)"]
 starlite = ["starlite (>=1.48)"]
-tornado = ["tornado (>=5)"]
+tornado = ["tornado (>=6)"]

 [[package]]
 name = "service-identity"
@@ -2664,24 +2649,24 @@ files = [

 [[package]]
 name = "towncrier"
-version = "23.11.0"
+version = "24.7.1"
 description = "Building newsfiles for your project."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "towncrier-23.11.0-py3-none-any.whl", hash = "sha256:2e519ca619426d189e3c98c99558fe8be50c9ced13ea1fc20a4a353a95d2ded7"},
-    {file = "towncrier-23.11.0.tar.gz", hash = "sha256:13937c247e3f8ae20ac44d895cf5f96a60ad46cfdcc1671759530d7837d9ee5d"},
+    {file = "towncrier-24.7.1-py3-none-any.whl", hash = "sha256:685e2a94335b5dc47537b4d3b449a25b18571ea85b07dcf6e8df31ba40f692dd"},
+    {file = "towncrier-24.7.1.tar.gz", hash = "sha256:57a057faedabcadf1a62f6f9bad726ae566c1f31a411338ddb8316993f583b3d"},
 ]

 [package.dependencies]
 click = "*"
+importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""}
 importlib-resources = {version = ">=5", markers = "python_version < \"3.10\""}
-incremental = "*"
 jinja2 = "*"
 tomli = {version = "*", markers = "python_version < \"3.11\""}

 [package.extras]
-dev = ["furo", "packaging", "sphinx (>=5)", "twisted"]
+dev = ["furo (>=2024.05.06)", "nox", "packaging", "sphinx (>=5)", "twisted"]

 [[package]]
 name = "treq"
@@ -2707,19 +2692,19 @@ docs = ["sphinx (<7.0.0)"]

 [[package]]
 name = "twine"
-version = "5.1.0"
+version = "5.1.1"
 description = "Collection of utilities for publishing packages on PyPI"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "twine-5.1.0-py3-none-any.whl", hash = "sha256:fe1d814395bfe50cfbe27783cb74efe93abeac3f66deaeb6c8390e4e92bacb43"},
-    {file = "twine-5.1.0.tar.gz", hash = "sha256:4d74770c88c4fcaf8134d2a6a9d863e40f08255ff7d8e2acb3cbbd57d25f6e9d"},
+    {file = "twine-5.1.1-py3-none-any.whl", hash = "sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997"},
+    {file = "twine-5.1.1.tar.gz", hash = "sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db"},
 ]

 [package.dependencies]
 importlib-metadata = ">=3.6"
 keyring = ">=15.1"
-pkginfo = ">=1.8.1"
+pkginfo = ">=1.8.1,<1.11"
 readme-renderer = ">=35.0"
 requests = ">=2.20"
 requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0"
@@ -2832,13 +2817,13 @@ files = [

 [[package]]
 name = "types-jsonschema"
-version = "4.22.0.20240610"
+version = "4.23.0.20240712"
 description = "Typing stubs for jsonschema"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-jsonschema-4.22.0.20240610.tar.gz", hash = "sha256:f82ab9fe756e3a2642ea9712c46b403ce61eb380b939b696cff3252af42f65b0"},
-    {file = "types_jsonschema-4.22.0.20240610-py3-none-any.whl", hash = "sha256:89996b9bd1928f820a0e252b2844be21cd2e55d062b6fa1048d88453006ad89e"},
+    {file = "types-jsonschema-4.23.0.20240712.tar.gz", hash = "sha256:b20db728dcf7ea3e80e9bdeb55e8b8420c6c040cda14e8cf284465adee71d217"},
+    {file = "types_jsonschema-4.23.0.20240712-py3-none-any.whl", hash = "sha256:8c33177ce95336241c1d61ccb56a9964d4361b99d5f1cd81a1ab4909b0dd7cf4"},
 ]

 [package.dependencies]
@@ -2890,13 +2875,13 @@ files = [

 [[package]]
 name = "types-pyopenssl"
-version = "24.1.0.20240425"
+version = "24.1.0.20240722"
 description = "Typing stubs for pyOpenSSL"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-pyOpenSSL-24.1.0.20240425.tar.gz", hash = "sha256:0a7e82626c1983dc8dc59292bf20654a51c3c3881bcbb9b337c1da6e32f0204e"},
-    {file = "types_pyOpenSSL-24.1.0.20240425-py3-none-any.whl", hash = "sha256:f51a156835555dd2a1f025621e8c4fbe7493470331afeef96884d1d29bf3a473"},
+    {file = "types-pyOpenSSL-24.1.0.20240722.tar.gz", hash = "sha256:47913b4678a01d879f503a12044468221ed8576263c1540dcb0484ca21b08c39"},
+    {file = "types_pyOpenSSL-24.1.0.20240722-py3-none-any.whl", hash = "sha256:6a7a5d2ec042537934cfb4c9d4deb0e16c4c6250b09358df1f083682fe6fda54"},
 ]

 [package.dependencies]
@@ -2930,13 +2915,13 @@ urllib3 = ">=2"
|
||||
|
||||
[[package]]
|
||||
name = "types-setuptools"
|
||||
version = "70.1.0.20240627"
|
||||
version = "71.1.0.20240726"
|
||||
description = "Typing stubs for setuptools"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "types-setuptools-70.1.0.20240627.tar.gz", hash = "sha256:385907a47b5cf302b928ce07953cd91147d5de6f3da604c31905fdf0ec309e83"},
|
||||
{file = "types_setuptools-70.1.0.20240627-py3-none-any.whl", hash = "sha256:c7bdf05cd0a8b66868b4774c7b3c079d01ae025d8c9562bfc8bf2ff44d263c9c"},
|
||||
{file = "types-setuptools-71.1.0.20240726.tar.gz", hash = "sha256:85ba28e9461bb1be86ebba4db0f1c2408f2b11115b1966334ea9dc464e29303e"},
|
||||
{file = "types_setuptools-71.1.0.20240726-py3-none-any.whl", hash = "sha256:a7775376f36e0ff09bcad236bf265777590a66b11623e48c20bfc30f1444ea36"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3094,18 +3079,18 @@ docs = ["Sphinx", "elementpath (>=4.1.5,<5.0.0)", "jinja2", "sphinx-rtd-theme"]
|
||||
|
||||
[[package]]
|
||||
name = "zipp"
|
||||
version = "3.15.0"
|
||||
version = "3.19.1"
|
||||
description = "Backport of pathlib-compatible object wrapper for zip files"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"},
|
||||
{file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"},
|
||||
{file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"},
|
||||
{file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
|
||||
testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
|
||||
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
|
||||
test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
|
||||
|
||||
[[package]]
|
||||
name = "zope-event"
|
||||
@@ -3211,4 +3196,4 @@ user-search = ["pyicu"]
|
||||
[metadata]
|
||||
lock-version = "2.0"
|
||||
python-versions = "^3.8.0"
|
||||
content-hash = "3372a97db99050a34f8eddad2ddf8efe8b7b704b6123df4a3e36ddc171e8f34d"
|
||||
content-hash = "c165cdc1f6612c9f1b5bfd8063c23e2d595d717dd8ac1a468519e902be2cdf93"
|
||||
|
||||
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
|
||||
|
||||
[tool.poetry]
|
||||
name = "matrix-synapse"
|
||||
version = "1.111.1"
|
||||
version = "1.113.0rc1"
|
||||
description = "Homeserver for the Matrix decentralised comms protocol"
|
||||
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
|
||||
license = "AGPL-3.0-or-later"
|
||||
@@ -201,8 +201,8 @@ netaddr = ">=0.7.18"
|
||||
# add a lower bound to the Jinja2 dependency.
|
||||
Jinja2 = ">=3.0"
|
||||
bleach = ">=1.4.3"
|
||||
# We use `Self`, which were added in `typing-extensions` 4.0.
|
||||
typing-extensions = ">=4.0"
|
||||
# We use `assert_never`, which were added in `typing-extensions` 4.1.
|
||||
typing-extensions = ">=4.1"
|
||||
# We enforce that we have a `cryptography` version that bundles an `openssl`
|
||||
# with the latest security patches.
|
||||
cryptography = ">=3.4.7"
|
||||
@@ -322,7 +322,7 @@ all = [
|
||||
# This helps prevents merge conflicts when running a batch of dependabot updates.
|
||||
isort = ">=5.10.1"
|
||||
black = ">=22.7.0"
|
||||
ruff = "0.5.0"
|
||||
ruff = "0.5.5"
|
||||
# Type checking only works with the pydantic.v1 compat module from pydantic v2
|
||||
pydantic = "^2"
|
||||
|
||||
|
||||
@@ -119,18 +119,19 @@ BOOLEAN_COLUMNS = {
|
||||
"e2e_room_keys": ["is_verified"],
|
||||
"event_edges": ["is_state"],
|
||||
"events": ["processed", "outlier", "contains_url"],
|
||||
"local_media_repository": ["safe_from_quarantine"],
|
||||
"local_media_repository": ["safe_from_quarantine", "authenticated"],
|
||||
"per_user_experimental_features": ["enabled"],
|
||||
"presence_list": ["accepted"],
|
||||
"presence_stream": ["currently_active"],
|
||||
"public_room_list_stream": ["visibility"],
|
||||
"pushers": ["enabled"],
|
||||
"redactions": ["have_censored"],
|
||||
"remote_media_cache": ["authenticated"],
|
||||
"room_stats_state": ["is_federatable"],
|
||||
"rooms": ["is_public", "has_auth_chain_index"],
|
||||
"users": ["shadow_banned", "approved", "locked", "suspended"],
|
||||
"un_partial_stated_event_stream": ["rejection_status_changed"],
|
||||
"users_who_share_rooms": ["share_private"],
|
||||
"per_user_experimental_features": ["enabled"],
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -50,7 +50,7 @@ class Membership:
|
||||
KNOCK: Final = "knock"
|
||||
LEAVE: Final = "leave"
|
||||
BAN: Final = "ban"
|
||||
LIST: Final = {INVITE, JOIN, KNOCK, LEAVE, BAN}
|
||||
LIST: Final = frozenset((INVITE, JOIN, KNOCK, LEAVE, BAN))
|
||||
|
||||
|
||||
class PresenceState:
|
||||
@@ -225,6 +225,11 @@ class EventContentFields:
|
||||
# This is deprecated in MSC2175.
|
||||
ROOM_CREATOR: Final = "creator"
|
||||
|
||||
# The version of the room for `m.room.create` events.
|
||||
ROOM_VERSION: Final = "room_version"
|
||||
|
||||
ROOM_NAME: Final = "name"
|
||||
|
||||
# Used in m.room.guest_access events.
|
||||
GUEST_ACCESS: Final = "guest_access"
|
||||
|
||||
@@ -237,6 +242,9 @@ class EventContentFields:
|
||||
# an unspecced field added to to-device messages to identify them uniquely-ish
|
||||
TO_DEVICE_MSGID: Final = "org.matrix.msgid"
|
||||
|
||||
# `m.room.encryption`` algorithm field
|
||||
ENCRYPTION_ALGORITHM: Final = "algorithm"
|
||||
|
||||
|
||||
class EventUnsignedContentFields:
|
||||
"""Fields found inside the 'unsigned' data on events"""
|
||||
|
||||
@@ -128,6 +128,10 @@ class Codes(str, Enum):
|
||||
# MSC2677
|
||||
DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION"
|
||||
|
||||
# MSC3575 we are telling the client they need to expire their sliding sync
|
||||
# connection.
|
||||
UNKNOWN_POS = "M_UNKNOWN_POS"
|
||||
|
||||
|
||||
class CodeMessageException(RuntimeError):
|
||||
"""An exception with integer code, a message string attributes and optional headers.
|
||||
@@ -847,3 +851,17 @@ class PartialStateConflictError(SynapseError):
|
||||
msg=PartialStateConflictError.message(),
|
||||
errcode=Codes.UNKNOWN,
|
||||
)
|
||||
|
||||
|
||||
class SlidingSyncUnknownPosition(SynapseError):
|
||||
"""An error that Synapse can return to signal to the client to expire their
|
||||
sliding sync connection (i.e. send a new request without a `?since=`
|
||||
param).
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__(
|
||||
HTTPStatus.BAD_REQUEST,
|
||||
msg="Unknown position",
|
||||
errcode=Codes.UNKNOWN_POS,
|
||||
)
|
||||
|
||||
@@ -236,9 +236,8 @@ class Ratelimiter:
|
||||
requester: The requester that is doing the action, if any.
|
||||
key: An arbitrary key used to classify an action. Defaults to the
|
||||
requester's user ID.
|
||||
n_actions: The number of times the user wants to do this action. If the user
|
||||
cannot do all of the actions, the user's action count is not incremented
|
||||
at all.
|
||||
n_actions: The number of times the user performed the action. May be negative
|
||||
to "refund" the rate limit.
|
||||
_time_now_s: The current time. Optional, defaults to the current time according
|
||||
to self.clock. Only used by tests.
|
||||
"""
|
||||
|
||||
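The reworded `n_actions` documentation above is the hook for the download-ratelimiting rework later in this diff: a caller may now pass a negative count to hand back capacity it charged but did not use. A minimal sketch of that refund pattern, using only the `can_do_action`/`record_action` calls visible in this diff; the `download_with_refund` helper and its arguments are invented for illustration:

# Hypothetical helper illustrating negative-n_actions refunds; `ratelimiter`
# stands in for a Ratelimiter instance as used elsewhere in this diff.
async def download_with_refund(ratelimiter, ip_address: str, expected_size: int) -> None:
    # Pessimistically charge the expected size before doing the work.
    allowed, _ = await ratelimiter.can_do_action(
        requester=None, key=ip_address, n_actions=expected_size
    )
    if not allowed:
        raise Exception("rate limited")
    actual_size = expected_size // 2  # stand-in for the bytes actually read
    # Refund the overestimate: a negative n_actions decrements the counter.
    ratelimiter.record_action(
        requester=None, key=ip_address, n_actions=actual_size - expected_size
    )
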
@@ -217,7 +217,7 @@ class SynapseHomeServer(HomeServer):
)

if name in ["media", "federation", "client"]:
if self.config.server.enable_media_repo:
if self.config.media.can_load_media_repo:
media_repo = self.get_media_repository_resource()
resources.update(
{

@@ -126,7 +126,7 @@ class ContentRepositoryConfig(Config):
# Only enable the media repo if either the media repo is enabled or the
# current worker app is the media repo.
if (
self.root.server.enable_media_repo is False
config.get("enable_media_repo", True) is False
and config.get("worker_app") != "synapse.app.media_repository"
):
self.can_load_media_repo = False
@@ -272,6 +272,10 @@ class ContentRepositoryConfig(Config):
remote_media_lifetime
)

self.enable_authenticated_media = config.get(
"enable_authenticated_media", False
)

def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
assert data_dir_path is not None
media_store = os.path.join(data_dir_path, "media_store")

@@ -384,6 +384,11 @@ class ServerConfig(Config):
# Whether to internally track presence, requires that presence is enabled,
self.track_presence = self.presence_enabled and presence_enabled != "untracked"

# Determines if presence results for offline users are included on initial/full sync
self.presence_include_offline_users_on_sync = presence_config.get(
"include_offline_users_on_sync", False
)

# Custom presence router module
# This is the legacy way of configuring it (the config should now be put in the modules section)
self.presence_router_module_class = None
@@ -395,12 +400,6 @@ class ServerConfig(Config):
self.presence_router_config,
) = load_module(presence_router_config, ("presence", "presence_router"))

# whether to enable the media repository endpoints. This should be set
# to false if the media repository is running as a separate endpoint;
# doing so ensures that we will not run cache cleanup jobs on the
# master, potentially causing inconsistency.
self.enable_media_repo = config.get("enable_media_repo", True)

# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API.
self.require_auth_for_profile_requests = config.get(

@@ -554,3 +554,22 @@ def relation_from_event(event: EventBase) -> Optional[_EventRelation]:
aggregation_key = None

return _EventRelation(parent_id, rel_type, aggregation_key)


@attr.s(slots=True, frozen=True, auto_attribs=True)
class StrippedStateEvent:
"""
A stripped down state event. Usually used for remote invite/knocks so the user can
make an informed decision on whether they want to join.

Attributes:
type: Event `type`
state_key: Event `state_key`
sender: Event `sender`
content: Event `content`
"""

type: str
state_key: str
sender: str
content: Dict[str, Any]

@@ -49,7 +49,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.types import JsonDict, Requester

from . import EventBase, make_event_from_dict
from . import EventBase, StrippedStateEvent, make_event_from_dict

if TYPE_CHECKING:
from synapse.handlers.relations import BundledAggregations
@@ -854,3 +854,30 @@ def strip_event(event: EventBase) -> JsonDict:
"content": event.content,
"sender": event.sender,
}


def parse_stripped_state_event(raw_stripped_event: Any) -> Optional[StrippedStateEvent]:
"""
Given a raw value from an event's `unsigned` field, attempt to parse it into a
`StrippedStateEvent`.
"""
if isinstance(raw_stripped_event, dict):
# All of these fields are required
type = raw_stripped_event.get("type")
state_key = raw_stripped_event.get("state_key")
sender = raw_stripped_event.get("sender")
content = raw_stripped_event.get("content")
if (
isinstance(type, str)
and isinstance(state_key, str)
and isinstance(sender, str)
and isinstance(content, dict)
):
return StrippedStateEvent(
type=type,
state_key=state_key,
sender=sender,
content=content,
)

return None

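The parser above deliberately returns `None` for anything malformed instead of raising, since stripped state arrives from remote, untrusted servers. A hypothetical caller, with invented payload values:

# Example input shaped like one entry of an invite's stripped room state.
raw = {
    "type": "m.room.name",
    "state_key": "",
    "sender": "@alice:example.org",
    "content": {"name": "Example room"},
}
stripped = parse_stripped_state_event(raw)
assert stripped is not None and stripped.content["name"] == "Example room"

# Missing required fields (or a non-dict) parse to None rather than raising.
assert parse_stripped_state_event({"type": "m.room.name"}) is None
assert parse_stripped_state_event("not a dict") is None
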
@@ -39,6 +39,7 @@ from synapse.metrics.background_process_metrics import (
)
from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo
from synapse.types import (
DeviceListUpdates,
JsonDict,
JsonMapping,
ScheduledTask,
@@ -214,7 +215,7 @@ class DeviceWorkerHandler:
@cancellable
async def get_user_ids_changed(
self, user_id: str, from_token: StreamToken
) -> JsonDict:
) -> DeviceListUpdates:
"""Get list of users that have had the devices updated, or have newly
joined a room, that `user_id` may be interested in.
"""
@@ -341,11 +342,19 @@ class DeviceWorkerHandler:
possibly_joined = set()
possibly_left = set()

result = {"changed": list(possibly_joined), "left": list(possibly_left)}
device_list_updates = DeviceListUpdates(
changed=possibly_joined,
left=possibly_left,
)

log_kv(result)
log_kv(
{
"changed": device_list_updates.changed,
"left": device_list_updates.left,
}
)

return result
return device_list_updates

async def on_federation_query_user_devices(self, user_id: str) -> JsonDict:
if not self.hs.is_mine(UserID.from_string(user_id)):

@@ -291,13 +291,20 @@ class E2eKeysHandler:

# Only try and fetch keys for destinations that are not marked as
# down.
filtered_destinations = await filter_destinations_by_retry_limiter(
remote_queries_not_in_cache.keys(),
self.clock,
self.store,
# Let's give an arbitrary grace period for those hosts that are
# only recently down
retry_due_within_ms=60 * 1000,
unfiltered_destinations = remote_queries_not_in_cache.keys()
filtered_destinations = set(
await filter_destinations_by_retry_limiter(
unfiltered_destinations,
self.clock,
self.store,
# Let's give an arbitrary grace period for those hosts that are
# only recently down
retry_due_within_ms=60 * 1000,
)
)
failures.update(
(dest, _NOT_READY_FOR_RETRY_FAILURE)
for dest in (unfiltered_destinations - filtered_destinations)
)

await concurrently_execute(
@@ -1641,6 +1648,9 @@ def _check_device_signature(
raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE)


_NOT_READY_FOR_RETRY_FAILURE = {"status": 503, "message": "Not ready for retry"}


def _exception_to_failure(e: Exception) -> JsonDict:
if isinstance(e, SynapseError):
return {"status": e.code, "errcode": e.errcode, "message": str(e)}
@@ -1649,7 +1659,7 @@ def _exception_to_failure(e: Exception) -> JsonDict:
return {"status": e.code, "message": str(e)}

if isinstance(e, NotRetryingDestination):
return {"status": 503, "message": "Not ready for retry"}
return _NOT_READY_FOR_RETRY_FAILURE

# include ConnectionRefused and other errors
#

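The hunk above is a behaviour change as well as a refactor: destinations filtered out by the retry limiter are now reported back to the caller as `_NOT_READY_FOR_RETRY_FAILURE` entries instead of silently vanishing from the result. The set difference carries the whole idea; a standalone sketch with plain sets (hostnames invented):

# Standalone sketch of the failure-marking pattern; no Synapse imports needed.
unfiltered = {"down.example.org", "up.example.org"}
filtered = {"up.example.org"}  # stand-in for the retry limiter's output

failures = {}
failures.update(
    (dest, {"status": 503, "message": "Not ready for retry"})
    for dest in (unfiltered - filtered)
)
assert list(failures) == ["down.example.org"]
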
@@ -34,7 +34,7 @@ from synapse.api.errors import (
from synapse.logging.opentracing import log_kv, trace
from synapse.storage.databases.main.e2e_room_keys import RoomKey
from synapse.types import JsonDict
from synapse.util.async_helpers import Linearizer
from synapse.util.async_helpers import ReadWriteLock

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -58,7 +58,7 @@ class E2eRoomKeysHandler:
# clients belonging to a user will receive and try to upload a new session at
# roughly the same time. Also used to lock out uploads when the key is being
# changed.
self._upload_linearizer = Linearizer("upload_room_keys_lock")
self._upload_lock = ReadWriteLock()

@trace
async def get_room_keys(
@@ -89,7 +89,7 @@ class E2eRoomKeysHandler:

# we deliberately take the lock to get keys so that changing the version
# works atomically
async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.read(user_id):
# make sure the backup version exists
try:
await self.store.get_e2e_room_keys_version_info(user_id, version)
@@ -132,7 +132,7 @@ class E2eRoomKeysHandler:
"""

# lock for consistency with uploading
async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.write(user_id):
# make sure the backup version exists
try:
version_info = await self.store.get_e2e_room_keys_version_info(
@@ -193,7 +193,7 @@ class E2eRoomKeysHandler:
# TODO: Validate the JSON to make sure it has the right keys.

# XXX: perhaps we should use a finer grained lock here?
async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.write(user_id):
# Check that the version we're trying to upload is the current version
try:
version_info = await self.store.get_e2e_room_keys_version_info(user_id)
@@ -355,7 +355,7 @@ class E2eRoomKeysHandler:
# TODO: Validate the JSON to make sure it has the right keys.

# lock everyone out until we've switched version
async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.write(user_id):
new_version = await self.store.create_e2e_room_keys_version(
user_id, version_info
)
@@ -382,7 +382,7 @@ class E2eRoomKeysHandler:
}
"""

async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.read(user_id):
try:
res = await self.store.get_e2e_room_keys_version_info(user_id, version)
except StoreError as e:
@@ -407,7 +407,7 @@ class E2eRoomKeysHandler:
NotFoundError: if this backup version doesn't exist
"""

async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.write(user_id):
try:
await self.store.delete_e2e_room_keys_version(user_id, version)
except StoreError as e:
@@ -437,7 +437,7 @@ class E2eRoomKeysHandler:
raise SynapseError(
400, "Version in body does not match", Codes.INVALID_PARAM
)
async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.write(user_id):
try:
old_info = await self.store.get_e2e_room_keys_version_info(
user_id, version

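Replacing the per-user `Linearizer` with a `ReadWriteLock` lets concurrent reads of a backup overlap while uploads, version changes, and deletes still get exclusive access. A sketch of the intended usage, assuming `ReadWriteLock` exposes per-key `read`/`write` async context managers exactly as the hunks above use them:

from synapse.util.async_helpers import ReadWriteLock

lock = ReadWriteLock()

async def get_backup(user_id: str) -> None:
    # Shared section: many readers for the same user may proceed at once.
    async with lock.read(user_id):
        ...  # fetch keys / version info

async def replace_backup(user_id: str) -> None:
    # Exclusive section: waits for in-flight readers and blocks new ones.
    async with lock.write(user_id):
        ...  # delete or switch the backup version
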
@@ -286,8 +286,14 @@ class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]):
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
to_key: Optional[MultiWriterStreamToken] = None,
) -> Tuple[List[JsonMapping], MultiWriterStreamToken]:
to_key = self.get_current_key()
"""
Find read receipts for given rooms (> `from_token` and <= `to_token`)
"""

if to_key is None:
to_key = self.get_current_key()

if from_key == to_key:
return [], to_key

@@ -1188,6 +1188,8 @@ class RoomCreationHandler:
)
events_to_send.append((power_event, power_context))
else:
# Please update the docs for `default_power_level_content_override` when
# updating the `events` dict below
power_level_content: JsonDict = {
"users": {creator_id: 100},
"users_default": 0,

(File diff suppressed because it is too large.)
@@ -293,7 +293,9 @@ class StatsHandler:
"history_visibility"
)
elif delta.event_type == EventTypes.RoomEncryption:
room_state["encryption"] = event_content.get("algorithm")
room_state["encryption"] = event_content.get(
EventContentFields.ENCRYPTION_ALGORITHM
)
elif delta.event_type == EventTypes.Name:
room_state["name"] = event_content.get("name")
elif delta.event_type == EventTypes.Topic:

@@ -2270,7 +2270,11 @@ class SyncHandler:
user=user,
from_key=presence_key,
is_guest=sync_config.is_guest,
include_offline=include_offline,
include_offline=(
True
if self.hs_config.server.presence_include_offline_users_on_sync
else include_offline
),
)
assert presence_key
sync_result_builder.now_token = now_token.copy_and_replace(

@@ -565,7 +565,12 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]):
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
to_key: Optional[int] = None,
) -> Tuple[List[JsonMapping], int]:
"""
Find typing notifications for given rooms (> `from_token` and <= `to_token`)
"""

with Measure(self.clock, "typing.get_new_events"):
from_key = int(from_key)
handler = self.get_typing_handler()
@@ -574,7 +579,9 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]):
for room_id in room_ids:
if room_id not in handler._room_serials:
continue
if handler._room_serials[room_id] <= from_key:
if handler._room_serials[room_id] <= from_key or (
to_key is not None and handler._room_serials[room_id] > to_key
):
continue

events.append(self._make_event_for(room_id))

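With the new `to_key` parameter the serial check becomes a half-open window: a room is reported only when `from_key < serial <= to_key`. A self-contained sketch of that filter, with invented data:

from typing import Dict, List, Optional

def rooms_with_new_typing(
    room_serials: Dict[str, int], from_key: int, to_key: Optional[int]
) -> List[str]:
    result = []
    for room_id, serial in room_serials.items():
        # Skip anything at or before from_key, or past the requested to_key.
        if serial <= from_key or (to_key is not None and serial > to_key):
            continue
        result.append(room_id)
    return result

assert rooms_with_new_typing({"!a:x": 5, "!b:x": 9}, from_key=4, to_key=6) == ["!a:x"]
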
@@ -90,7 +90,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.async_helpers import AwakenableSleeper, timeout_deferred
from synapse.util.async_helpers import AwakenableSleeper, Linearizer, timeout_deferred
from synapse.util.metrics import Measure
from synapse.util.stringutils import parse_and_validate_server_name

@@ -475,6 +475,8 @@ class MatrixFederationHttpClient:
use_proxy=True,
)

self.remote_download_linearizer = Linearizer("remote_download_linearizer", 6)

def wake_destination(self, destination: str) -> None:
"""Called when the remote server may have come back online."""

@@ -1486,35 +1488,44 @@ class MatrixFederationHttpClient:
)

headers = dict(response.headers.getAllRawHeaders())

expected_size = response.length
# if we don't get an expected length then use the max length

if expected_size == UNKNOWN_LENGTH:
expected_size = max_size
logger.debug(
f"File size unknown, assuming file is max allowable size: {max_size}"
)
else:
if int(expected_size) > max_size:
msg = "Requested file is too large > %r bytes" % (max_size,)
logger.warning(
"{%s} [%s] %s",
request.txn_id,
request.destination,
msg,
)
raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)

read_body, _ = await download_ratelimiter.can_do_action(
requester=None,
key=ip_address,
n_actions=expected_size,
)
if not read_body:
msg = "Requested file size exceeds ratelimits"
logger.warning(
"{%s} [%s] %s",
request.txn_id,
request.destination,
msg,
read_body, _ = await download_ratelimiter.can_do_action(
requester=None,
key=ip_address,
n_actions=expected_size,
)
raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
if not read_body:
msg = "Requested file size exceeds ratelimits"
logger.warning(
"{%s} [%s] %s",
request.txn_id,
request.destination,
msg,
)
raise SynapseError(
HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED
)

try:
# add a byte of headroom to max size as function errs at >=
d = read_body_with_max_size(response, output_stream, expected_size + 1)
d.addTimeout(self.default_timeout_seconds, self.reactor)
length = await make_deferred_yieldable(d)
async with self.remote_download_linearizer.queue(ip_address):
# add a byte of headroom to max size as function errs at >=
d = read_body_with_max_size(response, output_stream, expected_size + 1)
d.addTimeout(self.default_timeout_seconds, self.reactor)
length = await make_deferred_yieldable(d)
except BodyExceededMaxSize:
msg = "Requested file is too large > %r bytes" % (expected_size,)
logger.warning(
@@ -1560,6 +1571,13 @@ class MatrixFederationHttpClient:
request.method,
request.uri.decode("ascii"),
)

# if we didn't know the length upfront, decrement the actual size from ratelimiter
if response.length == UNKNOWN_LENGTH:
download_ratelimiter.record_action(
requester=None, key=ip_address, n_actions=length
)

return length, headers

async def federation_get_file(
@@ -1630,29 +1648,37 @@ class MatrixFederationHttpClient:
)

headers = dict(response.headers.getAllRawHeaders())

expected_size = response.length
# if we don't get an expected length then use the max length

if expected_size == UNKNOWN_LENGTH:
expected_size = max_size
logger.debug(
f"File size unknown, assuming file is max allowable size: {max_size}"
)
else:
if int(expected_size) > max_size:
msg = "Requested file is too large > %r bytes" % (max_size,)
logger.warning(
"{%s} [%s] %s",
request.txn_id,
request.destination,
msg,
)
raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)

read_body, _ = await download_ratelimiter.can_do_action(
requester=None,
key=ip_address,
n_actions=expected_size,
)
if not read_body:
msg = "Requested file size exceeds ratelimits"
logger.warning(
"{%s} [%s] %s",
request.txn_id,
request.destination,
msg,
read_body, _ = await download_ratelimiter.can_do_action(
requester=None,
key=ip_address,
n_actions=expected_size,
)
raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
if not read_body:
msg = "Requested file size exceeds ratelimits"
logger.warning(
"{%s} [%s] %s",
request.txn_id,
request.destination,
msg,
)
raise SynapseError(
HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED
)

# this should be a multipart/mixed response with the boundary string in the header
try:
@@ -1672,11 +1698,12 @@ class MatrixFederationHttpClient:
raise SynapseError(HTTPStatus.BAD_GATEWAY, msg)

try:
# add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >=
deferred = read_multipart_response(
response, output_stream, boundary, expected_size + 1
)
deferred.addTimeout(self.default_timeout_seconds, self.reactor)
async with self.remote_download_linearizer.queue(ip_address):
# add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >=
deferred = read_multipart_response(
response, output_stream, boundary, expected_size + 1
)
deferred.addTimeout(self.default_timeout_seconds, self.reactor)
except BodyExceededMaxSize:
msg = "Requested file is too large > %r bytes" % (expected_size,)
logger.warning(
@@ -1743,6 +1770,13 @@ class MatrixFederationHttpClient:
request.method,
request.uri.decode("ascii"),
)

# if we didn't know the length upfront, decrement the actual size from ratelimiter
if response.length == UNKNOWN_LENGTH:
download_ratelimiter.record_action(
requester=None, key=ip_address, n_actions=length
)

return length, headers, multipart_response.json

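Two controls are threaded through both download paths above. First, the ratelimiter is only charged up front when the remote supplied a content length; an unknown-length body is read (bounded by `max_size`) and then charged for its actual length via `record_action` afterwards. Second, the body read itself now runs inside `remote_download_linearizer`, capping concurrent downloads per remote IP at six. A condensed, hypothetical reading of the combined flow; `read_body`, `ratelimiter`, `linearizer`, and `max_size` are placeholders for the surrounding class attributes:

# Sketch only; error handling and logging from the real hunks are omitted.
async def fetch_body(ip_address: str, response, output_stream) -> int:
    expected_size = response.length
    if expected_size == UNKNOWN_LENGTH:
        # Length unknown: don't pre-charge, just bound the read by max_size.
        expected_size = max_size
    else:
        # Length known: charge the ratelimiter before reading anything.
        allowed, _ = await ratelimiter.can_do_action(
            requester=None, key=ip_address, n_actions=expected_size
        )
        if not allowed:
            raise Exception("rate limited")

    # At most 6 concurrent body reads per remote IP share this linearizer.
    async with linearizer.queue(ip_address):
        length = await read_body(response, output_stream, expected_size + 1)

    if response.length == UNKNOWN_LENGTH:
        # Now the true size is known, charge for the bytes actually read.
        ratelimiter.record_action(requester=None, key=ip_address, n_actions=length)
    return length
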
@@ -430,6 +430,7 @@ class MediaRepository:
media_id: str,
name: Optional[str],
max_timeout_ms: int,
allow_authenticated: bool = True,
federation: bool = False,
) -> None:
"""Responds to requests for local media, if exists, or returns 404.
@@ -442,6 +443,7 @@ class MediaRepository:
the filename in the Content-Disposition header of the response.
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
allow_authenticated: whether media marked as authenticated may be served to this request
federation: whether the local media being fetched is for a federation request

Returns:
@@ -451,6 +453,10 @@ class MediaRepository:
if not media_info:
return

if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
if media_info.authenticated:
raise NotFoundError()

self.mark_recently_accessed(None, media_id)

media_type = media_info.media_type
@@ -481,6 +487,7 @@ class MediaRepository:
max_timeout_ms: int,
ip_address: str,
use_federation_endpoint: bool,
allow_authenticated: bool = True,
) -> None:
"""Respond to requests for remote media.

@@ -495,6 +502,8 @@ class MediaRepository:
ip_address: the IP address of the requester
use_federation_endpoint: whether to request the remote media over the new
federation `/download` endpoint
allow_authenticated: whether media marked as authenticated may be served to this
request

Returns:
Resolves once a response has successfully been written to request
@@ -526,6 +535,7 @@ class MediaRepository:
self.download_ratelimiter,
ip_address,
use_federation_endpoint,
allow_authenticated,
)

# We deliberately stream the file outside the lock
@@ -548,6 +558,7 @@ class MediaRepository:
max_timeout_ms: int,
ip_address: str,
use_federation: bool,
allow_authenticated: bool,
) -> RemoteMedia:
"""Gets the media info associated with the remote file, downloading
if necessary.
@@ -560,6 +571,8 @@ class MediaRepository:
ip_address: IP address of the requester
use_federation: if a download is necessary, whether to request the remote file
over the federation `/download` endpoint
allow_authenticated: whether media marked as authenticated may be served to this
request

Returns:
The media info of the file
@@ -581,6 +594,7 @@ class MediaRepository:
self.download_ratelimiter,
ip_address,
use_federation,
allow_authenticated,
)

# Ensure we actually use the responder so that it releases resources
@@ -598,6 +612,7 @@ class MediaRepository:
download_ratelimiter: Ratelimiter,
ip_address: str,
use_federation_endpoint: bool,
allow_authenticated: bool,
) -> Tuple[Optional[Responder], RemoteMedia]:
"""Looks for media in local cache, if not there then attempt to
download from remote server.
@@ -619,6 +634,11 @@ class MediaRepository:
"""
media_info = await self.store.get_cached_remote_media(server_name, media_id)

if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
# if it isn't cached then don't fetch it or if it's authenticated then don't serve it
if not media_info or media_info.authenticated:
raise NotFoundError()

# file_id is the ID we use to track the file locally. If we've already
# seen the file then reuse the existing ID, otherwise generate a new
# one.
@@ -792,6 +812,11 @@ class MediaRepository:

logger.info("Stored remote media in file %r", fname)

if self.hs.config.media.enable_authenticated_media:
authenticated = True
else:
authenticated = False

return RemoteMedia(
media_origin=server_name,
media_id=media_id,
@@ -802,6 +827,7 @@ class MediaRepository:
filesystem_id=file_id,
last_access_ts=time_now_ms,
quarantined_by=None,
authenticated=authenticated,
)

async def _federation_download_remote_file(
@@ -915,6 +941,11 @@ class MediaRepository:

logger.debug("Stored remote media in file %r", fname)

if self.hs.config.media.enable_authenticated_media:
authenticated = True
else:
authenticated = False

return RemoteMedia(
media_origin=server_name,
media_id=media_id,
@@ -925,6 +956,7 @@ class MediaRepository:
filesystem_id=file_id,
last_access_ts=time_now_ms,
quarantined_by=None,
authenticated=authenticated,
)

def _get_thumbnail_requirements(
@@ -1030,7 +1062,12 @@ class MediaRepository:
t_len = os.path.getsize(output_path)

await self.store.store_local_thumbnail(
media_id, t_width, t_height, t_type, t_method, t_len
media_id,
t_width,
t_height,
t_type,
t_method,
t_len,
)

return output_path

@@ -26,7 +26,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, Type

from PIL import Image

from synapse.api.errors import Codes, SynapseError, cs_error
from synapse.api.errors import Codes, NotFoundError, SynapseError, cs_error
from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP
from synapse.http.server import respond_with_json
from synapse.http.site import SynapseRequest
@@ -274,6 +274,7 @@ class ThumbnailProvider:
m_type: str,
max_timeout_ms: int,
for_federation: bool,
allow_authenticated: bool = True,
) -> None:
media_info = await self.media_repo.get_local_media_info(
request, media_id, max_timeout_ms
@@ -281,6 +282,12 @@ class ThumbnailProvider:
if not media_info:
return

# if the media the thumbnail is generated from is authenticated, don't serve the
# thumbnail over an unauthenticated endpoint
if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
if media_info.authenticated:
raise NotFoundError()

thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
await self._select_and_respond_with_thumbnail(
request,
@@ -307,14 +314,20 @@ class ThumbnailProvider:
desired_type: str,
max_timeout_ms: int,
for_federation: bool,
allow_authenticated: bool = True,
) -> None:
media_info = await self.media_repo.get_local_media_info(
request, media_id, max_timeout_ms
)

if not media_info:
return

# if the media the thumbnail is generated from is authenticated, don't serve the
# thumbnail over an unauthenticated endpoint
if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
if media_info.authenticated:
raise NotFoundError()

thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
for info in thumbnail_infos:
t_w = info.width == desired_width
@@ -381,14 +394,27 @@ class ThumbnailProvider:
max_timeout_ms: int,
ip_address: str,
use_federation: bool,
allow_authenticated: bool = True,
) -> None:
media_info = await self.media_repo.get_remote_media_info(
server_name, media_id, max_timeout_ms, ip_address, use_federation
server_name,
media_id,
max_timeout_ms,
ip_address,
use_federation,
allow_authenticated,
)
if not media_info:
respond_404(request)
return

# if the media the thumbnail is generated from is authenticated, don't serve the
# thumbnail over an unauthenticated endpoint
if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
if media_info.authenticated:
respond_404(request)
return

thumbnail_infos = await self.store.get_remote_media_thumbnails(
server_name, media_id
)
@@ -446,16 +472,28 @@ class ThumbnailProvider:
max_timeout_ms: int,
ip_address: str,
use_federation: bool,
allow_authenticated: bool = True,
) -> None:
# TODO: Don't download the whole remote file
# We should proxy the thumbnail from the remote server instead of
# downloading the remote file and generating our own thumbnails.
media_info = await self.media_repo.get_remote_media_info(
server_name, media_id, max_timeout_ms, ip_address, use_federation
server_name,
media_id,
max_timeout_ms,
ip_address,
use_federation,
allow_authenticated,
)
if not media_info:
return

# if the media the thumbnail is generated from is authenticated, don't serve the
# thumbnail over an unauthenticated endpoint
if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
if media_info.authenticated:
raise NotFoundError()

thumbnail_infos = await self.store.get_remote_media_thumbnails(
server_name, media_id
)
@@ -485,8 +523,8 @@ class ThumbnailProvider:
file_id: str,
url_cache: bool,
for_federation: bool,
server_name: Optional[str] = None,
media_info: Optional[LocalMedia] = None,
server_name: Optional[str] = None,
) -> None:
"""
Respond to a request with an appropriate thumbnail from the previously generated thumbnails.

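Every media and thumbnail entry point above now funnels through the same gate: when `enable_authenticated_media` is set and the request arrived via an unauthenticated (legacy) endpoint, media flagged as authenticated is treated as nonexistent. Reduced to its essentials (a sketch; the remote-cache variant additionally refuses to fetch media that is not already cached):

def check_authenticated_media(media_info, enable_authenticated_media: bool, allow_authenticated: bool) -> None:
    # Responding 404 instead of 403 avoids confirming that the media ID
    # exists at all on an unauthenticated endpoint.
    if enable_authenticated_media and not allow_authenticated:
        if media_info is None or media_info.authenticated:
            raise NotFoundError()
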
@@ -773,6 +773,7 @@ class Notifier:
stream_token = await self.event_sources.bound_future_token(stream_token)

start = self.clock.time_msec()
logged = False
while True:
current_token = self.event_sources.get_current_token()
if stream_token.is_before_or_eq(current_token):
@@ -783,11 +784,13 @@ class Notifier:
if now - start > 10_000:
return False

logger.info(
"Waiting for current token to reach %s; currently at %s",
stream_token,
current_token,
)
if not logged:
logger.info(
"Waiting for current token to reach %s; currently at %s",
stream_token,
current_token,
)
logged = True

# TODO: be better
await self.clock.sleep(0.5)

@@ -256,9 +256,15 @@ class KeyChangesServlet(RestServlet):

user_id = requester.user.to_string()

results = await self.device_handler.get_user_ids_changed(user_id, from_token)
device_list_updates = await self.device_handler.get_user_ids_changed(
user_id, from_token
)

return 200, results
response: JsonDict = {}
response["changed"] = list(device_list_updates.changed)
response["left"] = list(device_list_updates.left)

return 200, response


class OneTimeKeyServlet(RestServlet):

@@ -52,9 +52,9 @@ from synapse.http.servlet import (
parse_string,
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import trace_with_opname
from synapse.logging.opentracing import log_kv, set_tag, trace_with_opname
from synapse.rest.admin.experimental_features import ExperimentalFeature
from synapse.types import JsonDict, Requester, StreamToken
from synapse.types import JsonDict, Requester, SlidingSyncStreamToken, StreamToken
from synapse.types.rest.client import SlidingSyncBody
from synapse.util import json_decoder
from synapse.util.caches.lrucache import LruCache
@@ -881,7 +881,6 @@ class SlidingSyncRestServlet(RestServlet):
)

user = requester.user
device_id = requester.device_id

timeout = parse_integer(request, "timeout", default=0)
# Position in the stream
@@ -889,22 +888,50 @@ class SlidingSyncRestServlet(RestServlet):

from_token = None
if from_token_string is not None:
from_token = await StreamToken.from_string(self.store, from_token_string)
from_token = await SlidingSyncStreamToken.from_string(
self.store, from_token_string
)

# TODO: We currently don't know whether we're going to use sticky params or
# maybe some filters like sync v2 where they are built up once and referenced
# by filter ID. For now, we will just prototype with always passing everything
# in.
body = parse_and_validate_json_object_from_request(request, SlidingSyncBody)
logger.info("Sliding sync request: %r", body)

# Tag and log useful data to differentiate requests.
set_tag(
"sliding_sync.sync_type", "initial" if from_token is None else "incremental"
)
set_tag("sliding_sync.conn_id", body.conn_id or "")
log_kv(
{
"sliding_sync.lists": {
list_name: {
"ranges": list_config.ranges,
"timeline_limit": list_config.timeline_limit,
}
for list_name, list_config in (body.lists or {}).items()
},
"sliding_sync.room_subscriptions": list(
(body.room_subscriptions or {}).keys()
),
# We also include the number of room subscriptions because logs are
# limited to 1024 characters and the large room ID list above can be cut
# off.
"sliding_sync.num_room_subscriptions": len(
(body.room_subscriptions or {}).keys()
),
}
)

sync_config = SlidingSyncConfig(
user=user,
device_id=device_id,
requester=requester,
# FIXME: Currently, we're just manually copying the fields from the
# `SlidingSyncBody` into the config. How can we gurantee into the future
# `SlidingSyncBody` into the config. How can we guarantee into the future
# that we don't forget any? I would like something more structured like
# `copy_attributes(from=body, to=config)`
conn_id=body.conn_id,
lists=body.lists,
room_subscriptions=body.room_subscriptions,
extensions=body.extensions,
@@ -927,7 +954,6 @@ class SlidingSyncRestServlet(RestServlet):

return 200, response_content

# TODO: Is there a better way to encode things?
async def encode_response(
self,
requester: Requester,
@@ -942,7 +968,9 @@ class SlidingSyncRestServlet(RestServlet):
response["rooms"] = await self.encode_rooms(
requester, sliding_sync_result.rooms
)
response["extensions"] = {} # TODO: sliding_sync_result.extensions
response["extensions"] = await self.encode_extensions(
requester, sliding_sync_result.extensions
)

return response

@@ -995,8 +1023,21 @@ class SlidingSyncRestServlet(RestServlet):
if room_result.avatar:
serialized_rooms[room_id]["avatar"] = room_result.avatar

if room_result.heroes:
serialized_rooms[room_id]["heroes"] = room_result.heroes
if room_result.heroes is not None and len(room_result.heroes) > 0:
serialized_heroes = []
for hero in room_result.heroes:
serialized_hero = {
"user_id": hero.user_id,
}
if hero.display_name is not None:
# Not a typo, just how "displayname" is spelled in the spec
serialized_hero["displayname"] = hero.display_name

if hero.avatar_url is not None:
serialized_hero["avatar_url"] = hero.avatar_url

serialized_heroes.append(serialized_hero)
serialized_rooms[room_id]["heroes"] = serialized_heroes

# We should only include the `initial` key if it's `True` to save bandwidth.
# The absense of this flag means `False`.
@@ -1004,7 +1045,10 @@ class SlidingSyncRestServlet(RestServlet):
serialized_rooms[room_id]["initial"] = room_result.initial

# This will be omitted for invite/knock rooms with `stripped_state`
if room_result.required_state is not None:
if (
room_result.required_state is not None
and len(room_result.required_state) > 0
):
serialized_required_state = (
await self.event_serializer.serialize_events(
room_result.required_state,
@@ -1015,7 +1059,10 @@ class SlidingSyncRestServlet(RestServlet):
serialized_rooms[room_id]["required_state"] = serialized_required_state

# This will be omitted for invite/knock rooms with `stripped_state`
if room_result.timeline_events is not None:
if (
room_result.timeline_events is not None
and len(room_result.timeline_events) > 0
):
serialized_timeline = await self.event_serializer.serialize_events(
room_result.timeline_events,
time_now,
@@ -1043,7 +1090,10 @@ class SlidingSyncRestServlet(RestServlet):
serialized_rooms[room_id]["is_dm"] = room_result.is_dm

# Stripped state only applies to invite/knock rooms
if room_result.stripped_state is not None:
if (
room_result.stripped_state is not None
and len(room_result.stripped_state) > 0
):
# TODO: `knocked_state` but that isn't specced yet.
#
# TODO: Instead of adding `knocked_state`, it would be good to rename
@@ -1054,6 +1104,73 @@ class SlidingSyncRestServlet(RestServlet):

return serialized_rooms

async def encode_extensions(
self, requester: Requester, extensions: SlidingSyncResult.Extensions
) -> JsonDict:
serialized_extensions: JsonDict = {}

if extensions.to_device is not None:
serialized_extensions["to_device"] = {
"next_batch": extensions.to_device.next_batch,
"events": extensions.to_device.events,
}

if extensions.e2ee is not None:
serialized_extensions["e2ee"] = {
# We always include this because
# https://github.com/vector-im/element-android/issues/3725. The spec
# isn't terribly clear on when this can be omitted and how a client
# would tell the difference between "no keys present" and "nothing
# changed" in terms of whole field absent / individual key type entry
# absent Corresponding synapse issue:
# https://github.com/matrix-org/synapse/issues/10456
"device_one_time_keys_count": extensions.e2ee.device_one_time_keys_count,
# https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md
# states that this field should always be included, as long as the
# server supports the feature.
"device_unused_fallback_key_types": extensions.e2ee.device_unused_fallback_key_types,
}

if extensions.e2ee.device_list_updates is not None:
serialized_extensions["e2ee"]["device_lists"] = {}

serialized_extensions["e2ee"]["device_lists"]["changed"] = list(
extensions.e2ee.device_list_updates.changed
)
serialized_extensions["e2ee"]["device_lists"]["left"] = list(
extensions.e2ee.device_list_updates.left
)

if extensions.account_data is not None:
serialized_extensions["account_data"] = {
# Same as the the top-level `account_data.events` field in Sync v2.
"global": [
{"type": account_data_type, "content": content}
for account_data_type, content in extensions.account_data.global_account_data_map.items()
],
# Same as the joined room's account_data field in Sync v2, e.g the path
# `rooms.join["!foo:bar"].account_data.events`.
"rooms": {
room_id: [
{"type": account_data_type, "content": content}
for account_data_type, content in event_map.items()
]
for room_id, event_map in extensions.account_data.account_data_by_room_map.items()
},
}

if extensions.receipts is not None:
serialized_extensions["receipts"] = {
"rooms": extensions.receipts.room_id_to_receipt_map,
}

if extensions.typing is not None:
serialized_extensions["typing"] = {
"rooms": extensions.typing.room_id_to_typing_map,
}

return serialized_extensions


def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
SyncRestServlet(hs).register(http_server)

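Taken together, `encode_extensions` produces an `extensions` object shaped roughly as below. This is an illustrative Python literal only; every value is invented, and the per-room receipt/typing payloads are whatever the corresponding handlers place in their room maps:

example_extensions = {
    "to_device": {"next_batch": "td_123", "events": []},
    "e2ee": {
        "device_one_time_keys_count": {"signed_curve25519": 50},
        "device_unused_fallback_key_types": ["signed_curve25519"],
        "device_lists": {"changed": ["@bob:example.org"], "left": []},
    },
    "account_data": {
        "global": [{"type": "m.push_rules", "content": {}}],
        "rooms": {"!foo:bar": [{"type": "m.fully_read", "content": {}}]},
    },
    "receipts": {"rooms": {"!foo:bar": {}}},
    "typing": {"rooms": {"!foo:bar": {"user_ids": []}}},
}
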
@@ -84,7 +84,7 @@ class DownloadResource(RestServlet):
|
||||
|
||||
if self._is_mine_server_name(server_name):
|
||||
await self.media_repo.get_local_media(
|
||||
request, media_id, file_name, max_timeout_ms
|
||||
request, media_id, file_name, max_timeout_ms, allow_authenticated=False
|
||||
)
|
||||
else:
|
||||
allow_remote = parse_boolean(request, "allow_remote", default=True)
|
||||
@@ -106,4 +106,5 @@ class DownloadResource(RestServlet):
|
||||
max_timeout_ms,
|
||||
ip_address,
|
||||
False,
|
||||
allow_authenticated=False,
|
||||
)
|
||||
|
||||
@@ -96,6 +96,7 @@ class ThumbnailResource(RestServlet):
|
||||
m_type,
|
||||
max_timeout_ms,
|
||||
False,
|
||||
allow_authenticated=False,
|
||||
)
|
||||
else:
|
||||
await self.thumbnail_provider.respond_local_thumbnail(
|
||||
@@ -107,6 +108,7 @@ class ThumbnailResource(RestServlet):
|
||||
m_type,
|
||||
max_timeout_ms,
|
||||
False,
|
||||
allow_authenticated=False,
|
||||
)
|
||||
self.media_repo.mark_recently_accessed(None, media_id)
|
||||
else:
|
||||
@@ -134,6 +136,7 @@ class ThumbnailResource(RestServlet):
|
||||
m_type,
|
||||
max_timeout_ms,
|
||||
ip_address,
|
||||
False,
|
||||
use_federation=False,
|
||||
allow_authenticated=False,
|
||||
)
|
||||
self.media_repo.mark_recently_accessed(server_name, media_id)
|
||||
|
||||
@@ -559,6 +559,7 @@ class HomeServer(metaclass=abc.ABCMeta):
|
||||
def get_sync_handler(self) -> SyncHandler:
|
||||
return SyncHandler(self)
|
||||
|
||||
@cache_in_self
|
||||
def get_sliding_sync_handler(self) -> SlidingSyncHandler:
|
||||
return SlidingSyncHandler(self)
|
||||
|
||||
|
||||
@@ -120,10 +120,15 @@ class SQLBaseStore(metaclass=ABCMeta):
|
||||
"get_user_in_room_with_profile", (room_id, user_id)
|
||||
)
|
||||
self._attempt_to_invalidate_cache("get_rooms_for_user", (user_id,))
|
||||
self._attempt_to_invalidate_cache(
|
||||
"_get_rooms_for_local_user_where_membership_is_inner", (user_id,)
|
||||
)
|
||||
|
||||
# Purge other caches based on room state.
|
||||
self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
|
||||
self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,))
|
||||
self._attempt_to_invalidate_cache("get_room_type", (room_id,))
|
||||
self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
|
||||
|
||||
def _invalidate_state_caches_all(self, room_id: str) -> None:
|
||||
"""Invalidates caches that are based on the current state, but does
|
||||
@@ -146,7 +151,12 @@ class SQLBaseStore(metaclass=ABCMeta):
|
||||
self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None)
|
||||
self._attempt_to_invalidate_cache("get_user_in_room_with_profile", None)
|
||||
self._attempt_to_invalidate_cache("get_rooms_for_user", None)
|
||||
self._attempt_to_invalidate_cache(
|
||||
"_get_rooms_for_local_user_where_membership_is_inner", None
|
||||
)
|
||||
self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
|
||||
self._attempt_to_invalidate_cache("get_room_type", (room_id,))
|
self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))

def _attempt_to_invalidate_cache(
self, cache_name: str, key: Optional[Collection[Any]]

@@ -268,13 +268,23 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined]

if data.type == EventTypes.Member:
self.get_rooms_for_user.invalidate((data.state_key,)) # type: ignore[attr-defined]
self._attempt_to_invalidate_cache(
"get_rooms_for_user", (data.state_key,)
)
elif data.type == EventTypes.RoomEncryption:
self._attempt_to_invalidate_cache(
"get_room_encryption", (data.room_id,)
)
elif data.type == EventTypes.Create:
self._attempt_to_invalidate_cache("get_room_type", (data.room_id,))
elif row.type == EventsStreamAllStateRow.TypeId:
assert isinstance(data, EventsStreamAllStateRow)
# Similar to the above, but the entire caches are invalidated. This is
# unfortunate for the membership caches, but should recover quickly.
self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined]
self.get_rooms_for_user.invalidate_all() # type: ignore[attr-defined]
self._attempt_to_invalidate_cache("get_rooms_for_user", None)
self._attempt_to_invalidate_cache("get_room_type", (data.room_id,))
self._attempt_to_invalidate_cache("get_room_encryption", (data.room_id,))
else:
raise Exception("Unknown events stream row type %s" % (row.type,))

@@ -331,6 +341,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
"get_invited_rooms_for_local_user", (state_key,)
)
self._attempt_to_invalidate_cache("get_rooms_for_user", (state_key,))
self._attempt_to_invalidate_cache(
"_get_rooms_for_local_user_where_membership_is_inner", (state_key,)
)

self._attempt_to_invalidate_cache(
"did_forget",
@@ -342,6 +355,10 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._attempt_to_invalidate_cache(
"get_forgotten_rooms_for_user", (state_key,)
)
elif etype == EventTypes.Create:
self._attempt_to_invalidate_cache("get_room_type", (room_id,))
elif etype == EventTypes.RoomEncryption:
self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))

if relates_to:
self._attempt_to_invalidate_cache(
@@ -393,12 +410,17 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._attempt_to_invalidate_cache("get_thread_id_for_receipts", None)
self._attempt_to_invalidate_cache("get_invited_rooms_for_local_user", None)
self._attempt_to_invalidate_cache("get_rooms_for_user", None)
self._attempt_to_invalidate_cache(
"_get_rooms_for_local_user_where_membership_is_inner", None
)
self._attempt_to_invalidate_cache("did_forget", None)
self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None)
self._attempt_to_invalidate_cache("get_references_for_event", None)
self._attempt_to_invalidate_cache("get_thread_summary", None)
self._attempt_to_invalidate_cache("get_thread_participated", None)
self._attempt_to_invalidate_cache("get_threads", (room_id,))
self._attempt_to_invalidate_cache("get_room_type", (room_id,))
self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))

self._attempt_to_invalidate_cache("_get_state_group_for_event", None)

@@ -451,6 +473,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None)
self._attempt_to_invalidate_cache("_get_membership_from_event_id", None)
self._attempt_to_invalidate_cache("get_room_version_id", (room_id,))
self._attempt_to_invalidate_cache("get_room_type", (room_id,))
self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))

# And delete state caches.
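The pattern throughout this hunk set: direct `.invalidate()` calls, which require the cached method to exist on the class, are replaced by string-keyed `_attempt_to_invalidate_cache` calls that degrade gracefully on worker stores lacking a given cache. A minimal sketch of that helper under that assumption (the real Synapse implementation also notifies external caches):

```python
from typing import Any, Collection, Optional

def _attempt_to_invalidate_cache(
    self, cache_name: str, key: Optional[Collection[Any]]
) -> bool:
    """Method sketch: invalidate the named cache if this store has it."""
    cache = getattr(self, cache_name, None)
    if cache is None:
        # Not every worker store mixes in every cached method.
        return False
    if key is None:
        cache.invalidate_all()
    else:
        cache.invalidate(tuple(key))
    return True
```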

@@ -1313,6 +1313,11 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
# We want to make the cache more effective, so we clamp to the last
# change before the given ordering.
last_change = self._events_stream_cache.get_max_pos_of_last_change(room_id) # type: ignore[attr-defined]
if last_change is None:
# If the room isn't in the cache we know that the last change was
# somewhere before the earliest known position of the cache, so we
# can clamp to that.
last_change = self._events_stream_cache.get_earliest_known_position() # type: ignore[attr-defined]

# We don't always have a full stream_to_exterm_id table, e.g. after
# the upgrade that introduced it, so we make sure we never ask for a

@@ -64,6 +64,7 @@ class LocalMedia:
quarantined_by: Optional[str]
safe_from_quarantine: bool
user_id: Optional[str]
authenticated: Optional[bool]


@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -77,6 +78,7 @@ class RemoteMedia:
created_ts: int
last_access_ts: int
quarantined_by: Optional[str]
authenticated: Optional[bool]


@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -218,6 +220,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"last_access_ts",
"safe_from_quarantine",
"user_id",
"authenticated",
),
allow_none=True,
desc="get_local_media",
@@ -235,6 +238,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
last_access_ts=row[6],
safe_from_quarantine=row[7],
user_id=row[8],
authenticated=row[9],
)

async def get_local_media_by_user_paginate(
@@ -290,7 +294,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
last_access_ts,
quarantined_by,
safe_from_quarantine,
user_id
user_id,
authenticated
FROM local_media_repository
WHERE user_id = ?
ORDER BY {order_by_column} {order}, media_id ASC
@@ -314,6 +319,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
quarantined_by=row[7],
safe_from_quarantine=bool(row[8]),
user_id=row[9],
authenticated=row[10],
)
for row in txn
]
@@ -417,12 +423,18 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
time_now_ms: int,
user_id: UserID,
) -> None:
if self.hs.config.media.enable_authenticated_media:
authenticated = True
else:
authenticated = False

await self.db_pool.simple_insert(
"local_media_repository",
{
"media_id": media_id,
"created_ts": time_now_ms,
"user_id": user_id.to_string(),
"authenticated": authenticated,
},
desc="store_local_media_id",
)
@@ -438,6 +450,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
user_id: UserID,
url_cache: Optional[str] = None,
) -> None:
if self.hs.config.media.enable_authenticated_media:
authenticated = True
else:
authenticated = False

await self.db_pool.simple_insert(
"local_media_repository",
{
@@ -448,6 +465,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"media_length": media_length,
"user_id": user_id.to_string(),
"url_cache": url_cache,
"authenticated": authenticated,
},
desc="store_local_media",
)
@@ -638,6 +656,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"filesystem_id",
"last_access_ts",
"quarantined_by",
"authenticated",
),
allow_none=True,
desc="get_cached_remote_media",
@@ -654,6 +673,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
filesystem_id=row[4],
last_access_ts=row[5],
quarantined_by=row[6],
authenticated=row[7],
)

async def store_cached_remote_media(
@@ -666,6 +686,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
upload_name: Optional[str],
filesystem_id: str,
) -> None:
if self.hs.config.media.enable_authenticated_media:
authenticated = True
else:
authenticated = False

await self.db_pool.simple_insert(
"remote_media_cache",
{
@@ -677,6 +702,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"upload_name": upload_name,
"filesystem_id": filesystem_id,
"last_access_ts": time_now_ms,
"authenticated": authenticated,
},
desc="store_cached_remote_media",
)
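Each insert path in this file derives the flag the same way; assuming `enable_authenticated_media` is a plain boolean config value, the repeated four-line if/else blocks collapse to a one-liner:

```python
# Equivalent one-liner for the repeated if/else blocks in this file (sketch):
authenticated = bool(self.hs.config.media.enable_authenticated_media)
```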

@@ -39,6 +39,7 @@ from typing import (
import attr

from synapse.api.constants import EventTypes, Membership
from synapse.logging.opentracing import trace
from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
@@ -279,8 +280,19 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):

@cached(max_entries=100000) # type: ignore[synapse-@cached-mutable]
async def get_room_summary(self, room_id: str) -> Mapping[str, MemberSummary]:
"""Get the details of a room roughly suitable for use by the room
"""
Get the details of a room roughly suitable for use by the room
summary extension to /sync. Useful when lazy loading room members.

Returns the total count of members in the room by membership type, and a
truncated list of members (the heroes). This will be the first 6 members of the
room:
- We want 5 heroes plus 1, in case one of them is the
calling user.
- They are ordered by `stream_ordering`, preferring joined or
invited members. When no joined or invited members are available, this also includes
banned and left users.

Args:
room_id: The room ID to query
Returns:
@@ -308,23 +320,36 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
for count, membership in txn:
res.setdefault(membership, MemberSummary([], count))

# we order by membership and then fairly arbitrarily by event_id so
# heroes are consistent
# Note, rejected events will have a null membership field, so
# we manually filter them out.
# Order by membership (joins -> invites -> leave (former insiders) ->
# everything else (outsiders like bans/knocks), then by `stream_ordering` so
# the first members in the room show up first and to make the sort stable
# (consistent heroes).
#
# Note: rejected events will have a null membership field, so we manually
# filter them out.
sql = """
SELECT state_key, membership, event_id
FROM current_state_events
WHERE type = 'm.room.member' AND room_id = ?
AND membership IS NOT NULL
ORDER BY
CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
event_id ASC
CASE membership WHEN ? THEN 1 WHEN ? THEN 2 WHEN ? THEN 3 ELSE 4 END ASC,
event_stream_ordering ASC
LIMIT ?
"""

# 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6))
txn.execute(
sql,
(
room_id,
# Sort order
Membership.JOIN,
Membership.INVITE,
Membership.LEAVE,
# 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
6,
),
)
for user_id, membership, event_id in txn:
summary = res[membership]
# we will always have a summary for this membership type at this
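The new `ORDER BY` ranks memberships join → invite → leave → everything else, then breaks ties by `event_stream_ordering` so the earliest members win and the sort is stable. A rough Python equivalent of that sort key (illustrative only — the real ordering happens in SQL; `member_rows` is a hypothetical list of `(state_key, membership, stream_ordering)` tuples):

```python
MEMBERSHIP_RANK = {"join": 1, "invite": 2, "leave": 3}  # anything else -> 4

def hero_sort_key(row):
    state_key, membership, stream_ordering = row
    return (MEMBERSHIP_RANK.get(membership, 4), stream_ordering)

# 5 heroes plus 1 spare, in case one of them is the calling user:
heroes = sorted(member_rows, key=hero_sort_key)[:6]
```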
@@ -398,6 +423,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
return invite
return None

@trace
async def get_rooms_for_local_user_where_membership_is(
self,
user_id: str,
@@ -421,9 +447,11 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
if not membership_list:
return []

rooms = await self.db_pool.runInteraction(
"get_rooms_for_local_user_where_membership_is",
self._get_rooms_for_local_user_where_membership_is_txn,
# Convert membership list to frozen set as a) it needs to be hashable,
# and b) we don't care about the order.
membership_list = frozenset(membership_list)

rooms = await self._get_rooms_for_local_user_where_membership_is_inner(
user_id,
membership_list,
)
@@ -442,6 +470,24 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):

return [room for room in rooms if room.room_id not in rooms_to_exclude]

@cached(max_entries=1000, tree=True)
async def _get_rooms_for_local_user_where_membership_is_inner(
self,
user_id: str,
membership_list: Collection[str],
) -> Sequence[RoomsForUser]:
if not membership_list:
return []

rooms = await self.db_pool.runInteraction(
"get_rooms_for_local_user_where_membership_is",
self._get_rooms_for_local_user_where_membership_is_txn,
user_id,
membership_list,
)

return rooms

def _get_rooms_for_local_user_where_membership_is_txn(
self,
txn: LoggingTransaction,
@@ -1509,10 +1555,19 @@ def extract_heroes_from_room_summary(
) -> List[str]:
"""Determine the users that represent a room, from the perspective of the `me` user.

This function expects `MemberSummary.members` to already be sorted by
`stream_ordering` like the results from `get_room_summary(...)`.

The rules which say which users we select are specified in the "Room Summary"
section of
https://spec.matrix.org/v1.4/client-server-api/#get_matrixclientv3sync


Args:
details: Mapping from membership type to member summary. We expect
`MemberSummary.members` to already be sorted by `stream_ordering`.
me: The user for whom we are determining the heroes.

Returns a list (possibly empty) of heroes' mxids.
"""
empty_ms = MemberSummary([], 0)
@@ -1527,11 +1582,11 @@ def extract_heroes_from_room_summary(
r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me
] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me]

# FIXME: order by stream ordering rather than as returned by SQL
# We expect `MemberSummary.members` to already be sorted by `stream_ordering`
if joined_user_ids or invited_user_ids:
return sorted(joined_user_ids + invited_user_ids)[0:5]
return (joined_user_ids + invited_user_ids)[0:5]
else:
return sorted(gone_user_ids)[0:5]
return gone_user_ids[0:5]


@attr.s(slots=True, auto_attribs=True)
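With the `sorted()` calls gone, heroes keep the `stream_ordering` order produced by `get_room_summary` instead of being re-sorted alphabetically by mxid. An illustrative before/after with made-up user IDs:

```python
joined_user_ids = ["@zara:example.org", "@alice:example.org"]  # join order
invited_user_ids = []

# Old: sorted(...) re-ordered alphabetically -> ["@alice:...", "@zara:..."]
# New: join order (stream_ordering) is preserved:
assert (joined_user_ids + invited_user_ids)[0:5] == [
    "@zara:example.org",
    "@alice:example.org",
]
```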

@@ -30,6 +30,7 @@ from typing import (
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Set,
Tuple,
@@ -41,7 +42,7 @@ from typing import (

import attr

from synapse.api.constants import EventTypes, Membership
from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase
@@ -72,10 +73,18 @@ logger = logging.getLogger(__name__)

_T = TypeVar("_T")


MAX_STATE_DELTA_HOPS = 100


# Freeze so it's immutable and we can use it as a cache value
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Sentinel:
pass


ROOM_UNKNOWN_SENTINEL = Sentinel()


@attr.s(slots=True, frozen=True, auto_attribs=True)
class EventMetadata:
"""Returned by `get_metadata_for_events`"""
@@ -298,6 +307,194 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
create_event = await self.get_event(create_id)
return create_event

@cached(max_entries=10000)
async def get_room_type(self, room_id: str) -> Optional[str]:
raise NotImplementedError()

@cachedList(cached_method_name="get_room_type", list_name="room_ids")
async def bulk_get_room_type(
self, room_ids: Set[str]
) -> Mapping[str, Union[Optional[str], Sentinel]]:
"""
Bulk fetch room types for the given rooms (via current state).

Since this function is cached, any missing values would be cached as `None`. In
order to distinguish between a room that has a `None` room type and
a room that is unknown to the server where we might want to omit the value
(which would make it cached as `None`), instead we use the sentinel value
`ROOM_UNKNOWN_SENTINEL`.

Returns:
A mapping from room ID to the room's type (`None` is a valid room type).
Rooms unknown to this server will return `ROOM_UNKNOWN_SENTINEL`.
"""

def txn(
txn: LoggingTransaction,
) -> MutableMapping[str, Union[Optional[str], Sentinel]]:
clause, args = make_in_list_sql_clause(
txn.database_engine, "room_id", room_ids
)

# We can't rely on `room_stats_state.room_type` if the server has left the
# room because the `room_id` will still be in the table but everything will
# be set to `None`, and `None` is a valid room type value. We join against
# the `room_stats_current` table which keeps track of the
# `current_state_events` count (and a proxy value `local_users_in_room`
# which can be used to assume the server is participating in the room and has
# current state) to ensure that the data in `room_stats_state` is up-to-date
# with the current state.
#
# FIXME: Use `room_stats_current.current_state_events` instead of
# `room_stats_current.local_users_in_room` once
# https://github.com/element-hq/synapse/issues/17457 is fixed.
sql = f"""
SELECT room_id, room_type
FROM room_stats_state
INNER JOIN room_stats_current USING (room_id)
WHERE
{clause}
AND local_users_in_room > 0
"""

txn.execute(sql, args)

room_id_to_type_map = {}
for row in txn:
room_id_to_type_map[row[0]] = row[1]

return room_id_to_type_map

results = await self.db_pool.runInteraction(
"bulk_get_room_type",
txn,
)

# If we haven't updated `room_stats_state` with the room yet, query the
# create events directly. This should happen only rarely so we don't
# mind if we do this in a loop.
for room_id in room_ids - results.keys():
try:
create_event = await self.get_create_event_for_room(room_id)
room_type = create_event.content.get(EventContentFields.ROOM_TYPE)
results[room_id] = room_type
except NotFoundError:
# We use the sentinel value to distinguish between `None` which is a
# valid room type and a room that is unknown to the server so the value
# is just unset.
results[room_id] = ROOM_UNKNOWN_SENTINEL

return results
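A sketch of how a caller consumes the three-way result — identity comparison against the module-level sentinel distinguishes "unknown room" from "room with no type" (function and variable names here are illustrative):

```python
async def room_kind(store, room_id: str) -> str:
    # `store` is assumed to expose the bulk method defined above.
    room_type = (await store.bulk_get_room_type({room_id}))[room_id]
    if room_type is ROOM_UNKNOWN_SENTINEL:
        return "unknown to this server"
    if room_type is None:
        return "ordinary room (no explicit type)"
    return f"typed room: {room_type}"  # e.g. "m.space"
```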

@cached(max_entries=10000)
async def get_room_encryption(self, room_id: str) -> Optional[str]:
raise NotImplementedError()

@cachedList(cached_method_name="get_room_encryption", list_name="room_ids")
async def bulk_get_room_encryption(
self, room_ids: Set[str]
) -> Mapping[str, Union[Optional[str], Sentinel]]:
"""
Bulk fetch room encryption for the given rooms (via current state).

Since this function is cached, any missing values would be cached as `None`. In
order to distinguish between an unencrypted room that has `None` encryption and
a room that is unknown to the server where we might want to omit the value
(which would make it cached as `None`), instead we use the sentinel value
`ROOM_UNKNOWN_SENTINEL`.

Returns:
A mapping from room ID to the room's encryption algorithm if the room is
encrypted, otherwise `None`. Rooms unknown to this server will return
`ROOM_UNKNOWN_SENTINEL`.
"""

def txn(
txn: LoggingTransaction,
) -> MutableMapping[str, Union[Optional[str], Sentinel]]:
clause, args = make_in_list_sql_clause(
txn.database_engine, "room_id", room_ids
)

# We can't rely on `room_stats_state.encryption` if the server has left the
# room because the `room_id` will still be in the table but everything will
# be set to `None`, and `None` is a valid encryption value. We join against
# the `room_stats_current` table which keeps track of the
# `current_state_events` count (and a proxy value `local_users_in_room`
# which can be used to assume the server is participating in the room and has
# current state) to ensure that the data in `room_stats_state` is up-to-date
# with the current state.
#
# FIXME: Use `room_stats_current.current_state_events` instead of
# `room_stats_current.local_users_in_room` once
# https://github.com/element-hq/synapse/issues/17457 is fixed.
sql = f"""
SELECT room_id, encryption
FROM room_stats_state
INNER JOIN room_stats_current USING (room_id)
WHERE
{clause}
AND local_users_in_room > 0
"""

txn.execute(sql, args)

room_id_to_encryption_map = {}
for row in txn:
room_id_to_encryption_map[row[0]] = row[1]

return room_id_to_encryption_map

results = await self.db_pool.runInteraction(
"bulk_get_room_encryption",
txn,
)

# If we haven't updated `room_stats_state` with the room yet, query the state
# directly. This should happen only rarely so we don't mind if we do this in a
# loop.
encryption_event_ids: List[str] = []
for room_id in room_ids - results.keys():
state_map = await self.get_partial_filtered_current_state_ids(
room_id,
state_filter=StateFilter.from_types(
[
(EventTypes.Create, ""),
(EventTypes.RoomEncryption, ""),
]
),
)
# We can use the create event as a canary to tell whether the server has
# seen the room before
create_event_id = state_map.get((EventTypes.Create, ""))
encryption_event_id = state_map.get((EventTypes.RoomEncryption, ""))

if create_event_id is None:
# We use the sentinel value to distinguish between `None` which is a
# valid encryption value and a room that is unknown to the server so the value
# is just unset.
results[room_id] = ROOM_UNKNOWN_SENTINEL
continue

if encryption_event_id is None:
results[room_id] = None
else:
encryption_event_ids.append(encryption_event_id)

encryption_event_map = await self.get_events(encryption_event_ids)

for encryption_event_id in encryption_event_ids:
encryption_event = encryption_event_map.get(encryption_event_id)
# If the current state says there is an encryption event, we should have it
# in the database.
assert encryption_event is not None

results[encryption_event.room_id] = encryption_event.content.get(
EventContentFields.ENCRYPTION_ALGORITHM
)

return results

@cached(max_entries=100000, iterable=True)
async def get_partial_current_state_ids(self, room_id: str) -> StateMap[str]:
"""Get the current state event ids for a room based on the

@@ -24,8 +24,11 @@ from typing import List, Optional, Tuple

import attr

from synapse.logging.opentracing import trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import LoggingTransaction
from synapse.storage.databases.main.stream import _filter_results_by_stream
from synapse.types import RoomStreamToken
from synapse.util.caches.stream_change_cache import StreamChangeCache

logger = logging.getLogger(__name__)
@@ -156,3 +159,39 @@ class StateDeltasStore(SQLBaseStore):
"get_max_stream_id_in_current_state_deltas",
self._get_max_stream_id_in_current_state_deltas_txn,
)

@trace
async def get_current_state_deltas_for_room(
self, room_id: str, from_token: RoomStreamToken, to_token: RoomStreamToken
) -> List[StateDelta]:
"""Get the state deltas between two tokens."""

def get_current_state_deltas_for_room_txn(
txn: LoggingTransaction,
) -> List[StateDelta]:
sql = """
SELECT instance_name, stream_id, type, state_key, event_id, prev_event_id
FROM current_state_delta_stream
WHERE room_id = ? AND ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC
"""
txn.execute(
sql, (room_id, from_token.stream, to_token.get_max_stream_pos())
)

return [
StateDelta(
stream_id=row[1],
room_id=room_id,
event_type=row[2],
state_key=row[3],
event_id=row[4],
prev_event_id=row[5],
)
for row in txn
if _filter_results_by_stream(from_token, to_token, row[0], row[1])
]

return await self.db_pool.runInteraction(
"get_current_state_deltas_for_room", get_current_state_deltas_for_room_txn
)

@@ -67,7 +67,7 @@ from synapse.api.constants import Direction, EventTypes, Membership
from synapse.api.filtering import Filter
from synapse.events import EventBase
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import trace
from synapse.logging.opentracing import tag_args, trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
@@ -78,10 +78,11 @@ from synapse.storage.database import (
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import PersistedEventPosition, RoomStreamToken
from synapse.types import PersistedEventPosition, RoomStreamToken, StrCollection
from synapse.util.caches.descriptors import cached
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -811,6 +812,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):

return ret, key

@trace
async def get_current_state_delta_membership_changes_for_user(
self,
user_id: str,
@@ -1185,6 +1187,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):

return None

@trace
async def get_last_event_pos_in_room_before_stream_ordering(
self,
room_id: str,
@@ -1293,6 +1296,126 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
get_last_event_pos_in_room_before_stream_ordering_txn,
)

async def bulk_get_last_event_pos_in_room_before_stream_ordering(
self,
room_ids: StrCollection,
end_token: RoomStreamToken,
) -> Dict[str, int]:
"""Bulk fetch the stream position of the latest events in the given
rooms
"""

min_token = end_token.stream
max_token = end_token.get_max_stream_pos()
results: Dict[str, int] = {}

# First, we check for the rooms in the stream change cache to see if we
# can just use the latest position from it.
missing_room_ids: Set[str] = set()
for room_id in room_ids:
stream_pos = self._events_stream_cache.get_max_pos_of_last_change(room_id)
if stream_pos and stream_pos <= min_token:
results[room_id] = stream_pos
else:
missing_room_ids.add(room_id)

# Next, we query the stream position from the DB. At first we fetch all
# positions less than the *max* stream pos in the token, then filter
# them down. We do this as a) this is a cheaper query, and b) the vast
# majority of rooms will have a latest token from before the min stream
# pos.

def bulk_get_last_event_pos_txn(
txn: LoggingTransaction, batch_room_ids: StrCollection
) -> Dict[str, int]:
# This query fetches the latest stream position in the rooms before
# the given max position.
clause, args = make_in_list_sql_clause(
self.database_engine, "room_id", batch_room_ids
)
sql = f"""
SELECT room_id, (
SELECT stream_ordering FROM events AS e
LEFT JOIN rejections USING (event_id)
WHERE e.room_id = r.room_id
AND stream_ordering <= ?
AND NOT outlier
AND rejection_reason IS NULL
ORDER BY stream_ordering DESC
LIMIT 1
)
FROM rooms AS r
WHERE {clause}
"""
txn.execute(sql, [max_token] + args)
return {row[0]: row[1] for row in txn}

recheck_rooms: Set[str] = set()
for batched in batch_iter(missing_room_ids, 1000):
result = await self.db_pool.runInteraction(
"bulk_get_last_event_pos_in_room_before_stream_ordering",
bulk_get_last_event_pos_txn,
batched,
)

# Check that the stream positions for the rooms are from before the
# minimum position of the token. If not then we need to fetch more
# rows.
for room_id, stream in result.items():
if stream <= min_token:
results[room_id] = stream
else:
recheck_rooms.add(room_id)

if not recheck_rooms:
return results

# For the remaining rooms we need to fetch all rows between the min and
# max stream positions in the end token, and filter out the rows that
# are after the end token.
#
# This query should be fast as the range between the min and max should
# be small.

def bulk_get_last_event_pos_recheck_txn(
txn: LoggingTransaction, batch_room_ids: StrCollection
) -> Dict[str, int]:
clause, args = make_in_list_sql_clause(
self.database_engine, "room_id", batch_room_ids
)
sql = f"""
SELECT room_id, instance_name, stream_ordering
FROM events
WHERE ? < stream_ordering AND stream_ordering <= ?
AND NOT outlier
AND rejection_reason IS NULL
AND {clause}
ORDER BY stream_ordering ASC
"""
txn.execute(sql, [min_token, max_token] + args)

# We take the max stream ordering that is less than the token. Since
# we ordered by stream ordering we just need to iterate through and
# take the last matching stream ordering.
txn_results: Dict[str, int] = {}
for row in txn:
room_id = row[0]
event_pos = PersistedEventPosition(row[1], row[2])
if not event_pos.persisted_after(end_token):
txn_results[room_id] = event_pos.stream

return txn_results

for batched in batch_iter(recheck_rooms, 1000):
recheck_result = await self.db_pool.runInteraction(
"bulk_get_last_event_pos_in_room_before_stream_ordering_recheck",
bulk_get_last_event_pos_recheck_txn,
batched,
)
results.update(recheck_result)

return results
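This method is a two-phase lookup: a cheap per-room subquery bounded by the token's maximum position, then a recheck over the narrow (min, max] window for rooms whose first answer may not be visible at this multi-writer token. The partitioning step in isolation (a sketch; `candidates` stands for the first query's room-to-position map):

```python
def split_by_token(end_token, candidates: dict) -> tuple:
    """Partition first-pass results into settled positions and rooms to recheck."""
    results, recheck = {}, set()
    for room_id, stream in candidates.items():
        if stream <= end_token.stream:
            results[room_id] = stream  # definitely at or before the token
        else:
            recheck.add(room_id)  # lands in the ambiguous (min, max] window
    return results, recheck
```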

async def get_current_room_stream_token_for_room_id(
self, room_id: str
) -> RoomStreamToken:
@@ -1819,6 +1942,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):

return rows, next_token

@trace
@tag_args
async def paginate_room_events(
self,
room_id: str,
@@ -1983,3 +2107,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
return RoomStreamToken(stream=last_position.stream - 1)

return None

@trace
def get_rooms_that_might_have_updates(
self, room_ids: StrCollection, from_token: RoomStreamToken
) -> StrCollection:
"""Filters given room IDs down to those that might have updates, i.e.
removes rooms that definitely do not have updates.
"""
return self._events_stream_cache.get_entities_changed(
room_ids, from_token.stream
)
@@ -19,7 +19,7 @@
#
#

SCHEMA_VERSION = 85 # remember to update the list below when updating
SCHEMA_VERSION = 86 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema

This should be incremented whenever the codebase changes its requirements on the
@@ -139,6 +139,9 @@ Changes in SCHEMA_VERSION = 84

Changes in SCHEMA_VERSION = 85
- Add a column `suspended` to the `users` table

Changes in SCHEMA_VERSION = 86
- Add a column `authenticated` to the tables `local_media_repository` and `remote_media_cache`
"""


@@ -0,0 +1,15 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2024 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.

ALTER TABLE remote_media_cache ADD COLUMN authenticated BOOLEAN DEFAULT FALSE NOT NULL;
ALTER TABLE local_media_repository ADD COLUMN authenticated BOOLEAN DEFAULT FALSE NOT NULL;

@@ -20,6 +20,7 @@
#
#
import abc
import logging
import re
import string
from enum import Enum
@@ -74,6 +75,9 @@ if TYPE_CHECKING:
from synapse.storage.databases.main import DataStore, PurgeEventsStore
from synapse.storage.databases.main.appservice import ApplicationServiceWorkerStore


logger = logging.getLogger(__name__)

# Define a state map type from type/state_key to T (usually an event ID or
# event)
T = TypeVar("T")
@@ -454,6 +458,8 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
represented by a default `stream` attribute and a map of instance name to
stream position of any writers that are ahead of the default stream
position.

The values in `instance_map` must be greater than the `stream` attribute.
"""

stream: int = attr.ib(validator=attr.validators.instance_of(int), kw_only=True)
@@ -468,6 +474,15 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
kw_only=True,
)

def __attrs_post_init__(self) -> None:
# Enforce that all instances have a value greater than the min stream
# position.
for i, v in self.instance_map.items():
if v <= self.stream:
raise ValueError(
f"'instance_map' includes a stream position before the main 'stream' attribute. Instance: {i}"
)

@classmethod
@abc.abstractmethod
async def parse(cls, store: "DataStore", string: str) -> "Self":
@@ -494,6 +509,9 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
for instance in set(self.instance_map).union(other.instance_map)
}

# Filter out any redundant entries.
instance_map = {i: s for i, s in instance_map.items() if s > max_stream}

return attr.evolve(
self, stream=max_stream, instance_map=immutabledict(instance_map)
)
@@ -539,10 +557,15 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
def bound_stream_token(self, max_stream: int) -> "Self":
"""Bound the stream positions to a maximum value"""

min_pos = min(self.stream, max_stream)
return type(self)(
stream=min(self.stream, max_stream),
stream=min_pos,
instance_map=immutabledict(
{k: min(s, max_stream) for k, s in self.instance_map.items()}
{
k: min(s, max_stream)
for k, s in self.instance_map.items()
if min(s, max_stream) > min_pos
}
),
)
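Without the new `if min(s, max_stream) > min_pos` filter, bounding could leave `instance_map` entries equal to `stream`, which the `__attrs_post_init__` validation added above now rejects. A worked example with illustrative values (assuming `RoomStreamToken` and `immutabledict` are importable as in this file):

```python
from immutabledict import immutabledict

# Illustrative: stream position 5 with one writer ahead at 7.
tok = RoomStreamToken(stream=5, instance_map=immutabledict({"writer1": 7}))

bounded = tok.bound_stream_token(3)
assert bounded.stream == 3
# The clamped entry would equal `stream` (3), so it is dropped rather than kept:
assert dict(bounded.instance_map) == {}
```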

@@ -637,6 +660,8 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
"Cannot set both 'topological' and 'instance_map' on 'RoomStreamToken'."
)

super().__attrs_post_init__()

@classmethod
async def parse(cls, store: "PurgeEventsStore", string: str) -> "RoomStreamToken":
try:
@@ -651,6 +676,11 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):

instance_map = {}
for part in parts[1:]:
if not part:
# Handle tokens of the form `m5~`, which were created by
# a bug
continue

key, value = part.split(".")
instance_id = int(key)
pos = int(value)
@@ -666,7 +696,10 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
except CancelledError:
raise
except Exception:
pass
# We log an exception here as even though this *might* be a client
# handing us a bad token, it's more likely that Synapse returned a bad
# token (and we really want to catch those!).
logger.exception("Failed to parse stream token: %r", string)
raise SynapseError(400, "Invalid room stream token %r" % (string,))

@classmethod
@@ -713,6 +746,8 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
return self.instance_map.get(instance_name, self.stream)

async def to_string(self, store: "DataStore") -> str:
"""See class level docstring for information about the format."""

if self.topological is not None:
return "t%d-%d" % (self.topological, self.stream)
elif self.instance_map:
@@ -727,8 +762,10 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
instance_id = await store.get_id_for_instance(name)
entries.append(f"{instance_id}.{pos}")

encoded_map = "~".join(entries)
return f"m{self.stream}~{encoded_map}"
if entries:
encoded_map = "~".join(entries)
return f"m{self.stream}~{encoded_map}"
return f"s{self.stream}"
else:
return "s%d" % (self.stream,)

@@ -740,6 +777,13 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):

return super().bound_stream_token(max_stream)

def __str__(self) -> str:
instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items()))
return (
f"RoomStreamToken(stream: {self.stream}, topological: {self.topological}, "
f"instances: {{{instances}}})"
)


@attr.s(frozen=True, slots=True, order=False)
class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
@@ -756,6 +800,11 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):

instance_map = {}
for part in parts[1:]:
if not part:
# Handle tokens of the form `m5~`, which were created by
# a bug
continue

key, value = part.split(".")
instance_id = int(key)
pos = int(value)
@@ -770,10 +819,15 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
except CancelledError:
raise
except Exception:
pass
# We log an exception here as even though this *might* be a client
# handing us a bad token, it's more likely that Synapse returned a bad
# token (and we really want to catch those!).
logger.exception("Failed to parse stream token: %r", string)
raise SynapseError(400, "Invalid stream token %r" % (string,))

async def to_string(self, store: "DataStore") -> str:
"""See class level docstring for information about the format."""

if self.instance_map:
entries = []
for name, pos in self.instance_map.items():
@@ -786,8 +840,10 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
instance_id = await store.get_id_for_instance(name)
entries.append(f"{instance_id}.{pos}")

encoded_map = "~".join(entries)
return f"m{self.stream}~{encoded_map}"
if entries:
encoded_map = "~".join(entries)
return f"m{self.stream}~{encoded_map}"
return str(self.stream)
else:
return str(self.stream)

@@ -824,6 +880,13 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):

return True

def __str__(self) -> str:
instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items()))
return (
f"MultiWriterStreamToken(stream: {self.stream}, "
f"instances: {{{instances}}})"
)


class StreamKeyType(Enum):
"""Known stream types.
@@ -1082,12 +1145,64 @@ class StreamToken:

return True

def __str__(self) -> str:
return (
f"StreamToken(room: {self.room_key}, presence: {self.presence_key}, "
f"typing: {self.typing_key}, receipt: {self.receipt_key}, "
f"account_data: {self.account_data_key}, push_rules: {self.push_rules_key}, "
f"to_device: {self.to_device_key}, device_list: {self.device_list_key}, "
f"groups: {self.groups_key}, un_partial_stated_rooms: {self.un_partial_stated_rooms_key})"
)


StreamToken.START = StreamToken(
RoomStreamToken(stream=0), 0, 0, MultiWriterStreamToken(stream=0), 0, 0, 0, 0, 0, 0
)


@attr.s(slots=True, frozen=True, auto_attribs=True)
class SlidingSyncStreamToken:
"""The same as a `StreamToken`, but includes an extra field at the start for
the sliding sync connection token (separated by a '/'). This is used to
store per-connection state.

This then looks something like:
5/s2633508_17_338_6732159_1082514_541479_274711_265584_1_379

Attributes:
stream_token: Token representing the position of all the standard
streams.
connection_position: Token used by sliding sync to track updates to any
per-connection state stored by Synapse.
"""

stream_token: StreamToken
connection_position: int

@staticmethod
@cancellable
async def from_string(store: "DataStore", string: str) -> "SlidingSyncStreamToken":
"""Creates a SlidingSyncStreamToken from its textual representation."""
try:
connection_position_str, stream_token_str = string.split("/", 1)
connection_position = int(connection_position_str)
stream_token = await StreamToken.from_string(store, stream_token_str)

return SlidingSyncStreamToken(
stream_token=stream_token,
connection_position=connection_position,
)
except CancelledError:
raise
except Exception:
raise SynapseError(400, "Invalid stream token")

async def to_string(self, store: "DataStore") -> str:
"""Serializes the token to a string"""
stream_token_str = await self.stream_token.to_string(store)
return f"{self.connection_position}/{stream_token_str}"

@attr.s(slots=True, frozen=True, auto_attribs=True)
class PersistedPosition:
"""Position of a newly persisted row with instance that persisted it."""
@@ -1170,11 +1285,12 @@ class ReadReceipt:
@attr.s(slots=True, frozen=True, auto_attribs=True)
class DeviceListUpdates:
"""
An object containing a diff of information regarding other users' device lists, intended for
a recipient to carry out device list tracking.
An object containing a diff of information regarding other users' device lists,
intended for a recipient to carry out device list tracking.

Attributes:
changed: A set of users whose device lists have changed recently.
changed: A set of users who have updated their device identity or
cross-signing keys, or who now share an encrypted room with the recipient.
left: A set of users who the recipient no longer needs to track the device lists of.
Typically when those users no longer share any end-to-end encryption enabled rooms.
"""

@@ -18,7 +18,7 @@
#
#
from enum import Enum
from typing import TYPE_CHECKING, Dict, Final, List, Optional, Tuple
from typing import TYPE_CHECKING, Dict, Final, List, Mapping, Optional, Sequence, Tuple

import attr
from typing_extensions import TypedDict
@@ -31,7 +31,15 @@ else:
from pydantic import Extra

from synapse.events import EventBase
from synapse.types import JsonDict, JsonMapping, StreamToken, UserID
from synapse.types import (
DeviceListUpdates,
JsonDict,
JsonMapping,
Requester,
SlidingSyncStreamToken,
StreamToken,
UserID,
)
from synapse.types.rest.client import SlidingSyncBody

if TYPE_CHECKING:
@@ -102,7 +110,7 @@ class SlidingSyncConfig(SlidingSyncBody):
"""

user: UserID
device_id: Optional[str]
requester: Requester

# Pydantic config
class Config:
@@ -144,7 +152,7 @@ class SlidingSyncResult:
Attributes:
next_pos: The next position token in the sliding window to request (next_batch).
lists: Sliding window API. A map of list key to list results.
rooms: Room subscription API. A map of room ID to room subscription to room results.
rooms: Room subscription API. A map of room ID to room results.
extensions: Extensions API. A map of extension key to extension results.
"""

@@ -174,8 +182,8 @@ class SlidingSyncResult:
absent on joined/left rooms
prev_batch: A token that can be passed as a start parameter to the
`/rooms/<room_id>/messages` API to retrieve earlier messages.
limited: True if there are more events than fit between the given position and now.
Sync again to get more.
limited: True if there are more events than `timeline_limit` looking
backwards from the `response.pos` to the `request.pos`.
num_live: The number of timeline events which have just occurred and are not historical.
The last N events are 'live' and should be treated as such. This is mostly
useful to determine whether a given @mention event should make a noise or not.
@@ -200,18 +208,24 @@ class SlidingSyncResult:
flag set. (same as sync v2)
"""

@attr.s(slots=True, frozen=True, auto_attribs=True)
class StrippedHero:
user_id: str
display_name: Optional[str]
avatar_url: Optional[str]

name: Optional[str]
avatar: Optional[str]
heroes: Optional[List[EventBase]]
heroes: Optional[List[StrippedHero]]
is_dm: bool
initial: bool
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
required_state: Optional[List[EventBase]]
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
timeline_events: Optional[List[EventBase]]
# Should be empty for invite/knock rooms with `stripped_state`
required_state: List[EventBase]
# Should be empty for invite/knock rooms with `stripped_state`
timeline_events: List[EventBase]
bundled_aggregations: Optional[Dict[str, "BundledAggregations"]]
# Optional because it's only relevant to invite/knock rooms
stripped_state: Optional[List[JsonDict]]
stripped_state: List[JsonDict]
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
prev_batch: Optional[StreamToken]
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
@@ -224,6 +238,17 @@ class SlidingSyncResult:
notification_count: int
highlight_count: int

def __bool__(self) -> bool:
return (
# If this is the first time the client is seeing the room, we should not filter it out
# under any circumstance.
self.initial
# We need to let the client know if there are any new events
or bool(self.required_state)
or bool(self.timeline_events)
or bool(self.stripped_state)
)

@attr.s(slots=True, frozen=True, auto_attribs=True)
class SlidingWindowList:
"""
@@ -252,24 +277,155 @@ class SlidingSyncResult:
count: int
ops: List[Operation]

next_pos: StreamToken
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Extensions:
"""Responses for extensions

Attributes:
to_device: The to-device extension (MSC3885)
e2ee: The E2EE device extension (MSC3884)
"""

@attr.s(slots=True, frozen=True, auto_attribs=True)
class ToDeviceExtension:
"""The to-device extension (MSC3885)

Attributes:
next_batch: The to-device stream token the client should use
to get more results
events: A list of to-device messages for the client
"""

next_batch: str
events: Sequence[JsonMapping]

def __bool__(self) -> bool:
return bool(self.events)

@attr.s(slots=True, frozen=True, auto_attribs=True)
class E2eeExtension:
"""The E2EE device extension (MSC3884)

Attributes:
device_list_updates: List of user_ids whose devices have changed or left (only
present on incremental syncs).
device_one_time_keys_count: Map from key algorithm to the number of
unclaimed one-time keys currently held on the server for this device. If
an algorithm is unlisted, the count for that algorithm is assumed to be
zero. If this entire parameter is missing, the count for all algorithms
is assumed to be zero.
device_unused_fallback_key_types: List of unused fallback key algorithms
for this device.
"""

# Only present on incremental syncs
device_list_updates: Optional[DeviceListUpdates]
device_one_time_keys_count: Mapping[str, int]
device_unused_fallback_key_types: Sequence[str]

def __bool__(self) -> bool:
# Note that "signed_curve25519" is always returned in key count responses
# regardless of whether we uploaded any keys for it. This is necessary until
# https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
#
# Also related:
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
default_otk = self.device_one_time_keys_count.get("signed_curve25519")
more_than_default_otk = len(self.device_one_time_keys_count) > 1 or (
default_otk is not None and default_otk > 0
)

return bool(
more_than_default_otk
or self.device_list_updates
or self.device_unused_fallback_key_types
)

@attr.s(slots=True, frozen=True, auto_attribs=True)
class AccountDataExtension:
"""The Account Data extension (MSC3959)

Attributes:
global_account_data_map: Mapping from `type` to `content` of global account
data events.
account_data_by_room_map: Mapping from room_id to mapping of `type` to
`content` of room account data events.
"""

global_account_data_map: Mapping[str, JsonMapping]
account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]]

def __bool__(self) -> bool:
return bool(
self.global_account_data_map or self.account_data_by_room_map
)

@attr.s(slots=True, frozen=True, auto_attribs=True)
class ReceiptsExtension:
"""The Receipts extension (MSC3960)

Attributes:
room_id_to_receipt_map: Mapping from room_id to `m.receipt` ephemeral
event (type, content)
"""

room_id_to_receipt_map: Mapping[str, JsonMapping]

def __bool__(self) -> bool:
return bool(self.room_id_to_receipt_map)

@attr.s(slots=True, frozen=True, auto_attribs=True)
class TypingExtension:
"""The Typing Notification extension (MSC3961)

Attributes:
room_id_to_typing_map: Mapping from room_id to `m.typing` ephemeral
event (type, content)
"""

room_id_to_typing_map: Mapping[str, JsonMapping]

def __bool__(self) -> bool:
return bool(self.room_id_to_typing_map)

to_device: Optional[ToDeviceExtension] = None
e2ee: Optional[E2eeExtension] = None
account_data: Optional[AccountDataExtension] = None
receipts: Optional[ReceiptsExtension] = None
typing: Optional[TypingExtension] = None

def __bool__(self) -> bool:
return bool(
self.to_device
or self.e2ee
or self.account_data
or self.receipts
or self.typing
)

next_pos: SlidingSyncStreamToken
lists: Dict[str, SlidingWindowList]
rooms: Dict[str, RoomResult]
extensions: JsonMapping
extensions: Extensions

def __bool__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
to tell if the notifier needs to wait for more events when polling for
events.
"""
return bool(self.lists or self.rooms or self.extensions)
# We don't include `self.lists` here, as a) `lists` is always non-empty even if
# there are no changes, and b) since we're sorting rooms by `stream_ordering` of
# the latest activity, anything that would cause the order to change would end
# up in `self.rooms` and cause us to send down the change.
return bool(self.rooms or self.extensions)

@staticmethod
def empty(next_pos: StreamToken) -> "SlidingSyncResult":
def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult":
"Return a new empty result"
return SlidingSyncResult(
next_pos=next_pos,
lists={},
rooms={},
extensions={},
extensions=SlidingSyncResult.Extensions(),
)
@@ -120,6 +120,9 @@ class SlidingSyncBody(RequestBodyModel):
Sliding Sync API request body.

Attributes:
conn_id: An optional string to identify this connection to the server.
Only one sliding sync connection is allowed per given conn_id (empty
or not).
lists: Sliding window API. A map of list key to list information
(:class:`SlidingSyncList`). Max lists: 100. The list keys should be
arbitrary strings which the client is using to refer to the list. Keep this
@@ -200,9 +203,6 @@ class SlidingSyncBody(RequestBodyModel):
}

timeline_limit: The maximum number of timeline events to return per response.
include_heroes: Return a stripped variant of membership events (containing
`user_id` and optionally `avatar_url` and `displayname`) for the users used
to calculate the room name.
filters: Filters to apply to the list before sorting.
"""

@@ -270,16 +270,119 @@ class SlidingSyncBody(RequestBodyModel):
else:
ranges: Optional[List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]]] = None # type: ignore[valid-type]
slow_get_all_rooms: Optional[StrictBool] = False
include_heroes: Optional[StrictBool] = False
filters: Optional[Filters] = None

class RoomSubscription(CommonRoomParameters):
pass

class Extension(RequestBodyModel):
enabled: Optional[StrictBool] = False
lists: Optional[List[StrictStr]] = None
rooms: Optional[List[StrictStr]] = None
class Extensions(RequestBodyModel):
"""The extensions section of the request.

Extensions MUST have an `enabled` flag which defaults to `false`. If a client
sends an unknown extension name, the server MUST ignore it (or else backwards
compatibility between clients and servers is broken when a newer client tries to
communicate with an older server).
"""

class ToDeviceExtension(RequestBodyModel):
"""The to-device extension (MSC3885)

Attributes:
enabled
limit: Maximum number of to-device messages to return
since: The `next_batch` from the previous sync response
"""

enabled: Optional[StrictBool] = False
limit: StrictInt = 100
since: Optional[StrictStr] = None

@validator("since")
def since_token_check(
cls, value: Optional[StrictStr]
) -> Optional[StrictStr]:
# `since` comes in as an opaque string token but we know that it's just
# an integer representing the position in the device inbox stream. We
# want to pre-validate it to make sure it works fine in downstream code.
if value is None:
return value

try:
int(value)
except ValueError:
raise ValueError(
"'extensions.to_device.since' is invalid (should look like an int)"
)

return value
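Putting the new request model together, a body enabling several extensions might look like the following hand-written sketch (field values are illustrative, not taken from the MSCs):

```python
body = {
    "conn_id": "main",
    "lists": {"all_rooms": {"ranges": [[0, 99]], "timeline_limit": 10}},
    "extensions": {
        "to_device": {"enabled": True, "limit": 50, "since": "1234"},
        "e2ee": {"enabled": True},
        "receipts": {"enabled": True, "lists": ["*"]},
        "typing": {"enabled": True, "rooms": ["*"]},
    },
}
```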
|
||||
|
||||
class E2eeExtension(RequestBodyModel):
|
||||
"""The E2EE device extension (MSC3884)
|
||||
|
||||
Attributes:
|
||||
enabled
|
||||
"""
|
||||
|
||||
enabled: Optional[StrictBool] = False
|
||||
|
||||
class AccountDataExtension(RequestBodyModel):
|
||||
"""The Account Data extension (MSC3959)
|
||||
|
||||
Attributes:
|
||||
enabled
|
||||
lists: List of list keys (from the Sliding Window API) to apply this
|
||||
extension to.
|
||||
rooms: List of room IDs (from the Room Subscription API) to apply this
|
||||
extension to.
|
||||
"""
|
||||
|
||||
enabled: Optional[StrictBool] = False
|
||||
# Process all lists defined in the Sliding Window API. (This is the default.)
|
||||
lists: Optional[List[StrictStr]] = ["*"]
|
||||
# Process all room subscriptions defined in the Room Subscription API. (This is the default.)
|
||||
rooms: Optional[List[StrictStr]] = ["*"]
|
||||
|
||||
class ReceiptsExtension(RequestBodyModel):
|
||||
"""The Receipts extension (MSC3960)
|
||||
|
||||
Attributes:
|
||||
enabled
|
||||
lists: List of list keys (from the Sliding Window API) to apply this
|
||||
extension to.
|
||||
rooms: List of room IDs (from the Room Subscription API) to apply this
|
||||
extension to.
|
||||
"""
|
||||
|
||||
enabled: Optional[StrictBool] = False
|
||||
# Process all lists defined in the Sliding Window API. (This is the default.)
|
||||
lists: Optional[List[StrictStr]] = ["*"]
|
||||
# Process all room subscriptions defined in the Room Subscription API. (This is the default.)
|
||||
rooms: Optional[List[StrictStr]] = ["*"]
|
||||
|
||||
class TypingExtension(RequestBodyModel):
|
||||
"""The Typing Notification extension (MSC3961)
|
||||
|
||||
Attributes:
|
||||
enabled
|
||||
lists: List of list keys (from the Sliding Window API) to apply this
|
||||
extension to.
|
||||
rooms: List of room IDs (from the Room Subscription API) to apply this
|
||||
extension to.
|
||||
"""
|
||||
|
||||
enabled: Optional[StrictBool] = False
|
||||
# Process all lists defined in the Sliding Window API. (This is the default.)
|
||||
lists: Optional[List[StrictStr]] = ["*"]
|
||||
# Process all room subscriptions defined in the Room Subscription API. (This is the default.)
|
||||
rooms: Optional[List[StrictStr]] = ["*"]
|
||||
|
||||
to_device: Optional[ToDeviceExtension] = None
|
||||
e2ee: Optional[E2eeExtension] = None
|
||||
account_data: Optional[AccountDataExtension] = None
|
||||
receipts: Optional[ReceiptsExtension] = None
|
||||
typing: Optional[TypingExtension] = None
|
||||
|
||||
conn_id: Optional[str]
|
||||
|
||||
# mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884
|
||||
if TYPE_CHECKING:
|
||||
@@ -287,7 +390,7 @@ class SlidingSyncBody(RequestBodyModel):
|
||||
else:
|
||||
lists: Optional[Dict[constr(max_length=64, strict=True), SlidingSyncList]] = None # type: ignore[valid-type]
|
||||
room_subscriptions: Optional[Dict[StrictStr, RoomSubscription]] = None
|
||||
extensions: Optional[Dict[StrictStr, Extension]] = None
|
||||
extensions: Optional[Extensions] = None
|
||||
|
||||
@validator("lists")
|
||||
def lists_length_check(
|
||||
|
||||
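For illustration, here is a sketch of a request body that the new `Extensions` model above would accept. The field names come from the models in this hunk; the list name, room wildcards, and other values are placeholders, and the unknown-extension entry is there only to show the "servers must ignore unknown names" rule from the docstring.

# Illustrative Sliding Sync request body for the new `Extensions` model.
# Only the field names are taken from the diff above; values are made up.
sync_body = {
    "lists": {
        "foo-list": {
            "ranges": [[0, 10]],
            "required_state": [["m.room.name", ""]],
            "timeline_limit": 5,
        }
    },
    "extensions": {
        # Every extension carries an `enabled` flag defaulting to false.
        "to_device": {"enabled": True, "limit": 100},
        "e2ee": {"enabled": True},
        # `lists`/`rooms` default to ["*"], i.e. apply to all lists/subscriptions.
        "account_data": {"enabled": True, "lists": ["*"], "rooms": ["*"]},
        "receipts": {"enabled": True},
        "typing": {"enabled": True},
        # Unknown extension names must be ignored by the server.
        "org.example.unknown": {"enabled": True},
    },
    "conn_id": "example-connection",
}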
@@ -327,7 +327,7 @@ class StreamChangeCache:
        for entity in r:
            self._entity_to_key.pop(entity, None)

    def get_max_pos_of_last_change(self, entity: EntityType) -> int:
    def get_max_pos_of_last_change(self, entity: EntityType) -> Optional[int]:
        """Returns an upper bound of the stream id of the last change to an
        entity.

@@ -335,7 +335,11 @@ class StreamChangeCache:
            entity: The entity to check.

        Return:
            The stream position of the latest change for the given entity or
            the earliest known stream position if the entitiy is unknown.
            The stream position of the latest change for the given entity, if
            known
        """
        return self._entity_to_key.get(entity, self._earliest_known_stream_pos)
        return self._entity_to_key.get(entity)

    def get_earliest_known_position(self) -> int:
        """Returns the earliest position in the cache."""
        return self._earliest_known_stream_pos
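The behavioural change in this hunk is easy to miss: unknown entities now yield `None` instead of falling back to the earliest known stream position, so callers must handle the miss explicitly. A minimal runnable sketch of the caller-side consequence (the class name and data are hypothetical stand-ins, not from the diff):

from typing import Dict, Optional

class StreamChangeCacheSketch:
    """Hypothetical, cut-down stand-in for StreamChangeCache (illustration only)."""

    def __init__(self) -> None:
        self._entity_to_key: Dict[str, int] = {}

    def get_max_pos_of_last_change(self, entity: str) -> Optional[int]:
        # After this change: unknown entities yield None rather than the
        # earliest known stream position.
        return self._entity_to_key.get(entity)

cache = StreamChangeCacheSketch()
cache._entity_to_key["@alice:test"] = 42

since_token = 40
pos = cache.get_max_pos_of_last_change("@alice:test")
if pos is not None and pos > since_token:
    print("entity may have changed since", since_token)

# Callers must now treat None as "no change known", not "changed early on".
assert cache.get_max_pos_of_last_change("@bob:test") is None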
@@ -43,9 +43,7 @@ from tests.unittest import override_config
class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
        self.appservice_api = mock.AsyncMock()
        return self.setup_test_homeserver(
            federation_client=mock.Mock(), application_service_api=self.appservice_api
        )
        return self.setup_test_homeserver(application_service_api=self.appservice_api)

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.handler = hs.get_e2e_keys_handler()
@@ -1224,6 +1222,61 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
            },
        )

    def test_query_devices_remote_down(self) -> None:
        """Tests that querying keys for a remote user on an unreachable server returns
        results in the "failures" property
        """

        remote_user_id = "@test:other"
        local_user_id = "@test:test"

        # The backoff code treats time zero as special
        self.reactor.advance(5)

        self.hs.get_federation_http_client().agent.request = mock.AsyncMock(  # type: ignore[method-assign]
            side_effect=Exception("boop")
        )

        e2e_handler = self.hs.get_e2e_keys_handler()

        query_result = self.get_success(
            e2e_handler.query_devices(
                {
                    "device_keys": {remote_user_id: []},
                },
                timeout=10,
                from_user_id=local_user_id,
                from_device_id="some_device_id",
            )
        )

        self.assertEqual(
            query_result["failures"],
            {
                "other": {
                    "message": "Failed to send request: Exception: boop",
                    "status": 503,
                }
            },
        )

        # Do it again: we should hit the backoff
        query_result = self.get_success(
            e2e_handler.query_devices(
                {
                    "device_keys": {remote_user_id: []},
                },
                timeout=10,
                from_user_id=local_user_id,
                from_device_id="some_device_id",
            )
        )

        self.assertEqual(
            query_result["failures"],
            {"other": {"message": "Not ready for retry", "status": 503}},
        )

    @parameterized.expand(
        [
            # The remote homeserver's response indicates that this user has 0/1/2 devices.
File diff suppressed because it is too large
@@ -211,6 +211,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):

        # Blow away caches (supported room versions can only change due to a restart).
        self.store.get_rooms_for_user.invalidate_all()
        self.store._get_rooms_for_local_user_where_membership_is_inner.invalidate_all()
        self.store._get_event_cache.clear()
        self.store._event_ref.clear()

@@ -1057,13 +1057,15 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
        )
        assert channel.code == 200

    @override_config({"remote_media_download_burst_count": "87M"})
    @patch(
        "synapse.http.matrixfederationclient.read_body_with_max_size",
        read_body_with_max_size_30MiB,
    )
    def test_download_ratelimit_max_size_sub(self) -> None:
    def test_download_ratelimit_unknown_length(self) -> None:
        """
        Test that if no content-length is provided, the default max size is applied instead
        Test that if no content-length is provided, ratelimit will still be applied after
        download once length is known
        """

        # mock out actually sending the request
@@ -1077,19 +1079,48 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):

        self.client._send_request = _send_request  # type: ignore

        # ten requests should go through using the max size (500MB/50MB)
        for i in range(10):
            channel2 = self.make_request(
        # 3 requests should go through (note 3rd one would technically violate ratelimit but
        # is applied *after* download - the next one will be ratelimited)
        for i in range(3):
            channel = self.make_request(
                "GET",
                f"/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxy{i}",
                shorthand=False,
            )
            assert channel2.code == 200
            assert channel.code == 200

        # eleventh will hit ratelimit
        channel3 = self.make_request(
        # 4th will hit ratelimit
        channel2 = self.make_request(
            "GET",
            "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxyx",
            shorthand=False,
        )
        assert channel3.code == 429
        assert channel2.code == 429

    @override_config({"max_upload_size": "29M"})
    @patch(
        "synapse.http.matrixfederationclient.read_body_with_max_size",
        read_body_with_max_size_30MiB,
    )
    def test_max_download_respected(self) -> None:
        """
        Test that the max download size is enforced - note that max download size is determined
        by the max_upload_size
        """

        # mock out actually sending the request
        async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
            resp = MagicMock(spec=IResponse)
            resp.code = 200
            resp.length = 31457280
            resp.headers = Headers({"Content-Type": ["application/octet-stream"]})
            resp.phrase = b"OK"
            return resp

        self.client._send_request = _send_request  # type: ignore

        channel = self.make_request(
            "GET", "/_matrix/media/v3/download/remote.org/abcd", shorthand=False
        )
        assert channel.code == 502
        assert channel.json_body["errcode"] == "M_TOO_LARGE"
13  tests/rest/client/sliding_sync/__init__.py  Normal file
@@ -0,0 +1,13 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
381  tests/rest/client/sliding_sync/test_connection_tracking.py  Normal file
@@ -0,0 +1,381 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging

from parameterized import parameterized

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.api.constants import EventTypes
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.util import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase

logger = logging.getLogger(__name__)


class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase):
    """
    Test connection tracking in the Sliding Sync API.
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
        self.storage_controllers = hs.get_storage_controllers()

    def test_rooms_required_state_incremental_sync_LIVE(self) -> None:
        """Test that we only get state updates in incremental sync for rooms
        we've already seen (LIVE).
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Create, ""],
                        [EventTypes.RoomHistoryVisibility, ""],
                        # This one doesn't exist in the room
                        [EventTypes.Name, ""],
                    ],
                    "timeline_limit": 0,
                }
            }
        }

        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)

        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            {
                state_map[(EventTypes.Create, "")],
                state_map[(EventTypes.RoomHistoryVisibility, "")],
            },
            exact=True,
        )

        # Send a state event
        self.helper.send_state(
            room_id1, EventTypes.Name, body={"name": "foo"}, tok=user2_tok
        )

        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        self.assertNotIn("initial", response_body["rooms"][room_id1])
        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            {
                state_map[(EventTypes.Name, "")],
            },
            exact=True,
        )

    @parameterized.expand([(False,), (True,)])
    def test_rooms_timeline_incremental_sync_PREVIOUSLY(self, limited: bool) -> None:
        """
        Test getting room data where we have previously sent down the room, but
        we missed sending down some timeline events, and so its status
        is considered PREVIOUSLY.

        There are two versions of this test, one where there are more messages
        than the timeline limit, and one where there isn't.
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)

        self.helper.send(room_id1, "msg", tok=user1_tok)

        timeline_limit = 5
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 0]],
                    "required_state": [],
                    "timeline_limit": timeline_limit,
                }
            },
            "conn_id": "conn_id",
        }

        # The first room gets sent down the initial sync
        response_body, initial_from_token = self.do_sync(sync_body, tok=user1_tok)
        self.assertCountEqual(
            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
        )

        # We now send down some events in room1 (depending on the test param).
        expected_events = []  # The set of events in the timeline
        if limited:
            for _ in range(10):
                resp = self.helper.send(room_id1, "msg1", tok=user1_tok)
                expected_events.append(resp["event_id"])
        else:
            resp = self.helper.send(room_id1, "msg1", tok=user1_tok)
            expected_events.append(resp["event_id"])

        # A second message happens in the other room, so room1 won't get sent down.
        self.helper.send(room_id2, "msg", tok=user1_tok)

        # Only the second room gets sent down sync.
        response_body, from_token = self.do_sync(
            sync_body, since=initial_from_token, tok=user1_tok
        )

        self.assertCountEqual(
            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
        )

        # We now send another event to room1, so we should sync all the missing events.
        resp = self.helper.send(room_id1, "msg2", tok=user1_tok)
        expected_events.append(resp["event_id"])

        # This sync should contain the messages from room1 not yet sent down.
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        self.assertCountEqual(
            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
        )
        self.assertNotIn("initial", response_body["rooms"][room_id1])

        self.assertEqual(
            [ev["event_id"] for ev in response_body["rooms"][room_id1]["timeline"]],
            expected_events[-timeline_limit:],
        )
        self.assertEqual(response_body["rooms"][room_id1]["limited"], limited)
        self.assertEqual(response_body["rooms"][room_id1].get("required_state"), None)

    def test_rooms_required_state_incremental_sync_PREVIOUSLY(self) -> None:
        """
        Test getting room data where we have previously sent down the room, but
        we missed sending down some state, and so its status is
        considered PREVIOUSLY.
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)

        self.helper.send(room_id1, "msg", tok=user1_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 0]],
                    "required_state": [
                        [EventTypes.Create, ""],
                        [EventTypes.RoomHistoryVisibility, ""],
                        # This one doesn't exist in the room
                        [EventTypes.Name, ""],
                    ],
                    "timeline_limit": 0,
                }
            },
            "conn_id": "conn_id",
        }

        # The first room gets sent down the initial sync
        response_body, initial_from_token = self.do_sync(sync_body, tok=user1_tok)
        self.assertCountEqual(
            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
        )

        # We now send down some state in room1
        resp = self.helper.send_state(
            room_id1, EventTypes.Name, {"name": "foo"}, tok=user1_tok
        )
        name_change_id = resp["event_id"]

        # A second message happens in the other room, so room1 won't get sent down.
        self.helper.send(room_id2, "msg", tok=user1_tok)

        # Only the second room gets sent down sync.
        response_body, from_token = self.do_sync(
            sync_body, since=initial_from_token, tok=user1_tok
        )

        self.assertCountEqual(
            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
        )

        # We now send another event to room1, so we should sync all the missing state.
        self.helper.send(room_id1, "msg", tok=user1_tok)

        # This sync should contain the state changes from room1.
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        self.assertCountEqual(
            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
        )
        self.assertNotIn("initial", response_body["rooms"][room_id1])

        # We should only see the name change.
        self.assertEqual(
            [
                ev["event_id"]
                for ev in response_body["rooms"][room_id1]["required_state"]
            ],
            [name_change_id],
        )

    def test_rooms_required_state_incremental_sync_NEVER(self) -> None:
        """
        Test getting `required_state` where we have NEVER sent down the room before
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)

        self.helper.send(room_id1, "msg", tok=user1_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 0]],
                    "required_state": [
                        [EventTypes.Create, ""],
                        [EventTypes.RoomHistoryVisibility, ""],
                        # This one doesn't exist in the room
                        [EventTypes.Name, ""],
                    ],
                    "timeline_limit": 1,
                }
            },
        }

        # A message happens in the other room, so room1 won't get sent down.
        self.helper.send(room_id2, "msg", tok=user1_tok)

        # Only the second room gets sent down sync.
        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)

        self.assertCountEqual(
            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
        )

        # We now send another event to room1, so we should send down the full
        # room.
        self.helper.send(room_id1, "msg2", tok=user1_tok)

        # This sync should contain the messages from room1 not yet sent down.
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        self.assertCountEqual(
            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
        )

        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)

        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            {
                state_map[(EventTypes.Create, "")],
                state_map[(EventTypes.RoomHistoryVisibility, "")],
            },
            exact=True,
        )

    def test_rooms_timeline_incremental_sync_NEVER(self) -> None:
        """
        Test getting timeline room data where we have NEVER sent down the room
        before
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 0]],
                    "required_state": [],
                    "timeline_limit": 5,
                }
            },
        }

        expected_events = []
        for _ in range(4):
            resp = self.helper.send(room_id1, "msg", tok=user1_tok)
            expected_events.append(resp["event_id"])

        # A message happens in the other room, so room1 won't get sent down.
        self.helper.send(room_id2, "msg", tok=user1_tok)

        # Only the second room gets sent down sync.
        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)

        self.assertCountEqual(
            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
        )

        # We now send another event to room1 so it comes down sync
        resp = self.helper.send(room_id1, "msg2", tok=user1_tok)
        expected_events.append(resp["event_id"])

        # This sync should contain the messages from room1 not yet sent down.
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        self.assertCountEqual(
            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
        )

        self.assertEqual(
            [ev["event_id"] for ev in response_body["rooms"][room_id1]["timeline"]],
            expected_events,
        )
        self.assertEqual(response_body["rooms"][room_id1]["limited"], True)
        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
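The three per-connection states exercised above — LIVE (the room is already up to date on this connection), PREVIOUSLY (sent before, but with a gap to fill), and NEVER (not yet sent, so it comes down as `initial`) — are keyed by the `conn_id` the client supplies. As a rough illustration only (the endpoint path, token, and field values are placeholders rather than anything taken from these tests, and the response is assumed to carry a `pos` token as in MSC3575), a client keeps the tracking alive across requests like this:

# Hypothetical client-side sketch; URL and token are placeholders.
import requests

SYNC_URL = "https://example.org/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"
TOKEN = "ACCESS_TOKEN"

body = {
    "lists": {"foo-list": {"ranges": [[0, 0]], "required_state": [], "timeline_limit": 5}},
    # Identifies this connection for server-side room tracking.
    "conn_id": "conn_id",
}

# Initial request: rooms are NEVER-before-seen on this connection, so they
# come down with `initial` set and full state.
resp = requests.post(SYNC_URL, json=body, headers={"Authorization": f"Bearer {TOKEN}"}).json()

# Incremental request: rooms already sent are LIVE; rooms that changed while
# out of the window are PREVIOUSLY and get their gap filled.
resp2 = requests.post(
    SYNC_URL,
    params={"pos": resp["pos"]},
    json=body,
    headers={"Authorization": f"Bearer {TOKEN}"},
).json()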
495  tests/rest/client/sliding_sync/test_extension_account_data.py  Normal file
@@ -0,0 +1,495 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.api.constants import AccountDataTypes
from synapse.rest.client import login, room, sendtodevice, sync
from synapse.server import HomeServer
from synapse.types import StreamKeyType
from synapse.util import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.server import TimedOutException

logger = logging.getLogger(__name__)


class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase):
    """Tests for the account_data sliding sync extension"""

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
        sendtodevice.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
        self.account_data_handler = hs.get_account_data_handler()

    def test_no_data_initial_sync(self) -> None:
        """
        Test that enabling the account_data extension works during an initial sync,
        even if there is no data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Make an initial Sliding Sync request with the account_data extension enabled
        sync_body = {
            "lists": {},
            "extensions": {
                "account_data": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        self.assertIncludes(
            {
                global_event["type"]
                for global_event in response_body["extensions"]["account_data"].get(
                    "global"
                )
            },
            # Even though we don't have any global account data set, Synapse saves some
            # default push rules for us.
            {AccountDataTypes.PUSH_RULES},
            exact=True,
        )
        self.assertIncludes(
            response_body["extensions"]["account_data"].get("rooms").keys(),
            set(),
            exact=True,
        )

    def test_no_data_incremental_sync(self) -> None:
        """
        Test that enabling the account_data extension works during an incremental sync,
        even if there is no data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "account_data": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make an incremental Sliding Sync request with the account_data extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # There have been no account data changes since the `from_token` so we shouldn't
        # see any account data here.
        self.assertIncludes(
            {
                global_event["type"]
                for global_event in response_body["extensions"]["account_data"].get(
                    "global"
                )
            },
            set(),
            exact=True,
        )
        self.assertIncludes(
            response_body["extensions"]["account_data"].get("rooms").keys(),
            set(),
            exact=True,
        )

    def test_global_account_data_initial_sync(self) -> None:
        """
        On initial sync, we should return all global account data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Update the global account data
        self.get_success(
            self.account_data_handler.add_account_data_for_user(
                user_id=user1_id,
                account_data_type="org.matrix.foobarbaz",
                content={"foo": "bar"},
            )
        )

        # Make an initial Sliding Sync request with the account_data extension enabled
        sync_body = {
            "lists": {},
            "extensions": {
                "account_data": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # It should show us all of the global account data
        self.assertIncludes(
            {
                global_event["type"]
                for global_event in response_body["extensions"]["account_data"].get(
                    "global"
                )
            },
            {AccountDataTypes.PUSH_RULES, "org.matrix.foobarbaz"},
            exact=True,
        )
        self.assertIncludes(
            response_body["extensions"]["account_data"].get("rooms").keys(),
            set(),
            exact=True,
        )

    def test_global_account_data_incremental_sync(self) -> None:
        """
        On incremental sync, we should only return account data that has changed since
        the `from_token`.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Add some global account data
        self.get_success(
            self.account_data_handler.add_account_data_for_user(
                user_id=user1_id,
                account_data_type="org.matrix.foobarbaz",
                content={"foo": "bar"},
            )
        )

        sync_body = {
            "lists": {},
            "extensions": {
                "account_data": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Add some other global account data
        self.get_success(
            self.account_data_handler.add_account_data_for_user(
                user_id=user1_id,
                account_data_type="org.matrix.doodardaz",
                content={"doo": "dar"},
            )
        )

        # Make an incremental Sliding Sync request with the account_data extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        self.assertIncludes(
            {
                global_event["type"]
                for global_event in response_body["extensions"]["account_data"].get(
                    "global"
                )
            },
            # We should only see the new global account data that happened after the `from_token`
            {"org.matrix.doodardaz"},
            exact=True,
        )
        self.assertIncludes(
            response_body["extensions"]["account_data"].get("rooms").keys(),
            set(),
            exact=True,
        )

    def test_room_account_data_initial_sync(self) -> None:
        """
        On initial sync, we return all account data for a given room but only for
        rooms that we request and are being returned in the Sliding Sync response.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Create a room and add some room account data
        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id1,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        # Create another room with some room account data
        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id2,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        # Make an initial Sliding Sync request with the account_data extension enabled
        sync_body = {
            "lists": {},
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    "timeline_limit": 0,
                }
            },
            "extensions": {
                "account_data": {
                    "enabled": True,
                    "rooms": [room_id1, room_id2],
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        self.assertIsNotNone(response_body["extensions"]["account_data"].get("global"))
        # Even though we requested room2, we only expect room1 to show up because that's
        # the only room in the Sliding Sync response (room2 is not one of our room
        # subscriptions or in a sliding window list).
        self.assertIncludes(
            response_body["extensions"]["account_data"].get("rooms").keys(),
            {room_id1},
            exact=True,
        )
        self.assertIncludes(
            {
                event["type"]
                for event in response_body["extensions"]["account_data"]
                .get("rooms")
                .get(room_id1)
            },
            {"org.matrix.roorarraz"},
            exact=True,
        )

    def test_room_account_data_incremental_sync(self) -> None:
        """
        On incremental sync, we return all account data for a given room but only for
        rooms that we request and are being returned in the Sliding Sync response.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Create a room and add some room account data
        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id1,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        # Create another room with some room account data
        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id2,
                account_data_type="org.matrix.roorarraz",
                content={"roo": "rar"},
            )
        )

        sync_body = {
            "lists": {},
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    "timeline_limit": 0,
                }
            },
            "extensions": {
                "account_data": {
                    "enabled": True,
                    "rooms": [room_id1, room_id2],
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Add some other room account data
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id1,
                account_data_type="org.matrix.roorarraz2",
                content={"roo": "rar"},
            )
        )
        self.get_success(
            self.account_data_handler.add_account_data_to_room(
                user_id=user1_id,
                room_id=room_id2,
                account_data_type="org.matrix.roorarraz2",
                content={"roo": "rar"},
            )
        )

        # Make an incremental Sliding Sync request with the account_data extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        self.assertIsNotNone(response_body["extensions"]["account_data"].get("global"))
        # Even though we requested room2, we only expect room1 to show up because that's
        # the only room in the Sliding Sync response (room2 is not one of our room
        # subscriptions or in a sliding window list).
        self.assertIncludes(
            response_body["extensions"]["account_data"].get("rooms").keys(),
            {room_id1},
            exact=True,
        )
        # We should only see the new room account data that happened after the `from_token`
        self.assertIncludes(
            {
                event["type"]
                for event in response_body["extensions"]["account_data"]
                .get("rooms")
                .get(room_id1)
            },
            {"org.matrix.roorarraz2"},
            exact=True,
        )

    def test_wait_for_new_data(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive.

        (Only applies to incremental syncs with a `timeout` specified)
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id, user1_id, tok=user1_tok)

        sync_body = {
            "lists": {},
            "extensions": {
                "account_data": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make an incremental Sliding Sync request with the account_data extension enabled
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Bump the global account data to trigger new results
        self.get_success(
            self.account_data_handler.add_account_data_for_user(
                user1_id,
                "org.matrix.foobarbaz",
                {"foo": "bar"},
            )
        )
        # Should respond before the 10 second timeout
        channel.await_result(timeout_ms=3000)
        self.assertEqual(channel.code, 200, channel.json_body)

        # We should see the global account data update
        self.assertIncludes(
            {
                global_event["type"]
                for global_event in channel.json_body["extensions"]["account_data"].get(
                    "global"
                )
            },
            {"org.matrix.foobarbaz"},
            exact=True,
        )
        self.assertIncludes(
            channel.json_body["extensions"]["account_data"].get("rooms").keys(),
            set(),
            exact=True,
        )

    def test_wait_for_new_data_timeout(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive but
        no data ever arrives so we timeout. We're also making sure that the default data
        from the account_data extension doesn't trigger a false-positive for new data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "account_data": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Wake up `notifier.wait_for_events(...)`, which will cause us to test
        # `SlidingSyncResult.__bool__` for new results.
        self._bump_notifier_wait_for_events(
            user1_id,
            # We choose `StreamKeyType.PRESENCE` because we're testing for account data
            # and don't want to contaminate the account data results using
            # `StreamKeyType.ACCOUNT_DATA`.
            wake_stream_key=StreamKeyType.PRESENCE,
        )
        # Block for a little bit more to ensure we don't see any new results.
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=4000)
        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
        # 5000 + 4000 + 1200 > 10000)
        channel.await_result(timeout_ms=1200)
        self.assertEqual(channel.code, 200, channel.json_body)

        self.assertIsNotNone(
            channel.json_body["extensions"]["account_data"].get("global")
        )
        self.assertIsNotNone(
            channel.json_body["extensions"]["account_data"].get("rooms")
        )
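For reference, the extension's section of the response that these assertions pin down looks roughly like the following. This is an illustrative reconstruction from the assertions above, not a verbatim server response; the room ID and event contents are made up.

# Roughly the `extensions.account_data` response shape (illustrative values):
account_data_extension_response = {
    "global": [
        # Synapse always stores default push rules, so this shows up even
        # when the user has set no account data themselves.
        {"type": "m.push_rules", "content": {"global": {}}},
        {"type": "org.matrix.foobarbaz", "content": {"foo": "bar"}},
    ],
    "rooms": {
        # Only rooms both requested *and* present in the Sliding Sync
        # response appear here.
        "!room1:test": [
            {"type": "org.matrix.roorarraz", "content": {"roo": "rar"}},
        ],
    },
}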
441  tests/rest/client/sliding_sync/test_extension_e2ee.py  Normal file
@@ -0,0 +1,441 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.rest.client import devices, login, room, sync
from synapse.server import HomeServer
from synapse.types import JsonDict, StreamKeyType
from synapse.util import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.server import TimedOutException

logger = logging.getLogger(__name__)


class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase):
    """Tests for the e2ee sliding sync extension"""

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
        devices.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
        self.e2e_keys_handler = hs.get_e2e_keys_handler()

    def test_no_data_initial_sync(self) -> None:
        """
        Test that enabling the e2ee extension works during an initial sync, even if
        there is no data
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Make an initial Sliding Sync request with the e2ee extension enabled
        sync_body = {
            "lists": {},
            "extensions": {
                "e2ee": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Device list updates are only present for incremental syncs
        self.assertIsNone(response_body["extensions"]["e2ee"].get("device_lists"))

        # Both of these should be present even when empty
        self.assertEqual(
            response_body["extensions"]["e2ee"]["device_one_time_keys_count"],
            {
                # This is always present because of
                # https://github.com/element-hq/element-android/issues/3725 and
                # https://github.com/matrix-org/synapse/issues/10456
                "signed_curve25519": 0
            },
        )
        self.assertEqual(
            response_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
            [],
        )

    def test_no_data_incremental_sync(self) -> None:
        """
        Test that enabling the e2ee extension works during an incremental sync, even if
        there is no data
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "e2ee": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make an incremental Sliding Sync request with the e2ee extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # Device list shows up for incremental syncs
        self.assertEqual(
            response_body["extensions"]["e2ee"].get("device_lists", {}).get("changed"),
            [],
        )
        self.assertEqual(
            response_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
            [],
        )

        # Both of these should be present even when empty
        self.assertEqual(
            response_body["extensions"]["e2ee"]["device_one_time_keys_count"],
            {
                # Note that "signed_curve25519" is always returned in key count responses
                # regardless of whether we uploaded any keys for it. This is necessary until
                # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
                #
                # Also related:
                # https://github.com/element-hq/element-android/issues/3725 and
                # https://github.com/matrix-org/synapse/issues/10456
                "signed_curve25519": 0
            },
        )
        self.assertEqual(
            response_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
            [],
        )

    def test_wait_for_new_data(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive.

        (Only applies to incremental syncs with a `timeout` specified)
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        test_device_id = "TESTDEVICE"
        user3_id = self.register_user("user3", "pass")
        user3_tok = self.login(user3_id, "pass", device_id=test_device_id)

        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id, user1_id, tok=user1_tok)
        self.helper.join(room_id, user3_id, tok=user3_tok)

        sync_body = {
            "lists": {},
            "extensions": {
                "e2ee": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Bump the device lists to trigger new results
        # Have user3 update their device list
        device_update_channel = self.make_request(
            "PUT",
            f"/devices/{test_device_id}",
            {
                "display_name": "New Device Name",
            },
            access_token=user3_tok,
        )
        self.assertEqual(
            device_update_channel.code, 200, device_update_channel.json_body
        )
        # Should respond before the 10 second timeout
        channel.await_result(timeout_ms=3000)
        self.assertEqual(channel.code, 200, channel.json_body)

        # We should see the device list update
        self.assertEqual(
            channel.json_body["extensions"]["e2ee"]
            .get("device_lists", {})
            .get("changed"),
            [user3_id],
        )
        self.assertEqual(
            channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
            [],
        )

    def test_wait_for_new_data_timeout(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive but
        no data ever arrives so we timeout. We're also making sure that the default data
        from the E2EE extension doesn't trigger a false-positive for new data (see
        `device_one_time_keys_count.signed_curve25519`).
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "e2ee": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Wake up `notifier.wait_for_events(...)`, which will cause us to test
        # `SlidingSyncResult.__bool__` for new results.
        self._bump_notifier_wait_for_events(
            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
        )
        # Block for a little bit more to ensure we don't see any new results.
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=4000)
        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
        # 5000 + 4000 + 1200 > 10000)
        channel.await_result(timeout_ms=1200)
        self.assertEqual(channel.code, 200, channel.json_body)

        # Device lists are present for incremental syncs but empty because no device changes
        self.assertEqual(
            channel.json_body["extensions"]["e2ee"]
            .get("device_lists", {})
            .get("changed"),
            [],
        )
        self.assertEqual(
            channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
            [],
        )

        # Both of these should be present even when empty
        self.assertEqual(
            channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"],
            {
                # Note that "signed_curve25519" is always returned in key count responses
                # regardless of whether we uploaded any keys for it. This is necessary until
                # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
                #
                # Also related:
                # https://github.com/element-hq/element-android/issues/3725 and
                # https://github.com/matrix-org/synapse/issues/10456
                "signed_curve25519": 0
            },
        )
        self.assertEqual(
            channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
            [],
        )

    def test_device_lists(self) -> None:
        """
        Test that device list updates are included in the response
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        test_device_id = "TESTDEVICE"
        user3_id = self.register_user("user3", "pass")
        user3_tok = self.login(user3_id, "pass", device_id=test_device_id)

        user4_id = self.register_user("user4", "pass")
        user4_tok = self.login(user4_id, "pass")

        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id, user1_id, tok=user1_tok)
        self.helper.join(room_id, user3_id, tok=user3_tok)
        self.helper.join(room_id, user4_id, tok=user4_tok)

        sync_body = {
            "lists": {},
            "extensions": {
                "e2ee": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Have user3 update their device list
        channel = self.make_request(
            "PUT",
            f"/devices/{test_device_id}",
            {
                "display_name": "New Device Name",
            },
            access_token=user3_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # User4 leaves the room
        self.helper.leave(room_id, user4_id, tok=user4_tok)

        # Make an incremental Sliding Sync request with the e2ee extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # Device list updates show up
        self.assertEqual(
            response_body["extensions"]["e2ee"].get("device_lists", {}).get("changed"),
            [user3_id],
        )
        self.assertEqual(
            response_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
            [user4_id],
        )

    def test_device_one_time_keys_count(self) -> None:
        """
        Test that `device_one_time_keys_count` are included in the response
        """
        test_device_id = "TESTDEVICE"
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass", device_id=test_device_id)

        # Upload one time keys for the user/device
        keys: JsonDict = {
            "alg1:k1": "key1",
            "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}},
            "alg2:k3": {"key": "key3"},
        }
        upload_keys_response = self.get_success(
            self.e2e_keys_handler.upload_keys_for_user(
                user1_id, test_device_id, {"one_time_keys": keys}
            )
        )
        self.assertDictEqual(
            upload_keys_response,
            {
                "one_time_key_counts": {
                    "alg1": 1,
                    "alg2": 2,
                    # Note that "signed_curve25519" is always returned in key count responses
                    # regardless of whether we uploaded any keys for it. This is necessary until
                    # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
                    #
                    # Also related:
                    # https://github.com/element-hq/element-android/issues/3725 and
                    # https://github.com/matrix-org/synapse/issues/10456
                    "signed_curve25519": 0,
                }
            },
        )

        # Make a Sliding Sync request with the e2ee extension enabled
        sync_body = {
            "lists": {},
            "extensions": {
                "e2ee": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Check for those one time key counts
        self.assertEqual(
            response_body["extensions"]["e2ee"].get("device_one_time_keys_count"),
            {
                "alg1": 1,
                "alg2": 2,
                # Note that "signed_curve25519" is always returned in key count responses
                # regardless of whether we uploaded any keys for it. This is necessary until
                # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
                #
                # Also related:
                # https://github.com/element-hq/element-android/issues/3725 and
                # https://github.com/matrix-org/synapse/issues/10456
                "signed_curve25519": 0,
            },
        )

    def test_device_unused_fallback_key_types(self) -> None:
        """
        Test that `device_unused_fallback_key_types` are included in the response
        """
        test_device_id = "TESTDEVICE"
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass", device_id=test_device_id)

        # We shouldn't have any unused fallback keys yet
        res = self.get_success(
            self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id)
        )
        self.assertEqual(res, [])

        # Upload a fallback key for the user/device
        self.get_success(
            self.e2e_keys_handler.upload_keys_for_user(
                user1_id,
                test_device_id,
                {"fallback_keys": {"alg1:k1": "fallback_key1"}},
            )
        )
        # We should now have an unused alg1 key
        fallback_res = self.get_success(
            self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id)
        )
        self.assertEqual(fallback_res, ["alg1"], fallback_res)

        # Make a Sliding Sync request with the e2ee extension enabled
        sync_body = {
            "lists": {},
            "extensions": {
                "e2ee": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Check for the unused fallback key types
        self.assertListEqual(
            response_body["extensions"]["e2ee"].get("device_unused_fallback_key_types"),
            ["alg1"],
        )
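Pulling the assertions above together, the e2ee extension's response section looks roughly like this on an incremental sync. This is an illustrative reconstruction, not a verbatim server response; the user IDs are the placeholders used in the tests.

# Roughly the `extensions.e2ee` response shape (illustrative values):
e2ee_extension_response = {
    # Only present on incremental syncs; empty lists when nothing changed.
    "device_lists": {"changed": ["@user3:test"], "left": ["@user4:test"]},
    # "signed_curve25519" is always reported, even with no keys uploaded.
    "device_one_time_keys_count": {"alg1": 1, "alg2": 2, "signed_curve25519": 0},
    "device_unused_fallback_key_types": ["alg1"],
}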
679
tests/rest/client/sliding_sync/test_extension_receipts.py
Normal file
679
tests/rest/client/sliding_sync/test_extension_receipts.py
Normal file
@@ -0,0 +1,679 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.api.constants import EduTypes, ReceiptTypes
from synapse.rest.client import login, receipts, room, sync
from synapse.server import HomeServer
from synapse.types import StreamKeyType
from synapse.util import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.server import TimedOutException

logger = logging.getLogger(__name__)


class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase):
    """Tests for the receipts sliding sync extension"""

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
        receipts.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main

    def test_no_data_initial_sync(self) -> None:
        """
        Test that enabling the receipts extension works during an initial sync,
        even if there is no data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Make an initial Sliding Sync request with the receipts extension enabled
        sync_body = {
            "lists": {},
            "extensions": {
                "receipts": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        self.assertIncludes(
            response_body["extensions"]["receipts"].get("rooms").keys(),
            set(),
            exact=True,
        )
    def test_no_data_incremental_sync(self) -> None:
        """
        Test that enabling the receipts extension works during an incremental sync,
        even if there is no data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "receipts": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make an incremental Sliding Sync request with the receipts extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        self.assertIncludes(
            response_body["extensions"]["receipts"].get("rooms").keys(),
            set(),
            exact=True,
        )
    def test_receipts_initial_sync_with_timeline(self) -> None:
        """
        On initial sync, we only return receipts for events in a given room's timeline.

        We also make sure that we only return receipts for rooms that we request and
        that are already being returned in the Sliding Sync response.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        user3_tok = self.login(user3_id, "pass")
        user4_id = self.register_user("user4", "pass")
        user4_tok = self.login(user4_id, "pass")

        # Create a room
        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)
        self.helper.join(room_id1, user3_id, tok=user3_tok)
        self.helper.join(room_id1, user4_id, tok=user4_tok)
        room1_event_response1 = self.helper.send(
            room_id1, body="new event1", tok=user2_tok
        )
        room1_event_response2 = self.helper.send(
            room_id1, body="new event2", tok=user2_tok
        )
        # User1 reads the last event
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}",
            {},
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # User2 reads the last event
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}",
            {},
            access_token=user2_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # User3 reads the first event
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
            {},
            access_token=user3_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # User4 privately reads the last event (make sure this doesn't leak to the other users)
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ_PRIVATE}/{room1_event_response2['event_id']}",
            {},
            access_token=user4_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Create another room
        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id2, user1_id, tok=user1_tok)
        self.helper.join(room_id2, user3_id, tok=user3_tok)
        self.helper.join(room_id2, user4_id, tok=user4_tok)
        room2_event_response1 = self.helper.send(
            room_id2, body="new event2", tok=user2_tok
        )
        # User1 reads the last event
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}",
            {},
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # User2 reads the last event
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}",
            {},
            access_token=user2_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # User4 privately reads the last event (make sure this doesn't leak to the other users)
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ_PRIVATE}/{room2_event_response1['event_id']}",
            {},
            access_token=user4_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Make an initial Sliding Sync request with the receipts extension enabled
        sync_body = {
            "lists": {},
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    # On initial sync, we only have receipts for events in the timeline
                    "timeline_limit": 1,
                }
            },
            "extensions": {
                "receipts": {
                    "enabled": True,
                    "rooms": [room_id1, room_id2],
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Only the latest event in the room is in the timeline because the `timeline_limit` is 1
        self.assertIncludes(
            {
                event["event_id"]
                for event in response_body["rooms"][room_id1].get("timeline", [])
            },
            {room1_event_response2["event_id"]},
            exact=True,
            message=str(response_body["rooms"][room_id1]),
        )

        # Even though we requested room2, we only expect room1 to show up because that's
        # the only room in the Sliding Sync response (room2 is not one of our room
        # subscriptions or in a sliding window list).
        self.assertIncludes(
            response_body["extensions"]["receipts"].get("rooms").keys(),
            {room_id1},
            exact=True,
        )
        # Sanity check that it's the correct ephemeral event type
        self.assertEqual(
            response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
            EduTypes.RECEIPT,
        )
        # We can see user1 and user2 read receipts
        self.assertIncludes(
            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
                room1_event_response2["event_id"]
            ][ReceiptTypes.READ].keys(),
            {user1_id, user2_id},
            exact=True,
        )
        # User1 did not have a private read receipt and we shouldn't leak others'
        # private read receipts
        self.assertIncludes(
            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
                room1_event_response2["event_id"]
            ]
            .get(ReceiptTypes.READ_PRIVATE, {})
            .keys(),
            set(),
            exact=True,
        )

        # We shouldn't see receipts for event1 since it wasn't in the timeline and this is an initial sync
        self.assertIsNone(
            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"].get(
                room1_event_response1["event_id"]
            )
        )
    def test_receipts_incremental_sync(self) -> None:
        """
        On incremental sync, we return all receipts in the token range for a given room
        but only for rooms that we request and are being returned in the Sliding Sync
        response.
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        user3_tok = self.login(user3_id, "pass")

        # Create room1
        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)
        self.helper.join(room_id1, user3_id, tok=user3_tok)
        room1_event_response1 = self.helper.send(
            room_id1, body="new event2", tok=user2_tok
        )
        # User2 reads the last event (before the `from_token`)
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
            {},
            access_token=user2_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Create room2
        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id2, user1_id, tok=user1_tok)
        room2_event_response1 = self.helper.send(
            room_id2, body="new event2", tok=user2_tok
        )
        # User1 reads the last event (before the `from_token`)
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}",
            {},
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Create room3
        room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id3, user1_id, tok=user1_tok)
        self.helper.join(room_id3, user3_id, tok=user3_tok)
        room3_event_response1 = self.helper.send(
            room_id3, body="new event", tok=user2_tok
        )

        # Create room4
        room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id4, user1_id, tok=user1_tok)
        self.helper.join(room_id4, user3_id, tok=user3_tok)
        event_response4 = self.helper.send(room_id4, body="new event", tok=user2_tok)
        # User1 reads the last event (before the `from_token`)
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id4}/receipt/{ReceiptTypes.READ}/{event_response4['event_id']}",
            {},
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        sync_body = {
            "lists": {},
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    "timeline_limit": 0,
                },
                room_id3: {
                    "required_state": [],
                    "timeline_limit": 0,
                },
                room_id4: {
                    "required_state": [],
                    "timeline_limit": 0,
                },
            },
            "extensions": {
                "receipts": {
                    "enabled": True,
                    "rooms": [room_id1, room_id2, room_id3, room_id4],
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Add some more read receipts after the `from_token`
        #
        # User1 reads room1
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
            {},
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # User1 privately reads room2
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ_PRIVATE}/{room2_event_response1['event_id']}",
            {},
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # User3 reads room3
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id3}/receipt/{ReceiptTypes.READ}/{room3_event_response1['event_id']}",
            {},
            access_token=user3_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # No activity for room4 after the `from_token`

        # Make an incremental Sliding Sync request with the receipts extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # Even though we requested room2, we only expect rooms to show up if they are
        # already in the Sliding Sync response. room4 doesn't show up because there is
        # no activity after the `from_token`.
        self.assertIncludes(
            response_body["extensions"]["receipts"].get("rooms").keys(),
            {room_id1, room_id3},
            exact=True,
        )

        # Check room1:
        #
        # Sanity check that it's the correct ephemeral event type
        self.assertEqual(
            response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
            EduTypes.RECEIPT,
        )
        # We only see that user1 has read something in room1 since the `from_token`
        self.assertIncludes(
            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
                room1_event_response1["event_id"]
            ][ReceiptTypes.READ].keys(),
            {user1_id},
            exact=True,
        )
        # User1 did not send a private read receipt in this room and we shouldn't leak
        # others' private read receipts
        self.assertIncludes(
            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
                room1_event_response1["event_id"]
            ]
            .get(ReceiptTypes.READ_PRIVATE, {})
            .keys(),
            set(),
            exact=True,
        )
        # No events in the timeline since they were sent before the `from_token`
        self.assertNotIn(room_id1, response_body["rooms"])

        # Check room3:
        #
        # Sanity check that it's the correct ephemeral event type
        self.assertEqual(
            response_body["extensions"]["receipts"]["rooms"][room_id3]["type"],
            EduTypes.RECEIPT,
        )
        # We only see that user3 has read something in room3 since the `from_token`
        self.assertIncludes(
            response_body["extensions"]["receipts"]["rooms"][room_id3]["content"][
                room3_event_response1["event_id"]
            ][ReceiptTypes.READ].keys(),
            {user3_id},
            exact=True,
        )
        # User1 did not send a private read receipt in this room and we shouldn't leak
        # others' private read receipts
        self.assertIncludes(
            response_body["extensions"]["receipts"]["rooms"][room_id3]["content"][
                room3_event_response1["event_id"]
            ]
            .get(ReceiptTypes.READ_PRIVATE, {})
            .keys(),
            set(),
            exact=True,
        )
        # No events in the timeline since they were sent before the `from_token`
        self.assertNotIn(room_id3, response_body["rooms"])
    def test_receipts_incremental_sync_all_live_receipts(self) -> None:
        """
        On incremental sync, we return all receipts in the token range for a given room
        even if they are not in the timeline.
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        # Create room1
        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        sync_body = {
            "lists": {},
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    # The timeline will only include event2
                    "timeline_limit": 1,
                },
            },
            "extensions": {
                "receipts": {
                    "enabled": True,
                    "rooms": [room_id1],
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        room1_event_response1 = self.helper.send(
            room_id1, body="new event1", tok=user2_tok
        )
        room1_event_response2 = self.helper.send(
            room_id1, body="new event2", tok=user2_tok
        )

        # User1 reads event1
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
            {},
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # User2 reads event2
        channel = self.make_request(
            "POST",
            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}",
            {},
            access_token=user2_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Make an incremental Sliding Sync request with the receipts extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # We should see room1 because it has receipts in the token range
        self.assertIncludes(
            response_body["extensions"]["receipts"].get("rooms").keys(),
            {room_id1},
            exact=True,
        )
        # Sanity check that it's the correct ephemeral event type
        self.assertEqual(
            response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
            EduTypes.RECEIPT,
        )
        # We should see all receipts in the token range regardless of whether the events
        # are in the timeline
        self.assertIncludes(
            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
                room1_event_response1["event_id"]
            ][ReceiptTypes.READ].keys(),
            {user1_id},
            exact=True,
        )
        self.assertIncludes(
            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
                room1_event_response2["event_id"]
            ][ReceiptTypes.READ].keys(),
            {user2_id},
            exact=True,
        )
        # Only the latest event in the timeline because the `timeline_limit` is 1
        self.assertIncludes(
            {
                event["event_id"]
                for event in response_body["rooms"][room_id1].get("timeline", [])
            },
            {room1_event_response2["event_id"]},
            exact=True,
            message=str(response_body["rooms"][room_id1]),
        )
    def test_wait_for_new_data(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive.

        (Only applies to incremental syncs with a `timeout` specified)
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id, user1_id, tok=user1_tok)
        event_response = self.helper.send(room_id, body="new event", tok=user2_tok)

        sync_body = {
            "lists": {},
            "room_subscriptions": {
                room_id: {
                    "required_state": [],
                    "timeline_limit": 0,
                },
            },
            "extensions": {
                "receipts": {
                    "enabled": True,
                    "rooms": [room_id],
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make an incremental Sliding Sync request with the receipts extension enabled
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Bump the receipts to trigger new results
        receipt_channel = self.make_request(
            "POST",
            f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_response['event_id']}",
            {},
            access_token=user2_tok,
        )
        self.assertEqual(receipt_channel.code, 200, receipt_channel.json_body)
        # Should respond before the 10 second timeout
        channel.await_result(timeout_ms=3000)
        self.assertEqual(channel.code, 200, channel.json_body)

        # We should see the new receipt
        self.assertIncludes(
            channel.json_body.get("extensions", {})
            .get("receipts", {})
            .get("rooms", {})
            .keys(),
            {room_id},
            exact=True,
            message=str(channel.json_body),
        )
        self.assertIncludes(
            channel.json_body["extensions"]["receipts"]["rooms"][room_id]["content"][
                event_response["event_id"]
            ][ReceiptTypes.READ].keys(),
            {user2_id},
            exact=True,
        )
        # User1 did not send a private read receipt in this room and we shouldn't leak
        # others' private read receipts
        self.assertIncludes(
            channel.json_body["extensions"]["receipts"]["rooms"][room_id]["content"][
                event_response["event_id"]
            ]
            .get(ReceiptTypes.READ_PRIVATE, {})
            .keys(),
            set(),
            exact=True,
        )
    def test_wait_for_new_data_timeout(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive,
        but no data ever arrives, so we time out. We're also making sure that the
        default data from the receipts extension doesn't trigger a false-positive for
        new data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "receipts": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Wake up `notifier.wait_for_events(...)`, which will cause us to test
        # `SlidingSyncResult.__bool__` for new results.
        self._bump_notifier_wait_for_events(
            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
        )
        # Block for a little bit more to ensure we don't see any new results.
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=4000)
        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
        # 5000 + 4000 + 1200 > 10000)
        channel.await_result(timeout_ms=1200)
        self.assertEqual(channel.code, 200, channel.json_body)

        self.assertIncludes(
            channel.json_body["extensions"]["receipts"].get("rooms").keys(),
            set(),
            exact=True,
        )
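Note: pieced together from the assertions above, the receipts extension mirrors
the `m.receipt` EDU format. An illustrative literal of the response shape being
asserted (room/event/user IDs and the timestamp are placeholders, not output
from a real run):

    receipts_extension = {
        "rooms": {
            "!room1:test": {
                "type": "m.receipt",  # EduTypes.RECEIPT
                "content": {
                    "$event_id": {
                        "m.read": {
                            # Public read receipts from anyone in the room
                            "@user1:test": {"ts": 1234},
                        },
                        # m.read.private entries only ever contain the
                        # requester's own receipt; other users' private
                        # receipts must not leak.
                    },
                },
            },
        },
    }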
278 tests/rest/client/sliding_sync/test_extension_to_device.py Normal file
@@ -0,0 +1,278 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from typing import List

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.rest.client import login, sendtodevice, sync
from synapse.server import HomeServer
from synapse.types import JsonDict, StreamKeyType
from synapse.util import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.server import TimedOutException

logger = logging.getLogger(__name__)


class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase):
    """Tests for the to-device sliding sync extension"""

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        sync.register_servlets,
        sendtodevice.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
    def _assert_to_device_response(
        self, response_body: JsonDict, expected_messages: List[JsonDict]
    ) -> str:
        """Assert the sliding sync response was successful and has the expected
        to-device messages.

        Returns the next_batch token from the to-device section.
        """
        extensions = response_body["extensions"]
        to_device = extensions["to_device"]
        self.assertIsInstance(to_device["next_batch"], str)
        self.assertEqual(to_device["events"], expected_messages)

        return to_device["next_batch"]

    def test_no_data(self) -> None:
        """Test that enabling the to-device extension works, even if there is
        no data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # We expect no to-device messages
        self._assert_to_device_response(response_body, [])
    def test_data_initial_sync(self) -> None:
        """Test that we get to-device messages when we don't specify a since
        token"""

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass", "d1")
        user2_id = self.register_user("u2", "pass")
        user2_tok = self.login(user2_id, "pass", "d2")

        # Send the to-device message
        test_msg = {"foo": "bar"}
        chan = self.make_request(
            "PUT",
            "/_matrix/client/r0/sendToDevice/m.test/1234",
            content={"messages": {user1_id: {"d1": test_msg}}},
            access_token=user2_tok,
        )
        self.assertEqual(chan.code, 200, chan.result)

        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
        self._assert_to_device_response(
            response_body,
            [{"content": test_msg, "sender": user2_id, "type": "m.test"}],
        )
    def test_data_incremental_sync(self) -> None:
        """Test that we get to-device messages over incremental syncs"""

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass", "d1")
        user2_id = self.register_user("u2", "pass")
        user2_tok = self.login(user2_id, "pass", "d2")

        sync_body: JsonDict = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
        # No to-device messages yet.
        next_batch = self._assert_to_device_response(response_body, [])

        test_msg = {"foo": "bar"}
        chan = self.make_request(
            "PUT",
            "/_matrix/client/r0/sendToDevice/m.test/1234",
            content={"messages": {user1_id: {"d1": test_msg}}},
            access_token=user2_tok,
        )
        self.assertEqual(chan.code, 200, chan.result)

        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                    "since": next_batch,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
        next_batch = self._assert_to_device_response(
            response_body,
            [{"content": test_msg, "sender": user2_id, "type": "m.test"}],
        )

        # The next sliding sync request should not include the to-device
        # message.
        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                    "since": next_batch,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
        self._assert_to_device_response(response_body, [])

        # An initial sliding sync request should not include the to-device
        # message, as it should have been deleted
        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
        self._assert_to_device_response(response_body, [])
    def test_wait_for_new_data(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive.

        (Only applies to incremental syncs with a `timeout` specified)
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass", "d1")
        user2_id = self.register_user("u2", "pass")
        user2_tok = self.login(user2_id, "pass", "d2")

        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Bump the to-device messages to trigger new results
        test_msg = {"foo": "bar"}
        send_to_device_channel = self.make_request(
            "PUT",
            "/_matrix/client/r0/sendToDevice/m.test/1234",
            content={"messages": {user1_id: {"d1": test_msg}}},
            access_token=user2_tok,
        )
        self.assertEqual(
            send_to_device_channel.code, 200, send_to_device_channel.result
        )
        # Should respond before the 10 second timeout
        channel.await_result(timeout_ms=3000)
        self.assertEqual(channel.code, 200, channel.json_body)

        self._assert_to_device_response(
            channel.json_body,
            [{"content": test_msg, "sender": user2_id, "type": "m.test"}],
        )
    def test_wait_for_new_data_timeout(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive,
        but no data ever arrives, so we time out. We're also making sure that the
        default data from the To-Device extension doesn't trigger a false-positive for
        new data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "to_device": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Wake up `notifier.wait_for_events(...)`, which will cause us to test
        # `SlidingSyncResult.__bool__` for new results.
        self._bump_notifier_wait_for_events(
            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
        )
        # Block for a little bit more to ensure we don't see any new results.
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=4000)
        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
        # 5000 + 4000 + 1200 > 10000)
        channel.await_result(timeout_ms=1200)
        self.assertEqual(channel.code, 200, channel.json_body)

        self._assert_to_device_response(channel.json_body, [])
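Note: the timeout tests in these files rely on
`SlidingSyncBase._bump_notifier_wait_for_events`, defined in test_sliding_sync.py.
A plausible sketch of that helper, inferred only from how it is used here; the
exact body (and the `self.hs.get_notifier()` access and token value) are
assumptions, not the real implementation:

    from synapse.types import StreamKeyType

    # Hypothetical sketch: poke the notifier for this user on a stream that
    # carries no new sliding sync data, forcing the handler to re-evaluate
    # `SlidingSyncResult.__bool__` and, finding nothing new, keep waiting
    # instead of returning early.
    def _bump_notifier_wait_for_events(
        self, user_id: str, wake_stream_key: StreamKeyType
    ) -> None:
        notifier = self.hs.get_notifier()
        notifier.on_new_event(wake_stream_key, 9999, users=[user_id])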
482 tests/rest/client/sliding_sync/test_extension_typing.py Normal file
@@ -0,0 +1,482 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.api.constants import EduTypes
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.types import StreamKeyType
from synapse.util import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.server import TimedOutException

logger = logging.getLogger(__name__)


class SlidingSyncTypingExtensionTestCase(SlidingSyncBase):
    """Tests for the typing notification sliding sync extension"""

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
    def test_no_data_initial_sync(self) -> None:
        """
        Test that enabling the typing extension works during an initial sync,
        even if there is no data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Make an initial Sliding Sync request with the typing extension enabled
        sync_body = {
            "lists": {},
            "extensions": {
                "typing": {
                    "enabled": True,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        self.assertIncludes(
            response_body["extensions"]["typing"].get("rooms").keys(),
            set(),
            exact=True,
        )
    def test_no_data_incremental_sync(self) -> None:
        """
        Test that enabling the typing extension works during an incremental sync, even
        if there is no data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "typing": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make an incremental Sliding Sync request with the typing extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        self.assertIncludes(
            response_body["extensions"]["typing"].get("rooms").keys(),
            set(),
            exact=True,
        )
    def test_typing_initial_sync(self) -> None:
        """
        On initial sync, we return all typing notifications for rooms that we request
        and are being returned in the Sliding Sync response.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        user3_tok = self.login(user3_id, "pass")
        user4_id = self.register_user("user4", "pass")
        user4_tok = self.login(user4_id, "pass")

        # Create a room
        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)
        self.helper.join(room_id1, user3_id, tok=user3_tok)
        self.helper.join(room_id1, user4_id, tok=user4_tok)
        # User1 starts typing in room1
        channel = self.make_request(
            "PUT",
            f"/rooms/{room_id1}/typing/{user1_id}",
            b'{"typing": true, "timeout": 30000}',
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # User2 starts typing in room1
        channel = self.make_request(
            "PUT",
            f"/rooms/{room_id1}/typing/{user2_id}",
            b'{"typing": true, "timeout": 30000}',
            access_token=user2_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Create another room
        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id2, user1_id, tok=user1_tok)
        self.helper.join(room_id2, user3_id, tok=user3_tok)
        self.helper.join(room_id2, user4_id, tok=user4_tok)
        # User1 starts typing in room2
        channel = self.make_request(
            "PUT",
            f"/rooms/{room_id2}/typing/{user1_id}",
            b'{"typing": true, "timeout": 30000}',
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # User2 starts typing in room2
        channel = self.make_request(
            "PUT",
            f"/rooms/{room_id2}/typing/{user2_id}",
            b'{"typing": true, "timeout": 30000}',
            access_token=user2_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Make an initial Sliding Sync request with the typing extension enabled
        sync_body = {
            "lists": {},
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    "timeline_limit": 0,
                }
            },
            "extensions": {
                "typing": {
                    "enabled": True,
                    "rooms": [room_id1, room_id2],
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Even though we requested room2, we only expect room1 to show up because that's
        # the only room in the Sliding Sync response (room2 is not one of our room
        # subscriptions or in a sliding window list).
        self.assertIncludes(
            response_body["extensions"]["typing"].get("rooms").keys(),
            {room_id1},
            exact=True,
        )
        # Sanity check that it's the correct ephemeral event type
        self.assertEqual(
            response_body["extensions"]["typing"]["rooms"][room_id1]["type"],
            EduTypes.TYPING,
        )
        # We can see user1 and user2 typing
        self.assertIncludes(
            set(
                response_body["extensions"]["typing"]["rooms"][room_id1]["content"][
                    "user_ids"
                ]
            ),
            {user1_id, user2_id},
            exact=True,
        )
    def test_typing_incremental_sync(self) -> None:
        """
        On incremental sync, we return all typing notifications in the token range for a
        given room but only for rooms that we request and are being returned in the
        Sliding Sync response.
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        user3_tok = self.login(user3_id, "pass")

        # Create room1
        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)
        self.helper.join(room_id1, user3_id, tok=user3_tok)
        # User2 starts typing in room1
        channel = self.make_request(
            "PUT",
            f"/rooms/{room_id1}/typing/{user2_id}",
            b'{"typing": true, "timeout": 30000}',
            access_token=user2_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Create room2
        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id2, user1_id, tok=user1_tok)
        # User1 starts typing in room2 (before the `from_token`)
        channel = self.make_request(
            "PUT",
            f"/rooms/{room_id2}/typing/{user1_id}",
            b'{"typing": true, "timeout": 30000}',
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Create room3
        room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id3, user1_id, tok=user1_tok)
        self.helper.join(room_id3, user3_id, tok=user3_tok)

        # Create room4
        room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id4, user1_id, tok=user1_tok)
        self.helper.join(room_id4, user3_id, tok=user3_tok)
        # User1 starts typing in room4 (before the `from_token`)
        channel = self.make_request(
            "PUT",
            f"/rooms/{room_id4}/typing/{user1_id}",
            b'{"typing": true, "timeout": 30000}',
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Advance time so all of the typing notifications time out before we make our
        # Sliding Sync requests. Even though these are sent before the `from_token`, the
        # typing code only keeps track of the stream position of the latest typing
        # notification, so "old" typing notifications that are still "alive" (haven't
        # timed out) can appear in the response.
        self.reactor.advance(36)

        sync_body = {
            "lists": {},
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    "timeline_limit": 0,
                },
                room_id3: {
                    "required_state": [],
                    "timeline_limit": 0,
                },
                room_id4: {
                    "required_state": [],
                    "timeline_limit": 0,
                },
            },
            "extensions": {
                "typing": {
                    "enabled": True,
                    "rooms": [room_id1, room_id2, room_id3, room_id4],
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Add some more typing notifications after the `from_token`
        #
        # User1 starts typing in room1
        channel = self.make_request(
            "PUT",
            f"/rooms/{room_id1}/typing/{user1_id}",
            b'{"typing": true, "timeout": 30000}',
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # User1 starts typing in room2
        channel = self.make_request(
            "PUT",
            f"/rooms/{room_id2}/typing/{user1_id}",
            b'{"typing": true, "timeout": 30000}',
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # User3 starts typing in room3
        channel = self.make_request(
            "PUT",
            f"/rooms/{room_id3}/typing/{user3_id}",
            b'{"typing": true, "timeout": 30000}',
            access_token=user3_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        # No activity for room4 after the `from_token`

        # Make an incremental Sliding Sync request with the typing extension enabled
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # Even though we requested room2, we only expect rooms to show up if they are
        # already in the Sliding Sync response. room4 doesn't show up because there is
        # no activity after the `from_token`.
        self.assertIncludes(
            response_body["extensions"]["typing"].get("rooms").keys(),
            {room_id1, room_id3},
            exact=True,
        )

        # Check room1:
        #
        # Sanity check that it's the correct ephemeral event type
        self.assertEqual(
            response_body["extensions"]["typing"]["rooms"][room_id1]["type"],
            EduTypes.TYPING,
        )
        # We only see that user1 is typing in room1 since the `from_token`
        self.assertIncludes(
            set(
                response_body["extensions"]["typing"]["rooms"][room_id1]["content"][
                    "user_ids"
                ]
            ),
            {user1_id},
            exact=True,
        )

        # Check room3:
        #
        # Sanity check that it's the correct ephemeral event type
        self.assertEqual(
            response_body["extensions"]["typing"]["rooms"][room_id3]["type"],
            EduTypes.TYPING,
        )
        # We only see that user3 is typing in room3 since the `from_token`
        self.assertIncludes(
            set(
                response_body["extensions"]["typing"]["rooms"][room_id3]["content"][
                    "user_ids"
                ]
            ),
            {user3_id},
            exact=True,
        )
    def test_wait_for_new_data(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive.

        (Only applies to incremental syncs with a `timeout` specified)
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id, user1_id, tok=user1_tok)

        sync_body = {
            "lists": {},
            "room_subscriptions": {
                room_id: {
                    "required_state": [],
                    "timeline_limit": 0,
                },
            },
            "extensions": {
                "typing": {
                    "enabled": True,
                    "rooms": [room_id],
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make an incremental Sliding Sync request with the typing extension enabled
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Bump the typing status to trigger new results
        typing_channel = self.make_request(
            "PUT",
            f"/rooms/{room_id}/typing/{user2_id}",
            b'{"typing": true, "timeout": 30000}',
            access_token=user2_tok,
        )
        self.assertEqual(typing_channel.code, 200, typing_channel.json_body)
        # Should respond before the 10 second timeout
        channel.await_result(timeout_ms=3000)
        self.assertEqual(channel.code, 200, channel.json_body)

        # We should see the new typing notification
        self.assertIncludes(
            channel.json_body.get("extensions", {})
            .get("typing", {})
            .get("rooms", {})
            .keys(),
            {room_id},
            exact=True,
            message=str(channel.json_body),
        )
        self.assertIncludes(
            set(
                channel.json_body["extensions"]["typing"]["rooms"][room_id]["content"][
                    "user_ids"
                ]
            ),
            {user2_id},
            exact=True,
        )
    def test_wait_for_new_data_timeout(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive,
        but no data ever arrives, so we time out. We're also making sure that the
        default data from the typing extension doesn't trigger a false-positive for
        new data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        sync_body = {
            "lists": {},
            "extensions": {
                "typing": {
                    "enabled": True,
                }
            },
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Wake up `notifier.wait_for_events(...)`, which will cause us to test
        # `SlidingSyncResult.__bool__` for new results.
        self._bump_notifier_wait_for_events(
            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
        )
        # Block for a little bit more to ensure we don't see any new results.
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=4000)
        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
        # 5000 + 4000 + 1200 > 10000)
        channel.await_result(timeout_ms=1200)
        self.assertEqual(channel.code, 200, channel.json_body)

        self.assertIncludes(
            channel.json_body["extensions"]["typing"].get("rooms").keys(),
            set(),
            exact=True,
        )
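Note: from the assertions above, the typing extension reuses the `m.typing` EDU
format, and `user_ids` is the full set of users currently typing in that room
rather than a delta. An illustrative literal of the response shape (room/user
IDs are placeholders, not output from a real run):

    typing_extension = {
        "rooms": {
            "!room1:test": {
                "type": "m.typing",  # EduTypes.TYPING
                "content": {
                    # Everyone currently typing in the room; entries disappear
                    # once a notification times out or is cancelled
                    "user_ids": ["@user1:test", "@user2:test"],
                },
            },
        },
    }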
283 tests/rest/client/sliding_sync/test_extensions.py Normal file
@@ -0,0 +1,283 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from typing import Literal

from parameterized import parameterized
from typing_extensions import assert_never

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.api.constants import ReceiptTypes
from synapse.rest.client import login, receipts, room, sync
from synapse.server import HomeServer
from synapse.util import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase

logger = logging.getLogger(__name__)


class SlidingSyncExtensionsTestCase(SlidingSyncBase):
    """
    Test general extensions behavior in the Sliding Sync API. Each extension has its
    own suite of tests in its own file as well.
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
        receipts.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
        self.storage_controllers = hs.get_storage_controllers()
        self.account_data_handler = hs.get_account_data_handler()
    # Any extensions that use `lists`/`rooms` should be tested here
    @parameterized.expand([("account_data",), ("receipts",), ("typing",)])
    def test_extensions_lists_rooms_relevant_rooms(
        self,
        extension_name: Literal["account_data", "receipts", "typing"],
    ) -> None:
        """
        With various extensions, test out requesting different variations of
        `lists`/`rooms`.

        Stresses `SlidingSyncHandler.find_relevant_room_ids_for_extension(...)`
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Create some rooms
        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
        room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok)
        room_id4 = self.helper.create_room_as(user1_id, tok=user1_tok)
        room_id5 = self.helper.create_room_as(user1_id, tok=user1_tok)

        room_id_to_human_name_map = {
            room_id1: "room1",
            room_id2: "room2",
            room_id3: "room3",
            room_id4: "room4",
            room_id5: "room5",
        }

        for room_id in room_id_to_human_name_map.keys():
            if extension_name == "account_data":
                # Add some account data to each room
                self.get_success(
                    self.account_data_handler.add_account_data_to_room(
                        user_id=user1_id,
                        room_id=room_id,
                        account_data_type="org.matrix.roorarraz",
                        content={"roo": "rar"},
                    )
                )
            elif extension_name == "receipts":
                event_response = self.helper.send(
                    room_id, body="new event", tok=user1_tok
                )
                # Read last event
                channel = self.make_request(
                    "POST",
                    f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_response['event_id']}",
                    {},
                    access_token=user1_tok,
                )
                self.assertEqual(channel.code, 200, channel.json_body)
            elif extension_name == "typing":
                # Start a typing notification
                channel = self.make_request(
                    "PUT",
                    f"/rooms/{room_id}/typing/{user1_id}",
                    b'{"typing": true, "timeout": 30000}',
                    access_token=user1_tok,
                )
                self.assertEqual(channel.code, 200, channel.json_body)
            else:
                assert_never(extension_name)
        main_sync_body = {
            "lists": {
                # We expect this list range to include room5 and room4
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                },
                # We expect this list range to include room5, room4, room3
                "bar-list": {
                    "ranges": [[0, 2]],
                    "required_state": [],
                    "timeline_limit": 0,
                },
            },
            "room_subscriptions": {
                room_id1: {
                    "required_state": [],
                    "timeline_limit": 0,
                }
            },
        }
        # Mix lists and rooms
        sync_body = {
            **main_sync_body,
            "extensions": {
                extension_name: {
                    "enabled": True,
                    "lists": ["foo-list", "non-existent-list"],
                    "rooms": [room_id1, room_id2, "!non-existent-room"],
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # room1: ✅ Requested via `rooms` and a room subscription exists
        # room2: ❌ Requested via `rooms` but not in the response (from lists or room subscriptions)
        # room3: ❌ Not requested
        # room4: ✅ Shows up because requested via `lists` and list exists in the response
        # room5: ✅ Shows up because requested via `lists` and list exists in the response
        self.assertIncludes(
            {
                room_id_to_human_name_map[room_id]
                for room_id in response_body["extensions"][extension_name]
                .get("rooms")
                .keys()
            },
            {"room1", "room4", "room5"},
            exact=True,
        )
# Try wildcards (this is the default)
|
||||
sync_body = {
|
||||
**main_sync_body,
|
||||
"extensions": {
|
||||
extension_name: {
|
||||
"enabled": True,
|
||||
# "lists": ["*"],
|
||||
# "rooms": ["*"],
|
||||
}
|
||||
},
|
||||
}
|
||||
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
|
||||
|
||||
# room1: ✅ Shows up because of default `rooms` wildcard and is in one of the room subscriptions
|
||||
# room2: ❌ Not requested
|
||||
# room3: ✅ Shows up because of default `lists` wildcard and is in a list
|
||||
# room4: ✅ Shows up because of default `lists` wildcard and is in a list
|
||||
# room5: ✅ Shows up because of default `lists` wildcard and is in a list
|
||||
self.assertIncludes(
|
||||
{
|
||||
room_id_to_human_name_map[room_id]
|
||||
for room_id in response_body["extensions"][extension_name]
|
||||
.get("rooms")
|
||||
.keys()
|
||||
},
|
||||
{"room1", "room3", "room4", "room5"},
|
||||
exact=True,
|
||||
)
|
||||
|
        # Empty list will return nothing
        sync_body = {
            **main_sync_body,
            "extensions": {
                extension_name: {
                    "enabled": True,
                    "lists": [],
                    "rooms": [],
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # room1: ❌ Not requested
        # room2: ❌ Not requested
        # room3: ❌ Not requested
        # room4: ❌ Not requested
        # room5: ❌ Not requested
        self.assertIncludes(
            {
                room_id_to_human_name_map[room_id]
                for room_id in response_body["extensions"][extension_name]
                .get("rooms")
                .keys()
            },
            set(),
            exact=True,
        )

        # Try wildcard and none
        sync_body = {
            **main_sync_body,
            "extensions": {
                extension_name: {
                    "enabled": True,
                    "lists": ["*"],
                    "rooms": [],
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # room1: ❌ Not requested
        # room2: ❌ Not requested
        # room3: ✅ Shows up because of the `lists` wildcard and is in a list
        # room4: ✅ Shows up because of the `lists` wildcard and is in a list
        # room5: ✅ Shows up because of the `lists` wildcard and is in a list
        self.assertIncludes(
            {
                room_id_to_human_name_map[room_id]
                for room_id in response_body["extensions"][extension_name]
                .get("rooms")
                .keys()
            },
            {"room3", "room4", "room5"},
            exact=True,
        )

        # Try requesting a room that is only in a list
        sync_body = {
            **main_sync_body,
            "extensions": {
                extension_name: {
                    "enabled": True,
                    "lists": [],
                    "rooms": [room_id5],
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # room1: ❌ Not requested
        # room2: ❌ Not requested
        # room3: ❌ Not requested
        # room4: ❌ Not requested
        # room5: ✅ Requested via `rooms` and is in a list
        self.assertIncludes(
            {
                room_id_to_human_name_map[room_id]
                for room_id in response_body["extensions"][extension_name]
                .get("rooms")
                .keys()
            },
            {"room5"},
            exact=True,
        )
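# The relevancy rules pinned down above can be summarised in a small sketch.
# This is illustrative only (not Synapse's actual handler code); the function
# name and parameters are hypothetical. Omitted `lists`/`rooms` filters
# default to the "*" wildcard, and requested rooms/lists that don't appear
# anywhere in the response are simply ignored.
from typing import AbstractSet, Mapping, Optional, Sequence, Set


def relevant_rooms_for_extension(
    requested_lists: Optional[Sequence[str]],
    requested_rooms: Optional[Sequence[str]],
    lists_in_response: Mapping[str, AbstractSet[str]],  # list name -> room IDs
    subscribed_rooms_in_response: AbstractSet[str],
) -> Set[str]:
    if requested_lists is None:
        requested_lists = ["*"]
    if requested_rooms is None:
        requested_rooms = ["*"]

    rooms_in_response: Set[str] = set(subscribed_rooms_in_response)
    for room_ids in lists_in_response.values():
        rooms_in_response.update(room_ids)

    relevant: Set[str] = set()
    # Rooms pulled in via a matching (or wildcard) list filter
    for list_name, room_ids in lists_in_response.items():
        if "*" in requested_lists or list_name in requested_lists:
            relevant.update(room_ids)
    # Rooms requested directly, but only if they are in the response at all
    for room_id in rooms_in_response:
        if "*" in requested_rooms or room_id in requested_rooms:
            relevant.add(room_id)
    return relevant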
285 tests/rest/client/sliding_sync/test_room_subscriptions.py Normal file
@@ -0,0 +1,285 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from http import HTTPStatus

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.api.constants import EventTypes, HistoryVisibility
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.util import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase

logger = logging.getLogger(__name__)


class SlidingSyncRoomSubscriptionsTestCase(SlidingSyncBase):
    """
    Test `room_subscriptions` in the Sliding Sync API.
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
        self.storage_controllers = hs.get_storage_controllers()

    def test_room_subscriptions_with_join_membership(self) -> None:
        """
        Test that `room_subscriptions` with a joined room gives us timeline and current
        state events.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)

        # Make the Sliding Sync request with just the room subscription
        sync_body = {
            "room_subscriptions": {
                room_id1: {
                    "required_state": [
                        [EventTypes.Create, ""],
                    ],
                    "timeline_limit": 1,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        # We should see some state
        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            {
                state_map[(EventTypes.Create, "")],
            },
            exact=True,
        )
        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))

        # We should see some events
        self.assertEqual(
            [
                event["event_id"]
                for event in response_body["rooms"][room_id1]["timeline"]
            ],
            [
                join_response["event_id"],
            ],
            response_body["rooms"][room_id1]["timeline"],
        )
        # No "live" events in an initial sync (no `from_token` to define the "live"
        # range)
        self.assertEqual(
            response_body["rooms"][room_id1]["num_live"],
            0,
            response_body["rooms"][room_id1],
        )
        # There are more events to paginate to
        self.assertEqual(
            response_body["rooms"][room_id1]["limited"],
            True,
            response_body["rooms"][room_id1],
        )

    def test_room_subscriptions_with_leave_membership(self) -> None:
        """
        Test that `room_subscriptions` with a left room gives us timeline and state
        events up to the leave event.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.send_state(
            room_id1,
            event_type="org.matrix.foo_state",
            state_key="",
            body={"foo": "bar"},
            tok=user2_tok,
        )

        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
        leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)

        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        # Send some events after user1 leaves
        self.helper.send(room_id1, "activity after leave", tok=user2_tok)
        # Update state after user1 leaves
        self.helper.send_state(
            room_id1,
            event_type="org.matrix.foo_state",
            state_key="",
            body={"foo": "qux"},
            tok=user2_tok,
        )

        # Make the Sliding Sync request with just the room subscription
        sync_body = {
            "room_subscriptions": {
                room_id1: {
                    "required_state": [
                        ["org.matrix.foo_state", ""],
                    ],
                    "timeline_limit": 2,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # We should see the state at the time of the leave
        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            {
                state_map[("org.matrix.foo_state", "")],
            },
            exact=True,
        )
        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))

        # We should see some events from before we left (nothing after)
        self.assertEqual(
            [
                event["event_id"]
                for event in response_body["rooms"][room_id1]["timeline"]
            ],
            [
                join_response["event_id"],
                leave_response["event_id"],
            ],
            response_body["rooms"][room_id1]["timeline"],
        )
        # No "live" events in an initial sync (no `from_token` to define the "live"
        # range)
        self.assertEqual(
            response_body["rooms"][room_id1]["num_live"],
            0,
            response_body["rooms"][room_id1],
        )
        # There are more events to paginate to
        self.assertEqual(
            response_body["rooms"][room_id1]["limited"],
            True,
            response_body["rooms"][room_id1],
        )

    def test_room_subscriptions_no_leak_private_room(self) -> None:
        """
        Test that `room_subscriptions` with a private room we have never been in does
        not leak any data to the user.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=False)

        # We should not be able to join the private room
        self.helper.join(
            room_id1, user1_id, tok=user1_tok, expect_code=HTTPStatus.FORBIDDEN
        )

        # Make the Sliding Sync request with just the room subscription
        sync_body = {
            "room_subscriptions": {
                room_id1: {
                    "required_state": [
                        [EventTypes.Create, ""],
                    ],
                    "timeline_limit": 1,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # We should not see the room at all (we're not in it)
        self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"])

    def test_room_subscriptions_world_readable(self) -> None:
        """
        Test `room_subscriptions` with a room that has `world_readable` history visibility

        FIXME: We should be able to see the room timeline and state
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        # Create a room with `world_readable` history visibility
        room_id1 = self.helper.create_room_as(
            user2_id,
            tok=user2_tok,
            extra_content={
                "preset": "public_chat",
                "initial_state": [
                    {
                        "content": {
                            "history_visibility": HistoryVisibility.WORLD_READABLE
                        },
                        "state_key": "",
                        "type": EventTypes.RoomHistoryVisibility,
                    }
                ],
            },
        )
        # Ensure we're testing with a room with `world_readable` history visibility,
        # which means events are visible to anyone even without membership.
        history_visibility_response = self.helper.get_state(
            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
        )
        self.assertEqual(
            history_visibility_response.get("history_visibility"),
            HistoryVisibility.WORLD_READABLE,
        )

        # Note: We never join the room

        # Make the Sliding Sync request with just the room subscription
        sync_body = {
            "room_subscriptions": {
                room_id1: {
                    "required_state": [
                        [EventTypes.Create, ""],
                    ],
                    "timeline_limit": 1,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # FIXME: In the future, we should be able to see the room because it's
        # `world_readable`, but currently we don't support this.
        self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"])
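# For context, a client consuming these `room_subscriptions` responses can
# branch on the fields the tests above assert. A minimal, hypothetical
# client-side helper (not part of Synapse or its test helpers):
from typing import Any, Dict


def summarize_subscribed_room(room: Dict[str, Any]) -> str:
    # Invite rooms carry only stripped `invite_state`; joined/left rooms
    # carry `required_state`/`timeline` plus `num_live` and `limited`.
    if "invite_state" in room:
        return f"invite ({len(room['invite_state'])} stripped state events)"
    timeline = room.get("timeline", [])
    # `limited=True` means there is more history to paginate via `prev_batch`
    return (
        f"{len(timeline)} timeline event(s), "
        f"num_live={room.get('num_live')}, limited={room.get('limited')}"
    )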
510 tests/rest/client/sliding_sync/test_rooms_invites.py Normal file
@@ -0,0 +1,510 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.api.constants import EventTypes, HistoryVisibility
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.types import UserID
from synapse.util import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase

logger = logging.getLogger(__name__)


class SlidingSyncRoomsInvitesTestCase(SlidingSyncBase):
    """
    Test to make sure the `rooms` response looks good for invites in the Sliding Sync API.

    Invites behave a lot differently than other rooms because we don't include the
    `timeline` (`num_live`, `limited`, `prev_batch`) or `required_state` in favor of
    some stripped state under the `invite_state` key.

    Knocks probably have the same behavior but the spec doesn't mention knocks yet.
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
        self.storage_controllers = hs.get_storage_controllers()

    def test_rooms_invite_shared_history_initial_sync(self) -> None:
        """
        Test that `rooms` we are invited to have some stripped `invite_state` during an
        initial sync.

        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
        but we also shouldn't see any timeline events because the history visibility is
        `shared` and we haven't joined the room yet.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user1 = UserID.from_string(user1_id)
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user2 = UserID.from_string(user2_id)

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        # Ensure we're testing with a room with `shared` history visibility, which
        # means history is only visible once you actually join the room.
        history_visibility_response = self.helper.get_state(
            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
        )
        self.assertEqual(
            history_visibility_response.get("history_visibility"),
            HistoryVisibility.SHARED,
        )

        self.helper.send(room_id1, "activity before1", tok=user2_tok)
        self.helper.send(room_id1, "activity before2", tok=user2_tok)
        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
        self.helper.send(room_id1, "activity after3", tok=user2_tok)
        self.helper.send(room_id1, "activity after4", tok=user2_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 3,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # `timeline` is omitted for `invite` rooms with `stripped_state`
        self.assertIsNone(
            response_body["rooms"][room_id1].get("timeline"),
            response_body["rooms"][room_id1],
        )
        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
        self.assertIsNone(
            response_body["rooms"][room_id1].get("num_live"),
            response_body["rooms"][room_id1],
        )
        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
        self.assertIsNone(
            response_body["rooms"][room_id1].get("limited"),
            response_body["rooms"][room_id1],
        )
        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
        self.assertIsNone(
            response_body["rooms"][room_id1].get("prev_batch"),
            response_body["rooms"][room_id1],
        )
        # `required_state` is omitted for `invite` rooms with `stripped_state`
        self.assertIsNone(
            response_body["rooms"][room_id1].get("required_state"),
            response_body["rooms"][room_id1],
        )
        # We should have some `stripped_state` so the potential joiner can identify the
        # room (we don't care about the order).
        self.assertCountEqual(
            response_body["rooms"][room_id1]["invite_state"],
            [
                {
                    "content": {"creator": user2_id, "room_version": "10"},
                    "sender": user2_id,
                    "state_key": "",
                    "type": "m.room.create",
                },
                {
                    "content": {"join_rule": "public"},
                    "sender": user2_id,
                    "state_key": "",
                    "type": "m.room.join_rules",
                },
                {
                    "content": {"displayname": user2.localpart, "membership": "join"},
                    "sender": user2_id,
                    "state_key": user2_id,
                    "type": "m.room.member",
                },
                {
                    "content": {"displayname": user1.localpart, "membership": "invite"},
                    "sender": user2_id,
                    "state_key": user1_id,
                    "type": "m.room.member",
                },
            ],
            response_body["rooms"][room_id1]["invite_state"],
        )

    def test_rooms_invite_shared_history_incremental_sync(self) -> None:
        """
        Test that `rooms` we are invited to have some stripped `invite_state` during an
        incremental sync.

        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
        but we also shouldn't see any timeline events because the history visibility is
        `shared` and we haven't joined the room yet.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user1 = UserID.from_string(user1_id)
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user2 = UserID.from_string(user2_id)

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        # Ensure we're testing with a room with `shared` history visibility, which
        # means history is only visible once you actually join the room.
        history_visibility_response = self.helper.get_state(
            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
        )
        self.assertEqual(
            history_visibility_response.get("history_visibility"),
            HistoryVisibility.SHARED,
        )

        self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
        self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
        self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
        self.helper.send(room_id1, "activity after invite4", tok=user2_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 3,
                }
            }
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        self.helper.send(room_id1, "activity after token5", tok=user2_tok)
        self.helper.send(room_id1, "activity after token6", tok=user2_tok)

        # Make the incremental Sliding Sync request
        response_body, from_token = self.do_sync(
            sync_body, since=from_token, tok=user1_tok
        )

        # `timeline` is omitted for `invite` rooms with `stripped_state`
        self.assertIsNone(
            response_body["rooms"][room_id1].get("timeline"),
            response_body["rooms"][room_id1],
        )
        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
        self.assertIsNone(
            response_body["rooms"][room_id1].get("num_live"),
            response_body["rooms"][room_id1],
        )
        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
        self.assertIsNone(
            response_body["rooms"][room_id1].get("limited"),
            response_body["rooms"][room_id1],
        )
        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
        self.assertIsNone(
            response_body["rooms"][room_id1].get("prev_batch"),
            response_body["rooms"][room_id1],
        )
        # `required_state` is omitted for `invite` rooms with `stripped_state`
        self.assertIsNone(
            response_body["rooms"][room_id1].get("required_state"),
            response_body["rooms"][room_id1],
        )
        # We should have some `stripped_state` so the potential joiner can identify the
        # room (we don't care about the order).
        self.assertCountEqual(
            response_body["rooms"][room_id1]["invite_state"],
            [
                {
                    "content": {"creator": user2_id, "room_version": "10"},
                    "sender": user2_id,
                    "state_key": "",
                    "type": "m.room.create",
                },
                {
                    "content": {"join_rule": "public"},
                    "sender": user2_id,
                    "state_key": "",
                    "type": "m.room.join_rules",
                },
                {
                    "content": {"displayname": user2.localpart, "membership": "join"},
                    "sender": user2_id,
                    "state_key": user2_id,
                    "type": "m.room.member",
                },
                {
                    "content": {"displayname": user1.localpart, "membership": "invite"},
                    "sender": user2_id,
                    "state_key": user1_id,
                    "type": "m.room.member",
                },
            ],
            response_body["rooms"][room_id1]["invite_state"],
        )

    def test_rooms_invite_world_readable_history_initial_sync(self) -> None:
        """
        Test that `rooms` we are invited to have some stripped `invite_state` during an
        initial sync.

        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
        but depending on the semantics we decide, we could potentially see some
        historical events before/after the `from_token` because the history is
        `world_readable`. Same situation for events after the `from_token` if the
        history visibility was set to `invited`.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user1 = UserID.from_string(user1_id)
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user2 = UserID.from_string(user2_id)

        room_id1 = self.helper.create_room_as(
            user2_id,
            tok=user2_tok,
            extra_content={
                "preset": "public_chat",
                "initial_state": [
                    {
                        "content": {
                            "history_visibility": HistoryVisibility.WORLD_READABLE
                        },
                        "state_key": "",
                        "type": EventTypes.RoomHistoryVisibility,
                    }
                ],
            },
        )
        # Ensure we're testing with a room with `world_readable` history visibility,
        # which means events are visible to anyone even without membership.
        history_visibility_response = self.helper.get_state(
            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
        )
        self.assertEqual(
            history_visibility_response.get("history_visibility"),
            HistoryVisibility.WORLD_READABLE,
        )

        self.helper.send(room_id1, "activity before1", tok=user2_tok)
        self.helper.send(room_id1, "activity before2", tok=user2_tok)
        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
        self.helper.send(room_id1, "activity after3", tok=user2_tok)
        self.helper.send(room_id1, "activity after4", tok=user2_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    # Large enough to see the latest events and before the invite
                    "timeline_limit": 4,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # `timeline` is omitted for `invite` rooms with `stripped_state`
        self.assertIsNone(
            response_body["rooms"][room_id1].get("timeline"),
            response_body["rooms"][room_id1],
        )
        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
        self.assertIsNone(
            response_body["rooms"][room_id1].get("num_live"),
            response_body["rooms"][room_id1],
        )
        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
        self.assertIsNone(
            response_body["rooms"][room_id1].get("limited"),
            response_body["rooms"][room_id1],
        )
        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
        self.assertIsNone(
            response_body["rooms"][room_id1].get("prev_batch"),
            response_body["rooms"][room_id1],
        )
        # `required_state` is omitted for `invite` rooms with `stripped_state`
        self.assertIsNone(
            response_body["rooms"][room_id1].get("required_state"),
            response_body["rooms"][room_id1],
        )
        # We should have some `stripped_state` so the potential joiner can identify the
        # room (we don't care about the order).
        self.assertCountEqual(
            response_body["rooms"][room_id1]["invite_state"],
            [
                {
                    "content": {"creator": user2_id, "room_version": "10"},
                    "sender": user2_id,
                    "state_key": "",
                    "type": "m.room.create",
                },
                {
                    "content": {"join_rule": "public"},
                    "sender": user2_id,
                    "state_key": "",
                    "type": "m.room.join_rules",
                },
                {
                    "content": {"displayname": user2.localpart, "membership": "join"},
                    "sender": user2_id,
                    "state_key": user2_id,
                    "type": "m.room.member",
                },
                {
                    "content": {"displayname": user1.localpart, "membership": "invite"},
                    "sender": user2_id,
                    "state_key": user1_id,
                    "type": "m.room.member",
                },
            ],
            response_body["rooms"][room_id1]["invite_state"],
        )

    def test_rooms_invite_world_readable_history_incremental_sync(self) -> None:
        """
        Test that `rooms` we are invited to have some stripped `invite_state` during an
        incremental sync.

        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
        but depending on the semantics we decide, we could potentially see some
        historical events before/after the `from_token` because the history is
        `world_readable`. Same situation for events after the `from_token` if the
        history visibility was set to `invited`.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user1 = UserID.from_string(user1_id)
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user2 = UserID.from_string(user2_id)

        room_id1 = self.helper.create_room_as(
            user2_id,
            tok=user2_tok,
            extra_content={
                "preset": "public_chat",
                "initial_state": [
                    {
                        "content": {
                            "history_visibility": HistoryVisibility.WORLD_READABLE
                        },
                        "state_key": "",
                        "type": EventTypes.RoomHistoryVisibility,
                    }
                ],
            },
        )
        # Ensure we're testing with a room with `world_readable` history visibility,
        # which means events are visible to anyone even without membership.
        history_visibility_response = self.helper.get_state(
            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
        )
        self.assertEqual(
            history_visibility_response.get("history_visibility"),
            HistoryVisibility.WORLD_READABLE,
        )

        self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
        self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
        self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
        self.helper.send(room_id1, "activity after invite4", tok=user2_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    # Large enough to see the latest events and before the invite
                    "timeline_limit": 4,
                }
            }
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        self.helper.send(room_id1, "activity after token5", tok=user2_tok)
        self.helper.send(room_id1, "activity after token6", tok=user2_tok)

        # Make the incremental Sliding Sync request
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # `timeline` is omitted for `invite` rooms with `stripped_state`
        self.assertIsNone(
            response_body["rooms"][room_id1].get("timeline"),
            response_body["rooms"][room_id1],
        )
        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
        self.assertIsNone(
            response_body["rooms"][room_id1].get("num_live"),
            response_body["rooms"][room_id1],
        )
        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
        self.assertIsNone(
            response_body["rooms"][room_id1].get("limited"),
            response_body["rooms"][room_id1],
        )
        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
        self.assertIsNone(
            response_body["rooms"][room_id1].get("prev_batch"),
            response_body["rooms"][room_id1],
        )
        # `required_state` is omitted for `invite` rooms with `stripped_state`
        self.assertIsNone(
            response_body["rooms"][room_id1].get("required_state"),
            response_body["rooms"][room_id1],
        )
        # We should have some `stripped_state` so the potential joiner can identify the
        # room (we don't care about the order).
        self.assertCountEqual(
            response_body["rooms"][room_id1]["invite_state"],
            [
                {
                    "content": {"creator": user2_id, "room_version": "10"},
                    "sender": user2_id,
                    "state_key": "",
                    "type": "m.room.create",
                },
                {
                    "content": {"join_rule": "public"},
                    "sender": user2_id,
                    "state_key": "",
                    "type": "m.room.join_rules",
                },
                {
                    "content": {"displayname": user2.localpart, "membership": "join"},
                    "sender": user2_id,
                    "state_key": user2_id,
                    "type": "m.room.member",
                },
                {
                    "content": {"displayname": user1.localpart, "membership": "invite"},
                    "sender": user2_id,
                    "state_key": user1_id,
                    "type": "m.room.member",
                },
            ],
            response_body["rooms"][room_id1]["invite_state"],
        )
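# Since invites expose only `invite_state`, a client has to build its room
# summary from those stripped events. A hedged sketch using the exact event
# shapes asserted above (the helper itself is hypothetical, not part of
# Synapse or its test helpers):
from typing import Any, Dict, List


def summarize_invite(invite_state: List[Dict[str, Any]], my_user_id: str) -> str:
    # Index the stripped state events by (type, state_key) for easy lookup
    by_key = {(ev["type"], ev["state_key"]): ev for ev in invite_state}

    my_member = by_key.get(("m.room.member", my_user_id), {})
    inviter = my_member.get("sender", "unknown")

    join_rules = by_key.get(("m.room.join_rules", ""), {})
    join_rule = join_rules.get("content", {}).get("join_rule", "unknown")

    return f"invited by {inviter} (join rule: {join_rule})"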
710 tests/rest/client/sliding_sync/test_rooms_meta.py Normal file
@@ -0,0 +1,710 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.api.constants import EventTypes, Membership
from synapse.api.room_versions import RoomVersions
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.util import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.test_utils.event_injection import create_event

logger = logging.getLogger(__name__)


class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
    """
    Test rooms meta info like name, avatar, joined_count, invited_count, is_dm,
    bump_stamp in the Sliding Sync API.
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
        self.storage_controllers = hs.get_storage_controllers()

    def test_rooms_meta_when_joined(self) -> None:
        """
        Test that the `rooms` `name` and `avatar` are included in the response and
        reflect the current state of the room when the user is joined to the room.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(
            user2_id,
            tok=user2_tok,
            extra_content={
                "name": "my super room",
            },
        )
        # Set the room avatar URL
        self.helper.send_state(
            room_id1,
            EventTypes.RoomAvatar,
            {"url": "mxc://DUMMY_MEDIA_ID"},
            tok=user2_tok,
        )

        self.helper.join(room_id1, user1_id, tok=user1_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Reflect the current state of the room
        self.assertEqual(
            response_body["rooms"][room_id1]["name"],
            "my super room",
            response_body["rooms"][room_id1],
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["avatar"],
            "mxc://DUMMY_MEDIA_ID",
            response_body["rooms"][room_id1],
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["joined_count"],
            2,
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["invited_count"],
            0,
        )
        self.assertIsNone(
            response_body["rooms"][room_id1].get("is_dm"),
        )

    def test_rooms_meta_when_invited(self) -> None:
        """
        Test that the `rooms` `name` and `avatar` are included in the response and
        reflect the current state of the room when the user is invited to the room.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(
            user2_id,
            tok=user2_tok,
            extra_content={
                "name": "my super room",
            },
        )
        # Set the room avatar URL
        self.helper.send_state(
            room_id1,
            EventTypes.RoomAvatar,
            {"url": "mxc://DUMMY_MEDIA_ID"},
            tok=user2_tok,
        )

        # User1 is invited to the room
        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)

        # Update the room name after user1 is invited
        self.helper.send_state(
            room_id1,
            EventTypes.Name,
            {"name": "my super duper room"},
            tok=user2_tok,
        )
        # Update the room avatar URL after user1 is invited
        self.helper.send_state(
            room_id1,
            EventTypes.RoomAvatar,
            {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"},
            tok=user2_tok,
        )

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # This should still reflect the current state of the room even when the user is
        # invited.
        self.assertEqual(
            response_body["rooms"][room_id1]["name"],
            "my super duper room",
            response_body["rooms"][room_id1],
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["avatar"],
            "mxc://UPDATED_DUMMY_MEDIA_ID",
            response_body["rooms"][room_id1],
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["joined_count"],
            1,
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["invited_count"],
            1,
        )
        self.assertIsNone(
            response_body["rooms"][room_id1].get("is_dm"),
        )

    def test_rooms_meta_when_banned(self) -> None:
        """
        Test that the `rooms` `name` and `avatar` reflect the state of the room when the
        user was banned (do not leak current state).
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(
            user2_id,
            tok=user2_tok,
            extra_content={
                "name": "my super room",
            },
        )
        # Set the room avatar URL
        self.helper.send_state(
            room_id1,
            EventTypes.RoomAvatar,
            {"url": "mxc://DUMMY_MEDIA_ID"},
            tok=user2_tok,
        )

        self.helper.join(room_id1, user1_id, tok=user1_tok)
        self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)

        # Update the room name after user1 is banned
        self.helper.send_state(
            room_id1,
            EventTypes.Name,
            {"name": "my super duper room"},
            tok=user2_tok,
        )
        # Update the room avatar URL after user1 is banned
        self.helper.send_state(
            room_id1,
            EventTypes.RoomAvatar,
            {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"},
            tok=user2_tok,
        )

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Reflect the state of the room at the time of the ban
        self.assertEqual(
            response_body["rooms"][room_id1]["name"],
            "my super room",
            response_body["rooms"][room_id1],
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["avatar"],
            "mxc://DUMMY_MEDIA_ID",
            response_body["rooms"][room_id1],
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["joined_count"],
            # FIXME: The actual number should be "1" (user2) but we currently don't
            # support this for rooms where the user has left/been banned.
            0,
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["invited_count"],
            0,
        )
        self.assertIsNone(
            response_body["rooms"][room_id1].get("is_dm"),
        )

    def test_rooms_meta_heroes(self) -> None:
        """
        Test that the `rooms` `heroes` are included in the response when the room
        doesn't have a room name set.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        _user3_tok = self.login(user3_id, "pass")

        room_id1 = self.helper.create_room_as(
            user2_id,
            tok=user2_tok,
            extra_content={
                "name": "my super room",
            },
        )
        self.helper.join(room_id1, user1_id, tok=user1_tok)
        # User3 is invited
        self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok)

        room_id2 = self.helper.create_room_as(
            user2_id,
            tok=user2_tok,
            extra_content={
                # No room name set so that `heroes` is populated
                #
                # "name": "my super room2",
            },
        )
        self.helper.join(room_id2, user1_id, tok=user1_tok)
        # User3 is invited
        self.helper.invite(room_id2, src=user2_id, targ=user3_id, tok=user2_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Room1 has a name so we shouldn't see any `heroes`, which the client would
        # otherwise use to calculate the room name themselves.
        self.assertEqual(
            response_body["rooms"][room_id1]["name"],
            "my super room",
            response_body["rooms"][room_id1],
        )
        self.assertIsNone(response_body["rooms"][room_id1].get("heroes"))
        self.assertEqual(
            response_body["rooms"][room_id1]["joined_count"],
            2,
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["invited_count"],
            1,
        )

        # Room2 doesn't have a name so we should see `heroes` populated
        self.assertIsNone(response_body["rooms"][room_id2].get("name"))
        self.assertCountEqual(
            [
                hero["user_id"]
                for hero in response_body["rooms"][room_id2].get("heroes", [])
            ],
            # Heroes shouldn't include the user themselves (we shouldn't see user1)
            [user2_id, user3_id],
        )
        self.assertEqual(
            response_body["rooms"][room_id2]["joined_count"],
            2,
        )
        self.assertEqual(
            response_body["rooms"][room_id2]["invited_count"],
            1,
        )

        # We didn't request any state so we shouldn't see any `required_state`
        self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
        self.assertIsNone(response_body["rooms"][room_id2].get("required_state"))

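# The hero rule the assertions above and below encode: take the room's
# members, drop the requesting user, cap at five. An illustrative sketch of
# that rule, not Synapse's actual implementation:
from typing import List


def pick_heroes(member_user_ids: List[str], my_user_id: str) -> List[str]:
    # Only consulted when the room has no explicit name to display
    return [user_id for user_id in member_user_ids if user_id != my_user_id][:5]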
    def test_rooms_meta_heroes_max(self) -> None:
        """
        Test that the `rooms` `heroes` only includes the first 5 users (not including
        yourself).
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        user3_tok = self.login(user3_id, "pass")
        user4_id = self.register_user("user4", "pass")
        user4_tok = self.login(user4_id, "pass")
        user5_id = self.register_user("user5", "pass")
        user5_tok = self.login(user5_id, "pass")
        user6_id = self.register_user("user6", "pass")
        user6_tok = self.login(user6_id, "pass")
        user7_id = self.register_user("user7", "pass")
        user7_tok = self.login(user7_id, "pass")

        room_id1 = self.helper.create_room_as(
            user2_id,
            tok=user2_tok,
            extra_content={
                # No room name set so that `heroes` is populated
                #
                # "name": "my super room",
            },
        )
        self.helper.join(room_id1, user1_id, tok=user1_tok)
        self.helper.join(room_id1, user3_id, tok=user3_tok)
        self.helper.join(room_id1, user4_id, tok=user4_tok)
        self.helper.join(room_id1, user5_id, tok=user5_tok)
        self.helper.join(room_id1, user6_id, tok=user6_tok)
        self.helper.join(room_id1, user7_id, tok=user7_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Room1 doesn't have a name so we should see `heroes` populated
        self.assertIsNone(response_body["rooms"][room_id1].get("name"))
        self.assertCountEqual(
            [
                hero["user_id"]
                for hero in response_body["rooms"][room_id1].get("heroes", [])
            ],
            # Heroes should be the first 5 users in the room (excluding the user
            # themselves, we shouldn't see `user1`)
            [user2_id, user3_id, user4_id, user5_id, user6_id],
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["joined_count"],
            7,
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["invited_count"],
            0,
        )

        # We didn't request any state so we shouldn't see any `required_state`
        self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))

    def test_rooms_meta_heroes_when_banned(self) -> None:
        """
        Test that the `rooms` `heroes` are included in the response when the room
        doesn't have a room name set, but don't leak information past the user's ban.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        _user3_tok = self.login(user3_id, "pass")
        user4_id = self.register_user("user4", "pass")
        user4_tok = self.login(user4_id, "pass")
        user5_id = self.register_user("user5", "pass")
        _user5_tok = self.login(user5_id, "pass")

        room_id1 = self.helper.create_room_as(
            user2_id,
            tok=user2_tok,
            extra_content={
                # No room name set so that `heroes` is populated
                #
                # "name": "my super room",
            },
        )
        # User1 joins the room
        self.helper.join(room_id1, user1_id, tok=user1_tok)
        # User3 is invited
        self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok)

        # User1 is banned from the room
        self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)

        # User4 joins the room after user1 is banned
        self.helper.join(room_id1, user4_id, tok=user4_tok)
        # User5 is invited after user1 is banned
        self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Room1 doesn't have a name so we should see `heroes` populated
        self.assertIsNone(response_body["rooms"][room_id1].get("name"))
        self.assertCountEqual(
            [
                hero["user_id"]
                for hero in response_body["rooms"][room_id1].get("heroes", [])
            ],
            # Heroes shouldn't include the user themselves (we shouldn't see user1). We
            # also shouldn't see user4 since they joined after user1 was banned.
            #
            # FIXME: The actual result should be `[user2_id, user3_id]` but we currently
            # don't support this for rooms where the user has left/been banned.
            [],
        )

        self.assertEqual(
            response_body["rooms"][room_id1]["joined_count"],
            # FIXME: The actual number should be "1" (user2) but we currently don't
            # support this for rooms where the user has left/been banned.
            0,
        )
        self.assertEqual(
            response_body["rooms"][room_id1]["invited_count"],
            # We shouldn't see user5 since they were invited after user1 was banned.
            #
            # FIXME: The actual number should be "1" (user3) but we currently don't
            # support this for rooms where the user has left/been banned.
            0,
        )

    def test_rooms_bump_stamp(self) -> None:
        """
        Test that `bump_stamp` is present and pointing to relevant events.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        room_id1 = self.helper.create_room_as(
            user1_id,
            tok=user1_tok,
        )
        event_response1 = message_response = self.helper.send(
            room_id1, "message in room1", tok=user1_tok
        )
        event_pos1 = self.get_success(
            self.store.get_position_for_event(event_response1["event_id"])
        )
        room_id2 = self.helper.create_room_as(
            user1_id,
            tok=user1_tok,
        )
        send_response2 = self.helper.send(room_id2, "message in room2", tok=user1_tok)
        event_pos2 = self.get_success(
            self.store.get_position_for_event(send_response2["event_id"])
        )

        # Send a reaction in room1 but it shouldn't affect the `bump_stamp`
        # because reactions are not part of the `DEFAULT_BUMP_EVENT_TYPES`
        self.helper.send_event(
            room_id1,
            type=EventTypes.Reaction,
            content={
                "m.relates_to": {
                    "event_id": message_response["event_id"],
                    "key": "👍",
                    "rel_type": "m.annotation",
                }
            },
            tok=user1_tok,
        )

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 100,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Make sure it has the foo-list we requested
        self.assertListEqual(
            list(response_body["lists"].keys()),
            ["foo-list"],
            response_body["lists"].keys(),
        )

        # Make sure the list includes the rooms in the right order
        self.assertListEqual(
            list(response_body["lists"]["foo-list"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 1],
                    # room1 sorts before room2 because it has the latest event (the
                    # reaction)
                    "room_ids": [room_id1, room_id2],
                }
            ],
            response_body["lists"]["foo-list"],
        )

        # The `bump_stamp` for room1 should point at the latest message (not the
        # reaction since it's not one of the `DEFAULT_BUMP_EVENT_TYPES`)
        self.assertEqual(
            response_body["rooms"][room_id1]["bump_stamp"],
            event_pos1.stream,
            response_body["rooms"][room_id1],
        )

        # The `bump_stamp` for room2 should point at the latest message
        self.assertEqual(
            response_body["rooms"][room_id2]["bump_stamp"],
            event_pos2.stream,
            response_body["rooms"][room_id2],
        )

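# A rough model of the `bump_stamp` rule these two tests pin down: the
# highest stream ordering among events of a "bump" type, skipping backfilled
# events (which have negative stream orderings). Illustrative only; the
# authoritative type list is `DEFAULT_BUMP_EVENT_TYPES` in Synapse itself.
from typing import Iterable, Optional, Tuple

# Assumed subset of bump types, for illustration
BUMP_EVENT_TYPES = {"m.room.create", "m.room.message", "m.room.encrypted"}


def compute_bump_stamp(
    timeline: Iterable[Tuple[str, int]],  # (event type, stream ordering)
) -> Optional[int]:
    candidates = [
        stream
        for event_type, stream in timeline
        # Reactions etc. don't bump; backfilled events have stream < 0
        if event_type in BUMP_EVENT_TYPES and stream > 0
    ]
    return max(candidates, default=None)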
    def test_rooms_bump_stamp_backfill(self) -> None:
        """
        Test that `bump_stamp` ignores backfilled events, i.e. events with a
        negative stream ordering.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Create a remote room
        creator = "@user:other"
        room_id = "!foo:other"
        shared_kwargs = {
            "room_id": room_id,
            "room_version": "10",
        }

        create_tuple = self.get_success(
            create_event(
                self.hs,
                prev_event_ids=[],
                type=EventTypes.Create,
                state_key="",
                sender=creator,
                **shared_kwargs,
            )
        )
        creator_tuple = self.get_success(
            create_event(
                self.hs,
                prev_event_ids=[create_tuple[0].event_id],
                auth_event_ids=[create_tuple[0].event_id],
                type=EventTypes.Member,
                state_key=creator,
                content={"membership": Membership.JOIN},
                sender=creator,
                **shared_kwargs,
            )
        )
        # We add a message event as a valid "bump type"
        msg_tuple = self.get_success(
            create_event(
                self.hs,
                prev_event_ids=[creator_tuple[0].event_id],
                auth_event_ids=[create_tuple[0].event_id],
                type=EventTypes.Message,
                content={"body": "foo", "msgtype": "m.text"},
                sender=creator,
                **shared_kwargs,
            )
        )
        invite_tuple = self.get_success(
            create_event(
                self.hs,
                prev_event_ids=[msg_tuple[0].event_id],
                auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id],
                type=EventTypes.Member,
                state_key=user1_id,
                content={"membership": Membership.INVITE},
                sender=creator,
                **shared_kwargs,
            )
        )

        remote_events_and_contexts = [
            create_tuple,
            creator_tuple,
            msg_tuple,
            invite_tuple,
        ]

        # Ensure the local HS knows the room version
        self.get_success(
            self.store.store_room(room_id, creator, False, RoomVersions.V10)
        )

        # Persist these events as backfilled events.
        persistence = self.hs.get_storage_controllers().persistence
        assert persistence is not None

        for event, context in remote_events_and_contexts:
            self.get_success(persistence.persist_event(event, context, backfilled=True))

        # Now we join the local user to the room
        join_tuple = self.get_success(
            create_event(
                self.hs,
                prev_event_ids=[invite_tuple[0].event_id],
                auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id],
                type=EventTypes.Member,
                state_key=user1_id,
                content={"membership": Membership.JOIN},
                sender=user1_id,
                **shared_kwargs,
            )
        )
        self.get_success(persistence.persist_event(*join_tuple))

        # Doing a Sliding Sync request should return a positive `bump_stamp`, even
        # though the only event that matches the bump types has a negative stream
        # ordering.
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 5,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        self.assertGreater(response_body["rooms"][room_id]["bump_stamp"], 0)
707 tests/rest/client/sliding_sync/test_rooms_required_state.py Normal file
@@ -0,0 +1,707 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging

from parameterized import parameterized

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.api.constants import EventTypes, Membership
from synapse.handlers.sliding_sync import StateValues
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.util import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.test_utils.event_injection import mark_event_as_partial_state

logger = logging.getLogger(__name__)


class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
    """
    Test `rooms.required_state` in the Sliding Sync API.
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
        self.storage_controllers = hs.get_storage_controllers()

||||
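    # `required_state` entries are `[event_type, state_key]` pairs. Per MSC3575,
    # the special `state_key` values "*" (any), "$LAZY" (lazy-load room members)
    # and "$ME" (the requesting user's ID) are supported, plus "*" for the
    # `event_type`; these map to `StateValues.WILDCARD`, `StateValues.LAZY` and
    # `StateValues.ME` in Synapse.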
    def test_rooms_no_required_state(self) -> None:
        """
        Empty `rooms.required_state` should not return any state events in the room
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    # Empty `required_state`
                    "required_state": [],
                    "timeline_limit": 0,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # No `required_state` in response
        self.assertIsNone(
            response_body["rooms"][room_id1].get("required_state"),
            response_body["rooms"][room_id1],
        )
    def test_rooms_required_state_initial_sync(self) -> None:
        """
        Test `rooms.required_state` returns requested state events in the room during an
        initial sync.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Create, ""],
                        [EventTypes.RoomHistoryVisibility, ""],
                        # This one doesn't exist in the room
                        [EventTypes.Tombstone, ""],
                    ],
                    "timeline_limit": 0,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            {
                state_map[(EventTypes.Create, "")],
                state_map[(EventTypes.RoomHistoryVisibility, "")],
            },
            exact=True,
        )
        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
    def test_rooms_required_state_incremental_sync(self) -> None:
        """
        Test `rooms.required_state` returns requested state events in the room during an
        incremental sync.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Create, ""],
                        [EventTypes.RoomHistoryVisibility, ""],
                        # This one doesn't exist in the room
                        [EventTypes.Tombstone, ""],
                    ],
                    "timeline_limit": 1,
                }
            }
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Send a message so the room comes down sync.
        self.helper.send(room_id1, "msg", tok=user1_tok)

        # Make the incremental Sliding Sync request
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # We only return state updates, and only if we've sent the room down the
        # connection before.
        self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
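    # Note: sliding sync connections are tracked in memory on the worker handling
    # `/sync`, keyed by the `pos` token the client passes back. A restart wipes
    # that per-connection state, which is what the next test simulates.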
    def test_rooms_incremental_sync_restart(self) -> None:
        """
        Test that after a restart (and so the in-memory caches are reset) we
        correctly return an `M_UNKNOWN_POS` error.
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Create, ""],
                        [EventTypes.RoomHistoryVisibility, ""],
                        # This one doesn't exist in the room
                        [EventTypes.Tombstone, ""],
                    ],
                    "timeline_limit": 1,
                }
            }
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Reset the in-memory cache
        self.hs.get_sliding_sync_handler().connection_store._connections.clear()

        # Make the Sliding Sync request
        channel = self.make_request(
            method="POST",
            path=self.sync_endpoint + f"?pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 400, channel.json_body)
        self.assertEqual(
            channel.json_body["errcode"], "M_UNKNOWN_POS", channel.json_body
        )
    def test_rooms_required_state_wildcard(self) -> None:
        """
        Test `rooms.required_state` returns all state events when using wildcard `["*", "*"]`.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        self.helper.send_state(
            room_id1,
            event_type="org.matrix.foo_state",
            state_key="",
            body={"foo": "bar"},
            tok=user2_tok,
        )
        self.helper.send_state(
            room_id1,
            event_type="org.matrix.foo_state",
            state_key="namespaced",
            body={"foo": "bar"},
            tok=user2_tok,
        )

        # Make the Sliding Sync request with wildcards for the `event_type` and `state_key`
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [StateValues.WILDCARD, StateValues.WILDCARD],
                    ],
                    "timeline_limit": 0,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            # We should see all the state events in the room
            state_map.values(),
            exact=True,
        )
        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
    def test_rooms_required_state_wildcard_event_type(self) -> None:
        """
        Test `rooms.required_state` returns relevant state events when using a wildcard
        for the event_type, `["*", "foobarbaz"]`.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        self.helper.send_state(
            room_id1,
            event_type="org.matrix.foo_state",
            state_key="",
            body={"foo": "bar"},
            tok=user2_tok,
        )
        self.helper.send_state(
            room_id1,
            event_type="org.matrix.foo_state",
            state_key=user2_id,
            body={"foo": "bar"},
            tok=user2_tok,
        )

        # Make the Sliding Sync request with a wildcard for the `event_type`
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [StateValues.WILDCARD, user2_id],
                    ],
                    "timeline_limit": 0,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        # We expect at least any state event with `user2_id` as the `state_key`
        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            {
                state_map[(EventTypes.Member, user2_id)],
                state_map[("org.matrix.foo_state", user2_id)],
            },
            # Ideally, this would be exact but we're currently returning all state
            # events when the `event_type` is a wildcard.
            exact=False,
        )
        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
    def test_rooms_required_state_wildcard_state_key(self) -> None:
        """
        Test `rooms.required_state` returns relevant state events when using a wildcard
        for the state_key, `["foobarbaz", "*"]`.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        # Make the Sliding Sync request with a wildcard for the `state_key`
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Member, StateValues.WILDCARD],
                    ],
                    "timeline_limit": 0,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            {
                state_map[(EventTypes.Member, user1_id)],
                state_map[(EventTypes.Member, user2_id)],
            },
            exact=True,
        )
        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
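    # Lazy-loading (`$LAZY`) mirrors the lazy_load_members behaviour of sync v2:
    # instead of returning every `m.room.member` event, only the membership events
    # for senders that appear in the returned `timeline` chunk are included.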
    def test_rooms_required_state_lazy_loading_room_members(self) -> None:
        """
        Test `rooms.required_state` returns people relevant to the timeline when
        lazy-loading room members, `["m.room.member","$LAZY"]`.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        user3_tok = self.login(user3_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)
        self.helper.join(room_id1, user3_id, tok=user3_tok)

        self.helper.send(room_id1, "1", tok=user2_tok)
        self.helper.send(room_id1, "2", tok=user3_tok)
        self.helper.send(room_id1, "3", tok=user2_tok)

        # Make the Sliding Sync request with lazy loading for the room members
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Create, ""],
                        [EventTypes.Member, StateValues.LAZY],
                    ],
                    "timeline_limit": 3,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        # Only user2 and user3 sent events in the 3 events we see in the `timeline`
        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            {
                state_map[(EventTypes.Create, "")],
                state_map[(EventTypes.Member, user2_id)],
                state_map[(EventTypes.Member, user3_id)],
            },
            exact=True,
        )
        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
    def test_rooms_required_state_me(self) -> None:
        """
        Test `rooms.required_state` correctly handles $ME.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        self.helper.send(room_id1, "1", tok=user2_tok)

        # Also send normal state events with state keys of the users, first
        # change the power levels to allow this.
        self.helper.send_state(
            room_id1,
            event_type=EventTypes.PowerLevels,
            body={"users": {user1_id: 50, user2_id: 100}},
            tok=user2_tok,
        )
        self.helper.send_state(
            room_id1,
            event_type="org.matrix.foo",
            state_key=user1_id,
            body={},
            tok=user1_tok,
        )
        self.helper.send_state(
            room_id1,
            event_type="org.matrix.foo",
            state_key=user2_id,
            body={},
            tok=user2_tok,
        )

        # Make the Sliding Sync request with a request for '$ME'.
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Create, ""],
                        [EventTypes.Member, StateValues.ME],
                        ["org.matrix.foo", StateValues.ME],
                    ],
                    "timeline_limit": 3,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        # We should only see state events where the `state_key` is the requesting
        # user (`$ME` resolves to user1)
        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            {
                state_map[(EventTypes.Create, "")],
                state_map[(EventTypes.Member, user1_id)],
                state_map[("org.matrix.foo", user1_id)],
            },
            exact=True,
        )
        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
    @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)])
    def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None:
        """
        Test `rooms.required_state` should not return state past a leave/ban event.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        user3_tok = self.login(user3_id, "pass")

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Create, ""],
                        [EventTypes.Member, "*"],
                        ["org.matrix.foo_state", ""],
                    ],
                    "timeline_limit": 3,
                }
            }
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)
        self.helper.join(room_id1, user3_id, tok=user3_tok)

        self.helper.send_state(
            room_id1,
            event_type="org.matrix.foo_state",
            state_key="",
            body={"foo": "bar"},
            tok=user2_tok,
        )

        if stop_membership == Membership.LEAVE:
            # User 1 leaves
            self.helper.leave(room_id1, user1_id, tok=user1_tok)
        elif stop_membership == Membership.BAN:
            # User 1 is banned
            self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)

        # Snapshot the state at the point user 1 left/was banned
        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        # Change the state after user 1 leaves/is banned
        self.helper.send_state(
            room_id1,
            event_type="org.matrix.foo_state",
            state_key="",
            body={"foo": "qux"},
            tok=user2_tok,
        )
        self.helper.leave(room_id1, user3_id, tok=user3_tok)

        # Make an incremental Sliding Sync request
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # We should only see state up to (and including) the leave/ban event; the
        # later `org.matrix.foo_state` change and user3's leave are not visible.
        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            {
                state_map[(EventTypes.Create, "")],
                state_map[(EventTypes.Member, user1_id)],
                state_map[(EventTypes.Member, user2_id)],
                state_map[(EventTypes.Member, user3_id)],
                state_map[("org.matrix.foo_state", "")],
            },
            exact=True,
        )
        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
    def test_rooms_required_state_combine_superset(self) -> None:
        """
        Test `rooms.required_state` is combined across lists and room subscriptions.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        self.helper.send_state(
            room_id1,
            event_type="org.matrix.foo_state",
            state_key="",
            body={"foo": "bar"},
            tok=user2_tok,
        )
        self.helper.send_state(
            room_id1,
            event_type="org.matrix.bar_state",
            state_key="",
            body={"bar": "qux"},
            tok=user2_tok,
        )

        # Make the Sliding Sync request with two overlapping lists and a room
        # subscription; the union of all their `required_state` should be returned.
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Create, ""],
                        [EventTypes.Member, user1_id],
                    ],
                    "timeline_limit": 0,
                },
                "bar-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Member, StateValues.WILDCARD],
                        ["org.matrix.foo_state", ""],
                    ],
                    "timeline_limit": 0,
                },
            },
            "room_subscriptions": {
                room_id1: {
                    "required_state": [["org.matrix.bar_state", ""]],
                    "timeline_limit": 0,
                }
            },
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        state_map = self.get_success(
            self.storage_controllers.state.get_current_state(room_id1)
        )

        self._assertRequiredStateIncludes(
            response_body["rooms"][room_id1]["required_state"],
            {
                state_map[(EventTypes.Create, "")],
                state_map[(EventTypes.Member, user1_id)],
                state_map[(EventTypes.Member, user2_id)],
                state_map[("org.matrix.foo_state", "")],
                state_map[("org.matrix.bar_state", "")],
            },
            exact=True,
        )
        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
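    # "Partially-stated" rooms are rooms joined over federation where the full
    # room state hasn't been synced down yet, so the server can't reliably answer
    # arbitrary `required_state` queries for them.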
    def test_rooms_required_state_partial_state(self) -> None:
        """
        Test that partially-stated rooms are excluded unless `rooms.required_state`
        is lazy-loading room members.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
        _join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
        join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok)

        # Mark room2 as partial state
        self.get_success(
            mark_event_as_partial_state(self.hs, join_response2["event_id"], room_id2)
        )

        # Make the Sliding Sync request (NOT lazy-loading room members)
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Create, ""],
                    ],
                    "timeline_limit": 0,
                },
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Make sure the list includes room1 but room2 is excluded because it's still
        # partially-stated
        self.assertListEqual(
            list(response_body["lists"]["foo-list"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 1],
                    "room_ids": [room_id1],
                }
            ],
            response_body["lists"]["foo-list"],
        )

        # Make the Sliding Sync request (with lazy-loading room members)
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [
                        [EventTypes.Create, ""],
                        # Lazy-load room members
                        [EventTypes.Member, StateValues.LAZY],
                    ],
                    "timeline_limit": 0,
                },
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # The list should include both rooms now because we're lazy-loading room members
        self.assertListEqual(
            list(response_body["lists"]["foo-list"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 1],
                    "room_ids": [room_id2, room_id1],
                }
            ],
            response_body["lists"]["foo-list"],
        )
575
tests/rest/client/sliding_sync/test_rooms_timeline.py
Normal file
@@ -0,0 +1,575 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from typing import List, Optional

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.types import StreamToken, StrSequence
from synapse.util import Clock

from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase

logger = logging.getLogger(__name__)


class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase):
    """
    Test `rooms.timeline` in the Sliding Sync API.
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
        self.storage_controllers = hs.get_storage_controllers()
    def _assertListEqual(
        self,
        actual_items: StrSequence,
        expected_items: StrSequence,
        message: Optional[str] = None,
    ) -> None:
        """
        Like `self.assertListEqual(...)` but with an actually understandable diff message.
        """

        if actual_items == expected_items:
            return

        expected_lines: List[str] = []
        for expected_item in expected_items:
            is_expected_in_actual = expected_item in actual_items
            expected_lines.append(
                "{} {}".format(" " if is_expected_in_actual else "?", expected_item)
            )

        actual_lines: List[str] = []
        for actual_item in actual_items:
            is_actual_in_expected = actual_item in expected_items
            actual_lines.append(
                "{} {}".format("+" if is_actual_in_expected else " ", actual_item)
            )

        newline = "\n"
        expected_string = f"Expected items to be in actual ('?' = missing expected items):\n [\n{newline.join(expected_lines)}\n ]"
        actual_string = f"Actual ('+' = found expected items):\n [\n{newline.join(actual_lines)}\n ]"
        first_message = "Items must be in the same order as expected"
        diff_message = f"{first_message}\n{expected_string}\n{actual_string}"

        self.fail(f"{diff_message}\n{message}")
    def _assertTimelineEqual(
        self,
        *,
        room_id: str,
        actual_event_ids: List[str],
        expected_event_ids: List[str],
        message: Optional[str] = None,
    ) -> None:
        """
        Like `self.assertListEqual(...)` for event IDs in a room but will give a nicer
        output with context for what each event_id is (type, stream_ordering, content,
        etc).
        """
        if actual_event_ids == expected_event_ids:
            return

        event_id_set = set(actual_event_ids + expected_event_ids)
        events = self.get_success(self.store.get_events(event_id_set))

        def event_id_to_string(event_id: str) -> str:
            event = events.get(event_id)
            if event:
                state_key = event.get_state_key()
                state_key_piece = f", {state_key}" if state_key is not None else ""
                return (
                    f"({event.internal_metadata.stream_ordering: >2}, {event.internal_metadata.instance_name}) "
                    + f"{event.event_id} ({event.type}{state_key_piece}) {event.content.get('membership', '')}{event.content.get('body', '')}"
                )

            return f"{event_id} <event not found in room_id={room_id}>"

        self._assertListEqual(
            actual_items=[
                event_id_to_string(event_id) for event_id in actual_event_ids
            ],
            expected_items=[
                event_id_to_string(event_id) for event_id in expected_event_ids
            ],
            message=message,
        )
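    # A quick primer on the `rooms.timeline` response fields exercised below:
    # `limited` means the timeline was truncated at `timeline_limit` and there are
    # older events to fetch via `prev_batch`; `num_live` counts events that
    # happened after the `from_token` (so it's always 0 on an initial sync).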
    def test_rooms_limited_initial_sync(self) -> None:
        """
        Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit`
        on initial sync.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.send(room_id1, "activity1", tok=user2_tok)
        self.helper.send(room_id1, "activity2", tok=user2_tok)
        event_response3 = self.helper.send(room_id1, "activity3", tok=user2_tok)
        event_pos3 = self.get_success(
            self.store.get_position_for_event(event_response3["event_id"])
        )
        event_response4 = self.helper.send(room_id1, "activity4", tok=user2_tok)
        event_pos4 = self.get_success(
            self.store.get_position_for_event(event_response4["event_id"])
        )
        event_response5 = self.helper.send(room_id1, "activity5", tok=user2_tok)
        user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 3,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # We expect to saturate the `timeline_limit` (there are more than 3 messages in the room)
        self.assertEqual(
            response_body["rooms"][room_id1]["limited"],
            True,
            response_body["rooms"][room_id1],
        )
        # Check to make sure the latest events are returned
        self._assertTimelineEqual(
            room_id=room_id1,
            actual_event_ids=[
                event["event_id"]
                for event in response_body["rooms"][room_id1]["timeline"]
            ],
            expected_event_ids=[
                event_response4["event_id"],
                event_response5["event_id"],
                user1_join_response["event_id"],
            ],
            message=str(response_body["rooms"][room_id1]["timeline"]),
        )

        # Check to make sure the `prev_batch` points at the right place
        prev_batch_token = self.get_success(
            StreamToken.from_string(
                self.store, response_body["rooms"][room_id1]["prev_batch"]
            )
        )
        prev_batch_room_stream_token_serialized = self.get_success(
            prev_batch_token.room_key.to_string(self.store)
        )
        # If we use the `prev_batch` token to look backwards, we should see `event3`
        # next so make sure the token encompasses it
        self.assertEqual(
            event_pos3.persisted_after(prev_batch_token.room_key),
            False,
            f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be >= event_pos3={self.get_success(event_pos3.to_room_stream_token().to_string(self.store))}",
        )
        # If we use the `prev_batch` token to look backwards, we shouldn't see `event4`
        # anymore since it was just returned in this response.
        self.assertEqual(
            event_pos4.persisted_after(prev_batch_token.room_key),
            True,
            f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be < event_pos4={self.get_success(event_pos4.to_room_stream_token().to_string(self.store))}",
        )

        # With no `from_token` (initial sync), it's all historical since there is no
        # "live" range
        self.assertEqual(
            response_body["rooms"][room_id1]["num_live"],
            0,
            response_body["rooms"][room_id1],
        )
    def test_rooms_not_limited_initial_sync(self) -> None:
        """
        Test that we mark `rooms` as `limited=False` when there are no more events to
        paginate to.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.send(room_id1, "activity1", tok=user2_tok)
        self.helper.send(room_id1, "activity2", tok=user2_tok)
        self.helper.send(room_id1, "activity3", tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        # Make the Sliding Sync request
        timeline_limit = 100
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": timeline_limit,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # The timeline should be `limited=False` because we have all of the events (no
        # more to paginate to)
        self.assertEqual(
            response_body["rooms"][room_id1]["limited"],
            False,
            response_body["rooms"][room_id1],
        )
        expected_number_of_events = 9
        # We're just looking to make sure we got all of the events before hitting the `timeline_limit`
        self.assertEqual(
            len(response_body["rooms"][room_id1]["timeline"]),
            expected_number_of_events,
            response_body["rooms"][room_id1]["timeline"],
        )
        self.assertLessEqual(expected_number_of_events, timeline_limit)

        # With no `from_token` (initial sync), it's all historical since there is no
        # "live" token range.
        self.assertEqual(
            response_body["rooms"][room_id1]["num_live"],
            0,
            response_body["rooms"][room_id1],
        )
    def test_rooms_incremental_sync(self) -> None:
        """
        Test `rooms` data during an incremental sync after an initial sync.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)
        self.helper.send(room_id1, "activity before initial sync1", tok=user2_tok)

        # Make an initial Sliding Sync request to grab a token. This is also a sanity
        # check that we can go from initial to incremental sync.
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 3,
                }
            }
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Send some events but don't send enough to saturate the `timeline_limit`.
        # We want to later test that we only get the new events since the `next_pos`
        event_response2 = self.helper.send(room_id1, "activity after2", tok=user2_tok)
        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)

        # Make an incremental Sliding Sync request (what we're trying to test)
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # We only expect to see the new events since the last sync which isn't enough to
        # fill up the `timeline_limit`.
        self.assertEqual(
            response_body["rooms"][room_id1]["limited"],
            False,
            f'Our `timeline_limit` was {sync_body["lists"]["foo-list"]["timeline_limit"]} '
            + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
            + str(response_body["rooms"][room_id1]),
        )
        # Check to make sure the latest events are returned
        self._assertTimelineEqual(
            room_id=room_id1,
            actual_event_ids=[
                event["event_id"]
                for event in response_body["rooms"][room_id1]["timeline"]
            ],
            expected_event_ids=[
                event_response2["event_id"],
                event_response3["event_id"],
            ],
            message=str(response_body["rooms"][room_id1]["timeline"]),
        )

        # All events are "live"
        self.assertEqual(
            response_body["rooms"][room_id1]["num_live"],
            2,
            response_body["rooms"][room_id1],
        )
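    # A room counts as `newly_joined` when the join happened within the token range
    # of an incremental sync; for such rooms the timeline may reach back before the
    # `from_token` so the client gets some context for the room it just joined.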
    def test_rooms_newly_joined_incremental_sync(self) -> None:
        """
        Test that when we make an incremental sync with a `newly_joined` room, we are
        able to see some historical events before the `from_token`.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.send(room_id1, "activity before token1", tok=user2_tok)
        event_response2 = self.helper.send(
            room_id1, "activity before token2", tok=user2_tok
        )

        # The `timeline_limit` is set to 4 so we can at least see one historical event
        # before the `from_token`. We should see historical events because this is a
        # `newly_joined` room.
        timeline_limit = 4
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": timeline_limit,
                }
            }
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Join the room after the `from_token` which will make us consider this room as
        # `newly_joined`.
        user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)

        # Send some events but don't send enough to saturate the `timeline_limit`.
        # We want to later test that we only get the new events since the `next_pos`
        event_response3 = self.helper.send(
            room_id1, "activity after token3", tok=user2_tok
        )
        event_response4 = self.helper.send(
            room_id1, "activity after token4", tok=user2_tok
        )

        # Make an incremental Sliding Sync request (what we're trying to test)
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # We should see the new events and the rest should be filled with historical
        # events which will make us `limited=True` since there are more to paginate to.
        self.assertEqual(
            response_body["rooms"][room_id1]["limited"],
            True,
            f"Our `timeline_limit` was {timeline_limit} "
            + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
            + str(response_body["rooms"][room_id1]),
        )
        # Check to make sure that the "live" and historical events are returned
        self._assertTimelineEqual(
            room_id=room_id1,
            actual_event_ids=[
                event["event_id"]
                for event in response_body["rooms"][room_id1]["timeline"]
            ],
            expected_event_ids=[
                event_response2["event_id"],
                user1_join_response["event_id"],
                event_response3["event_id"],
                event_response4["event_id"],
            ],
            message=str(response_body["rooms"][room_id1]["timeline"]),
        )

        # Only events after the `from_token` are "live" (join, event3, event4)
        self.assertEqual(
            response_body["rooms"][room_id1]["num_live"],
            3,
            response_body["rooms"][room_id1],
        )
    def test_rooms_ban_initial_sync(self) -> None:
        """
        Test that `rooms` we are banned from in an initial sync only allow us to see
        timeline events up to the ban event.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.send(room_id1, "activity before1", tok=user2_tok)
        self.helper.send(room_id1, "activity before2", tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
        event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
        user1_ban_response = self.helper.ban(
            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
        )

        self.helper.send(room_id1, "activity after5", tok=user2_tok)
        self.helper.send(room_id1, "activity after6", tok=user2_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 3,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # We should see events before the ban but not after
        self._assertTimelineEqual(
            room_id=room_id1,
            actual_event_ids=[
                event["event_id"]
                for event in response_body["rooms"][room_id1]["timeline"]
            ],
            expected_event_ids=[
                event_response3["event_id"],
                event_response4["event_id"],
                user1_ban_response["event_id"],
            ],
            message=str(response_body["rooms"][room_id1]["timeline"]),
        )
        # No "live" events in an initial sync (no `from_token` to define the "live"
        # range)
        self.assertEqual(
            response_body["rooms"][room_id1]["num_live"],
            0,
            response_body["rooms"][room_id1],
        )
        # There are more events to paginate to
        self.assertEqual(
            response_body["rooms"][room_id1]["limited"],
            True,
            response_body["rooms"][room_id1],
        )
    def test_rooms_ban_incremental_sync1(self) -> None:
        """
        Test that `rooms` we are banned from during the next incremental sync only
        allow us to see timeline events up to the ban event.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.send(room_id1, "activity before1", tok=user2_tok)
        self.helper.send(room_id1, "activity before2", tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 4,
                }
            }
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
        event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
        # The ban is within the token range (between the `from_token` and the sliding
        # sync request)
        user1_ban_response = self.helper.ban(
            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
        )

        self.helper.send(room_id1, "activity after5", tok=user2_tok)
        self.helper.send(room_id1, "activity after6", tok=user2_tok)

        # Make the incremental Sliding Sync request
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # We should see events before the ban but not after
        self._assertTimelineEqual(
            room_id=room_id1,
            actual_event_ids=[
                event["event_id"]
                for event in response_body["rooms"][room_id1]["timeline"]
            ],
            expected_event_ids=[
                event_response3["event_id"],
                event_response4["event_id"],
                user1_ban_response["event_id"],
            ],
            message=str(response_body["rooms"][room_id1]["timeline"]),
        )
        # All events in this incremental sync are "live"
        self.assertEqual(
            response_body["rooms"][room_id1]["num_live"],
            3,
            response_body["rooms"][room_id1],
        )
        # There aren't any more events to paginate to in this range
        self.assertEqual(
            response_body["rooms"][room_id1]["limited"],
            False,
            response_body["rooms"][room_id1],
        )
    def test_rooms_ban_incremental_sync2(self) -> None:
        """
        Test that `rooms` we are banned from before the incremental sync don't return
        any events in the timeline.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.send(room_id1, "activity before1", tok=user2_tok)
        self.helper.join(room_id1, user1_id, tok=user1_tok)

        self.helper.send(room_id1, "activity after2", tok=user2_tok)
        # The ban is before we get our `from_token`
        self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)

        self.helper.send(room_id1, "activity after3", tok=user2_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 4,
                }
            }
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        self.helper.send(room_id1, "activity after4", tok=user2_tok)

        # Make the incremental Sliding Sync request
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # Nothing to see for this banned user in the room in the token range
        self.assertIsNone(response_body["rooms"].get(room_id1))
974
tests/rest/client/sliding_sync/test_sliding_sync.py
Normal file
@@ -0,0 +1,974 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple

from typing_extensions import assert_never

from twisted.test.proto_helpers import MemoryReactor

import synapse.rest.admin
from synapse.api.constants import (
    AccountDataTypes,
    EventContentFields,
    EventTypes,
    RoomTypes,
)
from synapse.events import EventBase
from synapse.rest.client import devices, login, receipts, room, sync
from synapse.server import HomeServer
from synapse.types import (
    JsonDict,
    RoomStreamToken,
    SlidingSyncStreamToken,
    StreamKeyType,
    StreamToken,
)
from synapse.util import Clock
from synapse.util.stringutils import random_string

from tests import unittest
from tests.server import TimedOutException

logger = logging.getLogger(__name__)
class SlidingSyncBase(unittest.HomeserverTestCase):
    """Base class for sliding sync test cases"""

    sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"

    def default_config(self) -> JsonDict:
        config = super().default_config()
        # Enable sliding sync
        config["experimental_features"] = {"msc3575_enabled": True}
        return config
    def do_sync(
        self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str
    ) -> Tuple[JsonDict, str]:
        """Do a sliding sync request with given body.

        Asserts the request was successful.

        Args:
            sync_body: The full request body to use
            since: Optional since token
            tok: Access token to use

        Returns:
            A tuple of the response body and the `pos` field.
        """

        sync_path = self.sync_endpoint
        if since:
            sync_path += f"?pos={since}"

        channel = self.make_request(
            method="POST",
            path=sync_path,
            content=sync_body,
            access_token=tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        return channel.json_body, channel.json_body["pos"]
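    # Typical usage in the tests below (illustrative sketch only; `sync_body` is
    # whatever request body the test builds):
    #
    #     response_body, from_token = self.do_sync(sync_body, tok=user_tok)
    #     ... do something that generates new data ...
    #     response_body, _ = self.do_sync(sync_body, since=from_token, tok=user_tok)
    #
    # i.e. the `pos` returned by one request is fed back as `since`/`?pos=` on the
    # next request to get an incremental sync.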
    def _assertRequiredStateIncludes(
        self,
        actual_required_state: Any,
        expected_state_events: Iterable[EventBase],
        exact: bool = False,
    ) -> None:
        """
        Wrapper around `assertIncludes` to give slightly better looking diff error
        messages that include some context "$event_id (type, state_key)".

        Args:
            actual_required_state: The "required_state" of a room from a Sliding Sync
                request response.
            expected_state_events: The expected state events to be included in the
                `actual_required_state`.
            exact: Whether the actual state should be exactly equal to the expected
                state (no extras).
        """

        assert isinstance(actual_required_state, list)
        for event in actual_required_state:
            assert isinstance(event, dict)

        self.assertIncludes(
            {
                f'{event["event_id"]} ("{event["type"]}", "{event["state_key"]}")'
                for event in actual_required_state
            },
            {
                f'{event.event_id} ("{event.type}", "{event.state_key}")'
                for event in expected_state_events
            },
            exact=exact,
            # Message to help understand the diff in context
            message=str(actual_required_state),
        )
    def _bump_notifier_wait_for_events(
        self,
        user_id: str,
        wake_stream_key: Literal[
            StreamKeyType.ACCOUNT_DATA,
            StreamKeyType.PRESENCE,
        ],
    ) -> None:
        """
        Wake up a `notifier.wait_for_events(user_id)` call without affecting the Sliding
        Sync results.

        Args:
            user_id: The user ID to wake up the notifier for
            wake_stream_key: The stream key to wake up. This will create an actual new
                entity in that stream so it's best to choose one that won't affect the
                Sliding Sync results you're testing for. In other words, if you're
                testing account data, choose `StreamKeyType.PRESENCE` instead. We
                support two possible stream keys because you're probably testing one or
                the other so one is always a "safe" option.
        """
        # We're expecting some new activity from this point onwards
        from_token = self.hs.get_event_sources().get_current_token()

        triggered_notifier_wait_for_events = False

        async def _on_new_activity(
            before_token: StreamToken, after_token: StreamToken
        ) -> bool:
            nonlocal triggered_notifier_wait_for_events
            triggered_notifier_wait_for_events = True
            return True

        notifier = self.hs.get_notifier()

        # Listen for some new activity for the user. We're just trying to confirm that
        # our bump below actually does what we think it does (triggers new activity for
        # the user).
        result_awaitable = notifier.wait_for_events(
            user_id,
            1000,
            _on_new_activity,
            from_token=from_token,
        )

        # Update the account data or presence so that `notifier.wait_for_events(...)`
        # wakes up. We chose these two options because they're least likely to show up
        # in the Sliding Sync response so it won't affect whether we have results.
        if wake_stream_key == StreamKeyType.ACCOUNT_DATA:
            self.get_success(
                self.hs.get_account_data_handler().add_account_data_for_user(
                    user_id,
                    "org.matrix.foobarbaz",
                    {"foo": "bar"},
                )
            )
        elif wake_stream_key == StreamKeyType.PRESENCE:
            sending_user_id = self.register_user(
                "user_bump_notifier_wait_for_events_" + random_string(10), "pass"
            )
            sending_user_tok = self.login(sending_user_id, "pass")
            test_msg = {"foo": "bar"}
            chan = self.make_request(
                "PUT",
                "/_matrix/client/r0/sendToDevice/m.test/1234",
                content={"messages": {user_id: {"d1": test_msg}}},
                access_token=sending_user_tok,
            )
            self.assertEqual(chan.code, 200, chan.result)
        else:
            assert_never(wake_stream_key)

        # Wait for our notifier result
        self.get_success(result_awaitable)

        if not triggered_notifier_wait_for_events:
            raise AssertionError(
                "Expected `notifier.wait_for_events(...)` to be triggered"
            )
class SlidingSyncTestCase(SlidingSyncBase):
    """
    Tests regarding the MSC3575 Sliding Sync `/sync` endpoint.

    Please put tests in more specific test files if applicable. This test class is meant
    for generic behavior of the endpoint.
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
        sync.register_servlets,
        devices.register_servlets,
        receipts.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
        self.event_sources = hs.get_event_sources()
        self.storage_controllers = hs.get_storage_controllers()
        self.account_data_handler = hs.get_account_data_handler()
    def _add_new_dm_to_global_account_data(
        self, source_user_id: str, target_user_id: str, target_room_id: str
    ) -> None:
        """
        Helper to handle inserting a new DM for the source user into global account data
        (handles all of the list merging).

        Args:
            source_user_id: The user ID of the DM mapping we're going to update
            target_user_id: User ID of the person the DM is with
            target_room_id: Room ID of the DM
        """

        # Get the current DM map
        existing_dm_map = self.get_success(
            self.store.get_global_account_data_by_type_for_user(
                source_user_id, AccountDataTypes.DIRECT
            )
        )
        # Scrutinize the account data since it has no concrete type. We're just copying
        # everything into a known type. It should be a mapping from user ID to a list of
        # room IDs. Ignore anything else.
        new_dm_map: Dict[str, List[str]] = {}
        if isinstance(existing_dm_map, dict):
            for user_id, room_ids in existing_dm_map.items():
                if isinstance(user_id, str) and isinstance(room_ids, list):
                    for room_id in room_ids:
                        if isinstance(room_id, str):
                            new_dm_map[user_id] = new_dm_map.get(user_id, []) + [
                                room_id
                            ]

        # Add the new DM to the map
        new_dm_map[target_user_id] = new_dm_map.get(target_user_id, []) + [
            target_room_id
        ]
        # Save the DM map to global account data
        self.get_success(
            self.store.add_account_data_for_user(
                source_user_id,
                AccountDataTypes.DIRECT,
                new_dm_map,
            )
        )
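    # Note: DM-ness is not a property of the room itself. By convention (the
    # `m.direct` global account data event), each user keeps their own map of
    # `{other_user_id: [room_id, ...]}`, which is why the helper above has to do
    # the list merging and why `_create_dm_room` updates both users' account data.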
    def _create_dm_room(
        self,
        inviter_user_id: str,
        inviter_tok: str,
        invitee_user_id: str,
        invitee_tok: str,
        should_join_room: bool = True,
    ) -> str:
        """
        Helper to create a DM room as the "inviter" and invite the "invitee" user to the
        room. The "invitee" user also will join the room. The `m.direct` account data
        will be set for both users.
        """

        # Create a room and send an invite to the other user
        room_id = self.helper.create_room_as(
            inviter_user_id,
            is_public=False,
            tok=inviter_tok,
        )
        self.helper.invite(
            room_id,
            src=inviter_user_id,
            targ=invitee_user_id,
            tok=inviter_tok,
            extra_data={"is_direct": True},
        )
        if should_join_room:
            # Person that was invited joins the room
            self.helper.join(room_id, invitee_user_id, tok=invitee_tok)

        # Mimic the client setting the room as a direct message in the global account
        # data for both users.
        self._add_new_dm_to_global_account_data(
            invitee_user_id, inviter_user_id, room_id
        )
        self._add_new_dm_to_global_account_data(
            inviter_user_id, invitee_user_id, room_id
        )

        return room_id
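    # The `lists` part of the response describes each requested list as a series of
    # `ops`. On an initial sync this is a single `SYNC` op covering the requested
    # `range` with the ordered `room_ids` that fall inside it, which is what the
    # list assertions below check against.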
    def test_sync_list(self) -> None:
        """
        Test that room IDs show up in the Sliding Sync `lists`
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 99]],
                    "required_state": [],
                    "timeline_limit": 1,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Make sure it has the foo-list we requested
        self.assertListEqual(
            list(response_body["lists"].keys()),
            ["foo-list"],
            response_body["lists"].keys(),
        )

        # Make sure the list includes the room we are joined to
        self.assertListEqual(
            list(response_body["lists"]["foo-list"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 99],
                    "room_ids": [room_id],
                }
            ],
            response_body["lists"]["foo-list"],
        )
    def test_wait_for_sync_token(self) -> None:
        """
        Test that the worker will wait until it catches up to the given token
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        # Create a future token that will cause us to wait. Since we never send a new
        # event to reach that future stream_ordering, the worker will wait until the
        # full timeout.
        stream_id_gen = self.store.get_events_stream_id_generator()
        stream_id = self.get_success(stream_id_gen.get_next().__aenter__())
        current_token = self.event_sources.get_current_token()
        future_position_token = current_token.copy_and_replace(
            StreamKeyType.ROOM,
            RoomStreamToken(stream=stream_id),
        )

        future_position_token_serialized = self.get_success(
            SlidingSyncStreamToken(future_position_token, 0).to_string(self.store)
        )

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 99]],
                    "required_state": [],
                    "timeline_limit": 1,
                }
            }
        }
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?pos={future_position_token_serialized}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 10 seconds to make `notifier.wait_for_stream_token(from_token)`
        # time out
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=9900)
        channel.await_result(timeout_ms=200)
        self.assertEqual(channel.code, 200, channel.json_body)

        # We expect the next `pos` in the result to be the same as what we requested
        # with because we weren't able to find anything new yet.
        self.assertEqual(channel.json_body["pos"], future_position_token_serialized)
    def test_wait_for_new_data(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive.

        (Only applies to incremental syncs with a `timeout` specified)
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id, user1_id, tok=user1_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 0]],
                    "required_state": [],
                    "timeline_limit": 1,
                }
            }
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Bump the room with new events to trigger new results
        event_response1 = self.helper.send(
            room_id, "new activity in room", tok=user1_tok
        )
        # Should respond before the 10 second timeout
        channel.await_result(timeout_ms=3000)
        self.assertEqual(channel.code, 200, channel.json_body)

        # Check to make sure the new event is returned
        self.assertEqual(
            [
                event["event_id"]
                for event in channel.json_body["rooms"][room_id]["timeline"]
            ],
            [
                event_response1["event_id"],
            ],
            channel.json_body["rooms"][room_id]["timeline"],
        )

    def test_wait_for_new_data_timeout(self) -> None:
        """
        Test to make sure that the Sliding Sync request waits for new data to arrive,
        but no data ever arrives, so we time out. We're also making sure that the
        default data doesn't trigger a false-positive for new data.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id, user1_id, tok=user1_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 0]],
                    "required_state": [],
                    "timeline_limit": 1,
                }
            }
        }
        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
            content=sync_body,
            access_token=user1_tok,
            await_result=False,
        )
        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=5000)
        # Wake up `notifier.wait_for_events(...)`, which will cause us to test
        # `SlidingSyncResult.__bool__` for new results.
        self._bump_notifier_wait_for_events(
            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
        )
        # Block for a little bit more to ensure we don't see any new results.
        with self.assertRaises(TimedOutException):
            channel.await_result(timeout_ms=4000)
        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
        # 5000 + 4000 + 1200 > 10000)
        channel.await_result(timeout_ms=1200)
        self.assertEqual(channel.code, 200, channel.json_body)

        # There should be no room sent down.
        self.assertFalse(channel.json_body["rooms"])

    def test_filter_list(self) -> None:
        """
        Test that filters apply to `lists`
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        # Create a DM room
        joined_dm_room_id = self._create_dm_room(
            inviter_user_id=user1_id,
            inviter_tok=user1_tok,
            invitee_user_id=user2_id,
            invitee_tok=user2_tok,
            should_join_room=True,
        )
        invited_dm_room_id = self._create_dm_room(
            inviter_user_id=user1_id,
            inviter_tok=user1_tok,
            invitee_user_id=user2_id,
            invitee_tok=user2_tok,
            should_join_room=False,
        )

        # Create a normal room
        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id, user1_id, tok=user1_tok)

        # Create a room that user1 is invited to
        invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                # Absence of filters does not imply "False" values
                "all": {
                    "ranges": [[0, 99]],
                    "required_state": [],
                    "timeline_limit": 1,
                    "filters": {},
                },
                # Test single truthy filter
                "dms": {
                    "ranges": [[0, 99]],
                    "required_state": [],
                    "timeline_limit": 1,
                    "filters": {"is_dm": True},
                },
                # Test single falsy filter
                "non-dms": {
                    "ranges": [[0, 99]],
                    "required_state": [],
                    "timeline_limit": 1,
                    "filters": {"is_dm": False},
                },
                # Test how multiple filters should stack (AND'd together)
                "room-invites": {
                    "ranges": [[0, 99]],
                    "required_state": [],
                    "timeline_limit": 1,
                    "filters": {"is_dm": False, "is_invite": True},
                },
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Make sure it has the lists we requested
        self.assertListEqual(
            list(response_body["lists"].keys()),
            ["all", "dms", "non-dms", "room-invites"],
            response_body["lists"].keys(),
        )

        # Make sure the lists have the correct rooms
        self.assertListEqual(
            list(response_body["lists"]["all"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 99],
                    "room_ids": [
                        invite_room_id,
                        room_id,
                        invited_dm_room_id,
                        joined_dm_room_id,
                    ],
                }
            ],
            list(response_body["lists"]["all"]),
        )
        self.assertListEqual(
            list(response_body["lists"]["dms"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 99],
                    "room_ids": [invited_dm_room_id, joined_dm_room_id],
                }
            ],
            list(response_body["lists"]["dms"]),
        )
        self.assertListEqual(
            list(response_body["lists"]["non-dms"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 99],
                    "room_ids": [invite_room_id, room_id],
                }
            ],
            list(response_body["lists"]["non-dms"]),
        )
        self.assertListEqual(
            list(response_body["lists"]["room-invites"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 99],
                    "room_ids": [invite_room_id],
                }
            ],
            list(response_body["lists"]["room-invites"]),
        )

        # Ensure DMs are correctly marked
        self.assertDictEqual(
            {
                room_id: room.get("is_dm")
                for room_id, room in response_body["rooms"].items()
            },
            {
                invite_room_id: None,
                room_id: None,
                invited_dm_room_id: True,
                joined_dm_room_id: True,
            },
        )

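    # Sketch (my reading of the assertions above, not code from the source): each
    # list's `filters` are AND'ed together, conceptually something like:
    #
    #     def room_matches(room_info: dict, filters: dict) -> bool:
    #         # e.g. {"is_dm": False, "is_invite": True} must all hold; an absent
    #         # flag counts as falsy, so `is_dm: False` matches non-DM rooms too
    #         return all(
    #             bool(room_info.get(key)) == wanted for key, wanted in filters.items()
    #         )
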
    def test_filter_regardless_of_membership_server_left_room(self) -> None:
        """
        Test that filters apply to rooms regardless of membership. We're also
        compounding the problem by having all of the local users leave the room,
        causing our server to leave the room.

        We want to make sure that if someone is filtering rooms, and leaves, you still
        get that final update down sync that you left.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        # Create a normal room
        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
        self.helper.join(room_id, user1_id, tok=user1_tok)

        # Create an encrypted space room
        space_room_id = self.helper.create_room_as(
            user2_id,
            tok=user2_tok,
            extra_content={
                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
            },
        )
        self.helper.send_state(
            space_room_id,
            EventTypes.RoomEncryption,
            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
            tok=user2_tok,
        )
        self.helper.join(space_room_id, user1_id, tok=user1_tok)

        # Make an initial Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint,
            {
                "lists": {
                    "all-list": {
                        "ranges": [[0, 99]],
                        "required_state": [],
                        "timeline_limit": 0,
                        "filters": {},
                    },
                    "foo-list": {
                        "ranges": [[0, 99]],
                        "required_state": [],
                        "timeline_limit": 1,
                        "filters": {
                            "is_encrypted": True,
                            "room_types": [RoomTypes.SPACE],
                        },
                    },
                }
            },
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)
        from_token = channel.json_body["pos"]

        # Make sure the response has the lists we requested
        self.assertListEqual(
            list(channel.json_body["lists"].keys()),
            ["all-list", "foo-list"],
            channel.json_body["lists"].keys(),
        )

        # Make sure the lists have the correct rooms
        self.assertListEqual(
            list(channel.json_body["lists"]["all-list"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 99],
                    "room_ids": [space_room_id, room_id],
                }
            ],
        )
        self.assertListEqual(
            list(channel.json_body["lists"]["foo-list"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 99],
                    "room_ids": [space_room_id],
                }
            ],
        )

        # Everyone leaves the encrypted space room
        self.helper.leave(space_room_id, user1_id, tok=user1_tok)
        self.helper.leave(space_room_id, user2_id, tok=user2_tok)

        # Make an incremental Sliding Sync request
        channel = self.make_request(
            "POST",
            self.sync_endpoint + f"?pos={from_token}",
            {
                "lists": {
                    "all-list": {
                        "ranges": [[0, 99]],
                        "required_state": [],
                        "timeline_limit": 0,
                        "filters": {},
                    },
                    "foo-list": {
                        "ranges": [[0, 99]],
                        "required_state": [],
                        "timeline_limit": 1,
                        "filters": {
                            "is_encrypted": True,
                            "room_types": [RoomTypes.SPACE],
                        },
                    },
                }
            },
            access_token=user1_tok,
        )
        self.assertEqual(channel.code, 200, channel.json_body)

        # Make sure the lists have the correct rooms even though we `newly_left`
        self.assertListEqual(
            list(channel.json_body["lists"]["all-list"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 99],
                    "room_ids": [space_room_id, room_id],
                }
            ],
        )
        self.assertListEqual(
            list(channel.json_body["lists"]["foo-list"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 99],
                    "room_ids": [space_room_id],
                }
            ],
        )

    def test_sort_list(self) -> None:
        """
        Test that the `lists` are sorted by `stream_ordering`
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
        room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)

        # Activity that will order the rooms
        self.helper.send(room_id3, "activity in room3", tok=user1_tok)
        self.helper.send(room_id1, "activity in room1", tok=user1_tok)
        self.helper.send(room_id2, "activity in room2", tok=user1_tok)

        # Make the Sliding Sync request
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 99]],
                    "required_state": [],
                    "timeline_limit": 1,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Make sure it has the foo-list we requested
        self.assertListEqual(
            list(response_body["lists"].keys()),
            ["foo-list"],
            response_body["lists"].keys(),
        )

        # Make sure the list is sorted in the way we expect
        self.assertListEqual(
            list(response_body["lists"]["foo-list"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 99],
                    "room_ids": [room_id2, room_id1, room_id3],
                }
            ],
            response_body["lists"]["foo-list"],
        )

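    # Conceptually (a sketch of the ordering asserted above, not the handler's
    # actual code): rooms are sorted by the `stream_ordering` of their latest
    # activity, most recent first:
    #
    #     sorted_rooms = sorted(rooms, key=lambda r: r.stream_ordering, reverse=True)
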
    def test_sliced_windows(self) -> None:
        """
        Test that the `lists` `ranges` are sliced correctly. Both sides of each range
        are inclusive.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        _room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
        room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)

        # Make the Sliding Sync request for a single room
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 0]],
                    "required_state": [],
                    "timeline_limit": 1,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Make sure it has the foo-list we requested
        self.assertListEqual(
            list(response_body["lists"].keys()),
            ["foo-list"],
            response_body["lists"].keys(),
        )
        # Make sure the list is sorted in the way we expect
        self.assertListEqual(
            list(response_body["lists"]["foo-list"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 0],
                    "room_ids": [room_id3],
                }
            ],
            response_body["lists"]["foo-list"],
        )

        # Make the Sliding Sync request for the first two rooms
        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 1,
                }
            }
        }
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)

        # Make sure it has the foo-list we requested
        self.assertListEqual(
            list(response_body["lists"].keys()),
            ["foo-list"],
            response_body["lists"].keys(),
        )
        # Make sure the list is sorted in the way we expect
        self.assertListEqual(
            list(response_body["lists"]["foo-list"]["ops"]),
            [
                {
                    "op": "SYNC",
                    "range": [0, 1],
                    "room_ids": [room_id3, room_id2],
                }
            ],
            response_body["lists"]["foo-list"],
        )

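    # Sketch of the inclusive window semantics exercised above (my illustration,
    # not source code): a requested range `[start, end]` selects
    #
    #     rooms_in_window = sorted_room_ids[start : end + 1]
    #
    # so `[0, 0]` yields one room and `[0, 1]` yields two.
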
    def test_rooms_with_no_updates_do_not_come_down_incremental_sync(self) -> None:
        """
        Test that rooms with no updates are not returned in subsequent incremental
        syncs.
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                }
            }
        }

        _, from_token = self.do_sync(sync_body, tok=user1_tok)

        # Make the incremental Sliding Sync request
        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

        # Nothing has happened in the room, so the room should not come down
        # /sync.
        self.assertIsNone(response_body["rooms"].get(room_id1))

    def test_empty_initial_room_comes_down_sync(self) -> None:
        """
        Test that rooms come down /sync even with empty required state and
        timeline limit in initial sync.
        """

        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")

        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)

        sync_body = {
            "lists": {
                "foo-list": {
                    "ranges": [[0, 1]],
                    "required_state": [],
                    "timeline_limit": 0,
                }
            }
        }

        # Make the Sliding Sync request
        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
@@ -43,6 +43,7 @@ from twisted.python.failure import Failure
 from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor
 from twisted.web.http_headers import Headers
 from twisted.web.iweb import UNKNOWN_LENGTH, IResponse
+from twisted.web.resource import Resource

 from synapse.api.errors import HttpResponseException
 from synapse.api.ratelimiting import Ratelimiter
@@ -1809,13 +1810,19 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
        )
        assert channel.code == 200

    @override_config(
        {
            "remote_media_download_burst_count": "87M",
        }
    )
    @patch(
        "synapse.http.matrixfederationclient.read_multipart_response",
        read_multipart_response_30MiB,
    )
-    def test_download_ratelimit_max_size_sub(self) -> None:
+    def test_download_ratelimit_unknown_length(self) -> None:
        """
-        Test that if no content-length is provided, the default max size is applied instead
+        Test that if no content-length is provided, ratelimiting is still applied after
+        media is downloaded and the length is known
        """

        # mock out actually sending the request
@@ -1831,8 +1838,9 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):

        self.client._send_request = _send_request  # type: ignore

-        # ten requests should go through using the max size (500MB/50MB)
-        for i in range(10):
+        # the first 3 will go through (note that the 3rd request technically violates the rate
+        # limit, but since ratelimiting is applied *after* download it goes through; the next one fails)
+        for i in range(3):
            channel2 = self.make_request(
                "GET",
                f"/_matrix/client/v1/media/download/remote.org/abc{i}",
@@ -1841,7 +1849,7 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
            )
            assert channel2.code == 200

-        # eleventh will hit ratelimit
+        # the 4th will hit the ratelimit
        channel3 = self.make_request(
            "GET",
            "/_matrix/client/v1/media/download/remote.org/abcd",
@@ -1850,6 +1858,39 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
        )
        assert channel3.code == 429

@override_config({"max_upload_size": "29M"})
|
||||
@patch(
|
||||
"synapse.http.matrixfederationclient.read_multipart_response",
|
||||
read_multipart_response_30MiB,
|
||||
)
|
||||
def test_max_download_respected(self) -> None:
|
||||
"""
|
||||
Test that the max download size is enforced - note that max download size is determined
|
||||
by the max_upload_size
|
||||
"""
|
||||
|
||||
# mock out actually sending the request, returns a 30MiB response
|
||||
async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
|
||||
resp = MagicMock(spec=IResponse)
|
||||
resp.code = 200
|
||||
resp.length = 31457280
|
||||
resp.headers = Headers(
|
||||
{"Content-Type": ["multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p"]}
|
||||
)
|
||||
resp.phrase = b"OK"
|
||||
return resp
|
||||
|
||||
self.client._send_request = _send_request # type: ignore
|
||||
|
||||
channel = self.make_request(
|
||||
"GET",
|
||||
"/_matrix/client/v1/media/download/remote.org/abcd",
|
||||
shorthand=False,
|
||||
access_token=self.tok,
|
||||
)
|
||||
assert channel.code == 502
|
||||
assert channel.json_body["errcode"] == "M_TOO_LARGE"
|
||||
|
||||
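    # For context (my arithmetic, not from the source): the mocked response
    # advertises 31457280 bytes = 30 MiB, which exceeds the configured 29M
    # max_upload_size cap, hence the 502 with M_TOO_LARGE.
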
    def test_file_download(self) -> None:
        content = io.BytesIO(b"file_to_stream")
        content_uri = self.get_success(
@@ -2426,3 +2467,211 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase):
                server_name=None,
            )
        )


configs = [
    {"extra_config": {"dynamic_thumbnails": True}},
    {"extra_config": {"dynamic_thumbnails": False}},
]


@parameterized_class(configs)
class AuthenticatedMediaTestCase(unittest.HomeserverTestCase):
    extra_config: Dict[str, Any]
    servlets = [
        media.register_servlets,
        login.register_servlets,
        admin.register_servlets,
    ]

    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
        config = self.default_config()

        self.clock = clock
        self.storage_path = self.mktemp()
        self.media_store_path = self.mktemp()
        os.mkdir(self.storage_path)
        os.mkdir(self.media_store_path)
        config["media_store_path"] = self.media_store_path
        config["enable_authenticated_media"] = True

        provider_config = {
            "module": "synapse.media.storage_provider.FileStorageProviderBackend",
            "store_local": True,
            "store_synchronous": False,
            "store_remote": True,
            "config": {"directory": self.storage_path},
        }

        config["media_storage_providers"] = [provider_config]
        config.update(self.extra_config)

        return self.setup_test_homeserver(config=config)

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.repo = hs.get_media_repository()
        self.client = hs.get_federation_http_client()
        self.store = hs.get_datastores().main
        self.user = self.register_user("user", "pass")
        self.tok = self.login("user", "pass")

    def create_resource_dict(self) -> Dict[str, Resource]:
        resources = super().create_resource_dict()
        resources["/_matrix/media"] = self.hs.get_media_repository_resource()
        return resources

    def test_authenticated_media(self) -> None:
        # upload some local media with authentication on
        channel = self.make_request(
            "POST",
            "_matrix/media/v3/upload?filename=test_png_upload",
            SMALL_PNG,
            self.tok,
            shorthand=False,
            content_type=b"image/png",
            custom_headers=[("Content-Length", str(67))],
        )
        self.assertEqual(channel.code, 200)
        res = channel.json_body.get("content_uri")
        assert res is not None
        uri = res.split("mxc://")[1]

        # request media over the authenticated endpoint, should be found
        channel2 = self.make_request(
            "GET",
            f"_matrix/client/v1/media/download/{uri}",
            access_token=self.tok,
            shorthand=False,
        )
        self.assertEqual(channel2.code, 200)

        # request the same media over the unauthenticated endpoint, should 404
        channel3 = self.make_request(
            "GET", f"_matrix/media/v3/download/{uri}", shorthand=False
        )
        self.assertEqual(channel3.code, 404)

        # check thumbnails as well
        params = "?width=32&height=32&method=crop"
        channel4 = self.make_request(
            "GET",
            f"/_matrix/client/v1/media/thumbnail/{uri}{params}",
            shorthand=False,
            access_token=self.tok,
        )
        self.assertEqual(channel4.code, 200)

        params = "?width=32&height=32&method=crop"
        channel5 = self.make_request(
            "GET",
            f"/_matrix/media/r0/thumbnail/{uri}{params}",
            shorthand=False,
            access_token=self.tok,
        )
        self.assertEqual(channel5.code, 404)

        # Inject a piece of remote media.
        file_id = "abcdefg12345"
        file_info = FileInfo(server_name="lonelyIsland", file_id=file_id)

        media_storage = self.hs.get_media_repository().media_storage

        ctx = media_storage.store_into_file(file_info)
        (f, fname) = self.get_success(ctx.__aenter__())
        f.write(SMALL_PNG)
        self.get_success(ctx.__aexit__(None, None, None))

        # we write the authenticated status when storing media, so this should pick up
        # the config and authenticate the media
        self.get_success(
            self.store.store_cached_remote_media(
                origin="lonelyIsland",
                media_id="52",
                media_type="image/png",
                media_length=1,
                time_now_ms=self.clock.time_msec(),
                upload_name="remote_test.png",
                filesystem_id=file_id,
            )
        )

        # ensure we have thumbnails for the non-dynamic code path
        if self.extra_config == {"dynamic_thumbnails": False}:
            self.get_success(
                self.repo._generate_thumbnails(
                    "lonelyIsland", "52", file_id, "image/png"
                )
            )

        channel6 = self.make_request(
            "GET",
            "_matrix/client/v1/media/download/lonelyIsland/52",
            access_token=self.tok,
            shorthand=False,
        )
        self.assertEqual(channel6.code, 200)

        channel7 = self.make_request(
            "GET", f"_matrix/media/v3/download/{uri}", shorthand=False
        )
        self.assertEqual(channel7.code, 404)

        params = "?width=32&height=32&method=crop"
        channel8 = self.make_request(
            "GET",
            f"/_matrix/client/v1/media/thumbnail/lonelyIsland/52{params}",
            shorthand=False,
            access_token=self.tok,
        )
        self.assertEqual(channel8.code, 200)

        channel9 = self.make_request(
            "GET",
            f"/_matrix/media/r0/thumbnail/lonelyIsland/52{params}",
            shorthand=False,
            access_token=self.tok,
        )
        self.assertEqual(channel9.code, 404)

        # Inject a piece of local media that isn't authenticated
        file_id = "abcdefg123456"
        file_info = FileInfo(None, file_id=file_id)

        ctx = media_storage.store_into_file(file_info)
        (f, fname) = self.get_success(ctx.__aenter__())
        f.write(SMALL_PNG)
        self.get_success(ctx.__aexit__(None, None, None))

        self.get_success(
            self.store.db_pool.simple_insert(
                "local_media_repository",
                {
                    "media_id": "abcdefg123456",
                    "media_type": "image/png",
                    "created_ts": self.clock.time_msec(),
                    "upload_name": "test_local",
                    "media_length": 1,
                    "user_id": "someone",
                    "url_cache": None,
                    "authenticated": False,
                },
                desc="store_local_media",
            )
        )

        # check that unauthenticated media is still available over both endpoints
        channel9 = self.make_request(
            "GET",
            "/_matrix/client/v1/media/download/test/abcdefg123456",
            shorthand=False,
            access_token=self.tok,
        )
        self.assertEqual(channel9.code, 200)

        channel10 = self.make_request(
            "GET",
            "/_matrix/media/r0/download/test/abcdefg123456",
            shorthand=False,
            access_token=self.tok,
        )
        self.assertEqual(channel10.code, 200)

File diff suppressed because it is too large
@@ -307,10 +307,6 @@ class FakeChannel:
        self._reactor.run()

        while not self.is_finished():
-            # If there's a producer, tell it to resume producing so we get content
-            if self._producer:
-                self._producer.resumeProducing()
-
            if self._reactor.seconds() > end_time:
                raise TimedOutException("Timed out waiting for request to finish.")

@@ -19,20 +19,28 @@
# [This file includes modifications made by New Vector Limited]
#
#
import logging
from typing import List, Optional, Tuple, cast

from twisted.test.proto_helpers import MemoryReactor

-from synapse.api.constants import Membership
+from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.room_versions import RoomVersions
+from synapse.rest import admin
from synapse.rest.admin import register_servlets_for_client_rest_resource
-from synapse.rest.client import login, room
+from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
+from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
+from synapse.storage.roommember import MemberSummary
from synapse.types import UserID, create_requester
from synapse.util import Clock

from tests import unittest
from tests.server import TestHomeServer
from tests.test_utils import event_injection
+from tests.unittest import skip_unless

logger = logging.getLogger(__name__)


class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
@@ -240,6 +248,397 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
        )


class RoomSummaryTestCase(unittest.HomeserverTestCase):
    """
    Test `/sync` room summary related logic like `get_room_summary(...)` and
    `extract_heroes_from_room_summary(...)`
    """

    servlets = [
        admin.register_servlets,
        knock.register_servlets,
        login.register_servlets,
        room.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.sliding_sync_handler = self.hs.get_sliding_sync_handler()
        self.store = self.hs.get_datastores().main

    def _assert_member_summary(
        self,
        actual_member_summary: MemberSummary,
        expected_member_list: List[str],
        *,
        expected_member_count: Optional[int] = None,
    ) -> None:
        """
        Assert that the `MemberSummary` object has the expected members.
        """
        self.assertListEqual(
            [
                user_id
                for user_id, _membership_event_id in actual_member_summary.members
            ],
            expected_member_list,
        )
        self.assertEqual(
            actual_member_summary.count,
            (
                expected_member_count
                if expected_member_count is not None
                else len(expected_member_list)
            ),
        )

    def test_get_room_summary_membership(self) -> None:
        """
        Test that `get_room_summary(...)` gets every kind of membership when there
        aren't that many members in the room.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        _user3_tok = self.login(user3_id, "pass")
        user4_id = self.register_user("user4", "pass")
        user4_tok = self.login(user4_id, "pass")
        user5_id = self.register_user("user5", "pass")
        user5_tok = self.login(user5_id, "pass")

        # Setup a room (user1 is the creator and is joined to the room)
        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)

        # User2 is banned
        self.helper.join(room_id, user2_id, tok=user2_tok)
        self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)

        # User3 is invited by user1
        self.helper.invite(room_id, targ=user3_id, tok=user1_tok)

        # User4 leaves
        self.helper.join(room_id, user4_id, tok=user4_tok)
        self.helper.leave(room_id, user4_id, tok=user4_tok)

        # User5 joins
        self.helper.join(room_id, user5_id, tok=user5_tok)

        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
        empty_ms = MemberSummary([], 0)

        self._assert_member_summary(
            room_membership_summary.get(Membership.JOIN, empty_ms),
            [user1_id, user5_id],
        )
        self._assert_member_summary(
            room_membership_summary.get(Membership.INVITE, empty_ms), [user3_id]
        )
        self._assert_member_summary(
            room_membership_summary.get(Membership.LEAVE, empty_ms), [user4_id]
        )
        self._assert_member_summary(
            room_membership_summary.get(Membership.BAN, empty_ms), [user2_id]
        )
        self._assert_member_summary(
            room_membership_summary.get(Membership.KNOCK, empty_ms),
            [
                # No one knocked
            ],
        )

    def test_get_room_summary_membership_order(self) -> None:
        """
        Test that `get_room_summary(...)` stacks our limit of 6 in this order: joins ->
        invites -> leave -> everything else (bans/knocks)
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        _user3_tok = self.login(user3_id, "pass")
        user4_id = self.register_user("user4", "pass")
        user4_tok = self.login(user4_id, "pass")
        user5_id = self.register_user("user5", "pass")
        user5_tok = self.login(user5_id, "pass")
        user6_id = self.register_user("user6", "pass")
        user6_tok = self.login(user6_id, "pass")
        user7_id = self.register_user("user7", "pass")
        user7_tok = self.login(user7_id, "pass")

        # Setup the room (user1 is the creator and is joined to the room)
        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)

        # We expect the order to be joins -> invites -> leave -> bans so setup the users
        # *NOT* in that same order to make sure we're actually sorting them.

        # User2 is banned
        self.helper.join(room_id, user2_id, tok=user2_tok)
        self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)

        # User3 is invited by user1
        self.helper.invite(room_id, targ=user3_id, tok=user1_tok)

        # User4 leaves
        self.helper.join(room_id, user4_id, tok=user4_tok)
        self.helper.leave(room_id, user4_id, tok=user4_tok)

        # User5, User6, User7 join
        self.helper.join(room_id, user5_id, tok=user5_tok)
        self.helper.join(room_id, user6_id, tok=user6_tok)
        self.helper.join(room_id, user7_id, tok=user7_tok)

        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
        empty_ms = MemberSummary([], 0)

        self._assert_member_summary(
            room_membership_summary.get(Membership.JOIN, empty_ms),
            [user1_id, user5_id, user6_id, user7_id],
        )
        self._assert_member_summary(
            room_membership_summary.get(Membership.INVITE, empty_ms), [user3_id]
        )
        self._assert_member_summary(
            room_membership_summary.get(Membership.LEAVE, empty_ms), [user4_id]
        )
        self._assert_member_summary(
            room_membership_summary.get(Membership.BAN, empty_ms),
            [
                # The banned user is not in the summary because the summary can only fit
                # 6 members and prefers everything else before bans
                #
                # user2_id
            ],
            # But we still see the count of banned users
            expected_member_count=1,
        )
        self._assert_member_summary(
            room_membership_summary.get(Membership.KNOCK, empty_ms),
            [
                # No one knocked
            ],
        )

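    # Sketch of the limit-6 stacking described above (my paraphrase of the
    # assertions, not the store's actual code):
    #
    #     summary_members = (joins + invites + leaves + bans_and_knocks)[:6]
    #
    # so with 4 joins + 1 invite + 1 leave, the ban no longer fits in the
    # member list, but its count is still reported.
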
    def test_extract_heroes_from_room_summary_excludes_self(self) -> None:
        """
        Test that `extract_heroes_from_room_summary(...)` does not include the user
        itself.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        # Setup the room (user1 is the creator and is joined to the room)
        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)

        # User2 joins
        self.helper.join(room_id, user2_id, tok=user2_tok)

        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))

        # We first ask from the perspective of a random fake user
        hero_user_ids = extract_heroes_from_room_summary(
            room_membership_summary, me="@fakeuser"
        )

        # Make sure user1 is in the room (ensure our test setup is correct)
        self.assertListEqual(hero_user_ids, [user1_id, user2_id])

        # Now, we ask for the room summary from the perspective of user1
        hero_user_ids = extract_heroes_from_room_summary(
            room_membership_summary, me=user1_id
        )

        # User1 should not be included in the list of heroes because they are the one
        # asking
        self.assertListEqual(hero_user_ids, [user2_id])

    def test_extract_heroes_from_room_summary_first_five_joins(self) -> None:
        """
        Test that `extract_heroes_from_room_summary(...)` returns the first 5 joins.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        user3_tok = self.login(user3_id, "pass")
        user4_id = self.register_user("user4", "pass")
        user4_tok = self.login(user4_id, "pass")
        user5_id = self.register_user("user5", "pass")
        user5_tok = self.login(user5_id, "pass")
        user6_id = self.register_user("user6", "pass")
        user6_tok = self.login(user6_id, "pass")
        user7_id = self.register_user("user7", "pass")
        user7_tok = self.login(user7_id, "pass")

        # Setup the room (user1 is the creator and is joined to the room)
        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)

        # User2 -> User7 join
        self.helper.join(room_id, user2_id, tok=user2_tok)
        self.helper.join(room_id, user3_id, tok=user3_tok)
        self.helper.join(room_id, user4_id, tok=user4_tok)
        self.helper.join(room_id, user5_id, tok=user5_tok)
        self.helper.join(room_id, user6_id, tok=user6_tok)
        self.helper.join(room_id, user7_id, tok=user7_tok)

        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))

        hero_user_ids = extract_heroes_from_room_summary(
            room_membership_summary, me="@fakeuser"
        )

        # First 5 users to join the room
        self.assertListEqual(
            hero_user_ids, [user1_id, user2_id, user3_id, user4_id, user5_id]
        )

    def test_extract_heroes_from_room_summary_membership_order(self) -> None:
        """
        Test that `extract_heroes_from_room_summary(...)` prefers joins/invites over
        everything else.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        _user3_tok = self.login(user3_id, "pass")
        user4_id = self.register_user("user4", "pass")
        user4_tok = self.login(user4_id, "pass")
        user5_id = self.register_user("user5", "pass")
        user5_tok = self.login(user5_id, "pass")

        # Setup the room (user1 is the creator and is joined to the room)
        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)

        # We expect the order to be joins -> invites -> leave -> bans so setup the users
        # *NOT* in that same order to make sure we're actually sorting them.

        # User2 is banned
        self.helper.join(room_id, user2_id, tok=user2_tok)
        self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)

        # User3 is invited by user1
        self.helper.invite(room_id, targ=user3_id, tok=user1_tok)

        # User4 leaves
        self.helper.join(room_id, user4_id, tok=user4_tok)
        self.helper.leave(room_id, user4_id, tok=user4_tok)

        # User5 joins
        self.helper.join(room_id, user5_id, tok=user5_tok)

        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))

        hero_user_ids = extract_heroes_from_room_summary(
            room_membership_summary, me="@fakeuser"
        )

        # Prefer joins -> invites, over everything else
        self.assertListEqual(
            hero_user_ids,
            [
                # The joins
                user1_id,
                user5_id,
                # The invites
                user3_id,
            ],
        )

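    # Rough sketch of the hero selection exercised above and in the fallback test
    # below (my illustration, not the function's actual code):
    #
    #     preferred = joins + invites
    #     fallback = leaves + bans
    #     heroes = [m for m in (preferred or fallback) if m != me][:5]
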
    @skip_unless(
        False,
        "Test is not possible because when everyone leaves the room, "
        + "the server is `no_longer_in_room` and we don't have any `current_state_events` to query",
    )
    def test_extract_heroes_from_room_summary_fallback_leave_ban(self) -> None:
        """
        Test that `extract_heroes_from_room_summary(...)` falls back to leave/ban if
        there aren't any joins/invites.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")
        user3_id = self.register_user("user3", "pass")
        user3_tok = self.login(user3_id, "pass")

        # Setup the room (user1 is the creator and is joined to the room)
        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)

        # User2 is banned
        self.helper.join(room_id, user2_id, tok=user2_tok)
        self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)

        # User3 leaves
        self.helper.join(room_id, user3_id, tok=user3_tok)
        self.helper.leave(room_id, user3_id, tok=user3_tok)

        # User1 leaves (we're doing this last because they're the room creator)
        self.helper.leave(room_id, user1_id, tok=user1_tok)

        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))

        hero_user_ids = extract_heroes_from_room_summary(
            room_membership_summary, me="@fakeuser"
        )

        # Fall back to the people who left -> banned
        self.assertListEqual(
            hero_user_ids,
            [user3_id, user1_id, user2_id],
        )

    def test_extract_heroes_from_room_summary_excludes_knocks(self) -> None:
        """
        People who knock on the room have (potentially) never been in the room before
        and are total outsiders. Plus the spec doesn't mention them at all for heroes.
        """
        user1_id = self.register_user("user1", "pass")
        user1_tok = self.login(user1_id, "pass")
        user2_id = self.register_user("user2", "pass")
        user2_tok = self.login(user2_id, "pass")

        # Setup the knock room (user1 is the creator and is joined to the room)
        knock_room_id = self.helper.create_room_as(
            user1_id, tok=user1_tok, room_version=RoomVersions.V7.identifier
        )
        self.helper.send_state(
            knock_room_id,
            EventTypes.JoinRules,
            {"join_rule": JoinRules.KNOCK},
            tok=user1_tok,
        )

        # User2 knocks on the room
        knock_channel = self.make_request(
            "POST",
            "/_matrix/client/r0/knock/%s" % (knock_room_id,),
            b"{}",
            user2_tok,
        )
        self.assertEqual(knock_channel.code, 200, knock_channel.result)

        room_membership_summary = self.get_success(
            self.store.get_room_summary(knock_room_id)
        )

        hero_user_ids = extract_heroes_from_room_summary(
            room_membership_summary, me="@fakeuser"
        )

        # user1 is the creator and is joined to the room (should show up as a hero)
        # user2 is knocking on the room (should not show up as a hero)
        self.assertListEqual(
            hero_user_ids,
            [user1_id],
        )


class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase):
    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main

@@ -19,9 +19,18 @@
#
#

from typing import Type
from unittest import skipUnless

from immutabledict import immutabledict
from parameterized import parameterized_class

from synapse.api.errors import SynapseError
from synapse.types import (
    AbstractMultiWriterStreamToken,
    MultiWriterStreamToken,
    RoomAlias,
    RoomStreamToken,
    UserID,
    get_domain_from_id,
    get_localpart_from_id,
@@ -29,6 +38,7 @@ from synapse.types import (
)

from tests import unittest
+from tests.utils import USE_POSTGRES_FOR_TESTS


class IsMineIDTests(unittest.HomeserverTestCase):
@@ -127,3 +137,64 @@ class MapUsernameTestCase(unittest.TestCase):
        # this should work with either a unicode or a bytes
        self.assertEqual(map_username_to_mxid_localpart("têst"), "t=c3=aast")
        self.assertEqual(map_username_to_mxid_localpart("têst".encode()), "t=c3=aast")

@parameterized_class(
    ("token_type",),
    [
        (MultiWriterStreamToken,),
        (RoomStreamToken,),
    ],
    class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{params_dict['token_type'].__name__}",
)
class MultiWriterTokenTestCase(unittest.HomeserverTestCase):
    """Tests for the different types of multi writer tokens."""

    token_type: Type[AbstractMultiWriterStreamToken]

    def test_basic_token(self) -> None:
        """Test that a simple stream token can be serialized and unserialized"""
        store = self.hs.get_datastores().main

        token = self.token_type(stream=5)

        string_token = self.get_success(token.to_string(store))

        if isinstance(token, RoomStreamToken):
            self.assertEqual(string_token, "s5")
        else:
            self.assertEqual(string_token, "5")

        parsed_token = self.get_success(self.token_type.parse(store, string_token))
        self.assertEqual(parsed_token, token)

    @skipUnless(USE_POSTGRES_FOR_TESTS, "Requires Postgres")
    def test_instance_map(self) -> None:
        """Test for stream token with instance map"""
        store = self.hs.get_datastores().main

        token = self.token_type(stream=5, instance_map=immutabledict({"foo": 6}))

        string_token = self.get_success(token.to_string(store))
        self.assertEqual(string_token, "m5~1.6")

        parsed_token = self.get_success(self.token_type.parse(store, string_token))
        self.assertEqual(parsed_token, token)

    def test_instance_map_assertion(self) -> None:
        """Test that we assert values in the instance map are greater than the
        min stream position"""

        with self.assertRaises(ValueError):
            self.token_type(stream=5, instance_map=immutabledict({"foo": 4}))

        with self.assertRaises(ValueError):
            self.token_type(stream=5, instance_map=immutabledict({"foo": 5}))

    def test_parse_bad_token(self) -> None:
        """Test that we can parse tokens produced by a bug in Synapse of the
        form `m5~`"""
        store = self.hs.get_datastores().main

        parsed_token = self.get_success(self.token_type.parse(store, "m5~"))
        self.assertEqual(parsed_token, self.token_type(stream=5))

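# For orientation (my reading of the assertions above, not a normative spec): a
# multi-writer token serializes as `m<min>~<instance_id>.<position>`, e.g.
# "m5~1.6" is min stream position 5 with writer instance 1 at position 6, while
# a token without an instance map is just the position ("5", or "s5" for room
# stream tokens).
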
@@ -28,6 +28,7 @@ import logging
import secrets
import time
from typing import (
+    AbstractSet,
    Any,
    Awaitable,
    Callable,
@@ -269,6 +270,56 @@ class TestCase(unittest.TestCase):
                required[key], actual[key], msg="%s mismatch. %s" % (key, actual)
            )

    def assertIncludes(
        self,
        actual_items: AbstractSet[str],
        expected_items: AbstractSet[str],
        exact: bool = False,
        message: Optional[str] = None,
    ) -> None:
        """
        Assert that all of the `expected_items` are included in the `actual_items`.

        This assert could also be called `assertContains` or `assertItemsInSet`.

        Args:
            actual_items: The container
            expected_items: The items to check for in the container
            exact: Whether the actual state should be exactly equal to the expected
                state (no extras).
            message: Optional message to include in the failure message.
        """
        # Check that each set has the same items
        if exact and actual_items == expected_items:
            return
        # Check for a superset
        elif not exact and actual_items >= expected_items:
            return

        expected_lines: List[str] = []
        for expected_item in expected_items:
            is_expected_in_actual = expected_item in actual_items
            expected_lines.append(
                "{} {}".format(" " if is_expected_in_actual else "?", expected_item)
            )

        actual_lines: List[str] = []
        for actual_item in actual_items:
            is_actual_in_expected = actual_item in expected_items
            actual_lines.append(
                "{} {}".format("+" if is_actual_in_expected else " ", actual_item)
            )

        newline = "\n"
        expected_string = f"Expected items to be in actual ('?' = missing expected items):\n {{\n{newline.join(expected_lines)}\n }}"
        actual_string = f"Actual ('+' = found expected items):\n {{\n{newline.join(actual_lines)}\n }}"
        first_message = (
            "Items must match exactly" if exact else "Some expected items are missing."
        )
        diff_message = f"{first_message}\n{expected_string}\n{actual_string}"

        self.fail(f"{diff_message}\n{message}")


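# Usage sketch for `assertIncludes` (illustrative, not from the source):
#
#     self.assertIncludes({"a", "b", "c"}, {"a", "b"})          # passes: superset
#     self.assertIncludes({"a", "b"}, {"a", "b"}, exact=True)   # passes: exact match
#     self.assertIncludes({"a"}, {"a", "b"})                    # fails and prints the diff
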
def DEBUG(target: TV) -> TV:
    """A decorator to set the .loglevel attribute to logging.DEBUG.

@@ -21,6 +21,7 @@

from contextlib import contextmanager
from os import PathLike
+from pathlib import Path
from typing import Generator, Optional, Union
from unittest.mock import patch

@@ -41,7 +42,7 @@ class DummyDistribution(metadata.Distribution):
    def version(self) -> str:
        return self._version

-    def locate_file(self, path: Union[str, PathLike]) -> PathLike:
+    def locate_file(self, path: Union[str, PathLike]) -> Path:
        raise NotImplementedError()

    def read_text(self, filename: str) -> None:

@@ -249,5 +249,5 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
        self.assertEqual(cache.get_max_pos_of_last_change("bar@baz.net"), 3)
        self.assertEqual(cache.get_max_pos_of_last_change("user@elsewhere.org"), 4)

-        # Unknown entities will return the stream start position.
-        self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), 1)
+        # Unknown entities will return None
+        self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), None)