Compare commits

...

58 Commits

Author SHA1 Message Date
Erik Johnston
9388b8c3cb Newsfile 2024-08-07 16:53:31 +01:00
Erik Johnston
c786948ebe Make /key/changes and SS use same path as sync 2024-08-07 16:52:33 +01:00
Erik Johnston
6576ecd2b8 Move generation of device lists to device handler 2024-08-07 16:22:46 +01:00
Erik Johnston
a4860d1988 Remove SyncResultBuilder param 2024-08-07 16:19:09 +01:00
dependabot[bot]
30e9f6e469 Bump bytes from 1.6.1 to 1.7.1 (#17526) 2024-08-07 10:37:54 +01:00
dependabot[bot]
eb62d12063 Bump regex from 1.10.5 to 1.10.6 (#17527) 2024-08-07 10:37:13 +01:00
Erik Johnston
ceb3686dcd Fixup sliding sync comment (#17531)
c.f.
https://github.com/element-hq/synapse/pull/17529#discussion_r1705780925
2024-08-07 10:32:36 +01:00
Eric Eastwood
1dfa59b238 Sliding Sync: Add more tracing (#17514)
Spawning from looking at a couple of traces and wanting a little more info.

Follow-up to github.com/element-hq/synapse/pull/17501

The changes in this PR allow you to find slow Sliding Sync traces while ignoring the
`wait_for_events` time. In Jaeger, you can now filter for the `current_sync_for_user`
operation with `RESULT.result=true`, indicating that it actually returned non-empty results.

If you want to find traces for your own user, you can use
`RESULT.result=true ARG.sync_config.user="@madlittlemods:matrix.org"`
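
For illustration, this is roughly what the tagging pattern looks like. It is a hedged sketch, not Synapse's actual tracing helpers: the `Span` class is a stand-in for a real OpenTracing span, and only the tag names (`ARG.sync_config.user`, `RESULT.result`) come from the description above.

```python
from dataclasses import dataclass, field
from typing import Any, Dict


@dataclass
class Span:
    """Stand-in for an OpenTracing span; real code would get one from a tracer."""

    operation: str
    tags: Dict[str, Any] = field(default_factory=dict)

    def set_tag(self, key: str, value: Any) -> None:
        self.tags[key] = value


def current_sync_for_user(span: Span, user_id: str, room_updates: Dict[str, Any]) -> Dict[str, Any]:
    # Tag the arguments up front so traces are searchable per user.
    span.set_tag("ARG.sync_config.user", user_id)

    response = {"rooms": room_updates}

    # Tag whether we actually returned anything, so slow-but-empty
    # long polls can be excluded when hunting for slow responses.
    span.set_tag("RESULT.result", bool(room_updates))
    return response


span = Span("current_sync_for_user")
current_sync_for_user(span, "@madlittlemods:matrix.org", {})
assert span.tags["RESULT.result"] is False
```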
2024-08-06 11:43:43 -05:00
Andrew Morgan
bef6568537 Merge branch 'release-v1.113' into develop 2024-08-06 14:19:12 +01:00
Andrew Morgan
244a255065 Clarify auto_accept_invites.worker_to_run_on config docs (#17515) 2024-08-06 13:26:51 +01:00
Andrew Morgan
932cb0a928 1.113.0rc1 2024-08-06 12:24:47 +01:00
dependabot[bot]
2dad718265 Bump phonenumbers from 8.13.39 to 8.13.42 (#17521) 2024-08-06 11:47:19 +01:00
dependabot[bot]
5d8446298c Bump towncrier from 23.11.0 to 24.7.1 (#17523) 2024-08-06 11:47:06 +01:00
dependabot[bot]
d845e939a9 Bump black from 24.4.2 to 24.8.0 (#17522) 2024-08-06 11:46:48 +01:00
dependabot[bot]
23727869c7 Bump serde_json from 1.0.121 to 1.0.122 (#17525) 2024-08-06 11:45:44 +01:00
Erik Johnston
c270355349 SS: Reset connection if token is unrecognized (#17529)
This triggers the client to start a new sliding sync connection. If we
don't do this and the client asks for the full range of rooms, we end up
sending down all rooms and their state from scratch (which can be very
slow).

This causes things like
https://github.com/element-hq/element-x-ios/issues/3115 after we restart
the server.
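
A rough sketch of the reset mechanic, assuming (per MSC3575) that an unrecognized position is surfaced as an error telling the client to drop its `pos` and reconnect; the class and exception names here are illustrative, not Synapse's:

```python
class UnknownPositionError(Exception):
    """The client's `pos` token is not recognized; the client should
    restart the connection from scratch (MSC3575 signals this with an
    `M_UNKNOWN_POS` error)."""


class PerConnectionStateStore:
    def __init__(self) -> None:
        # (user_id, conn_id) -> {position: state}; in-memory for now,
        # which is why a server restart forgets every position.
        self._states: dict = {}

    def save(self, user_id: str, conn_id: str, position: int, state: dict) -> None:
        self._states.setdefault((user_id, conn_id), {})[position] = state

    def load(self, user_id: str, conn_id: str, position: int) -> dict:
        states = self._states.get((user_id, conn_id), {})
        if position not in states:
            # Reset rather than guessing: resending everything against a
            # stale view is exactly the slow path described above.
            raise UnknownPositionError(position)
        return states[position]
```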

---------

Co-authored-by: Eric Eastwood <eric.eastwood@beta.gouv.fr>
2024-08-06 10:39:11 +01:00
Eric Eastwood
e3db7b2d81 Sliding Sync: Easier to understand timeline assertions in tests (#17511)
Added `_assertTimelineEqual(...)` because I got fed up trying to
understand the crazy diffs from the standard
`self.assertEqual(...)`/`self.assertListEqual(...)`.

Before:
```
[FAIL]
Traceback (most recent call last):
  File "/home/eric/Documents/github/element/synapse/tests/rest/client/sliding_sync/test_rooms_timeline.py", line 103, in test_rooms_limited_initial_sync
    self.assertListEqual(
  File "/usr/lib/python3.12/unittest/case.py", line 1091, in assertListEqual
    self.assertSequenceEqual(list1, list2, msg, seq_type=list)
  File "/usr/lib/python3.12/unittest/case.py", line 1073, in assertSequenceEqual
    self.fail(msg)
twisted.trial.unittest.FailTest: Lists differ: ['$4QcmnzhdazSnDYcYSZCS_6-MWSzM_dN3RC7TRvW0w[95 chars]isM'] != ['$8N1XJ7e-3K_wxAanLVD3v8KQ96_B5Xj4huGkgy4N4[95 chars]nnU']

First differing element 0:
'$4QcmnzhdazSnDYcYSZCS_6-MWSzM_dN3RC7TRvW0wWA'
'$8N1XJ7e-3K_wxAanLVD3v8KQ96_B5Xj4huGkgy4N4-E'

- ['$4QcmnzhdazSnDYcYSZCS_6-MWSzM_dN3RC7TRvW0wWA',
-  '$8N1XJ7e-3K_wxAanLVD3v8KQ96_B5Xj4huGkgy4N4-E',
? ^

+ ['$8N1XJ7e-3K_wxAanLVD3v8KQ96_B5Xj4huGkgy4N4-E',
? ^

-  '$q4PRxQ_pBZkQI1keYuZPTtExQ23DqpUI3-Lxwfj_isM']
+  '$4QcmnzhdazSnDYcYSZCS_6-MWSzM_dN3RC7TRvW0wWA',
+  '$j3Xj-t2F1wH9kUHsI8X5yqS7hkdSyN2owaArfvk8nnU']
```

After:

```
[FAIL]
Traceback (most recent call last):
  File "/home/eric/Documents/github/element/synapse/tests/rest/client/sliding_sync/test_rooms_timeline.py", line 178, in test_rooms_limited_initial_sync
    self._assertTimelineEqual(
  File "/home/eric/Documents/github/element/synapse/tests/rest/client/sliding_sync/test_rooms_timeline.py", line 110, in _assertTimelineEqual
    self._assertListEqual(
  File "/home/eric/Documents/github/element/synapse/tests/rest/client/sliding_sync/test_rooms_timeline.py", line 79, in _assertListEqual
    self.fail(f"{diff_message}\n{message}")
twisted.trial.unittest.FailTest: Items must
Expected items to be in actual ('?' = missing expected items):
 [
   (10, master) $w-BoqW1PQQFU4TzVJW5OIelugxh0mY12wrfw6mbC6D4 (m.room.message) activity4
   (11, master) $sSidTZf1EOQmCVDU4mrH_1-bopMQhwcDUO2IhoemR6M (m.room.message) activity5
?  (12, master) $bgOcc3D-2QSkbk4aBxKVyOOQJGs7ZuncRJwG3cEANZg (m.room.member, @user1:test) join
 ]
Actual ('+' = found expected items):
 [
+  (11, master) $sSidTZf1EOQmCVDU4mrH_1-bopMQhwcDUO2IhoemR6M (m.room.message) activity5
+  (10, master) $w-BoqW1PQQFU4TzVJW5OIelugxh0mY12wrfw6mbC6D4 (m.room.message) activity4
   (9, master) $FmCNyc11YeFwiJ4an7_q6H0LCCjQOKd6UCr5VKeXXUw (m.room.message, None) activity3
 ]
```
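
For flavour, a self-contained sketch of an assertion helper that produces this kind of annotated output (not the actual `_assertTimelineEqual` implementation):

```python
from typing import List


def assert_timeline_equal(actual: List[str], expected: List[str]) -> None:
    """Order-sensitive list assertion with a readable failure message."""
    if actual == expected:
        return
    lines = ["Expected items to be in actual ('?' = missing expected items):", " ["]
    for item in expected:
        lines.append(f"{' ' if item in actual else '?'}  {item}")
    lines += [" ]", "Actual ('+' = found expected items):", " ["]
    for item in actual:
        lines.append(f"{'+' if item in expected else ' '}  {item}")
    lines.append(" ]")
    raise AssertionError("\n".join(lines))


assert_timeline_equal(["$event_a"], ["$event_a"])  # passes silently
```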
2024-08-05 13:20:15 -05:00
Eric Eastwood
2b620e0a15 Sliding Sync: Add typing notification extension (MSC3961) (#17505)
[MSC3961](https://github.com/matrix-org/matrix-spec-proposals/pull/3961): Sliding Sync Extension: Typing Notifications

Based on
[MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575):
Sliding Sync
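
As a hedged sketch of how a client opts in (the field shape follows the MSC3575 extensions pattern; treat the exact names as an assumption while the MSCs evolve):

```python
# Body of a sliding sync request that enables the typing extension.
sync_body = {
    "lists": {
        "main": {
            "ranges": [[0, 10]],  # the first eleven rooms
            "required_state": [],
            "timeline_limit": 1,
        }
    },
    "extensions": {
        # Per MSC3961, typing notifications are delivered for rooms
        # already being sent down this connection.
        "typing": {"enabled": True},
    },
}
```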
2024-07-31 13:20:23 -05:00
Eric Eastwood
39731bb205 Sliding Sync: Split and move tests (#17504)
Split and move Sliding Sync tests so we have some saner test file
sizes
2024-07-31 12:20:46 -05:00
Eric Eastwood
1d6186265a Sliding Sync: Fix limited response description (make accurate) (#17507) 2024-07-31 11:47:26 -05:00
Eric Eastwood
46de0ee16b Sliding Sync: Update filters to be robust against remote invite rooms (#17450)
Update `filters.is_encrypted` and `filters.types`/`filters.not_types` to
be robust when dealing with remote invite rooms in Sliding Sync.

Part of
[MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575):
Sliding Sync

Follow-up to https://github.com/element-hq/synapse/pull/17434

We now take into account current state, fallback to stripped state
for invite/knock rooms, then historical state. If we can't determine
the info needed to filter a room (either from state or stripped state),
it is filtered out.
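
Condensed into a sketch, with illustrative field names rather than Synapse's real data model:

```python
from typing import Optional


def passes_encryption_filter(room: dict, want_encrypted: bool) -> bool:
    # 1. Prefer the room's current state, if we have it locally.
    is_encrypted: Optional[bool] = room.get("current_state_encrypted")
    # 2. For remote invite/knock rooms, fall back to the stripped state
    #    that came with the invite.
    if is_encrypted is None:
        is_encrypted = room.get("stripped_state_encrypted")
    # 3. Then any historical state we happen to have.
    if is_encrypted is None:
        is_encrypted = room.get("historical_state_encrypted")
    # 4. Still unknown? Filter the room out rather than guess.
    if is_encrypted is None:
        return False
    return is_encrypted == want_encrypted
```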
2024-07-30 13:20:29 -05:00
Eric Eastwood
b221f0b84b Sliding Sync: Add receipts extension (MSC3960) (#17489)
[MSC3960](https://github.com/matrix-org/matrix-spec-proposals/pull/3960): Receipts extension

Based on
[MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575):
Sliding Sync
2024-07-30 12:49:55 -05:00
Olivier 'reivilibre
b2c55bd049 Merge branch 'master' into develop 2024-07-30 18:09:05 +01:00
Olivier 'reivilibre
ed583d9c81 Merge branch 'release-v1.112' 2024-07-30 18:07:35 +01:00
dependabot[bot]
f76dc9923c Bump types-setuptools from 70.1.0.20240627 to 71.1.0.20240726 (#17497)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-30 17:33:43 +01:00
dependabot[bot]
7e997fb8b1 Bump types-pyopenssl from 24.1.0.20240425 to 24.1.0.20240722 (#17496)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-30 17:33:18 +01:00
dependabot[bot]
dbc2290cbe Bump bcrypt from 4.1.3 to 4.2.0 (#17495)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-30 17:32:49 +01:00
dependabot[bot]
2f6b86e79a Bump serde_json from 1.0.120 to 1.0.121 (#17493)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-30 17:32:16 +01:00
Olivier 'reivilibre
37f9876ccf 1.112.0 2024-07-30 17:24:09 +01:00
reivilibre
8b449a8ce6 Upgrade locked dependency on Twisted to 24.7.0rc1. (#17502)
I also updated the tests and HTTP proxy code to fix them for this new
Twisted release.

Pulls in fix for
https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7


Signed-off-by: Olivier 'reivilibre <oliverw@matrix.org>
2024-07-30 17:14:14 +01:00
Olivier 'reivilibre
53db8a914e Merge branch 'master' into develop 2024-07-30 17:10:46 +01:00
Olivier 'reivilibre
e4868f8a1e Add bold emphasis to some parts of the changelog 2024-07-30 16:23:58 +01:00
Olivier 'reivilibre
dcad81082c 1.111.1 2024-07-30 16:16:35 +01:00
reivilibre
c56b070e6f Upgrade locked dependency on Twisted to 24.7.0rc1. (#17502)
I also updated the tests and HTTP proxy code to fix them for this new
Twisted release.

Pulls in fix for
https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7


Signed-off-by: Olivier 'reivilibre <oliverw@matrix.org>
2024-07-30 15:23:23 +01:00
dependabot[bot]
be726724a8 Bump ruff from 0.5.4 to 0.5.5 (#17494)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-30 11:44:54 +01:00
Erik Johnston
62ae56a4ac Add some more opentracing to sliding sync (#17501)
This will make it easier to see what it is doing in Jaeger.
2024-07-30 10:54:11 +01:00
Richard van der Hoff
808dab0699 Fix failures property in /keys/query (#17499)
Fixes: https://github.com/element-hq/synapse/issues/17498
Fixes: https://github.com/element-hq/element-web/issues/27867
2024-07-30 09:51:24 +01:00
Erik Johnston
34306be5aa Only send rooms with updates down sliding sync (#17479)
Rather than always including all rooms in range.

Also adds a pre-filter to rooms that checks the stream change cache to
see if anything might have happened.
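
Sketched out below, assuming a change cache exposing a `has_entity_changed(entity, stream_pos)`-style lookup (the stub here is illustrative). False positives are fine, since they only cost a real check later; false negatives are not.

```python
from typing import Dict, List


class StubChangeCache:
    """Toy stand-in: remembers the last change position per room."""

    def __init__(self, last_changed: Dict[str, int]) -> None:
        self._last_changed = last_changed

    def has_entity_changed(self, entity: str, stream_pos: int) -> bool:
        # Unknown entities may have changed beyond the cache's horizon,
        # so report True (over-reporting is the safe direction).
        return self._last_changed.get(entity, stream_pos + 1) > stream_pos


def prefilter_rooms(room_ids: List[str], from_pos: int, cache: StubChangeCache) -> List[str]:
    """Keep only rooms that *might* have changed since the client's token."""
    return [r for r in room_ids if cache.has_entity_changed(r, from_pos)]


cache = StubChangeCache({"!a:example.org": 5, "!b:example.org": 12})
assert prefilter_rooms(["!a:example.org", "!b:example.org"], 10, cache) == ["!b:example.org"]
```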

Based on #17447

---------

Co-authored-by: Eric Eastwood <eric.eastwood@beta.gouv.fr>
2024-07-30 09:30:44 +01:00
Erik Johnston
be4a16ff44 Sliding Sync: Track whether we have sent rooms down to clients (#17447)
The basic idea is that we introduce a new token for a sliding sync
connection, which stores the mapping of room to room "status" (i.e. have
we sent the room down?). This token allows us to handle duplicate
requests properly. In future it can be used to store more
"per-connection" information safely.

In future this should be migrated into the DB, so it's important that we
try to reduce the number of syncs where we need to update the
per-connection information. In this PoC this only happens when we: a)
send down a set of rooms for the first time, or b) have previously
sent down a room and there are updates but we are not sending the room
down the sync (due to not falling in a list range)
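
A toy model of that per-connection bookkeeping, with flag names modeled on the two cases above (illustrative, not the real classes):

```python
from enum import Enum, auto


class HaveSentRoom(Enum):
    NEVER = auto()       # never sent this room down this connection
    LIVE = auto()        # client is up to date as of the last response
    PREVIOUSLY = auto()  # sent before, but we skipped some updates


class SlidingSyncConnection:
    def __init__(self) -> None:
        self.room_status: dict = {}  # room_id -> HaveSentRoom

    def status(self, room_id: str) -> HaveSentRoom:
        return self.room_status.get(room_id, HaveSentRoom.NEVER)

    def record_sent(self, room_id: str) -> None:
        # Case (a): we send the room down, for the first time or again.
        self.room_status[room_id] = HaveSentRoom.LIVE

    def record_skipped_update(self, room_id: str) -> None:
        # Case (b): the room has updates but fell out of all list
        # ranges, so we didn't send them; remember to catch up later.
        if self.status(room_id) is HaveSentRoom.LIVE:
            self.room_status[room_id] = HaveSentRoom.PREVIOUSLY
```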

Co-authored-by: Eric Eastwood <eric.eastwood@beta.gouv.fr>
2024-07-29 22:45:48 +01:00
Eric Eastwood
568051c0f0 Refactor Sliding Sync tests to better utilize the SlidingSyncBase.do_sync(...) (pt. 2) (#17482)
`SlidingSyncBase.do_sync()` for tests was first introduced in
https://github.com/element-hq/synapse/pull/17452

Part 1: https://github.com/element-hq/synapse/pull/17481
2024-07-25 11:01:47 -05:00
Eric Eastwood
ebbabfe782 Refactor Sliding Sync tests to better utilize the SlidingSyncBase (pt. 1) (#17481)
`SlidingSyncBase` for tests was first introduced in
https://github.com/element-hq/synapse/pull/17452

Part 2: https://github.com/element-hq/synapse/pull/17482
2024-07-25 10:43:35 -05:00
YLong Shi
69ac4b6a6e Update config_documentation - Change example of msisdn in allowed_local_3pids (#17476)
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
2024-07-25 11:07:44 +00:00
Eric Eastwood
729026e604 Sliding Sync: Add Account Data extension (MSC3959) (#17477)
Extensions based on
[MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575):
Sliding Sync
2024-07-24 17:10:38 -05:00
Erik Johnston
bdf37ad4c4 Sliding Sync: ensure bump stamp ignores backfilled events (#17478)
Backfilled events have a negative stream ordering, so it's not useful
to compare them with other (positive) stream orderings.

Plus, the Rust SDK currently assumes `bump_stamp` is positive.
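
Roughly, as an illustrative helper (not the actual implementation):

```python
from typing import List


def bump_stamp(event_stream_orderings: List[int], fallback: int = 0) -> int:
    """Latest *live* (positive) stream ordering for a room.

    Backfilled events get negative orderings, so they must not win;
    the fallback keeps the result non-negative for clients (e.g. the
    Rust SDK) that assume `bump_stamp` is positive.
    """
    live = [o for o in event_stream_orderings if o > 0]
    return max(live) if live else fallback


assert bump_stamp([-7, -3, 42, 17]) == 42
assert bump_stamp([-7, -3]) == 0  # only backfilled events known
```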
2024-07-24 15:21:56 +01:00
Erik Johnston
8bbc98e66d Use a new token format for sliding sync (#17452)
This is in preparation for adding per-connection state.

---------

Co-authored-by: Eric Eastwood <eric.eastwood@beta.gouv.fr>
2024-07-24 11:47:25 +01:00
Maciej Laskowski
4b9f4c2abf Update debian template - new link to the delegation docs (#17475)
2024-07-24 10:32:56 +01:00
Devon Hudson
e8ee784c75 Address changelog review comments 2024-07-23 09:14:45 -06:00
Devon Hudson
48c1307911 1.112.0rc1 2024-07-23 09:01:43 -06:00
Erik Johnston
d225b6b3eb Speed up SS room sorting (#17468)
We do this by bulk fetching the latest stream ordering.
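
A sketch of the shape of the change; `bulk_get_last_stream_ordering` is an assumed, illustrative method name:

```python
from typing import Dict, List


class StubStore:
    def bulk_get_last_stream_ordering(self, room_ids: List[str]) -> Dict[str, int]:
        # One round trip for all rooms (a real store would do one query).
        data = {"!a:example.org": 3, "!b:example.org": 9, "!c:example.org": 6}
        return {r: data[r] for r in room_ids if r in data}


def sort_rooms_by_recency(room_ids: List[str], store: StubStore) -> List[str]:
    latest = store.bulk_get_last_stream_ordering(room_ids)  # single bulk fetch
    return sorted(room_ids, key=lambda r: latest.get(r, 0), reverse=True)


rooms = ["!a:example.org", "!b:example.org", "!c:example.org"]
assert sort_rooms_by_recency(rooms, StubStore()) == [
    "!b:example.org", "!c:example.org", "!a:example.org"
]
```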

---------

Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
2024-07-23 14:03:14 +01:00
reivilibre
1daae43f3a Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. (#17428)
Introduced in: #17215

This caused us a minor bit of grief as the volume of logs produced was
much higher than normal.

---------

Signed-off-by: Olivier 'reivilibre <oliverw@matrix.org>
2024-07-23 11:51:34 +01:00
Michael Hollister
a9ee832e48 Fixed presence results not returning offline users on initial sync (#17231)
This is to address an issue in which `m.presence` results on initial
sync do not include entries for users who are currently offline.

The original behaviour was from
https://github.com/element-hq/synapse/issues/1535

This change is useful for applications that use the
presence system for tracking user profile information/updates (e.g.
https://github.com/element-hq/synapse/pull/16992 or for profile status
messages).

This is gated behind a new configuration option to avoid performance
impact for applications that don't need this, as a pragmatic solution
for now.
2024-07-23 09:59:24 +00:00
dependabot[bot]
13a99fba1b Bump hiredis from 2.3.2 to 3.0.0 (#17464)
Bumps [hiredis](https://github.com/redis/hiredis-py) from 2.3.2 to 3.0.0.

From hiredis's release notes:

3.0.0 (breaking changes):
- Return Redis sets as Python lists (#189)

2.4.0 (maintenance):
- Fix small typo (#192)
- Quote version for Python setup action in CI (#191)
- Fix building the wheel for windows (#190)
- pack: Replace sdsalloc.h with alloc.h (#159)
- Bump black from 22.3.0 to 24.3.0 (#185)
- Removing python 3.7 trove (#181)
- Badge for latest released on Pypi (#182)
- Sync license in metadata with LICENSE file (#183)

Full diff: https://github.com/redis/hiredis-py/compare/v2.3.2...v3.0.0
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-22 23:58:19 +00:00
dependabot[bot]
e3a0681ecf Bump pyopenssl from 24.1.0 to 24.2.1 (#17465)
Bumps [pyopenssl](https://github.com/pyca/pyopenssl) from 24.1.0 to 24.2.1.

From pyopenssl's changelog:

24.2.1 (2024-07-20):
- Fixed changelog to remove sphinx-specific restructured text strings.

24.2.0 (2024-07-20), deprecations:
- Deprecated `OpenSSL.crypto.X509Req`, `OpenSSL.crypto.load_certificate_request`, and `OpenSSL.crypto.dump_certificate_request`. Instead, `cryptography.x509.CertificateSigningRequest`, `cryptography.x509.CertificateSigningRequestBuilder`, `cryptography.x509.load_der_x509_csr`, or `cryptography.x509.load_pem_x509_csr` should be used.

24.2.0 changes:
- Added type hints for the `SSL` module ([#1308](https://github.com/pyca/pyopenssl/pull/1308)).
- Changed `OpenSSL.crypto.PKey.from_cryptography_key` to accept public and private EC, ED25519, ED448 keys ([#1310](https://github.com/pyca/pyopenssl/pull/1310)).

Full diff: https://github.com/pyca/pyopenssl/compare/24.1.0...24.2.1
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-22 23:42:36 +00:00
Eric Eastwood
de05a64246 Sliding Sync: Add E2EE extension (MSC3884) (#17454)
Spec: [MSC3884](https://github.com/matrix-org/matrix-spec-proposals/pull/3884)

Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
2024-07-22 15:40:06 -05:00
Erik Johnston
d221512498 SS: Implement $ME support (#17469)
`$ME` can be used as a substitute for the requester's user ID.
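
In effect (a minimal sketch of the substitution):

```python
def resolve_state_key(state_key: str, requester_user_id: str) -> str:
    """Sketch of `$ME` substitution in sliding sync `required_state` keys."""
    return requester_user_id if state_key == "$ME" else state_key


assert resolve_state_key("$ME", "@alice:example.org") == "@alice:example.org"
assert resolve_state_key("@bob:example.org", "@alice:example.org") == "@bob:example.org"
```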
2024-07-22 17:48:09 +01:00
Erik Johnston
ed0face8ad Speed up room keys query by using read/write lock (#17461)
Linearizing all access slows things down when devices try to fetch lots
of keys on login
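
To illustrate why this helps, a toy asyncio read/write lock (not Synapse's implementation): readers proceed concurrently while writers stay exclusive, so bulk key fetches no longer queue behind one another.

```python
import asyncio
from contextlib import asynccontextmanager


class ReadWriteLock:
    """Toy read/write lock: concurrent readers, exclusive writers."""

    def __init__(self) -> None:
        self._readers = 0
        self._no_readers = asyncio.Event()
        self._no_readers.set()
        self._writer = asyncio.Lock()

    @asynccontextmanager
    async def read(self):
        # Registering as a reader only briefly touches the writer lock,
        # so reads do not serialize against each other.
        async with self._writer:
            self._readers += 1
            self._no_readers.clear()
        try:
            yield
        finally:
            self._readers -= 1
            if self._readers == 0:
                self._no_readers.set()

    @asynccontextmanager
    async def write(self):
        async with self._writer:  # exclude writers and new readers
            await self._no_readers.wait()  # drain in-flight readers
            yield
```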
2024-07-22 14:51:17 +01:00
dependabot[bot]
73529d3732 Bump ruff from 0.5.0 to 0.5.4 (#17466) 2024-07-22 14:29:06 +01:00
dependabot[bot]
1648337775 Bump sentry-sdk from 2.8.0 to 2.10.0 (#17467) 2024-07-22 14:28:54 +01:00
82 changed files with 10857 additions and 4133 deletions


@@ -1,3 +1,151 @@
# Synapse 1.113.0rc1 (2024-08-06)
### Features
- Track which rooms have been sent to clients in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17447](https://github.com/element-hq/synapse/issues/17447))
- Add Account Data extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17477](https://github.com/element-hq/synapse/issues/17477))
- Add receipts extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17489](https://github.com/element-hq/synapse/issues/17489))
- Add typing notification extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17505](https://github.com/element-hq/synapse/issues/17505))
### Bugfixes
- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to handle invite/knock rooms when filtering. ([\#17450](https://github.com/element-hq/synapse/issues/17450))
- Fix a bug introduced in v1.110.0 which caused `/keys/query` to return incomplete results, leading to high network activity and CPU usage on Matrix clients. ([\#17499](https://github.com/element-hq/synapse/issues/17499))
### Improved Documentation
- Update the [`allowed_local_3pids`](https://element-hq.github.io/synapse/v1.112/usage/configuration/config_documentation.html#allowed_local_3pids) config option's msisdn address to a working example. ([\#17476](https://github.com/element-hq/synapse/issues/17476))
### Internal Changes
- Change sliding sync to use its own token format in preparation for storing per-connection state. ([\#17452](https://github.com/element-hq/synapse/issues/17452))
- Ensure we don't send down negative `bump_stamp` in experimental sliding sync endpoint. ([\#17478](https://github.com/element-hq/synapse/issues/17478))
- Do not send empty room entries down the experimental sliding sync endpoint. ([\#17479](https://github.com/element-hq/synapse/issues/17479))
- Refactor Sliding Sync tests to better utilize the `SlidingSyncBase`. ([\#17481](https://github.com/element-hq/synapse/issues/17481), [\#17482](https://github.com/element-hq/synapse/issues/17482))
- Add some opentracing tags and logging to the experimental sliding sync implementation. ([\#17501](https://github.com/element-hq/synapse/issues/17501))
- Split and move Sliding Sync tests so we have some saner test file sizes. ([\#17504](https://github.com/element-hq/synapse/issues/17504))
- Update the `limited` field description in the Sliding Sync response to accurately describe what it actually represents. ([\#17507](https://github.com/element-hq/synapse/issues/17507))
- Make `timeline` assertions in Sliding Sync tests easier to understand. ([\#17511](https://github.com/element-hq/synapse/issues/17511))
- Reset the sliding sync connection if we don't recognize the per-connection state position. ([\#17529](https://github.com/element-hq/synapse/issues/17529))
### Updates to locked dependencies
* Bump bcrypt from 4.1.3 to 4.2.0. ([\#17495](https://github.com/element-hq/synapse/issues/17495))
* Bump black from 24.4.2 to 24.8.0. ([\#17522](https://github.com/element-hq/synapse/issues/17522))
* Bump phonenumbers from 8.13.39 to 8.13.42. ([\#17521](https://github.com/element-hq/synapse/issues/17521))
* Bump ruff from 0.5.4 to 0.5.5. ([\#17494](https://github.com/element-hq/synapse/issues/17494))
* Bump serde_json from 1.0.120 to 1.0.121. ([\#17493](https://github.com/element-hq/synapse/issues/17493))
* Bump serde_json from 1.0.121 to 1.0.122. ([\#17525](https://github.com/element-hq/synapse/issues/17525))
* Bump towncrier from 23.11.0 to 24.7.1. ([\#17523](https://github.com/element-hq/synapse/issues/17523))
* Bump types-pyopenssl from 24.1.0.20240425 to 24.1.0.20240722. ([\#17496](https://github.com/element-hq/synapse/issues/17496))
* Bump types-setuptools from 70.1.0.20240627 to 71.1.0.20240726. ([\#17497](https://github.com/element-hq/synapse/issues/17497))
# Synapse 1.112.0 (2024-07-30)
This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7).
Note that this security fix is also available as **Synapse 1.111.1**, which does not include the rest of the changes in Synapse 1.112.0.
This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request.
If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality.
With that said, despite being a high severity issue, **we consider it unlikely that Synapse installations will be affected**.
The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and Haproxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration.
Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today.
**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time.
### Internal Changes
- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502))
# Synapse 1.112.0rc1 (2024-07-23)
Please note that this release candidate does not include the security dependency update
included in version 1.111.1, as this release candidate was published before 1.111.1.
The same security fix can be found in the full release of 1.112.0.
### Features
- Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17416](https://github.com/element-hq/synapse/issues/17416))
- Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17418](https://github.com/element-hq/synapse/issues/17418))
- Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17419](https://github.com/element-hq/synapse/issues/17419))
- Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17429](https://github.com/element-hq/synapse/issues/17429))
- Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17432](https://github.com/element-hq/synapse/issues/17432))
- Prepare for authenticated media freeze. ([\#17433](https://github.com/element-hq/synapse/issues/17433))
- Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17454](https://github.com/element-hq/synapse/issues/17454))
### Bugfixes
- Add configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231))
- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434))
- Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435))
- Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438))
### Improved Documentation
- Update the readme image to have a white background, so that it is readable in dark mode. ([\#17387](https://github.com/element-hq/synapse/issues/17387))
- Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. ([\#17423](https://github.com/element-hq/synapse/issues/17423))
- Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. ([\#17451](https://github.com/element-hq/synapse/issues/17451))
### Internal Changes
- Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424))
- Fix argument documentation for method `RateLimiter.record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426))
- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428))
- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439))
- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449))
- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453))
- Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458))
- Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460))
- Speed up fetching room keys from backup. ([\#17461](https://github.com/element-hq/synapse/issues/17461))
- Speed up sorting of the room list in sliding sync. ([\#17468](https://github.com/element-hq/synapse/issues/17468))
- Implement handling of `$ME` as a state key in sliding sync. ([\#17469](https://github.com/element-hq/synapse/issues/17469))
### Updates to locked dependencies
* Bump bytes from 1.6.0 to 1.6.1. ([\#17441](https://github.com/element-hq/synapse/issues/17441))
* Bump hiredis from 2.3.2 to 3.0.0. ([\#17464](https://github.com/element-hq/synapse/issues/17464))
* Bump jsonschema from 4.22.0 to 4.23.0. ([\#17444](https://github.com/element-hq/synapse/issues/17444))
* Bump matrix-org/done-action from 2 to 3. ([\#17440](https://github.com/element-hq/synapse/issues/17440))
* Bump mypy from 1.9.0 to 1.10.1. ([\#17445](https://github.com/element-hq/synapse/issues/17445))
* Bump pyopenssl from 24.1.0 to 24.2.1. ([\#17465](https://github.com/element-hq/synapse/issues/17465))
* Bump ruff from 0.5.0 to 0.5.4. ([\#17466](https://github.com/element-hq/synapse/issues/17466))
* Bump sentry-sdk from 2.6.0 to 2.8.0. ([\#17456](https://github.com/element-hq/synapse/issues/17456))
* Bump sentry-sdk from 2.8.0 to 2.10.0. ([\#17467](https://github.com/element-hq/synapse/issues/17467))
* Bump setuptools from 67.6.0 to 70.0.0. ([\#17448](https://github.com/element-hq/synapse/issues/17448))
* Bump twine from 5.1.0 to 5.1.1. ([\#17443](https://github.com/element-hq/synapse/issues/17443))
* Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712. ([\#17446](https://github.com/element-hq/synapse/issues/17446))
* Bump ulid from 1.1.2 to 1.1.3. ([\#17442](https://github.com/element-hq/synapse/issues/17442))
* Bump zipp from 3.15.0 to 3.19.1. ([\#17427](https://github.com/element-hq/synapse/issues/17427))
# Synapse 1.111.1 (2024-07-30)
This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7).
This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request.
If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality.
With that said, despite being a high severity issue, **we consider it unlikely that Synapse installations will be affected**.
The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and Haproxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration.
Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today.
**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time.
### Internal Changes
- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502))
# Synapse 1.111.0 (2024-07-16)
No significant changes since 1.111.0rc2.

Cargo.lock

@@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
[[package]]
name = "bytes"
version = "1.6.1"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952"
checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50"
[[package]]
name = "cfg-if"
@@ -444,9 +444,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.10.5"
version = "1.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
dependencies = [
"aho-corasick",
"memchr",
@@ -505,11 +505,12 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.120"
version = "1.0.122"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5"
checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]


@@ -1 +0,0 @@
Update the readme image to have a white background, so that it is readable in dark mode.


@@ -1 +0,0 @@
Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.


@@ -1 +0,0 @@
Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.


@@ -1 +0,0 @@
Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.


@@ -1 +0,0 @@
Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions.


@@ -1 +0,0 @@
Make sure we always use the right logic for enabling the media repo.


@@ -1 +0,0 @@
Fix documentation on `RateLimiter#record_action`.


@@ -1 +0,0 @@
Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.


@@ -1 +0,0 @@
Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.


@@ -1 +0,0 @@
Prepare for authenticated media freeze.


@@ -1 +0,0 @@
Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites.


@@ -1 +0,0 @@
Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`).


@@ -1 +0,0 @@
Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers.


@@ -1 +0,0 @@
Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete.


@@ -1 +0,0 @@
Remove unnecessary call to resume producing in fake channel.


@@ -1 +0,0 @@
Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option.


@@ -1 +0,0 @@
Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created.


@@ -1 +0,0 @@
Speed up generating sliding sync responses.


@@ -1 +0,0 @@
Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync.

changelog.d/17514.misc

@@ -0,0 +1 @@
Add more tracing to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

changelog.d/17515.doc

@@ -0,0 +1,3 @@
Clarify default behaviour of the
[`auto_accept_invites.worker_to_run_on`](https://element-hq.github.io/synapse/develop/usage/configuration/config_documentation.html#auto-accept-invites)
option.

changelog.d/17531.misc

@@ -0,0 +1 @@
Fixup comment in sliding sync implementation.

changelog.d/17537.misc

@@ -0,0 +1 @@
Fix performance of device lists in `/key/changes` and sliding sync.

debian/changelog

@@ -1,3 +1,27 @@
matrix-synapse-py3 (1.113.0~rc1) stable; urgency=medium
* New Synapse release 1.113.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 06 Aug 2024 12:23:23 +0100
matrix-synapse-py3 (1.112.0) stable; urgency=medium
* New Synapse release 1.112.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 30 Jul 2024 17:15:48 +0100
matrix-synapse-py3 (1.112.0~rc1) stable; urgency=medium
* New Synapse release 1.112.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 23 Jul 2024 08:58:55 -0600
matrix-synapse-py3 (1.111.1) stable; urgency=medium
* New Synapse release 1.111.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 30 Jul 2024 16:13:52 +0100
matrix-synapse-py3 (1.111.0) stable; urgency=medium
* New Synapse release 1.111.0.

debian/templates

@@ -5,7 +5,7 @@ _Description: Name of the server:
servers via federation. This is normally the public hostname of the
server running synapse, but can be different if you set up delegation.
Please refer to the delegation documentation in this case:
https://github.com/element-hq/synapse/blob/master/docs/delegate.md.
https://element-hq.github.io/synapse/latest/delegate.html.
Template: matrix-synapse/report-stats
Type: boolean


@@ -246,6 +246,7 @@ Example configuration:
```yaml
presence:
enabled: false
include_offline_users_on_sync: false
```
`enabled` can also be set to a special value of "untracked" which ignores updates
@@ -254,6 +255,10 @@ received via clients and federation, while still accepting updates from the
*The "untracked" option was added in Synapse 1.96.0.*
When clients perform an initial or `full_state` sync, presence results for offline users are
not included by default. Setting `include_offline_users_on_sync` to `true` will always include
offline users in the results. Defaults to false.
---
### `require_auth_for_profile_requests`
@@ -2381,7 +2386,7 @@ enable_registration_without_verification: true
---
### `registrations_require_3pid`
If this is set, users must provide all of the specified types of 3PID when registering an account.
If this is set, users must provide all of the specified types of [3PID](https://spec.matrix.org/latest/appendices/#3pid-types) when registering an account.
Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.
@@ -2406,6 +2411,9 @@ disable_msisdn_registration: true
Mandate that users are only allowed to associate certain formats of
3PIDs with accounts on this server, as specified by the `medium` and `pattern` sub-options.
`pattern` is a [Perl-like regular expression](https://docs.python.org/3/library/re.html#module-re).
More information about 3PIDs, allowed `medium` types and their `address` syntax can be found [in the Matrix spec](https://spec.matrix.org/latest/appendices/#3pid-types).
Example configuration:
```yaml
@@ -2415,7 +2423,7 @@ allowed_local_3pids:
- medium: email
pattern: '^[^@]+@vector\.im$'
- medium: msisdn
pattern: '\+44'
pattern: '^44\d{10}$'
```
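
The fix matters because msisdn 3PID addresses are stored as bare digits including the country code, with no leading `+` (see the spec link above), so the old example pattern could never match. A quick check, using a made-up number:

```python
import re

msisdn = "447700900123"  # stored without the leading '+'

assert re.search(r"\+44", msisdn) is None           # old example: never matches
assert re.fullmatch(r"^44\d{10}$", msisdn)          # new example: matches
assert re.fullmatch(r"^44\d{10}$", "4470090012") is None  # wrong length rejected
```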
---
### `enable_3pid_lookup`
@@ -4677,7 +4685,9 @@ This setting has the following sub-options:
* `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only
for direct messages. Defaults to false.
* `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false.
* `worker_to_run_on`: Which worker to run this module on. This must match the "worker_name".
* `worker_to_run_on`: Which worker to run this module on. This must match
the "worker_name". If not set or `null`, invites will be accepted on the
main process.
NOTE: Care should be taken not to enable this setting if the `synapse_auto_accept_invite` module is enabled and installed.
The two modules will compete to perform the same task and may result in undesired behaviour. For example, multiple join

poetry.lock

@@ -67,38 +67,38 @@ visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]
[[package]]
name = "bcrypt"
version = "4.1.3"
version = "4.2.0"
description = "Modern password hashing for your software and your servers"
optional = false
python-versions = ">=3.7"
files = [
{file = "bcrypt-4.1.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:48429c83292b57bf4af6ab75809f8f4daf52aa5d480632e53707805cc1ce9b74"},
{file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8bea4c152b91fd8319fef4c6a790da5c07840421c2b785084989bf8bbb7455"},
{file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d3b317050a9a711a5c7214bf04e28333cf528e0ed0ec9a4e55ba628d0f07c1a"},
{file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:094fd31e08c2b102a14880ee5b3d09913ecf334cd604af27e1013c76831f7b05"},
{file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:4fb253d65da30d9269e0a6f4b0de32bd657a0208a6f4e43d3e645774fb5457f3"},
{file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:193bb49eeeb9c1e2db9ba65d09dc6384edd5608d9d672b4125e9320af9153a15"},
{file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8cbb119267068c2581ae38790e0d1fbae65d0725247a930fc9900c285d95725d"},
{file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6cac78a8d42f9d120b3987f82252bdbeb7e6e900a5e1ba37f6be6fe4e3848286"},
{file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:01746eb2c4299dd0ae1670234bf77704f581dd72cc180f444bfe74eb80495b64"},
{file = "bcrypt-4.1.3-cp37-abi3-win32.whl", hash = "sha256:037c5bf7c196a63dcce75545c8874610c600809d5d82c305dd327cd4969995bf"},
{file = "bcrypt-4.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:8a893d192dfb7c8e883c4576813bf18bb9d59e2cfd88b68b725990f033f1b978"},
{file = "bcrypt-4.1.3-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d4cf6ef1525f79255ef048b3489602868c47aea61f375377f0d00514fe4a78c"},
{file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5698ce5292a4e4b9e5861f7e53b1d89242ad39d54c3da451a93cac17b61921a"},
{file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec3c2e1ca3e5c4b9edb94290b356d082b721f3f50758bce7cce11d8a7c89ce84"},
{file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3a5be252fef513363fe281bafc596c31b552cf81d04c5085bc5dac29670faa08"},
{file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5f7cd3399fbc4ec290378b541b0cf3d4398e4737a65d0f938c7c0f9d5e686611"},
{file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:c4c8d9b3e97209dd7111bf726e79f638ad9224b4691d1c7cfefa571a09b1b2d6"},
{file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:31adb9cbb8737a581a843e13df22ffb7c84638342de3708a98d5c986770f2834"},
{file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:551b320396e1d05e49cc18dd77d970accd52b322441628aca04801bbd1d52a73"},
{file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6717543d2c110a155e6821ce5670c1f512f602eabb77dba95717ca76af79867d"},
{file = "bcrypt-4.1.3-cp39-abi3-win32.whl", hash = "sha256:6004f5229b50f8493c49232b8e75726b568535fd300e5039e255d919fc3a07f2"},
{file = "bcrypt-4.1.3-cp39-abi3-win_amd64.whl", hash = "sha256:2505b54afb074627111b5a8dc9b6ae69d0f01fea65c2fcaea403448c503d3991"},
{file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:cb9c707c10bddaf9e5ba7cdb769f3e889e60b7d4fea22834b261f51ca2b89fed"},
{file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9f8ea645eb94fb6e7bea0cf4ba121c07a3a182ac52876493870033141aa687bc"},
{file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f44a97780677e7ac0ca393bd7982b19dbbd8d7228c1afe10b128fd9550eef5f1"},
{file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d84702adb8f2798d813b17d8187d27076cca3cd52fe3686bb07a9083930ce650"},
{file = "bcrypt-4.1.3.tar.gz", hash = "sha256:2ee15dd749f5952fe3f0430d0ff6b74082e159c50332a1413d51b5689cf06623"},
{file = "bcrypt-4.2.0-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:096a15d26ed6ce37a14c1ac1e48119660f21b24cba457f160a4b830f3fe6b5cb"},
{file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c02d944ca89d9b1922ceb8a46460dd17df1ba37ab66feac4870f6862a1533c00"},
{file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d84cf6d877918620b687b8fd1bf7781d11e8a0998f576c7aa939776b512b98d"},
{file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:1bb429fedbe0249465cdd85a58e8376f31bb315e484f16e68ca4c786dcc04291"},
{file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:655ea221910bcac76ea08aaa76df427ef8625f92e55a8ee44fbf7753dbabb328"},
{file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:1ee38e858bf5d0287c39b7a1fc59eec64bbf880c7d504d3a06a96c16e14058e7"},
{file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0da52759f7f30e83f1e30a888d9163a81353ef224d82dc58eb5bb52efcabc399"},
{file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3698393a1b1f1fd5714524193849d0c6d524d33523acca37cd28f02899285060"},
{file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:762a2c5fb35f89606a9fde5e51392dad0cd1ab7ae64149a8b935fe8d79dd5ed7"},
{file = "bcrypt-4.2.0-cp37-abi3-win32.whl", hash = "sha256:5a1e8aa9b28ae28020a3ac4b053117fb51c57a010b9f969603ed885f23841458"},
{file = "bcrypt-4.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:8f6ede91359e5df88d1f5c1ef47428a4420136f3ce97763e31b86dd8280fbdf5"},
{file = "bcrypt-4.2.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:c52aac18ea1f4a4f65963ea4f9530c306b56ccd0c6f8c8da0c06976e34a6e841"},
{file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bbbfb2734f0e4f37c5136130405332640a1e46e6b23e000eeff2ba8d005da68"},
{file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3413bd60460f76097ee2e0a493ccebe4a7601918219c02f503984f0a7ee0aebe"},
{file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8d7bb9c42801035e61c109c345a28ed7e84426ae4865511eb82e913df18f58c2"},
{file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3d3a6d28cb2305b43feac298774b997e372e56c7c7afd90a12b3dc49b189151c"},
{file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9c1c4ad86351339c5f320ca372dfba6cb6beb25e8efc659bedd918d921956bae"},
{file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:27fe0f57bb5573104b5a6de5e4153c60814c711b29364c10a75a54bb6d7ff48d"},
{file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:8ac68872c82f1add6a20bd489870c71b00ebacd2e9134a8aa3f98a0052ab4b0e"},
{file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8"},
{file = "bcrypt-4.2.0-cp39-abi3-win32.whl", hash = "sha256:77800b7147c9dc905db1cba26abe31e504d8247ac73580b4aa179f98e6608f34"},
{file = "bcrypt-4.2.0-cp39-abi3-win_amd64.whl", hash = "sha256:61ed14326ee023917ecd093ee6ef422a72f3aec6f07e21ea5f10622b735538a9"},
{file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:39e1d30c7233cfc54f5c3f2c825156fe044efdd3e0b9d309512cc514a263ec2a"},
{file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db"},
{file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:1ff39b78a52cf03fdf902635e4c81e544714861ba3f0efc56558979dd4f09170"},
{file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:373db9abe198e8e2c70d12b479464e0d5092cc122b20ec504097b5f2297ed184"},
{file = "bcrypt-4.2.0.tar.gz", hash = "sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221"},
]
[package.extras]
@@ -107,33 +107,33 @@ typecheck = ["mypy"]
[[package]]
name = "black"
version = "24.4.2"
version = "24.8.0"
description = "The uncompromising code formatter."
optional = false
python-versions = ">=3.8"
files = [
{file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"},
{file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"},
{file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"},
{file = "black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"},
{file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"},
{file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"},
{file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"},
{file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"},
{file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"},
{file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"},
{file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"},
{file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"},
{file = "black-24.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7"},
{file = "black-24.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94"},
{file = "black-24.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8"},
{file = "black-24.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c"},
{file = "black-24.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1"},
{file = "black-24.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741"},
{file = "black-24.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e"},
{file = "black-24.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7"},
{file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"},
{file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"},
{file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"},
{file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"},
{file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"},
{file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"},
{file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"},
{file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"},
{file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"},
{file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"},
{file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"},
{file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"},
{file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"},
{file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"},
{file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"},
{file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"},
{file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"},
{file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"},
{file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"},
{file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"},
{file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"},
{file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"},
{file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"},
{file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"},
]
[package.dependencies]
@@ -542,120 +542,105 @@ test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit",
[[package]]
name = "hiredis"
version = "2.3.2"
version = "3.0.0"
description = "Python wrapper for hiredis"
optional = true
python-versions = ">=3.7"
python-versions = ">=3.8"
files = [
{file = "hiredis-2.3.2-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:742093f33d374098aa21c1696ac6e4874b52658c870513a297a89265a4d08fe5"},
{file = "hiredis-2.3.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:9e14fb70ca4f7efa924f508975199353bf653f452e4ef0a1e47549e208f943d7"},
{file = "hiredis-2.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d7302b4b17fcc1cc727ce84ded7f6be4655701e8d58744f73b09cb9ed2b13df"},
{file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed63e8b75c193c5e5a8288d9d7b011da076cc314fafc3bfd59ec1d8a750d48c8"},
{file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b4edee59dc089bc3948f4f6fba309f51aa2ccce63902364900aa0a553a85e97"},
{file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6481c3b7673a86276220140456c2a6fbfe8d1fb5c613b4728293c8634134824"},
{file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684840b014ce83541a087fcf2d48227196576f56ae3e944d4dfe14c0a3e0ccb7"},
{file = "hiredis-2.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c4c0bcf786f0eac9593367b6279e9b89534e008edbf116dcd0de956524702c8"},
{file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66ab949424ac6504d823cba45c4c4854af5c59306a1531edb43b4dd22e17c102"},
{file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:322c668ee1c12d6c5750a4b1057e6b4feee2a75b3d25d630922a463cfe5e7478"},
{file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bfa73e3f163c6e8b2ec26f22285d717a5f77ab2120c97a2605d8f48b26950dac"},
{file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:7f39f28ffc65de577c3bc0c7615f149e35bc927802a0f56e612db9b530f316f9"},
{file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:55ce31bf4711da879b96d511208efb65a6165da4ba91cb3a96d86d5a8d9d23e6"},
{file = "hiredis-2.3.2-cp310-cp310-win32.whl", hash = "sha256:3dd63d0bbbe75797b743f35d37a4cca7ca7ba35423a0de742ae2985752f20c6d"},
{file = "hiredis-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:ea002656a8d974daaf6089863ab0a306962c8b715db6b10879f98b781a2a5bf5"},
{file = "hiredis-2.3.2-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:adfbf2e9c38b77d0db2fb32c3bdaea638fa76b4e75847283cd707521ad2475ef"},
{file = "hiredis-2.3.2-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:80b02d27864ebaf9b153d4b99015342382eeaed651f5591ce6f07e840307c56d"},
{file = "hiredis-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd40d2e2f82a483de0d0a6dfd8c3895a02e55e5c9949610ecbded18188fd0a56"},
{file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfa904045d7cebfb0f01dad51352551cce1d873d7c3f80c7ded7d42f8cac8f89"},
{file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28bd184b33e0dd6d65816c16521a4ba1ffbe9ff07d66873c42ea4049a62fed83"},
{file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f70481213373d44614148f0f2e38e7905be3f021902ae5167289413196de4ba4"},
{file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb8797b528c1ff81eef06713623562b36db3dafa106b59f83a6468df788ff0d1"},
{file = "hiredis-2.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02fc71c8333586871602db4774d3a3e403b4ccf6446dc4603ec12df563127cee"},
{file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0da56915bda1e0a49157191b54d3e27689b70960f0685fdd5c415dacdee2fbed"},
{file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e2674a5a3168349435b08fa0b82998ed2536eb9acccf7087efe26e4cd088a525"},
{file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:dc1c3fd49930494a67dcec37d0558d99d84eca8eb3f03b17198424538f2608d7"},
{file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:14c7b43205e515f538a9defb4e411e0f0576caaeeda76bb9993ed505486f7562"},
{file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7bac7e02915b970c3723a7a7c5df4ba7a11a3426d2a3f181e041aa506a1ff028"},
{file = "hiredis-2.3.2-cp311-cp311-win32.whl", hash = "sha256:63a090761ddc3c1f7db5e67aa4e247b4b3bb9890080bdcdadd1b5200b8b89ac4"},
{file = "hiredis-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:70d226ab0306a5b8d408235cabe51d4bf3554c9e8a72d53ce0b3c5c84cf78881"},
{file = "hiredis-2.3.2-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:5c614552c6bd1d0d907f448f75550f6b24fb56cbfce80c094908b7990cad9702"},
{file = "hiredis-2.3.2-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9c431431abf55b64347ddc8df68b3ef840269cb0aa5bc2d26ad9506eb4b1b866"},
{file = "hiredis-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a45857e87e9d2b005e81ddac9d815a33efd26ec67032c366629f023fe64fb415"},
{file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e138d141ec5a6ec800b6d01ddc3e5561ce1c940215e0eb9960876bfde7186aae"},
{file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:387f655444d912a963ab68abf64bf6e178a13c8e4aa945cb27388fd01a02e6f1"},
{file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4852f4bf88f0e2d9bdf91279892f5740ed22ae368335a37a52b92a5c88691140"},
{file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d711c107e83117129b7f8bd08e9820c43ceec6204fff072a001fd82f6d13db9f"},
{file = "hiredis-2.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92830c16885f29163e1c2da1f3c1edb226df1210ec7e8711aaabba3dd0d5470a"},
{file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:16b01d9ceae265d4ab9547be0cd628ecaff14b3360357a9d30c029e5ae8b7e7f"},
{file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5986fb5f380169270a0293bebebd95466a1c85010b4f1afc2727e4d17c452512"},
{file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:49532d7939cc51f8e99efc326090c54acf5437ed88b9c904cc8015b3c4eda9c9"},
{file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:8f34801b251ca43ad70691fb08b606a2e55f06b9c9fb1fc18fd9402b19d70f7b"},
{file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7298562a49d95570ab1c7fc4051e72824c6a80e907993a21a41ba204223e7334"},
{file = "hiredis-2.3.2-cp312-cp312-win32.whl", hash = "sha256:e1d86b75de787481b04d112067a4033e1ecfda2a060e50318a74e4e1c9b2948c"},
{file = "hiredis-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:6dbfe1887ffa5cf3030451a56a8f965a9da2fa82b7149357752b67a335a05fc6"},
{file = "hiredis-2.3.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:4fc242e9da4af48714199216eb535b61e8f8d66552c8819e33fc7806bd465a09"},
{file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e81aa4e9a1fcf604c8c4b51aa5d258e195a6ba81efe1da82dea3204443eba01c"},
{file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:419780f8583ddb544ffa86f9d44a7fcc183cd826101af4e5ffe535b6765f5f6b"},
{file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6871306d8b98a15e53a5f289ec1106a3a1d43e7ab6f4d785f95fcef9a7bd9504"},
{file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88cb0b35b63717ef1e41d62f4f8717166f7c6245064957907cfe177cc144357c"},
{file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c490191fa1218851f8a80c5a21a05a6f680ac5aebc2e688b71cbfe592f8fec6"},
{file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4baf4b579b108062e91bd2a991dc98b9dc3dc06e6288db2d98895eea8acbac22"},
{file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e627d8ef5e100556e09fb44c9571a432b10e11596d3c4043500080ca9944a91a"},
{file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:ba3dc0af0def8c21ce7d903c59ea1e8ec4cb073f25ece9edaec7f92a286cd219"},
{file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:56e9b7d6051688ca94e68c0c8a54a243f8db841911b683cedf89a29d4de91509"},
{file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:380e029bb4b1d34cf560fcc8950bf6b57c2ef0c9c8b7c7ac20b7c524a730fadd"},
{file = "hiredis-2.3.2-cp37-cp37m-win32.whl", hash = "sha256:948d9f2ca7841794dd9b204644963a4bcd69ced4e959b0d4ecf1b8ce994a6daa"},
{file = "hiredis-2.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:cfa67afe2269b2d203cd1389c00c5bc35a287cd57860441fb0e53b371ea6a029"},
{file = "hiredis-2.3.2-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:bcbe47da0aebc00a7cfe3ebdcff0373b86ce2b1856251c003e3d69c9db44b5a7"},
{file = "hiredis-2.3.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f2c9c0d910dd3f7df92f0638e7f65d8edd7f442203caf89c62fc79f11b0b73f8"},
{file = "hiredis-2.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:01b6c24c0840ac7afafbc4db236fd55f56a9a0919a215c25a238f051781f4772"},
{file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1f567489f422d40c21e53212a73bef4638d9f21043848150f8544ef1f3a6ad1"},
{file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28adecb308293e705e44087a1c2d557a816f032430d8a2a9bb7873902a1c6d48"},
{file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27e9619847e9dc70b14b1ad2d0fb4889e7ca18996585c3463cff6c951fd6b10b"},
{file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a0026cfbf29f07649b0e34509091a2a6016ff8844b127de150efce1c3aff60b"},
{file = "hiredis-2.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9de7586522e5da6bee83c9cf0dcccac0857a43249cb4d721a2e312d98a684d1"},
{file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e58494f282215fc461b06709e9a195a24c12ba09570f25bdf9efb036acc05101"},
{file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:de3a32b4b76d46f1eb42b24a918d51d8ca52411a381748196241d59a895f7c5c"},
{file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1979334ccab21a49c544cd1b8d784ffb2747f99a51cb0bd0976eebb517628382"},
{file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:0c0773266e1c38a06e7593bd08870ac1503f5f0ce0f5c63f2b4134b090b5d6a4"},
{file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bd1cee053416183adcc8e6134704c46c60c3f66b8faaf9e65bf76191ca59a2f7"},
{file = "hiredis-2.3.2-cp38-cp38-win32.whl", hash = "sha256:5341ce3d01ef3c7418a72e370bf028c7aeb16895e79e115fe4c954fff990489e"},
{file = "hiredis-2.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:8fc7197ff33047ce43a67851ccf190acb5b05c52fd4a001bb55766358f04da68"},
{file = "hiredis-2.3.2-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:f47775e27388b58ce52f4f972f80e45b13c65113e9e6b6bf60148f893871dc9b"},
{file = "hiredis-2.3.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:9412a06b8a8e09abd6313d96864b6d7713c6003a365995a5c70cfb9209df1570"},
{file = "hiredis-2.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3020b60e3fc96d08c2a9b011f1c2e2a6bdcc09cb55df93c509b88be5cb791df"},
{file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53d0f2c59bce399b8010a21bc779b4f8c32d0f582b2284ac8c98dc7578b27bc4"},
{file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57c0d0c7e308ed5280a4900d4468bbfec51f0e1b4cde1deae7d4e639bc6b7766"},
{file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d63318ca189fddc7e75f6a4af8eae9c0545863619fb38cfba5f43e81280b286"},
{file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e741ffe4e2db78a1b9dd6e5d29678ce37fbaaf65dfe132e5b82a794413302ef1"},
{file = "hiredis-2.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb98038ccd368e0d88bd92ee575c58cfaf33e77f788c36b2a89a84ee1936dc6b"},
{file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:eae62ed60d53b3561148bcd8c2383e430af38c0deab9f2dd15f8874888ffd26f"},
{file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ca33c175c1cf60222d9c6d01c38fc17ec3a484f32294af781de30226b003e00f"},
{file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c5f6972d2bdee3cd301d5c5438e31195cf1cabf6fd9274491674d4ceb46914d"},
{file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:a6b54dabfaa5dbaa92f796f0c32819b4636e66aa8e9106c3d421624bd2a2d676"},
{file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e96cd35df012a17c87ae276196ea8f215e77d6eeca90709eb03999e2d5e3fd8a"},
{file = "hiredis-2.3.2-cp39-cp39-win32.whl", hash = "sha256:63b99b5ea9fe4f21469fb06a16ca5244307678636f11917359e3223aaeca0b67"},
{file = "hiredis-2.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:a50c8af811b35b8a43b1590cf890b61ff2233225257a3cad32f43b3ec7ff1b9f"},
{file = "hiredis-2.3.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e8bf4444b09419b77ce671088db9f875b26720b5872d97778e2545cd87dba4a"},
{file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bd42d0d45ea47a2f96babd82a659fbc60612ab9423a68e4a8191e538b85542a"},
{file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80441b55edbef868e2563842f5030982b04349408396e5ac2b32025fb06b5212"},
{file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec444ab8f27562a363672d6a7372bc0700a1bdc9764563c57c5f9efa0e592b5f"},
{file = "hiredis-2.3.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f9f606e810858207d4b4287b4ef0dc622c2aa469548bf02b59dcc616f134f811"},
{file = "hiredis-2.3.2-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c3dde4ca00fe9eee3b76209711f1941bb86db42b8a75d7f2249ff9dfc026ab0e"},
{file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4dd676107a1d3c724a56a9d9db38166ad4cf44f924ee701414751bd18a784a0"},
{file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce42649e2676ad783186264d5ffc788a7612ecd7f9effb62d51c30d413a3eefe"},
{file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e3f8b1733078ac663dad57e20060e16389a60ab542f18a97931f3a2a2dd64a4"},
{file = "hiredis-2.3.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:532a84a82156a82529ec401d1c25d677c6543c791e54a263aa139541c363995f"},
{file = "hiredis-2.3.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4d59f88c4daa36b8c38e59ac7bffed6f5d7f68eaccad471484bf587b28ccc478"},
{file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91a14dd95e24dc078204b18b0199226ee44644974c645dc54ee7b00c3157330"},
{file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb777a38797c8c7df0444533119570be18d1a4ce5478dffc00c875684df7bfcb"},
{file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d47c915897a99d0d34a39fad4be97b4b709ab3d0d3b779ebccf2b6024a8c681e"},
{file = "hiredis-2.3.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:333b5e04866758b11bda5f5315b4e671d15755fc6ed3b7969721bc6311d0ee36"},
{file = "hiredis-2.3.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c8937f1100435698c18e4da086968c4b5d70e86ea718376f833475ab3277c9aa"},
{file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa45f7d771094b8145af10db74704ab0f698adb682fbf3721d8090f90e42cc49"},
{file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33d5ebc93c39aed4b5bc769f8ce0819bc50e74bb95d57a35f838f1c4378978e0"},
{file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a797d8c7df9944314d309b0d9e1b354e2fa4430a05bb7604da13b6ad291bf959"},
{file = "hiredis-2.3.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e15a408f71a6c8c87b364f1f15a6cd9c1baca12bbc47a326ac8ab99ec7ad3c64"},
{file = "hiredis-2.3.2.tar.gz", hash = "sha256:733e2456b68f3f126ddaf2cd500a33b25146c3676b97ea843665717bda0c5d43"},
{file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:4b182791c41c5eb1d9ed736f0ff81694b06937ca14b0d4dadde5dadba7ff6dae"},
{file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:13c275b483a052dd645eb2cb60d6380f1f5215e4c22d6207e17b86be6dd87ffa"},
{file = "hiredis-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1018cc7f12824506f165027eabb302735b49e63af73eb4d5450c66c88f47026"},
{file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83a29cc7b21b746cb6a480189e49f49b2072812c445e66a9e38d2004d496b81c"},
{file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e241fab6332e8fb5f14af00a4a9c6aefa22f19a336c069b7ddbf28ef8341e8d6"},
{file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1fb8de899f0145d6c4d5d4bd0ee88a78eb980a7ffabd51e9889251b8f58f1785"},
{file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b23291951959141173eec10f8573538e9349fa27f47a0c34323d1970bf891ee5"},
{file = "hiredis-3.0.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e421ac9e4b5efc11705a0d5149e641d4defdc07077f748667f359e60dc904420"},
{file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:77c8006c12154c37691b24ff293c077300c22944018c3ff70094a33e10c1d795"},
{file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:41afc0d3c18b59eb50970479a9c0e5544fb4b95e3a79cf2fbaece6ddefb926fe"},
{file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:04ccae6dcd9647eae6025425ab64edb4d79fde8b9e6e115ebfabc6830170e3b2"},
{file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:fe91d62b0594db5ea7d23fc2192182b1a7b6973f628a9b8b2e0a42a2be721ac6"},
{file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:99516d99316062824a24d145d694f5b0d030c80da693ea6f8c4ecf71a251d8bb"},
{file = "hiredis-3.0.0-cp310-cp310-win32.whl", hash = "sha256:562eaf820de045eb487afaa37e6293fe7eceb5b25e158b5a1974b7e40bf04543"},
{file = "hiredis-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a1c81c89ed765198da27412aa21478f30d54ef69bf5e4480089d9c3f77b8f882"},
{file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:4664dedcd5933364756d7251a7ea86d60246ccf73a2e00912872dacbfcef8978"},
{file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:47de0bbccf4c8a9f99d82d225f7672b9dd690d8fd872007b933ef51a302c9fa6"},
{file = "hiredis-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e43679eca508ba8240d016d8cca9d27342d70184773c15bea78a23c87a1922f1"},
{file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13c345e7278c210317e77e1934b27b61394fee0dec2e8bd47e71570900f75823"},
{file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00018f22f38530768b73ea86c11f47e8d4df65facd4e562bd78773bd1baef35e"},
{file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ea3a86405baa8eb0d3639ced6926ad03e07113de54cb00fd7510cb0db76a89d"},
{file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c073848d2b1d5561f3903879ccf4e1a70c9b1e7566c7bdcc98d082fa3e7f0a1d"},
{file = "hiredis-3.0.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a8dffb5f5b3415a4669d25de48b617fd9d44b0bccfc4c2ab24b06406ecc9ecb"},
{file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:22c17c96143c2a62dfd61b13803bc5de2ac526b8768d2141c018b965d0333b66"},
{file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c3ece960008dab66c6b8bb3a1350764677ee7c74ccd6270aaf1b1caf9ccebb46"},
{file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f75999ae00a920f7dce6ecae76fa5e8674a3110e5a75f12c7a2c75ae1af53396"},
{file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e069967cbd5e1900aafc4b5943888f6d34937fc59bf8918a1a546cb729b4b1e4"},
{file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0aacc0a78e1d94d843a6d191f224a35893e6bdfeb77a4a89264155015c65f126"},
{file = "hiredis-3.0.0-cp311-cp311-win32.whl", hash = "sha256:719c32147ba29528cb451f037bf837dcdda4ff3ddb6cdb12c4216b0973174718"},
{file = "hiredis-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:bdc144d56333c52c853c31b4e2e52cfbdb22d3da4374c00f5f3d67c42158970f"},
{file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:484025d2eb8f6348f7876fc5a2ee742f568915039fcb31b478fd5c242bb0fe3a"},
{file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:fcdb552ffd97151dab8e7bc3ab556dfa1512556b48a367db94b5c20253a35ee1"},
{file = "hiredis-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bb6f9fd92f147ba11d338ef5c68af4fd2908739c09e51f186e1d90958c68cc1"},
{file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa86bf9a0ed339ec9e8a9a9d0ae4dccd8671625c83f9f9f2640729b15e07fbfd"},
{file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e194a0d5df9456995d8f510eab9f529213e7326af6b94770abf8f8b7952ddcaa"},
{file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a1df39d74ec507d79c7a82c8063eee60bf80537cdeee652f576059b9cdd15c"},
{file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f91456507427ba36fd81b2ca11053a8e112c775325acc74e993201ea912d63e9"},
{file = "hiredis-3.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9862db92ef67a8a02e0d5370f07d380e14577ecb281b79720e0d7a89aedb9ee5"},
{file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d10fcd9e0eeab835f492832b2a6edb5940e2f1230155f33006a8dfd3bd2c94e4"},
{file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:48727d7d405d03977d01885f317328dc21d639096308de126c2c4e9950cbd3c9"},
{file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e0bb6102ebe2efecf8a3292c6660a0e6fac98176af6de67f020bea1c2343717"},
{file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:df274e3abb4df40f4c7274dd3e587dfbb25691826c948bc98d5fead019dfb001"},
{file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:034925b5fb514f7b11aac38cd55b3fd7e9d3af23bd6497f3f20aa5b8ba58e232"},
{file = "hiredis-3.0.0-cp312-cp312-win32.whl", hash = "sha256:120f2dda469b28d12ccff7c2230225162e174657b49cf4cd119db525414ae281"},
{file = "hiredis-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:e584fe5f4e6681d8762982be055f1534e0170f6308a7a90f58d737bab12ff6a8"},
{file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:122171ff47d96ed8dd4bba6c0e41d8afaba3e8194949f7720431a62aa29d8895"},
{file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ba9fc605ac558f0de67463fb588722878641e6fa1dabcda979e8e69ff581d0bd"},
{file = "hiredis-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a631e2990b8be23178f655cae8ac6c7422af478c420dd54e25f2e26c29e766f1"},
{file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63482db3fadebadc1d01ad33afa6045ebe2ea528eb77ccaabd33ee7d9c2bad48"},
{file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f669212c390eebfbe03c4e20181f5970b82c5d0a0ad1df1785f7ffbe7d61150"},
{file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a49ef161739f8018c69b371528bdb47d7342edfdee9ddc75a4d8caddf45a6e"},
{file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98a152052b8878e5e43a2e3a14075218adafc759547c98668a21e9485882696c"},
{file = "hiredis-3.0.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50a196af0ce657fcde9bf8a0bbe1032e22c64d8fcec2bc926a35e7ff68b3a166"},
{file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f2f312eef8aafc2255e3585dcf94d5da116c43ef837db91db9ecdc1bc930072d"},
{file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:6ca41fa40fa019cde42c21add74aadd775e71458051a15a352eabeb12eb4d084"},
{file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:6eecb343c70629f5af55a8b3e53264e44fa04e155ef7989de13668a0cb102a90"},
{file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:c3fdad75e7837a475900a1d3a5cc09aa024293c3b0605155da2d42f41bc0e482"},
{file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8854969e7480e8d61ed7549eb232d95082a743e94138d98d7222ba4e9f7ecacd"},
{file = "hiredis-3.0.0-cp38-cp38-win32.whl", hash = "sha256:f114a6c86edbf17554672b050cce72abf489fe58d583c7921904d5f1c9691605"},
{file = "hiredis-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:7d99b91e42217d7b4b63354b15b41ce960e27d216783e04c4a350224d55842a4"},
{file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:4c6efcbb5687cf8d2aedcc2c3ed4ac6feae90b8547427d417111194873b66b06"},
{file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5b5cff42a522a0d81c2ae7eae5e56d0ee7365e0c4ad50c4de467d8957aff4414"},
{file = "hiredis-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:82f794d564f4bc76b80c50b03267fe5d6589e93f08e66b7a2f674faa2fa76ebc"},
{file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7a4c1791d7aa7e192f60fe028ae409f18ccdd540f8b1e6aeb0df7816c77e4a4"},
{file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2537b2cd98192323fce4244c8edbf11f3cac548a9d633dbbb12b48702f379f4"},
{file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fed69bbaa307040c62195a269f82fc3edf46b510a17abb6b30a15d7dab548df"},
{file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:869f6d5537d243080f44253491bb30aa1ec3c21754003b3bddeadedeb65842b0"},
{file = "hiredis-3.0.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d435ae89073d7cd51e6b6bf78369c412216261c9c01662e7008ff00978153729"},
{file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:204b79b30a0e6be0dc2301a4d385bb61472809f09c49f400497f1cdd5a165c66"},
{file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3ea635101b739c12effd189cc19b2671c268abb03013fd1f6321ca29df3ca625"},
{file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f359175197fd833c8dd7a8c288f1516be45415bb5c939862ab60c2918e1e1943"},
{file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ac6d929cb33dd12ad3424b75725975f0a54b5b12dbff95f2a2d660c510aa106d"},
{file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:100431e04d25a522ef2c3b94f294c4219c4de3bfc7d557b6253296145a144c11"},
{file = "hiredis-3.0.0-cp39-cp39-win32.whl", hash = "sha256:e1a9c14ae9573d172dc050a6f63a644457df5d01ec4d35a6a0f097f812930f83"},
{file = "hiredis-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:54a6dd7b478e6eb01ce15b3bb5bf771e108c6c148315bf194eb2ab776a3cac4d"},
{file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:50da7a9edf371441dfcc56288d790985ee9840d982750580710a9789b8f4a290"},
{file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9b285ef6bf1581310b0d5e8f6ce64f790a1c40e89c660e1320b35f7515433672"},
{file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dcfa684966f25b335072115de2f920228a3c2caf79d4bfa2b30f6e4f674a948"},
{file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a41be8af1fd78ca97bc948d789a09b730d1e7587d07ca53af05758f31f4b985d"},
{file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:038756db735e417ab36ee6fd7725ce412385ed2bd0767e8179a4755ea11b804f"},
{file = "hiredis-3.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:fcecbd39bd42cef905c0b51c9689c39d0cc8b88b1671e7f40d4fb213423aef3a"},
{file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a131377493a59fb0f5eaeb2afd49c6540cafcfba5b0b3752bed707be9e7c4eaf"},
{file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d22c53f0ec5c18ecb3d92aa9420563b1c5d657d53f01356114978107b00b860"},
{file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a91e9520fbc65a799943e5c970ffbcd67905744d8becf2e75f9f0a5e8414f0"},
{file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dc8043959b50141df58ab4f398e8ae84c6f9e673a2c9407be65fc789138f4a6"},
{file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51b99cfac514173d7b8abdfe10338193e8a0eccdfe1870b646009d2fb7cbe4b5"},
{file = "hiredis-3.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:fa1fcad89d8a41d8dc10b1e54951ec1e161deabd84ed5a2c95c3c7213bdb3514"},
{file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:898636a06d9bf575d2c594129085ad6b713414038276a4bfc5db7646b8a5be78"},
{file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:466f836dbcf86de3f9692097a7a01533dc9926986022c6617dc364a402b265c5"},
{file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23142a8af92a13fc1e3f2ca1d940df3dcf2af1d176be41fe8d89e30a837a0b60"},
{file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:793c80a3d6b0b0e8196a2d5de37a08330125668c8012922685e17aa9108c33ac"},
{file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:467d28112c7faa29b7db743f40803d927c8591e9da02b6ce3d5fadc170a542a2"},
{file = "hiredis-3.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:dc384874a719c767b50a30750f937af18842ee5e288afba95a5a3ed703b1515a"},
{file = "hiredis-3.0.0.tar.gz", hash = "sha256:fed8581ae26345dea1f1e0d1a96e05041a727a45e7d8d459164583e23c6ac441"},
]
[[package]]
@@ -836,18 +821,21 @@ testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-chec
[[package]]
name = "incremental"
version = "22.10.0"
description = "\"A small library that versions your Python projects.\""
version = "24.7.2"
description = "A small library that versions your Python projects."
optional = false
python-versions = "*"
python-versions = ">=3.8"
files = [
{file = "incremental-22.10.0-py2.py3-none-any.whl", hash = "sha256:b864a1f30885ee72c5ac2835a761b8fe8aa9c28b9395cacf27286602688d3e51"},
{file = "incremental-22.10.0.tar.gz", hash = "sha256:912feeb5e0f7e0188e6f42241d2f450002e11bbc0937c65865045854c24c0bd0"},
{file = "incremental-24.7.2-py3-none-any.whl", hash = "sha256:8cb2c3431530bec48ad70513931a760f446ad6c25e8333ca5d95e24b0ed7b8fe"},
{file = "incremental-24.7.2.tar.gz", hash = "sha256:fb4f1d47ee60efe87d4f6f0ebb5f70b9760db2b2574c59c8e8912be4ebd464c9"},
]
[package.dependencies]
setuptools = ">=61.0"
tomli = {version = "*", markers = "python_version < \"3.11\""}
[package.extras]
mypy = ["click (>=6.0)", "mypy (==0.812)", "twisted (>=16.4.0)"]
scripts = ["click (>=6.0)", "twisted (>=16.4.0)"]
scripts = ["click (>=6.0)"]
[[package]]
name = "isort"
@@ -1528,13 +1516,13 @@ files = [
[[package]]
name = "phonenumbers"
version = "8.13.39"
version = "8.13.42"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false
python-versions = "*"
files = [
{file = "phonenumbers-8.13.39-py2.py3-none-any.whl", hash = "sha256:3ad2d086fa71e7eef409001b9195ac54bebb0c6e3e752209b558ca192c9229a0"},
{file = "phonenumbers-8.13.39.tar.gz", hash = "sha256:db7ca4970d206b2056231105300753b1a5b229f43416f8c2b3010e63fbb68d77"},
{file = "phonenumbers-8.13.42-py2.py3-none-any.whl", hash = "sha256:18acc22ee03116d27b26e990f53806a1770a3e05f05e1620bc09ad187f889456"},
{file = "phonenumbers-8.13.42.tar.gz", hash = "sha256:7137904f2db3b991701e853174ce8e1cb8f540b8bfdf27617540de04c0b7bed5"},
]
[[package]]
@@ -2013,17 +2001,17 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
[[package]]
name = "pyopenssl"
version = "24.1.0"
version = "24.2.1"
description = "Python wrapper module around the OpenSSL library"
optional = false
python-versions = ">=3.7"
files = [
{file = "pyOpenSSL-24.1.0-py3-none-any.whl", hash = "sha256:17ed5be5936449c5418d1cd269a1a9e9081bc54c17aed272b45856a3d3dc86ad"},
{file = "pyOpenSSL-24.1.0.tar.gz", hash = "sha256:cabed4bfaa5df9f1a16c0ef64a0cb65318b5cd077a7eda7d6970131ca2f41a6f"},
{file = "pyOpenSSL-24.2.1-py3-none-any.whl", hash = "sha256:967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d"},
{file = "pyopenssl-24.2.1.tar.gz", hash = "sha256:4247f0dbe3748d560dcbb2ff3ea01af0f9a1a001ef5f7c4c647956ed8cbf0e95"},
]
[package.dependencies]
cryptography = ">=41.0.5,<43"
cryptography = ">=41.0.5,<44"
[package.extras]
docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx-rtd-theme"]
@@ -2373,29 +2361,29 @@ files = [
[[package]]
name = "ruff"
version = "0.5.0"
version = "0.5.5"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.5.0-py3-none-linux_armv6l.whl", hash = "sha256:ee770ea8ab38918f34e7560a597cc0a8c9a193aaa01bfbd879ef43cb06bd9c4c"},
{file = "ruff-0.5.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:38f3b8327b3cb43474559d435f5fa65dacf723351c159ed0dc567f7ab735d1b6"},
{file = "ruff-0.5.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7594f8df5404a5c5c8f64b8311169879f6cf42142da644c7e0ba3c3f14130370"},
{file = "ruff-0.5.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:adc7012d6ec85032bc4e9065110df205752d64010bed5f958d25dbee9ce35de3"},
{file = "ruff-0.5.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d505fb93b0fabef974b168d9b27c3960714d2ecda24b6ffa6a87ac432905ea38"},
{file = "ruff-0.5.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dc5cfd3558f14513ed0d5b70ce531e28ea81a8a3b1b07f0f48421a3d9e7d80a"},
{file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:db3ca35265de239a1176d56a464b51557fce41095c37d6c406e658cf80bbb362"},
{file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1a321c4f68809fddd9b282fab6a8d8db796b270fff44722589a8b946925a2a8"},
{file = "ruff-0.5.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c4dfcd8d34b143916994b3876b63d53f56724c03f8c1a33a253b7b1e6bf2a7d"},
{file = "ruff-0.5.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81e5facfc9f4a674c6a78c64d38becfbd5e4f739c31fcd9ce44c849f1fad9e4c"},
{file = "ruff-0.5.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e589e27971c2a3efff3fadafb16e5aef7ff93250f0134ec4b52052b673cf988d"},
{file = "ruff-0.5.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2ffbc3715a52b037bcb0f6ff524a9367f642cdc5817944f6af5479bbb2eb50e"},
{file = "ruff-0.5.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cd096e23c6a4f9c819525a437fa0a99d1c67a1b6bb30948d46f33afbc53596cf"},
{file = "ruff-0.5.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:46e193b36f2255729ad34a49c9a997d506e58f08555366b2108783b3064a0e1e"},
{file = "ruff-0.5.0-py3-none-win32.whl", hash = "sha256:49141d267100f5ceff541b4e06552e98527870eafa1acc9dec9139c9ec5af64c"},
{file = "ruff-0.5.0-py3-none-win_amd64.whl", hash = "sha256:e9118f60091047444c1b90952736ee7b1792910cab56e9b9a9ac20af94cd0440"},
{file = "ruff-0.5.0-py3-none-win_arm64.whl", hash = "sha256:ed5c4df5c1fb4518abcb57725b576659542bdbe93366f4f329e8f398c4b71178"},
{file = "ruff-0.5.0.tar.gz", hash = "sha256:eb641b5873492cf9bd45bc9c5ae5320648218e04386a5f0c264ad6ccce8226a1"},
{file = "ruff-0.5.5-py3-none-linux_armv6l.whl", hash = "sha256:605d589ec35d1da9213a9d4d7e7a9c761d90bba78fc8790d1c5e65026c1b9eaf"},
{file = "ruff-0.5.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00817603822a3e42b80f7c3298c8269e09f889ee94640cd1fc7f9329788d7bf8"},
{file = "ruff-0.5.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:187a60f555e9f865a2ff2c6984b9afeffa7158ba6e1eab56cb830404c942b0f3"},
{file = "ruff-0.5.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe26fc46fa8c6e0ae3f47ddccfbb136253c831c3289bba044befe68f467bfb16"},
{file = "ruff-0.5.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ad25dd9c5faac95c8e9efb13e15803cd8bbf7f4600645a60ffe17c73f60779b"},
{file = "ruff-0.5.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f70737c157d7edf749bcb952d13854e8f745cec695a01bdc6e29c29c288fc36e"},
{file = "ruff-0.5.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:cfd7de17cef6ab559e9f5ab859f0d3296393bc78f69030967ca4d87a541b97a0"},
{file = "ruff-0.5.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a09b43e02f76ac0145f86a08e045e2ea452066f7ba064fd6b0cdccb486f7c3e7"},
{file = "ruff-0.5.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0b856cb19c60cd40198be5d8d4b556228e3dcd545b4f423d1ad812bfdca5884"},
{file = "ruff-0.5.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3687d002f911e8a5faf977e619a034d159a8373514a587249cc00f211c67a091"},
{file = "ruff-0.5.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ac9dc814e510436e30d0ba535f435a7f3dc97f895f844f5b3f347ec8c228a523"},
{file = "ruff-0.5.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:af9bdf6c389b5add40d89b201425b531e0a5cceb3cfdcc69f04d3d531c6be74f"},
{file = "ruff-0.5.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d40a8533ed545390ef8315b8e25c4bb85739b90bd0f3fe1280a29ae364cc55d8"},
{file = "ruff-0.5.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:cab904683bf9e2ecbbe9ff235bfe056f0eba754d0168ad5407832928d579e7ab"},
{file = "ruff-0.5.5-py3-none-win32.whl", hash = "sha256:696f18463b47a94575db635ebb4c178188645636f05e934fdf361b74edf1bb2d"},
{file = "ruff-0.5.5-py3-none-win_amd64.whl", hash = "sha256:50f36d77f52d4c9c2f1361ccbfbd09099a1b2ea5d2b2222c586ab08885cf3445"},
{file = "ruff-0.5.5-py3-none-win_arm64.whl", hash = "sha256:3191317d967af701f1b73a31ed5788795936e423b7acce82a2b63e26eb3e89d6"},
{file = "ruff-0.5.5.tar.gz", hash = "sha256:cc5516bdb4858d972fbc31d246bdb390eab8df1a26e2353be2dbc0c2d7f5421a"},
]
[[package]]
@@ -2430,13 +2418,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
version = "2.8.0"
version = "2.10.0"
description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
files = [
{file = "sentry_sdk-2.8.0-py2.py3-none-any.whl", hash = "sha256:6051562d2cfa8087bb8b4b8b79dc44690f8a054762a29c07e22588b1f619bfb5"},
{file = "sentry_sdk-2.8.0.tar.gz", hash = "sha256:aa4314f877d9cd9add5a0c9ba18e3f27f99f7de835ce36bd150e48a41c7c646f"},
{file = "sentry_sdk-2.10.0-py2.py3-none-any.whl", hash = "sha256:87b3d413c87d8e7f816cc9334bff255a83d8b577db2b22042651c30c19c09190"},
{file = "sentry_sdk-2.10.0.tar.gz", hash = "sha256:545fcc6e36c335faa6d6cda84669b6e17025f31efbf3b2211ec14efe008b75d1"},
]
[package.dependencies]
@@ -2661,24 +2649,24 @@ files = [
[[package]]
name = "towncrier"
version = "23.11.0"
version = "24.7.1"
description = "Building newsfiles for your project."
optional = false
python-versions = ">=3.8"
files = [
{file = "towncrier-23.11.0-py3-none-any.whl", hash = "sha256:2e519ca619426d189e3c98c99558fe8be50c9ced13ea1fc20a4a353a95d2ded7"},
{file = "towncrier-23.11.0.tar.gz", hash = "sha256:13937c247e3f8ae20ac44d895cf5f96a60ad46cfdcc1671759530d7837d9ee5d"},
{file = "towncrier-24.7.1-py3-none-any.whl", hash = "sha256:685e2a94335b5dc47537b4d3b449a25b18571ea85b07dcf6e8df31ba40f692dd"},
{file = "towncrier-24.7.1.tar.gz", hash = "sha256:57a057faedabcadf1a62f6f9bad726ae566c1f31a411338ddb8316993f583b3d"},
]
[package.dependencies]
click = "*"
importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""}
importlib-resources = {version = ">=5", markers = "python_version < \"3.10\""}
incremental = "*"
jinja2 = "*"
tomli = {version = "*", markers = "python_version < \"3.11\""}
[package.extras]
dev = ["furo", "packaging", "sphinx (>=5)", "twisted"]
dev = ["furo (>=2024.05.06)", "nox", "packaging", "sphinx (>=5)", "twisted"]
[[package]]
name = "treq"
@@ -2726,13 +2714,13 @@ urllib3 = ">=1.26.0"
[[package]]
name = "twisted"
version = "24.3.0"
version = "24.7.0rc1"
description = "An asynchronous networking framework written in Python"
optional = false
python-versions = ">=3.8.0"
files = [
{file = "twisted-24.3.0-py3-none-any.whl", hash = "sha256:039f2e6a49ab5108abd94de187fa92377abe5985c7a72d68d0ad266ba19eae63"},
{file = "twisted-24.3.0.tar.gz", hash = "sha256:6b38b6ece7296b5e122c9eb17da2eeab3d98a198f50ca9efd00fb03e5b4fd4ae"},
{file = "twisted-24.7.0rc1-py3-none-any.whl", hash = "sha256:f37d6656fe4e2871fab29d8952ae90bd6ca8b48a9e4dfa1b348f4cd62e6ba0bb"},
{file = "twisted-24.7.0rc1.tar.gz", hash = "sha256:bbc4a2193ca34cfa32f626300746698a6d70fcd77d9c0b79a664c347e39634fc"},
]
[package.dependencies]
@@ -2741,48 +2729,26 @@ automat = ">=0.8.0"
constantly = ">=15.1"
hyperlink = ">=17.1.1"
idna = {version = ">=2.4", optional = true, markers = "extra == \"tls\""}
incremental = ">=22.10.0"
incremental = ">=24.7.0"
pyopenssl = {version = ">=21.0.0", optional = true, markers = "extra == \"tls\""}
service-identity = {version = ">=18.1.0", optional = true, markers = "extra == \"tls\""}
twisted-iocpsupport = {version = ">=1.0.2,<2", markers = "platform_system == \"Windows\""}
typing-extensions = ">=4.2.0"
zope-interface = ">=5"
[package.extras]
all-non-platform = ["twisted[conch,http2,serial,test,tls]", "twisted[conch,http2,serial,test,tls]"]
all-non-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)"]
dev = ["coverage (>=6b1,<7)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "twisted[dev-release]", "twistedchecker (>=0.7,<1.0)"]
dev = ["coverage (>=7.5,<8.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "python-subunit (>=1.4,<2.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)"]
dev-release = ["pydoctor (>=23.9.0,<23.10.0)", "pydoctor (>=23.9.0,<23.10.0)", "sphinx (>=6,<7)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "towncrier (>=23.6,<24.0)"]
gtk-platform = ["pygobject", "pygobject", "twisted[all-non-platform]", "twisted[all-non-platform]"]
gtk-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pygobject", "pygobject", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
macos-platform = ["pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "twisted[all-non-platform]", "twisted[all-non-platform]"]
mypy = ["mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"]
osx-platform = ["twisted[macos-platform]", "twisted[macos-platform]"]
macos-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
mypy = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "coverage (>=7.5,<8.0)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "idna (>=2.4)", "mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "priority (>=1.1.0,<2.0)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)", "types-pyopenssl", "types-setuptools"]
osx-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"]
tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"]
windows-platform = ["pywin32 (!=226)", "pywin32 (!=226)", "twisted[all-non-platform]", "twisted[all-non-platform]"]
[[package]]
name = "twisted-iocpsupport"
version = "1.0.2"
description = "An extension for use in the twisted I/O Completion Ports reactor."
optional = false
python-versions = "*"
files = [
{file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"},
{file = "twisted_iocpsupport-1.0.2-cp310-cp310-win32.whl", hash = "sha256:985c06a33f5c0dae92c71a036d1ea63872ee86a21dd9b01e1f287486f15524b4"},
{file = "twisted_iocpsupport-1.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:81b3abe3527b367da0220482820cb12a16c661672b7bcfcde328902890d63323"},
{file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win32.whl", hash = "sha256:9dbb8823b49f06d4de52721b47de4d3b3026064ef4788ce62b1a21c57c3fff6f"},
{file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b9fed67cf0f951573f06d560ac2f10f2a4bbdc6697770113a2fc396ea2cb2565"},
{file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win32.whl", hash = "sha256:b76b4eed9b27fd63ddb0877efdd2d15835fdcb6baa745cb85b66e5d016ac2878"},
{file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:851b3735ca7e8102e661872390e3bce88f8901bece95c25a0c8bb9ecb8a23d32"},
{file = "twisted_iocpsupport-1.0.2-cp38-cp38-win32.whl", hash = "sha256:bf4133139d77fc706d8f572e6b7d82871d82ec7ef25d685c2351bdacfb701415"},
{file = "twisted_iocpsupport-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:306becd6e22ab6e8e4f36b6bdafd9c92e867c98a5ce517b27fdd27760ee7ae41"},
{file = "twisted_iocpsupport-1.0.2-cp39-cp39-win32.whl", hash = "sha256:3c61742cb0bc6c1ac117a7e5f422c129832f0c295af49e01d8a6066df8cfc04d"},
{file = "twisted_iocpsupport-1.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:b435857b9efcbfc12f8c326ef0383f26416272260455bbca2cd8d8eca470c546"},
{file = "twisted_iocpsupport-1.0.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:7d972cfa8439bdcb35a7be78b7ef86d73b34b808c74be56dfa785c8a93b851bf"},
]
windows-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "twisted-iocpsupport (>=1.0.2)", "twisted-iocpsupport (>=1.0.2)"]
[[package]]
name = "txredisapi"
@@ -2909,13 +2875,13 @@ files = [
[[package]]
name = "types-pyopenssl"
version = "24.1.0.20240425"
version = "24.1.0.20240722"
description = "Typing stubs for pyOpenSSL"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-pyOpenSSL-24.1.0.20240425.tar.gz", hash = "sha256:0a7e82626c1983dc8dc59292bf20654a51c3c3881bcbb9b337c1da6e32f0204e"},
{file = "types_pyOpenSSL-24.1.0.20240425-py3-none-any.whl", hash = "sha256:f51a156835555dd2a1f025621e8c4fbe7493470331afeef96884d1d29bf3a473"},
{file = "types-pyOpenSSL-24.1.0.20240722.tar.gz", hash = "sha256:47913b4678a01d879f503a12044468221ed8576263c1540dcb0484ca21b08c39"},
{file = "types_pyOpenSSL-24.1.0.20240722-py3-none-any.whl", hash = "sha256:6a7a5d2ec042537934cfb4c9d4deb0e16c4c6250b09358df1f083682fe6fda54"},
]
[package.dependencies]
@@ -2949,13 +2915,13 @@ urllib3 = ">=2"
[[package]]
name = "types-setuptools"
version = "70.1.0.20240627"
version = "71.1.0.20240726"
description = "Typing stubs for setuptools"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-setuptools-70.1.0.20240627.tar.gz", hash = "sha256:385907a47b5cf302b928ce07953cd91147d5de6f3da604c31905fdf0ec309e83"},
{file = "types_setuptools-70.1.0.20240627-py3-none-any.whl", hash = "sha256:c7bdf05cd0a8b66868b4774c7b3c079d01ae025d8c9562bfc8bf2ff44d263c9c"},
{file = "types-setuptools-71.1.0.20240726.tar.gz", hash = "sha256:85ba28e9461bb1be86ebba4db0f1c2408f2b11115b1966334ea9dc464e29303e"},
{file = "types_setuptools-71.1.0.20240726-py3-none-any.whl", hash = "sha256:a7775376f36e0ff09bcad236bf265777590a66b11623e48c20bfc30f1444ea36"},
]
[[package]]
@@ -3230,4 +3196,4 @@ user-search = ["pyicu"]
[metadata]
lock-version = "2.0"
python-versions = "^3.8.0"
content-hash = "3372a97db99050a34f8eddad2ddf8efe8b7b704b6123df4a3e36ddc171e8f34d"
content-hash = "c165cdc1f6612c9f1b5bfd8063c23e2d595d717dd8ac1a468519e902be2cdf93"

View File

@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.111.0"
version = "1.113.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -201,8 +201,8 @@ netaddr = ">=0.7.18"
# add a lower bound to the Jinja2 dependency.
Jinja2 = ">=3.0"
bleach = ">=1.4.3"
# We use `Self`, which were added in `typing-extensions` 4.0.
typing-extensions = ">=4.0"
# We use `assert_never`, which was added in `typing-extensions` 4.1.
typing-extensions = ">=4.1"
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
cryptography = ">=3.4.7"
@@ -322,7 +322,7 @@ all = [
# This helps prevents merge conflicts when running a batch of dependabot updates.
isort = ">=5.10.1"
black = ">=22.7.0"
ruff = "0.5.0"
ruff = "0.5.5"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"

View File

@@ -225,6 +225,11 @@ class EventContentFields:
# This is deprecated in MSC2175.
ROOM_CREATOR: Final = "creator"
# The version of the room for `m.room.create` events.
ROOM_VERSION: Final = "room_version"
ROOM_NAME: Final = "name"
# Used in m.room.guest_access events.
GUEST_ACCESS: Final = "guest_access"
@@ -237,6 +242,9 @@ class EventContentFields:
# an unspecced field added to to-device messages to identify them uniquely-ish
TO_DEVICE_MSGID: Final = "org.matrix.msgid"
# `m.room.encryption`` algorithm field
ENCRYPTION_ALGORITHM: Final = "algorithm"
class EventUnsignedContentFields:
"""Fields found inside the 'unsigned' data on events"""

View File

@@ -128,6 +128,10 @@ class Codes(str, Enum):
# MSC2677
DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION"
# MSC3575 we are telling the client they need to expire their sliding sync
# connection.
UNKNOWN_POS = "M_UNKNOWN_POS"
class CodeMessageException(RuntimeError):
"""An exception with integer code, a message string attributes and optional headers.
@@ -847,3 +851,17 @@ class PartialStateConflictError(SynapseError):
msg=PartialStateConflictError.message(),
errcode=Codes.UNKNOWN,
)
class SlidingSyncUnknownPosition(SynapseError):
"""An error that Synapse can return to signal to the client to expire their
sliding sync connection (i.e. send a new request without a `?since=`
param).
"""
def __init__(self) -> None:
super().__init__(
HTTPStatus.BAD_REQUEST,
msg="Unknown position",
errcode=Codes.UNKNOWN_POS,
)
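A client that receives `M_UNKNOWN_POS` is expected to throw away its position token and start a new sliding sync connection. A minimal client-side sketch of that handling (the endpoint path and request shape are illustrative assumptions, not part of this changeset):

```python
# Hypothetical client-side handling of M_UNKNOWN_POS: drop the `pos` token
# and restart the sliding sync connection from scratch.
from typing import Optional
import requests

SYNC_PATH = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"

def sliding_sync(base_url: str, access_token: str, body: dict, pos: Optional[str]) -> dict:
    resp = requests.post(
        base_url + SYNC_PATH,
        headers={"Authorization": f"Bearer {access_token}"},
        params={"pos": pos} if pos is not None else None,
        json=body,
    )
    data = resp.json()
    if resp.status_code == 400 and data.get("errcode") == "M_UNKNOWN_POS":
        # The server expired our connection: retry without a position token,
        # which starts a fresh sliding sync connection.
        return sliding_sync(base_url, access_token, body, pos=None)
    resp.raise_for_status()
    return data
```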

View File

@@ -384,6 +384,11 @@ class ServerConfig(Config):
# Whether to internally track presence, requires that presence is enabled,
self.track_presence = self.presence_enabled and presence_enabled != "untracked"
# Determines if presence results for offline users are included on initial/full sync
self.presence_include_offline_users_on_sync = presence_config.get(
"include_offline_users_on_sync", False
)
# Custom presence router module
# This is the legacy way of configuring it (the config should now be put in the modules section)
self.presence_router_module_class = None

View File

@@ -554,3 +554,22 @@ def relation_from_event(event: EventBase) -> Optional[_EventRelation]:
aggregation_key = None
return _EventRelation(parent_id, rel_type, aggregation_key)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class StrippedStateEvent:
"""
A stripped-down state event. Usually used for remote invites/knocks so the user can
make an informed decision on whether they want to join.
Attributes:
type: Event `type`
state_key: Event `state_key`
sender: Event `sender`
content: Event `content`
"""
type: str
state_key: str
sender: str
content: Dict[str, Any]

View File

@@ -49,7 +49,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.types import JsonDict, Requester
from . import EventBase, make_event_from_dict
from . import EventBase, StrippedStateEvent, make_event_from_dict
if TYPE_CHECKING:
from synapse.handlers.relations import BundledAggregations
@@ -854,3 +854,30 @@ def strip_event(event: EventBase) -> JsonDict:
"content": event.content,
"sender": event.sender,
}
def parse_stripped_state_event(raw_stripped_event: Any) -> Optional[StrippedStateEvent]:
"""
Given a raw value from an event's `unsigned` field, attempt to parse it into a
`StrippedStateEvent`.
"""
if isinstance(raw_stripped_event, dict):
# All of these fields are required
type = raw_stripped_event.get("type")
state_key = raw_stripped_event.get("state_key")
sender = raw_stripped_event.get("sender")
content = raw_stripped_event.get("content")
if (
isinstance(type, str)
and isinstance(state_key, str)
and isinstance(sender, str)
and isinstance(content, dict)
):
return StrippedStateEvent(
type=type,
state_key=state_key,
sender=sender,
content=content,
)
return None
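A quick sketch of how the parser behaves (the raw dicts below are made-up examples of what a remote server might place in a stripped-state list):

```python
from synapse.events.utils import parse_stripped_state_event

raw = {
    "type": "m.room.name",
    "state_key": "",
    "sender": "@alice:example.org",
    "content": {"name": "Example room"},
}
stripped = parse_stripped_state_event(raw)
assert stripped is not None and stripped.content["name"] == "Example room"

# A missing required field or a non-dict value yields None rather than raising.
assert parse_stripped_state_event({"type": "m.room.name"}) is None
assert parse_stripped_state_event("not a dict") is None
```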

View File

@@ -20,10 +20,20 @@
#
#
import logging
from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Set, Tuple
from typing import (
TYPE_CHECKING,
AbstractSet,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
)
from synapse.api import errors
from synapse.api.constants import EduTypes, EventTypes
from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.errors import (
Codes,
FederationDeniedError,
@@ -38,7 +48,9 @@ from synapse.metrics.background_process_metrics import (
wrap_as_background_process,
)
from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.types import (
DeviceListUpdates,
JsonDict,
JsonMapping,
ScheduledTask,
@@ -214,138 +226,192 @@ class DeviceWorkerHandler:
@cancellable
async def get_user_ids_changed(
self, user_id: str, from_token: StreamToken
) -> JsonDict:
) -> DeviceListUpdates:
"""Get list of users that have had the devices updated, or have newly
joined a room, that `user_id` may be interested in.
"""
set_tag("user_id", user_id)
set_tag("from_token", str(from_token))
now_room_key = self.store.get_room_max_token()
room_ids = await self.store.get_rooms_for_user(user_id)
now_token = self._event_sources.get_current_token()
changed = await self.get_device_changes_in_shared_rooms(
user_id, room_ids, from_token
joined_room_ids = await self.store.get_rooms_for_user(user_id)
membership_changes = (
await self.store.get_current_state_delta_membership_changes_for_user(
user_id, from_key=from_token.room_key, to_key=now_token.room_key
)
)
newly_joined_rooms = set()
newly_left_rooms = set()
for change in membership_changes:
if change.membership == Membership.JOIN:
if change.prev_membership != Membership.JOIN:
newly_joined_rooms.add(change.room_id)
elif change.prev_membership == Membership.JOIN:
newly_left_rooms.add(change.room_id)
newly_left_rooms -= newly_joined_rooms
# Then work out if any users have since joined
rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)
member_events = await self.store.get_membership_changes_for_user(
user_id, from_token.room_key, now_room_key
rooms_changed = self.store.get_rooms_that_changed(
joined_room_ids, from_token.room_key
)
rooms_changed.update(event.room_id for event in member_events)
stream_ordering = from_token.room_key.stream
possibly_changed = set(changed)
possibly_left = set()
room_to_deltas: Dict[str, List[StateDelta]] = {}
memberships_to_fetch: Set[str] = set()
for room_id in rooms_changed:
# Check if the forward extremities have changed. If not then we know
# the current state won't have changed, and so we can skip this room.
try:
if not await self.store.have_room_forward_extremities_changed_since(
room_id, stream_ordering
):
continue
except errors.StoreError:
pass
current_state_ids = await self._state_storage.get_current_state_ids(
room_id, await_full_state=False
# TODO: Only pull out membership events?
state_changes = await self.store.get_current_state_deltas_for_room(
room_id, from_token=from_token.room_key, to_token=now_token.room_key
)
# The user may have left the room
# TODO: Check if they actually did or if we were just invited.
if room_id not in room_ids:
for etype, state_key in current_state_ids.keys():
if etype != EventTypes.Member:
continue
possibly_left.add(state_key)
continue
# Fetch the current state at the time.
try:
event_ids = await self.store.get_forward_extremities_for_room_at_stream_ordering(
room_id, stream_ordering=stream_ordering
)
except errors.StoreError:
# we have purged the stream_ordering index since the stream
# ordering: treat it the same as a new room
event_ids = []
# special-case for an empty prev state: include all members
# in the changed list
if not event_ids:
log_kv(
{"event": "encountered empty previous state", "room_id": room_id}
)
for etype, state_key in current_state_ids.keys():
if etype != EventTypes.Member:
continue
possibly_changed.add(state_key)
continue
current_member_id = current_state_ids.get((EventTypes.Member, user_id))
if not current_member_id:
continue
# mapping from event_id -> state_dict
prev_state_ids = await self._state_storage.get_state_ids_for_events(
event_ids,
await_full_state=False,
)
# Check if we've joined the room? If so we just blindly add all the users to
# the "possibly changed" users.
for state_dict in prev_state_ids.values():
member_event = state_dict.get((EventTypes.Member, user_id), None)
if not member_event or member_event != current_member_id:
for etype, state_key in current_state_ids.keys():
if etype != EventTypes.Member:
continue
possibly_changed.add(state_key)
break
# If there has been any change in membership, include them in the
# possibly changed list. We'll check if they are joined below,
# and we're not toooo worried about spuriously adding users.
for key, event_id in current_state_ids.items():
etype, state_key = key
if etype != EventTypes.Member:
for delta in state_changes:
if delta.event_type != EventTypes.Member:
continue
# check if this member has changed since any of the extremities
# at the stream_ordering, and add them to the list if so.
for state_dict in prev_state_ids.values():
prev_event_id = state_dict.get(key, None)
if not prev_event_id or prev_event_id != event_id:
if state_key != user_id:
possibly_changed.add(state_key)
break
room_to_deltas.setdefault(room_id, []).append(delta)
if delta.event_id:
memberships_to_fetch.add(delta.event_id)
if delta.prev_event_id:
memberships_to_fetch.add(delta.prev_event_id)
if possibly_changed or possibly_left:
possibly_joined = possibly_changed
possibly_left = possibly_changed | possibly_left
event_id_to_memberships = await self.store.get_membership_from_event_ids(
memberships_to_fetch
)
# Double check if we still share rooms with the given user.
users_rooms = await self.store.get_rooms_for_users(possibly_left)
for changed_user_id, entries in users_rooms.items():
if any(rid in room_ids for rid in entries):
possibly_left.discard(changed_user_id)
else:
possibly_joined.discard(changed_user_id)
joined_invited_knocked = (
Membership.JOIN,
Membership.INVITE,
Membership.KNOCK,
)
else:
possibly_joined = set()
possibly_left = set()
newly_joined_or_invited_or_knocked_users = set()
newly_left_users = set()
for _, deltas in room_to_deltas.items():
for delta in deltas:
new_membership = None
prev_membership = None
result = {"changed": list(possibly_joined), "left": list(possibly_left)}
if delta.event_id:
m = event_id_to_memberships.get(delta.event_id)
if m is not None:
new_membership = m.membership
if delta.prev_event_id:
m = event_id_to_memberships.get(delta.prev_event_id)
if m is not None:
prev_membership = m.membership
log_kv(result)
if new_membership in joined_invited_knocked:
if prev_membership not in joined_invited_knocked:
newly_joined_or_invited_or_knocked_users.add(delta.state_key)
elif prev_membership in joined_invited_knocked:
newly_left_users.add(delta.state_key)
return result
newly_left_users -= newly_joined_or_invited_or_knocked_users
device_list_updates = await self.generate_sync_entry_for_device_list(
user_id=user_id,
since_token=from_token,
now_token=now_token,
joined_room_ids=joined_room_ids,
newly_joined_rooms=newly_joined_rooms,
newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
newly_left_rooms=newly_left_rooms,
newly_left_users=newly_left_users,
)
log_kv(
{
"changed": device_list_updates.changed,
"left": device_list_updates.left,
}
)
return device_list_updates
@measure_func("_generate_sync_entry_for_device_list")
async def generate_sync_entry_for_device_list(
self,
user_id: str,
since_token: StreamToken,
now_token: StreamToken,
joined_room_ids: AbstractSet[str],
newly_joined_rooms: AbstractSet[str],
newly_joined_or_invited_or_knocked_users: AbstractSet[str],
newly_left_rooms: AbstractSet[str],
newly_left_users: AbstractSet[str],
) -> DeviceListUpdates:
"""Generate the DeviceListUpdates section of sync
Args:
newly_joined_rooms: Set of rooms user has joined since previous sync
newly_joined_or_invited_or_knocked_users: Set of users that have joined,
been invited to a room or are knocking on a room since
previous sync.
newly_left_rooms: Set of rooms user has left since previous sync
newly_left_users: Set of users that have left a room we're in since
previous sync
"""
# Take a copy since these fields will be mutated later.
newly_joined_or_invited_or_knocked_users = set(
newly_joined_or_invited_or_knocked_users
)
newly_left_users = set(newly_left_users)
# We want to figure out what user IDs the client should refetch
# device keys for, and which users we aren't going to track changes
# for anymore.
#
# For the first step we check:
# a. if any users we share a room with have updated their devices,
# and
# b. we also check if we've joined any new rooms, or if a user has
# joined a room we're in.
#
# For the second step we just find any users we no longer share a
# room with by looking at all users that have left a room plus users
# that were in a room we've left.
users_that_have_changed = set()
# Step 1a, check for changes in devices of users we share a room
# with
users_that_have_changed = await self.get_device_changes_in_shared_rooms(
user_id,
joined_room_ids,
from_token=since_token,
now_token=now_token,
)
# Step 1b, check for newly joined rooms
for room_id in newly_joined_rooms:
joined_users = await self.store.get_users_in_room(room_id)
newly_joined_or_invited_or_knocked_users.update(joined_users)
# TODO: Check that these users are actually new, i.e. either they
# weren't in the previous sync *or* they left and rejoined.
users_that_have_changed.update(newly_joined_or_invited_or_knocked_users)
user_signatures_changed = await self.store.get_users_whose_signatures_changed(
user_id, since_token.device_list_key
)
users_that_have_changed.update(user_signatures_changed)
# Now find users that we no longer track
for room_id in newly_left_rooms:
left_users = await self.store.get_users_in_room(room_id)
newly_left_users.update(left_users)
# Remove any users that we still share a room with.
left_users_rooms = await self.store.get_rooms_for_users(newly_left_users)
for user_id, entries in left_users_rooms.items():
if any(rid in joined_room_ids for rid in entries):
newly_left_users.discard(user_id)
return DeviceListUpdates(changed=users_that_have_changed, left=newly_left_users)
async def on_federation_query_user_devices(self, user_id: str) -> JsonDict:
if not self.hs.is_mine(UserID.from_string(user_id)):

View File

@@ -291,13 +291,20 @@ class E2eKeysHandler:
# Only try and fetch keys for destinations that are not marked as
# down.
filtered_destinations = await filter_destinations_by_retry_limiter(
remote_queries_not_in_cache.keys(),
self.clock,
self.store,
# Let's give an arbitrary grace period for those hosts that are
# only recently down
retry_due_within_ms=60 * 1000,
unfiltered_destinations = remote_queries_not_in_cache.keys()
filtered_destinations = set(
await filter_destinations_by_retry_limiter(
unfiltered_destinations,
self.clock,
self.store,
# Let's give an arbitrary grace period for those hosts that are
# only recently down
retry_due_within_ms=60 * 1000,
)
)
failures.update(
(dest, _NOT_READY_FOR_RETRY_FAILURE)
for dest in (unfiltered_destinations - filtered_destinations)
)
await concurrently_execute(
@@ -1641,6 +1648,9 @@ def _check_device_signature(
raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE)
_NOT_READY_FOR_RETRY_FAILURE = {"status": 503, "message": "Not ready for retry"}
def _exception_to_failure(e: Exception) -> JsonDict:
if isinstance(e, SynapseError):
return {"status": e.code, "errcode": e.errcode, "message": str(e)}
@@ -1649,7 +1659,7 @@ def _exception_to_failure(e: Exception) -> JsonDict:
return {"status": e.code, "message": str(e)}
if isinstance(e, NotRetryingDestination):
return {"status": 503, "message": "Not ready for retry"}
return _NOT_READY_FOR_RETRY_FAILURE
# include ConnectionRefused and other errors
#

View File

@@ -34,7 +34,7 @@ from synapse.api.errors import (
from synapse.logging.opentracing import log_kv, trace
from synapse.storage.databases.main.e2e_room_keys import RoomKey
from synapse.types import JsonDict
from synapse.util.async_helpers import Linearizer
from synapse.util.async_helpers import ReadWriteLock
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -58,7 +58,7 @@ class E2eRoomKeysHandler:
# clients belonging to a user will receive and try to upload a new session at
# roughly the same time. Also used to lock out uploads when the key is being
# changed.
self._upload_linearizer = Linearizer("upload_room_keys_lock")
self._upload_lock = ReadWriteLock()
@trace
async def get_room_keys(
@@ -89,7 +89,7 @@ class E2eRoomKeysHandler:
# we deliberately take the lock to get keys so that changing the version
# works atomically
async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.read(user_id):
# make sure the backup version exists
try:
await self.store.get_e2e_room_keys_version_info(user_id, version)
@@ -132,7 +132,7 @@ class E2eRoomKeysHandler:
"""
# lock for consistency with uploading
async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.write(user_id):
# make sure the backup version exists
try:
version_info = await self.store.get_e2e_room_keys_version_info(
@@ -193,7 +193,7 @@ class E2eRoomKeysHandler:
# TODO: Validate the JSON to make sure it has the right keys.
# XXX: perhaps we should use a finer grained lock here?
async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.write(user_id):
# Check that the version we're trying to upload is the current version
try:
version_info = await self.store.get_e2e_room_keys_version_info(user_id)
@@ -355,7 +355,7 @@ class E2eRoomKeysHandler:
# TODO: Validate the JSON to make sure it has the right keys.
# lock everyone out until we've switched version
async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.write(user_id):
new_version = await self.store.create_e2e_room_keys_version(
user_id, version_info
)
@@ -382,7 +382,7 @@ class E2eRoomKeysHandler:
}
"""
async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.read(user_id):
try:
res = await self.store.get_e2e_room_keys_version_info(user_id, version)
except StoreError as e:
@@ -407,7 +407,7 @@ class E2eRoomKeysHandler:
NotFoundError: if this backup version doesn't exist
"""
async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.write(user_id):
try:
await self.store.delete_e2e_room_keys_version(user_id, version)
except StoreError as e:
@@ -437,7 +437,7 @@ class E2eRoomKeysHandler:
raise SynapseError(
400, "Version in body does not match", Codes.INVALID_PARAM
)
async with self._upload_linearizer.queue(user_id):
async with self._upload_lock.write(user_id):
try:
old_info = await self.store.get_e2e_room_keys_version_info(
user_id, version
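The switch from `Linearizer` to `ReadWriteLock` lets concurrent reads (fetching keys or version info) proceed in parallel, while writes (uploads, version changes, deletions) keep exclusive access per user. A minimal sketch of the intended semantics, with hypothetical DB helpers:

```python
from synapse.util.async_helpers import ReadWriteLock

lock = ReadWriteLock()

async def get_version_info(user_id: str) -> dict:
    # Shared access: many readers for the same user may run at once.
    async with lock.read(user_id):
        return await load_version_from_db(user_id)  # hypothetical helper

async def delete_version(user_id: str, version: str) -> None:
    # Exclusive access: waits for in-flight readers and blocks new ones.
    async with lock.write(user_id):
        await delete_version_in_db(user_id, version)  # hypothetical helper
```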

View File

@@ -286,8 +286,14 @@ class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]):
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
to_key: Optional[MultiWriterStreamToken] = None,
) -> Tuple[List[JsonMapping], MultiWriterStreamToken]:
to_key = self.get_current_key()
"""
Find read receipts for given rooms (> `from_token` and <= `to_token`)
"""
if to_key is None:
to_key = self.get_current_key()
if from_key == to_key:
return [], to_key

File diff suppressed because it is too large

View File

@@ -293,7 +293,9 @@ class StatsHandler:
"history_visibility"
)
elif delta.event_type == EventTypes.RoomEncryption:
room_state["encryption"] = event_content.get("algorithm")
room_state["encryption"] = event_content.get(
EventContentFields.ENCRYPTION_ALGORITHM
)
elif delta.event_type == EventTypes.Name:
room_state["name"] = event_content.get("name")
elif delta.event_type == EventTypes.Topic:

View File

@@ -84,7 +84,7 @@ from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.lrucache import LruCache
from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext
from synapse.util.metrics import Measure, measure_func
from synapse.util.metrics import Measure
from synapse.visibility import filter_events_for_client
if TYPE_CHECKING:
@@ -1750,8 +1750,15 @@ class SyncHandler:
)
if include_device_list_updates:
device_lists = await self._generate_sync_entry_for_device_list(
sync_result_builder,
# include_device_list_updates can only be True if we have a
# since token.
assert since_token is not None
device_lists = await self._device_handler.generate_sync_entry_for_device_list(
user_id=user_id,
since_token=since_token,
now_token=sync_result_builder.now_token,
joined_room_ids=sync_result_builder.joined_room_ids,
newly_joined_rooms=newly_joined_rooms,
newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
newly_left_rooms=newly_left_rooms,
@@ -1863,8 +1870,14 @@ class SyncHandler:
newly_left_users,
) = sync_result_builder.calculate_user_changes()
device_lists = await self._generate_sync_entry_for_device_list(
sync_result_builder,
# include_device_list_updates can only be True if we have a
# since token.
assert since_token is not None
device_lists = await self._device_handler.generate_sync_entry_for_device_list(
user_id=user_id,
since_token=since_token,
now_token=sync_result_builder.now_token,
joined_room_ids=sync_result_builder.joined_room_ids,
newly_joined_rooms=newly_joined_rooms,
newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
newly_left_rooms=newly_left_rooms,
@@ -2041,94 +2054,6 @@ class SyncHandler:
return sync_result_builder
@measure_func("_generate_sync_entry_for_device_list")
async def _generate_sync_entry_for_device_list(
self,
sync_result_builder: "SyncResultBuilder",
newly_joined_rooms: AbstractSet[str],
newly_joined_or_invited_or_knocked_users: AbstractSet[str],
newly_left_rooms: AbstractSet[str],
newly_left_users: AbstractSet[str],
) -> DeviceListUpdates:
"""Generate the DeviceListUpdates section of sync
Args:
sync_result_builder
newly_joined_rooms: Set of rooms user has joined since previous sync
newly_joined_or_invited_or_knocked_users: Set of users that have joined,
been invited to a room or are knocking on a room since
previous sync.
newly_left_rooms: Set of rooms user has left since previous sync
newly_left_users: Set of users that have left a room we're in since
previous sync
"""
user_id = sync_result_builder.sync_config.user.to_string()
since_token = sync_result_builder.since_token
assert since_token is not None
# Take a copy since these fields will be mutated later.
newly_joined_or_invited_or_knocked_users = set(
newly_joined_or_invited_or_knocked_users
)
newly_left_users = set(newly_left_users)
# We want to figure out what user IDs the client should refetch
# device keys for, and which users we aren't going to track changes
# for anymore.
#
# For the first step we check:
# a. if any users we share a room with have updated their devices,
# and
# b. we also check if we've joined any new rooms, or if a user has
# joined a room we're in.
#
# For the second step we just find any users we no longer share a
# room with by looking at all users that have left a room plus users
# that were in a room we've left.
users_that_have_changed = set()
joined_room_ids = sync_result_builder.joined_room_ids
# Step 1a, check for changes in devices of users we share a room
# with
users_that_have_changed = (
await self._device_handler.get_device_changes_in_shared_rooms(
user_id,
joined_room_ids,
from_token=since_token,
now_token=sync_result_builder.now_token,
)
)
# Step 1b, check for newly joined rooms
for room_id in newly_joined_rooms:
joined_users = await self.store.get_users_in_room(room_id)
newly_joined_or_invited_or_knocked_users.update(joined_users)
# TODO: Check that these users are actually new, i.e. either they
# weren't in the previous sync *or* they left and rejoined.
users_that_have_changed.update(newly_joined_or_invited_or_knocked_users)
user_signatures_changed = await self.store.get_users_whose_signatures_changed(
user_id, since_token.device_list_key
)
users_that_have_changed.update(user_signatures_changed)
# Now find users that we no longer track
for room_id in newly_left_rooms:
left_users = await self.store.get_users_in_room(room_id)
newly_left_users.update(left_users)
# Remove any users that we still share a room with.
left_users_rooms = await self.store.get_rooms_for_users(newly_left_users)
for user_id, entries in left_users_rooms.items():
if any(rid in joined_room_ids for rid in entries):
newly_left_users.discard(user_id)
return DeviceListUpdates(changed=users_that_have_changed, left=newly_left_users)
@trace
async def _generate_sync_entry_for_to_device(
self, sync_result_builder: "SyncResultBuilder"
@@ -2270,7 +2195,11 @@ class SyncHandler:
user=user,
from_key=presence_key,
is_guest=sync_config.is_guest,
include_offline=include_offline,
include_offline=(
True
if self.hs_config.server.presence_include_offline_users_on_sync
else include_offline
),
)
assert presence_key
sync_result_builder.now_token = now_token.copy_and_replace(

View File

@@ -565,7 +565,12 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]):
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
to_key: Optional[int] = None,
) -> Tuple[List[JsonMapping], int]:
"""
Find typing notifications for given rooms (> `from_token` and <= `to_token`)
"""
with Measure(self.clock, "typing.get_new_events"):
from_key = int(from_key)
handler = self.get_typing_handler()
@@ -574,7 +579,9 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]):
for room_id in room_ids:
if room_id not in handler._room_serials:
continue
if handler._room_serials[room_id] <= from_key:
if handler._room_serials[room_id] <= from_key or (
to_key is not None and handler._room_serials[room_id] > to_key
):
continue
events.append(self._make_event_for(room_id))

View File

@@ -62,6 +62,15 @@ HOP_BY_HOP_HEADERS = {
"Upgrade",
}
if hasattr(Headers, "_canonicalNameCaps"):
# Twisted < 24.7.0rc1
_canonicalHeaderName = Headers()._canonicalNameCaps # type: ignore[attr-defined]
else:
# Twisted >= 24.7.0rc1
# But note that `_encodeName` still exists on prior versions,
# it just encodes differently
_canonicalHeaderName = Headers()._encodeName
def parse_connection_header_value(
connection_header_value: Optional[bytes],
@@ -85,11 +94,10 @@ def parse_connection_header_value(
The set of header names that should not be copied over from the remote response.
The keys are capitalized in canonical capitalization.
"""
headers = Headers()
extra_headers_to_remove: Set[str] = set()
if connection_header_value:
extra_headers_to_remove = {
headers._canonicalNameCaps(connection_option.strip()).decode("ascii")
_canonicalHeaderName(connection_option.strip()).decode("ascii")
for connection_option in connection_header_value.split(b",")
}
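Illustratively, the shim should make both Twisted code paths produce the same canonically-capitalized names (behaviour assumed from Twisted's header canonicalization, not verified against every version):

```python
# Expected behaviour of the compat shim above, on old and new Twisted alike.
assert parse_connection_header_value(b"close, x-foo") == {"Close", "X-Foo"}
assert parse_connection_header_value(None) == set()
```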

View File

@@ -74,6 +74,7 @@ from synapse.api.errors import (
from synapse.config.homeserver import HomeServerConfig
from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background
from synapse.logging.opentracing import active_span, start_active_span, trace_servlet
from synapse.types import ISynapseReactor
from synapse.util import json_encoder
from synapse.util.caches import intern_dict
from synapse.util.cancellation import is_function_cancellable
@@ -868,7 +869,8 @@ async def _async_write_json_to_request_in_thread(
with start_active_span("encode_json_response"):
span = active_span()
json_str = await defer_to_thread(request.reactor, encode, span)
reactor: ISynapseReactor = request.reactor # type: ignore
json_str = await defer_to_thread(reactor, encode, span)
_write_bytes_to_request(request, json_str)

View File

@@ -683,7 +683,7 @@ class SynapseSite(ProxySite):
self.access_logger = logging.getLogger(logger_name)
self.server_version_string = server_version_string.encode("ascii")
def log(self, request: SynapseRequest) -> None:
def log(self, request: SynapseRequest) -> None: # type: ignore[override]
pass

View File

@@ -773,6 +773,7 @@ class Notifier:
stream_token = await self.event_sources.bound_future_token(stream_token)
start = self.clock.time_msec()
logged = False
while True:
current_token = self.event_sources.get_current_token()
if stream_token.is_before_or_eq(current_token):
@@ -783,11 +784,13 @@ class Notifier:
if now - start > 10_000:
return False
logger.info(
"Waiting for current token to reach %s; currently at %s",
stream_token,
current_token,
)
if not logged:
logger.info(
"Waiting for current token to reach %s; currently at %s",
stream_token,
current_token,
)
logged = True
# TODO: be better
await self.clock.sleep(0.5)

View File

@@ -256,9 +256,15 @@ class KeyChangesServlet(RestServlet):
user_id = requester.user.to_string()
results = await self.device_handler.get_user_ids_changed(user_id, from_token)
device_list_updates = await self.device_handler.get_user_ids_changed(
user_id, from_token
)
return 200, results
response: JsonDict = {}
response["changed"] = list(device_list_updates.changed)
response["left"] = list(device_list_updates.left)
return 200, response
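For reference, the resulting `/keys/changes` response body then looks like this (values made up):

```python
{
    "changed": ["@alice:example.org", "@bob:example.org"],
    "left": ["@mallory:example.org"],
}
```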
class OneTimeKeyServlet(RestServlet):

View File

@@ -52,9 +52,9 @@ from synapse.http.servlet import (
parse_string,
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import trace_with_opname
from synapse.logging.opentracing import log_kv, set_tag, trace_with_opname
from synapse.rest.admin.experimental_features import ExperimentalFeature
from synapse.types import JsonDict, Requester, StreamToken
from synapse.types import JsonDict, Requester, SlidingSyncStreamToken, StreamToken
from synapse.types.rest.client import SlidingSyncBody
from synapse.util import json_decoder
from synapse.util.caches.lrucache import LruCache
@@ -881,7 +881,6 @@ class SlidingSyncRestServlet(RestServlet):
)
user = requester.user
device_id = requester.device_id
timeout = parse_integer(request, "timeout", default=0)
# Position in the stream
@@ -889,22 +888,50 @@ class SlidingSyncRestServlet(RestServlet):
from_token = None
if from_token_string is not None:
from_token = await StreamToken.from_string(self.store, from_token_string)
from_token = await SlidingSyncStreamToken.from_string(
self.store, from_token_string
)
# TODO: We currently don't know whether we're going to use sticky params or
# maybe some filters like sync v2 where they are built up once and referenced
# by filter ID. For now, we will just prototype with always passing everything
# in.
body = parse_and_validate_json_object_from_request(request, SlidingSyncBody)
logger.info("Sliding sync request: %r", body)
# Tag and log useful data to differentiate requests.
set_tag(
"sliding_sync.sync_type", "initial" if from_token is None else "incremental"
)
set_tag("sliding_sync.conn_id", body.conn_id or "")
log_kv(
{
"sliding_sync.lists": {
list_name: {
"ranges": list_config.ranges,
"timeline_limit": list_config.timeline_limit,
}
for list_name, list_config in (body.lists or {}).items()
},
"sliding_sync.room_subscriptions": list(
(body.room_subscriptions or {}).keys()
),
# We also include the number of room subscriptions because logs are
# limited to 1024 characters and the large room ID list above can be cut
# off.
"sliding_sync.num_room_subscriptions": len(
(body.room_subscriptions or {}).keys()
),
}
)
sync_config = SlidingSyncConfig(
user=user,
device_id=device_id,
requester=requester,
# FIXME: Currently, we're just manually copying the fields from the
# `SlidingSyncBody` into the config. How can we gurantee into the future
# `SlidingSyncBody` into the config. How can we guarantee into the future
# that we don't forget any? I would like something more structured like
# `copy_attributes(from=body, to=config)`
conn_id=body.conn_id,
lists=body.lists,
room_subscriptions=body.room_subscriptions,
extensions=body.extensions,
@@ -927,7 +954,6 @@ class SlidingSyncRestServlet(RestServlet):
return 200, response_content
# TODO: Is there a better way to encode things?
async def encode_response(
self,
requester: Requester,
@@ -1081,15 +1107,69 @@ class SlidingSyncRestServlet(RestServlet):
async def encode_extensions(
self, requester: Requester, extensions: SlidingSyncResult.Extensions
) -> JsonDict:
result = {}
serialized_extensions: JsonDict = {}
if extensions.to_device is not None:
result["to_device"] = {
serialized_extensions["to_device"] = {
"next_batch": extensions.to_device.next_batch,
"events": extensions.to_device.events,
}
return result
if extensions.e2ee is not None:
serialized_extensions["e2ee"] = {
# We always include this because
# https://github.com/vector-im/element-android/issues/3725. The spec
# isn't terribly clear on when this can be omitted and how a client
# would tell the difference between "no keys present" and "nothing
# changed" in terms of whole field absent / individual key type entry
# absent. Corresponding synapse issue:
# https://github.com/matrix-org/synapse/issues/10456
"device_one_time_keys_count": extensions.e2ee.device_one_time_keys_count,
# https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md
# states that this field should always be included, as long as the
# server supports the feature.
"device_unused_fallback_key_types": extensions.e2ee.device_unused_fallback_key_types,
}
if extensions.e2ee.device_list_updates is not None:
serialized_extensions["e2ee"]["device_lists"] = {}
serialized_extensions["e2ee"]["device_lists"]["changed"] = list(
extensions.e2ee.device_list_updates.changed
)
serialized_extensions["e2ee"]["device_lists"]["left"] = list(
extensions.e2ee.device_list_updates.left
)
if extensions.account_data is not None:
serialized_extensions["account_data"] = {
# Same as the top-level `account_data.events` field in Sync v2.
"global": [
{"type": account_data_type, "content": content}
for account_data_type, content in extensions.account_data.global_account_data_map.items()
],
# Same as the joined room's account_data field in Sync v2, e.g. the path
# `rooms.join["!foo:bar"].account_data.events`.
"rooms": {
room_id: [
{"type": account_data_type, "content": content}
for account_data_type, content in event_map.items()
]
for room_id, event_map in extensions.account_data.account_data_by_room_map.items()
},
}
if extensions.receipts is not None:
serialized_extensions["receipts"] = {
"rooms": extensions.receipts.room_id_to_receipt_map,
}
if extensions.typing is not None:
serialized_extensions["typing"] = {
"rooms": extensions.typing.room_id_to_typing_map,
}
return serialized_extensions
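Put together, a serialized extensions object might look like the following (values made up; the shape follows the serialization above):

```python
{
    "to_device": {"next_batch": "td_123", "events": []},
    "e2ee": {
        "device_one_time_keys_count": {"signed_curve25519": 50},
        "device_unused_fallback_key_types": ["signed_curve25519"],
        "device_lists": {"changed": ["@alice:example.org"], "left": []},
    },
    "account_data": {
        "global": [{"type": "m.push_rules", "content": {}}],
        "rooms": {"!foo:bar": [{"type": "m.tag", "content": {}}]},
    },
    "receipts": {"rooms": {"!foo:bar": {}}},
    "typing": {"rooms": {"!foo:bar": {}}},
}
```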
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:

View File

@@ -559,6 +559,7 @@ class HomeServer(metaclass=abc.ABCMeta):
def get_sync_handler(self) -> SyncHandler:
return SyncHandler(self)
@cache_in_self
def get_sliding_sync_handler(self) -> SlidingSyncHandler:
return SlidingSyncHandler(self)

View File

@@ -127,6 +127,8 @@ class SQLBaseStore(metaclass=ABCMeta):
# Purge other caches based on room state.
self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,))
self._attempt_to_invalidate_cache("get_room_type", (room_id,))
self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
def _invalidate_state_caches_all(self, room_id: str) -> None:
"""Invalidates caches that are based on the current state, but does
@@ -153,6 +155,8 @@ class SQLBaseStore(metaclass=ABCMeta):
"_get_rooms_for_local_user_where_membership_is_inner", None
)
self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
self._attempt_to_invalidate_cache("get_room_type", (room_id,))
self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
def _attempt_to_invalidate_cache(
self, cache_name: str, key: Optional[Collection[Any]]

View File

@@ -268,13 +268,23 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined]
if data.type == EventTypes.Member:
self.get_rooms_for_user.invalidate((data.state_key,)) # type: ignore[attr-defined]
self._attempt_to_invalidate_cache(
"get_rooms_for_user", (data.state_key,)
)
elif data.type == EventTypes.RoomEncryption:
self._attempt_to_invalidate_cache(
"get_room_encryption", (data.room_id,)
)
elif data.type == EventTypes.Create:
self._attempt_to_invalidate_cache("get_room_type", (data.room_id,))
elif row.type == EventsStreamAllStateRow.TypeId:
assert isinstance(data, EventsStreamAllStateRow)
# Similar to the above, but the entire caches are invalidated. This is
# unfortunate for the membership caches, but should recover quickly.
self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined]
self.get_rooms_for_user.invalidate_all() # type: ignore[attr-defined]
self._attempt_to_invalidate_cache("get_rooms_for_user", None)
self._attempt_to_invalidate_cache("get_room_type", (data.room_id,))
self._attempt_to_invalidate_cache("get_room_encryption", (data.room_id,))
else:
raise Exception("Unknown events stream row type %s" % (row.type,))
@@ -345,6 +355,10 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._attempt_to_invalidate_cache(
"get_forgotten_rooms_for_user", (state_key,)
)
elif etype == EventTypes.Create:
self._attempt_to_invalidate_cache("get_room_type", (room_id,))
elif etype == EventTypes.RoomEncryption:
self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
if relates_to:
self._attempt_to_invalidate_cache(
@@ -405,6 +419,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._attempt_to_invalidate_cache("get_thread_summary", None)
self._attempt_to_invalidate_cache("get_thread_participated", None)
self._attempt_to_invalidate_cache("get_threads", (room_id,))
self._attempt_to_invalidate_cache("get_room_type", (room_id,))
self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
self._attempt_to_invalidate_cache("_get_state_group_for_event", None)
@@ -457,6 +473,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None)
self._attempt_to_invalidate_cache("_get_membership_from_event_id", None)
self._attempt_to_invalidate_cache("get_room_version_id", (room_id,))
self._attempt_to_invalidate_cache("get_room_type", (room_id,))
self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
# And delete state caches.

View File

@@ -1313,6 +1313,11 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
# We want to make the cache more effective, so we clamp to the last
# change before the given ordering.
last_change = self._events_stream_cache.get_max_pos_of_last_change(room_id) # type: ignore[attr-defined]
if last_change is None:
# If the room isn't in the cache we know that the last change was
# somewhere before the earliest known position of the cache, so we
# can clamp to that.
last_change = self._events_stream_cache.get_earliest_known_position() # type: ignore[attr-defined]
# We don't always have a full stream_to_exterm_id table, e.g. after
# the upgrade that introduced it, so we make sure we never ask for a

View File

@@ -39,6 +39,7 @@ from typing import (
import attr
from synapse.api.constants import EventTypes, Membership
from synapse.logging.opentracing import trace
from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
@@ -422,6 +423,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
return invite
return None
@trace
async def get_rooms_for_local_user_where_membership_is(
self,
user_id: str,

View File

@@ -30,6 +30,7 @@ from typing import (
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Set,
Tuple,
@@ -72,10 +73,18 @@ logger = logging.getLogger(__name__)
_T = TypeVar("_T")
MAX_STATE_DELTA_HOPS = 100
# Freeze so it's immutable and we can use it as a cache value
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Sentinel:
pass
ROOM_UNKNOWN_SENTINEL = Sentinel()
@attr.s(slots=True, frozen=True, auto_attribs=True)
class EventMetadata:
"""Returned by `get_metadata_for_events`"""
@@ -300,51 +309,189 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
@cached(max_entries=10000)
async def get_room_type(self, room_id: str) -> Optional[str]:
"""Get the room type for a given room. The server must be joined to the
given room.
"""
row = await self.db_pool.simple_select_one(
table="room_stats_state",
keyvalues={"room_id": room_id},
retcols=("room_type",),
allow_none=True,
desc="get_room_type",
)
if row is not None:
return row[0]
# If we haven't updated `room_stats_state` with the room yet, query the
# create event directly.
create_event = await self.get_create_event_for_room(room_id)
room_type = create_event.content.get(EventContentFields.ROOM_TYPE)
return room_type
raise NotImplementedError()
@cachedList(cached_method_name="get_room_type", list_name="room_ids")
async def bulk_get_room_type(
self, room_ids: Set[str]
) -> Mapping[str, Optional[str]]:
"""Bulk fetch room types for the given rooms, the server must be in all
the rooms given.
) -> Mapping[str, Union[Optional[str], Sentinel]]:
"""
Bulk fetch room types for the given rooms (via current state).
Since this function is cached, any missing values would be cached as `None`. In
order to distinguish between a room without a type (a valid `None` value) and a
room that is unknown to the server, where we might want to omit the value
(which would otherwise be cached as `None`), we use the sentinel value
`ROOM_UNKNOWN_SENTINEL`.
Returns:
A mapping from room ID to the room's type (`None` is a valid room type).
Rooms unknown to this server will return `ROOM_UNKNOWN_SENTINEL`.
"""
rows = await self.db_pool.simple_select_many_batch(
table="room_stats_state",
column="room_id",
iterable=room_ids,
retcols=("room_id", "room_type"),
desc="bulk_get_room_type",
def txn(
txn: LoggingTransaction,
) -> MutableMapping[str, Union[Optional[str], Sentinel]]:
clause, args = make_in_list_sql_clause(
txn.database_engine, "room_id", room_ids
)
# We can't rely on `room_stats_state.room_type` if the server has left the
# room, because the `room_id` will still be in the table but everything
# will be set to `None`, and `None` is a valid room type value. We join
# against the `room_stats_current` table, which keeps track of the
# `current_state_events` count (and a proxy value `local_users_in_room`
# which can be used to assume the server is participating in the room and
# has current state), to ensure that the data in `room_stats_state` is
# up-to-date with the current state.
#
# FIXME: Use `room_stats_current.current_state_events` instead of
# `room_stats_current.local_users_in_room` once
# https://github.com/element-hq/synapse/issues/17457 is fixed.
sql = f"""
SELECT room_id, room_type
FROM room_stats_state
INNER JOIN room_stats_current USING (room_id)
WHERE
{clause}
AND local_users_in_room > 0
"""
txn.execute(sql, args)
room_id_to_type_map = {}
for row in txn:
room_id_to_type_map[row[0]] = row[1]
return room_id_to_type_map
results = await self.db_pool.runInteraction(
"bulk_get_room_type",
txn,
)
# If we haven't updated `room_stats_state` with the room yet, query the
# create events directly. This should happen only rarely so we don't
# mind if we do this in a loop.
results = dict(rows)
for room_id in room_ids - results.keys():
create_event = await self.get_create_event_for_room(room_id)
room_type = create_event.content.get(EventContentFields.ROOM_TYPE)
results[room_id] = room_type
try:
create_event = await self.get_create_event_for_room(room_id)
room_type = create_event.content.get(EventContentFields.ROOM_TYPE)
results[room_id] = room_type
except NotFoundError:
# We use the sentinel value to distinguish between `None` which is a
# valid room type and a room that is unknown to the server so the value
# is just unset.
results[room_id] = ROOM_UNKNOWN_SENTINEL
return results
@cached(max_entries=10000)
async def get_room_encryption(self, room_id: str) -> Optional[str]:
raise NotImplementedError()
@cachedList(cached_method_name="get_room_encryption", list_name="room_ids")
async def bulk_get_room_encryption(
self, room_ids: Set[str]
) -> Mapping[str, Union[Optional[str], Sentinel]]:
"""
Bulk fetch room encryption for the given rooms (via current state).
Since this function is cached, any missing values would be cached as `None`. In
order to distinguish between an unencrypted room that has `None` encryption and
a room that is unknown to the server, where we might want to omit the value
(which would otherwise be cached as `None`), we use the sentinel value
`ROOM_UNKNOWN_SENTINEL`.
Returns:
A mapping from room ID to the room's encryption algorithm if the room is
encrypted, otherwise `None`. Rooms unknown to this server will return
`ROOM_UNKNOWN_SENTINEL`.
"""
def txn(
txn: LoggingTransaction,
) -> MutableMapping[str, Union[Optional[str], Sentinel]]:
clause, args = make_in_list_sql_clause(
txn.database_engine, "room_id", room_ids
)
# We can't rely on `room_stats_state.encryption` if the server has left the
# room, because the `room_id` will still be in the table but everything
# will be set to `None`, and `None` is a valid encryption value. We join
# against the `room_stats_current` table, which keeps track of the
# `current_state_events` count (and a proxy value `local_users_in_room`
# which can be used to assume the server is participating in the room and
# has current state), to ensure that the data in `room_stats_state` is
# up-to-date with the current state.
#
# FIXME: Use `room_stats_current.current_state_events` instead of
# `room_stats_current.local_users_in_room` once
# https://github.com/element-hq/synapse/issues/17457 is fixed.
sql = f"""
SELECT room_id, encryption
FROM room_stats_state
INNER JOIN room_stats_current USING (room_id)
WHERE
{clause}
AND local_users_in_room > 0
"""
txn.execute(sql, args)
room_id_to_encryption_map = {}
for row in txn:
room_id_to_encryption_map[row[0]] = row[1]
return room_id_to_encryption_map
results = await self.db_pool.runInteraction(
"bulk_get_room_encryption",
txn,
)
# If we haven't updated `room_stats_state` with the room yet, query the state
# directly. This should happen only rarely so we don't mind if we do this in a
# loop.
encryption_event_ids: List[str] = []
for room_id in room_ids - results.keys():
state_map = await self.get_partial_filtered_current_state_ids(
room_id,
state_filter=StateFilter.from_types(
[
(EventTypes.Create, ""),
(EventTypes.RoomEncryption, ""),
]
),
)
# We can use the create event as a canary to tell whether the server has
# seen the room before
create_event_id = state_map.get((EventTypes.Create, ""))
encryption_event_id = state_map.get((EventTypes.RoomEncryption, ""))
if create_event_id is None:
# We use the sentinel value to distinguish between `None` which is a
# valid room type and a room that is unknown to the server so the value
# is just unset.
results[room_id] = ROOM_UNKNOWN_SENTINEL
continue
if encryption_event_id is None:
results[room_id] = None
else:
encryption_event_ids.append(encryption_event_id)
encryption_event_map = await self.get_events(encryption_event_ids)
for encryption_event_id in encryption_event_ids:
encryption_event = encryption_event_map.get(encryption_event_id)
# If the current state says there is an encryption event, we should have it
# in the database.
assert encryption_event is not None
results[encryption_event.room_id] = encryption_event.content.get(
EventContentFields.ENCRYPTION_ALGORITHM
)
return results
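A sketch of how a caller distinguishes the three possible outcomes (the consumer code and helpers are hypothetical):

```python
# `None` means "known room, not encrypted"; the sentinel means "room unknown
# to this server", which callers will usually want to omit entirely.
encryption_map = await store.bulk_get_room_encryption({"!known:x", "!unknown:x"})
for room_id, algorithm in encryption_map.items():
    if algorithm is ROOM_UNKNOWN_SENTINEL:
        continue
    if algorithm is None:
        handle_unencrypted_room(room_id)  # hypothetical helper
    else:
        handle_encrypted_room(room_id, algorithm)  # e.g. "m.megolm.v1.aes-sha2"
```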

View File

@@ -24,8 +24,11 @@ from typing import List, Optional, Tuple
import attr
from synapse.logging.opentracing import trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import LoggingTransaction
from synapse.storage.databases.main.stream import _filter_results_by_stream
from synapse.types import RoomStreamToken
from synapse.util.caches.stream_change_cache import StreamChangeCache
logger = logging.getLogger(__name__)
@@ -156,3 +159,44 @@ class StateDeltasStore(SQLBaseStore):
"get_max_stream_id_in_current_state_deltas",
self._get_max_stream_id_in_current_state_deltas_txn,
)
@trace
async def get_current_state_deltas_for_room(
self, room_id: str, from_token: RoomStreamToken, to_token: RoomStreamToken
) -> List[StateDelta]:
"""Get the state deltas between two tokens."""
if not self._curr_state_delta_stream_cache.has_entity_changed(
room_id, from_token.stream
):
return []
def get_current_state_deltas_for_room_txn(
txn: LoggingTransaction,
) -> List[StateDelta]:
sql = """
SELECT instance_name, stream_id, type, state_key, event_id, prev_event_id
FROM current_state_delta_stream
WHERE room_id = ? AND ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC
"""
txn.execute(
sql, (room_id, from_token.stream, to_token.get_max_stream_pos())
)
return [
StateDelta(
stream_id=row[1],
room_id=room_id,
event_type=row[2],
state_key=row[3],
event_id=row[4],
prev_event_id=row[5],
)
for row in txn
if _filter_results_by_stream(from_token, to_token, row[0], row[1])
]
return await self.db_pool.runInteraction(
"get_current_state_deltas_for_room", get_current_state_deltas_for_room_txn
)
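The device handler above folds these deltas into membership transitions; a compressed sketch of that pattern (mirroring, not reproducing, the handler code; the membership lookup is stubbed as a plain dict):

```python
from synapse.api.constants import EventTypes, Membership

def fold_deltas(deltas, memberships):
    """memberships: event_id -> membership string, as fetched in bulk."""
    joined, left = set(), set()
    for d in deltas:
        if d.event_type != EventTypes.Member:
            continue
        new = memberships.get(d.event_id)
        prev = memberships.get(d.prev_event_id)
        if new == Membership.JOIN and prev != Membership.JOIN:
            joined.add(d.state_key)
        elif new != Membership.JOIN and prev == Membership.JOIN:
            left.add(d.state_key)
    return joined, left - joined
```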

View File

@@ -67,7 +67,7 @@ from synapse.api.constants import Direction, EventTypes, Membership
from synapse.api.filtering import Filter
from synapse.events import EventBase
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import trace
from synapse.logging.opentracing import tag_args, trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
@@ -78,10 +78,11 @@ from synapse.storage.database import (
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import PersistedEventPosition, RoomStreamToken
from synapse.types import PersistedEventPosition, RoomStreamToken, StrCollection
from synapse.util.caches.descriptors import cached
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -811,6 +812,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
return ret, key
@trace
async def get_current_state_delta_membership_changes_for_user(
self,
user_id: str,
@@ -1185,6 +1187,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
return None
@trace
async def get_last_event_pos_in_room_before_stream_ordering(
self,
room_id: str,
@@ -1293,6 +1296,126 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
get_last_event_pos_in_room_before_stream_ordering_txn,
)
async def bulk_get_last_event_pos_in_room_before_stream_ordering(
self,
room_ids: StrCollection,
end_token: RoomStreamToken,
) -> Dict[str, int]:
"""Bulk fetch the stream position of the latest events in the given
rooms
"""
min_token = end_token.stream
max_token = end_token.get_max_stream_pos()
results: Dict[str, int] = {}
# First, we check for the rooms in the stream change cache to see if we
# can just use the latest position from it.
missing_room_ids: Set[str] = set()
for room_id in room_ids:
stream_pos = self._events_stream_cache.get_max_pos_of_last_change(room_id)
if stream_pos and stream_pos <= min_token:
results[room_id] = stream_pos
else:
missing_room_ids.add(room_id)
# Next, we query the stream position from the DB. At first we fetch all
# positions less than the *max* stream pos in the token, then filter
# them down. We do this as a) this is a cheaper query, and b) the vast
# majority of rooms will have a latest token from before the min stream
# pos.
def bulk_get_last_event_pos_txn(
txn: LoggingTransaction, batch_room_ids: StrCollection
) -> Dict[str, int]:
# This query fetches the latest stream position in the rooms before
# the given max position.
clause, args = make_in_list_sql_clause(
self.database_engine, "room_id", batch_room_ids
)
sql = f"""
SELECT room_id, (
SELECT stream_ordering FROM events AS e
LEFT JOIN rejections USING (event_id)
WHERE e.room_id = r.room_id
AND stream_ordering <= ?
AND NOT outlier
AND rejection_reason IS NULL
ORDER BY stream_ordering DESC
LIMIT 1
)
FROM rooms AS r
WHERE {clause}
"""
txn.execute(sql, [max_token] + args)
return {row[0]: row[1] for row in txn}
recheck_rooms: Set[str] = set()
for batched in batch_iter(missing_room_ids, 1000):
result = await self.db_pool.runInteraction(
"bulk_get_last_event_pos_in_room_before_stream_ordering",
bulk_get_last_event_pos_txn,
batched,
)
# Check that the stream positions for the rooms are from before the
# minimum position of the token. If not, we need to fetch more
# rows.
for room_id, stream in result.items():
if stream <= min_token:
results[room_id] = stream
else:
recheck_rooms.add(room_id)
if not recheck_rooms:
return results
# For the remaining rooms we need to fetch all rows between the min and
# max stream positions in the end token, and filter out the rows that
# are after the end token.
#
# This query should be fast as the range between the min and max should
# be small.
def bulk_get_last_event_pos_recheck_txn(
txn: LoggingTransaction, batch_room_ids: StrCollection
) -> Dict[str, int]:
clause, args = make_in_list_sql_clause(
self.database_engine, "room_id", batch_room_ids
)
sql = f"""
SELECT room_id, instance_name, stream_ordering
FROM events
WHERE ? < stream_ordering AND stream_ordering <= ?
AND NOT outlier
AND rejection_reason IS NULL
AND {clause}
ORDER BY stream_ordering ASC
"""
txn.execute(sql, [min_token, max_token] + args)
# We take the max stream ordering that is less than the token. Since
# we ordered by stream ordering we just need to iterate through and
# take the last matching stream ordering.
txn_results: Dict[str, int] = {}
for row in txn:
room_id = row[0]
event_pos = PersistedEventPosition(row[1], row[2])
if not event_pos.persisted_after(end_token):
txn_results[room_id] = event_pos.stream
return txn_results
for batched in batch_iter(recheck_rooms, 1000):
recheck_result = await self.db_pool.runInteraction(
"bulk_get_last_event_pos_in_room_before_stream_ordering_recheck",
bulk_get_last_event_pos_recheck_txn,
batched,
)
results.update(recheck_result)
return results
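In outline, the bulk lookup above is a three-stage funnel: answer from the stream change cache where possible, then a cheap per-room subquery bounded only by the token's max position, and finally a narrow range scan for the (usually few) rooms whose first answer could race a concurrent writer. A schematic sketch, with `cache_lookup`, `cheap_query`, and `range_scan` as hypothetical stand-ins for the cache probe and the two transactions:

def bulk_last_pos_sketch(
    room_ids, min_token, max_token, cache_lookup, cheap_query, range_scan
):
    results = {}
    missing = set()

    # Stage 1: the cache can answer outright if its position is safely
    # below every writer's position in the token.
    for room_id in room_ids:
        pos = cache_lookup(room_id)
        if pos is not None and pos <= min_token:
            results[room_id] = pos
        else:
            missing.add(room_id)

    # Stage 2: one cheap row per room, bounded by the max position only.
    recheck = set()
    for room_id, pos in cheap_query(missing, max_token).items():
        if pos <= min_token:
            results[room_id] = pos
        else:
            recheck.add(room_id)  # might be past the token for its writer

    # Stage 3: scan the small (min_token, max_token] window and keep the
    # last row per room that is not after the token.
    results.update(range_scan(recheck, min_token, max_token))
    return results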
async def get_current_room_stream_token_for_room_id(
self, room_id: str
) -> RoomStreamToken:
@@ -1819,6 +1942,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
return rows, next_token
@trace
@tag_args
async def paginate_room_events(
self,
room_id: str,
@@ -1983,3 +2107,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
return RoomStreamToken(stream=last_position.stream - 1)
return None
@trace
def get_rooms_that_might_have_updates(
self, room_ids: StrCollection, from_token: RoomStreamToken
) -> StrCollection:
"""Filters given room IDs down to those that might have updates, i.e.
removes rooms that definitely do not have updates.
"""
return self._events_stream_cache.get_entities_changed(
room_ids, from_token.stream
)
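`get_entities_changed` may over-report (a returned room can still turn out to have no updates) but never under-reports, so callers can safely use it as a pre-filter before doing per-room work. A hedged usage sketch (`do_expensive_per_room_work` is hypothetical):

def rooms_with_possible_updates_sketch(
    store, room_ids, from_token, do_expensive_per_room_work
):
    # Rooms dropped here definitely have no updates since `from_token`;
    # rooms kept here merely *might* have updates.
    maybe_changed = store.get_rooms_that_might_have_updates(room_ids, from_token)
    return {room_id: do_expensive_per_room_work(room_id) for room_id in maybe_changed}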

View File

@@ -777,6 +777,13 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
return super().bound_stream_token(max_stream)
def __str__(self) -> str:
instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items()))
return (
f"RoomStreamToken(stream: {self.stream}, topological: {self.topological}, "
f"instances: {{{instances}}})"
)
@attr.s(frozen=True, slots=True, order=False)
class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
@@ -873,6 +880,13 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
return True
def __str__(self) -> str:
instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items()))
return (
f"MultiWriterStreamToken(stream: {self.stream}, "
f"instances: {{{instances}}})"
)
class StreamKeyType(Enum):
"""Known stream types.
@@ -1131,12 +1145,64 @@ class StreamToken:
return True
def __str__(self) -> str:
return (
f"StreamToken(room: {self.room_key}, presence: {self.presence_key}, "
f"typing: {self.typing_key}, receipt: {self.receipt_key}, "
f"account_data: {self.account_data_key}, push_rules: {self.push_rules_key}, "
f"to_device: {self.to_device_key}, device_list: {self.device_list_key}, "
f"groups: {self.groups_key}, un_partial_stated_rooms: {self.un_partial_stated_rooms_key})"
)
StreamToken.START = StreamToken(
RoomStreamToken(stream=0), 0, 0, MultiWriterStreamToken(stream=0), 0, 0, 0, 0, 0, 0
)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class SlidingSyncStreamToken:
"""The same as a `StreamToken`, but includes an extra field at the start for
the sliding sync connection token (separated by a '/'). This is used to
store per-connection state.
This then looks something like:
5/s2633508_17_338_6732159_1082514_541479_274711_265584_1_379
Attributes:
stream_token: Token representing the position of all the standard
streams.
connection_position: Token used by sliding sync to track updates to any
per-connection state stored by Synapse.
"""
stream_token: StreamToken
connection_position: int
@staticmethod
@cancellable
async def from_string(store: "DataStore", string: str) -> "SlidingSyncStreamToken":
"""Creates a SlidingSyncStreamToken from its textual representation."""
try:
connection_position_str, stream_token_str = string.split("/", 1)
connection_position = int(connection_position_str)
stream_token = await StreamToken.from_string(store, stream_token_str)
return SlidingSyncStreamToken(
stream_token=stream_token,
connection_position=connection_position,
)
except CancelledError:
raise
except Exception:
raise SynapseError(400, "Invalid stream token")
async def to_string(self, store: "DataStore") -> str:
"""Serializes the token to a string"""
stream_token_str = await self.stream_token.to_string(store)
return f"{self.connection_position}/{stream_token_str}"
@attr.s(slots=True, frozen=True, auto_attribs=True)
class PersistedPosition:
"""Position of a newly persisted row with instance that persisted it."""
@@ -1219,11 +1285,12 @@ class ReadReceipt:
@attr.s(slots=True, frozen=True, auto_attribs=True)
class DeviceListUpdates:
"""
An object containing a diff of information regarding other users' device lists, intended for
a recipient to carry out device list tracking.
An object containing a diff of information regarding other users' device lists,
intended for a recipient to carry out device list tracking.
Attributes:
changed: A set of users whose device lists have changed recently.
changed: A set of users who have updated their device identity or
cross-signing keys, or who now share an encrypted room with us.
left: A set of users who the recipient no longer needs to track the device lists of.
Typically when those users no longer share any end-to-end encryption enabled rooms.
"""

View File

@@ -18,7 +18,7 @@
#
#
from enum import Enum
from typing import TYPE_CHECKING, Dict, Final, List, Optional, Sequence, Tuple
from typing import TYPE_CHECKING, Dict, Final, List, Mapping, Optional, Sequence, Tuple
import attr
from typing_extensions import TypedDict
@@ -31,7 +31,15 @@ else:
from pydantic import Extra
from synapse.events import EventBase
from synapse.types import JsonDict, JsonMapping, StreamToken, UserID
from synapse.types import (
DeviceListUpdates,
JsonDict,
JsonMapping,
Requester,
SlidingSyncStreamToken,
StreamToken,
UserID,
)
from synapse.types.rest.client import SlidingSyncBody
if TYPE_CHECKING:
@@ -102,7 +110,7 @@ class SlidingSyncConfig(SlidingSyncBody):
"""
user: UserID
device_id: Optional[str]
requester: Requester
# Pydantic config
class Config:
@@ -144,7 +152,7 @@ class SlidingSyncResult:
Attributes:
next_pos: The next position token in the sliding window to request (next_batch).
lists: Sliding window API. A map of list key to list results.
rooms: Room subscription API. A map of room ID to room subscription to room results.
rooms: Room subscription API. A map of room ID to room results.
extensions: Extensions API. A map of extension key to extension results.
"""
@@ -174,8 +182,8 @@ class SlidingSyncResult:
absent on joined/left rooms
prev_batch: A token that can be passed as a start parameter to the
`/rooms/<room_id>/messages` API to retrieve earlier messages.
limited: True if there are more events than fit between the given position and now.
Sync again to get more.
limited: True if there are more events than `timeline_limit` looking
backwards from the `response.pos` to the `request.pos`.
num_live: The number of timeline events which have just occurred and are not historical.
The last N events are 'live' and should be treated as such. This is mostly
useful to determine whether a given @mention event should make a noise or not.
@@ -230,6 +238,17 @@ class SlidingSyncResult:
notification_count: int
highlight_count: int
def __bool__(self) -> bool:
return (
# If this is the first time the client is seeing the room, we should not filter it out
# under any circumstance.
self.initial
# We need to let the client know if there are any new events
or bool(self.required_state)
or bool(self.timeline_events)
or bool(self.stripped_state)
)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class SlidingWindowList:
"""
@@ -264,6 +283,7 @@ class SlidingSyncResult:
Attributes:
to_device: The to-device extension (MSC3885)
e2ee: The E2EE device extension (MSC3884)
"""
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -282,12 +302,109 @@ class SlidingSyncResult:
def __bool__(self) -> bool:
return bool(self.events)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class E2eeExtension:
"""The E2EE device extension (MSC3884)
Attributes:
device_list_updates: List of user_ids whose device lists have changed or who
have left (only present on incremental syncs).
device_one_time_keys_count: Map from key algorithm to the number of
unclaimed one-time keys currently held on the server for this device. If
an algorithm is unlisted, the count for that algorithm is assumed to be
zero. If this entire parameter is missing, the count for all algorithms
is assumed to be zero.
device_unused_fallback_key_types: List of unused fallback key algorithms
for this device.
"""
# Only present on incremental syncs
device_list_updates: Optional[DeviceListUpdates]
device_one_time_keys_count: Mapping[str, int]
device_unused_fallback_key_types: Sequence[str]
def __bool__(self) -> bool:
# Note that "signed_curve25519" is always returned in key count responses
# regardless of whether we uploaded any keys for it. This is necessary until
# https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
#
# Also related:
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
default_otk = self.device_one_time_keys_count.get("signed_curve25519")
more_than_default_otk = len(self.device_one_time_keys_count) > 1 or (
default_otk is not None and default_otk > 0
)
return bool(
more_than_default_otk
or self.device_list_updates
or self.device_unused_fallback_key_types
)
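In other words, a count map containing only `signed_curve25519` at zero is treated as empty, while a non-zero default count, any second algorithm, a device list diff, or an unused fallback key makes the extension truthy. Two worked cases under that rule (a sketch, assuming direct access to the nested attrs class):

# Only the always-present default algorithm, at zero: treated as empty.
empty = SlidingSyncResult.Extensions.E2eeExtension(
    device_list_updates=None,
    device_one_time_keys_count={"signed_curve25519": 0},
    device_unused_fallback_key_types=[],
)
assert not empty

# A non-zero default count is enough to make the extension truthy.
non_empty = SlidingSyncResult.Extensions.E2eeExtension(
    device_list_updates=None,
    device_one_time_keys_count={"signed_curve25519": 50},
    device_unused_fallback_key_types=[],
)
assert non_empty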
@attr.s(slots=True, frozen=True, auto_attribs=True)
class AccountDataExtension:
"""The Account Data extension (MSC3959)
Attributes:
global_account_data_map: Mapping from `type` to `content` of global account
data events.
account_data_by_room_map: Mapping from room_id to mapping of `type` to
`content` of room account data events.
"""
global_account_data_map: Mapping[str, JsonMapping]
account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]]
def __bool__(self) -> bool:
return bool(
self.global_account_data_map or self.account_data_by_room_map
)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ReceiptsExtension:
"""The Receipts extension (MSC3960)
Attributes:
room_id_to_receipt_map: Mapping from room_id to `m.receipt` ephemeral
event (type, content)
"""
room_id_to_receipt_map: Mapping[str, JsonMapping]
def __bool__(self) -> bool:
return bool(self.room_id_to_receipt_map)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class TypingExtension:
"""The Typing Notification extension (MSC3961)
Attributes:
room_id_to_typing_map: Mapping from room_id to `m.typing` ephemeral
event (type, content)
"""
room_id_to_typing_map: Mapping[str, JsonMapping]
def __bool__(self) -> bool:
return bool(self.room_id_to_typing_map)
to_device: Optional[ToDeviceExtension] = None
e2ee: Optional[E2eeExtension] = None
account_data: Optional[AccountDataExtension] = None
receipts: Optional[ReceiptsExtension] = None
typing: Optional[TypingExtension] = None
def __bool__(self) -> bool:
return bool(self.to_device)
return bool(
self.to_device
or self.e2ee
or self.account_data
or self.receipts
or self.typing
)
next_pos: StreamToken
next_pos: SlidingSyncStreamToken
lists: Dict[str, SlidingWindowList]
rooms: Dict[str, RoomResult]
extensions: Extensions
@@ -297,10 +414,14 @@ class SlidingSyncResult:
to tell if the notifier needs to wait for more events when polling for
events.
"""
return bool(self.lists or self.rooms or self.extensions)
# We don't include `self.lists` here, as a) `lists` is always non-empty even if
# there are no changes, and b) since we're sorting rooms by `stream_ordering` of
# the latest activity, anything that would cause the order to change would end
# up in `self.rooms` and cause us to send down the change.
return bool(self.rooms or self.extensions)
@staticmethod
def empty(next_pos: StreamToken) -> "SlidingSyncResult":
def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult":
"Return a new empty result"
return SlidingSyncResult(
next_pos=next_pos,

View File

@@ -120,6 +120,9 @@ class SlidingSyncBody(RequestBodyModel):
Sliding Sync API request body.
Attributes:
conn_id: An optional string to identify this connection to the server.
Only one sliding sync connection is allowed per given conn_id (empty
or not).
lists: Sliding window API. A map of list key to list information
(:class:`SlidingSyncList`). Max lists: 100. The list keys should be
arbitrary strings which the client is using to refer to the list. Keep this
@@ -313,7 +316,73 @@ class SlidingSyncBody(RequestBodyModel):
return value
class E2eeExtension(RequestBodyModel):
"""The E2EE device extension (MSC3884)
Attributes:
enabled
"""
enabled: Optional[StrictBool] = False
class AccountDataExtension(RequestBodyModel):
"""The Account Data extension (MSC3959)
Attributes:
enabled
lists: List of list keys (from the Sliding Window API) to apply this
extension to.
rooms: List of room IDs (from the Room Subscription API) to apply this
extension to.
"""
enabled: Optional[StrictBool] = False
# Process all lists defined in the Sliding Window API. (This is the default.)
lists: Optional[List[StrictStr]] = ["*"]
# Process all room subscriptions defined in the Room Subscription API. (This is the default.)
rooms: Optional[List[StrictStr]] = ["*"]
class ReceiptsExtension(RequestBodyModel):
"""The Receipts extension (MSC3960)
Attributes:
enabled
lists: List of list keys (from the Sliding Window API) to apply this
extension to.
rooms: List of room IDs (from the Room Subscription API) to apply this
extension to.
"""
enabled: Optional[StrictBool] = False
# Process all lists defined in the Sliding Window API. (This is the default.)
lists: Optional[List[StrictStr]] = ["*"]
# Process all room subscriptions defined in the Room Subscription API. (This is the default.)
rooms: Optional[List[StrictStr]] = ["*"]
class TypingExtension(RequestBodyModel):
"""The Typing Notification extension (MSC3961)
Attributes:
enabled
lists: List of list keys (from the Sliding Window API) to apply this
extension to.
rooms: List of room IDs (from the Room Subscription API) to apply this
extension to.
"""
enabled: Optional[StrictBool] = False
# Process all lists defined in the Sliding Window API. (This is the default.)
lists: Optional[List[StrictStr]] = ["*"]
# Process all room subscriptions defined in the Room Subscription API. (This is the default.)
rooms: Optional[List[StrictStr]] = ["*"]
to_device: Optional[ToDeviceExtension] = None
e2ee: Optional[E2eeExtension] = None
account_data: Optional[AccountDataExtension] = None
receipts: Optional[ReceiptsExtension] = None
typing: Optional[TypingExtension] = None
conn_id: Optional[str]
# mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884
if TYPE_CHECKING:
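Putting the new request fields together, a body enabling a couple of extensions looks like the `sync_body` dicts used in the tests further down (an illustration only; the wildcard `lists`/`rooms` values just spell out the documented defaults):

sync_body = {
    "conn_id": "client-conn-1",  # at most one sliding sync connection per conn_id
    "lists": {
        "foo-list": {
            "ranges": [[0, 99]],
            "required_state": [],
            "timeline_limit": 1,
        }
    },
    "extensions": {
        "e2ee": {"enabled": True},
        "receipts": {
            "enabled": True,
            # The documented defaults, written out explicitly: apply to
            # every list and every room subscription.
            "lists": ["*"],
            "rooms": ["*"],
        },
    },
}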

View File

@@ -327,7 +327,7 @@ class StreamChangeCache:
for entity in r:
self._entity_to_key.pop(entity, None)
def get_max_pos_of_last_change(self, entity: EntityType) -> int:
def get_max_pos_of_last_change(self, entity: EntityType) -> Optional[int]:
"""Returns an upper bound of the stream id of the last change to an
entity.
@@ -335,7 +335,11 @@ class StreamChangeCache:
entity: The entity to check.
Return:
The stream position of the latest change for the given entity or
the earliest known stream position if the entity is unknown.
The stream position of the latest change for the given entity, if
known
"""
return self._entity_to_key.get(entity, self._earliest_known_stream_pos)
return self._entity_to_key.get(entity)
def get_earliest_known_position(self) -> int:
"""Returns the earliest position in the cache."""
return self._earliest_known_stream_pos
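With the `Optional[int]` return type, callers must treat `None` as "entity unknown to the cache" and fall back to an authoritative lookup instead of reusing the cache's earliest known position. A hedged sketch (`query_latest_pos` is a hypothetical fallback):

def latest_change_pos_sketch(cache, entity, query_latest_pos):
    pos = cache.get_max_pos_of_last_change(entity)
    if pos is not None:
        return pos  # upper bound straight from the cache
    # Unknown entity: the cache can no longer vouch for it, so ask the
    # database (or another authoritative source) instead.
    return query_latest_pos(entity)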

View File

@@ -43,9 +43,7 @@ from tests.unittest import override_config
class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.appservice_api = mock.AsyncMock()
return self.setup_test_homeserver(
federation_client=mock.Mock(), application_service_api=self.appservice_api
)
return self.setup_test_homeserver(application_service_api=self.appservice_api)
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.handler = hs.get_e2e_keys_handler()
@@ -1224,6 +1222,61 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
},
)
def test_query_devices_remote_down(self) -> None:
"""Tests that querying keys for a remote user on an unreachable server returns
results in the "failures" property
"""
remote_user_id = "@test:other"
local_user_id = "@test:test"
# The backoff code treats time zero as special
self.reactor.advance(5)
self.hs.get_federation_http_client().agent.request = mock.AsyncMock( # type: ignore[method-assign]
side_effect=Exception("boop")
)
e2e_handler = self.hs.get_e2e_keys_handler()
query_result = self.get_success(
e2e_handler.query_devices(
{
"device_keys": {remote_user_id: []},
},
timeout=10,
from_user_id=local_user_id,
from_device_id="some_device_id",
)
)
self.assertEqual(
query_result["failures"],
{
"other": {
"message": "Failed to send request: Exception: boop",
"status": 503,
}
},
)
# Do it again: we should hit the backoff
query_result = self.get_success(
e2e_handler.query_devices(
{
"device_keys": {remote_user_id: []},
},
timeout=10,
from_user_id=local_user_id,
from_device_id="some_device_id",
)
)
self.assertEqual(
query_result["failures"],
{"other": {"message": "Not ready for retry", "status": 503}},
)
@parameterized.expand(
[
# The remote homeserver's response indicates that this user has 0/1/2 devices.

View File

@@ -19,7 +19,7 @@
#
import logging
from copy import deepcopy
from typing import Dict, Optional
from typing import Dict, List, Optional
from unittest.mock import patch
from parameterized import parameterized
@@ -35,7 +35,7 @@ from synapse.api.constants import (
RoomTypes,
)
from synapse.api.room_versions import RoomVersions
from synapse.events import make_event_from_dict
from synapse.events import StrippedStateEvent, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.handlers.sliding_sync import (
RoomSyncConfig,
@@ -3093,6 +3093,78 @@ class FilterRoomsTestCase(HomeserverTestCase):
return room_id
_remote_invite_count: int = 0
def _create_remote_invite_room_for_user(
self,
invitee_user_id: str,
unsigned_invite_room_state: Optional[List[StrippedStateEvent]],
) -> str:
"""
Create a fake invite for a remote room and persist it.
We don't have any state for this kind of room and can only rely on the
stripped state included in the unsigned portion of the invite event to identify
the room.
Args:
invitee_user_id: The person being invited
unsigned_invite_room_state: List of stripped state events to assist the
receiver in identifying the room.
Returns:
The room ID of the remote invite room
"""
invite_room_id = f"!test_room{self._remote_invite_count}:remote_server"
invite_event_dict = {
"room_id": invite_room_id,
"sender": "@inviter:remote_server",
"state_key": invitee_user_id,
"depth": 1,
"origin_server_ts": 1,
"type": EventTypes.Member,
"content": {"membership": Membership.INVITE},
"auth_events": [],
"prev_events": [],
}
if unsigned_invite_room_state is not None:
serialized_stripped_state_events = []
for stripped_event in unsigned_invite_room_state:
serialized_stripped_state_events.append(
{
"type": stripped_event.type,
"state_key": stripped_event.state_key,
"sender": stripped_event.sender,
"content": stripped_event.content,
}
)
invite_event_dict["unsigned"] = {
"invite_room_state": serialized_stripped_state_events
}
invite_event = make_event_from_dict(
invite_event_dict,
room_version=RoomVersions.V10,
)
invite_event.internal_metadata.outlier = True
invite_event.internal_metadata.out_of_band_membership = True
self.get_success(
self.store.maybe_store_room_on_outlier_membership(
room_id=invite_room_id, room_version=invite_event.room_version
)
)
context = EventContext.for_outlier(self.hs.get_storage_controllers())
persist_controller = self.hs.get_storage_controllers().persistence
assert persist_controller is not None
self.get_success(persist_controller.persist_event(invite_event, context))
self._remote_invite_count += 1
return invite_room_id
def test_filter_dm_rooms(self) -> None:
"""
Test `filter.is_dm` for DM rooms
@@ -3157,7 +3229,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a normal room
# Create an unencrypted room
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create an encrypted room
@@ -3165,7 +3237,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
self.helper.send_state(
encrypted_room_id,
EventTypes.RoomEncryption,
{"algorithm": "m.megolm.v1.aes-sha2"},
{EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
tok=user1_tok,
)
@@ -3206,6 +3278,460 @@ class FilterRoomsTestCase(HomeserverTestCase):
self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
def test_filter_encrypted_server_left_room(self) -> None:
"""
Test that we can apply a `filter.is_encrypted` against a room that everyone has left.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
before_rooms_token = self.event_sources.get_current_token()
# Create an unencrypted room
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Leave the room
self.helper.leave(room_id, user1_id, tok=user1_tok)
# Create an encrypted room
encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
self.helper.send_state(
encrypted_room_id,
EventTypes.RoomEncryption,
{EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
tok=user1_tok,
)
# Leave the room
self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok)
after_rooms_token = self.event_sources.get_current_token()
# Get the rooms the user should be syncing with
sync_room_map = self._get_sync_room_ids_for_user(
UserID.from_string(user1_id),
# We're using a `from_token` so that the room is considered `newly_left` and
# appears in our list of relevant sync rooms
from_token=before_rooms_token,
to_token=after_rooms_token,
)
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_encrypted=True,
),
after_rooms_token,
)
)
self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id})
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_encrypted=False,
),
after_rooms_token,
)
)
self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
def test_filter_encrypted_server_left_room2(self) -> None:
"""
Test that we can apply a `filter.is_encrypted` against a room that everyone has
left.
There is still a local user who is invited to the rooms, but that doesn't affect
whether the server is participating in the room (users need to be joined).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
_user2_tok = self.login(user2_id, "pass")
before_rooms_token = self.event_sources.get_current_token()
# Create an unencrypted room
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Invite user2
self.helper.invite(room_id, targ=user2_id, tok=user1_tok)
# User1 leaves the room
self.helper.leave(room_id, user1_id, tok=user1_tok)
# Create an encrypted room
encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
self.helper.send_state(
encrypted_room_id,
EventTypes.RoomEncryption,
{EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
tok=user1_tok,
)
# Invite user2
self.helper.invite(encrypted_room_id, targ=user2_id, tok=user1_tok)
# User1 leaves the room
self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok)
after_rooms_token = self.event_sources.get_current_token()
# Get the rooms the user should be syncing with
sync_room_map = self._get_sync_room_ids_for_user(
UserID.from_string(user1_id),
# We're using a `from_token` so that the room is considered `newly_left` and
# appears in our list of relevant sync rooms
from_token=before_rooms_token,
to_token=after_rooms_token,
)
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_encrypted=True,
),
after_rooms_token,
)
)
self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id})
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_encrypted=False,
),
after_rooms_token,
)
)
self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
def test_filter_encrypted_after_we_left(self) -> None:
"""
Test that we can apply a `filter.is_encrypted` against a room that was encrypted
after we left the room (make sure we don't just use the current state)
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
before_rooms_token = self.event_sources.get_current_token()
# Create an unencrypted room
room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
# Join and leave the room
self.helper.join(room_id, user1_id, tok=user1_tok)
self.helper.leave(room_id, user1_id, tok=user1_tok)
# Create a room that will be encrypted
encrypted_after_we_left_room_id = self.helper.create_room_as(
user2_id, tok=user2_tok
)
# Join and leave the room
self.helper.join(encrypted_after_we_left_room_id, user1_id, tok=user1_tok)
self.helper.leave(encrypted_after_we_left_room_id, user1_id, tok=user1_tok)
# Encrypt the room after we've left
self.helper.send_state(
encrypted_after_we_left_room_id,
EventTypes.RoomEncryption,
{EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
tok=user2_tok,
)
after_rooms_token = self.event_sources.get_current_token()
# Get the rooms the user should be syncing with
sync_room_map = self._get_sync_room_ids_for_user(
UserID.from_string(user1_id),
# We're using a `from_token` so that the room is considered `newly_left` and
# appears in our list of relevant sync rooms
from_token=before_rooms_token,
to_token=after_rooms_token,
)
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_encrypted=True,
),
after_rooms_token,
)
)
# Even though we left the room before it was encrypted, we still see it because
# someone else on our server is still participating in the room and we "leak"
# the current state to the left user. But we consider the room encryption status
# to not be a secret given it's often set at the start of the room and it's one
# of the stripped state events that is normally handed out.
self.assertEqual(
truthy_filtered_room_map.keys(), {encrypted_after_we_left_room_id}
)
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_encrypted=False,
),
after_rooms_token,
)
)
# Even though we left the room before it was encrypted... (see comment above)
self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
def test_filter_encrypted_with_remote_invite_room_no_stripped_state(self) -> None:
"""
Test that we can apply a `filter.is_encrypted` filter against a remote invite
room without any `unsigned.invite_room_state` (stripped state).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a remote invite room without any `unsigned.invite_room_state`
_remote_invite_room_id = self._create_remote_invite_room_for_user(
user1_id, None
)
# Create an unencrypted room
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create an encrypted room
encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
self.helper.send_state(
encrypted_room_id,
EventTypes.RoomEncryption,
{EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
tok=user1_tok,
)
after_rooms_token = self.event_sources.get_current_token()
# Get the rooms the user should be syncing with
sync_room_map = self._get_sync_room_ids_for_user(
UserID.from_string(user1_id),
from_token=None,
to_token=after_rooms_token,
)
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_encrypted=True,
),
after_rooms_token,
)
)
# `remote_invite_room_id` should not appear because we can't figure out whether
# it is encrypted or not (no stripped state, `unsigned.invite_room_state`).
self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id})
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_encrypted=False,
),
after_rooms_token,
)
)
# `remote_invite_room_id` should not appear because we can't figure out whether
# it is encrypted or not (no stripped state, `unsigned.invite_room_state`).
self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
def test_filter_encrypted_with_remote_invite_encrypted_room(self) -> None:
"""
Test that we can apply a `filter.is_encrypted` filter against a remote invite
encrypted room with some `unsigned.invite_room_state` (stripped state).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a remote invite room with some `unsigned.invite_room_state`
# indicating that the room is encrypted.
remote_invite_room_id = self._create_remote_invite_room_for_user(
user1_id,
[
StrippedStateEvent(
type=EventTypes.Create,
state_key="",
sender="@inviter:remote_server",
content={
EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
},
),
StrippedStateEvent(
type=EventTypes.RoomEncryption,
state_key="",
sender="@inviter:remote_server",
content={
EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
},
),
],
)
# Create an unencrypted room
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create an encrypted room
encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
self.helper.send_state(
encrypted_room_id,
EventTypes.RoomEncryption,
{EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
tok=user1_tok,
)
after_rooms_token = self.event_sources.get_current_token()
# Get the rooms the user should be syncing with
sync_room_map = self._get_sync_room_ids_for_user(
UserID.from_string(user1_id),
from_token=None,
to_token=after_rooms_token,
)
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_encrypted=True,
),
after_rooms_token,
)
)
# `remote_invite_room_id` should appear here because it is encrypted
# according to the stripped state
self.assertEqual(
truthy_filtered_room_map.keys(), {encrypted_room_id, remote_invite_room_id}
)
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_encrypted=False,
),
after_rooms_token,
)
)
# `remote_invite_room_id` should not appear here because it is encrypted
# according to the stripped state
self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
def test_filter_encrypted_with_remote_invite_unencrypted_room(self) -> None:
"""
Test that we can apply a `filter.is_encrypted` filter against a remote invite
unencrypted room with some `unsigned.invite_room_state` (stripped state).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a remote invite room with some `unsigned.invite_room_state`
# but don't set any room encryption event.
remote_invite_room_id = self._create_remote_invite_room_for_user(
user1_id,
[
StrippedStateEvent(
type=EventTypes.Create,
state_key="",
sender="@inviter:remote_server",
content={
EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
},
),
# No room encryption event
],
)
# Create an unencrypted room
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create an encrypted room
encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
self.helper.send_state(
encrypted_room_id,
EventTypes.RoomEncryption,
{EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
tok=user1_tok,
)
after_rooms_token = self.event_sources.get_current_token()
# Get the rooms the user should be syncing with
sync_room_map = self._get_sync_room_ids_for_user(
UserID.from_string(user1_id),
from_token=None,
to_token=after_rooms_token,
)
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_encrypted=True,
),
after_rooms_token,
)
)
# `remote_invite_room_id` should not appear here because it is unencrypted
# according to the stripped state
self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id})
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
is_encrypted=False,
),
after_rooms_token,
)
)
# `remote_invite_room_id` should appear because it is unencrypted according to
# the stripped state
self.assertEqual(
falsy_filtered_room_map.keys(), {room_id, remote_invite_room_id}
)
def test_filter_invite_rooms(self) -> None:
"""
Test `filter.is_invite` for rooms that the user has been invited to
@@ -3461,47 +3987,159 @@ class FilterRoomsTestCase(HomeserverTestCase):
self.assertEqual(filtered_room_map.keys(), {space_room_id})
def test_filter_room_types_with_invite_remote_room(self) -> None:
"""Test that we can apply a room type filter, even if we have an invite
for a remote room.
This is a regression test.
def test_filter_room_types_server_left_room(self) -> None:
"""
Test that we can apply a `filter.room_types` against a room that everyone has left.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a fake remote invite and persist it.
invite_room_id = "!some:room"
invite_event = make_event_from_dict(
{
"room_id": invite_room_id,
"sender": "@user:test.serv",
"state_key": user1_id,
"depth": 1,
"origin_server_ts": 1,
"type": EventTypes.Member,
"content": {"membership": Membership.INVITE},
"auth_events": [],
"prev_events": [],
},
room_version=RoomVersions.V10,
)
invite_event.internal_metadata.outlier = True
invite_event.internal_metadata.out_of_band_membership = True
self.get_success(
self.store.maybe_store_room_on_outlier_membership(
room_id=invite_room_id, room_version=invite_event.room_version
)
)
context = EventContext.for_outlier(self.hs.get_storage_controllers())
persist_controller = self.hs.get_storage_controllers().persistence
assert persist_controller is not None
self.get_success(persist_controller.persist_event(invite_event, context))
before_rooms_token = self.event_sources.get_current_token()
# Create a normal room (no room type)
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Leave the room
self.helper.leave(room_id, user1_id, tok=user1_tok)
# Create a space room
space_room_id = self.helper.create_room_as(
user1_id,
tok=user1_tok,
extra_content={
"creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
},
)
# Leave the room
self.helper.leave(space_room_id, user1_id, tok=user1_tok)
after_rooms_token = self.event_sources.get_current_token()
# Get the rooms the user should be syncing with
sync_room_map = self._get_sync_room_ids_for_user(
UserID.from_string(user1_id),
# We're using a `from_token` so that the room is considered `newly_left` and
# appears in our list of relevant sync rooms
from_token=before_rooms_token,
to_token=after_rooms_token,
)
# Try finding only normal rooms
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
after_rooms_token,
)
)
self.assertEqual(filtered_room_map.keys(), {room_id})
# Try finding only spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
after_rooms_token,
)
)
self.assertEqual(filtered_room_map.keys(), {space_room_id})
def test_filter_room_types_server_left_room2(self) -> None:
"""
Test that we can apply a `filter.room_types` against a room that everyone has left.
There is still a local user who is invited to the rooms, but that doesn't affect
whether the server is participating in the room (users need to be joined).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
_user2_tok = self.login(user2_id, "pass")
before_rooms_token = self.event_sources.get_current_token()
# Create a normal room (no room type)
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Invite user2
self.helper.invite(room_id, targ=user2_id, tok=user1_tok)
# User1 leaves the room
self.helper.leave(room_id, user1_id, tok=user1_tok)
# Create a space room
space_room_id = self.helper.create_room_as(
user1_id,
tok=user1_tok,
extra_content={
"creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
},
)
# Invite user2
self.helper.invite(space_room_id, targ=user2_id, tok=user1_tok)
# User1 leaves the room
self.helper.leave(space_room_id, user1_id, tok=user1_tok)
after_rooms_token = self.event_sources.get_current_token()
# Get the rooms the user should be syncing with
sync_room_map = self._get_sync_room_ids_for_user(
UserID.from_string(user1_id),
# We're using a `from_token` so that the room is considered `newly_left` and
# appears in our list of relevant sync rooms
from_token=before_rooms_token,
to_token=after_rooms_token,
)
# Try finding only normal rooms
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
after_rooms_token,
)
)
self.assertEqual(filtered_room_map.keys(), {room_id})
# Try finding only spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
after_rooms_token,
)
)
self.assertEqual(filtered_room_map.keys(), {space_room_id})
def test_filter_room_types_with_remote_invite_room_no_stripped_state(self) -> None:
"""
Test that we can apply a `filter.room_types` filter against a remote invite
room without any `unsigned.invite_room_state` (stripped state).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a remote invite room without any `unsigned.invite_room_state`
_remote_invite_room_id = self._create_remote_invite_room_for_user(
user1_id, None
)
# Create a normal room (no room type)
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create a space room
space_room_id = self.helper.create_room_as(
user1_id,
tok=user1_tok,
extra_content={
"creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
},
)
after_rooms_token = self.event_sources.get_current_token()
@@ -3512,18 +4150,186 @@ class FilterRoomsTestCase(HomeserverTestCase):
to_token=after_rooms_token,
)
# Try finding only normal rooms
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
room_types=[None, RoomTypes.SPACE],
),
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
after_rooms_token,
)
)
self.assertEqual(filtered_room_map.keys(), {room_id, invite_room_id})
# `remote_invite_room_id` should not appear because we can't figure out what
# room type it is (no stripped state, `unsigned.invite_room_state`)
self.assertEqual(filtered_room_map.keys(), {room_id})
# Try finding only spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
after_rooms_token,
)
)
# `remote_invite_room_id` should not appear because we can't figure out what
# room type it is (no stripped state, `unsigned.invite_room_state`)
self.assertEqual(filtered_room_map.keys(), {space_room_id})
def test_filter_room_types_with_remote_invite_space(self) -> None:
"""
Test that we can apply a `filter.room_types` filter against a remote invite
to a space room with some `unsigned.invite_room_state` (stripped state).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a remote invite room with some `unsigned.invite_room_state` indicating
# that it is a space room
remote_invite_room_id = self._create_remote_invite_room_for_user(
user1_id,
[
StrippedStateEvent(
type=EventTypes.Create,
state_key="",
sender="@inviter:remote_server",
content={
EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
# Specify that it is a space room
EventContentFields.ROOM_TYPE: RoomTypes.SPACE,
},
),
],
)
# Create a normal room (no room type)
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create a space room
space_room_id = self.helper.create_room_as(
user1_id,
tok=user1_tok,
extra_content={
"creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
},
)
after_rooms_token = self.event_sources.get_current_token()
# Get the rooms the user should be syncing with
sync_room_map = self._get_sync_room_ids_for_user(
UserID.from_string(user1_id),
from_token=None,
to_token=after_rooms_token,
)
# Try finding only normal rooms
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
after_rooms_token,
)
)
# `remote_invite_room_id` should not appear here because it is a space room
# according to the stripped state
self.assertEqual(filtered_room_map.keys(), {room_id})
# Try finding only spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
after_rooms_token,
)
)
# `remote_invite_room_id` should appear here because it is a space room
# according to the stripped state
self.assertEqual(
filtered_room_map.keys(), {space_room_id, remote_invite_room_id}
)
def test_filter_room_types_with_remote_invite_normal_room(self) -> None:
"""
Test that we can apply a `filter.room_types` filter against a remote invite
to a normal room with some `unsigned.invite_room_state` (stripped state).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a remote invite room with some `unsigned.invite_room_state`
# but the create event does not specify a room type (normal room)
remote_invite_room_id = self._create_remote_invite_room_for_user(
user1_id,
[
StrippedStateEvent(
type=EventTypes.Create,
state_key="",
sender="@inviter:remote_server",
content={
EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
# No room type means this is a normal room
},
),
],
)
# Create a normal room (no room type)
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create a space room
space_room_id = self.helper.create_room_as(
user1_id,
tok=user1_tok,
extra_content={
"creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
},
)
after_rooms_token = self.event_sources.get_current_token()
# Get the rooms the user should be syncing with
sync_room_map = self._get_sync_room_ids_for_user(
UserID.from_string(user1_id),
from_token=None,
to_token=after_rooms_token,
)
# Try finding only normal rooms
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
after_rooms_token,
)
)
# `remote_invite_room_id` should appear here because it is a normal room
# according to the stripped state (no room type)
self.assertEqual(filtered_room_map.keys(), {room_id, remote_invite_room_id})
# Try finding only spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
after_rooms_token,
)
)
# `remote_invite_room_id` should not appear here because it is a normal room
# according to the stripped state (no room type)
self.assertEqual(filtered_room_map.keys(), {space_room_id})
class SortRoomsTestCase(HomeserverTestCase):

View File

@@ -0,0 +1,13 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#

View File

@@ -0,0 +1,453 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from parameterized import parameterized
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import EventTypes
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.types import SlidingSyncStreamToken
from synapse.types.handlers import SlidingSyncConfig
from synapse.util import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
logger = logging.getLogger(__name__)
class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase):
"""
Test connection tracking in the Sliding Sync API.
"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
sync.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers()
def test_rooms_required_state_incremental_sync_LIVE(self) -> None:
"""Test that we only get state updates in incremental sync for rooms
we've already seen (LIVE).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
[EventTypes.RoomHistoryVisibility, ""],
# This one doesn't exist in the room
[EventTypes.Name, ""],
],
"timeline_limit": 0,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Create, "")],
state_map[(EventTypes.RoomHistoryVisibility, "")],
},
exact=True,
)
# Send a state event
self.helper.send_state(
room_id1, EventTypes.Name, body={"name": "foo"}, tok=user2_tok
)
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self.assertNotIn("initial", response_body["rooms"][room_id1])
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Name, "")],
},
exact=True,
)
@parameterized.expand([(False,), (True,)])
def test_rooms_timeline_incremental_sync_PREVIOUSLY(self, limited: bool) -> None:
"""
Test getting room data where we have previously sent down the room, but
we missed sending down some timeline events previously and so its status
is considered PREVIOUSLY.
There are two versions of this test, one where there are more messages
than the timeline limit, and one where there aren't.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
self.helper.send(room_id1, "msg", tok=user1_tok)
timeline_limit = 5
conn_id = "conn_id"
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 0]],
"required_state": [],
"timeline_limit": timeline_limit,
}
},
"conn_id": "conn_id",
}
# The first room gets sent down the initial sync
response_body, initial_from_token = self.do_sync(sync_body, tok=user1_tok)
self.assertCountEqual(
response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
)
# We now send down some events in room1 (depending on the test param).
expected_events = [] # The set of events in the timeline
if limited:
for _ in range(10):
resp = self.helper.send(room_id1, "msg1", tok=user1_tok)
expected_events.append(resp["event_id"])
else:
resp = self.helper.send(room_id1, "msg1", tok=user1_tok)
expected_events.append(resp["event_id"])
# A second message happens in the other room, so room1 won't get sent down.
self.helper.send(room_id2, "msg", tok=user1_tok)
# Only the second room gets sent down sync.
response_body, from_token = self.do_sync(
sync_body, since=initial_from_token, tok=user1_tok
)
self.assertCountEqual(
response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
)
# FIXME: This is a hack to record that the first room wasn't sent down
# sync, as we don't implement that currently.
sliding_sync_handler = self.hs.get_sliding_sync_handler()
requester = self.get_success(
self.hs.get_auth().get_user_by_access_token(user1_tok)
)
sync_config = SlidingSyncConfig(
user=requester.user,
requester=requester,
conn_id=conn_id,
)
parsed_initial_from_token = self.get_success(
SlidingSyncStreamToken.from_string(self.store, initial_from_token)
)
connection_position = self.get_success(
sliding_sync_handler.connection_store.record_rooms(
sync_config,
parsed_initial_from_token,
sent_room_ids=[],
unsent_room_ids=[room_id1],
)
)
# FIXME: Now fix up `from_token` with the new connection position above.
parsed_from_token = self.get_success(
SlidingSyncStreamToken.from_string(self.store, from_token)
)
parsed_from_token = SlidingSyncStreamToken(
stream_token=parsed_from_token.stream_token,
connection_position=connection_position,
)
from_token = self.get_success(parsed_from_token.to_string(self.store))
# We now send another event to room1, so we should sync all the missing events.
resp = self.helper.send(room_id1, "msg2", tok=user1_tok)
expected_events.append(resp["event_id"])
# This sync should contain the messages from room1 not yet sent down.
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
self.assertCountEqual(
response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
)
self.assertNotIn("initial", response_body["rooms"][room_id1])
self.assertEqual(
[ev["event_id"] for ev in response_body["rooms"][room_id1]["timeline"]],
expected_events[-timeline_limit:],
)
self.assertEqual(response_body["rooms"][room_id1]["limited"], limited)
self.assertEqual(response_body["rooms"][room_id1].get("required_state"), None)
def test_rooms_required_state_incremental_sync_PREVIOUSLY(self) -> None:
"""
Test getting room data where we have previously sent down the room, but
we missed sending down some state previously and so its status is
considered PREVIOUSLY.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
self.helper.send(room_id1, "msg", tok=user1_tok)
conn_id = "conn_id"
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 0]],
"required_state": [
[EventTypes.Create, ""],
[EventTypes.RoomHistoryVisibility, ""],
# This one doesn't exist in the room
[EventTypes.Name, ""],
],
"timeline_limit": 0,
}
},
"conn_id": "conn_id",
}
# The first room gets sent down the initial sync
response_body, initial_from_token = self.do_sync(sync_body, tok=user1_tok)
self.assertCountEqual(
response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
)
# We now send down some state in room1
resp = self.helper.send_state(
room_id1, EventTypes.Name, {"name": "foo"}, tok=user1_tok
)
name_change_id = resp["event_id"]
# A second message happens in the other room, so room1 won't get sent down.
self.helper.send(room_id2, "msg", tok=user1_tok)
# Only the second room gets sent down sync.
response_body, from_token = self.do_sync(
sync_body, since=initial_from_token, tok=user1_tok
)
self.assertCountEqual(
response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
)
# FIXME: This is a hack to record that the first room wasn't sent down
# sync, as we don't implement that currently.
sliding_sync_handler = self.hs.get_sliding_sync_handler()
requester = self.get_success(
self.hs.get_auth().get_user_by_access_token(user1_tok)
)
sync_config = SlidingSyncConfig(
user=requester.user,
requester=requester,
conn_id=conn_id,
)
parsed_initial_from_token = self.get_success(
SlidingSyncStreamToken.from_string(self.store, initial_from_token)
)
connection_position = self.get_success(
sliding_sync_handler.connection_store.record_rooms(
sync_config,
parsed_initial_from_token,
sent_room_ids=[],
unsent_room_ids=[room_id1],
)
)
# FIXME: Now fix up `from_token` with the new connection position above.
parsed_from_token = self.get_success(
SlidingSyncStreamToken.from_string(self.store, from_token)
)
parsed_from_token = SlidingSyncStreamToken(
stream_token=parsed_from_token.stream_token,
connection_position=connection_position,
)
from_token = self.get_success(parsed_from_token.to_string(self.store))
# We now send another event to room1, so we should sync all the missing state.
self.helper.send(room_id1, "msg", tok=user1_tok)
# This sync should contain the state changes from room1.
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
self.assertCountEqual(
response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
)
self.assertNotIn("initial", response_body["rooms"][room_id1])
# We should only see the name change.
self.assertEqual(
[
ev["event_id"]
for ev in response_body["rooms"][room_id1]["required_state"]
],
[name_change_id],
)
def test_rooms_required_state_incremental_sync_NEVER(self) -> None:
"""
Test getting `required_state` where we have NEVER sent down the room before
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
self.helper.send(room_id1, "msg", tok=user1_tok)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 0]],
"required_state": [
[EventTypes.Create, ""],
[EventTypes.RoomHistoryVisibility, ""],
# This one doesn't exist in the room
[EventTypes.Name, ""],
],
"timeline_limit": 1,
}
},
}
# A message happens in the other room, so room1 won't get sent down.
self.helper.send(room_id2, "msg", tok=user1_tok)
# Only the second room gets sent down sync.
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
self.assertCountEqual(
response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
)
# We now send another event to room1, so we should send down the full
# room.
self.helper.send(room_id1, "msg2", tok=user1_tok)
# This sync should contain the messages from room1 not yet sent down.
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
self.assertCountEqual(
response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
)
self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Create, "")],
state_map[(EventTypes.RoomHistoryVisibility, "")],
},
exact=True,
)
def test_rooms_timeline_incremental_sync_NEVER(self) -> None:
"""
Test getting timeline room data where we have NEVER sent down the room
before
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 0]],
"required_state": [],
"timeline_limit": 5,
}
},
}
expected_events = []
for _ in range(4):
resp = self.helper.send(room_id1, "msg", tok=user1_tok)
expected_events.append(resp["event_id"])
# A message happens in the other room, so room1 won't get sent down.
self.helper.send(room_id2, "msg", tok=user1_tok)
# Only the second room gets sent down sync.
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
self.assertCountEqual(
response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
)
# We now send another event to room1 so it comes down sync
resp = self.helper.send(room_id1, "msg2", tok=user1_tok)
expected_events.append(resp["event_id"])
# This sync should contain the messages from room1 not yet sent down.
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
self.assertCountEqual(
response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
)
self.assertEqual(
[ev["event_id"] for ev in response_body["rooms"][room_id1]["timeline"]],
expected_events,
)
self.assertEqual(response_body["rooms"][room_id1]["limited"], True)
self.assertEqual(response_body["rooms"][room_id1]["initial"], True)


@@ -0,0 +1,495 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import AccountDataTypes
from synapse.rest.client import login, room, sendtodevice, sync
from synapse.server import HomeServer
from synapse.types import StreamKeyType
from synapse.util import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.server import TimedOutException
logger = logging.getLogger(__name__)
class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase):
"""Tests for the account_data sliding sync extension"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
sync.register_servlets,
sendtodevice.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.account_data_handler = hs.get_account_data_handler()
def test_no_data_initial_sync(self) -> None:
"""
Test that enabling the account_data extension works during an initial sync,
even if there is no data.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Make an initial Sliding Sync request with the account_data extension enabled
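# An empty `lists` map means we aren't asking for any room lists, only the
# extension data.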
sync_body = {
"lists": {},
"extensions": {
"account_data": {
"enabled": True,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
self.assertIncludes(
{
global_event["type"]
for global_event in response_body["extensions"]["account_data"].get(
"global"
)
},
# Even though we don't have any global account data set, Synapse saves some
# default push rules for us.
{AccountDataTypes.PUSH_RULES},
exact=True,
)
self.assertIncludes(
response_body["extensions"]["account_data"].get("rooms").keys(),
set(),
exact=True,
)
def test_no_data_incremental_sync(self) -> None:
"""
Test that enabling the account_data extension works during an incremental sync,
even if there is no data.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
sync_body = {
"lists": {},
"extensions": {
"account_data": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make an incremental Sliding Sync request with the account_data extension enabled
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# There have been no account data changes since the `from_token`, so we shouldn't
# see any account data here.
self.assertIncludes(
{
global_event["type"]
for global_event in response_body["extensions"]["account_data"].get(
"global"
)
},
set(),
exact=True,
)
self.assertIncludes(
response_body["extensions"]["account_data"].get("rooms").keys(),
set(),
exact=True,
)
def test_global_account_data_initial_sync(self) -> None:
"""
On initial sync, we should return all global account data.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Update the global account data
self.get_success(
self.account_data_handler.add_account_data_for_user(
user_id=user1_id,
account_data_type="org.matrix.foobarbaz",
content={"foo": "bar"},
)
)
# Make an initial Sliding Sync request with the account_data extension enabled
sync_body = {
"lists": {},
"extensions": {
"account_data": {
"enabled": True,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# It should show us all of the global account data
self.assertIncludes(
{
global_event["type"]
for global_event in response_body["extensions"]["account_data"].get(
"global"
)
},
{AccountDataTypes.PUSH_RULES, "org.matrix.foobarbaz"},
exact=True,
)
self.assertIncludes(
response_body["extensions"]["account_data"].get("rooms").keys(),
set(),
exact=True,
)
def test_global_account_data_incremental_sync(self) -> None:
"""
On incremental sync, we should only account data that has changed since the
`from_token`.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Add some global account data
self.get_success(
self.account_data_handler.add_account_data_for_user(
user_id=user1_id,
account_data_type="org.matrix.foobarbaz",
content={"foo": "bar"},
)
)
sync_body = {
"lists": {},
"extensions": {
"account_data": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Add some other global account data
self.get_success(
self.account_data_handler.add_account_data_for_user(
user_id=user1_id,
account_data_type="org.matrix.doodardaz",
content={"doo": "dar"},
)
)
# Make an incremental Sliding Sync request with the account_data extension enabled
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
self.assertIncludes(
{
global_event["type"]
for global_event in response_body["extensions"]["account_data"].get(
"global"
)
},
# We should only see the new global account data that happened after the `from_token`
{"org.matrix.doodardaz"},
exact=True,
)
self.assertIncludes(
response_body["extensions"]["account_data"].get("rooms").keys(),
set(),
exact=True,
)
def test_room_account_data_initial_sync(self) -> None:
"""
On initial sync, we return all account data for a given room but only for
rooms that we request and that are being returned in the Sliding Sync response.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a room and add some room account data
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
self.get_success(
self.account_data_handler.add_account_data_to_room(
user_id=user1_id,
room_id=room_id1,
account_data_type="org.matrix.roorarraz",
content={"roo": "rar"},
)
)
# Create another room with some room account data
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
self.get_success(
self.account_data_handler.add_account_data_to_room(
user_id=user1_id,
room_id=room_id2,
account_data_type="org.matrix.roorarraz",
content={"roo": "rar"},
)
)
# Make an initial Sliding Sync request with the account_data extension enabled
sync_body = {
"lists": {},
"room_subscriptions": {
room_id1: {
"required_state": [],
"timeline_limit": 0,
}
},
"extensions": {
"account_data": {
"enabled": True,
"rooms": [room_id1, room_id2],
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
self.assertIsNotNone(response_body["extensions"]["account_data"].get("global"))
# Even though we requested room2, we only expect room1 to show up because that's
# the only room in the Sliding Sync response (room2 is not one of our room
# subscriptions or in a sliding window list).
self.assertIncludes(
response_body["extensions"]["account_data"].get("rooms").keys(),
{room_id1},
exact=True,
)
self.assertIncludes(
{
event["type"]
for event in response_body["extensions"]["account_data"]
.get("rooms")
.get(room_id1)
},
{"org.matrix.roorarraz"},
exact=True,
)
def test_room_account_data_incremental_sync(self) -> None:
"""
On incremental sync, we return the account data that has changed for a given
room, but only for rooms that we request and that are being returned in the
Sliding Sync response.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a room and add some room account data
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
self.get_success(
self.account_data_handler.add_account_data_to_room(
user_id=user1_id,
room_id=room_id1,
account_data_type="org.matrix.roorarraz",
content={"roo": "rar"},
)
)
# Create another room with some room account data
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
self.get_success(
self.account_data_handler.add_account_data_to_room(
user_id=user1_id,
room_id=room_id2,
account_data_type="org.matrix.roorarraz",
content={"roo": "rar"},
)
)
sync_body = {
"lists": {},
"room_subscriptions": {
room_id1: {
"required_state": [],
"timeline_limit": 0,
}
},
"extensions": {
"account_data": {
"enabled": True,
"rooms": [room_id1, room_id2],
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Add some other room account data
self.get_success(
self.account_data_handler.add_account_data_to_room(
user_id=user1_id,
room_id=room_id1,
account_data_type="org.matrix.roorarraz2",
content={"roo": "rar"},
)
)
self.get_success(
self.account_data_handler.add_account_data_to_room(
user_id=user1_id,
room_id=room_id2,
account_data_type="org.matrix.roorarraz2",
content={"roo": "rar"},
)
)
# Make an incremental Sliding Sync request with the account_data extension enabled
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
self.assertIsNotNone(response_body["extensions"]["account_data"].get("global"))
# Even though we requested room2, we only expect room1 to show up because that's
# the only room in the Sliding Sync response (room2 is not one of our room
# subscriptions or in a sliding window list).
self.assertIncludes(
response_body["extensions"]["account_data"].get("rooms").keys(),
{room_id1},
exact=True,
)
# We should only see the new room account data that happened after the `from_token`
self.assertIncludes(
{
event["type"]
for event in response_body["extensions"]["account_data"]
.get("rooms")
.get(room_id1)
},
{"org.matrix.roorarraz2"},
exact=True,
)
def test_wait_for_new_data(self) -> None:
"""
Test to make sure that the Sliding Sync request waits for new data to arrive.
(Only applies to incremental syncs with a `timeout` specified)
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id, user1_id, tok=user1_tok)
sync_body = {
"lists": {},
"extensions": {
"account_data": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make an incremental Sliding Sync request with the account_data extension enabled
channel = self.make_request(
"POST",
self.sync_endpoint + f"?timeout=10000&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
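# `await_result=False` hands the in-flight request back to us so we can assert
# below that it long-polls rather than returning immediately.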
# Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=5000)
# Bump the global account data to trigger new results
self.get_success(
self.account_data_handler.add_account_data_for_user(
user1_id,
"org.matrix.foobarbaz",
{"foo": "bar"},
)
)
# Should respond before the 10 second timeout
channel.await_result(timeout_ms=3000)
self.assertEqual(channel.code, 200, channel.json_body)
# We should see the global account data update
self.assertIncludes(
{
global_event["type"]
for global_event in channel.json_body["extensions"]["account_data"].get(
"global"
)
},
{"org.matrix.foobarbaz"},
exact=True,
)
self.assertIncludes(
channel.json_body["extensions"]["account_data"].get("rooms").keys(),
set(),
exact=True,
)
def test_wait_for_new_data_timeout(self) -> None:
"""
Test to make sure that the Sliding Sync request waits for new data to arrive but
no data ever arrives, so we time out. We're also making sure that the default data
from the account_data extension doesn't trigger a false positive for new data.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
sync_body = {
"lists": {},
"extensions": {
"account_data": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make the Sliding Sync request
channel = self.make_request(
"POST",
self.sync_endpoint + f"?timeout=10000&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
# Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=5000)
# Wake up `notifier.wait_for_events(...)`, which will cause us to check
# `SlidingSyncResult.__bool__` for new results.
self._bump_notifier_wait_for_events(
user1_id,
# We choose `StreamKeyType.PRESENCE` because we're testing for account data
# and don't want to contaminate the account data results using
# `StreamKeyType.ACCOUNT_DATA`.
wake_stream_key=StreamKeyType.PRESENCE,
)
# Block for a little bit more to ensure we don't see any new results.
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=4000)
# Wait for the sync to complete (wait for the rest of the 10 second timeout,
# 5000 + 4000 + 1200 > 10000)
channel.await_result(timeout_ms=1200)
self.assertEqual(channel.code, 200, channel.json_body)
self.assertIsNotNone(
channel.json_body["extensions"]["account_data"].get("global")
)
self.assertIsNotNone(
channel.json_body["extensions"]["account_data"].get("rooms")
)


@@ -0,0 +1,441 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.rest.client import devices, login, room, sync
from synapse.server import HomeServer
from synapse.types import JsonDict, StreamKeyType
from synapse.util import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.server import TimedOutException
logger = logging.getLogger(__name__)
class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase):
"""Tests for the e2ee sliding sync extension"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
sync.register_servlets,
devices.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.e2e_keys_handler = hs.get_e2e_keys_handler()
def test_no_data_initial_sync(self) -> None:
"""
Test that enabling the e2ee extension works during an initial sync, even if there
is no data
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Make an initial Sliding Sync request with the e2ee extension enabled
sync_body = {
"lists": {},
"extensions": {
"e2ee": {
"enabled": True,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Device list updates are only present for incremental syncs
self.assertIsNone(response_body["extensions"]["e2ee"].get("device_lists"))
# Both of these should be present even when empty
self.assertEqual(
response_body["extensions"]["e2ee"]["device_one_time_keys_count"],
{
# This is always present because of
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
"signed_curve25519": 0
},
)
self.assertEqual(
response_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
[],
)
def test_no_data_incremental_sync(self) -> None:
"""
Test that enabling the e2ee extension works during an incremental sync, even if
there is no data
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
sync_body = {
"lists": {},
"extensions": {
"e2ee": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make an incremental Sliding Sync request with the e2ee extension enabled
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# Device list shows up for incremental syncs
self.assertEqual(
response_body["extensions"]["e2ee"].get("device_lists", {}).get("changed"),
[],
)
self.assertEqual(
response_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
[],
)
# Both of these should be present even when empty
self.assertEqual(
response_body["extensions"]["e2ee"]["device_one_time_keys_count"],
{
# Note that "signed_curve25519" is always returned in key count responses
# regardless of whether we uploaded any keys for it. This is necessary until
# https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
#
# Also related:
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
"signed_curve25519": 0
},
)
self.assertEqual(
response_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
[],
)
def test_wait_for_new_data(self) -> None:
"""
Test to make sure that the Sliding Sync request waits for new data to arrive.
(Only applies to incremental syncs with a `timeout` specified)
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
test_device_id = "TESTDEVICE"
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass", device_id=test_device_id)
room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id, user1_id, tok=user1_tok)
self.helper.join(room_id, user3_id, tok=user3_tok)
sync_body = {
"lists": {},
"extensions": {
"e2ee": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make the Sliding Sync request
channel = self.make_request(
"POST",
self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
# Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=5000)
# Bump the device lists to trigger new results
# Have user3 update their device list
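# Renaming a device counts as a device list change and is fanned out to other
# users who share a room with user3.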
device_update_channel = self.make_request(
"PUT",
f"/devices/{test_device_id}",
{
"display_name": "New Device Name",
},
access_token=user3_tok,
)
self.assertEqual(
device_update_channel.code, 200, device_update_channel.json_body
)
# Should respond before the 10 second timeout
channel.await_result(timeout_ms=3000)
self.assertEqual(channel.code, 200, channel.json_body)
# We should see the device list update
self.assertEqual(
channel.json_body["extensions"]["e2ee"]
.get("device_lists", {})
.get("changed"),
[user3_id],
)
self.assertEqual(
channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
[],
)
def test_wait_for_new_data_timeout(self) -> None:
"""
Test to make sure that the Sliding Sync request waits for new data to arrive but
no data ever arrives, so we time out. We're also making sure that the default data
from the E2EE extension doesn't trigger a false positive for new data (see
`device_one_time_keys_count.signed_curve25519`).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
sync_body = {
"lists": {},
"extensions": {
"e2ee": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make the Sliding Sync request
channel = self.make_request(
"POST",
self.sync_endpoint + f"?timeout=10000&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
# Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=5000)
# Wake up `notifier.wait_for_events(...)`, which will cause us to check
# `SlidingSyncResult.__bool__` for new results.
self._bump_notifier_wait_for_events(
user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
)
# Block for a little bit more to ensure we don't see any new results.
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=4000)
# Wait for the sync to complete (wait for the rest of the 10 second timeout,
# 5000 + 4000 + 1200 > 10000)
channel.await_result(timeout_ms=1200)
self.assertEqual(channel.code, 200, channel.json_body)
# Device lists are present for incremental syncs but empty because no device changes
self.assertEqual(
channel.json_body["extensions"]["e2ee"]
.get("device_lists", {})
.get("changed"),
[],
)
self.assertEqual(
channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
[],
)
# Both of these should be present even when empty
self.assertEqual(
channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"],
{
# Note that "signed_curve25519" is always returned in key count responses
# regardless of whether we uploaded any keys for it. This is necessary until
# https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
#
# Also related:
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
"signed_curve25519": 0
},
)
self.assertEqual(
channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
[],
)
def test_device_lists(self) -> None:
"""
Test that device list updates are included in the response
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
test_device_id = "TESTDEVICE"
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass", device_id=test_device_id)
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id, user1_id, tok=user1_tok)
self.helper.join(room_id, user3_id, tok=user3_tok)
self.helper.join(room_id, user4_id, tok=user4_tok)
sync_body = {
"lists": {},
"extensions": {
"e2ee": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Have user3 update their device list
channel = self.make_request(
"PUT",
f"/devices/{test_device_id}",
{
"display_name": "New Device Name",
},
access_token=user3_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User4 leaves the room
self.helper.leave(room_id, user4_id, tok=user4_tok)
# Make an incremental Sliding Sync request with the e2ee extension enabled
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# Device list updates show up
self.assertEqual(
response_body["extensions"]["e2ee"].get("device_lists", {}).get("changed"),
[user3_id],
)
self.assertEqual(
response_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
[user4_id],
)
def test_device_one_time_keys_count(self) -> None:
"""
Test that `device_one_time_keys_count` are included in the response
"""
test_device_id = "TESTDEVICE"
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass", device_id=test_device_id)
# Upload one time keys for the user/device
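# One-time keys are keyed by "<algorithm>:<key_id>".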
keys: JsonDict = {
"alg1:k1": "key1",
"alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}},
"alg2:k3": {"key": "key3"},
}
upload_keys_response = self.get_success(
self.e2e_keys_handler.upload_keys_for_user(
user1_id, test_device_id, {"one_time_keys": keys}
)
)
self.assertDictEqual(
upload_keys_response,
{
"one_time_key_counts": {
"alg1": 1,
"alg2": 2,
# Note that "signed_curve25519" is always returned in key count responses
# regardless of whether we uploaded any keys for it. This is necessary until
# https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
#
# Also related:
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
"signed_curve25519": 0,
}
},
)
# Make a Sliding Sync request with the e2ee extension enabled
sync_body = {
"lists": {},
"extensions": {
"e2ee": {
"enabled": True,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Check for those one time key counts
self.assertEqual(
response_body["extensions"]["e2ee"].get("device_one_time_keys_count"),
{
"alg1": 1,
"alg2": 2,
# Note that "signed_curve25519" is always returned in key count responses
# regardless of whether we uploaded any keys for it. This is necessary until
# https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
#
# Also related:
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
"signed_curve25519": 0,
},
)
def test_device_unused_fallback_key_types(self) -> None:
"""
Test that `device_unused_fallback_key_types` are included in the response
"""
test_device_id = "TESTDEVICE"
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass", device_id=test_device_id)
# We shouldn't have any unused fallback keys yet
res = self.get_success(
self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id)
)
self.assertEqual(res, [])
# Upload a fallback key for the user/device
self.get_success(
self.e2e_keys_handler.upload_keys_for_user(
user1_id,
test_device_id,
{"fallback_keys": {"alg1:k1": "fallback_key1"}},
)
)
# We should now have an unused alg1 key
fallback_res = self.get_success(
self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id)
)
self.assertEqual(fallback_res, ["alg1"], fallback_res)
# Make a Sliding Sync request with the e2ee extension enabled
sync_body = {
"lists": {},
"extensions": {
"e2ee": {
"enabled": True,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Check for the unused fallback key types
self.assertListEqual(
response_body["extensions"]["e2ee"].get("device_unused_fallback_key_types"),
["alg1"],
)


@@ -0,0 +1,679 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import EduTypes, ReceiptTypes
from synapse.rest.client import login, receipts, room, sync
from synapse.server import HomeServer
from synapse.types import StreamKeyType
from synapse.util import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.server import TimedOutException
logger = logging.getLogger(__name__)
class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase):
"""Tests for the receipts sliding sync extension"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
sync.register_servlets,
receipts.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
def test_no_data_initial_sync(self) -> None:
"""
Test that enabling the receipts extension works during an initial sync,
even if there is no data.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Make an initial Sliding Sync request with the receipts extension enabled
sync_body = {
"lists": {},
"extensions": {
"receipts": {
"enabled": True,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
self.assertIncludes(
response_body["extensions"]["receipts"].get("rooms").keys(),
set(),
exact=True,
)
def test_no_data_incremental_sync(self) -> None:
"""
Test that enabling the receipts extension works during an incremental sync,
even if there is no data.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
sync_body = {
"lists": {},
"extensions": {
"receipts": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make an incremental Sliding Sync request with the receipts extension enabled
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
self.assertIncludes(
response_body["extensions"]["receipts"].get("rooms").keys(),
set(),
exact=True,
)
def test_receipts_initial_sync_with_timeline(self) -> None:
"""
On initial sync, we only return receipts for events in a given room's timeline.
We also make sure that we only return receipts for rooms that we request and that
are already being returned in the Sliding Sync response.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
# Create a room
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
self.helper.join(room_id1, user4_id, tok=user4_tok)
room1_event_response1 = self.helper.send(
room_id1, body="new event1", tok=user2_tok
)
room1_event_response2 = self.helper.send(
room_id1, body="new event2", tok=user2_tok
)
# User1 reads the last event
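# A public read receipt acknowledges everything up to and including the given event.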
channel = self.make_request(
"POST",
f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}",
{},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User2 reads the last event
channel = self.make_request(
"POST",
f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}",
{},
access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User3 reads the first event
channel = self.make_request(
"POST",
f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
{},
access_token=user3_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User4 privately reads the last event (make sure this doesn't leak to the other users)
channel = self.make_request(
"POST",
f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ_PRIVATE}/{room1_event_response2['event_id']}",
{},
access_token=user4_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Create another room
room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id2, user1_id, tok=user1_tok)
self.helper.join(room_id2, user3_id, tok=user3_tok)
self.helper.join(room_id2, user4_id, tok=user4_tok)
room2_event_response1 = self.helper.send(
room_id2, body="new event2", tok=user2_tok
)
# User1 reads the last event
channel = self.make_request(
"POST",
f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}",
{},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User2 reads the last event
channel = self.make_request(
"POST",
f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}",
{},
access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User4 privately reads the last event (make sure this doesn't leak to the other users)
channel = self.make_request(
"POST",
f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ_PRIVATE}/{room2_event_response1['event_id']}",
{},
access_token=user4_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Make an initial Sliding Sync request with the receipts extension enabled
sync_body = {
"lists": {},
"room_subscriptions": {
room_id1: {
"required_state": [],
# On initial sync, we only have receipts for events in the timeline
"timeline_limit": 1,
}
},
"extensions": {
"receipts": {
"enabled": True,
"rooms": [room_id1, room_id2],
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Only the latest event in the room is in the timeline because the `timeline_limit` is 1
self.assertIncludes(
{
event["event_id"]
for event in response_body["rooms"][room_id1].get("timeline", [])
},
{room1_event_response2["event_id"]},
exact=True,
message=str(response_body["rooms"][room_id1]),
)
# Even though we requested room2, we only expect room1 to show up because that's
# the only room in the Sliding Sync response (room2 is not one of our room
# subscriptions or in a sliding window list).
self.assertIncludes(
response_body["extensions"]["receipts"].get("rooms").keys(),
{room_id1},
exact=True,
)
# Sanity check that it's the correct ephemeral event type
self.assertEqual(
response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
EduTypes.RECEIPT,
)
# We can see user1 and user2 read receipts
self.assertIncludes(
response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
room1_event_response2["event_id"]
][ReceiptTypes.READ].keys(),
{user1_id, user2_id},
exact=True,
)
# User1 did not have a private read receipt and we shouldn't leak others'
# private read receipts
self.assertIncludes(
response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
room1_event_response2["event_id"]
]
.get(ReceiptTypes.READ_PRIVATE, {})
.keys(),
set(),
exact=True,
)
# We shouldn't see receipts for event1 since it wasn't in the timeline and this is an initial sync
self.assertIsNone(
response_body["extensions"]["receipts"]["rooms"][room_id1]["content"].get(
room1_event_response1["event_id"]
)
)
def test_receipts_incremental_sync(self) -> None:
"""
On incremental sync, we return all receipts in the token range for a given room
but only for rooms that we request and that are being returned in the Sliding Sync
response.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
# Create room1
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
room1_event_response1 = self.helper.send(
room_id1, body="new event2", tok=user2_tok
)
# User2 reads the last event (before the `from_token`)
channel = self.make_request(
"POST",
f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
{},
access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Create room2
room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id2, user1_id, tok=user1_tok)
room2_event_response1 = self.helper.send(
room_id2, body="new event2", tok=user2_tok
)
# User1 reads the last event (before the `from_token`)
channel = self.make_request(
"POST",
f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}",
{},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Create room3
room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id3, user1_id, tok=user1_tok)
self.helper.join(room_id3, user3_id, tok=user3_tok)
room3_event_response1 = self.helper.send(
room_id3, body="new event", tok=user2_tok
)
# Create room4
room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id4, user1_id, tok=user1_tok)
self.helper.join(room_id4, user3_id, tok=user3_tok)
event_response4 = self.helper.send(room_id4, body="new event", tok=user2_tok)
# User1 reads the last event (before the `from_token`)
channel = self.make_request(
"POST",
f"/rooms/{room_id4}/receipt/{ReceiptTypes.READ}/{event_response4['event_id']}",
{},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
sync_body = {
"lists": {},
"room_subscriptions": {
room_id1: {
"required_state": [],
"timeline_limit": 0,
},
room_id3: {
"required_state": [],
"timeline_limit": 0,
},
room_id4: {
"required_state": [],
"timeline_limit": 0,
},
},
"extensions": {
"receipts": {
"enabled": True,
"rooms": [room_id1, room_id2, room_id3, room_id4],
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Add some more read receipts after the `from_token`
#
# User1 reads room1
channel = self.make_request(
"POST",
f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
{},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User1 privately reads room2
channel = self.make_request(
"POST",
f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ_PRIVATE}/{room2_event_response1['event_id']}",
{},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User3 reads room3
channel = self.make_request(
"POST",
f"/rooms/{room_id3}/receipt/{ReceiptTypes.READ}/{room3_event_response1['event_id']}",
{},
access_token=user3_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# No activity for room4 after the `from_token`
# Make an incremental Sliding Sync request with the receipts extension enabled
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# Even though we requested room2, we only expect rooms to show up if they are
# already in the Sliding Sync response. room4 doesn't show up because there is
# no activity after the `from_token`.
self.assertIncludes(
response_body["extensions"]["receipts"].get("rooms").keys(),
{room_id1, room_id3},
exact=True,
)
# Check room1:
#
# Sanity check that it's the correct ephemeral event type
self.assertEqual(
response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
EduTypes.RECEIPT,
)
# We only see that user1 has read something in room1 since the `from_token`
self.assertIncludes(
response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
room1_event_response1["event_id"]
][ReceiptTypes.READ].keys(),
{user1_id},
exact=True,
)
# User1 did not send a private read receipt in this room and we shouldn't leak
# others' private read receipts
self.assertIncludes(
response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
room1_event_response1["event_id"]
]
.get(ReceiptTypes.READ_PRIVATE, {})
.keys(),
set(),
exact=True,
)
# No events in the timeline since they were sent before the `from_token`
self.assertNotIn(room_id1, response_body["rooms"])
# Check room3:
#
# Sanity check that it's the correct ephemeral event type
self.assertEqual(
response_body["extensions"]["receipts"]["rooms"][room_id3]["type"],
EduTypes.RECEIPT,
)
# We only see that user3 has read something in room3 since the `from_token`
self.assertIncludes(
response_body["extensions"]["receipts"]["rooms"][room_id3]["content"][
room3_event_response1["event_id"]
][ReceiptTypes.READ].keys(),
{user3_id},
exact=True,
)
# User1 did not send a private read receipt in this room and we shouldn't leak
# others' private read receipts
self.assertIncludes(
response_body["extensions"]["receipts"]["rooms"][room_id3]["content"][
room3_event_response1["event_id"]
]
.get(ReceiptTypes.READ_PRIVATE, {})
.keys(),
set(),
exact=True,
)
# No events in the timeline since they were sent before the `from_token`
self.assertNotIn(room_id3, response_body["rooms"])
def test_receipts_incremental_sync_all_live_receipts(self) -> None:
"""
On incremental sync, we return all receipts in the token range for a given room
even if they are not in the timeline.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
# Create room1
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
sync_body = {
"lists": {},
"room_subscriptions": {
room_id1: {
"required_state": [],
# The timeline will only include event2
"timeline_limit": 1,
},
},
"extensions": {
"receipts": {
"enabled": True,
"rooms": [room_id1],
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
room1_event_response1 = self.helper.send(
room_id1, body="new event1", tok=user2_tok
)
room1_event_response2 = self.helper.send(
room_id1, body="new event2", tok=user2_tok
)
# User1 reads event1
channel = self.make_request(
"POST",
f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
{},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User2 reads event2
channel = self.make_request(
"POST",
f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}",
{},
access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Make an incremental Sliding Sync request with the receipts extension enabled
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# We should see room1 because it has receipts in the token range
self.assertIncludes(
response_body["extensions"]["receipts"].get("rooms").keys(),
{room_id1},
exact=True,
)
# Sanity check that it's the correct ephemeral event type
self.assertEqual(
response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
EduTypes.RECEIPT,
)
# We should see all receipts in the token range regardless of whether the events
# are in the timeline
self.assertIncludes(
response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
room1_event_response1["event_id"]
][ReceiptTypes.READ].keys(),
{user1_id},
exact=True,
)
self.assertIncludes(
response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
room1_event_response2["event_id"]
][ReceiptTypes.READ].keys(),
{user2_id},
exact=True,
)
# Only the latest event is in the timeline because the `timeline_limit` is 1
self.assertIncludes(
{
event["event_id"]
for event in response_body["rooms"][room_id1].get("timeline", [])
},
{room1_event_response2["event_id"]},
exact=True,
message=str(response_body["rooms"][room_id1]),
)
def test_wait_for_new_data(self) -> None:
"""
Test to make sure that the Sliding Sync request waits for new data to arrive.
(Only applies to incremental syncs with a `timeout` specified)
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id, user1_id, tok=user1_tok)
event_response = self.helper.send(room_id, body="new event", tok=user2_tok)
sync_body = {
"lists": {},
"room_subscriptions": {
room_id: {
"required_state": [],
"timeline_limit": 0,
},
},
"extensions": {
"receipts": {
"enabled": True,
"rooms": [room_id],
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make an incremental Sliding Sync request with the receipts extension enabled
channel = self.make_request(
"POST",
self.sync_endpoint + f"?timeout=10000&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
# Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=5000)
# Bump the receipts to trigger new results
receipt_channel = self.make_request(
"POST",
f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_response['event_id']}",
{},
access_token=user2_tok,
)
self.assertEqual(receipt_channel.code, 200, receipt_channel.json_body)
# Should respond before the 10 second timeout
channel.await_result(timeout_ms=3000)
self.assertEqual(channel.code, 200, channel.json_body)
# We should see the new receipt
self.assertIncludes(
channel.json_body.get("extensions", {})
.get("receipts", {})
.get("rooms", {})
.keys(),
{room_id},
exact=True,
message=str(channel.json_body),
)
self.assertIncludes(
channel.json_body["extensions"]["receipts"]["rooms"][room_id]["content"][
event_response["event_id"]
][ReceiptTypes.READ].keys(),
{user2_id},
exact=True,
)
# User1 did not send a private read receipt in this room and we shouldn't leak
# others' private read receipts
self.assertIncludes(
channel.json_body["extensions"]["receipts"]["rooms"][room_id]["content"][
event_response["event_id"]
]
.get(ReceiptTypes.READ_PRIVATE, {})
.keys(),
set(),
exact=True,
)
def test_wait_for_new_data_timeout(self) -> None:
"""
Test to make sure that the Sliding Sync request waits for new data to arrive but
no data ever arrives, so we time out. We're also making sure that the default data
from the receipts extension doesn't trigger a false positive for new data.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
sync_body = {
"lists": {},
"extensions": {
"receipts": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make the Sliding Sync request
channel = self.make_request(
"POST",
self.sync_endpoint + f"?timeout=10000&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
# Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=5000)
# Wake up `notifier.wait_for_events(...)`, which will cause us to check
# `SlidingSyncResult.__bool__` for new results.
self._bump_notifier_wait_for_events(
user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
)
# Block for a little bit more to ensure we don't see any new results.
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=4000)
# Wait for the sync to complete (wait for the rest of the 10 second timeout,
# 5000 + 4000 + 1200 > 10000)
channel.await_result(timeout_ms=1200)
self.assertEqual(channel.code, 200, channel.json_body)
self.assertIncludes(
channel.json_body["extensions"]["receipts"].get("rooms").keys(),
set(),
exact=True,
)


@@ -0,0 +1,278 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from typing import List
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.rest.client import login, sendtodevice, sync
from synapse.server import HomeServer
from synapse.types import JsonDict, StreamKeyType
from synapse.util import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.server import TimedOutException
logger = logging.getLogger(__name__)
class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase):
"""Tests for the to-device sliding sync extension"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
sync.register_servlets,
sendtodevice.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
def _assert_to_device_response(
self, response_body: JsonDict, expected_messages: List[JsonDict]
) -> str:
"""Assert the sliding sync response was successful and has the expected
to-device messages.
Returns the next_batch token from the to-device section.
"""
extensions = response_body["extensions"]
to_device = extensions["to_device"]
self.assertIsInstance(to_device["next_batch"], str)
self.assertEqual(to_device["events"], expected_messages)
return to_device["next_batch"]
def test_no_data(self) -> None:
"""Test that enabling to-device extension works, even if there is
no-data
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
sync_body = {
"lists": {},
"extensions": {
"to_device": {
"enabled": True,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# We expect no to-device messages
self._assert_to_device_response(response_body, [])
def test_data_initial_sync(self) -> None:
"""Test that we get to-device messages when we don't specify a since
token"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass", "d1")
user2_id = self.register_user("u2", "pass")
user2_tok = self.login(user2_id, "pass", "d2")
# Send the to-device message
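# `sendToDevice` targets specific devices of specific users; the trailing "1234"
# is the client-chosen transaction ID.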
test_msg = {"foo": "bar"}
chan = self.make_request(
"PUT",
"/_matrix/client/r0/sendToDevice/m.test/1234",
content={"messages": {user1_id: {"d1": test_msg}}},
access_token=user2_tok,
)
self.assertEqual(chan.code, 200, chan.result)
sync_body = {
"lists": {},
"extensions": {
"to_device": {
"enabled": True,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
self._assert_to_device_response(
response_body,
[{"content": test_msg, "sender": user2_id, "type": "m.test"}],
)
def test_data_incremental_sync(self) -> None:
"""Test that we get to-device messages over incremental syncs"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass", "d1")
user2_id = self.register_user("u2", "pass")
user2_tok = self.login(user2_id, "pass", "d2")
sync_body: JsonDict = {
"lists": {},
"extensions": {
"to_device": {
"enabled": True,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# No to-device messages yet.
next_batch = self._assert_to_device_response(response_body, [])
test_msg = {"foo": "bar"}
chan = self.make_request(
"PUT",
"/_matrix/client/r0/sendToDevice/m.test/1234",
content={"messages": {user1_id: {"d1": test_msg}}},
access_token=user2_tok,
)
self.assertEqual(chan.code, 200, chan.result)
sync_body = {
"lists": {},
"extensions": {
"to_device": {
"enabled": True,
"since": next_batch,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
next_batch = self._assert_to_device_response(
response_body,
[{"content": test_msg, "sender": user2_id, "type": "m.test"}],
)
# The next sliding sync request should not include the to-device
# message.
sync_body = {
"lists": {},
"extensions": {
"to_device": {
"enabled": True,
"since": next_batch,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
self._assert_to_device_response(response_body, [])
# An initial sliding sync request should not include the to-device
# message, as it should have been deleted
sync_body = {
"lists": {},
"extensions": {
"to_device": {
"enabled": True,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
self._assert_to_device_response(response_body, [])
def test_wait_for_new_data(self) -> None:
"""
Test to make sure that the Sliding Sync request waits for new data to arrive.
(Only applies to incremental syncs with a `timeout` specified)
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass", "d1")
user2_id = self.register_user("u2", "pass")
user2_tok = self.login(user2_id, "pass", "d2")
sync_body = {
"lists": {},
"extensions": {
"to_device": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make the Sliding Sync request
channel = self.make_request(
"POST",
self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
# Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=5000)
# Bump the to-device messages to trigger new results
test_msg = {"foo": "bar"}
send_to_device_channel = self.make_request(
"PUT",
"/_matrix/client/r0/sendToDevice/m.test/1234",
content={"messages": {user1_id: {"d1": test_msg}}},
access_token=user2_tok,
)
self.assertEqual(
send_to_device_channel.code, 200, send_to_device_channel.result
)
# Should respond before the 10 second timeout
channel.await_result(timeout_ms=3000)
self.assertEqual(channel.code, 200, channel.json_body)
self._assert_to_device_response(
channel.json_body,
[{"content": test_msg, "sender": user2_id, "type": "m.test"}],
)
def test_wait_for_new_data_timeout(self) -> None:
"""
Test to make sure that the Sliding Sync request waits for new data to arrive but
no data ever arrives, so we time out. We're also making sure that the default data
from the To-Device extension doesn't trigger a false positive for new data.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
sync_body = {
"lists": {},
"extensions": {
"to_device": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make the Sliding Sync request
channel = self.make_request(
"POST",
self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
# Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=5000)
# Wake up `notifier.wait_for_events(...)`, which will cause us to check
# `SlidingSyncResult.__bool__` for new results.
self._bump_notifier_wait_for_events(
user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
)
# Block for a little bit more to ensure we don't see any new results.
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=4000)
# Wait for the sync to complete (wait for the rest of the 10 second timeout,
# 5000 + 4000 + 1200 > 10000)
channel.await_result(timeout_ms=1200)
self.assertEqual(channel.code, 200, channel.json_body)
self._assert_to_device_response(channel.json_body, [])


@@ -0,0 +1,482 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import EduTypes
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.types import StreamKeyType
from synapse.util import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.server import TimedOutException
logger = logging.getLogger(__name__)
class SlidingSyncTypingExtensionTestCase(SlidingSyncBase):
"""Tests for the typing notification sliding sync extension"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
sync.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
def test_no_data_initial_sync(self) -> None:
"""
Test that enabling the typing extension works during an initial sync,
even if there is no-data.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Make an initial Sliding Sync request with the typing extension enabled
sync_body = {
"lists": {},
"extensions": {
"typing": {
"enabled": True,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
self.assertIncludes(
response_body["extensions"]["typing"].get("rooms").keys(),
set(),
exact=True,
)
def test_no_data_incremental_sync(self) -> None:
"""
Test that enabling the typing extension works during an incremental sync, even
if there is no-data.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
sync_body = {
"lists": {},
"extensions": {
"typing": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make an incremental Sliding Sync request with the typing extension enabled
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
self.assertIncludes(
response_body["extensions"]["typing"].get("rooms").keys(),
set(),
exact=True,
)
def test_typing_initial_sync(self) -> None:
"""
On initial sync, we return all typing notifications for rooms that we request
and that are included in the Sliding Sync response.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
# Create a room
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
self.helper.join(room_id1, user4_id, tok=user4_tok)
# User1 starts typing in room1
channel = self.make_request(
"PUT",
f"/rooms/{room_id1}/typing/{user1_id}",
b'{"typing": true, "timeout": 30000}',
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User2 starts typing in room1
channel = self.make_request(
"PUT",
f"/rooms/{room_id1}/typing/{user2_id}",
b'{"typing": true, "timeout": 30000}',
access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Create another room
room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id2, user1_id, tok=user1_tok)
self.helper.join(room_id2, user3_id, tok=user3_tok)
self.helper.join(room_id2, user4_id, tok=user4_tok)
# User1 starts typing in room2
channel = self.make_request(
"PUT",
f"/rooms/{room_id2}/typing/{user1_id}",
b'{"typing": true, "timeout": 30000}',
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User2 starts typing in room2
channel = self.make_request(
"PUT",
f"/rooms/{room_id2}/typing/{user2_id}",
b'{"typing": true, "timeout": 30000}',
access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Make an initial Sliding Sync request with the typing extension enabled
sync_body = {
"lists": {},
"room_subscriptions": {
room_id1: {
"required_state": [],
"timeline_limit": 0,
}
},
"extensions": {
"typing": {
"enabled": True,
"rooms": [room_id1, room_id2],
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Even though we requested room2, we only expect room1 to show up because that's
# the only room in the Sliding Sync response (room2 is not one of our room
# subscriptions or in a sliding window list).
self.assertIncludes(
response_body["extensions"]["typing"].get("rooms").keys(),
{room_id1},
exact=True,
)
# Sanity check that it's the correct ephemeral event type
self.assertEqual(
response_body["extensions"]["typing"]["rooms"][room_id1]["type"],
EduTypes.TYPING,
)
# We can see user1 and user2 typing
self.assertIncludes(
set(
response_body["extensions"]["typing"]["rooms"][room_id1]["content"][
"user_ids"
]
),
{user1_id, user2_id},
exact=True,
)
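# (The typing extension relays the standard `m.typing` EDU per room, i.e.
# roughly `{"type": "m.typing", "content": {"user_ids": [...]}}`, which is
# what the assertions above pick apart.)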
def test_typing_incremental_sync(self) -> None:
"""
On incremental sync, we return all typing notifications in the token range for a
given room, but only for rooms that we request and that are included in the
Sliding Sync response.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
# Create room1
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
# User2 starts typing in room1
channel = self.make_request(
"PUT",
f"/rooms/{room_id1}/typing/{user2_id}",
b'{"typing": true, "timeout": 30000}',
access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Create room2
room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id2, user1_id, tok=user1_tok)
# User1 starts typing in room2 (before the `from_token`)
channel = self.make_request(
"PUT",
f"/rooms/{room_id2}/typing/{user1_id}",
b'{"typing": true, "timeout": 30000}',
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Create room3
room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id3, user1_id, tok=user1_tok)
self.helper.join(room_id3, user3_id, tok=user3_tok)
# Create room4
room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id4, user1_id, tok=user1_tok)
self.helper.join(room_id4, user3_id, tok=user3_tok)
# User1 starts typing in room4 (before the `from_token`)
channel = self.make_request(
"PUT",
f"/rooms/{room_id4}/typing/{user1_id}",
b'{"typing": true, "timeout": 30000}',
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Advance time so all of the typing notifications time out before we make our
# Sliding Sync requests. Even though these are sent before the `from_token`, the
# typing code only keeps track of the stream position of the latest typing
# notification, so "old" typing notifications that are still "alive" (haven't
# timed out) can appear in the response.
self.reactor.advance(36)
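# (36 s comfortably exceeds the 30 s (`timeout: 30000` ms) used in the typing
# requests above.)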
sync_body = {
"lists": {},
"room_subscriptions": {
room_id1: {
"required_state": [],
"timeline_limit": 0,
},
room_id3: {
"required_state": [],
"timeline_limit": 0,
},
room_id4: {
"required_state": [],
"timeline_limit": 0,
},
},
"extensions": {
"typing": {
"enabled": True,
"rooms": [room_id1, room_id2, room_id3, room_id4],
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Add some more typing notifications after the `from_token`
#
# User1 starts typing in room1
channel = self.make_request(
"PUT",
f"/rooms/{room_id1}/typing/{user1_id}",
b'{"typing": true, "timeout": 30000}',
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User1 starts typing in room2
channel = self.make_request(
"PUT",
f"/rooms/{room_id2}/typing/{user1_id}",
b'{"typing": true, "timeout": 30000}',
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# User3 starts typing in room3
channel = self.make_request(
"PUT",
f"/rooms/{room_id3}/typing/{user3_id}",
b'{"typing": true, "timeout": 30000}',
access_token=user3_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# No activity for room4 after the `from_token`
# Make an incremental Sliding Sync request with the typing extension enabled
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# Even though we requested room2, we only expect rooms to show up if they are
# already in the Sliding Sync response. room4 doesn't show up because there is
# no activity after the `from_token`.
self.assertIncludes(
response_body["extensions"]["typing"].get("rooms").keys(),
{room_id1, room_id3},
exact=True,
)
# Check room1:
#
# Sanity check that it's the correct ephemeral event type
self.assertEqual(
response_body["extensions"]["typing"]["rooms"][room_id1]["type"],
EduTypes.TYPING,
)
# We only see that user1 is typing in room1 since the `from_token`
self.assertIncludes(
set(
response_body["extensions"]["typing"]["rooms"][room_id1]["content"][
"user_ids"
]
),
{user1_id},
exact=True,
)
# Check room3:
#
# Sanity check that it's the correct ephemeral event type
self.assertEqual(
response_body["extensions"]["typing"]["rooms"][room_id3]["type"],
EduTypes.TYPING,
)
# We only see that user3 is typing in room3 since the `from_token`
self.assertIncludes(
set(
response_body["extensions"]["typing"]["rooms"][room_id3]["content"][
"user_ids"
]
),
{user3_id},
exact=True,
)
def test_wait_for_new_data(self) -> None:
"""
Test to make sure that the Sliding Sync request waits for new data to arrive.
(Only applies to incremental syncs with a `timeout` specified)
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id, user1_id, tok=user1_tok)
sync_body = {
"lists": {},
"room_subscriptions": {
room_id: {
"required_state": [],
"timeline_limit": 0,
},
},
"extensions": {
"typing": {
"enabled": True,
"rooms": [room_id],
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make an incremental Sliding Sync request with the typing extension enabled
channel = self.make_request(
"POST",
self.sync_endpoint + f"?timeout=10000&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
# Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=5000)
# Bump the typing status to trigger new results
typing_channel = self.make_request(
"PUT",
f"/rooms/{room_id}/typing/{user2_id}",
b'{"typing": true, "timeout": 30000}',
access_token=user2_tok,
)
self.assertEqual(typing_channel.code, 200, typing_channel.json_body)
# Should respond before the 10 second timeout
channel.await_result(timeout_ms=3000)
self.assertEqual(channel.code, 200, channel.json_body)
# We should see the new typing notification
self.assertIncludes(
channel.json_body.get("extensions", {})
.get("typing", {})
.get("rooms", {})
.keys(),
{room_id},
exact=True,
message=str(channel.json_body),
)
self.assertIncludes(
set(
channel.json_body["extensions"]["typing"]["rooms"][room_id]["content"][
"user_ids"
]
),
{user2_id},
exact=True,
)
def test_wait_for_new_data_timeout(self) -> None:
"""
Test to make sure that the Sliding Sync request waits for new data to arrive but
no data ever arrives so we timeout. We're also making sure that the default data
from the typing extension doesn't trigger a false-positive for new data.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
sync_body = {
"lists": {},
"extensions": {
"typing": {
"enabled": True,
}
},
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make the Sliding Sync request
channel = self.make_request(
"POST",
self.sync_endpoint + f"?timeout=10000&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
# Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=5000)
# Wake up `notifier.wait_for_events(...)`, which will cause us to test
# `SlidingSyncResult.__bool__` for new results.
self._bump_notifier_wait_for_events(
user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
)
# Block for a little bit more to ensure we don't see any new results.
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=4000)
# Wait for the sync to complete (wait for the rest of the 10 second timeout,
# 5000 + 4000 + 1200 > 10000)
channel.await_result(timeout_ms=1200)
self.assertEqual(channel.code, 200, channel.json_body)
self.assertIncludes(
channel.json_body["extensions"]["typing"].get("rooms").keys(),
set(),
exact=True,
)

View File

@@ -0,0 +1,283 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from typing import Literal
from parameterized import parameterized
from typing_extensions import assert_never
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import ReceiptTypes
from synapse.rest.client import login, receipts, room, sync
from synapse.server import HomeServer
from synapse.util import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
logger = logging.getLogger(__name__)
class SlidingSyncExtensionsTestCase(SlidingSyncBase):
"""
Test general extensions behavior in the Sliding Sync API. Each extension also
has its own suite of tests in its own file.
"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
sync.register_servlets,
receipts.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers()
self.account_data_handler = hs.get_account_data_handler()
# Any extensions that use `lists`/`rooms` should be tested here
@parameterized.expand([("account_data",), ("receipts",), ("typing",)])
def test_extensions_lists_rooms_relevant_rooms(
self,
extension_name: Literal["account_data", "receipts", "typing"],
) -> None:
"""
With various extensions, test out requesting different variations of
`lists`/`rooms`.
Stresses `SlidingSyncHandler.find_relevant_room_ids_for_extension(...)`
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create some rooms
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok)
room_id4 = self.helper.create_room_as(user1_id, tok=user1_tok)
room_id5 = self.helper.create_room_as(user1_id, tok=user1_tok)
room_id_to_human_name_map = {
room_id1: "room1",
room_id2: "room2",
room_id3: "room3",
room_id4: "room4",
room_id5: "room5",
}
for room_id in room_id_to_human_name_map.keys():
if extension_name == "account_data":
# Add some account data to each room
self.get_success(
self.account_data_handler.add_account_data_to_room(
user_id=user1_id,
room_id=room_id,
account_data_type="org.matrix.roorarraz",
content={"roo": "rar"},
)
)
elif extension_name == "receipts":
event_response = self.helper.send(
room_id, body="new event", tok=user1_tok
)
# Mark the last event as read
channel = self.make_request(
"POST",
f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_response['event_id']}",
{},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
elif extension_name == "typing":
# Start a typing notification
channel = self.make_request(
"PUT",
f"/rooms/{room_id}/typing/{user1_id}",
b'{"typing": true, "timeout": 30000}',
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
else:
assert_never(extension_name)
main_sync_body = {
"lists": {
# We expect this list range to include room5 and room4
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 0,
},
# We expect this list range to include room5, room4, room3
"bar-list": {
"ranges": [[0, 2]],
"required_state": [],
"timeline_limit": 0,
},
},
"room_subscriptions": {
room_id1: {
"required_state": [],
"timeline_limit": 0,
}
},
}
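# (Rooms in sliding window lists are ordered by recency, so room5, the most
# recently created and active room, sorts first, followed by room4, then
# room3.)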
# Mix lists and rooms
sync_body = {
**main_sync_body,
"extensions": {
extension_name: {
"enabled": True,
"lists": ["foo-list", "non-existent-list"],
"rooms": [room_id1, room_id2, "!non-existent-room"],
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# room1: ✅ Requested via `rooms` and a room subscription exists
# room2: ❌ Requested via `rooms` but not in the response (from lists or room subscriptions)
# room3: ❌ Not requested
# room4: ✅ Shows up because requested via `lists` and list exists in the response
# room5: ✅ Shows up because requested via `lists` and list exists in the response
self.assertIncludes(
{
room_id_to_human_name_map[room_id]
for room_id in response_body["extensions"][extension_name]
.get("rooms")
.keys()
},
{"room1", "room4", "room5"},
exact=True,
)
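# In other words, a room only appears in an extension's response if it was
# requested via the extension's `lists`/`rooms` AND it is present in the main
# response (via a sliding window list or a room subscription).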
# Try wildcards (this is the default)
sync_body = {
**main_sync_body,
"extensions": {
extension_name: {
"enabled": True,
# "lists": ["*"],
# "rooms": ["*"],
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# room1: ✅ Shows up because of default `rooms` wildcard and is in one of the room subscriptions
# room2: ❌ Not requested
# room3: ✅ Shows up because of default `lists` wildcard and is in a list
# room4: ✅ Shows up because of default `lists` wildcard and is in a list
# room5: ✅ Shows up because of default `lists` wildcard and is in a list
self.assertIncludes(
{
room_id_to_human_name_map[room_id]
for room_id in response_body["extensions"][extension_name]
.get("rooms")
.keys()
},
{"room1", "room3", "room4", "room5"},
exact=True,
)
# Empty list will return nothing
sync_body = {
**main_sync_body,
"extensions": {
extension_name: {
"enabled": True,
"lists": [],
"rooms": [],
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# room1: ❌ Not requested
# room2: ❌ Not requested
# room3: ❌ Not requested
# room4: ❌ Not requested
# room5: ❌ Not requested
self.assertIncludes(
{
room_id_to_human_name_map[room_id]
for room_id in response_body["extensions"][extension_name]
.get("rooms")
.keys()
},
set(),
exact=True,
)
# Try wildcard and none
sync_body = {
**main_sync_body,
"extensions": {
extension_name: {
"enabled": True,
"lists": ["*"],
"rooms": [],
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# room1: ❌ Not requested
# room2: ❌ Not requested
# room3: ✅ Shows up because of the explicit `lists` wildcard and is in a list
# room4: ✅ Shows up because of the explicit `lists` wildcard and is in a list
# room5: ✅ Shows up because of the explicit `lists` wildcard and is in a list
self.assertIncludes(
{
room_id_to_human_name_map[room_id]
for room_id in response_body["extensions"][extension_name]
.get("rooms")
.keys()
},
{"room3", "room4", "room5"},
exact=True,
)
# Try requesting a room that is only in a list
sync_body = {
**main_sync_body,
"extensions": {
extension_name: {
"enabled": True,
"lists": [],
"rooms": [room_id5],
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# room1: ❌ Not requested
# room2: ❌ Not requested
# room3: ❌ Not requested
# room4: ❌ Not requested
# room5: ✅ Requested via `rooms` and is in a list
self.assertIncludes(
{
room_id_to_human_name_map[room_id]
for room_id in response_body["extensions"][extension_name]
.get("rooms")
.keys()
},
{"room5"},
exact=True,
)

View File

@@ -0,0 +1,285 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from http import HTTPStatus
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import EventTypes, HistoryVisibility
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.util import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
logger = logging.getLogger(__name__)
class SlidingSyncRoomSubscriptionsTestCase(SlidingSyncBase):
"""
Test `room_subscriptions` in the Sliding Sync API.
"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
sync.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers()
def test_room_subscriptions_with_join_membership(self) -> None:
"""
Test `room_subscriptions` with a joined room should give us timeline and current
state events.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
# Make the Sliding Sync request with just the room subscription
sync_body = {
"room_subscriptions": {
room_id1: {
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 1,
}
},
}
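# (Each `required_state` entry is an `[event_type, state_key]` pair; here we
# only ask for the `m.room.create` event.)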
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
# We should see some state
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Create, "")],
},
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
# We should see some events
self.assertEqual(
[
event["event_id"]
for event in response_body["rooms"][room_id1]["timeline"]
],
[
join_response["event_id"],
],
response_body["rooms"][room_id1]["timeline"],
)
# No "live" events in an initial sync (no `from_token` to define the "live"
# range)
self.assertEqual(
response_body["rooms"][room_id1]["num_live"],
0,
response_body["rooms"][room_id1],
)
# There are more events to paginate to
self.assertEqual(
response_body["rooms"][room_id1]["limited"],
True,
response_body["rooms"][room_id1],
)
def test_room_subscriptions_with_leave_membership(self) -> None:
"""
Test `room_subscriptions` with a leave room should give us timeline and state
events up to the leave event.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.send_state(
room_id1,
event_type="org.matrix.foo_state",
state_key="",
body={"foo": "bar"},
tok=user2_tok,
)
join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
# Send some events after user1 leaves
self.helper.send(room_id1, "activity after leave", tok=user2_tok)
# Update state after user1 leaves
self.helper.send_state(
room_id1,
event_type="org.matrix.foo_state",
state_key="",
body={"foo": "qux"},
tok=user2_tok,
)
# Make the Sliding Sync request with just the room subscription
sync_body = {
"room_subscriptions": {
room_id1: {
"required_state": [
["org.matrix.foo_state", ""],
],
"timeline_limit": 2,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# We should see the state at the time of the leave
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[("org.matrix.foo_state", "")],
},
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
# We should see some events from before we left (nothing after)
self.assertEqual(
[
event["event_id"]
for event in response_body["rooms"][room_id1]["timeline"]
],
[
join_response["event_id"],
leave_response["event_id"],
],
response_body["rooms"][room_id1]["timeline"],
)
# No "live" events in an initial sync (no `from_token` to define the "live"
# range)
self.assertEqual(
response_body["rooms"][room_id1]["num_live"],
0,
response_body["rooms"][room_id1],
)
# There are more events to paginate to
self.assertEqual(
response_body["rooms"][room_id1]["limited"],
True,
response_body["rooms"][room_id1],
)
def test_room_subscriptions_no_leak_private_room(self) -> None:
"""
Test `room_subscriptions` with a private room we have never been in should not
leak any data to the user.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=False)
# We should not be able to join the private room
self.helper.join(
room_id1, user1_id, tok=user1_tok, expect_code=HTTPStatus.FORBIDDEN
)
# Make the Sliding Sync request with just the room subscription
sync_body = {
"room_subscriptions": {
room_id1: {
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 1,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# We should not see the room at all (we're not in it)
self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"])
def test_room_subscriptions_world_readable(self) -> None:
"""
Test `room_subscriptions` with a room that has `world_readable` history visibility
FIXME: We should be able to see the room timeline and state
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
# Create a room with `world_readable` history visibility
room_id1 = self.helper.create_room_as(
user2_id,
tok=user2_tok,
extra_content={
"preset": "public_chat",
"initial_state": [
{
"content": {
"history_visibility": HistoryVisibility.WORLD_READABLE
},
"state_key": "",
"type": EventTypes.RoomHistoryVisibility,
}
],
},
)
# Ensure we're testing with a room with `world_readable` history visibility
# which means events are visible to anyone even without membership.
history_visibility_response = self.helper.get_state(
room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
)
self.assertEqual(
history_visibility_response.get("history_visibility"),
HistoryVisibility.WORLD_READABLE,
)
# Note: We never join the room
# Make the Sliding Sync request with just the room subscription
sync_body = {
"room_subscriptions": {
room_id1: {
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 1,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# FIXME: In the future, we should be able to see the room because it's
# `world_readable` but currently we don't support this.
self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"])

View File

@@ -0,0 +1,510 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import EventTypes, HistoryVisibility
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.types import UserID
from synapse.util import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
logger = logging.getLogger(__name__)
class SlidingSyncRoomsInvitesTestCase(SlidingSyncBase):
"""
Test to make sure the `rooms` response looks good for invites in the Sliding Sync API.
Invites behave quite differently from other rooms because we don't include the
`timeline` (`num_live`, `limited`, `prev_batch`) or `required_state` in favor of
some stripped state under the `invite_state` key.
Knocks probably have the same behavior but the spec doesn't mention knocks yet.
"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
sync.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers()
def test_rooms_invite_shared_history_initial_sync(self) -> None:
"""
Test that `rooms` we are invited to have some stripped `invite_state` during an
initial sync.
This is an `invite` room so we should only have `stripped_state` (no `timeline`)
but we also shouldn't see any timeline events because the history visibility is
`shared` and we haven't joined the room yet.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user1 = UserID.from_string(user1_id)
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user2 = UserID.from_string(user2_id)
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
# Ensure we're testing with a room with `shared` history visibility, which
# means history isn't visible until you actually join the room.
history_visibility_response = self.helper.get_state(
room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
)
self.assertEqual(
history_visibility_response.get("history_visibility"),
HistoryVisibility.SHARED,
)
self.helper.send(room_id1, "activity before1", tok=user2_tok)
self.helper.send(room_id1, "activity before2", tok=user2_tok)
self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
self.helper.send(room_id1, "activity after3", tok=user2_tok)
self.helper.send(room_id1, "activity after4", tok=user2_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 3,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# `timeline` is omitted for `invite` rooms with `stripped_state`
self.assertIsNone(
response_body["rooms"][room_id1].get("timeline"),
response_body["rooms"][room_id1],
)
# `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self.assertIsNone(
response_body["rooms"][room_id1].get("num_live"),
response_body["rooms"][room_id1],
)
# `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self.assertIsNone(
response_body["rooms"][room_id1].get("limited"),
response_body["rooms"][room_id1],
)
# `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self.assertIsNone(
response_body["rooms"][room_id1].get("prev_batch"),
response_body["rooms"][room_id1],
)
# `required_state` is omitted for `invite` rooms with `stripped_state`
self.assertIsNone(
response_body["rooms"][room_id1].get("required_state"),
response_body["rooms"][room_id1],
)
# We should have some `stripped_state` so the potential joiner can identify the
# room (we don't care about the order).
self.assertCountEqual(
response_body["rooms"][room_id1]["invite_state"],
[
{
"content": {"creator": user2_id, "room_version": "10"},
"sender": user2_id,
"state_key": "",
"type": "m.room.create",
},
{
"content": {"join_rule": "public"},
"sender": user2_id,
"state_key": "",
"type": "m.room.join_rules",
},
{
"content": {"displayname": user2.localpart, "membership": "join"},
"sender": user2_id,
"state_key": user2_id,
"type": "m.room.member",
},
{
"content": {"displayname": user1.localpart, "membership": "invite"},
"sender": user2_id,
"state_key": user1_id,
"type": "m.room.member",
},
],
response_body["rooms"][room_id1]["invite_state"],
)
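# (Note the stripped events above carry only `type`, `state_key`, `sender`
# and `content`, matching the stripped-state format from the spec; there is
# no `event_id` or `origin_server_ts`.)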
def test_rooms_invite_shared_history_incremental_sync(self) -> None:
"""
Test that `rooms` we are invited to have some stripped `invite_state` during an
incremental sync.
This is an `invite` room so we should only have `stripped_state` (no `timeline`)
but we also shouldn't see any timeline events because the history visibility is
`shared` and we haven't joined the room yet.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user1 = UserID.from_string(user1_id)
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user2 = UserID.from_string(user2_id)
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
# Ensure we're testing with a room with `shared` history visibility, which
# means history isn't visible until you actually join the room.
history_visibility_response = self.helper.get_state(
room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
)
self.assertEqual(
history_visibility_response.get("history_visibility"),
HistoryVisibility.SHARED,
)
self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 3,
}
}
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
self.helper.send(room_id1, "activity after token5", tok=user2_tok)
self.helper.send(room_id1, "activity after toekn6", tok=user2_tok)
# Make the Sliding Sync request
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
# `timeline` is omitted for `invite` rooms with `stripped_state`
self.assertIsNone(
response_body["rooms"][room_id1].get("timeline"),
response_body["rooms"][room_id1],
)
# `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self.assertIsNone(
response_body["rooms"][room_id1].get("num_live"),
response_body["rooms"][room_id1],
)
# `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self.assertIsNone(
response_body["rooms"][room_id1].get("limited"),
response_body["rooms"][room_id1],
)
# `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self.assertIsNone(
response_body["rooms"][room_id1].get("prev_batch"),
response_body["rooms"][room_id1],
)
# `required_state` is omitted for `invite` rooms with `stripped_state`
self.assertIsNone(
response_body["rooms"][room_id1].get("required_state"),
response_body["rooms"][room_id1],
)
# We should have some `stripped_state` so the potential joiner can identify the
# room (we don't care about the order).
self.assertCountEqual(
response_body["rooms"][room_id1]["invite_state"],
[
{
"content": {"creator": user2_id, "room_version": "10"},
"sender": user2_id,
"state_key": "",
"type": "m.room.create",
},
{
"content": {"join_rule": "public"},
"sender": user2_id,
"state_key": "",
"type": "m.room.join_rules",
},
{
"content": {"displayname": user2.localpart, "membership": "join"},
"sender": user2_id,
"state_key": user2_id,
"type": "m.room.member",
},
{
"content": {"displayname": user1.localpart, "membership": "invite"},
"sender": user2_id,
"state_key": user1_id,
"type": "m.room.member",
},
],
response_body["rooms"][room_id1]["invite_state"],
)
def test_rooms_invite_world_readable_history_initial_sync(self) -> None:
"""
Test that `rooms` we are invited to have some stripped `invite_state` during an
initial sync.
This is an `invite` room so we should only have `stripped_state` (no `timeline`)
but depending on the semantics we decide, we could potentially see some
historical events because the history is `world_readable` (there is no
`from_token` on an initial sync). Same situation if the history visibility
was set to `invited`.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user1 = UserID.from_string(user1_id)
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user2 = UserID.from_string(user2_id)
room_id1 = self.helper.create_room_as(
user2_id,
tok=user2_tok,
extra_content={
"preset": "public_chat",
"initial_state": [
{
"content": {
"history_visibility": HistoryVisibility.WORLD_READABLE
},
"state_key": "",
"type": EventTypes.RoomHistoryVisibility,
}
],
},
)
# Ensure we're testing with a room with `world_readable` history visibility
# which means events are visible to anyone even without membership.
history_visibility_response = self.helper.get_state(
room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
)
self.assertEqual(
history_visibility_response.get("history_visibility"),
HistoryVisibility.WORLD_READABLE,
)
self.helper.send(room_id1, "activity before1", tok=user2_tok)
self.helper.send(room_id1, "activity before2", tok=user2_tok)
self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
self.helper.send(room_id1, "activity after3", tok=user2_tok)
self.helper.send(room_id1, "activity after4", tok=user2_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
# Large enough to see the latest events and those from before the invite
"timeline_limit": 4,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# `timeline` is omitted for `invite` rooms with `stripped_state`
self.assertIsNone(
response_body["rooms"][room_id1].get("timeline"),
response_body["rooms"][room_id1],
)
# `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self.assertIsNone(
response_body["rooms"][room_id1].get("num_live"),
response_body["rooms"][room_id1],
)
# `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self.assertIsNone(
response_body["rooms"][room_id1].get("limited"),
response_body["rooms"][room_id1],
)
# `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self.assertIsNone(
response_body["rooms"][room_id1].get("prev_batch"),
response_body["rooms"][room_id1],
)
# `required_state` is omitted for `invite` rooms with `stripped_state`
self.assertIsNone(
response_body["rooms"][room_id1].get("required_state"),
response_body["rooms"][room_id1],
)
# We should have some `stripped_state` so the potential joiner can identify the
# room (we don't care about the order).
self.assertCountEqual(
response_body["rooms"][room_id1]["invite_state"],
[
{
"content": {"creator": user2_id, "room_version": "10"},
"sender": user2_id,
"state_key": "",
"type": "m.room.create",
},
{
"content": {"join_rule": "public"},
"sender": user2_id,
"state_key": "",
"type": "m.room.join_rules",
},
{
"content": {"displayname": user2.localpart, "membership": "join"},
"sender": user2_id,
"state_key": user2_id,
"type": "m.room.member",
},
{
"content": {"displayname": user1.localpart, "membership": "invite"},
"sender": user2_id,
"state_key": user1_id,
"type": "m.room.member",
},
],
response_body["rooms"][room_id1]["invite_state"],
)
def test_rooms_invite_world_readable_history_incremental_sync(self) -> None:
"""
Test that `rooms` we are invited to have some stripped `invite_state` during an
incremental sync.
This is an `invite` room so we should only have `stripped_state` (no `timeline`)
but depending on the semantics we decide, we could potentially see some
historical events before/after the `from_token` because the history is
`world_readable`. Same situation for events after the `from_token` if the
history visibility was set to `invited`.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user1 = UserID.from_string(user1_id)
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user2 = UserID.from_string(user2_id)
room_id1 = self.helper.create_room_as(
user2_id,
tok=user2_tok,
extra_content={
"preset": "public_chat",
"initial_state": [
{
"content": {
"history_visibility": HistoryVisibility.WORLD_READABLE
},
"state_key": "",
"type": EventTypes.RoomHistoryVisibility,
}
],
},
)
# Ensure we're testing with a room with `world_readable` history visibility
# which means events are visible to anyone even without membership.
history_visibility_response = self.helper.get_state(
room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
)
self.assertEqual(
history_visibility_response.get("history_visibility"),
HistoryVisibility.WORLD_READABLE,
)
self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
# Large enough to see the latest events and those from before the invite
"timeline_limit": 4,
}
}
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
self.helper.send(room_id1, "activity after token5", tok=user2_tok)
self.helper.send(room_id1, "activity after toekn6", tok=user2_tok)
# Make the incremental Sliding Sync request
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# `timeline` is omitted for `invite` rooms with `stripped_state`
self.assertIsNone(
response_body["rooms"][room_id1].get("timeline"),
response_body["rooms"][room_id1],
)
# `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self.assertIsNone(
response_body["rooms"][room_id1].get("num_live"),
response_body["rooms"][room_id1],
)
# `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self.assertIsNone(
response_body["rooms"][room_id1].get("limited"),
response_body["rooms"][room_id1],
)
# `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
self.assertIsNone(
response_body["rooms"][room_id1].get("prev_batch"),
response_body["rooms"][room_id1],
)
# `required_state` is omitted for `invite` rooms with `stripped_state`
self.assertIsNone(
response_body["rooms"][room_id1].get("required_state"),
response_body["rooms"][room_id1],
)
# We should have some `stripped_state` so the potential joiner can identify the
# room (we don't care about the order).
self.assertCountEqual(
response_body["rooms"][room_id1]["invite_state"],
[
{
"content": {"creator": user2_id, "room_version": "10"},
"sender": user2_id,
"state_key": "",
"type": "m.room.create",
},
{
"content": {"join_rule": "public"},
"sender": user2_id,
"state_key": "",
"type": "m.room.join_rules",
},
{
"content": {"displayname": user2.localpart, "membership": "join"},
"sender": user2_id,
"state_key": user2_id,
"type": "m.room.member",
},
{
"content": {"displayname": user1.localpart, "membership": "invite"},
"sender": user2_id,
"state_key": user1_id,
"type": "m.room.member",
},
],
response_body["rooms"][room_id1]["invite_state"],
)

View File

@@ -0,0 +1,710 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import EventTypes, Membership
from synapse.api.room_versions import RoomVersions
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.util import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.test_utils.event_injection import create_event
logger = logging.getLogger(__name__)
class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
"""
Test rooms meta info like name, avatar, joined_count, invited_count, is_dm,
bump_stamp in the Sliding Sync API.
"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
sync.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers()
def test_rooms_meta_when_joined(self) -> None:
"""
Test that the `rooms` `name` and `avatar` are included in the response and
reflect the current state of the room when the user is joined to the room.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(
user2_id,
tok=user2_tok,
extra_content={
"name": "my super room",
},
)
# Set the room avatar URL
self.helper.send_state(
room_id1,
EventTypes.RoomAvatar,
{"url": "mxc://DUMMY_MEDIA_ID"},
tok=user2_tok,
)
self.helper.join(room_id1, user1_id, tok=user1_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 0,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Reflect the current state of the room
self.assertEqual(
response_body["rooms"][room_id1]["name"],
"my super room",
response_body["rooms"][room_id1],
)
self.assertEqual(
response_body["rooms"][room_id1]["avatar"],
"mxc://DUMMY_MEDIA_ID",
response_body["rooms"][room_id1],
)
self.assertEqual(
response_body["rooms"][room_id1]["joined_count"],
2,
)
self.assertEqual(
response_body["rooms"][room_id1]["invited_count"],
0,
)
self.assertIsNone(
response_body["rooms"][room_id1].get("is_dm"),
)
def test_rooms_meta_when_invited(self) -> None:
"""
Test that the `rooms` `name` and `avatar` are included in the response and
reflect the current state of the room when the user is invited to the room.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(
user2_id,
tok=user2_tok,
extra_content={
"name": "my super room",
},
)
# Set the room avatar URL
self.helper.send_state(
room_id1,
EventTypes.RoomAvatar,
{"url": "mxc://DUMMY_MEDIA_ID"},
tok=user2_tok,
)
# User1 is invited to the room
self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
# Update the room name after user1 was invited
self.helper.send_state(
room_id1,
EventTypes.Name,
{"name": "my super duper room"},
tok=user2_tok,
)
# Update the room avatar URL after user1 was invited
self.helper.send_state(
room_id1,
EventTypes.RoomAvatar,
{"url": "mxc://UPDATED_DUMMY_MEDIA_ID"},
tok=user2_tok,
)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 0,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# This should still reflect the current state of the room even when the user is
# invited.
self.assertEqual(
response_body["rooms"][room_id1]["name"],
"my super duper room",
response_body["rooms"][room_id1],
)
self.assertEqual(
response_body["rooms"][room_id1]["avatar"],
"mxc://UPDATED_DUMMY_MEDIA_ID",
response_body["rooms"][room_id1],
)
self.assertEqual(
response_body["rooms"][room_id1]["joined_count"],
1,
)
self.assertEqual(
response_body["rooms"][room_id1]["invited_count"],
1,
)
self.assertIsNone(
response_body["rooms"][room_id1].get("is_dm"),
)
def test_rooms_meta_when_banned(self) -> None:
"""
Test that the `rooms` `name` and `avatar` reflect the state of the room when the
user was banned (do not leak current state).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(
user2_id,
tok=user2_tok,
extra_content={
"name": "my super room",
},
)
# Set the room avatar URL
self.helper.send_state(
room_id1,
EventTypes.RoomAvatar,
{"url": "mxc://DUMMY_MEDIA_ID"},
tok=user2_tok,
)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
# Update the room name after user1 was banned
self.helper.send_state(
room_id1,
EventTypes.Name,
{"name": "my super duper room"},
tok=user2_tok,
)
# Update the room avatar URL after user1 was banned
self.helper.send_state(
room_id1,
EventTypes.RoomAvatar,
{"url": "mxc://UPDATED_DUMMY_MEDIA_ID"},
tok=user2_tok,
)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 0,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Reflect the state of the room at the time of the ban
self.assertEqual(
response_body["rooms"][room_id1]["name"],
"my super room",
response_body["rooms"][room_id1],
)
self.assertEqual(
response_body["rooms"][room_id1]["avatar"],
"mxc://DUMMY_MEDIA_ID",
response_body["rooms"][room_id1],
)
self.assertEqual(
response_body["rooms"][room_id1]["joined_count"],
# FIXME: The actual number should be "1" (user2) but we currently don't
# support this for rooms where the user has left/been banned.
0,
)
self.assertEqual(
response_body["rooms"][room_id1]["invited_count"],
0,
)
self.assertIsNone(
response_body["rooms"][room_id1].get("is_dm"),
)
def test_rooms_meta_heroes(self) -> None:
"""
Test that the `rooms` `heroes` are included in the response when the room
doesn't have a room name set.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
_user3_tok = self.login(user3_id, "pass")
room_id1 = self.helper.create_room_as(
user2_id,
tok=user2_tok,
extra_content={
"name": "my super room",
},
)
self.helper.join(room_id1, user1_id, tok=user1_tok)
# User3 is invited
self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok)
room_id2 = self.helper.create_room_as(
user2_id,
tok=user2_tok,
extra_content={
# No room name set so that `heroes` is populated
#
# "name": "my super room2",
},
)
self.helper.join(room_id2, user1_id, tok=user1_tok)
# User3 is invited
self.helper.invite(room_id2, src=user2_id, targ=user3_id, tok=user2_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 0,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Room1 has a name so we shouldn't see any `heroes`, which the client would
# otherwise use to calculate the room name itself.
self.assertEqual(
response_body["rooms"][room_id1]["name"],
"my super room",
response_body["rooms"][room_id1],
)
self.assertIsNone(response_body["rooms"][room_id1].get("heroes"))
self.assertEqual(
response_body["rooms"][room_id1]["joined_count"],
2,
)
self.assertEqual(
response_body["rooms"][room_id1]["invited_count"],
1,
)
# Room2 doesn't have a name so we should see `heroes` populated
self.assertIsNone(response_body["rooms"][room_id2].get("name"))
self.assertCountEqual(
[
hero["user_id"]
for hero in response_body["rooms"][room_id2].get("heroes", [])
],
# Heroes shouldn't include the user themselves (we shouldn't see user1)
[user2_id, user3_id],
)
self.assertEqual(
response_body["rooms"][room_id2]["joined_count"],
2,
)
self.assertEqual(
response_body["rooms"][room_id2]["invited_count"],
1,
)
# We didn't request any state so we shouldn't see any `required_state`
self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
self.assertIsNone(response_body["rooms"][room_id2].get("required_state"))
def test_rooms_meta_heroes_max(self) -> None:
"""
Test that the `rooms` `heroes` only includes the first 5 users (not including
yourself).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
user5_id = self.register_user("user5", "pass")
user5_tok = self.login(user5_id, "pass")
user6_id = self.register_user("user6", "pass")
user6_tok = self.login(user6_id, "pass")
user7_id = self.register_user("user7", "pass")
user7_tok = self.login(user7_id, "pass")
room_id1 = self.helper.create_room_as(
user2_id,
tok=user2_tok,
extra_content={
# No room name set so that `heroes` is populated
#
# "name": "my super room",
},
)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
self.helper.join(room_id1, user4_id, tok=user4_tok)
self.helper.join(room_id1, user5_id, tok=user5_tok)
self.helper.join(room_id1, user6_id, tok=user6_tok)
self.helper.join(room_id1, user7_id, tok=user7_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 0,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Room1 doesn't have a name so we should see `heroes` populated
self.assertIsNone(response_body["rooms"][room_id1].get("name"))
self.assertCountEqual(
[
hero["user_id"]
for hero in response_body["rooms"][room_id1].get("heroes", [])
],
# Heroes should be the first 5 users in the room (excluding the user
# themselves, we shouldn't see `user1`)
[user2_id, user3_id, user4_id, user5_id, user6_id],
)
self.assertEqual(
response_body["rooms"][room_id1]["joined_count"],
7,
)
self.assertEqual(
response_body["rooms"][room_id1]["invited_count"],
0,
)
# We didn't request any state so we shouldn't see any `required_state`
self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
def test_rooms_meta_heroes_when_banned(self) -> None:
"""
Test that the `rooms` `heroes` are included in the response when the room
doesn't have a room name set but doesn't leak information past their ban.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
_user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
user5_id = self.register_user("user5", "pass")
_user5_tok = self.login(user5_id, "pass")
room_id1 = self.helper.create_room_as(
user2_id,
tok=user2_tok,
extra_content={
# No room name set so that `heroes` is populated
#
# "name": "my super room",
},
)
# User1 joins the room
self.helper.join(room_id1, user1_id, tok=user1_tok)
# User3 is invited
self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok)
# User1 is banned from the room
self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
# User4 joins the room after user1 is banned
self.helper.join(room_id1, user4_id, tok=user4_tok)
# User5 is invited after user1 is banned
self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 0,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Room1 doesn't have a name so we should see `heroes` populated
self.assertIsNone(response_body["rooms"][room_id1].get("name"))
self.assertCountEqual(
[
hero["user_id"]
for hero in response_body["rooms"][room_id1].get("heroes", [])
],
# Heroes shouldn't include the user themselves (we shouldn't see user1). We
# also shouldn't see user4 since they joined after user1 was banned.
#
# FIXME: The actual result should be `[user2_id, user3_id]` but we currently
# don't support this for rooms where the user has left/been banned.
[],
)
self.assertEqual(
response_body["rooms"][room_id1]["joined_count"],
# FIXME: The actual number should be "1" (user2) but we currently don't
# support this for rooms where the user has left/been banned.
0,
)
self.assertEqual(
response_body["rooms"][room_id1]["invited_count"],
# We shouldn't see user5 since they were invited after user1 was banned.
#
# FIXME: The actual number should be "1" (user3) but we currently don't
# support this for rooms where the user has left/been banned.
0,
)
def test_rooms_bump_stamp(self) -> None:
"""
Test that `bump_stamp` is present and pointing to relevant events.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(
user1_id,
tok=user1_tok,
)
event_response1 = message_response = self.helper.send(
room_id1, "message in room1", tok=user1_tok
)
event_pos1 = self.get_success(
self.store.get_position_for_event(event_response1["event_id"])
)
room_id2 = self.helper.create_room_as(
user1_id,
tok=user1_tok,
)
send_response2 = self.helper.send(room_id2, "message in room2", tok=user1_tok)
event_pos2 = self.get_success(
self.store.get_position_for_event(send_response2["event_id"])
)
# Send a reaction in room1 but it shouldn't affect the `bump_stamp`
# because reactions are not part of the `DEFAULT_BUMP_EVENT_TYPES`
self.helper.send_event(
room_id1,
type=EventTypes.Reaction,
content={
"m.relates_to": {
"event_id": message_response["event_id"],
"key": "👍",
"rel_type": "m.annotation",
}
},
tok=user1_tok,
)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 100,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Make sure it has the foo-list we requested
self.assertListEqual(
list(response_body["lists"].keys()),
["foo-list"],
response_body["lists"].keys(),
)
# Make sure the list includes the rooms in the right order
self.assertListEqual(
list(response_body["lists"]["foo-list"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 1],
# room1 sorts before room2 because it has the latest event (the
# reaction)
"room_ids": [room_id1, room_id2],
}
],
response_body["lists"]["foo-list"],
)
# The `bump_stamp` for room1 should point at the latest message (not the
# reaction since it's not one of the `DEFAULT_BUMP_EVENT_TYPES`)
self.assertEqual(
response_body["rooms"][room_id1]["bump_stamp"],
event_pos1.stream,
response_body["rooms"][room_id1],
)
# The `bump_stamp` for room2 should point at the latest message
self.assertEqual(
response_body["rooms"][room_id2]["bump_stamp"],
event_pos2.stream,
response_body["rooms"][room_id2],
)
def test_rooms_bump_stamp_backfill(self) -> None:
"""
Test that `bump_stamp` ignores backfilled events, i.e. events with a
negative stream ordering.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a remote room
creator = "@user:other"
room_id = "!foo:other"
shared_kwargs = {
"room_id": room_id,
"room_version": "10",
}
create_tuple = self.get_success(
create_event(
self.hs,
prev_event_ids=[],
type=EventTypes.Create,
state_key="",
sender=creator,
**shared_kwargs,
)
)
creator_tuple = self.get_success(
create_event(
self.hs,
prev_event_ids=[create_tuple[0].event_id],
auth_event_ids=[create_tuple[0].event_id],
type=EventTypes.Member,
state_key=creator,
content={"membership": Membership.JOIN},
sender=creator,
**shared_kwargs,
)
)
# We add a message event as a valid "bump type"
msg_tuple = self.get_success(
create_event(
self.hs,
prev_event_ids=[creator_tuple[0].event_id],
auth_event_ids=[create_tuple[0].event_id],
type=EventTypes.Message,
content={"body": "foo", "msgtype": "m.text"},
sender=creator,
**shared_kwargs,
)
)
invite_tuple = self.get_success(
create_event(
self.hs,
prev_event_ids=[msg_tuple[0].event_id],
auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id],
type=EventTypes.Member,
state_key=user1_id,
content={"membership": Membership.INVITE},
sender=creator,
**shared_kwargs,
)
)
remote_events_and_contexts = [
create_tuple,
creator_tuple,
msg_tuple,
invite_tuple,
]
# Ensure the local HS knows the room version
self.get_success(
self.store.store_room(room_id, creator, False, RoomVersions.V10)
)
# Persist these events as backfilled events.
persistence = self.hs.get_storage_controllers().persistence
assert persistence is not None
for event, context in remote_events_and_contexts:
self.get_success(persistence.persist_event(event, context, backfilled=True))
# Now we join the local user to the room
join_tuple = self.get_success(
create_event(
self.hs,
prev_event_ids=[invite_tuple[0].event_id],
auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id],
type=EventTypes.Member,
state_key=user1_id,
content={"membership": Membership.JOIN},
sender=user1_id,
**shared_kwargs,
)
)
self.get_success(persistence.persist_event(*join_tuple))
# Doing an SS request should return a positive `bump_stamp`, even though
        # the only event that matches the bump types has a negative stream
# ordering.
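        # (For context: events persisted with `backfilled=True`, like the remote
        # events above, are assigned negative stream orderings, while the local
        # join event gets the next positive stream ordering.)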
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 5,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
self.assertGreater(response_body["rooms"][room_id]["bump_stamp"], 0)


@@ -0,0 +1,707 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from parameterized import parameterized
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import EventTypes, Membership
from synapse.handlers.sliding_sync import StateValues
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.util import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.test_utils.event_injection import mark_event_as_partial_state
logger = logging.getLogger(__name__)
class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
"""
Test `rooms.required_state` in the Sliding Sync API.
"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
sync.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers()
def test_rooms_no_required_state(self) -> None:
"""
Empty `rooms.required_state` should not return any state events in the room
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
# Empty `required_state`
"required_state": [],
"timeline_limit": 0,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# No `required_state` in response
self.assertIsNone(
response_body["rooms"][room_id1].get("required_state"),
response_body["rooms"][room_id1],
)
def test_rooms_required_state_initial_sync(self) -> None:
"""
Test `rooms.required_state` returns requested state events in the room during an
initial sync.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
[EventTypes.RoomHistoryVisibility, ""],
# This one doesn't exist in the room
[EventTypes.Tombstone, ""],
],
"timeline_limit": 0,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Create, "")],
state_map[(EventTypes.RoomHistoryVisibility, "")],
},
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_incremental_sync(self) -> None:
"""
Test `rooms.required_state` returns requested state events in the room during an
incremental sync.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
[EventTypes.RoomHistoryVisibility, ""],
# This one doesn't exist in the room
[EventTypes.Tombstone, ""],
],
"timeline_limit": 1,
}
}
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Send a message so the room comes down sync.
self.helper.send(room_id1, "msg", tok=user1_tok)
# Make the incremental Sliding Sync request
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
        # We only return updates to `required_state`, and only if we've sent the room
        # down the connection before.
self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_incremental_sync_restart(self) -> None:
"""
        Test that after a restart (and so the in-memory caches are reset) we
        correctly return an `M_UNKNOWN_POS` error.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
[EventTypes.RoomHistoryVisibility, ""],
# This one doesn't exist in the room
[EventTypes.Tombstone, ""],
],
"timeline_limit": 1,
}
}
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Reset the in-memory cache
self.hs.get_sliding_sync_handler().connection_store._connections.clear()
# Make the Sliding Sync request
channel = self.make_request(
method="POST",
path=self.sync_endpoint + f"?pos={from_token}",
content=sync_body,
access_token=user1_tok,
)
self.assertEqual(channel.code, 400, channel.json_body)
self.assertEqual(
channel.json_body["errcode"], "M_UNKNOWN_POS", channel.json_body
)
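        # For reference, the error response body looks roughly like (illustrative):
        #     {"errcode": "M_UNKNOWN_POS", "error": "..."}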
def test_rooms_required_state_wildcard(self) -> None:
"""
Test `rooms.required_state` returns all state events when using wildcard `["*", "*"]`.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.send_state(
room_id1,
event_type="org.matrix.foo_state",
state_key="",
body={"foo": "bar"},
tok=user2_tok,
)
self.helper.send_state(
room_id1,
event_type="org.matrix.foo_state",
state_key="namespaced",
body={"foo": "bar"},
tok=user2_tok,
)
# Make the Sliding Sync request with wildcards for the `event_type` and `state_key`
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[StateValues.WILDCARD, StateValues.WILDCARD],
],
"timeline_limit": 0,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
# We should see all the state events in the room
state_map.values(),
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_wildcard_event_type(self) -> None:
"""
Test `rooms.required_state` returns relevant state events when using wildcard in
the event_type `["*", "foobarbaz"]`.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.send_state(
room_id1,
event_type="org.matrix.foo_state",
state_key="",
body={"foo": "bar"},
tok=user2_tok,
)
self.helper.send_state(
room_id1,
event_type="org.matrix.foo_state",
state_key=user2_id,
body={"foo": "bar"},
tok=user2_tok,
)
# Make the Sliding Sync request with wildcards for the `event_type`
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[StateValues.WILDCARD, user2_id],
],
"timeline_limit": 0,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
        # We expect at least any state event with `user2_id` as the `state_key`
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user2_id)],
state_map[("org.matrix.foo_state", user2_id)],
},
# Ideally, this would be exact but we're currently returning all state
# events when the `event_type` is a wildcard.
exact=False,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_wildcard_state_key(self) -> None:
"""
Test `rooms.required_state` returns relevant state events when using wildcard in
        the state_key `["foobarbaz", "*"]`.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
# Make the Sliding Sync request with wildcards for the `state_key`
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Member, StateValues.WILDCARD],
],
"timeline_limit": 0,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user1_id)],
state_map[(EventTypes.Member, user2_id)],
},
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_lazy_loading_room_members(self) -> None:
"""
Test `rooms.required_state` returns people relevant to the timeline when
lazy-loading room members, `["m.room.member","$LAZY"]`.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
self.helper.send(room_id1, "1", tok=user2_tok)
self.helper.send(room_id1, "2", tok=user3_tok)
self.helper.send(room_id1, "3", tok=user2_tok)
# Make the Sliding Sync request with lazy loading for the room members
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
[EventTypes.Member, StateValues.LAZY],
],
"timeline_limit": 3,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
# Only user2 and user3 sent events in the 3 events we see in the `timeline`
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Create, "")],
state_map[(EventTypes.Member, user2_id)],
state_map[(EventTypes.Member, user3_id)],
},
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_me(self) -> None:
"""
Test `rooms.required_state` correctly handles $ME.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.send(room_id1, "1", tok=user2_tok)
        # Also send normal state events with the users' IDs as the state keys. First,
        # change the power levels to allow this.
self.helper.send_state(
room_id1,
event_type=EventTypes.PowerLevels,
body={"users": {user1_id: 50, user2_id: 100}},
tok=user2_tok,
)
self.helper.send_state(
room_id1,
event_type="org.matrix.foo",
state_key=user1_id,
body={},
tok=user1_tok,
)
self.helper.send_state(
room_id1,
event_type="org.matrix.foo",
state_key=user2_id,
body={},
tok=user2_tok,
)
# Make the Sliding Sync request with a request for '$ME'.
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
[EventTypes.Member, StateValues.ME],
["org.matrix.foo", StateValues.ME],
],
"timeline_limit": 3,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
        # We should only see state events where the `state_key` is our own user ID
        # ($ME), plus the create event
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Create, "")],
state_map[(EventTypes.Member, user1_id)],
state_map[("org.matrix.foo", user1_id)],
},
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
@parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)])
def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None:
"""
Test `rooms.required_state` should not return state past a leave/ban event.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
[EventTypes.Member, "*"],
["org.matrix.foo_state", ""],
],
"timeline_limit": 3,
}
}
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
self.helper.send_state(
room_id1,
event_type="org.matrix.foo_state",
state_key="",
body={"foo": "bar"},
tok=user2_tok,
)
if stop_membership == Membership.LEAVE:
# User 1 leaves
self.helper.leave(room_id1, user1_id, tok=user1_tok)
elif stop_membership == Membership.BAN:
# User 1 is banned
self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
# Change the state after user 1 leaves
self.helper.send_state(
room_id1,
event_type="org.matrix.foo_state",
state_key="",
body={"foo": "qux"},
tok=user2_tok,
)
self.helper.leave(room_id1, user3_id, tok=user3_tok)
        # Make the incremental Sliding Sync request
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
        # We should only see the state from before user1's leave/ban; the later
        # `org.matrix.foo_state` update and user3's leave shouldn't be included
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Create, "")],
state_map[(EventTypes.Member, user1_id)],
state_map[(EventTypes.Member, user2_id)],
state_map[(EventTypes.Member, user3_id)],
state_map[("org.matrix.foo_state", "")],
},
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_combine_superset(self) -> None:
"""
Test `rooms.required_state` is combined across lists and room subscriptions.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.send_state(
room_id1,
event_type="org.matrix.foo_state",
state_key="",
body={"foo": "bar"},
tok=user2_tok,
)
self.helper.send_state(
room_id1,
event_type="org.matrix.bar_state",
state_key="",
body={"bar": "qux"},
tok=user2_tok,
)
# Make the Sliding Sync request with wildcards for the `state_key`
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
[EventTypes.Member, user1_id],
],
"timeline_limit": 0,
},
"bar-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Member, StateValues.WILDCARD],
["org.matrix.foo_state", ""],
],
"timeline_limit": 0,
},
},
"room_subscriptions": {
room_id1: {
"required_state": [["org.matrix.bar_state", ""]],
"timeline_limit": 0,
}
},
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Create, "")],
state_map[(EventTypes.Member, user1_id)],
state_map[(EventTypes.Member, user2_id)],
state_map[("org.matrix.foo_state", "")],
state_map[("org.matrix.bar_state", "")],
},
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_partial_state(self) -> None:
"""
        Test that partially-stated rooms are excluded unless `rooms.required_state` is
lazy-loading room members.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
_join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok)
# Mark room2 as partial state
self.get_success(
mark_event_as_partial_state(self.hs, join_response2["event_id"], room_id2)
)
# Make the Sliding Sync request (NOT lazy-loading room members)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 0,
},
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Make sure the list includes room1 but room2 is excluded because it's still
# partially-stated
self.assertListEqual(
list(response_body["lists"]["foo-list"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 1],
"room_ids": [room_id1],
}
],
response_body["lists"]["foo-list"],
)
# Make the Sliding Sync request (with lazy-loading room members)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
# Lazy-load room members
[EventTypes.Member, StateValues.LAZY],
],
"timeline_limit": 0,
},
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# The list should include both rooms now because we're lazy-loading room members
self.assertListEqual(
list(response_body["lists"]["foo-list"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 1],
"room_ids": [room_id2, room_id1],
}
],
response_body["lists"]["foo-list"],
)


@@ -0,0 +1,575 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from typing import List, Optional
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.types import StreamToken, StrSequence
from synapse.util import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
logger = logging.getLogger(__name__)
class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase):
"""
Test `rooms.timeline` in the Sliding Sync API.
"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
sync.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers()
def _assertListEqual(
self,
actual_items: StrSequence,
expected_items: StrSequence,
message: Optional[str] = None,
) -> None:
"""
Like `self.assertListEqual(...)` but with an actually understandable diff message.
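        For example (illustrative), comparing actual `["$a", "$b"]` against expected
        `["$b", "$c"]` fails with output roughly like:
            Expected items to be in actual ('?' = missing expected items):
             [
                $b
             ?  $c
             ]
            Actual ('+' = found expected items):
             [
                $a
             +  $b
             ]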
"""
if actual_items == expected_items:
return
expected_lines: List[str] = []
for expected_item in expected_items:
is_expected_in_actual = expected_item in actual_items
expected_lines.append(
"{} {}".format(" " if is_expected_in_actual else "?", expected_item)
)
actual_lines: List[str] = []
for actual_item in actual_items:
is_actual_in_expected = actual_item in expected_items
actual_lines.append(
"{} {}".format("+" if is_actual_in_expected else " ", actual_item)
)
newline = "\n"
expected_string = f"Expected items to be in actual ('?' = missing expected items):\n [\n{newline.join(expected_lines)}\n ]"
actual_string = f"Actual ('+' = found expected items):\n [\n{newline.join(actual_lines)}\n ]"
        first_message = "Items must match exactly and in the same order"
diff_message = f"{first_message}\n{expected_string}\n{actual_string}"
self.fail(f"{diff_message}\n{message}")
def _assertTimelineEqual(
self,
*,
room_id: str,
actual_event_ids: List[str],
expected_event_ids: List[str],
message: Optional[str] = None,
) -> None:
"""
Like `self.assertListEqual(...)` for event IDs in a room but will give a nicer
output with context for what each event_id is (type, stream_ordering, content,
etc).
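        Each event ID is rendered roughly as (illustrative):
            ( 5, master) $event_id (m.room.member, @user1:test) join
        i.e. "(stream_ordering, instance_name) event_id (type[, state_key])
        membership/body".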
"""
if actual_event_ids == expected_event_ids:
return
event_id_set = set(actual_event_ids + expected_event_ids)
events = self.get_success(self.store.get_events(event_id_set))
def event_id_to_string(event_id: str) -> str:
event = events.get(event_id)
if event:
state_key = event.get_state_key()
state_key_piece = f", {state_key}" if state_key is not None else ""
return (
f"({event.internal_metadata.stream_ordering: >2}, {event.internal_metadata.instance_name}) "
+ f"{event.event_id} ({event.type}{state_key_piece}) {event.content.get('membership', '')}{event.content.get('body', '')}"
)
return f"{event_id} <event not found in room_id={room_id}>"
self._assertListEqual(
actual_items=[
event_id_to_string(event_id) for event_id in actual_event_ids
],
expected_items=[
event_id_to_string(event_id) for event_id in expected_event_ids
],
message=message,
)
def test_rooms_limited_initial_sync(self) -> None:
"""
Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit`
on initial sync.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.send(room_id1, "activity1", tok=user2_tok)
self.helper.send(room_id1, "activity2", tok=user2_tok)
event_response3 = self.helper.send(room_id1, "activity3", tok=user2_tok)
event_pos3 = self.get_success(
self.store.get_position_for_event(event_response3["event_id"])
)
event_response4 = self.helper.send(room_id1, "activity4", tok=user2_tok)
event_pos4 = self.get_success(
self.store.get_position_for_event(event_response4["event_id"])
)
event_response5 = self.helper.send(room_id1, "activity5", tok=user2_tok)
user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 3,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# We expect to saturate the `timeline_limit` (there are more than 3 messages in the room)
self.assertEqual(
response_body["rooms"][room_id1]["limited"],
True,
response_body["rooms"][room_id1],
)
# Check to make sure the latest events are returned
self._assertTimelineEqual(
room_id=room_id1,
actual_event_ids=[
event["event_id"]
for event in response_body["rooms"][room_id1]["timeline"]
],
expected_event_ids=[
event_response4["event_id"],
event_response5["event_id"],
user1_join_response["event_id"],
],
message=str(response_body["rooms"][room_id1]["timeline"]),
)
# Check to make sure the `prev_batch` points at the right place
prev_batch_token = self.get_success(
StreamToken.from_string(
self.store, response_body["rooms"][room_id1]["prev_batch"]
)
)
prev_batch_room_stream_token_serialized = self.get_success(
prev_batch_token.room_key.to_string(self.store)
)
# If we use the `prev_batch` token to look backwards, we should see `event3`
# next so make sure the token encompasses it
self.assertEqual(
event_pos3.persisted_after(prev_batch_token.room_key),
False,
f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be >= event_pos3={self.get_success(event_pos3.to_room_stream_token().to_string(self.store))}",
)
# If we use the `prev_batch` token to look backwards, we shouldn't see `event4`
# anymore since it was just returned in this response.
self.assertEqual(
event_pos4.persisted_after(prev_batch_token.room_key),
True,
f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be < event_pos4={self.get_success(event_pos4.to_room_stream_token().to_string(self.store))}",
)
# With no `from_token` (initial sync), it's all historical since there is no
# "live" range
self.assertEqual(
response_body["rooms"][room_id1]["num_live"],
0,
response_body["rooms"][room_id1],
)
def test_rooms_not_limited_initial_sync(self) -> None:
"""
Test that we mark `rooms` as `limited=False` when there are no more events to
paginate to.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.send(room_id1, "activity1", tok=user2_tok)
self.helper.send(room_id1, "activity2", tok=user2_tok)
self.helper.send(room_id1, "activity3", tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
# Make the Sliding Sync request
timeline_limit = 100
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": timeline_limit,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# The timeline should be `limited=False` because we have all of the events (no
# more to paginate to)
self.assertEqual(
response_body["rooms"][room_id1]["limited"],
False,
response_body["rooms"][room_id1],
)
expected_number_of_events = 9
# We're just looking to make sure we got all of the events before hitting the `timeline_limit`
self.assertEqual(
len(response_body["rooms"][room_id1]["timeline"]),
expected_number_of_events,
response_body["rooms"][room_id1]["timeline"],
)
self.assertLessEqual(expected_number_of_events, timeline_limit)
# With no `from_token` (initial sync), it's all historical since there is no
# "live" token range.
self.assertEqual(
response_body["rooms"][room_id1]["num_live"],
0,
response_body["rooms"][room_id1],
)
def test_rooms_incremental_sync(self) -> None:
"""
Test `rooms` data during an incremental sync after an initial sync.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.send(room_id1, "activity before initial sync1", tok=user2_tok)
# Make an initial Sliding Sync request to grab a token. This is also a sanity
# check that we can go from initial to incremental sync.
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 3,
}
}
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Send some events but don't send enough to saturate the `timeline_limit`.
# We want to later test that we only get the new events since the `next_pos`
event_response2 = self.helper.send(room_id1, "activity after2", tok=user2_tok)
event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
# Make an incremental Sliding Sync request (what we're trying to test)
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# We only expect to see the new events since the last sync which isn't enough to
# fill up the `timeline_limit`.
self.assertEqual(
response_body["rooms"][room_id1]["limited"],
False,
f'Our `timeline_limit` was {sync_body["lists"]["foo-list"]["timeline_limit"]} '
+ f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
+ str(response_body["rooms"][room_id1]),
)
# Check to make sure the latest events are returned
self._assertTimelineEqual(
room_id=room_id1,
actual_event_ids=[
event["event_id"]
for event in response_body["rooms"][room_id1]["timeline"]
],
expected_event_ids=[
event_response2["event_id"],
event_response3["event_id"],
],
message=str(response_body["rooms"][room_id1]["timeline"]),
)
# All events are "live"
self.assertEqual(
response_body["rooms"][room_id1]["num_live"],
2,
response_body["rooms"][room_id1],
)
def test_rooms_newly_joined_incremental_sync(self) -> None:
"""
        Test that when we make an incremental sync with a `newly_joined` room, we are
        able to see some historical events before the `from_token`.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.send(room_id1, "activity before token1", tok=user2_tok)
event_response2 = self.helper.send(
room_id1, "activity before token2", tok=user2_tok
)
# The `timeline_limit` is set to 4 so we can at least see one historical event
# before the `from_token`. We should see historical events because this is a
# `newly_joined` room.
timeline_limit = 4
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": timeline_limit,
}
}
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Join the room after the `from_token` which will make us consider this room as
# `newly_joined`.
user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
# Send some events but don't send enough to saturate the `timeline_limit`.
# We want to later test that we only get the new events since the `next_pos`
event_response3 = self.helper.send(
room_id1, "activity after token3", tok=user2_tok
)
event_response4 = self.helper.send(
room_id1, "activity after token4", tok=user2_tok
)
# Make an incremental Sliding Sync request (what we're trying to test)
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# We should see the new events and the rest should be filled with historical
# events which will make us `limited=True` since there are more to paginate to.
self.assertEqual(
response_body["rooms"][room_id1]["limited"],
True,
f"Our `timeline_limit` was {timeline_limit} "
+ f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
+ str(response_body["rooms"][room_id1]),
)
# Check to make sure that the "live" and historical events are returned
self._assertTimelineEqual(
room_id=room_id1,
actual_event_ids=[
event["event_id"]
for event in response_body["rooms"][room_id1]["timeline"]
],
expected_event_ids=[
event_response2["event_id"],
user1_join_response["event_id"],
event_response3["event_id"],
event_response4["event_id"],
],
message=str(response_body["rooms"][room_id1]["timeline"]),
)
# Only events after the `from_token` are "live" (join, event3, event4)
self.assertEqual(
response_body["rooms"][room_id1]["num_live"],
3,
response_body["rooms"][room_id1],
)
def test_rooms_ban_initial_sync(self) -> None:
"""
        Test that `rooms` we are banned from in an initial sync only allow us to see
        timeline events up to the ban event.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.send(room_id1, "activity before1", tok=user2_tok)
self.helper.send(room_id1, "activity before2", tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
user1_ban_response = self.helper.ban(
room_id1, src=user2_id, targ=user1_id, tok=user2_tok
)
self.helper.send(room_id1, "activity after5", tok=user2_tok)
self.helper.send(room_id1, "activity after6", tok=user2_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 3,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# We should see events before the ban but not after
self._assertTimelineEqual(
room_id=room_id1,
actual_event_ids=[
event["event_id"]
for event in response_body["rooms"][room_id1]["timeline"]
],
expected_event_ids=[
event_response3["event_id"],
event_response4["event_id"],
user1_ban_response["event_id"],
],
message=str(response_body["rooms"][room_id1]["timeline"]),
)
# No "live" events in an initial sync (no `from_token` to define the "live"
# range)
self.assertEqual(
response_body["rooms"][room_id1]["num_live"],
0,
response_body["rooms"][room_id1],
)
# There are more events to paginate to
self.assertEqual(
response_body["rooms"][room_id1]["limited"],
True,
response_body["rooms"][room_id1],
)
def test_rooms_ban_incremental_sync1(self) -> None:
"""
        Test that `rooms` we are banned from during the next incremental sync only
        allow us to see timeline events up to the ban event.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.send(room_id1, "activity before1", tok=user2_tok)
self.helper.send(room_id1, "activity before2", tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 4,
}
}
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
# The ban is within the token range (between the `from_token` and the sliding
# sync request)
user1_ban_response = self.helper.ban(
room_id1, src=user2_id, targ=user1_id, tok=user2_tok
)
self.helper.send(room_id1, "activity after5", tok=user2_tok)
self.helper.send(room_id1, "activity after6", tok=user2_tok)
# Make the incremental Sliding Sync request
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# We should see events before the ban but not after
self._assertTimelineEqual(
room_id=room_id1,
actual_event_ids=[
event["event_id"]
for event in response_body["rooms"][room_id1]["timeline"]
],
expected_event_ids=[
event_response3["event_id"],
event_response4["event_id"],
user1_ban_response["event_id"],
],
message=str(response_body["rooms"][room_id1]["timeline"]),
)
# All live events in the incremental sync
self.assertEqual(
response_body["rooms"][room_id1]["num_live"],
3,
response_body["rooms"][room_id1],
)
        # There aren't any more events to paginate to in this range
self.assertEqual(
response_body["rooms"][room_id1]["limited"],
False,
response_body["rooms"][room_id1],
)
def test_rooms_ban_incremental_sync2(self) -> None:
"""
Test that `rooms` we are banned from before the incremental sync don't return
any events in the timeline.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.send(room_id1, "activity before1", tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.send(room_id1, "activity after2", tok=user2_tok)
# The ban is before we get our `from_token`
self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
self.helper.send(room_id1, "activity after3", tok=user2_tok)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 4,
}
}
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
self.helper.send(room_id1, "activity after4", tok=user2_tok)
# Make the incremental Sliding Sync request
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# Nothing to see for this banned user in the room in the token range
self.assertIsNone(response_body["rooms"].get(room_id1))


@@ -0,0 +1,974 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple
from typing_extensions import assert_never
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import (
AccountDataTypes,
EventContentFields,
EventTypes,
RoomTypes,
)
from synapse.events import EventBase
from synapse.rest.client import devices, login, receipts, room, sync
from synapse.server import HomeServer
from synapse.types import (
JsonDict,
RoomStreamToken,
SlidingSyncStreamToken,
StreamKeyType,
StreamToken,
)
from synapse.util import Clock
from synapse.util.stringutils import random_string
from tests import unittest
from tests.server import TimedOutException
logger = logging.getLogger(__name__)
class SlidingSyncBase(unittest.HomeserverTestCase):
"""Base class for sliding sync test cases"""
sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"
def default_config(self) -> JsonDict:
config = super().default_config()
# Enable sliding sync
config["experimental_features"] = {"msc3575_enabled": True}
return config
def do_sync(
self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str
) -> Tuple[JsonDict, str]:
"""Do a sliding sync request with given body.
Asserts the request was successful.
        Args:
sync_body: The full request body to use
since: Optional since token
tok: Access token to use
Returns:
A tuple of the response body and the `pos` field.
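        Example (illustrative sketch; `user_tok` stands in for any access token):
            sync_body = {
                "lists": {
                    "foo-list": {
                        "ranges": [[0, 1]],
                        "required_state": [],
                        "timeline_limit": 1,
                    }
                }
            }
            # Initial sync, then poll for changes from the returned position
            response_body, from_token = self.do_sync(sync_body, tok=user_tok)
            response_body, _ = self.do_sync(sync_body, since=from_token, tok=user_tok)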
"""
sync_path = self.sync_endpoint
if since:
sync_path += f"?pos={since}"
channel = self.make_request(
method="POST",
path=sync_path,
content=sync_body,
access_token=tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
return channel.json_body, channel.json_body["pos"]
def _assertRequiredStateIncludes(
self,
actual_required_state: Any,
expected_state_events: Iterable[EventBase],
exact: bool = False,
) -> None:
"""
Wrapper around `assertIncludes` to give slightly better looking diff error
messages that include some context "$event_id (type, state_key)".
Args:
actual_required_state: The "required_state" of a room from a Sliding Sync
request response.
expected_state_events: The expected state events to be included in the
`actual_required_state`.
exact: Whether the actual state should be exactly equal to the expected
state (no extras).
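            Each entry in the diff is rendered roughly as (illustrative):
                $event_id ("m.room.create", "")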
"""
assert isinstance(actual_required_state, list)
for event in actual_required_state:
assert isinstance(event, dict)
self.assertIncludes(
{
f'{event["event_id"]} ("{event["type"]}", "{event["state_key"]}")'
for event in actual_required_state
},
{
f'{event.event_id} ("{event.type}", "{event.state_key}")'
for event in expected_state_events
},
exact=exact,
# Message to help understand the diff in context
message=str(actual_required_state),
)
def _bump_notifier_wait_for_events(
self,
user_id: str,
wake_stream_key: Literal[
StreamKeyType.ACCOUNT_DATA,
StreamKeyType.PRESENCE,
],
) -> None:
"""
        Wake up a `notifier.wait_for_events(user_id)` call without affecting the Sliding
Sync results.
Args:
user_id: The user ID to wake up the notifier for
wake_stream_key: The stream key to wake up. This will create an actual new
entity in that stream so it's best to choose one that won't affect the
                Sliding Sync results you're testing for. In other words, if you're testing
account data, choose `StreamKeyType.PRESENCE` instead. We support two
possible stream keys because you're probably testing one or the other so
one is always a "safe" option.
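        Example (illustrative; mirrors the usage in the timeout tests below):
            self._bump_notifier_wait_for_events(
                user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
            )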
"""
# We're expecting some new activity from this point onwards
from_token = self.hs.get_event_sources().get_current_token()
triggered_notifier_wait_for_events = False
        async def _on_new_activity(
before_token: StreamToken, after_token: StreamToken
) -> bool:
nonlocal triggered_notifier_wait_for_events
triggered_notifier_wait_for_events = True
return True
notifier = self.hs.get_notifier()
# Listen for some new activity for the user. We're just trying to confirm that
# our bump below actually does what we think it does (triggers new activity for
# the user).
result_awaitable = notifier.wait_for_events(
user_id,
1000,
            _on_new_activity,
from_token=from_token,
)
# Update the account data or presence so that `notifier.wait_for_events(...)`
# wakes up. We chose these two options because they're least likely to show up
# in the Sliding Sync response so it won't affect whether we have results.
if wake_stream_key == StreamKeyType.ACCOUNT_DATA:
self.get_success(
self.hs.get_account_data_handler().add_account_data_for_user(
user_id,
"org.matrix.foobarbaz",
{"foo": "bar"},
)
)
elif wake_stream_key == StreamKeyType.PRESENCE:
sending_user_id = self.register_user(
"user_bump_notifier_wait_for_events_" + random_string(10), "pass"
)
sending_user_tok = self.login(sending_user_id, "pass")
test_msg = {"foo": "bar"}
chan = self.make_request(
"PUT",
"/_matrix/client/r0/sendToDevice/m.test/1234",
content={"messages": {user_id: {"d1": test_msg}}},
access_token=sending_user_tok,
)
self.assertEqual(chan.code, 200, chan.result)
else:
assert_never(wake_stream_key)
# Wait for our notifier result
self.get_success(result_awaitable)
if not triggered_notifier_wait_for_events:
raise AssertionError(
"Expected `notifier.wait_for_events(...)` to be triggered"
)
class SlidingSyncTestCase(SlidingSyncBase):
"""
Tests regarding MSC3575 Sliding Sync `/sync` endpoint.
Please put tests in more specific test files if applicable. This test class is meant
for generic behavior of the endpoint.
"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
sync.register_servlets,
devices.register_servlets,
receipts.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.event_sources = hs.get_event_sources()
self.storage_controllers = hs.get_storage_controllers()
self.account_data_handler = hs.get_account_data_handler()
def _add_new_dm_to_global_account_data(
self, source_user_id: str, target_user_id: str, target_room_id: str
) -> None:
"""
Helper to handle inserting a new DM for the source user into global account data
(handles all of the list merging).
Args:
source_user_id: The user ID of the DM mapping we're going to update
target_user_id: User ID of the person the DM is with
target_room_id: Room ID of the DM
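            The resulting `m.direct` global account data maps user IDs to lists of
            room IDs, e.g. (illustrative):
                {"@user2:test": ["!dm1:test", "!dm2:test"]}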
"""
# Get the current DM map
existing_dm_map = self.get_success(
self.store.get_global_account_data_by_type_for_user(
source_user_id, AccountDataTypes.DIRECT
)
)
# Scrutinize the account data since it has no concrete type. We're just copying
# everything into a known type. It should be a mapping from user ID to a list of
# room IDs. Ignore anything else.
new_dm_map: Dict[str, List[str]] = {}
if isinstance(existing_dm_map, dict):
for user_id, room_ids in existing_dm_map.items():
if isinstance(user_id, str) and isinstance(room_ids, list):
for room_id in room_ids:
if isinstance(room_id, str):
new_dm_map[user_id] = new_dm_map.get(user_id, []) + [
room_id
]
# Add the new DM to the map
new_dm_map[target_user_id] = new_dm_map.get(target_user_id, []) + [
target_room_id
]
# Save the DM map to global account data
self.get_success(
self.store.add_account_data_for_user(
source_user_id,
AccountDataTypes.DIRECT,
new_dm_map,
)
)
def _create_dm_room(
self,
inviter_user_id: str,
inviter_tok: str,
invitee_user_id: str,
invitee_tok: str,
should_join_room: bool = True,
) -> str:
"""
Helper to create a DM room as the "inviter" and invite the "invitee" user to the
room. The "invitee" user also will join the room. The `m.direct` account data
will be set for both users.
"""
# Create a room and send an invite the other user
room_id = self.helper.create_room_as(
inviter_user_id,
is_public=False,
tok=inviter_tok,
)
self.helper.invite(
room_id,
src=inviter_user_id,
targ=invitee_user_id,
tok=inviter_tok,
extra_data={"is_direct": True},
)
if should_join_room:
# Person that was invited joins the room
self.helper.join(room_id, invitee_user_id, tok=invitee_tok)
# Mimic the client setting the room as a direct message in the global account
# data for both users.
self._add_new_dm_to_global_account_data(
invitee_user_id, inviter_user_id, room_id
)
self._add_new_dm_to_global_account_data(
inviter_user_id, invitee_user_id, room_id
)
return room_id
def test_sync_list(self) -> None:
"""
Test that room IDs show up in the Sliding Sync `lists`
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 1,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Make sure it has the foo-list we requested
self.assertListEqual(
list(response_body["lists"].keys()),
["foo-list"],
response_body["lists"].keys(),
)
# Make sure the list includes the room we are joined to
self.assertListEqual(
list(response_body["lists"]["foo-list"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [room_id],
}
],
response_body["lists"]["foo-list"],
)
def test_wait_for_sync_token(self) -> None:
"""
Test that worker will wait until it catches up to the given token
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a future token that will cause us to wait. Since we never send a new
# event to reach that future stream_ordering, the worker will wait until the
# full timeout.
stream_id_gen = self.store.get_events_stream_id_generator()
stream_id = self.get_success(stream_id_gen.get_next().__aenter__())
current_token = self.event_sources.get_current_token()
future_position_token = current_token.copy_and_replace(
StreamKeyType.ROOM,
RoomStreamToken(stream=stream_id),
)
future_position_token_serialized = self.get_success(
SlidingSyncStreamToken(future_position_token, 0).to_string(self.store)
)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 1,
}
}
}
channel = self.make_request(
"POST",
self.sync_endpoint + f"?pos={future_position_token_serialized}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
# Block for 10 seconds to make `notifier.wait_for_stream_token(from_token)`
# timeout
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=9900)
channel.await_result(timeout_ms=200)
self.assertEqual(channel.code, 200, channel.json_body)
        # We expect the next `pos` in the result to be the same as what we requested
        # with, because we weren't able to find anything new yet.
self.assertEqual(channel.json_body["pos"], future_position_token_serialized)
def test_wait_for_new_data(self) -> None:
"""
Test to make sure that the Sliding Sync request waits for new data to arrive.
(Only applies to incremental syncs with a `timeout` specified)
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id, user1_id, tok=user1_tok)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 0]],
"required_state": [],
"timeline_limit": 1,
}
}
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make the Sliding Sync request
channel = self.make_request(
"POST",
self.sync_endpoint + f"?timeout=10000&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
        # Block for 5 seconds to make sure we are waiting in `notifier.wait_for_events(...)`
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=5000)
# Bump the room with new events to trigger new results
event_response1 = self.helper.send(
room_id, "new activity in room", tok=user1_tok
)
# Should respond before the 10 second timeout
channel.await_result(timeout_ms=3000)
self.assertEqual(channel.code, 200, channel.json_body)
# Check to make sure the new event is returned
self.assertEqual(
[
event["event_id"]
for event in channel.json_body["rooms"][room_id]["timeline"]
],
[
event_response1["event_id"],
],
channel.json_body["rooms"][room_id]["timeline"],
)
def test_wait_for_new_data_timeout(self) -> None:
"""
Test to make sure that the Sliding Sync request waits for new data to arrive but
        no data ever arrives, so we time out. We're also making sure that the default
        data doesn't trigger a false positive for new data.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id, user1_id, tok=user1_tok)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 0]],
"required_state": [],
"timeline_limit": 1,
}
}
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make the Sliding Sync request
channel = self.make_request(
"POST",
self.sync_endpoint + f"?timeout=10000&pos={from_token}",
content=sync_body,
access_token=user1_tok,
await_result=False,
)
        # Block for 5 seconds to make sure we are waiting in `notifier.wait_for_events(...)`
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=5000)
        # Wake up `notifier.wait_for_events(...)`, which will cause us to test
        # `SlidingSyncResult.__bool__` for new results.
self._bump_notifier_wait_for_events(
user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
)
# Block for a little bit more to ensure we don't see any new results.
with self.assertRaises(TimedOutException):
channel.await_result(timeout_ms=4000)
# Wait for the sync to complete (wait for the rest of the 10 second timeout,
# 5000 + 4000 + 1200 > 10000)
channel.await_result(timeout_ms=1200)
self.assertEqual(channel.code, 200, channel.json_body)
# There should be no room sent down.
self.assertFalse(channel.json_body["rooms"])
def test_filter_list(self) -> None:
"""
Test that filters apply to `lists`
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
# Create a DM room
joined_dm_room_id = self._create_dm_room(
inviter_user_id=user1_id,
inviter_tok=user1_tok,
invitee_user_id=user2_id,
invitee_tok=user2_tok,
should_join_room=True,
)
invited_dm_room_id = self._create_dm_room(
inviter_user_id=user1_id,
inviter_tok=user1_tok,
invitee_user_id=user2_id,
invitee_tok=user2_tok,
should_join_room=False,
)
# Create a normal room
room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id, user1_id, tok=user1_tok)
# Create a room that user1 is invited to
invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
# Absence of filters does not imply "False" values
"all": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 1,
"filters": {},
},
# Test single truthy filter
"dms": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 1,
"filters": {"is_dm": True},
},
# Test single falsy filter
"non-dms": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 1,
"filters": {"is_dm": False},
},
# Test how multiple filters should stack (AND'd together)
"room-invites": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 1,
"filters": {"is_dm": False, "is_invite": True},
},
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Make sure it has the lists we requested
self.assertListEqual(
list(response_body["lists"].keys()),
["all", "dms", "non-dms", "room-invites"],
response_body["lists"].keys(),
)
# Make sure the lists have the correct rooms
self.assertListEqual(
list(response_body["lists"]["all"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [
invite_room_id,
room_id,
invited_dm_room_id,
joined_dm_room_id,
],
}
],
list(response_body["lists"]["all"]),
)
self.assertListEqual(
list(response_body["lists"]["dms"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [invited_dm_room_id, joined_dm_room_id],
}
],
list(response_body["lists"]["dms"]),
)
self.assertListEqual(
list(response_body["lists"]["non-dms"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [invite_room_id, room_id],
}
],
list(response_body["lists"]["non-dms"]),
)
self.assertListEqual(
list(response_body["lists"]["room-invites"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [invite_room_id],
}
],
list(response_body["lists"]["room-invites"]),
)
# Ensure DMs are correctly marked
self.assertDictEqual(
{
room_id: room.get("is_dm")
for room_id, room in response_body["rooms"].items()
},
{
invite_room_id: None,
room_id: None,
invited_dm_room_id: True,
joined_dm_room_id: True,
},
)
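# A minimal sketch (not Synapse's implementation) of the filter semantics the
# four lists above assert: every supplied filter must match (AND), omitted
# filters are ignored, and unset room values are treated as falsy.
@staticmethod
def _example_matches_filters(room_flags: dict, filters: dict) -> bool:
    # `room_flags` maps filter names (e.g. "is_dm") to the room's computed
    # value; `None`/missing counts the same as `False`.
    return all(
        bool(room_flags.get(name)) == wanted for name, wanted in filters.items()
    )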
def test_filter_regardless_of_membership_server_left_room(self) -> None:
"""
Test that filters apply to rooms regardless of membership. We're also
compounding the problem by having all of the local users leave the room, which
causes our server to leave the room.

We want to make sure that if a user is filtering rooms and then leaves, they
still get that final update down /sync telling them that they left.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
# Create a normal room
room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id, user1_id, tok=user1_tok)
# Create an encrypted space room
space_room_id = self.helper.create_room_as(
user2_id,
tok=user2_tok,
extra_content={
"creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
},
)
self.helper.send_state(
space_room_id,
EventTypes.RoomEncryption,
{EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
tok=user2_tok,
)
self.helper.join(space_room_id, user1_id, tok=user1_tok)
# Make an initial Sliding Sync request
channel = self.make_request(
"POST",
self.sync_endpoint,
{
"lists": {
"all-list": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 0,
"filters": {},
},
"foo-list": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 1,
"filters": {
"is_encrypted": True,
"room_types": [RoomTypes.SPACE],
},
},
}
},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
from_token = channel.json_body["pos"]
# Make sure the response has the lists we requested
self.assertListEqual(
list(channel.json_body["lists"].keys()),
["all-list", "foo-list"],
channel.json_body["lists"].keys(),
)
# Make sure the lists have the correct rooms
self.assertListEqual(
list(channel.json_body["lists"]["all-list"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [space_room_id, room_id],
}
],
)
self.assertListEqual(
list(channel.json_body["lists"]["foo-list"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [space_room_id],
}
],
)
# Everyone leaves the encrypted space room
self.helper.leave(space_room_id, user1_id, tok=user1_tok)
self.helper.leave(space_room_id, user2_id, tok=user2_tok)
# Make an incremental Sliding Sync request
channel = self.make_request(
"POST",
self.sync_endpoint + f"?pos={from_token}",
{
"lists": {
"all-list": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 0,
"filters": {},
},
"foo-list": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 1,
"filters": {
"is_encrypted": True,
"room_types": [RoomTypes.SPACE],
},
},
}
},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Make sure the lists still have the correct rooms even though we are `newly_left`
self.assertListEqual(
list(channel.json_body["lists"]["all-list"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [space_room_id, room_id],
}
],
)
self.assertListEqual(
list(channel.json_body["lists"]["foo-list"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [space_room_id],
}
],
)
def test_sort_list(self) -> None:
"""
Test that the `lists` are sorted by `stream_ordering`
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
# Activity that will order the rooms
self.helper.send(room_id3, "activity in room3", tok=user1_tok)
self.helper.send(room_id1, "activity in room1", tok=user1_tok)
self.helper.send(room_id2, "activity in room2", tok=user1_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 1,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Make sure it has the foo-list we requested
self.assertListEqual(
list(response_body["lists"].keys()),
["foo-list"],
response_body["lists"].keys(),
)
# Make sure the list is sorted in the way we expect
self.assertListEqual(
list(response_body["lists"]["foo-list"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [room_id2, room_id1, room_id3],
}
],
response_body["lists"]["foo-list"],
)
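# A minimal sketch (assumed shape, not Synapse's implementation) of the sort
# the assertion above relies on: rooms are ordered by the stream ordering of
# their most recent event, newest first.
@staticmethod
def _example_sort_rooms(last_stream_ordering_by_room: dict) -> list:
    # room2 saw the latest activity above, so it sorts first; room3 saw the
    # oldest, so it sorts last.
    return sorted(
        last_stream_ordering_by_room,
        key=lambda room_id: last_stream_ordering_by_room[room_id],
        reverse=True,
    )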
def test_sliced_windows(self) -> None:
"""
Test that the `lists` `ranges` are sliced correctly. Both sides of each range
are inclusive.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
_room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
# Make the Sliding Sync request for a single room
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 0]],
"required_state": [],
"timeline_limit": 1,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Make sure it has the foo-list we requested
self.assertListEqual(
list(response_body["lists"].keys()),
["foo-list"],
response_body["lists"].keys(),
)
# Make sure the single-room window is sliced the way we expect
self.assertListEqual(
list(response_body["lists"]["foo-list"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 0],
"room_ids": [room_id3],
}
],
response_body["lists"]["foo-list"],
)
# Make the Sliding Sync request for the first two rooms
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 1,
}
}
}
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
# Make sure it has the foo-list we requested
self.assertListEqual(
list(response_body["lists"].keys()),
["foo-list"],
response_body["lists"].keys(),
)
# Make sure the two-room window is sliced the way we expect
self.assertListEqual(
list(response_body["lists"]["foo-list"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 1],
"room_ids": [room_id3, room_id2],
}
],
response_body["lists"]["foo-list"],
)
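# A minimal sketch of the inclusive-range slicing exercised above: a sliding
# sync range `[start, end]` includes both endpoints, so it maps onto Python's
# half-open slicing via `end + 1`.
@staticmethod
def _example_slice_window(sorted_room_ids: list, start: int, end: int) -> list:
    # `[0, 0]` yields one room; `[0, 1]` yields two.
    return sorted_room_ids[start : end + 1]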
def test_rooms_with_no_updates_do_not_come_down_incremental_sync(self) -> None:
"""
Test that rooms with no updates are not returned in subsequent incremental
syncs.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 0,
}
}
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Make the incremental Sliding Sync request
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# Nothing has happened in the room, so the room should not come down
# /sync.
self.assertIsNone(response_body["rooms"].get(room_id1))
def test_empty_initial_room_comes_down_sync(self) -> None:
"""
Test that rooms come down /sync on initial sync even with an empty
`required_state` and a `timeline_limit` of 0.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [],
"timeline_limit": 0,
}
}
}
# Make the Sliding Sync request
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
self.assertEqual(response_body["rooms"][room_id1]["initial"], True)

View File

@@ -969,9 +969,8 @@ class CASTestCase(unittest.HomeserverTestCase):
         # Test that the response is HTML.
         self.assertEqual(channel.code, 200, channel.result)
         content_type_header_value = ""
-        for header in channel.result.get("headers", []):
-            if header[0] == b"Content-Type":
-                content_type_header_value = header[1].decode("utf8")
+        for header in channel.headers.getRawHeaders("Content-Type", []):
+            content_type_header_value = header
         self.assertTrue(content_type_header_value.startswith("text/html"))

File diff suppressed because it is too large

View File

@@ -198,17 +198,35 @@ class FakeChannel:
     def headers(self) -> Headers:
         if not self.result:
             raise Exception("No result yet.")
-        h = Headers()
-        for i in self.result["headers"]:
-            h.addRawHeader(*i)
+        h = self.result["headers"]
+        assert isinstance(h, Headers)
         return h

     def writeHeaders(
-        self, version: bytes, code: bytes, reason: bytes, headers: Headers
+        self,
+        version: bytes,
+        code: bytes,
+        reason: bytes,
+        headers: Union[Headers, List[Tuple[bytes, bytes]]],
     ) -> None:
         self.result["version"] = version
         self.result["code"] = code
         self.result["reason"] = reason
+
+        if isinstance(headers, list):
+            # Support prior to Twisted 24.7.0rc1
+            new_headers = Headers()
+            for k, v in headers:
+                assert isinstance(k, bytes), f"key is not of type bytes: {k!r}"
+                assert isinstance(v, bytes), f"value is not of type bytes: {v!r}"
+                new_headers.addRawHeader(k, v)
+            headers = new_headers
+
+        assert isinstance(
+            headers, Headers
+        ), f"headers are of the wrong type: {headers!r}"
         self.result["headers"] = headers

     def write(self, data: bytes) -> None:
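For reference, a minimal sketch of the `twisted.web.http_headers.Headers` round-trip that the shim above relies on (standard Twisted API, shown purely for illustration):

from twisted.web.http_headers import Headers

headers = Headers()
headers.addRawHeader(b"Content-Type", b"text/html; charset=utf-8")

# getRawHeaders returns the list of raw values for a (case-insensitive) header
# name, or the supplied default when the header is absent.
assert headers.getRawHeaders(b"Content-Type") == [b"text/html; charset=utf-8"]
assert headers.getRawHeaders(b"Location", []) == []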

View File

@@ -392,8 +392,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
         )
         self.assertEqual(channel.code, 301)
-        headers = channel.result["headers"]
-        location_headers = [v for k, v in headers if k == b"Location"]
+        location_headers = channel.headers.getRawHeaders(b"Location", [])
         self.assertEqual(location_headers, [b"/look/an/eagle"])

     def test_redirect_exception_with_cookie(self) -> None:
@@ -415,10 +414,10 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
         )
         self.assertEqual(channel.code, 304)
-        headers = channel.result["headers"]
-        location_headers = [v for k, v in headers if k == b"Location"]
+        headers = channel.headers
+        location_headers = headers.getRawHeaders(b"Location", [])
         self.assertEqual(location_headers, [b"/no/over/there"])
-        cookies_headers = [v for k, v in headers if k == b"Set-Cookie"]
+        cookies_headers = headers.getRawHeaders(b"Set-Cookie", [])
         self.assertEqual(cookies_headers, [b"session=yespls"])

     def test_head_request(self) -> None:

View File

@@ -249,5 +249,5 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
         self.assertEqual(cache.get_max_pos_of_last_change("bar@baz.net"), 3)
         self.assertEqual(cache.get_max_pos_of_last_change("user@elsewhere.org"), 4)

-        # Unknown entities will return the stream start position.
-        self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), 1)
+        # Unknown entities will return None
+        self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), None)
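A hedged sketch of the behavioural change asserted above (constructor arguments assumed from the surrounding test):

from synapse.util.caches.stream_change_cache import StreamChangeCache

# Sketch only: a cache whose stream starts at position 1.
cache = StreamChangeCache("ExampleCache", 1)
cache.entity_has_changed("bar@baz.net", 3)

# Known entities report the position of their last change...
assert cache.get_max_pos_of_last_change("bar@baz.net") == 3
# ...while unknown entities now return None rather than the stream start.
assert cache.get_max_pos_of_last_change("not@here.website") is None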