Compare commits

...

140 Commits

Author SHA1 Message Date
Erik Johnston
76b5a576eb WIP 2024-08-28 18:50:13 +01:00
Erik Johnston
1226bb4de2 Merge branch 'madlittlemods/sliding-sync-pre-populate-room-meta-data' into erikj/ss_room_lists_new_tables 2024-08-28 15:38:29 +01:00
Erik Johnston
90d0e035dd Fix port script tests by handling empty DBs correctly 2024-08-28 14:39:25 +01:00
Erik Johnston
ab414f2ab8 Use event_auth table to get previous membership 2024-08-28 14:23:32 +01:00
Erik Johnston
6f9932d146 Handle old rows with null event_stream_ordering column 2024-08-28 14:23:06 +01:00
Erik Johnston
bb905cd02c Only run the sliding sync background updates on the main database 2024-08-28 11:44:56 +01:00
Erik Johnston
7c9c62051c Remove all rooms pulled out from the queue 2024-08-28 11:31:19 +01:00
Erik Johnston
42f43a8ddf Move computing interested rooms to room list 2024-08-28 10:45:39 +01:00
Eric Eastwood
da463fb102 Add unique index right away for sliding_sync_joined_rooms_to_recalculate
This makes it so we can always `upsert` to avoid duplicates. Otherwise,
I'm not sure how to avoid inserting duplicates in certain situations
(see the FIXME in the diff), which would cause problems down the line
when the unique index is added later.
2024-08-28 00:50:33 -05:00
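
A minimal illustration of the idea (assuming sqlite3 and the `room_id` primary key from the schema in this diff; the room ID is made up): with the unique index in place from the start, repeated inserts can lean on `ON CONFLICT` instead of ever producing duplicate rows.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE IF NOT EXISTS sliding_sync_joined_rooms_to_recalculate("
    "room_id TEXT NOT NULL, PRIMARY KEY (room_id))"
)

# Upserting the same room twice leaves a single row: the PRIMARY KEY is the
# unique index that lets ON CONFLICT swallow the duplicate (SQLite >= 3.24).
for _ in range(2):
    conn.execute(
        "INSERT INTO sliding_sync_joined_rooms_to_recalculate (room_id) "
        "VALUES (?) ON CONFLICT (room_id) DO NOTHING",
        ("!room:example.com",),
    )

assert conn.execute(
    "SELECT COUNT(*) FROM sliding_sync_joined_rooms_to_recalculate"
).fetchone()[0] == 1
```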
Eric Eastwood
8468401a97 Adapt to using sliding_sync_joined_rooms_to_recalculate table 2024-08-28 00:42:14 -05:00
Eric Eastwood
53b7309f6c Add sliding_sync_joined_rooms_to_recalculate table 2024-08-27 20:48:56 -05:00
Eric Eastwood
94e1a54687 get_events(...) will omit events from unknown room versions
Thanks @erikjohnston
2024-08-27 20:01:55 -05:00
Eric Eastwood
a507f152c9 Use stream_id of some point before we fetch the current state
This is simpler, and some rooms are so old that they don't have any
`current_state_delta_stream` rows yet. It's easier to just get the
general max `stream_id` of the whole table than the max `stream_id`
for the specific room anyway.

Thanks @erikjohnston
2024-08-27 19:45:50 -05:00
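
A hedged sketch of that simplification (hypothetical helper; `txn` is any DB-API cursor): take a global high-water mark over the whole `current_state_delta_stream` table, which is well-defined even for rooms with no rows in it.

```python
def get_max_stream_id(txn) -> int:
    # Whole-table max instead of a per-room max: cheap, and still a valid
    # "point before we fetch the current state" for every room.
    txn.execute(
        "SELECT COALESCE(MAX(stream_id), 0) FROM current_state_delta_stream"
    )
    return txn.fetchone()[0]
```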
Eric Eastwood
9d08bc2157 Remove debug logs 2024-08-27 19:35:05 -05:00
Eric Eastwood
56a4c0ba6e Round out tests 2024-08-27 19:34:16 -05:00
Eric Eastwood
85a60c3132 More tests 2024-08-27 19:27:24 -05:00
Eric Eastwood
e5e7269998 Add more tests 2024-08-27 18:49:53 -05:00
Eric Eastwood
c8e17f7479 Add test when no rooms 2024-08-27 18:21:25 -05:00
Eric Eastwood
4dc9e268e6 Add test for catch-up background update 2024-08-27 18:08:17 -05:00
Eric Eastwood
9a7d8c2be4 Start catch-up if nothing written yet 2024-08-27 17:28:07 -05:00
Eric Eastwood
c51a309da5 Maybe: always start background update 2024-08-27 17:11:51 -05:00
Eric Eastwood
9764f626ea Fix query in Postgres 2024-08-27 12:09:00 -05:00
Erik Johnston
5592a9e6cb Port room lists to new file 2024-08-27 15:35:02 +01:00
Eric Eastwood
7a0c281028 Add placeholder tests 2024-08-26 19:43:52 -05:00
Eric Eastwood
7fe5d31e20 Note down caveat about forgotten 2024-08-26 18:52:10 -05:00
Eric Eastwood
53473a0eb4 Adapt sliding_sync_joined_rooms background update to use event_stream_ordering for progress
This way we can re-use it for the catch-up background process
2024-08-26 18:35:49 -05:00
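
A hedged sketch of the progress scheme (hypothetical helper and batch size; table and column names from this PR's schema): batches walk `event_stream_ordering` upwards and the highest value processed is stored as the update's progress, so the same loop can later be re-run as a catch-up pass from wherever it stopped.

```python
def run_one_batch(txn, last_stream_ordering: int, batch_size: int = 100) -> int:
    # Pull the next batch of rooms in ascending stream-ordering order.
    txn.execute(
        """
        SELECT room_id, event_stream_ordering
        FROM sliding_sync_joined_rooms
        WHERE event_stream_ordering > ?
        ORDER BY event_stream_ordering ASC
        LIMIT ?
        """,
        (last_stream_ordering, batch_size),
    )
    for room_id, stream_ordering in txn.fetchall():
        ...  # recalculate the sliding sync data for room_id
        last_stream_ordering = stream_ordering
    # Persisted as the background update's progress; a catch-up run simply
    # resumes from this value.
    return last_stream_ordering
```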
Eric Eastwood
eb3c84cf45 Kick-off background update for out-of-date snapshots 2024-08-26 17:31:26 -05:00
Eric Eastwood
6a44686dc3 Why it matters 2024-08-26 16:32:59 -05:00
Eric Eastwood
a94c1dd62c Add more context for why 2024-08-26 16:27:20 -05:00
Eric Eastwood
8bddbe23bd Clear out-of-date rows 2024-08-26 16:19:47 -05:00
Eric Eastwood
addb91485f Split test cases 2024-08-26 16:11:56 -05:00
Eric Eastwood
9795556052 Update comment 2024-08-26 14:42:55 -05:00
Eric Eastwood
a57d47b778 Use simple_upsert_txn for sliding_sync_membership_snapshots in background update 2024-08-22 23:59:06 -05:00
Eric Eastwood
b6a7d2bf6c Use simple_upsert_txn for sliding_sync_joined_rooms in background update 2024-08-22 23:30:00 -05:00
Eric Eastwood
f8926d07df Fix partial-stated room re-syncing state but nothing has changed
Fixes failing test in CI: `tests.handlers.test_federation.PartialJoinTestCase.test_failed_partial_join_is_clean`
```
2024-08-22 18:57:22-0500 [-] synapse.metrics.background_process_metrics - 253 - ERROR - sync_partial_state_room-0 - Background process 'sync_partial_state_room' threw an exception
	Traceback (most recent call last):
	  File "synapse/synapse/metrics/background_process_metrics.py", line 251, in run
	    return await func(*args, **kwargs)
	           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
	  File "synapse/synapse/handlers/federation.py", line 1842, in _sync_partial_state_room_wrapper
	    await self._sync_partial_state_room(
	  File "synapse/synapse/handlers/federation.py", line 1933, in _sync_partial_state_room
	    await self.state_handler.update_current_state(room_id)
	  File "synapse/synapse/state/__init__.py", line 554, in update_current_state
	    await self._storage_controllers.persistence.update_current_state(room_id)
	  File "synapse/synapse/storage/controllers/persist_events.py", line 491, in update_current_state
	    await self._event_persist_queue.add_to_queue(
	  File "synapse/synapse/storage/controllers/persist_events.py", line 245, in add_to_queue
	    res = await make_deferred_yieldable(end_item.deferred.observe())
	          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
	  File "synapse/synapse/storage/controllers/persist_events.py", line 288, in handle_queue_loop
	    ret = await self._per_item_callback(room_id, item.task)
	          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
	  File "synapse/synapse/storage/controllers/persist_events.py", line 370, in _process_event_persist_queue_task
	    await self._update_current_state(room_id, task)
	  File "synapse/synapse/storage/controllers/persist_events.py", line 507, in _update_current_state
	    await self.persist_events_store._calculate_sliding_sync_table_changes(
	  File "synapse/synapse/storage/databases/main/events.py", line 624, in _calculate_sliding_sync_table_changes
	    assert most_recent_event_pos_results, (
	           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
	AssertionError: We should not be seeing `None` here because we are still in the room (!room:example.com) and it should at-least have a join membership event that's keeping us here.
```
2024-08-22 19:06:04 -05:00
Eric Eastwood
21cc97ba9d Use simple_upsert_many_txn for sliding_sync_membership_snapshots
See https://github.com/element-hq/synapse/pull/17512#discussion_r1726820006
2024-08-22 18:52:05 -05:00
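
A hedged usage sketch of `simple_upsert_many_txn` (argument names per the `DatabasePool` helpers touched in this diff; the rows are made up): a single call upserts a whole batch of `(room_id, user_id)` snapshot rows instead of looping over per-row upserts.

```python
rows = [
    ("!room:example.com", "@alice:example.com", "join", 0),
    ("!room:example.com", "@bob:example.com", "leave", 0),
]
self.db_pool.simple_upsert_many_txn(
    txn,
    table="sliding_sync_membership_snapshots",
    key_names=("room_id", "user_id"),
    key_values=[(room_id, user_id) for room_id, user_id, _, _ in rows],
    value_names=("membership", "forgotten"),
    value_values=[(membership, forgotten) for _, _, membership, forgotten in rows],
)
```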
Eric Eastwood
fdb8b5931f Correct comment 2024-08-22 18:43:51 -05:00
Eric Eastwood
4b866c4fca Simplify what we need to think about to grab the best effort value 2024-08-22 18:36:43 -05:00
Eric Eastwood
088a4c7cf0 Use simple_upsert_txn to update sliding_sync_joined_rooms
See https://github.com/element-hq/synapse/pull/17512#discussion_r1726817206
2024-08-22 18:26:37 -05:00
Eric Eastwood
0726a6d58b Derive best effort stream_ordering outside of the transaction
See https://github.com/element-hq/synapse/pull/17512#discussion_r1727995882
2024-08-22 18:13:00 -05:00
Eric Eastwood
6edc4c78ce Allow for no bump_stamp (fix portdb CI job)
See https://github.com/element-hq/synapse/pull/17512#discussion_r1725998219
2024-08-22 17:09:43 -05:00
Eric Eastwood
44432e2118 Move tests to dedicated file
See https://github.com/element-hq/synapse/pull/17512#discussion_r1726849798
2024-08-22 16:56:09 -05:00
Eric Eastwood
bcba8cccfe No need for transaction
See https://github.com/element-hq/synapse/pull/17512#discussion_r1726844107
2024-08-22 16:52:31 -05:00
Eric Eastwood
693c06b2f1 Move away from backfill language 2024-08-22 16:48:02 -05:00
Eric Eastwood
4d87fa61c6 "backfill" -> "bg_update"
See https://github.com/element-hq/synapse/pull/17512#discussion_r1726837698
2024-08-22 16:44:02 -05:00
Eric Eastwood
d61aada8ba Simplify _update_sliding_sync_tables_with_new_persisted_events_txn()
See
https://github.com/element-hq/synapse/pull/17512#discussion_r1719997640
https://github.com/element-hq/synapse/pull/17512#discussion_r1726828894
https://github.com/element-hq/synapse/pull/17512#discussion_r1726836440
2024-08-22 16:35:59 -05:00
Eric Eastwood
6723824c4a Prefer simple_delete_many_txn
See https://github.com/element-hq/synapse/pull/17512#discussion_r1726819380
2024-08-22 15:47:44 -05:00
Eric Eastwood
980ee9aad6 Prefer simple_update_txn
See https://github.com/element-hq/synapse/pull/17512#discussion_r1726808112
2024-08-22 15:40:08 -05:00
Eric Eastwood
fc73b6ffc9 Rename insert_key/insert_value
See https://github.com/element-hq/synapse/pull/17512#discussion_r1726805894
2024-08-22 11:44:57 -05:00
Eric Eastwood
9b8d2017af Check events_and_context for state events
See https://github.com/element-hq/synapse/pull/17512#discussion_r1726798803
2024-08-22 11:34:40 -05:00
Eric Eastwood
339500d067 Fix sub-query selecting multiple rows 2024-08-21 22:56:25 -05:00
Eric Eastwood
31300f4ce5 More docstring 2024-08-21 22:15:19 -05:00
Eric Eastwood
b45b1896aa Fill out docstring 2024-08-21 22:09:57 -05:00
Eric Eastwood
ee2ef0b4d9 Add forgotten column 2024-08-21 21:54:22 -05:00
Eric Eastwood
0a938b137a Add missing boolean column to portdb script 2024-08-21 19:59:35 -05:00
Eric Eastwood
97248362d0 Log which room is strange 2024-08-21 19:26:14 -05:00
Eric Eastwood
02711552cf Better handle none case 2024-08-21 19:11:08 -05:00
Eric Eastwood
8ddf5c7235 Add tombstone to tests 2024-08-21 19:05:59 -05:00
Eric Eastwood
513ec8e906 Update tests 2024-08-21 18:51:04 -05:00
Eric Eastwood
cda2311520 Add tombstone_successor_room_id column 2024-08-21 18:21:44 -05:00
Eric Eastwood
c612572d12 Move away from stream_id
See https://github.com/element-hq/synapse/pull/17512#discussion_r1725806543
2024-08-21 17:14:34 -05:00
Eric Eastwood
5b1db39bb7 Add sender column so we can tell leaves from kicks 2024-08-21 16:42:12 -05:00
Eric Eastwood
e7a3328228 Pre-populate membership and membership_event_stream_ordering
See https://github.com/element-hq/synapse/pull/17512#discussion_r1725311745
2024-08-21 16:20:00 -05:00
Eric Eastwood
f6d7ffd9c5 Move _calculate_sliding_sync_table_changes(...) after we assign stream_ordering to events
See https://github.com/element-hq/synapse/pull/17512#discussion_r1725728637
2024-08-21 16:10:14 -05:00
Eric Eastwood
772c501bb6 Use available stream_id
See https://github.com/element-hq/synapse/pull/17512#discussion_r1725310035
2024-08-21 15:04:51 -05:00
Eric Eastwood
cda92af4a6 No need to update event_stream_ordering/bump_stamp ON CONFLICT 2024-08-21 14:46:50 -05:00
Eric Eastwood
d3f90e4bd8 Get full events for _sliding_sync_joined_rooms_backfill 2024-08-21 14:39:54 -05:00
Eric Eastwood
a5e06c6a8d Move back to the main store 2024-08-21 11:14:15 -05:00
Eric Eastwood
0233e20aa3 Use full event version after solving the circular import issues 2024-08-21 10:44:49 -05:00
Eric Eastwood
357132db1d Go back to simpler fetching senders 2024-08-20 21:54:03 -05:00
Eric Eastwood
726a8e9698 Attempt getting real events in background update (needs work) 2024-08-20 21:48:44 -05:00
Eric Eastwood
cc200ee9f5 Merge branch 'develop' into madlittlemods/sliding-sync-pre-populate-room-meta-data 2024-08-20 17:25:48 -05:00
Eric Eastwood
3eb77c3a2a Add sanity checks and fix wrong variable usage 2024-08-20 17:24:43 -05:00
Eric Eastwood
45c89ec625 Move pre-processing completely outside transaction 2024-08-20 15:41:53 -05:00
Eric Eastwood
ac5b05c86b Use TypedDict 2024-08-20 13:29:56 -05:00
Eric Eastwood
2964c567d3 Use dicts 2024-08-20 13:21:19 -05:00
Eric Eastwood
95d39db772 Closer types 2024-08-20 11:55:24 -05:00
Eric Eastwood
6cc6bdbedf Start of moving logic outside of the transaction (pre-process) 2024-08-20 11:10:34 -05:00
Eric Eastwood
574a04a40f Test state reset on membership 2024-08-19 23:30:25 -05:00
Eric Eastwood
8ee2e114dd Add test to handle state reset in the meta data 2024-08-19 23:22:24 -05:00
Eric Eastwood
98fb56e5fe Prefer _update_sliding_sync_tables_with_new_persisted_events_txn(...) to do the right thing
See https://github.com/element-hq/synapse/pull/17512#discussion_r1719992152
2024-08-19 22:30:50 -05:00
Eric Eastwood
df0c57d383 Merge branch 'develop' into madlittlemods/sliding-sync-pre-populate-room-meta-data 2024-08-19 16:34:41 -05:00
Eric Eastwood
d2f5247e77 Update comment 2024-08-16 00:15:03 -05:00
Eric Eastwood
c89d859c7c Fill in docstrings 2024-08-15 23:52:01 -05:00
Eric Eastwood
2ec93e3f0d Move function next to other helpers 2024-08-15 23:39:39 -05:00
Eric Eastwood
fa63c02648 Fix lints 2024-08-15 23:30:16 -05:00
Eric Eastwood
419be7c6b2 Finish off background update tests 2024-08-15 23:29:29 -05:00
Eric Eastwood
ef5f0fca3a Add more tests 2024-08-15 23:18:50 -05:00
Eric Eastwood
fb5af8f5fa Add background update test for sliding_sync_membership_snapshots 2024-08-15 22:13:32 -05:00
Eric Eastwood
8461faf384 Add historical case to background update 2024-08-15 21:56:12 -05:00
Eric Eastwood
6c2fc1d20f Move background updates to StateBackgroundUpdateStore
So we can access `_get_state_groups_from_groups_txn(...)`
2024-08-15 20:51:43 -05:00
Eric Eastwood
cbeff57402 Use helper 2024-08-15 20:31:57 -05:00
Eric Eastwood
4b42e44ef9 Work on background update for sliding_sync_membership_snapshots 2024-08-15 00:21:35 -05:00
Eric Eastwood
d113e743ae Fix lints 2024-08-14 19:30:52 -05:00
Eric Eastwood
23e0d34a2d Add more tests 2024-08-14 19:30:22 -05:00
Eric Eastwood
1c931cb3e7 Add background update for sliding_sync_joined_rooms 2024-08-14 19:19:15 -05:00
Eric Eastwood
9f551f0e97 Fix lints 2024-08-14 11:32:33 -05:00
Eric Eastwood
c8508f113a Clean up tables when a room is purged/deleted 2024-08-14 11:27:57 -05:00
Eric Eastwood
f49003c35c No invites needed 2024-08-13 18:55:59 -05:00
Eric Eastwood
8b0e1692f9 More realistic remote room forgotten test 2024-08-13 18:51:11 -05:00
Eric Eastwood
5df94f47b5 Fix running into StopIteration
More context about how/why `StopIteration` was being silently ignored,
which made this problem harder to debug. See
https://github.com/element-hq/synapse/pull/17512#discussion_r1715954505
2024-08-13 17:15:09 -05:00
Eric Eastwood
3566abd9bc Fix boolean schema for Postgres 2024-08-13 15:14:48 -05:00
Eric Eastwood
96a4614f92 Update fixme comment 2024-08-13 14:50:11 -05:00
Eric Eastwood
dc447a673f Clarify when/why we upsert 2024-08-13 14:47:17 -05:00
Eric Eastwood
32ae162278 Fix rejecting invite when no_longer_in_room (and other non-join transitions) 2024-08-13 14:35:24 -05:00
Eric Eastwood
a90f3d4ae2 Merge branch 'develop' into madlittlemods/sliding-sync-pre-populate-room-meta-data 2024-08-13 12:28:36 -05:00
Eric Eastwood
eb3a185cfc Fix federating backfill test 2024-08-13 12:24:53 -05:00
Eric Eastwood
517946d940 Fix lints 2024-08-12 20:31:25 -05:00
Eric Eastwood
f600eacd0d Adjust test description 2024-08-12 20:30:48 -05:00
Eric Eastwood
3423eb72d5 Add test to make sure snapshot evolves with membership 2024-08-12 20:29:58 -05:00
Eric Eastwood
5589ae48ca Add test for remote invite rejected/retracted 2024-08-12 20:14:14 -05:00
Eric Eastwood
83a5858083 Add tests for remote invites 2024-08-12 19:57:28 -05:00
Eric Eastwood
3e1f24ea11 User ID is not unique because a user is joined to many rooms 2024-08-12 19:46:23 -05:00
Eric Eastwood
ab074f5335 Fix events from rooms we're not joined to affecting the joined room stream ordering 2024-08-12 19:40:53 -05:00
Eric Eastwood
53232e6df5 Fill in for remote invites (out of band, outlier membership) 2024-08-12 18:14:02 -05:00
Eric Eastwood
f069659343 Fix lints 2024-08-12 15:49:40 -05:00
Eric Eastwood
552f8f496d Update descriptions 2024-08-12 15:43:06 -05:00
Eric Eastwood
0af3b4822c Refactor to sliding_sync_membership_snapshots 2024-08-12 15:10:44 -05:00
Eric Eastwood
ed47a7eff5 Fix bumping when events are persisted out of order 2024-08-12 11:27:17 -05:00
Eric Eastwood
3367422fd3 Need to fix upsert 2024-08-08 18:23:50 -05:00
Eric Eastwood
ca909013c8 Fill in stream_ordering/bump_stamp for any event being persisted 2024-08-08 17:49:15 -05:00
Eric Eastwood
cc2d2b6b9f Fill in stream_ordering/bump_stamp when we add current state to the joined rooms table 2024-08-08 15:41:55 -05:00
Eric Eastwood
bc3796d333 Fix some lints 2024-08-07 20:49:46 -05:00
Eric Eastwood
5cf3ad3d7f Handle server left room 2024-08-07 20:47:13 -05:00
Eric Eastwood
bf78692ba0 Handle to_delete 2024-08-07 20:09:53 -05:00
Eric Eastwood
a1aaa47dad Add more tests 2024-08-07 19:58:51 -05:00
Eric Eastwood
c590474757 Test non-joins 2024-08-07 19:24:58 -05:00
Eric Eastwood
5b1053f23e Better test assertions 2024-08-07 19:07:43 -05:00
Eric Eastwood
68a3daf605 Fix comparison and insert 2024-08-07 18:10:24 -05:00
Eric Eastwood
61cea4e9b7 Closer to right 2024-08-07 18:07:53 -05:00
Eric Eastwood
87d95615d4 Change to updating the latest membership in the room 2024-08-07 16:37:17 -05:00
Eric Eastwood
cb335805d4 Server left room test 2024-08-07 10:46:34 -05:00
Eric Eastwood
2f3bd27284 Test is running 2024-08-06 16:50:14 -05:00
Eric Eastwood
f96d0c36a3 Special treatment for boolean columns
See docs/development/database_schema.md#boolean-columns (at commit 1dfa59b238)
2024-08-06 15:30:51 -05:00
Eric Eastwood
1a251d5211 Fill in sliding_sync_non_join_memberships when current state changes 2024-08-06 15:18:34 -05:00
Eric Eastwood
2b5f07d714 Start of updating sliding_sync_joined_rooms 2024-08-05 22:34:21 -05:00
Eric Eastwood
ad1c887b4c Merge branch 'develop' into madlittlemods/sliding-sync-pre-populate-room-meta-data 2024-08-05 18:19:17 -05:00
Eric Eastwood
8392d6ac3b Use foreign keys 2024-07-31 19:00:55 -05:00
Eric Eastwood
e7e9cb289d Add changelog 2024-07-31 18:43:50 -05:00
Eric Eastwood
d26ac746d4 Start thinking about schemas 2024-07-31 18:40:23 -05:00
27 changed files with 9241 additions and 1470 deletions

changelog.d/17512.misc (new file, 1 line)

@@ -0,0 +1 @@
Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting.


@@ -129,6 +129,11 @@ BOOLEAN_COLUMNS = {
"remote_media_cache": ["authenticated"],
"room_stats_state": ["is_federatable"],
"rooms": ["is_public", "has_auth_chain_index"],
"sliding_sync_joined_rooms": ["is_encrypted"],
"sliding_sync_membership_snapshots": [
"has_known_state",
"is_encrypted",
],
"users": ["shadow_banned", "approved", "locked", "suspended"],
"un_partial_stated_event_stream": ["rejection_status_changed"],
"users_who_share_rooms": ["share_private"],


@@ -245,6 +245,8 @@ class EventContentFields:
# `m.room.encryption` algorithm field
ENCRYPTION_ALGORITHM: Final = "algorithm"
TOMBSTONE_SUCCESSOR_ROOM: Final = "replacement_room"
class EventUnsignedContentFields:
"""Fields found inside the 'unsigned' data on events"""

File diff suppressed because it is too large


@@ -13,7 +13,7 @@
#
import logging
from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Sequence, Set
from typing import TYPE_CHECKING, AbstractSet, Dict, Mapping, Optional, Sequence, Set
from typing_extensions import assert_never
@@ -30,6 +30,7 @@ from synapse.types import (
JsonMapping,
MultiWriterStreamToken,
SlidingSyncStreamToken,
StrCollection,
StreamToken,
)
from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
@@ -55,9 +56,9 @@ class SlidingSyncExtensionHandler:
sync_config: SlidingSyncConfig,
previous_connection_state: "PerConnectionState",
new_connection_state: "MutablePerConnectionState",
actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
actual_room_ids: Set[str],
actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
to_token: StreamToken,
from_token: Optional[SlidingSyncStreamToken],
) -> SlidingSyncResult.Extensions:
@@ -144,10 +145,10 @@ class SlidingSyncExtensionHandler:
def find_relevant_room_ids_for_extension(
self,
requested_lists: Optional[List[str]],
requested_room_ids: Optional[List[str]],
actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
actual_room_ids: Set[str],
requested_lists: Optional[StrCollection],
requested_room_ids: Optional[StrCollection],
actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
actual_room_ids: AbstractSet[str],
) -> Set[str]:
"""
Handle the reserved `lists`/`rooms` keys for extensions. Extensions should only
@@ -343,7 +344,7 @@ class SlidingSyncExtensionHandler:
async def get_account_data_extension_response(
self,
sync_config: SlidingSyncConfig,
actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
actual_room_ids: Set[str],
account_data_request: SlidingSyncConfig.Extensions.AccountDataExtension,
to_token: StreamToken,
@@ -436,9 +437,9 @@ class SlidingSyncExtensionHandler:
sync_config: SlidingSyncConfig,
previous_connection_state: "PerConnectionState",
new_connection_state: "MutablePerConnectionState",
actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
actual_room_ids: Set[str],
actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
receipts_request: SlidingSyncConfig.Extensions.ReceiptsExtension,
to_token: StreamToken,
from_token: Optional[SlidingSyncStreamToken],
@@ -598,9 +599,9 @@ class SlidingSyncExtensionHandler:
async def get_typing_extension_response(
self,
sync_config: SlidingSyncConfig,
actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
actual_room_ids: Set[str],
actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
typing_request: SlidingSyncConfig.Extensions.TypingExtension,
to_token: StreamToken,
from_token: Optional[SlidingSyncStreamToken],

File diff suppressed because it is too large


@@ -21,7 +21,7 @@
import itertools
import logging
from collections import defaultdict
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union
from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState
from synapse.api.errors import Codes, StoreError, SynapseError
@@ -975,7 +975,7 @@ class SlidingSyncRestServlet(RestServlet):
return response
def encode_lists(
self, lists: Dict[str, SlidingSyncResult.SlidingWindowList]
self, lists: Mapping[str, SlidingSyncResult.SlidingWindowList]
) -> JsonDict:
def encode_operation(
operation: SlidingSyncResult.SlidingWindowList.Operation,


@@ -502,8 +502,15 @@ class EventsPersistenceStorageController:
"""
state = await self._calculate_current_state(room_id)
delta = await self._calculate_state_delta(room_id, state)
sliding_sync_table_changes = (
await self.persist_events_store._calculate_sliding_sync_table_changes(
room_id, [], delta
)
)
await self.persist_events_store.update_current_state(room_id, delta)
await self.persist_events_store.update_current_state(
room_id, delta, sliding_sync_table_changes
)
async def _calculate_current_state(self, room_id: str) -> StateMap[str]:
"""Calculate the current state of a room, based on the forward extremities


@@ -35,6 +35,7 @@ from typing import (
Iterable,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
@@ -1254,9 +1255,9 @@ class DatabasePool:
self,
txn: LoggingTransaction,
table: str,
keyvalues: Dict[str, Any],
values: Dict[str, Any],
insertion_values: Optional[Dict[str, Any]] = None,
keyvalues: Mapping[str, Any],
values: Mapping[str, Any],
insertion_values: Optional[Mapping[str, Any]] = None,
where_clause: Optional[str] = None,
) -> bool:
"""
@@ -1299,9 +1300,9 @@ class DatabasePool:
self,
txn: LoggingTransaction,
table: str,
keyvalues: Dict[str, Any],
values: Dict[str, Any],
insertion_values: Optional[Dict[str, Any]] = None,
keyvalues: Mapping[str, Any],
values: Mapping[str, Any],
insertion_values: Optional[Mapping[str, Any]] = None,
where_clause: Optional[str] = None,
lock: bool = True,
) -> bool:
@@ -1322,7 +1323,7 @@ class DatabasePool:
if lock:
# We need to lock the table :(
self.engine.lock_table(txn, table)
txn.database_engine.lock_table(txn, table)
def _getwhere(key: str) -> str:
# If the value we're passing in is None (aka NULL), we need to use
@@ -1376,13 +1377,13 @@ class DatabasePool:
# successfully inserted
return True
@staticmethod
def simple_upsert_txn_native_upsert(
self,
txn: LoggingTransaction,
table: str,
keyvalues: Dict[str, Any],
values: Dict[str, Any],
insertion_values: Optional[Dict[str, Any]] = None,
keyvalues: Mapping[str, Any],
values: Mapping[str, Any],
insertion_values: Optional[Mapping[str, Any]] = None,
where_clause: Optional[str] = None,
) -> bool:
"""
@@ -1535,8 +1536,8 @@ class DatabasePool:
self.simple_upsert_txn_emulated(txn, table, _keys, _vals, lock=False)
@staticmethod
def simple_upsert_many_txn_native_upsert(
self,
txn: LoggingTransaction,
table: str,
key_names: Collection[str],
@@ -1966,8 +1967,8 @@ class DatabasePool:
def simple_update_txn(
txn: LoggingTransaction,
table: str,
keyvalues: Dict[str, Any],
updatevalues: Dict[str, Any],
keyvalues: Mapping[str, Any],
updatevalues: Mapping[str, Any],
) -> int:
"""
Update rows in the given database table.

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -457,6 +457,8 @@ class EventsWorkerStore(SQLBaseStore):
) -> Optional[EventBase]:
"""Get an event from the database by event_id.
Events for unknown room versions will also be filtered out.
Args:
event_id: The event_id of the event to fetch
@@ -511,6 +513,10 @@ class EventsWorkerStore(SQLBaseStore):
) -> Dict[str, EventBase]:
"""Get events from the database
Unknown events will be omitted from the response.
Events for unknown room versions will also be filtered out.
Args:
event_ids: The event_ids of the events to fetch
@@ -553,6 +559,8 @@ class EventsWorkerStore(SQLBaseStore):
Unknown events will be omitted from the response.
Events for unknown room versions will also be filtered out.
Args:
event_ids: The event_ids of the events to fetch


@@ -454,6 +454,10 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
# so must be deleted first.
"local_current_membership",
"room_memberships",
# Note: the sliding_sync_ tables have foreign keys to the `events` table
# so must be deleted first.
"sliding_sync_joined_rooms",
"sliding_sync_membership_snapshots",
"events",
"federation_inbound_events_staging",
"receipts_graph",


@@ -51,7 +51,12 @@ from synapse.storage.database import (
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.engines import Sqlite3Engine
from synapse.storage.roommember import MemberSummary, ProfileInfo, RoomsForUser
from synapse.storage.roommember import (
MemberSummary,
ProfileInfo,
RoomsForUser,
RoomsForUserSlidingSync,
)
from synapse.types import (
JsonDict,
PersistedEventPosition,
@@ -1337,6 +1342,12 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
keyvalues={"user_id": user_id, "room_id": room_id},
updatevalues={"forgotten": 1},
)
self.db_pool.simple_update_txn(
txn,
table="sliding_sync_membership_snapshots",
keyvalues={"user_id": user_id, "room_id": room_id},
updatevalues={"forgotten": 1},
)
self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id))
self._invalidate_cache_and_stream(
@@ -1371,6 +1382,54 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
desc="room_forgetter_stream_pos",
)
# @cached(iterable=True, max_entries=10000)
async def get_sliding_sync_rooms_for_user(
self,
user_id: str,
) -> Mapping[str, RoomsForUserSlidingSync]:
"""Get all the rooms for a user to handle a sliding sync request.
Ignores forgotten rooms and rooms the user themselves left (kicks, where
the sender differs from the user, are kept).
Returns:
Map from room ID to membership info
"""
def get_sliding_sync_rooms_for_user_txn(
txn: LoggingTransaction,
) -> Dict[str, RoomsForUserSlidingSync]:
sql = """
SELECT m.room_id, m.sender, m.membership, m.membership_event_id,
m.event_stream_ordering,
COALESCE(j.room_type, m.room_type),
COALESCE(j.is_encrypted, m.is_encrypted),
COALESCE(j.tombstone_successor_room_id, m.tombstone_successor_room_id)
FROM sliding_sync_membership_snapshots AS m
LEFT JOIN sliding_sync_joined_rooms AS j USING (room_id)
WHERE user_id = ?
AND m.forgotten = 0
AND (membership != 'leave' OR m.sender != m.user_id)
"""
txn.execute(sql, (user_id,))
return {
row[0]: RoomsForUserSlidingSync(
room_id=row[0],
sender=row[1],
membership=row[2],
event_id=row[3],
membership_event_stream_ordering=row[4],
room_type=row[5],
is_encrypted=row[6],
tombstone_successor_room_id=row[7],
)
for row in txn
}
return await self.db_pool.runInteraction(
"get_sliding_sync_rooms_for_user",
get_sliding_sync_rooms_for_user_txn,
)
class RoomMemberBackgroundUpdateStore(SQLBaseStore):
def __init__(


@@ -161,45 +161,80 @@ class StateDeltasStore(SQLBaseStore):
self._get_max_stream_id_in_current_state_deltas_txn,
)
def get_current_state_deltas_for_room_txn(
self,
txn: LoggingTransaction,
room_id: str,
*,
from_token: Optional[RoomStreamToken],
to_token: Optional[RoomStreamToken],
) -> List[StateDelta]:
"""
Get the state deltas between two tokens.
(> `from_token` and <= `to_token`)
"""
from_clause = ""
from_args = []
if from_token is not None:
from_clause = "AND ? < stream_id"
from_args = [from_token.stream]
to_clause = ""
to_args = []
if to_token is not None:
to_clause = "AND stream_id <= ?"
to_args = [to_token.get_max_stream_pos()]
sql = f"""
SELECT instance_name, stream_id, type, state_key, event_id, prev_event_id
FROM current_state_delta_stream
WHERE room_id = ? {from_clause} {to_clause}
ORDER BY stream_id ASC
"""
txn.execute(sql, [room_id] + from_args + to_args)
return [
StateDelta(
stream_id=row[1],
room_id=room_id,
event_type=row[2],
state_key=row[3],
event_id=row[4],
prev_event_id=row[5],
)
for row in txn
if _filter_results_by_stream(from_token, to_token, row[0], row[1])
]
@trace
async def get_current_state_deltas_for_room(
self, room_id: str, from_token: RoomStreamToken, to_token: RoomStreamToken
self,
room_id: str,
*,
from_token: Optional[RoomStreamToken],
to_token: Optional[RoomStreamToken],
) -> List[StateDelta]:
"""Get the state deltas between two tokens."""
"""
Get the state deltas between two tokens.
if not self._curr_state_delta_stream_cache.has_entity_changed(
room_id, from_token.stream
(> `from_token` and <= `to_token`)
"""
if (
from_token is not None
and not self._curr_state_delta_stream_cache.has_entity_changed(
room_id, from_token.stream
)
):
return []
def get_current_state_deltas_for_room_txn(
txn: LoggingTransaction,
) -> List[StateDelta]:
sql = """
SELECT instance_name, stream_id, type, state_key, event_id, prev_event_id
FROM current_state_delta_stream
WHERE room_id = ? AND ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC
"""
txn.execute(
sql, (room_id, from_token.stream, to_token.get_max_stream_pos())
)
return [
StateDelta(
stream_id=row[1],
room_id=room_id,
event_type=row[2],
state_key=row[3],
event_id=row[4],
prev_event_id=row[5],
)
for row in txn
if _filter_results_by_stream(from_token, to_token, row[0], row[1])
]
return await self.db_pool.runInteraction(
"get_current_state_deltas_for_room", get_current_state_deltas_for_room_txn
"get_current_state_deltas_for_room",
self.get_current_state_deltas_for_room_txn,
room_id,
from_token=from_token,
to_token=to_token,
)
@trace


@@ -1264,12 +1264,76 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
return None
async def get_last_event_pos_in_room(
self,
room_id: str,
event_types: Optional[StrCollection] = None,
) -> Optional[Tuple[str, PersistedEventPosition]]:
"""
Returns the ID and event position of the last event in a room.
Based on `get_last_event_pos_in_room_before_stream_ordering(...)`
Args:
room_id
event_types: Optional allowlist of event types to filter by
Returns:
The ID of the most recent event and its position, or None if there are no
events in the room that match the given event types.
"""
def _get_last_event_pos_in_room_txn(
txn: LoggingTransaction,
) -> Optional[Tuple[str, PersistedEventPosition]]:
event_type_clause = ""
event_type_args: List[str] = []
if event_types is not None and len(event_types) > 0:
event_type_clause, event_type_args = make_in_list_sql_clause(
txn.database_engine, "type", event_types
)
event_type_clause = f"AND {event_type_clause}"
sql = f"""
SELECT event_id, stream_ordering, instance_name
FROM events
LEFT JOIN rejections USING (event_id)
WHERE room_id = ?
{event_type_clause}
AND NOT outlier
AND rejections.event_id IS NULL
ORDER BY stream_ordering DESC
LIMIT 1
"""
txn.execute(
sql,
[room_id] + event_type_args,
)
row = cast(Optional[Tuple[str, int, str]], txn.fetchone())
if row is not None:
event_id, stream_ordering, instance_name = row
return event_id, PersistedEventPosition(
# If instance_name is null we default to "master"
instance_name or "master",
stream_ordering,
)
return None
return await self.db_pool.runInteraction(
"get_last_event_pos_in_room",
_get_last_event_pos_in_room_txn,
)
@trace
async def get_last_event_pos_in_room_before_stream_ordering(
self,
room_id: str,
end_token: RoomStreamToken,
event_types: Optional[Collection[str]] = None,
event_types: Optional[StrCollection] = None,
) -> Optional[Tuple[str, PersistedEventPosition]]:
"""
Returns the ID and event position of the last event in a room at or before a


@@ -39,6 +39,18 @@ class RoomsForUser:
room_version_id: str
@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True)
class RoomsForUserSlidingSync:
room_id: str
sender: str
membership: str
event_id: str
membership_event_stream_ordering: int
room_type: Optional[str]
is_encrypted: bool
tombstone_successor_room_id: Optional[str]
@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True)
class GetRoomsForUserWithStreamOrdering:
room_id: str


@@ -19,7 +19,7 @@
#
#
SCHEMA_VERSION = 86 # remember to update the list below when updating
SCHEMA_VERSION = 87 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@@ -142,6 +142,10 @@ Changes in SCHEMA_VERSION = 85
Changes in SCHEMA_VERSION = 86
- Add a column `authenticated` to the tables `local_media_repository` and `remote_media_cache`
Changes in SCHEMA_VERSION = 87
- Add tables to store Sliding Sync data for quick filtering/sorting
(`sliding_sync_joined_rooms`, `sliding_sync_membership_snapshots`)
"""


@@ -0,0 +1,153 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2024 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- This table is a list/queue used to keep track of which rooms need to be inserted into
-- `sliding_sync_joined_rooms`. We do this to avoid reading from `current_state_events`
-- during the background update to populate `sliding_sync_joined_rooms`, which works
-- but takes a lot of work for the database to grab `DISTINCT` room_ids given how many
-- state events there are for each room.
CREATE TABLE IF NOT EXISTS sliding_sync_joined_rooms_to_recalculate(
room_id TEXT NOT NULL REFERENCES rooms(room_id),
PRIMARY KEY (room_id)
);
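-- Hedged illustration (not part of this migration): the prefill background
-- update registered at the end of this file can then be a plain bulk insert
-- from `rooms`, with the primary key absorbing any duplicates:
--
--     INSERT INTO sliding_sync_joined_rooms_to_recalculate (room_id)
--     SELECT room_id FROM rooms
--     ON CONFLICT (room_id) DO NOTHING;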
-- A table for storing room meta data (current state relevant to sliding sync) for
-- rooms that the local server is still participating in (someone local is joined to
-- the room).
--
-- We store the joined rooms in a separate table from `sliding_sync_membership_snapshots`
-- because we need up-to-date information for joined rooms and it can be shared across
-- everyone who is joined.
--
-- This table is kept in sync with `current_state_events` which means if the server is
-- no longer participating in a room, the row will be deleted.
CREATE TABLE IF NOT EXISTS sliding_sync_joined_rooms(
room_id TEXT NOT NULL REFERENCES rooms(room_id),
-- The `stream_ordering` of the most-recent/latest event in the room
event_stream_ordering BIGINT NOT NULL REFERENCES events(stream_ordering),
-- The `stream_ordering` of the last event according to the `bump_event_types`
bump_stamp BIGINT,
-- `m.room.create` -> `content.type` (current state)
--
-- Useful for the `spaces`/`not_spaces` filter in the Sliding Sync API
room_type TEXT,
-- `m.room.name` -> `content.name` (current state)
--
-- Useful for the room meta data and `room_name_like` filter in the Sliding Sync API
room_name TEXT,
-- `m.room.encryption` -> `content.algorithm` (current state)
--
-- Useful for the `is_encrypted` filter in the Sliding Sync API
is_encrypted BOOLEAN DEFAULT FALSE NOT NULL,
-- `m.room.tombstone` -> `content.replacement_room` (current state)
--
-- Useful for the `include_old_rooms` functionality in the Sliding Sync API
tombstone_successor_room_id TEXT,
PRIMARY KEY (room_id)
);
-- So we can purge rooms easily: the primary key is already `room_id`.
--
-- So we can sort by `stream_ordering`
CREATE UNIQUE INDEX IF NOT EXISTS sliding_sync_joined_rooms_event_stream_ordering ON sliding_sync_joined_rooms(event_stream_ordering);
-- A table for storing a snapshot of room meta data (historical current state relevant
-- for sliding sync) at the time of a local user's membership. Only has rows for the
-- latest membership event for a given local user in a room which matches
-- `local_current_membership`.
--
-- We store all memberships including joins. This makes it easy to reference this table
-- to find all memberships for a given user and shares the same semantics as
-- `local_current_membership`. And we get to avoid some table maintenance; if we only
-- stored non-joins, we would have to delete the row for the user when the user joins
-- the room.
--
-- For remote invites/knocks where the server is not participating in the room, we will
-- use stripped state events to populate this table. We assume that if any stripped
-- state is given, it will include all possible stripped state event types. For
-- example, if stripped state is given but `m.room.encryption` isn't included, we will
-- assume that the room is not encrypted. Stripped state doesn't include the
-- `m.room.tombstone` event, so we just assume that the room doesn't have a tombstone.
--
-- We don't include `bump_stamp` here because we can just use the `stream_ordering` from
-- the membership event itself as the `bump_stamp`.
CREATE TABLE IF NOT EXISTS sliding_sync_membership_snapshots(
room_id TEXT NOT NULL REFERENCES rooms(room_id),
user_id TEXT NOT NULL,
-- Useful to be able to tell leaves from kicks (where the `user_id` is different from the `sender`)
sender TEXT NOT NULL,
membership_event_id TEXT NOT NULL REFERENCES events(event_id),
membership TEXT NOT NULL,
-- This is an integer just to match `room_memberships` and also means we don't need
-- to do any casting.
forgotten INTEGER DEFAULT 0 NOT NULL,
-- `stream_ordering` of the `membership_event_id`
event_stream_ordering BIGINT NOT NULL REFERENCES events(stream_ordering),
-- For remote invites/knocks that don't include any stripped state, we want to be
-- able to distinguish between a room with `None` as a valid value for some state
-- and a room where the state is completely unknown. Basically, this should be True
-- unless no stripped state was provided for a remote invite/knock (False).
has_known_state BOOLEAN DEFAULT FALSE NOT NULL,
-- `m.room.create` -> `content.type` (according to the current state at the time of
-- the membership).
--
-- Useful for the `spaces`/`not_spaces` filter in the Sliding Sync API
room_type TEXT,
-- `m.room.name` -> `content.name` (according to the current state at the time of
-- the membership).
--
-- Useful for the room meta data and `room_name_like` filter in the Sliding Sync API
room_name TEXT,
-- `m.room.encryption` -> `content.algorithm` (according to the current state at the
-- time of the membership).
--
-- Useful for the `is_encrypted` filter in the Sliding Sync API
is_encrypted BOOLEAN DEFAULT FALSE NOT NULL,
-- `m.room.tombstone` -> `content.replacement_room` (according to the current state at the
-- time of the membership).
--
-- Useful for the `include_old_rooms` functionality in the Sliding Sync API
tombstone_successor_room_id TEXT,
PRIMARY KEY (room_id, user_id)
);
-- So we can purge rooms easily.
--
-- Since we're using a multi-column index as the primary key (room_id, user_id), the
-- first index column (room_id) is always usable for searching, so we don't need to
-- create a separate index for it.
--
-- CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_room_id ON sliding_sync_membership_snapshots(room_id);
-- So we can fetch all rooms for a given user
CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_user_id ON sliding_sync_membership_snapshots(user_id);
-- So we can sort by `stream_ordering`
CREATE UNIQUE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_event_stream_ordering ON sliding_sync_membership_snapshots(event_stream_ordering);
-- Add a series of background updates to populate the new `sliding_sync_joined_rooms` table:
--
-- 1. Add a background update to prefill `sliding_sync_joined_rooms_to_recalculate`.
-- We do a one-shot bulk insert from the `rooms` table to prefill.
-- 2. Add a background update to populate the new `sliding_sync_joined_rooms` table
--
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(8701, 'sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update', '{}');
INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
(8701, 'sliding_sync_joined_rooms_bg_update', '{}', 'sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update');
-- Add a background update to populate the new `sliding_sync_membership_snapshots` table
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(8701, 'sliding_sync_membership_snapshots_bg_update', '{}');
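
A hedged sketch (hypothetical helper, not from this diff) of the stripped-state rule spelled out in the `sliding_sync_membership_snapshots` comments above: no stripped state at all leaves `has_known_state` False, while any stripped state is treated as complete, so absent event types default to "not present".

```python
from typing import Any, Dict, List, Optional


def snapshot_from_stripped_state(
    stripped_state: Optional[List[Dict[str, Any]]],
) -> Dict[str, Any]:
    if stripped_state is None:
        # No stripped state was provided: the room's state is completely
        # unknown, not merely empty.
        return {"has_known_state": False, "is_encrypted": False, "room_type": None}

    def get_content(event_type: str) -> Dict[str, Any]:
        for event in stripped_state:
            if event["type"] == event_type:
                return event.get("content", {})
        return {}

    return {
        "has_known_state": True,
        # A missing m.room.encryption event is read as "not encrypted".
        "is_encrypted": get_content("m.room.encryption").get("algorithm") is not None,
        # m.room.create -> content.type (None for ordinary rooms).
        "room_type": get_content("m.room.create").get("type"),
    }
```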


@@ -30,6 +30,7 @@ if TYPE_CHECKING or HAS_PYDANTIC_V2:
else:
from pydantic import Extra
from synapse.api.constants import EventTypes
from synapse.events import EventBase
from synapse.types import (
DeviceListUpdates,
@@ -45,6 +46,18 @@ from synapse.types.rest.client import SlidingSyncBody
if TYPE_CHECKING:
from synapse.handlers.relations import BundledAggregations
# Sliding Sync: The event types that clients should consider as new activity and
# that affect the `bump_stamp`
SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES = {
EventTypes.Create,
EventTypes.Message,
EventTypes.Encrypted,
EventTypes.Sticker,
EventTypes.CallInvite,
EventTypes.PollStart,
EventTypes.LiveLocationShareStart,
}
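
# Hedged illustration (hypothetical helper, not part of this module; the real
# computation lives in the storage layer; assumes Sequence/Optional/EventBase
# are in scope): the bump stamp is the stream ordering of the latest event
# whose type counts as new activity per the set above.
def _example_compute_bump_stamp(events: Sequence[EventBase]) -> Optional[int]:
    orderings = [
        event.internal_metadata.stream_ordering
        for event in events
        if event.type in SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES
    ]
    return max(orderings, default=None)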
class ShutdownRoomParams(TypedDict):
"""
@@ -409,7 +422,7 @@ class SlidingSyncResult:
)
next_pos: SlidingSyncStreamToken
lists: Dict[str, SlidingWindowList]
lists: Mapping[str, SlidingWindowList]
rooms: Dict[str, RoomResult]
extensions: Extensions


@@ -620,7 +620,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
now_token = self.event_sources.get_current_token()
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=now_token,
to_token=now_token,
@@ -647,7 +647,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
after_room_token = self.event_sources.get_current_token()
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room_token,
to_token=after_room_token,
@@ -682,7 +682,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
after_room_token = self.event_sources.get_current_token()
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_room_token,
to_token=after_room_token,
@@ -756,7 +756,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
after_room_token = self.event_sources.get_current_token()
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room_token,
to_token=after_room_token,
@@ -828,7 +828,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
after_kick_token = self.event_sources.get_current_token()
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_kick_token,
to_token=after_kick_token,
@@ -921,7 +921,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
self.assertEqual(channel.code, 200, channel.result)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room_forgets,
to_token=before_room_forgets,
@@ -951,7 +951,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
after_room2_token = self.event_sources.get_current_token()
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_room1_token,
to_token=after_room2_token,
@@ -1001,7 +1001,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
self.helper.join(room_id2, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room1_token,
to_token=after_room1_token,
@@ -1041,7 +1041,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room1_token,
to_token=after_room1_token,
@@ -1088,7 +1088,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_room1_token,
to_token=after_room1_token,
@@ -1152,7 +1152,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
leave_response = self.helper.leave(kick_room_id, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_kick_token,
to_token=after_kick_token,
@@ -1208,7 +1208,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
leave_response2 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room1_token,
to_token=after_room1_token,
@@ -1263,7 +1263,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room1_token,
to_token=after_room1_token,
@@ -1322,7 +1322,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
self.helper.join(room_id2, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=None,
to_token=after_room1_token,
@@ -1404,7 +1404,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
self.helper.join(room_id4, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=from_token,
to_token=to_token,
@@ -1477,7 +1477,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
self.helper.leave(room_id1, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_room1_token,
to_token=after_room1_token,
@@ -1520,7 +1520,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
self.helper.join(room_id1, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_room1_token,
to_token=after_room1_token,
@@ -1570,7 +1570,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room1_token,
to_token=after_room1_token,
@@ -1632,7 +1632,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_room1_token,
to_token=after_room1_token,
@@ -1691,7 +1691,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_room1_token,
to_token=after_room1_token,
@@ -1765,7 +1765,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room1_token,
to_token=after_room1_token,
@@ -1830,7 +1830,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
after_change1_token = self.event_sources.get_current_token()
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_room1_token,
to_token=after_change1_token,
@@ -1902,7 +1902,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_room1_token,
to_token=after_room1_token,
@@ -1984,7 +1984,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
self.helper.leave(room_id1, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room1_token,
to_token=after_room1_token,
@@ -2052,7 +2052,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room1_token,
to_token=after_room1_token,
@@ -2088,7 +2088,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
after_more_changes_token = self.event_sources.get_current_token()
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_room1_token,
to_token=after_more_changes_token,
@@ -2153,7 +2153,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
after_room1_token = self.event_sources.get_current_token()
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room1_token,
to_token=after_room1_token,
@@ -2229,7 +2229,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
self.helper.leave(room_id3, user1_id, tok=user1_tok)
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room3_token,
to_token=after_room3_token,
@@ -2365,7 +2365,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
# The function under test
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_reset_token,
to_token=after_reset_token,
@@ -2579,7 +2579,7 @@ class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCa
# The function under test
room_id_results = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_stuck_activity_token,
to_token=stuck_activity_token,
@@ -2669,14 +2669,14 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
Get the rooms the user should be syncing with
"""
room_membership_for_user_map = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
user=user,
from_token=from_token,
to_token=to_token,
)
)
filtered_sync_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms_relevant_for_sync(
self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync(
user=user,
room_membership_for_user_map=room_membership_for_user_map,
)
@@ -3030,14 +3030,14 @@ class FilterRoomsTestCase(HomeserverTestCase):
Get the rooms the user should be syncing with
"""
room_membership_for_user_map = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
user=user,
from_token=from_token,
to_token=to_token,
)
)
filtered_sync_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms_relevant_for_sync(
self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync(
user=user,
room_membership_for_user_map=room_membership_for_user_map,
)
@@ -3196,7 +3196,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_dm=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3210,7 +3210,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_dm=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3252,7 +3252,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3266,7 +3266,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3316,7 +3316,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3330,7 +3330,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3390,7 +3390,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3404,7 +3404,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3463,7 +3463,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3484,7 +3484,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3533,7 +3533,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3549,7 +3549,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3619,7 +3619,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3637,7 +3637,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3700,7 +3700,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3716,7 +3716,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_encrypted=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3760,7 +3760,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_invite=True`
truthy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3774,7 +3774,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try with `is_invite=False`
falsy_filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3827,7 +3827,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding only normal rooms
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
@@ -3839,7 +3839,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding only spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
@@ -3851,7 +3851,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding normal rooms and spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3865,7 +3865,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding an arbitrary room type
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3918,7 +3918,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding *NOT* normal rooms
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(not_room_types=[None]),
@@ -3930,7 +3930,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding *NOT* spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3944,7 +3944,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding *NOT* normal rooms or spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3959,7 +3959,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Test how it behaves when we have both `room_types` and `not_room_types`.
# `not_room_types` should win.
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -3975,7 +3975,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Test how it behaves when we have both `room_types` and `not_room_types`.
# `not_room_types` should win.
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(
@@ -4025,7 +4025,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding only normal rooms
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
@@ -4037,7 +4037,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding only spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
@@ -4094,7 +4094,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding only normal rooms
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
@@ -4106,7 +4106,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding only spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
@@ -4152,7 +4152,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding only normal rooms
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
@@ -4166,7 +4166,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding only spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
@@ -4228,7 +4228,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding only normal rooms
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
@@ -4242,7 +4242,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding only spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
@@ -4305,7 +4305,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding only normal rooms
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
@@ -4319,7 +4319,7 @@ class FilterRoomsTestCase(HomeserverTestCase):
# Try finding only spaces
filtered_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms(
self.sliding_sync_handler.room_lists.filter_rooms(
UserID.from_string(user1_id),
sync_room_map,
SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
@@ -4366,14 +4366,14 @@ class SortRoomsTestCase(HomeserverTestCase):
Get the rooms the user should be syncing with
"""
room_membership_for_user_map = self.get_success(
self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
user=user,
from_token=from_token,
to_token=to_token,
)
)
filtered_sync_room_map = self.get_success(
self.sliding_sync_handler.filter_rooms_relevant_for_sync(
self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync(
user=user,
room_membership_for_user_map=room_membership_for_user_map,
)
@@ -4408,7 +4408,7 @@ class SortRoomsTestCase(HomeserverTestCase):
# Sort the rooms (what we're testing)
sorted_sync_rooms = self.get_success(
self.sliding_sync_handler.sort_rooms(
self.sliding_sync_handler.room_lists.sort_rooms(
sync_room_map=sync_room_map,
to_token=after_rooms_token,
)
@@ -4489,7 +4489,7 @@ class SortRoomsTestCase(HomeserverTestCase):
# Sort the rooms (what we're testing)
sorted_sync_rooms = self.get_success(
self.sliding_sync_handler.sort_rooms(
self.sliding_sync_handler.room_lists.sort_rooms(
sync_room_map=sync_room_map,
to_token=after_rooms_token,
)
@@ -4553,7 +4553,7 @@ class SortRoomsTestCase(HomeserverTestCase):
# Sort the rooms (what we're testing)
sorted_sync_rooms = self.get_success(
self.sliding_sync_handler.sort_rooms(
self.sliding_sync_handler.room_lists.sort_rooms(
sync_room_map=sync_room_map,
to_token=after_rooms_token,
)
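
The hunks above are a mechanical refactor: every test call site is redirected from the sliding sync handler itself to its new `room_lists` component. A minimal sketch of the delegation pattern these call sites imply (class and method bodies are illustrative stand-ins, not the actual Synapse code):

class SlidingSyncRoomLists:
    """Hypothetical stand-in: owns room-list computation for sliding sync."""

    async def get_room_membership_for_user_at_to_token(self, user, from_token, to_token):
        ...  # compute memberships between the two stream tokens

    async def filter_rooms_relevant_for_sync(self, user, room_membership_for_user_map):
        ...  # drop memberships that can never appear in a sync response

    async def filter_rooms(self, user, sync_room_map, filters, to_token=None):
        ...  # apply is_dm / is_encrypted / room_types / not_room_types filters

    async def sort_rooms(self, sync_room_map, to_token):
        ...  # order rooms by recency for the sliding window

class SlidingSyncHandler:
    def __init__(self) -> None:
        # The handler composes the component instead of defining these
        # methods itself, hence `handler.room_lists.filter_rooms(...)`.
        self.room_lists = SlidingSyncRoomLists()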

View File

@@ -16,7 +16,7 @@ import logging
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import EventTypes, Membership
from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.room_versions import RoomVersions
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
@@ -44,6 +44,10 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers()
self.state_handler = self.hs.get_state_handler()
persistence = self.hs.get_storage_controllers().persistence
assert persistence is not None
self.persistence = persistence
def test_rooms_meta_when_joined(self) -> None:
"""
@@ -600,16 +604,16 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
Test that `bump_stamp` ignores backfilled events, i.e. events with a
negative stream ordering.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a remote room
creator = "@user:other"
room_id = "!foo:other"
room_version = RoomVersions.V10
shared_kwargs = {
"room_id": room_id,
"room_version": "10",
"room_version": room_version.identifier,
}
create_tuple = self.get_success(
@@ -618,6 +622,12 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
prev_event_ids=[],
type=EventTypes.Create,
state_key="",
content={
# The `ROOM_CREATOR` field could be removed if we used a room
# version > 10 (in favor of relying on `sender`)
EventContentFields.ROOM_CREATOR: creator,
EventContentFields.ROOM_VERSION: room_version.identifier,
},
sender=creator,
**shared_kwargs,
)
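
For context on the `ROOM_CREATOR` comment above: in room versions up to 10 the create event duplicates the creator into its content, while later versions drop that field in favour of the event's `sender`. A hedged sketch of the difference (values illustrative):

create_content_v10 = {
    "creator": "@user:other",  # EventContentFields.ROOM_CREATOR, needed up to v10
    "room_version": "10",      # EventContentFields.ROOM_VERSION
}

create_content_v11 = {
    "room_version": "11",      # no "creator" key: the event's sender is authoritative
}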
@@ -667,22 +677,29 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
]
# Ensure the local HS knows the room version
self.get_success(
self.store.store_room(room_id, creator, False, RoomVersions.V10)
)
self.get_success(self.store.store_room(room_id, creator, False, room_version))
# Persist these events as backfilled events.
persistence = self.hs.get_storage_controllers().persistence
assert persistence is not None
for event, context in remote_events_and_contexts:
self.get_success(persistence.persist_event(event, context, backfilled=True))
self.get_success(
self.persistence.persist_event(event, context, backfilled=True)
)
# Now we join the local user to the room
join_tuple = self.get_success(
# Now we join the local user to the room. We want to make this feel as close to
# the real `process_remote_join()` as possible but we'd like to avoid some of
# the auth checks that would be done in the real code.
#
# FIXME: The test was originally written using this less-real
# `persist_event(...)` shortcut but it would be nice to use the real remote join
# process in a `FederatingHomeserverTestCase`.
flawed_join_tuple = self.get_success(
create_event(
self.hs,
prev_event_ids=[invite_tuple[0].event_id],
# This doesn't correctly create an `EventContext` that includes
# both of these state events. I assume it's because we're working on our
# local homeserver which has the remote state set as `outlier`. We have
# to create our own EventContext below to get this right.
auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id],
type=EventTypes.Member,
state_key=user1_id,
@@ -691,7 +708,22 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
**shared_kwargs,
)
)
self.get_success(persistence.persist_event(*join_tuple))
# We have to create our own context to get the state set correctly. If we use
# the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
# table will only have the join event in it which should never happen in our
# real server.
join_event = flawed_join_tuple[0]
join_context = self.get_success(
self.state_handler.compute_event_context(
join_event,
state_ids_before_event={
(e.type, e.state_key): e.event_id
for e in [create_tuple[0], invite_tuple[0]]
},
partial_state=False,
)
)
self.get_success(self.persistence.persist_event(join_event, join_context))
# Doing an SS request should return a positive `bump_stamp`, even though
# the only event that matches the bump types has a negative stream
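
The property that comment describes: backfilled events are persisted with negative stream orderings, and `bump_stamp` must not be dragged negative by them. An illustrative toy model only (not Synapse's implementation; the real fallback uses the membership event's position rather than 0):

def compute_bump_stamp(matching_event_orderings: list[int]) -> int:
    # Only events persisted live (positive stream ordering) may bump;
    # backfilled history (negative ordering) is ignored.
    live = [o for o in matching_event_orderings if o > 0]
    return max(live, default=0)  # 0 stands in for the real fallback

assert compute_bump_stamp([-3, -2, 5]) == 5  # backfill ignored
assert compute_bump_stamp([-3, -2]) == 0     # only backfill: fall back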

View File

@@ -112,6 +112,24 @@ class UpdateUpsertManyTests(unittest.HomeserverTestCase):
{(1, "user1", "hello"), (2, "user2", "bleb")},
)
self.get_success(
self.storage.db_pool.runInteraction(
"test",
self.storage.db_pool.simple_upsert_many_txn,
self.table_name,
key_names=key_names,
key_values=[[2, "user2"]],
value_names=[],
value_values=[],
)
)
# Check results are what we expect
self.assertEqual(
set(self._dump_table_to_tuple()),
{(1, "user1", "hello"), (2, "user2", "bleb")},
)
def test_simple_update_many(self) -> None:
"""
simple_update_many performs many updates at once.
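
The block added above exercises the key-only path of `simple_upsert_many_txn`: with no value columns, a conflicting row must be left exactly as it was. The SQL this reduces to is roughly an insert whose conflict branch does nothing; a standalone sketch (sqlite3 used purely for illustration):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE t (id INTEGER, username TEXT, value TEXT, UNIQUE (id, username))"
)
conn.executemany(
    "INSERT INTO t VALUES (?, ?, ?)",
    [(1, "user1", "hello"), (2, "user2", "bleb")],
)

# Key-only upsert: there is nothing to SET, so a conflict is a no-op and
# user2 keeps its existing value.
conn.execute(
    "INSERT INTO t (id, username) VALUES (2, 'user2')"
    " ON CONFLICT (id, username) DO NOTHING"
)
assert set(conn.execute("SELECT * FROM t")) == {(1, "user1", "hello"), (2, "user2", "bleb")}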

View File

@@ -19,6 +19,7 @@
#
#
import logging
from typing import List, Optional
from twisted.test.proto_helpers import MemoryReactor
@@ -35,6 +36,8 @@ from synapse.util import Clock
from tests.unittest import HomeserverTestCase
logger = logging.getLogger(__name__)
class ExtremPruneTestCase(HomeserverTestCase):
servlets = [

View File

@@ -24,7 +24,7 @@ from typing import List, Optional, Tuple, cast
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.constants import EventContentFields, EventTypes, JoinRules, Membership
from synapse.api.room_versions import RoomVersions
from synapse.rest import admin
from synapse.rest.admin import register_servlets_for_client_rest_resource
@@ -38,6 +38,7 @@ from synapse.util import Clock
from tests import unittest
from tests.server import TestHomeServer
from tests.test_utils import event_injection
from tests.test_utils.event_injection import create_event
from tests.unittest import skip_unless
logger = logging.getLogger(__name__)
@@ -54,6 +55,10 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
# We can't test the RoomMemberStore on its own without the other event
# storage logic
self.store = hs.get_datastores().main
self.state_handler = self.hs.get_state_handler()
persistence = self.hs.get_storage_controllers().persistence
assert persistence is not None
self.persistence = persistence
self.u_alice = self.register_user("alice", "pass")
self.t_alice = self.login("alice", "pass")
@@ -220,31 +225,166 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
)
def test_join_locally_forgotten_room(self) -> None:
"""Tests if a user joins a forgotten room the room is not forgotten anymore."""
self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
self.assertFalse(
self.get_success(self.store.is_locally_forgotten_room(self.room))
)
"""
Tests if a user joins a forgotten room, the room is not forgotten anymore.
# after leaving and forget the room, it is forgotten
self.get_success(
event_injection.inject_member_event(
self.hs, self.room, self.u_alice, "leave"
Since a room can't be re-joined once everyone has left, this can only happen in
a room with remote users in it.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a remote room
creator = "@user:other"
room_id = "!foo:other"
room_version = RoomVersions.V10
shared_kwargs = {
"room_id": room_id,
"room_version": room_version.identifier,
}
create_tuple = self.get_success(
create_event(
self.hs,
prev_event_ids=[],
type=EventTypes.Create,
state_key="",
content={
# The `ROOM_CREATOR` field could be removed if we used a room
# version > 10 (in favor of relying on `sender`)
EventContentFields.ROOM_CREATOR: creator,
EventContentFields.ROOM_VERSION: room_version.identifier,
},
sender=creator,
**shared_kwargs,
)
)
self.get_success(self.store.forget(self.u_alice, self.room))
self.assertTrue(
self.get_success(self.store.is_locally_forgotten_room(self.room))
)
# after rejoin the room is not forgotten anymore
self.get_success(
event_injection.inject_member_event(
self.hs, self.room, self.u_alice, "join"
creator_tuple = self.get_success(
create_event(
self.hs,
prev_event_ids=[create_tuple[0].event_id],
auth_event_ids=[create_tuple[0].event_id],
type=EventTypes.Member,
state_key=creator,
content={"membership": Membership.JOIN},
sender=creator,
**shared_kwargs,
)
)
remote_events_and_contexts = [
create_tuple,
creator_tuple,
]
# Ensure the local HS knows the room version
self.get_success(self.store.store_room(room_id, creator, False, room_version))
# Persist these events as backfilled events.
for event, context in remote_events_and_contexts:
self.get_success(
self.persistence.persist_event(event, context, backfilled=True)
)
# Now we join the local user to the room. We want to make this feel as close to
# the real `process_remote_join()` as possible but we'd like to avoid some of
# the auth checks that would be done in the real code.
#
# FIXME: The test was originally written using this less-real
# `persist_event(...)` shortcut but it would be nice to use the real remote join
# process in a `FederatingHomeserverTestCase`.
flawed_join_tuple = self.get_success(
create_event(
self.hs,
prev_event_ids=[creator_tuple[0].event_id],
# This doesn't correctly create an `EventContext` that includes
# both of these state events. I assume it's because we're working on our
# local homeserver which has the remote state set as `outlier`. We have
# to create our own EventContext below to get this right.
auth_event_ids=[create_tuple[0].event_id],
type=EventTypes.Member,
state_key=user1_id,
content={"membership": Membership.JOIN},
sender=user1_id,
**shared_kwargs,
)
)
# We have to create our own context to get the state set correctly. If we use
# the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
# table will only have the join event in it which should never happen in our
# real server.
join_event = flawed_join_tuple[0]
join_context = self.get_success(
self.state_handler.compute_event_context(
join_event,
state_ids_before_event={
(e.type, e.state_key): e.event_id for e in [create_tuple[0]]
},
partial_state=False,
)
)
self.get_success(self.persistence.persist_event(join_event, join_context))
# The room shouldn't be forgotten because the local user just joined
self.assertFalse(
self.get_success(self.store.is_locally_forgotten_room(self.room))
self.get_success(self.store.is_locally_forgotten_room(room_id))
)
# After all of the local users (there is only user1) leave and forget the
# room, it is forgotten
user1_leave_response = self.helper.leave(room_id, user1_id, tok=user1_tok)
user1_leave_event = self.get_success(
self.store.get_event(user1_leave_response["event_id"])
)
self.get_success(self.store.forget(user1_id, room_id))
self.assertTrue(self.get_success(self.store.is_locally_forgotten_room(room_id)))
# Join the local user to the room (again). We want to make this feel as close to
# the real `process_remote_join()` as possible but we'd like to avoid some of
# the auth checks that would be done in the real code.
#
# FIXME: The test was originally written using this less-real
# `event_injection.inject_member_event(...)` shortcut but it would be nice to
# use the real remote join process in a `FederatingHomeserverTestCase`.
flawed_join_tuple = self.get_success(
create_event(
self.hs,
prev_event_ids=[user1_leave_response["event_id"]],
# This doesn't correctly create an `EventContext` that includes
# both of these state events. I assume it's because we're working on our
# local homeserver which has the remote state set as `outlier`. We have
# to create our own EventContext below to get this right.
auth_event_ids=[
create_tuple[0].event_id,
user1_leave_response["event_id"],
],
type=EventTypes.Member,
state_key=user1_id,
content={"membership": Membership.JOIN},
sender=user1_id,
**shared_kwargs,
)
)
# We have to create our own context to get the state set correctly. If we use
# the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
# table will only have the join event in it which should never happen in our
# real server.
join_event = flawed_join_tuple[0]
join_context = self.get_success(
self.state_handler.compute_event_context(
join_event,
state_ids_before_event={
(e.type, e.state_key): e.event_id
for e in [create_tuple[0], user1_leave_event]
},
partial_state=False,
)
)
self.get_success(self.persistence.persist_event(join_event, join_context))
# After the local user rejoins the remote room, it isn't forgotten anymore
self.assertFalse(
self.get_success(self.store.is_locally_forgotten_room(room_id))
)
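
Summarising the invariant the rewritten test pins down, since it is easy to lose in the scaffolding: a room counts as locally forgotten only while every local membership in it is marked forgotten, and any later local join clears that state. A toy model of just that bookkeeping (not the actual storage logic):

forgotten_by_local_user: dict[str, bool] = {}  # local user -> forgotten?

def on_local_join(user_id: str) -> None:
    forgotten_by_local_user[user_id] = False  # a fresh join is never forgotten

def on_forget(user_id: str) -> None:
    forgotten_by_local_user[user_id] = True

def is_locally_forgotten_room() -> bool:
    memberships = forgotten_by_local_user.values()
    return bool(memberships) and all(memberships)

on_local_join("@user1:test")
assert not is_locally_forgotten_room()
on_forget("@user1:test")
assert is_locally_forgotten_room()
on_local_join("@user1:test")  # the rejoin via the remote room
assert not is_locally_forgotten_room()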

File diff suppressed because it is too large

View File

@@ -272,8 +272,8 @@ class TestCase(unittest.TestCase):
def assertIncludes(
self,
actual_items: AbstractSet[str],
expected_items: AbstractSet[str],
actual_items: AbstractSet[TV],
expected_items: AbstractSet[TV],
exact: bool = False,
message: Optional[str] = None,
) -> None:
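
This hunk widens `assertIncludes` from sets of strings to sets of any element type by binding both arguments to the same `TypeVar`. A self-contained sketch of the generic shape (the body is an assumed approximation of the helper's behaviour):

from typing import AbstractSet, Optional, TypeVar

TV = TypeVar("TV")

def assert_includes(
    actual_items: AbstractSet[TV],
    expected_items: AbstractSet[TV],
    exact: bool = False,
    message: Optional[str] = None,
) -> None:
    # Everything expected must be present...
    missing = expected_items - actual_items
    assert not missing, message or f"missing items: {missing!r}"
    # ...and with `exact=True`, nothing extra may be present either.
    if exact:
        extra = actual_items - expected_items
        assert not extra, message or f"unexpected items: {extra!r}"

With both parameters sharing `TV`, calls now type-check for non-string elements, e.g. sets of `(type, state_key)` tuples, while still requiring the two sets to agree on element type.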