Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-19 02:20:44 +00:00)

Compare commits: quenting/l... → erikj/msc4... (10 commits)
Commits in this compare:

- 981746c495
- d135aedff2
- bd5f124f11
- 56b6b30e41
- b0afe11ca1
- b4e1825a54
- b42ddb7203
- 9089494d51
- f5bbcb1938
- 6620edc4cb
changelog.d/17888.feature (new file, 1 line)

```diff
@@ -0,0 +1 @@
+Add experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222).
```
docs/admin_api/experimental_features.md

```diff
@@ -5,6 +5,7 @@ basis. The currently supported features are:
 - [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
   for another client
 - [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support
+- [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222): adding `state_after` to sync v2

 To use it, you will need to authenticate by providing an `access_token`
 for a server admin: see [Admin API](../usage/administration/admin_api/).
```
synapse/config/experimental.py

```diff
@@ -450,3 +450,6 @@ class ExperimentalConfig(Config):

         # MSC4210: Remove legacy mentions
         self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False)
+
+        # MSC4222: Adding `state_after` to sync v2
+        self.msc4222_enabled: bool = experimental.get("msc4222_enabled", False)
```
synapse/handlers/sync.py

```diff
@@ -143,6 +143,7 @@ class SyncConfig:
     filter_collection: FilterCollection
     is_guest: bool
    device_id: Optional[str]
+    use_state_after: bool


 @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -1141,6 +1142,7 @@ class SyncHandler:
         since_token: Optional[StreamToken],
         end_token: StreamToken,
         full_state: bool,
+        joined: bool,
     ) -> MutableStateMap[EventBase]:
         """Works out the difference in state between the end of the previous sync and
         the start of the timeline.
@@ -1155,6 +1157,7 @@ class SyncHandler:
                 the point just after their leave event.
             full_state: Whether to force returning the full state.
                 `lazy_load_members` still applies when `full_state` is `True`.
+            joined: whether the user is currently joined to the room

         Returns:
             The state to return in the sync response for the room.
@@ -1230,11 +1233,12 @@ class SyncHandler:
         if full_state:
             state_ids = await self._compute_state_delta_for_full_sync(
                 room_id,
-                sync_config.user,
+                sync_config,
                 batch,
                 end_token,
                 members_to_fetch,
                 timeline_state,
+                joined,
             )
         else:
             # If this is an initial sync then full_state should be set, and
@@ -1244,6 +1248,7 @@ class SyncHandler:

             state_ids = await self._compute_state_delta_for_incremental_sync(
                 room_id,
+                sync_config,
                 batch,
                 since_token,
                 end_token,
```
```diff
@@ -1316,20 +1321,24 @@ class SyncHandler:
     async def _compute_state_delta_for_full_sync(
         self,
         room_id: str,
-        syncing_user: UserID,
+        sync_config: SyncConfig,
         batch: TimelineBatch,
         end_token: StreamToken,
         members_to_fetch: Optional[Set[str]],
         timeline_state: StateMap[str],
+        joined: bool,
     ) -> StateMap[str]:
         """Calculate the state events to be included in a full sync response.

         As with `_compute_state_delta_for_incremental_sync`, the result will include
         the membership events for the senders of each event in `members_to_fetch`.

+        Note that whether this returns the state at the start or the end of the
+        batch depends on `sync_config.use_state_after` (c.f. MSC4222).
+
         Args:
             room_id: The room we are calculating for.
-            syncing_user: The user that is calling `/sync`.
+            sync_config: The user that is calling `/sync`.
             batch: The timeline batch for the room that will be sent to the user.
             end_token: Token of the end of the current batch. Normally this will be
                 the same as the global "now_token", but if the user has left the room,
```
```diff
@@ -1338,10 +1347,11 @@ class SyncHandler:
                 events in the timeline.
             timeline_state: The contribution to the room state from state events in
                 `batch`. Only contains the last event for any given state key.
+            joined: whether the user is currently joined to the room

         Returns:
             A map from (type, state_key) to event_id, for each event that we believe
-            should be included in the `state` part of the sync response.
+            should be included in the `state` or `state_after` part of the sync response.
         """
         if members_to_fetch is not None:
             # Lazy-loading of membership events is enabled.
@@ -1359,7 +1369,7 @@ class SyncHandler:
             # is no guarantee that our membership will be in the auth events of
             # timeline events when the room is partial stated.
             state_filter = StateFilter.from_lazy_load_member_list(
-                members_to_fetch.union((syncing_user.to_string(),))
+                members_to_fetch.union((sync_config.user.to_string(),))
             )

             # We are happy to use partial state to compute the `/sync` response.
```
```diff
@@ -1373,6 +1383,61 @@ class SyncHandler:
             await_full_state = True
             lazy_load_members = False

+        # Check if we are wanting to return the state at the start or end of the
+        # timeline. If at the end we can just use the current state.
+        if sync_config.use_state_after:
+            # If we're getting the state at the end of the timeline, we can just
+            # use the current state of the room (and roll back any changes
+            # between when we fetched the current state and `end_token`).
+            #
+            # For rooms we're not joined to, there might be a very large number
+            # of deltas between `end_token` and "now", and so instead we fetch
+            # the state at the end of the timeline.
+            if joined:
+                state_ids = await self._state_storage_controller.get_current_state_ids(
+                    room_id,
+                    state_filter=state_filter,
+                    await_full_state=await_full_state,
+                )
+
+                # Now roll back the state by looking at the state deltas between
+                # end_token and now.
+                deltas = await self.store.get_current_state_deltas_for_room(
+                    room_id,
+                    from_token=end_token.room_key,
+                    to_token=self.store.get_room_max_token(),
+                )
+                if deltas:
+                    mutable_state_ids = dict(state_ids)
+
+                    # We iterate over the deltas backwards so that if there are
+                    # multiple changes of the same type/state_key we'll
+                    # correctly pick the earliest delta.
+                    for delta in reversed(deltas):
+                        if delta.prev_event_id:
+                            mutable_state_ids[(delta.event_type, delta.state_key)] = (
+                                delta.prev_event_id
+                            )
+                        elif (delta.event_type, delta.state_key) in mutable_state_ids:
+                            mutable_state_ids.pop((delta.event_type, delta.state_key))
+
+                    state_ids = mutable_state_ids
+
+                return state_ids
+
+            else:
+                # Just use state groups to get the state at the end of the
+                # timeline, i.e. the state at the leave/etc event.
+                state_at_timeline_end = (
+                    await self._state_storage_controller.get_state_ids_at(
+                        room_id,
+                        stream_position=end_token,
+                        state_filter=state_filter,
+                        await_full_state=await_full_state,
+                    )
+                )
+                return state_at_timeline_end
+
         state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
             room_id,
             stream_position=end_token,
```
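The joined-room branch above is the interesting part: rather than paginating state at `end_token`, it grabs the room's *current* state once and then rewinds it using the delta stream. A minimal standalone sketch of that rollback step, using a hypothetical `StateDelta` container in place of Synapse's storage rows:

```python
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

StateKey = Tuple[str, str]  # (event_type, state_key)


@dataclass
class StateDelta:
    # Hypothetical stand-in for a row from the state delta stream: the state
    # for (event_type, state_key) moved from prev_event_id to event_id.
    event_type: str
    state_key: str
    event_id: Optional[str]
    prev_event_id: Optional[str]


def rollback_state(
    current_state: Dict[StateKey, str], deltas: List[StateDelta]
) -> Dict[StateKey, str]:
    """Undo the deltas that happened after `end_token`, newest first.

    Iterating in reverse means that when several deltas touch the same
    (type, state_key), the earliest `prev_event_id` wins, i.e. the value the
    state had at `end_token`.
    """
    state = dict(current_state)
    for delta in reversed(deltas):
        key = (delta.event_type, delta.state_key)
        if delta.prev_event_id:
            state[key] = delta.prev_event_id
        elif key in state:
            # The entry did not exist at end_token, so drop it.
            state.pop(key)
    return state


# Example: a topic changed after end_token rolls back to its previous event.
now = {("m.room.topic", ""): "$new"}
changes = [StateDelta("m.room.topic", "", event_id="$new", prev_event_id="$old")]
assert rollback_state(now, changes) == {("m.room.topic", ""): "$old"}
```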
```diff
@@ -1405,6 +1470,7 @@ class SyncHandler:
     async def _compute_state_delta_for_incremental_sync(
         self,
         room_id: str,
+        sync_config: SyncConfig,
         batch: TimelineBatch,
         since_token: StreamToken,
         end_token: StreamToken,
@@ -1419,8 +1485,12 @@ class SyncHandler:
         (`compute_state_delta`) is responsible for keeping track of which membership
         events we have already sent to the client, and hence ripping them out.

+        Note that whether this returns the state at the start or the end of the
+        batch depends on `sync_config.use_state_after` (c.f. MSC4222).
+
         Args:
             room_id: The room we are calculating for.
+            sync_config
             batch: The timeline batch for the room that will be sent to the user.
             since_token: Token of the end of the previous batch.
             end_token: Token of the end of the current batch. Normally this will be
@@ -1433,7 +1503,7 @@ class SyncHandler:

         Returns:
             A map from (type, state_key) to event_id, for each event that we believe
-            should be included in the `state` part of the sync response.
+            should be included in the `state` or `state_after` part of the sync response.
         """
         if members_to_fetch is not None:
             # Lazy-loading is enabled. Only return the state that is needed.
@@ -1445,6 +1515,51 @@ class SyncHandler:
             await_full_state = True
             lazy_load_members = False

+        # Check if we are wanting to return the state at the start or end of the
+        # timeline. If at the end we can just use the current state delta stream.
+        if sync_config.use_state_after:
+            delta_state_ids: MutableStateMap[str] = {}
+
+            if members_to_fetch is not None:
+                # We're lazy-loading, so the client might need some more member
+                # events to understand the events in this timeline. So we always
+                # fish out all the member events corresponding to the timeline
+                # here. The caller will then dedupe any redundant ones.
+                member_ids = await self._state_storage_controller.get_current_state_ids(
+                    room_id=room_id,
+                    state_filter=StateFilter.from_types(
+                        (EventTypes.Member, member) for member in members_to_fetch
+                    ),
+                    await_full_state=await_full_state,
+                )
+                delta_state_ids.update(member_ids)
+
+            # We don't do LL filtering for incremental syncs - see
+            # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
+            # N.B. this slows down incr syncs as we are now processing way more
+            # state in the server than if we were LLing.
+            #
+            # i.e. we return all state deltas, including membership changes that
+            # we'd normally exclude due to LL.
+            deltas = await self.store.get_current_state_deltas_for_room(
+                room_id=room_id,
+                from_token=since_token.room_key,
+                to_token=end_token.room_key,
+            )
+            for delta in deltas:
+                if delta.event_id is None:
+                    # There was a state reset and this state entry is no longer
+                    # present, but we have no way of informing the client about
+                    # this, so we just skip it for now.
+                    continue
+
+                # Note that deltas are in stream ordering, so if there are
+                # multiple deltas for a given type/state_key we'll always pick
+                # the latest one.
+                delta_state_ids[(delta.event_type, delta.state_key)] = delta.event_id
+
+            return delta_state_ids
+
         # For a non-gappy sync if the events in the timeline are simply a linear
         # chain (i.e. no merging/branching of the graph), then we know the state
         # delta between the end of the previous sync and start of the new one is
```
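For the incremental path the goal is reached even more directly: the deltas between `since_token` and `end_token` *are* the `state_after` contribution, applied in stream order so the latest change per key wins. A hedged sketch of that loop, reusing the `StateDelta` container from the sketch above:

```python
from typing import Dict, List, Tuple


def state_after_for_incremental_sync(
    deltas: List["StateDelta"],
) -> Dict[Tuple[str, str], str]:
    """Collapse the delta stream between since_token and end_token into a
    (type, state_key) -> event_id map, keeping the latest event per key."""
    delta_state_ids: Dict[Tuple[str, str], str] = {}
    for delta in deltas:
        if delta.event_id is None:
            # A state reset removed this entry; the handler above skips these
            # for now since there is no way to tell the client about them.
            continue
        delta_state_ids[(delta.event_type, delta.state_key)] = delta.event_id
    return delta_state_ids
```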
```diff
@@ -2867,6 +2982,7 @@ class SyncHandler:
                     since_token,
                     room_builder.end_token,
                     full_state=full_state,
+                    joined=room_builder.rtype == "joined",
                 )
             else:
                 # An out of band room won't have any state changes.
```
```diff
@@ -43,12 +43,15 @@ class ExperimentalFeature(str, Enum):

     MSC3881 = "msc3881"
     MSC3575 = "msc3575"
+    MSC4222 = "msc4222"

     def is_globally_enabled(self, config: "HomeServerConfig") -> bool:
         if self is ExperimentalFeature.MSC3881:
             return config.experimental.msc3881_enabled
         if self is ExperimentalFeature.MSC3575:
             return config.experimental.msc3575_enabled
+        if self is ExperimentalFeature.MSC4222:
+            return config.experimental.msc4222_enabled

         assert_never(self)

```
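With the enum value in place, MSC4222 can be enabled globally (set `msc4222_enabled: true` under `experimental_features` in `homeserver.yaml`, matching the config key read above) or per user via the experimental-features admin API that the docs hunk points at. A hedged sketch of the per-user toggle; the endpoint path and body shape are assumptions based on that admin API, not something this diff confirms:

```python
import requests  # any HTTP client works; requests is only for illustration

ADMIN_TOKEN = "<admin_access_token>"  # placeholder
USER_ID = "@alice:example.com"  # placeholder

# Assumed endpoint shape for the per-user experimental features admin API;
# "msc4222" matches the enum value added above.
resp = requests.put(
    f"https://homeserver.example/_synapse/admin/v1/experimental_features/{USER_ID}",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={"features": {"msc4222": True}},
)
resp.raise_for_status()
```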
synapse/rest/client/sync.py

```diff
@@ -152,6 +152,14 @@ class SyncRestServlet(RestServlet):
         filter_id = parse_string(request, "filter")
         full_state = parse_boolean(request, "full_state", default=False)

+        use_state_after = False
+        if await self.store.is_feature_enabled(
+            user.to_string(), ExperimentalFeature.MSC4222
+        ):
+            use_state_after = parse_boolean(
+                request, "org.matrix.msc4222.use_state_after", default=False
+            )
+
         logger.debug(
             "/sync: user=%r, timeout=%r, since=%r, "
             "set_presence=%r, filter_id=%r, device_id=%r",
```
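From a client's point of view, opting in is just the unstable query parameter parsed above; it is ignored unless the msc4222 feature is enabled for the requesting user. A hedged example request (hypothetical homeserver URL and token):

```python
import requests

resp = requests.get(
    "https://homeserver.example/_matrix/client/v3/sync",
    params={
        # Unstable parameter name from the diff above.
        "org.matrix.msc4222.use_state_after": "true",
        "timeout": "30000",
    },
    headers={"Authorization": "Bearer <access_token>"},
)
resp.raise_for_status()
sync_body = resp.json()
```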
```diff
@@ -184,6 +192,7 @@ class SyncRestServlet(RestServlet):
             full_state,
             device_id,
             last_ignore_accdata_streampos,
+            use_state_after,
         )

         if filter_id is None:
@@ -220,6 +229,7 @@ class SyncRestServlet(RestServlet):
             filter_collection=filter_collection,
             is_guest=requester.is_guest,
             device_id=device_id,
+            use_state_after=use_state_after,
         )

         since_token = None
@@ -258,7 +268,7 @@ class SyncRestServlet(RestServlet):
         # We know that the the requester has an access token since appservices
         # cannot use sync.
         response_content = await self.encode_response(
-            time_now, sync_result, requester, filter_collection
+            time_now, sync_config, sync_result, requester, filter_collection
         )

         logger.debug("Event formatting complete")
@@ -268,6 +278,7 @@ class SyncRestServlet(RestServlet):
     async def encode_response(
         self,
         time_now: int,
+        sync_config: SyncConfig,
         sync_result: SyncResult,
         requester: Requester,
         filter: FilterCollection,
@@ -292,7 +303,7 @@ class SyncRestServlet(RestServlet):
         )

         joined = await self.encode_joined(
-            sync_result.joined, time_now, serialize_options
+            sync_config, sync_result.joined, time_now, serialize_options
         )

         invited = await self.encode_invited(
@@ -304,7 +315,7 @@ class SyncRestServlet(RestServlet):
         )

         archived = await self.encode_archived(
-            sync_result.archived, time_now, serialize_options
+            sync_config, sync_result.archived, time_now, serialize_options
         )

         logger.debug("building sync response dict")
@@ -372,6 +383,7 @@ class SyncRestServlet(RestServlet):
     @trace_with_opname("sync.encode_joined")
     async def encode_joined(
         self,
+        sync_config: SyncConfig,
         rooms: List[JoinedSyncResult],
         time_now: int,
         serialize_options: SerializeEventConfig,
@@ -380,6 +392,7 @@ class SyncRestServlet(RestServlet):
         Encode the joined rooms in a sync result

         Args:
+            sync_config
             rooms: list of sync results for rooms this user is joined to
             time_now: current time - used as a baseline for age calculations
             serialize_options: Event serializer options
@@ -389,7 +402,11 @@ class SyncRestServlet(RestServlet):
         joined = {}
         for room in rooms:
             joined[room.room_id] = await self.encode_room(
-                room, time_now, joined=True, serialize_options=serialize_options
+                sync_config,
+                room,
+                time_now,
+                joined=True,
+                serialize_options=serialize_options,
             )

         return joined
@@ -477,6 +494,7 @@ class SyncRestServlet(RestServlet):
     @trace_with_opname("sync.encode_archived")
     async def encode_archived(
         self,
+        sync_config: SyncConfig,
         rooms: List[ArchivedSyncResult],
         time_now: int,
         serialize_options: SerializeEventConfig,
@@ -485,6 +503,7 @@ class SyncRestServlet(RestServlet):
         Encode the archived rooms in a sync result

         Args:
+            sync_config
             rooms: list of sync results for rooms this user is joined to
             time_now: current time - used as a baseline for age calculations
             serialize_options: Event serializer options
@@ -494,13 +513,18 @@ class SyncRestServlet(RestServlet):
         joined = {}
         for room in rooms:
             joined[room.room_id] = await self.encode_room(
-                room, time_now, joined=False, serialize_options=serialize_options
+                sync_config,
+                room,
+                time_now,
+                joined=False,
+                serialize_options=serialize_options,
             )

         return joined

     async def encode_room(
         self,
+        sync_config: SyncConfig,
         room: Union[JoinedSyncResult, ArchivedSyncResult],
         time_now: int,
         joined: bool,
@@ -508,6 +532,7 @@ class SyncRestServlet(RestServlet):
     ) -> JsonDict:
         """
         Args:
+            sync_config
             room: sync result for a single room
             time_now: current time - used as a baseline for age calculations
             token_id: ID of the user's auth token - used for namespacing
@@ -548,13 +573,20 @@ class SyncRestServlet(RestServlet):

         account_data = room.account_data

+        # We either include a `state` or `state_after` field depending on
+        # whether the client has opted in to the newer `state_after` behavior.
+        if sync_config.use_state_after:
+            state_key_name = "org.matrix.msc4222.state_after"
+        else:
+            state_key_name = "state"
+
         result: JsonDict = {
             "timeline": {
                 "events": serialized_timeline,
                 "prev_batch": await room.timeline.prev_batch.to_string(self.store),
                 "limited": room.timeline.limited,
             },
-            "state": {"events": serialized_state},
+            state_key_name: {"events": serialized_state},
             "account_data": {"events": account_data},
         }

```
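For an opted-in client the per-room payload therefore swaps the `state` key for the unstable `state_after` name, and the events it carries describe the state at the *end* of the timeline rather than the start. An illustrative shape with hypothetical values (event bodies elided):

```python
room_payload = {
    "timeline": {
        "events": [],  # serialized timeline events
        "prev_batch": "s72594_4483_1934",  # hypothetical stream token
        "limited": False,
    },
    # Replaces "state" when `use_state_after` was requested.
    "org.matrix.msc4222.state_after": {"events": []},  # serialized state events
    "account_data": {"events": []},
}
```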
```diff
@@ -688,6 +720,7 @@ class SlidingSyncE2eeRestServlet(RestServlet):
             filter_collection=self.only_member_events_filter_collection,
             is_guest=requester.is_guest,
             device_id=device_id,
+            use_state_after=False,  # We don't return any rooms so this flag is a no-op
         )

         since_token = None
```
tests/handlers/test_sync.py

```diff
@@ -20,7 +20,7 @@
 from typing import Collection, ContextManager, List, Optional
 from unittest.mock import AsyncMock, Mock, patch

-from parameterized import parameterized
+from parameterized import parameterized, parameterized_class

 from twisted.internet import defer
 from twisted.test.proto_helpers import MemoryReactor
@@ -58,9 +58,21 @@ def generate_request_key() -> SyncRequestKey:
     return ("request_key", _request_key)


+@parameterized_class(
+    ("use_state_after",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'state_after' if params_dict['use_state_after'] else 'state'}",
+)
 class SyncTestCase(tests.unittest.HomeserverTestCase):
     """Tests Sync Handler."""

+    use_state_after: bool
+
     servlets = [
         admin.register_servlets,
         knock.register_servlets,
```
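The `@parameterized_class` decorator above duplicates the whole `SyncTestCase` class, once per parameter tuple, setting `use_state_after` as a class attribute on each copy; `class_name_func` only controls the generated class names (`SyncTestCase_state_after` / `SyncTestCase_state`). A minimal self-contained sketch of the same pattern (hypothetical `ExampleTest`, not part of this diff):

```python
import unittest

from parameterized import parameterized_class


@parameterized_class(
    ("flag",),
    [
        (True,),
        (False,),
    ],
)
class ExampleTest(unittest.TestCase):
    flag: bool  # set by the decorator on each generated class

    def test_flag_is_bool(self) -> None:
        # Runs twice: once with flag=True, once with flag=False.
        self.assertIsInstance(self.flag, bool)
```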
```diff
@@ -79,7 +91,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
     def test_wait_for_sync_for_user_auth_blocking(self) -> None:
         user_id1 = "@user1:test"
         user_id2 = "@user2:test"
-        sync_config = generate_sync_config(user_id1)
+        sync_config = generate_sync_config(
+            user_id1, use_state_after=self.use_state_after
+        )
         requester = create_requester(user_id1)

         self.reactor.advance(100)  # So we get not 0 time
@@ -112,7 +126,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):

         self.auth_blocking._hs_disabled = False

-        sync_config = generate_sync_config(user_id2)
+        sync_config = generate_sync_config(
+            user_id2, use_state_after=self.use_state_after
+        )
         requester = create_requester(user_id2)

         e = self.get_failure(
@@ -141,7 +157,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         initial_result = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 requester,
-                sync_config=generate_sync_config(user, device_id="dev"),
+                sync_config=generate_sync_config(
+                    user, device_id="dev", use_state_after=self.use_state_after
+                ),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
             )
@@ -175,7 +193,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         result = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 requester,
-                sync_config=generate_sync_config(user),
+                sync_config=generate_sync_config(
+                    user, use_state_after=self.use_state_after
+                ),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
             )
@@ -188,7 +208,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         result = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 requester,
-                sync_config=generate_sync_config(user, device_id="dev"),
+                sync_config=generate_sync_config(
+                    user, device_id="dev", use_state_after=self.use_state_after
+                ),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
                 since_token=initial_result.next_batch,
@@ -220,7 +242,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         result = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 requester,
-                sync_config=generate_sync_config(user),
+                sync_config=generate_sync_config(
+                    user, use_state_after=self.use_state_after
+                ),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
             )
@@ -233,7 +257,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         result = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 requester,
-                sync_config=generate_sync_config(user, device_id="dev"),
+                sync_config=generate_sync_config(
+                    user, device_id="dev", use_state_after=self.use_state_after
+                ),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
                 since_token=initial_result.next_batch,
@@ -276,7 +302,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         alice_sync_result: SyncResult = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 create_requester(owner),
-                generate_sync_config(owner),
+                generate_sync_config(owner, use_state_after=self.use_state_after),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
             )
```
```diff
@@ -296,7 +322,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):

         # Eve syncs.
         eve_requester = create_requester(eve)
-        eve_sync_config = generate_sync_config(eve)
+        eve_sync_config = generate_sync_config(
+            eve, use_state_after=self.use_state_after
+        )
         eve_sync_after_ban: SyncResult = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 eve_requester,
@@ -367,7 +395,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         initial_sync_result = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 alice_requester,
-                generate_sync_config(alice),
+                generate_sync_config(alice, use_state_after=self.use_state_after),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
             )
@@ -396,6 +424,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
                     filter_collection=FilterCollection(
                         self.hs, {"room": {"timeline": {"limit": 2}}}
                     ),
+                    use_state_after=self.use_state_after,
                 ),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
@@ -442,7 +471,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         initial_sync_result = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 alice_requester,
-                generate_sync_config(alice),
+                generate_sync_config(alice, use_state_after=self.use_state_after),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
             )
@@ -481,6 +510,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
                             }
                         },
                     ),
+                    use_state_after=self.use_state_after,
                 ),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
@@ -518,6 +548,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):

         ... and a filter that means we only return 1 event, represented by the dashed
         horizontal lines: `S2` must be included in the `state` section on the second sync.
+
+        When `use_state_after` is enabled, then we expect to see `s2` in the first sync.
         """
         alice = self.register_user("alice", "password")
         alice_tok = self.login(alice, "password")
@@ -528,7 +560,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         initial_sync_result = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 alice_requester,
-                generate_sync_config(alice),
+                generate_sync_config(alice, use_state_after=self.use_state_after),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
             )
@@ -554,6 +586,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
                    filter_collection=FilterCollection(
                        self.hs, {"room": {"timeline": {"limit": 1}}}
                    ),
+                    use_state_after=self.use_state_after,
                ),
                sync_version=SyncVersion.SYNC_V2,
                request_key=generate_request_key(),
@@ -567,6 +600,14 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
             [e.event_id for e in room_sync.timeline.events],
             [e3_event],
         )
+
+        if self.use_state_after:
+            # When using `state_after` we get told about s2 immediately
+            self.assertEqual(
+                [e.event_id for e in room_sync.state.values()],
+                [s2_event],
+            )
+        else:
             self.assertEqual(
                 [e.event_id for e in room_sync.state.values()],
                 [],
```
```diff
@@ -585,6 +626,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
                    filter_collection=FilterCollection(
                        self.hs, {"room": {"timeline": {"limit": 1}}}
                    ),
+                    use_state_after=self.use_state_after,
                ),
                sync_version=SyncVersion.SYNC_V2,
                request_key=generate_request_key(),
@@ -598,6 +640,15 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
             [e.event_id for e in room_sync.timeline.events],
             [e4_event],
         )
+
+        if self.use_state_after:
+            # When using `state_after` we got told about s2 previously, so we
+            # don't again.
+            self.assertEqual(
+                [e.event_id for e in room_sync.state.values()],
+                [],
+            )
+        else:
             self.assertEqual(
                 [e.event_id for e in room_sync.state.values()],
                 [s2_event],
@@ -638,6 +689,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):

         This is the last chance for us to tell the client about S2, so it *must* be
         included in the response.
+
+        When `use_state_after` is enabled, then we expect to see `s2` in the first sync.
         """
         alice = self.register_user("alice", "password")
         alice_tok = self.login(alice, "password")
@@ -648,7 +701,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         initial_sync_result = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 alice_requester,
-                generate_sync_config(alice),
+                generate_sync_config(alice, use_state_after=self.use_state_after),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
             )
@@ -673,6 +726,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
                    filter_collection=FilterCollection(
                        self.hs, {"room": {"timeline": {"limit": 1}}}
                    ),
+                    use_state_after=self.use_state_after,
                ),
                sync_version=SyncVersion.SYNC_V2,
                request_key=generate_request_key(),
@@ -684,6 +738,10 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
             [e.event_id for e in room_sync.timeline.events],
             [e3_event],
         )
+        if self.use_state_after:
+            # When using `state_after` we get told about s2 immediately
+            self.assertIn(s2_event, [e.event_id for e in room_sync.state.values()])
+        else:
             self.assertNotIn(s2_event, [e.event_id for e in room_sync.state.values()])

         # More events, E4 and E5
@@ -695,7 +753,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         incremental_sync = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 alice_requester,
-                generate_sync_config(alice),
+                generate_sync_config(alice, use_state_after=self.use_state_after),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
                 since_token=initial_sync_result.next_batch,
@@ -710,6 +768,15 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
             [e.event_id for e in room_sync.timeline.events],
             [e4_event, e5_event],
         )
+
+        if self.use_state_after:
+            # When using `state_after` we got told about s2 previously, so we
+            # don't again.
+            self.assertEqual(
+                [e.event_id for e in room_sync.state.values()],
+                [],
+            )
+        else:
             self.assertEqual(
                 [e.event_id for e in room_sync.state.values()],
                 [s2_event],
```
```diff
@@ -721,7 +788,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
             (True, False),
             (False, True),
             (True, True),
-        ]
+        ],
+        name_func=lambda func, num, p: f"{func.__name__}_{p.args[0]}_{p.args[1]}",
     )
     def test_archived_rooms_do_not_include_state_after_leave(
         self, initial_sync: bool, empty_timeline: bool
@@ -749,7 +817,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         initial_sync_result = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 bob_requester,
-                generate_sync_config(bob),
+                generate_sync_config(bob, use_state_after=self.use_state_after),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
             )
@@ -780,7 +848,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
             self.sync_handler.wait_for_sync_for_user(
                 bob_requester,
                 generate_sync_config(
-                    bob, filter_collection=FilterCollection(self.hs, filter_dict)
+                    bob,
+                    filter_collection=FilterCollection(self.hs, filter_dict),
+                    use_state_after=self.use_state_after,
                 ),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
@@ -791,7 +861,15 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         if empty_timeline:
             # The timeline should be empty
             self.assertEqual(sync_room_result.timeline.events, [])
+        else:
+            # The last three events in the timeline should be those leading up to the
+            # leave
+            self.assertEqual(
+                [e.event_id for e in sync_room_result.timeline.events[-3:]],
+                [before_message_event, before_state_event, leave_event],
+            )

+        if empty_timeline or self.use_state_after:
             # And the state should include the leave event...
             self.assertEqual(
                 sync_room_result.state[("m.room.member", bob)].event_id, leave_event
@@ -801,12 +879,6 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
                 sync_room_result.state[("test_state", "")].event_id, before_state_event
             )
         else:
-            # The last three events in the timeline should be those leading up to the
-            # leave
-            self.assertEqual(
-                [e.event_id for e in sync_room_result.timeline.events[-3:]],
-                [before_message_event, before_state_event, leave_event],
-            )
             # ... And the state should be empty
             self.assertEqual(sync_room_result.state, {})

```
```diff
@@ -879,7 +951,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         sync_result: SyncResult = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 create_requester(user),
-                generate_sync_config(user),
+                generate_sync_config(user, use_state_after=self.use_state_after),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
             )
@@ -928,7 +1000,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         private_sync_result: SyncResult = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 create_requester(user2),
-                generate_sync_config(user2),
+                generate_sync_config(user2, use_state_after=self.use_state_after),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
             )
@@ -954,7 +1026,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         sync_result: SyncResult = self.get_success(
             self.sync_handler.wait_for_sync_for_user(
                 create_requester(user),
-                generate_sync_config(user),
+                generate_sync_config(user, use_state_after=self.use_state_after),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
             )
@@ -991,7 +1063,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         sync_d = defer.ensureDeferred(
             self.sync_handler.wait_for_sync_for_user(
                 create_requester(user),
-                generate_sync_config(user),
+                generate_sync_config(user, use_state_after=self.use_state_after),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
                 since_token=since_token,
@@ -1046,7 +1118,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         sync_d = defer.ensureDeferred(
             self.sync_handler.wait_for_sync_for_user(
                 create_requester(user),
-                generate_sync_config(user),
+                generate_sync_config(user, use_state_after=self.use_state_after),
                 sync_version=SyncVersion.SYNC_V2,
                 request_key=generate_request_key(),
                 since_token=since_token,
@@ -1062,6 +1134,7 @@ def generate_sync_config(
     user_id: str,
     device_id: Optional[str] = "device_id",
     filter_collection: Optional[FilterCollection] = None,
+    use_state_after: bool = False,
 ) -> SyncConfig:
     """Generate a sync config (with a unique request key).

@@ -1070,6 +1143,7 @@ def generate_sync_config(
         device_id: device that is syncing. Defaults to "device_id".
         filter_collection: filter to apply. Defaults to the default filter (ie,
             return everything, with a default limit)
+        use_state_after: whether the `use_state_after` flag was set.
     """
     if filter_collection is None:
         filter_collection = Filtering(Mock()).DEFAULT_FILTER_COLLECTION
@@ -1079,4 +1153,5 @@ def generate_sync_config(
         filter_collection=filter_collection,
         is_guest=False,
         device_id=device_id,
+        use_state_after=use_state_after,
     )
```