Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-09 01:30:18 +00:00)

Compare commits: v1.111.0rc ... travis/rat

3 commits: 3d8d3305b2, 2617452444, 1cf3ff6b40
CHANGES.md (17 lines changed)

@@ -1,20 +1,3 @@
-# Synapse 1.111.0rc2 (2024-07-10)
-
-### Bugfixes
-
-- Fix bug where using `synapse.app.media_repository` worker configuration would break the new media endpoints. ([\#17420](https://github.com/element-hq/synapse/issues/17420))
-
-
-### Improved Documentation
-
-- Document the new federation media worker endpoints in the [upgrade notes](https://element-hq.github.io/synapse/v1.111/upgrade.html) and [worker docs](https://element-hq.github.io/synapse/v1.111/workers.html). ([\#17421](https://github.com/element-hq/synapse/issues/17421))
-
-
-### Internal Changes
-
-- Route authenticated federation media requests to media repository workers in Complement tests. ([\#17422](https://github.com/element-hq/synapse/issues/17422))
-
-
 # Synapse 1.111.0rc1 (2024-07-09)
 
 ### Features
changelog.d/17418.feature (new file, 1 line)

@@ -0,0 +1 @@
+Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
changelog.d/17426.misc (new file, 1 line)

@@ -0,0 +1 @@
+Fix documentation on `RateLimiter#record_action`.
debian/changelog (vendored, 6 lines changed)

@@ -1,9 +1,3 @@
-matrix-synapse-py3 (1.111.0~rc2) stable; urgency=medium
-
-  * New synapse release 1.111.0rc2.
-
- -- Synapse Packaging team <packages@matrix.org>  Wed, 10 Jul 2024 08:46:54 +0000
-
 matrix-synapse-py3 (1.111.0~rc1) stable; urgency=medium
 
   * New synapse release 1.111.0rc1.
@@ -126,7 +126,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
            "^/_synapse/admin/v1/media/.*$",
            "^/_synapse/admin/v1/quarantine_media/.*$",
            "^/_matrix/client/v1/media/.*$",
-           "^/_matrix/federation/v1/media/.*$",
        ],
        # The first configured media worker will run the media background jobs
        "shared_extra_conf": {
@@ -119,14 +119,13 @@ stacking them up. You can monitor the currently running background updates with
 
 # Upgrading to v1.111.0
 
-## New worker endpoints for authenticated client and federation media
+## New worker endpoints for authenticated client media
 
 [Media repository workers](./workers.md#synapseappmedia_repository) handling
-Media APIs can now handle the following endpoint patterns:
+Media APIs can now handle the following endpoint pattern:
 
 ```
 ^/_matrix/client/v1/media/.*$
-^/_matrix/federation/v1/media/.*$
 ```
 
 Please update your reverse proxy configuration.
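As a quick sanity check on a reverse proxy rule, path-based routing like the above can be expressed with the same regular expressions. A minimal sketch (illustrative only, not part of Synapse; the helper name is invented) using the client media patterns as they stand on this branch:

```python
import re

# Endpoint patterns a media worker handles on this branch (client media only;
# the federation pattern is removed in this diff).
MEDIA_WORKER_PATTERNS = [
    re.compile(r"^/_matrix/media/.*$"),
    re.compile(r"^/_matrix/client/v1/media/.*$"),
]


def routes_to_media_worker(path: str) -> bool:
    """Return True if a reverse proxy should send `path` to a media worker."""
    return any(pattern.match(path) for pattern in MEDIA_WORKER_PATTERNS)


assert routes_to_media_worker("/_matrix/client/v1/media/download/example.com/abc")
assert not routes_to_media_worker("/_matrix/client/v3/sync")
```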
@@ -740,7 +740,6 @@ Handles the media repository. It can handle all endpoints starting with:
 
     /_matrix/media/
     /_matrix/client/v1/media/
-    /_matrix/federation/v1/media/
 
 ... and the following regular expressions matching media-specific administration APIs:
 
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.111.0rc2"
+version = "1.111.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
@@ -236,9 +236,8 @@ class Ratelimiter:
             requester: The requester that is doing the action, if any.
             key: An arbitrary key used to classify an action. Defaults to the
                 requester's user ID.
-            n_actions: The number of times the user wants to do this action. If the user
-                cannot do all of the actions, the user's action count is not incremented
-                at all.
+            n_actions: The number of times the user performed the action. May be negative
+                to "refund" the rate limit.
             _time_now_s: The current time. Optional, defaults to the current time according
                 to self.clock. Only used by tests.
         """
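The corrected docstring says `n_actions` counts actions already performed and may be negative to refund them. A minimal standalone sketch of that bookkeeping (this is not Synapse's `Ratelimiter`; the class, method, and field names here are invented for illustration):

```python
import time
from collections import defaultdict
from typing import Dict, Optional, Tuple


class TinyActionCounter:
    """Counts actions per key within a time window (illustration only)."""

    def __init__(self, burst_count: int, window_s: float = 1.0) -> None:
        self.burst_count = burst_count
        self.window_s = window_s
        # key -> (action count, window start time)
        self._counts: Dict[str, Tuple[int, float]] = defaultdict(lambda: (0, 0.0))

    def record_action(
        self, key: str, n_actions: int = 1, _time_now_s: Optional[float] = None
    ) -> None:
        """Record that `key` performed `n_actions`. A negative value "refunds"
        previously recorded actions, mirroring the docstring above."""
        now = _time_now_s if _time_now_s is not None else time.monotonic()
        count, start = self._counts[key]
        if now - start > self.window_s:
            # The window has expired; start counting afresh.
            count, start = 0, now
        self._counts[key] = (max(0, count + n_actions), start)

    def can_do_action(self, key: str) -> bool:
        count, _ = self._counts[key]
        return count < self.burst_count


limiter = TinyActionCounter(burst_count=3)
limiter.record_action("@user:example.com", n_actions=3)
assert not limiter.can_do_action("@user:example.com")
limiter.record_action("@user:example.com", n_actions=-1)  # refund one action
assert limiter.can_do_action("@user:example.com")
```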
@@ -321,7 +321,7 @@ def register_servlets(
            servletclass == FederationMediaDownloadServlet
            or servletclass == FederationMediaThumbnailServlet
        ):
-            if not hs.config.media.can_load_media_repo:
+            if not hs.config.server.enable_media_repo:
                continue

            servletclass(
@@ -18,6 +18,7 @@
 #
 #
 import logging
+from itertools import chain
 from typing import TYPE_CHECKING, Any, Dict, Final, List, Optional, Set, Tuple
 
 import attr
@@ -464,6 +465,7 @@ class SlidingSyncHandler:
         membership_state_keys = room_sync_config.required_state_map.get(
             EventTypes.Member
         )
+        # Also see `StateFilter.must_await_full_state(...)` for comparison
         lazy_loading = (
             membership_state_keys is not None
             and len(membership_state_keys) == 1
@@ -1202,7 +1204,7 @@ class SlidingSyncHandler:
 
         # Figure out any stripped state events for invite/knocks. This allows the
         # potential joiner to identify the room.
-        stripped_state: List[JsonDict] = []
+        stripped_state: Optional[List[JsonDict]] = None
         if room_membership_for_user_at_to_token.membership in (
             Membership.INVITE,
             Membership.KNOCK,
@@ -1239,7 +1241,7 @@ class SlidingSyncHandler:
         # updates.
         initial = True
 
-        # Fetch the required state for the room
+        # Fetch the `required_state` for the room
         #
         # No `required_state` for invite/knock rooms (just `stripped_state`)
         #
@@ -1247,13 +1249,15 @@ class SlidingSyncHandler:
         # of membership. Currently, we have to make this optional because
         # `invite`/`knock` rooms only have `stripped_state`. See
         # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932
-        # Calculate the `StateFilter` based on the `required_state` for the room
         room_state: Optional[StateMap[EventBase]] = None
+        required_room_state: Optional[StateMap[EventBase]] = None
         if room_membership_for_user_at_to_token.membership not in (
             Membership.INVITE,
             Membership.KNOCK,
         ):
-            state_filter: Optional[StateFilter] = StateFilter.none()
+            # Calculate the `StateFilter` based on the `required_state` for the room
+            required_state_filter = StateFilter.none()
             # If we have a double wildcard ("*", "*") in the `required_state`, we need
             # to fetch all state for the room
             #
@@ -1276,7 +1280,7 @@ class SlidingSyncHandler:
             if StateValues.WILDCARD in room_sync_config.required_state_map.get(
                 StateValues.WILDCARD, set()
             ):
-                state_filter = StateFilter.all()
+                required_state_filter = StateFilter.all()
             # TODO: `StateFilter` currently doesn't support wildcard event types. We're
             # currently working around this by returning all state to the client but it
             # would be nice to fetch less from the database and return just what the
@@ -1285,7 +1289,7 @@ class SlidingSyncHandler:
                 room_sync_config.required_state_map.get(StateValues.WILDCARD)
                 is not None
             ):
-                state_filter = StateFilter.all()
+                required_state_filter = StateFilter.all()
             else:
                 required_state_types: List[Tuple[str, Optional[str]]] = []
                 for (
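The two hunks above collapse the filter to `StateFilter.all()` in two wildcard cases: a double wildcard `("*", "*")`, and a wildcard event type (which `StateFilter` can't yet express). A standalone sketch of that translation logic, assuming a simplified model where `None` stands in for "fetch all state" (Synapse's real `StateFilter` is richer; the function name here is invented):

```python
from typing import Dict, List, Optional, Set, Tuple

WILDCARD = "*"


def build_required_state_types(
    required_state_map: Dict[str, Set[str]],
) -> Optional[List[Tuple[str, Optional[str]]]]:
    """Translate a Sliding Sync `required_state` map into (event_type, state_key)
    pairs; None means "fetch all state", standing in for `StateFilter.all()`."""
    # Double wildcard ("*", "*"): fetch all state for the room.
    if WILDCARD in required_state_map.get(WILDCARD, set()):
        return None
    # Wildcard event type: can't be expressed as explicit pairs, so fall back
    # to fetching all state, as the diff's TODO describes.
    if required_state_map.get(WILDCARD) is not None:
        return None
    required: List[Tuple[str, Optional[str]]] = []
    for state_type, state_keys in required_state_map.items():
        for state_key in state_keys:
            # A wildcard state key means "every state_key of this event type".
            required.append((state_type, None if state_key == WILDCARD else state_key))
    return required


assert build_required_state_types({"m.room.name": {""}}) == [("m.room.name", "")]
assert build_required_state_types({"*": {"*"}}) is None  # double wildcard
```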
@@ -1317,51 +1321,88 @@ class SlidingSyncHandler:
                         else:
                             required_state_types.append((state_type, state_key))
 
-                state_filter = StateFilter.from_types(required_state_types)
+                required_state_filter = StateFilter.from_types(required_state_types)
 
-            # We can skip fetching state if we don't need any
-            if state_filter != StateFilter.none():
-                # We can return all of the state that was requested if we're doing an
-                # initial sync
-                if initial:
-                    # People shouldn't see past their leave/ban event
-                    if room_membership_for_user_at_to_token.membership in (
-                        Membership.LEAVE,
-                        Membership.BAN,
-                    ):
-                        room_state = await self.storage_controllers.state.get_state_at(
-                            room_id,
-                            stream_position=to_token.copy_and_replace(
-                                StreamKeyType.ROOM,
-                                room_membership_for_user_at_to_token.event_pos.to_room_stream_token(),
-                            ),
-                            state_filter=state_filter,
-                            # Partially-stated rooms should have all state events except for
-                            # the membership events and since we've already excluded
-                            # partially-stated rooms unless `required_state` only has
-                            # `["m.room.member", "$LAZY"]` for membership, we should be able
-                            # to retrieve everything requested. Plus we don't want to block
-                            # the whole sync waiting for this one room.
-                            await_full_state=False,
-                        )
-                    # Otherwise, we can get the latest current state in the room
-                    else:
-                        room_state = await self.storage_controllers.state.get_current_state(
-                            room_id,
-                            state_filter,
-                            # Partially-stated rooms should have all state events except for
-                            # the membership events and since we've already excluded
-                            # partially-stated rooms unless `required_state` only has
-                            # `["m.room.member", "$LAZY"]` for membership, we should be able
-                            # to retrieve everything requested. Plus we don't want to block
-                            # the whole sync waiting for this one room.
-                            await_full_state=False,
-                        )
-                        # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token`
-                else:
-                    # TODO: Once we can figure out if we've sent a room down this connection before,
-                    # we can return updates instead of the full required state.
-                    raise NotImplementedError()
+            # We need this base set of info for the response so let's just fetch it along
+            # with the `required_state` for the room
+            META_ROOM_STATE = [(EventTypes.Name, ""), (EventTypes.RoomAvatar, "")]
+            state_filter = StateFilter(
+                types=StateFilter.from_types(
+                    chain(META_ROOM_STATE, required_state_filter.to_types())
+                ).types,
+                include_others=required_state_filter.include_others,
+            )
+
+            # We can return all of the state that was requested if this was the first
+            # time we've sent the room down this connection.
+            if initial:
+                # People shouldn't see past their leave/ban event
+                if room_membership_for_user_at_to_token.membership in (
+                    Membership.LEAVE,
+                    Membership.BAN,
+                ):
+                    room_state = await self.storage_controllers.state.get_state_at(
+                        room_id,
+                        stream_position=to_token.copy_and_replace(
+                            StreamKeyType.ROOM,
+                            room_membership_for_user_at_to_token.event_pos.to_room_stream_token(),
+                        ),
+                        state_filter=state_filter,
+                        # Partially-stated rooms should have all state events except for
+                        # remote membership events. Since we've already excluded
+                        # partially-stated rooms unless `required_state` only has
+                        # `["m.room.member", "$LAZY"]` for membership, we should be able to
+                        # retrieve everything requested. When we're lazy-loading, if there
+                        # are some remote senders in the timeline, we should also have their
+                        # membership event because we had to auth that timeline event. Plus
+                        # we don't want to block the whole sync waiting for this one room.
+                        await_full_state=False,
+                    )
+                # Otherwise, we can get the latest current state in the room
+                else:
+                    room_state = await self.storage_controllers.state.get_current_state(
+                        room_id,
+                        state_filter,
+                        # Partially-stated rooms should have all state events except for
+                        # remote membership events. Since we've already excluded
+                        # partially-stated rooms unless `required_state` only has
+                        # `["m.room.member", "$LAZY"]` for membership, we should be able to
+                        # retrieve everything requested. When we're lazy-loading, if there
+                        # are some remote senders in the timeline, we should also have their
+                        # membership event because we had to auth that timeline event. Plus
+                        # we don't want to block the whole sync waiting for this one room.
+                        await_full_state=False,
+                    )
+                    # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token`
+            else:
+                # TODO: Once we can figure out if we've sent a room down this connection before,
+                # we can return updates instead of the full required state.
+                raise NotImplementedError()
+
+            if required_state_filter != StateFilter.none():
+                required_room_state = required_state_filter.filter_state(room_state)
+
+        # Find the room name and avatar from the state
+        room_name: Optional[str] = None
+        room_avatar: Optional[str] = None
+        if room_state is not None:
+            name_event = room_state.get((EventTypes.Name, ""))
+            if name_event is not None:
+                room_name = name_event.content.get("name")
+
+            avatar_event = room_state.get((EventTypes.RoomAvatar, ""))
+            if avatar_event is not None:
+                room_avatar = avatar_event.content.get("url")
+        elif stripped_state is not None:
+            for event in stripped_state:
+                if event["type"] == EventTypes.Name:
+                    room_name = event.get("content", {}).get("name")
+                elif event["type"] == EventTypes.RoomAvatar:
+                    room_avatar = event.get("content", {}).get("url")
+
+                # Found everything so we can stop looking
+                if room_name is not None and room_avatar is not None:
+                    break
 
         # Figure out the last bump event in the room
         last_bump_event_result = (
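The added code prefers the fetched room state and falls back to `stripped_state` for invite/knock rooms. A self-contained sketch of that fallback, using plain dicts for events (event shapes follow the Matrix spec; the helper name is invented for illustration):

```python
from typing import Any, Dict, List, Optional, Tuple

StateMap = Dict[Tuple[str, str], Dict[str, Any]]


def room_meta(
    room_state: Optional[StateMap],
    stripped_state: Optional[List[Dict[str, Any]]],
) -> Tuple[Optional[str], Optional[str]]:
    """Extract (room_name, room_avatar), preferring full state over stripped state."""
    room_name: Optional[str] = None
    room_avatar: Optional[str] = None
    if room_state is not None:
        name_event = room_state.get(("m.room.name", ""))
        if name_event is not None:
            room_name = name_event.get("content", {}).get("name")
        avatar_event = room_state.get(("m.room.avatar", ""))
        if avatar_event is not None:
            room_avatar = avatar_event.get("content", {}).get("url")
    elif stripped_state is not None:
        # Invite/knock rooms only carry stripped state events.
        for event in stripped_state:
            if event["type"] == "m.room.name":
                room_name = event.get("content", {}).get("name")
            elif event["type"] == "m.room.avatar":
                room_avatar = event.get("content", {}).get("url")
    return room_name, room_avatar


assert room_meta(None, [{"type": "m.room.name", "content": {"name": "my room"}}]) == (
    "my room",
    None,
)
```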
@@ -1378,16 +1419,16 @@ class SlidingSyncHandler:
             bump_stamp = bump_event_pos.stream
 
         return SlidingSyncResult.RoomResult(
-            # TODO: Dummy value
-            name=None,
-            # TODO: Dummy value
-            avatar=None,
+            name=room_name,
+            avatar=room_avatar,
             # TODO: Dummy value
             heroes=None,
             # TODO: Dummy value
             is_dm=False,
             initial=initial,
-            required_state=list(room_state.values()) if room_state else None,
+            required_state=(
+                list(required_room_state.values()) if required_room_state else None
+            ),
             timeline_events=timeline_events,
             bundled_aggregations=bundled_aggregations,
             stripped_state=stripped_state,
@@ -145,7 +145,7 @@ class ClientRestResource(JsonResource):
         password_policy.register_servlets(hs, client_resource)
         knock.register_servlets(hs, client_resource)
         appservice_ping.register_servlets(hs, client_resource)
-        if hs.config.media.can_load_media_repo:
+        if hs.config.server.enable_media_repo:
             from synapse.rest.client import media
 
             media.register_servlets(hs, client_resource)
@@ -1802,6 +1802,206 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
                 channel.json_body["lists"]["foo-list"],
             )
 
+    def test_rooms_meta_when_joined(self) -> None:
+        """
+        Test that the `rooms` `name` and `avatar` (soon to test `heroes`) are included
+        in the response when the user is joined to the room.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "name": "my super room",
+            },
+        )
+        # Set the room avatar URL
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://DUMMY_MEDIA_ID"},
+            tok=user2_tok,
+        )
+
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        "timeline_limit": 0,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Reflect the current state of the room
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["name"],
+            "my super room",
+            channel.json_body["rooms"][room_id1],
+        )
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["avatar"],
+            "mxc://DUMMY_MEDIA_ID",
+            channel.json_body["rooms"][room_id1],
+        )
+
+    def test_rooms_meta_when_invited(self) -> None:
+        """
+        Test that the `rooms` `name` and `avatar` (soon to test `heroes`) are included
+        in the response when the user is invited to the room.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "name": "my super room",
+            },
+        )
+        # Set the room avatar URL
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://DUMMY_MEDIA_ID"},
+            tok=user2_tok,
+        )
+
+        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        # Update the room name after user1 has been invited
+        self.helper.send_state(
+            room_id1,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+        # Update the room avatar URL after user1 has been invited
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        "timeline_limit": 0,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # This should still reflect the current state of the room even when the user is
+        # invited.
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["name"],
+            "my super duper room",
+            channel.json_body["rooms"][room_id1],
+        )
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["avatar"],
+            "mxc://UPDATED_DUMMY_MEDIA_ID",
+            channel.json_body["rooms"][room_id1],
+        )
+
+    def test_rooms_meta_when_banned(self) -> None:
+        """
+        Test that the `rooms` `name` and `avatar` (soon to test `heroes`) reflect the
+        state of the room when the user was banned (do not leak current state).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "name": "my super room",
+            },
+        )
+        # Set the room avatar URL
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://DUMMY_MEDIA_ID"},
+            tok=user2_tok,
+        )
+
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        # Update the room name after user1 was banned
+        self.helper.send_state(
+            room_id1,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+        # Update the room avatar URL after user1 was banned
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        "timeline_limit": 0,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Reflect the state of the room at the time of leaving
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["name"],
+            "my super room",
+            channel.json_body["rooms"][room_id1],
+        )
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["avatar"],
+            "mxc://DUMMY_MEDIA_ID",
+            channel.json_body["rooms"][room_id1],
+        )
+
     def test_rooms_limited_initial_sync(self) -> None:
         """
         Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit`
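The three new tests repeat the same minimal Sliding Sync request body. A sketch of a helper that builds it (the helper name and structure are illustrative, not part of the test suite):

```python
from typing import Any, Dict, List


def sliding_sync_body(ranges: List[List[int]], timeline_limit: int = 0) -> Dict[str, Any]:
    """Build the minimal Sliding Sync request body used by the tests above."""
    return {
        "lists": {
            "foo-list": {
                "ranges": ranges,
                "required_state": [],
                "timeline_limit": timeline_limit,
            }
        }
    }


assert sliding_sync_body([[0, 1]])["lists"]["foo-list"]["timeline_limit"] == 0
```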
@@ -2973,6 +3173,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
             },
             exact=True,
         )
+        self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state"))
 
     def test_rooms_required_state_incremental_sync(self) -> None:
         """
@@ -3027,6 +3228,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
             },
             exact=True,
         )
+        self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state"))
 
     def test_rooms_required_state_wildcard(self) -> None:
         """
@@ -3084,6 +3286,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
             state_map.values(),
             exact=True,
         )
+        self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state"))
 
     def test_rooms_required_state_wildcard_event_type(self) -> None:
         """
@@ -3147,6 +3350,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
             # events when the `event_type` is a wildcard.
             exact=False,
         )
+        self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state"))
 
     def test_rooms_required_state_wildcard_state_key(self) -> None:
         """
@@ -3192,6 +3396,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
             },
             exact=True,
         )
+        self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state"))
 
     def test_rooms_required_state_lazy_loading_room_members(self) -> None:
         """
@@ -3247,6 +3452,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
             },
             exact=True,
         )
+        self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state"))
 
     @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)])
     def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None:
@@ -3329,6 +3535,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
             },
             exact=True,
         )
+        self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state"))
 
     def test_rooms_required_state_combine_superset(self) -> None:
         """
@@ -3401,6 +3608,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
             },
             exact=True,
         )
+        self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state"))
 
     def test_rooms_required_state_partial_state(self) -> None:
         """